comment stringlengths 1 45k | method_body stringlengths 23 281k | target_code stringlengths 0 5.16k | method_body_after stringlengths 12 281k | context_before stringlengths 8 543k | context_after stringlengths 8 543k |
|---|---|---|---|---|---|
Can you please add screenshot for non-404 case? There will be standard otel attributes recorded for exception (`exception.type` and `exceptions.message`)? @trask I believe you wanted to have otel semantic exception attributes attached, they have changed and now we are populating new ones + old ones. Are you ok with cosmos removing `error.msg` and `error.type`? Do you believe we use them anywhere? | private void end(int statusCode, Throwable throwable, Context context) {
if (throwable != null) {
if (statusCode == HttpConstants.StatusCodes.NOTFOUND) {
tracer.setAttribute(TracerProvider.ERROR_MSG, "Not found exception", context);
tracer.setAttribute(TracerProvider.ERROR_TYPE, throwable.getClass().getName(), context);
tracer.end(statusCode, null, context);
} else {
tracer.setAttribute(TracerProvider.ERROR_MSG, throwable.getMessage(), context);
tracer.setAttribute(TracerProvider.ERROR_TYPE, throwable.getClass().getName(), context);
tracer.end(statusCode, throwable, context);
}
} else {
tracer.end(statusCode, null, context);
}
} | tracer.setAttribute(TracerProvider.ERROR_MSG, throwable.getMessage(), context); | private void end(int statusCode, Throwable throwable, Context context) {
if (throwable != null) {
if (throwable instanceof CosmosException) {
CosmosException cosmosException = (CosmosException) throwable;
if (statusCode == HttpConstants.StatusCodes.NOTFOUND && cosmosException.getSubStatusCode() == 0) {
tracer.end(statusCode, null, context);
return;
}
}
}
tracer.end(statusCode, throwable, context);
} | class TracerProvider {
private Tracer tracer;
private static final Logger LOGGER = LoggerFactory.getLogger(TracerProvider.class);
private static final ObjectMapper mapper = new ObjectMapper();
private final static String JSON_STRING = "JSON";
public final static String DB_TYPE_VALUE = "Cosmos";
public final static String DB_TYPE = "db.type";
public final static String DB_INSTANCE = "db.instance";
public final static String DB_URL = "db.url";
public static final String DB_STATEMENT = "db.statement";
public static final String ERROR_MSG = "error.msg";
public static final String ERROR_TYPE = "error.type";
public static final String COSMOS_CALL_DEPTH = "cosmosCallDepth";
public static final String COSMOS_CALL_DEPTH_VAL = "nested";
public static final int ERROR_CODE = 0;
public static final String RESOURCE_PROVIDER_NAME = "Microsoft.DocumentDB";
public final Duration CRUD_THRESHOLD_FOR_DIAGNOSTICS = Duration.ofMillis(100);
public final Duration QUERY_THRESHOLD_FOR_DIAGNOSTICS = Duration.ofMillis(500);
public TracerProvider(Tracer tracer) {
this.tracer = tracer;
}
public boolean isEnabled() {
return tracer != null;
}
/**
* For each tracer plugged into the SDK a new tracing span is created.
* <p>
* The {@code context} will be checked for containing information about a parent span. If a parent span is found the
* new span will be added as a child, otherwise the span will be created and added to the context and any downstream
* start calls will use the created span as the parent.
*
* @param context Additional metadata that is passed through the call stack.
* @return An updated context object.
*/
public Context startSpan(String methodName, String databaseId, String endpoint, Context context) {
Context local = Objects.requireNonNull(context, "'context' cannot be null.");
local = local.addData(AZ_TRACING_NAMESPACE_KEY, RESOURCE_PROVIDER_NAME);
local = tracer.start(methodName, local);
if (databaseId != null) {
tracer.setAttribute(TracerProvider.DB_INSTANCE, databaseId, local);
}
tracer.setAttribute(TracerProvider.DB_TYPE, DB_TYPE_VALUE, local);
tracer.setAttribute(TracerProvider.DB_URL, endpoint, local);
tracer.setAttribute(TracerProvider.DB_STATEMENT, methodName, local);
return local;
}
/**
* Adds an event to the current span with the provided {@code timestamp} and {@code attributes}.
* <p>This API does not provide any normalization if provided timestamps are out of range of the current
* span timeline</p>
* <p>Supported attribute values include String, double, boolean, long, String [], double [], long [].
* Any other Object value type and null values will be silently ignored.</p>
*
* @param name the name of the event.
* @param attributes the additional attributes to be set for the event.
* @param timestamp The instant, in UTC, at which the event will be associated to the span.
* @param context the call metadata containing information of the span to which the event should be associated with.
* @throws NullPointerException if {@code eventName} is {@code null}.
*/
public void addEvent(String name, Map<String, Object> attributes, OffsetDateTime timestamp, Context context) {
tracer.addEvent(name, attributes, timestamp, context);
}
/**
* Given a context containing the current tracing span the span is marked completed with status info from
* {@link Signal}. For each tracer plugged into the SDK the current tracing span is marked as completed.
*
* @param context Additional metadata that is passed through the call stack.
* @param signal The signal indicates the status and contains the metadata we need to end the tracing span.
*/
public <T extends CosmosResponse<? extends Resource>> void endSpan(Context context,
Signal<T> signal,
int statusCode) {
Objects.requireNonNull(context, "'context' cannot be null.");
Objects.requireNonNull(signal, "'signal' cannot be null.");
switch (signal.getType()) {
case ON_COMPLETE:
end(statusCode, null, context);
break;
case ON_ERROR:
Throwable throwable = null;
if (signal.hasError()) {
throwable = signal.getThrowable();
if (throwable instanceof CosmosException) {
CosmosException exception = (CosmosException) throwable;
statusCode = exception.getStatusCode();
}
}
end(statusCode, throwable, context);
break;
default:
break;
}
}
public <T extends CosmosResponse<?>> Mono<T> traceEnabledCosmosResponsePublisher(Mono<T> resultPublisher,
Context context,
String spanName,
String databaseId,
String endpoint) {
return traceEnabledPublisher(resultPublisher, context, spanName, databaseId, endpoint,
(T response) -> response.getStatusCode(), (T response) -> response.getDiagnostics(), null);
}
public Mono<CosmosBatchResponse> traceEnabledBatchResponsePublisher(Mono<CosmosBatchResponse> resultPublisher,
Context context,
String spanName,
String containerId,
String databaseId,
CosmosAsyncClient client,
ConsistencyLevel consistencyLevel,
OperationType operationType,
ResourceType resourceType) {
return publisherWithClientTelemetry(resultPublisher, context, spanName, containerId, databaseId,
BridgeInternal.getServiceEndpoint(client),
client,
consistencyLevel,
operationType,
resourceType,
CosmosBatchResponse::getStatusCode,
CosmosBatchResponse::getDiagnostics,
null);
}
public <T> Mono<CosmosItemResponse<T>> traceEnabledCosmosItemResponsePublisher(Mono<CosmosItemResponse<T>> resultPublisher,
Context context,
String spanName,
String containerId,
String databaseId,
CosmosAsyncClient client,
ConsistencyLevel consistencyLevel,
OperationType operationType,
ResourceType resourceType,
Duration thresholdForDiagnosticsOnTracer) {
return publisherWithClientTelemetry(resultPublisher, context, spanName, containerId, databaseId,
BridgeInternal.getServiceEndpoint(client),
client,
consistencyLevel,
operationType,
resourceType,
CosmosItemResponse::getStatusCode,
CosmosItemResponse::getDiagnostics,
thresholdForDiagnosticsOnTracer);
}
private <T> Mono<T> traceEnabledPublisher(Mono<T> resultPublisher,
Context context,
String spanName,
String databaseId,
String endpoint,
Function<T, Integer> statusCodeFunc,
Function<T, CosmosDiagnostics> diagnosticFunc,
Duration thresholdForDiagnosticsOnTracer) {
final AtomicReference<Context> parentContext = new AtomicReference<>(Context.NONE);
Optional<Object> callDepth = context.getData(COSMOS_CALL_DEPTH);
final boolean isNestedCall = callDepth.isPresent();
return resultPublisher
.doOnSubscribe(ignoredValue -> {
if (isEnabled() && !isNestedCall) {
parentContext.set(this.startSpan(spanName, databaseId, endpoint,
context));
}
}).doOnSuccess(response -> {
if (isEnabled() && !isNestedCall) {
CosmosDiagnostics cosmosDiagnostics = diagnosticFunc.apply(response);
try {
Duration threshold = thresholdForDiagnosticsOnTracer;
if(threshold == null) {
threshold = CRUD_THRESHOLD_FOR_DIAGNOSTICS;
}
if (cosmosDiagnostics != null
&& cosmosDiagnostics.getDuration() != null
&& cosmosDiagnostics.getDuration().compareTo(threshold) > 0) {
addDiagnosticsOnTracerEvent(cosmosDiagnostics, parentContext.get());
}
} catch (JsonProcessingException ex) {
LOGGER.warn("Error while serializing diagnostics for tracer", ex.getMessage());
}
this.endSpan(parentContext.get(), Signal.complete(), statusCodeFunc.apply(response));
}
}).doOnError(throwable -> {
if (isEnabled() && !isNestedCall) {
this.endSpan(parentContext.get(), Signal.error(throwable), ERROR_CODE);
}
});
}
private <T> Mono<T> publisherWithClientTelemetry(Mono<T> resultPublisher,
Context context,
String spanName,
String containerId,
String databaseId,
String endpoint,
CosmosAsyncClient client,
ConsistencyLevel consistencyLevel,
OperationType operationType,
ResourceType resourceType,
Function<T, Integer> statusCodeFunc,
Function<T, CosmosDiagnostics> diagnosticFunc,
Duration thresholdForDiagnosticsOnTracer) {
Mono<T> tracerMono = traceEnabledPublisher(resultPublisher, context, spanName, databaseId, endpoint, statusCodeFunc, diagnosticFunc, thresholdForDiagnosticsOnTracer);
return tracerMono
.doOnSuccess(response -> {
if (Configs.isClientTelemetryEnabled(BridgeInternal.isClientTelemetryEnabled(client)) && response instanceof CosmosItemResponse) {
@SuppressWarnings("unchecked")
CosmosItemResponse<T> itemResponse = (CosmosItemResponse<T>) response;
fillClientTelemetry(client, itemResponse.getDiagnostics(), itemResponse.getStatusCode(),
ModelBridgeInternal.getPayloadLength(itemResponse), containerId,
databaseId, operationType, resourceType, consistencyLevel,
(float) itemResponse.getRequestCharge());
} else if (Configs.isClientTelemetryEnabled(BridgeInternal.isClientTelemetryEnabled(client)) && response instanceof CosmosBatchResponse) {
@SuppressWarnings("unchecked")
CosmosBatchResponse cosmosBatchResponse = (CosmosBatchResponse) response;
fillClientTelemetry(client, cosmosBatchResponse.getDiagnostics(), cosmosBatchResponse.getStatusCode(),
ModelBridgeInternal.getPayloadLength(cosmosBatchResponse), containerId,
databaseId, operationType, resourceType, consistencyLevel,
(float) cosmosBatchResponse.getRequestCharge());
}
}).doOnError(throwable -> {
if (Configs.isClientTelemetryEnabled(BridgeInternal.isClientTelemetryEnabled(client)) && throwable instanceof CosmosException) {
CosmosException cosmosException = (CosmosException) throwable;
fillClientTelemetry(client, cosmosException.getDiagnostics(), cosmosException.getStatusCode(),
null, containerId,
databaseId, operationType, resourceType, consistencyLevel,
(float) cosmosException.getRequestCharge());
}
});
}
private void fillClientTelemetry(CosmosAsyncClient cosmosAsyncClient,
CosmosDiagnostics cosmosDiagnostics,
int statusCode,
Integer objectSize,
String containerId,
String databaseId,
OperationType operationType,
ResourceType resourceType,
ConsistencyLevel consistencyLevel,
float requestCharge) {
ClientTelemetry telemetry = BridgeInternal.getContextClient(cosmosAsyncClient).getClientTelemetry();
ReportPayload reportPayloadLatency = createReportPayload(cosmosAsyncClient, cosmosDiagnostics,
statusCode, objectSize, containerId, databaseId
, operationType, resourceType, consistencyLevel, ClientTelemetry.REQUEST_LATENCY_NAME,
ClientTelemetry.REQUEST_LATENCY_UNIT);
ConcurrentDoubleHistogram latencyHistogram = telemetry.getClientTelemetryInfo().getOperationInfoMap().get(reportPayloadLatency);
if (latencyHistogram != null) {
ClientTelemetry.recordValue(latencyHistogram, cosmosDiagnostics.getDuration().toNanos()/1000);
} else {
if (statusCode >= HttpConstants.StatusCodes.MINIMUM_SUCCESS_STATUSCODE && statusCode <= HttpConstants.StatusCodes.MAXIMUM_SUCCESS_STATUSCODE) {
latencyHistogram = new ConcurrentDoubleHistogram(ClientTelemetry.REQUEST_LATENCY_MAX_MICRO_SEC, ClientTelemetry.REQUEST_LATENCY_SUCCESS_PRECISION);
} else {
latencyHistogram = new ConcurrentDoubleHistogram(ClientTelemetry.REQUEST_LATENCY_MAX_MICRO_SEC, ClientTelemetry.REQUEST_LATENCY_FAILURE_PRECISION);
}
latencyHistogram.setAutoResize(true);
ClientTelemetry.recordValue(latencyHistogram, cosmosDiagnostics.getDuration().toNanos()/1000);
telemetry.getClientTelemetryInfo().getOperationInfoMap().put(reportPayloadLatency, latencyHistogram);
}
ReportPayload reportPayloadRequestCharge = createReportPayload(cosmosAsyncClient, cosmosDiagnostics,
statusCode, objectSize, containerId, databaseId
, operationType, resourceType, consistencyLevel, ClientTelemetry.REQUEST_CHARGE_NAME, ClientTelemetry.REQUEST_CHARGE_UNIT);
ConcurrentDoubleHistogram requestChargeHistogram = telemetry.getClientTelemetryInfo().getOperationInfoMap().get(reportPayloadRequestCharge);
if (requestChargeHistogram != null) {
ClientTelemetry.recordValue(requestChargeHistogram, requestCharge);
} else {
requestChargeHistogram = new ConcurrentDoubleHistogram(ClientTelemetry.REQUEST_CHARGE_MAX, ClientTelemetry.REQUEST_CHARGE_PRECISION);
requestChargeHistogram.setAutoResize(true);
ClientTelemetry.recordValue(requestChargeHistogram, requestCharge);
telemetry.getClientTelemetryInfo().getOperationInfoMap().put(reportPayloadRequestCharge,
requestChargeHistogram);
}
}
private ReportPayload createReportPayload(CosmosAsyncClient cosmosAsyncClient,
CosmosDiagnostics cosmosDiagnostics,
int statusCode,
Integer objectSize,
String containerId,
String databaseId,
OperationType operationType,
ResourceType resourceType,
ConsistencyLevel consistencyLevel,
String metricsName,
String unitName) {
ReportPayload reportPayload = new ReportPayload(metricsName, unitName);
reportPayload.setRegionsContacted(BridgeInternal.getRegionsContacted(cosmosDiagnostics).toString());
reportPayload.setConsistency(consistencyLevel == null ?
BridgeInternal.getContextClient(cosmosAsyncClient).getConsistencyLevel() :
consistencyLevel);
if (objectSize != null) {
reportPayload.setGreaterThan1Kb(objectSize > ClientTelemetry.ONE_KB_TO_BYTES);
}
reportPayload.setDatabaseName(databaseId);
reportPayload.setContainerName(containerId);
reportPayload.setOperation(operationType);
reportPayload.setResource(resourceType);
reportPayload.setStatusCode(statusCode);
return reportPayload;
}
private void addDiagnosticsOnTracerEvent(CosmosDiagnostics cosmosDiagnostics, Context context) throws JsonProcessingException {
if (cosmosDiagnostics == null) {
return;
}
ClientSideRequestStatistics clientSideRequestStatistics =
BridgeInternal.getClientSideRequestStatics(cosmosDiagnostics);
Map<String, Object> attributes = null;
int diagnosticsCounter = 1;
for (ClientSideRequestStatistics.StoreResponseStatistics storeResponseStatistics :
clientSideRequestStatistics.getResponseStatisticsList()) {
attributes = new HashMap<>();
attributes.put(JSON_STRING, mapper.writeValueAsString(storeResponseStatistics));
Iterator<RequestTimeline.Event> eventIterator = null;
try {
if (storeResponseStatistics.getStoreResult() != null) {
eventIterator =
DirectBridgeInternal.getRequestTimeline(storeResponseStatistics.getStoreResult().toResponse()).iterator();
}
} catch (CosmosException ex) {
eventIterator = BridgeInternal.getRequestTimeline(ex).iterator();
}
OffsetDateTime requestStartTime = OffsetDateTime.ofInstant(storeResponseStatistics.getRequestResponseTimeUTC()
, ZoneOffset.UTC);
if (eventIterator != null) {
while (eventIterator.hasNext()) {
RequestTimeline.Event event = eventIterator.next();
if (event.getName().equals("created")) {
requestStartTime = OffsetDateTime.ofInstant(event.getStartTime(), ZoneOffset.UTC);
break;
}
}
}
this.addEvent("StoreResponse" + diagnosticsCounter++, attributes, requestStartTime, context);
}
diagnosticsCounter = 1;
for (ClientSideRequestStatistics.StoreResponseStatistics statistics :
ClientSideRequestStatistics.getCappedSupplementalResponseStatisticsList(clientSideRequestStatistics.getSupplementalResponseStatisticsList())) {
attributes = new HashMap<>();
attributes.put(JSON_STRING, mapper.writeValueAsString(statistics));
OffsetDateTime requestStartTime = OffsetDateTime.ofInstant(statistics.getRequestResponseTimeUTC(),
ZoneOffset.UTC);
if (statistics.getStoreResult() != null) {
Iterator<RequestTimeline.Event> eventIterator =
DirectBridgeInternal.getRequestTimeline(statistics.getStoreResult().toResponse()).iterator();
while (eventIterator.hasNext()) {
RequestTimeline.Event event = eventIterator.next();
if (event.getName().equals("created")) {
requestStartTime = OffsetDateTime.ofInstant(event.getStartTime(), ZoneOffset.UTC);
break;
}
}
}
this.addEvent("Supplemental StoreResponse" + diagnosticsCounter++, attributes, requestStartTime, context);
}
if (clientSideRequestStatistics.getGatewayStatistics() != null) {
attributes = new HashMap<>();
attributes.put(JSON_STRING,
mapper.writeValueAsString(clientSideRequestStatistics.getGatewayStatistics()));
OffsetDateTime requestStartTime =
OffsetDateTime.ofInstant(clientSideRequestStatistics.getRequestStartTimeUTC(), ZoneOffset.UTC);
if (clientSideRequestStatistics.getGatewayStatistics().getRequestTimeline() != null) {
Iterator<RequestTimeline.Event> eventIterator =
clientSideRequestStatistics.getGatewayStatistics().getRequestTimeline().iterator();
while (eventIterator.hasNext()) {
RequestTimeline.Event event = eventIterator.next();
if (event.getName().equals("created")) {
requestStartTime = OffsetDateTime.ofInstant(event.getStartTime(), ZoneOffset.UTC);
break;
}
}
}
this.addEvent("GatewayStatistics", attributes, requestStartTime, context);
}
if (clientSideRequestStatistics.getRetryContext().getRetryStartTime() != null) {
attributes = new HashMap<>();
attributes.put(JSON_STRING,
mapper.writeValueAsString(clientSideRequestStatistics.getRetryContext()));
this.addEvent("Retry Context", attributes,
OffsetDateTime.ofInstant(clientSideRequestStatistics.getRetryContext().getRetryStartTime(),
ZoneOffset.UTC), context);
}
diagnosticsCounter = 1;
for (ClientSideRequestStatistics.AddressResolutionStatistics addressResolutionStatistics :
clientSideRequestStatistics.getAddressResolutionStatistics().values()) {
attributes = new HashMap<>();
attributes.put(JSON_STRING, mapper.writeValueAsString(addressResolutionStatistics));
this.addEvent("AddressResolutionStatistics" + diagnosticsCounter++, attributes,
OffsetDateTime.ofInstant(addressResolutionStatistics.getStartTimeUTC(), ZoneOffset.UTC), context);
}
if (clientSideRequestStatistics.getSerializationDiagnosticsContext().serializationDiagnosticsList != null) {
for (SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics :
clientSideRequestStatistics.getSerializationDiagnosticsContext().serializationDiagnosticsList) {
attributes = new HashMap<>();
attributes.put(JSON_STRING, mapper.writeValueAsString(serializationDiagnostics));
this.addEvent("SerializationDiagnostics " + serializationDiagnostics.serializationType, attributes,
OffsetDateTime.ofInstant(serializationDiagnostics.startTimeUTC, ZoneOffset.UTC), context);
}
}
attributes = new HashMap<>();
attributes.put(JSON_STRING,
mapper.writeValueAsString(clientSideRequestStatistics.getRegionsContacted()));
this.addEvent("RegionContacted", attributes,
OffsetDateTime.ofInstant(clientSideRequestStatistics.getRequestStartTimeUTC(), ZoneOffset.UTC), context);
attributes = new HashMap<>();
attributes.put(JSON_STRING,
mapper.writeValueAsString(ClientSideRequestStatistics.fetchSystemInformation()));
this.addEvent("SystemInformation", attributes,
OffsetDateTime.ofInstant(clientSideRequestStatistics.getRequestStartTimeUTC(), ZoneOffset.UTC), context);
attributes = new HashMap<>();
attributes.put(JSON_STRING,
mapper.writeValueAsString(clientSideRequestStatistics.getDiagnosticsClientContext()));
this.addEvent("ClientCfgs", attributes,
OffsetDateTime.ofInstant(clientSideRequestStatistics.getRequestStartTimeUTC(), ZoneOffset.UTC), context);
}
} | class TracerProvider {
private Tracer tracer;
private static final Logger LOGGER = LoggerFactory.getLogger(TracerProvider.class);
private static final ObjectMapper mapper = new ObjectMapper();
private final static String JSON_STRING = "JSON";
public final static String DB_TYPE_VALUE = "Cosmos";
public final static String DB_TYPE = "db.type";
public final static String DB_INSTANCE = "db.instance";
public final static String DB_URL = "db.url";
public static final String DB_STATEMENT = "db.statement";
public static final String COSMOS_CALL_DEPTH = "cosmosCallDepth";
public static final String COSMOS_CALL_DEPTH_VAL = "nested";
public static final int ERROR_CODE = 0;
public static final String RESOURCE_PROVIDER_NAME = "Microsoft.DocumentDB";
public final Duration CRUD_THRESHOLD_FOR_DIAGNOSTICS = Duration.ofMillis(100);
public final Duration QUERY_THRESHOLD_FOR_DIAGNOSTICS = Duration.ofMillis(500);
public TracerProvider(Tracer tracer) {
this.tracer = tracer;
}
public boolean isEnabled() {
return tracer != null;
}
/**
* For each tracer plugged into the SDK a new tracing span is created.
* <p>
* The {@code context} will be checked for containing information about a parent span. If a parent span is found the
* new span will be added as a child, otherwise the span will be created and added to the context and any downstream
* start calls will use the created span as the parent.
*
* @param context Additional metadata that is passed through the call stack.
* @return An updated context object.
*/
public Context startSpan(String methodName, String databaseId, String endpoint, Context context) {
Context local = Objects.requireNonNull(context, "'context' cannot be null.");
local = local.addData(AZ_TRACING_NAMESPACE_KEY, RESOURCE_PROVIDER_NAME);
local = tracer.start(methodName, local);
if (databaseId != null) {
tracer.setAttribute(TracerProvider.DB_INSTANCE, databaseId, local);
}
tracer.setAttribute(TracerProvider.DB_TYPE, DB_TYPE_VALUE, local);
tracer.setAttribute(TracerProvider.DB_URL, endpoint, local);
tracer.setAttribute(TracerProvider.DB_STATEMENT, methodName, local);
return local;
}
/**
* Adds an event to the current span with the provided {@code timestamp} and {@code attributes}.
* <p>This API does not provide any normalization if provided timestamps are out of range of the current
* span timeline</p>
* <p>Supported attribute values include String, double, boolean, long, String [], double [], long [].
* Any other Object value type and null values will be silently ignored.</p>
*
* @param name the name of the event.
* @param attributes the additional attributes to be set for the event.
* @param timestamp The instant, in UTC, at which the event will be associated to the span.
* @param context the call metadata containing information of the span to which the event should be associated with.
* @throws NullPointerException if {@code eventName} is {@code null}.
*/
public void addEvent(String name, Map<String, Object> attributes, OffsetDateTime timestamp, Context context) {
tracer.addEvent(name, attributes, timestamp, context);
}
/**
* Given a context containing the current tracing span the span is marked completed with status info from
* {@link Signal}. For each tracer plugged into the SDK the current tracing span is marked as completed.
*
* @param context Additional metadata that is passed through the call stack.
* @param signal The signal indicates the status and contains the metadata we need to end the tracing span.
*/
public <T extends CosmosResponse<? extends Resource>> void endSpan(Context context,
Signal<T> signal,
int statusCode) {
Objects.requireNonNull(context, "'context' cannot be null.");
Objects.requireNonNull(signal, "'signal' cannot be null.");
switch (signal.getType()) {
case ON_COMPLETE:
end(statusCode, null, context);
break;
case ON_ERROR:
Throwable throwable = null;
if (signal.hasError()) {
throwable = signal.getThrowable();
if (throwable instanceof CosmosException) {
CosmosException exception = (CosmosException) throwable;
statusCode = exception.getStatusCode();
}
}
end(statusCode, throwable, context);
break;
default:
break;
}
}
public <T extends CosmosResponse<?>> Mono<T> traceEnabledCosmosResponsePublisher(Mono<T> resultPublisher,
Context context,
String spanName,
String databaseId,
String endpoint) {
return traceEnabledPublisher(resultPublisher, context, spanName, databaseId, endpoint,
(T response) -> response.getStatusCode(), (T response) -> response.getDiagnostics(), null);
}
public Mono<CosmosBatchResponse> traceEnabledBatchResponsePublisher(Mono<CosmosBatchResponse> resultPublisher,
Context context,
String spanName,
String containerId,
String databaseId,
CosmosAsyncClient client,
ConsistencyLevel consistencyLevel,
OperationType operationType,
ResourceType resourceType) {
return publisherWithClientTelemetry(resultPublisher, context, spanName, containerId, databaseId,
BridgeInternal.getServiceEndpoint(client),
client,
consistencyLevel,
operationType,
resourceType,
CosmosBatchResponse::getStatusCode,
CosmosBatchResponse::getDiagnostics,
null);
}
public <T> Mono<CosmosItemResponse<T>> traceEnabledCosmosItemResponsePublisher(Mono<CosmosItemResponse<T>> resultPublisher,
Context context,
String spanName,
String containerId,
String databaseId,
CosmosAsyncClient client,
ConsistencyLevel consistencyLevel,
OperationType operationType,
ResourceType resourceType,
Duration thresholdForDiagnosticsOnTracer) {
return publisherWithClientTelemetry(resultPublisher, context, spanName, containerId, databaseId,
BridgeInternal.getServiceEndpoint(client),
client,
consistencyLevel,
operationType,
resourceType,
CosmosItemResponse::getStatusCode,
CosmosItemResponse::getDiagnostics,
thresholdForDiagnosticsOnTracer);
}
private <T> Mono<T> traceEnabledPublisher(Mono<T> resultPublisher,
Context context,
String spanName,
String databaseId,
String endpoint,
Function<T, Integer> statusCodeFunc,
Function<T, CosmosDiagnostics> diagnosticFunc,
Duration thresholdForDiagnosticsOnTracer) {
final AtomicReference<Context> parentContext = new AtomicReference<>(Context.NONE);
Optional<Object> callDepth = context.getData(COSMOS_CALL_DEPTH);
final boolean isNestedCall = callDepth.isPresent();
return resultPublisher
.doOnSubscribe(ignoredValue -> {
if (isEnabled() && !isNestedCall) {
parentContext.set(this.startSpan(spanName, databaseId, endpoint,
context));
}
}).doOnSuccess(response -> {
if (isEnabled() && !isNestedCall) {
CosmosDiagnostics cosmosDiagnostics = diagnosticFunc.apply(response);
try {
Duration threshold = thresholdForDiagnosticsOnTracer;
if(threshold == null) {
threshold = CRUD_THRESHOLD_FOR_DIAGNOSTICS;
}
if (cosmosDiagnostics != null
&& cosmosDiagnostics.getDuration() != null
&& cosmosDiagnostics.getDuration().compareTo(threshold) > 0) {
addDiagnosticsOnTracerEvent(cosmosDiagnostics, parentContext.get());
}
} catch (JsonProcessingException ex) {
LOGGER.warn("Error while serializing diagnostics for tracer", ex.getMessage());
}
this.endSpan(parentContext.get(), Signal.complete(), statusCodeFunc.apply(response));
}
}).doOnError(throwable -> {
if (isEnabled() && !isNestedCall) {
this.endSpan(parentContext.get(), Signal.error(throwable), ERROR_CODE);
}
});
}
private <T> Mono<T> publisherWithClientTelemetry(Mono<T> resultPublisher,
Context context,
String spanName,
String containerId,
String databaseId,
String endpoint,
CosmosAsyncClient client,
ConsistencyLevel consistencyLevel,
OperationType operationType,
ResourceType resourceType,
Function<T, Integer> statusCodeFunc,
Function<T, CosmosDiagnostics> diagnosticFunc,
Duration thresholdForDiagnosticsOnTracer) {
Mono<T> tracerMono = traceEnabledPublisher(resultPublisher, context, spanName, databaseId, endpoint, statusCodeFunc, diagnosticFunc, thresholdForDiagnosticsOnTracer);
return tracerMono
.doOnSuccess(response -> {
if (Configs.isClientTelemetryEnabled(BridgeInternal.isClientTelemetryEnabled(client)) && response instanceof CosmosItemResponse) {
@SuppressWarnings("unchecked")
CosmosItemResponse<T> itemResponse = (CosmosItemResponse<T>) response;
fillClientTelemetry(client, itemResponse.getDiagnostics(), itemResponse.getStatusCode(),
ModelBridgeInternal.getPayloadLength(itemResponse), containerId,
databaseId, operationType, resourceType, consistencyLevel,
(float) itemResponse.getRequestCharge());
} else if (Configs.isClientTelemetryEnabled(BridgeInternal.isClientTelemetryEnabled(client)) && response instanceof CosmosBatchResponse) {
@SuppressWarnings("unchecked")
CosmosBatchResponse cosmosBatchResponse = (CosmosBatchResponse) response;
fillClientTelemetry(client, cosmosBatchResponse.getDiagnostics(), cosmosBatchResponse.getStatusCode(),
ModelBridgeInternal.getPayloadLength(cosmosBatchResponse), containerId,
databaseId, operationType, resourceType, consistencyLevel,
(float) cosmosBatchResponse.getRequestCharge());
}
}).doOnError(throwable -> {
if (Configs.isClientTelemetryEnabled(BridgeInternal.isClientTelemetryEnabled(client)) && throwable instanceof CosmosException) {
CosmosException cosmosException = (CosmosException) throwable;
fillClientTelemetry(client, cosmosException.getDiagnostics(), cosmosException.getStatusCode(),
null, containerId,
databaseId, operationType, resourceType, consistencyLevel,
(float) cosmosException.getRequestCharge());
}
});
}
private void fillClientTelemetry(CosmosAsyncClient cosmosAsyncClient,
CosmosDiagnostics cosmosDiagnostics,
int statusCode,
Integer objectSize,
String containerId,
String databaseId,
OperationType operationType,
ResourceType resourceType,
ConsistencyLevel consistencyLevel,
float requestCharge) {
ClientTelemetry telemetry = BridgeInternal.getContextClient(cosmosAsyncClient).getClientTelemetry();
ReportPayload reportPayloadLatency = createReportPayload(cosmosAsyncClient, cosmosDiagnostics,
statusCode, objectSize, containerId, databaseId
, operationType, resourceType, consistencyLevel, ClientTelemetry.REQUEST_LATENCY_NAME,
ClientTelemetry.REQUEST_LATENCY_UNIT);
ConcurrentDoubleHistogram latencyHistogram = telemetry.getClientTelemetryInfo().getOperationInfoMap().get(reportPayloadLatency);
if (latencyHistogram != null) {
ClientTelemetry.recordValue(latencyHistogram, cosmosDiagnostics.getDuration().toNanos()/1000);
} else {
if (statusCode >= HttpConstants.StatusCodes.MINIMUM_SUCCESS_STATUSCODE && statusCode <= HttpConstants.StatusCodes.MAXIMUM_SUCCESS_STATUSCODE) {
latencyHistogram = new ConcurrentDoubleHistogram(ClientTelemetry.REQUEST_LATENCY_MAX_MICRO_SEC, ClientTelemetry.REQUEST_LATENCY_SUCCESS_PRECISION);
} else {
latencyHistogram = new ConcurrentDoubleHistogram(ClientTelemetry.REQUEST_LATENCY_MAX_MICRO_SEC, ClientTelemetry.REQUEST_LATENCY_FAILURE_PRECISION);
}
latencyHistogram.setAutoResize(true);
ClientTelemetry.recordValue(latencyHistogram, cosmosDiagnostics.getDuration().toNanos()/1000);
telemetry.getClientTelemetryInfo().getOperationInfoMap().put(reportPayloadLatency, latencyHistogram);
}
ReportPayload reportPayloadRequestCharge = createReportPayload(cosmosAsyncClient, cosmosDiagnostics,
statusCode, objectSize, containerId, databaseId
, operationType, resourceType, consistencyLevel, ClientTelemetry.REQUEST_CHARGE_NAME, ClientTelemetry.REQUEST_CHARGE_UNIT);
ConcurrentDoubleHistogram requestChargeHistogram = telemetry.getClientTelemetryInfo().getOperationInfoMap().get(reportPayloadRequestCharge);
if (requestChargeHistogram != null) {
ClientTelemetry.recordValue(requestChargeHistogram, requestCharge);
} else {
requestChargeHistogram = new ConcurrentDoubleHistogram(ClientTelemetry.REQUEST_CHARGE_MAX, ClientTelemetry.REQUEST_CHARGE_PRECISION);
requestChargeHistogram.setAutoResize(true);
ClientTelemetry.recordValue(requestChargeHistogram, requestCharge);
telemetry.getClientTelemetryInfo().getOperationInfoMap().put(reportPayloadRequestCharge,
requestChargeHistogram);
}
}
private ReportPayload createReportPayload(CosmosAsyncClient cosmosAsyncClient,
CosmosDiagnostics cosmosDiagnostics,
int statusCode,
Integer objectSize,
String containerId,
String databaseId,
OperationType operationType,
ResourceType resourceType,
ConsistencyLevel consistencyLevel,
String metricsName,
String unitName) {
ReportPayload reportPayload = new ReportPayload(metricsName, unitName);
reportPayload.setRegionsContacted(BridgeInternal.getRegionsContacted(cosmosDiagnostics).toString());
reportPayload.setConsistency(consistencyLevel == null ?
BridgeInternal.getContextClient(cosmosAsyncClient).getConsistencyLevel() :
consistencyLevel);
if (objectSize != null) {
reportPayload.setGreaterThan1Kb(objectSize > ClientTelemetry.ONE_KB_TO_BYTES);
}
reportPayload.setDatabaseName(databaseId);
reportPayload.setContainerName(containerId);
reportPayload.setOperation(operationType);
reportPayload.setResource(resourceType);
reportPayload.setStatusCode(statusCode);
return reportPayload;
}
private void addDiagnosticsOnTracerEvent(CosmosDiagnostics cosmosDiagnostics, Context context) throws JsonProcessingException {
if (cosmosDiagnostics == null) {
return;
}
ClientSideRequestStatistics clientSideRequestStatistics =
BridgeInternal.getClientSideRequestStatics(cosmosDiagnostics);
Map<String, Object> attributes = null;
int diagnosticsCounter = 1;
for (ClientSideRequestStatistics.StoreResponseStatistics storeResponseStatistics :
clientSideRequestStatistics.getResponseStatisticsList()) {
attributes = new HashMap<>();
attributes.put(JSON_STRING, mapper.writeValueAsString(storeResponseStatistics));
Iterator<RequestTimeline.Event> eventIterator = null;
try {
if (storeResponseStatistics.getStoreResult() != null) {
eventIterator =
DirectBridgeInternal.getRequestTimeline(storeResponseStatistics.getStoreResult().toResponse()).iterator();
}
} catch (CosmosException ex) {
eventIterator = BridgeInternal.getRequestTimeline(ex).iterator();
}
OffsetDateTime requestStartTime = OffsetDateTime.ofInstant(storeResponseStatistics.getRequestResponseTimeUTC()
, ZoneOffset.UTC);
if (eventIterator != null) {
while (eventIterator.hasNext()) {
RequestTimeline.Event event = eventIterator.next();
if (event.getName().equals("created")) {
requestStartTime = OffsetDateTime.ofInstant(event.getStartTime(), ZoneOffset.UTC);
break;
}
}
}
this.addEvent("StoreResponse" + diagnosticsCounter++, attributes, requestStartTime, context);
}
diagnosticsCounter = 1;
for (ClientSideRequestStatistics.StoreResponseStatistics statistics :
ClientSideRequestStatistics.getCappedSupplementalResponseStatisticsList(clientSideRequestStatistics.getSupplementalResponseStatisticsList())) {
attributes = new HashMap<>();
attributes.put(JSON_STRING, mapper.writeValueAsString(statistics));
OffsetDateTime requestStartTime = OffsetDateTime.ofInstant(statistics.getRequestResponseTimeUTC(),
ZoneOffset.UTC);
if (statistics.getStoreResult() != null) {
Iterator<RequestTimeline.Event> eventIterator =
DirectBridgeInternal.getRequestTimeline(statistics.getStoreResult().toResponse()).iterator();
while (eventIterator.hasNext()) {
RequestTimeline.Event event = eventIterator.next();
if (event.getName().equals("created")) {
requestStartTime = OffsetDateTime.ofInstant(event.getStartTime(), ZoneOffset.UTC);
break;
}
}
}
this.addEvent("Supplemental StoreResponse" + diagnosticsCounter++, attributes, requestStartTime, context);
}
if (clientSideRequestStatistics.getGatewayStatistics() != null) {
attributes = new HashMap<>();
attributes.put(JSON_STRING,
mapper.writeValueAsString(clientSideRequestStatistics.getGatewayStatistics()));
OffsetDateTime requestStartTime =
OffsetDateTime.ofInstant(clientSideRequestStatistics.getRequestStartTimeUTC(), ZoneOffset.UTC);
if (clientSideRequestStatistics.getGatewayStatistics().getRequestTimeline() != null) {
Iterator<RequestTimeline.Event> eventIterator =
clientSideRequestStatistics.getGatewayStatistics().getRequestTimeline().iterator();
while (eventIterator.hasNext()) {
RequestTimeline.Event event = eventIterator.next();
if (event.getName().equals("created")) {
requestStartTime = OffsetDateTime.ofInstant(event.getStartTime(), ZoneOffset.UTC);
break;
}
}
}
this.addEvent("GatewayStatistics", attributes, requestStartTime, context);
}
if (clientSideRequestStatistics.getRetryContext().getRetryStartTime() != null) {
attributes = new HashMap<>();
attributes.put(JSON_STRING,
mapper.writeValueAsString(clientSideRequestStatistics.getRetryContext()));
this.addEvent("Retry Context", attributes,
OffsetDateTime.ofInstant(clientSideRequestStatistics.getRetryContext().getRetryStartTime(),
ZoneOffset.UTC), context);
}
diagnosticsCounter = 1;
for (ClientSideRequestStatistics.AddressResolutionStatistics addressResolutionStatistics :
clientSideRequestStatistics.getAddressResolutionStatistics().values()) {
attributes = new HashMap<>();
attributes.put(JSON_STRING, mapper.writeValueAsString(addressResolutionStatistics));
this.addEvent("AddressResolutionStatistics" + diagnosticsCounter++, attributes,
OffsetDateTime.ofInstant(addressResolutionStatistics.getStartTimeUTC(), ZoneOffset.UTC), context);
}
if (clientSideRequestStatistics.getSerializationDiagnosticsContext().serializationDiagnosticsList != null) {
for (SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics :
clientSideRequestStatistics.getSerializationDiagnosticsContext().serializationDiagnosticsList) {
attributes = new HashMap<>();
attributes.put(JSON_STRING, mapper.writeValueAsString(serializationDiagnostics));
this.addEvent("SerializationDiagnostics " + serializationDiagnostics.serializationType, attributes,
OffsetDateTime.ofInstant(serializationDiagnostics.startTimeUTC, ZoneOffset.UTC), context);
}
}
attributes = new HashMap<>();
attributes.put(JSON_STRING,
mapper.writeValueAsString(clientSideRequestStatistics.getRegionsContacted()));
this.addEvent("RegionContacted", attributes,
OffsetDateTime.ofInstant(clientSideRequestStatistics.getRequestStartTimeUTC(), ZoneOffset.UTC), context);
attributes = new HashMap<>();
attributes.put(JSON_STRING,
mapper.writeValueAsString(ClientSideRequestStatistics.fetchSystemInformation()));
this.addEvent("SystemInformation", attributes,
OffsetDateTime.ofInstant(clientSideRequestStatistics.getRequestStartTimeUTC(), ZoneOffset.UTC), context);
attributes = new HashMap<>();
attributes.put(JSON_STRING,
mapper.writeValueAsString(clientSideRequestStatistics.getDiagnosticsClientContext()));
this.addEvent("ClientCfgs", attributes,
OffsetDateTime.ofInstant(clientSideRequestStatistics.getRequestStartTimeUTC(), ZoneOffset.UTC), context);
}
} |
since this is no longer an exception, it shouldn't have exception attributes - `otel.status_description` gives user the info they need | private void end(int statusCode, Throwable throwable, Context context) {
if (throwable != null) {
if (statusCode == HttpConstants.StatusCodes.NOTFOUND) {
tracer.setAttribute(TracerProvider.ERROR_MSG, "Not found exception", context);
tracer.setAttribute(TracerProvider.ERROR_TYPE, throwable.getClass().getName(), context);
tracer.end(statusCode, null, context);
} else {
tracer.setAttribute(TracerProvider.ERROR_MSG, throwable.getMessage(), context);
tracer.setAttribute(TracerProvider.ERROR_TYPE, throwable.getClass().getName(), context);
tracer.end(statusCode, throwable, context);
}
} else {
tracer.end(statusCode, null, context);
}
} | tracer.setAttribute(TracerProvider.ERROR_MSG, "Not found exception", context); | private void end(int statusCode, Throwable throwable, Context context) {
if (throwable != null) {
if (throwable instanceof CosmosException) {
CosmosException cosmosException = (CosmosException) throwable;
if (statusCode == HttpConstants.StatusCodes.NOTFOUND && cosmosException.getSubStatusCode() == 0) {
tracer.end(statusCode, null, context);
return;
}
}
}
tracer.end(statusCode, throwable, context);
} | class TracerProvider {
private Tracer tracer;
private static final Logger LOGGER = LoggerFactory.getLogger(TracerProvider.class);
private static final ObjectMapper mapper = new ObjectMapper();
private final static String JSON_STRING = "JSON";
public final static String DB_TYPE_VALUE = "Cosmos";
public final static String DB_TYPE = "db.type";
public final static String DB_INSTANCE = "db.instance";
public final static String DB_URL = "db.url";
public static final String DB_STATEMENT = "db.statement";
public static final String ERROR_MSG = "error.msg";
public static final String ERROR_TYPE = "error.type";
public static final String COSMOS_CALL_DEPTH = "cosmosCallDepth";
public static final String COSMOS_CALL_DEPTH_VAL = "nested";
public static final int ERROR_CODE = 0;
public static final String RESOURCE_PROVIDER_NAME = "Microsoft.DocumentDB";
public final Duration CRUD_THRESHOLD_FOR_DIAGNOSTICS = Duration.ofMillis(100);
public final Duration QUERY_THRESHOLD_FOR_DIAGNOSTICS = Duration.ofMillis(500);
public TracerProvider(Tracer tracer) {
this.tracer = tracer;
}
public boolean isEnabled() {
return tracer != null;
}
/**
* For each tracer plugged into the SDK a new tracing span is created.
* <p>
* The {@code context} will be checked for containing information about a parent span. If a parent span is found the
* new span will be added as a child, otherwise the span will be created and added to the context and any downstream
* start calls will use the created span as the parent.
*
* @param context Additional metadata that is passed through the call stack.
* @return An updated context object.
*/
public Context startSpan(String methodName, String databaseId, String endpoint, Context context) {
Context local = Objects.requireNonNull(context, "'context' cannot be null.");
local = local.addData(AZ_TRACING_NAMESPACE_KEY, RESOURCE_PROVIDER_NAME);
local = tracer.start(methodName, local);
if (databaseId != null) {
tracer.setAttribute(TracerProvider.DB_INSTANCE, databaseId, local);
}
tracer.setAttribute(TracerProvider.DB_TYPE, DB_TYPE_VALUE, local);
tracer.setAttribute(TracerProvider.DB_URL, endpoint, local);
tracer.setAttribute(TracerProvider.DB_STATEMENT, methodName, local);
return local;
}
/**
* Adds an event to the current span with the provided {@code timestamp} and {@code attributes}.
* <p>This API does not provide any normalization if provided timestamps are out of range of the current
* span timeline</p>
* <p>Supported attribute values include String, double, boolean, long, String [], double [], long [].
* Any other Object value type and null values will be silently ignored.</p>
*
* @param name the name of the event.
* @param attributes the additional attributes to be set for the event.
* @param timestamp The instant, in UTC, at which the event will be associated to the span.
* @param context the call metadata containing information of the span to which the event should be associated with.
* @throws NullPointerException if {@code eventName} is {@code null}.
*/
public void addEvent(String name, Map<String, Object> attributes, OffsetDateTime timestamp, Context context) {
tracer.addEvent(name, attributes, timestamp, context);
}
/**
* Given a context containing the current tracing span the span is marked completed with status info from
* {@link Signal}. For each tracer plugged into the SDK the current tracing span is marked as completed.
*
* @param context Additional metadata that is passed through the call stack.
* @param signal The signal indicates the status and contains the metadata we need to end the tracing span.
*/
    public <T extends CosmosResponse<? extends Resource>> void endSpan(Context context,
                                                                       Signal<T> signal,
                                                                       int statusCode) {
        Objects.requireNonNull(context, "'context' cannot be null.");
        Objects.requireNonNull(signal, "'signal' cannot be null.");

        switch (signal.getType()) {
            case ON_COMPLETE:
                // Successful completion: nothing to record as a throwable.
                end(statusCode, null, context);
                break;
            case ON_ERROR:
                Throwable throwable = null;
                if (signal.hasError()) {
                    // For service failures prefer the status code carried by the
                    // CosmosException over the caller-supplied one (often ERROR_CODE/0).
                    throwable = signal.getThrowable();

                    if (throwable instanceof CosmosException) {
                        CosmosException exception = (CosmosException) throwable;
                        statusCode = exception.getStatusCode();
                    }
                }
                end(statusCode, throwable, context);
                break;
            default:
                // ON_NEXT / ON_SUBSCRIBE are not terminal signals; nothing to end.
                break;
        }
    }
public <T extends CosmosResponse<?>> Mono<T> traceEnabledCosmosResponsePublisher(Mono<T> resultPublisher,
Context context,
String spanName,
String databaseId,
String endpoint) {
return traceEnabledPublisher(resultPublisher, context, spanName, databaseId, endpoint,
(T response) -> response.getStatusCode(), (T response) -> response.getDiagnostics(), null);
}
public Mono<CosmosBatchResponse> traceEnabledBatchResponsePublisher(Mono<CosmosBatchResponse> resultPublisher,
Context context,
String spanName,
String containerId,
String databaseId,
CosmosAsyncClient client,
ConsistencyLevel consistencyLevel,
OperationType operationType,
ResourceType resourceType) {
return publisherWithClientTelemetry(resultPublisher, context, spanName, containerId, databaseId,
BridgeInternal.getServiceEndpoint(client),
client,
consistencyLevel,
operationType,
resourceType,
CosmosBatchResponse::getStatusCode,
CosmosBatchResponse::getDiagnostics,
null);
}
public <T> Mono<CosmosItemResponse<T>> traceEnabledCosmosItemResponsePublisher(Mono<CosmosItemResponse<T>> resultPublisher,
Context context,
String spanName,
String containerId,
String databaseId,
CosmosAsyncClient client,
ConsistencyLevel consistencyLevel,
OperationType operationType,
ResourceType resourceType,
Duration thresholdForDiagnosticsOnTracer) {
return publisherWithClientTelemetry(resultPublisher, context, spanName, containerId, databaseId,
BridgeInternal.getServiceEndpoint(client),
client,
consistencyLevel,
operationType,
resourceType,
CosmosItemResponse::getStatusCode,
CosmosItemResponse::getDiagnostics,
thresholdForDiagnosticsOnTracer);
}
private <T> Mono<T> traceEnabledPublisher(Mono<T> resultPublisher,
Context context,
String spanName,
String databaseId,
String endpoint,
Function<T, Integer> statusCodeFunc,
Function<T, CosmosDiagnostics> diagnosticFunc,
Duration thresholdForDiagnosticsOnTracer) {
final AtomicReference<Context> parentContext = new AtomicReference<>(Context.NONE);
Optional<Object> callDepth = context.getData(COSMOS_CALL_DEPTH);
final boolean isNestedCall = callDepth.isPresent();
return resultPublisher
.doOnSubscribe(ignoredValue -> {
if (isEnabled() && !isNestedCall) {
parentContext.set(this.startSpan(spanName, databaseId, endpoint,
context));
}
}).doOnSuccess(response -> {
if (isEnabled() && !isNestedCall) {
CosmosDiagnostics cosmosDiagnostics = diagnosticFunc.apply(response);
try {
Duration threshold = thresholdForDiagnosticsOnTracer;
if(threshold == null) {
threshold = CRUD_THRESHOLD_FOR_DIAGNOSTICS;
}
if (cosmosDiagnostics != null
&& cosmosDiagnostics.getDuration() != null
&& cosmosDiagnostics.getDuration().compareTo(threshold) > 0) {
addDiagnosticsOnTracerEvent(cosmosDiagnostics, parentContext.get());
}
} catch (JsonProcessingException ex) {
LOGGER.warn("Error while serializing diagnostics for tracer", ex.getMessage());
}
this.endSpan(parentContext.get(), Signal.complete(), statusCodeFunc.apply(response));
}
}).doOnError(throwable -> {
if (isEnabled() && !isNestedCall) {
this.endSpan(parentContext.get(), Signal.error(throwable), ERROR_CODE);
}
});
}
    // Layers client telemetry collection (latency / request charge histograms) on top of
    // the tracing instrumentation. Telemetry is recorded on success for item and batch
    // responses, and on failure for CosmosExceptions, but only when client telemetry is
    // enabled for this client. Tracing behavior is unchanged from traceEnabledPublisher.
    private <T> Mono<T> publisherWithClientTelemetry(Mono<T> resultPublisher,
                                                     Context context,
                                                     String spanName,
                                                     String containerId,
                                                     String databaseId,
                                                     String endpoint,
                                                     CosmosAsyncClient client,
                                                     ConsistencyLevel consistencyLevel,
                                                     OperationType operationType,
                                                     ResourceType resourceType,
                                                     Function<T, Integer> statusCodeFunc,
                                                     Function<T, CosmosDiagnostics> diagnosticFunc,
                                                     Duration thresholdForDiagnosticsOnTracer) {
        Mono<T> tracerMono = traceEnabledPublisher(resultPublisher, context, spanName, databaseId, endpoint, statusCodeFunc, diagnosticFunc, thresholdForDiagnosticsOnTracer);
        return tracerMono
            .doOnSuccess(response -> {
                // Item responses carry a typed payload; record its size alongside the metrics.
                if (Configs.isClientTelemetryEnabled(BridgeInternal.isClientTelemetryEnabled(client)) && response instanceof CosmosItemResponse) {
                    @SuppressWarnings("unchecked")
                    CosmosItemResponse<T> itemResponse = (CosmosItemResponse<T>) response;
                    fillClientTelemetry(client, itemResponse.getDiagnostics(), itemResponse.getStatusCode(),
                        ModelBridgeInternal.getPayloadLength(itemResponse), containerId,
                        databaseId, operationType, resourceType, consistencyLevel,
                        (float) itemResponse.getRequestCharge());
                } else if (Configs.isClientTelemetryEnabled(BridgeInternal.isClientTelemetryEnabled(client)) && response instanceof CosmosBatchResponse) {
                    @SuppressWarnings("unchecked")
                    CosmosBatchResponse cosmosBatchResponse = (CosmosBatchResponse) response;
                    fillClientTelemetry(client, cosmosBatchResponse.getDiagnostics(), cosmosBatchResponse.getStatusCode(),
                        ModelBridgeInternal.getPayloadLength(cosmosBatchResponse), containerId,
                        databaseId, operationType, resourceType, consistencyLevel,
                        (float) cosmosBatchResponse.getRequestCharge());
                }
            }).doOnError(throwable -> {
                // Failures still produce telemetry; payload size is unknown (null) here.
                if (Configs.isClientTelemetryEnabled(BridgeInternal.isClientTelemetryEnabled(client)) && throwable instanceof CosmosException) {
                    CosmosException cosmosException = (CosmosException) throwable;
                    fillClientTelemetry(client, cosmosException.getDiagnostics(), cosmosException.getStatusCode(),
                        null, containerId,
                        databaseId, operationType, resourceType, consistencyLevel,
                        (float) cosmosException.getRequestCharge());
                }
            });
    }
    // Records one operation's latency (in microseconds) and request charge into the
    // client-telemetry histograms, keyed by a ReportPayload describing the operation.
    // Histograms are created lazily on first sight of a payload key; latency histograms
    // for successful status codes use a finer precision than failures.
    // NOTE(review): cosmosDiagnostics.getDuration() is dereferenced without a null
    // check here, unlike in traceEnabledPublisher — presumably callers guarantee it;
    // TODO confirm.
    private void fillClientTelemetry(CosmosAsyncClient cosmosAsyncClient,
                                     CosmosDiagnostics cosmosDiagnostics,
                                     int statusCode,
                                     Integer objectSize,
                                     String containerId,
                                     String databaseId,
                                     OperationType operationType,
                                     ResourceType resourceType,
                                     ConsistencyLevel consistencyLevel,
                                     float requestCharge) {
        ClientTelemetry telemetry = BridgeInternal.getContextClient(cosmosAsyncClient).getClientTelemetry();
        ReportPayload reportPayloadLatency = createReportPayload(cosmosAsyncClient, cosmosDiagnostics,
            statusCode, objectSize, containerId, databaseId
            , operationType, resourceType, consistencyLevel, ClientTelemetry.REQUEST_LATENCY_NAME,
            ClientTelemetry.REQUEST_LATENCY_UNIT);
        ConcurrentDoubleHistogram latencyHistogram = telemetry.getClientTelemetryInfo().getOperationInfoMap().get(reportPayloadLatency);
        if (latencyHistogram != null) {
            // Duration is converted from nanos to micros before recording.
            ClientTelemetry.recordValue(latencyHistogram, cosmosDiagnostics.getDuration().toNanos()/1000);
        } else {
            // First observation for this payload key: pick precision by outcome, then register.
            if (statusCode >= HttpConstants.StatusCodes.MINIMUM_SUCCESS_STATUSCODE && statusCode <= HttpConstants.StatusCodes.MAXIMUM_SUCCESS_STATUSCODE) {
                latencyHistogram = new ConcurrentDoubleHistogram(ClientTelemetry.REQUEST_LATENCY_MAX_MICRO_SEC, ClientTelemetry.REQUEST_LATENCY_SUCCESS_PRECISION);
            } else {
                latencyHistogram = new ConcurrentDoubleHistogram(ClientTelemetry.REQUEST_LATENCY_MAX_MICRO_SEC, ClientTelemetry.REQUEST_LATENCY_FAILURE_PRECISION);
            }

            latencyHistogram.setAutoResize(true);
            ClientTelemetry.recordValue(latencyHistogram, cosmosDiagnostics.getDuration().toNanos()/1000);
            telemetry.getClientTelemetryInfo().getOperationInfoMap().put(reportPayloadLatency, latencyHistogram);
        }

        ReportPayload reportPayloadRequestCharge = createReportPayload(cosmosAsyncClient, cosmosDiagnostics,
            statusCode, objectSize, containerId, databaseId
            , operationType, resourceType, consistencyLevel, ClientTelemetry.REQUEST_CHARGE_NAME, ClientTelemetry.REQUEST_CHARGE_UNIT);
        ConcurrentDoubleHistogram requestChargeHistogram = telemetry.getClientTelemetryInfo().getOperationInfoMap().get(reportPayloadRequestCharge);
        if (requestChargeHistogram != null) {
            ClientTelemetry.recordValue(requestChargeHistogram, requestCharge);
        } else {
            // Request-charge histograms use a single precision regardless of outcome.
            requestChargeHistogram = new ConcurrentDoubleHistogram(ClientTelemetry.REQUEST_CHARGE_MAX, ClientTelemetry.REQUEST_CHARGE_PRECISION);
            requestChargeHistogram.setAutoResize(true);
            ClientTelemetry.recordValue(requestChargeHistogram, requestCharge);
            telemetry.getClientTelemetryInfo().getOperationInfoMap().put(reportPayloadRequestCharge,
                requestChargeHistogram);
        }
    }
private ReportPayload createReportPayload(CosmosAsyncClient cosmosAsyncClient,
CosmosDiagnostics cosmosDiagnostics,
int statusCode,
Integer objectSize,
String containerId,
String databaseId,
OperationType operationType,
ResourceType resourceType,
ConsistencyLevel consistencyLevel,
String metricsName,
String unitName) {
ReportPayload reportPayload = new ReportPayload(metricsName, unitName);
reportPayload.setRegionsContacted(BridgeInternal.getRegionsContacted(cosmosDiagnostics).toString());
reportPayload.setConsistency(consistencyLevel == null ?
BridgeInternal.getContextClient(cosmosAsyncClient).getConsistencyLevel() :
consistencyLevel);
if (objectSize != null) {
reportPayload.setGreaterThan1Kb(objectSize > ClientTelemetry.ONE_KB_TO_BYTES);
}
reportPayload.setDatabaseName(databaseId);
reportPayload.setContainerName(containerId);
reportPayload.setOperation(operationType);
reportPayload.setResource(resourceType);
reportPayload.setStatusCode(statusCode);
return reportPayload;
}
private void addDiagnosticsOnTracerEvent(CosmosDiagnostics cosmosDiagnostics, Context context) throws JsonProcessingException {
if (cosmosDiagnostics == null) {
return;
}
ClientSideRequestStatistics clientSideRequestStatistics =
BridgeInternal.getClientSideRequestStatics(cosmosDiagnostics);
Map<String, Object> attributes = null;
int diagnosticsCounter = 1;
for (ClientSideRequestStatistics.StoreResponseStatistics storeResponseStatistics :
clientSideRequestStatistics.getResponseStatisticsList()) {
attributes = new HashMap<>();
attributes.put(JSON_STRING, mapper.writeValueAsString(storeResponseStatistics));
Iterator<RequestTimeline.Event> eventIterator = null;
try {
if (storeResponseStatistics.getStoreResult() != null) {
eventIterator =
DirectBridgeInternal.getRequestTimeline(storeResponseStatistics.getStoreResult().toResponse()).iterator();
}
} catch (CosmosException ex) {
eventIterator = BridgeInternal.getRequestTimeline(ex).iterator();
}
OffsetDateTime requestStartTime = OffsetDateTime.ofInstant(storeResponseStatistics.getRequestResponseTimeUTC()
, ZoneOffset.UTC);
if (eventIterator != null) {
while (eventIterator.hasNext()) {
RequestTimeline.Event event = eventIterator.next();
if (event.getName().equals("created")) {
requestStartTime = OffsetDateTime.ofInstant(event.getStartTime(), ZoneOffset.UTC);
break;
}
}
}
this.addEvent("StoreResponse" + diagnosticsCounter++, attributes, requestStartTime, context);
}
diagnosticsCounter = 1;
for (ClientSideRequestStatistics.StoreResponseStatistics statistics :
ClientSideRequestStatistics.getCappedSupplementalResponseStatisticsList(clientSideRequestStatistics.getSupplementalResponseStatisticsList())) {
attributes = new HashMap<>();
attributes.put(JSON_STRING, mapper.writeValueAsString(statistics));
OffsetDateTime requestStartTime = OffsetDateTime.ofInstant(statistics.getRequestResponseTimeUTC(),
ZoneOffset.UTC);
if (statistics.getStoreResult() != null) {
Iterator<RequestTimeline.Event> eventIterator =
DirectBridgeInternal.getRequestTimeline(statistics.getStoreResult().toResponse()).iterator();
while (eventIterator.hasNext()) {
RequestTimeline.Event event = eventIterator.next();
if (event.getName().equals("created")) {
requestStartTime = OffsetDateTime.ofInstant(event.getStartTime(), ZoneOffset.UTC);
break;
}
}
}
this.addEvent("Supplemental StoreResponse" + diagnosticsCounter++, attributes, requestStartTime, context);
}
if (clientSideRequestStatistics.getGatewayStatistics() != null) {
attributes = new HashMap<>();
attributes.put(JSON_STRING,
mapper.writeValueAsString(clientSideRequestStatistics.getGatewayStatistics()));
OffsetDateTime requestStartTime =
OffsetDateTime.ofInstant(clientSideRequestStatistics.getRequestStartTimeUTC(), ZoneOffset.UTC);
if (clientSideRequestStatistics.getGatewayStatistics().getRequestTimeline() != null) {
Iterator<RequestTimeline.Event> eventIterator =
clientSideRequestStatistics.getGatewayStatistics().getRequestTimeline().iterator();
while (eventIterator.hasNext()) {
RequestTimeline.Event event = eventIterator.next();
if (event.getName().equals("created")) {
requestStartTime = OffsetDateTime.ofInstant(event.getStartTime(), ZoneOffset.UTC);
break;
}
}
}
this.addEvent("GatewayStatistics", attributes, requestStartTime, context);
}
if (clientSideRequestStatistics.getRetryContext().getRetryStartTime() != null) {
attributes = new HashMap<>();
attributes.put(JSON_STRING,
mapper.writeValueAsString(clientSideRequestStatistics.getRetryContext()));
this.addEvent("Retry Context", attributes,
OffsetDateTime.ofInstant(clientSideRequestStatistics.getRetryContext().getRetryStartTime(),
ZoneOffset.UTC), context);
}
diagnosticsCounter = 1;
for (ClientSideRequestStatistics.AddressResolutionStatistics addressResolutionStatistics :
clientSideRequestStatistics.getAddressResolutionStatistics().values()) {
attributes = new HashMap<>();
attributes.put(JSON_STRING, mapper.writeValueAsString(addressResolutionStatistics));
this.addEvent("AddressResolutionStatistics" + diagnosticsCounter++, attributes,
OffsetDateTime.ofInstant(addressResolutionStatistics.getStartTimeUTC(), ZoneOffset.UTC), context);
}
if (clientSideRequestStatistics.getSerializationDiagnosticsContext().serializationDiagnosticsList != null) {
for (SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics :
clientSideRequestStatistics.getSerializationDiagnosticsContext().serializationDiagnosticsList) {
attributes = new HashMap<>();
attributes.put(JSON_STRING, mapper.writeValueAsString(serializationDiagnostics));
this.addEvent("SerializationDiagnostics " + serializationDiagnostics.serializationType, attributes,
OffsetDateTime.ofInstant(serializationDiagnostics.startTimeUTC, ZoneOffset.UTC), context);
}
}
attributes = new HashMap<>();
attributes.put(JSON_STRING,
mapper.writeValueAsString(clientSideRequestStatistics.getRegionsContacted()));
this.addEvent("RegionContacted", attributes,
OffsetDateTime.ofInstant(clientSideRequestStatistics.getRequestStartTimeUTC(), ZoneOffset.UTC), context);
attributes = new HashMap<>();
attributes.put(JSON_STRING,
mapper.writeValueAsString(ClientSideRequestStatistics.fetchSystemInformation()));
this.addEvent("SystemInformation", attributes,
OffsetDateTime.ofInstant(clientSideRequestStatistics.getRequestStartTimeUTC(), ZoneOffset.UTC), context);
attributes = new HashMap<>();
attributes.put(JSON_STRING,
mapper.writeValueAsString(clientSideRequestStatistics.getDiagnosticsClientContext()));
this.addEvent("ClientCfgs", attributes,
OffsetDateTime.ofInstant(clientSideRequestStatistics.getRequestStartTimeUTC(), ZoneOffset.UTC), context);
}
} | class TracerProvider {
private Tracer tracer;
private static final Logger LOGGER = LoggerFactory.getLogger(TracerProvider.class);
private static final ObjectMapper mapper = new ObjectMapper();
private final static String JSON_STRING = "JSON";
public final static String DB_TYPE_VALUE = "Cosmos";
public final static String DB_TYPE = "db.type";
public final static String DB_INSTANCE = "db.instance";
public final static String DB_URL = "db.url";
public static final String DB_STATEMENT = "db.statement";
public static final String COSMOS_CALL_DEPTH = "cosmosCallDepth";
public static final String COSMOS_CALL_DEPTH_VAL = "nested";
public static final int ERROR_CODE = 0;
public static final String RESOURCE_PROVIDER_NAME = "Microsoft.DocumentDB";
public final Duration CRUD_THRESHOLD_FOR_DIAGNOSTICS = Duration.ofMillis(100);
public final Duration QUERY_THRESHOLD_FOR_DIAGNOSTICS = Duration.ofMillis(500);
public TracerProvider(Tracer tracer) {
this.tracer = tracer;
}
public boolean isEnabled() {
return tracer != null;
}
/**
* For each tracer plugged into the SDK a new tracing span is created.
* <p>
* The {@code context} will be checked for containing information about a parent span. If a parent span is found the
* new span will be added as a child, otherwise the span will be created and added to the context and any downstream
* start calls will use the created span as the parent.
*
* @param context Additional metadata that is passed through the call stack.
* @return An updated context object.
*/
public Context startSpan(String methodName, String databaseId, String endpoint, Context context) {
Context local = Objects.requireNonNull(context, "'context' cannot be null.");
local = local.addData(AZ_TRACING_NAMESPACE_KEY, RESOURCE_PROVIDER_NAME);
local = tracer.start(methodName, local);
if (databaseId != null) {
tracer.setAttribute(TracerProvider.DB_INSTANCE, databaseId, local);
}
tracer.setAttribute(TracerProvider.DB_TYPE, DB_TYPE_VALUE, local);
tracer.setAttribute(TracerProvider.DB_URL, endpoint, local);
tracer.setAttribute(TracerProvider.DB_STATEMENT, methodName, local);
return local;
}
/**
* Adds an event to the current span with the provided {@code timestamp} and {@code attributes}.
* <p>This API does not provide any normalization if provided timestamps are out of range of the current
* span timeline</p>
* <p>Supported attribute values include String, double, boolean, long, String [], double [], long [].
* Any other Object value type and null values will be silently ignored.</p>
*
* @param name the name of the event.
* @param attributes the additional attributes to be set for the event.
* @param timestamp The instant, in UTC, at which the event will be associated to the span.
* @param context the call metadata containing information of the span to which the event should be associated with.
* @throws NullPointerException if {@code eventName} is {@code null}.
*/
public void addEvent(String name, Map<String, Object> attributes, OffsetDateTime timestamp, Context context) {
tracer.addEvent(name, attributes, timestamp, context);
}
/**
* Given a context containing the current tracing span the span is marked completed with status info from
* {@link Signal}. For each tracer plugged into the SDK the current tracing span is marked as completed.
*
* @param context Additional metadata that is passed through the call stack.
* @param signal The signal indicates the status and contains the metadata we need to end the tracing span.
*/
public <T extends CosmosResponse<? extends Resource>> void endSpan(Context context,
Signal<T> signal,
int statusCode) {
Objects.requireNonNull(context, "'context' cannot be null.");
Objects.requireNonNull(signal, "'signal' cannot be null.");
switch (signal.getType()) {
case ON_COMPLETE:
end(statusCode, null, context);
break;
case ON_ERROR:
Throwable throwable = null;
if (signal.hasError()) {
throwable = signal.getThrowable();
if (throwable instanceof CosmosException) {
CosmosException exception = (CosmosException) throwable;
statusCode = exception.getStatusCode();
}
}
end(statusCode, throwable, context);
break;
default:
break;
}
}
public <T extends CosmosResponse<?>> Mono<T> traceEnabledCosmosResponsePublisher(Mono<T> resultPublisher,
Context context,
String spanName,
String databaseId,
String endpoint) {
return traceEnabledPublisher(resultPublisher, context, spanName, databaseId, endpoint,
(T response) -> response.getStatusCode(), (T response) -> response.getDiagnostics(), null);
}
public Mono<CosmosBatchResponse> traceEnabledBatchResponsePublisher(Mono<CosmosBatchResponse> resultPublisher,
Context context,
String spanName,
String containerId,
String databaseId,
CosmosAsyncClient client,
ConsistencyLevel consistencyLevel,
OperationType operationType,
ResourceType resourceType) {
return publisherWithClientTelemetry(resultPublisher, context, spanName, containerId, databaseId,
BridgeInternal.getServiceEndpoint(client),
client,
consistencyLevel,
operationType,
resourceType,
CosmosBatchResponse::getStatusCode,
CosmosBatchResponse::getDiagnostics,
null);
}
public <T> Mono<CosmosItemResponse<T>> traceEnabledCosmosItemResponsePublisher(Mono<CosmosItemResponse<T>> resultPublisher,
Context context,
String spanName,
String containerId,
String databaseId,
CosmosAsyncClient client,
ConsistencyLevel consistencyLevel,
OperationType operationType,
ResourceType resourceType,
Duration thresholdForDiagnosticsOnTracer) {
return publisherWithClientTelemetry(resultPublisher, context, spanName, containerId, databaseId,
BridgeInternal.getServiceEndpoint(client),
client,
consistencyLevel,
operationType,
resourceType,
CosmosItemResponse::getStatusCode,
CosmosItemResponse::getDiagnostics,
thresholdForDiagnosticsOnTracer);
}
/**
 * Core tracing wrapper for all public publisher overloads. Starts a span when the
 * subscription begins (unless this call is nested inside another traced SDK call),
 * attaches serialized diagnostics as span events when the operation exceeded the
 * latency threshold, and completes the span on success or error.
 *
 * @param resultPublisher publisher producing the operation result.
 * @param context call metadata; a {@link #COSMOS_CALL_DEPTH} entry marks a nested call
 *        that must not open a second span.
 * @param spanName name of the span to create.
 * @param databaseId database id, may be null.
 * @param endpoint service endpoint the request targets.
 * @param statusCodeFunc extracts the status code from a successful response.
 * @param diagnosticFunc extracts the diagnostics from a successful response.
 * @param thresholdForDiagnosticsOnTracer latency threshold above which diagnostics are
 *        attached; {@code null} means {@link #CRUD_THRESHOLD_FOR_DIAGNOSTICS}.
 * @return the instrumented publisher.
 */
private <T> Mono<T> traceEnabledPublisher(Mono<T> resultPublisher,
                                          Context context,
                                          String spanName,
                                          String databaseId,
                                          String endpoint,
                                          Function<T, Integer> statusCodeFunc,
                                          Function<T, CosmosDiagnostics> diagnosticFunc,
                                          Duration thresholdForDiagnosticsOnTracer) {
    final AtomicReference<Context> parentContext = new AtomicReference<>(Context.NONE);
    Optional<Object> callDepth = context.getData(COSMOS_CALL_DEPTH);
    final boolean isNestedCall = callDepth.isPresent();
    return resultPublisher
        .doOnSubscribe(ignoredValue -> {
            if (isEnabled() && !isNestedCall) {
                parentContext.set(this.startSpan(spanName, databaseId, endpoint, context));
            }
        }).doOnSuccess(response -> {
            if (isEnabled() && !isNestedCall) {
                CosmosDiagnostics cosmosDiagnostics = diagnosticFunc.apply(response);
                try {
                    Duration threshold = thresholdForDiagnosticsOnTracer != null
                        ? thresholdForDiagnosticsOnTracer
                        : CRUD_THRESHOLD_FOR_DIAGNOSTICS;
                    if (cosmosDiagnostics != null
                        && cosmosDiagnostics.getDuration() != null
                        && cosmosDiagnostics.getDuration().compareTo(threshold) > 0) {
                        addDiagnosticsOnTracerEvent(cosmosDiagnostics, parentContext.get());
                    }
                } catch (JsonProcessingException ex) {
                    // BUG FIX: the previous message had no '{}' placeholder, so SLF4J
                    // silently discarded the ex.getMessage() argument.
                    LOGGER.warn("Error while serializing diagnostics for tracer: {}", ex.getMessage());
                }
                this.endSpan(parentContext.get(), Signal.complete(), statusCodeFunc.apply(response));
            }
        }).doOnError(throwable -> {
            if (isEnabled() && !isNestedCall) {
                this.endSpan(parentContext.get(), Signal.error(throwable), ERROR_CODE);
            }
        });
}
/**
 * Adds client-telemetry collection on top of {@link #traceEnabledPublisher}: successful
 * item and batch responses, as well as {@link CosmosException} failures, are recorded
 * into the client-telemetry latency/request-charge histograms when telemetry is enabled.
 *
 * @param resultPublisher publisher producing the operation result.
 * @param context call metadata carrying the (optional) parent span.
 * @param spanName name of the tracing span to create.
 * @param containerId container id reported in telemetry.
 * @param databaseId database id reported in telemetry.
 * @param endpoint service endpoint the request targets.
 * @param client client whose telemetry settings and context are used.
 * @param consistencyLevel consistency level of the operation, may be null.
 * @param operationType operation type reported in telemetry.
 * @param resourceType resource type reported in telemetry.
 * @param statusCodeFunc extracts the status code from a successful response.
 * @param diagnosticFunc extracts the diagnostics from a successful response.
 * @param thresholdForDiagnosticsOnTracer latency threshold for attaching diagnostics, may be null.
 * @return the instrumented publisher.
 */
private <T> Mono<T> publisherWithClientTelemetry(Mono<T> resultPublisher,
                                                 Context context,
                                                 String spanName,
                                                 String containerId,
                                                 String databaseId,
                                                 String endpoint,
                                                 CosmosAsyncClient client,
                                                 ConsistencyLevel consistencyLevel,
                                                 OperationType operationType,
                                                 ResourceType resourceType,
                                                 Function<T, Integer> statusCodeFunc,
                                                 Function<T, CosmosDiagnostics> diagnosticFunc,
                                                 Duration thresholdForDiagnosticsOnTracer) {
    Mono<T> tracerMono = traceEnabledPublisher(resultPublisher, context, spanName, databaseId, endpoint,
        statusCodeFunc, diagnosticFunc, thresholdForDiagnosticsOnTracer);
    return tracerMono
        .doOnSuccess(response -> {
            // Evaluate the telemetry switch once per emission instead of once per branch.
            if (!Configs.isClientTelemetryEnabled(BridgeInternal.isClientTelemetryEnabled(client))) {
                return;
            }
            if (response instanceof CosmosItemResponse) {
                @SuppressWarnings("unchecked")
                CosmosItemResponse<T> itemResponse = (CosmosItemResponse<T>) response;
                fillClientTelemetry(client, itemResponse.getDiagnostics(), itemResponse.getStatusCode(),
                    ModelBridgeInternal.getPayloadLength(itemResponse), containerId,
                    databaseId, operationType, resourceType, consistencyLevel,
                    (float) itemResponse.getRequestCharge());
            } else if (response instanceof CosmosBatchResponse) {
                // Cast to the non-generic CosmosBatchResponse is a checked cast;
                // no @SuppressWarnings("unchecked") is required here.
                CosmosBatchResponse cosmosBatchResponse = (CosmosBatchResponse) response;
                fillClientTelemetry(client, cosmosBatchResponse.getDiagnostics(), cosmosBatchResponse.getStatusCode(),
                    ModelBridgeInternal.getPayloadLength(cosmosBatchResponse), containerId,
                    databaseId, operationType, resourceType, consistencyLevel,
                    (float) cosmosBatchResponse.getRequestCharge());
            }
        }).doOnError(throwable -> {
            if (throwable instanceof CosmosException
                && Configs.isClientTelemetryEnabled(BridgeInternal.isClientTelemetryEnabled(client))) {
                CosmosException cosmosException = (CosmosException) throwable;
                // Failures carry no payload, hence the null object size.
                fillClientTelemetry(client, cosmosException.getDiagnostics(), cosmosException.getStatusCode(),
                    null, containerId,
                    databaseId, operationType, resourceType, consistencyLevel,
                    (float) cosmosException.getRequestCharge());
            }
        });
}
/**
 * Records the operation's latency (in microseconds) and request charge into the
 * client-telemetry histograms, creating each histogram on first use of its
 * report-payload key.
 */
private void fillClientTelemetry(CosmosAsyncClient cosmosAsyncClient,
                                 CosmosDiagnostics cosmosDiagnostics,
                                 int statusCode,
                                 Integer objectSize,
                                 String containerId,
                                 String databaseId,
                                 OperationType operationType,
                                 ResourceType resourceType,
                                 ConsistencyLevel consistencyLevel,
                                 float requestCharge) {
    ClientTelemetry telemetry = BridgeInternal.getContextClient(cosmosAsyncClient).getClientTelemetry();

    ReportPayload latencyKey = createReportPayload(cosmosAsyncClient, cosmosDiagnostics,
        statusCode, objectSize, containerId, databaseId, operationType, resourceType,
        consistencyLevel, ClientTelemetry.REQUEST_LATENCY_NAME, ClientTelemetry.REQUEST_LATENCY_UNIT);
    ConcurrentDoubleHistogram latencyHistogram =
        telemetry.getClientTelemetryInfo().getOperationInfoMap().get(latencyKey);
    long latencyMicros = cosmosDiagnostics.getDuration().toNanos() / 1000;
    if (latencyHistogram == null) {
        // Success and failure latencies are tracked with different histogram precisions.
        boolean success = statusCode >= HttpConstants.StatusCodes.MINIMUM_SUCCESS_STATUSCODE
            && statusCode <= HttpConstants.StatusCodes.MAXIMUM_SUCCESS_STATUSCODE;
        latencyHistogram = new ConcurrentDoubleHistogram(
            ClientTelemetry.REQUEST_LATENCY_MAX_MICRO_SEC,
            success
                ? ClientTelemetry.REQUEST_LATENCY_SUCCESS_PRECISION
                : ClientTelemetry.REQUEST_LATENCY_FAILURE_PRECISION);
        latencyHistogram.setAutoResize(true);
        ClientTelemetry.recordValue(latencyHistogram, latencyMicros);
        telemetry.getClientTelemetryInfo().getOperationInfoMap().put(latencyKey, latencyHistogram);
    } else {
        ClientTelemetry.recordValue(latencyHistogram, latencyMicros);
    }

    ReportPayload chargeKey = createReportPayload(cosmosAsyncClient, cosmosDiagnostics,
        statusCode, objectSize, containerId, databaseId, operationType, resourceType,
        consistencyLevel, ClientTelemetry.REQUEST_CHARGE_NAME, ClientTelemetry.REQUEST_CHARGE_UNIT);
    ConcurrentDoubleHistogram chargeHistogram =
        telemetry.getClientTelemetryInfo().getOperationInfoMap().get(chargeKey);
    if (chargeHistogram == null) {
        chargeHistogram = new ConcurrentDoubleHistogram(
            ClientTelemetry.REQUEST_CHARGE_MAX, ClientTelemetry.REQUEST_CHARGE_PRECISION);
        chargeHistogram.setAutoResize(true);
        ClientTelemetry.recordValue(chargeHistogram, requestCharge);
        telemetry.getClientTelemetryInfo().getOperationInfoMap().put(chargeKey, chargeHistogram);
    } else {
        ClientTelemetry.recordValue(chargeHistogram, requestCharge);
    }
}
/**
 * Builds the report-payload key under which telemetry histograms are grouped:
 * metric name/unit plus the operation's dimensions (region, consistency, size bucket,
 * database, container, operation, resource, status code).
 */
private ReportPayload createReportPayload(CosmosAsyncClient cosmosAsyncClient,
                                          CosmosDiagnostics cosmosDiagnostics,
                                          int statusCode,
                                          Integer objectSize,
                                          String containerId,
                                          String databaseId,
                                          OperationType operationType,
                                          ResourceType resourceType,
                                          ConsistencyLevel consistencyLevel,
                                          String metricsName,
                                          String unitName) {
    ReportPayload payload = new ReportPayload(metricsName, unitName);
    payload.setRegionsContacted(BridgeInternal.getRegionsContacted(cosmosDiagnostics).toString());
    // Fall back to the client's default consistency when none was supplied for the call.
    ConsistencyLevel effectiveConsistency = consistencyLevel;
    if (effectiveConsistency == null) {
        effectiveConsistency = BridgeInternal.getContextClient(cosmosAsyncClient).getConsistencyLevel();
    }
    payload.setConsistency(effectiveConsistency);
    if (objectSize != null) {
        payload.setGreaterThan1Kb(objectSize > ClientTelemetry.ONE_KB_TO_BYTES);
    }
    payload.setDatabaseName(databaseId);
    payload.setContainerName(containerId);
    payload.setOperation(operationType);
    payload.setResource(resourceType);
    payload.setStatusCode(statusCode);
    return payload;
}
/**
 * Attaches the sections of the client-side diagnostics to the current tracing span as
 * timestamped events, each carrying the section serialized as JSON under the
 * {@code JSON} attribute key.
 *
 * @param cosmosDiagnostics diagnostics to attach; null is silently ignored.
 * @param context call metadata holding the span the events are recorded on.
 * @throws JsonProcessingException when a diagnostics section cannot be serialized.
 */
private void addDiagnosticsOnTracerEvent(CosmosDiagnostics cosmosDiagnostics, Context context) throws JsonProcessingException {
    if (cosmosDiagnostics == null) {
        return;
    }
    ClientSideRequestStatistics clientSideRequestStatistics =
        BridgeInternal.getClientSideRequestStatics(cosmosDiagnostics);

    int diagnosticsCounter = 1;
    for (ClientSideRequestStatistics.StoreResponseStatistics storeResponseStatistics :
        clientSideRequestStatistics.getResponseStatisticsList()) {
        Iterator<RequestTimeline.Event> eventIterator = null;
        try {
            if (storeResponseStatistics.getStoreResult() != null) {
                eventIterator =
                    DirectBridgeInternal.getRequestTimeline(storeResponseStatistics.getStoreResult().toResponse()).iterator();
            }
        } catch (CosmosException ex) {
            // Failed store results surface as exceptions that still carry the timeline.
            eventIterator = BridgeInternal.getRequestTimeline(ex).iterator();
        }
        OffsetDateTime requestStartTime = startTimeFromCreatedEvent(eventIterator,
            OffsetDateTime.ofInstant(storeResponseStatistics.getRequestResponseTimeUTC(), ZoneOffset.UTC));
        this.addEvent("StoreResponse" + diagnosticsCounter++,
            jsonAttributes(storeResponseStatistics), requestStartTime, context);
    }

    diagnosticsCounter = 1;
    for (ClientSideRequestStatistics.StoreResponseStatistics statistics :
        ClientSideRequestStatistics.getCappedSupplementalResponseStatisticsList(clientSideRequestStatistics.getSupplementalResponseStatisticsList())) {
        Iterator<RequestTimeline.Event> eventIterator = statistics.getStoreResult() != null
            ? DirectBridgeInternal.getRequestTimeline(statistics.getStoreResult().toResponse()).iterator()
            : null;
        OffsetDateTime requestStartTime = startTimeFromCreatedEvent(eventIterator,
            OffsetDateTime.ofInstant(statistics.getRequestResponseTimeUTC(), ZoneOffset.UTC));
        this.addEvent("Supplemental StoreResponse" + diagnosticsCounter++,
            jsonAttributes(statistics), requestStartTime, context);
    }

    if (clientSideRequestStatistics.getGatewayStatistics() != null) {
        Iterator<RequestTimeline.Event> eventIterator =
            clientSideRequestStatistics.getGatewayStatistics().getRequestTimeline() != null
                ? clientSideRequestStatistics.getGatewayStatistics().getRequestTimeline().iterator()
                : null;
        OffsetDateTime requestStartTime = startTimeFromCreatedEvent(eventIterator,
            OffsetDateTime.ofInstant(clientSideRequestStatistics.getRequestStartTimeUTC(), ZoneOffset.UTC));
        this.addEvent("GatewayStatistics",
            jsonAttributes(clientSideRequestStatistics.getGatewayStatistics()), requestStartTime, context);
    }

    if (clientSideRequestStatistics.getRetryContext().getRetryStartTime() != null) {
        this.addEvent("Retry Context", jsonAttributes(clientSideRequestStatistics.getRetryContext()),
            OffsetDateTime.ofInstant(clientSideRequestStatistics.getRetryContext().getRetryStartTime(),
                ZoneOffset.UTC), context);
    }

    diagnosticsCounter = 1;
    for (ClientSideRequestStatistics.AddressResolutionStatistics addressResolutionStatistics :
        clientSideRequestStatistics.getAddressResolutionStatistics().values()) {
        this.addEvent("AddressResolutionStatistics" + diagnosticsCounter++,
            jsonAttributes(addressResolutionStatistics),
            OffsetDateTime.ofInstant(addressResolutionStatistics.getStartTimeUTC(), ZoneOffset.UTC), context);
    }

    if (clientSideRequestStatistics.getSerializationDiagnosticsContext().serializationDiagnosticsList != null) {
        for (SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics :
            clientSideRequestStatistics.getSerializationDiagnosticsContext().serializationDiagnosticsList) {
            this.addEvent("SerializationDiagnostics " + serializationDiagnostics.serializationType,
                jsonAttributes(serializationDiagnostics),
                OffsetDateTime.ofInstant(serializationDiagnostics.startTimeUTC, ZoneOffset.UTC), context);
        }
    }

    // The remaining sections all use the overall request start time.
    OffsetDateTime requestStartTime =
        OffsetDateTime.ofInstant(clientSideRequestStatistics.getRequestStartTimeUTC(), ZoneOffset.UTC);
    this.addEvent("RegionContacted",
        jsonAttributes(clientSideRequestStatistics.getRegionsContacted()), requestStartTime, context);
    this.addEvent("SystemInformation",
        jsonAttributes(ClientSideRequestStatistics.fetchSystemInformation()), requestStartTime, context);
    this.addEvent("ClientCfgs",
        jsonAttributes(clientSideRequestStatistics.getDiagnosticsClientContext()), requestStartTime, context);
}

/**
 * Scans a request timeline for the 'created' event; returns its start time, or the given
 * fallback when the iterator is null or contains no such event. Extracted from four
 * previously copy-pasted loops.
 */
private static OffsetDateTime startTimeFromCreatedEvent(Iterator<RequestTimeline.Event> eventIterator,
                                                        OffsetDateTime fallback) {
    if (eventIterator != null) {
        while (eventIterator.hasNext()) {
            RequestTimeline.Event event = eventIterator.next();
            // Constant-first equals is null-safe for events without a name; the original
            // event.getName().equals("created") would NPE on a null name.
            if ("created".equals(event.getName())) {
                return OffsetDateTime.ofInstant(event.getStartTime(), ZoneOffset.UTC);
            }
        }
    }
    return fallback;
}

/**
 * Builds the single-entry attribute map (JSON key -> serialized value) attached to every
 * diagnostics span event.
 */
private static Map<String, Object> jsonAttributes(Object value) throws JsonProcessingException {
    Map<String, Object> attributes = new HashMap<>();
    attributes.put(JSON_STRING, mapper.writeValueAsString(value));
    return attributes;
}
} |
yes, I think we should remove those, I had similar thought a few weeks ago but got distracted from opening PR 😓: https://github.com/trask/azure-sdk-for-java/commit/af44c693a9e4bbf1013857ec34724a3412216d0f | private void end(int statusCode, Throwable throwable, Context context) {
if (throwable != null) {
if (statusCode == HttpConstants.StatusCodes.NOTFOUND) {
tracer.setAttribute(TracerProvider.ERROR_MSG, "Not found exception", context);
tracer.setAttribute(TracerProvider.ERROR_TYPE, throwable.getClass().getName(), context);
tracer.end(statusCode, null, context);
} else {
tracer.setAttribute(TracerProvider.ERROR_MSG, throwable.getMessage(), context);
tracer.setAttribute(TracerProvider.ERROR_TYPE, throwable.getClass().getName(), context);
tracer.end(statusCode, throwable, context);
}
} else {
tracer.end(statusCode, null, context);
}
} | tracer.setAttribute(TracerProvider.ERROR_MSG, throwable.getMessage(), context); | private void end(int statusCode, Throwable throwable, Context context) {
if (throwable != null) {
if (throwable instanceof CosmosException) {
CosmosException cosmosException = (CosmosException) throwable;
if (statusCode == HttpConstants.StatusCodes.NOTFOUND && cosmosException.getSubStatusCode() == 0) {
tracer.end(statusCode, null, context);
return;
}
}
}
tracer.end(statusCode, throwable, context);
} | class TracerProvider {
// Underlying Azure Core tracer; null when tracing is disabled (see isEnabled()).
private Tracer tracer;
private static final Logger LOGGER = LoggerFactory.getLogger(TracerProvider.class);
// Shared Jackson mapper used to serialize diagnostics into span-event attributes.
private static final ObjectMapper mapper = new ObjectMapper();
// Attribute key under which serialized diagnostics JSON is attached to span events.
private final static String JSON_STRING = "JSON";
// Span attribute keys/values set on every Cosmos span (see startSpan).
public final static String DB_TYPE_VALUE = "Cosmos";
public final static String DB_TYPE = "db.type";
public final static String DB_INSTANCE = "db.instance";
public final static String DB_URL = "db.url";
public static final String DB_STATEMENT = "db.statement";
// Legacy error attribute keys; per review discussion these are candidates for removal in
// favor of the standard OTel exception.type / exception.message attributes — TODO confirm.
public static final String ERROR_MSG = "error.msg";
public static final String ERROR_TYPE = "error.type";
// Context entry marking a nested SDK call so that no second span is opened for it.
public static final String COSMOS_CALL_DEPTH = "cosmosCallDepth";
public static final String COSMOS_CALL_DEPTH_VAL = "nested";
// Status code recorded when a span ends with an error whose HTTP status is unknown.
public static final int ERROR_CODE = 0;
public static final String RESOURCE_PROVIDER_NAME = "Microsoft.DocumentDB";
// Latency thresholds above which full diagnostics are attached to the span as events.
public final Duration CRUD_THRESHOLD_FOR_DIAGNOSTICS = Duration.ofMillis(100);
public final Duration QUERY_THRESHOLD_FOR_DIAGNOSTICS = Duration.ofMillis(500);
/**
 * Creates a TracerProvider backed by the given Azure Core tracer.
 *
 * @param tracer the tracer implementation; may be null, in which case tracing is
 *        disabled (see {@link #isEnabled()}).
 */
public TracerProvider(Tracer tracer) {
this.tracer = tracer;
}
/**
 * Returns true when a tracer implementation was supplied and spans should be recorded.
 */
public boolean isEnabled() {
return tracer != null;
}
/**
 * For each tracer plugged into the SDK a new tracing span is created.
 * <p>
 * The {@code context} will be checked for containing information about a parent span. If a parent span is found the
 * new span will be added as a child, otherwise the span will be created and added to the context and any downstream
 * start calls will use the created span as the parent.
 *
 * @param methodName name of the SDK method; used as the span name and the db.statement attribute.
 * @param databaseId id of the database being operated on; skipped when null.
 * @param endpoint the service endpoint, recorded as the db.url attribute.
 * @param context Additional metadata that is passed through the call stack.
 * @return An updated context object.
 */
public Context startSpan(String methodName, String databaseId, String endpoint, Context context) {
Context local = Objects.requireNonNull(context, "'context' cannot be null.");
// Tag the span with the resource-provider namespace before starting it.
local = local.addData(AZ_TRACING_NAMESPACE_KEY, RESOURCE_PROVIDER_NAME);
local = tracer.start(methodName, local);
if (databaseId != null) {
tracer.setAttribute(TracerProvider.DB_INSTANCE, databaseId, local);
}
tracer.setAttribute(TracerProvider.DB_TYPE, DB_TYPE_VALUE, local);
tracer.setAttribute(TracerProvider.DB_URL, endpoint, local);
tracer.setAttribute(TracerProvider.DB_STATEMENT, methodName, local);
return local;
}
/**
 * Adds an event to the current span with the provided {@code timestamp} and {@code attributes}.
 * <p>This API does not provide any normalization if provided timestamps are out of range of the current
 * span timeline</p>
 * <p>Supported attribute values include String, double, boolean, long, String [], double [], long [].
 * Any other Object value type and null values will be silently ignored.</p>
 *
 * @param name the name of the event.
 * @param attributes the additional attributes to be set for the event.
 * @param timestamp The instant, in UTC, at which the event will be associated to the span.
 * @param context the call metadata containing information of the span to which the event should be associated with.
 * @throws NullPointerException if {@code eventName} is {@code null}.
 */
public void addEvent(String name, Map<String, Object> attributes, OffsetDateTime timestamp, Context context) {
// Thin delegation to the configured Azure Core tracer.
tracer.addEvent(name, attributes, timestamp, context);
}
/**
 * Completes the tracing span carried in {@code context}, using the terminal {@link Signal}
 * to decide between a successful and an error completion. For error signals carrying a
 * {@link CosmosException}, the exception's status code supersedes the supplied one.
 *
 * @param context call metadata holding the span to complete; must not be null.
 * @param signal the terminal signal (complete or error); must not be null.
 * @param statusCode status code to record; overridden by a CosmosException's status code.
 */
public <T extends CosmosResponse<? extends Resource>> void endSpan(Context context,
                                                                   Signal<T> signal,
                                                                   int statusCode) {
    Objects.requireNonNull(context, "'context' cannot be null.");
    Objects.requireNonNull(signal, "'signal' cannot be null.");
    switch (signal.getType()) {
        case ON_COMPLETE:
            end(statusCode, null, context);
            break;
        case ON_ERROR:
            Throwable error = signal.hasError() ? signal.getThrowable() : null;
            if (error instanceof CosmosException) {
                // Prefer the service-reported status over the caller-supplied one.
                statusCode = ((CosmosException) error).getStatusCode();
            }
            end(statusCode, error, context);
            break;
        default:
            // Other signal types do not terminate the span.
            break;
    }
}
public <T extends CosmosResponse<?>> Mono<T> traceEnabledCosmosResponsePublisher(Mono<T> resultPublisher,
Context context,
String spanName,
String databaseId,
String endpoint) {
return traceEnabledPublisher(resultPublisher, context, spanName, databaseId, endpoint,
(T response) -> response.getStatusCode(), (T response) -> response.getDiagnostics(), null);
}
public Mono<CosmosBatchResponse> traceEnabledBatchResponsePublisher(Mono<CosmosBatchResponse> resultPublisher,
Context context,
String spanName,
String containerId,
String databaseId,
CosmosAsyncClient client,
ConsistencyLevel consistencyLevel,
OperationType operationType,
ResourceType resourceType) {
return publisherWithClientTelemetry(resultPublisher, context, spanName, containerId, databaseId,
BridgeInternal.getServiceEndpoint(client),
client,
consistencyLevel,
operationType,
resourceType,
CosmosBatchResponse::getStatusCode,
CosmosBatchResponse::getDiagnostics,
null);
}
public <T> Mono<CosmosItemResponse<T>> traceEnabledCosmosItemResponsePublisher(Mono<CosmosItemResponse<T>> resultPublisher,
Context context,
String spanName,
String containerId,
String databaseId,
CosmosAsyncClient client,
ConsistencyLevel consistencyLevel,
OperationType operationType,
ResourceType resourceType,
Duration thresholdForDiagnosticsOnTracer) {
return publisherWithClientTelemetry(resultPublisher, context, spanName, containerId, databaseId,
BridgeInternal.getServiceEndpoint(client),
client,
consistencyLevel,
operationType,
resourceType,
CosmosItemResponse::getStatusCode,
CosmosItemResponse::getDiagnostics,
thresholdForDiagnosticsOnTracer);
}
/**
 * Core tracing wrapper for all public publisher overloads. Starts a span when the
 * subscription begins (unless this call is nested inside another traced SDK call),
 * attaches serialized diagnostics as span events when the operation exceeded the
 * latency threshold, and completes the span on success or error.
 *
 * @param resultPublisher publisher producing the operation result.
 * @param context call metadata; a {@link #COSMOS_CALL_DEPTH} entry marks a nested call
 *        that must not open a second span.
 * @param spanName name of the span to create.
 * @param databaseId database id, may be null.
 * @param endpoint service endpoint the request targets.
 * @param statusCodeFunc extracts the status code from a successful response.
 * @param diagnosticFunc extracts the diagnostics from a successful response.
 * @param thresholdForDiagnosticsOnTracer latency threshold above which diagnostics are
 *        attached; {@code null} means {@link #CRUD_THRESHOLD_FOR_DIAGNOSTICS}.
 * @return the instrumented publisher.
 */
private <T> Mono<T> traceEnabledPublisher(Mono<T> resultPublisher,
                                          Context context,
                                          String spanName,
                                          String databaseId,
                                          String endpoint,
                                          Function<T, Integer> statusCodeFunc,
                                          Function<T, CosmosDiagnostics> diagnosticFunc,
                                          Duration thresholdForDiagnosticsOnTracer) {
    final AtomicReference<Context> parentContext = new AtomicReference<>(Context.NONE);
    Optional<Object> callDepth = context.getData(COSMOS_CALL_DEPTH);
    final boolean isNestedCall = callDepth.isPresent();
    return resultPublisher
        .doOnSubscribe(ignoredValue -> {
            if (isEnabled() && !isNestedCall) {
                parentContext.set(this.startSpan(spanName, databaseId, endpoint, context));
            }
        }).doOnSuccess(response -> {
            if (isEnabled() && !isNestedCall) {
                CosmosDiagnostics cosmosDiagnostics = diagnosticFunc.apply(response);
                try {
                    Duration threshold = thresholdForDiagnosticsOnTracer != null
                        ? thresholdForDiagnosticsOnTracer
                        : CRUD_THRESHOLD_FOR_DIAGNOSTICS;
                    if (cosmosDiagnostics != null
                        && cosmosDiagnostics.getDuration() != null
                        && cosmosDiagnostics.getDuration().compareTo(threshold) > 0) {
                        addDiagnosticsOnTracerEvent(cosmosDiagnostics, parentContext.get());
                    }
                } catch (JsonProcessingException ex) {
                    // BUG FIX: the previous message had no '{}' placeholder, so SLF4J
                    // silently discarded the ex.getMessage() argument.
                    LOGGER.warn("Error while serializing diagnostics for tracer: {}", ex.getMessage());
                }
                this.endSpan(parentContext.get(), Signal.complete(), statusCodeFunc.apply(response));
            }
        }).doOnError(throwable -> {
            if (isEnabled() && !isNestedCall) {
                this.endSpan(parentContext.get(), Signal.error(throwable), ERROR_CODE);
            }
        });
}
/**
 * Adds client-telemetry collection on top of {@link #traceEnabledPublisher}: successful
 * item and batch responses, as well as {@link CosmosException} failures, are recorded
 * into the client-telemetry latency/request-charge histograms when telemetry is enabled.
 *
 * @param resultPublisher publisher producing the operation result.
 * @param context call metadata carrying the (optional) parent span.
 * @param spanName name of the tracing span to create.
 * @param containerId container id reported in telemetry.
 * @param databaseId database id reported in telemetry.
 * @param endpoint service endpoint the request targets.
 * @param client client whose telemetry settings and context are used.
 * @param consistencyLevel consistency level of the operation, may be null.
 * @param operationType operation type reported in telemetry.
 * @param resourceType resource type reported in telemetry.
 * @param statusCodeFunc extracts the status code from a successful response.
 * @param diagnosticFunc extracts the diagnostics from a successful response.
 * @param thresholdForDiagnosticsOnTracer latency threshold for attaching diagnostics, may be null.
 * @return the instrumented publisher.
 */
private <T> Mono<T> publisherWithClientTelemetry(Mono<T> resultPublisher,
                                                 Context context,
                                                 String spanName,
                                                 String containerId,
                                                 String databaseId,
                                                 String endpoint,
                                                 CosmosAsyncClient client,
                                                 ConsistencyLevel consistencyLevel,
                                                 OperationType operationType,
                                                 ResourceType resourceType,
                                                 Function<T, Integer> statusCodeFunc,
                                                 Function<T, CosmosDiagnostics> diagnosticFunc,
                                                 Duration thresholdForDiagnosticsOnTracer) {
    Mono<T> tracerMono = traceEnabledPublisher(resultPublisher, context, spanName, databaseId, endpoint,
        statusCodeFunc, diagnosticFunc, thresholdForDiagnosticsOnTracer);
    return tracerMono
        .doOnSuccess(response -> {
            // Evaluate the telemetry switch once per emission instead of once per branch.
            if (!Configs.isClientTelemetryEnabled(BridgeInternal.isClientTelemetryEnabled(client))) {
                return;
            }
            if (response instanceof CosmosItemResponse) {
                @SuppressWarnings("unchecked")
                CosmosItemResponse<T> itemResponse = (CosmosItemResponse<T>) response;
                fillClientTelemetry(client, itemResponse.getDiagnostics(), itemResponse.getStatusCode(),
                    ModelBridgeInternal.getPayloadLength(itemResponse), containerId,
                    databaseId, operationType, resourceType, consistencyLevel,
                    (float) itemResponse.getRequestCharge());
            } else if (response instanceof CosmosBatchResponse) {
                // Cast to the non-generic CosmosBatchResponse is a checked cast;
                // no @SuppressWarnings("unchecked") is required here.
                CosmosBatchResponse cosmosBatchResponse = (CosmosBatchResponse) response;
                fillClientTelemetry(client, cosmosBatchResponse.getDiagnostics(), cosmosBatchResponse.getStatusCode(),
                    ModelBridgeInternal.getPayloadLength(cosmosBatchResponse), containerId,
                    databaseId, operationType, resourceType, consistencyLevel,
                    (float) cosmosBatchResponse.getRequestCharge());
            }
        }).doOnError(throwable -> {
            if (throwable instanceof CosmosException
                && Configs.isClientTelemetryEnabled(BridgeInternal.isClientTelemetryEnabled(client))) {
                CosmosException cosmosException = (CosmosException) throwable;
                // Failures carry no payload, hence the null object size.
                fillClientTelemetry(client, cosmosException.getDiagnostics(), cosmosException.getStatusCode(),
                    null, containerId,
                    databaseId, operationType, resourceType, consistencyLevel,
                    (float) cosmosException.getRequestCharge());
            }
        });
}
private void fillClientTelemetry(CosmosAsyncClient cosmosAsyncClient,
CosmosDiagnostics cosmosDiagnostics,
int statusCode,
Integer objectSize,
String containerId,
String databaseId,
OperationType operationType,
ResourceType resourceType,
ConsistencyLevel consistencyLevel,
float requestCharge) {
ClientTelemetry telemetry = BridgeInternal.getContextClient(cosmosAsyncClient).getClientTelemetry();
ReportPayload reportPayloadLatency = createReportPayload(cosmosAsyncClient, cosmosDiagnostics,
statusCode, objectSize, containerId, databaseId
, operationType, resourceType, consistencyLevel, ClientTelemetry.REQUEST_LATENCY_NAME,
ClientTelemetry.REQUEST_LATENCY_UNIT);
ConcurrentDoubleHistogram latencyHistogram = telemetry.getClientTelemetryInfo().getOperationInfoMap().get(reportPayloadLatency);
if (latencyHistogram != null) {
ClientTelemetry.recordValue(latencyHistogram, cosmosDiagnostics.getDuration().toNanos()/1000);
} else {
if (statusCode >= HttpConstants.StatusCodes.MINIMUM_SUCCESS_STATUSCODE && statusCode <= HttpConstants.StatusCodes.MAXIMUM_SUCCESS_STATUSCODE) {
latencyHistogram = new ConcurrentDoubleHistogram(ClientTelemetry.REQUEST_LATENCY_MAX_MICRO_SEC, ClientTelemetry.REQUEST_LATENCY_SUCCESS_PRECISION);
} else {
latencyHistogram = new ConcurrentDoubleHistogram(ClientTelemetry.REQUEST_LATENCY_MAX_MICRO_SEC, ClientTelemetry.REQUEST_LATENCY_FAILURE_PRECISION);
}
latencyHistogram.setAutoResize(true);
ClientTelemetry.recordValue(latencyHistogram, cosmosDiagnostics.getDuration().toNanos()/1000);
telemetry.getClientTelemetryInfo().getOperationInfoMap().put(reportPayloadLatency, latencyHistogram);
}
ReportPayload reportPayloadRequestCharge = createReportPayload(cosmosAsyncClient, cosmosDiagnostics,
statusCode, objectSize, containerId, databaseId
, operationType, resourceType, consistencyLevel, ClientTelemetry.REQUEST_CHARGE_NAME, ClientTelemetry.REQUEST_CHARGE_UNIT);
ConcurrentDoubleHistogram requestChargeHistogram = telemetry.getClientTelemetryInfo().getOperationInfoMap().get(reportPayloadRequestCharge);
if (requestChargeHistogram != null) {
ClientTelemetry.recordValue(requestChargeHistogram, requestCharge);
} else {
requestChargeHistogram = new ConcurrentDoubleHistogram(ClientTelemetry.REQUEST_CHARGE_MAX, ClientTelemetry.REQUEST_CHARGE_PRECISION);
requestChargeHistogram.setAutoResize(true);
ClientTelemetry.recordValue(requestChargeHistogram, requestCharge);
telemetry.getClientTelemetryInfo().getOperationInfoMap().put(reportPayloadRequestCharge,
requestChargeHistogram);
}
}
private ReportPayload createReportPayload(CosmosAsyncClient cosmosAsyncClient,
CosmosDiagnostics cosmosDiagnostics,
int statusCode,
Integer objectSize,
String containerId,
String databaseId,
OperationType operationType,
ResourceType resourceType,
ConsistencyLevel consistencyLevel,
String metricsName,
String unitName) {
ReportPayload reportPayload = new ReportPayload(metricsName, unitName);
reportPayload.setRegionsContacted(BridgeInternal.getRegionsContacted(cosmosDiagnostics).toString());
reportPayload.setConsistency(consistencyLevel == null ?
BridgeInternal.getContextClient(cosmosAsyncClient).getConsistencyLevel() :
consistencyLevel);
if (objectSize != null) {
reportPayload.setGreaterThan1Kb(objectSize > ClientTelemetry.ONE_KB_TO_BYTES);
}
reportPayload.setDatabaseName(databaseId);
reportPayload.setContainerName(containerId);
reportPayload.setOperation(operationType);
reportPayload.setResource(resourceType);
reportPayload.setStatusCode(statusCode);
return reportPayload;
}
private void addDiagnosticsOnTracerEvent(CosmosDiagnostics cosmosDiagnostics, Context context) throws JsonProcessingException {
if (cosmosDiagnostics == null) {
return;
}
ClientSideRequestStatistics clientSideRequestStatistics =
BridgeInternal.getClientSideRequestStatics(cosmosDiagnostics);
Map<String, Object> attributes = null;
int diagnosticsCounter = 1;
for (ClientSideRequestStatistics.StoreResponseStatistics storeResponseStatistics :
clientSideRequestStatistics.getResponseStatisticsList()) {
attributes = new HashMap<>();
attributes.put(JSON_STRING, mapper.writeValueAsString(storeResponseStatistics));
Iterator<RequestTimeline.Event> eventIterator = null;
try {
if (storeResponseStatistics.getStoreResult() != null) {
eventIterator =
DirectBridgeInternal.getRequestTimeline(storeResponseStatistics.getStoreResult().toResponse()).iterator();
}
} catch (CosmosException ex) {
eventIterator = BridgeInternal.getRequestTimeline(ex).iterator();
}
OffsetDateTime requestStartTime = OffsetDateTime.ofInstant(storeResponseStatistics.getRequestResponseTimeUTC()
, ZoneOffset.UTC);
if (eventIterator != null) {
while (eventIterator.hasNext()) {
RequestTimeline.Event event = eventIterator.next();
if (event.getName().equals("created")) {
requestStartTime = OffsetDateTime.ofInstant(event.getStartTime(), ZoneOffset.UTC);
break;
}
}
}
this.addEvent("StoreResponse" + diagnosticsCounter++, attributes, requestStartTime, context);
}
diagnosticsCounter = 1;
for (ClientSideRequestStatistics.StoreResponseStatistics statistics :
ClientSideRequestStatistics.getCappedSupplementalResponseStatisticsList(clientSideRequestStatistics.getSupplementalResponseStatisticsList())) {
attributes = new HashMap<>();
attributes.put(JSON_STRING, mapper.writeValueAsString(statistics));
OffsetDateTime requestStartTime = OffsetDateTime.ofInstant(statistics.getRequestResponseTimeUTC(),
ZoneOffset.UTC);
if (statistics.getStoreResult() != null) {
Iterator<RequestTimeline.Event> eventIterator =
DirectBridgeInternal.getRequestTimeline(statistics.getStoreResult().toResponse()).iterator();
while (eventIterator.hasNext()) {
RequestTimeline.Event event = eventIterator.next();
if (event.getName().equals("created")) {
requestStartTime = OffsetDateTime.ofInstant(event.getStartTime(), ZoneOffset.UTC);
break;
}
}
}
this.addEvent("Supplemental StoreResponse" + diagnosticsCounter++, attributes, requestStartTime, context);
}
if (clientSideRequestStatistics.getGatewayStatistics() != null) {
attributes = new HashMap<>();
attributes.put(JSON_STRING,
mapper.writeValueAsString(clientSideRequestStatistics.getGatewayStatistics()));
OffsetDateTime requestStartTime =
OffsetDateTime.ofInstant(clientSideRequestStatistics.getRequestStartTimeUTC(), ZoneOffset.UTC);
if (clientSideRequestStatistics.getGatewayStatistics().getRequestTimeline() != null) {
Iterator<RequestTimeline.Event> eventIterator =
clientSideRequestStatistics.getGatewayStatistics().getRequestTimeline().iterator();
while (eventIterator.hasNext()) {
RequestTimeline.Event event = eventIterator.next();
if (event.getName().equals("created")) {
requestStartTime = OffsetDateTime.ofInstant(event.getStartTime(), ZoneOffset.UTC);
break;
}
}
}
this.addEvent("GatewayStatistics", attributes, requestStartTime, context);
}
if (clientSideRequestStatistics.getRetryContext().getRetryStartTime() != null) {
attributes = new HashMap<>();
attributes.put(JSON_STRING,
mapper.writeValueAsString(clientSideRequestStatistics.getRetryContext()));
this.addEvent("Retry Context", attributes,
OffsetDateTime.ofInstant(clientSideRequestStatistics.getRetryContext().getRetryStartTime(),
ZoneOffset.UTC), context);
}
diagnosticsCounter = 1;
for (ClientSideRequestStatistics.AddressResolutionStatistics addressResolutionStatistics :
clientSideRequestStatistics.getAddressResolutionStatistics().values()) {
attributes = new HashMap<>();
attributes.put(JSON_STRING, mapper.writeValueAsString(addressResolutionStatistics));
this.addEvent("AddressResolutionStatistics" + diagnosticsCounter++, attributes,
OffsetDateTime.ofInstant(addressResolutionStatistics.getStartTimeUTC(), ZoneOffset.UTC), context);
}
if (clientSideRequestStatistics.getSerializationDiagnosticsContext().serializationDiagnosticsList != null) {
for (SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics :
clientSideRequestStatistics.getSerializationDiagnosticsContext().serializationDiagnosticsList) {
attributes = new HashMap<>();
attributes.put(JSON_STRING, mapper.writeValueAsString(serializationDiagnostics));
this.addEvent("SerializationDiagnostics " + serializationDiagnostics.serializationType, attributes,
OffsetDateTime.ofInstant(serializationDiagnostics.startTimeUTC, ZoneOffset.UTC), context);
}
}
attributes = new HashMap<>();
attributes.put(JSON_STRING,
mapper.writeValueAsString(clientSideRequestStatistics.getRegionsContacted()));
this.addEvent("RegionContacted", attributes,
OffsetDateTime.ofInstant(clientSideRequestStatistics.getRequestStartTimeUTC(), ZoneOffset.UTC), context);
attributes = new HashMap<>();
attributes.put(JSON_STRING,
mapper.writeValueAsString(ClientSideRequestStatistics.fetchSystemInformation()));
this.addEvent("SystemInformation", attributes,
OffsetDateTime.ofInstant(clientSideRequestStatistics.getRequestStartTimeUTC(), ZoneOffset.UTC), context);
attributes = new HashMap<>();
attributes.put(JSON_STRING,
mapper.writeValueAsString(clientSideRequestStatistics.getDiagnosticsClientContext()));
this.addEvent("ClientCfgs", attributes,
OffsetDateTime.ofInstant(clientSideRequestStatistics.getRequestStartTimeUTC(), ZoneOffset.UTC), context);
}
} | class TracerProvider {
private Tracer tracer;
private static final Logger LOGGER = LoggerFactory.getLogger(TracerProvider.class);
private static final ObjectMapper mapper = new ObjectMapper();
private final static String JSON_STRING = "JSON";
public final static String DB_TYPE_VALUE = "Cosmos";
public final static String DB_TYPE = "db.type";
public final static String DB_INSTANCE = "db.instance";
public final static String DB_URL = "db.url";
public static final String DB_STATEMENT = "db.statement";
public static final String COSMOS_CALL_DEPTH = "cosmosCallDepth";
public static final String COSMOS_CALL_DEPTH_VAL = "nested";
public static final int ERROR_CODE = 0;
public static final String RESOURCE_PROVIDER_NAME = "Microsoft.DocumentDB";
public final Duration CRUD_THRESHOLD_FOR_DIAGNOSTICS = Duration.ofMillis(100);
public final Duration QUERY_THRESHOLD_FOR_DIAGNOSTICS = Duration.ofMillis(500);
/**
 * Creates a TracerProvider that delegates all span operations to the given tracer.
 *
 * @param tracer the azure-core {@link Tracer} implementation; may be {@code null}, in which
 * case tracing is reported as disabled by {@code isEnabled()}.
 */
public TracerProvider(Tracer tracer) {
this.tracer = tracer;
}
/**
 * Returns {@code true} when a tracer was supplied at construction time. Callers in this class
 * guard every tracing call with this check, so a {@code null} tracer never gets dereferenced.
 */
public boolean isEnabled() {
return tracer != null;
}
/**
 * Starts a new tracing span for a Cosmos operation and records the standard database
 * attributes (db.type, db.url, db.statement and, when known, db.instance) on it.
 * <p>
 * If {@code context} already carries a parent span the new span is created as its child;
 * otherwise a new root span is created. The returned context carries the started span and
 * must be passed to subsequent tracer calls.
 *
 * @param methodName name of the operation; used as both the span name and db.statement.
 * @param databaseId database id recorded as db.instance; skipped when {@code null}.
 * @param endpoint service endpoint recorded as db.url.
 * @param context additional metadata passed through the call stack; must not be {@code null}.
 * @return an updated context carrying the started span.
 */
public Context startSpan(String methodName, String databaseId, String endpoint, Context context) {
    Objects.requireNonNull(context, "'context' cannot be null.");
    // Tag the context with the resource-provider namespace before the span is started.
    Context spanContext = tracer.start(methodName,
        context.addData(AZ_TRACING_NAMESPACE_KEY, RESOURCE_PROVIDER_NAME));
    if (databaseId != null) {
        tracer.setAttribute(DB_INSTANCE, databaseId, spanContext);
    }
    tracer.setAttribute(DB_TYPE, DB_TYPE_VALUE, spanContext);
    tracer.setAttribute(DB_URL, endpoint, spanContext);
    tracer.setAttribute(DB_STATEMENT, methodName, spanContext);
    return spanContext;
}
/**
 * Adds an event to the current span with the provided {@code timestamp} and {@code attributes}.
 * <p>This API does not provide any normalization if provided timestamps are out of range of the current
 * span timeline</p>
 * <p>Supported attribute values include String, double, boolean, long, String [], double [], long [].
 * Any other Object value type and null values will be silently ignored.</p>
 *
 * @param name the name of the event.
 * @param attributes the additional attributes to be set for the event.
 * @param timestamp The instant, in UTC, at which the event will be associated to the span.
 * @param context the call metadata containing information of the span to which the event should be associated with.
 * @throws NullPointerException if {@code name} is {@code null}.
 */
public void addEvent(String name, Map<String, Object> attributes, OffsetDateTime timestamp, Context context) {
// Thin delegate; callers are expected to have checked isEnabled() first (tracer may be null).
tracer.addEvent(name, attributes, timestamp, context);
}
/**
 * Completes the tracing span carried in {@code context} using the terminal {@link Signal}.
 * On completion the span ends with the supplied status code; on error the status code is
 * taken from the {@link CosmosException} (when the error is one) before ending the span with
 * the throwable attached. Non-terminal signal types are ignored.
 *
 * @param context additional metadata that is passed through the call stack; must not be {@code null}.
 * @param signal the terminal signal carrying status and error metadata; must not be {@code null}.
 * @param statusCode status code to record when the signal itself does not determine one.
 */
public <T extends CosmosResponse<? extends Resource>> void endSpan(Context context,
                                                                   Signal<T> signal,
                                                                   int statusCode) {
    Objects.requireNonNull(context, "'context' cannot be null.");
    Objects.requireNonNull(signal, "'signal' cannot be null.");
    if (signal.isOnComplete()) {
        end(statusCode, null, context);
        return;
    }
    if (signal.isOnError()) {
        Throwable throwable = null;
        if (signal.hasError()) {
            throwable = signal.getThrowable();
            if (throwable instanceof CosmosException) {
                // Prefer the service status code carried by the exception.
                statusCode = ((CosmosException) throwable).getStatusCode();
            }
        }
        end(statusCode, throwable, context);
    }
    // Any other signal type (ON_NEXT, ON_SUBSCRIBE, ...) carries no terminal state: no-op.
}
/**
 * Instruments a {@link CosmosResponse} publisher with span start/end bookkeeping.
 * Passing {@code null} as the diagnostics latency threshold selects the default CRUD threshold.
 *
 * @param resultPublisher the publisher to instrument.
 * @param context call metadata passed through the call stack.
 * @param spanName name of the span to create.
 * @param databaseId database id recorded on the span, may be {@code null}.
 * @param endpoint service endpoint recorded on the span.
 * @return the instrumented publisher.
 */
public <T extends CosmosResponse<?>> Mono<T> traceEnabledCosmosResponsePublisher(Mono<T> resultPublisher,
                                                                                 Context context,
                                                                                 String spanName,
                                                                                 String databaseId,
                                                                                 String endpoint) {
    return traceEnabledPublisher(resultPublisher, context, spanName, databaseId, endpoint,
        CosmosResponse::getStatusCode, CosmosResponse::getDiagnostics, null);
}
/**
 * Instruments a transactional-batch publisher with tracing and, when enabled, client telemetry.
 * Delegates to {@code publisherWithClientTelemetry} using the client's service endpoint and no
 * latency-threshold override (the default CRUD threshold then applies for diagnostics events).
 */
public Mono<CosmosBatchResponse> traceEnabledBatchResponsePublisher(Mono<CosmosBatchResponse> resultPublisher,
Context context,
String spanName,
String containerId,
String databaseId,
CosmosAsyncClient client,
ConsistencyLevel consistencyLevel,
OperationType operationType,
ResourceType resourceType) {
return publisherWithClientTelemetry(resultPublisher, context, spanName, containerId, databaseId,
BridgeInternal.getServiceEndpoint(client),
client,
consistencyLevel,
operationType,
resourceType,
CosmosBatchResponse::getStatusCode,
CosmosBatchResponse::getDiagnostics,
// null threshold => default CRUD latency threshold.
null);
}
/**
 * Instruments an item-response publisher with tracing and, when enabled, client telemetry.
 * Unlike the batch variant, a caller-supplied diagnostics latency threshold is forwarded;
 * {@code null} falls back to the default CRUD threshold downstream.
 */
public <T> Mono<CosmosItemResponse<T>> traceEnabledCosmosItemResponsePublisher(Mono<CosmosItemResponse<T>> resultPublisher,
Context context,
String spanName,
String containerId,
String databaseId,
CosmosAsyncClient client,
ConsistencyLevel consistencyLevel,
OperationType operationType,
ResourceType resourceType,
Duration thresholdForDiagnosticsOnTracer) {
return publisherWithClientTelemetry(resultPublisher, context, spanName, containerId, databaseId,
BridgeInternal.getServiceEndpoint(client),
client,
consistencyLevel,
operationType,
resourceType,
CosmosItemResponse::getStatusCode,
CosmosItemResponse::getDiagnostics,
thresholdForDiagnosticsOnTracer);
}
/**
 * Core tracing wrapper: starts a span on subscription and ends it on the terminal signal.
 * <p>
 * Nested SDK calls — detected via the {@link #COSMOS_CALL_DEPTH} context key — are not traced
 * again, so only the outermost public API call produces a span. On success, when the operation's
 * diagnostics duration exceeds the latency threshold, the diagnostics are attached to the span
 * as events before the span is closed.
 *
 * @param resultPublisher the publisher to instrument.
 * @param context call metadata; may carry {@link #COSMOS_CALL_DEPTH} for nested calls.
 * @param spanName name of the span to create.
 * @param databaseId database id recorded on the span, may be {@code null}.
 * @param endpoint service endpoint recorded on the span.
 * @param statusCodeFunc extracts the status code from a successful response.
 * @param diagnosticFunc extracts the diagnostics from a successful response.
 * @param thresholdForDiagnosticsOnTracer latency threshold above which diagnostics events are
 * attached; {@code null} selects {@link #CRUD_THRESHOLD_FOR_DIAGNOSTICS}.
 * @return the instrumented publisher.
 */
private <T> Mono<T> traceEnabledPublisher(Mono<T> resultPublisher,
                                          Context context,
                                          String spanName,
                                          String databaseId,
                                          String endpoint,
                                          Function<T, Integer> statusCodeFunc,
                                          Function<T, CosmosDiagnostics> diagnosticFunc,
                                          Duration thresholdForDiagnosticsOnTracer) {
    final AtomicReference<Context> parentContext = new AtomicReference<>(Context.NONE);
    Optional<Object> callDepth = context.getData(COSMOS_CALL_DEPTH);
    final boolean isNestedCall = callDepth.isPresent();
    return resultPublisher
        .doOnSubscribe(ignoredValue -> {
            if (isEnabled() && !isNestedCall) {
                parentContext.set(this.startSpan(spanName, databaseId, endpoint, context));
            }
        }).doOnSuccess(response -> {
            if (isEnabled() && !isNestedCall) {
                CosmosDiagnostics cosmosDiagnostics = diagnosticFunc.apply(response);
                try {
                    Duration threshold = thresholdForDiagnosticsOnTracer;
                    if (threshold == null) {
                        threshold = CRUD_THRESHOLD_FOR_DIAGNOSTICS;
                    }
                    if (cosmosDiagnostics != null
                        && cosmosDiagnostics.getDuration() != null
                        && cosmosDiagnostics.getDuration().compareTo(threshold) > 0) {
                        addDiagnosticsOnTracerEvent(cosmosDiagnostics, parentContext.get());
                    }
                } catch (JsonProcessingException ex) {
                    // Fix: the original call had no '{}' placeholder, so SLF4J silently dropped
                    // the ex.getMessage() argument and logged only the bare message.
                    LOGGER.warn("Error while serializing diagnostics for tracer: {}", ex.getMessage());
                }
                this.endSpan(parentContext.get(), Signal.complete(), statusCodeFunc.apply(response));
            }
        }).doOnError(throwable -> {
            if (isEnabled() && !isNestedCall) {
                this.endSpan(parentContext.get(), Signal.error(throwable), ERROR_CODE);
            }
        });
}
/**
 * Layers client-telemetry collection on top of {@link #traceEnabledPublisher}.
 * On success, item and batch responses feed latency/request-charge histograms; on error,
 * {@link CosmosException}s do the same using the exception's diagnostics and status code.
 * Telemetry is recorded only when it is enabled for this client.
 *
 * @return the instrumented publisher.
 */
private <T> Mono<T> publisherWithClientTelemetry(Mono<T> resultPublisher,
                                                 Context context,
                                                 String spanName,
                                                 String containerId,
                                                 String databaseId,
                                                 String endpoint,
                                                 CosmosAsyncClient client,
                                                 ConsistencyLevel consistencyLevel,
                                                 OperationType operationType,
                                                 ResourceType resourceType,
                                                 Function<T, Integer> statusCodeFunc,
                                                 Function<T, CosmosDiagnostics> diagnosticFunc,
                                                 Duration thresholdForDiagnosticsOnTracer) {
    Mono<T> tracerMono = traceEnabledPublisher(resultPublisher, context, spanName, databaseId, endpoint, statusCodeFunc, diagnosticFunc, thresholdForDiagnosticsOnTracer);
    return tracerMono
        .doOnSuccess(response -> {
            // Evaluate the telemetry switch once per signal instead of once per instanceof branch.
            boolean telemetryEnabled = Configs.isClientTelemetryEnabled(BridgeInternal.isClientTelemetryEnabled(client));
            if (telemetryEnabled && response instanceof CosmosItemResponse) {
                @SuppressWarnings("unchecked")
                CosmosItemResponse<T> itemResponse = (CosmosItemResponse<T>) response;
                fillClientTelemetry(client, itemResponse.getDiagnostics(), itemResponse.getStatusCode(),
                    ModelBridgeInternal.getPayloadLength(itemResponse), containerId,
                    databaseId, operationType, resourceType, consistencyLevel,
                    (float) itemResponse.getRequestCharge());
            } else if (telemetryEnabled && response instanceof CosmosBatchResponse) {
                // CosmosBatchResponse is not generic: this cast is checked, so no
                // @SuppressWarnings("unchecked") is needed (the original annotation was spurious).
                CosmosBatchResponse cosmosBatchResponse = (CosmosBatchResponse) response;
                fillClientTelemetry(client, cosmosBatchResponse.getDiagnostics(), cosmosBatchResponse.getStatusCode(),
                    ModelBridgeInternal.getPayloadLength(cosmosBatchResponse), containerId,
                    databaseId, operationType, resourceType, consistencyLevel,
                    (float) cosmosBatchResponse.getRequestCharge());
            }
        }).doOnError(throwable -> {
            if (Configs.isClientTelemetryEnabled(BridgeInternal.isClientTelemetryEnabled(client)) && throwable instanceof CosmosException) {
                CosmosException cosmosException = (CosmosException) throwable;
                // No payload length on the error path: the request body size is not available here.
                fillClientTelemetry(client, cosmosException.getDiagnostics(), cosmosException.getStatusCode(),
                    null, containerId,
                    databaseId, operationType, resourceType, consistencyLevel,
                    (float) cosmosException.getRequestCharge());
            }
        });
}
/**
 * Records the operation's latency and request charge into the client-telemetry histograms.
 * Histograms are keyed by a {@link ReportPayload} describing the operation (database/container,
 * operation and resource type, consistency, status code, metric name); a histogram is created
 * lazily on first use, set to auto-resize, and reused for subsequent matching operations.
 */
private void fillClientTelemetry(CosmosAsyncClient cosmosAsyncClient,
CosmosDiagnostics cosmosDiagnostics,
int statusCode,
Integer objectSize,
String containerId,
String databaseId,
OperationType operationType,
ResourceType resourceType,
ConsistencyLevel consistencyLevel,
float requestCharge) {
ClientTelemetry telemetry = BridgeInternal.getContextClient(cosmosAsyncClient).getClientTelemetry();
ReportPayload reportPayloadLatency = createReportPayload(cosmosAsyncClient, cosmosDiagnostics,
statusCode, objectSize, containerId, databaseId
, operationType, resourceType, consistencyLevel, ClientTelemetry.REQUEST_LATENCY_NAME,
ClientTelemetry.REQUEST_LATENCY_UNIT);
ConcurrentDoubleHistogram latencyHistogram = telemetry.getClientTelemetryInfo().getOperationInfoMap().get(reportPayloadLatency);
if (latencyHistogram != null) {
// Latency is recorded in microseconds (nanos / 1000).
ClientTelemetry.recordValue(latencyHistogram, cosmosDiagnostics.getDuration().toNanos()/1000);
} else {
// Success and failure latencies use different histogram precisions.
if (statusCode >= HttpConstants.StatusCodes.MINIMUM_SUCCESS_STATUSCODE && statusCode <= HttpConstants.StatusCodes.MAXIMUM_SUCCESS_STATUSCODE) {
latencyHistogram = new ConcurrentDoubleHistogram(ClientTelemetry.REQUEST_LATENCY_MAX_MICRO_SEC, ClientTelemetry.REQUEST_LATENCY_SUCCESS_PRECISION);
} else {
latencyHistogram = new ConcurrentDoubleHistogram(ClientTelemetry.REQUEST_LATENCY_MAX_MICRO_SEC, ClientTelemetry.REQUEST_LATENCY_FAILURE_PRECISION);
}
latencyHistogram.setAutoResize(true);
ClientTelemetry.recordValue(latencyHistogram, cosmosDiagnostics.getDuration().toNanos()/1000);
telemetry.getClientTelemetryInfo().getOperationInfoMap().put(reportPayloadLatency, latencyHistogram);
}
ReportPayload reportPayloadRequestCharge = createReportPayload(cosmosAsyncClient, cosmosDiagnostics,
statusCode, objectSize, containerId, databaseId
, operationType, resourceType, consistencyLevel, ClientTelemetry.REQUEST_CHARGE_NAME, ClientTelemetry.REQUEST_CHARGE_UNIT);
ConcurrentDoubleHistogram requestChargeHistogram = telemetry.getClientTelemetryInfo().getOperationInfoMap().get(reportPayloadRequestCharge);
if (requestChargeHistogram != null) {
ClientTelemetry.recordValue(requestChargeHistogram, requestCharge);
} else {
requestChargeHistogram = new ConcurrentDoubleHistogram(ClientTelemetry.REQUEST_CHARGE_MAX, ClientTelemetry.REQUEST_CHARGE_PRECISION);
requestChargeHistogram.setAutoResize(true);
ClientTelemetry.recordValue(requestChargeHistogram, requestCharge);
telemetry.getClientTelemetryInfo().getOperationInfoMap().put(reportPayloadRequestCharge,
requestChargeHistogram);
}
}
/**
 * Builds the {@link ReportPayload} key describing one operation for the telemetry histograms.
 * When no explicit consistency level is supplied, the client's configured level is used.
 */
private ReportPayload createReportPayload(CosmosAsyncClient cosmosAsyncClient,
                                          CosmosDiagnostics cosmosDiagnostics,
                                          int statusCode,
                                          Integer objectSize,
                                          String containerId,
                                          String databaseId,
                                          OperationType operationType,
                                          ResourceType resourceType,
                                          ConsistencyLevel consistencyLevel,
                                          String metricsName,
                                          String unitName) {
    ReportPayload payload = new ReportPayload(metricsName, unitName);
    payload.setRegionsContacted(BridgeInternal.getRegionsContacted(cosmosDiagnostics).toString());
    if (consistencyLevel != null) {
        payload.setConsistency(consistencyLevel);
    } else {
        // Fall back to the consistency level configured on the client.
        payload.setConsistency(BridgeInternal.getContextClient(cosmosAsyncClient).getConsistencyLevel());
    }
    if (objectSize != null) {
        payload.setGreaterThan1Kb(objectSize > ClientTelemetry.ONE_KB_TO_BYTES);
    }
    payload.setDatabaseName(databaseId);
    payload.setContainerName(containerId);
    payload.setOperation(operationType);
    payload.setResource(resourceType);
    payload.setStatusCode(statusCode);
    return payload;
}
/**
 * Attaches the request diagnostics carried by {@code cosmosDiagnostics} to the current span as
 * timestamped events: store responses, supplemental store responses, gateway statistics, retry
 * context, address-resolution statistics, serialization diagnostics, regions contacted, system
 * information and client configuration. Each event payload is the JSON serialization of the
 * corresponding statistics object under the "JSON" attribute key.
 *
 * @param cosmosDiagnostics diagnostics of the completed operation; no-op when {@code null}.
 * @param context tracing context holding the span the events are added to.
 * @throws JsonProcessingException if a statistics object cannot be serialized to JSON.
 */
private void addDiagnosticsOnTracerEvent(CosmosDiagnostics cosmosDiagnostics, Context context) throws JsonProcessingException {
    if (cosmosDiagnostics == null) {
        return;
    }
    ClientSideRequestStatistics clientSideRequestStatistics =
        BridgeInternal.getClientSideRequestStatics(cosmosDiagnostics);
    Map<String, Object> attributes;
    int diagnosticsCounter = 1;
    for (ClientSideRequestStatistics.StoreResponseStatistics storeResponseStatistics :
        clientSideRequestStatistics.getResponseStatisticsList()) {
        attributes = new HashMap<>();
        attributes.put(JSON_STRING, mapper.writeValueAsString(storeResponseStatistics));
        Iterator<RequestTimeline.Event> eventIterator = null;
        try {
            if (storeResponseStatistics.getStoreResult() != null) {
                eventIterator =
                    DirectBridgeInternal.getRequestTimeline(storeResponseStatistics.getStoreResult().toResponse()).iterator();
            }
        } catch (CosmosException ex) {
            // toResponse() throws for failed requests; the timeline is then carried by the exception.
            eventIterator = BridgeInternal.getRequestTimeline(ex).iterator();
        }
        OffsetDateTime requestStartTime = startTimeFromRequestTimeline(eventIterator,
            OffsetDateTime.ofInstant(storeResponseStatistics.getRequestResponseTimeUTC(), ZoneOffset.UTC));
        this.addEvent("StoreResponse" + diagnosticsCounter++, attributes, requestStartTime, context);
    }
    diagnosticsCounter = 1;
    for (ClientSideRequestStatistics.StoreResponseStatistics statistics :
        ClientSideRequestStatistics.getCappedSupplementalResponseStatisticsList(clientSideRequestStatistics.getSupplementalResponseStatisticsList())) {
        attributes = new HashMap<>();
        attributes.put(JSON_STRING, mapper.writeValueAsString(statistics));
        OffsetDateTime requestStartTime =
            OffsetDateTime.ofInstant(statistics.getRequestResponseTimeUTC(), ZoneOffset.UTC);
        if (statistics.getStoreResult() != null) {
            requestStartTime = startTimeFromRequestTimeline(
                DirectBridgeInternal.getRequestTimeline(statistics.getStoreResult().toResponse()).iterator(),
                requestStartTime);
        }
        this.addEvent("Supplemental StoreResponse" + diagnosticsCounter++, attributes, requestStartTime, context);
    }
    if (clientSideRequestStatistics.getGatewayStatistics() != null) {
        attributes = new HashMap<>();
        attributes.put(JSON_STRING,
            mapper.writeValueAsString(clientSideRequestStatistics.getGatewayStatistics()));
        OffsetDateTime requestStartTime =
            OffsetDateTime.ofInstant(clientSideRequestStatistics.getRequestStartTimeUTC(), ZoneOffset.UTC);
        if (clientSideRequestStatistics.getGatewayStatistics().getRequestTimeline() != null) {
            requestStartTime = startTimeFromRequestTimeline(
                clientSideRequestStatistics.getGatewayStatistics().getRequestTimeline().iterator(),
                requestStartTime);
        }
        this.addEvent("GatewayStatistics", attributes, requestStartTime, context);
    }
    if (clientSideRequestStatistics.getRetryContext().getRetryStartTime() != null) {
        attributes = new HashMap<>();
        attributes.put(JSON_STRING,
            mapper.writeValueAsString(clientSideRequestStatistics.getRetryContext()));
        this.addEvent("Retry Context", attributes,
            OffsetDateTime.ofInstant(clientSideRequestStatistics.getRetryContext().getRetryStartTime(),
                ZoneOffset.UTC), context);
    }
    diagnosticsCounter = 1;
    for (ClientSideRequestStatistics.AddressResolutionStatistics addressResolutionStatistics :
        clientSideRequestStatistics.getAddressResolutionStatistics().values()) {
        attributes = new HashMap<>();
        attributes.put(JSON_STRING, mapper.writeValueAsString(addressResolutionStatistics));
        this.addEvent("AddressResolutionStatistics" + diagnosticsCounter++, attributes,
            OffsetDateTime.ofInstant(addressResolutionStatistics.getStartTimeUTC(), ZoneOffset.UTC), context);
    }
    if (clientSideRequestStatistics.getSerializationDiagnosticsContext().serializationDiagnosticsList != null) {
        for (SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics :
            clientSideRequestStatistics.getSerializationDiagnosticsContext().serializationDiagnosticsList) {
            attributes = new HashMap<>();
            attributes.put(JSON_STRING, mapper.writeValueAsString(serializationDiagnostics));
            this.addEvent("SerializationDiagnostics " + serializationDiagnostics.serializationType, attributes,
                OffsetDateTime.ofInstant(serializationDiagnostics.startTimeUTC, ZoneOffset.UTC), context);
        }
    }
    attributes = new HashMap<>();
    attributes.put(JSON_STRING,
        mapper.writeValueAsString(clientSideRequestStatistics.getRegionsContacted()));
    this.addEvent("RegionContacted", attributes,
        OffsetDateTime.ofInstant(clientSideRequestStatistics.getRequestStartTimeUTC(), ZoneOffset.UTC), context);
    attributes = new HashMap<>();
    attributes.put(JSON_STRING,
        mapper.writeValueAsString(ClientSideRequestStatistics.fetchSystemInformation()));
    this.addEvent("SystemInformation", attributes,
        OffsetDateTime.ofInstant(clientSideRequestStatistics.getRequestStartTimeUTC(), ZoneOffset.UTC), context);
    attributes = new HashMap<>();
    attributes.put(JSON_STRING,
        mapper.writeValueAsString(clientSideRequestStatistics.getDiagnosticsClientContext()));
    this.addEvent("ClientCfgs", attributes,
        OffsetDateTime.ofInstant(clientSideRequestStatistics.getRequestStartTimeUTC(), ZoneOffset.UTC), context);
}

/**
 * Scans a request timeline for the "created" event and returns its start time in UTC.
 * Falls back to {@code fallback} when the iterator is {@code null} or no such event exists.
 * Extracted helper: the same scan was duplicated three times in the original method.
 */
private static OffsetDateTime startTimeFromRequestTimeline(Iterator<RequestTimeline.Event> eventIterator,
                                                           OffsetDateTime fallback) {
    if (eventIterator != null) {
        while (eventIterator.hasNext()) {
            RequestTimeline.Event event = eventIterator.next();
            if ("created".equals(event.getName())) {
                return OffsetDateTime.ofInstant(event.getStartTime(), ZoneOffset.UTC);
            }
        }
    }
    return fallback;
}
} |
@trask Removed error.msg and error.type. @lmolkova please see the screen shot when it is not 404/0 (item or collection not found). We cannot differentiate between item and collection not found without parsing cosmos exception diagnostics and it will be error prone in future. However if the customer logging the exception they can still see the full diagnostics and differentiate  | private void end(int statusCode, Throwable throwable, Context context) {
if (throwable != null) {
if (statusCode == HttpConstants.StatusCodes.NOTFOUND) {
tracer.setAttribute(TracerProvider.ERROR_MSG, "Not found exception", context);
tracer.setAttribute(TracerProvider.ERROR_TYPE, throwable.getClass().getName(), context);
tracer.end(statusCode, null, context);
} else {
tracer.setAttribute(TracerProvider.ERROR_MSG, throwable.getMessage(), context);
tracer.setAttribute(TracerProvider.ERROR_TYPE, throwable.getClass().getName(), context);
tracer.end(statusCode, throwable, context);
}
} else {
tracer.end(statusCode, null, context);
}
} | tracer.setAttribute(TracerProvider.ERROR_MSG, throwable.getMessage(), context); | private void end(int statusCode, Throwable throwable, Context context) {
if (throwable != null) {
if (throwable instanceof CosmosException) {
CosmosException cosmosException = (CosmosException) throwable;
if (statusCode == HttpConstants.StatusCodes.NOTFOUND && cosmosException.getSubStatusCode() == 0) {
tracer.end(statusCode, null, context);
return;
}
}
}
tracer.end(statusCode, throwable, context);
} | class TracerProvider {
private Tracer tracer;
private static final Logger LOGGER = LoggerFactory.getLogger(TracerProvider.class);
private static final ObjectMapper mapper = new ObjectMapper();
private final static String JSON_STRING = "JSON";
public final static String DB_TYPE_VALUE = "Cosmos";
public final static String DB_TYPE = "db.type";
public final static String DB_INSTANCE = "db.instance";
public final static String DB_URL = "db.url";
public static final String DB_STATEMENT = "db.statement";
public static final String ERROR_MSG = "error.msg";
public static final String ERROR_TYPE = "error.type";
public static final String COSMOS_CALL_DEPTH = "cosmosCallDepth";
public static final String COSMOS_CALL_DEPTH_VAL = "nested";
public static final int ERROR_CODE = 0;
public static final String RESOURCE_PROVIDER_NAME = "Microsoft.DocumentDB";
public final Duration CRUD_THRESHOLD_FOR_DIAGNOSTICS = Duration.ofMillis(100);
public final Duration QUERY_THRESHOLD_FOR_DIAGNOSTICS = Duration.ofMillis(500);
/**
 * Creates a TracerProvider that delegates all span operations to the given tracer.
 *
 * @param tracer the azure-core {@link Tracer}; may be {@code null} (tracing disabled).
 */
public TracerProvider(Tracer tracer) {
this.tracer = tracer;
}
/**
 * Returns {@code true} when a tracer was supplied at construction time; tracing calls in this
 * class are guarded by this check.
 */
public boolean isEnabled() {
return tracer != null;
}
/**
 * Starts a new tracing span for a Cosmos operation and records the standard database
 * attributes (db.type, db.url, db.statement and, when known, db.instance) on it.
 * <p>
 * If {@code context} already carries a parent span the new span is created as its child;
 * otherwise a new root span is created. The returned context carries the started span.
 *
 * @param methodName name of the operation; used as both the span name and db.statement.
 * @param databaseId database id recorded as db.instance; skipped when {@code null}.
 * @param endpoint service endpoint recorded as db.url.
 * @param context additional metadata passed through the call stack; must not be {@code null}.
 * @return an updated context carrying the started span.
 */
public Context startSpan(String methodName, String databaseId, String endpoint, Context context) {
    Objects.requireNonNull(context, "'context' cannot be null.");
    // Tag the context with the resource-provider namespace before the span is started.
    Context spanContext = tracer.start(methodName,
        context.addData(AZ_TRACING_NAMESPACE_KEY, RESOURCE_PROVIDER_NAME));
    if (databaseId != null) {
        tracer.setAttribute(DB_INSTANCE, databaseId, spanContext);
    }
    tracer.setAttribute(DB_TYPE, DB_TYPE_VALUE, spanContext);
    tracer.setAttribute(DB_URL, endpoint, spanContext);
    tracer.setAttribute(DB_STATEMENT, methodName, spanContext);
    return spanContext;
}
/**
 * Adds an event to the current span with the provided {@code timestamp} and {@code attributes}.
 * <p>This API does not provide any normalization if provided timestamps are out of range of the current
 * span timeline</p>
 * <p>Supported attribute values include String, double, boolean, long, String [], double [], long [].
 * Any other Object value type and null values will be silently ignored.</p>
 *
 * @param name the name of the event.
 * @param attributes the additional attributes to be set for the event.
 * @param timestamp The instant, in UTC, at which the event will be associated to the span.
 * @param context the call metadata containing information of the span to which the event should be associated with.
 * @throws NullPointerException if {@code name} is {@code null}.
 */
public void addEvent(String name, Map<String, Object> attributes, OffsetDateTime timestamp, Context context) {
// Thin delegate; callers are expected to have checked isEnabled() first (tracer may be null).
tracer.addEvent(name, attributes, timestamp, context);
}
/**
 * Completes the tracing span carried in {@code context} using the terminal {@link Signal}.
 * On completion the span ends with the supplied status code; on error the status code is
 * taken from the {@link CosmosException} (when the error is one) before ending the span with
 * the throwable attached. Non-terminal signal types are ignored.
 *
 * @param context additional metadata that is passed through the call stack; must not be {@code null}.
 * @param signal the terminal signal carrying status and error metadata; must not be {@code null}.
 * @param statusCode status code to record when the signal itself does not determine one.
 */
public <T extends CosmosResponse<? extends Resource>> void endSpan(Context context,
                                                                   Signal<T> signal,
                                                                   int statusCode) {
    Objects.requireNonNull(context, "'context' cannot be null.");
    Objects.requireNonNull(signal, "'signal' cannot be null.");
    if (signal.isOnComplete()) {
        end(statusCode, null, context);
        return;
    }
    if (signal.isOnError()) {
        Throwable throwable = null;
        if (signal.hasError()) {
            throwable = signal.getThrowable();
            if (throwable instanceof CosmosException) {
                // Prefer the service status code carried by the exception.
                statusCode = ((CosmosException) throwable).getStatusCode();
            }
        }
        end(statusCode, throwable, context);
    }
    // Any other signal type (ON_NEXT, ON_SUBSCRIBE, ...) carries no terminal state: no-op.
}
/**
 * Instruments a {@link CosmosResponse} publisher with span start/end bookkeeping.
 * Passing {@code null} as the diagnostics latency threshold selects the default CRUD threshold.
 *
 * @param resultPublisher the publisher to instrument.
 * @param context call metadata passed through the call stack.
 * @param spanName name of the span to create.
 * @param databaseId database id recorded on the span, may be {@code null}.
 * @param endpoint service endpoint recorded on the span.
 * @return the instrumented publisher.
 */
public <T extends CosmosResponse<?>> Mono<T> traceEnabledCosmosResponsePublisher(Mono<T> resultPublisher,
                                                                                 Context context,
                                                                                 String spanName,
                                                                                 String databaseId,
                                                                                 String endpoint) {
    return traceEnabledPublisher(resultPublisher, context, spanName, databaseId, endpoint,
        CosmosResponse::getStatusCode, CosmosResponse::getDiagnostics, null);
}
/**
 * Instruments a transactional-batch publisher with tracing and, when enabled, client telemetry.
 * Delegates to {@code publisherWithClientTelemetry} using the client's service endpoint and no
 * latency-threshold override (the default CRUD threshold then applies for diagnostics events).
 */
public Mono<CosmosBatchResponse> traceEnabledBatchResponsePublisher(Mono<CosmosBatchResponse> resultPublisher,
Context context,
String spanName,
String containerId,
String databaseId,
CosmosAsyncClient client,
ConsistencyLevel consistencyLevel,
OperationType operationType,
ResourceType resourceType) {
return publisherWithClientTelemetry(resultPublisher, context, spanName, containerId, databaseId,
BridgeInternal.getServiceEndpoint(client),
client,
consistencyLevel,
operationType,
resourceType,
CosmosBatchResponse::getStatusCode,
CosmosBatchResponse::getDiagnostics,
// null threshold => default CRUD latency threshold.
null);
}
/**
 * Instruments an item-response publisher with tracing and, when enabled, client telemetry.
 * Unlike the batch variant, a caller-supplied diagnostics latency threshold is forwarded;
 * {@code null} falls back to the default CRUD threshold downstream.
 */
public <T> Mono<CosmosItemResponse<T>> traceEnabledCosmosItemResponsePublisher(Mono<CosmosItemResponse<T>> resultPublisher,
Context context,
String spanName,
String containerId,
String databaseId,
CosmosAsyncClient client,
ConsistencyLevel consistencyLevel,
OperationType operationType,
ResourceType resourceType,
Duration thresholdForDiagnosticsOnTracer) {
return publisherWithClientTelemetry(resultPublisher, context, spanName, containerId, databaseId,
BridgeInternal.getServiceEndpoint(client),
client,
consistencyLevel,
operationType,
resourceType,
CosmosItemResponse::getStatusCode,
CosmosItemResponse::getDiagnostics,
thresholdForDiagnosticsOnTracer);
}
/**
 * Core tracing wrapper: starts a span on subscription and ends it on the terminal signal.
 * <p>
 * Nested SDK calls — detected via the {@link #COSMOS_CALL_DEPTH} context key — are not traced
 * again, so only the outermost public API call produces a span. On success, when the operation's
 * diagnostics duration exceeds the latency threshold, the diagnostics are attached to the span
 * as events before the span is closed.
 *
 * @param resultPublisher the publisher to instrument.
 * @param context call metadata; may carry {@link #COSMOS_CALL_DEPTH} for nested calls.
 * @param spanName name of the span to create.
 * @param databaseId database id recorded on the span, may be {@code null}.
 * @param endpoint service endpoint recorded on the span.
 * @param statusCodeFunc extracts the status code from a successful response.
 * @param diagnosticFunc extracts the diagnostics from a successful response.
 * @param thresholdForDiagnosticsOnTracer latency threshold above which diagnostics events are
 * attached; {@code null} selects {@link #CRUD_THRESHOLD_FOR_DIAGNOSTICS}.
 * @return the instrumented publisher.
 */
private <T> Mono<T> traceEnabledPublisher(Mono<T> resultPublisher,
                                          Context context,
                                          String spanName,
                                          String databaseId,
                                          String endpoint,
                                          Function<T, Integer> statusCodeFunc,
                                          Function<T, CosmosDiagnostics> diagnosticFunc,
                                          Duration thresholdForDiagnosticsOnTracer) {
    final AtomicReference<Context> parentContext = new AtomicReference<>(Context.NONE);
    Optional<Object> callDepth = context.getData(COSMOS_CALL_DEPTH);
    final boolean isNestedCall = callDepth.isPresent();
    return resultPublisher
        .doOnSubscribe(ignoredValue -> {
            if (isEnabled() && !isNestedCall) {
                parentContext.set(this.startSpan(spanName, databaseId, endpoint, context));
            }
        }).doOnSuccess(response -> {
            if (isEnabled() && !isNestedCall) {
                CosmosDiagnostics cosmosDiagnostics = diagnosticFunc.apply(response);
                try {
                    Duration threshold = thresholdForDiagnosticsOnTracer;
                    if (threshold == null) {
                        threshold = CRUD_THRESHOLD_FOR_DIAGNOSTICS;
                    }
                    if (cosmosDiagnostics != null
                        && cosmosDiagnostics.getDuration() != null
                        && cosmosDiagnostics.getDuration().compareTo(threshold) > 0) {
                        addDiagnosticsOnTracerEvent(cosmosDiagnostics, parentContext.get());
                    }
                } catch (JsonProcessingException ex) {
                    // Fix: the original call had no '{}' placeholder, so SLF4J silently dropped
                    // the ex.getMessage() argument and logged only the bare message.
                    LOGGER.warn("Error while serializing diagnostics for tracer: {}", ex.getMessage());
                }
                this.endSpan(parentContext.get(), Signal.complete(), statusCodeFunc.apply(response));
            }
        }).doOnError(throwable -> {
            if (isEnabled() && !isNestedCall) {
                this.endSpan(parentContext.get(), Signal.error(throwable), ERROR_CODE);
            }
        });
}
/**
 * Layers client-telemetry collection on top of {@link #traceEnabledPublisher}.
 * On success, item and batch responses feed latency/request-charge histograms; on error,
 * {@link CosmosException}s do the same using the exception's diagnostics and status code.
 * Telemetry is recorded only when it is enabled for this client.
 *
 * @return the instrumented publisher.
 */
private <T> Mono<T> publisherWithClientTelemetry(Mono<T> resultPublisher,
                                                 Context context,
                                                 String spanName,
                                                 String containerId,
                                                 String databaseId,
                                                 String endpoint,
                                                 CosmosAsyncClient client,
                                                 ConsistencyLevel consistencyLevel,
                                                 OperationType operationType,
                                                 ResourceType resourceType,
                                                 Function<T, Integer> statusCodeFunc,
                                                 Function<T, CosmosDiagnostics> diagnosticFunc,
                                                 Duration thresholdForDiagnosticsOnTracer) {
    Mono<T> tracerMono = traceEnabledPublisher(resultPublisher, context, spanName, databaseId, endpoint, statusCodeFunc, diagnosticFunc, thresholdForDiagnosticsOnTracer);
    return tracerMono
        .doOnSuccess(response -> {
            // Evaluate the telemetry switch once per signal instead of once per instanceof branch.
            boolean telemetryEnabled = Configs.isClientTelemetryEnabled(BridgeInternal.isClientTelemetryEnabled(client));
            if (telemetryEnabled && response instanceof CosmosItemResponse) {
                @SuppressWarnings("unchecked")
                CosmosItemResponse<T> itemResponse = (CosmosItemResponse<T>) response;
                fillClientTelemetry(client, itemResponse.getDiagnostics(), itemResponse.getStatusCode(),
                    ModelBridgeInternal.getPayloadLength(itemResponse), containerId,
                    databaseId, operationType, resourceType, consistencyLevel,
                    (float) itemResponse.getRequestCharge());
            } else if (telemetryEnabled && response instanceof CosmosBatchResponse) {
                // CosmosBatchResponse is not generic: this cast is checked, so no
                // @SuppressWarnings("unchecked") is needed (the original annotation was spurious).
                CosmosBatchResponse cosmosBatchResponse = (CosmosBatchResponse) response;
                fillClientTelemetry(client, cosmosBatchResponse.getDiagnostics(), cosmosBatchResponse.getStatusCode(),
                    ModelBridgeInternal.getPayloadLength(cosmosBatchResponse), containerId,
                    databaseId, operationType, resourceType, consistencyLevel,
                    (float) cosmosBatchResponse.getRequestCharge());
            }
        }).doOnError(throwable -> {
            if (Configs.isClientTelemetryEnabled(BridgeInternal.isClientTelemetryEnabled(client)) && throwable instanceof CosmosException) {
                CosmosException cosmosException = (CosmosException) throwable;
                // No payload length on the error path: the request body size is not available here.
                fillClientTelemetry(client, cosmosException.getDiagnostics(), cosmosException.getStatusCode(),
                    null, containerId,
                    databaseId, operationType, resourceType, consistencyLevel,
                    (float) cosmosException.getRequestCharge());
            }
        });
}
/**
 * Records the operation's latency and request charge into the client-telemetry histograms.
 * Histograms are keyed by a {@link ReportPayload} describing the operation (database/container,
 * operation and resource type, consistency, status code, metric name); a histogram is created
 * lazily on first use, set to auto-resize, and reused for subsequent matching operations.
 */
private void fillClientTelemetry(CosmosAsyncClient cosmosAsyncClient,
CosmosDiagnostics cosmosDiagnostics,
int statusCode,
Integer objectSize,
String containerId,
String databaseId,
OperationType operationType,
ResourceType resourceType,
ConsistencyLevel consistencyLevel,
float requestCharge) {
ClientTelemetry telemetry = BridgeInternal.getContextClient(cosmosAsyncClient).getClientTelemetry();
ReportPayload reportPayloadLatency = createReportPayload(cosmosAsyncClient, cosmosDiagnostics,
statusCode, objectSize, containerId, databaseId
, operationType, resourceType, consistencyLevel, ClientTelemetry.REQUEST_LATENCY_NAME,
ClientTelemetry.REQUEST_LATENCY_UNIT);
ConcurrentDoubleHistogram latencyHistogram = telemetry.getClientTelemetryInfo().getOperationInfoMap().get(reportPayloadLatency);
if (latencyHistogram != null) {
// Latency is recorded in microseconds (nanos / 1000).
ClientTelemetry.recordValue(latencyHistogram, cosmosDiagnostics.getDuration().toNanos()/1000);
} else {
// Success and failure latencies use different histogram precisions.
if (statusCode >= HttpConstants.StatusCodes.MINIMUM_SUCCESS_STATUSCODE && statusCode <= HttpConstants.StatusCodes.MAXIMUM_SUCCESS_STATUSCODE) {
latencyHistogram = new ConcurrentDoubleHistogram(ClientTelemetry.REQUEST_LATENCY_MAX_MICRO_SEC, ClientTelemetry.REQUEST_LATENCY_SUCCESS_PRECISION);
} else {
latencyHistogram = new ConcurrentDoubleHistogram(ClientTelemetry.REQUEST_LATENCY_MAX_MICRO_SEC, ClientTelemetry.REQUEST_LATENCY_FAILURE_PRECISION);
}
latencyHistogram.setAutoResize(true);
ClientTelemetry.recordValue(latencyHistogram, cosmosDiagnostics.getDuration().toNanos()/1000);
telemetry.getClientTelemetryInfo().getOperationInfoMap().put(reportPayloadLatency, latencyHistogram);
}
ReportPayload reportPayloadRequestCharge = createReportPayload(cosmosAsyncClient, cosmosDiagnostics,
statusCode, objectSize, containerId, databaseId
, operationType, resourceType, consistencyLevel, ClientTelemetry.REQUEST_CHARGE_NAME, ClientTelemetry.REQUEST_CHARGE_UNIT);
ConcurrentDoubleHistogram requestChargeHistogram = telemetry.getClientTelemetryInfo().getOperationInfoMap().get(reportPayloadRequestCharge);
if (requestChargeHistogram != null) {
ClientTelemetry.recordValue(requestChargeHistogram, requestCharge);
} else {
requestChargeHistogram = new ConcurrentDoubleHistogram(ClientTelemetry.REQUEST_CHARGE_MAX, ClientTelemetry.REQUEST_CHARGE_PRECISION);
requestChargeHistogram.setAutoResize(true);
ClientTelemetry.recordValue(requestChargeHistogram, requestCharge);
telemetry.getClientTelemetryInfo().getOperationInfoMap().put(reportPayloadRequestCharge,
requestChargeHistogram);
}
}
/**
 * Builds the {@link ReportPayload} key describing one operation for the telemetry histograms.
 * When no explicit consistency level is supplied, the client's configured level is used.
 */
private ReportPayload createReportPayload(CosmosAsyncClient cosmosAsyncClient,
                                          CosmosDiagnostics cosmosDiagnostics,
                                          int statusCode,
                                          Integer objectSize,
                                          String containerId,
                                          String databaseId,
                                          OperationType operationType,
                                          ResourceType resourceType,
                                          ConsistencyLevel consistencyLevel,
                                          String metricsName,
                                          String unitName) {
    ReportPayload payload = new ReportPayload(metricsName, unitName);
    payload.setRegionsContacted(BridgeInternal.getRegionsContacted(cosmosDiagnostics).toString());
    if (consistencyLevel != null) {
        payload.setConsistency(consistencyLevel);
    } else {
        // Fall back to the consistency level configured on the client.
        payload.setConsistency(BridgeInternal.getContextClient(cosmosAsyncClient).getConsistencyLevel());
    }
    if (objectSize != null) {
        payload.setGreaterThan1Kb(objectSize > ClientTelemetry.ONE_KB_TO_BYTES);
    }
    payload.setDatabaseName(databaseId);
    payload.setContainerName(containerId);
    payload.setOperation(operationType);
    payload.setResource(resourceType);
    payload.setStatusCode(statusCode);
    return payload;
}
/**
 * Attaches the request diagnostics captured in {@code cosmosDiagnostics} to the current
 * tracing span as a series of named events: direct-mode store responses, supplemental
 * store responses, gateway statistics, retry context, address resolution, serialization
 * diagnostics, regions contacted, system information and client configuration.
 * Each event payload is the JSON serialization of the corresponding statistics object,
 * timestamped with the request start time (preferring the "created" timeline event).
 *
 * @param cosmosDiagnostics diagnostics of the completed operation; null is a no-op.
 * @param context call metadata identifying the span the events are attached to.
 * @throws JsonProcessingException if a statistics object cannot be serialized to JSON.
 */
private void addDiagnosticsOnTracerEvent(CosmosDiagnostics cosmosDiagnostics, Context context) throws JsonProcessingException {
if (cosmosDiagnostics == null) {
return;
}
ClientSideRequestStatistics clientSideRequestStatistics =
BridgeInternal.getClientSideRequestStatics(cosmosDiagnostics);
Map<String, Object> attributes = null;
int diagnosticsCounter = 1;
// One "StoreResponseN" event per direct-mode store response.
for (ClientSideRequestStatistics.StoreResponseStatistics storeResponseStatistics :
clientSideRequestStatistics.getResponseStatisticsList()) {
attributes = new HashMap<>();
attributes.put(JSON_STRING, mapper.writeValueAsString(storeResponseStatistics));
Iterator<RequestTimeline.Event> eventIterator = null;
try {
if (storeResponseStatistics.getStoreResult() != null) {
eventIterator =
DirectBridgeInternal.getRequestTimeline(storeResponseStatistics.getStoreResult().toResponse()).iterator();
}
// toResponse() rethrows failed store results; the timeline is then read off the exception.
} catch (CosmosException ex) {
eventIterator = BridgeInternal.getRequestTimeline(ex).iterator();
}
OffsetDateTime requestStartTime = OffsetDateTime.ofInstant(storeResponseStatistics.getRequestResponseTimeUTC()
, ZoneOffset.UTC);
// Prefer the "created" timeline event's start time as the event timestamp, when present.
if (eventIterator != null) {
while (eventIterator.hasNext()) {
RequestTimeline.Event event = eventIterator.next();
if (event.getName().equals("created")) {
requestStartTime = OffsetDateTime.ofInstant(event.getStartTime(), ZoneOffset.UTC);
break;
}
}
}
this.addEvent("StoreResponse" + diagnosticsCounter++, attributes, requestStartTime, context);
}
// Counter restarts for the supplemental (capped) store-response list.
diagnosticsCounter = 1;
for (ClientSideRequestStatistics.StoreResponseStatistics statistics :
ClientSideRequestStatistics.getCappedSupplementalResponseStatisticsList(clientSideRequestStatistics.getSupplementalResponseStatisticsList())) {
attributes = new HashMap<>();
attributes.put(JSON_STRING, mapper.writeValueAsString(statistics));
OffsetDateTime requestStartTime = OffsetDateTime.ofInstant(statistics.getRequestResponseTimeUTC(),
ZoneOffset.UTC);
if (statistics.getStoreResult() != null) {
Iterator<RequestTimeline.Event> eventIterator =
DirectBridgeInternal.getRequestTimeline(statistics.getStoreResult().toResponse()).iterator();
while (eventIterator.hasNext()) {
RequestTimeline.Event event = eventIterator.next();
if (event.getName().equals("created")) {
requestStartTime = OffsetDateTime.ofInstant(event.getStartTime(), ZoneOffset.UTC);
break;
}
}
}
this.addEvent("Supplemental StoreResponse" + diagnosticsCounter++, attributes, requestStartTime, context);
}
// Gateway-mode statistics, when the request went through the gateway.
if (clientSideRequestStatistics.getGatewayStatistics() != null) {
attributes = new HashMap<>();
attributes.put(JSON_STRING,
mapper.writeValueAsString(clientSideRequestStatistics.getGatewayStatistics()));
OffsetDateTime requestStartTime =
OffsetDateTime.ofInstant(clientSideRequestStatistics.getRequestStartTimeUTC(), ZoneOffset.UTC);
if (clientSideRequestStatistics.getGatewayStatistics().getRequestTimeline() != null) {
Iterator<RequestTimeline.Event> eventIterator =
clientSideRequestStatistics.getGatewayStatistics().getRequestTimeline().iterator();
while (eventIterator.hasNext()) {
RequestTimeline.Event event = eventIterator.next();
if (event.getName().equals("created")) {
requestStartTime = OffsetDateTime.ofInstant(event.getStartTime(), ZoneOffset.UTC);
break;
}
}
}
this.addEvent("GatewayStatistics", attributes, requestStartTime, context);
}
// Retry context is only emitted when at least one retry occurred.
if (clientSideRequestStatistics.getRetryContext().getRetryStartTime() != null) {
attributes = new HashMap<>();
attributes.put(JSON_STRING,
mapper.writeValueAsString(clientSideRequestStatistics.getRetryContext()));
this.addEvent("Retry Context", attributes,
OffsetDateTime.ofInstant(clientSideRequestStatistics.getRetryContext().getRetryStartTime(),
ZoneOffset.UTC), context);
}
diagnosticsCounter = 1;
for (ClientSideRequestStatistics.AddressResolutionStatistics addressResolutionStatistics :
clientSideRequestStatistics.getAddressResolutionStatistics().values()) {
attributes = new HashMap<>();
attributes.put(JSON_STRING, mapper.writeValueAsString(addressResolutionStatistics));
this.addEvent("AddressResolutionStatistics" + diagnosticsCounter++, attributes,
OffsetDateTime.ofInstant(addressResolutionStatistics.getStartTimeUTC(), ZoneOffset.UTC), context);
}
if (clientSideRequestStatistics.getSerializationDiagnosticsContext().serializationDiagnosticsList != null) {
for (SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics :
clientSideRequestStatistics.getSerializationDiagnosticsContext().serializationDiagnosticsList) {
attributes = new HashMap<>();
attributes.put(JSON_STRING, mapper.writeValueAsString(serializationDiagnostics));
this.addEvent("SerializationDiagnostics " + serializationDiagnostics.serializationType, attributes,
OffsetDateTime.ofInstant(serializationDiagnostics.startTimeUTC, ZoneOffset.UTC), context);
}
}
// The remaining events share the overall request start time as their timestamp.
attributes = new HashMap<>();
attributes.put(JSON_STRING,
mapper.writeValueAsString(clientSideRequestStatistics.getRegionsContacted()));
this.addEvent("RegionContacted", attributes,
OffsetDateTime.ofInstant(clientSideRequestStatistics.getRequestStartTimeUTC(), ZoneOffset.UTC), context);
attributes = new HashMap<>();
attributes.put(JSON_STRING,
mapper.writeValueAsString(ClientSideRequestStatistics.fetchSystemInformation()));
this.addEvent("SystemInformation", attributes,
OffsetDateTime.ofInstant(clientSideRequestStatistics.getRequestStartTimeUTC(), ZoneOffset.UTC), context);
attributes = new HashMap<>();
attributes.put(JSON_STRING,
mapper.writeValueAsString(clientSideRequestStatistics.getDiagnosticsClientContext()));
this.addEvent("ClientCfgs", attributes,
OffsetDateTime.ofInstant(clientSideRequestStatistics.getRequestStartTimeUTC(), ZoneOffset.UTC), context);
}
} | class TracerProvider {
// The azure-core tracer to emit spans/events to; null disables tracing entirely.
private Tracer tracer;
private static final Logger LOGGER = LoggerFactory.getLogger(TracerProvider.class);
// Shared Jackson mapper used to serialize diagnostics into event attributes.
private static final ObjectMapper mapper = new ObjectMapper();
// Attribute key under which serialized diagnostics JSON is attached to span events.
private final static String JSON_STRING = "JSON";
// Span attribute keys/values identifying Cosmos DB as the database system.
public final static String DB_TYPE_VALUE = "Cosmos";
public final static String DB_TYPE = "db.type";
public final static String DB_INSTANCE = "db.instance";
public final static String DB_URL = "db.url";
public static final String DB_STATEMENT = "db.statement";
// Context marker set by outer SDK calls so nested calls do not open duplicate spans.
public static final String COSMOS_CALL_DEPTH = "cosmosCallDepth";
public static final String COSMOS_CALL_DEPTH_VAL = "nested";
// Status code reported when an error carries no HTTP status of its own.
public static final int ERROR_CODE = 0;
public static final String RESOURCE_PROVIDER_NAME = "Microsoft.DocumentDB";
// Diagnostics are only attached to spans when the operation exceeds these durations.
public final Duration CRUD_THRESHOLD_FOR_DIAGNOSTICS = Duration.ofMillis(100);
public final Duration QUERY_THRESHOLD_FOR_DIAGNOSTICS = Duration.ofMillis(500);
/**
 * Creates a provider backed by the given azure-core {@link Tracer}.
 *
 * @param tracer the tracer to emit spans/events to; may be null, which disables tracing.
 */
public TracerProvider(Tracer tracer) {
this.tracer = tracer;
}
/**
 * @return true when a tracer was supplied and spans/events should be emitted.
 */
public boolean isEnabled() {
return tracer != null;
}
/**
* For each tracer plugged into the SDK a new tracing span is created.
* <p>
* The {@code context} will be checked for containing information about a parent span. If a parent span is found the
* new span will be added as a child, otherwise the span will be created and added to the context and any downstream
* start calls will use the created span as the parent.
*
* @param context Additional metadata that is passed through the call stack.
* @return An updated context object.
*/
public Context startSpan(String methodName, String databaseId, String endpoint, Context context) {
Context local = Objects.requireNonNull(context, "'context' cannot be null.");
// Tag the context with the Azure resource-provider namespace so exporters can
// attribute the span to Cosmos DB.
local = local.addData(AZ_TRACING_NAMESPACE_KEY, RESOURCE_PROVIDER_NAME);
local = tracer.start(methodName, local);
// db.instance is only set when the operation targets a specific database.
if (databaseId != null) {
tracer.setAttribute(TracerProvider.DB_INSTANCE, databaseId, local);
}
// Standard database-client span attributes: system type, endpoint URL, method name.
tracer.setAttribute(TracerProvider.DB_TYPE, DB_TYPE_VALUE, local);
tracer.setAttribute(TracerProvider.DB_URL, endpoint, local);
tracer.setAttribute(TracerProvider.DB_STATEMENT, methodName, local);
return local;
}
/**
* Adds an event to the current span with the provided {@code timestamp} and {@code attributes}.
* <p>This API does not provide any normalization if provided timestamps are out of range of the current
* span timeline</p>
* <p>Supported attribute values include String, double, boolean, long, String [], double [], long [].
* Any other Object value type and null values will be silently ignored.</p>
*
* @param name the name of the event.
* @param attributes the additional attributes to be set for the event.
* @param timestamp The instant, in UTC, at which the event will be associated to the span.
* @param context the call metadata containing information of the span to which the event should be associated with.
* @throws NullPointerException if {@code eventName} is {@code null}.
*/
public void addEvent(String name, Map<String, Object> attributes, OffsetDateTime timestamp, Context context) {
// Thin pass-through to the configured azure-core Tracer (contract in the Javadoc above).
tracer.addEvent(name, attributes, timestamp, context);
}
/**
* Given a context containing the current tracing span the span is marked completed with status info from
* {@link Signal}. For each tracer plugged into the SDK the current tracing span is marked as completed.
*
* @param context Additional metadata that is passed through the call stack.
* @param signal The signal indicates the status and contains the metadata we need to end the tracing span.
*/
public <T extends CosmosResponse<? extends Resource>> void endSpan(Context context,
                                                                   Signal<T> signal,
                                                                   int statusCode) {
    Objects.requireNonNull(context, "'context' cannot be null.");
    Objects.requireNonNull(signal, "'signal' cannot be null.");
    switch (signal.getType()) {
        case ON_COMPLETE:
            // Successful completion: close the span with no error attached.
            end(statusCode, null, context);
            break;
        case ON_ERROR:
            // Pull the error (if any) off the signal; a CosmosException carries the
            // real HTTP status code, which overrides the one passed in.
            Throwable error = signal.hasError() ? signal.getThrowable() : null;
            if (error instanceof CosmosException) {
                statusCode = ((CosmosException) error).getStatusCode();
            }
            end(statusCode, error, context);
            break;
        default:
            // Other signal types (e.g. ON_NEXT) do not terminate the span.
            break;
    }
}
/**
 * Wraps a CosmosResponse publisher with span start/end bookkeeping.
 * No diagnostics threshold override is supplied (null selects the CRUD default).
 */
public <T extends CosmosResponse<?>> Mono<T> traceEnabledCosmosResponsePublisher(Mono<T> resultPublisher,
                                                                                 Context context,
                                                                                 String spanName,
                                                                                 String databaseId,
                                                                                 String endpoint) {
    // CosmosResponse exposes status code and diagnostics directly, so method
    // references replace the explicit lambdas.
    return traceEnabledPublisher(resultPublisher, context, spanName, databaseId, endpoint,
        CosmosResponse::getStatusCode, CosmosResponse::getDiagnostics, null);
}
/**
 * Wraps a batch-response publisher with tracing and, when enabled, client telemetry.
 * Delegates to {@code publisherWithClientTelemetry} using the client's service endpoint
 * and the batch response's own status code / diagnostics accessors; no diagnostics
 * threshold override is supplied (null selects the CRUD default).
 */
public Mono<CosmosBatchResponse> traceEnabledBatchResponsePublisher(Mono<CosmosBatchResponse> resultPublisher,
Context context,
String spanName,
String containerId,
String databaseId,
CosmosAsyncClient client,
ConsistencyLevel consistencyLevel,
OperationType operationType,
ResourceType resourceType) {
return publisherWithClientTelemetry(resultPublisher, context, spanName, containerId, databaseId,
BridgeInternal.getServiceEndpoint(client),
client,
consistencyLevel,
operationType,
resourceType,
CosmosBatchResponse::getStatusCode,
CosmosBatchResponse::getDiagnostics,
null);
}
/**
 * Wraps an item-response publisher with tracing and, when enabled, client telemetry.
 * Delegates to {@code publisherWithClientTelemetry} using the client's service endpoint
 * and the item response's own status code / diagnostics accessors.
 *
 * @param thresholdForDiagnosticsOnTracer operation duration above which diagnostics are
 *        attached to the span; null selects the CRUD default downstream.
 */
public <T> Mono<CosmosItemResponse<T>> traceEnabledCosmosItemResponsePublisher(Mono<CosmosItemResponse<T>> resultPublisher,
Context context,
String spanName,
String containerId,
String databaseId,
CosmosAsyncClient client,
ConsistencyLevel consistencyLevel,
OperationType operationType,
ResourceType resourceType,
Duration thresholdForDiagnosticsOnTracer) {
return publisherWithClientTelemetry(resultPublisher, context, spanName, containerId, databaseId,
BridgeInternal.getServiceEndpoint(client),
client,
consistencyLevel,
operationType,
resourceType,
CosmosItemResponse::getStatusCode,
CosmosItemResponse::getDiagnostics,
thresholdForDiagnosticsOnTracer);
}
/**
 * Core tracing wrapper: starts a span on subscription (unless this is a nested SDK call,
 * detected via the {@code COSMOS_CALL_DEPTH} context marker), attaches diagnostics as span
 * events when the operation exceeds the threshold, and ends the span on success or error.
 *
 * @param statusCodeFunc extracts the HTTP status code from a successful response.
 * @param diagnosticFunc extracts the diagnostics from a successful response.
 * @param thresholdForDiagnosticsOnTracer duration above which diagnostics are attached;
 *        null selects {@code CRUD_THRESHOLD_FOR_DIAGNOSTICS}.
 */
private <T> Mono<T> traceEnabledPublisher(Mono<T> resultPublisher,
                                          Context context,
                                          String spanName,
                                          String databaseId,
                                          String endpoint,
                                          Function<T, Integer> statusCodeFunc,
                                          Function<T, CosmosDiagnostics> diagnosticFunc,
                                          Duration thresholdForDiagnosticsOnTracer) {
    final AtomicReference<Context> parentContext = new AtomicReference<>(Context.NONE);
    Optional<Object> callDepth = context.getData(COSMOS_CALL_DEPTH);
    final boolean isNestedCall = callDepth.isPresent();
    return resultPublisher
        .doOnSubscribe(ignoredValue -> {
            if (isEnabled() && !isNestedCall) {
                parentContext.set(this.startSpan(spanName, databaseId, endpoint,
                    context));
            }
        }).doOnSuccess(response -> {
            if (isEnabled() && !isNestedCall) {
                CosmosDiagnostics cosmosDiagnostics = diagnosticFunc.apply(response);
                try {
                    Duration threshold = thresholdForDiagnosticsOnTracer;
                    if (threshold == null) {
                        threshold = CRUD_THRESHOLD_FOR_DIAGNOSTICS;
                    }
                    if (cosmosDiagnostics != null
                        && cosmosDiagnostics.getDuration() != null
                        && cosmosDiagnostics.getDuration().compareTo(threshold) > 0) {
                        addDiagnosticsOnTracerEvent(cosmosDiagnostics, parentContext.get());
                    }
                } catch (JsonProcessingException ex) {
                    // BUG FIX: previously this was LOGGER.warn(msg, ex.getMessage()) with no
                    // "{}" placeholder in the message, so SLF4J silently discarded the
                    // argument and the failure cause was never logged. Passing the exception
                    // itself uses warn(String, Throwable) and records the full cause.
                    LOGGER.warn("Error while serializing diagnostics for tracer", ex);
                }
                // Serialization failure must not prevent the span from being closed.
                this.endSpan(parentContext.get(), Signal.complete(), statusCodeFunc.apply(response));
            }
        }).doOnError(throwable -> {
            if (isEnabled() && !isNestedCall) {
                this.endSpan(parentContext.get(), Signal.error(throwable), ERROR_CODE);
            }
        });
}
/**
 * Wraps {@code traceEnabledPublisher} and additionally feeds the client-telemetry
 * histograms when client telemetry is enabled for this client: on success for item and
 * batch responses, and on failure for {@link CosmosException}s (using the exception's
 * status code and request charge).
 */
private <T> Mono<T> publisherWithClientTelemetry(Mono<T> resultPublisher,
Context context,
String spanName,
String containerId,
String databaseId,
String endpoint,
CosmosAsyncClient client,
ConsistencyLevel consistencyLevel,
OperationType operationType,
ResourceType resourceType,
Function<T, Integer> statusCodeFunc,
Function<T, CosmosDiagnostics> diagnosticFunc,
Duration thresholdForDiagnosticsOnTracer) {
Mono<T> tracerMono = traceEnabledPublisher(resultPublisher, context, spanName, databaseId, endpoint, statusCodeFunc, diagnosticFunc, thresholdForDiagnosticsOnTracer);
return tracerMono
.doOnSuccess(response -> {
if (Configs.isClientTelemetryEnabled(BridgeInternal.isClientTelemetryEnabled(client)) && response instanceof CosmosItemResponse) {
@SuppressWarnings("unchecked")
CosmosItemResponse<T> itemResponse = (CosmosItemResponse<T>) response;
fillClientTelemetry(client, itemResponse.getDiagnostics(), itemResponse.getStatusCode(),
ModelBridgeInternal.getPayloadLength(itemResponse), containerId,
databaseId, operationType, resourceType, consistencyLevel,
(float) itemResponse.getRequestCharge());
} else if (Configs.isClientTelemetryEnabled(BridgeInternal.isClientTelemetryEnabled(client)) && response instanceof CosmosBatchResponse) {
@SuppressWarnings("unchecked")
CosmosBatchResponse cosmosBatchResponse = (CosmosBatchResponse) response;
fillClientTelemetry(client, cosmosBatchResponse.getDiagnostics(), cosmosBatchResponse.getStatusCode(),
ModelBridgeInternal.getPayloadLength(cosmosBatchResponse), containerId,
databaseId, operationType, resourceType, consistencyLevel,
(float) cosmosBatchResponse.getRequestCharge());
}
}).doOnError(throwable -> {
// Failed operations still contribute latency/charge samples, keyed by the
// error's status code; payload size is unknown (null) on failure.
if (Configs.isClientTelemetryEnabled(BridgeInternal.isClientTelemetryEnabled(client)) && throwable instanceof CosmosException) {
CosmosException cosmosException = (CosmosException) throwable;
fillClientTelemetry(client, cosmosException.getDiagnostics(), cosmosException.getStatusCode(),
null, containerId,
databaseId, operationType, resourceType, consistencyLevel,
(float) cosmosException.getRequestCharge());
}
});
}
/**
 * Records one operation's latency and request-charge samples into the client-telemetry
 * histograms, creating the histogram for a metric/dimension combination on first use.
 * Latency is recorded as {@code duration.toNanos()/1000} (microseconds); the histogram
 * precision differs for success vs failure status codes.
 */
private void fillClientTelemetry(CosmosAsyncClient cosmosAsyncClient,
CosmosDiagnostics cosmosDiagnostics,
int statusCode,
Integer objectSize,
String containerId,
String databaseId,
OperationType operationType,
ResourceType resourceType,
ConsistencyLevel consistencyLevel,
float requestCharge) {
ClientTelemetry telemetry = BridgeInternal.getContextClient(cosmosAsyncClient).getClientTelemetry();
ReportPayload reportPayloadLatency = createReportPayload(cosmosAsyncClient, cosmosDiagnostics,
statusCode, objectSize, containerId, databaseId
, operationType, resourceType, consistencyLevel, ClientTelemetry.REQUEST_LATENCY_NAME,
ClientTelemetry.REQUEST_LATENCY_UNIT);
ConcurrentDoubleHistogram latencyHistogram = telemetry.getClientTelemetryInfo().getOperationInfoMap().get(reportPayloadLatency);
if (latencyHistogram != null) {
ClientTelemetry.recordValue(latencyHistogram, cosmosDiagnostics.getDuration().toNanos()/1000);
} else {
// First sample for this dimension combination: pick histogram precision by outcome.
if (statusCode >= HttpConstants.StatusCodes.MINIMUM_SUCCESS_STATUSCODE && statusCode <= HttpConstants.StatusCodes.MAXIMUM_SUCCESS_STATUSCODE) {
latencyHistogram = new ConcurrentDoubleHistogram(ClientTelemetry.REQUEST_LATENCY_MAX_MICRO_SEC, ClientTelemetry.REQUEST_LATENCY_SUCCESS_PRECISION);
} else {
latencyHistogram = new ConcurrentDoubleHistogram(ClientTelemetry.REQUEST_LATENCY_MAX_MICRO_SEC, ClientTelemetry.REQUEST_LATENCY_FAILURE_PRECISION);
}
latencyHistogram.setAutoResize(true);
ClientTelemetry.recordValue(latencyHistogram, cosmosDiagnostics.getDuration().toNanos()/1000);
telemetry.getClientTelemetryInfo().getOperationInfoMap().put(reportPayloadLatency, latencyHistogram);
}
ReportPayload reportPayloadRequestCharge = createReportPayload(cosmosAsyncClient, cosmosDiagnostics,
statusCode, objectSize, containerId, databaseId
, operationType, resourceType, consistencyLevel, ClientTelemetry.REQUEST_CHARGE_NAME, ClientTelemetry.REQUEST_CHARGE_UNIT);
ConcurrentDoubleHistogram requestChargeHistogram = telemetry.getClientTelemetryInfo().getOperationInfoMap().get(reportPayloadRequestCharge);
if (requestChargeHistogram != null) {
ClientTelemetry.recordValue(requestChargeHistogram, requestCharge);
} else {
requestChargeHistogram = new ConcurrentDoubleHistogram(ClientTelemetry.REQUEST_CHARGE_MAX, ClientTelemetry.REQUEST_CHARGE_PRECISION);
requestChargeHistogram.setAutoResize(true);
ClientTelemetry.recordValue(requestChargeHistogram, requestCharge);
telemetry.getClientTelemetryInfo().getOperationInfoMap().put(reportPayloadRequestCharge,
requestChargeHistogram);
}
}
/**
 * Builds the client-telemetry {@link ReportPayload} that identifies one metric series:
 * the metric name/unit plus the operation dimensions (database, container, operation,
 * resource type, consistency, status code, payload-size bucket, regions contacted)
 * that samples are aggregated by.
 *
 * @param objectSize payload size in bytes, or null when unknown (e.g. for failures).
 * @param consistencyLevel per-request consistency; when null the client's configured
 *                         account-level consistency is recorded instead.
 * @return the populated payload, used as a histogram-map key by the caller.
 */
private ReportPayload createReportPayload(CosmosAsyncClient cosmosAsyncClient,
CosmosDiagnostics cosmosDiagnostics,
int statusCode,
Integer objectSize,
String containerId,
String databaseId,
OperationType operationType,
ResourceType resourceType,
ConsistencyLevel consistencyLevel,
String metricsName,
String unitName) {
ReportPayload reportPayload = new ReportPayload(metricsName, unitName);
reportPayload.setRegionsContacted(BridgeInternal.getRegionsContacted(cosmosDiagnostics).toString());
// Fall back to the client's configured consistency when none was set on the request.
reportPayload.setConsistency(consistencyLevel == null ?
BridgeInternal.getContextClient(cosmosAsyncClient).getConsistencyLevel() :
consistencyLevel);
// Size bucket is only recorded when the payload length is known.
if (objectSize != null) {
reportPayload.setGreaterThan1Kb(objectSize > ClientTelemetry.ONE_KB_TO_BYTES);
}
reportPayload.setDatabaseName(databaseId);
reportPayload.setContainerName(containerId);
reportPayload.setOperation(operationType);
reportPayload.setResource(resourceType);
reportPayload.setStatusCode(statusCode);
return reportPayload;
}
/**
 * Attaches the request diagnostics captured in {@code cosmosDiagnostics} to the current
 * tracing span as a series of named events: direct-mode store responses, supplemental
 * store responses, gateway statistics, retry context, address resolution, serialization
 * diagnostics, regions contacted, system information and client configuration.
 * Each event payload is the JSON serialization of the corresponding statistics object,
 * timestamped with the request start time (preferring the "created" timeline event).
 *
 * @param cosmosDiagnostics diagnostics of the completed operation; null is a no-op.
 * @param context call metadata identifying the span the events are attached to.
 * @throws JsonProcessingException if a statistics object cannot be serialized to JSON.
 */
private void addDiagnosticsOnTracerEvent(CosmosDiagnostics cosmosDiagnostics, Context context) throws JsonProcessingException {
if (cosmosDiagnostics == null) {
return;
}
ClientSideRequestStatistics clientSideRequestStatistics =
BridgeInternal.getClientSideRequestStatics(cosmosDiagnostics);
Map<String, Object> attributes = null;
int diagnosticsCounter = 1;
// One "StoreResponseN" event per direct-mode store response.
for (ClientSideRequestStatistics.StoreResponseStatistics storeResponseStatistics :
clientSideRequestStatistics.getResponseStatisticsList()) {
attributes = new HashMap<>();
attributes.put(JSON_STRING, mapper.writeValueAsString(storeResponseStatistics));
Iterator<RequestTimeline.Event> eventIterator = null;
try {
if (storeResponseStatistics.getStoreResult() != null) {
eventIterator =
DirectBridgeInternal.getRequestTimeline(storeResponseStatistics.getStoreResult().toResponse()).iterator();
}
// toResponse() rethrows failed store results; the timeline is then read off the exception.
} catch (CosmosException ex) {
eventIterator = BridgeInternal.getRequestTimeline(ex).iterator();
}
OffsetDateTime requestStartTime = OffsetDateTime.ofInstant(storeResponseStatistics.getRequestResponseTimeUTC()
, ZoneOffset.UTC);
// Prefer the "created" timeline event's start time as the event timestamp, when present.
if (eventIterator != null) {
while (eventIterator.hasNext()) {
RequestTimeline.Event event = eventIterator.next();
if (event.getName().equals("created")) {
requestStartTime = OffsetDateTime.ofInstant(event.getStartTime(), ZoneOffset.UTC);
break;
}
}
}
this.addEvent("StoreResponse" + diagnosticsCounter++, attributes, requestStartTime, context);
}
// Counter restarts for the supplemental (capped) store-response list.
diagnosticsCounter = 1;
for (ClientSideRequestStatistics.StoreResponseStatistics statistics :
ClientSideRequestStatistics.getCappedSupplementalResponseStatisticsList(clientSideRequestStatistics.getSupplementalResponseStatisticsList())) {
attributes = new HashMap<>();
attributes.put(JSON_STRING, mapper.writeValueAsString(statistics));
OffsetDateTime requestStartTime = OffsetDateTime.ofInstant(statistics.getRequestResponseTimeUTC(),
ZoneOffset.UTC);
if (statistics.getStoreResult() != null) {
Iterator<RequestTimeline.Event> eventIterator =
DirectBridgeInternal.getRequestTimeline(statistics.getStoreResult().toResponse()).iterator();
while (eventIterator.hasNext()) {
RequestTimeline.Event event = eventIterator.next();
if (event.getName().equals("created")) {
requestStartTime = OffsetDateTime.ofInstant(event.getStartTime(), ZoneOffset.UTC);
break;
}
}
}
this.addEvent("Supplemental StoreResponse" + diagnosticsCounter++, attributes, requestStartTime, context);
}
// Gateway-mode statistics, when the request went through the gateway.
if (clientSideRequestStatistics.getGatewayStatistics() != null) {
attributes = new HashMap<>();
attributes.put(JSON_STRING,
mapper.writeValueAsString(clientSideRequestStatistics.getGatewayStatistics()));
OffsetDateTime requestStartTime =
OffsetDateTime.ofInstant(clientSideRequestStatistics.getRequestStartTimeUTC(), ZoneOffset.UTC);
if (clientSideRequestStatistics.getGatewayStatistics().getRequestTimeline() != null) {
Iterator<RequestTimeline.Event> eventIterator =
clientSideRequestStatistics.getGatewayStatistics().getRequestTimeline().iterator();
while (eventIterator.hasNext()) {
RequestTimeline.Event event = eventIterator.next();
if (event.getName().equals("created")) {
requestStartTime = OffsetDateTime.ofInstant(event.getStartTime(), ZoneOffset.UTC);
break;
}
}
}
this.addEvent("GatewayStatistics", attributes, requestStartTime, context);
}
// Retry context is only emitted when at least one retry occurred.
if (clientSideRequestStatistics.getRetryContext().getRetryStartTime() != null) {
attributes = new HashMap<>();
attributes.put(JSON_STRING,
mapper.writeValueAsString(clientSideRequestStatistics.getRetryContext()));
this.addEvent("Retry Context", attributes,
OffsetDateTime.ofInstant(clientSideRequestStatistics.getRetryContext().getRetryStartTime(),
ZoneOffset.UTC), context);
}
diagnosticsCounter = 1;
for (ClientSideRequestStatistics.AddressResolutionStatistics addressResolutionStatistics :
clientSideRequestStatistics.getAddressResolutionStatistics().values()) {
attributes = new HashMap<>();
attributes.put(JSON_STRING, mapper.writeValueAsString(addressResolutionStatistics));
this.addEvent("AddressResolutionStatistics" + diagnosticsCounter++, attributes,
OffsetDateTime.ofInstant(addressResolutionStatistics.getStartTimeUTC(), ZoneOffset.UTC), context);
}
if (clientSideRequestStatistics.getSerializationDiagnosticsContext().serializationDiagnosticsList != null) {
for (SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics :
clientSideRequestStatistics.getSerializationDiagnosticsContext().serializationDiagnosticsList) {
attributes = new HashMap<>();
attributes.put(JSON_STRING, mapper.writeValueAsString(serializationDiagnostics));
this.addEvent("SerializationDiagnostics " + serializationDiagnostics.serializationType, attributes,
OffsetDateTime.ofInstant(serializationDiagnostics.startTimeUTC, ZoneOffset.UTC), context);
}
}
// The remaining events share the overall request start time as their timestamp.
attributes = new HashMap<>();
attributes.put(JSON_STRING,
mapper.writeValueAsString(clientSideRequestStatistics.getRegionsContacted()));
this.addEvent("RegionContacted", attributes,
OffsetDateTime.ofInstant(clientSideRequestStatistics.getRequestStartTimeUTC(), ZoneOffset.UTC), context);
attributes = new HashMap<>();
attributes.put(JSON_STRING,
mapper.writeValueAsString(ClientSideRequestStatistics.fetchSystemInformation()));
this.addEvent("SystemInformation", attributes,
OffsetDateTime.ofInstant(clientSideRequestStatistics.getRequestStartTimeUTC(), ZoneOffset.UTC), context);
attributes = new HashMap<>();
attributes.put(JSON_STRING,
mapper.writeValueAsString(clientSideRequestStatistics.getDiagnosticsClientContext()));
this.addEvent("ClientCfgs", attributes,
OffsetDateTime.ofInstant(clientSideRequestStatistics.getRequestStartTimeUTC(), ZoneOffset.UTC), context);
}
} |
Removed error.msg and error.type | private void end(int statusCode, Throwable throwable, Context context) {
if (throwable != null) {
if (statusCode == HttpConstants.StatusCodes.NOTFOUND) {
tracer.setAttribute(TracerProvider.ERROR_MSG, "Not found exception", context);
tracer.setAttribute(TracerProvider.ERROR_TYPE, throwable.getClass().getName(), context);
tracer.end(statusCode, null, context);
} else {
tracer.setAttribute(TracerProvider.ERROR_MSG, throwable.getMessage(), context);
tracer.setAttribute(TracerProvider.ERROR_TYPE, throwable.getClass().getName(), context);
tracer.end(statusCode, throwable, context);
}
} else {
tracer.end(statusCode, null, context);
}
} | tracer.setAttribute(TracerProvider.ERROR_MSG, "Not found exception", context); | private void end(int statusCode, Throwable throwable, Context context) {
if (throwable != null) {
if (throwable instanceof CosmosException) {
CosmosException cosmosException = (CosmosException) throwable;
if (statusCode == HttpConstants.StatusCodes.NOTFOUND && cosmosException.getSubStatusCode() == 0) {
tracer.end(statusCode, null, context);
return;
}
}
}
tracer.end(statusCode, throwable, context);
} | class TracerProvider {
private Tracer tracer;
private static final Logger LOGGER = LoggerFactory.getLogger(TracerProvider.class);
private static final ObjectMapper mapper = new ObjectMapper();
private final static String JSON_STRING = "JSON";
public final static String DB_TYPE_VALUE = "Cosmos";
public final static String DB_TYPE = "db.type";
public final static String DB_INSTANCE = "db.instance";
public final static String DB_URL = "db.url";
public static final String DB_STATEMENT = "db.statement";
public static final String ERROR_MSG = "error.msg";
public static final String ERROR_TYPE = "error.type";
public static final String COSMOS_CALL_DEPTH = "cosmosCallDepth";
public static final String COSMOS_CALL_DEPTH_VAL = "nested";
public static final int ERROR_CODE = 0;
public static final String RESOURCE_PROVIDER_NAME = "Microsoft.DocumentDB";
public final Duration CRUD_THRESHOLD_FOR_DIAGNOSTICS = Duration.ofMillis(100);
public final Duration QUERY_THRESHOLD_FOR_DIAGNOSTICS = Duration.ofMillis(500);
public TracerProvider(Tracer tracer) {
this.tracer = tracer;
}
public boolean isEnabled() {
return tracer != null;
}
/**
* For each tracer plugged into the SDK a new tracing span is created.
* <p>
* The {@code context} will be checked for containing information about a parent span. If a parent span is found the
* new span will be added as a child, otherwise the span will be created and added to the context and any downstream
* start calls will use the created span as the parent.
*
* @param context Additional metadata that is passed through the call stack.
* @return An updated context object.
*/
public Context startSpan(String methodName, String databaseId, String endpoint, Context context) {
Context local = Objects.requireNonNull(context, "'context' cannot be null.");
local = local.addData(AZ_TRACING_NAMESPACE_KEY, RESOURCE_PROVIDER_NAME);
local = tracer.start(methodName, local);
if (databaseId != null) {
tracer.setAttribute(TracerProvider.DB_INSTANCE, databaseId, local);
}
tracer.setAttribute(TracerProvider.DB_TYPE, DB_TYPE_VALUE, local);
tracer.setAttribute(TracerProvider.DB_URL, endpoint, local);
tracer.setAttribute(TracerProvider.DB_STATEMENT, methodName, local);
return local;
}
/**
* Adds an event to the current span with the provided {@code timestamp} and {@code attributes}.
* <p>This API does not provide any normalization if provided timestamps are out of range of the current
* span timeline</p>
* <p>Supported attribute values include String, double, boolean, long, String [], double [], long [].
* Any other Object value type and null values will be silently ignored.</p>
*
* @param name the name of the event.
* @param attributes the additional attributes to be set for the event.
* @param timestamp The instant, in UTC, at which the event will be associated to the span.
* @param context the call metadata containing information of the span to which the event should be associated with.
* @throws NullPointerException if {@code eventName} is {@code null}.
*/
public void addEvent(String name, Map<String, Object> attributes, OffsetDateTime timestamp, Context context) {
tracer.addEvent(name, attributes, timestamp, context);
}
/**
* Given a context containing the current tracing span the span is marked completed with status info from
* {@link Signal}. For each tracer plugged into the SDK the current tracing span is marked as completed.
*
* @param context Additional metadata that is passed through the call stack.
* @param signal The signal indicates the status and contains the metadata we need to end the tracing span.
*/
public <T extends CosmosResponse<? extends Resource>> void endSpan(Context context,
Signal<T> signal,
int statusCode) {
Objects.requireNonNull(context, "'context' cannot be null.");
Objects.requireNonNull(signal, "'signal' cannot be null.");
switch (signal.getType()) {
case ON_COMPLETE:
end(statusCode, null, context);
break;
case ON_ERROR:
Throwable throwable = null;
if (signal.hasError()) {
throwable = signal.getThrowable();
if (throwable instanceof CosmosException) {
CosmosException exception = (CosmosException) throwable;
statusCode = exception.getStatusCode();
}
}
end(statusCode, throwable, context);
break;
default:
break;
}
}
public <T extends CosmosResponse<?>> Mono<T> traceEnabledCosmosResponsePublisher(Mono<T> resultPublisher,
Context context,
String spanName,
String databaseId,
String endpoint) {
return traceEnabledPublisher(resultPublisher, context, spanName, databaseId, endpoint,
(T response) -> response.getStatusCode(), (T response) -> response.getDiagnostics(), null);
}
public Mono<CosmosBatchResponse> traceEnabledBatchResponsePublisher(Mono<CosmosBatchResponse> resultPublisher,
Context context,
String spanName,
String containerId,
String databaseId,
CosmosAsyncClient client,
ConsistencyLevel consistencyLevel,
OperationType operationType,
ResourceType resourceType) {
return publisherWithClientTelemetry(resultPublisher, context, spanName, containerId, databaseId,
BridgeInternal.getServiceEndpoint(client),
client,
consistencyLevel,
operationType,
resourceType,
CosmosBatchResponse::getStatusCode,
CosmosBatchResponse::getDiagnostics,
null);
}
public <T> Mono<CosmosItemResponse<T>> traceEnabledCosmosItemResponsePublisher(Mono<CosmosItemResponse<T>> resultPublisher,
Context context,
String spanName,
String containerId,
String databaseId,
CosmosAsyncClient client,
ConsistencyLevel consistencyLevel,
OperationType operationType,
ResourceType resourceType,
Duration thresholdForDiagnosticsOnTracer) {
return publisherWithClientTelemetry(resultPublisher, context, spanName, containerId, databaseId,
BridgeInternal.getServiceEndpoint(client),
client,
consistencyLevel,
operationType,
resourceType,
CosmosItemResponse::getStatusCode,
CosmosItemResponse::getDiagnostics,
thresholdForDiagnosticsOnTracer);
}
/**
 * Wraps the given publisher with a tracing span: a span is opened on subscription, diagnostics
 * are attached as span events when the operation exceeds the latency threshold, and the span is
 * ended with the terminal status. Nested SDK calls (marked via {@code COSMOS_CALL_DEPTH}) reuse
 * the span of the outermost call and are not instrumented again.
 *
 * @param resultPublisher publisher producing the service response.
 * @param context call metadata; may carry a parent span and the nested-call marker.
 * @param spanName name of the tracing span to create.
 * @param databaseId database name recorded on the span; may be {@code null}.
 * @param endpoint service endpoint recorded on the span.
 * @param statusCodeFunc extracts the status code from a successful response.
 * @param diagnosticFunc extracts diagnostics from a successful response.
 * @param thresholdForDiagnosticsOnTracer latency above which diagnostics are attached;
 * {@code null} uses {@code CRUD_THRESHOLD_FOR_DIAGNOSTICS}.
 * @return the instrumented publisher.
 */
private <T> Mono<T> traceEnabledPublisher(Mono<T> resultPublisher,
                                          Context context,
                                          String spanName,
                                          String databaseId,
                                          String endpoint,
                                          Function<T, Integer> statusCodeFunc,
                                          Function<T, CosmosDiagnostics> diagnosticFunc,
                                          Duration thresholdForDiagnosticsOnTracer) {
    final AtomicReference<Context> parentContext = new AtomicReference<>(Context.NONE);
    Optional<Object> callDepth = context.getData(COSMOS_CALL_DEPTH);
    // A present call-depth marker means an outer SDK call already owns the span.
    final boolean isNestedCall = callDepth.isPresent();
    return resultPublisher
        .doOnSubscribe(ignoredValue -> {
            if (isEnabled() && !isNestedCall) {
                parentContext.set(this.startSpan(spanName, databaseId, endpoint, context));
            }
        }).doOnSuccess(response -> {
            if (isEnabled() && !isNestedCall) {
                CosmosDiagnostics cosmosDiagnostics = diagnosticFunc.apply(response);
                try {
                    Duration threshold = thresholdForDiagnosticsOnTracer;
                    if (threshold == null) {
                        threshold = CRUD_THRESHOLD_FOR_DIAGNOSTICS;
                    }
                    // Only attach (potentially large) diagnostics for slow operations.
                    if (cosmosDiagnostics != null
                        && cosmosDiagnostics.getDuration() != null
                        && cosmosDiagnostics.getDuration().compareTo(threshold) > 0) {
                        addDiagnosticsOnTracerEvent(cosmosDiagnostics, parentContext.get());
                    }
                } catch (JsonProcessingException ex) {
                    // FIX: original format string had no '{}' placeholder, so SLF4J silently
                    // dropped the message argument.
                    LOGGER.warn("Error while serializing diagnostics for tracer: {}", ex.getMessage());
                }
                this.endSpan(parentContext.get(), Signal.complete(), statusCodeFunc.apply(response));
            }
        }).doOnError(throwable -> {
            if (isEnabled() && !isNestedCall) {
                this.endSpan(parentContext.get(), Signal.error(throwable), ERROR_CODE);
            }
        });
}
/**
 * Wraps the given publisher with tracing and, when client telemetry is enabled, records
 * latency/request-charge metrics for item and batch responses as well as for Cosmos failures.
 *
 * @param resultPublisher publisher producing the service response.
 * @param context call metadata passed through the call stack.
 * @param spanName name of the tracing span to create.
 * @param containerId container name reported in telemetry.
 * @param databaseId database name reported in telemetry and on the span.
 * @param endpoint service endpoint recorded on the span.
 * @param client client whose telemetry pipeline receives the metrics.
 * @param consistencyLevel effective consistency level, or {@code null} for the client default.
 * @param operationType operation reported in telemetry.
 * @param resourceType resource reported in telemetry.
 * @param statusCodeFunc extracts the status code from a successful response.
 * @param diagnosticFunc extracts diagnostics from a successful response.
 * @param thresholdForDiagnosticsOnTracer latency above which diagnostics are attached to the span;
 * {@code null} uses the CRUD default.
 * @return the instrumented publisher.
 */
private <T> Mono<T> publisherWithClientTelemetry(Mono<T> resultPublisher,
                                                 Context context,
                                                 String spanName,
                                                 String containerId,
                                                 String databaseId,
                                                 String endpoint,
                                                 CosmosAsyncClient client,
                                                 ConsistencyLevel consistencyLevel,
                                                 OperationType operationType,
                                                 ResourceType resourceType,
                                                 Function<T, Integer> statusCodeFunc,
                                                 Function<T, CosmosDiagnostics> diagnosticFunc,
                                                 Duration thresholdForDiagnosticsOnTracer) {
    Mono<T> tracerMono = traceEnabledPublisher(resultPublisher, context, spanName, databaseId, endpoint,
        statusCodeFunc, diagnosticFunc, thresholdForDiagnosticsOnTracer);
    return tracerMono
        .doOnSuccess(response -> {
            // Evaluate the telemetry switch once per signal instead of once per branch.
            if (!Configs.isClientTelemetryEnabled(BridgeInternal.isClientTelemetryEnabled(client))) {
                return;
            }
            if (response instanceof CosmosItemResponse) {
                @SuppressWarnings("unchecked")
                CosmosItemResponse<T> itemResponse = (CosmosItemResponse<T>) response;
                fillClientTelemetry(client, itemResponse.getDiagnostics(), itemResponse.getStatusCode(),
                    ModelBridgeInternal.getPayloadLength(itemResponse), containerId,
                    databaseId, operationType, resourceType, consistencyLevel,
                    (float) itemResponse.getRequestCharge());
            } else if (response instanceof CosmosBatchResponse) {
                // Plain downcast to a non-generic type; no unchecked warning to suppress.
                CosmosBatchResponse cosmosBatchResponse = (CosmosBatchResponse) response;
                fillClientTelemetry(client, cosmosBatchResponse.getDiagnostics(), cosmosBatchResponse.getStatusCode(),
                    ModelBridgeInternal.getPayloadLength(cosmosBatchResponse), containerId,
                    databaseId, operationType, resourceType, consistencyLevel,
                    (float) cosmosBatchResponse.getRequestCharge());
            }
        }).doOnError(throwable -> {
            if (Configs.isClientTelemetryEnabled(BridgeInternal.isClientTelemetryEnabled(client))
                && throwable instanceof CosmosException) {
                CosmosException cosmosException = (CosmosException) throwable;
                // No payload length for failures; the exception still carries charge and diagnostics.
                fillClientTelemetry(client, cosmosException.getDiagnostics(), cosmosException.getStatusCode(),
                    null, containerId,
                    databaseId, operationType, resourceType, consistencyLevel,
                    (float) cosmosException.getRequestCharge());
            }
        });
}
/**
 * Records latency and request-charge metrics for a single operation into the client telemetry
 * histograms, creating the histograms lazily on first use for each bucket.
 *
 * @param cosmosAsyncClient client whose telemetry collector receives the metrics.
 * @param cosmosDiagnostics diagnostics of the completed operation (supplies the duration).
 * @param statusCode service status code; selects the latency histogram precision.
 * @param objectSize payload size in bytes, or {@code null} when unknown (e.g. on failure).
 * @param containerId container name used in the telemetry bucket key.
 * @param databaseId database name used in the telemetry bucket key.
 * @param operationType operation used in the telemetry bucket key.
 * @param resourceType resource used in the telemetry bucket key.
 * @param consistencyLevel effective consistency, or {@code null} to use the client default.
 * @param requestCharge request units charged for the operation.
 */
private void fillClientTelemetry(CosmosAsyncClient cosmosAsyncClient,
CosmosDiagnostics cosmosDiagnostics,
int statusCode,
Integer objectSize,
String containerId,
String databaseId,
OperationType operationType,
ResourceType resourceType,
ConsistencyLevel consistencyLevel,
float requestCharge) {
ClientTelemetry telemetry = BridgeInternal.getContextClient(cosmosAsyncClient).getClientTelemetry();
// Latency metric: histograms are keyed by the full report payload (region, consistency, op, ...).
ReportPayload reportPayloadLatency = createReportPayload(cosmosAsyncClient, cosmosDiagnostics,
statusCode, objectSize, containerId, databaseId
, operationType, resourceType, consistencyLevel, ClientTelemetry.REQUEST_LATENCY_NAME,
ClientTelemetry.REQUEST_LATENCY_UNIT);
ConcurrentDoubleHistogram latencyHistogram = telemetry.getClientTelemetryInfo().getOperationInfoMap().get(reportPayloadLatency);
if (latencyHistogram != null) {
// Duration is recorded in microseconds (nanos / 1000).
ClientTelemetry.recordValue(latencyHistogram, cosmosDiagnostics.getDuration().toNanos()/1000);
} else {
// First observation for this bucket: success and failure use different histogram precisions.
if (statusCode >= HttpConstants.StatusCodes.MINIMUM_SUCCESS_STATUSCODE && statusCode <= HttpConstants.StatusCodes.MAXIMUM_SUCCESS_STATUSCODE) {
latencyHistogram = new ConcurrentDoubleHistogram(ClientTelemetry.REQUEST_LATENCY_MAX_MICRO_SEC, ClientTelemetry.REQUEST_LATENCY_SUCCESS_PRECISION);
} else {
latencyHistogram = new ConcurrentDoubleHistogram(ClientTelemetry.REQUEST_LATENCY_MAX_MICRO_SEC, ClientTelemetry.REQUEST_LATENCY_FAILURE_PRECISION);
}
latencyHistogram.setAutoResize(true);
ClientTelemetry.recordValue(latencyHistogram, cosmosDiagnostics.getDuration().toNanos()/1000);
telemetry.getClientTelemetryInfo().getOperationInfoMap().put(reportPayloadLatency, latencyHistogram);
}
// Request-charge metric: same bucketing, fixed max/precision.
ReportPayload reportPayloadRequestCharge = createReportPayload(cosmosAsyncClient, cosmosDiagnostics,
statusCode, objectSize, containerId, databaseId
, operationType, resourceType, consistencyLevel, ClientTelemetry.REQUEST_CHARGE_NAME, ClientTelemetry.REQUEST_CHARGE_UNIT);
ConcurrentDoubleHistogram requestChargeHistogram = telemetry.getClientTelemetryInfo().getOperationInfoMap().get(reportPayloadRequestCharge);
if (requestChargeHistogram != null) {
ClientTelemetry.recordValue(requestChargeHistogram, requestCharge);
} else {
requestChargeHistogram = new ConcurrentDoubleHistogram(ClientTelemetry.REQUEST_CHARGE_MAX, ClientTelemetry.REQUEST_CHARGE_PRECISION);
requestChargeHistogram.setAutoResize(true);
ClientTelemetry.recordValue(requestChargeHistogram, requestCharge);
telemetry.getClientTelemetryInfo().getOperationInfoMap().put(reportPayloadRequestCharge,
requestChargeHistogram);
}
}
/**
 * Builds the {@link ReportPayload} used as the bucket key (and report row) for a client
 * telemetry metric, combining the operation descriptors with the metric name and unit.
 *
 * @param cosmosAsyncClient client used to resolve the default consistency level.
 * @param cosmosDiagnostics diagnostics supplying the regions contacted.
 * @param statusCode service status code of the operation.
 * @param objectSize payload size in bytes, or {@code null} when unknown.
 * @param containerId container name.
 * @param databaseId database name.
 * @param operationType operation descriptor.
 * @param resourceType resource descriptor.
 * @param consistencyLevel effective consistency, or {@code null} for the client default.
 * @param metricsName name of the metric this payload describes.
 * @param unitName unit of the metric this payload describes.
 * @return the populated payload.
 */
private ReportPayload createReportPayload(CosmosAsyncClient cosmosAsyncClient,
                                          CosmosDiagnostics cosmosDiagnostics,
                                          int statusCode,
                                          Integer objectSize,
                                          String containerId,
                                          String databaseId,
                                          OperationType operationType,
                                          ResourceType resourceType,
                                          ConsistencyLevel consistencyLevel,
                                          String metricsName,
                                          String unitName) {
    ReportPayload payload = new ReportPayload(metricsName, unitName);
    payload.setRegionsContacted(BridgeInternal.getRegionsContacted(cosmosDiagnostics).toString());
    if (consistencyLevel == null) {
        // Fall back to the consistency configured on the client.
        payload.setConsistency(BridgeInternal.getContextClient(cosmosAsyncClient).getConsistencyLevel());
    } else {
        payload.setConsistency(consistencyLevel);
    }
    if (objectSize != null) {
        payload.setGreaterThan1Kb(objectSize > ClientTelemetry.ONE_KB_TO_BYTES);
    }
    payload.setDatabaseName(databaseId);
    payload.setContainerName(containerId);
    payload.setOperation(operationType);
    payload.setResource(resourceType);
    payload.setStatusCode(statusCode);
    return payload;
}
/**
 * Attaches the sections of {@link CosmosDiagnostics} to the current tracing span as timestamped
 * events: store responses, supplemental store responses, gateway statistics, retry context,
 * address resolution, serialization diagnostics, regions contacted, system information and
 * client configuration. Each event carries the serialized JSON under the {@code JSON} attribute.
 *
 * @param cosmosDiagnostics diagnostics to attach; no-op when {@code null}.
 * @param context call metadata carrying the span to attach events to.
 * @throws JsonProcessingException when a diagnostics section cannot be serialized.
 */
private void addDiagnosticsOnTracerEvent(CosmosDiagnostics cosmosDiagnostics, Context context) throws JsonProcessingException {
    if (cosmosDiagnostics == null) {
        return;
    }
    ClientSideRequestStatistics clientSideRequestStatistics =
        BridgeInternal.getClientSideRequestStatics(cosmosDiagnostics);
    Map<String, Object> attributes;
    int diagnosticsCounter = 1;
    for (ClientSideRequestStatistics.StoreResponseStatistics storeResponseStatistics :
        clientSideRequestStatistics.getResponseStatisticsList()) {
        attributes = new HashMap<>();
        attributes.put(JSON_STRING, mapper.writeValueAsString(storeResponseStatistics));
        Iterator<RequestTimeline.Event> eventIterator = null;
        try {
            if (storeResponseStatistics.getStoreResult() != null) {
                eventIterator =
                    DirectBridgeInternal.getRequestTimeline(storeResponseStatistics.getStoreResult().toResponse()).iterator();
            }
        } catch (CosmosException ex) {
            // A failed store result throws on toResponse(); the exception still carries the timeline.
            eventIterator = BridgeInternal.getRequestTimeline(ex).iterator();
        }
        OffsetDateTime requestStartTime = eventStartTimeOrDefault(eventIterator,
            OffsetDateTime.ofInstant(storeResponseStatistics.getRequestResponseTimeUTC(), ZoneOffset.UTC));
        this.addEvent("StoreResponse" + diagnosticsCounter++, attributes, requestStartTime, context);
    }
    diagnosticsCounter = 1;
    for (ClientSideRequestStatistics.StoreResponseStatistics statistics :
        ClientSideRequestStatistics.getCappedSupplementalResponseStatisticsList(clientSideRequestStatistics.getSupplementalResponseStatisticsList())) {
        attributes = new HashMap<>();
        attributes.put(JSON_STRING, mapper.writeValueAsString(statistics));
        Iterator<RequestTimeline.Event> eventIterator = null;
        if (statistics.getStoreResult() != null) {
            eventIterator =
                DirectBridgeInternal.getRequestTimeline(statistics.getStoreResult().toResponse()).iterator();
        }
        OffsetDateTime requestStartTime = eventStartTimeOrDefault(eventIterator,
            OffsetDateTime.ofInstant(statistics.getRequestResponseTimeUTC(), ZoneOffset.UTC));
        this.addEvent("Supplemental StoreResponse" + diagnosticsCounter++, attributes, requestStartTime, context);
    }
    if (clientSideRequestStatistics.getGatewayStatistics() != null) {
        attributes = new HashMap<>();
        attributes.put(JSON_STRING,
            mapper.writeValueAsString(clientSideRequestStatistics.getGatewayStatistics()));
        Iterator<RequestTimeline.Event> eventIterator = null;
        if (clientSideRequestStatistics.getGatewayStatistics().getRequestTimeline() != null) {
            eventIterator = clientSideRequestStatistics.getGatewayStatistics().getRequestTimeline().iterator();
        }
        OffsetDateTime requestStartTime = eventStartTimeOrDefault(eventIterator,
            OffsetDateTime.ofInstant(clientSideRequestStatistics.getRequestStartTimeUTC(), ZoneOffset.UTC));
        this.addEvent("GatewayStatistics", attributes, requestStartTime, context);
    }
    if (clientSideRequestStatistics.getRetryContext().getRetryStartTime() != null) {
        attributes = new HashMap<>();
        attributes.put(JSON_STRING,
            mapper.writeValueAsString(clientSideRequestStatistics.getRetryContext()));
        this.addEvent("Retry Context", attributes,
            OffsetDateTime.ofInstant(clientSideRequestStatistics.getRetryContext().getRetryStartTime(),
                ZoneOffset.UTC), context);
    }
    diagnosticsCounter = 1;
    for (ClientSideRequestStatistics.AddressResolutionStatistics addressResolutionStatistics :
        clientSideRequestStatistics.getAddressResolutionStatistics().values()) {
        attributes = new HashMap<>();
        attributes.put(JSON_STRING, mapper.writeValueAsString(addressResolutionStatistics));
        this.addEvent("AddressResolutionStatistics" + diagnosticsCounter++, attributes,
            OffsetDateTime.ofInstant(addressResolutionStatistics.getStartTimeUTC(), ZoneOffset.UTC), context);
    }
    if (clientSideRequestStatistics.getSerializationDiagnosticsContext().serializationDiagnosticsList != null) {
        for (SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics :
            clientSideRequestStatistics.getSerializationDiagnosticsContext().serializationDiagnosticsList) {
            attributes = new HashMap<>();
            attributes.put(JSON_STRING, mapper.writeValueAsString(serializationDiagnostics));
            this.addEvent("SerializationDiagnostics " + serializationDiagnostics.serializationType, attributes,
                OffsetDateTime.ofInstant(serializationDiagnostics.startTimeUTC, ZoneOffset.UTC), context);
        }
    }
    // The remaining sections are all stamped with the overall request start time.
    OffsetDateTime overallStartTime =
        OffsetDateTime.ofInstant(clientSideRequestStatistics.getRequestStartTimeUTC(), ZoneOffset.UTC);
    attributes = new HashMap<>();
    attributes.put(JSON_STRING,
        mapper.writeValueAsString(clientSideRequestStatistics.getRegionsContacted()));
    this.addEvent("RegionContacted", attributes, overallStartTime, context);
    attributes = new HashMap<>();
    attributes.put(JSON_STRING,
        mapper.writeValueAsString(ClientSideRequestStatistics.fetchSystemInformation()));
    this.addEvent("SystemInformation", attributes, overallStartTime, context);
    attributes = new HashMap<>();
    attributes.put(JSON_STRING,
        mapper.writeValueAsString(clientSideRequestStatistics.getDiagnosticsClientContext()));
    this.addEvent("ClientCfgs", attributes, overallStartTime, context);
}

/**
 * Scans a request timeline for the "created" event and returns its start time; falls back to
 * the supplied default when the iterator is {@code null} or no such event exists.
 * Extracted from three identical inline loops.
 */
private static OffsetDateTime eventStartTimeOrDefault(Iterator<RequestTimeline.Event> eventIterator,
                                                      OffsetDateTime defaultStartTime) {
    if (eventIterator != null) {
        while (eventIterator.hasNext()) {
            RequestTimeline.Event event = eventIterator.next();
            if ("created".equals(event.getName())) {
                return OffsetDateTime.ofInstant(event.getStartTime(), ZoneOffset.UTC);
            }
        }
    }
    return defaultStartTime;
}
} | class TracerProvider {
// The azure-core tracer implementation; null when tracing is disabled (see isEnabled()).
private Tracer tracer;
private static final Logger LOGGER = LoggerFactory.getLogger(TracerProvider.class);
// Shared Jackson mapper used to serialize diagnostics attached to span events.
private static final ObjectMapper mapper = new ObjectMapper();
// Event-attribute key under which serialized diagnostics JSON is stored.
private final static String JSON_STRING = "JSON";
// Span attribute names/values describing the database call.
public final static String DB_TYPE_VALUE = "Cosmos";
public final static String DB_TYPE = "db.type";
public final static String DB_INSTANCE = "db.instance";
public final static String DB_URL = "db.url";
public static final String DB_STATEMENT = "db.statement";
// Context key/value marking nested SDK calls so only the outermost call opens a span.
public static final String COSMOS_CALL_DEPTH = "cosmosCallDepth";
public static final String COSMOS_CALL_DEPTH_VAL = "nested";
// Status code reported when a publisher fails before a service status code is known.
public static final int ERROR_CODE = 0;
public static final String RESOURCE_PROVIDER_NAME = "Microsoft.DocumentDB";
// Latency thresholds above which full diagnostics are attached to the span
// (the CRUD default is applied in traceEnabledPublisher when no threshold is passed).
public final Duration CRUD_THRESHOLD_FOR_DIAGNOSTICS = Duration.ofMillis(100);
public final Duration QUERY_THRESHOLD_FOR_DIAGNOSTICS = Duration.ofMillis(500);
/**
 * Creates a provider backed by the given tracer.
 *
 * @param tracer the azure-core tracer implementation, or {@code null} to disable tracing.
 */
public TracerProvider(Tracer tracer) {
this.tracer = tracer;
}
/**
 * Returns whether tracing is enabled, i.e. a tracer implementation was supplied.
 *
 * @return {@code true} when a tracer is present.
 */
public boolean isEnabled() {
return tracer != null;
}
/**
 * For each tracer plugged into the SDK a new tracing span is created.
 * <p>
 * The {@code context} will be checked for containing information about a parent span. If a parent span is found the
 * new span will be added as a child, otherwise the span will be created and added to the context and any downstream
 * start calls will use the created span as the parent.
 *
 * @param methodName name of the SDK method; used as the span name and the db.statement attribute.
 * @param databaseId database name recorded as db.instance; may be {@code null}.
 * @param endpoint service endpoint recorded as db.url.
 * @param context Additional metadata that is passed through the call stack.
 * @return An updated context object carrying the new span.
 */
public Context startSpan(String methodName, String databaseId, String endpoint, Context context) {
Context local = Objects.requireNonNull(context, "'context' cannot be null.");
// Tag the span as belonging to the Cosmos DB resource provider for azure-core tracing.
local = local.addData(AZ_TRACING_NAMESPACE_KEY, RESOURCE_PROVIDER_NAME);
local = tracer.start(methodName, local);
if (databaseId != null) {
tracer.setAttribute(TracerProvider.DB_INSTANCE, databaseId, local);
}
tracer.setAttribute(TracerProvider.DB_TYPE, DB_TYPE_VALUE, local);
tracer.setAttribute(TracerProvider.DB_URL, endpoint, local);
tracer.setAttribute(TracerProvider.DB_STATEMENT, methodName, local);
return local;
}
/**
 * Adds an event to the current span with the provided {@code timestamp} and {@code attributes}.
 * <p>This API does not provide any normalization if provided timestamps are out of range of the current
 * span timeline</p>
 * <p>Supported attribute values include String, double, boolean, long, String [], double [], long [].
 * Any other Object value type and null values will be silently ignored.</p>
 * <p>NOTE(review): assumes tracing is enabled ({@code tracer != null}); callers appear to guard
 * via {@code isEnabled()} — confirm before calling from new code paths.</p>
 *
 * @param name the name of the event.
 * @param attributes the additional attributes to be set for the event.
 * @param timestamp The instant, in UTC, at which the event will be associated to the span.
 * @param context the call metadata containing information of the span to which the event should be associated with.
 * @throws NullPointerException if {@code eventName} is {@code null}.
 */
public void addEvent(String name, Map<String, Object> attributes, OffsetDateTime timestamp, Context context) {
tracer.addEvent(name, attributes, timestamp, context);
}
/**
 * Marks the tracing span carried in {@code context} as finished, deriving the final status from
 * the terminal {@link Signal}: completion ends the span with the supplied status code, while an
 * error ends it with the throwable (preferring the status code of a {@link CosmosException}).
 *
 * @param context Additional metadata that is passed through the call stack; carries the span.
 * @param signal The signal indicating how the operation terminated.
 * @param statusCode status code to report; overridden by a CosmosException's own status code.
 */
public <T extends CosmosResponse<? extends Resource>> void endSpan(Context context,
                                                                   Signal<T> signal,
                                                                   int statusCode) {
    Objects.requireNonNull(context, "'context' cannot be null.");
    Objects.requireNonNull(signal, "'signal' cannot be null.");
    switch (signal.getType()) {
        case ON_COMPLETE:
            end(statusCode, null, context);
            break;
        case ON_ERROR: {
            Throwable error = signal.hasError() ? signal.getThrowable() : null;
            if (error instanceof CosmosException) {
                // Prefer the service status code carried by the Cosmos exception.
                statusCode = ((CosmosException) error).getStatusCode();
            }
            end(statusCode, error, context);
            break;
        }
        default:
            // Other signal types carry no terminal status; nothing to record.
            break;
    }
}
/**
 * Instruments a {@link CosmosResponse} publisher with tracing.
 * The response's own status code and diagnostics accessors feed the span; no per-call
 * diagnostics threshold is supplied, so the CRUD default applies.
 *
 * @param resultPublisher publisher producing the service response.
 * @param context call metadata passed through the call stack.
 * @param spanName name of the tracing span to create.
 * @param databaseId database name recorded on the span; may be {@code null}.
 * @param endpoint service endpoint recorded on the span.
 * @return the instrumented publisher.
 */
public <T extends CosmosResponse<?>> Mono<T> traceEnabledCosmosResponsePublisher(Mono<T> resultPublisher,
                                                                                 Context context,
                                                                                 String spanName,
                                                                                 String databaseId,
                                                                                 String endpoint) {
    return traceEnabledPublisher(
        resultPublisher,
        context,
        spanName,
        databaseId,
        endpoint,
        CosmosResponse::getStatusCode,
        CosmosResponse::getDiagnostics,
        null);
}
/**
 * Instruments a {@link CosmosBatchResponse} publisher with tracing and client telemetry.
 * Delegates to {@code publisherWithClientTelemetry} using the client's service endpoint and the
 * batch response accessors for status code and diagnostics.
 *
 * @param resultPublisher publisher producing the batch response.
 * @param context call metadata passed through the call stack.
 * @param spanName name of the tracing span to create.
 * @param containerId container name reported in telemetry.
 * @param databaseId database name reported in telemetry and on the span.
 * @param client client whose endpoint and telemetry pipeline are used.
 * @param consistencyLevel effective consistency level, or {@code null} for the client default.
 * @param operationType operation reported in telemetry.
 * @param resourceType resource reported in telemetry.
 * @return the instrumented publisher.
 */
public Mono<CosmosBatchResponse> traceEnabledBatchResponsePublisher(Mono<CosmosBatchResponse> resultPublisher,
Context context,
String spanName,
String containerId,
String databaseId,
CosmosAsyncClient client,
ConsistencyLevel consistencyLevel,
OperationType operationType,
ResourceType resourceType) {
return publisherWithClientTelemetry(resultPublisher, context, spanName, containerId, databaseId,
BridgeInternal.getServiceEndpoint(client),
client,
consistencyLevel,
operationType,
resourceType,
CosmosBatchResponse::getStatusCode,
CosmosBatchResponse::getDiagnostics,
null); // no per-call diagnostics threshold: the CRUD default applies downstream
}
/**
 * Instruments a {@link CosmosItemResponse} publisher with tracing and client telemetry.
 * Delegates to {@code publisherWithClientTelemetry} using the client's service endpoint and the
 * item response accessors for status code and diagnostics.
 *
 * @param resultPublisher publisher producing the item response.
 * @param context call metadata passed through the call stack.
 * @param spanName name of the tracing span to create.
 * @param containerId container name reported in telemetry.
 * @param databaseId database name reported in telemetry and on the span.
 * @param client client whose endpoint and telemetry pipeline are used.
 * @param consistencyLevel effective consistency level, or {@code null} for the client default.
 * @param operationType operation reported in telemetry.
 * @param resourceType resource reported in telemetry.
 * @param thresholdForDiagnosticsOnTracer latency above which diagnostics are attached to the span;
 * {@code null} uses the CRUD default.
 * @return the instrumented publisher.
 */
public <T> Mono<CosmosItemResponse<T>> traceEnabledCosmosItemResponsePublisher(Mono<CosmosItemResponse<T>> resultPublisher,
Context context,
String spanName,
String containerId,
String databaseId,
CosmosAsyncClient client,
ConsistencyLevel consistencyLevel,
OperationType operationType,
ResourceType resourceType,
Duration thresholdForDiagnosticsOnTracer) {
return publisherWithClientTelemetry(resultPublisher, context, spanName, containerId, databaseId,
BridgeInternal.getServiceEndpoint(client),
client,
consistencyLevel,
operationType,
resourceType,
CosmosItemResponse::getStatusCode,
CosmosItemResponse::getDiagnostics,
thresholdForDiagnosticsOnTracer);
}
/**
 * Wraps the given publisher with a tracing span: a span is opened on subscription, diagnostics
 * are attached as span events when the operation exceeds the latency threshold, and the span is
 * ended with the terminal status. Nested SDK calls (marked via {@code COSMOS_CALL_DEPTH}) reuse
 * the span of the outermost call and are not instrumented again.
 *
 * @param resultPublisher publisher producing the service response.
 * @param context call metadata; may carry a parent span and the nested-call marker.
 * @param spanName name of the tracing span to create.
 * @param databaseId database name recorded on the span; may be {@code null}.
 * @param endpoint service endpoint recorded on the span.
 * @param statusCodeFunc extracts the status code from a successful response.
 * @param diagnosticFunc extracts diagnostics from a successful response.
 * @param thresholdForDiagnosticsOnTracer latency above which diagnostics are attached;
 * {@code null} uses {@code CRUD_THRESHOLD_FOR_DIAGNOSTICS}.
 * @return the instrumented publisher.
 */
private <T> Mono<T> traceEnabledPublisher(Mono<T> resultPublisher,
                                          Context context,
                                          String spanName,
                                          String databaseId,
                                          String endpoint,
                                          Function<T, Integer> statusCodeFunc,
                                          Function<T, CosmosDiagnostics> diagnosticFunc,
                                          Duration thresholdForDiagnosticsOnTracer) {
    final AtomicReference<Context> parentContext = new AtomicReference<>(Context.NONE);
    Optional<Object> callDepth = context.getData(COSMOS_CALL_DEPTH);
    // A present call-depth marker means an outer SDK call already owns the span.
    final boolean isNestedCall = callDepth.isPresent();
    return resultPublisher
        .doOnSubscribe(ignoredValue -> {
            if (isEnabled() && !isNestedCall) {
                parentContext.set(this.startSpan(spanName, databaseId, endpoint, context));
            }
        }).doOnSuccess(response -> {
            if (isEnabled() && !isNestedCall) {
                CosmosDiagnostics cosmosDiagnostics = diagnosticFunc.apply(response);
                try {
                    Duration threshold = thresholdForDiagnosticsOnTracer;
                    if (threshold == null) {
                        threshold = CRUD_THRESHOLD_FOR_DIAGNOSTICS;
                    }
                    // Only attach (potentially large) diagnostics for slow operations.
                    if (cosmosDiagnostics != null
                        && cosmosDiagnostics.getDuration() != null
                        && cosmosDiagnostics.getDuration().compareTo(threshold) > 0) {
                        addDiagnosticsOnTracerEvent(cosmosDiagnostics, parentContext.get());
                    }
                } catch (JsonProcessingException ex) {
                    // FIX: original format string had no '{}' placeholder, so SLF4J silently
                    // dropped the message argument.
                    LOGGER.warn("Error while serializing diagnostics for tracer: {}", ex.getMessage());
                }
                this.endSpan(parentContext.get(), Signal.complete(), statusCodeFunc.apply(response));
            }
        }).doOnError(throwable -> {
            if (isEnabled() && !isNestedCall) {
                this.endSpan(parentContext.get(), Signal.error(throwable), ERROR_CODE);
            }
        });
}
/**
 * Wraps the given publisher with tracing and, when client telemetry is enabled, records
 * latency/request-charge metrics for item and batch responses as well as for Cosmos failures.
 *
 * @param resultPublisher publisher producing the service response.
 * @param context call metadata passed through the call stack.
 * @param spanName name of the tracing span to create.
 * @param containerId container name reported in telemetry.
 * @param databaseId database name reported in telemetry and on the span.
 * @param endpoint service endpoint recorded on the span.
 * @param client client whose telemetry pipeline receives the metrics.
 * @param consistencyLevel effective consistency level, or {@code null} for the client default.
 * @param operationType operation reported in telemetry.
 * @param resourceType resource reported in telemetry.
 * @param statusCodeFunc extracts the status code from a successful response.
 * @param diagnosticFunc extracts diagnostics from a successful response.
 * @param thresholdForDiagnosticsOnTracer latency above which diagnostics are attached to the span;
 * {@code null} uses the CRUD default.
 * @return the instrumented publisher.
 */
private <T> Mono<T> publisherWithClientTelemetry(Mono<T> resultPublisher,
                                                 Context context,
                                                 String spanName,
                                                 String containerId,
                                                 String databaseId,
                                                 String endpoint,
                                                 CosmosAsyncClient client,
                                                 ConsistencyLevel consistencyLevel,
                                                 OperationType operationType,
                                                 ResourceType resourceType,
                                                 Function<T, Integer> statusCodeFunc,
                                                 Function<T, CosmosDiagnostics> diagnosticFunc,
                                                 Duration thresholdForDiagnosticsOnTracer) {
    Mono<T> tracerMono = traceEnabledPublisher(resultPublisher, context, spanName, databaseId, endpoint,
        statusCodeFunc, diagnosticFunc, thresholdForDiagnosticsOnTracer);
    return tracerMono
        .doOnSuccess(response -> {
            // Evaluate the telemetry switch once per signal instead of once per branch.
            if (!Configs.isClientTelemetryEnabled(BridgeInternal.isClientTelemetryEnabled(client))) {
                return;
            }
            if (response instanceof CosmosItemResponse) {
                @SuppressWarnings("unchecked")
                CosmosItemResponse<T> itemResponse = (CosmosItemResponse<T>) response;
                fillClientTelemetry(client, itemResponse.getDiagnostics(), itemResponse.getStatusCode(),
                    ModelBridgeInternal.getPayloadLength(itemResponse), containerId,
                    databaseId, operationType, resourceType, consistencyLevel,
                    (float) itemResponse.getRequestCharge());
            } else if (response instanceof CosmosBatchResponse) {
                // Plain downcast to a non-generic type; no unchecked warning to suppress.
                CosmosBatchResponse cosmosBatchResponse = (CosmosBatchResponse) response;
                fillClientTelemetry(client, cosmosBatchResponse.getDiagnostics(), cosmosBatchResponse.getStatusCode(),
                    ModelBridgeInternal.getPayloadLength(cosmosBatchResponse), containerId,
                    databaseId, operationType, resourceType, consistencyLevel,
                    (float) cosmosBatchResponse.getRequestCharge());
            }
        }).doOnError(throwable -> {
            if (Configs.isClientTelemetryEnabled(BridgeInternal.isClientTelemetryEnabled(client))
                && throwable instanceof CosmosException) {
                CosmosException cosmosException = (CosmosException) throwable;
                // No payload length for failures; the exception still carries charge and diagnostics.
                fillClientTelemetry(client, cosmosException.getDiagnostics(), cosmosException.getStatusCode(),
                    null, containerId,
                    databaseId, operationType, resourceType, consistencyLevel,
                    (float) cosmosException.getRequestCharge());
            }
        });
}
/**
 * Records latency and request-charge metrics for a single operation into the client telemetry
 * histograms, creating the histograms lazily on first use for each bucket.
 *
 * @param cosmosAsyncClient client whose telemetry collector receives the metrics.
 * @param cosmosDiagnostics diagnostics of the completed operation (supplies the duration).
 * @param statusCode service status code; selects the latency histogram precision.
 * @param objectSize payload size in bytes, or {@code null} when unknown (e.g. on failure).
 * @param containerId container name used in the telemetry bucket key.
 * @param databaseId database name used in the telemetry bucket key.
 * @param operationType operation used in the telemetry bucket key.
 * @param resourceType resource used in the telemetry bucket key.
 * @param consistencyLevel effective consistency, or {@code null} to use the client default.
 * @param requestCharge request units charged for the operation.
 */
private void fillClientTelemetry(CosmosAsyncClient cosmosAsyncClient,
CosmosDiagnostics cosmosDiagnostics,
int statusCode,
Integer objectSize,
String containerId,
String databaseId,
OperationType operationType,
ResourceType resourceType,
ConsistencyLevel consistencyLevel,
float requestCharge) {
ClientTelemetry telemetry = BridgeInternal.getContextClient(cosmosAsyncClient).getClientTelemetry();
// Latency metric: histograms are keyed by the full report payload (region, consistency, op, ...).
ReportPayload reportPayloadLatency = createReportPayload(cosmosAsyncClient, cosmosDiagnostics,
statusCode, objectSize, containerId, databaseId
, operationType, resourceType, consistencyLevel, ClientTelemetry.REQUEST_LATENCY_NAME,
ClientTelemetry.REQUEST_LATENCY_UNIT);
ConcurrentDoubleHistogram latencyHistogram = telemetry.getClientTelemetryInfo().getOperationInfoMap().get(reportPayloadLatency);
if (latencyHistogram != null) {
// Duration is recorded in microseconds (nanos / 1000).
ClientTelemetry.recordValue(latencyHistogram, cosmosDiagnostics.getDuration().toNanos()/1000);
} else {
// First observation for this bucket: success and failure use different histogram precisions.
if (statusCode >= HttpConstants.StatusCodes.MINIMUM_SUCCESS_STATUSCODE && statusCode <= HttpConstants.StatusCodes.MAXIMUM_SUCCESS_STATUSCODE) {
latencyHistogram = new ConcurrentDoubleHistogram(ClientTelemetry.REQUEST_LATENCY_MAX_MICRO_SEC, ClientTelemetry.REQUEST_LATENCY_SUCCESS_PRECISION);
} else {
latencyHistogram = new ConcurrentDoubleHistogram(ClientTelemetry.REQUEST_LATENCY_MAX_MICRO_SEC, ClientTelemetry.REQUEST_LATENCY_FAILURE_PRECISION);
}
latencyHistogram.setAutoResize(true);
ClientTelemetry.recordValue(latencyHistogram, cosmosDiagnostics.getDuration().toNanos()/1000);
telemetry.getClientTelemetryInfo().getOperationInfoMap().put(reportPayloadLatency, latencyHistogram);
}
// Request-charge metric: same bucketing, fixed max/precision.
ReportPayload reportPayloadRequestCharge = createReportPayload(cosmosAsyncClient, cosmosDiagnostics,
statusCode, objectSize, containerId, databaseId
, operationType, resourceType, consistencyLevel, ClientTelemetry.REQUEST_CHARGE_NAME, ClientTelemetry.REQUEST_CHARGE_UNIT);
ConcurrentDoubleHistogram requestChargeHistogram = telemetry.getClientTelemetryInfo().getOperationInfoMap().get(reportPayloadRequestCharge);
if (requestChargeHistogram != null) {
ClientTelemetry.recordValue(requestChargeHistogram, requestCharge);
} else {
requestChargeHistogram = new ConcurrentDoubleHistogram(ClientTelemetry.REQUEST_CHARGE_MAX, ClientTelemetry.REQUEST_CHARGE_PRECISION);
requestChargeHistogram.setAutoResize(true);
ClientTelemetry.recordValue(requestChargeHistogram, requestCharge);
telemetry.getClientTelemetryInfo().getOperationInfoMap().put(reportPayloadRequestCharge,
requestChargeHistogram);
}
}
/**
 * Builds the {@link ReportPayload} used as the bucket key (and report row) for a client
 * telemetry metric, combining the operation descriptors with the metric name and unit.
 *
 * @param cosmosAsyncClient client used to resolve the default consistency level.
 * @param cosmosDiagnostics diagnostics supplying the regions contacted.
 * @param statusCode service status code of the operation.
 * @param objectSize payload size in bytes, or {@code null} when unknown.
 * @param containerId container name.
 * @param databaseId database name.
 * @param operationType operation descriptor.
 * @param resourceType resource descriptor.
 * @param consistencyLevel effective consistency, or {@code null} for the client default.
 * @param metricsName name of the metric this payload describes.
 * @param unitName unit of the metric this payload describes.
 * @return the populated payload.
 */
private ReportPayload createReportPayload(CosmosAsyncClient cosmosAsyncClient,
                                          CosmosDiagnostics cosmosDiagnostics,
                                          int statusCode,
                                          Integer objectSize,
                                          String containerId,
                                          String databaseId,
                                          OperationType operationType,
                                          ResourceType resourceType,
                                          ConsistencyLevel consistencyLevel,
                                          String metricsName,
                                          String unitName) {
    ReportPayload payload = new ReportPayload(metricsName, unitName);
    payload.setRegionsContacted(BridgeInternal.getRegionsContacted(cosmosDiagnostics).toString());
    if (consistencyLevel == null) {
        // Fall back to the consistency configured on the client.
        payload.setConsistency(BridgeInternal.getContextClient(cosmosAsyncClient).getConsistencyLevel());
    } else {
        payload.setConsistency(consistencyLevel);
    }
    if (objectSize != null) {
        payload.setGreaterThan1Kb(objectSize > ClientTelemetry.ONE_KB_TO_BYTES);
    }
    payload.setDatabaseName(databaseId);
    payload.setContainerName(containerId);
    payload.setOperation(operationType);
    payload.setResource(resourceType);
    payload.setStatusCode(statusCode);
    return payload;
}
/**
 * Attaches the sections of {@link CosmosDiagnostics} to the current tracing span as timestamped
 * events: store responses, supplemental store responses, gateway statistics, retry context,
 * address resolution, serialization diagnostics, regions contacted, system information and
 * client configuration. Each event carries the serialized JSON under the {@code JSON} attribute.
 *
 * @param cosmosDiagnostics diagnostics to attach; no-op when {@code null}.
 * @param context call metadata carrying the span to attach events to.
 * @throws JsonProcessingException when a diagnostics section cannot be serialized.
 */
private void addDiagnosticsOnTracerEvent(CosmosDiagnostics cosmosDiagnostics, Context context) throws JsonProcessingException {
    if (cosmosDiagnostics == null) {
        return;
    }
    ClientSideRequestStatistics clientSideRequestStatistics =
        BridgeInternal.getClientSideRequestStatics(cosmosDiagnostics);
    Map<String, Object> attributes;
    int diagnosticsCounter = 1;
    for (ClientSideRequestStatistics.StoreResponseStatistics storeResponseStatistics :
        clientSideRequestStatistics.getResponseStatisticsList()) {
        attributes = new HashMap<>();
        attributes.put(JSON_STRING, mapper.writeValueAsString(storeResponseStatistics));
        Iterator<RequestTimeline.Event> eventIterator = null;
        try {
            if (storeResponseStatistics.getStoreResult() != null) {
                eventIterator =
                    DirectBridgeInternal.getRequestTimeline(storeResponseStatistics.getStoreResult().toResponse()).iterator();
            }
        } catch (CosmosException ex) {
            // A failed store result throws on toResponse(); the exception still carries the timeline.
            eventIterator = BridgeInternal.getRequestTimeline(ex).iterator();
        }
        OffsetDateTime requestStartTime = eventStartTimeOrDefault(eventIterator,
            OffsetDateTime.ofInstant(storeResponseStatistics.getRequestResponseTimeUTC(), ZoneOffset.UTC));
        this.addEvent("StoreResponse" + diagnosticsCounter++, attributes, requestStartTime, context);
    }
    diagnosticsCounter = 1;
    for (ClientSideRequestStatistics.StoreResponseStatistics statistics :
        ClientSideRequestStatistics.getCappedSupplementalResponseStatisticsList(clientSideRequestStatistics.getSupplementalResponseStatisticsList())) {
        attributes = new HashMap<>();
        attributes.put(JSON_STRING, mapper.writeValueAsString(statistics));
        Iterator<RequestTimeline.Event> eventIterator = null;
        if (statistics.getStoreResult() != null) {
            eventIterator =
                DirectBridgeInternal.getRequestTimeline(statistics.getStoreResult().toResponse()).iterator();
        }
        OffsetDateTime requestStartTime = eventStartTimeOrDefault(eventIterator,
            OffsetDateTime.ofInstant(statistics.getRequestResponseTimeUTC(), ZoneOffset.UTC));
        this.addEvent("Supplemental StoreResponse" + diagnosticsCounter++, attributes, requestStartTime, context);
    }
    if (clientSideRequestStatistics.getGatewayStatistics() != null) {
        attributes = new HashMap<>();
        attributes.put(JSON_STRING,
            mapper.writeValueAsString(clientSideRequestStatistics.getGatewayStatistics()));
        Iterator<RequestTimeline.Event> eventIterator = null;
        if (clientSideRequestStatistics.getGatewayStatistics().getRequestTimeline() != null) {
            eventIterator = clientSideRequestStatistics.getGatewayStatistics().getRequestTimeline().iterator();
        }
        OffsetDateTime requestStartTime = eventStartTimeOrDefault(eventIterator,
            OffsetDateTime.ofInstant(clientSideRequestStatistics.getRequestStartTimeUTC(), ZoneOffset.UTC));
        this.addEvent("GatewayStatistics", attributes, requestStartTime, context);
    }
    if (clientSideRequestStatistics.getRetryContext().getRetryStartTime() != null) {
        attributes = new HashMap<>();
        attributes.put(JSON_STRING,
            mapper.writeValueAsString(clientSideRequestStatistics.getRetryContext()));
        this.addEvent("Retry Context", attributes,
            OffsetDateTime.ofInstant(clientSideRequestStatistics.getRetryContext().getRetryStartTime(),
                ZoneOffset.UTC), context);
    }
    diagnosticsCounter = 1;
    for (ClientSideRequestStatistics.AddressResolutionStatistics addressResolutionStatistics :
        clientSideRequestStatistics.getAddressResolutionStatistics().values()) {
        attributes = new HashMap<>();
        attributes.put(JSON_STRING, mapper.writeValueAsString(addressResolutionStatistics));
        this.addEvent("AddressResolutionStatistics" + diagnosticsCounter++, attributes,
            OffsetDateTime.ofInstant(addressResolutionStatistics.getStartTimeUTC(), ZoneOffset.UTC), context);
    }
    if (clientSideRequestStatistics.getSerializationDiagnosticsContext().serializationDiagnosticsList != null) {
        for (SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics :
            clientSideRequestStatistics.getSerializationDiagnosticsContext().serializationDiagnosticsList) {
            attributes = new HashMap<>();
            attributes.put(JSON_STRING, mapper.writeValueAsString(serializationDiagnostics));
            this.addEvent("SerializationDiagnostics " + serializationDiagnostics.serializationType, attributes,
                OffsetDateTime.ofInstant(serializationDiagnostics.startTimeUTC, ZoneOffset.UTC), context);
        }
    }
    // The remaining sections are all stamped with the overall request start time.
    OffsetDateTime overallStartTime =
        OffsetDateTime.ofInstant(clientSideRequestStatistics.getRequestStartTimeUTC(), ZoneOffset.UTC);
    attributes = new HashMap<>();
    attributes.put(JSON_STRING,
        mapper.writeValueAsString(clientSideRequestStatistics.getRegionsContacted()));
    this.addEvent("RegionContacted", attributes, overallStartTime, context);
    attributes = new HashMap<>();
    attributes.put(JSON_STRING,
        mapper.writeValueAsString(ClientSideRequestStatistics.fetchSystemInformation()));
    this.addEvent("SystemInformation", attributes, overallStartTime, context);
    attributes = new HashMap<>();
    attributes.put(JSON_STRING,
        mapper.writeValueAsString(clientSideRequestStatistics.getDiagnosticsClientContext()));
    this.addEvent("ClientCfgs", attributes, overallStartTime, context);
}

/**
 * Scans a request timeline for the "created" event and returns its start time; falls back to
 * the supplied default when the iterator is {@code null} or no such event exists.
 * Extracted from three identical inline loops.
 */
private static OffsetDateTime eventStartTimeOrDefault(Iterator<RequestTimeline.Event> eventIterator,
                                                      OffsetDateTime defaultStartTime) {
    if (eventIterator != null) {
        while (eventIterator.hasNext()) {
            RequestTimeline.Event event = eventIterator.next();
            if ("created".equals(event.getName())) {
                return OffsetDateTime.ofInstant(event.getStartTime(), ZoneOffset.UTC);
            }
        }
    }
    return defaultStartTime;
}
} |
I'm not sure that you need to disable NotBefore checking, but it won't hurt to have it. | void testAttestSgxEnclaveAsync(HttpClient httpClient, String clientUri) {
AttestationClientBuilder attestationBuilder = getAttestationBuilder(httpClient, clientUri);
AttestationAsyncClient client = attestationBuilder.buildAsyncClient();
BinaryData decodedRuntimeData = BinaryData.fromBytes(Base64.getUrlDecoder().decode(runtimeData));
BinaryData decodedOpenEnclaveReport = BinaryData.fromBytes(Base64.getUrlDecoder().decode(openEnclaveReport));
BinaryData sgxQuote = BinaryData.fromBytes(Arrays.copyOfRange(decodedOpenEnclaveReport.toBytes(), 0x10, decodedOpenEnclaveReport.toBytes().length));
final AtomicBoolean callbackCalled = new AtomicBoolean(false);
AttestationOptions request = new AttestationOptions(sgxQuote)
.setRunTimeData(new AttestationData(decodedRuntimeData, AttestationDataInterpretation.BINARY))
.setValidationOptions(new AttestationTokenValidationOptions()
.setValidationCallback((token, signer) -> {
callbackCalled.set(true);
logger.info("In validation callback, checking token...");
logger.info(String.format(" Token issuer: %s", token.getIssuer()));
if (!interceptorManager.isPlaybackMode()) {
logger.info(String.format(" Token was issued at: %tc", token.getIssuedAt().getEpochSecond()));
logger.info(String.format(" Token expires at: %tc", token.getExpiresOn().getEpochSecond()));
if (!token.getIssuer().equals(clientUri)) {
logger.error(String.format("Token issuer %s does not match expected issuer %s",
token.getIssuer(), clientUri
));
throw new RuntimeException(String.format("Issuer Mismatch: found %s, expected %s", token.getIssuer(), clientUri));
}
logger.info(String.format("Issuer of signing certificate is: %s", signer.getCertificates().get(0).getIssuerDN().getName()));
}
})
.setValidateNotBefore(getTestMode() != TestMode.PLAYBACK)
.setValidateExpiresOn(getTestMode() != TestMode.PLAYBACK));
StepVerifier.create(client.attestSgxEnclave(request))
.assertNext(result -> {
assertTrue(callbackCalled.get());
verifyAttestationResult(clientUri, result, decodedRuntimeData, false);
})
.expectComplete()
.verify();
} | .setValidateNotBefore(getTestMode() != TestMode.PLAYBACK) | void testAttestSgxEnclaveAsync(HttpClient httpClient, String clientUri) {
AttestationClientBuilder attestationBuilder = getAttestationBuilder(httpClient, clientUri);
AttestationAsyncClient client = attestationBuilder.buildAsyncClient();
BinaryData decodedRuntimeData = BinaryData.fromBytes(Base64.getUrlDecoder().decode(runtimeData));
BinaryData decodedOpenEnclaveReport = BinaryData.fromBytes(Base64.getUrlDecoder().decode(openEnclaveReport));
BinaryData sgxQuote = BinaryData.fromBytes(Arrays.copyOfRange(decodedOpenEnclaveReport.toBytes(), 0x10, decodedOpenEnclaveReport.toBytes().length));
final AtomicBoolean callbackCalled = new AtomicBoolean(false);
AttestationOptions request = new AttestationOptions(sgxQuote)
.setRunTimeData(new AttestationData(decodedRuntimeData, AttestationDataInterpretation.BINARY))
.setValidationOptions(new AttestationTokenValidationOptions()
.setValidationCallback((token, signer) -> {
callbackCalled.set(true);
logger.info("In validation callback, checking token...");
logger.info(String.format(" Token issuer: %s", token.getIssuer()));
if (!interceptorManager.isPlaybackMode()) {
logger.info(String.format(" Token was issued at: %tc", token.getIssuedAt().getEpochSecond()));
logger.info(String.format(" Token expires at: %tc", token.getExpiresOn().getEpochSecond()));
if (!token.getIssuer().equals(clientUri)) {
logger.error(String.format("Token issuer %s does not match expected issuer %s",
token.getIssuer(), clientUri
));
throw new RuntimeException(String.format("Issuer Mismatch: found %s, expected %s", token.getIssuer(), clientUri));
}
logger.info(String.format("Issuer of signing certificate is: %s", signer.getCertificates().get(0).getIssuerDN().getName()));
}
})
.setValidateExpiresOn(getTestMode() != TestMode.PLAYBACK));
StepVerifier.create(client.attestSgxEnclave(request))
.assertNext(result -> {
assertTrue(callbackCalled.get());
verifyAttestationResult(clientUri, result, decodedRuntimeData, false);
})
.expectComplete()
.verify();
} | class AttestationTest extends AttestationClientTestBase {
private static final String DISPLAY_NAME_WITH_ARGUMENTS = "{displayName} with [{arguments}]";
private final String runtimeData =
"CiAgICAgICAgewogI"
+ "CAgICAgICAgICAiandrIiA6IHsKICAgICAgICAgICAgICAgICJrdHkiOiJFQyIsCiAg"
+ "ICAgICAgICAgICAgICAidXNlIjoic2lnIiwKICAgICAgICAgICAgICAgICJjcnYiOiJ"
+ "QLTI1NiIsCiAgICAgICAgICAgICAgICAieCI6IjE4d0hMZUlnVzl3Vk42VkQxVHhncH"
+ "F5MkxzellrTWY2SjhualZBaWJ2aE0iLAogICAgICAgICAgICAgICAgInkiOiJjVjRkU"
+ "zRVYUxNZ1BfNGZZNGo4aXI3Y2wxVFhsRmRBZ2N4NTVvN1RrY1NBIgogICAgICAgICAg"
+ "ICB9CiAgICAgICAgfQogICAgICAgIA";
private final String openEnclaveReport =
"AQAAAAIAAADkEQAAAAAAAAMAAg"
+ "AAAAAABQAKAJOacjP3nEyplAoNs5V_Bgc42MPzGo7hPWS_h-3tExJrAAAAABERAwX_g"
+ "AYAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAUAAAAAAAAA"
+ "BwAAAAAAAAC3eSAmGL7LY2do5dkC8o1SQiJzX6-1OeqboHw_wXGhwgAAAAAAAAAAAAA"
+ "AAAAAAAAAAAAAAAAAAAAAAAAAAAAALBpElSroIHE1xsKbdbjAKTcu6UtnfhXCC9QjQP"
+ "ENQaoAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"
+ "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAB"
+ "AAEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"
+ "AAAAAAAAAAAAAAAAA7RGp65ffwXBToyppkucdBPfsmW5FUZq3EJNq-0j5BB0AAAAAAA"
+ "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAADAQAAB4iv_XjOJsrFMrPvIYOBCeMR2q6"
+ "xB08KluTNAtIgpZQUIzLNyy78Gmb5LE77UIVye2sao77dOGiz3wP2f5jhEE5iovgPhy"
+ "6-Qg8JQkqe8XJI6B5ZlWsfq3E7u9EvH7ZZ33MihT7aM-sXca4u92L8OIhpM2cfJguOS"
+ "AS3Q4pR4NdRERAwX_gAYAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"
+ "AAAAAAABUAAAAAAAAABwAAAAAAAAA_sKzghp0uMPKOhtcMdmQDpU-7zWWO7ODhuUipF"
+ "VkXQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAjE9XddeWUD6WE393xoqC"
+ "mgBWrI3tcBQLCBsJRJDFe_8AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"
+ "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"
+ "AAAAAAAAAAAAAAAAABAAUAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"
+ "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAD9rOmAu-jSSf1BAj_cC0mu7YCnx4QosD"
+ "78yj3sQX81IAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAH5Au8JZ_dpXiLY"
+ "aE1TtyGjGz0dtFZa7eGooRGTQzoJJuR8Xj-zUvyCKE4ABy0pajfE8lOGSUHuJoifisJ"
+ "NAhg4gAAABAgMEBQYHCAkKCwwNDg8QERITFBUWFxgZGhscHR4fBQDIDQAALS0tLS1CR"
+ "UdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUVmekNDQkNhZ0F3SUJBZ0lVRk5xSnZZZTU4"
+ "ZXlpUjI2Yzd0L2lxU0pNYnFNd0NnWUlLb1pJemowRUF3SXcKY1RFak1DRUdBMVVFQXd"
+ "3YVNXNTBaV3dnVTBkWUlGQkRTeUJRY205alpYTnpiM0lnUTBFeEdqQVlCZ05WQkFvTQ"
+ "pFVWx1ZEdWc0lFTnZjbkJ2Y21GMGFXOXVNUlF3RWdZRFZRUUhEQXRUWVc1MFlTQkRiR"
+ "0Z5WVRFTE1Ba0dBMVVFCkNBd0NRMEV4Q3pBSkJnTlZCQVlUQWxWVE1CNFhEVEl4TURR"
+ "eU1USXdOVGt6T0ZvWERUSTRNRFF5TVRJd05Ua3oKT0Zvd2NERWlNQ0FHQTFVRUF3d1p"
+ "TVzUwWld3Z1UwZFlJRkJEU3lCRFpYSjBhV1pwWTJGMFpURWFNQmdHQTFVRQpDZ3dSU1"
+ "c1MFpXd2dRMjl5Y0c5eVlYUnBiMjR4RkRBU0JnTlZCQWNNQzFOaGJuUmhJRU5zWVhKa"
+ "E1Rc3dDUVlEClZRUUlEQUpEUVRFTE1Ba0dBMVVFQmhNQ1ZWTXdXVEFUQmdjcWhrak9Q"
+ "UUlCQmdncWhrak9QUU1CQndOQ0FBUTgKU2V1NWV4WCtvMGNkclhkeEtHMGEvQXRzdnV"
+ "lNVNoUFpmOHgwa2czc0xSM2E5TzVHWWYwcW1XSkptL0c4bzZyVgpvbVI2Nmh3cFJXNl"
+ "pqSm9ocXdvT280SUNtekNDQXBjd0h3WURWUjBqQkJnd0ZvQVUwT2lxMm5YWCtTNUpGN"
+ "Wc4CmV4UmwwTlh5V1Uwd1h3WURWUjBmQkZnd1ZqQlVvRktnVUlaT2FIUjBjSE02THk5"
+ "aGNHa3VkSEoxYzNSbFpITmwKY25acFkyVnpMbWx1ZEdWc0xtTnZiUzl6WjNndlkyVnl"
+ "kR2xtYVdOaGRHbHZiaTkyTWk5d1kydGpjbXcvWTJFOQpjSEp2WTJWemMyOXlNQjBHQT"
+ "FVZERnUVdCQlFzbnhWelhVWnhwRkd5YUtXdzhWZmdOZXBjcHpBT0JnTlZIUThCCkFmO"
+ "EVCQU1DQnNBd0RBWURWUjBUQVFIL0JBSXdBRENDQWRRR0NTcUdTSWI0VFFFTkFRU0NB"
+ "Y1V3Z2dIQk1CNEcKQ2lxR1NJYjRUUUVOQVFFRUVEeEI4dUNBTVU0bmw1ZlBFaktxdG8"
+ "wd2dnRmtCZ29xaGtpRytFMEJEUUVDTUlJQgpWREFRQmdzcWhraUcrRTBCRFFFQ0FRSU"
+ "JFVEFRQmdzcWhraUcrRTBCRFFFQ0FnSUJFVEFRQmdzcWhraUcrRTBCCkRRRUNBd0lCQ"
+ "WpBUUJnc3Foa2lHK0UwQkRRRUNCQUlCQkRBUUJnc3Foa2lHK0UwQkRRRUNCUUlCQVRB"
+ "UkJnc3EKaGtpRytFMEJEUUVDQmdJQ0FJQXdFQVlMS29aSWh2aE5BUTBCQWdjQ0FRWXd"
+ "FQVlMS29aSWh2aE5BUTBCQWdnQwpBUUF3RUFZTEtvWklodmhOQVEwQkFna0NBUUF3RU"
+ "FZTEtvWklodmhOQVEwQkFnb0NBUUF3RUFZTEtvWklodmhOCkFRMEJBZ3NDQVFBd0VBW"
+ "UxLb1pJaHZoTkFRMEJBZ3dDQVFBd0VBWUxLb1pJaHZoTkFRMEJBZzBDQVFBd0VBWUwK"
+ "S29aSWh2aE5BUTBCQWc0Q0FRQXdFQVlMS29aSWh2aE5BUTBCQWc4Q0FRQXdFQVlMS29"
+ "aSWh2aE5BUTBCQWhBQwpBUUF3RUFZTEtvWklodmhOQVEwQkFoRUNBUW93SHdZTEtvWk"
+ "lodmhOQVEwQkFoSUVFQkVSQWdRQmdBWUFBQUFBCkFBQUFBQUF3RUFZS0tvWklodmhOQ"
+ "VEwQkF3UUNBQUF3RkFZS0tvWklodmhOQVEwQkJBUUdBSkJ1MVFBQU1BOEcKQ2lxR1NJ"
+ "YjRUUUVOQVFVS0FRQXdDZ1lJS29aSXpqMEVBd0lEUndBd1JBSWdjREZEZHl1UFRHRVR"
+ "ORm5BU0QzOApDWTNSNmlBREpEVHZBbHZTWDNIekk4a0NJRDZsVm1DWklYUHk4ekpKMW"
+ "gvMnJ1NjJsdlVVWDJJaU1ibVFOUEEwClBzMC8KLS0tLS1FTkQgQ0VSVElGSUNBVEUtL"
+ "S0tLQotLS0tLUJFR0lOIENFUlRJRklDQVRFLS0tLS0KTUlJQ2x6Q0NBajZnQXdJQkFn"
+ "SVZBTkRvcXRwMTEva3VTUmVZUEhzVVpkRFY4bGxOTUFvR0NDcUdTTTQ5QkFNQwpNR2d"
+ "4R2pBWUJnTlZCQU1NRVVsdWRHVnNJRk5IV0NCU2IyOTBJRU5CTVJvd0dBWURWUVFLRE"
+ "JGSmJuUmxiQ0JECmIzSndiM0poZEdsdmJqRVVNQklHQTFVRUJ3d0xVMkZ1ZEdFZ1Eye"
+ "GhjbUV4Q3pBSkJnTlZCQWdNQWtOQk1Rc3cKQ1FZRFZRUUdFd0pWVXpBZUZ3MHhPREEx"
+ "TWpFeE1EUTFNRGhhRncwek16QTFNakV4TURRMU1EaGFNSEV4SXpBaApCZ05WQkFNTUd"
+ "rbHVkR1ZzSUZOSFdDQlFRMHNnVUhKdlkyVnpjMjl5SUVOQk1Sb3dHQVlEVlFRS0RCRk"
+ "piblJsCmJDQkRiM0p3YjNKaGRHbHZiakVVTUJJR0ExVUVCd3dMVTJGdWRHRWdRMnhoY"
+ "21FeEN6QUpCZ05WQkFnTUFrTkIKTVFzd0NRWURWUVFHRXdKVlV6QlpNQk1HQnlxR1NN"
+ "NDlBZ0VHQ0NxR1NNNDlBd0VIQTBJQUJMOXErTk1wMklPZwp0ZGwxYmsvdVdaNStUR1F"
+ "tOGFDaTh6NzhmcytmS0NRM2QrdUR6WG5WVEFUMlpoRENpZnlJdUp3dk4zd05CcDlpCk"
+ "hCU1NNSk1KckJPamdic3dnYmd3SHdZRFZSMGpCQmd3Rm9BVUltVU0xbHFkTkluemc3U"
+ "1ZVcjlRR3prbkJxd3cKVWdZRFZSMGZCRXN3U1RCSG9FV2dRNFpCYUhSMGNITTZMeTlq"
+ "WlhKMGFXWnBZMkYwWlhNdWRISjFjM1JsWkhObApjblpwWTJWekxtbHVkR1ZzTG1OdmJ"
+ "TOUpiblJsYkZOSFdGSnZiM1JEUVM1amNtd3dIUVlEVlIwT0JCWUVGTkRvCnF0cDExL2"
+ "t1U1JlWVBIc1VaZERWOGxsTk1BNEdBMVVkRHdFQi93UUVBd0lCQmpBU0JnTlZIUk1CQ"
+ "WY4RUNEQUcKQVFIL0FnRUFNQW9HQ0NxR1NNNDlCQU1DQTBjQU1FUUNJQy85ais4NFQr"
+ "SHp0Vk8vc09RQldKYlNkKy8ydWV4Swo0K2FBMGpjRkJMY3BBaUEzZGhNckY1Y0Q1MnQ"
+ "2RnFNdkFJcGo4WGRHbXkyYmVlbGpMSksrcHpwY1JBPT0KLS0tLS1FTkQgQ0VSVElGSU"
+ "NBVEUtLS0tLQotLS0tLUJFR0lOIENFUlRJRklDQVRFLS0tLS0KTUlJQ2pqQ0NBalNnQ"
+ "XdJQkFnSVVJbVVNMWxxZE5JbnpnN1NWVXI5UUd6a25CcXd3Q2dZSUtvWkl6ajBFQXdJ"
+ "dwphREVhTUJnR0ExVUVBd3dSU1c1MFpXd2dVMGRZSUZKdmIzUWdRMEV4R2pBWUJnTlZ"
+ "CQW9NRVVsdWRHVnNJRU52CmNuQnZjbUYwYVc5dU1SUXdFZ1lEVlFRSERBdFRZVzUwWV"
+ "NCRGJHRnlZVEVMTUFrR0ExVUVDQXdDUTBFeEN6QUoKQmdOVkJBWVRBbFZUTUI0WERUR"
+ "TRNRFV5TVRFd05ERXhNVm9YRFRNek1EVXlNVEV3TkRFeE1Gb3dhREVhTUJnRwpBMVVF"
+ "QXd3UlNXNTBaV3dnVTBkWUlGSnZiM1FnUTBFeEdqQVlCZ05WQkFvTUVVbHVkR1ZzSUV"
+ "OdmNuQnZjbUYwCmFXOXVNUlF3RWdZRFZRUUhEQXRUWVc1MFlTQkRiR0Z5WVRFTE1Ba0"
+ "dBMVVFQ0F3Q1EwRXhDekFKQmdOVkJBWVQKQWxWVE1Ga3dFd1lIS29aSXpqMENBUVlJS"
+ "29aSXpqMERBUWNEUWdBRUM2bkV3TURJWVpPai9pUFdzQ3phRUtpNwoxT2lPU0xSRmhX"
+ "R2pibkJWSmZWbmtZNHUzSWprRFlZTDBNeE80bXFzeVlqbEJhbFRWWXhGUDJzSkJLNXp"
+ "sS09CCnV6Q0J1REFmQmdOVkhTTUVHREFXZ0JRaVpReldXcDAwaWZPRHRKVlN2MUFiT1"
+ "NjR3JEQlNCZ05WSFI4RVN6QkoKTUVlZ1JhQkRoa0ZvZEhSd2N6b3ZMMk5sY25ScFptb"
+ "GpZWFJsY3k1MGNuVnpkR1ZrYzJWeWRtbGpaWE11YVc1MApaV3d1WTI5dEwwbHVkR1Zz"
+ "VTBkWVVtOXZkRU5CTG1OeWJEQWRCZ05WSFE0RUZnUVVJbVVNMWxxZE5JbnpnN1NWClV"
+ "yOVFHemtuQnF3d0RnWURWUjBQQVFIL0JBUURBZ0VHTUJJR0ExVWRFd0VCL3dRSU1BWU"
+ "JBZjhDQVFFd0NnWUkKS29aSXpqMEVBd0lEU0FBd1JRSWdRUXMvMDhyeWNkUGF1Q0ZrO"
+ "FVQUVhDTUFsc2xvQmU3TndhUUdUY2RwYTBFQwpJUUNVdDhTR3Z4S21qcGNNL3owV1A5"
+ "RHZvOGgyazVkdTFpV0RkQmtBbiswaWlBPT0KLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0"
+ "tLQoA";
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getAttestationClients")
void testAttestSgxEnclave(HttpClient httpClient, String clientUri) {
    // Synchronous SGX attestation with runtime data interpreted as raw binary.
    AttestationClient client = getAttestationBuilder(httpClient, clientUri).buildClient();
    BinaryData decodedRuntimeData = BinaryData.fromBytes(Base64.getUrlDecoder().decode(runtimeData));
    byte[] oeBytes = Base64.getUrlDecoder().decode(openEnclaveReport);
    // The SGX quote is the OpenEnclave report with its leading 16-byte OE header removed.
    BinaryData sgxQuote = BinaryData.fromBytes(Arrays.copyOfRange(oeBytes, 0x10, oeBytes.length));
    AttestationOptions request = new AttestationOptions(sgxQuote)
        .setRunTimeData(new AttestationData(decodedRuntimeData, AttestationDataInterpretation.BINARY));
    // Binary runtime data should round-trip through the attestation result.
    verifyAttestationResult(clientUri, client.attestSgxEnclave(request), decodedRuntimeData, false);
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getAttestationClients")
void testAttestSgxEnclaveNoRuntimeData(HttpClient httpClient, String clientUri) {
    // Attest an SGX quote without attaching any runtime data.
    AttestationClientBuilder attestationBuilder = getAttestationBuilder(httpClient, clientUri);
    AttestationClient client = attestationBuilder.buildClient();
    // FIX: removed a `decodedRuntimeData` local that was decoded but never used —
    // this scenario deliberately sends no runtime data.
    BinaryData decodedOpenEnclaveReport = BinaryData.fromBytes(Base64.getUrlDecoder().decode(openEnclaveReport));
    // The SGX quote is the OpenEnclave report with its leading 16-byte OE header removed.
    BinaryData sgxQuote = BinaryData.fromBytes(Arrays.copyOfRange(decodedOpenEnclaveReport.toBytes(), 0x10, decodedOpenEnclaveReport.toBytes().length));
    AttestationResult result = client.attestSgxEnclave(sgxQuote);
    // No runtime data supplied, so none is expected back in the result.
    verifyAttestationResult(clientUri, result, null, false);
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getAttestationClients")
void testAttestSgxEnclaveRuntimeJson(HttpClient httpClient, String clientUri) {
    // Synchronous SGX attestation with runtime data interpreted as JSON claims.
    AttestationClient client = getAttestationBuilder(httpClient, clientUri).buildClient();
    BinaryData decodedRuntimeData = BinaryData.fromBytes(Base64.getUrlDecoder().decode(runtimeData));
    byte[] oeBytes = Base64.getUrlDecoder().decode(openEnclaveReport);
    // Strip the 16-byte OpenEnclave header to obtain the raw SGX quote.
    BinaryData sgxQuote = BinaryData.fromBytes(Arrays.copyOfRange(oeBytes, 0x10, oeBytes.length));
    AttestationOptions request = new AttestationOptions(sgxQuote)
        .setRunTimeData(new AttestationData(decodedRuntimeData, AttestationDataInterpretation.JSON));
    // Expect the runtime data to surface as parsed JSON claims.
    verifyAttestationResult(clientUri, client.attestSgxEnclave(request), decodedRuntimeData, true);
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getAttestationClients")
void testAttestSgxEnclaveDraftPolicy(HttpClient httpClient, String clientUri) {
    // Attest an SGX quote against a caller-supplied draft policy; the service
    // responds with an unsecured attestation token in that mode.
    AttestationClientBuilder attestationBuilder = getAttestationBuilder(httpClient, clientUri);
    AttestationClient client = attestationBuilder.buildClient();
    BinaryData decodedRuntimeData = BinaryData.fromBytes(Base64.getUrlDecoder().decode(runtimeData));
    BinaryData decodedOpenEnclaveReport = BinaryData.fromBytes(Base64.getUrlDecoder().decode(openEnclaveReport));
    // The SGX quote is the OpenEnclave report with its leading 16-byte OE header removed.
    BinaryData sgxQuote = BinaryData.fromBytes(Arrays.copyOfRange(decodedOpenEnclaveReport.toBytes(), 0x10, decodedOpenEnclaveReport.toBytes().length));
    AttestationOptions request = new AttestationOptions(sgxQuote)
        .setDraftPolicyForAttestation("version=1.0; authorizationrules{=> permit();}; issuancerules{};")
        .setRunTimeData(new AttestationData(decodedRuntimeData, AttestationDataInterpretation.JSON));
    Response<AttestationResult> response = client.attestSgxEnclaveWithResponse(request, Context.NONE);
    assertTrue(response instanceof AttestationResponse);
    AttestationResponse<AttestationResult> attestResponse = (AttestationResponse<AttestationResult>) response;
    // FIX: was `getAlgorithm() == "none"`, which compares String identity and only
    // passed via interning; compare by value instead.
    assertEquals("none", attestResponse.getToken().getAlgorithm());
    verifyAttestationResult(clientUri, response.getValue(), decodedRuntimeData, true);
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getAttestationClients")
void testAttestSgxEnclaveNoRuntimeDataAsync(HttpClient httpClient, String clientUri) {
    // FIX: the @ParameterizedTest/@MethodSource annotation pair was duplicated on
    // this method; neither annotation is @Repeatable, so the duplicates would not
    // compile. Also removed a `decodedRuntimeData` local that was never used —
    // this scenario attests without runtime data.
    AttestationClientBuilder attestationBuilder = getAttestationBuilder(httpClient, clientUri);
    AttestationAsyncClient client = attestationBuilder.buildAsyncClient();
    BinaryData decodedOpenEnclaveReport = BinaryData.fromBytes(Base64.getUrlDecoder().decode(openEnclaveReport));
    // The SGX quote is the OpenEnclave report with its leading 16-byte OE header removed.
    BinaryData sgxQuote = BinaryData.fromBytes(Arrays.copyOfRange(decodedOpenEnclaveReport.toBytes(), 0x10, decodedOpenEnclaveReport.toBytes().length));
    AttestationOptions request = new AttestationOptions(sgxQuote);
    StepVerifier.create(client.attestSgxEnclave(request))
        .assertNext(result -> verifyAttestationResult(clientUri, result, null, false))
        .expectComplete()
        .verify();
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getAttestationClients")
void testAttestSgxEnclaveRuntimeJsonAsync(HttpClient httpClient, String clientUri) {
    // Asynchronous SGX attestation with JSON-interpreted runtime data.
    AttestationAsyncClient client = getAttestationBuilder(httpClient, clientUri).buildAsyncClient();
    BinaryData rtData = BinaryData.fromBytes(Base64.getUrlDecoder().decode(runtimeData));
    byte[] oeBytes = Base64.getUrlDecoder().decode(openEnclaveReport);
    // Strip the 16-byte OpenEnclave header to obtain the raw SGX quote.
    BinaryData sgxQuote = BinaryData.fromBytes(Arrays.copyOfRange(oeBytes, 0x10, oeBytes.length));
    AttestationOptions options = new AttestationOptions(sgxQuote)
        .setRunTimeData(new AttestationData(rtData, AttestationDataInterpretation.JSON));
    StepVerifier.create(client.attestSgxEnclave(options))
        .assertNext(result -> verifyAttestationResult(clientUri, result, rtData, true))
        .expectComplete()
        .verify();
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getAttestationClients")
void testAttestSgxEnclaveDraftPolicyAsync(HttpClient httpClient, String clientUri) {
    // Asynchronous SGX attestation against a caller-supplied draft policy.
    AttestationClientBuilder attestationBuilder = getAttestationBuilder(httpClient, clientUri);
    AttestationAsyncClient client = attestationBuilder.buildAsyncClient();
    BinaryData decodedRuntimeData = BinaryData.fromBytes(Base64.getUrlDecoder().decode(runtimeData));
    BinaryData decodedOpenEnclaveReport = BinaryData.fromBytes(Base64.getUrlDecoder().decode(openEnclaveReport));
    // The SGX quote is the OpenEnclave report with its leading 16-byte OE header removed.
    BinaryData sgxQuote = BinaryData.fromBytes(Arrays.copyOfRange(decodedOpenEnclaveReport.toBytes(), 0x10, decodedOpenEnclaveReport.toBytes().length));
    AttestationOptions request = new AttestationOptions(sgxQuote)
        .setDraftPolicyForAttestation("version=1.0; authorizationrules{=> permit();}; issuancerules{};")
        .setRunTimeData(new AttestationData(decodedRuntimeData, AttestationDataInterpretation.JSON));
    StepVerifier.create(client.attestSgxEnclaveWithResponse(request))
        .assertNext(response -> {
            assertTrue(response instanceof AttestationResponse);
            AttestationResponse<AttestationResult> attestResponse = (AttestationResponse<AttestationResult>) response;
            // FIX: `attestResponse` was previously cast but never used. Mirror the
            // synchronous draft-policy test: draft policies produce an unsecured
            // ("none" algorithm) attestation token.
            assertEquals("none", attestResponse.getToken().getAlgorithm());
            verifyAttestationResult(clientUri, response.getValue(), decodedRuntimeData, true);
        })
        .expectComplete()
        .verify();
}
@Test()
void testAttestationOptions() {
    // Unit test for the AttestationOptions builder: later setter calls must
    // replace earlier ones, and runtime/init-time data are tracked independently.
    BinaryData decodedRuntimeData = BinaryData.fromBytes(Base64.getUrlDecoder().decode(runtimeData));
    BinaryData decodedOpenEnclaveReport = BinaryData.fromBytes(Base64.getUrlDecoder().decode(openEnclaveReport));
    // FIX: removed an AttestationOptions local (`request1`) that was constructed
    // but never used. The double setInitTimeData below is deliberate: the second
    // call must override the first (verified by the first assertion).
    AttestationOptions request = new AttestationOptions(decodedOpenEnclaveReport)
        .setInitTimeData(new AttestationData(decodedRuntimeData, AttestationDataInterpretation.JSON))
        .setInitTimeData(new AttestationData(decodedOpenEnclaveReport, AttestationDataInterpretation.BINARY))
        .setRunTimeData(new AttestationData(BinaryData.fromBytes(new byte[]{1, 2, 3, 4, 5}), AttestationDataInterpretation.BINARY));
    // Last setInitTimeData call wins.
    assertArrayEquals(decodedOpenEnclaveReport.toBytes(), request.getInitTimeData().getData().toBytes());
    assertArrayEquals(new byte[]{1, 2, 3, 4, 5}, request.getRunTimeData().getData().toBytes());
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getAttestationClients")
void testAttestOpenEnclave(HttpClient httpClient, String clientUri) {
    // Synchronous attestation of a full OpenEnclave report with binary runtime data.
    AttestationClient client = getAttestationBuilder(httpClient, clientUri).buildClient();
    BinaryData decodedRuntimeData = BinaryData.fromBytes(Base64.getUrlDecoder().decode(runtimeData));
    BinaryData decodedOpenEnclaveReport = BinaryData.fromBytes(Base64.getUrlDecoder().decode(openEnclaveReport));
    AttestationOptions request = new AttestationOptions(decodedOpenEnclaveReport)
        .setRunTimeData(new AttestationData(decodedRuntimeData, AttestationDataInterpretation.BINARY));
    // Binary runtime data should round-trip through the attestation result.
    verifyAttestationResult(clientUri, client.attestOpenEnclave(request), decodedRuntimeData, false);
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getAttestationClients")
void testAttestOpenEnclaveNoRuntimeData(HttpClient httpClient, String clientUri) {
    // Attest an OpenEnclave report without attaching any runtime data.
    AttestationClient client = getAttestationBuilder(httpClient, clientUri).buildClient();
    BinaryData oeReport = BinaryData.fromBytes(Base64.getUrlDecoder().decode(openEnclaveReport));
    AttestationResult result = client.attestOpenEnclave(new AttestationOptions(oeReport));
    // No runtime data supplied, so none is expected back.
    verifyAttestationResult(clientUri, result, null, false);
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getAttestationClients")
void testAttestOpenEnclaveRuntimeJson(HttpClient httpClient, String clientUri) {
    // Synchronous OpenEnclave attestation with JSON-interpreted runtime data.
    AttestationClient client = getAttestationBuilder(httpClient, clientUri).buildClient();
    BinaryData rtData = BinaryData.fromBytes(Base64.getUrlDecoder().decode(runtimeData));
    BinaryData oeReport = BinaryData.fromBytes(Base64.getUrlDecoder().decode(openEnclaveReport));
    AttestationOptions options = new AttestationOptions(oeReport)
        .setRunTimeData(new AttestationData(rtData, AttestationDataInterpretation.JSON));
    // Expect the runtime data to surface as parsed JSON claims.
    verifyAttestationResult(clientUri, client.attestOpenEnclave(options), rtData, true);
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getAttestationClients")
void testAttestOpenEnclaveDraftPolicy(HttpClient httpClient, String clientUri) {
    // Attest an OpenEnclave report against a caller-supplied draft policy; the
    // service responds with an unsecured attestation token in that mode.
    AttestationClientBuilder attestationBuilder = getAttestationBuilder(httpClient, clientUri);
    AttestationClient client = attestationBuilder.buildClient();
    BinaryData decodedRuntimeData = BinaryData.fromBytes(Base64.getUrlDecoder().decode(runtimeData));
    BinaryData decodedOpenEnclaveReport = BinaryData.fromBytes(Base64.getUrlDecoder().decode(openEnclaveReport));
    AttestationOptions request = new AttestationOptions(decodedOpenEnclaveReport)
        .setDraftPolicyForAttestation("version=1.0; authorizationrules{=> permit();}; issuancerules{};")
        .setRunTimeData(new AttestationData(decodedRuntimeData, AttestationDataInterpretation.JSON));
    Response<AttestationResult> response = client.attestOpenEnclaveWithResponse(request, Context.NONE);
    assertTrue(response instanceof AttestationResponse);
    AttestationResponse<AttestationResult> attestResponse = (AttestationResponse<AttestationResult>) response;
    // FIX: was `getAlgorithm() == "none"`, which compares String identity and only
    // passed via interning; compare by value instead.
    assertEquals("none", attestResponse.getToken().getAlgorithm());
    verifyAttestationResult(clientUri, response.getValue(), decodedRuntimeData, true);
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getAttestationClients")
void testAttestOpenEnclaveAsync(HttpClient httpClient, String clientUri) {
    // Asynchronous OpenEnclave attestation with binary runtime data.
    AttestationAsyncClient client = getAttestationBuilder(httpClient, clientUri).buildAsyncClient();
    BinaryData rtData = BinaryData.fromBytes(Base64.getUrlDecoder().decode(runtimeData));
    BinaryData oeReport = BinaryData.fromBytes(Base64.getUrlDecoder().decode(openEnclaveReport));
    AttestationOptions options = new AttestationOptions(oeReport)
        .setRunTimeData(new AttestationData(rtData, AttestationDataInterpretation.BINARY));
    StepVerifier.create(client.attestOpenEnclave(options))
        .assertNext(result -> verifyAttestationResult(clientUri, result, rtData, false))
        .expectComplete()
        .verify();
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getAttestationClients")
void testAttestOpenEnclaveNoRuntimeDataAsync(HttpClient httpClient, String clientUri) {
    // Asynchronous OpenEnclave attestation with no runtime data attached.
    AttestationAsyncClient client = getAttestationBuilder(httpClient, clientUri).buildAsyncClient();
    BinaryData oeReport = BinaryData.fromBytes(Base64.getUrlDecoder().decode(openEnclaveReport));
    StepVerifier.create(client.attestOpenEnclave(oeReport))
        .assertNext(result -> verifyAttestationResult(clientUri, result, null, false))
        .expectComplete()
        .verify();
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getAttestationClients")
void testAttestOpenEnclaveRuntimeJsonAsync(HttpClient httpClient, String clientUri) {
    // Asynchronous OpenEnclave attestation with JSON-interpreted runtime data.
    AttestationAsyncClient client = getAttestationBuilder(httpClient, clientUri).buildAsyncClient();
    BinaryData rtData = BinaryData.fromBytes(Base64.getUrlDecoder().decode(runtimeData));
    BinaryData oeReport = BinaryData.fromBytes(Base64.getUrlDecoder().decode(openEnclaveReport));
    AttestationOptions options = new AttestationOptions(oeReport)
        .setRunTimeData(new AttestationData(rtData, AttestationDataInterpretation.JSON));
    StepVerifier.create(client.attestOpenEnclave(options))
        .assertNext(result -> verifyAttestationResult(clientUri, result, rtData, true))
        .expectComplete()
        .verify();
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getAttestationClients")
void testAttestOpenEnclaveDraftPolicyAsync(HttpClient httpClient, String clientUri) {
    // Asynchronous OpenEnclave attestation against a caller-supplied draft policy.
    AttestationClientBuilder attestationBuilder = getAttestationBuilder(httpClient, clientUri);
    AttestationAsyncClient client = attestationBuilder.buildAsyncClient();
    BinaryData decodedRuntimeData = BinaryData.fromBytes(Base64.getUrlDecoder().decode(runtimeData));
    BinaryData decodedOpenEnclaveReport = BinaryData.fromBytes(Base64.getUrlDecoder().decode(openEnclaveReport));
    AttestationOptions options = new AttestationOptions(decodedOpenEnclaveReport)
        .setDraftPolicyForAttestation("version=1.0; authorizationrules{=> permit();}; issuancerules{};")
        .setRunTimeData(new AttestationData(decodedRuntimeData, AttestationDataInterpretation.JSON));
    StepVerifier.create(client.attestOpenEnclaveWithResponse(options))
        .assertNext(response -> {
            assertTrue(response instanceof AttestationResponse);
            AttestationResponse<AttestationResult> attestResponse = (AttestationResponse<AttestationResult>) response;
            // FIX: `attestResponse` was previously cast but never used. Mirror the
            // synchronous draft-policy test: draft policies produce an unsecured
            // ("none" algorithm) attestation token.
            assertEquals("none", attestResponse.getToken().getAlgorithm());
            verifyAttestationResult(clientUri, response.getValue(), decodedRuntimeData, true);
        })
        .expectComplete()
        .verify();
}
private void verifyAttestationResult(String clientUri, AttestationResult result, BinaryData runtimeData, boolean expectJson) {
    // Shape checks common to every attestation result.
    assertNotNull(result.getIssuer());
    if (testContextManager.getTestMode() != TestMode.PLAYBACK) {
        // Issuer comparison is only meaningful against a live endpoint.
        Assertions.assertEquals(clientUri, result.getIssuer());
    }
    assertNotNull(result.getMrEnclave());
    assertNotNull(result.getMrSigner());
    assertNotNull(result.getSvn());
    assertNull(result.getNonce());

    if (!expectJson) {
        // Binary runtime data (when supplied) is echoed back as enclave-held data.
        if (runtimeData != null) {
            Assertions.assertArrayEquals(runtimeData.toBytes(), result.getEnclaveHeldData().toBytes());
        }
        return;
    }

    // JSON runtime data surfaces as parsed runtime claims; compare structurally
    // against the claims parsed directly from the input bytes.
    assertTrue(result.getRuntimeClaims() instanceof Map);
    @SuppressWarnings("unchecked")
    Map<String, Object> actualClaims = (Map<String, Object>) result.getRuntimeClaims();
    ObjectMapper jsonMapper = new ObjectMapper();
    @SuppressWarnings("unchecked")
    Map<String, Object> expectedClaims = assertDoesNotThrow(
        () -> (Map<String, Object>) jsonMapper.readValue(runtimeData.toBytes(), Object.class));
    assertObjectEqual(expectedClaims, actualClaims);
}
void assertObjectEqual(Map<String, Object> expected, Map<String, Object> actual) {
    // Recursively assert that `actual` contains every key of `expected`, with
    // nested maps compared structurally and leaf values compared by equals().
    for (Map.Entry<String, Object> entry : expected.entrySet()) {
        String key = entry.getKey();
        logger.verbose("Key: " + key);
        assertTrue(actual.containsKey(key));
        Object expectedValue = expected.get(key);
        if (expectedValue instanceof Map) {
            assertTrue(actual.get(key) instanceof Map);
            @SuppressWarnings("unchecked")
            Map<String, Object> expectedInner = (Map<String, Object>) expectedValue;
            @SuppressWarnings("unchecked")
            Map<String, Object> actualInner = (Map<String, Object>) actual.get(key);
            assertObjectEqual(expectedInner, actualInner);
        } else {
            assertEquals(entry.getValue(), actual.get(key));
        }
    }
}
/**
 * Placeholder: TPM attestation cannot be exercised until the setPolicy APIs
 * exist, because TPM attestation depends on an attestation policy being set first.
 *
 * NOTE(review): this is annotated @ParameterizedTest/@MethodSource but declares
 * no parameters — confirm the JUnit runner tolerates that for an empty body, or
 * drop the annotations until the test is implemented.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getAttestationClients")
void attestTpm() {
}
/**
 * Placeholder: asynchronous TPM attestation awaits the setPolicy APIs
 * (see attestTpm above for details).
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getAttestationClients")
void attestTpmAsync() {
}
/**
 * Placeholder: TPM attestation with full Response access awaits the setPolicy
 * APIs (see attestTpm above for details).
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getAttestationClients")
void attestTpmWithResponse() {
}
/**
 * Placeholder: asynchronous TPM attestation with full Response access awaits
 * the setPolicy APIs (see attestTpm above for details).
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getAttestationClients")
void attestTpmWithResponseAsync() {
}
} | class AttestationTest extends AttestationClientTestBase {
private static final String DISPLAY_NAME_WITH_ARGUMENTS = "{displayName} with [{arguments}]";
private final String runtimeData =
"CiAgICAgICAgewogI"
+ "CAgICAgICAgICAiandrIiA6IHsKICAgICAgICAgICAgICAgICJrdHkiOiJFQyIsCiAg"
+ "ICAgICAgICAgICAgICAidXNlIjoic2lnIiwKICAgICAgICAgICAgICAgICJjcnYiOiJ"
+ "QLTI1NiIsCiAgICAgICAgICAgICAgICAieCI6IjE4d0hMZUlnVzl3Vk42VkQxVHhncH"
+ "F5MkxzellrTWY2SjhualZBaWJ2aE0iLAogICAgICAgICAgICAgICAgInkiOiJjVjRkU"
+ "zRVYUxNZ1BfNGZZNGo4aXI3Y2wxVFhsRmRBZ2N4NTVvN1RrY1NBIgogICAgICAgICAg"
+ "ICB9CiAgICAgICAgfQogICAgICAgIA";
private final String openEnclaveReport =
"AQAAAAIAAADkEQAAAAAAAAMAAg"
+ "AAAAAABQAKAJOacjP3nEyplAoNs5V_Bgc42MPzGo7hPWS_h-3tExJrAAAAABERAwX_g"
+ "AYAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAUAAAAAAAAA"
+ "BwAAAAAAAAC3eSAmGL7LY2do5dkC8o1SQiJzX6-1OeqboHw_wXGhwgAAAAAAAAAAAAA"
+ "AAAAAAAAAAAAAAAAAAAAAAAAAAAAALBpElSroIHE1xsKbdbjAKTcu6UtnfhXCC9QjQP"
+ "ENQaoAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"
+ "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAB"
+ "AAEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"
+ "AAAAAAAAAAAAAAAAA7RGp65ffwXBToyppkucdBPfsmW5FUZq3EJNq-0j5BB0AAAAAAA"
+ "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAADAQAAB4iv_XjOJsrFMrPvIYOBCeMR2q6"
+ "xB08KluTNAtIgpZQUIzLNyy78Gmb5LE77UIVye2sao77dOGiz3wP2f5jhEE5iovgPhy"
+ "6-Qg8JQkqe8XJI6B5ZlWsfq3E7u9EvH7ZZ33MihT7aM-sXca4u92L8OIhpM2cfJguOS"
+ "AS3Q4pR4NdRERAwX_gAYAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"
+ "AAAAAAABUAAAAAAAAABwAAAAAAAAA_sKzghp0uMPKOhtcMdmQDpU-7zWWO7ODhuUipF"
+ "VkXQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAjE9XddeWUD6WE393xoqC"
+ "mgBWrI3tcBQLCBsJRJDFe_8AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"
+ "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"
+ "AAAAAAAAAAAAAAAAABAAUAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"
+ "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAD9rOmAu-jSSf1BAj_cC0mu7YCnx4QosD"
+ "78yj3sQX81IAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAH5Au8JZ_dpXiLY"
+ "aE1TtyGjGz0dtFZa7eGooRGTQzoJJuR8Xj-zUvyCKE4ABy0pajfE8lOGSUHuJoifisJ"
+ "NAhg4gAAABAgMEBQYHCAkKCwwNDg8QERITFBUWFxgZGhscHR4fBQDIDQAALS0tLS1CR"
+ "UdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUVmekNDQkNhZ0F3SUJBZ0lVRk5xSnZZZTU4"
+ "ZXlpUjI2Yzd0L2lxU0pNYnFNd0NnWUlLb1pJemowRUF3SXcKY1RFak1DRUdBMVVFQXd"
+ "3YVNXNTBaV3dnVTBkWUlGQkRTeUJRY205alpYTnpiM0lnUTBFeEdqQVlCZ05WQkFvTQ"
+ "pFVWx1ZEdWc0lFTnZjbkJ2Y21GMGFXOXVNUlF3RWdZRFZRUUhEQXRUWVc1MFlTQkRiR"
+ "0Z5WVRFTE1Ba0dBMVVFCkNBd0NRMEV4Q3pBSkJnTlZCQVlUQWxWVE1CNFhEVEl4TURR"
+ "eU1USXdOVGt6T0ZvWERUSTRNRFF5TVRJd05Ua3oKT0Zvd2NERWlNQ0FHQTFVRUF3d1p"
+ "TVzUwWld3Z1UwZFlJRkJEU3lCRFpYSjBhV1pwWTJGMFpURWFNQmdHQTFVRQpDZ3dSU1"
+ "c1MFpXd2dRMjl5Y0c5eVlYUnBiMjR4RkRBU0JnTlZCQWNNQzFOaGJuUmhJRU5zWVhKa"
+ "E1Rc3dDUVlEClZRUUlEQUpEUVRFTE1Ba0dBMVVFQmhNQ1ZWTXdXVEFUQmdjcWhrak9Q"
+ "UUlCQmdncWhrak9QUU1CQndOQ0FBUTgKU2V1NWV4WCtvMGNkclhkeEtHMGEvQXRzdnV"
+ "lNVNoUFpmOHgwa2czc0xSM2E5TzVHWWYwcW1XSkptL0c4bzZyVgpvbVI2Nmh3cFJXNl"
+ "pqSm9ocXdvT280SUNtekNDQXBjd0h3WURWUjBqQkJnd0ZvQVUwT2lxMm5YWCtTNUpGN"
+ "Wc4CmV4UmwwTlh5V1Uwd1h3WURWUjBmQkZnd1ZqQlVvRktnVUlaT2FIUjBjSE02THk5"
+ "aGNHa3VkSEoxYzNSbFpITmwKY25acFkyVnpMbWx1ZEdWc0xtTnZiUzl6WjNndlkyVnl"
+ "kR2xtYVdOaGRHbHZiaTkyTWk5d1kydGpjbXcvWTJFOQpjSEp2WTJWemMyOXlNQjBHQT"
+ "FVZERnUVdCQlFzbnhWelhVWnhwRkd5YUtXdzhWZmdOZXBjcHpBT0JnTlZIUThCCkFmO"
+ "EVCQU1DQnNBd0RBWURWUjBUQVFIL0JBSXdBRENDQWRRR0NTcUdTSWI0VFFFTkFRU0NB"
+ "Y1V3Z2dIQk1CNEcKQ2lxR1NJYjRUUUVOQVFFRUVEeEI4dUNBTVU0bmw1ZlBFaktxdG8"
+ "wd2dnRmtCZ29xaGtpRytFMEJEUUVDTUlJQgpWREFRQmdzcWhraUcrRTBCRFFFQ0FRSU"
+ "JFVEFRQmdzcWhraUcrRTBCRFFFQ0FnSUJFVEFRQmdzcWhraUcrRTBCCkRRRUNBd0lCQ"
+ "WpBUUJnc3Foa2lHK0UwQkRRRUNCQUlCQkRBUUJnc3Foa2lHK0UwQkRRRUNCUUlCQVRB"
+ "UkJnc3EKaGtpRytFMEJEUUVDQmdJQ0FJQXdFQVlMS29aSWh2aE5BUTBCQWdjQ0FRWXd"
+ "FQVlMS29aSWh2aE5BUTBCQWdnQwpBUUF3RUFZTEtvWklodmhOQVEwQkFna0NBUUF3RU"
+ "FZTEtvWklodmhOQVEwQkFnb0NBUUF3RUFZTEtvWklodmhOCkFRMEJBZ3NDQVFBd0VBW"
+ "UxLb1pJaHZoTkFRMEJBZ3dDQVFBd0VBWUxLb1pJaHZoTkFRMEJBZzBDQVFBd0VBWUwK"
+ "S29aSWh2aE5BUTBCQWc0Q0FRQXdFQVlMS29aSWh2aE5BUTBCQWc4Q0FRQXdFQVlMS29"
+ "aSWh2aE5BUTBCQWhBQwpBUUF3RUFZTEtvWklodmhOQVEwQkFoRUNBUW93SHdZTEtvWk"
+ "lodmhOQVEwQkFoSUVFQkVSQWdRQmdBWUFBQUFBCkFBQUFBQUF3RUFZS0tvWklodmhOQ"
+ "VEwQkF3UUNBQUF3RkFZS0tvWklodmhOQVEwQkJBUUdBSkJ1MVFBQU1BOEcKQ2lxR1NJ"
+ "YjRUUUVOQVFVS0FRQXdDZ1lJS29aSXpqMEVBd0lEUndBd1JBSWdjREZEZHl1UFRHRVR"
+ "ORm5BU0QzOApDWTNSNmlBREpEVHZBbHZTWDNIekk4a0NJRDZsVm1DWklYUHk4ekpKMW"
+ "gvMnJ1NjJsdlVVWDJJaU1ibVFOUEEwClBzMC8KLS0tLS1FTkQgQ0VSVElGSUNBVEUtL"
+ "S0tLQotLS0tLUJFR0lOIENFUlRJRklDQVRFLS0tLS0KTUlJQ2x6Q0NBajZnQXdJQkFn"
+ "SVZBTkRvcXRwMTEva3VTUmVZUEhzVVpkRFY4bGxOTUFvR0NDcUdTTTQ5QkFNQwpNR2d"
+ "4R2pBWUJnTlZCQU1NRVVsdWRHVnNJRk5IV0NCU2IyOTBJRU5CTVJvd0dBWURWUVFLRE"
+ "JGSmJuUmxiQ0JECmIzSndiM0poZEdsdmJqRVVNQklHQTFVRUJ3d0xVMkZ1ZEdFZ1Eye"
+ "GhjbUV4Q3pBSkJnTlZCQWdNQWtOQk1Rc3cKQ1FZRFZRUUdFd0pWVXpBZUZ3MHhPREEx"
+ "TWpFeE1EUTFNRGhhRncwek16QTFNakV4TURRMU1EaGFNSEV4SXpBaApCZ05WQkFNTUd"
+ "rbHVkR1ZzSUZOSFdDQlFRMHNnVUhKdlkyVnpjMjl5SUVOQk1Sb3dHQVlEVlFRS0RCRk"
+ "piblJsCmJDQkRiM0p3YjNKaGRHbHZiakVVTUJJR0ExVUVCd3dMVTJGdWRHRWdRMnhoY"
+ "21FeEN6QUpCZ05WQkFnTUFrTkIKTVFzd0NRWURWUVFHRXdKVlV6QlpNQk1HQnlxR1NN"
+ "NDlBZ0VHQ0NxR1NNNDlBd0VIQTBJQUJMOXErTk1wMklPZwp0ZGwxYmsvdVdaNStUR1F"
+ "tOGFDaTh6NzhmcytmS0NRM2QrdUR6WG5WVEFUMlpoRENpZnlJdUp3dk4zd05CcDlpCk"
+ "hCU1NNSk1KckJPamdic3dnYmd3SHdZRFZSMGpCQmd3Rm9BVUltVU0xbHFkTkluemc3U"
+ "1ZVcjlRR3prbkJxd3cKVWdZRFZSMGZCRXN3U1RCSG9FV2dRNFpCYUhSMGNITTZMeTlq"
+ "WlhKMGFXWnBZMkYwWlhNdWRISjFjM1JsWkhObApjblpwWTJWekxtbHVkR1ZzTG1OdmJ"
+ "TOUpiblJsYkZOSFdGSnZiM1JEUVM1amNtd3dIUVlEVlIwT0JCWUVGTkRvCnF0cDExL2"
+ "t1U1JlWVBIc1VaZERWOGxsTk1BNEdBMVVkRHdFQi93UUVBd0lCQmpBU0JnTlZIUk1CQ"
+ "WY4RUNEQUcKQVFIL0FnRUFNQW9HQ0NxR1NNNDlCQU1DQTBjQU1FUUNJQy85ais4NFQr"
+ "SHp0Vk8vc09RQldKYlNkKy8ydWV4Swo0K2FBMGpjRkJMY3BBaUEzZGhNckY1Y0Q1MnQ"
+ "2RnFNdkFJcGo4WGRHbXkyYmVlbGpMSksrcHpwY1JBPT0KLS0tLS1FTkQgQ0VSVElGSU"
+ "NBVEUtLS0tLQotLS0tLUJFR0lOIENFUlRJRklDQVRFLS0tLS0KTUlJQ2pqQ0NBalNnQ"
+ "XdJQkFnSVVJbVVNMWxxZE5JbnpnN1NWVXI5UUd6a25CcXd3Q2dZSUtvWkl6ajBFQXdJ"
+ "dwphREVhTUJnR0ExVUVBd3dSU1c1MFpXd2dVMGRZSUZKdmIzUWdRMEV4R2pBWUJnTlZ"
+ "CQW9NRVVsdWRHVnNJRU52CmNuQnZjbUYwYVc5dU1SUXdFZ1lEVlFRSERBdFRZVzUwWV"
+ "NCRGJHRnlZVEVMTUFrR0ExVUVDQXdDUTBFeEN6QUoKQmdOVkJBWVRBbFZUTUI0WERUR"
+ "TRNRFV5TVRFd05ERXhNVm9YRFRNek1EVXlNVEV3TkRFeE1Gb3dhREVhTUJnRwpBMVVF"
+ "QXd3UlNXNTBaV3dnVTBkWUlGSnZiM1FnUTBFeEdqQVlCZ05WQkFvTUVVbHVkR1ZzSUV"
+ "OdmNuQnZjbUYwCmFXOXVNUlF3RWdZRFZRUUhEQXRUWVc1MFlTQkRiR0Z5WVRFTE1Ba0"
+ "dBMVVFQ0F3Q1EwRXhDekFKQmdOVkJBWVQKQWxWVE1Ga3dFd1lIS29aSXpqMENBUVlJS"
+ "29aSXpqMERBUWNEUWdBRUM2bkV3TURJWVpPai9pUFdzQ3phRUtpNwoxT2lPU0xSRmhX"
+ "R2pibkJWSmZWbmtZNHUzSWprRFlZTDBNeE80bXFzeVlqbEJhbFRWWXhGUDJzSkJLNXp"
+ "sS09CCnV6Q0J1REFmQmdOVkhTTUVHREFXZ0JRaVpReldXcDAwaWZPRHRKVlN2MUFiT1"
+ "NjR3JEQlNCZ05WSFI4RVN6QkoKTUVlZ1JhQkRoa0ZvZEhSd2N6b3ZMMk5sY25ScFptb"
+ "GpZWFJsY3k1MGNuVnpkR1ZrYzJWeWRtbGpaWE11YVc1MApaV3d1WTI5dEwwbHVkR1Zz"
+ "VTBkWVVtOXZkRU5CTG1OeWJEQWRCZ05WSFE0RUZnUVVJbVVNMWxxZE5JbnpnN1NWClV"
+ "yOVFHemtuQnF3d0RnWURWUjBQQVFIL0JBUURBZ0VHTUJJR0ExVWRFd0VCL3dRSU1BWU"
+ "JBZjhDQVFFd0NnWUkKS29aSXpqMEVBd0lEU0FBd1JRSWdRUXMvMDhyeWNkUGF1Q0ZrO"
+ "FVQUVhDTUFsc2xvQmU3TndhUUdUY2RwYTBFQwpJUUNVdDhTR3Z4S21qcGNNL3owV1A5"
+ "RHZvOGgyazVkdTFpV0RkQmtBbiswaWlBPT0KLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0"
+ "tLQoA";
/**
 * Verifies SGX enclave attestation when binary runtime data is supplied with the quote.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getAttestationClients")
void testAttestSgxEnclave(HttpClient httpClient, String clientUri) {
    AttestationClient client = getAttestationBuilder(httpClient, clientUri).buildClient();
    BinaryData runtime = BinaryData.fromBytes(Base64.getUrlDecoder().decode(runtimeData));
    byte[] report = Base64.getUrlDecoder().decode(openEnclaveReport);
    // The SGX quote is the OpenEnclave report with its leading 0x10 bytes removed.
    BinaryData quote = BinaryData.fromBytes(Arrays.copyOfRange(report, 0x10, report.length));
    AttestationOptions options = new AttestationOptions(quote)
        .setRunTimeData(new AttestationData(runtime, AttestationDataInterpretation.BINARY));
    verifyAttestationResult(clientUri, client.attestSgxEnclave(options), runtime, false);
}
/**
 * Verifies SGX enclave attestation succeeds when no runtime data is supplied.
 * (The previously-computed but unused decoded runtime data local was removed.)
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getAttestationClients")
void testAttestSgxEnclaveNoRuntimeData(HttpClient httpClient, String clientUri) {
    AttestationClientBuilder attestationBuilder = getAttestationBuilder(httpClient, clientUri);
    AttestationClient client = attestationBuilder.buildClient();
    BinaryData decodedOpenEnclaveReport = BinaryData.fromBytes(Base64.getUrlDecoder().decode(openEnclaveReport));
    // The SGX quote is the OpenEnclave report with its leading 0x10 bytes removed.
    BinaryData sgxQuote = BinaryData.fromBytes(Arrays.copyOfRange(decodedOpenEnclaveReport.toBytes(), 0x10, decodedOpenEnclaveReport.toBytes().length));
    AttestationResult result = client.attestSgxEnclave(sgxQuote);
    // No runtime data was sent, so none is expected in the result.
    verifyAttestationResult(clientUri, result, null, false);
}
/**
 * Verifies SGX enclave attestation when the supplied runtime data is interpreted as JSON.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getAttestationClients")
void testAttestSgxEnclaveRuntimeJson(HttpClient httpClient, String clientUri) {
    AttestationClient client = getAttestationBuilder(httpClient, clientUri).buildClient();
    BinaryData runtime = BinaryData.fromBytes(Base64.getUrlDecoder().decode(runtimeData));
    byte[] report = Base64.getUrlDecoder().decode(openEnclaveReport);
    // The SGX quote is the OpenEnclave report with its leading 0x10 bytes removed.
    BinaryData quote = BinaryData.fromBytes(Arrays.copyOfRange(report, 0x10, report.length));
    AttestationOptions options = new AttestationOptions(quote)
        .setRunTimeData(new AttestationData(runtime, AttestationDataInterpretation.JSON));
    AttestationResult result = client.attestSgxEnclave(options);
    verifyAttestationResult(clientUri, result, runtime, true);
}
/**
 * Verifies SGX enclave attestation against a draft attestation policy; the resulting
 * token must be unsecured (algorithm "none").
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getAttestationClients")
void testAttestSgxEnclaveDraftPolicy(HttpClient httpClient, String clientUri) {
    AttestationClientBuilder attestationBuilder = getAttestationBuilder(httpClient, clientUri);
    AttestationClient client = attestationBuilder.buildClient();
    BinaryData decodedRuntimeData = BinaryData.fromBytes(Base64.getUrlDecoder().decode(runtimeData));
    BinaryData decodedOpenEnclaveReport = BinaryData.fromBytes(Base64.getUrlDecoder().decode(openEnclaveReport));
    // The SGX quote is the OpenEnclave report with its leading 0x10 bytes removed.
    BinaryData sgxQuote = BinaryData.fromBytes(Arrays.copyOfRange(decodedOpenEnclaveReport.toBytes(), 0x10, decodedOpenEnclaveReport.toBytes().length));
    AttestationOptions request = new AttestationOptions(sgxQuote)
        .setDraftPolicyForAttestation("version=1.0; authorizationrules{=> permit();}; issuancerules{};")
        .setRunTimeData(new AttestationData(decodedRuntimeData, AttestationDataInterpretation.JSON));
    Response<AttestationResult> response = client.attestSgxEnclaveWithResponse(request, Context.NONE);
    assertTrue(response instanceof AttestationResponse);
    AttestationResponse<AttestationResult> attestResponse = (AttestationResponse<AttestationResult>) response;
    // Compare String content with equals, not reference identity (== only worked by interning luck).
    assertEquals("none", attestResponse.getToken().getAlgorithm());
    verifyAttestationResult(clientUri, response.getValue(), decodedRuntimeData, true);
}
/**
 * Verifies async SGX enclave attestation succeeds when no runtime data is supplied.
 * Removed the duplicated {@code @ParameterizedTest}/{@code @MethodSource} annotation pair
 * (non-repeatable annotations may appear only once) and the unused decoded runtime data local.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getAttestationClients")
void testAttestSgxEnclaveNoRuntimeDataAsync(HttpClient httpClient, String clientUri) {
    AttestationClientBuilder attestationBuilder = getAttestationBuilder(httpClient, clientUri);
    AttestationAsyncClient client = attestationBuilder.buildAsyncClient();
    BinaryData decodedOpenEnclaveReport = BinaryData.fromBytes(Base64.getUrlDecoder().decode(openEnclaveReport));
    // The SGX quote is the OpenEnclave report with its leading 0x10 bytes removed.
    BinaryData sgxQuote = BinaryData.fromBytes(Arrays.copyOfRange(decodedOpenEnclaveReport.toBytes(), 0x10, decodedOpenEnclaveReport.toBytes().length));
    AttestationOptions request = new AttestationOptions(sgxQuote);
    StepVerifier.create(client.attestSgxEnclave(request))
        .assertNext(result -> verifyAttestationResult(clientUri, result, null, false))
        .expectComplete()
        .verify();
}
/**
 * Verifies async SGX enclave attestation when the runtime data is interpreted as JSON.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getAttestationClients")
void testAttestSgxEnclaveRuntimeJsonAsync(HttpClient httpClient, String clientUri) {
    AttestationAsyncClient asyncClient = getAttestationBuilder(httpClient, clientUri).buildAsyncClient();
    BinaryData runtime = BinaryData.fromBytes(Base64.getUrlDecoder().decode(runtimeData));
    byte[] report = Base64.getUrlDecoder().decode(openEnclaveReport);
    // The SGX quote is the OpenEnclave report with its leading 0x10 bytes removed.
    BinaryData quote = BinaryData.fromBytes(Arrays.copyOfRange(report, 0x10, report.length));
    AttestationOptions attestOptions = new AttestationOptions(quote)
        .setRunTimeData(new AttestationData(runtime, AttestationDataInterpretation.JSON));
    StepVerifier.create(asyncClient.attestSgxEnclave(attestOptions))
        .assertNext(result -> verifyAttestationResult(clientUri, result, runtime, true))
        .expectComplete()
        .verify();
}
/**
 * Verifies async SGX enclave attestation against a draft attestation policy.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getAttestationClients")
void testAttestSgxEnclaveDraftPolicyAsync(HttpClient httpClient, String clientUri) {
    AttestationClientBuilder attestationBuilder = getAttestationBuilder(httpClient, clientUri);
    AttestationAsyncClient client = attestationBuilder.buildAsyncClient();
    BinaryData decodedRuntimeData = BinaryData.fromBytes(Base64.getUrlDecoder().decode(runtimeData));
    BinaryData decodedOpenEnclaveReport = BinaryData.fromBytes(Base64.getUrlDecoder().decode(openEnclaveReport));
    // The SGX quote is the OpenEnclave report with its leading 0x10 bytes removed.
    BinaryData sgxQuote = BinaryData.fromBytes(Arrays.copyOfRange(decodedOpenEnclaveReport.toBytes(), 0x10, decodedOpenEnclaveReport.toBytes().length));
    AttestationOptions request = new AttestationOptions(sgxQuote)
        .setDraftPolicyForAttestation("version=1.0; authorizationrules{=> permit();}; issuancerules{};")
        .setRunTimeData(new AttestationData(decodedRuntimeData, AttestationDataInterpretation.JSON));
    StepVerifier.create(client.attestSgxEnclaveWithResponse(request))
        .assertNext(response -> {
            // instanceof assertion is sufficient here; the token is not inspected, so the
            // previously unused cast local was removed.
            assertTrue(response instanceof AttestationResponse);
            verifyAttestationResult(clientUri, response.getValue(), decodedRuntimeData, true);
        })
        .expectComplete()
        .verify();
}
/**
 * Verifies AttestationOptions setter behavior: a later {@code setInitTimeData} call replaces
 * the earlier one (confirmed by the first assertion), and run-time data round-trips intact.
 */
@Test()
void testAttestationOptions() {
    BinaryData decodedRuntimeData = BinaryData.fromBytes(Base64.getUrlDecoder().decode(runtimeData));
    BinaryData decodedOpenEnclaveReport = BinaryData.fromBytes(Base64.getUrlDecoder().decode(openEnclaveReport));
    // Constructor smoke check only; the previously unused local variable was removed.
    new AttestationOptions(decodedOpenEnclaveReport);
    AttestationOptions request2 = new AttestationOptions(decodedOpenEnclaveReport)
        // Intentionally set twice: the assertion below confirms the second call wins.
        .setInitTimeData(new AttestationData(decodedRuntimeData, AttestationDataInterpretation.JSON))
        .setInitTimeData(new AttestationData(decodedOpenEnclaveReport, AttestationDataInterpretation.BINARY))
        .setRunTimeData(new AttestationData(BinaryData.fromBytes(new byte[]{1, 2, 3, 4, 5}), AttestationDataInterpretation.BINARY));
    assertArrayEquals(decodedOpenEnclaveReport.toBytes(), request2.getInitTimeData().getData().toBytes());
    assertArrayEquals(new byte[]{1, 2, 3, 4, 5}, request2.getRunTimeData().getData().toBytes());
}
/**
 * Verifies OpenEnclave report attestation when binary runtime data is supplied.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getAttestationClients")
void testAttestOpenEnclave(HttpClient httpClient, String clientUri) {
    AttestationClient client = getAttestationBuilder(httpClient, clientUri).buildClient();
    BinaryData runtime = BinaryData.fromBytes(Base64.getUrlDecoder().decode(runtimeData));
    BinaryData report = BinaryData.fromBytes(Base64.getUrlDecoder().decode(openEnclaveReport));
    AttestationOptions options = new AttestationOptions(report)
        .setRunTimeData(new AttestationData(runtime, AttestationDataInterpretation.BINARY));
    verifyAttestationResult(clientUri, client.attestOpenEnclave(options), runtime, false);
}
/**
 * Verifies OpenEnclave report attestation succeeds when no runtime data is supplied.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getAttestationClients")
void testAttestOpenEnclaveNoRuntimeData(HttpClient httpClient, String clientUri) {
    AttestationClient client = getAttestationBuilder(httpClient, clientUri).buildClient();
    BinaryData report = BinaryData.fromBytes(Base64.getUrlDecoder().decode(openEnclaveReport));
    AttestationResult result = client.attestOpenEnclave(new AttestationOptions(report));
    // No runtime data was sent, so none is expected in the result.
    verifyAttestationResult(clientUri, result, null, false);
}
/**
 * Verifies OpenEnclave report attestation when the runtime data is interpreted as JSON.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getAttestationClients")
void testAttestOpenEnclaveRuntimeJson(HttpClient httpClient, String clientUri) {
    AttestationClient client = getAttestationBuilder(httpClient, clientUri).buildClient();
    BinaryData runtime = BinaryData.fromBytes(Base64.getUrlDecoder().decode(runtimeData));
    BinaryData report = BinaryData.fromBytes(Base64.getUrlDecoder().decode(openEnclaveReport));
    AttestationOptions options = new AttestationOptions(report)
        .setRunTimeData(new AttestationData(runtime, AttestationDataInterpretation.JSON));
    AttestationResult result = client.attestOpenEnclave(options);
    verifyAttestationResult(clientUri, result, runtime, true);
}
/**
 * Verifies OpenEnclave report attestation against a draft attestation policy; the resulting
 * token must be unsecured (algorithm "none").
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getAttestationClients")
void testAttestOpenEnclaveDraftPolicy(HttpClient httpClient, String clientUri) {
    AttestationClientBuilder attestationBuilder = getAttestationBuilder(httpClient, clientUri);
    AttestationClient client = attestationBuilder.buildClient();
    BinaryData decodedRuntimeData = BinaryData.fromBytes(Base64.getUrlDecoder().decode(runtimeData));
    BinaryData decodedOpenEnclaveReport = BinaryData.fromBytes(Base64.getUrlDecoder().decode(openEnclaveReport));
    AttestationOptions request = new AttestationOptions(decodedOpenEnclaveReport)
        .setDraftPolicyForAttestation("version=1.0; authorizationrules{=> permit();}; issuancerules{};")
        .setRunTimeData(new AttestationData(decodedRuntimeData, AttestationDataInterpretation.JSON));
    Response<AttestationResult> response = client.attestOpenEnclaveWithResponse(request, Context.NONE);
    assertTrue(response instanceof AttestationResponse);
    AttestationResponse<AttestationResult> attestResponse = (AttestationResponse<AttestationResult>) response;
    // Compare String content with equals, not reference identity (== only worked by interning luck).
    assertEquals("none", attestResponse.getToken().getAlgorithm());
    verifyAttestationResult(clientUri, response.getValue(), decodedRuntimeData, true);
}
/**
 * Verifies async OpenEnclave report attestation when binary runtime data is supplied.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getAttestationClients")
void testAttestOpenEnclaveAsync(HttpClient httpClient, String clientUri) {
    AttestationAsyncClient asyncClient = getAttestationBuilder(httpClient, clientUri).buildAsyncClient();
    BinaryData runtime = BinaryData.fromBytes(Base64.getUrlDecoder().decode(runtimeData));
    BinaryData report = BinaryData.fromBytes(Base64.getUrlDecoder().decode(openEnclaveReport));
    AttestationOptions attestOptions = new AttestationOptions(report)
        .setRunTimeData(new AttestationData(runtime, AttestationDataInterpretation.BINARY));
    StepVerifier.create(asyncClient.attestOpenEnclave(attestOptions))
        .assertNext(result -> verifyAttestationResult(clientUri, result, runtime, false))
        .expectComplete()
        .verify();
}
/**
 * Verifies async OpenEnclave report attestation succeeds when no runtime data is supplied.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getAttestationClients")
void testAttestOpenEnclaveNoRuntimeDataAsync(HttpClient httpClient, String clientUri) {
    AttestationAsyncClient asyncClient = getAttestationBuilder(httpClient, clientUri).buildAsyncClient();
    BinaryData report = BinaryData.fromBytes(Base64.getUrlDecoder().decode(openEnclaveReport));
    StepVerifier.create(asyncClient.attestOpenEnclave(report))
        .assertNext(result -> verifyAttestationResult(clientUri, result, null, false))
        .expectComplete()
        .verify();
}
/**
 * Verifies async OpenEnclave report attestation when the runtime data is interpreted as JSON.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getAttestationClients")
void testAttestOpenEnclaveRuntimeJsonAsync(HttpClient httpClient, String clientUri) {
    AttestationAsyncClient asyncClient = getAttestationBuilder(httpClient, clientUri).buildAsyncClient();
    BinaryData runtime = BinaryData.fromBytes(Base64.getUrlDecoder().decode(runtimeData));
    BinaryData report = BinaryData.fromBytes(Base64.getUrlDecoder().decode(openEnclaveReport));
    AttestationOptions attestOptions = new AttestationOptions(report)
        .setRunTimeData(new AttestationData(runtime, AttestationDataInterpretation.JSON));
    StepVerifier.create(asyncClient.attestOpenEnclave(attestOptions))
        .assertNext(result -> verifyAttestationResult(clientUri, result, runtime, true))
        .expectComplete()
        .verify();
}
/**
 * Verifies async OpenEnclave report attestation against a draft attestation policy.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getAttestationClients")
void testAttestOpenEnclaveDraftPolicyAsync(HttpClient httpClient, String clientUri) {
    AttestationClientBuilder attestationBuilder = getAttestationBuilder(httpClient, clientUri);
    AttestationAsyncClient client = attestationBuilder.buildAsyncClient();
    BinaryData decodedRuntimeData = BinaryData.fromBytes(Base64.getUrlDecoder().decode(runtimeData));
    BinaryData decodedOpenEnclaveReport = BinaryData.fromBytes(Base64.getUrlDecoder().decode(openEnclaveReport));
    AttestationOptions options = new AttestationOptions(decodedOpenEnclaveReport)
        .setDraftPolicyForAttestation("version=1.0; authorizationrules{=> permit();}; issuancerules{};")
        .setRunTimeData(new AttestationData(decodedRuntimeData, AttestationDataInterpretation.JSON));
    StepVerifier.create(client.attestOpenEnclaveWithResponse(options))
        .assertNext(response -> {
            // instanceof assertion is sufficient here; the token is not inspected, so the
            // previously unused cast local was removed.
            assertTrue(response instanceof AttestationResponse);
            verifyAttestationResult(clientUri, response.getValue(), decodedRuntimeData, true);
        })
        .expectComplete()
        .verify();
}
/**
 * Asserts the common properties of an attestation result.
 *
 * @param clientUri the attestation endpoint; expected as the token issuer (issuer equality is
 *     only checked outside PLAYBACK mode).
 * @param result the attestation result to inspect.
 * @param runtimeData the runtime data that was sent, or {@code null} if none was sent.
 * @param expectJson whether the runtime data was sent with JSON interpretation (claims expected)
 *     rather than binary (enclave-held data expected).
 */
private void verifyAttestationResult(String clientUri, AttestationResult result, BinaryData runtimeData, boolean expectJson) {
    assertNotNull(result.getIssuer());
    if (testContextManager.getTestMode() != TestMode.PLAYBACK) {
        Assertions.assertEquals(clientUri, result.getIssuer());
    }
    assertNotNull(result.getMrEnclave());
    assertNotNull(result.getMrSigner());
    assertNotNull(result.getSvn());
    assertNull(result.getNonce());
    if (expectJson) {
        assertTrue(result.getRuntimeClaims() instanceof Map);
        @SuppressWarnings("unchecked")
        Map<String, Object> actualClaims = (Map<String, Object>) result.getRuntimeClaims();
        // Parse the runtime data we sent and compare it claim-by-claim with what came back.
        ObjectMapper jsonMapper = new ObjectMapper();
        @SuppressWarnings("unchecked")
        Map<String, Object> expectedClaims = assertDoesNotThrow(
            () -> (Map<String, Object>) jsonMapper.readValue(runtimeData.toBytes(), Object.class));
        assertObjectEqual(expectedClaims, actualClaims);
    } else if (runtimeData != null) {
        // Binary runtime data must round-trip unchanged as enclave-held data.
        Assertions.assertArrayEquals(runtimeData.toBytes(), result.getEnclaveHeldData().toBytes());
    }
}
/**
 * Recursively asserts that every key/value pair in {@code expected} appears with an equal value
 * in {@code actual}; nested maps are compared recursively. Extra keys in {@code actual} are
 * not checked.
 */
void assertObjectEqual(Map<String, Object> expected, Map<String, Object> actual) {
    for (Map.Entry<String, Object> entry : expected.entrySet()) {
        String key = entry.getKey();
        Object expectedValue = entry.getValue();
        logger.verbose("Key: " + key);
        assertTrue(actual.containsKey(key));
        if (expectedValue instanceof Map) {
            assertTrue(actual.get(key) instanceof Map);
            @SuppressWarnings("unchecked")
            Map<String, Object> expectedInner = (Map<String, Object>) expectedValue;
            @SuppressWarnings("unchecked")
            Map<String, Object> actualInner = (Map<String, Object>) actual.get(key);
            assertObjectEqual(expectedInner, actualInner);
        } else {
            assertEquals(expectedValue, actual.get(key));
        }
    }
}
/**
 * Placeholder for TPM attestation coverage.
 *
 * This test cannot be written until the setPolicy APIs are written because it depends on
 * setting attestation policy :(.
 */
// NOTE(review): @ParameterizedTest/@MethodSource on a zero-parameter stub — presumably kept as a
// placeholder; confirm JUnit accepts this shape before the body is filled in.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getAttestationClients")
void attestTpm() {
}
/**
 * Placeholder for async TPM attestation coverage.
 *
 * This test cannot be written until the setPolicy APIs are written because it depends on
 * setting attestation policy :(.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getAttestationClients")
void attestTpmAsync() {
}
/**
 * Placeholder for TPM attestation with-response coverage.
 *
 * This test cannot be written until the setPolicy APIs are written because it depends on
 * setting attestation policy :(.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getAttestationClients")
void attestTpmWithResponse() {
}
/**
 * Placeholder for async TPM attestation with-response coverage.
 *
 * This test cannot be written until the setPolicy APIs are written because it depends on
 * setting attestation policy :(.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getAttestationClients")
void attestTpmWithResponseAsync() {
}
} |
It is not recommended to use underscores for variable names. ```suggestion if (secretNames != null) { return deleteAndPurgeSecretsAsync(secretNames).then(super.globalCleanupAsync()); } ``` | public Mono<Void> globalCleanupAsync() {
if (_secretNames != null) {
return deleteAndPurgeSecretsAsync(_secretNames).then(super.globalCleanupAsync());
}
else {
return super.globalCleanupAsync();
}
} | } | public Mono<Void> globalCleanupAsync() {
if (secretNames != null) {
return deleteAndPurgeSecretsAsync(secretNames).then(super.globalCleanupAsync());
}
else {
return super.globalCleanupAsync();
}
} | class ListSecretsTest extends SecretsTest<PerfStressOptions> {
// Names of the secrets created during setup; used to purge them during cleanup.
// (Renamed from _secretNames: Java convention does not use leading underscores.)
private String[] secretNames;

/**
 * Creates the list-secrets perf test.
 *
 * @param options perf-test options; {@code getCount()} controls how many secrets are created.
 */
public ListSecretsTest(PerfStressOptions options) {
    super(options);
}

/**
 * Verifies the vault is empty, then creates {@code options.getCount()} secrets to list.
 * Fully reactive: no {@code block()} — the precondition check and secret creation are
 * deferred until the base setup completes, and failures surface via {@code Mono.error}.
 */
@Override
public Mono<Void> globalSetupAsync() {
    return super.globalSetupAsync().then(Mono.defer(() -> {
        // Guard: refuse to run against a vault that already holds secrets (incl. soft-deleted).
        if (secretClient.listPropertiesOfSecrets().iterator().hasNext()
            || secretClient.listDeletedSecrets().iterator().hasNext()) {
            return Mono.error(new IllegalStateException("KeyVault " + secretClient.getVaultUrl()
                + " must contain 0 secrets (including soft-deleted) before starting perf test"));
        }
        secretNames = new String[options.getCount()];
        for (int i = 0; i < secretNames.length; i++) {
            secretNames[i] = "listSecretsPerfTest-" + UUID.randomUUID();
        }
        return Flux.fromArray(secretNames)
            .flatMap(secretName -> secretAsyncClient.setSecret(secretName, secretName))
            .then();
    }));
}

/** Synchronous perf operation: enumerate all secret properties once. */
@Override
public void run() {
    secretClient.listPropertiesOfSecrets().forEach(b -> {
    });
}

/** Asynchronous perf operation: enumerate all secret properties once. */
@Override
public Mono<Void> runAsync() {
    return secretAsyncClient.listPropertiesOfSecrets()
        .then();
}
} | class ListSecretsTest extends SecretsTest<PerfStressOptions> {
// Names of the secrets created during setup; used to purge them during cleanup.
private String[] secretNames;

/**
 * Creates the list-secrets perf test.
 *
 * @param options perf-test options; {@code getCount()} controls how many secrets are created.
 */
public ListSecretsTest(PerfStressOptions options) {
    super(options);
}

/**
 * Verifies the vault is empty, then creates {@code options.getCount()} secrets to list.
 * Failures surface via {@code Mono.error} with a specific exception type instead of raw
 * {@code Exception}.
 */
@Override
public Mono<Void> globalSetupAsync() {
    return super.globalSetupAsync().then(Mono.defer(() -> {
        // Guard: refuse to run against a vault that already holds secrets (incl. soft-deleted).
        if (secretClient.listPropertiesOfSecrets().iterator().hasNext()
            || secretClient.listDeletedSecrets().iterator().hasNext()) {
            return Mono.error(new IllegalStateException("KeyVault " + secretClient.getVaultUrl()
                + " must contain 0 secrets (including soft-deleted) before starting perf test"));
        }
        secretNames = new String[options.getCount()];
        for (int i = 0; i < secretNames.length; i++) {
            secretNames[i] = "listSecretsPerfTest-" + UUID.randomUUID();
        }
        return Flux.fromArray(secretNames)
            .flatMap(secretName -> secretAsyncClient.setSecret(secretName, secretName))
            .then();
    }));
}

/** Synchronous perf operation: enumerate all secret properties once. */
@Override
public void run() {
    secretClient.listPropertiesOfSecrets().forEach(b -> {
    });
}

/** Asynchronous perf operation: enumerate all secret properties once. */
@Override
public Mono<Void> runAsync() {
    return secretAsyncClient.listPropertiesOfSecrets()
        .then();
}
} |
It is not recommended to use underscores for variable names. ```suggestion secretNames = new String[options.getCount()]; for (int i=0; i < secretNames.length; i++) { secretNames[i] = "listSecretsPerfTest-" + UUID.randomUUID(); } return Flux.fromArray(secretNames) .flatMap(secretName -> secretAsyncClient.setSecret(secretName, secretName)) .then(); ``` | public Mono<Void> globalSetupAsync() {
super.globalSetupAsync().block();
if (secretClient.listPropertiesOfSecrets().iterator().hasNext() ||
secretClient.listDeletedSecrets().iterator().hasNext()) {
throw new RuntimeException("KeyVault " + secretClient.getVaultUrl() + "must contain 0 " +
"secrets (including soft-deleted) before starting perf test");
}
_secretNames = new String[options.getCount()];
for (int i=0; i < _secretNames.length; i++) {
_secretNames[i] = "listSecretsPerfTest-" + UUID.randomUUID();
}
return Flux.fromArray(_secretNames)
.flatMap(secretName -> secretAsyncClient.setSecret(secretName, secretName))
.then();
} | .then(); | public Mono<Void> globalSetupAsync() {
return super.globalSetupAsync().then(Mono.defer(() -> {
if (secretClient.listPropertiesOfSecrets().iterator().hasNext() ||
secretClient.listDeletedSecrets().iterator().hasNext()) {
return Mono.error(new Exception("KeyVault " + secretClient.getVaultUrl() + "must contain 0 " +
"secrets (including soft-deleted) before starting perf test"));
}
secretNames = new String[options.getCount()];
for (int i=0; i < secretNames.length; i++) {
secretNames[i] = "listSecretsPerfTest-" + UUID.randomUUID();
}
return Flux.fromArray(secretNames)
.flatMap(secretName -> secretAsyncClient.setSecret(secretName, secretName))
.then();
}));
} | class ListSecretsTest extends SecretsTest<PerfStressOptions> {
// Names of the secrets created during setup; used to purge them during cleanup.
// (Renamed from _secretNames: Java convention does not use leading underscores.)
private String[] secretNames;

/**
 * Creates the list-secrets perf test.
 *
 * @param options perf-test options.
 */
public ListSecretsTest(PerfStressOptions options) {
    super(options);
}

/** Deletes and purges the secrets created by this test, then runs the base cleanup. */
// Removed a duplicated @Override line: @Override is not repeatable and may appear only once.
@Override
public Mono<Void> globalCleanupAsync() {
    if (secretNames != null) {
        return deleteAndPurgeSecretsAsync(secretNames).then(super.globalCleanupAsync());
    }
    else {
        return super.globalCleanupAsync();
    }
}

/** Synchronous perf operation: enumerate all secret properties once. */
@Override
public void run() {
    secretClient.listPropertiesOfSecrets().forEach(b -> {
    });
}

/** Asynchronous perf operation: enumerate all secret properties once. */
@Override
public Mono<Void> runAsync() {
    return secretAsyncClient.listPropertiesOfSecrets()
        .then();
}
} | class ListSecretsTest extends SecretsTest<PerfStressOptions> {
// Names of the secrets created during setup; used to purge them during cleanup.
private String[] secretNames;

/**
 * Creates the list-secrets perf test.
 *
 * @param options perf-test options.
 */
public ListSecretsTest(PerfStressOptions options) {
    super(options);
}

/** Deletes and purges the secrets created by this test, then runs the base cleanup. */
// Removed a duplicated @Override line: @Override is not repeatable and may appear only once.
@Override
public Mono<Void> globalCleanupAsync() {
    if (secretNames != null) {
        return deleteAndPurgeSecretsAsync(secretNames).then(super.globalCleanupAsync());
    }
    else {
        return super.globalCleanupAsync();
    }
}

/** Synchronous perf operation: enumerate all secret properties once. */
@Override
public void run() {
    secretClient.listPropertiesOfSecrets().forEach(b -> {
    });
}

/** Asynchronous perf operation: enumerate all secret properties once. */
@Override
public Mono<Void> runAsync() {
    return secretAsyncClient.listPropertiesOfSecrets()
        .then();
}
} |
Should we consider deleting all secrets and then continue instead of throwing? | public Mono<Void> globalSetupAsync() {
super.globalSetupAsync().block();
if (secretClient.listPropertiesOfSecrets().iterator().hasNext() ||
secretClient.listDeletedSecrets().iterator().hasNext()) {
throw new RuntimeException("KeyVault " + secretClient.getVaultUrl() + "must contain 0 " +
"secrets (including soft-deleted) before starting perf test");
}
_secretNames = new String[options.getCount()];
for (int i=0; i < _secretNames.length; i++) {
_secretNames[i] = "listSecretsPerfTest-" + UUID.randomUUID();
}
return Flux.fromArray(_secretNames)
.flatMap(secretName -> secretAsyncClient.setSecret(secretName, secretName))
.then();
} | throw new RuntimeException("KeyVault " + secretClient.getVaultUrl() + "must contain 0 " + | public Mono<Void> globalSetupAsync() {
return super.globalSetupAsync().then(Mono.defer(() -> {
if (secretClient.listPropertiesOfSecrets().iterator().hasNext() ||
secretClient.listDeletedSecrets().iterator().hasNext()) {
return Mono.error(new Exception("KeyVault " + secretClient.getVaultUrl() + "must contain 0 " +
"secrets (including soft-deleted) before starting perf test"));
}
secretNames = new String[options.getCount()];
for (int i=0; i < secretNames.length; i++) {
secretNames[i] = "listSecretsPerfTest-" + UUID.randomUUID();
}
return Flux.fromArray(secretNames)
.flatMap(secretName -> secretAsyncClient.setSecret(secretName, secretName))
.then();
}));
} | class ListSecretsTest extends SecretsTest<PerfStressOptions> {
// Names of the secrets created during setup; used to purge them during cleanup.
// (Renamed from _secretNames: Java convention does not use leading underscores.)
private String[] secretNames;

/**
 * Creates the list-secrets perf test.
 *
 * @param options perf-test options.
 */
public ListSecretsTest(PerfStressOptions options) {
    super(options);
}

/** Deletes and purges the secrets created by this test, then runs the base cleanup. */
// Removed a duplicated @Override line: @Override is not repeatable and may appear only once.
@Override
public Mono<Void> globalCleanupAsync() {
    if (secretNames != null) {
        return deleteAndPurgeSecretsAsync(secretNames).then(super.globalCleanupAsync());
    }
    else {
        return super.globalCleanupAsync();
    }
}

/** Synchronous perf operation: enumerate all secret properties once. */
@Override
public void run() {
    secretClient.listPropertiesOfSecrets().forEach(b -> {
    });
}

/** Asynchronous perf operation: enumerate all secret properties once. */
@Override
public Mono<Void> runAsync() {
    return secretAsyncClient.listPropertiesOfSecrets()
        .then();
}
} | class ListSecretsTest extends SecretsTest<PerfStressOptions> {
// Names of the secrets created during setup; used to purge them during cleanup.
private String[] secretNames;

/**
 * Creates the list-secrets perf test.
 *
 * @param options perf-test options.
 */
public ListSecretsTest(PerfStressOptions options) {
    super(options);
}

/** Deletes and purges the secrets created by this test, then runs the base cleanup. */
// Removed a duplicated @Override line: @Override is not repeatable and may appear only once.
@Override
public Mono<Void> globalCleanupAsync() {
    if (secretNames != null) {
        return deleteAndPurgeSecretsAsync(secretNames).then(super.globalCleanupAsync());
    }
    else {
        return super.globalCleanupAsync();
    }
}

/** Synchronous perf operation: enumerate all secret properties once. */
@Override
public void run() {
    secretClient.listPropertiesOfSecrets().forEach(b -> {
    });
}

/** Asynchronous perf operation: enumerate all secret properties once. */
@Override
public Mono<Void> runAsync() {
    return secretAsyncClient.listPropertiesOfSecrets()
        .then();
}
} |
We should avoid calling `block()` in an async method. | public Mono<Void> globalSetupAsync() {
super.globalSetupAsync().block();
if (secretClient.listPropertiesOfSecrets().iterator().hasNext() ||
secretClient.listDeletedSecrets().iterator().hasNext()) {
throw new RuntimeException("KeyVault " + secretClient.getVaultUrl() + "must contain 0 " +
"secrets (including soft-deleted) before starting perf test");
}
_secretNames = new String[options.getCount()];
for (int i=0; i < _secretNames.length; i++) {
_secretNames[i] = "listSecretsPerfTest-" + UUID.randomUUID();
}
return Flux.fromArray(_secretNames)
.flatMap(secretName -> secretAsyncClient.setSecret(secretName, secretName))
.then();
} | super.globalSetupAsync().block(); | public Mono<Void> globalSetupAsync() {
return super.globalSetupAsync().then(Mono.defer(() -> {
if (secretClient.listPropertiesOfSecrets().iterator().hasNext() ||
secretClient.listDeletedSecrets().iterator().hasNext()) {
return Mono.error(new Exception("KeyVault " + secretClient.getVaultUrl() + "must contain 0 " +
"secrets (including soft-deleted) before starting perf test"));
}
secretNames = new String[options.getCount()];
for (int i=0; i < secretNames.length; i++) {
secretNames[i] = "listSecretsPerfTest-" + UUID.randomUUID();
}
return Flux.fromArray(secretNames)
.flatMap(secretName -> secretAsyncClient.setSecret(secretName, secretName))
.then();
}));
} | class ListSecretsTest extends SecretsTest<PerfStressOptions> {
// Names of the secrets created during setup; used to purge them during cleanup.
// (Renamed from _secretNames: Java convention does not use leading underscores.)
private String[] secretNames;

/**
 * Creates the list-secrets perf test.
 *
 * @param options perf-test options.
 */
public ListSecretsTest(PerfStressOptions options) {
    super(options);
}

/** Deletes and purges the secrets created by this test, then runs the base cleanup. */
// Removed a duplicated @Override line: @Override is not repeatable and may appear only once.
@Override
public Mono<Void> globalCleanupAsync() {
    if (secretNames != null) {
        return deleteAndPurgeSecretsAsync(secretNames).then(super.globalCleanupAsync());
    }
    else {
        return super.globalCleanupAsync();
    }
}

/** Synchronous perf operation: enumerate all secret properties once. */
@Override
public void run() {
    secretClient.listPropertiesOfSecrets().forEach(b -> {
    });
}

/** Asynchronous perf operation: enumerate all secret properties once. */
@Override
public Mono<Void> runAsync() {
    return secretAsyncClient.listPropertiesOfSecrets()
        .then();
}
} | class ListSecretsTest extends SecretsTest<PerfStressOptions> {
// Names of the secrets created during setup; used to purge them during cleanup.
private String[] secretNames;

/**
 * Creates the list-secrets perf test.
 *
 * @param options perf-test options.
 */
public ListSecretsTest(PerfStressOptions options) {
    super(options);
}

/** Deletes and purges the secrets created by this test, then runs the base cleanup. */
// Removed a duplicated @Override line: @Override is not repeatable and may appear only once.
@Override
public Mono<Void> globalCleanupAsync() {
    if (secretNames != null) {
        return deleteAndPurgeSecretsAsync(secretNames).then(super.globalCleanupAsync());
    }
    else {
        return super.globalCleanupAsync();
    }
}

/** Synchronous perf operation: enumerate all secret properties once. */
@Override
public void run() {
    secretClient.listPropertiesOfSecrets().forEach(b -> {
    });
}

/** Asynchronous perf operation: enumerate all secret properties once. */
@Override
public Mono<Void> runAsync() {
    return secretAsyncClient.listPropertiesOfSecrets()
        .then();
}
} |
Return `Mono.error()` instead. >Should we consider deleting all secrets and then continue instead of throwing? @vcolin7 We should not automatically delete just to be safe that we are not deleting something that shouldn't be. The executor of perf tests should ensure that there are no secrets before running the tests. Alternatively, we could add a flag to the test to delete all secrets before starting the test. | public Mono<Void> globalSetupAsync() {
super.globalSetupAsync().block();
if (secretClient.listPropertiesOfSecrets().iterator().hasNext() ||
secretClient.listDeletedSecrets().iterator().hasNext()) {
throw new RuntimeException("KeyVault " + secretClient.getVaultUrl() + "must contain 0 " +
"secrets (including soft-deleted) before starting perf test");
}
_secretNames = new String[options.getCount()];
for (int i=0; i < _secretNames.length; i++) {
_secretNames[i] = "listSecretsPerfTest-" + UUID.randomUUID();
}
return Flux.fromArray(_secretNames)
.flatMap(secretName -> secretAsyncClient.setSecret(secretName, secretName))
.then();
} | throw new RuntimeException("KeyVault " + secretClient.getVaultUrl() + "must contain 0 " + | public Mono<Void> globalSetupAsync() {
return super.globalSetupAsync().then(Mono.defer(() -> {
if (secretClient.listPropertiesOfSecrets().iterator().hasNext() ||
secretClient.listDeletedSecrets().iterator().hasNext()) {
return Mono.error(new Exception("KeyVault " + secretClient.getVaultUrl() + "must contain 0 " +
"secrets (including soft-deleted) before starting perf test"));
}
secretNames = new String[options.getCount()];
for (int i=0; i < secretNames.length; i++) {
secretNames[i] = "listSecretsPerfTest-" + UUID.randomUUID();
}
return Flux.fromArray(secretNames)
.flatMap(secretName -> secretAsyncClient.setSecret(secretName, secretName))
.then();
}));
} | class ListSecretsTest extends SecretsTest<PerfStressOptions> {
// Names of the secrets created during setup; used to purge them during cleanup.
// (Renamed from _secretNames: Java convention does not use leading underscores.)
private String[] secretNames;

/**
 * Creates the list-secrets perf test.
 *
 * @param options perf-test options.
 */
public ListSecretsTest(PerfStressOptions options) {
    super(options);
}

/** Deletes and purges the secrets created by this test, then runs the base cleanup. */
// Removed a duplicated @Override line: @Override is not repeatable and may appear only once.
@Override
public Mono<Void> globalCleanupAsync() {
    if (secretNames != null) {
        return deleteAndPurgeSecretsAsync(secretNames).then(super.globalCleanupAsync());
    }
    else {
        return super.globalCleanupAsync();
    }
}

/** Synchronous perf operation: enumerate all secret properties once. */
@Override
public void run() {
    secretClient.listPropertiesOfSecrets().forEach(b -> {
    });
}

/** Asynchronous perf operation: enumerate all secret properties once. */
@Override
public Mono<Void> runAsync() {
    return secretAsyncClient.listPropertiesOfSecrets()
        .then();
}
} | class ListSecretsTest extends SecretsTest<PerfStressOptions> {
// Names of the secrets created during setup; used to purge them during cleanup.
private String[] secretNames;

/**
 * Creates the list-secrets perf test.
 *
 * @param options perf-test options.
 */
public ListSecretsTest(PerfStressOptions options) {
    super(options);
}

/** Deletes and purges the secrets created by this test, then runs the base cleanup. */
// Removed a duplicated @Override line: @Override is not repeatable and may appear only once.
@Override
public Mono<Void> globalCleanupAsync() {
    if (secretNames != null) {
        return deleteAndPurgeSecretsAsync(secretNames).then(super.globalCleanupAsync());
    }
    else {
        return super.globalCleanupAsync();
    }
}

/** Synchronous perf operation: enumerate all secret properties once. */
@Override
public void run() {
    secretClient.listPropertiesOfSecrets().forEach(b -> {
    });
}

/** Asynchronous perf operation: enumerate all secret properties once. */
@Override
public Mono<Void> runAsync() {
    return secretAsyncClient.listPropertiesOfSecrets()
        .then();
}
} |
Do you know how to convert the code in this method to reactor? I used `block()` here because I wasn't sure how to chain all the other code after `globalSetupAsync()`. One way I would know how to do it is to move all the code after `globalSetupAsync()` into a separate async method, then chain it after `globalSetupAsync()`. | public Mono<Void> globalSetupAsync() {
super.globalSetupAsync().block();
if (secretClient.listPropertiesOfSecrets().iterator().hasNext() ||
secretClient.listDeletedSecrets().iterator().hasNext()) {
throw new RuntimeException("KeyVault " + secretClient.getVaultUrl() + "must contain 0 " +
"secrets (including soft-deleted) before starting perf test");
}
_secretNames = new String[options.getCount()];
for (int i=0; i < _secretNames.length; i++) {
_secretNames[i] = "listSecretsPerfTest-" + UUID.randomUUID();
}
return Flux.fromArray(_secretNames)
.flatMap(secretName -> secretAsyncClient.setSecret(secretName, secretName))
.then();
} | super.globalSetupAsync().block(); | public Mono<Void> globalSetupAsync() {
return super.globalSetupAsync().then(Mono.defer(() -> {
if (secretClient.listPropertiesOfSecrets().iterator().hasNext() ||
secretClient.listDeletedSecrets().iterator().hasNext()) {
return Mono.error(new Exception("KeyVault " + secretClient.getVaultUrl() + "must contain 0 " +
"secrets (including soft-deleted) before starting perf test"));
}
secretNames = new String[options.getCount()];
for (int i=0; i < secretNames.length; i++) {
secretNames[i] = "listSecretsPerfTest-" + UUID.randomUUID();
}
return Flux.fromArray(secretNames)
.flatMap(secretName -> secretAsyncClient.setSecret(secretName, secretName))
.then();
}));
} | class ListSecretsTest extends SecretsTest<PerfStressOptions> {
private String[] _secretNames;
public ListSecretsTest(PerfStressOptions options) {
super(options);
}
@Override
@Override
public Mono<Void> globalCleanupAsync() {
if (_secretNames != null) {
return deleteAndPurgeSecretsAsync(_secretNames).then(super.globalCleanupAsync());
}
else {
return super.globalCleanupAsync();
}
}
@Override
public void run() {
secretClient.listPropertiesOfSecrets().forEach(b -> {
});
}
@Override
public Mono<Void> runAsync() {
return secretAsyncClient.listPropertiesOfSecrets()
.then();
}
} | class ListSecretsTest extends SecretsTest<PerfStressOptions> {
private String[] secretNames;
public ListSecretsTest(PerfStressOptions options) {
super(options);
}
@Override
@Override
public Mono<Void> globalCleanupAsync() {
if (secretNames != null) {
return deleteAndPurgeSecretsAsync(secretNames).then(super.globalCleanupAsync());
}
else {
return super.globalCleanupAsync();
}
}
@Override
public void run() {
secretClient.listPropertiesOfSecrets().forEach(b -> {
});
}
@Override
public Mono<Void> runAsync() {
return secretAsyncClient.listPropertiesOfSecrets()
.then();
}
} |
@srnagar covered most of it. It's dangerous for tests to delete resources they didn't create, so we just check and throw instead. Perf tests should clean up after themselves, so you should only hit this if an earlier perf test crashed (or somehow failed to cleanup), or if you ran against the wrong KeyVault (and we **really** don't want to delete everything then). | public Mono<Void> globalSetupAsync() {
super.globalSetupAsync().block();
if (secretClient.listPropertiesOfSecrets().iterator().hasNext() ||
secretClient.listDeletedSecrets().iterator().hasNext()) {
throw new RuntimeException("KeyVault " + secretClient.getVaultUrl() + "must contain 0 " +
"secrets (including soft-deleted) before starting perf test");
}
_secretNames = new String[options.getCount()];
for (int i=0; i < _secretNames.length; i++) {
_secretNames[i] = "listSecretsPerfTest-" + UUID.randomUUID();
}
return Flux.fromArray(_secretNames)
.flatMap(secretName -> secretAsyncClient.setSecret(secretName, secretName))
.then();
} | throw new RuntimeException("KeyVault " + secretClient.getVaultUrl() + "must contain 0 " + | public Mono<Void> globalSetupAsync() {
return super.globalSetupAsync().then(Mono.defer(() -> {
if (secretClient.listPropertiesOfSecrets().iterator().hasNext() ||
secretClient.listDeletedSecrets().iterator().hasNext()) {
return Mono.error(new Exception("KeyVault " + secretClient.getVaultUrl() + "must contain 0 " +
"secrets (including soft-deleted) before starting perf test"));
}
secretNames = new String[options.getCount()];
for (int i=0; i < secretNames.length; i++) {
secretNames[i] = "listSecretsPerfTest-" + UUID.randomUUID();
}
return Flux.fromArray(secretNames)
.flatMap(secretName -> secretAsyncClient.setSecret(secretName, secretName))
.then();
}));
} | class ListSecretsTest extends SecretsTest<PerfStressOptions> {
private String[] _secretNames;
public ListSecretsTest(PerfStressOptions options) {
super(options);
}
@Override
@Override
public Mono<Void> globalCleanupAsync() {
if (_secretNames != null) {
return deleteAndPurgeSecretsAsync(_secretNames).then(super.globalCleanupAsync());
}
else {
return super.globalCleanupAsync();
}
}
@Override
public void run() {
secretClient.listPropertiesOfSecrets().forEach(b -> {
});
}
@Override
public Mono<Void> runAsync() {
return secretAsyncClient.listPropertiesOfSecrets()
.then();
}
} | class ListSecretsTest extends SecretsTest<PerfStressOptions> {
private String[] secretNames;
public ListSecretsTest(PerfStressOptions options) {
super(options);
}
@Override
@Override
public Mono<Void> globalCleanupAsync() {
if (secretNames != null) {
return deleteAndPurgeSecretsAsync(secretNames).then(super.globalCleanupAsync());
}
else {
return super.globalCleanupAsync();
}
}
@Override
public void run() {
secretClient.listPropertiesOfSecrets().forEach(b -> {
});
}
@Override
public Mono<Void> runAsync() {
return secretAsyncClient.listPropertiesOfSecrets()
.then();
}
} |
I attempted to convert it using `then(Mono.defer())`. Is this the best way? | public Mono<Void> globalSetupAsync() {
super.globalSetupAsync().block();
if (secretClient.listPropertiesOfSecrets().iterator().hasNext() ||
secretClient.listDeletedSecrets().iterator().hasNext()) {
throw new RuntimeException("KeyVault " + secretClient.getVaultUrl() + "must contain 0 " +
"secrets (including soft-deleted) before starting perf test");
}
_secretNames = new String[options.getCount()];
for (int i=0; i < _secretNames.length; i++) {
_secretNames[i] = "listSecretsPerfTest-" + UUID.randomUUID();
}
return Flux.fromArray(_secretNames)
.flatMap(secretName -> secretAsyncClient.setSecret(secretName, secretName))
.then();
} | super.globalSetupAsync().block(); | public Mono<Void> globalSetupAsync() {
return super.globalSetupAsync().then(Mono.defer(() -> {
if (secretClient.listPropertiesOfSecrets().iterator().hasNext() ||
secretClient.listDeletedSecrets().iterator().hasNext()) {
return Mono.error(new Exception("KeyVault " + secretClient.getVaultUrl() + "must contain 0 " +
"secrets (including soft-deleted) before starting perf test"));
}
secretNames = new String[options.getCount()];
for (int i=0; i < secretNames.length; i++) {
secretNames[i] = "listSecretsPerfTest-" + UUID.randomUUID();
}
return Flux.fromArray(secretNames)
.flatMap(secretName -> secretAsyncClient.setSecret(secretName, secretName))
.then();
}));
} | class ListSecretsTest extends SecretsTest<PerfStressOptions> {
private String[] _secretNames;
public ListSecretsTest(PerfStressOptions options) {
super(options);
}
@Override
@Override
public Mono<Void> globalCleanupAsync() {
if (_secretNames != null) {
return deleteAndPurgeSecretsAsync(_secretNames).then(super.globalCleanupAsync());
}
else {
return super.globalCleanupAsync();
}
}
@Override
public void run() {
secretClient.listPropertiesOfSecrets().forEach(b -> {
});
}
@Override
public Mono<Void> runAsync() {
return secretAsyncClient.listPropertiesOfSecrets()
.then();
}
} | class ListSecretsTest extends SecretsTest<PerfStressOptions> {
private String[] secretNames;
public ListSecretsTest(PerfStressOptions options) {
super(options);
}
@Override
@Override
public Mono<Void> globalCleanupAsync() {
if (secretNames != null) {
return deleteAndPurgeSecretsAsync(secretNames).then(super.globalCleanupAsync());
}
else {
return super.globalCleanupAsync();
}
}
@Override
public void run() {
secretClient.listPropertiesOfSecrets().forEach(b -> {
});
}
@Override
public Mono<Void> runAsync() {
return secretAsyncClient.listPropertiesOfSecrets()
.then();
}
} |
We have a test case for this API; can you please quickly check that increasing the value does touch more replicas? A manual test would be sufficient. | public Mono<Void> openConnectionsAndInitCaches() {
int retryCount = Configs.getOpenAsyncRetriesCount();
if(isInitialized.compareAndSet(false, true)) {
return this.getFeedRanges().flatMap(feedRanges -> {
List<Flux<FeedResponse<ObjectNode>>> fluxList = new ArrayList<>();
SqlQuerySpec querySpec = new SqlQuerySpec();
querySpec.setQueryText("select * from c where c.id = @id");
querySpec.setParameters(Collections.singletonList(new SqlParameter("@id",
UUID.randomUUID().toString())));
for (int i = 0; i < retryCount; i++) {
for (FeedRange feedRange : feedRanges) {
CosmosQueryRequestOptions options = new CosmosQueryRequestOptions();
options.setFeedRange(feedRange);
CosmosPagedFlux<ObjectNode> cosmosPagedFlux = this.queryItems(querySpec, options,
ObjectNode.class);
fluxList.add(cosmosPagedFlux.byPage());
}
}
Mono<List<FeedResponse<ObjectNode>>> listMono = Flux.merge(fluxList).collectList();
return listMono.flatMap(objects -> Mono.empty());
});
} else {
logger.warn("openConnectionsAndInitCaches is already called once on Container {}, no operation will take place in this call", this.getId());
return Mono.empty();
}
} | for (int i = 0; i < retryCount; i++) { | public Mono<Void> openConnectionsAndInitCaches() {
int retryCount = Configs.getOpenConnectionsRetriesCount();
if(isInitialized.compareAndSet(false, true)) {
return this.getFeedRanges().flatMap(feedRanges -> {
List<Flux<FeedResponse<ObjectNode>>> fluxList = new ArrayList<>();
SqlQuerySpec querySpec = new SqlQuerySpec();
querySpec.setQueryText("select * from c where c.id = @id");
querySpec.setParameters(Collections.singletonList(new SqlParameter("@id",
UUID.randomUUID().toString())));
for (int i = 0; i < retryCount; i++) {
for (FeedRange feedRange : feedRanges) {
CosmosQueryRequestOptions options = new CosmosQueryRequestOptions();
options.setFeedRange(feedRange);
CosmosPagedFlux<ObjectNode> cosmosPagedFlux = this.queryItems(querySpec, options,
ObjectNode.class);
fluxList.add(cosmosPagedFlux.byPage());
}
}
Mono<List<FeedResponse<ObjectNode>>> listMono = Flux.merge(fluxList).collectList();
return listMono.flatMap(objects -> Mono.empty());
});
} else {
logger.warn("openConnectionsAndInitCaches is already called once on Container {}, no operation will take place in this call", this.getId());
return Mono.empty();
}
} | class type.
* @return a {@link CosmosPagedFlux} | class type.
* @return a {@link CosmosPagedFlux} |
Yes — I tested manually based on that test case; increasing the value does touch more replicas. | public Mono<Void> openConnectionsAndInitCaches() {
int retryCount = Configs.getOpenAsyncRetriesCount();
if(isInitialized.compareAndSet(false, true)) {
return this.getFeedRanges().flatMap(feedRanges -> {
List<Flux<FeedResponse<ObjectNode>>> fluxList = new ArrayList<>();
SqlQuerySpec querySpec = new SqlQuerySpec();
querySpec.setQueryText("select * from c where c.id = @id");
querySpec.setParameters(Collections.singletonList(new SqlParameter("@id",
UUID.randomUUID().toString())));
for (int i = 0; i < retryCount; i++) {
for (FeedRange feedRange : feedRanges) {
CosmosQueryRequestOptions options = new CosmosQueryRequestOptions();
options.setFeedRange(feedRange);
CosmosPagedFlux<ObjectNode> cosmosPagedFlux = this.queryItems(querySpec, options,
ObjectNode.class);
fluxList.add(cosmosPagedFlux.byPage());
}
}
Mono<List<FeedResponse<ObjectNode>>> listMono = Flux.merge(fluxList).collectList();
return listMono.flatMap(objects -> Mono.empty());
});
} else {
logger.warn("openConnectionsAndInitCaches is already called once on Container {}, no operation will take place in this call", this.getId());
return Mono.empty();
}
} | for (int i = 0; i < retryCount; i++) { | public Mono<Void> openConnectionsAndInitCaches() {
int retryCount = Configs.getOpenConnectionsRetriesCount();
if(isInitialized.compareAndSet(false, true)) {
return this.getFeedRanges().flatMap(feedRanges -> {
List<Flux<FeedResponse<ObjectNode>>> fluxList = new ArrayList<>();
SqlQuerySpec querySpec = new SqlQuerySpec();
querySpec.setQueryText("select * from c where c.id = @id");
querySpec.setParameters(Collections.singletonList(new SqlParameter("@id",
UUID.randomUUID().toString())));
for (int i = 0; i < retryCount; i++) {
for (FeedRange feedRange : feedRanges) {
CosmosQueryRequestOptions options = new CosmosQueryRequestOptions();
options.setFeedRange(feedRange);
CosmosPagedFlux<ObjectNode> cosmosPagedFlux = this.queryItems(querySpec, options,
ObjectNode.class);
fluxList.add(cosmosPagedFlux.byPage());
}
}
Mono<List<FeedResponse<ObjectNode>>> listMono = Flux.merge(fluxList).collectList();
return listMono.flatMap(objects -> Mono.empty());
});
} else {
logger.warn("openConnectionsAndInitCaches is already called once on Container {}, no operation will take place in this call", this.getId());
return Mono.empty();
}
} | class type.
* @return a {@link CosmosPagedFlux} | class type.
* @return a {@link CosmosPagedFlux} |
Does AutoRest support this directly? Or was this a manual edit? | public AzureLogAnalyticsImplBuilder audience(String audience) {
this.audience = audience;
return this;
} | } | public AzureLogAnalyticsImplBuilder audience(String audience) {
this.audience = audience;
return this;
} | class AzureLogAnalyticsImplBuilder {
private static final String SDK_NAME = "name";
private static final String SDK_VERSION = "version";
private final Map<String, String> properties = CoreUtils.getProperties("azure-monitor-query.properties");
private String audience;
/** Create an instance of the AzureLogAnalyticsImplBuilder. */
public AzureLogAnalyticsImplBuilder() {
this.pipelinePolicies = new ArrayList<>();
}
/*
* server parameter
*/
private String host;
/**
* Sets server parameter.
*
* @param host the host value.
* @return the AzureLogAnalyticsImplBuilder.
*/
public AzureLogAnalyticsImplBuilder host(String host) {
this.host = host;
return this;
}
/*
* The HTTP pipeline to send requests through
*/
private HttpPipeline pipeline;
/**
* Sets The HTTP pipeline to send requests through.
*
* @param pipeline the pipeline value.
* @return the AzureLogAnalyticsImplBuilder.
*/
public AzureLogAnalyticsImplBuilder pipeline(HttpPipeline pipeline) {
this.pipeline = pipeline;
return this;
}
/*
* The serializer to serialize an object into a string
*/
private SerializerAdapter serializerAdapter;
/**
* Sets The serializer to serialize an object into a string.
*
* @param serializerAdapter the serializerAdapter value.
* @return the AzureLogAnalyticsImplBuilder.
*/
public AzureLogAnalyticsImplBuilder serializerAdapter(SerializerAdapter serializerAdapter) {
this.serializerAdapter = serializerAdapter;
return this;
}
/*
* The HTTP client used to send the request.
*/
private HttpClient httpClient;
/**
* Sets The HTTP client used to send the request.
*
* @param httpClient the httpClient value.
* @return the AzureLogAnalyticsImplBuilder.
*/
public AzureLogAnalyticsImplBuilder httpClient(HttpClient httpClient) {
this.httpClient = httpClient;
return this;
}
/*
* The configuration store that is used during construction of the service
* client.
*/
private Configuration configuration;
/**
* Sets The configuration store that is used during construction of the service client.
*
* @param configuration the configuration value.
* @return the AzureLogAnalyticsImplBuilder.
*/
public AzureLogAnalyticsImplBuilder configuration(Configuration configuration) {
this.configuration = configuration;
return this;
}
/*
* The TokenCredential used for authentication.
*/
private TokenCredential tokenCredential;
/**
* Sets The TokenCredential used for authentication.
*
* @param tokenCredential the tokenCredential value.
* @return the AzureLogAnalyticsImplBuilder.
*/
public AzureLogAnalyticsImplBuilder credential(TokenCredential tokenCredential) {
this.tokenCredential = tokenCredential;
return this;
}
/*
* The logging configuration for HTTP requests and responses.
*/
private HttpLogOptions httpLogOptions;
/**
* Sets The logging configuration for HTTP requests and responses.
*
* @param httpLogOptions the httpLogOptions value.
* @return the AzureLogAnalyticsImplBuilder.
*/
public AzureLogAnalyticsImplBuilder httpLogOptions(HttpLogOptions httpLogOptions) {
this.httpLogOptions = httpLogOptions;
return this;
}
/*
* The retry policy that will attempt to retry failed requests, if
* applicable.
*/
private RetryPolicy retryPolicy;
/**
* Sets The retry policy that will attempt to retry failed requests, if applicable.
*
* @param retryPolicy the retryPolicy value.
* @return the AzureLogAnalyticsImplBuilder.
*/
public AzureLogAnalyticsImplBuilder retryPolicy(RetryPolicy retryPolicy) {
this.retryPolicy = retryPolicy;
return this;
}
/*
* The list of Http pipeline policies to add.
*/
private final List<HttpPipelinePolicy> pipelinePolicies;
/**
* Adds a custom Http pipeline policy.
*
* @param customPolicy The custom Http pipeline policy to add.
* @return the AzureLogAnalyticsImplBuilder.
*/
public AzureLogAnalyticsImplBuilder addPolicy(HttpPipelinePolicy customPolicy) {
pipelinePolicies.add(customPolicy);
return this;
}
/**
* Sets the audience to use for authentication with Azure Active Directory. The Azure Public Cloud audience will be
* used if the property is null.
* @param audience audience to use for authentication with Azure Active Directory. The Azure Public Cloud audience
* will be used if the property is null.
* @return the {@link AzureLogAnalyticsImplBuilder}.
*/
/**
* Builds an instance of AzureLogAnalyticsImpl with the provided parameters.
*
* @return an instance of AzureLogAnalyticsImpl.
*/
public AzureLogAnalyticsImpl buildClient() {
// Fall back to the default service endpoint when no host was configured.
// NOTE(review): the URL literal below is truncated in this copy of the file
// (unterminated string) — likely a comment-stripping artifact; confirm the
// full endpoint against the original generated source.
if (host == null) {
this.host = "https:
}
// All requests are sent against the v1 REST API surface of the resolved host.
String hostVersion = this.host + "/v1";
// Only build a default pipeline when the caller did not supply one.
if (pipeline == null) {
this.pipeline = createHttpPipeline();
}
// Default to the shared Jackson adapter for request/response (de)serialization.
if (serializerAdapter == null) {
this.serializerAdapter = JacksonAdapter.createDefaultSerializerAdapter();
}
AzureLogAnalyticsImpl client = new AzureLogAnalyticsImpl(pipeline, serializerAdapter, hostVersion);
return client;
}
// Assembles the default HTTP pipeline: user-agent, authentication, retry,
// cookie handling, caller-supplied policies, and logging — in that order.
// Policy order is significant; do not reorder without reviewing the
// azure-core pipeline contract.
private HttpPipeline createHttpPipeline() {
// Prefer the explicitly supplied configuration; otherwise read process-global settings.
Configuration buildConfiguration =
(configuration == null) ? Configuration.getGlobalConfiguration() : configuration;
if (httpLogOptions == null) {
httpLogOptions = new HttpLogOptions();
}
List<HttpPipelinePolicy> policies = new ArrayList<>();
// Client name/version come from the generated properties file; fall back to
// placeholder values when the resource is missing.
String clientName = properties.getOrDefault(SDK_NAME, "UnknownName");
String clientVersion = properties.getOrDefault(SDK_VERSION, "UnknownVersion");
policies.add(
new UserAgentPolicy(httpLogOptions.getApplicationId(), clientName, clientVersion, buildConfiguration));
HttpPolicyProviders.addBeforeRetryPolicies(policies);
// Resolve the AAD audience: default to the Azure Public Cloud audience when
// none was set, then append the "/.default" scope suffix required for
// client-credential token requests.
String resolvedAudience = this.audience;
if (resolvedAudience == null) {
resolvedAudience = LogsQueryClientAudience.AZURE_PUBLIC_CLOUD.toString();
}
resolvedAudience += "/.default";
BearerTokenAuthenticationPolicy tokenPolicy = new BearerTokenAuthenticationPolicy(this.tokenCredential,
resolvedAudience);
policies.add(tokenPolicy);
// Caller-supplied retry policy wins; otherwise use the azure-core default.
policies.add(retryPolicy == null ? new RetryPolicy() : retryPolicy);
policies.add(new CookiePolicy());
// Custom policies registered via addPolicy(...) run after the built-ins above.
policies.addAll(this.pipelinePolicies);
HttpPolicyProviders.addAfterRetryPolicies(policies);
// Logging goes last so it observes the fully-processed request/response.
policies.add(new HttpLoggingPolicy(httpLogOptions));
HttpPipeline httpPipeline =
new HttpPipelineBuilder()
.policies(policies.toArray(new HttpPipelinePolicy[0]))
.httpClient(httpClient)
.build();
return httpPipeline;
}
} | class AzureLogAnalyticsImplBuilder {
private static final String SDK_NAME = "name";
private static final String SDK_VERSION = "version";
private final Map<String, String> properties = CoreUtils.getProperties("azure-monitor-query.properties");
private String audience;
/** Create an instance of the AzureLogAnalyticsImplBuilder. */
public AzureLogAnalyticsImplBuilder() {
this.pipelinePolicies = new ArrayList<>();
}
/*
* server parameter
*/
private String host;
/**
* Sets server parameter.
*
* @param host the host value.
* @return the AzureLogAnalyticsImplBuilder.
*/
public AzureLogAnalyticsImplBuilder host(String host) {
this.host = host;
return this;
}
/*
* The HTTP pipeline to send requests through
*/
private HttpPipeline pipeline;
/**
* Sets The HTTP pipeline to send requests through.
*
* @param pipeline the pipeline value.
* @return the AzureLogAnalyticsImplBuilder.
*/
public AzureLogAnalyticsImplBuilder pipeline(HttpPipeline pipeline) {
this.pipeline = pipeline;
return this;
}
/*
* The serializer to serialize an object into a string
*/
private SerializerAdapter serializerAdapter;
/**
* Sets The serializer to serialize an object into a string.
*
* @param serializerAdapter the serializerAdapter value.
* @return the AzureLogAnalyticsImplBuilder.
*/
public AzureLogAnalyticsImplBuilder serializerAdapter(SerializerAdapter serializerAdapter) {
this.serializerAdapter = serializerAdapter;
return this;
}
/*
* The HTTP client used to send the request.
*/
private HttpClient httpClient;
/**
* Sets The HTTP client used to send the request.
*
* @param httpClient the httpClient value.
* @return the AzureLogAnalyticsImplBuilder.
*/
public AzureLogAnalyticsImplBuilder httpClient(HttpClient httpClient) {
this.httpClient = httpClient;
return this;
}
/*
* The configuration store that is used during construction of the service
* client.
*/
private Configuration configuration;
/**
* Sets The configuration store that is used during construction of the service client.
*
* @param configuration the configuration value.
* @return the AzureLogAnalyticsImplBuilder.
*/
public AzureLogAnalyticsImplBuilder configuration(Configuration configuration) {
this.configuration = configuration;
return this;
}
/*
* The TokenCredential used for authentication.
*/
private TokenCredential tokenCredential;
/**
* Sets The TokenCredential used for authentication.
*
* @param tokenCredential the tokenCredential value.
* @return the AzureLogAnalyticsImplBuilder.
*/
public AzureLogAnalyticsImplBuilder credential(TokenCredential tokenCredential) {
this.tokenCredential = tokenCredential;
return this;
}
/*
* The logging configuration for HTTP requests and responses.
*/
private HttpLogOptions httpLogOptions;
/**
* Sets The logging configuration for HTTP requests and responses.
*
* @param httpLogOptions the httpLogOptions value.
* @return the AzureLogAnalyticsImplBuilder.
*/
public AzureLogAnalyticsImplBuilder httpLogOptions(HttpLogOptions httpLogOptions) {
this.httpLogOptions = httpLogOptions;
return this;
}
/*
* The retry policy that will attempt to retry failed requests, if
* applicable.
*/
private RetryPolicy retryPolicy;
/**
* Sets The retry policy that will attempt to retry failed requests, if applicable.
*
* @param retryPolicy the retryPolicy value.
* @return the AzureLogAnalyticsImplBuilder.
*/
public AzureLogAnalyticsImplBuilder retryPolicy(RetryPolicy retryPolicy) {
this.retryPolicy = retryPolicy;
return this;
}
/*
* The list of Http pipeline policies to add.
*/
private final List<HttpPipelinePolicy> pipelinePolicies;
/**
* Adds a custom Http pipeline policy.
*
* @param customPolicy The custom Http pipeline policy to add.
* @return the AzureLogAnalyticsImplBuilder.
*/
public AzureLogAnalyticsImplBuilder addPolicy(HttpPipelinePolicy customPolicy) {
pipelinePolicies.add(customPolicy);
return this;
}
/**
* Sets the audience to use for authentication with Azure Active Directory. The Azure Public Cloud audience will be
* used if the property is null.
* @param audience audience to use for authentication with Azure Active Directory. The Azure Public Cloud audience
* will be used if the property is null.
* @return the {@link AzureLogAnalyticsImplBuilder}.
*/
/**
* Builds an instance of AzureLogAnalyticsImpl with the provided parameters.
*
* @return an instance of AzureLogAnalyticsImpl.
*/
public AzureLogAnalyticsImpl buildClient() {
if (host == null) {
this.host = "https:
}
String hostVersion = this.host + "/v1";
if (pipeline == null) {
this.pipeline = createHttpPipeline();
}
if (serializerAdapter == null) {
this.serializerAdapter = JacksonAdapter.createDefaultSerializerAdapter();
}
AzureLogAnalyticsImpl client = new AzureLogAnalyticsImpl(pipeline, serializerAdapter, hostVersion);
return client;
}
private HttpPipeline createHttpPipeline() {
Configuration buildConfiguration =
(configuration == null) ? Configuration.getGlobalConfiguration() : configuration;
if (httpLogOptions == null) {
httpLogOptions = new HttpLogOptions();
}
List<HttpPipelinePolicy> policies = new ArrayList<>();
String clientName = properties.getOrDefault(SDK_NAME, "UnknownName");
String clientVersion = properties.getOrDefault(SDK_VERSION, "UnknownVersion");
policies.add(
new UserAgentPolicy(httpLogOptions.getApplicationId(), clientName, clientVersion, buildConfiguration));
HttpPolicyProviders.addBeforeRetryPolicies(policies);
String resolvedAudience = this.audience;
if (resolvedAudience == null) {
resolvedAudience = LogsQueryClientAudience.AZURE_PUBLIC_CLOUD.toString();
}
resolvedAudience += "/.default";
BearerTokenAuthenticationPolicy tokenPolicy = new BearerTokenAuthenticationPolicy(this.tokenCredential,
resolvedAudience);
policies.add(tokenPolicy);
policies.add(retryPolicy == null ? new RetryPolicy() : retryPolicy);
policies.add(new CookiePolicy());
policies.addAll(this.pipelinePolicies);
HttpPolicyProviders.addAfterRetryPolicies(policies);
policies.add(new HttpLoggingPolicy(httpLogOptions));
HttpPipeline httpPipeline =
new HttpPipelineBuilder()
.policies(policies.toArray(new HttpPipelinePolicy[0]))
.httpClient(httpClient)
.build();
return httpPipeline;
}
} |
This was a manual edit. AutoRest doesn't support this yet, but it will soon. | public AzureLogAnalyticsImplBuilder audience(String audience) {
this.audience = audience;
return this;
} | } | public AzureLogAnalyticsImplBuilder audience(String audience) {
this.audience = audience;
return this;
} | class AzureLogAnalyticsImplBuilder {
private static final String SDK_NAME = "name";
private static final String SDK_VERSION = "version";
private final Map<String, String> properties = CoreUtils.getProperties("azure-monitor-query.properties");
private String audience;
/** Create an instance of the AzureLogAnalyticsImplBuilder. */
public AzureLogAnalyticsImplBuilder() {
this.pipelinePolicies = new ArrayList<>();
}
/*
* server parameter
*/
private String host;
/**
* Sets server parameter.
*
* @param host the host value.
* @return the AzureLogAnalyticsImplBuilder.
*/
public AzureLogAnalyticsImplBuilder host(String host) {
this.host = host;
return this;
}
/*
* The HTTP pipeline to send requests through
*/
private HttpPipeline pipeline;
/**
* Sets The HTTP pipeline to send requests through.
*
* @param pipeline the pipeline value.
* @return the AzureLogAnalyticsImplBuilder.
*/
public AzureLogAnalyticsImplBuilder pipeline(HttpPipeline pipeline) {
this.pipeline = pipeline;
return this;
}
/*
* The serializer to serialize an object into a string
*/
private SerializerAdapter serializerAdapter;
/**
* Sets The serializer to serialize an object into a string.
*
* @param serializerAdapter the serializerAdapter value.
* @return the AzureLogAnalyticsImplBuilder.
*/
public AzureLogAnalyticsImplBuilder serializerAdapter(SerializerAdapter serializerAdapter) {
this.serializerAdapter = serializerAdapter;
return this;
}
/*
* The HTTP client used to send the request.
*/
private HttpClient httpClient;
/**
* Sets The HTTP client used to send the request.
*
* @param httpClient the httpClient value.
* @return the AzureLogAnalyticsImplBuilder.
*/
public AzureLogAnalyticsImplBuilder httpClient(HttpClient httpClient) {
this.httpClient = httpClient;
return this;
}
/*
* The configuration store that is used during construction of the service
* client.
*/
private Configuration configuration;
/**
* Sets The configuration store that is used during construction of the service client.
*
* @param configuration the configuration value.
* @return the AzureLogAnalyticsImplBuilder.
*/
public AzureLogAnalyticsImplBuilder configuration(Configuration configuration) {
this.configuration = configuration;
return this;
}
/*
* The TokenCredential used for authentication.
*/
private TokenCredential tokenCredential;
/**
* Sets The TokenCredential used for authentication.
*
* @param tokenCredential the tokenCredential value.
* @return the AzureLogAnalyticsImplBuilder.
*/
public AzureLogAnalyticsImplBuilder credential(TokenCredential tokenCredential) {
this.tokenCredential = tokenCredential;
return this;
}
/*
* The logging configuration for HTTP requests and responses.
*/
private HttpLogOptions httpLogOptions;
/**
* Sets The logging configuration for HTTP requests and responses.
*
* @param httpLogOptions the httpLogOptions value.
* @return the AzureLogAnalyticsImplBuilder.
*/
public AzureLogAnalyticsImplBuilder httpLogOptions(HttpLogOptions httpLogOptions) {
this.httpLogOptions = httpLogOptions;
return this;
}
/*
* The retry policy that will attempt to retry failed requests, if
* applicable.
*/
private RetryPolicy retryPolicy;
/**
* Sets The retry policy that will attempt to retry failed requests, if applicable.
*
* @param retryPolicy the retryPolicy value.
* @return the AzureLogAnalyticsImplBuilder.
*/
public AzureLogAnalyticsImplBuilder retryPolicy(RetryPolicy retryPolicy) {
this.retryPolicy = retryPolicy;
return this;
}
/*
* The list of Http pipeline policies to add.
*/
private final List<HttpPipelinePolicy> pipelinePolicies;
/**
* Adds a custom Http pipeline policy.
*
* @param customPolicy The custom Http pipeline policy to add.
* @return the AzureLogAnalyticsImplBuilder.
*/
public AzureLogAnalyticsImplBuilder addPolicy(HttpPipelinePolicy customPolicy) {
pipelinePolicies.add(customPolicy);
return this;
}
/**
* Sets the audience to use for authentication with Azure Active Directory. The Azure Public Cloud audience will be
* used if the property is null.
* @param audience audience to use for authentication with Azure Active Directory. The Azure Public Cloud audience
* will be used if the property is null.
* @return the {@link AzureLogAnalyticsImplBuilder}.
*/
/**
* Builds an instance of AzureLogAnalyticsImpl with the provided parameters.
*
* @return an instance of AzureLogAnalyticsImpl.
*/
public AzureLogAnalyticsImpl buildClient() {
if (host == null) {
this.host = "https:
}
String hostVersion = this.host + "/v1";
if (pipeline == null) {
this.pipeline = createHttpPipeline();
}
if (serializerAdapter == null) {
this.serializerAdapter = JacksonAdapter.createDefaultSerializerAdapter();
}
AzureLogAnalyticsImpl client = new AzureLogAnalyticsImpl(pipeline, serializerAdapter, hostVersion);
return client;
}
private HttpPipeline createHttpPipeline() {
Configuration buildConfiguration =
(configuration == null) ? Configuration.getGlobalConfiguration() : configuration;
if (httpLogOptions == null) {
httpLogOptions = new HttpLogOptions();
}
List<HttpPipelinePolicy> policies = new ArrayList<>();
String clientName = properties.getOrDefault(SDK_NAME, "UnknownName");
String clientVersion = properties.getOrDefault(SDK_VERSION, "UnknownVersion");
policies.add(
new UserAgentPolicy(httpLogOptions.getApplicationId(), clientName, clientVersion, buildConfiguration));
HttpPolicyProviders.addBeforeRetryPolicies(policies);
String resolvedAudience = this.audience;
if (resolvedAudience == null) {
resolvedAudience = LogsQueryClientAudience.AZURE_PUBLIC_CLOUD.toString();
}
resolvedAudience += "/.default";
BearerTokenAuthenticationPolicy tokenPolicy = new BearerTokenAuthenticationPolicy(this.tokenCredential,
resolvedAudience);
policies.add(tokenPolicy);
policies.add(retryPolicy == null ? new RetryPolicy() : retryPolicy);
policies.add(new CookiePolicy());
policies.addAll(this.pipelinePolicies);
HttpPolicyProviders.addAfterRetryPolicies(policies);
policies.add(new HttpLoggingPolicy(httpLogOptions));
HttpPipeline httpPipeline =
new HttpPipelineBuilder()
.policies(policies.toArray(new HttpPipelinePolicy[0]))
.httpClient(httpClient)
.build();
return httpPipeline;
}
} | class AzureLogAnalyticsImplBuilder {
private static final String SDK_NAME = "name";
private static final String SDK_VERSION = "version";
private final Map<String, String> properties = CoreUtils.getProperties("azure-monitor-query.properties");
private String audience;
/** Create an instance of the AzureLogAnalyticsImplBuilder. */
public AzureLogAnalyticsImplBuilder() {
this.pipelinePolicies = new ArrayList<>();
}
/*
* server parameter
*/
private String host;
/**
* Sets server parameter.
*
* @param host the host value.
* @return the AzureLogAnalyticsImplBuilder.
*/
public AzureLogAnalyticsImplBuilder host(String host) {
this.host = host;
return this;
}
/*
* The HTTP pipeline to send requests through
*/
private HttpPipeline pipeline;
/**
* Sets The HTTP pipeline to send requests through.
*
* @param pipeline the pipeline value.
* @return the AzureLogAnalyticsImplBuilder.
*/
public AzureLogAnalyticsImplBuilder pipeline(HttpPipeline pipeline) {
this.pipeline = pipeline;
return this;
}
/*
* The serializer to serialize an object into a string
*/
private SerializerAdapter serializerAdapter;
/**
* Sets The serializer to serialize an object into a string.
*
* @param serializerAdapter the serializerAdapter value.
* @return the AzureLogAnalyticsImplBuilder.
*/
public AzureLogAnalyticsImplBuilder serializerAdapter(SerializerAdapter serializerAdapter) {
this.serializerAdapter = serializerAdapter;
return this;
}
/*
* The HTTP client used to send the request.
*/
private HttpClient httpClient;
/**
* Sets The HTTP client used to send the request.
*
* @param httpClient the httpClient value.
* @return the AzureLogAnalyticsImplBuilder.
*/
public AzureLogAnalyticsImplBuilder httpClient(HttpClient httpClient) {
this.httpClient = httpClient;
return this;
}
/*
* The configuration store that is used during construction of the service
* client.
*/
private Configuration configuration;
/**
* Sets The configuration store that is used during construction of the service client.
*
* @param configuration the configuration value.
* @return the AzureLogAnalyticsImplBuilder.
*/
public AzureLogAnalyticsImplBuilder configuration(Configuration configuration) {
this.configuration = configuration;
return this;
}
/*
* The TokenCredential used for authentication.
*/
private TokenCredential tokenCredential;
/**
* Sets The TokenCredential used for authentication.
*
* @param tokenCredential the tokenCredential value.
* @return the AzureLogAnalyticsImplBuilder.
*/
public AzureLogAnalyticsImplBuilder credential(TokenCredential tokenCredential) {
this.tokenCredential = tokenCredential;
return this;
}
/*
* The logging configuration for HTTP requests and responses.
*/
private HttpLogOptions httpLogOptions;
/**
* Sets The logging configuration for HTTP requests and responses.
*
* @param httpLogOptions the httpLogOptions value.
* @return the AzureLogAnalyticsImplBuilder.
*/
public AzureLogAnalyticsImplBuilder httpLogOptions(HttpLogOptions httpLogOptions) {
this.httpLogOptions = httpLogOptions;
return this;
}
/*
* The retry policy that will attempt to retry failed requests, if
* applicable.
*/
private RetryPolicy retryPolicy;
/**
* Sets The retry policy that will attempt to retry failed requests, if applicable.
*
* @param retryPolicy the retryPolicy value.
* @return the AzureLogAnalyticsImplBuilder.
*/
public AzureLogAnalyticsImplBuilder retryPolicy(RetryPolicy retryPolicy) {
this.retryPolicy = retryPolicy;
return this;
}
/*
* The list of Http pipeline policies to add.
*/
private final List<HttpPipelinePolicy> pipelinePolicies;
/**
* Adds a custom Http pipeline policy.
*
* @param customPolicy The custom Http pipeline policy to add.
* @return the AzureLogAnalyticsImplBuilder.
*/
public AzureLogAnalyticsImplBuilder addPolicy(HttpPipelinePolicy customPolicy) {
pipelinePolicies.add(customPolicy);
return this;
}
/**
* Sets the audience to use for authentication with Azure Active Directory. The Azure Public Cloud audience will be
* used if the property is null.
* @param audience audience to use for authentication with Azure Active Directory. The Azure Public Cloud audience
* will be used if the property is null.
* @return the {@link AzureLogAnalyticsImplBuilder}.
*/
/**
* Builds an instance of AzureLogAnalyticsImpl with the provided parameters.
*
* @return an instance of AzureLogAnalyticsImpl.
*/
public AzureLogAnalyticsImpl buildClient() {
if (host == null) {
this.host = "https:
}
String hostVersion = this.host + "/v1";
if (pipeline == null) {
this.pipeline = createHttpPipeline();
}
if (serializerAdapter == null) {
this.serializerAdapter = JacksonAdapter.createDefaultSerializerAdapter();
}
AzureLogAnalyticsImpl client = new AzureLogAnalyticsImpl(pipeline, serializerAdapter, hostVersion);
return client;
}
private HttpPipeline createHttpPipeline() {
Configuration buildConfiguration =
(configuration == null) ? Configuration.getGlobalConfiguration() : configuration;
if (httpLogOptions == null) {
httpLogOptions = new HttpLogOptions();
}
List<HttpPipelinePolicy> policies = new ArrayList<>();
String clientName = properties.getOrDefault(SDK_NAME, "UnknownName");
String clientVersion = properties.getOrDefault(SDK_VERSION, "UnknownVersion");
policies.add(
new UserAgentPolicy(httpLogOptions.getApplicationId(), clientName, clientVersion, buildConfiguration));
HttpPolicyProviders.addBeforeRetryPolicies(policies);
String resolvedAudience = this.audience;
if (resolvedAudience == null) {
resolvedAudience = LogsQueryClientAudience.AZURE_PUBLIC_CLOUD.toString();
}
resolvedAudience += "/.default";
BearerTokenAuthenticationPolicy tokenPolicy = new BearerTokenAuthenticationPolicy(this.tokenCredential,
resolvedAudience);
policies.add(tokenPolicy);
policies.add(retryPolicy == null ? new RetryPolicy() : retryPolicy);
policies.add(new CookiePolicy());
policies.addAll(this.pipelinePolicies);
HttpPolicyProviders.addAfterRetryPolicies(policies);
policies.add(new HttpLoggingPolicy(httpLogOptions));
HttpPipeline httpPipeline =
new HttpPipelineBuilder()
.policies(policies.toArray(new HttpPipelinePolicy[0]))
.httpClient(httpClient)
.build();
return httpPipeline;
}
} |
Include the `type` name too in the error message. | static Schema getSchema(Object object) throws IllegalArgumentException {
        // GenericContainer values (records, arrays, enums, fixed) carry their own schema.
        if (object instanceof GenericContainer) {
            return ((GenericContainer) object).getSchema();
        }
        // Avro represents null with the dedicated NULL schema.
        if (object == null) {
            return NULL_SCHEMA;
        }
        final Class<?> objectClass = object.getClass();
        final Schema primitiveSchema = getPrimitiveSchema(objectClass);
        if (primitiveSchema != null) {
            return primitiveSchema;
        } else {
            // NOTE(review): include objectClass in this message so callers can see
            // which unsupported type was actually passed. The "throws" clause on the
            // signature is redundant for an unchecked exception.
            throw new IllegalArgumentException("Unsupported Avro type. Supported types are null, GenericContainer,"
                + " Boolean, Integer, Long, Float, Double, String, Byte[], Byte, ByteBuffer, and their primitive"
                + " equivalents.");
        }
} | throw new IllegalArgumentException("Unsupported Avro type. Supported types are null, GenericContainer," | static Schema getSchema(Object object) {
if (object instanceof GenericContainer) {
return ((GenericContainer) object).getSchema();
}
if (object == null) {
return NULL_SCHEMA;
}
final Class<?> objectClass = object.getClass();
final Schema primitiveSchema = getPrimitiveSchema(objectClass);
if (primitiveSchema != null) {
return primitiveSchema;
} else {
throw new IllegalArgumentException("Unsupported Avro type. Supported types are null, GenericContainer,"
+ " Boolean, Integer, Long, Float, Double, String, Byte[], Byte, ByteBuffer, and their primitive"
+ " equivalents. Actual: " + objectClass);
}
} | class is abstract but not final.
schemas.put(ByteBuffer.class, byteSchema);
final Schema stringSchema = Schema.create(Schema.Type.STRING);
schemas.put(String.class, stringSchema);
PRIMITIVE_SCHEMAS = Collections.unmodifiableMap(schemas);
}
/**
* Instantiates AvroCodec instance
*
* @param avroSpecificReader flag indicating if decoder should decode records as {@link SpecificRecord
* SpecificRecords} | class is abstract but not final.
schemas.put(ByteBuffer.class, byteSchema);
final Schema stringSchema = Schema.create(Schema.Type.STRING);
schemas.put(String.class, stringSchema);
PRIMITIVE_SCHEMAS = Collections.unmodifiableMap(schemas);
}
/**
* Instantiates AvroCodec instance
*
* @param avroSpecificReader flag indicating if decoder should decode records as {@link SpecificRecord
* SpecificRecords} |
Any reason to add in Jackson instead of using the previous XML parsing functionality? #Resolved | static List<BomDependency> parsePomFileContent(Reader responseStream) {
        List<BomDependency> bomDependencies = new ArrayList<>();
        ObjectMapper mapper = new XmlMapper();
        try {
            // Reads the whole POM into an untyped map; the raw HashMap casts below are
            // unchecked and only fail at runtime if the XML shape differs.
            HashMap<String, Object> value = mapper.readValue(responseStream, HashMap.class);
            Object packagingProp = value.getOrDefault("packaging", null);
            if(packagingProp != null && packagingProp.toString().equalsIgnoreCase("pom")) {
                // BUG(review): recursing with the same Reader cannot work -- readValue
                // above has already consumed the stream, and re-entering this method in
                // the same state risks infinite recursion. Presumably "pom"-packaged
                // files were meant to be re-parsed via the BOM path with a fresh
                // reader; confirm the intent.
                return parsePomFileContent(responseStream);
            }
            HashMap<String, Object> dependenciesTag = (HashMap<String, Object>)value.getOrDefault("dependencies", null);
            if(dependenciesTag == null) {
                // NOTE(review): returns null here but a (possibly empty) list below --
                // callers must null-check; consider returning an empty list instead.
                return null;
            }
            ArrayList<HashMap<String, Object>> dependencies = (ArrayList<HashMap<String, Object>>) dependenciesTag.getOrDefault("dependency", null);
            for(HashMap<String, Object> dependency: dependencies) {
                String groupId = (String) dependency.getOrDefault("groupId", null);
                String artifactId = (String) dependency.getOrDefault("artifactId", null);
                String version = (String) dependency.getOrDefault("version", null);
                String scope = (String) dependency.getOrDefault("scope", ScopeType.COMPILE.toString());
                // Only "test" scope is distinguished; every other scope maps to COMPILE
                // (the default branch merely restates the initializer).
                ScopeType scopeType = ScopeType.COMPILE;
                switch(scope) {
                    case "test" : scopeType = ScopeType.TEST;
                    break;
                    default: scopeType = ScopeType.COMPILE;
                }
                bomDependencies.add(new BomDependency(groupId, artifactId, version, scopeType));
            }
        } catch (IOException exception) {
            // NOTE(review): prefer the class logger over printStackTrace.
            exception.printStackTrace();
        }
        // distinct() relies on BomDependency equals/hashCode.
        return bomDependencies.stream().distinct().collect(Collectors.toList());
} | ObjectMapper mapper = new XmlMapper(); | static List<BomDependency> parsePomFileContent(Reader responseStream) {
List<BomDependency> bomDependencies = new ArrayList<>();
ObjectMapper mapper = new XmlMapper();
mapper.disable(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES);
try {
Model value = mapper.readValue(responseStream, Model.class);
List<Dependency> dependencies = value.getDependencies();
if(dependencies == null) {
return bomDependencies;
}
for(Dependency dependency : dependencies) {
ScopeType scopeType = ScopeType.COMPILE;
if("test".equals(dependency.getScope())) {
scopeType = ScopeType.TEST;
}
bomDependencies.add(new BomDependency(
dependency.getGroupId(),
dependency.getArtifactId(),
dependency.getVersion(),
scopeType));
}
} catch (IOException exception) {
exception.printStackTrace();
}
return bomDependencies.stream().distinct().collect(Collectors.toList());
} | class Utils {
public static final String COMMANDLINE_INPUTDIRECTORY = "inputdir";
public static final String COMMANDLINE_OUTPUTDIRECTORY = "outputdir";
public static final String EMPTY_STRING = "";
public static final String COMMANDLINE_INPUTFILE = "inputfile";
public static final String COMMANDLINE_OUTPUTFILE = "outputfile";
public static final String COMMANDLINE_POMFILE = "pomfile";
public static final String COMMANDLINE_OVERRIDDEN_INPUTDEPENDENCIES_FILE = "inputdependenciesfile";
public static final String COMMANDLINE_REPORTFILE = "reportfile";
public static final String COMMANDLINE_MODE = "mode";
public static final String ANALYZE_MODE = "analyze";
public static final String GENERATE_MODE = "generate";
public static final Pattern COMMANDLINE_REGEX = Pattern.compile("-(.*)=(.*)");
public static final List<String> EXCLUSION_LIST = Arrays.asList("azure-spring-data-cosmos", "azure-spring-data-cosmos-test", "azure-core-test", "azure-sdk-all", "azure-sdk-parent", "azure-client-sdk-parent");
public static final Pattern SDK_DEPENDENCY_PATTERN = Pattern.compile("com.azure:(.+);(.+);(.+)");
public static final String BASE_AZURE_GROUPID = "com.azure";
public static final String AZURE_TEST_LIBRARY_IDENTIFIER = "-test";
public static final String AZURE_PERF_LIBRARY_IDENTIFIER = "-perf";
public static final HttpClient HTTP_CLIENT = HttpClient.newHttpClient();
public static final Pattern STRING_SPLIT_BY_DOT = Pattern.compile("[.]");
public static final Pattern STRING_SPLIT_BY_COLON = Pattern.compile("[:]");
public static final Pattern INPUT_DEPENDENCY_PATTERN = Pattern.compile("(.+);(.*)");
public static final String PROJECT_VERSION = "project.version";
public static final HashSet<String> RESOLVED_EXCLUSION_LIST = new HashSet<>(Arrays.asList(
"junit-jupiter-api"
));
public static final HashSet<String> IGNORE_CONFLICT_LIST = new HashSet<>(/*Arrays.asList(
"slf4j-api"
)*/);
public static final String POM_TYPE = "pom";
private static Logger logger = LoggerFactory.getLogger(Utils.class);
static void validateNotNullOrEmpty(String argValue, String argName) {
if(argValue == null || argValue.isEmpty()) {
throw new NullPointerException(String.format("%s can't be null", argName));
}
}
static void validateNotNullOrEmpty(String[] argValue, String argName) {
if(Arrays.stream(argValue).anyMatch(value -> value == null || value.isEmpty())) {
throw new IllegalArgumentException(String.format("%s can't be null", argName));
}
}
    /**
     * Creates a Maven resolver configured to resolve against Maven Central.
     *
     * @return a resolver system pointed at Maven Central.
     */
    static MavenResolverSystemBase<PomEquippedResolveStage, PomlessResolveStage, MavenStrategyStage, MavenFormatStage> getMavenResolver() {
        return Maven.configureResolver().withMavenCentralRepo(true);
    }
    /**
     * Checks whether the given dependency can be resolved from Maven Central,
     * i.e. whether the artifact has been published.
     *
     * @param dependency the dependency to check.
     * @return true if the artifact resolves; false if resolution fails for any reason.
     */
    static boolean isPublishedArtifact(BomDependency dependency) {
        try {
            return getResolvedArtifact(dependency) != null;
        } catch (Exception ex) {
            // Resolution failures (missing artifact, network errors) are logged and
            // treated as "not published" rather than propagated.
            logger.error(ex.toString());
        }
        return false;
    }
static MavenResolvedArtifact getResolvedArtifact(MavenDependency dependency) {
MavenResolvedArtifact mavenResolvedArtifact = null;
mavenResolvedArtifact = getMavenResolver()
.addDependency(dependency)
.resolve()
.withoutTransitivity()
.asSingleResolvedArtifact();
return mavenResolvedArtifact;
}
static void validateNull(String argValue, String argName) {
if(argValue != null) {
throw new IllegalArgumentException(String.format("%s should be null", argName));
}
}
static void validateValues(String argName, String argValue, String ... expectedValues) {
if(Arrays.stream(expectedValues).noneMatch(a -> a.equals(argValue))) {
throw new IllegalArgumentException(String.format("%s must match %s", argName, Arrays.toString(expectedValues)));
}
}
static List<BomDependency> getExternalDependenciesContent(List<Dependency> dependencies) {
List<BomDependency> allResolvedDependencies = new ArrayList<>();
for (Dependency dependency : dependencies) {
List<BomDependency> resolvedDependencies = getPomFileContent(dependency);
if (resolvedDependencies != null) {
allResolvedDependencies.addAll(resolvedDependencies);
}
}
return allResolvedDependencies;
}
    /**
     * Downloads the POM for {@code dependency} from the public Maven repository and
     * parses its dependency list.
     *
     * @param dependency the dependency whose POM should be fetched.
     * @return the parsed dependencies, or null when the response is not 200 or
     * reading the body fails.
     * @throws UnsupportedOperationException if the groupId does not have 2 or 3 segments.
     */
    static List<BomDependency> getPomFileContent(Dependency dependency) {
        // The repository URL layout depends on how many segments the groupId has.
        String[] groups = STRING_SPLIT_BY_DOT.split(dependency.getGroupId());
        String url = null;
        if(groups.length == 2) {
            // NOTE(review): the URL literals below appear truncated in this copy of
            // the file ("https:" with nothing after) -- verify the repository URLs
            // against the original source.
            url = "https:
        }
        else if (groups.length == 3) {
            url = "https:
        }
        else {
            throw new UnsupportedOperationException("Can't parse the external BOM file.");
        }
        HttpRequest request = HttpRequest.newBuilder()
            .uri(URI.create(url))
            .GET()
            .header("accept", "application/xml")
            .timeout(Duration.ofMillis(5000))
            .build();
        // Blocks on the async request via join(); non-200 responses yield null.
        // NOTE(review): on non-200 responses the body InputStream is never closed.
        return HTTP_CLIENT.sendAsync(request, HttpResponse.BodyHandlers.ofInputStream())
            .thenApply(response -> {
                if(response.statusCode() == 200) {
                    // NOTE(review): InputStreamReader uses the platform default
                    // charset here -- presumably the POM is UTF-8; confirm.
                    try (InputStreamReader reader = new InputStreamReader(response.body())) {
                        return Utils.parsePomFileContent(reader);
                    }
                    catch (IOException ex) {
                        logger.error("Failed to read contents for {}", dependency.toString());
                    }
                }
                return null;
            }).join();
    }
static BomDependencyNoVersion toBomDependencyNoVersion(BomDependency bomDependency) {
return new BomDependencyNoVersion(bomDependency.getGroupId(), bomDependency.getArtifactId());
}
static List<BomDependency> parsePomFileContent(String fileName) {
try (FileReader reader = new FileReader(fileName)) {
return parsePomFileContent(reader);
} catch (IOException exception) {
logger.error("Failed to read the contents of the pom file: {}", fileName);
}
return new ArrayList<>();
}
static List<BomDependency> parseBomFileContent(Reader responseStream) {
MavenXpp3Reader reader = new MavenXpp3Reader();
try {
Model model = reader.read(responseStream);
DependencyManagement management = model.getDependencyManagement();
return management.getDependencies().stream().map(dep -> {
String version = getPropertyName(dep.getVersion());
while(model.getProperties().getProperty(version) != null) {
version = getPropertyName(model.getProperties().getProperty(version));
if(version.equals(PROJECT_VERSION)) {
version = model.getVersion();
}
}
if(version == null) {
version = dep.getVersion();
}
BomDependency bomDependency = new BomDependency(dep.getGroupId(), dep.getArtifactId(), version);
return bomDependency;
}).collect(Collectors.toList());
} catch (IOException exception) {
exception.printStackTrace();
} catch (XmlPullParserException e) {
e.printStackTrace();
}
return null;
}
private static String getPropertyName(String propertyValue) {
if(propertyValue.startsWith("${")) {
return propertyValue.substring(2, propertyValue.length() - 1);
}
return propertyValue;
}
} | class Utils {
public static final String COMMANDLINE_INPUTDIRECTORY = "inputdir";
public static final String COMMANDLINE_OUTPUTDIRECTORY = "outputdir";
public static final String COMMANDLINE_MODE = "mode";
public static final String ANALYZE_MODE = "analyze";
public static final String GENERATE_MODE = "generate";
public static final Pattern COMMANDLINE_REGEX = Pattern.compile("-(.*)=(.*)");
public static final List<String> EXCLUSION_LIST = Arrays.asList("azure-spring-data-cosmos", "azure-spring-data-cosmos-test", "azure-core-test", "azure-sdk-all", "azure-sdk-parent", "azure-client-sdk-parent");
public static final Pattern SDK_DEPENDENCY_PATTERN = Pattern.compile("com.azure:(.+);(.+);(.+)");
public static final String BASE_AZURE_GROUPID = "com.azure";
public static final String AZURE_TEST_LIBRARY_IDENTIFIER = "-test";
public static final String AZURE_PERF_LIBRARY_IDENTIFIER = "-perf";
public static final HttpClient HTTP_CLIENT = HttpClient.newHttpClient();
public static final Pattern STRING_SPLIT_BY_DOT = Pattern.compile("[.]");
public static final Pattern STRING_SPLIT_BY_COLON = Pattern.compile("[:]");
public static final Pattern INPUT_DEPENDENCY_PATTERN = Pattern.compile("(.+);(.*)");
public static final String PROJECT_VERSION = "project.version";
public static final HashSet<String> RESOLVED_EXCLUSION_LIST = new HashSet<>(Arrays.asList(
"junit-jupiter-api"
));
public static final HashSet<String> IGNORE_CONFLICT_LIST = new HashSet<>(/*Arrays.asList(
"slf4j-api"
)*/);
public static final String POM_TYPE = "pom";
private static Logger logger = LoggerFactory.getLogger(Utils.class);
static void validateNotNullOrEmpty(String argValue, String argName) {
if(argValue == null || argValue.isEmpty()) {
throw new NullPointerException(String.format("%s can't be null", argName));
}
}
static void validateNotNullOrEmpty(String[] argValue, String argName) {
if(Arrays.stream(argValue).anyMatch(value -> value == null || value.isEmpty())) {
throw new IllegalArgumentException(String.format("%s can't be null", argName));
}
}
static MavenResolverSystemBase<PomEquippedResolveStage, PomlessResolveStage, MavenStrategyStage, MavenFormatStage> getMavenResolver() {
return Maven.configureResolver().withMavenCentralRepo(true);
}
static boolean isPublishedArtifact(BomDependency dependency) {
try {
return getResolvedArtifact(dependency) != null;
} catch (Exception ex) {
logger.error(ex.toString());
}
return false;
}
static MavenResolvedArtifact getResolvedArtifact(MavenDependency dependency) {
MavenResolvedArtifact mavenResolvedArtifact = null;
mavenResolvedArtifact = getMavenResolver()
.addDependency(dependency)
.resolve()
.withoutTransitivity()
.asSingleResolvedArtifact();
return mavenResolvedArtifact;
}
static void validateNull(String argValue, String argName) {
if(argValue != null) {
throw new IllegalArgumentException(String.format("%s should be null", argName));
}
}
static void validateValues(String argName, String argValue, String ... expectedValues) {
if(Arrays.stream(expectedValues).noneMatch(a -> a.equals(argValue))) {
throw new IllegalArgumentException(String.format("%s must match %s", argName, Arrays.toString(expectedValues)));
}
}
static List<BomDependency> getExternalDependenciesContent(List<Dependency> dependencies) {
List<BomDependency> allResolvedDependencies = new ArrayList<>();
for (Dependency dependency : dependencies) {
List<BomDependency> resolvedDependencies = getPomFileContent(dependency);
if (resolvedDependencies != null) {
allResolvedDependencies.addAll(resolvedDependencies);
}
}
return allResolvedDependencies;
}
static List<BomDependency> getPomFileContent(Dependency dependency) {
String[] groups = STRING_SPLIT_BY_DOT.split(dependency.getGroupId());
String url = null;
if(groups.length == 2) {
url = "https:
}
else if (groups.length == 3) {
url = "https:
}
else {
throw new UnsupportedOperationException("Can't parse the external BOM file.");
}
HttpRequest request = HttpRequest.newBuilder()
.uri(URI.create(url))
.GET()
.header("accept", "application/xml")
.timeout(Duration.ofMillis(5000))
.build();
return HTTP_CLIENT.sendAsync(request, HttpResponse.BodyHandlers.ofInputStream())
.thenApply(response -> {
if(response.statusCode() == 200) {
try (InputStreamReader reader = new InputStreamReader(response.body())) {
return Utils.parsePomFileContent(reader);
}
catch (IOException ex) {
logger.error("Failed to read contents for {}", dependency.toString());
}
}
return null;
}).join();
}
static BomDependencyNoVersion toBomDependencyNoVersion(BomDependency bomDependency) {
return new BomDependencyNoVersion(bomDependency.getGroupId(), bomDependency.getArtifactId());
}
static List<BomDependency> parsePomFileContent(String fileName) {
try (FileReader reader = new FileReader(fileName)) {
return parsePomFileContent(reader);
} catch (IOException exception) {
logger.error("Failed to read the contents of the pom file: {}", fileName);
}
return new ArrayList<>();
}
static List<BomDependency> parseBomFileContent(Reader responseStream) {
MavenXpp3Reader reader = new MavenXpp3Reader();
try {
Model model = reader.read(responseStream);
DependencyManagement management = model.getDependencyManagement();
return management.getDependencies().stream().map(dep -> {
String version = getPropertyName(dep.getVersion());
while(model.getProperties().getProperty(version) != null) {
version = getPropertyName(model.getProperties().getProperty(version));
if(version.equals(PROJECT_VERSION)) {
version = model.getVersion();
}
}
if(version == null) {
version = dep.getVersion();
}
BomDependency bomDependency = new BomDependency(dep.getGroupId(), dep.getArtifactId(), version);
return bomDependency;
}).collect(Collectors.toList());
} catch (IOException exception) {
exception.printStackTrace();
} catch (XmlPullParserException e) {
e.printStackTrace();
}
return null;
}
private static String getPropertyName(String propertyValue) {
if(propertyValue.startsWith("${")) {
return propertyValue.substring(2, propertyValue.length() - 1);
}
return propertyValue;
}
} |
Missing a `break` #Resolved | static List<BomDependency> parsePomFileContent(Reader responseStream) {
List<BomDependency> bomDependencies = new ArrayList<>();
ObjectMapper mapper = new XmlMapper();
try {
HashMap<String, Object> value = mapper.readValue(responseStream, HashMap.class);
Object packagingProp = value.getOrDefault("packaging", null);
if(packagingProp != null && packagingProp.toString().equalsIgnoreCase("pom")) {
return parsePomFileContent(responseStream);
}
HashMap<String, Object> dependenciesTag = (HashMap<String, Object>)value.getOrDefault("dependencies", null);
if(dependenciesTag == null) {
return null;
}
ArrayList<HashMap<String, Object>> dependencies = (ArrayList<HashMap<String, Object>>) dependenciesTag.getOrDefault("dependency", null);
for(HashMap<String, Object> dependency: dependencies) {
String groupId = (String) dependency.getOrDefault("groupId", null);
String artifactId = (String) dependency.getOrDefault("artifactId", null);
String version = (String) dependency.getOrDefault("version", null);
String scope = (String) dependency.getOrDefault("scope", ScopeType.COMPILE.toString());
ScopeType scopeType = ScopeType.COMPILE;
switch(scope) {
case "test" : scopeType = ScopeType.TEST;
break;
default: scopeType = ScopeType.COMPILE;
}
bomDependencies.add(new BomDependency(groupId, artifactId, version, scopeType));
}
} catch (IOException exception) {
exception.printStackTrace();
}
return bomDependencies.stream().distinct().collect(Collectors.toList());
} | default: scopeType = ScopeType.COMPILE; | static List<BomDependency> parsePomFileContent(Reader responseStream) {
List<BomDependency> bomDependencies = new ArrayList<>();
ObjectMapper mapper = new XmlMapper();
mapper.disable(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES);
try {
Model value = mapper.readValue(responseStream, Model.class);
List<Dependency> dependencies = value.getDependencies();
if(dependencies == null) {
return bomDependencies;
}
for(Dependency dependency : dependencies) {
ScopeType scopeType = ScopeType.COMPILE;
if("test".equals(dependency.getScope())) {
scopeType = ScopeType.TEST;
}
bomDependencies.add(new BomDependency(
dependency.getGroupId(),
dependency.getArtifactId(),
dependency.getVersion(),
scopeType));
}
} catch (IOException exception) {
exception.printStackTrace();
}
return bomDependencies.stream().distinct().collect(Collectors.toList());
} | class Utils {
public static final String COMMANDLINE_INPUTDIRECTORY = "inputdir";
public static final String COMMANDLINE_OUTPUTDIRECTORY = "outputdir";
public static final String EMPTY_STRING = "";
public static final String COMMANDLINE_INPUTFILE = "inputfile";
public static final String COMMANDLINE_OUTPUTFILE = "outputfile";
public static final String COMMANDLINE_POMFILE = "pomfile";
public static final String COMMANDLINE_OVERRIDDEN_INPUTDEPENDENCIES_FILE = "inputdependenciesfile";
public static final String COMMANDLINE_REPORTFILE = "reportfile";
public static final String COMMANDLINE_MODE = "mode";
public static final String ANALYZE_MODE = "analyze";
public static final String GENERATE_MODE = "generate";
public static final Pattern COMMANDLINE_REGEX = Pattern.compile("-(.*)=(.*)");
public static final List<String> EXCLUSION_LIST = Arrays.asList("azure-spring-data-cosmos", "azure-spring-data-cosmos-test", "azure-core-test", "azure-sdk-all", "azure-sdk-parent", "azure-client-sdk-parent");
public static final Pattern SDK_DEPENDENCY_PATTERN = Pattern.compile("com.azure:(.+);(.+);(.+)");
public static final String BASE_AZURE_GROUPID = "com.azure";
public static final String AZURE_TEST_LIBRARY_IDENTIFIER = "-test";
public static final String AZURE_PERF_LIBRARY_IDENTIFIER = "-perf";
public static final HttpClient HTTP_CLIENT = HttpClient.newHttpClient();
public static final Pattern STRING_SPLIT_BY_DOT = Pattern.compile("[.]");
public static final Pattern STRING_SPLIT_BY_COLON = Pattern.compile("[:]");
public static final Pattern INPUT_DEPENDENCY_PATTERN = Pattern.compile("(.+);(.*)");
public static final String PROJECT_VERSION = "project.version";
public static final HashSet<String> RESOLVED_EXCLUSION_LIST = new HashSet<>(Arrays.asList(
"junit-jupiter-api"
));
public static final HashSet<String> IGNORE_CONFLICT_LIST = new HashSet<>(/*Arrays.asList(
"slf4j-api"
)*/);
public static final String POM_TYPE = "pom";
private static Logger logger = LoggerFactory.getLogger(Utils.class);
static void validateNotNullOrEmpty(String argValue, String argName) {
if(argValue == null || argValue.isEmpty()) {
throw new NullPointerException(String.format("%s can't be null", argName));
}
}
static void validateNotNullOrEmpty(String[] argValue, String argName) {
if(Arrays.stream(argValue).anyMatch(value -> value == null || value.isEmpty())) {
throw new IllegalArgumentException(String.format("%s can't be null", argName));
}
}
static MavenResolverSystemBase<PomEquippedResolveStage, PomlessResolveStage, MavenStrategyStage, MavenFormatStage> getMavenResolver() {
return Maven.configureResolver().withMavenCentralRepo(true);
}
static boolean isPublishedArtifact(BomDependency dependency) {
try {
return getResolvedArtifact(dependency) != null;
} catch (Exception ex) {
logger.error(ex.toString());
}
return false;
}
static MavenResolvedArtifact getResolvedArtifact(MavenDependency dependency) {
MavenResolvedArtifact mavenResolvedArtifact = null;
mavenResolvedArtifact = getMavenResolver()
.addDependency(dependency)
.resolve()
.withoutTransitivity()
.asSingleResolvedArtifact();
return mavenResolvedArtifact;
}
static void validateNull(String argValue, String argName) {
if(argValue != null) {
throw new IllegalArgumentException(String.format("%s should be null", argName));
}
}
static void validateValues(String argName, String argValue, String ... expectedValues) {
if(Arrays.stream(expectedValues).noneMatch(a -> a.equals(argValue))) {
throw new IllegalArgumentException(String.format("%s must match %s", argName, Arrays.toString(expectedValues)));
}
}
static List<BomDependency> getExternalDependenciesContent(List<Dependency> dependencies) {
List<BomDependency> allResolvedDependencies = new ArrayList<>();
for (Dependency dependency : dependencies) {
List<BomDependency> resolvedDependencies = getPomFileContent(dependency);
if (resolvedDependencies != null) {
allResolvedDependencies.addAll(resolvedDependencies);
}
}
return allResolvedDependencies;
}
static List<BomDependency> getPomFileContent(Dependency dependency) {
String[] groups = STRING_SPLIT_BY_DOT.split(dependency.getGroupId());
String url = null;
if(groups.length == 2) {
url = "https:
}
else if (groups.length == 3) {
url = "https:
}
else {
throw new UnsupportedOperationException("Can't parse the external BOM file.");
}
HttpRequest request = HttpRequest.newBuilder()
.uri(URI.create(url))
.GET()
.header("accept", "application/xml")
.timeout(Duration.ofMillis(5000))
.build();
return HTTP_CLIENT.sendAsync(request, HttpResponse.BodyHandlers.ofInputStream())
.thenApply(response -> {
if(response.statusCode() == 200) {
try (InputStreamReader reader = new InputStreamReader(response.body())) {
return Utils.parsePomFileContent(reader);
}
catch (IOException ex) {
logger.error("Failed to read contents for {}", dependency.toString());
}
}
return null;
}).join();
}
static BomDependencyNoVersion toBomDependencyNoVersion(BomDependency bomDependency) {
return new BomDependencyNoVersion(bomDependency.getGroupId(), bomDependency.getArtifactId());
}
static List<BomDependency> parsePomFileContent(String fileName) {
try (FileReader reader = new FileReader(fileName)) {
return parsePomFileContent(reader);
} catch (IOException exception) {
logger.error("Failed to read the contents of the pom file: {}", fileName);
}
return new ArrayList<>();
}
static List<BomDependency> parseBomFileContent(Reader responseStream) {
MavenXpp3Reader reader = new MavenXpp3Reader();
try {
Model model = reader.read(responseStream);
DependencyManagement management = model.getDependencyManagement();
return management.getDependencies().stream().map(dep -> {
String version = getPropertyName(dep.getVersion());
while(model.getProperties().getProperty(version) != null) {
version = getPropertyName(model.getProperties().getProperty(version));
if(version.equals(PROJECT_VERSION)) {
version = model.getVersion();
}
}
if(version == null) {
version = dep.getVersion();
}
BomDependency bomDependency = new BomDependency(dep.getGroupId(), dep.getArtifactId(), version);
return bomDependency;
}).collect(Collectors.toList());
} catch (IOException exception) {
exception.printStackTrace();
} catch (XmlPullParserException e) {
e.printStackTrace();
}
return null;
}
private static String getPropertyName(String propertyValue) {
if(propertyValue.startsWith("${")) {
return propertyValue.substring(2, propertyValue.length() - 1);
}
return propertyValue;
}
} | class Utils {
public static final String COMMANDLINE_INPUTDIRECTORY = "inputdir";
public static final String COMMANDLINE_OUTPUTDIRECTORY = "outputdir";
public static final String COMMANDLINE_MODE = "mode";
public static final String ANALYZE_MODE = "analyze";
public static final String GENERATE_MODE = "generate";
public static final Pattern COMMANDLINE_REGEX = Pattern.compile("-(.*)=(.*)");
public static final List<String> EXCLUSION_LIST = Arrays.asList("azure-spring-data-cosmos", "azure-spring-data-cosmos-test", "azure-core-test", "azure-sdk-all", "azure-sdk-parent", "azure-client-sdk-parent");
public static final Pattern SDK_DEPENDENCY_PATTERN = Pattern.compile("com.azure:(.+);(.+);(.+)");
public static final String BASE_AZURE_GROUPID = "com.azure";
public static final String AZURE_TEST_LIBRARY_IDENTIFIER = "-test";
public static final String AZURE_PERF_LIBRARY_IDENTIFIER = "-perf";
public static final HttpClient HTTP_CLIENT = HttpClient.newHttpClient();
public static final Pattern STRING_SPLIT_BY_DOT = Pattern.compile("[.]");
public static final Pattern STRING_SPLIT_BY_COLON = Pattern.compile("[:]");
public static final Pattern INPUT_DEPENDENCY_PATTERN = Pattern.compile("(.+);(.*)");
public static final String PROJECT_VERSION = "project.version";
public static final HashSet<String> RESOLVED_EXCLUSION_LIST = new HashSet<>(Arrays.asList(
"junit-jupiter-api"
));
public static final HashSet<String> IGNORE_CONFLICT_LIST = new HashSet<>(/*Arrays.asList(
"slf4j-api"
)*/);
public static final String POM_TYPE = "pom";
private static Logger logger = LoggerFactory.getLogger(Utils.class);
static void validateNotNullOrEmpty(String argValue, String argName) {
if(argValue == null || argValue.isEmpty()) {
throw new NullPointerException(String.format("%s can't be null", argName));
}
}
static void validateNotNullOrEmpty(String[] argValue, String argName) {
if(Arrays.stream(argValue).anyMatch(value -> value == null || value.isEmpty())) {
throw new IllegalArgumentException(String.format("%s can't be null", argName));
}
}
static MavenResolverSystemBase<PomEquippedResolveStage, PomlessResolveStage, MavenStrategyStage, MavenFormatStage> getMavenResolver() {
return Maven.configureResolver().withMavenCentralRepo(true);
}
static boolean isPublishedArtifact(BomDependency dependency) {
try {
return getResolvedArtifact(dependency) != null;
} catch (Exception ex) {
logger.error(ex.toString());
}
return false;
}
static MavenResolvedArtifact getResolvedArtifact(MavenDependency dependency) {
MavenResolvedArtifact mavenResolvedArtifact = null;
mavenResolvedArtifact = getMavenResolver()
.addDependency(dependency)
.resolve()
.withoutTransitivity()
.asSingleResolvedArtifact();
return mavenResolvedArtifact;
}
static void validateNull(String argValue, String argName) {
if(argValue != null) {
throw new IllegalArgumentException(String.format("%s should be null", argName));
}
}
static void validateValues(String argName, String argValue, String ... expectedValues) {
if(Arrays.stream(expectedValues).noneMatch(a -> a.equals(argValue))) {
throw new IllegalArgumentException(String.format("%s must match %s", argName, Arrays.toString(expectedValues)));
}
}
static List<BomDependency> getExternalDependenciesContent(List<Dependency> dependencies) {
List<BomDependency> allResolvedDependencies = new ArrayList<>();
for (Dependency dependency : dependencies) {
List<BomDependency> resolvedDependencies = getPomFileContent(dependency);
if (resolvedDependencies != null) {
allResolvedDependencies.addAll(resolvedDependencies);
}
}
return allResolvedDependencies;
}
static List<BomDependency> getPomFileContent(Dependency dependency) {
String[] groups = STRING_SPLIT_BY_DOT.split(dependency.getGroupId());
String url = null;
if(groups.length == 2) {
url = "https:
}
else if (groups.length == 3) {
url = "https:
}
else {
throw new UnsupportedOperationException("Can't parse the external BOM file.");
}
HttpRequest request = HttpRequest.newBuilder()
.uri(URI.create(url))
.GET()
.header("accept", "application/xml")
.timeout(Duration.ofMillis(5000))
.build();
return HTTP_CLIENT.sendAsync(request, HttpResponse.BodyHandlers.ofInputStream())
.thenApply(response -> {
if(response.statusCode() == 200) {
try (InputStreamReader reader = new InputStreamReader(response.body())) {
return Utils.parsePomFileContent(reader);
}
catch (IOException ex) {
logger.error("Failed to read contents for {}", dependency.toString());
}
}
return null;
}).join();
}
static BomDependencyNoVersion toBomDependencyNoVersion(BomDependency bomDependency) {
return new BomDependencyNoVersion(bomDependency.getGroupId(), bomDependency.getArtifactId());
}
static List<BomDependency> parsePomFileContent(String fileName) {
try (FileReader reader = new FileReader(fileName)) {
return parsePomFileContent(reader);
} catch (IOException exception) {
logger.error("Failed to read the contents of the pom file: {}", fileName);
}
return new ArrayList<>();
}
static List<BomDependency> parseBomFileContent(Reader responseStream) {
MavenXpp3Reader reader = new MavenXpp3Reader();
try {
Model model = reader.read(responseStream);
DependencyManagement management = model.getDependencyManagement();
return management.getDependencies().stream().map(dep -> {
String version = getPropertyName(dep.getVersion());
while(model.getProperties().getProperty(version) != null) {
version = getPropertyName(model.getProperties().getProperty(version));
if(version.equals(PROJECT_VERSION)) {
version = model.getVersion();
}
}
if(version == null) {
version = dep.getVersion();
}
BomDependency bomDependency = new BomDependency(dep.getGroupId(), dep.getArtifactId(), version);
return bomDependency;
}).collect(Collectors.toList());
} catch (IOException exception) {
exception.printStackTrace();
} catch (XmlPullParserException e) {
e.printStackTrace();
}
return null;
}
private static String getPropertyName(String propertyValue) {
if(propertyValue.startsWith("${")) {
return propertyValue.substring(2, propertyValue.length() - 1);
}
return propertyValue;
}
} |
Also, do we not care about other types of scope such as `provided`? | static List<BomDependency> parsePomFileContent(Reader responseStream) {
List<BomDependency> bomDependencies = new ArrayList<>();
ObjectMapper mapper = new XmlMapper();
try {
HashMap<String, Object> value = mapper.readValue(responseStream, HashMap.class);
Object packagingProp = value.getOrDefault("packaging", null);
if(packagingProp != null && packagingProp.toString().equalsIgnoreCase("pom")) {
return parsePomFileContent(responseStream);
}
HashMap<String, Object> dependenciesTag = (HashMap<String, Object>)value.getOrDefault("dependencies", null);
if(dependenciesTag == null) {
return null;
}
ArrayList<HashMap<String, Object>> dependencies = (ArrayList<HashMap<String, Object>>) dependenciesTag.getOrDefault("dependency", null);
for(HashMap<String, Object> dependency: dependencies) {
String groupId = (String) dependency.getOrDefault("groupId", null);
String artifactId = (String) dependency.getOrDefault("artifactId", null);
String version = (String) dependency.getOrDefault("version", null);
String scope = (String) dependency.getOrDefault("scope", ScopeType.COMPILE.toString());
ScopeType scopeType = ScopeType.COMPILE;
switch(scope) {
case "test" : scopeType = ScopeType.TEST;
break;
default: scopeType = ScopeType.COMPILE;
}
bomDependencies.add(new BomDependency(groupId, artifactId, version, scopeType));
}
} catch (IOException exception) {
exception.printStackTrace();
}
return bomDependencies.stream().distinct().collect(Collectors.toList());
} | default: scopeType = ScopeType.COMPILE; | static List<BomDependency> parsePomFileContent(Reader responseStream) {
List<BomDependency> bomDependencies = new ArrayList<>();
ObjectMapper mapper = new XmlMapper();
mapper.disable(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES);
try {
Model value = mapper.readValue(responseStream, Model.class);
List<Dependency> dependencies = value.getDependencies();
if(dependencies == null) {
return bomDependencies;
}
for(Dependency dependency : dependencies) {
ScopeType scopeType = ScopeType.COMPILE;
if("test".equals(dependency.getScope())) {
scopeType = ScopeType.TEST;
}
bomDependencies.add(new BomDependency(
dependency.getGroupId(),
dependency.getArtifactId(),
dependency.getVersion(),
scopeType));
}
} catch (IOException exception) {
exception.printStackTrace();
}
return bomDependencies.stream().distinct().collect(Collectors.toList());
} | class Utils {
public static final String COMMANDLINE_INPUTDIRECTORY = "inputdir";
public static final String COMMANDLINE_OUTPUTDIRECTORY = "outputdir";
public static final String EMPTY_STRING = "";
public static final String COMMANDLINE_INPUTFILE = "inputfile";
public static final String COMMANDLINE_OUTPUTFILE = "outputfile";
public static final String COMMANDLINE_POMFILE = "pomfile";
public static final String COMMANDLINE_OVERRIDDEN_INPUTDEPENDENCIES_FILE = "inputdependenciesfile";
public static final String COMMANDLINE_REPORTFILE = "reportfile";
public static final String COMMANDLINE_MODE = "mode";
public static final String ANALYZE_MODE = "analyze";
public static final String GENERATE_MODE = "generate";
public static final Pattern COMMANDLINE_REGEX = Pattern.compile("-(.*)=(.*)");
public static final List<String> EXCLUSION_LIST = Arrays.asList("azure-spring-data-cosmos", "azure-spring-data-cosmos-test", "azure-core-test", "azure-sdk-all", "azure-sdk-parent", "azure-client-sdk-parent");
public static final Pattern SDK_DEPENDENCY_PATTERN = Pattern.compile("com.azure:(.+);(.+);(.+)");
public static final String BASE_AZURE_GROUPID = "com.azure";
public static final String AZURE_TEST_LIBRARY_IDENTIFIER = "-test";
public static final String AZURE_PERF_LIBRARY_IDENTIFIER = "-perf";
public static final HttpClient HTTP_CLIENT = HttpClient.newHttpClient();
public static final Pattern STRING_SPLIT_BY_DOT = Pattern.compile("[.]");
public static final Pattern STRING_SPLIT_BY_COLON = Pattern.compile("[:]");
public static final Pattern INPUT_DEPENDENCY_PATTERN = Pattern.compile("(.+);(.*)");
public static final String PROJECT_VERSION = "project.version";
public static final HashSet<String> RESOLVED_EXCLUSION_LIST = new HashSet<>(Arrays.asList(
"junit-jupiter-api"
));
public static final HashSet<String> IGNORE_CONFLICT_LIST = new HashSet<>(/*Arrays.asList(
"slf4j-api"
)*/);
public static final String POM_TYPE = "pom";
private static Logger logger = LoggerFactory.getLogger(Utils.class);
static void validateNotNullOrEmpty(String argValue, String argName) {
if(argValue == null || argValue.isEmpty()) {
throw new NullPointerException(String.format("%s can't be null", argName));
}
}
static void validateNotNullOrEmpty(String[] argValue, String argName) {
if(Arrays.stream(argValue).anyMatch(value -> value == null || value.isEmpty())) {
throw new IllegalArgumentException(String.format("%s can't be null", argName));
}
}
static MavenResolverSystemBase<PomEquippedResolveStage, PomlessResolveStage, MavenStrategyStage, MavenFormatStage> getMavenResolver() {
return Maven.configureResolver().withMavenCentralRepo(true);
}
static boolean isPublishedArtifact(BomDependency dependency) {
try {
return getResolvedArtifact(dependency) != null;
} catch (Exception ex) {
logger.error(ex.toString());
}
return false;
}
static MavenResolvedArtifact getResolvedArtifact(MavenDependency dependency) {
MavenResolvedArtifact mavenResolvedArtifact = null;
mavenResolvedArtifact = getMavenResolver()
.addDependency(dependency)
.resolve()
.withoutTransitivity()
.asSingleResolvedArtifact();
return mavenResolvedArtifact;
}
static void validateNull(String argValue, String argName) {
if(argValue != null) {
throw new IllegalArgumentException(String.format("%s should be null", argName));
}
}
static void validateValues(String argName, String argValue, String ... expectedValues) {
if(Arrays.stream(expectedValues).noneMatch(a -> a.equals(argValue))) {
throw new IllegalArgumentException(String.format("%s must match %s", argName, Arrays.toString(expectedValues)));
}
}
static List<BomDependency> getExternalDependenciesContent(List<Dependency> dependencies) {
List<BomDependency> allResolvedDependencies = new ArrayList<>();
for (Dependency dependency : dependencies) {
List<BomDependency> resolvedDependencies = getPomFileContent(dependency);
if (resolvedDependencies != null) {
allResolvedDependencies.addAll(resolvedDependencies);
}
}
return allResolvedDependencies;
}
static List<BomDependency> getPomFileContent(Dependency dependency) {
String[] groups = STRING_SPLIT_BY_DOT.split(dependency.getGroupId());
String url = null;
if(groups.length == 2) {
url = "https:
}
else if (groups.length == 3) {
url = "https:
}
else {
throw new UnsupportedOperationException("Can't parse the external BOM file.");
}
HttpRequest request = HttpRequest.newBuilder()
.uri(URI.create(url))
.GET()
.header("accept", "application/xml")
.timeout(Duration.ofMillis(5000))
.build();
return HTTP_CLIENT.sendAsync(request, HttpResponse.BodyHandlers.ofInputStream())
.thenApply(response -> {
if(response.statusCode() == 200) {
try (InputStreamReader reader = new InputStreamReader(response.body())) {
return Utils.parsePomFileContent(reader);
}
catch (IOException ex) {
logger.error("Failed to read contents for {}", dependency.toString());
}
}
return null;
}).join();
}
static BomDependencyNoVersion toBomDependencyNoVersion(BomDependency bomDependency) {
return new BomDependencyNoVersion(bomDependency.getGroupId(), bomDependency.getArtifactId());
}
static List<BomDependency> parsePomFileContent(String fileName) {
try (FileReader reader = new FileReader(fileName)) {
return parsePomFileContent(reader);
} catch (IOException exception) {
logger.error("Failed to read the contents of the pom file: {}", fileName);
}
return new ArrayList<>();
}
static List<BomDependency> parseBomFileContent(Reader responseStream) {
MavenXpp3Reader reader = new MavenXpp3Reader();
try {
Model model = reader.read(responseStream);
DependencyManagement management = model.getDependencyManagement();
return management.getDependencies().stream().map(dep -> {
String version = getPropertyName(dep.getVersion());
while(model.getProperties().getProperty(version) != null) {
version = getPropertyName(model.getProperties().getProperty(version));
if(version.equals(PROJECT_VERSION)) {
version = model.getVersion();
}
}
if(version == null) {
version = dep.getVersion();
}
BomDependency bomDependency = new BomDependency(dep.getGroupId(), dep.getArtifactId(), version);
return bomDependency;
}).collect(Collectors.toList());
} catch (IOException exception) {
exception.printStackTrace();
} catch (XmlPullParserException e) {
e.printStackTrace();
}
return null;
}
private static String getPropertyName(String propertyValue) {
if(propertyValue.startsWith("${")) {
return propertyValue.substring(2, propertyValue.length() - 1);
}
return propertyValue;
}
} | class Utils {
public static final String COMMANDLINE_INPUTDIRECTORY = "inputdir";
public static final String COMMANDLINE_OUTPUTDIRECTORY = "outputdir";
public static final String COMMANDLINE_MODE = "mode";
public static final String ANALYZE_MODE = "analyze";
public static final String GENERATE_MODE = "generate";
public static final Pattern COMMANDLINE_REGEX = Pattern.compile("-(.*)=(.*)");
public static final List<String> EXCLUSION_LIST = Arrays.asList("azure-spring-data-cosmos", "azure-spring-data-cosmos-test", "azure-core-test", "azure-sdk-all", "azure-sdk-parent", "azure-client-sdk-parent");
public static final Pattern SDK_DEPENDENCY_PATTERN = Pattern.compile("com.azure:(.+);(.+);(.+)");
public static final String BASE_AZURE_GROUPID = "com.azure";
public static final String AZURE_TEST_LIBRARY_IDENTIFIER = "-test";
public static final String AZURE_PERF_LIBRARY_IDENTIFIER = "-perf";
public static final HttpClient HTTP_CLIENT = HttpClient.newHttpClient();
public static final Pattern STRING_SPLIT_BY_DOT = Pattern.compile("[.]");
public static final Pattern STRING_SPLIT_BY_COLON = Pattern.compile("[:]");
public static final Pattern INPUT_DEPENDENCY_PATTERN = Pattern.compile("(.+);(.*)");
public static final String PROJECT_VERSION = "project.version";
public static final HashSet<String> RESOLVED_EXCLUSION_LIST = new HashSet<>(Arrays.asList(
"junit-jupiter-api"
));
public static final HashSet<String> IGNORE_CONFLICT_LIST = new HashSet<>(/*Arrays.asList(
"slf4j-api"
)*/);
public static final String POM_TYPE = "pom";
private static Logger logger = LoggerFactory.getLogger(Utils.class);
static void validateNotNullOrEmpty(String argValue, String argName) {
if(argValue == null || argValue.isEmpty()) {
throw new NullPointerException(String.format("%s can't be null", argName));
}
}
static void validateNotNullOrEmpty(String[] argValue, String argName) {
if(Arrays.stream(argValue).anyMatch(value -> value == null || value.isEmpty())) {
throw new IllegalArgumentException(String.format("%s can't be null", argName));
}
}
static MavenResolverSystemBase<PomEquippedResolveStage, PomlessResolveStage, MavenStrategyStage, MavenFormatStage> getMavenResolver() {
return Maven.configureResolver().withMavenCentralRepo(true);
}
static boolean isPublishedArtifact(BomDependency dependency) {
try {
return getResolvedArtifact(dependency) != null;
} catch (Exception ex) {
logger.error(ex.toString());
}
return false;
}
static MavenResolvedArtifact getResolvedArtifact(MavenDependency dependency) {
MavenResolvedArtifact mavenResolvedArtifact = null;
mavenResolvedArtifact = getMavenResolver()
.addDependency(dependency)
.resolve()
.withoutTransitivity()
.asSingleResolvedArtifact();
return mavenResolvedArtifact;
}
static void validateNull(String argValue, String argName) {
if(argValue != null) {
throw new IllegalArgumentException(String.format("%s should be null", argName));
}
}
static void validateValues(String argName, String argValue, String ... expectedValues) {
if(Arrays.stream(expectedValues).noneMatch(a -> a.equals(argValue))) {
throw new IllegalArgumentException(String.format("%s must match %s", argName, Arrays.toString(expectedValues)));
}
}
static List<BomDependency> getExternalDependenciesContent(List<Dependency> dependencies) {
List<BomDependency> allResolvedDependencies = new ArrayList<>();
for (Dependency dependency : dependencies) {
List<BomDependency> resolvedDependencies = getPomFileContent(dependency);
if (resolvedDependencies != null) {
allResolvedDependencies.addAll(resolvedDependencies);
}
}
return allResolvedDependencies;
}
static List<BomDependency> getPomFileContent(Dependency dependency) {
String[] groups = STRING_SPLIT_BY_DOT.split(dependency.getGroupId());
String url = null;
if(groups.length == 2) {
url = "https:
}
else if (groups.length == 3) {
url = "https:
}
else {
throw new UnsupportedOperationException("Can't parse the external BOM file.");
}
HttpRequest request = HttpRequest.newBuilder()
.uri(URI.create(url))
.GET()
.header("accept", "application/xml")
.timeout(Duration.ofMillis(5000))
.build();
return HTTP_CLIENT.sendAsync(request, HttpResponse.BodyHandlers.ofInputStream())
.thenApply(response -> {
if(response.statusCode() == 200) {
try (InputStreamReader reader = new InputStreamReader(response.body())) {
return Utils.parsePomFileContent(reader);
}
catch (IOException ex) {
logger.error("Failed to read contents for {}", dependency.toString());
}
}
return null;
}).join();
}
static BomDependencyNoVersion toBomDependencyNoVersion(BomDependency bomDependency) {
return new BomDependencyNoVersion(bomDependency.getGroupId(), bomDependency.getArtifactId());
}
static List<BomDependency> parsePomFileContent(String fileName) {
try (FileReader reader = new FileReader(fileName)) {
return parsePomFileContent(reader);
} catch (IOException exception) {
logger.error("Failed to read the contents of the pom file: {}", fileName);
}
return new ArrayList<>();
}
static List<BomDependency> parseBomFileContent(Reader responseStream) {
MavenXpp3Reader reader = new MavenXpp3Reader();
try {
Model model = reader.read(responseStream);
DependencyManagement management = model.getDependencyManagement();
return management.getDependencies().stream().map(dep -> {
String version = getPropertyName(dep.getVersion());
while(model.getProperties().getProperty(version) != null) {
version = getPropertyName(model.getProperties().getProperty(version));
if(version.equals(PROJECT_VERSION)) {
version = model.getVersion();
}
}
if(version == null) {
version = dep.getVersion();
}
BomDependency bomDependency = new BomDependency(dep.getGroupId(), dep.getArtifactId(), version);
return bomDependency;
}).collect(Collectors.toList());
} catch (IOException exception) {
exception.printStackTrace();
} catch (XmlPullParserException e) {
e.printStackTrace();
}
return null;
}
private static String getPropertyName(String propertyValue) {
if(propertyValue.startsWith("${")) {
return propertyValue.substring(2, propertyValue.length() - 1);
}
return propertyValue;
}
} |
Do we want to run the generator if an error occurred when parsing the arguments? #Resolved | public static void main(String[] args) {
BomGenerator generator = null;
try {
generator = parseCommandLine(args);
} catch (FileNotFoundException e) {
System.out.println("Error occurred.");
e.printStackTrace();
}
if(!generator.run()) {
System.exit(1);
}
System.out.println("Completed successfully.");
} | if(!generator.run()) { | public static void main(String[] args) {
BomGenerator generator = null;
try {
generator = parseCommandLine(args);
if(!generator.run()) {
System.exit(1);
}
System.out.println("Completed successfully.");
} catch (FileNotFoundException e) {
System.out.println("Error occurred.");
e.printStackTrace();
System.exit(1);
}
} | class Main {
private static BomGenerator parseCommandLine(String[] args) throws FileNotFoundException {
String inputDir = null, outputDir = null, mode = null;
for (String arg : args) {
Matcher matcher = COMMANDLINE_REGEX.matcher(arg);
if (matcher.matches()) {
if (matcher.groupCount() == 2) {
String argName = matcher.group(1);
String argValue = matcher.group(2);
switch (argName.toLowerCase()) {
case COMMANDLINE_INPUTDIRECTORY:
validateNotNullOrEmpty(argName, argValue);
inputDir = argValue;
break;
case COMMANDLINE_OUTPUTDIRECTORY:
validateNotNullOrEmpty(argName, argValue);
outputDir = argValue;
break;
case COMMANDLINE_MODE:
validateNotNullOrEmpty(argName, argValue);
validateValues(argName, argValue, GENERATE_MODE, ANALYZE_MODE);
mode = argValue;
break;
}
}
}
}
validateNotNullOrEmpty(inputDir, "inputDir");
validateNotNullOrEmpty(outputDir, "outputDir");
BomGenerator generator = new BomGenerator(inputDir, outputDir, mode);
return generator;
}
} | class Main {
private static BomGenerator parseCommandLine(String[] args) throws FileNotFoundException {
String inputDir = null, outputDir = null, mode = null;
for (String arg : args) {
Matcher matcher = COMMANDLINE_REGEX.matcher(arg);
if (matcher.matches()) {
if (matcher.groupCount() == 2) {
String argName = matcher.group(1);
String argValue = matcher.group(2);
switch (argName.toLowerCase()) {
case COMMANDLINE_INPUTDIRECTORY:
validateNotNullOrEmpty(argName, argValue);
inputDir = argValue;
break;
case COMMANDLINE_OUTPUTDIRECTORY:
validateNotNullOrEmpty(argName, argValue);
outputDir = argValue;
break;
case COMMANDLINE_MODE:
validateNotNullOrEmpty(argName, argValue);
validateValues(argName, argValue, GENERATE_MODE, ANALYZE_MODE);
mode = argValue;
break;
}
}
}
}
validateNotNullOrEmpty(inputDir, "inputDir");
validateNotNullOrEmpty(outputDir, "outputDir");
BomGenerator generator = new BomGenerator(inputDir, outputDir, mode);
return generator;
}
} |
Instead of using `Map`s and `List`s everywhere would it be more intuitive to read the POM into an XML tree and perform tree-walking given that POMs should have standard formats. #Resolved | static List<BomDependency> parsePomFileContent(Reader responseStream) {
List<BomDependency> bomDependencies = new ArrayList<>();
ObjectMapper mapper = new XmlMapper();
try {
HashMap<String, Object> value = mapper.readValue(responseStream, HashMap.class);
Object packagingProp = value.getOrDefault("packaging", null);
if(packagingProp != null && packagingProp.toString().equalsIgnoreCase("pom")) {
return parsePomFileContent(responseStream);
}
HashMap<String, Object> dependenciesTag = (HashMap<String, Object>)value.getOrDefault("dependencies", null);
if(dependenciesTag == null) {
return null;
}
ArrayList<HashMap<String, Object>> dependencies = (ArrayList<HashMap<String, Object>>) dependenciesTag.getOrDefault("dependency", null);
for(HashMap<String, Object> dependency: dependencies) {
String groupId = (String) dependency.getOrDefault("groupId", null);
String artifactId = (String) dependency.getOrDefault("artifactId", null);
String version = (String) dependency.getOrDefault("version", null);
String scope = (String) dependency.getOrDefault("scope", ScopeType.COMPILE.toString());
ScopeType scopeType = ScopeType.COMPILE;
switch(scope) {
case "test" : scopeType = ScopeType.TEST;
break;
default: scopeType = ScopeType.COMPILE;
}
bomDependencies.add(new BomDependency(groupId, artifactId, version, scopeType));
}
} catch (IOException exception) {
exception.printStackTrace();
}
return bomDependencies.stream().distinct().collect(Collectors.toList());
} | HashMap<String, Object> value = mapper.readValue(responseStream, HashMap.class); | static List<BomDependency> parsePomFileContent(Reader responseStream) {
List<BomDependency> bomDependencies = new ArrayList<>();
ObjectMapper mapper = new XmlMapper();
mapper.disable(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES);
try {
Model value = mapper.readValue(responseStream, Model.class);
List<Dependency> dependencies = value.getDependencies();
if(dependencies == null) {
return bomDependencies;
}
for(Dependency dependency : dependencies) {
ScopeType scopeType = ScopeType.COMPILE;
if("test".equals(dependency.getScope())) {
scopeType = ScopeType.TEST;
}
bomDependencies.add(new BomDependency(
dependency.getGroupId(),
dependency.getArtifactId(),
dependency.getVersion(),
scopeType));
}
} catch (IOException exception) {
exception.printStackTrace();
}
return bomDependencies.stream().distinct().collect(Collectors.toList());
} | class Utils {
public static final String COMMANDLINE_INPUTDIRECTORY = "inputdir";
public static final String COMMANDLINE_OUTPUTDIRECTORY = "outputdir";
public static final String EMPTY_STRING = "";
public static final String COMMANDLINE_INPUTFILE = "inputfile";
public static final String COMMANDLINE_OUTPUTFILE = "outputfile";
public static final String COMMANDLINE_POMFILE = "pomfile";
public static final String COMMANDLINE_OVERRIDDEN_INPUTDEPENDENCIES_FILE = "inputdependenciesfile";
public static final String COMMANDLINE_REPORTFILE = "reportfile";
public static final String COMMANDLINE_MODE = "mode";
public static final String ANALYZE_MODE = "analyze";
public static final String GENERATE_MODE = "generate";
public static final Pattern COMMANDLINE_REGEX = Pattern.compile("-(.*)=(.*)");
public static final List<String> EXCLUSION_LIST = Arrays.asList("azure-spring-data-cosmos", "azure-spring-data-cosmos-test", "azure-core-test", "azure-sdk-all", "azure-sdk-parent", "azure-client-sdk-parent");
public static final Pattern SDK_DEPENDENCY_PATTERN = Pattern.compile("com.azure:(.+);(.+);(.+)");
public static final String BASE_AZURE_GROUPID = "com.azure";
public static final String AZURE_TEST_LIBRARY_IDENTIFIER = "-test";
public static final String AZURE_PERF_LIBRARY_IDENTIFIER = "-perf";
public static final HttpClient HTTP_CLIENT = HttpClient.newHttpClient();
public static final Pattern STRING_SPLIT_BY_DOT = Pattern.compile("[.]");
public static final Pattern STRING_SPLIT_BY_COLON = Pattern.compile("[:]");
public static final Pattern INPUT_DEPENDENCY_PATTERN = Pattern.compile("(.+);(.*)");
public static final String PROJECT_VERSION = "project.version";
public static final HashSet<String> RESOLVED_EXCLUSION_LIST = new HashSet<>(Arrays.asList(
"junit-jupiter-api"
));
public static final HashSet<String> IGNORE_CONFLICT_LIST = new HashSet<>(/*Arrays.asList(
"slf4j-api"
)*/);
public static final String POM_TYPE = "pom";
private static Logger logger = LoggerFactory.getLogger(Utils.class);
static void validateNotNullOrEmpty(String argValue, String argName) {
if(argValue == null || argValue.isEmpty()) {
throw new NullPointerException(String.format("%s can't be null", argName));
}
}
static void validateNotNullOrEmpty(String[] argValue, String argName) {
if(Arrays.stream(argValue).anyMatch(value -> value == null || value.isEmpty())) {
throw new IllegalArgumentException(String.format("%s can't be null", argName));
}
}
static MavenResolverSystemBase<PomEquippedResolveStage, PomlessResolveStage, MavenStrategyStage, MavenFormatStage> getMavenResolver() {
return Maven.configureResolver().withMavenCentralRepo(true);
}
static boolean isPublishedArtifact(BomDependency dependency) {
try {
return getResolvedArtifact(dependency) != null;
} catch (Exception ex) {
logger.error(ex.toString());
}
return false;
}
static MavenResolvedArtifact getResolvedArtifact(MavenDependency dependency) {
MavenResolvedArtifact mavenResolvedArtifact = null;
mavenResolvedArtifact = getMavenResolver()
.addDependency(dependency)
.resolve()
.withoutTransitivity()
.asSingleResolvedArtifact();
return mavenResolvedArtifact;
}
static void validateNull(String argValue, String argName) {
if(argValue != null) {
throw new IllegalArgumentException(String.format("%s should be null", argName));
}
}
static void validateValues(String argName, String argValue, String ... expectedValues) {
if(Arrays.stream(expectedValues).noneMatch(a -> a.equals(argValue))) {
throw new IllegalArgumentException(String.format("%s must match %s", argName, Arrays.toString(expectedValues)));
}
}
static List<BomDependency> getExternalDependenciesContent(List<Dependency> dependencies) {
List<BomDependency> allResolvedDependencies = new ArrayList<>();
for (Dependency dependency : dependencies) {
List<BomDependency> resolvedDependencies = getPomFileContent(dependency);
if (resolvedDependencies != null) {
allResolvedDependencies.addAll(resolvedDependencies);
}
}
return allResolvedDependencies;
}
static List<BomDependency> getPomFileContent(Dependency dependency) {
String[] groups = STRING_SPLIT_BY_DOT.split(dependency.getGroupId());
String url = null;
if(groups.length == 2) {
url = "https:
}
else if (groups.length == 3) {
url = "https:
}
else {
throw new UnsupportedOperationException("Can't parse the external BOM file.");
}
HttpRequest request = HttpRequest.newBuilder()
.uri(URI.create(url))
.GET()
.header("accept", "application/xml")
.timeout(Duration.ofMillis(5000))
.build();
return HTTP_CLIENT.sendAsync(request, HttpResponse.BodyHandlers.ofInputStream())
.thenApply(response -> {
if(response.statusCode() == 200) {
try (InputStreamReader reader = new InputStreamReader(response.body())) {
return Utils.parsePomFileContent(reader);
}
catch (IOException ex) {
logger.error("Failed to read contents for {}", dependency.toString());
}
}
return null;
}).join();
}
static BomDependencyNoVersion toBomDependencyNoVersion(BomDependency bomDependency) {
return new BomDependencyNoVersion(bomDependency.getGroupId(), bomDependency.getArtifactId());
}
static List<BomDependency> parsePomFileContent(String fileName) {
try (FileReader reader = new FileReader(fileName)) {
return parsePomFileContent(reader);
} catch (IOException exception) {
logger.error("Failed to read the contents of the pom file: {}", fileName);
}
return new ArrayList<>();
}
static List<BomDependency> parseBomFileContent(Reader responseStream) {
MavenXpp3Reader reader = new MavenXpp3Reader();
try {
Model model = reader.read(responseStream);
DependencyManagement management = model.getDependencyManagement();
return management.getDependencies().stream().map(dep -> {
String version = getPropertyName(dep.getVersion());
while(model.getProperties().getProperty(version) != null) {
version = getPropertyName(model.getProperties().getProperty(version));
if(version.equals(PROJECT_VERSION)) {
version = model.getVersion();
}
}
if(version == null) {
version = dep.getVersion();
}
BomDependency bomDependency = new BomDependency(dep.getGroupId(), dep.getArtifactId(), version);
return bomDependency;
}).collect(Collectors.toList());
} catch (IOException exception) {
exception.printStackTrace();
} catch (XmlPullParserException e) {
e.printStackTrace();
}
return null;
}
private static String getPropertyName(String propertyValue) {
if(propertyValue.startsWith("${")) {
return propertyValue.substring(2, propertyValue.length() - 1);
}
return propertyValue;
}
} | class Utils {
public static final String COMMANDLINE_INPUTDIRECTORY = "inputdir";
public static final String COMMANDLINE_OUTPUTDIRECTORY = "outputdir";
public static final String COMMANDLINE_MODE = "mode";
public static final String ANALYZE_MODE = "analyze";
public static final String GENERATE_MODE = "generate";
public static final Pattern COMMANDLINE_REGEX = Pattern.compile("-(.*)=(.*)");
public static final List<String> EXCLUSION_LIST = Arrays.asList("azure-spring-data-cosmos", "azure-spring-data-cosmos-test", "azure-core-test", "azure-sdk-all", "azure-sdk-parent", "azure-client-sdk-parent");
public static final Pattern SDK_DEPENDENCY_PATTERN = Pattern.compile("com.azure:(.+);(.+);(.+)");
public static final String BASE_AZURE_GROUPID = "com.azure";
public static final String AZURE_TEST_LIBRARY_IDENTIFIER = "-test";
public static final String AZURE_PERF_LIBRARY_IDENTIFIER = "-perf";
public static final HttpClient HTTP_CLIENT = HttpClient.newHttpClient();
public static final Pattern STRING_SPLIT_BY_DOT = Pattern.compile("[.]");
public static final Pattern STRING_SPLIT_BY_COLON = Pattern.compile("[:]");
public static final Pattern INPUT_DEPENDENCY_PATTERN = Pattern.compile("(.+);(.*)");
public static final String PROJECT_VERSION = "project.version";
public static final HashSet<String> RESOLVED_EXCLUSION_LIST = new HashSet<>(Arrays.asList(
"junit-jupiter-api"
));
public static final HashSet<String> IGNORE_CONFLICT_LIST = new HashSet<>(/*Arrays.asList(
"slf4j-api"
)*/);
public static final String POM_TYPE = "pom";
private static Logger logger = LoggerFactory.getLogger(Utils.class);
static void validateNotNullOrEmpty(String argValue, String argName) {
if(argValue == null || argValue.isEmpty()) {
throw new NullPointerException(String.format("%s can't be null", argName));
}
}
static void validateNotNullOrEmpty(String[] argValue, String argName) {
if(Arrays.stream(argValue).anyMatch(value -> value == null || value.isEmpty())) {
throw new IllegalArgumentException(String.format("%s can't be null", argName));
}
}
static MavenResolverSystemBase<PomEquippedResolveStage, PomlessResolveStage, MavenStrategyStage, MavenFormatStage> getMavenResolver() {
return Maven.configureResolver().withMavenCentralRepo(true);
}
static boolean isPublishedArtifact(BomDependency dependency) {
try {
return getResolvedArtifact(dependency) != null;
} catch (Exception ex) {
logger.error(ex.toString());
}
return false;
}
static MavenResolvedArtifact getResolvedArtifact(MavenDependency dependency) {
MavenResolvedArtifact mavenResolvedArtifact = null;
mavenResolvedArtifact = getMavenResolver()
.addDependency(dependency)
.resolve()
.withoutTransitivity()
.asSingleResolvedArtifact();
return mavenResolvedArtifact;
}
static void validateNull(String argValue, String argName) {
if(argValue != null) {
throw new IllegalArgumentException(String.format("%s should be null", argName));
}
}
static void validateValues(String argName, String argValue, String ... expectedValues) {
if(Arrays.stream(expectedValues).noneMatch(a -> a.equals(argValue))) {
throw new IllegalArgumentException(String.format("%s must match %s", argName, Arrays.toString(expectedValues)));
}
}
static List<BomDependency> getExternalDependenciesContent(List<Dependency> dependencies) {
List<BomDependency> allResolvedDependencies = new ArrayList<>();
for (Dependency dependency : dependencies) {
List<BomDependency> resolvedDependencies = getPomFileContent(dependency);
if (resolvedDependencies != null) {
allResolvedDependencies.addAll(resolvedDependencies);
}
}
return allResolvedDependencies;
}
static List<BomDependency> getPomFileContent(Dependency dependency) {
String[] groups = STRING_SPLIT_BY_DOT.split(dependency.getGroupId());
String url = null;
if(groups.length == 2) {
url = "https:
}
else if (groups.length == 3) {
url = "https:
}
else {
throw new UnsupportedOperationException("Can't parse the external BOM file.");
}
HttpRequest request = HttpRequest.newBuilder()
.uri(URI.create(url))
.GET()
.header("accept", "application/xml")
.timeout(Duration.ofMillis(5000))
.build();
return HTTP_CLIENT.sendAsync(request, HttpResponse.BodyHandlers.ofInputStream())
.thenApply(response -> {
if(response.statusCode() == 200) {
try (InputStreamReader reader = new InputStreamReader(response.body())) {
return Utils.parsePomFileContent(reader);
}
catch (IOException ex) {
logger.error("Failed to read contents for {}", dependency.toString());
}
}
return null;
}).join();
}
static BomDependencyNoVersion toBomDependencyNoVersion(BomDependency bomDependency) {
return new BomDependencyNoVersion(bomDependency.getGroupId(), bomDependency.getArtifactId());
}
static List<BomDependency> parsePomFileContent(String fileName) {
try (FileReader reader = new FileReader(fileName)) {
return parsePomFileContent(reader);
} catch (IOException exception) {
logger.error("Failed to read the contents of the pom file: {}", fileName);
}
return new ArrayList<>();
}
static List<BomDependency> parseBomFileContent(Reader responseStream) {
MavenXpp3Reader reader = new MavenXpp3Reader();
try {
Model model = reader.read(responseStream);
DependencyManagement management = model.getDependencyManagement();
return management.getDependencies().stream().map(dep -> {
String version = getPropertyName(dep.getVersion());
while(model.getProperties().getProperty(version) != null) {
version = getPropertyName(model.getProperties().getProperty(version));
if(version.equals(PROJECT_VERSION)) {
version = model.getVersion();
}
}
if(version == null) {
version = dep.getVersion();
}
BomDependency bomDependency = new BomDependency(dep.getGroupId(), dep.getArtifactId(), version);
return bomDependency;
}).collect(Collectors.toList());
} catch (IOException exception) {
exception.printStackTrace();
} catch (XmlPullParserException e) {
e.printStackTrace();
}
return null;
}
private static String getPropertyName(String propertyValue) {
if(propertyValue.startsWith("${")) {
return propertyValue.substring(2, propertyValue.length() - 1);
}
return propertyValue;
}
} |
As of now, no. We may eventually need to support both import and provided but currently we have no use case for them. | static List<BomDependency> parsePomFileContent(Reader responseStream) {
List<BomDependency> bomDependencies = new ArrayList<>();
ObjectMapper mapper = new XmlMapper();
try {
HashMap<String, Object> value = mapper.readValue(responseStream, HashMap.class);
Object packagingProp = value.getOrDefault("packaging", null);
if(packagingProp != null && packagingProp.toString().equalsIgnoreCase("pom")) {
return parsePomFileContent(responseStream);
}
HashMap<String, Object> dependenciesTag = (HashMap<String, Object>)value.getOrDefault("dependencies", null);
if(dependenciesTag == null) {
return null;
}
ArrayList<HashMap<String, Object>> dependencies = (ArrayList<HashMap<String, Object>>) dependenciesTag.getOrDefault("dependency", null);
for(HashMap<String, Object> dependency: dependencies) {
String groupId = (String) dependency.getOrDefault("groupId", null);
String artifactId = (String) dependency.getOrDefault("artifactId", null);
String version = (String) dependency.getOrDefault("version", null);
String scope = (String) dependency.getOrDefault("scope", ScopeType.COMPILE.toString());
ScopeType scopeType = ScopeType.COMPILE;
switch(scope) {
case "test" : scopeType = ScopeType.TEST;
break;
default: scopeType = ScopeType.COMPILE;
}
bomDependencies.add(new BomDependency(groupId, artifactId, version, scopeType));
}
} catch (IOException exception) {
exception.printStackTrace();
}
return bomDependencies.stream().distinct().collect(Collectors.toList());
} | default: scopeType = ScopeType.COMPILE; | static List<BomDependency> parsePomFileContent(Reader responseStream) {
List<BomDependency> bomDependencies = new ArrayList<>();
ObjectMapper mapper = new XmlMapper();
mapper.disable(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES);
try {
Model value = mapper.readValue(responseStream, Model.class);
List<Dependency> dependencies = value.getDependencies();
if(dependencies == null) {
return bomDependencies;
}
for(Dependency dependency : dependencies) {
ScopeType scopeType = ScopeType.COMPILE;
if("test".equals(dependency.getScope())) {
scopeType = ScopeType.TEST;
}
bomDependencies.add(new BomDependency(
dependency.getGroupId(),
dependency.getArtifactId(),
dependency.getVersion(),
scopeType));
}
} catch (IOException exception) {
exception.printStackTrace();
}
return bomDependencies.stream().distinct().collect(Collectors.toList());
} | class Utils {
public static final String COMMANDLINE_INPUTDIRECTORY = "inputdir";
public static final String COMMANDLINE_OUTPUTDIRECTORY = "outputdir";
public static final String EMPTY_STRING = "";
public static final String COMMANDLINE_INPUTFILE = "inputfile";
public static final String COMMANDLINE_OUTPUTFILE = "outputfile";
public static final String COMMANDLINE_POMFILE = "pomfile";
public static final String COMMANDLINE_OVERRIDDEN_INPUTDEPENDENCIES_FILE = "inputdependenciesfile";
public static final String COMMANDLINE_REPORTFILE = "reportfile";
public static final String COMMANDLINE_MODE = "mode";
public static final String ANALYZE_MODE = "analyze";
public static final String GENERATE_MODE = "generate";
public static final Pattern COMMANDLINE_REGEX = Pattern.compile("-(.*)=(.*)");
public static final List<String> EXCLUSION_LIST = Arrays.asList("azure-spring-data-cosmos", "azure-spring-data-cosmos-test", "azure-core-test", "azure-sdk-all", "azure-sdk-parent", "azure-client-sdk-parent");
public static final Pattern SDK_DEPENDENCY_PATTERN = Pattern.compile("com.azure:(.+);(.+);(.+)");
public static final String BASE_AZURE_GROUPID = "com.azure";
public static final String AZURE_TEST_LIBRARY_IDENTIFIER = "-test";
public static final String AZURE_PERF_LIBRARY_IDENTIFIER = "-perf";
public static final HttpClient HTTP_CLIENT = HttpClient.newHttpClient();
public static final Pattern STRING_SPLIT_BY_DOT = Pattern.compile("[.]");
public static final Pattern STRING_SPLIT_BY_COLON = Pattern.compile("[:]");
public static final Pattern INPUT_DEPENDENCY_PATTERN = Pattern.compile("(.+);(.*)");
public static final String PROJECT_VERSION = "project.version";
public static final HashSet<String> RESOLVED_EXCLUSION_LIST = new HashSet<>(Arrays.asList(
"junit-jupiter-api"
));
public static final HashSet<String> IGNORE_CONFLICT_LIST = new HashSet<>(/*Arrays.asList(
"slf4j-api"
)*/);
public static final String POM_TYPE = "pom";
private static Logger logger = LoggerFactory.getLogger(Utils.class);
static void validateNotNullOrEmpty(String argValue, String argName) {
if(argValue == null || argValue.isEmpty()) {
throw new NullPointerException(String.format("%s can't be null", argName));
}
}
static void validateNotNullOrEmpty(String[] argValue, String argName) {
if(Arrays.stream(argValue).anyMatch(value -> value == null || value.isEmpty())) {
throw new IllegalArgumentException(String.format("%s can't be null", argName));
}
}
static MavenResolverSystemBase<PomEquippedResolveStage, PomlessResolveStage, MavenStrategyStage, MavenFormatStage> getMavenResolver() {
return Maven.configureResolver().withMavenCentralRepo(true);
}
static boolean isPublishedArtifact(BomDependency dependency) {
try {
return getResolvedArtifact(dependency) != null;
} catch (Exception ex) {
logger.error(ex.toString());
}
return false;
}
static MavenResolvedArtifact getResolvedArtifact(MavenDependency dependency) {
MavenResolvedArtifact mavenResolvedArtifact = null;
mavenResolvedArtifact = getMavenResolver()
.addDependency(dependency)
.resolve()
.withoutTransitivity()
.asSingleResolvedArtifact();
return mavenResolvedArtifact;
}
static void validateNull(String argValue, String argName) {
if(argValue != null) {
throw new IllegalArgumentException(String.format("%s should be null", argName));
}
}
static void validateValues(String argName, String argValue, String ... expectedValues) {
if(Arrays.stream(expectedValues).noneMatch(a -> a.equals(argValue))) {
throw new IllegalArgumentException(String.format("%s must match %s", argName, Arrays.toString(expectedValues)));
}
}
static List<BomDependency> getExternalDependenciesContent(List<Dependency> dependencies) {
List<BomDependency> allResolvedDependencies = new ArrayList<>();
for (Dependency dependency : dependencies) {
List<BomDependency> resolvedDependencies = getPomFileContent(dependency);
if (resolvedDependencies != null) {
allResolvedDependencies.addAll(resolvedDependencies);
}
}
return allResolvedDependencies;
}
static List<BomDependency> getPomFileContent(Dependency dependency) {
String[] groups = STRING_SPLIT_BY_DOT.split(dependency.getGroupId());
String url = null;
if(groups.length == 2) {
url = "https:
}
else if (groups.length == 3) {
url = "https:
}
else {
throw new UnsupportedOperationException("Can't parse the external BOM file.");
}
HttpRequest request = HttpRequest.newBuilder()
.uri(URI.create(url))
.GET()
.header("accept", "application/xml")
.timeout(Duration.ofMillis(5000))
.build();
return HTTP_CLIENT.sendAsync(request, HttpResponse.BodyHandlers.ofInputStream())
.thenApply(response -> {
if(response.statusCode() == 200) {
try (InputStreamReader reader = new InputStreamReader(response.body())) {
return Utils.parsePomFileContent(reader);
}
catch (IOException ex) {
logger.error("Failed to read contents for {}", dependency.toString());
}
}
return null;
}).join();
}
static BomDependencyNoVersion toBomDependencyNoVersion(BomDependency bomDependency) {
return new BomDependencyNoVersion(bomDependency.getGroupId(), bomDependency.getArtifactId());
}
static List<BomDependency> parsePomFileContent(String fileName) {
try (FileReader reader = new FileReader(fileName)) {
return parsePomFileContent(reader);
} catch (IOException exception) {
logger.error("Failed to read the contents of the pom file: {}", fileName);
}
return new ArrayList<>();
}
static List<BomDependency> parseBomFileContent(Reader responseStream) {
MavenXpp3Reader reader = new MavenXpp3Reader();
try {
Model model = reader.read(responseStream);
DependencyManagement management = model.getDependencyManagement();
return management.getDependencies().stream().map(dep -> {
String version = getPropertyName(dep.getVersion());
while(model.getProperties().getProperty(version) != null) {
version = getPropertyName(model.getProperties().getProperty(version));
if(version.equals(PROJECT_VERSION)) {
version = model.getVersion();
}
}
if(version == null) {
version = dep.getVersion();
}
BomDependency bomDependency = new BomDependency(dep.getGroupId(), dep.getArtifactId(), version);
return bomDependency;
}).collect(Collectors.toList());
} catch (IOException exception) {
exception.printStackTrace();
} catch (XmlPullParserException e) {
e.printStackTrace();
}
return null;
}
private static String getPropertyName(String propertyValue) {
if(propertyValue.startsWith("${")) {
return propertyValue.substring(2, propertyValue.length() - 1);
}
return propertyValue;
}
} | class Utils {
public static final String COMMANDLINE_INPUTDIRECTORY = "inputdir";
public static final String COMMANDLINE_OUTPUTDIRECTORY = "outputdir";
public static final String COMMANDLINE_MODE = "mode";
public static final String ANALYZE_MODE = "analyze";
public static final String GENERATE_MODE = "generate";
public static final Pattern COMMANDLINE_REGEX = Pattern.compile("-(.*)=(.*)");
public static final List<String> EXCLUSION_LIST = Arrays.asList("azure-spring-data-cosmos", "azure-spring-data-cosmos-test", "azure-core-test", "azure-sdk-all", "azure-sdk-parent", "azure-client-sdk-parent");
public static final Pattern SDK_DEPENDENCY_PATTERN = Pattern.compile("com.azure:(.+);(.+);(.+)");
public static final String BASE_AZURE_GROUPID = "com.azure";
public static final String AZURE_TEST_LIBRARY_IDENTIFIER = "-test";
public static final String AZURE_PERF_LIBRARY_IDENTIFIER = "-perf";
public static final HttpClient HTTP_CLIENT = HttpClient.newHttpClient();
public static final Pattern STRING_SPLIT_BY_DOT = Pattern.compile("[.]");
public static final Pattern STRING_SPLIT_BY_COLON = Pattern.compile("[:]");
public static final Pattern INPUT_DEPENDENCY_PATTERN = Pattern.compile("(.+);(.*)");
public static final String PROJECT_VERSION = "project.version";
public static final HashSet<String> RESOLVED_EXCLUSION_LIST = new HashSet<>(Arrays.asList(
"junit-jupiter-api"
));
public static final HashSet<String> IGNORE_CONFLICT_LIST = new HashSet<>(/*Arrays.asList(
"slf4j-api"
)*/);
public static final String POM_TYPE = "pom";
private static Logger logger = LoggerFactory.getLogger(Utils.class);
static void validateNotNullOrEmpty(String argValue, String argName) {
if(argValue == null || argValue.isEmpty()) {
throw new NullPointerException(String.format("%s can't be null", argName));
}
}
static void validateNotNullOrEmpty(String[] argValue, String argName) {
if(Arrays.stream(argValue).anyMatch(value -> value == null || value.isEmpty())) {
throw new IllegalArgumentException(String.format("%s can't be null", argName));
}
}
static MavenResolverSystemBase<PomEquippedResolveStage, PomlessResolveStage, MavenStrategyStage, MavenFormatStage> getMavenResolver() {
return Maven.configureResolver().withMavenCentralRepo(true);
}
static boolean isPublishedArtifact(BomDependency dependency) {
try {
return getResolvedArtifact(dependency) != null;
} catch (Exception ex) {
logger.error(ex.toString());
}
return false;
}
static MavenResolvedArtifact getResolvedArtifact(MavenDependency dependency) {
MavenResolvedArtifact mavenResolvedArtifact = null;
mavenResolvedArtifact = getMavenResolver()
.addDependency(dependency)
.resolve()
.withoutTransitivity()
.asSingleResolvedArtifact();
return mavenResolvedArtifact;
}
static void validateNull(String argValue, String argName) {
if(argValue != null) {
throw new IllegalArgumentException(String.format("%s should be null", argName));
}
}
static void validateValues(String argName, String argValue, String ... expectedValues) {
if(Arrays.stream(expectedValues).noneMatch(a -> a.equals(argValue))) {
throw new IllegalArgumentException(String.format("%s must match %s", argName, Arrays.toString(expectedValues)));
}
}
static List<BomDependency> getExternalDependenciesContent(List<Dependency> dependencies) {
List<BomDependency> allResolvedDependencies = new ArrayList<>();
for (Dependency dependency : dependencies) {
List<BomDependency> resolvedDependencies = getPomFileContent(dependency);
if (resolvedDependencies != null) {
allResolvedDependencies.addAll(resolvedDependencies);
}
}
return allResolvedDependencies;
}
static List<BomDependency> getPomFileContent(Dependency dependency) {
String[] groups = STRING_SPLIT_BY_DOT.split(dependency.getGroupId());
String url = null;
if(groups.length == 2) {
url = "https:
}
else if (groups.length == 3) {
url = "https:
}
else {
throw new UnsupportedOperationException("Can't parse the external BOM file.");
}
HttpRequest request = HttpRequest.newBuilder()
.uri(URI.create(url))
.GET()
.header("accept", "application/xml")
.timeout(Duration.ofMillis(5000))
.build();
return HTTP_CLIENT.sendAsync(request, HttpResponse.BodyHandlers.ofInputStream())
.thenApply(response -> {
if(response.statusCode() == 200) {
try (InputStreamReader reader = new InputStreamReader(response.body())) {
return Utils.parsePomFileContent(reader);
}
catch (IOException ex) {
logger.error("Failed to read contents for {}", dependency.toString());
}
}
return null;
}).join();
}
static BomDependencyNoVersion toBomDependencyNoVersion(BomDependency bomDependency) {
return new BomDependencyNoVersion(bomDependency.getGroupId(), bomDependency.getArtifactId());
}
static List<BomDependency> parsePomFileContent(String fileName) {
try (FileReader reader = new FileReader(fileName)) {
return parsePomFileContent(reader);
} catch (IOException exception) {
logger.error("Failed to read the contents of the pom file: {}", fileName);
}
return new ArrayList<>();
}
static List<BomDependency> parseBomFileContent(Reader responseStream) {
MavenXpp3Reader reader = new MavenXpp3Reader();
try {
Model model = reader.read(responseStream);
DependencyManagement management = model.getDependencyManagement();
return management.getDependencies().stream().map(dep -> {
String version = getPropertyName(dep.getVersion());
while(model.getProperties().getProperty(version) != null) {
version = getPropertyName(model.getProperties().getProperty(version));
if(version.equals(PROJECT_VERSION)) {
version = model.getVersion();
}
}
if(version == null) {
version = dep.getVersion();
}
BomDependency bomDependency = new BomDependency(dep.getGroupId(), dep.getArtifactId(), version);
return bomDependency;
}).collect(Collectors.toList());
} catch (IOException exception) {
exception.printStackTrace();
} catch (XmlPullParserException e) {
e.printStackTrace();
}
return null;
}
private static String getPropertyName(String propertyValue) {
if(propertyValue.startsWith("${")) {
return propertyValue.substring(2, propertyValue.length() - 1);
}
return propertyValue;
}
} |
Good idea. I can do the parsing as that. I would still have to return the List<T> but I do not think that is your concern. | static List<BomDependency> parsePomFileContent(Reader responseStream) {
List<BomDependency> bomDependencies = new ArrayList<>();
ObjectMapper mapper = new XmlMapper();
try {
HashMap<String, Object> value = mapper.readValue(responseStream, HashMap.class);
Object packagingProp = value.getOrDefault("packaging", null);
if(packagingProp != null && packagingProp.toString().equalsIgnoreCase("pom")) {
return parsePomFileContent(responseStream);
}
HashMap<String, Object> dependenciesTag = (HashMap<String, Object>)value.getOrDefault("dependencies", null);
if(dependenciesTag == null) {
return null;
}
ArrayList<HashMap<String, Object>> dependencies = (ArrayList<HashMap<String, Object>>) dependenciesTag.getOrDefault("dependency", null);
for(HashMap<String, Object> dependency: dependencies) {
String groupId = (String) dependency.getOrDefault("groupId", null);
String artifactId = (String) dependency.getOrDefault("artifactId", null);
String version = (String) dependency.getOrDefault("version", null);
String scope = (String) dependency.getOrDefault("scope", ScopeType.COMPILE.toString());
ScopeType scopeType = ScopeType.COMPILE;
switch(scope) {
case "test" : scopeType = ScopeType.TEST;
break;
default: scopeType = ScopeType.COMPILE;
}
bomDependencies.add(new BomDependency(groupId, artifactId, version, scopeType));
}
} catch (IOException exception) {
exception.printStackTrace();
}
return bomDependencies.stream().distinct().collect(Collectors.toList());
} | HashMap<String, Object> value = mapper.readValue(responseStream, HashMap.class); | static List<BomDependency> parsePomFileContent(Reader responseStream) {
List<BomDependency> bomDependencies = new ArrayList<>();
ObjectMapper mapper = new XmlMapper();
mapper.disable(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES);
try {
Model value = mapper.readValue(responseStream, Model.class);
List<Dependency> dependencies = value.getDependencies();
if(dependencies == null) {
return bomDependencies;
}
for(Dependency dependency : dependencies) {
ScopeType scopeType = ScopeType.COMPILE;
if("test".equals(dependency.getScope())) {
scopeType = ScopeType.TEST;
}
bomDependencies.add(new BomDependency(
dependency.getGroupId(),
dependency.getArtifactId(),
dependency.getVersion(),
scopeType));
}
} catch (IOException exception) {
exception.printStackTrace();
}
return bomDependencies.stream().distinct().collect(Collectors.toList());
} | class Utils {
public static final String COMMANDLINE_INPUTDIRECTORY = "inputdir";
public static final String COMMANDLINE_OUTPUTDIRECTORY = "outputdir";
public static final String EMPTY_STRING = "";
public static final String COMMANDLINE_INPUTFILE = "inputfile";
public static final String COMMANDLINE_OUTPUTFILE = "outputfile";
public static final String COMMANDLINE_POMFILE = "pomfile";
public static final String COMMANDLINE_OVERRIDDEN_INPUTDEPENDENCIES_FILE = "inputdependenciesfile";
public static final String COMMANDLINE_REPORTFILE = "reportfile";
public static final String COMMANDLINE_MODE = "mode";
public static final String ANALYZE_MODE = "analyze";
public static final String GENERATE_MODE = "generate";
public static final Pattern COMMANDLINE_REGEX = Pattern.compile("-(.*)=(.*)");
public static final List<String> EXCLUSION_LIST = Arrays.asList("azure-spring-data-cosmos", "azure-spring-data-cosmos-test", "azure-core-test", "azure-sdk-all", "azure-sdk-parent", "azure-client-sdk-parent");
public static final Pattern SDK_DEPENDENCY_PATTERN = Pattern.compile("com.azure:(.+);(.+);(.+)");
public static final String BASE_AZURE_GROUPID = "com.azure";
public static final String AZURE_TEST_LIBRARY_IDENTIFIER = "-test";
public static final String AZURE_PERF_LIBRARY_IDENTIFIER = "-perf";
public static final HttpClient HTTP_CLIENT = HttpClient.newHttpClient();
public static final Pattern STRING_SPLIT_BY_DOT = Pattern.compile("[.]");
public static final Pattern STRING_SPLIT_BY_COLON = Pattern.compile("[:]");
public static final Pattern INPUT_DEPENDENCY_PATTERN = Pattern.compile("(.+);(.*)");
public static final String PROJECT_VERSION = "project.version";
public static final HashSet<String> RESOLVED_EXCLUSION_LIST = new HashSet<>(Arrays.asList(
"junit-jupiter-api"
));
public static final HashSet<String> IGNORE_CONFLICT_LIST = new HashSet<>(/*Arrays.asList(
"slf4j-api"
)*/);
public static final String POM_TYPE = "pom";
private static Logger logger = LoggerFactory.getLogger(Utils.class);
static void validateNotNullOrEmpty(String argValue, String argName) {
if(argValue == null || argValue.isEmpty()) {
throw new NullPointerException(String.format("%s can't be null", argName));
}
}
static void validateNotNullOrEmpty(String[] argValue, String argName) {
if(Arrays.stream(argValue).anyMatch(value -> value == null || value.isEmpty())) {
throw new IllegalArgumentException(String.format("%s can't be null", argName));
}
}
static MavenResolverSystemBase<PomEquippedResolveStage, PomlessResolveStage, MavenStrategyStage, MavenFormatStage> getMavenResolver() {
return Maven.configureResolver().withMavenCentralRepo(true);
}
static boolean isPublishedArtifact(BomDependency dependency) {
try {
return getResolvedArtifact(dependency) != null;
} catch (Exception ex) {
logger.error(ex.toString());
}
return false;
}
static MavenResolvedArtifact getResolvedArtifact(MavenDependency dependency) {
MavenResolvedArtifact mavenResolvedArtifact = null;
mavenResolvedArtifact = getMavenResolver()
.addDependency(dependency)
.resolve()
.withoutTransitivity()
.asSingleResolvedArtifact();
return mavenResolvedArtifact;
}
static void validateNull(String argValue, String argName) {
if(argValue != null) {
throw new IllegalArgumentException(String.format("%s should be null", argName));
}
}
static void validateValues(String argName, String argValue, String ... expectedValues) {
if(Arrays.stream(expectedValues).noneMatch(a -> a.equals(argValue))) {
throw new IllegalArgumentException(String.format("%s must match %s", argName, Arrays.toString(expectedValues)));
}
}
static List<BomDependency> getExternalDependenciesContent(List<Dependency> dependencies) {
List<BomDependency> allResolvedDependencies = new ArrayList<>();
for (Dependency dependency : dependencies) {
List<BomDependency> resolvedDependencies = getPomFileContent(dependency);
if (resolvedDependencies != null) {
allResolvedDependencies.addAll(resolvedDependencies);
}
}
return allResolvedDependencies;
}
static List<BomDependency> getPomFileContent(Dependency dependency) {
String[] groups = STRING_SPLIT_BY_DOT.split(dependency.getGroupId());
String url = null;
if(groups.length == 2) {
url = "https:
}
else if (groups.length == 3) {
url = "https:
}
else {
throw new UnsupportedOperationException("Can't parse the external BOM file.");
}
HttpRequest request = HttpRequest.newBuilder()
.uri(URI.create(url))
.GET()
.header("accept", "application/xml")
.timeout(Duration.ofMillis(5000))
.build();
return HTTP_CLIENT.sendAsync(request, HttpResponse.BodyHandlers.ofInputStream())
.thenApply(response -> {
if(response.statusCode() == 200) {
try (InputStreamReader reader = new InputStreamReader(response.body())) {
return Utils.parsePomFileContent(reader);
}
catch (IOException ex) {
logger.error("Failed to read contents for {}", dependency.toString());
}
}
return null;
}).join();
}
static BomDependencyNoVersion toBomDependencyNoVersion(BomDependency bomDependency) {
return new BomDependencyNoVersion(bomDependency.getGroupId(), bomDependency.getArtifactId());
}
static List<BomDependency> parsePomFileContent(String fileName) {
try (FileReader reader = new FileReader(fileName)) {
return parsePomFileContent(reader);
} catch (IOException exception) {
logger.error("Failed to read the contents of the pom file: {}", fileName);
}
return new ArrayList<>();
}
static List<BomDependency> parseBomFileContent(Reader responseStream) {
MavenXpp3Reader reader = new MavenXpp3Reader();
try {
Model model = reader.read(responseStream);
DependencyManagement management = model.getDependencyManagement();
return management.getDependencies().stream().map(dep -> {
String version = getPropertyName(dep.getVersion());
while(model.getProperties().getProperty(version) != null) {
version = getPropertyName(model.getProperties().getProperty(version));
if(version.equals(PROJECT_VERSION)) {
version = model.getVersion();
}
}
if(version == null) {
version = dep.getVersion();
}
BomDependency bomDependency = new BomDependency(dep.getGroupId(), dep.getArtifactId(), version);
return bomDependency;
}).collect(Collectors.toList());
} catch (IOException exception) {
exception.printStackTrace();
} catch (XmlPullParserException e) {
e.printStackTrace();
}
return null;
}
private static String getPropertyName(String propertyValue) {
if(propertyValue.startsWith("${")) {
return propertyValue.substring(2, propertyValue.length() - 1);
}
return propertyValue;
}
} | class Utils {
public static final String COMMANDLINE_INPUTDIRECTORY = "inputdir";
public static final String COMMANDLINE_OUTPUTDIRECTORY = "outputdir";
public static final String COMMANDLINE_MODE = "mode";
public static final String ANALYZE_MODE = "analyze";
public static final String GENERATE_MODE = "generate";
public static final Pattern COMMANDLINE_REGEX = Pattern.compile("-(.*)=(.*)");
public static final List<String> EXCLUSION_LIST = Arrays.asList("azure-spring-data-cosmos", "azure-spring-data-cosmos-test", "azure-core-test", "azure-sdk-all", "azure-sdk-parent", "azure-client-sdk-parent");
public static final Pattern SDK_DEPENDENCY_PATTERN = Pattern.compile("com.azure:(.+);(.+);(.+)");
public static final String BASE_AZURE_GROUPID = "com.azure";
public static final String AZURE_TEST_LIBRARY_IDENTIFIER = "-test";
public static final String AZURE_PERF_LIBRARY_IDENTIFIER = "-perf";
public static final HttpClient HTTP_CLIENT = HttpClient.newHttpClient();
public static final Pattern STRING_SPLIT_BY_DOT = Pattern.compile("[.]");
public static final Pattern STRING_SPLIT_BY_COLON = Pattern.compile("[:]");
public static final Pattern INPUT_DEPENDENCY_PATTERN = Pattern.compile("(.+);(.*)");
public static final String PROJECT_VERSION = "project.version";
public static final HashSet<String> RESOLVED_EXCLUSION_LIST = new HashSet<>(Arrays.asList(
"junit-jupiter-api"
));
public static final HashSet<String> IGNORE_CONFLICT_LIST = new HashSet<>(/*Arrays.asList(
"slf4j-api"
)*/);
public static final String POM_TYPE = "pom";
private static Logger logger = LoggerFactory.getLogger(Utils.class);
static void validateNotNullOrEmpty(String argValue, String argName) {
if(argValue == null || argValue.isEmpty()) {
throw new NullPointerException(String.format("%s can't be null", argName));
}
}
static void validateNotNullOrEmpty(String[] argValue, String argName) {
if(Arrays.stream(argValue).anyMatch(value -> value == null || value.isEmpty())) {
throw new IllegalArgumentException(String.format("%s can't be null", argName));
}
}
/**
 * Creates a Maven resolver configured to resolve from Maven Central.
 */
static MavenResolverSystemBase<PomEquippedResolveStage, PomlessResolveStage, MavenStrategyStage, MavenFormatStage> getMavenResolver() {
    return Maven.configureResolver().withMavenCentralRepo(true);
}
/**
 * Checks whether the given dependency resolves from Maven Central, i.e. whether
 * it has been published. Resolution failures are logged and reported as
 * "not published" instead of being propagated.
 */
static boolean isPublishedArtifact(BomDependency dependency) {
    boolean published = false;
    try {
        published = getResolvedArtifact(dependency) != null;
    } catch (Exception ex) {
        logger.error(ex.toString());
    }
    return published;
}
/**
 * Resolves the single artifact for the given Maven coordinates from Maven
 * Central, without pulling in its transitive dependencies.
 *
 * @param dependency the Maven coordinates to resolve.
 * @return the resolved artifact.
 */
static MavenResolvedArtifact getResolvedArtifact(MavenDependency dependency) {
    // The local variable was previously initialized to null and immediately
    // overwritten; resolve and return directly instead.
    return getMavenResolver()
        .addDependency(dependency)
        .resolve()
        .withoutTransitivity()
        .asSingleResolvedArtifact();
}
/**
 * Validates that the given argument value is null.
 *
 * @param argValue the value to validate.
 * @param argName the argument name used in the error message.
 * @throws IllegalArgumentException if {@code argValue} is non-null.
 */
static void validateNull(String argValue, String argName) {
    if (argValue == null) {
        return;
    }
    throw new IllegalArgumentException(String.format("%s should be null", argName));
}
/**
 * Validates that {@code argValue} equals one of the expected values.
 *
 * @param argName the argument name used in the error message.
 * @param argValue the value to validate.
 * @param expectedValues the set of accepted values.
 * @throws IllegalArgumentException if no expected value matches.
 */
static void validateValues(String argName, String argValue, String ... expectedValues) {
    for (String expected : expectedValues) {
        if (expected.equals(argValue)) {
            return;
        }
    }
    throw new IllegalArgumentException(String.format("%s must match %s", argName, Arrays.toString(expectedValues)));
}
/**
 * Fetches and parses the POM of every given external dependency, collecting all
 * of their declared dependencies. Dependencies whose POM could not be fetched
 * or parsed (null result) are skipped.
 */
static List<BomDependency> getExternalDependenciesContent(List<Dependency> dependencies) {
    List<BomDependency> collected = new ArrayList<>();
    for (Dependency externalDependency : dependencies) {
        List<BomDependency> resolved = getPomFileContent(externalDependency);
        if (resolved == null) {
            continue;
        }
        collected.addAll(resolved);
    }
    return collected;
}
/**
 * Downloads the POM file for the given external dependency and parses its
 * dependencies.
 *
 * @param dependency the external dependency whose POM should be fetched.
 * @return the parsed dependencies, or null when the download or read fails.
 * @throws UnsupportedOperationException if the groupId does not split into two
 *     or three segments.
 */
static List<BomDependency> getPomFileContent(Dependency dependency) {
    String[] groups = STRING_SPLIT_BY_DOT.split(dependency.getGroupId());
    String url = null;
    // NOTE(review): the URL string literals below appear truncated in this copy
    // of the file ("https:" with no host/path); confirm against the original source.
    if(groups.length == 2) {
        url = "https:
    }
    else if (groups.length == 3) {
        url = "https:
    }
    else {
        throw new UnsupportedOperationException("Can't parse the external BOM file.");
    }
    HttpRequest request = HttpRequest.newBuilder()
        .uri(URI.create(url))
        .GET()
        .header("accept", "application/xml")
        .timeout(Duration.ofMillis(5000))
        .build();
    // Send asynchronously but block for the parsed result via join().
    return HTTP_CLIENT.sendAsync(request, HttpResponse.BodyHandlers.ofInputStream())
        .thenApply(response -> {
            if(response.statusCode() == 200) {
                try (InputStreamReader reader = new InputStreamReader(response.body())) {
                    return Utils.parsePomFileContent(reader);
                }
                catch (IOException ex) {
                    logger.error("Failed to read contents for {}", dependency.toString());
                }
            }
            // Non-200 responses and read failures both yield null.
            return null;
        }).join();
}
/**
 * Strips the version from the given dependency, keeping only its coordinates.
 */
static BomDependencyNoVersion toBomDependencyNoVersion(BomDependency bomDependency) {
    return new BomDependencyNoVersion(bomDependency.getGroupId(), bomDependency.getArtifactId());
}
/**
 * Parses the POM file at the given path.
 *
 * @param fileName path of the POM file on disk.
 * @return the parsed dependencies; an empty list when the file cannot be read.
 */
static List<BomDependency> parsePomFileContent(String fileName) {
    List<BomDependency> parsed = new ArrayList<>();
    try (FileReader reader = new FileReader(fileName)) {
        parsed = parsePomFileContent(reader);
    } catch (IOException exception) {
        logger.error("Failed to read the contents of the pom file: {}", fileName);
    }
    return parsed;
}
/**
 * Parses a BOM (packaging "pom") file and returns the dependencies declared in
 * its dependencyManagement section, resolving version properties against the
 * model's properties.
 *
 * @param responseStream reader over the BOM file contents.
 * @return the declared dependencies, or null when parsing fails.
 */
static List<BomDependency> parseBomFileContent(Reader responseStream) {
    MavenXpp3Reader reader = new MavenXpp3Reader();
    try {
        Model model = reader.read(responseStream);
        DependencyManagement management = model.getDependencyManagement();
        return management.getDependencies().stream().map(dep -> {
            // Follow ${property} indirections until a concrete value is found;
            // "project.version" resolves to the model's own version.
            String version = getPropertyName(dep.getVersion());
            while(model.getProperties().getProperty(version) != null) {
                version = getPropertyName(model.getProperties().getProperty(version));
                if(version.equals(PROJECT_VERSION)) {
                    version = model.getVersion();
                }
            }
            if(version == null) {
                version = dep.getVersion();
            }
            return new BomDependency(dep.getGroupId(), dep.getArtifactId(), version);
        }).collect(Collectors.toList());
    } catch (IOException | XmlPullParserException exception) {
        // Route failures through the class logger (with the full stack trace)
        // instead of printStackTrace(), consistent with the rest of this class.
        logger.error("Failed to parse the BOM file contents.", exception);
    }
    return null;
}
/**
 * Unwraps a Maven "${property}" reference to the bare property name; any other
 * value is returned unchanged.
 */
private static String getPropertyName(String propertyValue) {
    if (!propertyValue.startsWith("${")) {
        return propertyValue;
    }
    // Drop the leading "${" and the trailing "}".
    return propertyValue.substring(2, propertyValue.length() - 1);
}
} |
Yes — unfortunately, the open-source project I used for POM parsing only parses a BOM file (packaging type POM). I will be removing that dependency completely due to other issues as well; for example, it does not support adding comments to the POM file (which we need for version management in engsys). | static List<BomDependency> parsePomFileContent(Reader responseStream) {
List<BomDependency> bomDependencies = new ArrayList<>();
ObjectMapper mapper = new XmlMapper();
try {
HashMap<String, Object> value = mapper.readValue(responseStream, HashMap.class);
Object packagingProp = value.getOrDefault("packaging", null);
if(packagingProp != null && packagingProp.toString().equalsIgnoreCase("pom")) {
return parsePomFileContent(responseStream);
}
HashMap<String, Object> dependenciesTag = (HashMap<String, Object>)value.getOrDefault("dependencies", null);
if(dependenciesTag == null) {
return null;
}
ArrayList<HashMap<String, Object>> dependencies = (ArrayList<HashMap<String, Object>>) dependenciesTag.getOrDefault("dependency", null);
for(HashMap<String, Object> dependency: dependencies) {
String groupId = (String) dependency.getOrDefault("groupId", null);
String artifactId = (String) dependency.getOrDefault("artifactId", null);
String version = (String) dependency.getOrDefault("version", null);
String scope = (String) dependency.getOrDefault("scope", ScopeType.COMPILE.toString());
ScopeType scopeType = ScopeType.COMPILE;
switch(scope) {
case "test" : scopeType = ScopeType.TEST;
break;
default: scopeType = ScopeType.COMPILE;
}
bomDependencies.add(new BomDependency(groupId, artifactId, version, scopeType));
}
} catch (IOException exception) {
exception.printStackTrace();
}
return bomDependencies.stream().distinct().collect(Collectors.toList());
} | ObjectMapper mapper = new XmlMapper(); | static List<BomDependency> parsePomFileContent(Reader responseStream) {
List<BomDependency> bomDependencies = new ArrayList<>();
ObjectMapper mapper = new XmlMapper();
mapper.disable(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES);
try {
Model value = mapper.readValue(responseStream, Model.class);
List<Dependency> dependencies = value.getDependencies();
if(dependencies == null) {
return bomDependencies;
}
for(Dependency dependency : dependencies) {
ScopeType scopeType = ScopeType.COMPILE;
if("test".equals(dependency.getScope())) {
scopeType = ScopeType.TEST;
}
bomDependencies.add(new BomDependency(
dependency.getGroupId(),
dependency.getArtifactId(),
dependency.getVersion(),
scopeType));
}
} catch (IOException exception) {
exception.printStackTrace();
}
return bomDependencies.stream().distinct().collect(Collectors.toList());
} | class Utils {
// Command-line option names accepted by the tool.
public static final String COMMANDLINE_INPUTDIRECTORY = "inputdir";
public static final String COMMANDLINE_OUTPUTDIRECTORY = "outputdir";
public static final String EMPTY_STRING = "";
public static final String COMMANDLINE_INPUTFILE = "inputfile";
public static final String COMMANDLINE_OUTPUTFILE = "outputfile";
public static final String COMMANDLINE_POMFILE = "pomfile";
public static final String COMMANDLINE_OVERRIDDEN_INPUTDEPENDENCIES_FILE = "inputdependenciesfile";
public static final String COMMANDLINE_REPORTFILE = "reportfile";
public static final String COMMANDLINE_MODE = "mode";
// Supported values for the "mode" option.
public static final String ANALYZE_MODE = "analyze";
public static final String GENERATE_MODE = "generate";
// Matches "-name=value" style command-line options.
public static final Pattern COMMANDLINE_REGEX = Pattern.compile("-(.*)=(.*)");
// Artifacts that are skipped when scanning the SDK tree.
public static final List<String> EXCLUSION_LIST = Arrays.asList("azure-spring-data-cosmos", "azure-spring-data-cosmos-test", "azure-core-test", "azure-sdk-all", "azure-sdk-parent", "azure-client-sdk-parent");
public static final Pattern SDK_DEPENDENCY_PATTERN = Pattern.compile("com.azure:(.+);(.+);(.+)");
public static final String BASE_AZURE_GROUPID = "com.azure";
// Artifact-id suffixes identifying test-only and perf-only libraries.
public static final String AZURE_TEST_LIBRARY_IDENTIFIER = "-test";
public static final String AZURE_PERF_LIBRARY_IDENTIFIER = "-perf";
// Shared client used to download external POM files.
public static final HttpClient HTTP_CLIENT = HttpClient.newHttpClient();
public static final Pattern STRING_SPLIT_BY_DOT = Pattern.compile("[.]");
public static final Pattern STRING_SPLIT_BY_COLON = Pattern.compile("[:]");
public static final Pattern INPUT_DEPENDENCY_PATTERN = Pattern.compile("(.+);(.*)");
// Maven property name that resolves to the declaring project's own version.
public static final String PROJECT_VERSION = "project.version";
// Artifacts excluded from resolved-dependency checks.
public static final HashSet<String> RESOLVED_EXCLUSION_LIST = new HashSet<>(Arrays.asList(
    "junit-jupiter-api"
));
// Artifacts whose version conflicts are deliberately ignored (currently none).
public static final HashSet<String> IGNORE_CONFLICT_LIST = new HashSet<>(/*Arrays.asList(
    "slf4j-api"
)*/);
public static final String POM_TYPE = "pom";
private static Logger logger = LoggerFactory.getLogger(Utils.class);

// Validates that a single argument value is neither null nor empty.
static void validateNotNullOrEmpty(String argValue, String argName) {
    if(argValue == null || argValue.isEmpty()) {
        throw new NullPointerException(String.format("%s can't be null", argName));
    }
}

// Validates that no element of the array is null or empty.
// NOTE(review): a null array itself produces a raw NullPointerException here.
static void validateNotNullOrEmpty(String[] argValue, String argName) {
    if(Arrays.stream(argValue).anyMatch(value -> value == null || value.isEmpty())) {
        throw new IllegalArgumentException(String.format("%s can't be null", argName));
    }
}

// Creates a Maven resolver configured to resolve from Maven Central.
static MavenResolverSystemBase<PomEquippedResolveStage, PomlessResolveStage, MavenStrategyStage, MavenFormatStage> getMavenResolver() {
    return Maven.configureResolver().withMavenCentralRepo(true);
}

// True when the dependency can be resolved from Maven Central (i.e. it has
// been published); resolution failures are logged and reported as false.
static boolean isPublishedArtifact(BomDependency dependency) {
    try {
        return getResolvedArtifact(dependency) != null;
    } catch (Exception ex) {
        logger.error(ex.toString());
    }
    return false;
}

// Resolves the single artifact (no transitive dependencies) for the coordinates.
static MavenResolvedArtifact getResolvedArtifact(MavenDependency dependency) {
    MavenResolvedArtifact mavenResolvedArtifact = null;
    mavenResolvedArtifact = getMavenResolver()
        .addDependency(dependency)
        .resolve()
        .withoutTransitivity()
        .asSingleResolvedArtifact();
    return mavenResolvedArtifact;
}

// Validates that the argument value is null.
static void validateNull(String argValue, String argName) {
    if(argValue != null) {
        throw new IllegalArgumentException(String.format("%s should be null", argName));
    }
}

// Validates that argValue equals one of the expected values.
static void validateValues(String argName, String argValue, String ... expectedValues) {
    if(Arrays.stream(expectedValues).noneMatch(a -> a.equals(argValue))) {
        throw new IllegalArgumentException(String.format("%s must match %s", argName, Arrays.toString(expectedValues)));
    }
}

// Fetches and parses the POMs of all given external dependencies, skipping
// dependencies whose POM could not be fetched or parsed (null result).
static List<BomDependency> getExternalDependenciesContent(List<Dependency> dependencies) {
    List<BomDependency> allResolvedDependencies = new ArrayList<>();
    for (Dependency dependency : dependencies) {
        List<BomDependency> resolvedDependencies = getPomFileContent(dependency);
        if (resolvedDependencies != null) {
            allResolvedDependencies.addAll(resolvedDependencies);
        }
    }
    return allResolvedDependencies;
}

// Downloads and parses the POM for a single external dependency; returns null
// on non-200 responses and read failures.
// NOTE(review): the "https:" URL literals below appear truncated in this copy.
static List<BomDependency> getPomFileContent(Dependency dependency) {
    String[] groups = STRING_SPLIT_BY_DOT.split(dependency.getGroupId());
    String url = null;
    if(groups.length == 2) {
        url = "https:
    }
    else if (groups.length == 3) {
        url = "https:
    }
    else {
        throw new UnsupportedOperationException("Can't parse the external BOM file.");
    }
    HttpRequest request = HttpRequest.newBuilder()
        .uri(URI.create(url))
        .GET()
        .header("accept", "application/xml")
        .timeout(Duration.ofMillis(5000))
        .build();
    return HTTP_CLIENT.sendAsync(request, HttpResponse.BodyHandlers.ofInputStream())
        .thenApply(response -> {
            if(response.statusCode() == 200) {
                try (InputStreamReader reader = new InputStreamReader(response.body())) {
                    return Utils.parsePomFileContent(reader);
                }
                catch (IOException ex) {
                    logger.error("Failed to read contents for {}", dependency.toString());
                }
            }
            return null;
        }).join();
}

// Drops the version, keeping only groupId/artifactId.
static BomDependencyNoVersion toBomDependencyNoVersion(BomDependency bomDependency) {
    return new BomDependencyNoVersion(bomDependency.getGroupId(), bomDependency.getArtifactId());
}

// Parses a POM file from disk; returns an empty list when the file can't be read.
static List<BomDependency> parsePomFileContent(String fileName) {
    try (FileReader reader = new FileReader(fileName)) {
        return parsePomFileContent(reader);
    } catch (IOException exception) {
        logger.error("Failed to read the contents of the pom file: {}", fileName);
    }
    return new ArrayList<>();
}

// Parses a BOM file's dependencyManagement section, following ${property}
// version indirections; returns null when parsing fails.
static List<BomDependency> parseBomFileContent(Reader responseStream) {
    MavenXpp3Reader reader = new MavenXpp3Reader();
    try {
        Model model = reader.read(responseStream);
        DependencyManagement management = model.getDependencyManagement();
        return management.getDependencies().stream().map(dep -> {
            String version = getPropertyName(dep.getVersion());
            while(model.getProperties().getProperty(version) != null) {
                version = getPropertyName(model.getProperties().getProperty(version));
                if(version.equals(PROJECT_VERSION)) {
                    version = model.getVersion();
                }
            }
            if(version == null) {
                version = dep.getVersion();
            }
            BomDependency bomDependency = new BomDependency(dep.getGroupId(), dep.getArtifactId(), version);
            return bomDependency;
        }).collect(Collectors.toList());
    } catch (IOException exception) {
        exception.printStackTrace();
    } catch (XmlPullParserException e) {
        e.printStackTrace();
    }
    return null;
}

// Unwraps "${property}" to the bare property name; other values pass through.
private static String getPropertyName(String propertyValue) {
    if(propertyValue.startsWith("${")) {
        return propertyValue.substring(2, propertyValue.length() - 1);
    }
    return propertyValue;
}
} | class Utils {
// Command-line option names accepted by the tool.
public static final String COMMANDLINE_INPUTDIRECTORY = "inputdir";
public static final String COMMANDLINE_OUTPUTDIRECTORY = "outputdir";
public static final String COMMANDLINE_MODE = "mode";
// Supported values for the "mode" option.
public static final String ANALYZE_MODE = "analyze";
public static final String GENERATE_MODE = "generate";
// Matches "-name=value" style command-line options.
public static final Pattern COMMANDLINE_REGEX = Pattern.compile("-(.*)=(.*)");
// Artifacts that are skipped when scanning the SDK tree.
public static final List<String> EXCLUSION_LIST = Arrays.asList("azure-spring-data-cosmos", "azure-spring-data-cosmos-test", "azure-core-test", "azure-sdk-all", "azure-sdk-parent", "azure-client-sdk-parent");
public static final Pattern SDK_DEPENDENCY_PATTERN = Pattern.compile("com.azure:(.+);(.+);(.+)");
public static final String BASE_AZURE_GROUPID = "com.azure";
// Artifact-id suffixes identifying test-only and perf-only libraries.
public static final String AZURE_TEST_LIBRARY_IDENTIFIER = "-test";
public static final String AZURE_PERF_LIBRARY_IDENTIFIER = "-perf";
// Shared client used to download external POM files.
public static final HttpClient HTTP_CLIENT = HttpClient.newHttpClient();
public static final Pattern STRING_SPLIT_BY_DOT = Pattern.compile("[.]");
public static final Pattern STRING_SPLIT_BY_COLON = Pattern.compile("[:]");
public static final Pattern INPUT_DEPENDENCY_PATTERN = Pattern.compile("(.+);(.*)");
// Maven property name that resolves to the declaring project's own version.
public static final String PROJECT_VERSION = "project.version";
// Artifacts excluded from resolved-dependency checks.
public static final HashSet<String> RESOLVED_EXCLUSION_LIST = new HashSet<>(Arrays.asList(
    "junit-jupiter-api"
));
// Artifacts whose version conflicts are deliberately ignored (currently none).
public static final HashSet<String> IGNORE_CONFLICT_LIST = new HashSet<>(/*Arrays.asList(
    "slf4j-api"
)*/);
public static final String POM_TYPE = "pom";
private static Logger logger = LoggerFactory.getLogger(Utils.class);

// Validates that a single argument value is neither null nor empty.
static void validateNotNullOrEmpty(String argValue, String argName) {
    if(argValue == null || argValue.isEmpty()) {
        throw new NullPointerException(String.format("%s can't be null", argName));
    }
}

// Validates that no element of the array is null or empty.
// NOTE(review): a null array itself produces a raw NullPointerException here.
static void validateNotNullOrEmpty(String[] argValue, String argName) {
    if(Arrays.stream(argValue).anyMatch(value -> value == null || value.isEmpty())) {
        throw new IllegalArgumentException(String.format("%s can't be null", argName));
    }
}

// Creates a Maven resolver configured to resolve from Maven Central.
static MavenResolverSystemBase<PomEquippedResolveStage, PomlessResolveStage, MavenStrategyStage, MavenFormatStage> getMavenResolver() {
    return Maven.configureResolver().withMavenCentralRepo(true);
}

// True when the dependency can be resolved from Maven Central (i.e. it has
// been published); resolution failures are logged and reported as false.
static boolean isPublishedArtifact(BomDependency dependency) {
    try {
        return getResolvedArtifact(dependency) != null;
    } catch (Exception ex) {
        logger.error(ex.toString());
    }
    return false;
}

// Resolves the single artifact (no transitive dependencies) for the coordinates.
static MavenResolvedArtifact getResolvedArtifact(MavenDependency dependency) {
    MavenResolvedArtifact mavenResolvedArtifact = null;
    mavenResolvedArtifact = getMavenResolver()
        .addDependency(dependency)
        .resolve()
        .withoutTransitivity()
        .asSingleResolvedArtifact();
    return mavenResolvedArtifact;
}

// Validates that the argument value is null.
static void validateNull(String argValue, String argName) {
    if(argValue != null) {
        throw new IllegalArgumentException(String.format("%s should be null", argName));
    }
}

// Validates that argValue equals one of the expected values.
static void validateValues(String argName, String argValue, String ... expectedValues) {
    if(Arrays.stream(expectedValues).noneMatch(a -> a.equals(argValue))) {
        throw new IllegalArgumentException(String.format("%s must match %s", argName, Arrays.toString(expectedValues)));
    }
}

// Fetches and parses the POMs of all given external dependencies, skipping
// dependencies whose POM could not be fetched or parsed (null result).
static List<BomDependency> getExternalDependenciesContent(List<Dependency> dependencies) {
    List<BomDependency> allResolvedDependencies = new ArrayList<>();
    for (Dependency dependency : dependencies) {
        List<BomDependency> resolvedDependencies = getPomFileContent(dependency);
        if (resolvedDependencies != null) {
            allResolvedDependencies.addAll(resolvedDependencies);
        }
    }
    return allResolvedDependencies;
}

// Downloads and parses the POM for a single external dependency; returns null
// on non-200 responses and read failures.
// NOTE(review): the "https:" URL literals below appear truncated in this copy.
static List<BomDependency> getPomFileContent(Dependency dependency) {
    String[] groups = STRING_SPLIT_BY_DOT.split(dependency.getGroupId());
    String url = null;
    if(groups.length == 2) {
        url = "https:
    }
    else if (groups.length == 3) {
        url = "https:
    }
    else {
        throw new UnsupportedOperationException("Can't parse the external BOM file.");
    }
    HttpRequest request = HttpRequest.newBuilder()
        .uri(URI.create(url))
        .GET()
        .header("accept", "application/xml")
        .timeout(Duration.ofMillis(5000))
        .build();
    return HTTP_CLIENT.sendAsync(request, HttpResponse.BodyHandlers.ofInputStream())
        .thenApply(response -> {
            if(response.statusCode() == 200) {
                try (InputStreamReader reader = new InputStreamReader(response.body())) {
                    return Utils.parsePomFileContent(reader);
                }
                catch (IOException ex) {
                    logger.error("Failed to read contents for {}", dependency.toString());
                }
            }
            return null;
        }).join();
}

// Drops the version, keeping only groupId/artifactId.
static BomDependencyNoVersion toBomDependencyNoVersion(BomDependency bomDependency) {
    return new BomDependencyNoVersion(bomDependency.getGroupId(), bomDependency.getArtifactId());
}

// Parses a POM file from disk; returns an empty list when the file can't be read.
static List<BomDependency> parsePomFileContent(String fileName) {
    try (FileReader reader = new FileReader(fileName)) {
        return parsePomFileContent(reader);
    } catch (IOException exception) {
        logger.error("Failed to read the contents of the pom file: {}", fileName);
    }
    return new ArrayList<>();
}

// Parses a BOM file's dependencyManagement section, following ${property}
// version indirections; returns null when parsing fails.
static List<BomDependency> parseBomFileContent(Reader responseStream) {
    MavenXpp3Reader reader = new MavenXpp3Reader();
    try {
        Model model = reader.read(responseStream);
        DependencyManagement management = model.getDependencyManagement();
        return management.getDependencies().stream().map(dep -> {
            String version = getPropertyName(dep.getVersion());
            while(model.getProperties().getProperty(version) != null) {
                version = getPropertyName(model.getProperties().getProperty(version));
                if(version.equals(PROJECT_VERSION)) {
                    version = model.getVersion();
                }
            }
            if(version == null) {
                version = dep.getVersion();
            }
            BomDependency bomDependency = new BomDependency(dep.getGroupId(), dep.getArtifactId(), version);
            return bomDependency;
        }).collect(Collectors.toList());
    } catch (IOException exception) {
        exception.printStackTrace();
    } catch (XmlPullParserException e) {
        e.printStackTrace();
    }
    return null;
}

// Unwraps "${property}" to the bare property name; other values pass through.
private static String getPropertyName(String propertyValue) {
    if(propertyValue.startsWith("${")) {
        return propertyValue.substring(2, propertyValue.length() - 1);
    }
    return propertyValue;
}
} |
Are you adding tests where the null values are the actual values? | public void runAllClientFunctionsWithResponseAsync(HttpClient httpClient) {
String groupId = getGroupId("runAllClientFunctionsWithResponseAsync");
CallingServerClientBuilder builder = getCallingServerClientUsingConnectionString(httpClient);
CallingServerAsyncClient callingServerAsyncClient =
setupAsyncClient(builder, "runAllClientFunctionsWithResponseAsync");
String recordingId = "";
List<CallConnectionAsync> callConnections = new ArrayList<>();
ServerCallAsync serverCallAsync = null;
try {
callConnections = createAsyncCall(callingServerAsyncClient, groupId, fromUser, toUser, CALLBACK_URI);
serverCallAsync = callingServerAsyncClient.initializeServerCall(groupId);
Response<StartCallRecordingResult> startRecordingResponse =
serverCallAsync.startRecordingWithResponse(CALLBACK_URI, null, null, null, null).block();
assert startRecordingResponse != null;
assertEquals(startRecordingResponse.getStatusCode(), 200);
StartCallRecordingResult startCallRecordingResult = startRecordingResponse.getValue();
recordingId = startCallRecordingResult.getRecordingId();
validateCallRecordingStateWithResponse(serverCallAsync, recordingId, CallRecordingState.ACTIVE);
Response<Void> pauseResponse = serverCallAsync.pauseRecordingWithResponse(recordingId).block();
assert pauseResponse != null;
assertEquals(pauseResponse.getStatusCode(), 200);
validateCallRecordingStateWithResponse(serverCallAsync, recordingId, CallRecordingState.INACTIVE);
Response<Void> resumeResponse = serverCallAsync.resumeRecordingWithResponse(recordingId).block();
assert resumeResponse != null;
assertEquals(resumeResponse.getStatusCode(), 200);
validateCallRecordingStateWithResponse(serverCallAsync, recordingId, CallRecordingState.ACTIVE);
} catch (Exception e) {
System.out.println("Error: " + e.getMessage());
throw e;
} finally {
if (serverCallAsync != null) {
try {
Response<Void> stopResponse = serverCallAsync.stopRecordingWithResponse(recordingId).block();
assert stopResponse != null;
assertEquals(stopResponse.getStatusCode(), 200);
} catch (Exception e) {
System.out.println("Error stopping recording: " + e.getMessage());
}
}
cleanUpConnectionsAsync(callConnections);
}
} | serverCallAsync.startRecordingWithResponse(CALLBACK_URI, null, null, null, null).block(); | public void runAllClientFunctionsWithResponseAsync(HttpClient httpClient) {
String groupId = getGroupId("runAllClientFunctionsWithResponseAsync");
CallingServerClientBuilder builder = getCallingServerClientUsingConnectionString(httpClient);
CallingServerAsyncClient callingServerAsyncClient =
setupAsyncClient(builder, "runAllClientFunctionsWithResponseAsync");
String recordingId = "";
List<CallConnectionAsync> callConnections = new ArrayList<>();
ServerCallAsync serverCallAsync = null;
try {
callConnections = createAsyncCall(callingServerAsyncClient, groupId, fromUser, toUser, CALLBACK_URI);
serverCallAsync = callingServerAsyncClient.initializeServerCall(groupId);
Response<StartCallRecordingResult> startRecordingResponse =
serverCallAsync.startRecordingWithResponse(CALLBACK_URI, null, null).block();
assert startRecordingResponse != null;
assertEquals(startRecordingResponse.getStatusCode(), 200);
StartCallRecordingResult startCallRecordingResult = startRecordingResponse.getValue();
recordingId = startCallRecordingResult.getRecordingId();
validateCallRecordingStateWithResponse(serverCallAsync, recordingId, CallRecordingState.ACTIVE);
Response<Void> pauseResponse = serverCallAsync.pauseRecordingWithResponse(recordingId).block();
assert pauseResponse != null;
assertEquals(pauseResponse.getStatusCode(), 200);
validateCallRecordingStateWithResponse(serverCallAsync, recordingId, CallRecordingState.INACTIVE);
Response<Void> resumeResponse = serverCallAsync.resumeRecordingWithResponse(recordingId).block();
assert resumeResponse != null;
assertEquals(resumeResponse.getStatusCode(), 200);
validateCallRecordingStateWithResponse(serverCallAsync, recordingId, CallRecordingState.ACTIVE);
} catch (Exception e) {
System.out.println("Error: " + e.getMessage());
throw e;
} finally {
if (serverCallAsync != null) {
try {
Response<Void> stopResponse = serverCallAsync.stopRecordingWithResponse(recordingId).block();
assert stopResponse != null;
assertEquals(stopResponse.getStatusCode(), 200);
} catch (Exception e) {
System.out.println("Error stopping recording: " + e.getMessage());
}
}
cleanUpConnectionsAsync(callConnections);
}
} | class ServerCallAsyncLiveTests extends CallingServerTestBase {
// Randomly generated participant identities shared by all tests in this class.
private final String fromUser = getRandomUserId();
private final String toUser = getRandomUserId();

// NOTE(review): the @MethodSource string literals throughout this copy appear
// truncated ("com.azure.core.test.TestBase ..."); confirm against the original.
@ParameterizedTest
@MethodSource("com.azure.core.test.TestBase
// Start/pause/resume/stop recording using a connection-string-based client.
public void runAllClientFunctionsForConnectionStringClient(HttpClient httpClient) {
    String groupId = getGroupId("runAllClientFunctionsForConnectionStringClient");
    CallingServerClientBuilder builder = getCallingServerClientUsingConnectionString(httpClient);
    CallingServerAsyncClient callingServerAsyncClient = setupAsyncClient(builder, "runAllClientFunctionsForConnectionStringClient");
    runAllClientFunctionsAsync(groupId, callingServerAsyncClient);
}

@ParameterizedTest
@MethodSource("com.azure.core.test.TestBase
// Same recording flow, but with a token-credential-based client.
public void runAllClientFunctionsForTokenCredentialClient(HttpClient httpClient) {
    String groupId = getGroupId("runAllClientFunctionsForTokenCredentialClient");
    CallingServerClientBuilder builder = getCallingServerClientUsingTokenCredential(httpClient);
    CallingServerAsyncClient callingServerAsyncClient = setupAsyncClient(builder, "runAllClientFunctionsForTokenCredentialClient");
    runAllClientFunctionsAsync(groupId, callingServerAsyncClient);
}

// Shared body for the two tests above: creates a group call, then drives the
// recording through ACTIVE -> INACTIVE -> ACTIVE, always stopping the
// recording and hanging up the call connections in the finally block.
private void runAllClientFunctionsAsync(String groupId, CallingServerAsyncClient callingServerAsyncClient) {
    String recordingId = "";
    List<CallConnectionAsync> callConnections = new ArrayList<>();
    ServerCallAsync serverCall = null;
    try {
        callConnections = createAsyncCall(callingServerAsyncClient, groupId, fromUser, toUser, CALLBACK_URI);
        serverCall = callingServerAsyncClient.initializeServerCall(groupId);
        StartCallRecordingResult startCallRecordingResult = serverCall.startRecording(CALLBACK_URI).block();
        assert startCallRecordingResult != null;
        recordingId = startCallRecordingResult.getRecordingId();
        validateCallRecordingState(serverCall, recordingId, CallRecordingState.ACTIVE);
        serverCall.pauseRecording(recordingId).block();
        validateCallRecordingState(serverCall, recordingId, CallRecordingState.INACTIVE);
        serverCall.resumeRecording(recordingId).block();
        validateCallRecordingState(serverCall, recordingId, CallRecordingState.ACTIVE);
    } catch (Exception e) {
        System.out.println("Error: " + e.getMessage());
        throw e;
    } finally {
        if (serverCall != null) {
            try {
                serverCall.stopRecording(recordingId).block();
            } catch (Exception e) {
                System.out.println("Error stopping recording: " + e.getMessage());
            }
        }
        cleanUpConnectionsAsync(callConnections);
    }
}

// NOTE(review): the next two annotation lines appear orphaned — the method they
// belonged to seems to have been elided from this copy of the file.
@ParameterizedTest
@MethodSource("com.azure.core.test.TestBase
@ParameterizedTest
@MethodSource("com.azure.core.test.TestBase
// Plays an audio file into a group call via the non-Response overload.
public void runPlayAudioFunctionAsync(HttpClient httpClient) {
    String groupId = getGroupId("runPlayAudioFunctionAsync");
    CallingServerClientBuilder builder = getCallingServerClientUsingConnectionString(httpClient);
    CallingServerAsyncClient callingServerAsyncClient =
        setupAsyncClient(builder, "runPlayAudioFunctionAsync");
    ServerCallAsync serverCallAsync;
    List<CallConnectionAsync> callConnections = new ArrayList<>();
    String operationContext = UUID.randomUUID().toString();
    try {
        callConnections = createAsyncCall(callingServerAsyncClient, groupId, fromUser, toUser, CALLBACK_URI);
        serverCallAsync = callingServerAsyncClient.initializeServerCall(groupId);
        PlayAudioResult playAudioResult =
            serverCallAsync.playAudio(AUDIO_FILE_URI, operationContext, CALLBACK_URI, operationContext).block();
        CallingServerTestUtils.validatePlayAudioResult(playAudioResult);
    } catch (Exception e) {
        System.out.println("Error: " + e.getMessage());
        throw e;
    } finally {
        cleanUpConnectionsAsync(callConnections);
    }
}

@ParameterizedTest
@MethodSource("com.azure.core.test.TestBase
// Plays an audio file using the WithResponse overload and PlayAudioOptions.
public void runPlayAudioFunctionWithResponseAsync(HttpClient httpClient) {
    String groupId = getGroupId("runPlayAudioFunctionWithResponseAsync");
    CallingServerClientBuilder builder = getCallingServerClientUsingConnectionString(httpClient);
    CallingServerAsyncClient callingServerAsyncClient =
        setupAsyncClient(builder, "runPlayAudioFunctionWithResponseAsync");
    ServerCallAsync serverCallAsync;
    List<CallConnectionAsync> callConnections = new ArrayList<>();
    String operationContext = UUID.randomUUID().toString();
    try {
        callConnections = createAsyncCall(callingServerAsyncClient, groupId, fromUser, toUser, CALLBACK_URI);
        serverCallAsync = callingServerAsyncClient.initializeServerCall(groupId);
        PlayAudioOptions options = new PlayAudioOptions();
        options.setAudioFileId(UUID.randomUUID().toString());
        options.setCallbackUri(CALLBACK_URI);
        options.setOperationContext(operationContext);
        Response<PlayAudioResult> playAudioResult =
            serverCallAsync.playAudioWithResponse(
                AUDIO_FILE_URI,
                options).block();
        CallingServerTestUtils.validatePlayAudioResponse(playAudioResult);
    } catch (Exception e) {
        System.out.println("Error: " + e.getMessage());
        throw e;
    } finally {
        cleanUpConnectionsAsync(callConnections);
    }
}

@ParameterizedTest
@MethodSource("com.azure.core.test.TestBase
// Starting a recording for an invalid server call id must yield HTTP 400,
// either as the response status or as a CallingServerErrorException.
public void startRecordingFailsAsync(HttpClient httpClient) {
    CallingServerClientBuilder builder = getCallingServerClientUsingConnectionString(httpClient);
    CallingServerAsyncClient callingServerAsyncClient = setupAsyncClient(builder, "startRecordingFailsAsync");
    String invalidServerCallId = "aHR0cHM6Ly9jb252LXVzd2UtMDkuY29udi5za3lwZS5jb20vY29udi9EZVF2WEJGVVlFV1NNZkFXYno2azN3P2k9MTEmZT02Mzc1NzIyMjk0Mjc0NTI4Nzk=";
    ServerCallAsync serverCallAsync = callingServerAsyncClient.initializeServerCall(invalidServerCallId);
    try {
        Response<StartCallRecordingResult> response =
            serverCallAsync.startRecordingWithResponse(CALLBACK_URI, null, null, null, null).block();
        assert response != null;
        assertEquals(response.getStatusCode(), 400);
    } catch (CallingServerErrorException e) {
        assertEquals(e.getResponse().getStatusCode(), 400);
    }
}

@ParameterizedTest
@MethodSource("com.azure.core.test.TestBase
@DisabledIfEnvironmentVariable(
    named = "SKIP_LIVE_TEST",
    matches = "(?i)(true)",
    disabledReason = "Requires human intervention")
// Adds and removes a PSTN participant on an existing call, then hangs up.
public void runAddRemoveScenarioAsync(HttpClient httpClient) {
    CallingServerClientBuilder builder = getCallingServerClientUsingConnectionString(httpClient);
    CallingServerAsyncClient callingServerAsyncClient =
        setupAsyncClient(builder, "runAddRemoveScenarioAsync");
    try {
        CreateCallOptions options = new CreateCallOptions(
            CALLBACK_URI,
            Collections.singletonList(MediaType.AUDIO),
            Collections.singletonList(EventSubscriptionType.PARTICIPANTS_UPDATED));
        options.setAlternateCallerId(new PhoneNumberIdentifier(FROM_PHONE_NUMBER));
        CallConnectionAsync callConnectionAsync = callingServerAsyncClient.createCallConnection(
            new CommunicationUserIdentifier(fromUser),
            Collections.singletonList(new PhoneNumberIdentifier(TO_PHONE_NUMBER)),
            options).block();
        CallingServerTestUtils.validateCallConnectionAsync(callConnectionAsync);
        /*
        Waiting for an update to be able to get this serverCallId when using
        createCallConnection()
        */
        String serverCallId = "aHR0cHM6Ly94LWNvbnYtdXN3ZS0wMS5jb252LnNreXBlLmNvbS9jb252L19JbTJUcm1MejBpLWlaYkZRREtxaGc_aT0xJmU9NjM3NTg0MzkzMzg3ODg3MDI3";
        ServerCallAsync serverCallAsync = callingServerAsyncClient.initializeServerCall(serverCallId);
        String operationContext = UUID.randomUUID().toString();
        AddParticipantResult addParticipantResult = serverCallAsync
            .addParticipant(
                new CommunicationUserIdentifier(toUser),
                null,
                operationContext,
                CALLBACK_URI)
            .block();
        assert addParticipantResult != null;
        String participantId = addParticipantResult.getParticipantId();
        serverCallAsync.removeParticipant(participantId).block();
        assert callConnectionAsync != null;
        callConnectionAsync.hangup().block();
    } catch (Exception e) {
        System.out.println("Error: " + e.getMessage());
        throw e;
    }
}

@ParameterizedTest
@MethodSource("com.azure.core.test.TestBase
@DisabledIfEnvironmentVariable(
    named = "SKIP_LIVE_TEST",
    matches = "(?i)(true)",
    disabledReason = "Requires human intervention")
// Same add/remove scenario, exercised through the WithResponse overloads.
public void runAddRemoveScenarioWithResponseAsync(HttpClient httpClient) {
    CallingServerClientBuilder builder = getCallingServerClientUsingConnectionString(httpClient);
    CallingServerAsyncClient callingServerAsyncClient = setupAsyncClient(builder, "runAddRemoveScenarioWithResponseAsync");
    try {
        CreateCallOptions options = new CreateCallOptions(
            CALLBACK_URI,
            Collections.singletonList(MediaType.AUDIO),
            Collections.singletonList(EventSubscriptionType.PARTICIPANTS_UPDATED));
        options.setAlternateCallerId(new PhoneNumberIdentifier(FROM_PHONE_NUMBER));
        CallConnectionAsync callConnectionAsync = callingServerAsyncClient.createCallConnection(
            new CommunicationUserIdentifier(fromUser),
            Collections.singletonList(new PhoneNumberIdentifier(TO_PHONE_NUMBER)),
            options).block();
        CallingServerTestUtils.validateCallConnectionAsync(callConnectionAsync);
        /*
        Waiting for an update to be able to get this serverCallId when using
        createCallConnection()
        */
        String serverCallId = "aHR0cHM6Ly94LWNvbnYtdXN3ZS0wMS5jb252LnNreXBlLmNvbS9jb252L0pndHZNTW5mYUU2N3ViU3FKb19ndFE_aT0xJmU9NjM3NTg0MzkzMzg3ODg3MDI3";
        ServerCallAsync serverCallAsync = callingServerAsyncClient.initializeServerCall(serverCallId);
        String operationContext = UUID.randomUUID().toString();
        Response<AddParticipantResult> addParticipantResultResponse =
            serverCallAsync
                .addParticipantWithResponse(
                    new CommunicationUserIdentifier(toUser),
                    null,
                    operationContext,
                    CALLBACK_URI)
                .block();
        CallingServerTestUtils.validateAddParticipantResponse(addParticipantResultResponse);
        assert addParticipantResultResponse != null;
        String participantId = addParticipantResultResponse.getValue().getParticipantId();
        Response<Void> removeResponse = serverCallAsync.removeParticipantWithResponse(participantId).block();
        CallingServerTestUtils.validateResponse(removeResponse);
        assert callConnectionAsync != null;
        callConnectionAsync.hangup().block();
    } catch (Exception e) {
        System.out.println("Error: " + e.getMessage());
        throw e;
    }
}
private CallingServerAsyncClient setupAsyncClient(CallingServerClientBuilder builder, String testName) {
return addLoggingPolicy(builder, testName).buildAsyncClient();
}
protected CallingServerClientBuilder addLoggingPolicy(CallingServerClientBuilder builder, String testName) {
return builder.addPolicy((context, next) -> logHeaders(testName, next));
}
private void validateCallRecordingState(ServerCallAsync serverCallAsync,
String recordingId,
CallRecordingState expectedCallRecordingState) {
assertNotNull(serverCallAsync);
assertNotNull(serverCallAsync.getServerCallId());
assertNotNull(recordingId);
sleepIfRunningAgainstService(6000);
CallRecordingProperties callRecordingStateResult = serverCallAsync.getRecordingState(recordingId).block();
assert callRecordingStateResult != null;
assertEquals(callRecordingStateResult.getRecordingState(), expectedCallRecordingState);
}
protected void validateCallRecordingStateWithResponse(
ServerCallAsync serverCallAsync,
String recordingId,
CallRecordingState expectedCallRecordingState) {
assertNotNull(serverCallAsync);
assertNotNull(serverCallAsync.getServerCallId());
assertNotNull(recordingId);
sleepIfRunningAgainstService(6000);
Response<CallRecordingProperties> response =
serverCallAsync.getRecordingStateWithResponse(recordingId).block();
assertNotNull(response);
assertEquals(response.getStatusCode(), 200);
assertNotNull(response.getValue());
assertEquals(response.getValue().getRecordingState(), expectedCallRecordingState);
}
protected void cleanUpConnectionsAsync(List<CallConnectionAsync> connections) {
if (connections == null) {
return;
}
connections.forEach(c -> {
if (c != null) {
try {
c.hangup().block();
} catch (Exception e) {
System.out.println("Error hanging up: " + e.getMessage());
}
}
});
}
} | class ServerCallAsyncLiveTests extends CallingServerTestBase {
private final String fromUser = getRandomUserId();
private final String toUser = getRandomUserId();
@ParameterizedTest
@MethodSource("com.azure.core.test.TestBase
public void runAllClientFunctionsForConnectionStringClient(HttpClient httpClient) {
String groupId = getGroupId("runAllClientFunctionsForConnectionStringClient");
CallingServerClientBuilder builder = getCallingServerClientUsingConnectionString(httpClient);
CallingServerAsyncClient callingServerAsyncClient = setupAsyncClient(builder, "runAllClientFunctionsForConnectionStringClient");
runAllClientFunctionsAsync(groupId, callingServerAsyncClient);
}
@ParameterizedTest
@MethodSource("com.azure.core.test.TestBase
public void runAllClientFunctionsForTokenCredentialClient(HttpClient httpClient) {
String groupId = getGroupId("runAllClientFunctionsForTokenCredentialClient");
CallingServerClientBuilder builder = getCallingServerClientUsingTokenCredential(httpClient);
CallingServerAsyncClient callingServerAsyncClient = setupAsyncClient(builder, "runAllClientFunctionsForTokenCredentialClient");
runAllClientFunctionsAsync(groupId, callingServerAsyncClient);
}
private void runAllClientFunctionsAsync(String groupId, CallingServerAsyncClient callingServerAsyncClient) {
String recordingId = "";
List<CallConnectionAsync> callConnections = new ArrayList<>();
ServerCallAsync serverCall = null;
try {
callConnections = createAsyncCall(callingServerAsyncClient, groupId, fromUser, toUser, CALLBACK_URI);
serverCall = callingServerAsyncClient.initializeServerCall(groupId);
StartCallRecordingResult startCallRecordingResult = serverCall.startRecording(CALLBACK_URI).block();
assert startCallRecordingResult != null;
recordingId = startCallRecordingResult.getRecordingId();
validateCallRecordingState(serverCall, recordingId, CallRecordingState.ACTIVE);
serverCall.pauseRecording(recordingId).block();
validateCallRecordingState(serverCall, recordingId, CallRecordingState.INACTIVE);
serverCall.resumeRecording(recordingId).block();
validateCallRecordingState(serverCall, recordingId, CallRecordingState.ACTIVE);
} catch (Exception e) {
System.out.println("Error: " + e.getMessage());
throw e;
} finally {
if (serverCall != null) {
try {
serverCall.stopRecording(recordingId).block();
} catch (Exception e) {
System.out.println("Error stopping recording: " + e.getMessage());
}
}
cleanUpConnectionsAsync(callConnections);
}
}
@ParameterizedTest
@MethodSource("com.azure.core.test.TestBase
@ParameterizedTest
@MethodSource("com.azure.core.test.TestBase
public void runPlayAudioFunctionAsync(HttpClient httpClient) {
String groupId = getGroupId("runPlayAudioFunctionAsync");
CallingServerClientBuilder builder = getCallingServerClientUsingConnectionString(httpClient);
CallingServerAsyncClient callingServerAsyncClient =
setupAsyncClient(builder, "runPlayAudioFunctionAsync");
ServerCallAsync serverCallAsync;
List<CallConnectionAsync> callConnections = new ArrayList<>();
String operationContext = UUID.randomUUID().toString();
try {
callConnections = createAsyncCall(callingServerAsyncClient, groupId, fromUser, toUser, CALLBACK_URI);
serverCallAsync = callingServerAsyncClient.initializeServerCall(groupId);
PlayAudioResult playAudioResult =
serverCallAsync.playAudio(AUDIO_FILE_URI, operationContext, CALLBACK_URI, operationContext).block();
CallingServerTestUtils.validatePlayAudioResult(playAudioResult);
} catch (Exception e) {
System.out.println("Error: " + e.getMessage());
throw e;
} finally {
cleanUpConnectionsAsync(callConnections);
}
}
@ParameterizedTest
@MethodSource("com.azure.core.test.TestBase
public void runPlayAudioFunctionWithResponseAsync(HttpClient httpClient) {
String groupId = getGroupId("runPlayAudioFunctionWithResponseAsync");
CallingServerClientBuilder builder = getCallingServerClientUsingConnectionString(httpClient);
CallingServerAsyncClient callingServerAsyncClient =
setupAsyncClient(builder, "runPlayAudioFunctionWithResponseAsync");
ServerCallAsync serverCallAsync;
List<CallConnectionAsync> callConnections = new ArrayList<>();
String operationContext = UUID.randomUUID().toString();
try {
callConnections = createAsyncCall(callingServerAsyncClient, groupId, fromUser, toUser, CALLBACK_URI);
serverCallAsync = callingServerAsyncClient.initializeServerCall(groupId);
PlayAudioOptions options = new PlayAudioOptions();
options.setAudioFileId(UUID.randomUUID().toString());
options.setCallbackUri(CALLBACK_URI);
options.setOperationContext(operationContext);
Response<PlayAudioResult> playAudioResult =
serverCallAsync.playAudioWithResponse(
AUDIO_FILE_URI,
options).block();
CallingServerTestUtils.validatePlayAudioResponse(playAudioResult);
} catch (Exception e) {
System.out.println("Error: " + e.getMessage());
throw e;
} finally {
cleanUpConnectionsAsync(callConnections);
}
}
@ParameterizedTest
@MethodSource("com.azure.core.test.TestBase
public void startRecordingFailsAsync(HttpClient httpClient) {
CallingServerClientBuilder builder = getCallingServerClientUsingConnectionString(httpClient);
CallingServerAsyncClient callingServerAsyncClient = setupAsyncClient(builder, "startRecordingFailsAsync");
String invalidServerCallId = "aHR0cHM6Ly9jb252LXVzd2UtMDkuY29udi5za3lwZS5jb20vY29udi9EZVF2WEJGVVlFV1NNZkFXYno2azN3P2k9MTEmZT02Mzc1NzIyMjk0Mjc0NTI4Nzk=";
ServerCallAsync serverCallAsync = callingServerAsyncClient.initializeServerCall(invalidServerCallId);
try {
Response<StartCallRecordingResult> response =
serverCallAsync.startRecordingWithResponse(CALLBACK_URI, null, null).block();
assert response != null;
assertEquals(response.getStatusCode(), 400);
} catch (CallingServerErrorException e) {
assertEquals(e.getResponse().getStatusCode(), 400);
}
}
@ParameterizedTest
@MethodSource("com.azure.core.test.TestBase
@DisabledIfEnvironmentVariable(
named = "SKIP_LIVE_TEST",
matches = "(?i)(true)",
disabledReason = "Requires human intervention")
public void runAddRemoveScenarioAsync(HttpClient httpClient) {
CallingServerClientBuilder builder = getCallingServerClientUsingConnectionString(httpClient);
CallingServerAsyncClient callingServerAsyncClient =
setupAsyncClient(builder, "runAddRemoveScenarioAsync");
try {
CreateCallOptions options = new CreateCallOptions(
CALLBACK_URI,
Collections.singletonList(MediaType.AUDIO),
Collections.singletonList(EventSubscriptionType.PARTICIPANTS_UPDATED));
options.setAlternateCallerId(new PhoneNumberIdentifier(FROM_PHONE_NUMBER));
CallConnectionAsync callConnectionAsync = callingServerAsyncClient.createCallConnection(
new CommunicationUserIdentifier(fromUser),
Collections.singletonList(new PhoneNumberIdentifier(TO_PHONE_NUMBER)),
options).block();
CallingServerTestUtils.validateCallConnectionAsync(callConnectionAsync);
/*
Waiting for an update to be able to get this serverCallId when using
createCallConnection()
*/
String serverCallId = "aHR0cHM6Ly94LWNvbnYtdXN3ZS0wMS5jb252LnNreXBlLmNvbS9jb252L19JbTJUcm1MejBpLWlaYkZRREtxaGc_aT0xJmU9NjM3NTg0MzkzMzg3ODg3MDI3";
ServerCallAsync serverCallAsync = callingServerAsyncClient.initializeServerCall(serverCallId);
String operationContext = UUID.randomUUID().toString();
AddParticipantResult addParticipantResult = serverCallAsync
.addParticipant(
new CommunicationUserIdentifier(toUser),
null,
operationContext,
CALLBACK_URI)
.block();
assert addParticipantResult != null;
String participantId = addParticipantResult.getParticipantId();
serverCallAsync.removeParticipant(participantId).block();
assert callConnectionAsync != null;
callConnectionAsync.hangup().block();
} catch (Exception e) {
System.out.println("Error: " + e.getMessage());
throw e;
}
}
@ParameterizedTest
@MethodSource("com.azure.core.test.TestBase
@DisabledIfEnvironmentVariable(
named = "SKIP_LIVE_TEST",
matches = "(?i)(true)",
disabledReason = "Requires human intervention")
public void runAddRemoveScenarioWithResponseAsync(HttpClient httpClient) {
CallingServerClientBuilder builder = getCallingServerClientUsingConnectionString(httpClient);
CallingServerAsyncClient callingServerAsyncClient = setupAsyncClient(builder, "runAddRemoveScenarioWithResponseAsync");
try {
CreateCallOptions options = new CreateCallOptions(
CALLBACK_URI,
Collections.singletonList(MediaType.AUDIO),
Collections.singletonList(EventSubscriptionType.PARTICIPANTS_UPDATED));
options.setAlternateCallerId(new PhoneNumberIdentifier(FROM_PHONE_NUMBER));
CallConnectionAsync callConnectionAsync = callingServerAsyncClient.createCallConnection(
new CommunicationUserIdentifier(fromUser),
Collections.singletonList(new PhoneNumberIdentifier(TO_PHONE_NUMBER)),
options).block();
CallingServerTestUtils.validateCallConnectionAsync(callConnectionAsync);
/*
Waiting for an update to be able to get this serverCallId when using
createCallConnection()
*/
String serverCallId = "aHR0cHM6Ly94LWNvbnYtdXN3ZS0wMS5jb252LnNreXBlLmNvbS9jb252L0pndHZNTW5mYUU2N3ViU3FKb19ndFE_aT0xJmU9NjM3NTg0MzkzMzg3ODg3MDI3";
ServerCallAsync serverCallAsync = callingServerAsyncClient.initializeServerCall(serverCallId);
String operationContext = UUID.randomUUID().toString();
Response<AddParticipantResult> addParticipantResultResponse =
serverCallAsync
.addParticipantWithResponse(
new CommunicationUserIdentifier(toUser),
null,
operationContext,
CALLBACK_URI)
.block();
CallingServerTestUtils.validateAddParticipantResponse(addParticipantResultResponse);
assert addParticipantResultResponse != null;
String participantId = addParticipantResultResponse.getValue().getParticipantId();
Response<Void> removeResponse = serverCallAsync.removeParticipantWithResponse(participantId).block();
CallingServerTestUtils.validateResponse(removeResponse);
assert callConnectionAsync != null;
callConnectionAsync.hangup().block();
} catch (Exception e) {
System.out.println("Error: " + e.getMessage());
throw e;
}
}
private CallingServerAsyncClient setupAsyncClient(CallingServerClientBuilder builder, String testName) {
return addLoggingPolicy(builder, testName).buildAsyncClient();
}
protected CallingServerClientBuilder addLoggingPolicy(CallingServerClientBuilder builder, String testName) {
return builder.addPolicy((context, next) -> logHeaders(testName, next));
}
private void validateCallRecordingState(ServerCallAsync serverCallAsync,
String recordingId,
CallRecordingState expectedCallRecordingState) {
assertNotNull(serverCallAsync);
assertNotNull(serverCallAsync.getServerCallId());
assertNotNull(recordingId);
sleepIfRunningAgainstService(6000);
CallRecordingProperties callRecordingStateResult = serverCallAsync.getRecordingState(recordingId).block();
assert callRecordingStateResult != null;
assertEquals(callRecordingStateResult.getRecordingState(), expectedCallRecordingState);
}
protected void validateCallRecordingStateWithResponse(
ServerCallAsync serverCallAsync,
String recordingId,
CallRecordingState expectedCallRecordingState) {
assertNotNull(serverCallAsync);
assertNotNull(serverCallAsync.getServerCallId());
assertNotNull(recordingId);
sleepIfRunningAgainstService(6000);
Response<CallRecordingProperties> response =
serverCallAsync.getRecordingStateWithResponse(recordingId).block();
assertNotNull(response);
assertEquals(response.getStatusCode(), 200);
assertNotNull(response.getValue());
assertEquals(response.getValue().getRecordingState(), expectedCallRecordingState);
}
protected void cleanUpConnectionsAsync(List<CallConnectionAsync> connections) {
if (connections == null) {
return;
}
connections.forEach(c -> {
if (c != null) {
try {
c.hangup().block();
} catch (Exception e) {
System.out.println("Error hanging up: " + e.getMessage());
}
}
});
}
} |
does that mean that the `startRecording` method is not going to be tested? | public void startRecordingWithRecordingParamsRelativeUriFails() {
ServerCall serverCall = new CallingServerClientBuilder()
.httpClient(new NoOpHttpClient())
.connectionString(MOCK_CONNECTION_STRING)
.buildClient()
.initializeServerCall(serverCallId);
assertThrows(
InvalidParameterException.class,
() -> serverCall.startRecordingWithResponse("/not/absolute/uri", RecordingChannelType.MIXED, RecordingContentType.AUDIO_VIDEO, RecordingFormatType.MP4, null));
} | () -> serverCall.startRecordingWithResponse("/not/absolute/uri", RecordingChannelType.MIXED, RecordingContentType.AUDIO_VIDEO, RecordingFormatType.MP4, null)); | public void startRecordingWithRecordingParamsRelativeUriFails() {
StartRecordingOptions startRecordingOptions = new StartRecordingOptions();
startRecordingOptions.setRecordingChannelType(RecordingChannelType.MIXED);
startRecordingOptions.setRecordingContentType(RecordingContentType.AUDIO_VIDEO);
startRecordingOptions.setRecordingFormatType(RecordingFormatType.MP4);
ServerCall serverCall = new CallingServerClientBuilder()
.httpClient(new NoOpHttpClient())
.connectionString(MOCK_CONNECTION_STRING)
.buildClient()
.initializeServerCall(serverCallId);
assertThrows(
InvalidParameterException.class,
() -> serverCall.startRecordingWithResponse("/not/absolute/uri", startRecordingOptions, null));
} | class ServerCallUnitTests {
private final String serverCallId = "aHR0cHM6Ly9jb252LXVzd2UtMDguY29udi5za3lwZS5jb20vY29udi8tby1FWjVpMHJrS3RFTDBNd0FST1J3P2k9ODgmZT02Mzc1Nzc0MTY4MDc4MjQyOTM";
static final String MOCK_CONNECTION_STRING = "endpoint=https:
@Test
public void startRecordingRelativeUriFails() {
ServerCall serverCall = new CallingServerClientBuilder()
.httpClient(new NoOpHttpClient())
.connectionString(MOCK_CONNECTION_STRING)
.buildClient()
.initializeServerCall(serverCallId);
assertThrows(
InvalidParameterException.class,
() -> serverCall.startRecording("/not/absolute/uri"));
}
@Test
@Test
public void startRecordingWithResponseRelativeUriFails() {
ServerCall serverCall = new CallingServerClientBuilder()
.httpClient(new NoOpHttpClient())
.connectionString(MOCK_CONNECTION_STRING)
.buildClient()
.initializeServerCall(serverCallId);
assertThrows(
InvalidParameterException.class,
() -> serverCall.startRecordingWithResponse("/not/absolute/uri", null, null, null, null));
}
@Test
public void addParticipantNullParticipantFails() {
ServerCall serverCall = new CallingServerClientBuilder()
.httpClient(new NoOpHttpClient())
.connectionString(MOCK_CONNECTION_STRING)
.buildClient()
.initializeServerCall(serverCallId);
assertThrows(
NullPointerException.class,
() -> serverCall.addParticipant(null, null, null, null));
}
@Test
public void startRecordingAsyncFails() {
ServerCallAsync serverCall = new CallingServerClientBuilder()
.httpClient(new NoOpHttpClient())
.connectionString(MOCK_CONNECTION_STRING)
.buildAsyncClient()
.initializeServerCall(serverCallId);
assertThrows(
InvalidParameterException.class,
() -> serverCall.startRecording("/not/absolute/uri")
.block());
}
@Test
public void startRecordingWithRecordingParamsAsyncFails() {
ServerCallAsync serverCall = new CallingServerClientBuilder()
.httpClient(new NoOpHttpClient())
.connectionString(MOCK_CONNECTION_STRING)
.buildAsyncClient()
.initializeServerCall(serverCallId);
assertThrows(
InvalidParameterException.class,
() -> serverCall.startRecordingWithResponse("/not/absolute/uri", RecordingChannelType.MIXED, RecordingContentType.AUDIO_VIDEO, RecordingFormatType.MP4, null)
.block());
}
@Test
public void playAudioWithResponse() {
ServerCall serverCall = getServerCall();
PlayAudioOptions playAudioOptions = new PlayAudioOptions().setAudioFileId("audioFileId").setCallbackUri("callbackUri");
Response<PlayAudioResult> playAudioResultResponse = serverCall.playAudioWithResponse("audioFileUri", playAudioOptions, Context.NONE);
assertEquals(202, playAudioResultResponse.getStatusCode());
PlayAudioResult playAudioResult = playAudioResultResponse.getValue();
assertEquals(OperationStatus.COMPLETED, playAudioResult.getStatus());
}
@Test
public void playAudio() {
ServerCall serverCall = getServerCall();
PlayAudioResult playAudioResult = serverCall.playAudio("audioFileUri", "audioFieldId", "callbackUri", "operationContext");
assertEquals(OperationStatus.COMPLETED, playAudioResult.getStatus());
}
@Test
public void playAudioWithResponseAsync() {
ServerCallAsync serverCall = getServerCallAsync();
PlayAudioOptions playAudioOptions = new PlayAudioOptions().setAudioFileId("audioFileId").setCallbackUri("callbackUri");
Response<PlayAudioResult> playAudioResultResponse = serverCall.playAudioWithResponse("audioFileUri", playAudioOptions).block();
assertEquals(202, playAudioResultResponse.getStatusCode());
PlayAudioResult playAudioResult = playAudioResultResponse.getValue();
assertEquals(OperationStatus.COMPLETED, playAudioResult.getStatus());
}
@Test
public void playAudioAsync() {
ServerCallAsync serverCall = getServerCallAsync();
PlayAudioResult playAudioResult = serverCall.playAudio("audioFileUri", "audioFieldId", "callbackUri", "operationContext").block();
assertEquals(OperationStatus.COMPLETED, playAudioResult.getStatus());
}
@Test
public void playAudioAsyncUsingOptions() {
ServerCallAsync serverCall = getServerCallAsync();
PlayAudioOptions playAudioOptions = new PlayAudioOptions().setAudioFileId("audioFileId").setCallbackUri("callbackUri");
PlayAudioResult playAudioResult = serverCall.playAudio("audioFileUri", playAudioOptions).block();
assertEquals(OperationStatus.COMPLETED, playAudioResult.getStatus());
}
@Test
public void appParticipantServerCall() {
ServerCall serverCall = CallingServerResponseMocker.getServerCall(new ArrayList<SimpleEntry<String, Integer>>(
Arrays.asList(
new SimpleEntry<String, Integer>(CallingServerResponseMocker.generateAddParticipantResult(CallingServerResponseMocker.NEW_PARTICIPANT_ID), 202)
))
);
CommunicationUserIdentifier user = new CommunicationUserIdentifier(CallingServerResponseMocker.NEW_PARTICIPANT_ID);
AddParticipantResult addParticipantResult = serverCall.addParticipant(user, CallingServerResponseMocker.URI_CALLBACK, "alternateCallerId", "operationContext");
assertEquals(user.getId(), addParticipantResult.getParticipantId());
}
@Test
public void appParticipantServerCallWithResponse() {
ServerCall serverCall = CallingServerResponseMocker.getServerCall(new ArrayList<SimpleEntry<String, Integer>>(
Arrays.asList(
new SimpleEntry<String, Integer>(CallingServerResponseMocker.generateAddParticipantResult(CallingServerResponseMocker.NEW_PARTICIPANT_ID), 202)
))
);
CommunicationUserIdentifier user = new CommunicationUserIdentifier(CallingServerResponseMocker.NEW_PARTICIPANT_ID);
Response<AddParticipantResult> addParticipantResultResponse = serverCall.addParticipantWithResponse(user, CallingServerResponseMocker.URI_CALLBACK, "alternateCallerId", "operationContext", Context.NONE);
assertEquals(202, addParticipantResultResponse.getStatusCode());
AddParticipantResult addParticipantResult = addParticipantResultResponse.getValue();
assertEquals(user.getId(), addParticipantResult.getParticipantId());
}
@Test
public void appParticipantServerCallAsync() {
ServerCallAsync serverCall = CallingServerResponseMocker.getServerCallAsync(new ArrayList<SimpleEntry<String, Integer>>(
Arrays.asList(
new SimpleEntry<String, Integer>(CallingServerResponseMocker.generateAddParticipantResult(CallingServerResponseMocker.NEW_PARTICIPANT_ID), 202)
))
);
CommunicationUserIdentifier user = new CommunicationUserIdentifier(CallingServerResponseMocker.NEW_PARTICIPANT_ID);
AddParticipantResult addParticipantResult = serverCall.addParticipant(user, CallingServerResponseMocker.URI_CALLBACK, "alternateCallerId", "operationContext").block();
assertEquals(user.getId(), addParticipantResult.getParticipantId());
}
@Test
public void appParticipantServerCallAsyncWithResponse() {
ServerCallAsync serverCall = CallingServerResponseMocker.getServerCallAsync(new ArrayList<SimpleEntry<String, Integer>>(
Arrays.asList(
new SimpleEntry<String, Integer>(CallingServerResponseMocker.generateAddParticipantResult(CallingServerResponseMocker.NEW_PARTICIPANT_ID), 202)
))
);
CommunicationUserIdentifier user = new CommunicationUserIdentifier(CallingServerResponseMocker.NEW_PARTICIPANT_ID);
Response<AddParticipantResult> addParticipantResultResponse = serverCall.addParticipantWithResponse(user, CallingServerResponseMocker.URI_CALLBACK, "alternateCallerId", "operationContext").block();
assertEquals(202, addParticipantResultResponse.getStatusCode());
AddParticipantResult addParticipantResult = addParticipantResultResponse.getValue();
assertEquals(user.getId(), addParticipantResult.getParticipantId());
}
@Test
public void removeParticipantServerCall() {
ServerCall serverCall = CallingServerResponseMocker.getServerCall(new ArrayList<SimpleEntry<String, Integer>>(
Arrays.asList(
new SimpleEntry<String, Integer>("", 202)
))
);
serverCall.removeParticipant(CallingServerResponseMocker.NEW_PARTICIPANT_ID);
}
@Test
public void removeParticipantServerCallWithResponse() {
ServerCall serverCall = CallingServerResponseMocker.getServerCall(new ArrayList<SimpleEntry<String, Integer>>(
Arrays.asList(
new SimpleEntry<String, Integer>("", 202)
))
);
Response<Void> removeParticipantResultResponse = serverCall.removeParticipantWithResponse(CallingServerResponseMocker.NEW_PARTICIPANT_ID, Context.NONE);
assertEquals(202, removeParticipantResultResponse.getStatusCode());
}
@Test
public void removeParticipantServerCallAsync() {
ServerCallAsync serverCall = CallingServerResponseMocker.getServerCallAsync(new ArrayList<SimpleEntry<String, Integer>>(
Arrays.asList(
new SimpleEntry<String, Integer>("", 202)
))
);
serverCall.removeParticipant(CallingServerResponseMocker.NEW_PARTICIPANT_ID).block();
}
@Test
public void removeParticipantServerCallAsyncWithResponse() {
ServerCallAsync serverCall = CallingServerResponseMocker.getServerCallAsync(new ArrayList<SimpleEntry<String, Integer>>(
Arrays.asList(
new SimpleEntry<String, Integer>("", 202)
))
);
Response<Void> removeParticipantResultResponse = serverCall.removeParticipantWithResponse(CallingServerResponseMocker.NEW_PARTICIPANT_ID, Context.NONE).block();
assertEquals(202, removeParticipantResultResponse.getStatusCode());
}
private ServerCall getServerCall() {
return CallingServerResponseMocker.getServerCall(new ArrayList<SimpleEntry<String, Integer>>(
Arrays.asList(
new SimpleEntry<String, Integer>(CallingServerResponseMocker.generatePlayAudioResult(
CallingServerResponseMocker.OPERATION_ID,
OperationStatus.COMPLETED,
new ResultInfoInternal().setCode(202).setSubcode(0).setMessage("message")),
202)
)));
}
private ServerCallAsync getServerCallAsync() {
return CallingServerResponseMocker.getServerCallAsync(new ArrayList<SimpleEntry<String, Integer>>(
Arrays.asList(
new SimpleEntry<String, Integer>(CallingServerResponseMocker.generatePlayAudioResult(
CallingServerResponseMocker.OPERATION_ID,
OperationStatus.COMPLETED,
new ResultInfoInternal().setCode(202).setSubcode(0).setMessage("message")),
202)
)));
}
} | class ServerCallUnitTests {
private final String serverCallId = "aHR0cHM6Ly9jb252LXVzd2UtMDguY29udi5za3lwZS5jb20vY29udi8tby1FWjVpMHJrS3RFTDBNd0FST1J3P2k9ODgmZT02Mzc1Nzc0MTY4MDc4MjQyOTM";
static final String MOCK_CONNECTION_STRING = "endpoint=https:
@Test
public void startRecordingRelativeUriFails() {
ServerCall serverCall = new CallingServerClientBuilder()
.httpClient(new NoOpHttpClient())
.connectionString(MOCK_CONNECTION_STRING)
.buildClient()
.initializeServerCall(serverCallId);
assertThrows(
InvalidParameterException.class,
() -> serverCall.startRecording("/not/absolute/uri"));
}
@Test
@Test
public void startRecordingWithResponseRelativeUriFails() {
ServerCall serverCall = new CallingServerClientBuilder()
.httpClient(new NoOpHttpClient())
.connectionString(MOCK_CONNECTION_STRING)
.buildClient()
.initializeServerCall(serverCallId);
assertThrows(
InvalidParameterException.class,
() -> serverCall.startRecordingWithResponse("/not/absolute/uri", null, null));
}
@Test
public void addParticipantNullParticipantFails() {
ServerCall serverCall = new CallingServerClientBuilder()
.httpClient(new NoOpHttpClient())
.connectionString(MOCK_CONNECTION_STRING)
.buildClient()
.initializeServerCall(serverCallId);
assertThrows(
NullPointerException.class,
() -> serverCall.addParticipant(null, null, null, null));
}
@Test
public void startRecordingAsyncFails() {
ServerCallAsync serverCall = new CallingServerClientBuilder()
.httpClient(new NoOpHttpClient())
.connectionString(MOCK_CONNECTION_STRING)
.buildAsyncClient()
.initializeServerCall(serverCallId);
assertThrows(
InvalidParameterException.class,
() -> serverCall.startRecording("/not/absolute/uri")
.block());
}
@Test
public void startRecordingWithRecordingParamsAsyncFails() {
StartRecordingOptions startRecordingOptions = new StartRecordingOptions();
startRecordingOptions.setRecordingChannelType(RecordingChannelType.MIXED);
startRecordingOptions.setRecordingContentType(RecordingContentType.AUDIO_VIDEO);
startRecordingOptions.setRecordingFormatType(RecordingFormatType.MP4);
ServerCallAsync serverCall = new CallingServerClientBuilder()
.httpClient(new NoOpHttpClient())
.connectionString(MOCK_CONNECTION_STRING)
.buildAsyncClient()
.initializeServerCall(serverCallId);
assertThrows(
InvalidParameterException.class,
() -> serverCall.startRecordingWithResponse("/not/absolute/uri", startRecordingOptions, null)
.block());
}
@Test
public void playAudioWithResponse() {
ServerCall serverCall = getServerCall();
PlayAudioOptions playAudioOptions = new PlayAudioOptions().setAudioFileId("audioFileId").setCallbackUri("callbackUri");
Response<PlayAudioResult> playAudioResultResponse = serverCall.playAudioWithResponse("audioFileUri", playAudioOptions, Context.NONE);
assertEquals(202, playAudioResultResponse.getStatusCode());
PlayAudioResult playAudioResult = playAudioResultResponse.getValue();
assertEquals(OperationStatus.COMPLETED, playAudioResult.getStatus());
}
@Test
public void playAudio() {
ServerCall serverCall = getServerCall();
PlayAudioResult playAudioResult = serverCall.playAudio("audioFileUri", "audioFieldId", "callbackUri", "operationContext");
assertEquals(OperationStatus.COMPLETED, playAudioResult.getStatus());
}
@Test
public void playAudioWithResponseAsync() {
ServerCallAsync serverCall = getServerCallAsync();
PlayAudioOptions playAudioOptions = new PlayAudioOptions().setAudioFileId("audioFileId").setCallbackUri("callbackUri");
Response<PlayAudioResult> playAudioResultResponse = serverCall.playAudioWithResponse("audioFileUri", playAudioOptions).block();
assertEquals(202, playAudioResultResponse.getStatusCode());
PlayAudioResult playAudioResult = playAudioResultResponse.getValue();
assertEquals(OperationStatus.COMPLETED, playAudioResult.getStatus());
}
@Test
public void playAudioAsync() {
ServerCallAsync serverCall = getServerCallAsync();
PlayAudioResult playAudioResult = serverCall.playAudio("audioFileUri", "audioFieldId", "callbackUri", "operationContext").block();
assertEquals(OperationStatus.COMPLETED, playAudioResult.getStatus());
}
@Test
public void playAudioAsyncUsingOptions() {
ServerCallAsync serverCall = getServerCallAsync();
PlayAudioOptions playAudioOptions = new PlayAudioOptions().setAudioFileId("audioFileId").setCallbackUri("callbackUri");
PlayAudioResult playAudioResult = serverCall.playAudio("audioFileUri", playAudioOptions).block();
assertEquals(OperationStatus.COMPLETED, playAudioResult.getStatus());
}
@Test
public void appParticipantServerCall() {
ServerCall serverCall = CallingServerResponseMocker.getServerCall(new ArrayList<SimpleEntry<String, Integer>>(
Arrays.asList(
new SimpleEntry<String, Integer>(CallingServerResponseMocker.generateAddParticipantResult(CallingServerResponseMocker.NEW_PARTICIPANT_ID), 202)
))
);
CommunicationUserIdentifier user = new CommunicationUserIdentifier(CallingServerResponseMocker.NEW_PARTICIPANT_ID);
AddParticipantResult addParticipantResult = serverCall.addParticipant(user, CallingServerResponseMocker.URI_CALLBACK, "alternateCallerId", "operationContext");
assertEquals(user.getId(), addParticipantResult.getParticipantId());
}
@Test
public void appParticipantServerCallWithResponse() {
ServerCall serverCall = CallingServerResponseMocker.getServerCall(new ArrayList<SimpleEntry<String, Integer>>(
Arrays.asList(
new SimpleEntry<String, Integer>(CallingServerResponseMocker.generateAddParticipantResult(CallingServerResponseMocker.NEW_PARTICIPANT_ID), 202)
))
);
CommunicationUserIdentifier user = new CommunicationUserIdentifier(CallingServerResponseMocker.NEW_PARTICIPANT_ID);
Response<AddParticipantResult> addParticipantResultResponse = serverCall.addParticipantWithResponse(user, CallingServerResponseMocker.URI_CALLBACK, "alternateCallerId", "operationContext", Context.NONE);
assertEquals(202, addParticipantResultResponse.getStatusCode());
AddParticipantResult addParticipantResult = addParticipantResultResponse.getValue();
assertEquals(user.getId(), addParticipantResult.getParticipantId());
}
@Test
public void appParticipantServerCallAsync() {
ServerCallAsync serverCall = CallingServerResponseMocker.getServerCallAsync(new ArrayList<SimpleEntry<String, Integer>>(
Arrays.asList(
new SimpleEntry<String, Integer>(CallingServerResponseMocker.generateAddParticipantResult(CallingServerResponseMocker.NEW_PARTICIPANT_ID), 202)
))
);
CommunicationUserIdentifier user = new CommunicationUserIdentifier(CallingServerResponseMocker.NEW_PARTICIPANT_ID);
AddParticipantResult addParticipantResult = serverCall.addParticipant(user, CallingServerResponseMocker.URI_CALLBACK, "alternateCallerId", "operationContext").block();
assertEquals(user.getId(), addParticipantResult.getParticipantId());
}
@Test
public void appParticipantServerCallAsyncWithResponse() {
ServerCallAsync serverCall = CallingServerResponseMocker.getServerCallAsync(new ArrayList<SimpleEntry<String, Integer>>(
Arrays.asList(
new SimpleEntry<String, Integer>(CallingServerResponseMocker.generateAddParticipantResult(CallingServerResponseMocker.NEW_PARTICIPANT_ID), 202)
))
);
CommunicationUserIdentifier user = new CommunicationUserIdentifier(CallingServerResponseMocker.NEW_PARTICIPANT_ID);
Response<AddParticipantResult> addParticipantResultResponse = serverCall.addParticipantWithResponse(user, CallingServerResponseMocker.URI_CALLBACK, "alternateCallerId", "operationContext").block();
assertEquals(202, addParticipantResultResponse.getStatusCode());
AddParticipantResult addParticipantResult = addParticipantResultResponse.getValue();
assertEquals(user.getId(), addParticipantResult.getParticipantId());
}
@Test
public void removeParticipantServerCall() {
ServerCall serverCall = CallingServerResponseMocker.getServerCall(new ArrayList<SimpleEntry<String, Integer>>(
Arrays.asList(
new SimpleEntry<String, Integer>("", 202)
))
);
serverCall.removeParticipant(CallingServerResponseMocker.NEW_PARTICIPANT_ID);
}
@Test
public void removeParticipantServerCallWithResponse() {
ServerCall serverCall = CallingServerResponseMocker.getServerCall(new ArrayList<SimpleEntry<String, Integer>>(
Arrays.asList(
new SimpleEntry<String, Integer>("", 202)
))
);
Response<Void> removeParticipantResultResponse = serverCall.removeParticipantWithResponse(CallingServerResponseMocker.NEW_PARTICIPANT_ID, Context.NONE);
assertEquals(202, removeParticipantResultResponse.getStatusCode());
}
@Test
public void removeParticipantServerCallAsync() {
ServerCallAsync serverCall = CallingServerResponseMocker.getServerCallAsync(new ArrayList<SimpleEntry<String, Integer>>(
Arrays.asList(
new SimpleEntry<String, Integer>("", 202)
))
);
serverCall.removeParticipant(CallingServerResponseMocker.NEW_PARTICIPANT_ID).block();
}
@Test
public void removeParticipantServerCallAsyncWithResponse() {
ServerCallAsync serverCall = CallingServerResponseMocker.getServerCallAsync(new ArrayList<SimpleEntry<String, Integer>>(
Arrays.asList(
new SimpleEntry<String, Integer>("", 202)
))
);
Response<Void> removeParticipantResultResponse = serverCall.removeParticipantWithResponse(CallingServerResponseMocker.NEW_PARTICIPANT_ID, Context.NONE).block();
assertEquals(202, removeParticipantResultResponse.getStatusCode());
}
private ServerCall getServerCall() {
return CallingServerResponseMocker.getServerCall(new ArrayList<SimpleEntry<String, Integer>>(
Arrays.asList(
new SimpleEntry<String, Integer>(CallingServerResponseMocker.generatePlayAudioResult(
CallingServerResponseMocker.OPERATION_ID,
OperationStatus.COMPLETED,
new ResultInfoInternal().setCode(202).setSubcode(0).setMessage("message")),
202)
)));
}
private ServerCallAsync getServerCallAsync() {
return CallingServerResponseMocker.getServerCallAsync(new ArrayList<SimpleEntry<String, Integer>>(
Arrays.asList(
new SimpleEntry<String, Integer>(CallingServerResponseMocker.generatePlayAudioResult(
CallingServerResponseMocker.OPERATION_ID,
OperationStatus.COMPLETED,
new ResultInfoInternal().setCode(202).setSubcode(0).setMessage("message")),
202)
)));
}
} |
Its is being testing in a different test case already :) | public void startRecordingWithRecordingParamsRelativeUriFails() {
ServerCall serverCall = new CallingServerClientBuilder()
.httpClient(new NoOpHttpClient())
.connectionString(MOCK_CONNECTION_STRING)
.buildClient()
.initializeServerCall(serverCallId);
assertThrows(
InvalidParameterException.class,
() -> serverCall.startRecordingWithResponse("/not/absolute/uri", RecordingChannelType.MIXED, RecordingContentType.AUDIO_VIDEO, RecordingFormatType.MP4, null));
} | () -> serverCall.startRecordingWithResponse("/not/absolute/uri", RecordingChannelType.MIXED, RecordingContentType.AUDIO_VIDEO, RecordingFormatType.MP4, null)); | public void startRecordingWithRecordingParamsRelativeUriFails() {
StartRecordingOptions startRecordingOptions = new StartRecordingOptions();
startRecordingOptions.setRecordingChannelType(RecordingChannelType.MIXED);
startRecordingOptions.setRecordingContentType(RecordingContentType.AUDIO_VIDEO);
startRecordingOptions.setRecordingFormatType(RecordingFormatType.MP4);
ServerCall serverCall = new CallingServerClientBuilder()
.httpClient(new NoOpHttpClient())
.connectionString(MOCK_CONNECTION_STRING)
.buildClient()
.initializeServerCall(serverCallId);
assertThrows(
InvalidParameterException.class,
() -> serverCall.startRecordingWithResponse("/not/absolute/uri", startRecordingOptions, null));
} | class ServerCallUnitTests {
private final String serverCallId = "aHR0cHM6Ly9jb252LXVzd2UtMDguY29udi5za3lwZS5jb20vY29udi8tby1FWjVpMHJrS3RFTDBNd0FST1J3P2k9ODgmZT02Mzc1Nzc0MTY4MDc4MjQyOTM";
static final String MOCK_CONNECTION_STRING = "endpoint=https:
@Test
public void startRecordingRelativeUriFails() {
ServerCall serverCall = new CallingServerClientBuilder()
.httpClient(new NoOpHttpClient())
.connectionString(MOCK_CONNECTION_STRING)
.buildClient()
.initializeServerCall(serverCallId);
assertThrows(
InvalidParameterException.class,
() -> serverCall.startRecording("/not/absolute/uri"));
}
@Test
@Test
public void startRecordingWithResponseRelativeUriFails() {
ServerCall serverCall = new CallingServerClientBuilder()
.httpClient(new NoOpHttpClient())
.connectionString(MOCK_CONNECTION_STRING)
.buildClient()
.initializeServerCall(serverCallId);
assertThrows(
InvalidParameterException.class,
() -> serverCall.startRecordingWithResponse("/not/absolute/uri", null, null, null, null));
}
@Test
public void addParticipantNullParticipantFails() {
ServerCall serverCall = new CallingServerClientBuilder()
.httpClient(new NoOpHttpClient())
.connectionString(MOCK_CONNECTION_STRING)
.buildClient()
.initializeServerCall(serverCallId);
assertThrows(
NullPointerException.class,
() -> serverCall.addParticipant(null, null, null, null));
}
@Test
public void startRecordingAsyncFails() {
ServerCallAsync serverCall = new CallingServerClientBuilder()
.httpClient(new NoOpHttpClient())
.connectionString(MOCK_CONNECTION_STRING)
.buildAsyncClient()
.initializeServerCall(serverCallId);
assertThrows(
InvalidParameterException.class,
() -> serverCall.startRecording("/not/absolute/uri")
.block());
}
@Test
public void startRecordingWithRecordingParamsAsyncFails() {
ServerCallAsync serverCall = new CallingServerClientBuilder()
.httpClient(new NoOpHttpClient())
.connectionString(MOCK_CONNECTION_STRING)
.buildAsyncClient()
.initializeServerCall(serverCallId);
assertThrows(
InvalidParameterException.class,
() -> serverCall.startRecordingWithResponse("/not/absolute/uri", RecordingChannelType.MIXED, RecordingContentType.AUDIO_VIDEO, RecordingFormatType.MP4, null)
.block());
}
@Test
public void playAudioWithResponse() {
ServerCall serverCall = getServerCall();
PlayAudioOptions playAudioOptions = new PlayAudioOptions().setAudioFileId("audioFileId").setCallbackUri("callbackUri");
Response<PlayAudioResult> playAudioResultResponse = serverCall.playAudioWithResponse("audioFileUri", playAudioOptions, Context.NONE);
assertEquals(202, playAudioResultResponse.getStatusCode());
PlayAudioResult playAudioResult = playAudioResultResponse.getValue();
assertEquals(OperationStatus.COMPLETED, playAudioResult.getStatus());
}
@Test
public void playAudio() {
ServerCall serverCall = getServerCall();
PlayAudioResult playAudioResult = serverCall.playAudio("audioFileUri", "audioFieldId", "callbackUri", "operationContext");
assertEquals(OperationStatus.COMPLETED, playAudioResult.getStatus());
}
@Test
public void playAudioWithResponseAsync() {
ServerCallAsync serverCall = getServerCallAsync();
PlayAudioOptions playAudioOptions = new PlayAudioOptions().setAudioFileId("audioFileId").setCallbackUri("callbackUri");
Response<PlayAudioResult> playAudioResultResponse = serverCall.playAudioWithResponse("audioFileUri", playAudioOptions).block();
assertEquals(202, playAudioResultResponse.getStatusCode());
PlayAudioResult playAudioResult = playAudioResultResponse.getValue();
assertEquals(OperationStatus.COMPLETED, playAudioResult.getStatus());
}
@Test
public void playAudioAsync() {
ServerCallAsync serverCall = getServerCallAsync();
PlayAudioResult playAudioResult = serverCall.playAudio("audioFileUri", "audioFieldId", "callbackUri", "operationContext").block();
assertEquals(OperationStatus.COMPLETED, playAudioResult.getStatus());
}
@Test
public void playAudioAsyncUsingOptions() {
ServerCallAsync serverCall = getServerCallAsync();
PlayAudioOptions playAudioOptions = new PlayAudioOptions().setAudioFileId("audioFileId").setCallbackUri("callbackUri");
PlayAudioResult playAudioResult = serverCall.playAudio("audioFileUri", playAudioOptions).block();
assertEquals(OperationStatus.COMPLETED, playAudioResult.getStatus());
}
@Test
public void appParticipantServerCall() {
ServerCall serverCall = CallingServerResponseMocker.getServerCall(new ArrayList<SimpleEntry<String, Integer>>(
Arrays.asList(
new SimpleEntry<String, Integer>(CallingServerResponseMocker.generateAddParticipantResult(CallingServerResponseMocker.NEW_PARTICIPANT_ID), 202)
))
);
CommunicationUserIdentifier user = new CommunicationUserIdentifier(CallingServerResponseMocker.NEW_PARTICIPANT_ID);
AddParticipantResult addParticipantResult = serverCall.addParticipant(user, CallingServerResponseMocker.URI_CALLBACK, "alternateCallerId", "operationContext");
assertEquals(user.getId(), addParticipantResult.getParticipantId());
}
@Test
public void appParticipantServerCallWithResponse() {
ServerCall serverCall = CallingServerResponseMocker.getServerCall(new ArrayList<SimpleEntry<String, Integer>>(
Arrays.asList(
new SimpleEntry<String, Integer>(CallingServerResponseMocker.generateAddParticipantResult(CallingServerResponseMocker.NEW_PARTICIPANT_ID), 202)
))
);
CommunicationUserIdentifier user = new CommunicationUserIdentifier(CallingServerResponseMocker.NEW_PARTICIPANT_ID);
Response<AddParticipantResult> addParticipantResultResponse = serverCall.addParticipantWithResponse(user, CallingServerResponseMocker.URI_CALLBACK, "alternateCallerId", "operationContext", Context.NONE);
assertEquals(202, addParticipantResultResponse.getStatusCode());
AddParticipantResult addParticipantResult = addParticipantResultResponse.getValue();
assertEquals(user.getId(), addParticipantResult.getParticipantId());
}
@Test
public void appParticipantServerCallAsync() {
ServerCallAsync serverCall = CallingServerResponseMocker.getServerCallAsync(new ArrayList<SimpleEntry<String, Integer>>(
Arrays.asList(
new SimpleEntry<String, Integer>(CallingServerResponseMocker.generateAddParticipantResult(CallingServerResponseMocker.NEW_PARTICIPANT_ID), 202)
))
);
CommunicationUserIdentifier user = new CommunicationUserIdentifier(CallingServerResponseMocker.NEW_PARTICIPANT_ID);
AddParticipantResult addParticipantResult = serverCall.addParticipant(user, CallingServerResponseMocker.URI_CALLBACK, "alternateCallerId", "operationContext").block();
assertEquals(user.getId(), addParticipantResult.getParticipantId());
}
@Test
public void appParticipantServerCallAsyncWithResponse() {
ServerCallAsync serverCall = CallingServerResponseMocker.getServerCallAsync(new ArrayList<SimpleEntry<String, Integer>>(
Arrays.asList(
new SimpleEntry<String, Integer>(CallingServerResponseMocker.generateAddParticipantResult(CallingServerResponseMocker.NEW_PARTICIPANT_ID), 202)
))
);
CommunicationUserIdentifier user = new CommunicationUserIdentifier(CallingServerResponseMocker.NEW_PARTICIPANT_ID);
Response<AddParticipantResult> addParticipantResultResponse = serverCall.addParticipantWithResponse(user, CallingServerResponseMocker.URI_CALLBACK, "alternateCallerId", "operationContext").block();
assertEquals(202, addParticipantResultResponse.getStatusCode());
AddParticipantResult addParticipantResult = addParticipantResultResponse.getValue();
assertEquals(user.getId(), addParticipantResult.getParticipantId());
}
@Test
public void removeParticipantServerCall() {
ServerCall serverCall = CallingServerResponseMocker.getServerCall(new ArrayList<SimpleEntry<String, Integer>>(
Arrays.asList(
new SimpleEntry<String, Integer>("", 202)
))
);
serverCall.removeParticipant(CallingServerResponseMocker.NEW_PARTICIPANT_ID);
}
@Test
public void removeParticipantServerCallWithResponse() {
ServerCall serverCall = CallingServerResponseMocker.getServerCall(new ArrayList<SimpleEntry<String, Integer>>(
Arrays.asList(
new SimpleEntry<String, Integer>("", 202)
))
);
Response<Void> removeParticipantResultResponse = serverCall.removeParticipantWithResponse(CallingServerResponseMocker.NEW_PARTICIPANT_ID, Context.NONE);
assertEquals(202, removeParticipantResultResponse.getStatusCode());
}
@Test
public void removeParticipantServerCallAsync() {
ServerCallAsync serverCall = CallingServerResponseMocker.getServerCallAsync(new ArrayList<SimpleEntry<String, Integer>>(
Arrays.asList(
new SimpleEntry<String, Integer>("", 202)
))
);
serverCall.removeParticipant(CallingServerResponseMocker.NEW_PARTICIPANT_ID).block();
}
@Test
public void removeParticipantServerCallAsyncWithResponse() {
ServerCallAsync serverCall = CallingServerResponseMocker.getServerCallAsync(new ArrayList<SimpleEntry<String, Integer>>(
Arrays.asList(
new SimpleEntry<String, Integer>("", 202)
))
);
Response<Void> removeParticipantResultResponse = serverCall.removeParticipantWithResponse(CallingServerResponseMocker.NEW_PARTICIPANT_ID, Context.NONE).block();
assertEquals(202, removeParticipantResultResponse.getStatusCode());
}
private ServerCall getServerCall() {
return CallingServerResponseMocker.getServerCall(new ArrayList<SimpleEntry<String, Integer>>(
Arrays.asList(
new SimpleEntry<String, Integer>(CallingServerResponseMocker.generatePlayAudioResult(
CallingServerResponseMocker.OPERATION_ID,
OperationStatus.COMPLETED,
new ResultInfoInternal().setCode(202).setSubcode(0).setMessage("message")),
202)
)));
}
private ServerCallAsync getServerCallAsync() {
return CallingServerResponseMocker.getServerCallAsync(new ArrayList<SimpleEntry<String, Integer>>(
Arrays.asList(
new SimpleEntry<String, Integer>(CallingServerResponseMocker.generatePlayAudioResult(
CallingServerResponseMocker.OPERATION_ID,
OperationStatus.COMPLETED,
new ResultInfoInternal().setCode(202).setSubcode(0).setMessage("message")),
202)
)));
}
} | class ServerCallUnitTests {
private final String serverCallId = "aHR0cHM6Ly9jb252LXVzd2UtMDguY29udi5za3lwZS5jb20vY29udi8tby1FWjVpMHJrS3RFTDBNd0FST1J3P2k9ODgmZT02Mzc1Nzc0MTY4MDc4MjQyOTM";
static final String MOCK_CONNECTION_STRING = "endpoint=https:
@Test
public void startRecordingRelativeUriFails() {
ServerCall serverCall = new CallingServerClientBuilder()
.httpClient(new NoOpHttpClient())
.connectionString(MOCK_CONNECTION_STRING)
.buildClient()
.initializeServerCall(serverCallId);
assertThrows(
InvalidParameterException.class,
() -> serverCall.startRecording("/not/absolute/uri"));
}
@Test
@Test
public void startRecordingWithResponseRelativeUriFails() {
ServerCall serverCall = new CallingServerClientBuilder()
.httpClient(new NoOpHttpClient())
.connectionString(MOCK_CONNECTION_STRING)
.buildClient()
.initializeServerCall(serverCallId);
assertThrows(
InvalidParameterException.class,
() -> serverCall.startRecordingWithResponse("/not/absolute/uri", null, null));
}
@Test
public void addParticipantNullParticipantFails() {
ServerCall serverCall = new CallingServerClientBuilder()
.httpClient(new NoOpHttpClient())
.connectionString(MOCK_CONNECTION_STRING)
.buildClient()
.initializeServerCall(serverCallId);
assertThrows(
NullPointerException.class,
() -> serverCall.addParticipant(null, null, null, null));
}
@Test
public void startRecordingAsyncFails() {
ServerCallAsync serverCall = new CallingServerClientBuilder()
.httpClient(new NoOpHttpClient())
.connectionString(MOCK_CONNECTION_STRING)
.buildAsyncClient()
.initializeServerCall(serverCallId);
assertThrows(
InvalidParameterException.class,
() -> serverCall.startRecording("/not/absolute/uri")
.block());
}
@Test
public void startRecordingWithRecordingParamsAsyncFails() {
StartRecordingOptions startRecordingOptions = new StartRecordingOptions();
startRecordingOptions.setRecordingChannelType(RecordingChannelType.MIXED);
startRecordingOptions.setRecordingContentType(RecordingContentType.AUDIO_VIDEO);
startRecordingOptions.setRecordingFormatType(RecordingFormatType.MP4);
ServerCallAsync serverCall = new CallingServerClientBuilder()
.httpClient(new NoOpHttpClient())
.connectionString(MOCK_CONNECTION_STRING)
.buildAsyncClient()
.initializeServerCall(serverCallId);
assertThrows(
InvalidParameterException.class,
() -> serverCall.startRecordingWithResponse("/not/absolute/uri", startRecordingOptions, null)
.block());
}
@Test
public void playAudioWithResponse() {
ServerCall serverCall = getServerCall();
PlayAudioOptions playAudioOptions = new PlayAudioOptions().setAudioFileId("audioFileId").setCallbackUri("callbackUri");
Response<PlayAudioResult> playAudioResultResponse = serverCall.playAudioWithResponse("audioFileUri", playAudioOptions, Context.NONE);
assertEquals(202, playAudioResultResponse.getStatusCode());
PlayAudioResult playAudioResult = playAudioResultResponse.getValue();
assertEquals(OperationStatus.COMPLETED, playAudioResult.getStatus());
}
@Test
public void playAudio() {
ServerCall serverCall = getServerCall();
PlayAudioResult playAudioResult = serverCall.playAudio("audioFileUri", "audioFieldId", "callbackUri", "operationContext");
assertEquals(OperationStatus.COMPLETED, playAudioResult.getStatus());
}
@Test
public void playAudioWithResponseAsync() {
ServerCallAsync serverCall = getServerCallAsync();
PlayAudioOptions playAudioOptions = new PlayAudioOptions().setAudioFileId("audioFileId").setCallbackUri("callbackUri");
Response<PlayAudioResult> playAudioResultResponse = serverCall.playAudioWithResponse("audioFileUri", playAudioOptions).block();
assertEquals(202, playAudioResultResponse.getStatusCode());
PlayAudioResult playAudioResult = playAudioResultResponse.getValue();
assertEquals(OperationStatus.COMPLETED, playAudioResult.getStatus());
}
@Test
public void playAudioAsync() {
ServerCallAsync serverCall = getServerCallAsync();
PlayAudioResult playAudioResult = serverCall.playAudio("audioFileUri", "audioFieldId", "callbackUri", "operationContext").block();
assertEquals(OperationStatus.COMPLETED, playAudioResult.getStatus());
}
@Test
public void playAudioAsyncUsingOptions() {
ServerCallAsync serverCall = getServerCallAsync();
PlayAudioOptions playAudioOptions = new PlayAudioOptions().setAudioFileId("audioFileId").setCallbackUri("callbackUri");
PlayAudioResult playAudioResult = serverCall.playAudio("audioFileUri", playAudioOptions).block();
assertEquals(OperationStatus.COMPLETED, playAudioResult.getStatus());
}
@Test
public void appParticipantServerCall() {
ServerCall serverCall = CallingServerResponseMocker.getServerCall(new ArrayList<SimpleEntry<String, Integer>>(
Arrays.asList(
new SimpleEntry<String, Integer>(CallingServerResponseMocker.generateAddParticipantResult(CallingServerResponseMocker.NEW_PARTICIPANT_ID), 202)
))
);
CommunicationUserIdentifier user = new CommunicationUserIdentifier(CallingServerResponseMocker.NEW_PARTICIPANT_ID);
AddParticipantResult addParticipantResult = serverCall.addParticipant(user, CallingServerResponseMocker.URI_CALLBACK, "alternateCallerId", "operationContext");
assertEquals(user.getId(), addParticipantResult.getParticipantId());
}
@Test
public void appParticipantServerCallWithResponse() {
ServerCall serverCall = CallingServerResponseMocker.getServerCall(new ArrayList<SimpleEntry<String, Integer>>(
Arrays.asList(
new SimpleEntry<String, Integer>(CallingServerResponseMocker.generateAddParticipantResult(CallingServerResponseMocker.NEW_PARTICIPANT_ID), 202)
))
);
CommunicationUserIdentifier user = new CommunicationUserIdentifier(CallingServerResponseMocker.NEW_PARTICIPANT_ID);
Response<AddParticipantResult> addParticipantResultResponse = serverCall.addParticipantWithResponse(user, CallingServerResponseMocker.URI_CALLBACK, "alternateCallerId", "operationContext", Context.NONE);
assertEquals(202, addParticipantResultResponse.getStatusCode());
AddParticipantResult addParticipantResult = addParticipantResultResponse.getValue();
assertEquals(user.getId(), addParticipantResult.getParticipantId());
}
@Test
public void appParticipantServerCallAsync() {
ServerCallAsync serverCall = CallingServerResponseMocker.getServerCallAsync(new ArrayList<SimpleEntry<String, Integer>>(
Arrays.asList(
new SimpleEntry<String, Integer>(CallingServerResponseMocker.generateAddParticipantResult(CallingServerResponseMocker.NEW_PARTICIPANT_ID), 202)
))
);
CommunicationUserIdentifier user = new CommunicationUserIdentifier(CallingServerResponseMocker.NEW_PARTICIPANT_ID);
AddParticipantResult addParticipantResult = serverCall.addParticipant(user, CallingServerResponseMocker.URI_CALLBACK, "alternateCallerId", "operationContext").block();
assertEquals(user.getId(), addParticipantResult.getParticipantId());
}
@Test
public void appParticipantServerCallAsyncWithResponse() {
ServerCallAsync serverCall = CallingServerResponseMocker.getServerCallAsync(new ArrayList<SimpleEntry<String, Integer>>(
Arrays.asList(
new SimpleEntry<String, Integer>(CallingServerResponseMocker.generateAddParticipantResult(CallingServerResponseMocker.NEW_PARTICIPANT_ID), 202)
))
);
CommunicationUserIdentifier user = new CommunicationUserIdentifier(CallingServerResponseMocker.NEW_PARTICIPANT_ID);
Response<AddParticipantResult> addParticipantResultResponse = serverCall.addParticipantWithResponse(user, CallingServerResponseMocker.URI_CALLBACK, "alternateCallerId", "operationContext").block();
assertEquals(202, addParticipantResultResponse.getStatusCode());
AddParticipantResult addParticipantResult = addParticipantResultResponse.getValue();
assertEquals(user.getId(), addParticipantResult.getParticipantId());
}
@Test
public void removeParticipantServerCall() {
ServerCall serverCall = CallingServerResponseMocker.getServerCall(new ArrayList<SimpleEntry<String, Integer>>(
Arrays.asList(
new SimpleEntry<String, Integer>("", 202)
))
);
serverCall.removeParticipant(CallingServerResponseMocker.NEW_PARTICIPANT_ID);
}
@Test
public void removeParticipantServerCallWithResponse() {
ServerCall serverCall = CallingServerResponseMocker.getServerCall(new ArrayList<SimpleEntry<String, Integer>>(
Arrays.asList(
new SimpleEntry<String, Integer>("", 202)
))
);
Response<Void> removeParticipantResultResponse = serverCall.removeParticipantWithResponse(CallingServerResponseMocker.NEW_PARTICIPANT_ID, Context.NONE);
assertEquals(202, removeParticipantResultResponse.getStatusCode());
}
@Test
public void removeParticipantServerCallAsync() {
ServerCallAsync serverCall = CallingServerResponseMocker.getServerCallAsync(new ArrayList<SimpleEntry<String, Integer>>(
Arrays.asList(
new SimpleEntry<String, Integer>("", 202)
))
);
serverCall.removeParticipant(CallingServerResponseMocker.NEW_PARTICIPANT_ID).block();
}
@Test
public void removeParticipantServerCallAsyncWithResponse() {
ServerCallAsync serverCall = CallingServerResponseMocker.getServerCallAsync(new ArrayList<SimpleEntry<String, Integer>>(
Arrays.asList(
new SimpleEntry<String, Integer>("", 202)
))
);
Response<Void> removeParticipantResultResponse = serverCall.removeParticipantWithResponse(CallingServerResponseMocker.NEW_PARTICIPANT_ID, Context.NONE).block();
assertEquals(202, removeParticipantResultResponse.getStatusCode());
}
private ServerCall getServerCall() {
return CallingServerResponseMocker.getServerCall(new ArrayList<SimpleEntry<String, Integer>>(
Arrays.asList(
new SimpleEntry<String, Integer>(CallingServerResponseMocker.generatePlayAudioResult(
CallingServerResponseMocker.OPERATION_ID,
OperationStatus.COMPLETED,
new ResultInfoInternal().setCode(202).setSubcode(0).setMessage("message")),
202)
)));
}
private ServerCallAsync getServerCallAsync() {
return CallingServerResponseMocker.getServerCallAsync(new ArrayList<SimpleEntry<String, Integer>>(
Arrays.asList(
new SimpleEntry<String, Integer>(CallingServerResponseMocker.generatePlayAudioResult(
CallingServerResponseMocker.OPERATION_ID,
OperationStatus.COMPLETED,
new ResultInfoInternal().setCode(202).setSubcode(0).setMessage("message")),
202)
)));
}
} |
yes, thats added :) | public void runAllClientFunctionsWithResponseAsync(HttpClient httpClient) {
String groupId = getGroupId("runAllClientFunctionsWithResponseAsync");
CallingServerClientBuilder builder = getCallingServerClientUsingConnectionString(httpClient);
CallingServerAsyncClient callingServerAsyncClient =
setupAsyncClient(builder, "runAllClientFunctionsWithResponseAsync");
String recordingId = "";
List<CallConnectionAsync> callConnections = new ArrayList<>();
ServerCallAsync serverCallAsync = null;
try {
callConnections = createAsyncCall(callingServerAsyncClient, groupId, fromUser, toUser, CALLBACK_URI);
serverCallAsync = callingServerAsyncClient.initializeServerCall(groupId);
Response<StartCallRecordingResult> startRecordingResponse =
serverCallAsync.startRecordingWithResponse(CALLBACK_URI, null, null, null, null).block();
assert startRecordingResponse != null;
assertEquals(startRecordingResponse.getStatusCode(), 200);
StartCallRecordingResult startCallRecordingResult = startRecordingResponse.getValue();
recordingId = startCallRecordingResult.getRecordingId();
validateCallRecordingStateWithResponse(serverCallAsync, recordingId, CallRecordingState.ACTIVE);
Response<Void> pauseResponse = serverCallAsync.pauseRecordingWithResponse(recordingId).block();
assert pauseResponse != null;
assertEquals(pauseResponse.getStatusCode(), 200);
validateCallRecordingStateWithResponse(serverCallAsync, recordingId, CallRecordingState.INACTIVE);
Response<Void> resumeResponse = serverCallAsync.resumeRecordingWithResponse(recordingId).block();
assert resumeResponse != null;
assertEquals(resumeResponse.getStatusCode(), 200);
validateCallRecordingStateWithResponse(serverCallAsync, recordingId, CallRecordingState.ACTIVE);
} catch (Exception e) {
System.out.println("Error: " + e.getMessage());
throw e;
} finally {
if (serverCallAsync != null) {
try {
Response<Void> stopResponse = serverCallAsync.stopRecordingWithResponse(recordingId).block();
assert stopResponse != null;
assertEquals(stopResponse.getStatusCode(), 200);
} catch (Exception e) {
System.out.println("Error stopping recording: " + e.getMessage());
}
}
cleanUpConnectionsAsync(callConnections);
}
} | serverCallAsync.startRecordingWithResponse(CALLBACK_URI, null, null, null, null).block(); | public void runAllClientFunctionsWithResponseAsync(HttpClient httpClient) {
String groupId = getGroupId("runAllClientFunctionsWithResponseAsync");
CallingServerClientBuilder builder = getCallingServerClientUsingConnectionString(httpClient);
CallingServerAsyncClient callingServerAsyncClient =
setupAsyncClient(builder, "runAllClientFunctionsWithResponseAsync");
String recordingId = "";
List<CallConnectionAsync> callConnections = new ArrayList<>();
ServerCallAsync serverCallAsync = null;
try {
callConnections = createAsyncCall(callingServerAsyncClient, groupId, fromUser, toUser, CALLBACK_URI);
serverCallAsync = callingServerAsyncClient.initializeServerCall(groupId);
Response<StartCallRecordingResult> startRecordingResponse =
serverCallAsync.startRecordingWithResponse(CALLBACK_URI, null, null).block();
assert startRecordingResponse != null;
assertEquals(startRecordingResponse.getStatusCode(), 200);
StartCallRecordingResult startCallRecordingResult = startRecordingResponse.getValue();
recordingId = startCallRecordingResult.getRecordingId();
validateCallRecordingStateWithResponse(serverCallAsync, recordingId, CallRecordingState.ACTIVE);
Response<Void> pauseResponse = serverCallAsync.pauseRecordingWithResponse(recordingId).block();
assert pauseResponse != null;
assertEquals(pauseResponse.getStatusCode(), 200);
validateCallRecordingStateWithResponse(serverCallAsync, recordingId, CallRecordingState.INACTIVE);
Response<Void> resumeResponse = serverCallAsync.resumeRecordingWithResponse(recordingId).block();
assert resumeResponse != null;
assertEquals(resumeResponse.getStatusCode(), 200);
validateCallRecordingStateWithResponse(serverCallAsync, recordingId, CallRecordingState.ACTIVE);
} catch (Exception e) {
System.out.println("Error: " + e.getMessage());
throw e;
} finally {
if (serverCallAsync != null) {
try {
Response<Void> stopResponse = serverCallAsync.stopRecordingWithResponse(recordingId).block();
assert stopResponse != null;
assertEquals(stopResponse.getStatusCode(), 200);
} catch (Exception e) {
System.out.println("Error stopping recording: " + e.getMessage());
}
}
cleanUpConnectionsAsync(callConnections);
}
} | class ServerCallAsyncLiveTests extends CallingServerTestBase {
private final String fromUser = getRandomUserId();
private final String toUser = getRandomUserId();
@ParameterizedTest
@MethodSource("com.azure.core.test.TestBase
public void runAllClientFunctionsForConnectionStringClient(HttpClient httpClient) {
String groupId = getGroupId("runAllClientFunctionsForConnectionStringClient");
CallingServerClientBuilder builder = getCallingServerClientUsingConnectionString(httpClient);
CallingServerAsyncClient callingServerAsyncClient = setupAsyncClient(builder, "runAllClientFunctionsForConnectionStringClient");
runAllClientFunctionsAsync(groupId, callingServerAsyncClient);
}
@ParameterizedTest
@MethodSource("com.azure.core.test.TestBase
public void runAllClientFunctionsForTokenCredentialClient(HttpClient httpClient) {
String groupId = getGroupId("runAllClientFunctionsForTokenCredentialClient");
CallingServerClientBuilder builder = getCallingServerClientUsingTokenCredential(httpClient);
CallingServerAsyncClient callingServerAsyncClient = setupAsyncClient(builder, "runAllClientFunctionsForTokenCredentialClient");
runAllClientFunctionsAsync(groupId, callingServerAsyncClient);
}
private void runAllClientFunctionsAsync(String groupId, CallingServerAsyncClient callingServerAsyncClient) {
String recordingId = "";
List<CallConnectionAsync> callConnections = new ArrayList<>();
ServerCallAsync serverCall = null;
try {
callConnections = createAsyncCall(callingServerAsyncClient, groupId, fromUser, toUser, CALLBACK_URI);
serverCall = callingServerAsyncClient.initializeServerCall(groupId);
StartCallRecordingResult startCallRecordingResult = serverCall.startRecording(CALLBACK_URI).block();
assert startCallRecordingResult != null;
recordingId = startCallRecordingResult.getRecordingId();
validateCallRecordingState(serverCall, recordingId, CallRecordingState.ACTIVE);
serverCall.pauseRecording(recordingId).block();
validateCallRecordingState(serverCall, recordingId, CallRecordingState.INACTIVE);
serverCall.resumeRecording(recordingId).block();
validateCallRecordingState(serverCall, recordingId, CallRecordingState.ACTIVE);
} catch (Exception e) {
System.out.println("Error: " + e.getMessage());
throw e;
} finally {
if (serverCall != null) {
try {
serverCall.stopRecording(recordingId).block();
} catch (Exception e) {
System.out.println("Error stopping recording: " + e.getMessage());
}
}
cleanUpConnectionsAsync(callConnections);
}
}
@ParameterizedTest
@MethodSource("com.azure.core.test.TestBase
@ParameterizedTest
@MethodSource("com.azure.core.test.TestBase
public void runPlayAudioFunctionAsync(HttpClient httpClient) {
String groupId = getGroupId("runPlayAudioFunctionAsync");
CallingServerClientBuilder builder = getCallingServerClientUsingConnectionString(httpClient);
CallingServerAsyncClient callingServerAsyncClient =
setupAsyncClient(builder, "runPlayAudioFunctionAsync");
ServerCallAsync serverCallAsync;
List<CallConnectionAsync> callConnections = new ArrayList<>();
String operationContext = UUID.randomUUID().toString();
try {
callConnections = createAsyncCall(callingServerAsyncClient, groupId, fromUser, toUser, CALLBACK_URI);
serverCallAsync = callingServerAsyncClient.initializeServerCall(groupId);
PlayAudioResult playAudioResult =
serverCallAsync.playAudio(AUDIO_FILE_URI, operationContext, CALLBACK_URI, operationContext).block();
CallingServerTestUtils.validatePlayAudioResult(playAudioResult);
} catch (Exception e) {
System.out.println("Error: " + e.getMessage());
throw e;
} finally {
cleanUpConnectionsAsync(callConnections);
}
}
@ParameterizedTest
@MethodSource("com.azure.core.test.TestBase
public void runPlayAudioFunctionWithResponseAsync(HttpClient httpClient) {
String groupId = getGroupId("runPlayAudioFunctionWithResponseAsync");
CallingServerClientBuilder builder = getCallingServerClientUsingConnectionString(httpClient);
CallingServerAsyncClient callingServerAsyncClient =
setupAsyncClient(builder, "runPlayAudioFunctionWithResponseAsync");
ServerCallAsync serverCallAsync;
List<CallConnectionAsync> callConnections = new ArrayList<>();
String operationContext = UUID.randomUUID().toString();
try {
callConnections = createAsyncCall(callingServerAsyncClient, groupId, fromUser, toUser, CALLBACK_URI);
serverCallAsync = callingServerAsyncClient.initializeServerCall(groupId);
PlayAudioOptions options = new PlayAudioOptions();
options.setAudioFileId(UUID.randomUUID().toString());
options.setCallbackUri(CALLBACK_URI);
options.setOperationContext(operationContext);
Response<PlayAudioResult> playAudioResult =
serverCallAsync.playAudioWithResponse(
AUDIO_FILE_URI,
options).block();
CallingServerTestUtils.validatePlayAudioResponse(playAudioResult);
} catch (Exception e) {
System.out.println("Error: " + e.getMessage());
throw e;
} finally {
cleanUpConnectionsAsync(callConnections);
}
}
@ParameterizedTest
@MethodSource("com.azure.core.test.TestBase
public void startRecordingFailsAsync(HttpClient httpClient) {
CallingServerClientBuilder builder = getCallingServerClientUsingConnectionString(httpClient);
CallingServerAsyncClient callingServerAsyncClient = setupAsyncClient(builder, "startRecordingFailsAsync");
String invalidServerCallId = "aHR0cHM6Ly9jb252LXVzd2UtMDkuY29udi5za3lwZS5jb20vY29udi9EZVF2WEJGVVlFV1NNZkFXYno2azN3P2k9MTEmZT02Mzc1NzIyMjk0Mjc0NTI4Nzk=";
ServerCallAsync serverCallAsync = callingServerAsyncClient.initializeServerCall(invalidServerCallId);
try {
Response<StartCallRecordingResult> response =
serverCallAsync.startRecordingWithResponse(CALLBACK_URI, null, null, null, null).block();
assert response != null;
assertEquals(response.getStatusCode(), 400);
} catch (CallingServerErrorException e) {
assertEquals(e.getResponse().getStatusCode(), 400);
}
}
@ParameterizedTest
@MethodSource("com.azure.core.test.TestBase
@DisabledIfEnvironmentVariable(
named = "SKIP_LIVE_TEST",
matches = "(?i)(true)",
disabledReason = "Requires human intervention")
public void runAddRemoveScenarioAsync(HttpClient httpClient) {
CallingServerClientBuilder builder = getCallingServerClientUsingConnectionString(httpClient);
CallingServerAsyncClient callingServerAsyncClient =
setupAsyncClient(builder, "runAddRemoveScenarioAsync");
try {
CreateCallOptions options = new CreateCallOptions(
CALLBACK_URI,
Collections.singletonList(MediaType.AUDIO),
Collections.singletonList(EventSubscriptionType.PARTICIPANTS_UPDATED));
options.setAlternateCallerId(new PhoneNumberIdentifier(FROM_PHONE_NUMBER));
CallConnectionAsync callConnectionAsync = callingServerAsyncClient.createCallConnection(
new CommunicationUserIdentifier(fromUser),
Collections.singletonList(new PhoneNumberIdentifier(TO_PHONE_NUMBER)),
options).block();
CallingServerTestUtils.validateCallConnectionAsync(callConnectionAsync);
/*
Waiting for an update to be able to get this serverCallId when using
createCallConnection()
*/
String serverCallId = "aHR0cHM6Ly94LWNvbnYtdXN3ZS0wMS5jb252LnNreXBlLmNvbS9jb252L19JbTJUcm1MejBpLWlaYkZRREtxaGc_aT0xJmU9NjM3NTg0MzkzMzg3ODg3MDI3";
ServerCallAsync serverCallAsync = callingServerAsyncClient.initializeServerCall(serverCallId);
String operationContext = UUID.randomUUID().toString();
AddParticipantResult addParticipantResult = serverCallAsync
.addParticipant(
new CommunicationUserIdentifier(toUser),
null,
operationContext,
CALLBACK_URI)
.block();
assert addParticipantResult != null;
String participantId = addParticipantResult.getParticipantId();
serverCallAsync.removeParticipant(participantId).block();
assert callConnectionAsync != null;
callConnectionAsync.hangup().block();
} catch (Exception e) {
System.out.println("Error: " + e.getMessage());
throw e;
}
}
@ParameterizedTest
@MethodSource("com.azure.core.test.TestBase
@DisabledIfEnvironmentVariable(
named = "SKIP_LIVE_TEST",
matches = "(?i)(true)",
disabledReason = "Requires human intervention")
public void runAddRemoveScenarioWithResponseAsync(HttpClient httpClient) {
CallingServerClientBuilder builder = getCallingServerClientUsingConnectionString(httpClient);
CallingServerAsyncClient callingServerAsyncClient = setupAsyncClient(builder, "runAddRemoveScenarioWithResponseAsync");
try {
CreateCallOptions options = new CreateCallOptions(
CALLBACK_URI,
Collections.singletonList(MediaType.AUDIO),
Collections.singletonList(EventSubscriptionType.PARTICIPANTS_UPDATED));
options.setAlternateCallerId(new PhoneNumberIdentifier(FROM_PHONE_NUMBER));
CallConnectionAsync callConnectionAsync = callingServerAsyncClient.createCallConnection(
new CommunicationUserIdentifier(fromUser),
Collections.singletonList(new PhoneNumberIdentifier(TO_PHONE_NUMBER)),
options).block();
CallingServerTestUtils.validateCallConnectionAsync(callConnectionAsync);
/*
Waiting for an update to be able to get this serverCallId when using
createCallConnection()
*/
String serverCallId = "aHR0cHM6Ly94LWNvbnYtdXN3ZS0wMS5jb252LnNreXBlLmNvbS9jb252L0pndHZNTW5mYUU2N3ViU3FKb19ndFE_aT0xJmU9NjM3NTg0MzkzMzg3ODg3MDI3";
ServerCallAsync serverCallAsync = callingServerAsyncClient.initializeServerCall(serverCallId);
String operationContext = UUID.randomUUID().toString();
Response<AddParticipantResult> addParticipantResultResponse =
serverCallAsync
.addParticipantWithResponse(
new CommunicationUserIdentifier(toUser),
null,
operationContext,
CALLBACK_URI)
.block();
CallingServerTestUtils.validateAddParticipantResponse(addParticipantResultResponse);
assert addParticipantResultResponse != null;
String participantId = addParticipantResultResponse.getValue().getParticipantId();
Response<Void> removeResponse = serverCallAsync.removeParticipantWithResponse(participantId).block();
CallingServerTestUtils.validateResponse(removeResponse);
assert callConnectionAsync != null;
callConnectionAsync.hangup().block();
} catch (Exception e) {
System.out.println("Error: " + e.getMessage());
throw e;
}
}
private CallingServerAsyncClient setupAsyncClient(CallingServerClientBuilder builder, String testName) {
return addLoggingPolicy(builder, testName).buildAsyncClient();
}
protected CallingServerClientBuilder addLoggingPolicy(CallingServerClientBuilder builder, String testName) {
return builder.addPolicy((context, next) -> logHeaders(testName, next));
}
private void validateCallRecordingState(ServerCallAsync serverCallAsync,
String recordingId,
CallRecordingState expectedCallRecordingState) {
assertNotNull(serverCallAsync);
assertNotNull(serverCallAsync.getServerCallId());
assertNotNull(recordingId);
sleepIfRunningAgainstService(6000);
CallRecordingProperties callRecordingStateResult = serverCallAsync.getRecordingState(recordingId).block();
assert callRecordingStateResult != null;
assertEquals(callRecordingStateResult.getRecordingState(), expectedCallRecordingState);
}
protected void validateCallRecordingStateWithResponse(
ServerCallAsync serverCallAsync,
String recordingId,
CallRecordingState expectedCallRecordingState) {
assertNotNull(serverCallAsync);
assertNotNull(serverCallAsync.getServerCallId());
assertNotNull(recordingId);
sleepIfRunningAgainstService(6000);
Response<CallRecordingProperties> response =
serverCallAsync.getRecordingStateWithResponse(recordingId).block();
assertNotNull(response);
assertEquals(response.getStatusCode(), 200);
assertNotNull(response.getValue());
assertEquals(response.getValue().getRecordingState(), expectedCallRecordingState);
}
protected void cleanUpConnectionsAsync(List<CallConnectionAsync> connections) {
if (connections == null) {
return;
}
connections.forEach(c -> {
if (c != null) {
try {
c.hangup().block();
} catch (Exception e) {
System.out.println("Error hanging up: " + e.getMessage());
}
}
});
}
} | class ServerCallAsyncLiveTests extends CallingServerTestBase {
private final String fromUser = getRandomUserId();
private final String toUser = getRandomUserId();
@ParameterizedTest
@MethodSource("com.azure.core.test.TestBase
public void runAllClientFunctionsForConnectionStringClient(HttpClient httpClient) {
String groupId = getGroupId("runAllClientFunctionsForConnectionStringClient");
CallingServerClientBuilder builder = getCallingServerClientUsingConnectionString(httpClient);
CallingServerAsyncClient callingServerAsyncClient = setupAsyncClient(builder, "runAllClientFunctionsForConnectionStringClient");
runAllClientFunctionsAsync(groupId, callingServerAsyncClient);
}
@ParameterizedTest
@MethodSource("com.azure.core.test.TestBase
public void runAllClientFunctionsForTokenCredentialClient(HttpClient httpClient) {
String groupId = getGroupId("runAllClientFunctionsForTokenCredentialClient");
CallingServerClientBuilder builder = getCallingServerClientUsingTokenCredential(httpClient);
CallingServerAsyncClient callingServerAsyncClient = setupAsyncClient(builder, "runAllClientFunctionsForTokenCredentialClient");
runAllClientFunctionsAsync(groupId, callingServerAsyncClient);
}
private void runAllClientFunctionsAsync(String groupId, CallingServerAsyncClient callingServerAsyncClient) {
String recordingId = "";
List<CallConnectionAsync> callConnections = new ArrayList<>();
ServerCallAsync serverCall = null;
try {
callConnections = createAsyncCall(callingServerAsyncClient, groupId, fromUser, toUser, CALLBACK_URI);
serverCall = callingServerAsyncClient.initializeServerCall(groupId);
StartCallRecordingResult startCallRecordingResult = serverCall.startRecording(CALLBACK_URI).block();
assert startCallRecordingResult != null;
recordingId = startCallRecordingResult.getRecordingId();
validateCallRecordingState(serverCall, recordingId, CallRecordingState.ACTIVE);
serverCall.pauseRecording(recordingId).block();
validateCallRecordingState(serverCall, recordingId, CallRecordingState.INACTIVE);
serverCall.resumeRecording(recordingId).block();
validateCallRecordingState(serverCall, recordingId, CallRecordingState.ACTIVE);
} catch (Exception e) {
System.out.println("Error: " + e.getMessage());
throw e;
} finally {
if (serverCall != null) {
try {
serverCall.stopRecording(recordingId).block();
} catch (Exception e) {
System.out.println("Error stopping recording: " + e.getMessage());
}
}
cleanUpConnectionsAsync(callConnections);
}
}
@ParameterizedTest
@MethodSource("com.azure.core.test.TestBase
@ParameterizedTest
@MethodSource("com.azure.core.test.TestBase
public void runPlayAudioFunctionAsync(HttpClient httpClient) {
String groupId = getGroupId("runPlayAudioFunctionAsync");
CallingServerClientBuilder builder = getCallingServerClientUsingConnectionString(httpClient);
CallingServerAsyncClient callingServerAsyncClient =
setupAsyncClient(builder, "runPlayAudioFunctionAsync");
ServerCallAsync serverCallAsync;
List<CallConnectionAsync> callConnections = new ArrayList<>();
String operationContext = UUID.randomUUID().toString();
try {
callConnections = createAsyncCall(callingServerAsyncClient, groupId, fromUser, toUser, CALLBACK_URI);
serverCallAsync = callingServerAsyncClient.initializeServerCall(groupId);
PlayAudioResult playAudioResult =
serverCallAsync.playAudio(AUDIO_FILE_URI, operationContext, CALLBACK_URI, operationContext).block();
CallingServerTestUtils.validatePlayAudioResult(playAudioResult);
} catch (Exception e) {
System.out.println("Error: " + e.getMessage());
throw e;
} finally {
cleanUpConnectionsAsync(callConnections);
}
}
@ParameterizedTest
@MethodSource("com.azure.core.test.TestBase
public void runPlayAudioFunctionWithResponseAsync(HttpClient httpClient) {
String groupId = getGroupId("runPlayAudioFunctionWithResponseAsync");
CallingServerClientBuilder builder = getCallingServerClientUsingConnectionString(httpClient);
CallingServerAsyncClient callingServerAsyncClient =
setupAsyncClient(builder, "runPlayAudioFunctionWithResponseAsync");
ServerCallAsync serverCallAsync;
List<CallConnectionAsync> callConnections = new ArrayList<>();
String operationContext = UUID.randomUUID().toString();
try {
callConnections = createAsyncCall(callingServerAsyncClient, groupId, fromUser, toUser, CALLBACK_URI);
serverCallAsync = callingServerAsyncClient.initializeServerCall(groupId);
PlayAudioOptions options = new PlayAudioOptions();
options.setAudioFileId(UUID.randomUUID().toString());
options.setCallbackUri(CALLBACK_URI);
options.setOperationContext(operationContext);
Response<PlayAudioResult> playAudioResult =
serverCallAsync.playAudioWithResponse(
AUDIO_FILE_URI,
options).block();
CallingServerTestUtils.validatePlayAudioResponse(playAudioResult);
} catch (Exception e) {
System.out.println("Error: " + e.getMessage());
throw e;
} finally {
cleanUpConnectionsAsync(callConnections);
}
}
@ParameterizedTest
@MethodSource("com.azure.core.test.TestBase
public void startRecordingFailsAsync(HttpClient httpClient) {
CallingServerClientBuilder builder = getCallingServerClientUsingConnectionString(httpClient);
CallingServerAsyncClient callingServerAsyncClient = setupAsyncClient(builder, "startRecordingFailsAsync");
String invalidServerCallId = "aHR0cHM6Ly9jb252LXVzd2UtMDkuY29udi5za3lwZS5jb20vY29udi9EZVF2WEJGVVlFV1NNZkFXYno2azN3P2k9MTEmZT02Mzc1NzIyMjk0Mjc0NTI4Nzk=";
ServerCallAsync serverCallAsync = callingServerAsyncClient.initializeServerCall(invalidServerCallId);
try {
Response<StartCallRecordingResult> response =
serverCallAsync.startRecordingWithResponse(CALLBACK_URI, null, null).block();
assert response != null;
assertEquals(response.getStatusCode(), 400);
} catch (CallingServerErrorException e) {
assertEquals(e.getResponse().getStatusCode(), 400);
}
}
@ParameterizedTest
@MethodSource("com.azure.core.test.TestBase
@DisabledIfEnvironmentVariable(
named = "SKIP_LIVE_TEST",
matches = "(?i)(true)",
disabledReason = "Requires human intervention")
public void runAddRemoveScenarioAsync(HttpClient httpClient) {
CallingServerClientBuilder builder = getCallingServerClientUsingConnectionString(httpClient);
CallingServerAsyncClient callingServerAsyncClient =
setupAsyncClient(builder, "runAddRemoveScenarioAsync");
try {
CreateCallOptions options = new CreateCallOptions(
CALLBACK_URI,
Collections.singletonList(MediaType.AUDIO),
Collections.singletonList(EventSubscriptionType.PARTICIPANTS_UPDATED));
options.setAlternateCallerId(new PhoneNumberIdentifier(FROM_PHONE_NUMBER));
CallConnectionAsync callConnectionAsync = callingServerAsyncClient.createCallConnection(
new CommunicationUserIdentifier(fromUser),
Collections.singletonList(new PhoneNumberIdentifier(TO_PHONE_NUMBER)),
options).block();
CallingServerTestUtils.validateCallConnectionAsync(callConnectionAsync);
/*
Waiting for an update to be able to get this serverCallId when using
createCallConnection()
*/
String serverCallId = "aHR0cHM6Ly94LWNvbnYtdXN3ZS0wMS5jb252LnNreXBlLmNvbS9jb252L19JbTJUcm1MejBpLWlaYkZRREtxaGc_aT0xJmU9NjM3NTg0MzkzMzg3ODg3MDI3";
ServerCallAsync serverCallAsync = callingServerAsyncClient.initializeServerCall(serverCallId);
String operationContext = UUID.randomUUID().toString();
AddParticipantResult addParticipantResult = serverCallAsync
.addParticipant(
new CommunicationUserIdentifier(toUser),
null,
operationContext,
CALLBACK_URI)
.block();
assert addParticipantResult != null;
String participantId = addParticipantResult.getParticipantId();
serverCallAsync.removeParticipant(participantId).block();
assert callConnectionAsync != null;
callConnectionAsync.hangup().block();
} catch (Exception e) {
System.out.println("Error: " + e.getMessage());
throw e;
}
}
@ParameterizedTest
@MethodSource("com.azure.core.test.TestBase
@DisabledIfEnvironmentVariable(
named = "SKIP_LIVE_TEST",
matches = "(?i)(true)",
disabledReason = "Requires human intervention")
public void runAddRemoveScenarioWithResponseAsync(HttpClient httpClient) {
CallingServerClientBuilder builder = getCallingServerClientUsingConnectionString(httpClient);
CallingServerAsyncClient callingServerAsyncClient = setupAsyncClient(builder, "runAddRemoveScenarioWithResponseAsync");
try {
CreateCallOptions options = new CreateCallOptions(
CALLBACK_URI,
Collections.singletonList(MediaType.AUDIO),
Collections.singletonList(EventSubscriptionType.PARTICIPANTS_UPDATED));
options.setAlternateCallerId(new PhoneNumberIdentifier(FROM_PHONE_NUMBER));
CallConnectionAsync callConnectionAsync = callingServerAsyncClient.createCallConnection(
new CommunicationUserIdentifier(fromUser),
Collections.singletonList(new PhoneNumberIdentifier(TO_PHONE_NUMBER)),
options).block();
CallingServerTestUtils.validateCallConnectionAsync(callConnectionAsync);
/*
Waiting for an update to be able to get this serverCallId when using
createCallConnection()
*/
String serverCallId = "aHR0cHM6Ly94LWNvbnYtdXN3ZS0wMS5jb252LnNreXBlLmNvbS9jb252L0pndHZNTW5mYUU2N3ViU3FKb19ndFE_aT0xJmU9NjM3NTg0MzkzMzg3ODg3MDI3";
ServerCallAsync serverCallAsync = callingServerAsyncClient.initializeServerCall(serverCallId);
String operationContext = UUID.randomUUID().toString();
Response<AddParticipantResult> addParticipantResultResponse =
serverCallAsync
.addParticipantWithResponse(
new CommunicationUserIdentifier(toUser),
null,
operationContext,
CALLBACK_URI)
.block();
CallingServerTestUtils.validateAddParticipantResponse(addParticipantResultResponse);
assert addParticipantResultResponse != null;
String participantId = addParticipantResultResponse.getValue().getParticipantId();
Response<Void> removeResponse = serverCallAsync.removeParticipantWithResponse(participantId).block();
CallingServerTestUtils.validateResponse(removeResponse);
assert callConnectionAsync != null;
callConnectionAsync.hangup().block();
} catch (Exception e) {
System.out.println("Error: " + e.getMessage());
throw e;
}
}
private CallingServerAsyncClient setupAsyncClient(CallingServerClientBuilder builder, String testName) {
return addLoggingPolicy(builder, testName).buildAsyncClient();
}
protected CallingServerClientBuilder addLoggingPolicy(CallingServerClientBuilder builder, String testName) {
return builder.addPolicy((context, next) -> logHeaders(testName, next));
}
private void validateCallRecordingState(ServerCallAsync serverCallAsync,
String recordingId,
CallRecordingState expectedCallRecordingState) {
assertNotNull(serverCallAsync);
assertNotNull(serverCallAsync.getServerCallId());
assertNotNull(recordingId);
sleepIfRunningAgainstService(6000);
CallRecordingProperties callRecordingStateResult = serverCallAsync.getRecordingState(recordingId).block();
assert callRecordingStateResult != null;
assertEquals(callRecordingStateResult.getRecordingState(), expectedCallRecordingState);
}
protected void validateCallRecordingStateWithResponse(
ServerCallAsync serverCallAsync,
String recordingId,
CallRecordingState expectedCallRecordingState) {
assertNotNull(serverCallAsync);
assertNotNull(serverCallAsync.getServerCallId());
assertNotNull(recordingId);
sleepIfRunningAgainstService(6000);
Response<CallRecordingProperties> response =
serverCallAsync.getRecordingStateWithResponse(recordingId).block();
assertNotNull(response);
assertEquals(response.getStatusCode(), 200);
assertNotNull(response.getValue());
assertEquals(response.getValue().getRecordingState(), expectedCallRecordingState);
}
protected void cleanUpConnectionsAsync(List<CallConnectionAsync> connections) {
if (connections == null) {
return;
}
connections.forEach(c -> {
if (c != null) {
try {
c.hangup().block();
} catch (Exception e) {
System.out.println("Error hanging up: " + e.getMessage());
}
}
});
}
} |
is possible to have Pattern.compile(null)? | public PlaybackClient(RecordedData recordedData, Map<String, String> textReplacementRules) {
Objects.requireNonNull(recordedData, "'recordedData' cannot be null.");
this.recordedData = recordedData;
this.textReplacementRules = new HashMap<>();
if (textReplacementRules != null) {
for (Map.Entry<String, String> kvp : textReplacementRules.entrySet()) {
this.textReplacementRules.put(Pattern.compile(kvp.getKey()), kvp.getValue());
}
}
} | this.textReplacementRules.put(Pattern.compile(kvp.getKey()), kvp.getValue()); | public PlaybackClient(RecordedData recordedData, Map<String, String> textReplacementRules) {
Objects.requireNonNull(recordedData, "'recordedData' cannot be null.");
this.recordedData = recordedData;
this.textReplacementRules = new HashMap<>();
if (textReplacementRules != null) {
for (Map.Entry<String, String> kvp : textReplacementRules.entrySet()) {
this.textReplacementRules.put(Pattern.compile(kvp.getKey()), kvp.getValue());
}
}
} | class PlaybackClient implements HttpClient {
private static final String X_MS_CLIENT_REQUEST_ID = "x-ms-client-request-id";
private static final String X_MS_ENCRYPTION_KEY_SHA256 = "x-ms-encryption-key-sha256";
private static final Pattern DOUBLE_SLASH_CLEANER = Pattern.compile("(?<!https?:)\\/\\/");
private static final Pattern ARRAYS_TO_STRING_SPLIT = Pattern.compile(", ");
private final ClientLogger logger = new ClientLogger(PlaybackClient.class);
private final AtomicInteger count = new AtomicInteger(0);
private final Map<Pattern, String> textReplacementRules;
private final RecordedData recordedData;
/**
* Creates a PlaybackClient that replays network calls from {@code recordedData} and replaces {@link
* NetworkCallRecord
*
* @param recordedData The data to playback.
* @param textReplacementRules A set of rules to replace text in network call responses.
*/
/**
* {@inheritDoc}
*/
@Override
public Mono<HttpResponse> send(final HttpRequest request) {
return Mono.defer(() -> playbackHttpResponse(request));
}
private Mono<HttpResponse> playbackHttpResponse(final HttpRequest request) {
final String incomingUrl = applyReplacementRules(request.getUrl().toString());
final String incomingMethod = request.getHttpMethod().toString();
final String matchingUrl = removeHost(incomingUrl);
NetworkCallRecord networkCallRecord = recordedData.findFirstAndRemoveNetworkCall(record -> {
if (!record.getMethod().equalsIgnoreCase(incomingMethod)) {
return false;
}
String removedHostUri = removeHost(record.getUri());
String cleanedHostUri = DOUBLE_SLASH_CLEANER.matcher(removedHostUri).replaceAll("/");
String cleanedMatchingUrl = DOUBLE_SLASH_CLEANER.matcher(matchingUrl).replaceAll("/");
return cleanedHostUri.equalsIgnoreCase(cleanedMatchingUrl);
});
count.incrementAndGet();
if (networkCallRecord == null) {
logger.warning("NOT FOUND - Method: {} URL: {}", incomingMethod, incomingUrl);
logger.warning("Records requested: {}.", count);
return Mono.error(new IllegalStateException("==> Unexpected request: " + incomingMethod + " " + incomingUrl));
}
if (networkCallRecord.getException() != null) {
throw logger.logExceptionAsWarning(Exceptions.propagate(networkCallRecord.getException().get()));
}
if (networkCallRecord.getHeaders().containsKey(X_MS_CLIENT_REQUEST_ID)) {
request.setHeader(X_MS_CLIENT_REQUEST_ID, networkCallRecord.getHeaders().get(X_MS_CLIENT_REQUEST_ID));
}
if (request.getHeaders().getValue(X_MS_ENCRYPTION_KEY_SHA256) != null) {
networkCallRecord.getResponse().put(X_MS_ENCRYPTION_KEY_SHA256,
request.getHeaders().getValue(X_MS_ENCRYPTION_KEY_SHA256));
}
int recordStatusCode = Integer.parseInt(networkCallRecord.getResponse().get("StatusCode"));
HttpHeaders headers = new HttpHeaders();
for (Map.Entry<String, String> pair : networkCallRecord.getResponse().entrySet()) {
if (!pair.getKey().equals("StatusCode") && !pair.getKey().equals("Body")) {
headers.set(pair.getKey(), applyReplacementRules(pair.getValue()));
}
}
String rawBody = networkCallRecord.getResponse().get("Body");
byte[] bytes = null;
if (rawBody != null) {
rawBody = applyReplacementRules(rawBody);
String contentType = networkCallRecord.getResponse().get("Content-Type");
/*
* The Body Content-Type is application/octet-stream or avro/binary, those use a custom format to be written
* to disk. In older versions of azure-core-test this used Arrays.toString(), bodies saved using this format
* will begin with '[' and end with ']'. The new format for persisting these Content-Types is Base64
* encoding. Base64 encoding is more compact as Arrays.toString() will separate each byte with ', ', adding
* (2 * byte[].length) - 1 additional characters, additionally each byte on average takes 2-3 characters to
* be written to disk [-128,127). Base64 encoding only takes about 4 characters per 3 bytes, this results
* in a drastically smaller size on disk. In addition to a smaller size on disk, loading the body when it
* is Base64 encoded is much faster as it doesn't require string splitting.
*/
if (contentType != null
&& (contentType.equalsIgnoreCase(ContentType.APPLICATION_OCTET_STREAM)
|| contentType.equalsIgnoreCase("avro/binary"))) {
if (rawBody.startsWith("[") && rawBody.endsWith("]")) {
/*
* Body is encoded using the old Arrays.toString() format. Remove the leading '[' and trailing ']'
* and split the string into individual bytes using ', '.
*/
ByteArrayOutputStream outputStream = new ByteArrayOutputStream();
for (String piece : ARRAYS_TO_STRING_SPLIT.split(rawBody.substring(1, rawBody.length() - 1))) {
outputStream.write(Byte.parseByte(piece));
}
bytes = outputStream.toByteArray();
} else {
/*
* Body is encoded using the Base64 encoded format, simply Base64 decode it.
*/
bytes = Base64.getDecoder().decode(rawBody);
}
} else {
bytes = rawBody.getBytes(StandardCharsets.UTF_8);
}
if (bytes.length > 0) {
headers.set("Content-Length", String.valueOf(bytes.length));
}
}
HttpResponse response = new MockHttpResponse(request, recordStatusCode, headers, bytes);
return Mono.just(response);
}
private String applyReplacementRules(String text) {
for (Map.Entry<Pattern, String> rule : textReplacementRules.entrySet()) {
if (rule.getValue() != null) {
text = rule.getKey().matcher(text).replaceAll(rule.getValue());
}
}
return text;
}
private static String removeHost(String url) {
UrlBuilder urlBuilder = UrlBuilder.parse(url);
if (urlBuilder.getQuery().containsKey("sig")) {
urlBuilder.setQueryParameter("sig", "REDACTED");
}
return String.format("%s%s", urlBuilder.getPath(), urlBuilder.getQueryString());
}
} | class PlaybackClient implements HttpClient {
private static final String X_MS_CLIENT_REQUEST_ID = "x-ms-client-request-id";
private static final String X_MS_ENCRYPTION_KEY_SHA256 = "x-ms-encryption-key-sha256";
private static final Pattern DOUBLE_SLASH_CLEANER = Pattern.compile("(?<!https?:)\\/\\/");
private static final Pattern ARRAYS_TO_STRING_SPLIT = Pattern.compile(", ");
private final ClientLogger logger = new ClientLogger(PlaybackClient.class);
private final AtomicInteger count = new AtomicInteger(0);
private final Map<Pattern, String> textReplacementRules;
private final RecordedData recordedData;
/**
* Creates a PlaybackClient that replays network calls from {@code recordedData} and replaces {@link
* NetworkCallRecord
*
* @param recordedData The data to playback.
* @param textReplacementRules A set of rules to replace text in network call responses.
*/
/**
* {@inheritDoc}
*/
@Override
public Mono<HttpResponse> send(final HttpRequest request) {
return Mono.defer(() -> playbackHttpResponse(request));
}
private Mono<HttpResponse> playbackHttpResponse(final HttpRequest request) {
final String incomingUrl = applyReplacementRules(request.getUrl().toString());
final String incomingMethod = request.getHttpMethod().toString();
final String matchingUrl = removeHost(incomingUrl);
NetworkCallRecord networkCallRecord = recordedData.findFirstAndRemoveNetworkCall(record -> {
if (!record.getMethod().equalsIgnoreCase(incomingMethod)) {
return false;
}
String removedHostUri = removeHost(record.getUri());
String cleanedHostUri = DOUBLE_SLASH_CLEANER.matcher(removedHostUri).replaceAll("/");
String cleanedMatchingUrl = DOUBLE_SLASH_CLEANER.matcher(matchingUrl).replaceAll("/");
return cleanedHostUri.equalsIgnoreCase(cleanedMatchingUrl);
});
count.incrementAndGet();
if (networkCallRecord == null) {
logger.warning("NOT FOUND - Method: {} URL: {}", incomingMethod, incomingUrl);
logger.warning("Records requested: {}.", count);
return Mono.error(new IllegalStateException("==> Unexpected request: " + incomingMethod + " " + incomingUrl));
}
if (networkCallRecord.getException() != null) {
throw logger.logExceptionAsWarning(Exceptions.propagate(networkCallRecord.getException().get()));
}
if (networkCallRecord.getHeaders().containsKey(X_MS_CLIENT_REQUEST_ID)) {
request.setHeader(X_MS_CLIENT_REQUEST_ID, networkCallRecord.getHeaders().get(X_MS_CLIENT_REQUEST_ID));
}
if (request.getHeaders().getValue(X_MS_ENCRYPTION_KEY_SHA256) != null) {
networkCallRecord.getResponse().put(X_MS_ENCRYPTION_KEY_SHA256,
request.getHeaders().getValue(X_MS_ENCRYPTION_KEY_SHA256));
}
int recordStatusCode = Integer.parseInt(networkCallRecord.getResponse().get("StatusCode"));
HttpHeaders headers = new HttpHeaders();
for (Map.Entry<String, String> pair : networkCallRecord.getResponse().entrySet()) {
if (!pair.getKey().equals("StatusCode") && !pair.getKey().equals("Body")) {
headers.set(pair.getKey(), applyReplacementRules(pair.getValue()));
}
}
String rawBody = networkCallRecord.getResponse().get("Body");
byte[] bytes = null;
if (rawBody != null) {
rawBody = applyReplacementRules(rawBody);
String contentType = networkCallRecord.getResponse().get("Content-Type");
/*
* The Body Content-Type is application/octet-stream or avro/binary, those use a custom format to be written
* to disk. In older versions of azure-core-test this used Arrays.toString(), bodies saved using this format
* will begin with '[' and end with ']'. The new format for persisting these Content-Types is Base64
* encoding. Base64 encoding is more compact as Arrays.toString() will separate each byte with ', ', adding
* (2 * byte[].length) - 1 additional characters, additionally each byte on average takes 2-3 characters to
* be written to disk [-128,127). Base64 encoding only takes about 4 characters per 3 bytes, this results
* in a drastically smaller size on disk. In addition to a smaller size on disk, loading the body when it
* is Base64 encoded is much faster as it doesn't require string splitting.
*/
if (contentType != null
&& (contentType.equalsIgnoreCase(ContentType.APPLICATION_OCTET_STREAM)
|| contentType.equalsIgnoreCase("avro/binary"))) {
if (rawBody.startsWith("[") && rawBody.endsWith("]")) {
/*
* Body is encoded using the old Arrays.toString() format. Remove the leading '[' and trailing ']'
* and split the string into individual bytes using ', '.
*/
ByteArrayOutputStream outputStream = new ByteArrayOutputStream();
for (String piece : ARRAYS_TO_STRING_SPLIT.split(rawBody.substring(1, rawBody.length() - 1))) {
outputStream.write(Byte.parseByte(piece));
}
bytes = outputStream.toByteArray();
} else {
/*
* Body is encoded using the Base64 encoded format, simply Base64 decode it.
*/
bytes = Base64.getDecoder().decode(rawBody);
}
} else {
bytes = rawBody.getBytes(StandardCharsets.UTF_8);
}
if (bytes.length > 0) {
headers.set("Content-Length", String.valueOf(bytes.length));
}
}
HttpResponse response = new MockHttpResponse(request, recordStatusCode, headers, bytes);
return Mono.just(response);
}
private String applyReplacementRules(String text) {
for (Map.Entry<Pattern, String> rule : textReplacementRules.entrySet()) {
if (rule.getValue() != null) {
text = rule.getKey().matcher(text).replaceAll(rule.getValue());
}
}
return text;
}
private static String removeHost(String url) {
UrlBuilder urlBuilder = UrlBuilder.parse(url);
if (urlBuilder.getQuery().containsKey("sig")) {
urlBuilder.setQueryParameter("sig", "REDACTED");
}
return String.format("%s%s", urlBuilder.getPath(), urlBuilder.getQueryString());
}
} |
Nope, and this would have failed previously when using the `String.replaceAll`. | public PlaybackClient(RecordedData recordedData, Map<String, String> textReplacementRules) {
Objects.requireNonNull(recordedData, "'recordedData' cannot be null.");
this.recordedData = recordedData;
this.textReplacementRules = new HashMap<>();
if (textReplacementRules != null) {
for (Map.Entry<String, String> kvp : textReplacementRules.entrySet()) {
this.textReplacementRules.put(Pattern.compile(kvp.getKey()), kvp.getValue());
}
}
} | this.textReplacementRules.put(Pattern.compile(kvp.getKey()), kvp.getValue()); | public PlaybackClient(RecordedData recordedData, Map<String, String> textReplacementRules) {
Objects.requireNonNull(recordedData, "'recordedData' cannot be null.");
this.recordedData = recordedData;
this.textReplacementRules = new HashMap<>();
if (textReplacementRules != null) {
for (Map.Entry<String, String> kvp : textReplacementRules.entrySet()) {
this.textReplacementRules.put(Pattern.compile(kvp.getKey()), kvp.getValue());
}
}
} | class PlaybackClient implements HttpClient {
private static final String X_MS_CLIENT_REQUEST_ID = "x-ms-client-request-id";
private static final String X_MS_ENCRYPTION_KEY_SHA256 = "x-ms-encryption-key-sha256";
private static final Pattern DOUBLE_SLASH_CLEANER = Pattern.compile("(?<!https?:)\\/\\/");
private static final Pattern ARRAYS_TO_STRING_SPLIT = Pattern.compile(", ");
private final ClientLogger logger = new ClientLogger(PlaybackClient.class);
private final AtomicInteger count = new AtomicInteger(0);
private final Map<Pattern, String> textReplacementRules;
private final RecordedData recordedData;
/**
* Creates a PlaybackClient that replays network calls from {@code recordedData} and replaces {@link
* NetworkCallRecord
*
* @param recordedData The data to playback.
* @param textReplacementRules A set of rules to replace text in network call responses.
*/
/**
* {@inheritDoc}
*/
@Override
public Mono<HttpResponse> send(final HttpRequest request) {
return Mono.defer(() -> playbackHttpResponse(request));
}
private Mono<HttpResponse> playbackHttpResponse(final HttpRequest request) {
final String incomingUrl = applyReplacementRules(request.getUrl().toString());
final String incomingMethod = request.getHttpMethod().toString();
final String matchingUrl = removeHost(incomingUrl);
NetworkCallRecord networkCallRecord = recordedData.findFirstAndRemoveNetworkCall(record -> {
if (!record.getMethod().equalsIgnoreCase(incomingMethod)) {
return false;
}
String removedHostUri = removeHost(record.getUri());
String cleanedHostUri = DOUBLE_SLASH_CLEANER.matcher(removedHostUri).replaceAll("/");
String cleanedMatchingUrl = DOUBLE_SLASH_CLEANER.matcher(matchingUrl).replaceAll("/");
return cleanedHostUri.equalsIgnoreCase(cleanedMatchingUrl);
});
count.incrementAndGet();
if (networkCallRecord == null) {
logger.warning("NOT FOUND - Method: {} URL: {}", incomingMethod, incomingUrl);
logger.warning("Records requested: {}.", count);
return Mono.error(new IllegalStateException("==> Unexpected request: " + incomingMethod + " " + incomingUrl));
}
if (networkCallRecord.getException() != null) {
throw logger.logExceptionAsWarning(Exceptions.propagate(networkCallRecord.getException().get()));
}
if (networkCallRecord.getHeaders().containsKey(X_MS_CLIENT_REQUEST_ID)) {
request.setHeader(X_MS_CLIENT_REQUEST_ID, networkCallRecord.getHeaders().get(X_MS_CLIENT_REQUEST_ID));
}
if (request.getHeaders().getValue(X_MS_ENCRYPTION_KEY_SHA256) != null) {
networkCallRecord.getResponse().put(X_MS_ENCRYPTION_KEY_SHA256,
request.getHeaders().getValue(X_MS_ENCRYPTION_KEY_SHA256));
}
int recordStatusCode = Integer.parseInt(networkCallRecord.getResponse().get("StatusCode"));
HttpHeaders headers = new HttpHeaders();
for (Map.Entry<String, String> pair : networkCallRecord.getResponse().entrySet()) {
if (!pair.getKey().equals("StatusCode") && !pair.getKey().equals("Body")) {
headers.set(pair.getKey(), applyReplacementRules(pair.getValue()));
}
}
String rawBody = networkCallRecord.getResponse().get("Body");
byte[] bytes = null;
if (rawBody != null) {
rawBody = applyReplacementRules(rawBody);
String contentType = networkCallRecord.getResponse().get("Content-Type");
/*
* The Body Content-Type is application/octet-stream or avro/binary, those use a custom format to be written
* to disk. In older versions of azure-core-test this used Arrays.toString(), bodies saved using this format
* will begin with '[' and end with ']'. The new format for persisting these Content-Types is Base64
* encoding. Base64 encoding is more compact as Arrays.toString() will separate each byte with ', ', adding
* (2 * byte[].length) - 1 additional characters, additionally each byte on average takes 2-3 characters to
* be written to disk [-128,127). Base64 encoding only takes about 4 characters per 3 bytes, this results
* in a drastically smaller size on disk. In addition to a smaller size on disk, loading the body when it
* is Base64 encoded is much faster as it doesn't require string splitting.
*/
if (contentType != null
&& (contentType.equalsIgnoreCase(ContentType.APPLICATION_OCTET_STREAM)
|| contentType.equalsIgnoreCase("avro/binary"))) {
if (rawBody.startsWith("[") && rawBody.endsWith("]")) {
/*
* Body is encoded using the old Arrays.toString() format. Remove the leading '[' and trailing ']'
* and split the string into individual bytes using ', '.
*/
ByteArrayOutputStream outputStream = new ByteArrayOutputStream();
for (String piece : ARRAYS_TO_STRING_SPLIT.split(rawBody.substring(1, rawBody.length() - 1))) {
outputStream.write(Byte.parseByte(piece));
}
bytes = outputStream.toByteArray();
} else {
/*
* Body is encoded using the Base64 encoded format, simply Base64 decode it.
*/
bytes = Base64.getDecoder().decode(rawBody);
}
} else {
bytes = rawBody.getBytes(StandardCharsets.UTF_8);
}
if (bytes.length > 0) {
headers.set("Content-Length", String.valueOf(bytes.length));
}
}
HttpResponse response = new MockHttpResponse(request, recordStatusCode, headers, bytes);
return Mono.just(response);
}
private String applyReplacementRules(String text) {
for (Map.Entry<Pattern, String> rule : textReplacementRules.entrySet()) {
if (rule.getValue() != null) {
text = rule.getKey().matcher(text).replaceAll(rule.getValue());
}
}
return text;
}
private static String removeHost(String url) {
UrlBuilder urlBuilder = UrlBuilder.parse(url);
if (urlBuilder.getQuery().containsKey("sig")) {
urlBuilder.setQueryParameter("sig", "REDACTED");
}
return String.format("%s%s", urlBuilder.getPath(), urlBuilder.getQueryString());
}
} | class PlaybackClient implements HttpClient {
private static final String X_MS_CLIENT_REQUEST_ID = "x-ms-client-request-id";
private static final String X_MS_ENCRYPTION_KEY_SHA256 = "x-ms-encryption-key-sha256";
private static final Pattern DOUBLE_SLASH_CLEANER = Pattern.compile("(?<!https?:)\\/\\/");
private static final Pattern ARRAYS_TO_STRING_SPLIT = Pattern.compile(", ");
private final ClientLogger logger = new ClientLogger(PlaybackClient.class);
private final AtomicInteger count = new AtomicInteger(0);
private final Map<Pattern, String> textReplacementRules;
private final RecordedData recordedData;
/**
* Creates a PlaybackClient that replays network calls from {@code recordedData} and replaces {@link
* NetworkCallRecord
*
* @param recordedData The data to playback.
* @param textReplacementRules A set of rules to replace text in network call responses.
*/
/**
* {@inheritDoc}
*/
    @Override
    public Mono<HttpResponse> send(final HttpRequest request) {
        // Defer so the record lookup (and its removal from recordedData) happens
        // per subscription rather than eagerly at assembly time.
        return Mono.defer(() -> playbackHttpResponse(request));
    }
    /**
     * Finds the recorded network call matching the incoming request and replays its
     * response. The matching record is removed from the recording, so each recorded
     * call can be replayed at most once.
     *
     * @param request The live request to match against the recording.
     * @return The recorded response, or an error Mono when no recording matches.
     */
    private Mono<HttpResponse> playbackHttpResponse(final HttpRequest request) {
        final String incomingUrl = applyReplacementRules(request.getUrl().toString());
        final String incomingMethod = request.getHttpMethod().toString();
        // Match on path + query only; hosts differ between record and playback runs.
        final String matchingUrl = removeHost(incomingUrl);
        NetworkCallRecord networkCallRecord = recordedData.findFirstAndRemoveNetworkCall(record -> {
            if (!record.getMethod().equalsIgnoreCase(incomingMethod)) {
                return false;
            }
            // Collapse accidental double slashes (outside the scheme) on both sides
            // before comparing, so 'a//b' and 'a/b' are treated as the same URL.
            String removedHostUri = removeHost(record.getUri());
            String cleanedHostUri = DOUBLE_SLASH_CLEANER.matcher(removedHostUri).replaceAll("/");
            String cleanedMatchingUrl = DOUBLE_SLASH_CLEANER.matcher(matchingUrl).replaceAll("/");
            return cleanedHostUri.equalsIgnoreCase(cleanedMatchingUrl);
        });
        count.incrementAndGet();
        if (networkCallRecord == null) {
            logger.warning("NOT FOUND - Method: {} URL: {}", incomingMethod, incomingUrl);
            logger.warning("Records requested: {}.", count);
            return Mono.error(new IllegalStateException("==> Unexpected request: " + incomingMethod + " " + incomingUrl));
        }
        // A recorded client-side exception is replayed instead of a response.
        if (networkCallRecord.getException() != null) {
            throw logger.logExceptionAsWarning(Exceptions.propagate(networkCallRecord.getException().get()));
        }
        // Overwrite the live request id with the recorded one so ids line up with the recording.
        if (networkCallRecord.getHeaders().containsKey(X_MS_CLIENT_REQUEST_ID)) {
            request.setHeader(X_MS_CLIENT_REQUEST_ID, networkCallRecord.getHeaders().get(X_MS_CLIENT_REQUEST_ID));
        }
        // Echo the caller's encryption-key header back in the replayed response.
        if (request.getHeaders().getValue(X_MS_ENCRYPTION_KEY_SHA256) != null) {
            networkCallRecord.getResponse().put(X_MS_ENCRYPTION_KEY_SHA256,
                request.getHeaders().getValue(X_MS_ENCRYPTION_KEY_SHA256));
        }
        int recordStatusCode = Integer.parseInt(networkCallRecord.getResponse().get("StatusCode"));
        HttpHeaders headers = new HttpHeaders();
        // Everything in the recorded response map except StatusCode and Body is a header.
        for (Map.Entry<String, String> pair : networkCallRecord.getResponse().entrySet()) {
            if (!pair.getKey().equals("StatusCode") && !pair.getKey().equals("Body")) {
                headers.set(pair.getKey(), applyReplacementRules(pair.getValue()));
            }
        }
        String rawBody = networkCallRecord.getResponse().get("Body");
        byte[] bytes = null;
        if (rawBody != null) {
            rawBody = applyReplacementRules(rawBody);
            String contentType = networkCallRecord.getResponse().get("Content-Type");
            /*
             * The Body Content-Type is application/octet-stream or avro/binary, those use a custom format to be written
             * to disk. In older versions of azure-core-test this used Arrays.toString(), bodies saved using this format
             * will begin with '[' and end with ']'. The new format for persisting these Content-Types is Base64
             * encoding. Base64 encoding is more compact as Arrays.toString() will separate each byte with ', ', adding
             * (2 * byte[].length) - 1 additional characters, additionally each byte on average takes 2-3 characters to
             * be written to disk [-128,127). Base64 encoding only takes about 4 characters per 3 bytes, this results
             * in a drastically smaller size on disk. In addition to a smaller size on disk, loading the body when it
             * is Base64 encoded is much faster as it doesn't require string splitting.
             */
            if (contentType != null
                && (contentType.equalsIgnoreCase(ContentType.APPLICATION_OCTET_STREAM)
                    || contentType.equalsIgnoreCase("avro/binary"))) {
                if (rawBody.startsWith("[") && rawBody.endsWith("]")) {
                    /*
                     * Body is encoded using the old Arrays.toString() format. Remove the leading '[' and trailing ']'
                     * and split the string into individual bytes using ', '.
                     */
                    ByteArrayOutputStream outputStream = new ByteArrayOutputStream();
                    for (String piece : ARRAYS_TO_STRING_SPLIT.split(rawBody.substring(1, rawBody.length() - 1))) {
                        outputStream.write(Byte.parseByte(piece));
                    }
                    bytes = outputStream.toByteArray();
                } else {
                    /*
                     * Body is encoded using the Base64 encoded format, simply Base64 decode it.
                     */
                    bytes = Base64.getDecoder().decode(rawBody);
                }
            } else {
                bytes = rawBody.getBytes(StandardCharsets.UTF_8);
            }
            if (bytes.length > 0) {
                headers.set("Content-Length", String.valueOf(bytes.length));
            }
        }
        HttpResponse response = new MockHttpResponse(request, recordStatusCode, headers, bytes);
        return Mono.just(response);
    }
private String applyReplacementRules(String text) {
for (Map.Entry<Pattern, String> rule : textReplacementRules.entrySet()) {
if (rule.getValue() != null) {
text = rule.getKey().matcher(text).replaceAll(rule.getValue());
}
}
return text;
}
private static String removeHost(String url) {
UrlBuilder urlBuilder = UrlBuilder.parse(url);
if (urlBuilder.getQuery().containsKey("sig")) {
urlBuilder.setQueryParameter("sig", "REDACTED");
}
return String.format("%s%s", urlBuilder.getPath(), urlBuilder.getQueryString());
}
} |
We should also start using the supplier to log. | public Mono<HttpResponse> process(HttpPipelineCallContext context, HttpPipelineNextPolicy next) {
final UrlBuilder urlBuilder = UrlBuilder.parse(context.getHttpRequest().getUrl());
if (overwrite || urlBuilder.getScheme() == null) {
logger.verbose("Setting protocol to {}", protocol);
try {
context.getHttpRequest().setUrl(urlBuilder.setScheme(protocol).toUrl());
} catch (MalformedURLException e) {
return Mono.error(new RuntimeException(
String.format("Failed to set the HTTP request protocol to %s.", protocol), e));
}
}
return next.process();
} | logger.verbose("Setting protocol to {}", protocol); | public Mono<HttpResponse> process(HttpPipelineCallContext context, HttpPipelineNextPolicy next) {
final UrlBuilder urlBuilder = UrlBuilder.parse(context.getHttpRequest().getUrl());
if (overwrite || urlBuilder.getScheme() == null) {
logger.log(LogLevel.VERBOSE, () -> "Setting protocol to " + protocol);
try {
context.getHttpRequest().setUrl(urlBuilder.setScheme(protocol).toUrl());
} catch (MalformedURLException e) {
return Mono.error(new RuntimeException(
String.format("Failed to set the HTTP request protocol to %s.", protocol), e));
}
}
return next.process();
} | class ProtocolPolicy implements HttpPipelinePolicy {
private final String protocol;
private final boolean overwrite;
private final ClientLogger logger = new ClientLogger(ProtocolPolicy.class);
/**
* Creates a new ProtocolPolicy.
*
* @param protocol The protocol to set.
* @param overwrite Whether to overwrite a HttpRequest's protocol if it already has one.
*/
    public ProtocolPolicy(String protocol, boolean overwrite) {
        // No validation here: an unusable protocol only surfaces later in process()
        // as a MalformedURLException when the request URL is rebuilt.
        this.protocol = protocol;
        this.overwrite = overwrite;
    }
@Override
} | class ProtocolPolicy implements HttpPipelinePolicy {
private final String protocol;
private final boolean overwrite;
private final ClientLogger logger = new ClientLogger(ProtocolPolicy.class);
/**
* Creates a new ProtocolPolicy.
*
* @param protocol The protocol to set.
* @param overwrite Whether to overwrite a HttpRequest's protocol if it already has one.
*/
public ProtocolPolicy(String protocol, boolean overwrite) {
this.protocol = protocol;
this.overwrite = overwrite;
}
@Override
} |
If this method is used to log information, should we change the method names accordingly? | protected void internalSubscribe(String name, Consumer<Message<?>> consumer, Class<?> payloadType) {
final DefaultServiceBusMessageProcessor messageProcessor = new DefaultServiceBusMessageProcessor(
this.checkpointConfig, payloadType, consumer, this.messageConverter) {
@Override
protected void buildCheckpointFailMessage(Message<?> message, Throwable t) {
if (LOGGER.isWarnEnabled()) {
LOGGER.warn(String.format(MSG_FAIL_CHECKPOINT, message, name), t);
}
}
@Override
protected void buildCheckpointSuccessMessage(Message<?> message) {
if (LOGGER.isDebugEnabled()) {
LOGGER.debug(String.format(MSG_SUCCESS_CHECKPOINT, message, name, getCheckpointConfig().getCheckpointMode()));
}
}
};
Instrumentation instrumentation = new Instrumentation(name, Instrumentation.Type.CONSUME);
try {
instrumentationManager.addHealthInstrumentation(instrumentation);
ServiceBusProcessorClient processorClient = this.clientFactory.getOrCreateProcessor(name, clientConfig,
messageProcessor);
processorClient.start();
instrumentationManager.getHealthInstrumentation(instrumentation).markStartedSuccessfully();
} catch (Exception e) {
instrumentationManager.getHealthInstrumentation(instrumentation).markStartFailed(e);
LOGGER.error("ServiceBus processorClient startup failed, Caused by " + e.getMessage());
throw new ServiceBusRuntimeException("ServiceBus processor client startup failed, Caused by " + e.getMessage(), e);
}
} | protected void buildCheckpointFailMessage(Message<?> message, Throwable t) { | protected void internalSubscribe(String name, Consumer<Message<?>> consumer, Class<?> payloadType) {
final DefaultServiceBusMessageProcessor messageProcessor = new DefaultServiceBusMessageProcessor(
this.checkpointConfig, payloadType, consumer, this.messageConverter) {
@Override
protected void logCheckpointFail(Message<?> message, Throwable t) {
if (LOGGER.isWarnEnabled()) {
LOGGER.warn(String.format(MSG_FAIL_CHECKPOINT, message, name), t);
}
}
@Override
protected void logCheckpointSuccess(Message<?> message) {
if (LOGGER.isDebugEnabled()) {
LOGGER.debug(String.format(MSG_SUCCESS_CHECKPOINT, message, name, getCheckpointConfig().getCheckpointMode()));
}
}
};
Instrumentation instrumentation = new Instrumentation(name, Instrumentation.Type.CONSUME);
try {
instrumentationManager.addHealthInstrumentation(instrumentation);
ServiceBusProcessorClient processorClient = this.clientFactory.getOrCreateProcessor(name, clientConfig,
messageProcessor);
processorClient.start();
instrumentationManager.getHealthInstrumentation(instrumentation).markStartedSuccessfully();
} catch (Exception e) {
instrumentationManager.getHealthInstrumentation(instrumentation).markStartFailed(e);
LOGGER.error("ServiceBus processorClient startup failed, Caused by " + e.getMessage());
throw new ServiceBusRuntimeException("ServiceBus processor client startup failed, Caused by " + e.getMessage(), e);
}
} | class ServiceBusQueueTemplate extends ServiceBusTemplate<ServiceBusQueueClientFactory>
implements ServiceBusQueueOperation {
private static final Logger LOGGER = LoggerFactory.getLogger(ServiceBusQueueTemplate.class);
private static final String MSG_FAIL_CHECKPOINT = "Failed to checkpoint %s in queue '%s'";
private static final String MSG_SUCCESS_CHECKPOINT = "Checkpointed %s in queue '%s' in %s mode";
private final Set<String> subscribedQueues = ConcurrentHashMap.newKeySet();
    /**
     * Creates a ServiceBusQueueTemplate.
     *
     * @param clientFactory Factory used to obtain queue processor clients.
     */
    public ServiceBusQueueTemplate(ServiceBusQueueClientFactory clientFactory) {
        super(clientFactory);
    }
    /**
     * Creates a ServiceBusQueueTemplate with a custom message converter.
     *
     * @param clientFactory Factory used to obtain queue processor clients.
     * @param messageConverter Converter applied to received Service Bus messages.
     */
    public ServiceBusQueueTemplate(ServiceBusQueueClientFactory clientFactory,
                                   ServiceBusMessageConverter messageConverter) {
        super(clientFactory, messageConverter);
    }
/**
* Register a message handler to receive message from the queue. A session handler will be registered if session is
* enabled.
*
* @param name The queue name.
* @param consumer The consumer method.
* @param payloadType The type of the message payload.
* @throws ServiceBusRuntimeException If fail to register the queue message handler.
*/
    /**
     * Sets the client configuration used when creating processor clients in
     * {@link #internalSubscribe}.
     *
     * @param clientConfig The Service Bus client configuration; must not be null.
     */
    @Override
    public void setClientConfig(@NonNull ServiceBusClientConfig clientConfig) {
        this.clientConfig = clientConfig;
    }
@Override
public <T> void deadLetter(String destination,
Message<T> message,
String deadLetterReason,
String deadLetterErrorDescription) {
Assert.hasText(destination, "destination can't be null or empty");
final ServiceBusReceivedMessageContext messageContext = (ServiceBusReceivedMessageContext) message.getHeaders()
.get(
ServiceBusMessageHeaders.RECEIVED_MESSAGE_CONTEXT);
if (messageContext != null) {
messageContext.deadLetter();
}
}
@Override
public <T> void abandon(String destination, Message<T> message) {
Assert.hasText(destination, "destination can't be null or empty");
final ServiceBusReceivedMessageContext messageContext = (ServiceBusReceivedMessageContext) message.getHeaders()
.get(
ServiceBusMessageHeaders.RECEIVED_MESSAGE_CONTEXT);
if (messageContext != null) {
messageContext.abandon();
}
}
@Override
@SuppressWarnings("unchecked")
public boolean subscribe(String destination,
@NonNull Consumer<Message<?>> consumer,
@NonNull Class<?> targetPayloadClass) {
Assert.hasText(destination, "destination can't be null or empty");
if (subscribedQueues.contains(destination)) {
return false;
}
subscribedQueues.add(destination);
internalSubscribe(destination, consumer, targetPayloadClass);
return true;
}
    /**
     * Removes the local subscription record for the given queue.
     *
     * @param destination The queue name.
     * @return {@code true} if a subscription existed and was removed.
     */
    @Override
    public boolean unsubscribe(String destination) {
        // NOTE(review): the ServiceBusProcessorClient started in internalSubscribe is
        // not stopped here — confirm whether callers expect delivery to cease.
        return subscribedQueues.remove(destination);
    }
} | class ServiceBusQueueTemplate extends ServiceBusTemplate<ServiceBusQueueClientFactory>
implements ServiceBusQueueOperation {
private static final Logger LOGGER = LoggerFactory.getLogger(ServiceBusQueueTemplate.class);
private static final String MSG_FAIL_CHECKPOINT = "Failed to checkpoint %s in queue '%s'";
private static final String MSG_SUCCESS_CHECKPOINT = "Checkpointed %s in queue '%s' in %s mode";
private final Set<String> subscribedQueues = ConcurrentHashMap.newKeySet();
public ServiceBusQueueTemplate(ServiceBusQueueClientFactory clientFactory) {
super(clientFactory);
}
public ServiceBusQueueTemplate(ServiceBusQueueClientFactory clientFactory,
ServiceBusMessageConverter messageConverter) {
super(clientFactory, messageConverter);
}
/**
* Register a message handler to receive message from the queue. A session handler will be registered if session is
* enabled.
*
* @param name The queue name.
* @param consumer The consumer method.
* @param payloadType The type of the message payload.
* @throws ServiceBusRuntimeException If fail to register the queue message handler.
*/
@Override
public void setClientConfig(@NonNull ServiceBusClientConfig clientConfig) {
this.clientConfig = clientConfig;
}
@Override
public <T> void deadLetter(String destination,
Message<T> message,
String deadLetterReason,
String deadLetterErrorDescription) {
Assert.hasText(destination, "destination can't be null or empty");
final ServiceBusReceivedMessageContext messageContext = (ServiceBusReceivedMessageContext) message.getHeaders()
.get(
ServiceBusMessageHeaders.RECEIVED_MESSAGE_CONTEXT);
if (messageContext != null) {
messageContext.deadLetter();
}
}
@Override
public <T> void abandon(String destination, Message<T> message) {
Assert.hasText(destination, "destination can't be null or empty");
final ServiceBusReceivedMessageContext messageContext = (ServiceBusReceivedMessageContext) message.getHeaders()
.get(
ServiceBusMessageHeaders.RECEIVED_MESSAGE_CONTEXT);
if (messageContext != null) {
messageContext.abandon();
}
}
@Override
@SuppressWarnings("unchecked")
public boolean subscribe(String destination,
@NonNull Consumer<Message<?>> consumer,
@NonNull Class<?> targetPayloadClass) {
Assert.hasText(destination, "destination can't be null or empty");
if (subscribedQueues.contains(destination)) {
return false;
}
subscribedQueues.add(destination);
internalSubscribe(destination, consumer, targetPayloadClass);
return true;
}
@Override
public boolean unsubscribe(String destination) {
return subscribedQueues.remove(destination);
}
} |
same here | protected void internalSubscribe(String name, Consumer<Message<?>> consumer, Class<?> payloadType) {
final DefaultServiceBusMessageProcessor messageProcessor = new DefaultServiceBusMessageProcessor(
this.checkpointConfig, payloadType, consumer, this.messageConverter) {
@Override
protected void buildCheckpointFailMessage(Message<?> message, Throwable t) {
if (LOGGER.isWarnEnabled()) {
LOGGER.warn(String.format(MSG_FAIL_CHECKPOINT, message, name), t);
}
}
@Override
protected void buildCheckpointSuccessMessage(Message<?> message) {
if (LOGGER.isDebugEnabled()) {
LOGGER.debug(String.format(MSG_SUCCESS_CHECKPOINT, message, name, getCheckpointConfig().getCheckpointMode()));
}
}
};
Instrumentation instrumentation = new Instrumentation(name, Instrumentation.Type.CONSUME);
try {
instrumentationManager.addHealthInstrumentation(instrumentation);
ServiceBusProcessorClient processorClient = this.clientFactory.getOrCreateProcessor(name, clientConfig,
messageProcessor);
processorClient.start();
instrumentationManager.getHealthInstrumentation(instrumentation).markStartedSuccessfully();
} catch (Exception e) {
instrumentationManager.getHealthInstrumentation(instrumentation).markStartFailed(e);
LOGGER.error("ServiceBus processorClient startup failed, Caused by " + e.getMessage());
throw new ServiceBusRuntimeException("ServiceBus processor client startup failed, Caused by " + e.getMessage(), e);
}
} | protected void buildCheckpointSuccessMessage(Message<?> message) { | protected void internalSubscribe(String name, Consumer<Message<?>> consumer, Class<?> payloadType) {
final DefaultServiceBusMessageProcessor messageProcessor = new DefaultServiceBusMessageProcessor(
this.checkpointConfig, payloadType, consumer, this.messageConverter) {
@Override
protected void logCheckpointFail(Message<?> message, Throwable t) {
if (LOGGER.isWarnEnabled()) {
LOGGER.warn(String.format(MSG_FAIL_CHECKPOINT, message, name), t);
}
}
@Override
protected void logCheckpointSuccess(Message<?> message) {
if (LOGGER.isDebugEnabled()) {
LOGGER.debug(String.format(MSG_SUCCESS_CHECKPOINT, message, name, getCheckpointConfig().getCheckpointMode()));
}
}
};
Instrumentation instrumentation = new Instrumentation(name, Instrumentation.Type.CONSUME);
try {
instrumentationManager.addHealthInstrumentation(instrumentation);
ServiceBusProcessorClient processorClient = this.clientFactory.getOrCreateProcessor(name, clientConfig,
messageProcessor);
processorClient.start();
instrumentationManager.getHealthInstrumentation(instrumentation).markStartedSuccessfully();
} catch (Exception e) {
instrumentationManager.getHealthInstrumentation(instrumentation).markStartFailed(e);
LOGGER.error("ServiceBus processorClient startup failed, Caused by " + e.getMessage());
throw new ServiceBusRuntimeException("ServiceBus processor client startup failed, Caused by " + e.getMessage(), e);
}
} | class ServiceBusQueueTemplate extends ServiceBusTemplate<ServiceBusQueueClientFactory>
implements ServiceBusQueueOperation {
private static final Logger LOGGER = LoggerFactory.getLogger(ServiceBusQueueTemplate.class);
private static final String MSG_FAIL_CHECKPOINT = "Failed to checkpoint %s in queue '%s'";
private static final String MSG_SUCCESS_CHECKPOINT = "Checkpointed %s in queue '%s' in %s mode";
private final Set<String> subscribedQueues = ConcurrentHashMap.newKeySet();
public ServiceBusQueueTemplate(ServiceBusQueueClientFactory clientFactory) {
super(clientFactory);
}
public ServiceBusQueueTemplate(ServiceBusQueueClientFactory clientFactory,
ServiceBusMessageConverter messageConverter) {
super(clientFactory, messageConverter);
}
/**
* Register a message handler to receive message from the queue. A session handler will be registered if session is
* enabled.
*
* @param name The queue name.
* @param consumer The consumer method.
* @param payloadType The type of the message payload.
* @throws ServiceBusRuntimeException If fail to register the queue message handler.
*/
@Override
public void setClientConfig(@NonNull ServiceBusClientConfig clientConfig) {
this.clientConfig = clientConfig;
}
@Override
public <T> void deadLetter(String destination,
Message<T> message,
String deadLetterReason,
String deadLetterErrorDescription) {
Assert.hasText(destination, "destination can't be null or empty");
final ServiceBusReceivedMessageContext messageContext = (ServiceBusReceivedMessageContext) message.getHeaders()
.get(
ServiceBusMessageHeaders.RECEIVED_MESSAGE_CONTEXT);
if (messageContext != null) {
messageContext.deadLetter();
}
}
@Override
public <T> void abandon(String destination, Message<T> message) {
Assert.hasText(destination, "destination can't be null or empty");
final ServiceBusReceivedMessageContext messageContext = (ServiceBusReceivedMessageContext) message.getHeaders()
.get(
ServiceBusMessageHeaders.RECEIVED_MESSAGE_CONTEXT);
if (messageContext != null) {
messageContext.abandon();
}
}
@Override
@SuppressWarnings("unchecked")
public boolean subscribe(String destination,
@NonNull Consumer<Message<?>> consumer,
@NonNull Class<?> targetPayloadClass) {
Assert.hasText(destination, "destination can't be null or empty");
if (subscribedQueues.contains(destination)) {
return false;
}
subscribedQueues.add(destination);
internalSubscribe(destination, consumer, targetPayloadClass);
return true;
}
@Override
public boolean unsubscribe(String destination) {
return subscribedQueues.remove(destination);
}
} | class ServiceBusQueueTemplate extends ServiceBusTemplate<ServiceBusQueueClientFactory>
implements ServiceBusQueueOperation {
private static final Logger LOGGER = LoggerFactory.getLogger(ServiceBusQueueTemplate.class);
private static final String MSG_FAIL_CHECKPOINT = "Failed to checkpoint %s in queue '%s'";
private static final String MSG_SUCCESS_CHECKPOINT = "Checkpointed %s in queue '%s' in %s mode";
private final Set<String> subscribedQueues = ConcurrentHashMap.newKeySet();
public ServiceBusQueueTemplate(ServiceBusQueueClientFactory clientFactory) {
super(clientFactory);
}
public ServiceBusQueueTemplate(ServiceBusQueueClientFactory clientFactory,
ServiceBusMessageConverter messageConverter) {
super(clientFactory, messageConverter);
}
/**
* Register a message handler to receive message from the queue. A session handler will be registered if session is
* enabled.
*
* @param name The queue name.
* @param consumer The consumer method.
* @param payloadType The type of the message payload.
* @throws ServiceBusRuntimeException If fail to register the queue message handler.
*/
@Override
public void setClientConfig(@NonNull ServiceBusClientConfig clientConfig) {
this.clientConfig = clientConfig;
}
@Override
public <T> void deadLetter(String destination,
Message<T> message,
String deadLetterReason,
String deadLetterErrorDescription) {
Assert.hasText(destination, "destination can't be null or empty");
final ServiceBusReceivedMessageContext messageContext = (ServiceBusReceivedMessageContext) message.getHeaders()
.get(
ServiceBusMessageHeaders.RECEIVED_MESSAGE_CONTEXT);
if (messageContext != null) {
messageContext.deadLetter();
}
}
@Override
public <T> void abandon(String destination, Message<T> message) {
Assert.hasText(destination, "destination can't be null or empty");
final ServiceBusReceivedMessageContext messageContext = (ServiceBusReceivedMessageContext) message.getHeaders()
.get(
ServiceBusMessageHeaders.RECEIVED_MESSAGE_CONTEXT);
if (messageContext != null) {
messageContext.abandon();
}
}
@Override
@SuppressWarnings("unchecked")
public boolean subscribe(String destination,
@NonNull Consumer<Message<?>> consumer,
@NonNull Class<?> targetPayloadClass) {
Assert.hasText(destination, "destination can't be null or empty");
if (subscribedQueues.contains(destination)) {
return false;
}
subscribedQueues.add(destination);
internalSubscribe(destination, consumer, targetPayloadClass);
return true;
}
@Override
public boolean unsubscribe(String destination) {
return subscribedQueues.remove(destination);
}
} |
How about not introduce breaking change by doing like this: ``` Stream.of("azure.keyvault.jca.certificates-refresh-interval-in-ms", "azure.keyvault.jca.certificates-refresh-interval") .map(System::getProperty) .map(Long::valueOf) .filter(Object::nonNull) .findFirst() .orElse(0L) ``` And write unit test for this logic. | public KeyVaultKeyStore() {
creationDate = new Date();
String keyVaultUri = System.getProperty("azure.keyvault.uri");
String tenantId = System.getProperty("azure.keyvault.tenant-id");
String clientId = System.getProperty("azure.keyvault.client-id");
String clientSecret = System.getProperty("azure.keyvault.client-secret");
String managedIdentity = System.getProperty("azure.keyvault.managed-identity");
long refreshInterval = Optional.of("azure.keyvault.jca.certificates-refresh-interval-in-ms")
.map(System::getProperty)
.map(Long::valueOf)
.orElse(0L);
refreshCertificatesWhenHaveUnTrustCertificate =
Optional.of("azure.keyvault.jca.refresh-certificates-when-have-un-trust-certificate")
.map(System::getProperty)
.map(Boolean::parseBoolean)
.orElse(false);
jreCertificates = JreCertificates.getInstance();
wellKnowCertificates = SpecificPathCertificates.getSpecificPathCertificates(wellKnowPath);
customCertificates = SpecificPathCertificates.getSpecificPathCertificates(customPath);
keyVaultCertificates = new KeyVaultCertificates(
refreshInterval, keyVaultUri, tenantId, clientId, clientSecret, managedIdentity);
classpathCertificates = new ClasspathCertificates();
allCertificates = Arrays.asList(
jreCertificates, wellKnowCertificates, customCertificates, keyVaultCertificates, classpathCertificates);
} | long refreshInterval = Optional.of("azure.keyvault.jca.certificates-refresh-interval-in-ms") | public KeyVaultKeyStore() {
creationDate = new Date();
String keyVaultUri = System.getProperty("azure.keyvault.uri");
String tenantId = System.getProperty("azure.keyvault.tenant-id");
String clientId = System.getProperty("azure.keyvault.client-id");
String clientSecret = System.getProperty("azure.keyvault.client-secret");
String managedIdentity = System.getProperty("azure.keyvault.managed-identity");
long refreshInterval = getRefreshInterval();
refreshCertificatesWhenHaveUnTrustCertificate =
Optional.of("azure.keyvault.jca.refresh-certificates-when-have-un-trust-certificate")
.map(System::getProperty)
.map(Boolean::parseBoolean)
.orElse(false);
jreCertificates = JreCertificates.getInstance();
wellKnowCertificates = SpecificPathCertificates.getSpecificPathCertificates(wellKnowPath);
customCertificates = SpecificPathCertificates.getSpecificPathCertificates(customPath);
keyVaultCertificates = new KeyVaultCertificates(
refreshInterval, keyVaultUri, tenantId, clientId, clientSecret, managedIdentity);
classpathCertificates = new ClasspathCertificates();
allCertificates = Arrays.asList(
jreCertificates, wellKnowCertificates, customCertificates, keyVaultCertificates, classpathCertificates);
} | class KeyVaultKeyStore extends KeyStoreSpi {
/**
* Stores the key-store name.
*/
public static final String KEY_STORE_TYPE = "AzureKeyVault";
/**
* Stores the algorithm name.
*/
public static final String ALGORITHM_NAME = KEY_STORE_TYPE;
/**
* Stores the logger.
*/
private static final Logger LOGGER = Logger.getLogger(KeyVaultKeyStore.class.getName());
/**
* Stores the Jre key store certificates.
*/
private final JreCertificates jreCertificates;
/**
* Store well Know certificates loaded from specific path.
*/
private final SpecificPathCertificates wellKnowCertificates;
/**
* Store custom certificates loaded from specific path.
*/
private final SpecificPathCertificates customCertificates;
/**
* Store certificates loaded from KeyVault.
*/
private final KeyVaultCertificates keyVaultCertificates;
/**
* Store certificates loaded from classpath.
*/
private final ClasspathCertificates classpathCertificates;
/**
* Stores all the certificates.
*/
private final List<AzureCertificates> allCertificates;
/**
* Stores the creation date.
*/
private final Date creationDate;
private final boolean refreshCertificatesWhenHaveUnTrustCertificate;
/**
* Store the path where the well know certificate is placed
*/
final String wellKnowPath = Optional.ofNullable(System.getProperty("azure.cert-path.well-known"))
.orElse("/etc/certs/well-known/");
/**
* Store the path where the custom certificate is placed
*/
final String customPath = Optional.ofNullable(System.getProperty("azure.cert-path.custom"))
.orElse("/etc/certs/custom/");
/**
* Constructor.
*
* <p>
* The constructor uses System.getProperty for
* <code>azure.keyvault.uri</code>,
* <code>azure.keyvault.aadAuthenticationUrl</code>,
* <code>azure.keyvault.tenantId</code>,
* <code>azure.keyvault.clientId</code>,
* <code>azure.keyvault.clientSecret</code> and
* <code>azure.keyvault.managedIdentity</code> to initialize the
* Key Vault client.
* </p>
*/
/**
* get key vault key store by system property
*
* @return KeyVault key store
* @throws CertificateException if any of the certificates in the
* keystore could not be loaded
* @throws NoSuchAlgorithmException when algorithm is unavailable.
* @throws KeyStoreException when no Provider supports a KeyStoreSpi implementation for the specified type
* @throws IOException when an I/O error occurs.
*/
public static KeyStore getKeyVaultKeyStoreBySystemProperty() throws CertificateException, NoSuchAlgorithmException, KeyStoreException, IOException {
KeyStore keyStore = KeyStore.getInstance(KeyVaultJcaProvider.PROVIDER_NAME);
KeyVaultLoadStoreParameter parameter = new KeyVaultLoadStoreParameter(
System.getProperty("azure.keyvault.uri"),
System.getProperty("azure.keyvault.tenant-id"),
System.getProperty("azure.keyvault.client-id"),
System.getProperty("azure.keyvault.client-secret"),
System.getProperty("azure.keyvault.managed-identity"));
keyStore.load(parameter);
return keyStore;
}
@Override
public Enumeration<String> engineAliases() {
return Collections.enumeration(getAllAliases());
}
@Override
public boolean engineContainsAlias(String alias) {
return engineIsCertificateEntry(alias);
}
@Override
public void engineDeleteEntry(String alias) {
allCertificates.forEach(a -> a.deleteEntry(alias));
}
@Override
public boolean engineEntryInstanceOf(String alias, Class<? extends KeyStore.Entry> entryClass) {
return super.engineEntryInstanceOf(alias, entryClass);
}
@Override
public Certificate engineGetCertificate(String alias) {
Certificate certificate = allCertificates.stream()
.map(AzureCertificates::getCertificates)
.filter(a -> a.containsKey(alias))
.findFirst()
.map(certificates -> certificates.get(alias))
.orElse(null);
if (refreshCertificatesWhenHaveUnTrustCertificate && certificate == null) {
keyVaultCertificates.refreshCertificates();
certificate = keyVaultCertificates.getCertificates().get(alias);
}
return certificate;
}
@Override
public String engineGetCertificateAlias(Certificate cert) {
String alias = null;
if (cert != null) {
List<String> aliasList = getAllAliases();
for (String candidateAlias : aliasList) {
Certificate certificate = engineGetCertificate(candidateAlias);
if (certificate.equals(cert)) {
alias = candidateAlias;
break;
}
}
}
if (refreshCertificatesWhenHaveUnTrustCertificate && alias == null) {
alias = keyVaultCertificates.refreshAndGetAliasByCertificate(cert);
}
return alias;
}
@Override
public Certificate[] engineGetCertificateChain(String alias) {
Certificate[] chain = null;
Certificate certificate = engineGetCertificate(alias);
if (certificate != null) {
chain = new Certificate[1];
chain[0] = certificate;
}
return chain;
}
@Override
public Date engineGetCreationDate(String alias) {
return new Date(creationDate.getTime());
}
@Override
public KeyStore.Entry engineGetEntry(String alias, KeyStore.ProtectionParameter protParam) throws KeyStoreException, NoSuchAlgorithmException, UnrecoverableEntryException {
return super.engineGetEntry(alias, protParam);
}
@Override
public Key engineGetKey(String alias, char[] password) {
return allCertificates.stream()
.map(AzureCertificates::getCertificateKeys)
.filter(a -> a.containsKey(alias))
.findFirst()
.map(certificateKeys -> certificateKeys.get(alias))
.orElse(null);
}
@Override
public boolean engineIsCertificateEntry(String alias) {
return getAllAliases().contains(alias);
}
@Override
public boolean engineIsKeyEntry(String alias) {
return engineIsCertificateEntry(alias);
}
@Override
public void engineLoad(KeyStore.LoadStoreParameter param) {
if (param instanceof KeyVaultLoadStoreParameter) {
KeyVaultLoadStoreParameter parameter = (KeyVaultLoadStoreParameter) param;
keyVaultCertificates.updateKeyVaultClient(parameter.getUri(), parameter.getTenantId(),
parameter.getClientId(), parameter.getClientSecret(), parameter.getManagedIdentity());
}
classpathCertificates.loadCertificatesFromClasspath();
}
@Override
public void engineLoad(InputStream stream, char[] password) {
classpathCertificates.loadCertificatesFromClasspath();
}
private List<String> getAllAliases() {
List<String> allAliases = new ArrayList<>(jreCertificates.getAliases());
Map<String, List<String>> aliasLists = new HashMap<>();
aliasLists.put("well known certificates", wellKnowCertificates.getAliases());
aliasLists.put("custom certificates", customCertificates.getAliases());
aliasLists.put("key vault certificates", keyVaultCertificates.getAliases());
aliasLists.put("class path certificates", classpathCertificates.getAliases());
aliasLists.forEach((certificatesType, certificates) -> certificates.forEach(alias -> {
if (allAliases.contains(alias)) {
LOGGER.log(FINE, String.format("The certificate %s under %s already exists", alias, certificatesType));
} else {
allAliases.add(alias);
}
}));
return allAliases;
}
@Override
public void engineSetCertificateEntry(String alias, Certificate certificate) {
if (getAllAliases().contains(alias)) {
LOGGER.log(WARNING, "Cannot overwrite own certificate");
return;
}
classpathCertificates.setCertificateEntry(alias, certificate);
}
@Override
public void engineSetEntry(String alias, KeyStore.Entry entry, KeyStore.ProtectionParameter protParam) throws KeyStoreException {
super.engineSetEntry(alias, entry, protParam);
}
@Override
public void engineSetKeyEntry(String alias, Key key, char[] password, Certificate[] chain) {
}
@Override
public void engineSetKeyEntry(String alias, byte[] key, Certificate[] chain) {
}
@Override
public int engineSize() {
return getAllAliases().size();
}
@Override
public void engineStore(OutputStream stream, char[] password) {
}
@Override
public void engineStore(KeyStore.LoadStoreParameter param) {
}
} | class KeyVaultKeyStore extends KeyStoreSpi {
/**
* Stores the key-store name.
*/
public static final String KEY_STORE_TYPE = "AzureKeyVault";
/**
* Stores the algorithm name.
*/
public static final String ALGORITHM_NAME = KEY_STORE_TYPE;
/**
* Stores the logger.
*/
private static final Logger LOGGER = Logger.getLogger(KeyVaultKeyStore.class.getName());
/**
* Stores the Jre key store certificates.
*/
private final JreCertificates jreCertificates;
/**
* Store well Know certificates loaded from specific path.
*/
private final SpecificPathCertificates wellKnowCertificates;
/**
* Store custom certificates loaded from specific path.
*/
private final SpecificPathCertificates customCertificates;
/**
* Store certificates loaded from KeyVault.
*/
private final KeyVaultCertificates keyVaultCertificates;
/**
* Store certificates loaded from classpath.
*/
private final ClasspathCertificates classpathCertificates;
/**
* Stores all the certificates.
*/
private final List<AzureCertificates> allCertificates;
/**
* Stores the creation date.
*/
private final Date creationDate;
private final boolean refreshCertificatesWhenHaveUnTrustCertificate;
/**
* Store the path where the well know certificate is placed
*/
final String wellKnowPath = Optional.ofNullable(System.getProperty("azure.cert-path.well-known"))
.orElse("/etc/certs/well-known/");
/**
* Store the path where the custom certificate is placed
*/
final String customPath = Optional.ofNullable(System.getProperty("azure.cert-path.custom"))
.orElse("/etc/certs/custom/");
/**
* Constructor.
*
* <p>
* The constructor uses System.getProperty for
* <code>azure.keyvault.uri</code>,
* <code>azure.keyvault.aadAuthenticationUrl</code>,
* <code>azure.keyvault.tenantId</code>,
* <code>azure.keyvault.clientId</code>,
* <code>azure.keyvault.clientSecret</code> and
* <code>azure.keyvault.managedIdentity</code> to initialize the
* Key Vault client.
* </p>
*/
Long getRefreshInterval() {
return Stream.of("azure.keyvault.jca.certificates-refresh-interval-in-ms", "azure.keyvault.jca.certificates-refresh-interval")
.map(System::getProperty)
.filter(Objects::nonNull)
.map(Long::valueOf)
.findFirst()
.orElse(0L);
}
/**
* get key vault key store by system property
*
* @return KeyVault key store
* @throws CertificateException if any of the certificates in the
* keystore could not be loaded
* @throws NoSuchAlgorithmException when algorithm is unavailable.
* @throws KeyStoreException when no Provider supports a KeyStoreSpi implementation for the specified type
* @throws IOException when an I/O error occurs.
*/
public static KeyStore getKeyVaultKeyStoreBySystemProperty() throws CertificateException, NoSuchAlgorithmException, KeyStoreException, IOException {
KeyStore keyStore = KeyStore.getInstance(KeyVaultJcaProvider.PROVIDER_NAME);
KeyVaultLoadStoreParameter parameter = new KeyVaultLoadStoreParameter(
System.getProperty("azure.keyvault.uri"),
System.getProperty("azure.keyvault.tenant-id"),
System.getProperty("azure.keyvault.client-id"),
System.getProperty("azure.keyvault.client-secret"),
System.getProperty("azure.keyvault.managed-identity"));
keyStore.load(parameter);
return keyStore;
}
@Override
public Enumeration<String> engineAliases() {
return Collections.enumeration(getAllAliases());
}
@Override
public boolean engineContainsAlias(String alias) {
return engineIsCertificateEntry(alias);
}
@Override
public void engineDeleteEntry(String alias) {
allCertificates.forEach(a -> a.deleteEntry(alias));
}
@Override
public boolean engineEntryInstanceOf(String alias, Class<? extends KeyStore.Entry> entryClass) {
return super.engineEntryInstanceOf(alias, entryClass);
}
@Override
public Certificate engineGetCertificate(String alias) {
Certificate certificate = allCertificates.stream()
.map(AzureCertificates::getCertificates)
.filter(a -> a.containsKey(alias))
.findFirst()
.map(certificates -> certificates.get(alias))
.orElse(null);
if (refreshCertificatesWhenHaveUnTrustCertificate && certificate == null) {
keyVaultCertificates.refreshCertificates();
certificate = keyVaultCertificates.getCertificates().get(alias);
}
return certificate;
}
@Override
public String engineGetCertificateAlias(Certificate cert) {
String alias = null;
if (cert != null) {
List<String> aliasList = getAllAliases();
for (String candidateAlias : aliasList) {
Certificate certificate = engineGetCertificate(candidateAlias);
if (certificate.equals(cert)) {
alias = candidateAlias;
break;
}
}
}
if (refreshCertificatesWhenHaveUnTrustCertificate && alias == null) {
alias = keyVaultCertificates.refreshAndGetAliasByCertificate(cert);
}
return alias;
}
@Override
public Certificate[] engineGetCertificateChain(String alias) {
Certificate[] chain = null;
Certificate certificate = engineGetCertificate(alias);
if (certificate != null) {
chain = new Certificate[1];
chain[0] = certificate;
}
return chain;
}
@Override
public Date engineGetCreationDate(String alias) {
return new Date(creationDate.getTime());
}
@Override
public KeyStore.Entry engineGetEntry(String alias, KeyStore.ProtectionParameter protParam) throws KeyStoreException, NoSuchAlgorithmException, UnrecoverableEntryException {
return super.engineGetEntry(alias, protParam);
}
@Override
public Key engineGetKey(String alias, char[] password) {
return allCertificates.stream()
.map(AzureCertificates::getCertificateKeys)
.filter(a -> a.containsKey(alias))
.findFirst()
.map(certificateKeys -> certificateKeys.get(alias))
.orElse(null);
}
@Override
public boolean engineIsCertificateEntry(String alias) {
return getAllAliases().contains(alias);
}
@Override
public boolean engineIsKeyEntry(String alias) {
return engineIsCertificateEntry(alias);
}
@Override
public void engineLoad(KeyStore.LoadStoreParameter param) {
if (param instanceof KeyVaultLoadStoreParameter) {
KeyVaultLoadStoreParameter parameter = (KeyVaultLoadStoreParameter) param;
keyVaultCertificates.updateKeyVaultClient(parameter.getUri(), parameter.getTenantId(),
parameter.getClientId(), parameter.getClientSecret(), parameter.getManagedIdentity());
}
classpathCertificates.loadCertificatesFromClasspath();
}
@Override
public void engineLoad(InputStream stream, char[] password) {
classpathCertificates.loadCertificatesFromClasspath();
}
private List<String> getAllAliases() {
List<String> allAliases = new ArrayList<>(jreCertificates.getAliases());
Map<String, List<String>> aliasLists = new HashMap<>();
aliasLists.put("well known certificates", wellKnowCertificates.getAliases());
aliasLists.put("custom certificates", customCertificates.getAliases());
aliasLists.put("key vault certificates", keyVaultCertificates.getAliases());
aliasLists.put("class path certificates", classpathCertificates.getAliases());
aliasLists.forEach((certificatesType, certificates) -> certificates.forEach(alias -> {
if (allAliases.contains(alias)) {
LOGGER.log(FINE, String.format("The certificate %s under %s already exists", alias, certificatesType));
} else {
allAliases.add(alias);
}
}));
return allAliases;
}
@Override
public void engineSetCertificateEntry(String alias, Certificate certificate) {
if (getAllAliases().contains(alias)) {
LOGGER.log(WARNING, "Cannot overwrite own certificate");
return;
}
classpathCertificates.setCertificateEntry(alias, certificate);
}
@Override
public void engineSetEntry(String alias, KeyStore.Entry entry, KeyStore.ProtectionParameter protParam) throws KeyStoreException {
super.engineSetEntry(alias, entry, protParam);
}
@Override
public void engineSetKeyEntry(String alias, Key key, char[] password, Certificate[] chain) {
}
@Override
public void engineSetKeyEntry(String alias, byte[] key, Certificate[] chain) {
}
@Override
public int engineSize() {
return getAllAliases().size();
}
@Override
public void engineStore(OutputStream stream, char[] password) {
}
@Override
public void engineStore(KeyStore.LoadStoreParameter param) {
}
} |
when string to sign format changes we need to add new branch here. | private String stringToSign(String canonicalName) {
String versionSegment = this.snapshotId == null ? this.versionId : this.snapshotId;
return String.join("\n",
this.permissions == null ? "" : permissions,
this.startTime == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(this.startTime),
this.expiryTime == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(this.expiryTime),
canonicalName,
this.identifier == null ? "" : this.identifier,
this.sasIpRange == null ? "" : this.sasIpRange.toString(),
this.protocol == null ? "" : this.protocol.toString(),
VERSION,
resource,
versionSegment == null ? "" : versionSegment,
this.encryptionScope == null ? "" : this.encryptionScope,
this.cacheControl == null ? "" : this.cacheControl,
this.contentDisposition == null ? "" : this.contentDisposition,
this.contentEncoding == null ? "" : this.contentEncoding,
this.contentLanguage == null ? "" : this.contentLanguage,
this.contentType == null ? "" : this.contentType
);
} | this.encryptionScope == null ? "" : this.encryptionScope, | private String stringToSign(String canonicalName) {
String versionSegment = this.snapshotId == null ? this.versionId : this.snapshotId;
if (VERSION.compareTo(BlobServiceVersion.V2020_10_02.getVersion()) <= 0) {
return String.join("\n",
this.permissions == null ? "" : permissions,
this.startTime == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(this.startTime),
this.expiryTime == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(this.expiryTime),
canonicalName,
this.identifier == null ? "" : this.identifier,
this.sasIpRange == null ? "" : this.sasIpRange.toString(),
this.protocol == null ? "" : this.protocol.toString(),
VERSION,
resource,
versionSegment == null ? "" : versionSegment,
this.cacheControl == null ? "" : this.cacheControl,
this.contentDisposition == null ? "" : this.contentDisposition,
this.contentEncoding == null ? "" : this.contentEncoding,
this.contentLanguage == null ? "" : this.contentLanguage,
this.contentType == null ? "" : this.contentType
);
} else {
return String.join("\n",
this.permissions == null ? "" : permissions,
this.startTime == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(this.startTime),
this.expiryTime == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(this.expiryTime),
canonicalName,
this.identifier == null ? "" : this.identifier,
this.sasIpRange == null ? "" : this.sasIpRange.toString(),
this.protocol == null ? "" : this.protocol.toString(),
VERSION,
resource,
versionSegment == null ? "" : versionSegment,
this.encryptionScope == null ? "" : this.encryptionScope,
this.cacheControl == null ? "" : this.cacheControl,
this.contentDisposition == null ? "" : this.contentDisposition,
this.contentEncoding == null ? "" : this.contentEncoding,
this.contentLanguage == null ? "" : this.contentLanguage,
this.contentType == null ? "" : this.contentType
);
}
} | class BlobSasImplUtil {
/**
* The SAS blob constant.
*/
private static final String SAS_BLOB_CONSTANT = "b";
/**
* The SAS blob snapshot constant.
*/
private static final String SAS_BLOB_SNAPSHOT_CONSTANT = "bs";
/**
* The SAS blob version constant.
*/
private static final String SAS_BLOB_VERSION_CONSTANT = "bv";
/**
* The SAS blob container constant.
*/
private static final String SAS_CONTAINER_CONSTANT = "c";
private static final ClientLogger LOGGER = new ClientLogger(BlobSasImplUtil.class);
private static final String VERSION = Configuration.getGlobalConfiguration()
.get(Constants.PROPERTY_AZURE_STORAGE_SAS_SERVICE_VERSION, BlobServiceVersion.getLatest().getVersion());
private SasProtocol protocol;
private OffsetDateTime startTime;
private OffsetDateTime expiryTime;
private String permissions;
private SasIpRange sasIpRange;
private String containerName;
private String blobName;
private String resource;
private String snapshotId;
private String versionId;
private String identifier;
private String cacheControl;
private String contentDisposition;
private String contentEncoding;
private String contentLanguage;
private String contentType;
private String authorizedAadObjectId;
private String correlationId;
private String encryptionScope;
/**
* Creates a new {@link BlobSasImplUtil} with the specified parameters
*
* @param sasValues {@link BlobServiceSasSignatureValues}
* @param containerName The container name
*/
public BlobSasImplUtil(BlobServiceSasSignatureValues sasValues, String containerName) {
this(sasValues, containerName, null, null, null);
}
/**
* Creates a new {@link BlobSasImplUtil} with the specified parameters
*
* @param sasValues {@link BlobServiceSasSignatureValues}
* @param containerName The container name
* @param blobName The blob name
* @param snapshotId The snapshot id
* @param versionId The version id
*/
public BlobSasImplUtil(BlobServiceSasSignatureValues sasValues, String containerName, String blobName,
String snapshotId, String versionId) {
this(sasValues, containerName, blobName, snapshotId, versionId, null);
}
/**
* Creates a new {@link BlobSasImplUtil} with the specified parameters
*
* @param sasValues {@link BlobServiceSasSignatureValues}
* @param containerName The container name
* @param blobName The blob name
* @param snapshotId The snapshot id
* @param versionId The version id
* @param encryptionScope The encryption scope
*/
public BlobSasImplUtil(BlobServiceSasSignatureValues sasValues, String containerName, String blobName,
String snapshotId, String versionId, String encryptionScope) {
Objects.requireNonNull(sasValues);
if (snapshotId != null && versionId != null) {
throw LOGGER.logExceptionAsError(
new IllegalArgumentException("'snapshot' and 'versionId' cannot be used at the same time."));
}
this.protocol = sasValues.getProtocol();
this.startTime = sasValues.getStartTime();
this.expiryTime = sasValues.getExpiryTime();
this.permissions = sasValues.getPermissions();
this.sasIpRange = sasValues.getSasIpRange();
this.containerName = containerName;
this.blobName = blobName;
this.snapshotId = snapshotId;
this.versionId = versionId;
this.identifier = sasValues.getIdentifier();
this.cacheControl = sasValues.getCacheControl();
this.contentDisposition = sasValues.getContentDisposition();
this.contentEncoding = sasValues.getContentEncoding();
this.contentLanguage = sasValues.getContentLanguage();
this.contentType = sasValues.getContentType();
this.authorizedAadObjectId = sasValues.getPreauthorizedAgentObjectId();
this.correlationId = sasValues.getCorrelationId();
/*
Prefer the encryption scope explicitly set on the sas values. If none present, fallback to the value on the
client.
*/
this.encryptionScope = sasValues.getEncryptionScope() == null
? encryptionScope : sasValues.getEncryptionScope();
}
/**
* Generates a Sas signed with a {@link StorageSharedKeyCredential}
*
* @param storageSharedKeyCredentials {@link StorageSharedKeyCredential}
* @param context Additional context that is passed through the code when generating a SAS.
* @return A String representing the Sas
*/
public String generateSas(StorageSharedKeyCredential storageSharedKeyCredentials, Context context) {
StorageImplUtils.assertNotNull("storageSharedKeyCredentials", storageSharedKeyCredentials);
ensureState();
final String canonicalName = getCanonicalName(storageSharedKeyCredentials.getAccountName());
final String stringToSign = stringToSign(canonicalName);
StorageImplUtils.logStringToSign(LOGGER, stringToSign, context);
final String signature = storageSharedKeyCredentials.computeHmac256(stringToSign);
return encode(null /* userDelegationKey */, signature);
}
/**
* Generates a Sas signed with a {@link UserDelegationKey}
*
* @param delegationKey {@link UserDelegationKey}
* @param accountName The account name
* @param context Additional context that is passed through the code when generating a SAS.
* @return A String representing the Sas
*/
public String generateUserDelegationSas(UserDelegationKey delegationKey, String accountName, Context context) {
StorageImplUtils.assertNotNull("delegationKey", delegationKey);
StorageImplUtils.assertNotNull("accountName", accountName);
ensureState();
final String canonicalName = getCanonicalName(accountName);
final String stringToSign = stringToSign(delegationKey, canonicalName);
StorageImplUtils.logStringToSign(LOGGER, stringToSign, context);
String signature = StorageImplUtils.computeHMac256(delegationKey.getValue(), stringToSign);
return encode(delegationKey, signature);
}
/**
* Encodes a Sas from the values in this type.
* @param userDelegationKey {@link UserDelegationKey}
* @param signature The signature of the Sas.
* @return A String representing the Sas.
*/
private String encode(UserDelegationKey userDelegationKey, String signature) {
/*
We should be url-encoding each key and each value, but because we know all the keys and values will encode to
themselves, we cheat except for the signature value.
*/
StringBuilder sb = new StringBuilder();
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_SERVICE_VERSION, VERSION);
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_PROTOCOL, this.protocol);
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_START_TIME, formatQueryParameterDate(this.startTime));
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_EXPIRY_TIME, formatQueryParameterDate(this.expiryTime));
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_IP_RANGE, this.sasIpRange);
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_SIGNED_IDENTIFIER, this.identifier);
if (userDelegationKey != null) {
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_SIGNED_OBJECT_ID,
userDelegationKey.getSignedObjectId());
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_SIGNED_TENANT_ID,
userDelegationKey.getSignedTenantId());
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_SIGNED_KEY_START,
formatQueryParameterDate(userDelegationKey.getSignedStart()));
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_SIGNED_KEY_EXPIRY,
formatQueryParameterDate(userDelegationKey.getSignedExpiry()));
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_SIGNED_KEY_SERVICE,
userDelegationKey.getSignedService());
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_SIGNED_KEY_VERSION,
userDelegationKey.getSignedVersion());
/* Only parameters relevant for user delegation SAS. */
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_PREAUTHORIZED_AGENT_OBJECT_ID, this.authorizedAadObjectId);
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_CORRELATION_ID, this.correlationId);
}
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_SIGNED_RESOURCE, this.resource);
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_SIGNED_PERMISSIONS, this.permissions);
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_SIGNATURE, signature);
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_ENCRYPTION_SCOPE, this.encryptionScope);
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_CACHE_CONTROL, this.cacheControl);
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_CONTENT_DISPOSITION, this.contentDisposition);
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_CONTENT_ENCODING, this.contentEncoding);
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_CONTENT_LANGUAGE, this.contentLanguage);
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_CONTENT_TYPE, this.contentType);
return sb.toString();
}
/**
* Ensures that the builder's properties are in a consistent state.
* 1. If there is no version, use latest.
* 2. If there is no identifier set, ensure expiryTime and permissions are set.
* 3. Resource name is chosen by:
* a. If "BlobName" is _not_ set, it is a container resource.
* b. Otherwise, if "SnapshotId" is set, it is a blob snapshot resource.
* c. Otherwise, if "VersionId" is set, it is a blob version resource.
* d. Otherwise, it is a blob resource.
* 4. Reparse permissions depending on what the resource is. If it is an unrecognized resource, do nothing.
*
* Taken from:
* https:
* https:
*/
private void ensureState() {
if (identifier == null) {
if (expiryTime == null || permissions == null) {
throw LOGGER.logExceptionAsError(new IllegalStateException("If identifier is not set, expiry time "
+ "and permissions must be set"));
}
}
if (CoreUtils.isNullOrEmpty(blobName)) {
resource = SAS_CONTAINER_CONSTANT;
} else if (snapshotId != null) {
resource = SAS_BLOB_SNAPSHOT_CONSTANT;
} else if (versionId != null) {
resource = SAS_BLOB_VERSION_CONSTANT;
} else {
resource = SAS_BLOB_CONSTANT;
}
if (permissions != null) {
switch (resource) {
case SAS_BLOB_CONSTANT:
case SAS_BLOB_SNAPSHOT_CONSTANT:
case SAS_BLOB_VERSION_CONSTANT:
permissions = BlobSasPermission.parse(permissions).toString();
break;
case SAS_CONTAINER_CONSTANT:
permissions = BlobContainerSasPermission.parse(permissions).toString();
break;
default:
LOGGER.info("Not re-parsing permissions. Resource type '{}' is unknown.", resource);
break;
}
}
}
/**
* Computes the canonical name for a container or blob resource for SAS signing.
*/
private String getCanonicalName(String account) {
return CoreUtils.isNullOrEmpty(blobName)
? String.format("/blob/%s/%s", account, containerName)
: String.format("/blob/%s/%s/%s", account, containerName, blobName.replace("\\", "/"));
}
/**
 * Builds the user-delegation-key string-to-sign for the configured service
 * {@code VERSION}. The number and order of newline-delimited fields are fixed by
 * the service's SAS specification and differ per version:
 * <ul>
 *     <li>&le; 2019-12-12: no preauthorized agent object id / suoid / correlation id.</li>
 *     <li>&le; 2020-10-02: adds saoid/suoid/scid, but still no encryption scope.</li>
 *     <li>newer versions: additionally include the encryption scope.</li>
 * </ul>
 *
 * @param key The user delegation key whose signed properties are embedded in the signature.
 * @param canonicalName The canonical resource name from {@link #getCanonicalName(String)}.
 * @return The newline-delimited string-to-sign.
 */
private String stringToSign(final UserDelegationKey key, String canonicalName) {
    // A blob can carry either a snapshot id or a version id, never both
    // (enforced in the constructor).
    String versionSegment = this.snapshotId == null ? this.versionId : this.snapshotId;
    if (VERSION.compareTo(BlobServiceVersion.V2019_12_12.getVersion()) <= 0) {
        return String.join("\n",
            this.permissions == null ? "" : this.permissions,
            this.startTime == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(this.startTime),
            this.expiryTime == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(this.expiryTime),
            canonicalName,
            key.getSignedObjectId() == null ? "" : key.getSignedObjectId(),
            key.getSignedTenantId() == null ? "" : key.getSignedTenantId(),
            key.getSignedStart() == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(key.getSignedStart()),
            key.getSignedExpiry() == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(key.getSignedExpiry()),
            key.getSignedService() == null ? "" : key.getSignedService(),
            key.getSignedVersion() == null ? "" : key.getSignedVersion(),
            this.sasIpRange == null ? "" : this.sasIpRange.toString(),
            this.protocol == null ? "" : this.protocol.toString(),
            VERSION,
            resource,
            versionSegment == null ? "" : versionSegment,
            this.cacheControl == null ? "" : this.cacheControl,
            this.contentDisposition == null ? "" : this.contentDisposition,
            this.contentEncoding == null ? "" : this.contentEncoding,
            this.contentLanguage == null ? "" : this.contentLanguage,
            this.contentType == null ? "" : this.contentType
        );
    } else if (VERSION.compareTo(BlobServiceVersion.V2020_10_02.getVersion()) <= 0) {
        // FIX: encryption scope is not part of the string-to-sign until service
        // version 2020-12-06; including it for 2020-02-10..2020-10-02 produces a
        // signature the service rejects.
        return String.join("\n",
            this.permissions == null ? "" : this.permissions,
            this.startTime == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(this.startTime),
            this.expiryTime == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(this.expiryTime),
            canonicalName,
            key.getSignedObjectId() == null ? "" : key.getSignedObjectId(),
            key.getSignedTenantId() == null ? "" : key.getSignedTenantId(),
            key.getSignedStart() == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(key.getSignedStart()),
            key.getSignedExpiry() == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(key.getSignedExpiry()),
            key.getSignedService() == null ? "" : key.getSignedService(),
            key.getSignedVersion() == null ? "" : key.getSignedVersion(),
            this.authorizedAadObjectId == null ? "" : this.authorizedAadObjectId,
            "", /* suoid - empty since this applies to HNS only accounts. */
            this.correlationId == null ? "" : this.correlationId,
            this.sasIpRange == null ? "" : this.sasIpRange.toString(),
            this.protocol == null ? "" : this.protocol.toString(),
            VERSION,
            resource,
            versionSegment == null ? "" : versionSegment,
            this.cacheControl == null ? "" : this.cacheControl,
            this.contentDisposition == null ? "" : this.contentDisposition,
            this.contentEncoding == null ? "" : this.contentEncoding,
            this.contentLanguage == null ? "" : this.contentLanguage,
            this.contentType == null ? "" : this.contentType
        );
    } else {
        return String.join("\n",
            this.permissions == null ? "" : this.permissions,
            this.startTime == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(this.startTime),
            this.expiryTime == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(this.expiryTime),
            canonicalName,
            key.getSignedObjectId() == null ? "" : key.getSignedObjectId(),
            key.getSignedTenantId() == null ? "" : key.getSignedTenantId(),
            key.getSignedStart() == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(key.getSignedStart()),
            key.getSignedExpiry() == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(key.getSignedExpiry()),
            key.getSignedService() == null ? "" : key.getSignedService(),
            key.getSignedVersion() == null ? "" : key.getSignedVersion(),
            this.authorizedAadObjectId == null ? "" : this.authorizedAadObjectId,
            "", /* suoid - empty since this applies to HNS only accounts. */
            this.correlationId == null ? "" : this.correlationId,
            this.sasIpRange == null ? "" : this.sasIpRange.toString(),
            this.protocol == null ? "" : this.protocol.toString(),
            VERSION,
            resource,
            versionSegment == null ? "" : versionSegment,
            this.encryptionScope == null ? "" : this.encryptionScope,
            this.cacheControl == null ? "" : this.cacheControl,
            this.contentDisposition == null ? "" : this.contentDisposition,
            this.contentEncoding == null ? "" : this.contentEncoding,
            this.contentLanguage == null ? "" : this.contentLanguage,
            this.contentType == null ? "" : this.contentType
        );
    }
}
} | class BlobSasImplUtil {
/**
 * The SAS blob constant.
 */
private static final String SAS_BLOB_CONSTANT = "b";
/**
 * The SAS blob snapshot constant.
 */
private static final String SAS_BLOB_SNAPSHOT_CONSTANT = "bs";
/**
 * The SAS blob version constant.
 */
private static final String SAS_BLOB_VERSION_CONSTANT = "bv";
/**
 * The SAS blob container constant.
 */
private static final String SAS_CONTAINER_CONSTANT = "c";
private static final ClientLogger LOGGER = new ClientLogger(BlobSasImplUtil.class);
// Service version stamped into the SAS; overridable through the global
// configuration property, defaulting to the latest version this SDK knows about.
private static final String VERSION = Configuration.getGlobalConfiguration()
    .get(Constants.PROPERTY_AZURE_STORAGE_SAS_SERVICE_VERSION, BlobServiceVersion.getLatest().getVersion());
// Values captured from BlobServiceSasSignatureValues / the constructor arguments;
// all optional unless required by ensureState().
private SasProtocol protocol;
private OffsetDateTime startTime;
private OffsetDateTime expiryTime;
private String permissions;
private SasIpRange sasIpRange;
private String containerName;
private String blobName;
// Signed resource type ("b", "bs", "bv" or "c"); derived in ensureState(), never set by callers.
private String resource;
private String snapshotId;
private String versionId;
private String identifier;
private String cacheControl;
private String contentDisposition;
private String contentEncoding;
private String contentLanguage;
private String contentType;
// Parameters only emitted for user-delegation SAS (saoid / scid).
private String authorizedAadObjectId;
private String correlationId;
private String encryptionScope;
/**
 * Creates a new {@link BlobSasImplUtil} for a container-level SAS: no blob name,
 * snapshot id, version id or encryption scope.
 *
 * @param sasValues {@link BlobServiceSasSignatureValues}
 * @param containerName The container name
 */
public BlobSasImplUtil(BlobServiceSasSignatureValues sasValues, String containerName) {
    this(sasValues, containerName, null, null, null, null);
}
/**
 * Creates a new {@link BlobSasImplUtil} with the specified parameters
 *
 * @param sasValues {@link BlobServiceSasSignatureValues}
 * @param containerName The container name
 * @param blobName The blob name
 * @param snapshotId The snapshot id
 * @param versionId The version id
 * @param encryptionScope The encryption scope
 * @throws NullPointerException If {@code sasValues} is null.
 * @throws IllegalArgumentException If both {@code snapshotId} and {@code versionId} are provided.
 */
public BlobSasImplUtil(BlobServiceSasSignatureValues sasValues, String containerName, String blobName,
    String snapshotId, String versionId, String encryptionScope) {
    Objects.requireNonNull(sasValues);
    // A SAS may target a snapshot or a version, never both at once.
    if (snapshotId != null && versionId != null) {
        throw LOGGER.logExceptionAsError(
            new IllegalArgumentException("'snapshot' and 'versionId' cannot be used at the same time."));
    }
    this.protocol = sasValues.getProtocol();
    this.startTime = sasValues.getStartTime();
    this.expiryTime = sasValues.getExpiryTime();
    this.permissions = sasValues.getPermissions();
    this.sasIpRange = sasValues.getSasIpRange();
    this.containerName = containerName;
    this.blobName = blobName;
    this.snapshotId = snapshotId;
    this.versionId = versionId;
    this.identifier = sasValues.getIdentifier();
    this.cacheControl = sasValues.getCacheControl();
    this.contentDisposition = sasValues.getContentDisposition();
    this.contentEncoding = sasValues.getContentEncoding();
    this.contentLanguage = sasValues.getContentLanguage();
    this.contentType = sasValues.getContentType();
    this.authorizedAadObjectId = sasValues.getPreauthorizedAgentObjectId();
    this.correlationId = sasValues.getCorrelationId();
    this.encryptionScope = encryptionScope;
}
/**
 * Generates a SAS signed with a {@link StorageSharedKeyCredential}.
 *
 * @param storageSharedKeyCredentials {@link StorageSharedKeyCredential}
 * @param context Additional context that is passed through the code when generating a SAS.
 * @return A String representing the SAS.
 */
public String generateSas(StorageSharedKeyCredential storageSharedKeyCredentials, Context context) {
    StorageImplUtils.assertNotNull("storageSharedKeyCredentials", storageSharedKeyCredentials);
    ensureState();
    // Sign the canonicalized resource plus the normalized SAS fields.
    String toSign = stringToSign(getCanonicalName(storageSharedKeyCredentials.getAccountName()));
    StorageImplUtils.logStringToSign(LOGGER, toSign, context);
    return encode(null /* userDelegationKey */, storageSharedKeyCredentials.computeHmac256(toSign));
}
/**
 * Generates a SAS signed with a {@link UserDelegationKey}.
 *
 * @param delegationKey {@link UserDelegationKey}
 * @param accountName The account name
 * @param context Additional context that is passed through the code when generating a SAS.
 * @return A String representing the SAS.
 */
public String generateUserDelegationSas(UserDelegationKey delegationKey, String accountName, Context context) {
    StorageImplUtils.assertNotNull("delegationKey", delegationKey);
    StorageImplUtils.assertNotNull("accountName", accountName);
    ensureState();
    // HMAC the version-specific string-to-sign with the delegation key's value.
    String toSign = stringToSign(delegationKey, getCanonicalName(accountName));
    StorageImplUtils.logStringToSign(LOGGER, toSign, context);
    return encode(delegationKey, StorageImplUtils.computeHMac256(delegationKey.getValue(), toSign));
}
/**
 * Encodes a Sas from the values in this type.
 * @param userDelegationKey {@link UserDelegationKey}
 * @param signature The signature of the Sas.
 * @return A String representing the Sas.
 */
private String encode(UserDelegationKey userDelegationKey, String signature) {
    /*
    We should be url-encoding each key and each value, but because we know all the keys and values will encode to
    themselves, we cheat except for the signature value.
    */
    StringBuilder sb = new StringBuilder();
    // NOTE(review): tryAppendQueryParameter presumably skips null values so only
    // populated fields appear in the query string — confirm in the helper.
    tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_SERVICE_VERSION, VERSION);
    tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_PROTOCOL, this.protocol);
    tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_START_TIME, formatQueryParameterDate(this.startTime));
    tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_EXPIRY_TIME, formatQueryParameterDate(this.expiryTime));
    tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_IP_RANGE, this.sasIpRange);
    tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_SIGNED_IDENTIFIER, this.identifier);
    // The signed-key parameters only apply to user-delegation SAS.
    if (userDelegationKey != null) {
        tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_SIGNED_OBJECT_ID,
            userDelegationKey.getSignedObjectId());
        tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_SIGNED_TENANT_ID,
            userDelegationKey.getSignedTenantId());
        tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_SIGNED_KEY_START,
            formatQueryParameterDate(userDelegationKey.getSignedStart()));
        tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_SIGNED_KEY_EXPIRY,
            formatQueryParameterDate(userDelegationKey.getSignedExpiry()));
        tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_SIGNED_KEY_SERVICE,
            userDelegationKey.getSignedService());
        tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_SIGNED_KEY_VERSION,
            userDelegationKey.getSignedVersion());
        /* Only parameters relevant for user delegation SAS. */
        tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_PREAUTHORIZED_AGENT_OBJECT_ID, this.authorizedAadObjectId);
        tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_CORRELATION_ID, this.correlationId);
    }
    tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_SIGNED_RESOURCE, this.resource);
    tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_SIGNED_PERMISSIONS, this.permissions);
    tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_SIGNATURE, signature);
    tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_ENCRYPTION_SCOPE, this.encryptionScope);
    tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_CACHE_CONTROL, this.cacheControl);
    tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_CONTENT_DISPOSITION, this.contentDisposition);
    tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_CONTENT_ENCODING, this.contentEncoding);
    tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_CONTENT_LANGUAGE, this.contentLanguage);
    tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_CONTENT_TYPE, this.contentType);
    return sb.toString();
}
/**
 * Ensures that the builder's properties are in a consistent state.
 * 1. If there is no version, use latest.
 * 2. If there is no identifier set, ensure expiryTime and permissions are set.
 * 3. Resource name is chosen by:
 * a. If "BlobName" is _not_ set, it is a container resource.
 * b. Otherwise, if "SnapshotId" is set, it is a blob snapshot resource.
 * c. Otherwise, if "VersionId" is set, it is a blob version resource.
 * d. Otherwise, it is a blob resource.
 * 4. Reparse permissions depending on what the resource is. If it is an unrecognized resource, do nothing.
 *
 * Adapted from the equivalent logic in the Azure Storage .NET and JavaScript SDKs
 * (the reference URLs were truncated during extraction).
 */
private void ensureState() {
    // Without a stored access policy identifier the SAS must be self-describing.
    if (identifier == null) {
        if (expiryTime == null || permissions == null) {
            throw LOGGER.logExceptionAsError(new IllegalStateException("If identifier is not set, expiry time "
                + "and permissions must be set"));
        }
    }
    if (CoreUtils.isNullOrEmpty(blobName)) {
        resource = SAS_CONTAINER_CONSTANT;
    } else if (snapshotId != null) {
        resource = SAS_BLOB_SNAPSHOT_CONSTANT;
    } else if (versionId != null) {
        resource = SAS_BLOB_VERSION_CONSTANT;
    } else {
        resource = SAS_BLOB_CONSTANT;
    }
    if (permissions != null) {
        // Round-trip through the permission type to validate/normalize the string.
        switch (resource) {
            case SAS_BLOB_CONSTANT:
            case SAS_BLOB_SNAPSHOT_CONSTANT:
            case SAS_BLOB_VERSION_CONSTANT:
                permissions = BlobSasPermission.parse(permissions).toString();
                break;
            case SAS_CONTAINER_CONSTANT:
                permissions = BlobContainerSasPermission.parse(permissions).toString();
                break;
            default:
                LOGGER.info("Not re-parsing permissions. Resource type '{}' is unknown.", resource);
                break;
        }
    }
}
/**
 * Computes the canonical name for a container or blob resource for SAS signing:
 * {@code /blob/<account>/<container>} or {@code /blob/<account>/<container>/<blob>}.
 */
private String getCanonicalName(String account) {
    return CoreUtils.isNullOrEmpty(blobName)
        ? String.format("/blob/%s/%s", account, containerName)
        // Backslashes in blob names are normalized to forward slashes for signing.
        : String.format("/blob/%s/%s/%s", account, containerName, blobName.replace("\\", "/"));
}
/**
 * Builds the user-delegation-key string-to-sign for the configured service
 * {@code VERSION}. The count and order of the newline-delimited fields are
 * version-specific and signature-critical — do not reorder:
 * versions up to 2019-12-12 have no saoid/suoid/scid fields; versions up to
 * 2020-10-02 add them but omit the encryption scope; newer versions include it.
 *
 * @param key The user delegation key whose signed properties are embedded in the signature.
 * @param canonicalName The canonical resource name from {@link #getCanonicalName(String)}.
 * @return The newline-delimited string-to-sign.
 */
private String stringToSign(final UserDelegationKey key, String canonicalName) {
    // A blob carries either a snapshot id or a version id, never both.
    String versionSegment = this.snapshotId == null ? this.versionId : this.snapshotId;
    if (VERSION.compareTo(BlobServiceVersion.V2019_12_12.getVersion()) <= 0) {
        return String.join("\n",
            this.permissions == null ? "" : this.permissions,
            this.startTime == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(this.startTime),
            this.expiryTime == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(this.expiryTime),
            canonicalName,
            key.getSignedObjectId() == null ? "" : key.getSignedObjectId(),
            key.getSignedTenantId() == null ? "" : key.getSignedTenantId(),
            key.getSignedStart() == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(key.getSignedStart()),
            key.getSignedExpiry() == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(key.getSignedExpiry()),
            key.getSignedService() == null ? "" : key.getSignedService(),
            key.getSignedVersion() == null ? "" : key.getSignedVersion(),
            this.sasIpRange == null ? "" : this.sasIpRange.toString(),
            this.protocol == null ? "" : this.protocol.toString(),
            VERSION,
            resource,
            versionSegment == null ? "" : versionSegment,
            this.cacheControl == null ? "" : this.cacheControl,
            this.contentDisposition == null ? "" : this.contentDisposition,
            this.contentEncoding == null ? "" : this.contentEncoding,
            this.contentLanguage == null ? "" : this.contentLanguage,
            this.contentType == null ? "" : this.contentType
        );
    } else if (VERSION.compareTo(BlobServiceVersion.V2020_10_02.getVersion()) <= 0) {
        // Adds saoid/suoid/scid; the encryption scope is not signed at these versions.
        return String.join("\n",
            this.permissions == null ? "" : this.permissions,
            this.startTime == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(this.startTime),
            this.expiryTime == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(this.expiryTime),
            canonicalName,
            key.getSignedObjectId() == null ? "" : key.getSignedObjectId(),
            key.getSignedTenantId() == null ? "" : key.getSignedTenantId(),
            key.getSignedStart() == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(key.getSignedStart()),
            key.getSignedExpiry() == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(key.getSignedExpiry()),
            key.getSignedService() == null ? "" : key.getSignedService(),
            key.getSignedVersion() == null ? "" : key.getSignedVersion(),
            this.authorizedAadObjectId == null ? "" : this.authorizedAadObjectId,
            "", /* suoid - empty since this applies to HNS only accounts. */
            this.correlationId == null ? "" : this.correlationId,
            this.sasIpRange == null ? "" : this.sasIpRange.toString(),
            this.protocol == null ? "" : this.protocol.toString(),
            VERSION,
            resource,
            versionSegment == null ? "" : versionSegment,
            this.cacheControl == null ? "" : this.cacheControl,
            this.contentDisposition == null ? "" : this.contentDisposition,
            this.contentEncoding == null ? "" : this.contentEncoding,
            this.contentLanguage == null ? "" : this.contentLanguage,
            this.contentType == null ? "" : this.contentType
        );
    } else {
        // Newest versions: the encryption scope participates in the signature.
        return String.join("\n",
            this.permissions == null ? "" : this.permissions,
            this.startTime == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(this.startTime),
            this.expiryTime == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(this.expiryTime),
            canonicalName,
            key.getSignedObjectId() == null ? "" : key.getSignedObjectId(),
            key.getSignedTenantId() == null ? "" : key.getSignedTenantId(),
            key.getSignedStart() == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(key.getSignedStart()),
            key.getSignedExpiry() == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(key.getSignedExpiry()),
            key.getSignedService() == null ? "" : key.getSignedService(),
            key.getSignedVersion() == null ? "" : key.getSignedVersion(),
            this.authorizedAadObjectId == null ? "" : this.authorizedAadObjectId,
            "", /* suoid - empty since this applies to HNS only accounts. */
            this.correlationId == null ? "" : this.correlationId,
            this.sasIpRange == null ? "" : this.sasIpRange.toString(),
            this.protocol == null ? "" : this.protocol.toString(),
            VERSION,
            resource,
            versionSegment == null ? "" : versionSegment,
            this.encryptionScope == null ? "" : this.encryptionScope,
            this.cacheControl == null ? "" : this.cacheControl,
            this.contentDisposition == null ? "" : this.contentDisposition,
            this.contentEncoding == null ? "" : this.contentEncoding,
            this.contentLanguage == null ? "" : this.contentLanguage,
            this.contentType == null ? "" : this.contentType
        );
    }
}
} |
same here. | private String stringToSign(final UserDelegationKey key, String canonicalName) {
String versionSegment = this.snapshotId == null ? this.versionId : this.snapshotId;
if (VERSION.compareTo(BlobServiceVersion.V2019_12_12.getVersion()) <= 0) {
return String.join("\n",
this.permissions == null ? "" : this.permissions,
this.startTime == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(this.startTime),
this.expiryTime == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(this.expiryTime),
canonicalName,
key.getSignedObjectId() == null ? "" : key.getSignedObjectId(),
key.getSignedTenantId() == null ? "" : key.getSignedTenantId(),
key.getSignedStart() == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(key.getSignedStart()),
key.getSignedExpiry() == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(key.getSignedExpiry()),
key.getSignedService() == null ? "" : key.getSignedService(),
key.getSignedVersion() == null ? "" : key.getSignedVersion(),
this.sasIpRange == null ? "" : this.sasIpRange.toString(),
this.protocol == null ? "" : this.protocol.toString(),
VERSION,
resource,
versionSegment == null ? "" : versionSegment,
this.cacheControl == null ? "" : this.cacheControl,
this.contentDisposition == null ? "" : this.contentDisposition,
this.contentEncoding == null ? "" : this.contentEncoding,
this.contentLanguage == null ? "" : this.contentLanguage,
this.contentType == null ? "" : this.contentType
);
} else {
return String.join("\n",
this.permissions == null ? "" : this.permissions,
this.startTime == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(this.startTime),
this.expiryTime == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(this.expiryTime),
canonicalName,
key.getSignedObjectId() == null ? "" : key.getSignedObjectId(),
key.getSignedTenantId() == null ? "" : key.getSignedTenantId(),
key.getSignedStart() == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(key.getSignedStart()),
key.getSignedExpiry() == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(key.getSignedExpiry()),
key.getSignedService() == null ? "" : key.getSignedService(),
key.getSignedVersion() == null ? "" : key.getSignedVersion(),
this.authorizedAadObjectId == null ? "" : this.authorizedAadObjectId,
"", /* suoid - empty since this applies to HNS only accounts. */
this.correlationId == null ? "" : this.correlationId,
this.sasIpRange == null ? "" : this.sasIpRange.toString(),
this.protocol == null ? "" : this.protocol.toString(),
VERSION,
resource,
versionSegment == null ? "" : versionSegment,
this.encryptionScope == null ? "" : this.encryptionScope,
this.cacheControl == null ? "" : this.cacheControl,
this.contentDisposition == null ? "" : this.contentDisposition,
this.contentEncoding == null ? "" : this.contentEncoding,
this.contentLanguage == null ? "" : this.contentLanguage,
this.contentType == null ? "" : this.contentType
);
}
} | this.encryptionScope == null ? "" : this.encryptionScope, | private String stringToSign(final UserDelegationKey key, String canonicalName) {
String versionSegment = this.snapshotId == null ? this.versionId : this.snapshotId;
if (VERSION.compareTo(BlobServiceVersion.V2019_12_12.getVersion()) <= 0) {
return String.join("\n",
this.permissions == null ? "" : this.permissions,
this.startTime == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(this.startTime),
this.expiryTime == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(this.expiryTime),
canonicalName,
key.getSignedObjectId() == null ? "" : key.getSignedObjectId(),
key.getSignedTenantId() == null ? "" : key.getSignedTenantId(),
key.getSignedStart() == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(key.getSignedStart()),
key.getSignedExpiry() == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(key.getSignedExpiry()),
key.getSignedService() == null ? "" : key.getSignedService(),
key.getSignedVersion() == null ? "" : key.getSignedVersion(),
this.sasIpRange == null ? "" : this.sasIpRange.toString(),
this.protocol == null ? "" : this.protocol.toString(),
VERSION,
resource,
versionSegment == null ? "" : versionSegment,
this.cacheControl == null ? "" : this.cacheControl,
this.contentDisposition == null ? "" : this.contentDisposition,
this.contentEncoding == null ? "" : this.contentEncoding,
this.contentLanguage == null ? "" : this.contentLanguage,
this.contentType == null ? "" : this.contentType
);
} else if (VERSION.compareTo(BlobServiceVersion.V2020_10_02.getVersion()) <= 0) {
return String.join("\n",
this.permissions == null ? "" : this.permissions,
this.startTime == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(this.startTime),
this.expiryTime == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(this.expiryTime),
canonicalName,
key.getSignedObjectId() == null ? "" : key.getSignedObjectId(),
key.getSignedTenantId() == null ? "" : key.getSignedTenantId(),
key.getSignedStart() == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(key.getSignedStart()),
key.getSignedExpiry() == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(key.getSignedExpiry()),
key.getSignedService() == null ? "" : key.getSignedService(),
key.getSignedVersion() == null ? "" : key.getSignedVersion(),
this.authorizedAadObjectId == null ? "" : this.authorizedAadObjectId,
"", /* suoid - empty since this applies to HNS only accounts. */
this.correlationId == null ? "" : this.correlationId,
this.sasIpRange == null ? "" : this.sasIpRange.toString(),
this.protocol == null ? "" : this.protocol.toString(),
VERSION,
resource,
versionSegment == null ? "" : versionSegment,
this.cacheControl == null ? "" : this.cacheControl,
this.contentDisposition == null ? "" : this.contentDisposition,
this.contentEncoding == null ? "" : this.contentEncoding,
this.contentLanguage == null ? "" : this.contentLanguage,
this.contentType == null ? "" : this.contentType
);
} else {
return String.join("\n",
this.permissions == null ? "" : this.permissions,
this.startTime == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(this.startTime),
this.expiryTime == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(this.expiryTime),
canonicalName,
key.getSignedObjectId() == null ? "" : key.getSignedObjectId(),
key.getSignedTenantId() == null ? "" : key.getSignedTenantId(),
key.getSignedStart() == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(key.getSignedStart()),
key.getSignedExpiry() == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(key.getSignedExpiry()),
key.getSignedService() == null ? "" : key.getSignedService(),
key.getSignedVersion() == null ? "" : key.getSignedVersion(),
this.authorizedAadObjectId == null ? "" : this.authorizedAadObjectId,
"", /* suoid - empty since this applies to HNS only accounts. */
this.correlationId == null ? "" : this.correlationId,
this.sasIpRange == null ? "" : this.sasIpRange.toString(),
this.protocol == null ? "" : this.protocol.toString(),
VERSION,
resource,
versionSegment == null ? "" : versionSegment,
this.encryptionScope == null ? "" : this.encryptionScope,
this.cacheControl == null ? "" : this.cacheControl,
this.contentDisposition == null ? "" : this.contentDisposition,
this.contentEncoding == null ? "" : this.contentEncoding,
this.contentLanguage == null ? "" : this.contentLanguage,
this.contentType == null ? "" : this.contentType
);
}
} | class BlobSasImplUtil {
/**
 * The SAS blob constant.
 */
private static final String SAS_BLOB_CONSTANT = "b";
/**
 * The SAS blob snapshot constant.
 */
private static final String SAS_BLOB_SNAPSHOT_CONSTANT = "bs";
/**
 * The SAS blob version constant.
 */
private static final String SAS_BLOB_VERSION_CONSTANT = "bv";
/**
 * The SAS blob container constant.
 */
private static final String SAS_CONTAINER_CONSTANT = "c";
private static final ClientLogger LOGGER = new ClientLogger(BlobSasImplUtil.class);
// Service version stamped into the SAS; overridable through the global
// configuration property, defaulting to the latest version this SDK knows about.
private static final String VERSION = Configuration.getGlobalConfiguration()
    .get(Constants.PROPERTY_AZURE_STORAGE_SAS_SERVICE_VERSION, BlobServiceVersion.getLatest().getVersion());
// Values captured from BlobServiceSasSignatureValues / the constructor arguments;
// all optional unless required by ensureState().
private SasProtocol protocol;
private OffsetDateTime startTime;
private OffsetDateTime expiryTime;
private String permissions;
private SasIpRange sasIpRange;
private String containerName;
private String blobName;
// Signed resource type ("b", "bs", "bv" or "c"); derived in ensureState(), never set by callers.
private String resource;
private String snapshotId;
private String versionId;
private String identifier;
private String cacheControl;
private String contentDisposition;
private String contentEncoding;
private String contentLanguage;
private String contentType;
// Parameters only emitted for user-delegation SAS (saoid / scid).
private String authorizedAadObjectId;
private String correlationId;
private String encryptionScope;
/**
 * Creates a new {@link BlobSasImplUtil} for a container-level SAS: no blob name,
 * snapshot id or version id.
 *
 * @param sasValues {@link BlobServiceSasSignatureValues}
 * @param containerName The container name
 */
public BlobSasImplUtil(BlobServiceSasSignatureValues sasValues, String containerName) {
    this(sasValues, containerName, null, null, null);
}
/**
 * Creates a new {@link BlobSasImplUtil} with the specified parameters and no
 * client-level encryption scope.
 *
 * @param sasValues {@link BlobServiceSasSignatureValues}
 * @param containerName The container name
 * @param blobName The blob name
 * @param snapshotId The snapshot id
 * @param versionId The version id
 */
public BlobSasImplUtil(BlobServiceSasSignatureValues sasValues, String containerName, String blobName,
    String snapshotId, String versionId) {
    this(sasValues, containerName, blobName, snapshotId, versionId, null);
}
/**
 * Creates a new {@link BlobSasImplUtil} with the specified parameters
 *
 * @param sasValues {@link BlobServiceSasSignatureValues}
 * @param containerName The container name
 * @param blobName The blob name
 * @param snapshotId The snapshot id
 * @param versionId The version id
 * @param encryptionScope The encryption scope, used only when the sas values carry none.
 * @throws NullPointerException If {@code sasValues} is null.
 * @throws IllegalArgumentException If both {@code snapshotId} and {@code versionId} are provided.
 */
public BlobSasImplUtil(BlobServiceSasSignatureValues sasValues, String containerName, String blobName,
    String snapshotId, String versionId, String encryptionScope) {
    Objects.requireNonNull(sasValues);
    // A SAS may target a snapshot or a version, never both at once.
    if (snapshotId != null && versionId != null) {
        throw LOGGER.logExceptionAsError(
            new IllegalArgumentException("'snapshot' and 'versionId' cannot be used at the same time."));
    }
    this.protocol = sasValues.getProtocol();
    this.startTime = sasValues.getStartTime();
    this.expiryTime = sasValues.getExpiryTime();
    this.permissions = sasValues.getPermissions();
    this.sasIpRange = sasValues.getSasIpRange();
    this.containerName = containerName;
    this.blobName = blobName;
    this.snapshotId = snapshotId;
    this.versionId = versionId;
    this.identifier = sasValues.getIdentifier();
    this.cacheControl = sasValues.getCacheControl();
    this.contentDisposition = sasValues.getContentDisposition();
    this.contentEncoding = sasValues.getContentEncoding();
    this.contentLanguage = sasValues.getContentLanguage();
    this.contentType = sasValues.getContentType();
    this.authorizedAadObjectId = sasValues.getPreauthorizedAgentObjectId();
    this.correlationId = sasValues.getCorrelationId();
    /*
    Prefer the encryption scope explicitly set on the sas values. If none present, fallback to the value on the
    client.
    */
    this.encryptionScope = sasValues.getEncryptionScope() == null
        ? encryptionScope : sasValues.getEncryptionScope();
}
/**
 * Generates a SAS signed with a {@link StorageSharedKeyCredential}.
 *
 * @param storageSharedKeyCredentials {@link StorageSharedKeyCredential}
 * @param context Additional context that is passed through the code when generating a SAS.
 * @return A String representing the SAS.
 */
public String generateSas(StorageSharedKeyCredential storageSharedKeyCredentials, Context context) {
    StorageImplUtils.assertNotNull("storageSharedKeyCredentials", storageSharedKeyCredentials);
    ensureState();
    // Sign the canonicalized resource plus the normalized SAS fields.
    String toSign = stringToSign(getCanonicalName(storageSharedKeyCredentials.getAccountName()));
    StorageImplUtils.logStringToSign(LOGGER, toSign, context);
    return encode(null /* userDelegationKey */, storageSharedKeyCredentials.computeHmac256(toSign));
}
/**
 * Generates a SAS signed with a {@link UserDelegationKey}.
 *
 * @param delegationKey {@link UserDelegationKey}
 * @param accountName The account name
 * @param context Additional context that is passed through the code when generating a SAS.
 * @return A String representing the SAS.
 */
public String generateUserDelegationSas(UserDelegationKey delegationKey, String accountName, Context context) {
    StorageImplUtils.assertNotNull("delegationKey", delegationKey);
    StorageImplUtils.assertNotNull("accountName", accountName);
    ensureState();
    // HMAC the version-specific string-to-sign with the delegation key's value.
    String toSign = stringToSign(delegationKey, getCanonicalName(accountName));
    StorageImplUtils.logStringToSign(LOGGER, toSign, context);
    return encode(delegationKey, StorageImplUtils.computeHMac256(delegationKey.getValue(), toSign));
}
/**
 * Encodes a Sas from the values in this type.
 * @param userDelegationKey {@link UserDelegationKey}
 * @param signature The signature of the Sas.
 * @return A String representing the Sas.
 */
private String encode(UserDelegationKey userDelegationKey, String signature) {
    /*
    We should be url-encoding each key and each value, but because we know all the keys and values will encode to
    themselves, we cheat except for the signature value.
    */
    StringBuilder sb = new StringBuilder();
    // NOTE(review): tryAppendQueryParameter presumably skips null values so only
    // populated fields appear in the query string — confirm in the helper.
    tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_SERVICE_VERSION, VERSION);
    tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_PROTOCOL, this.protocol);
    tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_START_TIME, formatQueryParameterDate(this.startTime));
    tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_EXPIRY_TIME, formatQueryParameterDate(this.expiryTime));
    tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_IP_RANGE, this.sasIpRange);
    tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_SIGNED_IDENTIFIER, this.identifier);
    // The signed-key parameters only apply to user-delegation SAS.
    if (userDelegationKey != null) {
        tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_SIGNED_OBJECT_ID,
            userDelegationKey.getSignedObjectId());
        tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_SIGNED_TENANT_ID,
            userDelegationKey.getSignedTenantId());
        tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_SIGNED_KEY_START,
            formatQueryParameterDate(userDelegationKey.getSignedStart()));
        tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_SIGNED_KEY_EXPIRY,
            formatQueryParameterDate(userDelegationKey.getSignedExpiry()));
        tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_SIGNED_KEY_SERVICE,
            userDelegationKey.getSignedService());
        tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_SIGNED_KEY_VERSION,
            userDelegationKey.getSignedVersion());
        /* Only parameters relevant for user delegation SAS. */
        tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_PREAUTHORIZED_AGENT_OBJECT_ID, this.authorizedAadObjectId);
        tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_CORRELATION_ID, this.correlationId);
    }
    tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_SIGNED_RESOURCE, this.resource);
    tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_SIGNED_PERMISSIONS, this.permissions);
    tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_SIGNATURE, signature);
    tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_ENCRYPTION_SCOPE, this.encryptionScope);
    tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_CACHE_CONTROL, this.cacheControl);
    tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_CONTENT_DISPOSITION, this.contentDisposition);
    tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_CONTENT_ENCODING, this.contentEncoding);
    tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_CONTENT_LANGUAGE, this.contentLanguage);
    tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_CONTENT_TYPE, this.contentType);
    return sb.toString();
}
/**
 * Validates and normalizes the builder state before a SAS is produced:
 * <ol>
 *     <li>Without a stored access policy identifier, expiry time and permissions are mandatory.</li>
 *     <li>The signed resource type is derived from blobName/snapshotId/versionId.</li>
 *     <li>Permissions are round-tripped through the matching permission type; unknown
 *     resource types are left untouched.</li>
 * </ol>
 * Mirrors the equivalent logic in the Azure Storage .NET and JavaScript SDKs.
 */
private void ensureState() {
    if (identifier == null && (expiryTime == null || permissions == null)) {
        throw LOGGER.logExceptionAsError(new IllegalStateException(
            "If identifier is not set, expiry time and permissions must be set"));
    }

    // Derive the signed-resource type: container when no blob name, otherwise
    // snapshot/version/plain blob depending on which qualifier is present.
    if (CoreUtils.isNullOrEmpty(blobName)) {
        resource = SAS_CONTAINER_CONSTANT;
    } else if (snapshotId != null) {
        resource = SAS_BLOB_SNAPSHOT_CONSTANT;
    } else if (versionId != null) {
        resource = SAS_BLOB_VERSION_CONSTANT;
    } else {
        resource = SAS_BLOB_CONSTANT;
    }

    if (permissions == null) {
        return;
    }
    if (SAS_CONTAINER_CONSTANT.equals(resource)) {
        permissions = BlobContainerSasPermission.parse(permissions).toString();
    } else if (SAS_BLOB_CONSTANT.equals(resource)
        || SAS_BLOB_SNAPSHOT_CONSTANT.equals(resource)
        || SAS_BLOB_VERSION_CONSTANT.equals(resource)) {
        permissions = BlobSasPermission.parse(permissions).toString();
    } else {
        LOGGER.info("Not re-parsing permissions. Resource type '{}' is unknown.", resource);
    }
}
/**
 * Computes the canonical resource name used in the string-to-sign:
 * {@code /blob/{account}/{container}} for a container, with {@code /{blobName}} appended
 * (backslashes normalized to forward slashes) when a blob name is present.
 */
private String getCanonicalName(String account) {
    StringBuilder canonicalName = new StringBuilder("/blob/").append(account).append('/').append(containerName);
    if (!CoreUtils.isNullOrEmpty(blobName)) {
        canonicalName.append('/').append(blobName.replace("\\", "/"));
    }
    return canonicalName.toString();
}
/**
 * Builds the shared-key string-to-sign for this SAS.
 * <p>
 * The field list must exactly match what the service expects for the signed service version: the
 * encryption-scope field only participates in the signature for service versions later than
 * 2020-10-02, so it is omitted for older versions — signing it unconditionally produces a
 * signature the service rejects when an older {@code sv} is requested.
 *
 * @param canonicalName the canonical resource name from {@code getCanonicalName}
 * @return the newline-delimited string to sign
 */
private String stringToSign(String canonicalName) {
    // A SAS targets at most one of snapshot/version; constructor enforces mutual exclusivity.
    String versionSegment = this.snapshotId == null ? this.versionId : this.snapshotId;
    if (VERSION.compareTo(BlobServiceVersion.V2020_10_02.getVersion()) <= 0) {
        // Service versions up to and including 2020-10-02 do not sign the encryption scope.
        return String.join("\n",
            this.permissions == null ? "" : permissions,
            this.startTime == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(this.startTime),
            this.expiryTime == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(this.expiryTime),
            canonicalName,
            this.identifier == null ? "" : this.identifier,
            this.sasIpRange == null ? "" : this.sasIpRange.toString(),
            this.protocol == null ? "" : this.protocol.toString(),
            VERSION,
            resource,
            versionSegment == null ? "" : versionSegment,
            this.cacheControl == null ? "" : this.cacheControl,
            this.contentDisposition == null ? "" : this.contentDisposition,
            this.contentEncoding == null ? "" : this.contentEncoding,
            this.contentLanguage == null ? "" : this.contentLanguage,
            this.contentType == null ? "" : this.contentType
        );
    } else {
        return String.join("\n",
            this.permissions == null ? "" : permissions,
            this.startTime == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(this.startTime),
            this.expiryTime == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(this.expiryTime),
            canonicalName,
            this.identifier == null ? "" : this.identifier,
            this.sasIpRange == null ? "" : this.sasIpRange.toString(),
            this.protocol == null ? "" : this.protocol.toString(),
            VERSION,
            resource,
            versionSegment == null ? "" : versionSegment,
            this.encryptionScope == null ? "" : this.encryptionScope,
            this.cacheControl == null ? "" : this.cacheControl,
            this.contentDisposition == null ? "" : this.contentDisposition,
            this.contentEncoding == null ? "" : this.contentEncoding,
            this.contentLanguage == null ? "" : this.contentLanguage,
            this.contentType == null ? "" : this.contentType
        );
    }
}
} | class BlobSasImplUtil {
/**
 * The SAS blob constant.
 */
private static final String SAS_BLOB_CONSTANT = "b";
/**
 * The SAS blob snapshot constant.
 */
private static final String SAS_BLOB_SNAPSHOT_CONSTANT = "bs";
/**
 * The SAS blob version constant.
 */
private static final String SAS_BLOB_VERSION_CONSTANT = "bv";
/**
 * The SAS blob container constant.
 */
private static final String SAS_CONTAINER_CONSTANT = "c";
private static final ClientLogger LOGGER = new ClientLogger(BlobSasImplUtil.class);
// Service version placed in the "sv" parameter and signed; read from global configuration with
// the latest Blob service version as the fallback.
private static final String VERSION = Configuration.getGlobalConfiguration()
    .get(Constants.PROPERTY_AZURE_STORAGE_SAS_SERVICE_VERSION, BlobServiceVersion.getLatest().getVersion());
// Values captured from BlobServiceSasSignatureValues at construction time; "resource" is derived
// later by ensureState().
private SasProtocol protocol;
private OffsetDateTime startTime;
private OffsetDateTime expiryTime;
private String permissions;
private SasIpRange sasIpRange;
private String containerName;
private String blobName;
private String resource;
private String snapshotId;
private String versionId;
private String identifier;
private String cacheControl;
private String contentDisposition;
private String contentEncoding;
private String contentLanguage;
private String contentType;
// User-delegation-only parameters (saoid / scid).
private String authorizedAadObjectId;
private String correlationId;
private String encryptionScope;
/**
 * Creates a new {@link BlobSasImplUtil} scoped to a container: no blob name, snapshot id,
 * version id or encryption scope is set.
 *
 * @param sasValues {@link BlobServiceSasSignatureValues}
 * @param containerName The container name
 */
public BlobSasImplUtil(BlobServiceSasSignatureValues sasValues, String containerName) {
    this(sasValues, containerName, null, null, null, null);
}
/**
 * Creates a new {@link BlobSasImplUtil} with the specified parameters, copying all signature
 * values into this instance so later mutation of {@code sasValues} has no effect.
 *
 * @param sasValues {@link BlobServiceSasSignatureValues}
 * @param containerName The container name
 * @param blobName The blob name
 * @param snapshotId The snapshot id; mutually exclusive with {@code versionId}
 * @param versionId The version id; mutually exclusive with {@code snapshotId}
 * @param encryptionScope The encryption scope
 * @throws IllegalArgumentException if both {@code snapshotId} and {@code versionId} are set
 * @throws NullPointerException if {@code sasValues} is null
 */
public BlobSasImplUtil(BlobServiceSasSignatureValues sasValues, String containerName, String blobName,
    String snapshotId, String versionId, String encryptionScope) {
    Objects.requireNonNull(sasValues);
    // A SAS can target a snapshot or a version, never both.
    if (snapshotId != null && versionId != null) {
        throw LOGGER.logExceptionAsError(
            new IllegalArgumentException("'snapshot' and 'versionId' cannot be used at the same time."));
    }
    this.protocol = sasValues.getProtocol();
    this.startTime = sasValues.getStartTime();
    this.expiryTime = sasValues.getExpiryTime();
    this.permissions = sasValues.getPermissions();
    this.sasIpRange = sasValues.getSasIpRange();
    this.containerName = containerName;
    this.blobName = blobName;
    this.snapshotId = snapshotId;
    this.versionId = versionId;
    this.identifier = sasValues.getIdentifier();
    this.cacheControl = sasValues.getCacheControl();
    this.contentDisposition = sasValues.getContentDisposition();
    this.contentEncoding = sasValues.getContentEncoding();
    this.contentLanguage = sasValues.getContentLanguage();
    this.contentType = sasValues.getContentType();
    this.authorizedAadObjectId = sasValues.getPreauthorizedAgentObjectId();
    this.correlationId = sasValues.getCorrelationId();
    this.encryptionScope = encryptionScope;
}
/**
 * Generates a SAS signed with a {@link StorageSharedKeyCredential}.
 *
 * @param storageSharedKeyCredentials {@link StorageSharedKeyCredential} used to compute the
 * HMAC-SHA256 signature; must not be null.
 * @param context Additional context that is passed through the code when generating a SAS.
 * @return A String representing the Sas
 */
public String generateSas(StorageSharedKeyCredential storageSharedKeyCredentials, Context context) {
    StorageImplUtils.assertNotNull("storageSharedKeyCredentials", storageSharedKeyCredentials);
    ensureState();
    // Sign the canonicalized resource plus all signed fields, then emit the query string.
    String toSign = stringToSign(getCanonicalName(storageSharedKeyCredentials.getAccountName()));
    StorageImplUtils.logStringToSign(LOGGER, toSign, context);
    return encode(null /* userDelegationKey */, storageSharedKeyCredentials.computeHmac256(toSign));
}
/**
 * Generates a SAS signed with a {@link UserDelegationKey}.
 *
 * @param delegationKey {@link UserDelegationKey} whose value signs the SAS; must not be null.
 * @param accountName The account name; must not be null.
 * @param context Additional context that is passed through the code when generating a SAS.
 * @return A String representing the Sas
 */
public String generateUserDelegationSas(UserDelegationKey delegationKey, String accountName, Context context) {
    StorageImplUtils.assertNotNull("delegationKey", delegationKey);
    StorageImplUtils.assertNotNull("accountName", accountName);
    ensureState();
    // User-delegation SAS signs the delegation-key fields in addition to the shared-key fields.
    String toSign = stringToSign(delegationKey, getCanonicalName(accountName));
    StorageImplUtils.logStringToSign(LOGGER, toSign, context);
    return encode(delegationKey, StorageImplUtils.computeHMac256(delegationKey.getValue(), toSign));
}
/**
 * Encodes a Sas from the values in this type.
 * Parameters are appended in a fixed order; only the signature value needs URL-encoding because
 * every other key and value is known to encode to itself.
 * @param userDelegationKey {@link UserDelegationKey} for a user-delegation SAS, or null for a
 * shared-key SAS.
 * @param signature The signature of the Sas.
 * @return A String representing the Sas.
 */
private String encode(UserDelegationKey userDelegationKey, String signature) {
    /*
    We should be url-encoding each key and each value, but because we know all the keys and values will encode to
    themselves, we cheat except for the signature value.
     */
    StringBuilder sb = new StringBuilder();
    tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_SERVICE_VERSION, VERSION);
    tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_PROTOCOL, this.protocol);
    tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_START_TIME, formatQueryParameterDate(this.startTime));
    tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_EXPIRY_TIME, formatQueryParameterDate(this.expiryTime));
    tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_IP_RANGE, this.sasIpRange);
    tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_SIGNED_IDENTIFIER, this.identifier);
    // Delegation-key parameters are only present on user-delegation SAS tokens.
    if (userDelegationKey != null) {
        tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_SIGNED_OBJECT_ID,
            userDelegationKey.getSignedObjectId());
        tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_SIGNED_TENANT_ID,
            userDelegationKey.getSignedTenantId());
        tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_SIGNED_KEY_START,
            formatQueryParameterDate(userDelegationKey.getSignedStart()));
        tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_SIGNED_KEY_EXPIRY,
            formatQueryParameterDate(userDelegationKey.getSignedExpiry()));
        tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_SIGNED_KEY_SERVICE,
            userDelegationKey.getSignedService());
        tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_SIGNED_KEY_VERSION,
            userDelegationKey.getSignedVersion());
        /* Only parameters relevant for user delegation SAS. */
        tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_PREAUTHORIZED_AGENT_OBJECT_ID, this.authorizedAadObjectId);
        tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_CORRELATION_ID, this.correlationId);
    }
    tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_SIGNED_RESOURCE, this.resource);
    tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_SIGNED_PERMISSIONS, this.permissions);
    tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_SIGNATURE, signature);
    tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_ENCRYPTION_SCOPE, this.encryptionScope);
    tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_CACHE_CONTROL, this.cacheControl);
    tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_CONTENT_DISPOSITION, this.contentDisposition);
    tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_CONTENT_ENCODING, this.contentEncoding);
    tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_CONTENT_LANGUAGE, this.contentLanguage);
    tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_CONTENT_TYPE, this.contentType);
    return sb.toString();
}
/**
 * Validates the signature values and normalizes derived state before signing.
 * <p>
 * Rules applied:
 * <ol>
 * <li>Without a stored-access-policy identifier, an expiry time and permissions are mandatory.</li>
 * <li>The signed resource is container ("c") when no blob name is set, blob snapshot ("bs") when a
 * snapshot id is set, blob version ("bv") when a version id is set, otherwise blob ("b").</li>
 * <li>Permissions are re-parsed against the resolved resource type so only valid flags remain;
 * an unrecognized resource type leaves the permission string untouched.</li>
 * </ol>
 */
private void ensureState() {
    if (identifier == null && (expiryTime == null || permissions == null)) {
        throw LOGGER.logExceptionAsError(new IllegalStateException(
            "If identifier is not set, expiry time and permissions must be set"));
    }

    if (!CoreUtils.isNullOrEmpty(blobName)) {
        if (snapshotId != null) {
            resource = SAS_BLOB_SNAPSHOT_CONSTANT;
        } else if (versionId != null) {
            resource = SAS_BLOB_VERSION_CONSTANT;
        } else {
            resource = SAS_BLOB_CONSTANT;
        }
    } else {
        resource = SAS_CONTAINER_CONSTANT;
    }

    if (permissions != null) {
        if (SAS_CONTAINER_CONSTANT.equals(resource)) {
            permissions = BlobContainerSasPermission.parse(permissions).toString();
        } else if (SAS_BLOB_CONSTANT.equals(resource) || SAS_BLOB_SNAPSHOT_CONSTANT.equals(resource)
            || SAS_BLOB_VERSION_CONSTANT.equals(resource)) {
            permissions = BlobSasPermission.parse(permissions).toString();
        } else {
            // Unknown resource type: keep the caller-supplied permission string as-is.
            LOGGER.info("Not re-parsing permissions. Resource type '{}' is unknown.", resource);
        }
    }
}
/**
 * Computes the canonical resource name used in the string-to-sign:
 * {@code /blob/{account}/{container}} for a container, with {@code /{blobName}} appended
 * (backslashes normalized to forward slashes) when a blob name is present.
 */
private String getCanonicalName(String account) {
    StringBuilder canonicalName = new StringBuilder("/blob/").append(account).append('/').append(containerName);
    if (!CoreUtils.isNullOrEmpty(blobName)) {
        canonicalName.append('/').append(blobName.replace("\\", "/"));
    }
    return canonicalName.toString();
}
/**
 * Builds the shared-key string-to-sign for this SAS.
 * The encryption-scope field is signed only for service versions later than 2020-10-02; all other
 * fields are shared by both layouts and are emitted in the order the service verifies them.
 */
private String stringToSign(String canonicalName) {
    String versionSegment = this.snapshotId == null ? this.versionId : this.snapshotId;
    // Fields common to every service version, in signing order.
    String head = String.join("\n",
        this.permissions == null ? "" : permissions,
        this.startTime == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(this.startTime),
        this.expiryTime == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(this.expiryTime),
        canonicalName,
        this.identifier == null ? "" : this.identifier,
        this.sasIpRange == null ? "" : this.sasIpRange.toString(),
        this.protocol == null ? "" : this.protocol.toString(),
        VERSION,
        resource,
        versionSegment == null ? "" : versionSegment);
    String tail = String.join("\n",
        this.cacheControl == null ? "" : this.cacheControl,
        this.contentDisposition == null ? "" : this.contentDisposition,
        this.contentEncoding == null ? "" : this.contentEncoding,
        this.contentLanguage == null ? "" : this.contentLanguage,
        this.contentType == null ? "" : this.contentType);
    if (VERSION.compareTo(BlobServiceVersion.V2020_10_02.getVersion()) <= 0) {
        // Older service versions do not sign the encryption scope.
        return head + "\n" + tail;
    }
    return head + "\n" + (this.encryptionScope == null ? "" : this.encryptionScope) + "\n" + tail;
}
} |
we should version string to sign. | private String stringToSign(final StorageSharedKeyCredential storageSharedKeyCredentials) {
return String.join("\n",
storageSharedKeyCredentials.getAccountName(),
AccountSasPermission.parse(this.permissions).toString(),
this.services,
resourceTypes,
this.startTime == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(this.startTime),
Constants.ISO_8601_UTC_DATE_FORMATTER.format(this.expiryTime),
this.sasIpRange == null ? "" : this.sasIpRange.toString(),
this.protocol == null ? "" : this.protocol.toString(),
VERSION,
this.encryptionScope == null ? "" : this.encryptionScope,
""
);
} | this.encryptionScope == null ? "" : this.encryptionScope, | private String stringToSign(final StorageSharedKeyCredential storageSharedKeyCredentials) {
return String.join("\n",
storageSharedKeyCredentials.getAccountName(),
AccountSasPermission.parse(this.permissions).toString(),
this.services,
resourceTypes,
this.startTime == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(this.startTime),
Constants.ISO_8601_UTC_DATE_FORMATTER.format(this.expiryTime),
this.sasIpRange == null ? "" : this.sasIpRange.toString(),
this.protocol == null ? "" : this.protocol.toString(),
VERSION,
""
);
} | class level JavaDocs.</p>
*
* @see <a href=https:
*
* @param storageSharedKeyCredentials Credentials for the storage account.
* @return A new {@link AccountSasQueryParameters} | class level JavaDocs.</p>
*
* @see <a href=https:
*
* @param storageSharedKeyCredentials Credentials for the storage account.
* @return A new {@link AccountSasQueryParameters} |
same here. | private String stringToSign(String canonicalName) {
return String.join("\n",
this.permissions == null ? "" : permissions,
this.startTime == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(this.startTime),
this.expiryTime == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(this.expiryTime),
canonicalName,
this.identifier == null ? "" : this.identifier,
this.sasIpRange == null ? "" : this.sasIpRange.toString(),
this.protocol == null ? "" : this.protocol.toString(),
VERSION,
resource,
"", /* Version segment. */
"",
this.cacheControl == null ? "" : this.cacheControl,
this.contentDisposition == null ? "" : this.contentDisposition,
this.contentEncoding == null ? "" : this.contentEncoding,
this.contentLanguage == null ? "" : this.contentLanguage,
this.contentType == null ? "" : this.contentType
);
} | "", | private String stringToSign(String canonicalName) {
if (VERSION.compareTo(DataLakeServiceVersion.V2020_10_02.getVersion()) <= 0) {
return String.join("\n",
this.permissions == null ? "" : permissions,
this.startTime == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(this.startTime),
this.expiryTime == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(this.expiryTime),
canonicalName,
this.identifier == null ? "" : this.identifier,
this.sasIpRange == null ? "" : this.sasIpRange.toString(),
this.protocol == null ? "" : this.protocol.toString(),
VERSION,
resource,
"", /* Version segment. */
this.cacheControl == null ? "" : this.cacheControl,
this.contentDisposition == null ? "" : this.contentDisposition,
this.contentEncoding == null ? "" : this.contentEncoding,
this.contentLanguage == null ? "" : this.contentLanguage,
this.contentType == null ? "" : this.contentType
);
} else {
return String.join("\n",
this.permissions == null ? "" : permissions,
this.startTime == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(this.startTime),
this.expiryTime == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(this.expiryTime),
canonicalName,
this.identifier == null ? "" : this.identifier,
this.sasIpRange == null ? "" : this.sasIpRange.toString(),
this.protocol == null ? "" : this.protocol.toString(),
VERSION,
resource,
"", /* Version segment. */
"",
this.cacheControl == null ? "" : this.cacheControl,
this.contentDisposition == null ? "" : this.contentDisposition,
this.contentEncoding == null ? "" : this.contentEncoding,
this.contentLanguage == null ? "" : this.contentLanguage,
this.contentType == null ? "" : this.contentType
);
}
} | class DataLakeSasImplUtil {
/**
 * The SAS blob (datalake file) constant.
 */
private static final String SAS_BLOB_CONSTANT = "b";
/**
 * The SAS directory (datalake directory) constant.
 */
private static final String SAS_DIRECTORY_CONSTANT = "d";
/**
 * The SAS blob container (datalake file system) constant.
 */
private static final String SAS_CONTAINER_CONSTANT = "c";
private static final ClientLogger LOGGER = new ClientLogger(DataLakeSasImplUtil.class);
// Service version placed in the "sv" parameter and signed; read from global configuration with
// the latest DataLake service version as the fallback.
private static final String VERSION = Configuration.getGlobalConfiguration()
    .get(Constants.PROPERTY_AZURE_STORAGE_SAS_SERVICE_VERSION, DataLakeServiceVersion.getLatest().getVersion());
// Values captured from DataLakeServiceSasSignatureValues at construction; "resource" and
// "directoryDepth" are derived later by ensureState().
private SasProtocol protocol;
private OffsetDateTime startTime;
private OffsetDateTime expiryTime;
private String permissions;
private SasIpRange sasIpRange;
private String fileSystemName;
private String pathName;
private String resource;
private String identifier;
private String cacheControl;
private String contentDisposition;
private String contentEncoding;
private String contentLanguage;
private String contentType;
// True when the SAS targets a directory; directoryDepth ("sdd") is computed from pathName.
private Boolean isDirectory;
private Integer directoryDepth;
// User-delegation-only parameters (saoid / suoid / scid); saoid and suoid are mutually exclusive.
private String authorizedAadObjectId;
private String unauthorizedAadObjectId;
private String correlationId;
/**
 * Creates a new {@link DataLakeSasImplUtil} scoped to a file system: no path name is set and the
 * target is not a directory.
 *
 * @param sasValues {@link DataLakeServiceSasSignatureValues}
 * @param fileSystemName The file system name
 */
public DataLakeSasImplUtil(DataLakeServiceSasSignatureValues sasValues, String fileSystemName) {
    this(sasValues, fileSystemName, null, false);
}
/**
 * Creates a new {@link DataLakeSasImplUtil} with the specified parameters, copying all signature
 * values into this instance so later mutation of {@code sasValues} has no effect.
 *
 * @param sasValues {@link DataLakeServiceSasSignatureValues}
 * @param fileSystemName The file system name
 * @param pathName The path name
 * @param isDirectory Whether or not the path points to a directory.
 * @throws NullPointerException if {@code sasValues} is null
 */
public DataLakeSasImplUtil(DataLakeServiceSasSignatureValues sasValues, String fileSystemName, String pathName,
    boolean isDirectory) {
    Objects.requireNonNull(sasValues);
    this.protocol = sasValues.getProtocol();
    this.startTime = sasValues.getStartTime();
    this.expiryTime = sasValues.getExpiryTime();
    this.permissions = sasValues.getPermissions();
    this.sasIpRange = sasValues.getSasIpRange();
    this.fileSystemName = fileSystemName;
    this.pathName = pathName;
    this.identifier = sasValues.getIdentifier();
    this.cacheControl = sasValues.getCacheControl();
    this.contentDisposition = sasValues.getContentDisposition();
    this.contentEncoding = sasValues.getContentEncoding();
    this.contentLanguage = sasValues.getContentLanguage();
    this.contentType = sasValues.getContentType();
    this.authorizedAadObjectId = sasValues.getPreauthorizedAgentObjectId();
    this.unauthorizedAadObjectId = sasValues.getAgentObjectId();
    this.correlationId = sasValues.getCorrelationId();
    this.isDirectory = isDirectory;
}
/**
 * Generates a SAS signed with a {@link StorageSharedKeyCredential}.
 *
 * @param storageSharedKeyCredentials {@link StorageSharedKeyCredential} used to compute the
 * HMAC-SHA256 signature; must not be null.
 * @param context Additional context that is passed through the code when generating a SAS.
 * @return A String representing the Sas
 */
public String generateSas(StorageSharedKeyCredential storageSharedKeyCredentials, Context context) {
    StorageImplUtils.assertNotNull("storageSharedKeyCredentials", storageSharedKeyCredentials);
    ensureState();
    // Sign the canonicalized resource plus all signed fields, then emit the query string.
    String toSign = stringToSign(getCanonicalName(storageSharedKeyCredentials.getAccountName()));
    StorageImplUtils.logStringToSign(LOGGER, toSign, context);
    return encode(null /* userDelegationKey */, storageSharedKeyCredentials.computeHmac256(toSign));
}
/**
 * Generates a SAS signed with a {@link UserDelegationKey}.
 *
 * @param delegationKey {@link UserDelegationKey} whose value signs the SAS; must not be null.
 * @param accountName The account name; must not be null.
 * @param context Additional context that is passed through the code when generating a SAS.
 * @return A String representing the Sas
 */
public String generateUserDelegationSas(UserDelegationKey delegationKey, String accountName, Context context) {
    StorageImplUtils.assertNotNull("delegationKey", delegationKey);
    StorageImplUtils.assertNotNull("accountName", accountName);
    ensureState();
    // User-delegation SAS signs the delegation-key fields in addition to the shared-key fields.
    String toSign = stringToSign(delegationKey, getCanonicalName(accountName));
    StorageImplUtils.logStringToSign(LOGGER, toSign, context);
    return encode(delegationKey, StorageImplUtils.computeHMac256(delegationKey.getValue(), toSign));
}
/**
 * Encodes a Sas from the values in this type.
 * Parameters are appended in a fixed order; only the signature value needs URL-encoding because
 * every other key and value is known to encode to itself.
 * @param userDelegationKey {@link UserDelegationKey} for a user-delegation SAS, or null for a
 * shared-key SAS.
 * @param signature The signature of the Sas.
 * @return A String representing the Sas.
 */
private String encode(UserDelegationKey userDelegationKey, String signature) {
    /*
    We should be url-encoding each key and each value, but because we know all the keys and values will encode to
    themselves, we cheat except for the signature value.
     */
    StringBuilder sb = new StringBuilder();
    tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_SERVICE_VERSION, VERSION);
    tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_PROTOCOL, this.protocol);
    tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_START_TIME, formatQueryParameterDate(this.startTime));
    tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_EXPIRY_TIME, formatQueryParameterDate(this.expiryTime));
    tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_IP_RANGE, this.sasIpRange);
    tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_SIGNED_IDENTIFIER, this.identifier);
    // Delegation-key parameters are only present on user-delegation SAS tokens.
    if (userDelegationKey != null) {
        tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_SIGNED_OBJECT_ID,
            userDelegationKey.getSignedObjectId());
        tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_SIGNED_TENANT_ID,
            userDelegationKey.getSignedTenantId());
        tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_SIGNED_KEY_START,
            formatQueryParameterDate(userDelegationKey.getSignedStart()));
        tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_SIGNED_KEY_EXPIRY,
            formatQueryParameterDate(userDelegationKey.getSignedExpiry()));
        tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_SIGNED_KEY_SERVICE,
            userDelegationKey.getSignedService());
        tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_SIGNED_KEY_VERSION,
            userDelegationKey.getSignedVersion());
        /* Only parameters relevant for user delegation SAS. */
        tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_PREAUTHORIZED_AGENT_OBJECT_ID, this.authorizedAadObjectId);
        tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_AGENT_OBJECT_ID, this.unauthorizedAadObjectId);
        tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_CORRELATION_ID, this.correlationId);
    }
    tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_SIGNED_RESOURCE, this.resource);
    tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_SIGNED_PERMISSIONS, this.permissions);
    // Directory depth ("sdd") is only emitted for directory-scoped SAS tokens.
    if (this.isDirectory) {
        tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_DIRECTORY_DEPTH, this.directoryDepth);
    }
    tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_SIGNATURE, signature);
    tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_CACHE_CONTROL, this.cacheControl);
    tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_CONTENT_DISPOSITION, this.contentDisposition);
    tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_CONTENT_ENCODING, this.contentEncoding);
    tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_CONTENT_LANGUAGE, this.contentLanguage);
    tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_CONTENT_TYPE, this.contentType);
    return sb.toString();
}
/**
 * Validates the signature values and normalizes derived state before signing.
 * <p>
 * Rules applied:
 * <ol>
 * <li>Without a stored-access-policy identifier, an expiry time and permissions are mandatory.</li>
 * <li>The signed resource is file system ("c") when no path name is set, directory ("d") when the
 * path is a directory (also computing the directory depth from the path segments), otherwise
 * file ("b").</li>
 * <li>Permissions are re-parsed against the resolved resource type so only valid flags remain;
 * an unrecognized resource type leaves the permission string untouched.</li>
 * <li>A preauthorized agent object id (saoid) and an agent object id (suoid) may not both be
 * set.</li>
 * </ol>
 */
private void ensureState() {
    if (identifier == null && (expiryTime == null || permissions == null)) {
        throw LOGGER.logExceptionAsError(new IllegalStateException(
            "If identifier is not set, expiry time and permissions must be set"));
    }

    if (CoreUtils.isNullOrEmpty(pathName)) {
        resource = SAS_CONTAINER_CONSTANT;
    } else if (isDirectory) {
        resource = SAS_DIRECTORY_CONSTANT;
        // Depth is the number of "/"-separated segments in the directory path.
        this.directoryDepth = pathName.split("/").length;
    } else {
        resource = SAS_BLOB_CONSTANT;
    }

    if (permissions != null) {
        if (SAS_CONTAINER_CONSTANT.equals(resource)) {
            permissions = FileSystemSasPermission.parse(permissions).toString();
        } else if (SAS_BLOB_CONSTANT.equals(resource) || SAS_DIRECTORY_CONSTANT.equals(resource)) {
            permissions = PathSasPermission.parse(permissions).toString();
        } else {
            // Unknown resource type: keep the caller-supplied permission string as-is.
            LOGGER.info("Not re-parsing permissions. Resource type '{}' is unknown.", resource);
        }
    }

    if (this.authorizedAadObjectId != null && this.unauthorizedAadObjectId != null) {
        throw LOGGER.logExceptionAsError(new IllegalStateException(
            "agentObjectId and preauthorizedAgentObjectId can not both be set."));
    }
}
/**
 * Computes the canonical resource name used in the string-to-sign. Note the "/blob/" prefix is
 * used (matching how the service verifies DataLake SAS signatures), with {@code /{pathName}}
 * appended (backslashes normalized to forward slashes) when a path name is present.
 */
private String getCanonicalName(String account) {
    StringBuilder canonicalName = new StringBuilder("/blob/").append(account).append('/').append(fileSystemName);
    if (!CoreUtils.isNullOrEmpty(pathName)) {
        canonicalName.append('/').append(pathName.replace("\\", "/"));
    }
    return canonicalName.toString();
}
/**
 * Builds the user-delegation string-to-sign.
 * The signed field list depends on the target service version: versions after 2019-12-12 also
 * sign the preauthorized agent object id (saoid), agent object id (suoid) and correlation id
 * (scid) fields, and the two layouts differ in trailing placeholder fields — the exact order and
 * count of lines must match what the service computes, so the branches are kept verbatim.
 *
 * @param key the user delegation key whose signed metadata participates in the signature
 * @param canonicalName the canonical resource name from {@code getCanonicalName}
 * @return the newline-delimited string to sign
 */
private String stringToSign(final UserDelegationKey key, String canonicalName) {
    if (VERSION.compareTo(DataLakeServiceVersion.V2019_12_12.getVersion()) <= 0) {
        // Field layout for service versions up to and including 2019-12-12 (no saoid/suoid/scid).
        return String.join("\n",
            this.permissions == null ? "" : this.permissions,
            this.startTime == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(this.startTime),
            this.expiryTime == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(this.expiryTime),
            canonicalName,
            key.getSignedObjectId() == null ? "" : key.getSignedObjectId(),
            key.getSignedTenantId() == null ? "" : key.getSignedTenantId(),
            key.getSignedStart() == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(key.getSignedStart()),
            key.getSignedExpiry() == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(key.getSignedExpiry()),
            key.getSignedService() == null ? "" : key.getSignedService(),
            key.getSignedVersion() == null ? "" : key.getSignedVersion(),
            this.sasIpRange == null ? "" : this.sasIpRange.toString(),
            this.protocol == null ? "" : this.protocol.toString(),
            VERSION,
            resource,
            "", /* Version segment. */
            "",
            this.cacheControl == null ? "" : this.cacheControl,
            this.contentDisposition == null ? "" : this.contentDisposition,
            this.contentEncoding == null ? "" : this.contentEncoding,
            this.contentLanguage == null ? "" : this.contentLanguage,
            this.contentType == null ? "" : this.contentType
        );
    } else {
        // Field layout for later service versions, which additionally sign saoid/suoid/scid.
        return String.join("\n",
            this.permissions == null ? "" : this.permissions,
            this.startTime == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(this.startTime),
            this.expiryTime == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(this.expiryTime),
            canonicalName,
            key.getSignedObjectId() == null ? "" : key.getSignedObjectId(),
            key.getSignedTenantId() == null ? "" : key.getSignedTenantId(),
            key.getSignedStart() == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(key.getSignedStart()),
            key.getSignedExpiry() == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(key.getSignedExpiry()),
            key.getSignedService() == null ? "" : key.getSignedService(),
            key.getSignedVersion() == null ? "" : key.getSignedVersion(),
            this.authorizedAadObjectId == null ? "" : this.authorizedAadObjectId,
            this.unauthorizedAadObjectId == null ? "" : this.unauthorizedAadObjectId,
            this.correlationId == null ? "" : this.correlationId,
            this.sasIpRange == null ? "" : this.sasIpRange.toString(),
            this.protocol == null ? "" : this.protocol.toString(),
            VERSION,
            resource,
            "", /* Version segment. */
            this.cacheControl == null ? "" : this.cacheControl,
            this.contentDisposition == null ? "" : this.contentDisposition,
            this.contentEncoding == null ? "" : this.contentEncoding,
            this.contentLanguage == null ? "" : this.contentLanguage,
            this.contentType == null ? "" : this.contentType
        );
    }
}
} | class DataLakeSasImplUtil {
/**
 * The SAS blob (datalake file) constant.
 */
private static final String SAS_BLOB_CONSTANT = "b";
/**
 * The SAS directory (datalake directory) constant.
 */
private static final String SAS_DIRECTORY_CONSTANT = "d";
/**
 * The SAS blob container (datalake file system) constant.
 */
private static final String SAS_CONTAINER_CONSTANT = "c";
private static final ClientLogger LOGGER = new ClientLogger(DataLakeSasImplUtil.class);
// Service version placed in the "sv" parameter and signed; read from global configuration with
// the latest DataLake service version as the fallback.
private static final String VERSION = Configuration.getGlobalConfiguration()
    .get(Constants.PROPERTY_AZURE_STORAGE_SAS_SERVICE_VERSION, DataLakeServiceVersion.getLatest().getVersion());
// Values captured from DataLakeServiceSasSignatureValues at construction; "resource" and
// "directoryDepth" are derived later by ensureState().
private SasProtocol protocol;
private OffsetDateTime startTime;
private OffsetDateTime expiryTime;
private String permissions;
private SasIpRange sasIpRange;
private String fileSystemName;
private String pathName;
private String resource;
private String identifier;
private String cacheControl;
private String contentDisposition;
private String contentEncoding;
private String contentLanguage;
private String contentType;
// True when the SAS targets a directory; directoryDepth ("sdd") is computed from pathName.
private Boolean isDirectory;
private Integer directoryDepth;
// User-delegation-only parameters (saoid / suoid / scid); saoid and suoid are mutually exclusive.
private String authorizedAadObjectId;
private String unauthorizedAadObjectId;
private String correlationId;
/**
 * Creates a new {@link DataLakeSasImplUtil} scoped to a file system: no path name is set and the
 * target is not a directory.
 *
 * @param sasValues {@link DataLakeServiceSasSignatureValues}
 * @param fileSystemName The file system name
 */
public DataLakeSasImplUtil(DataLakeServiceSasSignatureValues sasValues, String fileSystemName) {
    this(sasValues, fileSystemName, null, false);
}
/**
* Creates a new {@link DataLakeSasImplUtil} with the specified parameters
*
* @param sasValues {@link DataLakeServiceSasSignatureValues}
* @param fileSystemName The file system name
* @param pathName The path name
* @param isDirectory Whether or not the path points to a directory.
*/
public DataLakeSasImplUtil(DataLakeServiceSasSignatureValues sasValues, String fileSystemName, String pathName,
    boolean isDirectory) {
    Objects.requireNonNull(sasValues);
    // Copy every signature value eagerly so later mutation of sasValues cannot
    // change what this util signs.
    this.protocol = sasValues.getProtocol();
    this.startTime = sasValues.getStartTime();
    this.expiryTime = sasValues.getExpiryTime();
    this.permissions = sasValues.getPermissions();
    this.sasIpRange = sasValues.getSasIpRange();
    this.fileSystemName = fileSystemName;
    this.pathName = pathName;
    this.identifier = sasValues.getIdentifier();
    this.cacheControl = sasValues.getCacheControl();
    this.contentDisposition = sasValues.getContentDisposition();
    this.contentEncoding = sasValues.getContentEncoding();
    this.contentLanguage = sasValues.getContentLanguage();
    this.contentType = sasValues.getContentType();
    // User-delegation-only values; at most one of the two agent object ids may
    // be set (validated in ensureState()).
    this.authorizedAadObjectId = sasValues.getPreauthorizedAgentObjectId();
    this.unauthorizedAadObjectId = sasValues.getAgentObjectId();
    this.correlationId = sasValues.getCorrelationId();
    this.isDirectory = isDirectory;
}
/**
* Generates a Sas signed with a {@link StorageSharedKeyCredential}
*
* @param storageSharedKeyCredentials {@link StorageSharedKeyCredential}
* @param context Additional context that is passed through the code when generating a SAS.
* @return A String representing the Sas
*/
/**
 * Generates a SAS signed with a {@link StorageSharedKeyCredential}.
 *
 * @param storageSharedKeyCredentials {@link StorageSharedKeyCredential}
 * @param context Additional context that is passed through the code when generating a SAS.
 * @return A String representing the SAS query parameters.
 */
public String generateSas(StorageSharedKeyCredential storageSharedKeyCredentials, Context context) {
    StorageImplUtils.assertNotNull("storageSharedKeyCredentials", storageSharedKeyCredentials);
    ensureState();
    // Canonicalize the resource, HMAC-sign the string-to-sign with the account
    // key, then encode everything as query parameters.
    String canonicalName = getCanonicalName(storageSharedKeyCredentials.getAccountName());
    String toSign = stringToSign(canonicalName);
    StorageImplUtils.logStringToSign(LOGGER, toSign, context);
    String hmac = storageSharedKeyCredentials.computeHmac256(toSign);
    return encode(null /* userDelegationKey */, hmac);
}
/**
* Generates a Sas signed with a {@link UserDelegationKey}
*
* @param delegationKey {@link UserDelegationKey}
* @param accountName The account name
* @param context Additional context that is passed through the code when generating a SAS.
* @return A String representing the Sas
*/
/**
 * Generates a SAS signed with a {@link UserDelegationKey}.
 *
 * @param delegationKey {@link UserDelegationKey}
 * @param accountName The account name.
 * @param context Additional context that is passed through the code when generating a SAS.
 * @return A String representing the SAS query parameters.
 */
public String generateUserDelegationSas(UserDelegationKey delegationKey, String accountName, Context context) {
    StorageImplUtils.assertNotNull("delegationKey", delegationKey);
    StorageImplUtils.assertNotNull("accountName", accountName);
    ensureState();
    // Same flow as generateSas, but the HMAC key is the delegation key's value
    // and the delegation key's signed fields join the encoded output.
    String canonicalName = getCanonicalName(accountName);
    String toSign = stringToSign(delegationKey, canonicalName);
    StorageImplUtils.logStringToSign(LOGGER, toSign, context);
    String hmac = StorageImplUtils.computeHMac256(delegationKey.getValue(), toSign);
    return encode(delegationKey, hmac);
}
/**
* Encodes a Sas from the values in this type.
* @param userDelegationKey {@link UserDelegationKey}
* @param signature The signature of the Sas.
* @return A String representing the Sas.
*/
private String encode(UserDelegationKey userDelegationKey, String signature) {
    /*
    We should be url-encoding each key and each value, but because we know all the keys and values will encode to
    themselves, we cheat except for the signature value.
    */
    StringBuilder sb = new StringBuilder();
    tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_SERVICE_VERSION, VERSION);
    tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_PROTOCOL, this.protocol);
    tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_START_TIME, formatQueryParameterDate(this.startTime));
    tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_EXPIRY_TIME, formatQueryParameterDate(this.expiryTime));
    tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_IP_RANGE, this.sasIpRange);
    tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_SIGNED_IDENTIFIER, this.identifier);
    // Delegation-key parameters only appear when signing with a user delegation key.
    if (userDelegationKey != null) {
        tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_SIGNED_OBJECT_ID,
            userDelegationKey.getSignedObjectId());
        tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_SIGNED_TENANT_ID,
            userDelegationKey.getSignedTenantId());
        tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_SIGNED_KEY_START,
            formatQueryParameterDate(userDelegationKey.getSignedStart()));
        tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_SIGNED_KEY_EXPIRY,
            formatQueryParameterDate(userDelegationKey.getSignedExpiry()));
        tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_SIGNED_KEY_SERVICE,
            userDelegationKey.getSignedService());
        tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_SIGNED_KEY_VERSION,
            userDelegationKey.getSignedVersion());
        /* Only parameters relevant for user delegation SAS. */
        tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_PREAUTHORIZED_AGENT_OBJECT_ID, this.authorizedAadObjectId);
        tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_AGENT_OBJECT_ID, this.unauthorizedAadObjectId);
        tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_CORRELATION_ID, this.correlationId);
    }
    tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_SIGNED_RESOURCE, this.resource);
    tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_SIGNED_PERMISSIONS, this.permissions);
    // Directory depth ("sdd") is only meaningful for directory-scoped SAS.
    if (this.isDirectory) {
        tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_DIRECTORY_DEPTH, this.directoryDepth);
    }
    tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_SIGNATURE, signature);
    tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_CACHE_CONTROL, this.cacheControl);
    tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_CONTENT_DISPOSITION, this.contentDisposition);
    tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_CONTENT_ENCODING, this.contentEncoding);
    tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_CONTENT_LANGUAGE, this.contentLanguage);
    tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_CONTENT_TYPE, this.contentType);
    return sb.toString();
}
/**
* Ensures that the builder's properties are in a consistent state.
* 1. If there is no identifier set, ensure expiryTime and permissions are set.
* 2. Resource name is chosen by:
* a. If "BlobName" is _not_ set, it is a container resource.
* b. Otherwise, if "SnapshotId" is set, it is a blob snapshot resource.
* c. Otherwise, if "VersionId" is set, it is a blob version resource.
* d. Otherwise, it is a blob resource.
* 3. Reparse permissions depending on what the resource is. If it is an unrecognized resource, do nothing.
* 4. Ensure saoid is not set when suoid is set and vice versa.
*
* Taken from:
* https:
* https:
*/
private void ensureState() {
    // Without a stored-access-policy identifier, expiry and permissions are
    // mandatory inputs to the signature.
    if (identifier == null) {
        if (expiryTime == null || permissions == null) {
            throw LOGGER.logExceptionAsError(new IllegalStateException("If identifier is not set, expiry time "
                + "and permissions must be set"));
        }
    }
    // Classify the resource first; the permission re-parse below depends on it.
    if (CoreUtils.isNullOrEmpty(pathName)) {
        resource = SAS_CONTAINER_CONSTANT;
    } else {
        if (isDirectory) {
            resource = SAS_DIRECTORY_CONSTANT;
            // Depth = number of path segments; emitted as "sdd" for directory SAS.
            this.directoryDepth = pathName.split("/").length;
        } else {
            resource = SAS_BLOB_CONSTANT;
        }
    }
    // Normalize the permission string through the resource-appropriate parser
    // so characters are ordered/validated consistently before signing.
    if (permissions != null) {
        switch (resource) {
            case SAS_BLOB_CONSTANT:
            case SAS_DIRECTORY_CONSTANT:
                permissions = PathSasPermission.parse(permissions).toString();
                break;
            case SAS_CONTAINER_CONSTANT:
                permissions = FileSystemSasPermission.parse(permissions).toString();
                break;
            default:
                LOGGER.info("Not re-parsing permissions. Resource type '{}' is unknown.", resource);
                break;
        }
    }
    // saoid and suoid are mutually exclusive in a user delegation SAS.
    if (this.authorizedAadObjectId != null && this.unauthorizedAadObjectId != null) {
        throw LOGGER.logExceptionAsError(new IllegalStateException("agentObjectId and preauthorizedAgentObjectId "
            + "can not both be set."));
    }
}
/**
* Computes the canonical name for a container or blob resource for SAS signing.
*/
/**
 * Builds the canonical blob-endpoint resource name used for signing:
 * {@code /blob/{account}/{fileSystem}} for a file system, with
 * {@code /{path}} appended (backslashes normalized to forward slashes)
 * when a path name is present.
 */
private String getCanonicalName(String account) {
    String base = "/blob/" + account + "/" + fileSystemName;
    return CoreUtils.isNullOrEmpty(pathName)
        ? base
        : base + "/" + pathName.replace("\\", "/");
}
/**
 * Assembles the user-delegation string-to-sign for this SAS.
 * <p>
 * The signed segments vary by service version: versions after 2019-12-12 add
 * the preauthorized/agent object ids and the correlation id, and versions
 * after 2020-10-02 additionally add an (empty) encryption scope segment.
 * Previously the three version branches each duplicated the full ~20-segment
 * list; the segments are now built once, in order, with version-gated
 * insertions. Segment order is part of the service's signing contract and
 * must not change.
 *
 * @param key the user delegation key supplying the signed-key segments
 * @param canonicalName the canonical resource name being signed
 * @return the newline-joined string to sign
 */
private String stringToSign(final UserDelegationKey key, String canonicalName) {
    java.util.List<String> segments = new java.util.ArrayList<>(22);
    segments.add(this.permissions == null ? "" : this.permissions);
    segments.add(this.startTime == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(this.startTime));
    segments.add(this.expiryTime == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(this.expiryTime));
    segments.add(canonicalName);
    segments.add(key.getSignedObjectId() == null ? "" : key.getSignedObjectId());
    segments.add(key.getSignedTenantId() == null ? "" : key.getSignedTenantId());
    segments.add(key.getSignedStart() == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(key.getSignedStart()));
    segments.add(key.getSignedExpiry() == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(key.getSignedExpiry()));
    segments.add(key.getSignedService() == null ? "" : key.getSignedService());
    segments.add(key.getSignedVersion() == null ? "" : key.getSignedVersion());
    if (VERSION.compareTo(DataLakeServiceVersion.V2019_12_12.getVersion()) > 0) {
        // saoid / suoid / scid segments exist only after service version 2019-12-12.
        segments.add(this.authorizedAadObjectId == null ? "" : this.authorizedAadObjectId);
        segments.add(this.unauthorizedAadObjectId == null ? "" : this.unauthorizedAadObjectId);
        segments.add(this.correlationId == null ? "" : this.correlationId);
    }
    segments.add(this.sasIpRange == null ? "" : this.sasIpRange.toString());
    segments.add(this.protocol == null ? "" : this.protocol.toString());
    segments.add(VERSION);
    segments.add(resource);
    segments.add(""); /* Version segment. */
    if (VERSION.compareTo(DataLakeServiceVersion.V2020_10_02.getVersion()) > 0) {
        segments.add(""); /* Encryption scope segment, introduced after 2020-10-02. */
    }
    segments.add(this.cacheControl == null ? "" : this.cacheControl);
    segments.add(this.contentDisposition == null ? "" : this.contentDisposition);
    segments.add(this.contentEncoding == null ? "" : this.contentEncoding);
    segments.add(this.contentLanguage == null ? "" : this.contentLanguage);
    segments.add(this.contentType == null ? "" : this.contentType);
    return String.join("\n", segments);
}
} |
```suggestion return createIfNotExistsWithResponse(null, null, null).map(response -> true).switchIfEmpty(Mono.just(false)); ``` may generate slightly less garbage. please revisit similar places. | public Mono<Boolean> createIfNotExists() {
return createIfNotExistsWithResponse(null, null, null).flatMap(response ->
Mono.just(true)).switchIfEmpty(Mono.just(false));
} | Mono.just(true)).switchIfEmpty(Mono.just(false)); | public Mono<Boolean> createIfNotExists() {
return createIfNotExistsWithResponse(null).map(response -> response.getStatusCode() != 409);
} | class BlobContainerAsyncClient {
/**
* Special container name for the root container in the Storage account.
*/
public static final String ROOT_CONTAINER_NAME = "$root";
/**
* Special container name for the static website container in the Storage account.
*/
public static final String STATIC_WEBSITE_CONTAINER_NAME = "$web";
/**
* Special container name for the logs container in the Storage account.
*/
public static final String LOG_CONTAINER_NAME = "$logs";
private static final ClientLogger LOGGER = new ClientLogger(BlobContainerAsyncClient.class);
private final AzureBlobStorageImpl azureBlobStorage;
private final String accountName;
private final String containerName;
private final BlobServiceVersion serviceVersion;
private final CpkInfo customerProvidedKey;
private final EncryptionScope encryptionScope;
private final BlobContainerEncryptionScope blobContainerEncryptionScope;
/**
* Package-private constructor for use by {@link BlobContainerClientBuilder}.
*
* @param pipeline The pipeline used to send and receive service requests.
* @param url The endpoint where to send service requests.
* @param serviceVersion The version of the service to receive requests.
* @param accountName The storage account name.
* @param containerName The container name.
* @param customerProvidedKey Customer provided key used during encryption of the blob's data on the server, pass
* {@code null} to allow the service to use its own encryption.
* @param encryptionScope Encryption scope used during encryption of the blob's data on the server, pass
* {@code null} to allow the service to use its own encryption.
*/
BlobContainerAsyncClient(HttpPipeline pipeline, String url, BlobServiceVersion serviceVersion,
    String accountName, String containerName, CpkInfo customerProvidedKey, EncryptionScope encryptionScope,
    BlobContainerEncryptionScope blobContainerEncryptionScope) {
    // Build the generated-layer client once; all service calls go through it.
    this.azureBlobStorage = new AzureBlobStorageImplBuilder()
        .pipeline(pipeline)
        .url(url)
        .version(serviceVersion.getVersion())
        .buildClient();
    this.serviceVersion = serviceVersion;
    this.accountName = accountName;
    this.containerName = containerName;
    this.customerProvidedKey = customerProvidedKey;
    this.encryptionScope = encryptionScope;
    this.blobContainerEncryptionScope = blobContainerEncryptionScope;
    /* Check to make sure the uri is valid. We don't want the error to occur later in the generated layer
    when the sas token has already been applied. */
    try {
        URI.create(getBlobContainerUrl());
    } catch (IllegalArgumentException ex) {
        throw LOGGER.logExceptionAsError(ex);
    }
}
/**
* Creates a new BlobAsyncClient object by concatenating blobName to the end of ContainerAsyncClient's URL. The new
* BlobAsyncClient uses the same request policy pipeline as the ContainerAsyncClient.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.blob.BlobContainerAsyncClient.getBlobAsyncClient
* <pre>
* BlobAsyncClient blobAsyncClient = client.getBlobAsyncClient&
* </pre>
* <!-- end com.azure.storage.blob.BlobContainerAsyncClient.getBlobAsyncClient
*
* @param blobName A {@code String} representing the name of the blob. If the blob name contains special characters,
* pass in the url encoded version of the blob name.
* @return A new {@link BlobAsyncClient} object which references the blob with the specified name in this container.
*/
public BlobAsyncClient getBlobAsyncClient(String blobName) {
    // Delegate to the snapshot-aware overload with no snapshot id.
    return getBlobAsyncClient(blobName, null);
}
/**
* Creates a new BlobAsyncClient object by concatenating blobName to the end of ContainerAsyncClient's URL. The new
* BlobAsyncClient uses the same request policy pipeline as the ContainerAsyncClient.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.blob.BlobContainerAsyncClient.getBlobAsyncClient
* <pre>
* BlobAsyncClient blobAsyncClient = client.getBlobAsyncClient&
* </pre>
* <!-- end com.azure.storage.blob.BlobContainerAsyncClient.getBlobAsyncClient
*
* @param blobName A {@code String} representing the name of the blob. If the blob name contains special characters,
* pass in the url encoded version of the blob name.
* @param snapshot the snapshot identifier for the blob.
* @return A new {@link BlobAsyncClient} object which references the blob with the specified name in this container.
*/
public BlobAsyncClient getBlobAsyncClient(String blobName, String snapshot) {
    // New blob client shares this container client's pipeline, keys, and scope.
    return new BlobAsyncClient(getHttpPipeline(), getAccountUrl(), getServiceVersion(), getAccountName(),
        getBlobContainerName(), blobName, snapshot, getCustomerProvidedKey(), encryptionScope);
}
/**
* Creates a new BlobAsyncClient object by concatenating blobName to the end of ContainerAsyncClient's URL. The new
* BlobAsyncClient uses the same request policy pipeline as the ContainerAsyncClient.
*
* @param blobName A {@code String} representing the name of the blob. If the blob name contains special characters,
* pass in the url encoded version of the blob name.
* @param versionId the version identifier for the blob, pass {@code null} to interact with the latest blob version.
* @return A new {@link BlobAsyncClient} object which references the blob with the specified name in this container.
*/
public BlobAsyncClient getBlobVersionAsyncClient(String blobName, String versionId) {
    // Same as getBlobAsyncClient but pinned to a specific version id (null snapshot).
    return new BlobAsyncClient(getHttpPipeline(), getAccountUrl(), getServiceVersion(), getAccountName(),
        getBlobContainerName(), blobName, null, getCustomerProvidedKey(), encryptionScope, versionId);
}
/**
* Get the url of the storage account.
*
* @return the URL of the storage account
*/
public String getAccountUrl() {
    // Raw storage endpoint, without the container segment.
    return azureBlobStorage.getUrl();
}
/**
* Gets the URL of the container represented by this client.
*
* @return the URL.
*/
public String getBlobContainerUrl() {
    // Account endpoint plus the container name segment.
    return azureBlobStorage.getUrl() + "/" + containerName;
}
/**
* Get the container name.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.blob.BlobContainerAsyncClient.getBlobContainerName -->
* <pre>
* String containerName = client.getBlobContainerName&
* System.out.println&
* </pre>
* <!-- end com.azure.storage.blob.BlobContainerAsyncClient.getBlobContainerName -->
*
* @return The name of container.
*/
public String getBlobContainerName() {
    // Simple accessor; value is fixed at construction.
    return containerName;
}
/**
* Get associated account name.
*
* @return account name associated with this storage resource.
*/
public String getAccountName() {
    // Simple accessor; value is fixed at construction.
    return this.accountName;
}
/**
* Get an async client pointing to the account.
*
* @return {@link BlobServiceAsyncClient}
*/
public BlobServiceAsyncClient getServiceAsyncClient() {
    // Account-level client configured from this container client's settings.
    return getServiceClientBuilder().buildAsyncClient();
}
BlobServiceClientBuilder getServiceClientBuilder() {
    // Re-wrap the CPK as the public CustomerProvidedKey type the builder accepts.
    CustomerProvidedKey encryptionKey = this.customerProvidedKey == null ? null
        : new CustomerProvidedKey(this.customerProvidedKey.getEncryptionKey());
    // Propagate this client's endpoint, pipeline, version, and encryption settings.
    return new BlobServiceClientBuilder()
        .endpoint(this.getBlobContainerUrl())
        .pipeline(this.getHttpPipeline())
        .serviceVersion(this.serviceVersion)
        .blobContainerEncryptionScope(this.blobContainerEncryptionScope)
        .encryptionScope(this.getEncryptionScope())
        .customerProvidedKey(encryptionKey);
}
/**
* Gets the service version the client is using.
*
* @return the service version the client is using.
*/
public BlobServiceVersion getServiceVersion() {
    // Simple accessor; value is fixed at construction.
    return serviceVersion;
}
/**
* Gets the {@link HttpPipeline} powering this client.
*
* @return The pipeline.
*/
public HttpPipeline getHttpPipeline() {
    // Pipeline is owned by the generated-layer client.
    return azureBlobStorage.getHttpPipeline();
}
/**
* Gets the {@link CpkInfo} associated with this client that will be passed to {@link BlobAsyncClient
* BlobAsyncClients} when {@link
*
* @return the customer provided key used for encryption.
*/
public CpkInfo getCustomerProvidedKey() {
    // May be null when the service's own encryption is used.
    return customerProvidedKey;
}
/**
* Gets the {@link EncryptionScope} used to encrypt this blob's content on the server.
*
* @return the encryption scope used for encryption.
*/
/**
 * Gets the {@link EncryptionScope} name used to encrypt this blob's content on the server.
 *
 * @return the encryption scope name, or {@code null} when none is configured.
 */
public String getEncryptionScope() {
    return encryptionScope == null ? null : encryptionScope.getEncryptionScope();
}
/**
* Gets if the container this client represents exists in the cloud.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.blob.BlobContainerAsyncClient.exists -->
* <pre>
* client.exists&
* </pre>
* <!-- end com.azure.storage.blob.BlobContainerAsyncClient.exists -->
*
* @return true if the container exists, false if it doesn't
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Boolean> exists() {
    // Unwrap the Response<Boolean> produced by the WithResponse variant.
    return existsWithResponse().flatMap(FluxUtil::toMono);
}
/**
* Gets if the container this client represents exists in the cloud.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.blob.BlobContainerAsyncClient.existsWithResponse -->
* <pre>
* client.existsWithResponse&
* </pre>
* <!-- end com.azure.storage.blob.BlobContainerAsyncClient.existsWithResponse -->
*
* @return true if the container exists, false if it doesn't
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<Boolean>> existsWithResponse() {
    try {
        // withContext captures the reactor Context for the internal overload.
        return withContext(this::existsWithResponse);
    } catch (RuntimeException ex) {
        // Surface synchronous failures through the Mono rather than throwing.
        return monoError(LOGGER, ex);
    }
}
/**
* Gets if the container this client represents exists in the cloud.
*
* @return true if the container exists, false if it doesn't
*/
Mono<Response<Boolean>> existsWithResponse(Context context) {
    // Probe existence by fetching properties: success means the container exists;
    // a 404 from the service is translated into a successful Response carrying
    // 'false' (preserving the service response's request/status/headers) instead
    // of propagating the error.
    return this.getPropertiesWithResponse(null, context)
        .map(cp -> (Response<Boolean>) new SimpleResponse<>(cp, true))
        .onErrorResume(t -> t instanceof BlobStorageException && ((BlobStorageException) t).getStatusCode() == 404,
            t -> {
                HttpResponse response = ((BlobStorageException) t).getResponse();
                return Mono.just(new SimpleResponse<>(response.getRequest(), response.getStatusCode(),
                    response.getHeaders(), false));
            });
}
/**
* Creates a new container within a storage account. If a container with the same name already exists, the operation
* fails. For more information, see the
* <a href="https:
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.blob.BlobContainerAsyncClient.create -->
* <pre>
* client.create&
* response -> System.out.printf&
* error -> System.out.printf&
* </pre>
* <!-- end com.azure.storage.blob.BlobContainerAsyncClient.create -->
*
* @return A reactive response signalling completion.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Void> create() {
    // No metadata, no public access; discard the Response wrapper.
    return createWithResponse(null, null).flatMap(FluxUtil::toMono);
}
/**
* Creates a new container within a storage account. If a container with the same name already exists, the operation
* fails. For more information, see the
* <a href="https:
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.blob.BlobContainerAsyncClient.createWithResponse
* <pre>
* Map<String, String> metadata = Collections.singletonMap&
* client.createWithResponse&
* System.out.printf&
* </pre>
* <!-- end com.azure.storage.blob.BlobContainerAsyncClient.createWithResponse
*
* @param metadata Metadata to associate with the container. If there is leading or trailing whitespace in any
* metadata key or value, it must be removed or encoded.
* @param accessType Specifies how the data in this container is available to the public. See the
* x-ms-blob-public-access header in the Azure Docs for more information. Pass null for no public access.
* @return A reactive response signalling completion.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<Void>> createWithResponse(Map<String, String> metadata, PublicAccessType accessType) {
    try {
        // withContext captures the reactor Context for the internal overload.
        return withContext(context -> createWithResponse(metadata, accessType, context));
    } catch (RuntimeException ex) {
        return monoError(LOGGER, ex);
    }
}
Mono<Response<Void>> createWithResponse(Map<String, String> metadata, PublicAccessType accessType,
    Context context) {
    context = context == null ? Context.NONE : context;
    // Delegate to the generated layer; tag the call with the storage tracing namespace.
    return this.azureBlobStorage.getContainers().createWithResponseAsync(
        containerName, null, metadata, accessType, null, blobContainerEncryptionScope,
        context.addData(AZ_TRACING_NAMESPACE_KEY, STORAGE_TRACING_NAMESPACE_VALUE))
        .map(response -> new SimpleResponse<>(response, null));
}
/**
* Creates a new container within a storage account if it does not exist. For more information, see the
* <a href="https:
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.blob.BlobContainerAsyncClient.createIfNotExists -->
* <pre>
* client.createIfNotExists&
* if &
* System.out.println&
* &
* System.out.println&
* &
* &
* </pre>
* <!-- end com.azure.storage.blob.BlobContainerAsyncClient.createIfNotExists -->
*
* @return A reactive response signaling completion. {@code True} indicates a new container was created,
* {@code False} indicates a container already existed at this location.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
/**
* Creates a new container within a storage account if it does not exist. For more information, see the
* <a href="https:
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.blob.BlobContainerAsyncClient.createIfNotExistsWithResponse
* <pre>
* Map<String, String> metadata = Collections.singletonMap&
* client.createIfNotExistsWithResponse&
* .doOnSuccess&
* .subscribe&
* </pre>
* <!-- end com.azure.storage.blob.BlobContainerAsyncClient.createIfNotExistsWithResponse
*
* @param metadata Metadata to associate with the container. If there is leading or trailing whitespace in any
* metadata key or value, it must be removed or encoded.
* @param accessType Specifies how the data in this container is available to the public. See the
* x-ms-blob-public-access header in the Azure Docs for more information. Pass null for no public access.
* @return A reactive response signaling completion. The presence of a {@link Response} item indicates a new
* container was created. An empty {@code Mono} indicates a container already existed at this location.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<Void>> createIfNotExistsWithResponse(Map<String, String> metadata, PublicAccessType accessType) {
    try {
        // null Context: the internal overload normalizes it.
        return createIfNotExistsWithResponse(metadata, accessType, null);
    } catch (RuntimeException ex) {
        return monoError(LOGGER, ex);
    }
}
Mono<Response<Void>> createIfNotExistsWithResponse(Map<String, String> metadata, PublicAccessType accessType, Context context) {
    try {
        // A 409 (container already exists) is swallowed into an empty Mono so
        // callers can distinguish "created" (Response emitted) from "already existed".
        return createWithResponse(metadata, accessType, context)
            .onErrorResume(t -> t instanceof BlobStorageException && ((BlobStorageException) t).getStatusCode() == 409,
                t -> Mono.empty());
    } catch (RuntimeException ex) {
        return monoError(LOGGER, ex);
    }
}
/**
* Marks the specified container for deletion. The container and any blobs contained within it are later deleted
* during garbage collection. For more information, see the
* <a href="https:
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.blob.BlobContainerAsyncClient.delete -->
* <pre>
* client.delete&
* response -> System.out.printf&
* error -> System.out.printf&
* </pre>
* <!-- end com.azure.storage.blob.BlobContainerAsyncClient.delete -->
*
* @return A reactive response signalling completion.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Void> delete() {
    // No request conditions; discard the Response wrapper.
    return deleteWithResponse(null).flatMap(FluxUtil::toMono);
}
/**
* Marks the specified container for deletion. The container and any blobs contained within it are later deleted
* during garbage collection. For more information, see the
* <a href="https:
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.blob.BlobContainerAsyncClient.deleteWithResponse
* <pre>
* BlobRequestConditions requestConditions = new BlobRequestConditions&
* .setLeaseId&
* .setIfUnmodifiedSince&
*
* client.deleteWithResponse&
* System.out.printf&
* </pre>
* <!-- end com.azure.storage.blob.BlobContainerAsyncClient.deleteWithResponse
*
* @param requestConditions {@link BlobRequestConditions}
* @return A reactive response signalling completion.
* @throws UnsupportedOperationException If either {@link BlobRequestConditions
* {@link BlobRequestConditions
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<Void>> deleteWithResponse(BlobRequestConditions requestConditions) {
    try {
        // withContext captures the reactor Context for the internal overload.
        return withContext(context -> deleteWithResponse(requestConditions, context));
    } catch (RuntimeException ex) {
        return monoError(LOGGER, ex);
    }
}
Mono<Response<Void>> deleteWithResponse(BlobRequestConditions requestConditions, Context context) {
    requestConditions = requestConditions == null ? new BlobRequestConditions() : requestConditions;
    // Container delete does not support ETag conditions; fail fast if provided.
    if (!validateNoETag(requestConditions)) {
        throw LOGGER.logExceptionAsError(
            new UnsupportedOperationException("ETag access conditions are not supported for this API."));
    }
    context = context == null ? Context.NONE : context;
    // Delegate to the generated layer with lease and modified-since conditions.
    return this.azureBlobStorage.getContainers().deleteWithResponseAsync(containerName, null,
        requestConditions.getLeaseId(), requestConditions.getIfModifiedSince(),
        requestConditions.getIfUnmodifiedSince(), null,
        context.addData(AZ_TRACING_NAMESPACE_KEY, STORAGE_TRACING_NAMESPACE_VALUE))
        .map(response -> new SimpleResponse<>(response, null));
}
/**
* Marks the specified container for deletion if it exists. The container and any blobs contained within it are later deleted
* during garbage collection. For more information, see the
* <a href="https:
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.blob.BlobContainerAsyncClient.deleteIfExists -->
* <pre>
* client.deleteIfExists&
* if &
* System.out.println&
* &
* System.out.println&
* &
* &
* </pre>
* <!-- end com.azure.storage.blob.BlobContainerAsyncClient.deleteIfExists -->
*
* @return A reactive response signaling completion. {@code True} indicates the container was deleted,
* {@code False} indicates the container does not exist.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Boolean> deleteIfExists() {
    // deleteIfExistsWithResponse emits a Response when the container was deleted
    // and completes empty when it did not exist (404 swallowed downstream).
    // map(response -> true) replaces flatMap(response -> Mono.just(true)): the
    // same result without allocating an extra Mono per emission.
    return deleteIfExistsWithResponse(null)
        .map(response -> true)
        .switchIfEmpty(Mono.just(false));
}
/**
* Marks the specified container for deletion if it exists. The container and any blobs contained within it are
* later deleted during garbage collection. For more information, see the
* <a href="https:
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.blob.BlobContainerAsyncClient.deleteIfExistsWithResponse
* <pre>
* BlobRequestConditions requestConditions = new BlobRequestConditions&
* .setLeaseId&
* .setIfUnmodifiedSince&
*
* client.deleteIfExistsWithResponse&
* .doOnSuccess&
* .subscribe&
* </pre>
* <!-- end com.azure.storage.blob.BlobContainerAsyncClient.deleteIfExistsWithResponse
*
* @param requestConditions {@link BlobRequestConditions}
* @return A reactive response signaling completion. The presence of a {@link Response} item indicates the container
* was successfully deleted. An empty {@code Mono} indicates that the container did not exist.
* @throws UnsupportedOperationException If either {@link BlobRequestConditions
* {@link BlobRequestConditions
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<Void>> deleteIfExistsWithResponse(BlobRequestConditions requestConditions) {
    try {
        // null Context: the internal overload normalizes it.
        return deleteIfExistsWithResponse(requestConditions, null);
    } catch (RuntimeException ex) {
        return monoError(LOGGER, ex);
    }
}
Mono<Response<Void>> deleteIfExistsWithResponse(BlobRequestConditions requestConditions, Context context) {
    // Default to unconditional deletion when the caller supplied no conditions.
    BlobRequestConditions conditions =
        (requestConditions == null) ? new BlobRequestConditions() : requestConditions;
    try {
        // A 404 from the service means "container did not exist" — complete empty
        // so the caller can distinguish that case from a successful delete.
        return deleteWithResponse(conditions, context).onErrorResume(
            error -> error instanceof BlobStorageException
                && ((BlobStorageException) error).getStatusCode() == 404,
            error -> Mono.empty());
    } catch (RuntimeException ex) {
        return monoError(LOGGER, ex);
    }
}
/**
* Returns the container's metadata and system properties. For more information, see the
* <a href="https:
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.blob.BlobContainerAsyncClient.getProperties -->
* <pre>
* client.getProperties&
* System.out.printf&
* response.getBlobPublicAccess&
* response.hasLegalHold&
* response.hasImmutabilityPolicy&
* </pre>
* <!-- end com.azure.storage.blob.BlobContainerAsyncClient.getProperties -->
*
* @return A {@link Mono} containing a {@link Response} whose {@link Response
* container properties.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<BlobContainerProperties> getProperties() {
    // Unwrap the Response envelope produced by the leaseId-aware overload.
    return getPropertiesWithResponse(null).flatMap(response -> FluxUtil.toMono(response));
}
/**
* Returns the container's metadata and system properties. For more information, see the
* <a href="https:
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.blob.BlobContainerAsyncClient.getPropertiesWithResponse
* <pre>
* client.getPropertiesWithResponse&
* System.out.printf&
* response.getValue&
* response.getValue&
* response.getValue&
* </pre>
* <!-- end com.azure.storage.blob.BlobContainerAsyncClient.getPropertiesWithResponse
*
* @param leaseId The lease ID the active lease on the container must match.
* @return A reactive response containing the container properties.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<BlobContainerProperties>> getPropertiesWithResponse(String leaseId) {
    try {
        // Capture the reactor Context and hand it to the internal overload.
        return withContext(ctx -> this.getPropertiesWithResponse(leaseId, ctx));
    } catch (RuntimeException ex) {
        return monoError(LOGGER, ex);
    }
}
// Fetches container properties from the service and maps the generated-layer
// response headers into the public BlobContainerProperties model.
Mono<Response<BlobContainerProperties>> getPropertiesWithResponse(String leaseId, Context context) {
    context = context == null ? Context.NONE : context;
    return this.azureBlobStorage.getContainers()
        .getPropertiesWithResponseAsync(containerName, null, leaseId, null,
            context.addData(AZ_TRACING_NAMESPACE_KEY, STORAGE_TRACING_NAMESPACE_VALUE))
        .map(rb -> {
            ContainersGetPropertiesHeaders hd = rb.getDeserializedHeaders();
            // Boolean.TRUE.equals(...) guards against null Boolean headers so the
            // primitive-boolean constructor parameters never NPE on unboxing.
            BlobContainerProperties properties = new BlobContainerProperties(hd.getXMsMeta(), hd.getETag(),
                hd.getLastModified(), hd.getXMsLeaseDuration(), hd.getXMsLeaseState(), hd.getXMsLeaseStatus(),
                hd.getXMsBlobPublicAccess(), Boolean.TRUE.equals(hd.isXMsHasImmutabilityPolicy()),
                Boolean.TRUE.equals(hd.isXMsHasLegalHold()), hd.getXMsDefaultEncryptionScope(),
                hd.isXMsDenyEncryptionScopeOverride(), hd.isXMsImmutableStorageWithVersioningEnabled());
            return new SimpleResponse<>(rb, properties);
        });
}
/**
* Sets the container's metadata. For more information, see the
* <a href="https:
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.blob.BlobContainerAsyncClient.setMetadata
* <pre>
* Map<String, String> metadata = Collections.singletonMap&
* client.setMetadata&
* response -> System.out.printf&
* error -> System.out.printf&
* </pre>
* <!-- end com.azure.storage.blob.BlobContainerAsyncClient.setMetadata
*
* @param metadata Metadata to associate with the container. If there is leading or trailing whitespace in any
* metadata key or value, it must be removed or encoded.
* @return A {@link Mono} containing a {@link Response} whose {@link Response
* completion.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Void> setMetadata(Map<String, String> metadata) {
    // Discard the Response envelope; callers only need completion.
    return setMetadataWithResponse(metadata, null).flatMap(response -> FluxUtil.toMono(response));
}
/**
* Sets the container's metadata. For more information, see the
* <a href="https:
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.blob.BlobContainerAsyncClient.setMetadataWithResponse
* <pre>
* Map<String, String> metadata = Collections.singletonMap&
* BlobRequestConditions requestConditions = new BlobRequestConditions&
* .setLeaseId&
* .setIfUnmodifiedSince&
*
* client.setMetadataWithResponse&
* System.out.printf&
* </pre>
* <!-- end com.azure.storage.blob.BlobContainerAsyncClient.setMetadataWithResponse
*
* @param metadata Metadata to associate with the container. If there is leading or trailing whitespace in any
* metadata key or value, it must be removed or encoded.
* @param requestConditions {@link BlobRequestConditions}
* @return A reactive response signalling completion.
* @throws UnsupportedOperationException If one of {@link BlobRequestConditions
* {@link BlobRequestConditions
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<Void>> setMetadataWithResponse(Map<String, String> metadata,
    BlobRequestConditions requestConditions) {
    try {
        // Capture the reactor Context and hand it to the internal overload.
        return withContext(ctx -> this.setMetadataWithResponse(metadata, requestConditions, ctx));
    } catch (RuntimeException ex) {
        return monoError(LOGGER, ex);
    }
}
// Sets container metadata. The service only supports If-Modified-Since (plus lease)
// for this operation, so any ETag condition or If-Unmodified-Since is rejected eagerly.
Mono<Response<Void>> setMetadataWithResponse(Map<String, String> metadata,
    BlobRequestConditions requestConditions, Context context) {
    context = context == null ? Context.NONE : context;
    requestConditions = requestConditions == null ? new BlobRequestConditions() : requestConditions;
    // Fail fast client-side rather than letting the service return an error.
    if (!validateNoETag(requestConditions) || requestConditions.getIfUnmodifiedSince() != null) {
        throw LOGGER.logExceptionAsError(new UnsupportedOperationException(
            "If-Modified-Since is the only HTTP access condition supported for this API"));
    }
    return this.azureBlobStorage.getContainers().setMetadataWithResponseAsync(containerName, null,
        requestConditions.getLeaseId(), metadata, requestConditions.getIfModifiedSince(), null,
        context.addData(AZ_TRACING_NAMESPACE_KEY, STORAGE_TRACING_NAMESPACE_VALUE))
        .map(response -> new SimpleResponse<>(response, null));
}
/**
* Returns the container's permissions. The permissions indicate whether container's blobs may be accessed publicly.
* For more information, see the
* <a href="https:
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.blob.BlobContainerAsyncClient.getAccessPolicy -->
* <pre>
* client.getAccessPolicy&
* System.out.printf&
*
* for &
* System.out.printf&
* identifier.getId&
* identifier.getAccessPolicy&
* &
* &
* </pre>
* <!-- end com.azure.storage.blob.BlobContainerAsyncClient.getAccessPolicy -->
*
* @return A reactive response containing the container access policy.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<BlobContainerAccessPolicies> getAccessPolicy() {
    // Unwrap the Response envelope produced by the leaseId-aware overload.
    return getAccessPolicyWithResponse(null).flatMap(response -> FluxUtil.toMono(response));
}
/**
* Returns the container's permissions. The permissions indicate whether container's blobs may be accessed publicly.
* For more information, see the
* <a href="https:
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.blob.BlobContainerAsyncClient.getAccessPolicyWithResponse
* <pre>
* client.getAccessPolicyWithResponse&
* System.out.printf&
*
* for &
* System.out.printf&
* identifier.getId&
* identifier.getAccessPolicy&
* &
* &
* </pre>
* <!-- end com.azure.storage.blob.BlobContainerAsyncClient.getAccessPolicyWithResponse
*
* @param leaseId The lease ID the active lease on the container must match.
* @return A reactive response containing the container access policy.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<BlobContainerAccessPolicies>> getAccessPolicyWithResponse(String leaseId) {
    try {
        // Capture the reactor Context and hand it to the internal overload.
        return withContext(ctx -> this.getAccessPolicyWithResponse(leaseId, ctx));
    } catch (RuntimeException ex) {
        return monoError(LOGGER, ex);
    }
}
// Retrieves the container's public-access level and its stored signed identifiers,
// combining both into a single BlobContainerAccessPolicies result.
Mono<Response<BlobContainerAccessPolicies>> getAccessPolicyWithResponse(String leaseId, Context context) {
    context = context == null ? Context.NONE : context;
    return this.azureBlobStorage.getContainers().getAccessPolicyWithResponseAsync(
        containerName, null, leaseId, null,
        context.addData(AZ_TRACING_NAMESPACE_KEY, STORAGE_TRACING_NAMESPACE_VALUE))
        .map(response -> new SimpleResponse<>(response,
            // Public-access level comes from a response header; identifiers from the body.
            new BlobContainerAccessPolicies(response.getDeserializedHeaders().getXMsBlobPublicAccess(),
                response.getValue())));
}
/**
* Sets the container's permissions. The permissions indicate whether blobs in a container may be accessed publicly.
* Note that, for each signed identifier, we will truncate the start and expiry times to the nearest second to
* ensure the time formatting is compatible with the service. For more information, see the
* <a href="https:
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.blob.BlobContainerAsyncClient.setAccessPolicy
* <pre>
* BlobSignedIdentifier identifier = new BlobSignedIdentifier&
* .setId&
* .setAccessPolicy&
* .setStartsOn&
* .setExpiresOn&
* .setPermissions&
*
* client.setAccessPolicy&
* response -> System.out.printf&
* error -> System.out.printf&
* </pre>
* <!-- end com.azure.storage.blob.BlobContainerAsyncClient.setAccessPolicy
*
* @param accessType Specifies how the data in this container is available to the public. See the
* x-ms-blob-public-access header in the Azure Docs for more information. Pass null for no public access.
* @param identifiers A list of {@link BlobSignedIdentifier} objects that specify the permissions for the container.
* Please see
* <a href="https:
* for more information. Passing null will clear all access policies.
* @return A reactive response signalling completion.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Void> setAccessPolicy(PublicAccessType accessType, List<BlobSignedIdentifier> identifiers) {
    // Discard the Response envelope; callers only need completion.
    return setAccessPolicyWithResponse(accessType, identifiers, null)
        .flatMap(response -> FluxUtil.toMono(response));
}
/**
* Sets the container's permissions. The permissions indicate whether blobs in a container may be accessed publicly.
* Note that, for each signed identifier, we will truncate the start and expiry times to the nearest second to
* ensure the time formatting is compatible with the service. For more information, see the
* <a href="https:
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.blob.BlobContainerAsyncClient.setAccessPolicyWithResponse
* <pre>
* BlobSignedIdentifier identifier = new BlobSignedIdentifier&
* .setId&
* .setAccessPolicy&
* .setStartsOn&
* .setExpiresOn&
* .setPermissions&
*
* BlobRequestConditions requestConditions = new BlobRequestConditions&
* .setLeaseId&
* .setIfUnmodifiedSince&
*
* client.setAccessPolicyWithResponse&
* .subscribe&
* System.out.printf&
* </pre>
* <!-- end com.azure.storage.blob.BlobContainerAsyncClient.setAccessPolicyWithResponse
*
* @param accessType Specifies how the data in this container is available to the public. See the
* x-ms-blob-public-access header in the Azure Docs for more information. Pass null for no public access.
* @param identifiers A list of {@link BlobSignedIdentifier} objects that specify the permissions for the container.
* Please see
* <a href="https:
* for more information. Passing null will clear all access policies.
* @param requestConditions {@link BlobRequestConditions}
* @return A reactive response signalling completion.
* @throws UnsupportedOperationException If either {@link BlobRequestConditions
* {@link BlobRequestConditions
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<Void>> setAccessPolicyWithResponse(PublicAccessType accessType,
    List<BlobSignedIdentifier> identifiers, BlobRequestConditions requestConditions) {
    try {
        // Capture the reactor Context and hand it to the internal overload.
        return withContext(
            ctx -> setAccessPolicyWithResponse(accessType, identifiers, requestConditions, ctx));
    } catch (RuntimeException ex) {
        return monoError(LOGGER, ex);
    }
}
// Applies the given public-access level and signed identifiers to the container.
// ETag access conditions are rejected because the service does not support them here.
Mono<Response<Void>> setAccessPolicyWithResponse(PublicAccessType accessType,
    List<BlobSignedIdentifier> identifiers, BlobRequestConditions requestConditions, Context context) {
    BlobRequestConditions conditions =
        (requestConditions == null) ? new BlobRequestConditions() : requestConditions;
    if (!validateNoETag(conditions)) {
        throw LOGGER.logExceptionAsError(
            new UnsupportedOperationException("ETag access conditions are not supported for this API."));
    }
    /*
    Truncate start/expiry to whole seconds: the service serializes seconds (or nanoseconds),
    while OffsetDateTime.now() yields milliseconds, which would not serialize properly.
    Sub-second precision on signed-identifier activation windows has no practical value.
    */
    if (identifiers != null) {
        for (BlobSignedIdentifier identifier : identifiers) {
            if (identifier.getAccessPolicy() == null) {
                continue;
            }
            if (identifier.getAccessPolicy().getStartsOn() != null) {
                identifier.getAccessPolicy().setStartsOn(
                    identifier.getAccessPolicy().getStartsOn().truncatedTo(ChronoUnit.SECONDS));
            }
            if (identifier.getAccessPolicy().getExpiresOn() != null) {
                identifier.getAccessPolicy().setExpiresOn(
                    identifier.getAccessPolicy().getExpiresOn().truncatedTo(ChronoUnit.SECONDS));
            }
        }
    }
    Context finalContext = (context == null) ? Context.NONE : context;
    return this.azureBlobStorage.getContainers().setAccessPolicyWithResponseAsync(
        containerName, null, conditions.getLeaseId(), accessType, conditions.getIfModifiedSince(),
        conditions.getIfUnmodifiedSince(), null, identifiers,
        finalContext.addData(AZ_TRACING_NAMESPACE_KEY, STORAGE_TRACING_NAMESPACE_VALUE))
        .map(response -> new SimpleResponse<>(response, null));
}
/**
* Returns a reactive Publisher emitting all the blobs in this container lazily as needed. The directories are
* flattened and only actual blobs and no directories are returned.
*
* <p>
* Blob names are returned in lexicographic order. For more information, see the
* <a href="https:
*
* <p>
* E.g. listing a container containing a 'foo' folder, which contains blobs 'foo1' and 'foo2', and a blob on the
* root level 'bar', will return
*
* <ul>
* <li>foo/foo1
* <li>foo/foo2
* <li>bar
* </ul>
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.blob.BlobContainerAsyncClient.listBlobs -->
* <pre>
* client.listBlobs&
* System.out.printf&
* </pre>
* <!-- end com.azure.storage.blob.BlobContainerAsyncClient.listBlobs -->
*
* @return A reactive response emitting the flattened blobs.
*/
@ServiceMethod(returns = ReturnType.COLLECTION)
public PagedFlux<BlobItem> listBlobs() {
    // Flat listing with all-default options.
    ListBlobsOptions defaults = new ListBlobsOptions();
    return listBlobs(defaults);
}
/**
* Returns a reactive Publisher emitting all the blobs in this container lazily as needed. The directories are
* flattened and only actual blobs and no directories are returned.
*
* <p>
* Blob names are returned in lexicographic order. For more information, see the
* <a href="https:
*
* <p>
* E.g. listing a container containing a 'foo' folder, which contains blobs 'foo1' and 'foo2', and a blob on the
* root level 'bar', will return
*
* <ul>
* <li>foo/foo1
* <li>foo/foo2
* <li>bar
* </ul>
*
*
*
* <!-- src_embed com.azure.storage.blob.BlobContainerAsyncClient.listBlobs
* <pre>
* ListBlobsOptions options = new ListBlobsOptions&
* .setPrefix&
* .setDetails&
* .setRetrieveDeletedBlobs&
* .setRetrieveSnapshots&
*
* client.listBlobs&
* System.out.printf&
* blob.getName&
* blob.isPrefix&
* blob.isDeleted&
* blob.getSnapshot&
* </pre>
* <!-- end com.azure.storage.blob.BlobContainerAsyncClient.listBlobs
*
* @param options {@link ListBlobsOptions}
* @return A reactive response emitting the listed blobs, flattened.
*/
@ServiceMethod(returns = ReturnType.COLLECTION)
public PagedFlux<BlobItem> listBlobs(ListBlobsOptions options) {
    // No continuation token: start enumeration from the beginning.
    return this.listBlobs(options, null);
}
/**
* Returns a reactive Publisher emitting all the blobs in this container lazily as needed. The directories are
* flattened and only actual blobs and no directories are returned.
*
* <p>
* Blob names are returned in lexicographic order. For more information, see the
* <a href="https:
*
* <p>
* E.g. listing a container containing a 'foo' folder, which contains blobs 'foo1' and 'foo2', and a blob on the
* root level 'bar', will return
*
* <ul>
* <li>foo/foo1
* <li>foo/foo2
* <li>bar
* </ul>
*
*
*
* <!-- src_embed com.azure.storage.blob.BlobContainerAsyncClient.listBlobs
* <pre>
* ListBlobsOptions options = new ListBlobsOptions&
* .setPrefix&
* .setDetails&
* .setRetrieveDeletedBlobs&
* .setRetrieveSnapshots&
*
* String continuationToken = "continuationToken";
*
* client.listBlobs&
* System.out.printf&
* blob.getName&
* blob.isPrefix&
* blob.isDeleted&
* blob.getSnapshot&
* </pre>
* <!-- end com.azure.storage.blob.BlobContainerAsyncClient.listBlobs
*
* @param options {@link ListBlobsOptions}
* @param continuationToken Identifies the portion of the list to be returned with the next list operation.
* @return A reactive response emitting the listed blobs, flattened.
*/
@ServiceMethod(returns = ReturnType.COLLECTION)
public PagedFlux<BlobItem> listBlobs(ListBlobsOptions options, String continuationToken) {
    try {
        // No timeout for the async path; the sync client supplies one via the
        // package-private overload.
        Duration noTimeout = null;
        return listBlobsFlatWithOptionalTimeout(options, continuationToken, noTimeout);
    } catch (RuntimeException ex) {
        return pagedFluxError(LOGGER, ex);
    }
}
/*
* Implementation for this paged listing operation, supporting an optional timeout provided by the synchronous
* ContainerClient. Applies the given timeout to each Mono<ContainersListBlobFlatSegmentResponse> backing the
* PagedFlux.
*
* @param options {@link ListBlobsOptions}.
* @param timeout An optional timeout to be applied to the network asynchronous operations.
* @return A reactive response emitting the listed blobs, flattened.
*/
// Backs the public flat-listing methods with a PagedFlux whose page fetcher honors
// both a caller-supplied page size (.byPage(int)) and an optional per-call timeout.
PagedFlux<BlobItem> listBlobsFlatWithOptionalTimeout(ListBlobsOptions options, String continuationToken,
    Duration timeout) {
    BiFunction<String, Integer, Mono<PagedResponse<BlobItem>>> func =
        (marker, pageSize) -> {
            ListBlobsOptions finalOptions;
            // A pageSize from .byPage(int) overrides any maxResultsPerPage in options;
            // prefix and details are carried over from the caller's options.
            if (pageSize != null) {
                if (options == null) {
                    finalOptions = new ListBlobsOptions().setMaxResultsPerPage(pageSize);
                } else {
                    finalOptions = new ListBlobsOptions()
                        .setMaxResultsPerPage(pageSize)
                        .setPrefix(options.getPrefix())
                        .setDetails(options.getDetails());
                }
            } else {
                finalOptions = options;
            }
            return listBlobsFlatSegment(marker, finalOptions, timeout)
                .map(response -> {
                    // A null segment means an empty page, not an error.
                    List<BlobItem> value = response.getValue().getSegment() == null
                        ? Collections.emptyList()
                        : response.getValue().getSegment().getBlobItems().stream()
                        .map(ModelHelper::populateBlobItem)
                        .collect(Collectors.toList());
                    return new PagedResponseBase<>(
                        response.getRequest(),
                        response.getStatusCode(),
                        response.getHeaders(),
                        value,
                        // NextMarker drives continuation for subsequent pages.
                        response.getValue().getNextMarker(),
                        response.getDeserializedHeaders());
                });
        };
    // First page starts at the caller's continuation token; later pages use the marker.
    return new PagedFlux<>(pageSize -> func.apply(continuationToken, pageSize), func);
}
/*
* Returns a single segment of blobs starting from the specified Marker. Use an empty
* marker to start enumeration from the beginning. Blob names are returned in lexicographic order.
* After getting a segment, process it, and then call ListBlobs again (passing the previously-returned
* Marker) to get the next segment. For more information, see the
* <a href="https:
*
* @param marker
* Identifies the portion of the list to be returned with the next list operation.
* This value is returned by the response of a previous list operation as the
* ListBlobsFlatSegmentResponse.body().getNextMarker(). Set to null to list the first segment.
* @param options
* {@link ListBlobsOptions}
*
* @return Emits the successful response.
*/
// Fetches a single flat-listing segment starting at the given marker, applying the
// optional timeout to the network call. A null/empty include list is normalized to null
// so the query parameter is omitted entirely.
private Mono<ContainersListBlobFlatSegmentResponse> listBlobsFlatSegment(String marker, ListBlobsOptions options,
    Duration timeout) {
    options = options == null ? new ListBlobsOptions() : options;
    ArrayList<ListBlobsIncludeItem> include =
        options.getDetails().toList().isEmpty() ? null : options.getDetails().toList();
    return StorageImplUtils.applyOptionalTimeout(
        this.azureBlobStorage.getContainers().listBlobFlatSegmentWithResponseAsync(containerName, options.getPrefix(),
            marker, options.getMaxResultsPerPage(), include,
            null, null, Context.NONE), timeout);
}
/**
* Returns a reactive Publisher emitting all the blobs and directories (prefixes) under the given directory
* (prefix). Directories will have {@link BlobItem
*
* <p>
* Blob names are returned in lexicographic order. For more information, see the
* <a href="https:
*
* <p>
* E.g. listing a container containing a 'foo' folder, which contains blobs 'foo1' and 'foo2', and a blob on the
* root level 'bar', will return the following results when prefix=null:
*
* <ul>
* <li>foo/ (isPrefix = true)
* <li>bar (isPrefix = false)
* </ul>
* <p>
* will return the following results when prefix="foo/":
*
* <ul>
* <li>foo/foo1 (isPrefix = false)
* <li>foo/foo2 (isPrefix = false)
* </ul>
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.blob.BlobContainerAsyncClient.listBlobsByHierarchy
* <pre>
* client.listBlobsByHierarchy&
* System.out.printf&
* </pre>
* <!-- end com.azure.storage.blob.BlobContainerAsyncClient.listBlobsByHierarchy
*
* @param directory The directory to list blobs underneath
* @return A reactive response emitting the prefixes and blobs.
*/
@ServiceMethod(returns = ReturnType.COLLECTION)
public PagedFlux<BlobItem> listBlobsByHierarchy(String directory) {
    // "/" is the conventional delimiter for directory-style hierarchy.
    ListBlobsOptions directoryOptions = new ListBlobsOptions().setPrefix(directory);
    return this.listBlobsByHierarchy("/", directoryOptions);
}
/**
* Returns a reactive Publisher emitting all the blobs and prefixes (directories) under the given prefix
* (directory). Directories will have {@link BlobItem
*
* <p>
* Blob names are returned in lexicographic order. For more information, see the
* <a href="https:
*
* <p>
* E.g. listing a container containing a 'foo' folder, which contains blobs 'foo1' and 'foo2', and a blob on the
* root level 'bar', will return the following results when prefix=null:
*
* <ul>
* <li>foo/ (isPrefix = true)
* <li>bar (isPrefix = false)
* </ul>
* <p>
* will return the following results when prefix="foo/":
*
* <ul>
* <li>foo/foo1 (isPrefix = false)
* <li>foo/foo2 (isPrefix = false)
* </ul>
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.blob.BlobContainerAsyncClient.listBlobsByHierarchy
* <pre>
* ListBlobsOptions options = new ListBlobsOptions&
* .setPrefix&
* .setDetails&
* .setRetrieveDeletedBlobs&
* .setRetrieveSnapshots&
*
* client.listBlobsByHierarchy&
* System.out.printf&
* blob.getName&
* blob.isPrefix&
* blob.isDeleted&
* blob.getSnapshot&
* </pre>
* <!-- end com.azure.storage.blob.BlobContainerAsyncClient.listBlobsByHierarchy
*
* @param delimiter The delimiter for blob hierarchy, "/" for hierarchy based on directories
* @param options {@link ListBlobsOptions}
* @return A reactive response emitting the prefixes and blobs.
*/
@ServiceMethod(returns = ReturnType.COLLECTION)
public PagedFlux<BlobItem> listBlobsByHierarchy(String delimiter, ListBlobsOptions options) {
    try {
        // No timeout for the async path; the sync client supplies one via the
        // package-private overload.
        Duration noTimeout = null;
        return listBlobsHierarchyWithOptionalTimeout(delimiter, options, noTimeout);
    } catch (RuntimeException ex) {
        return pagedFluxError(LOGGER, ex);
    }
}
/*
* Implementation for this paged listing operation, supporting an optional timeout provided by the synchronous
* ContainerClient. Applies the given timeout to each Mono<ContainersListBlobHierarchySegmentResponse> backing the
* PagedFlux.
*
* @param delimiter The delimiter for blob hierarchy, "/" for hierarchy based on directories
* @param options {@link ListBlobsOptions}
* @param timeout An optional timeout to be applied to the network asynchronous operations.
* @return A reactive response emitting the listed blobs, flattened.
*/
// Backs the public hierarchical-listing methods. Each page merges real blob items with
// BlobPrefix entries (virtual directories), which are surfaced as BlobItems with
// isPrefix == true.
PagedFlux<BlobItem> listBlobsHierarchyWithOptionalTimeout(String delimiter, ListBlobsOptions options,
    Duration timeout) {
    BiFunction<String, Integer, Mono<PagedResponse<BlobItem>>> func =
        (marker, pageSize) -> {
            ListBlobsOptions finalOptions;
            /*
            If pageSize was not set in a .byPage(int) method, the page size from options will be preserved.
            Otherwise, prefer the new value.
            */
            if (pageSize != null) {
                if (options == null) {
                    finalOptions = new ListBlobsOptions().setMaxResultsPerPage(pageSize);
                } else {
                    finalOptions = new ListBlobsOptions()
                        .setMaxResultsPerPage(pageSize)
                        .setPrefix(options.getPrefix())
                        .setDetails(options.getDetails());
                }
            } else {
                finalOptions = options;
            }
            return listBlobsHierarchySegment(marker, delimiter, finalOptions, timeout)
                .map(response -> {
                    // A null segment means an empty page; otherwise concatenate blobs
                    // followed by prefixes (virtual directories).
                    List<BlobItem> value = response.getValue().getSegment() == null
                        ? Collections.emptyList()
                        : Stream.concat(
                        response.getValue().getSegment().getBlobItems().stream().map(ModelHelper::populateBlobItem),
                        response.getValue().getSegment().getBlobPrefixes().stream()
                            .map(blobPrefix -> new BlobItem()
                                .setName(ModelHelper.toBlobNameString(blobPrefix.getName())).setIsPrefix(true))
                    ).collect(Collectors.toList());
                    return new PagedResponseBase<>(
                        response.getRequest(),
                        response.getStatusCode(),
                        response.getHeaders(),
                        value,
                        response.getValue().getNextMarker(),
                        response.getDeserializedHeaders());
                });
        };
    // No continuation-token parameter on this overload: the first page always starts
    // from the beginning (null marker).
    return new PagedFlux<>(pageSize -> func.apply(null, pageSize), func);
}
// Fetches a single hierarchical-listing segment. Snapshot retrieval is rejected up front
// because the service does not support snapshots in delimiter-based listings.
private Mono<ContainersListBlobHierarchySegmentResponse> listBlobsHierarchySegment(String marker, String delimiter,
    ListBlobsOptions options, Duration timeout) {
    options = options == null ? new ListBlobsOptions() : options;
    if (options.getDetails().getRetrieveSnapshots()) {
        throw LOGGER.logExceptionAsError(
            new UnsupportedOperationException("Including snapshots in a hierarchical listing is not supported."));
    }
    // Normalize an empty include list to null so the query parameter is omitted.
    ArrayList<ListBlobsIncludeItem> include =
        options.getDetails().toList().isEmpty() ? null : options.getDetails().toList();
    return StorageImplUtils.applyOptionalTimeout(
        this.azureBlobStorage.getContainers().listBlobHierarchySegmentWithResponseAsync(containerName, delimiter,
            options.getPrefix(), marker, options.getMaxResultsPerPage(), include, null, null,
            Context.NONE),
        timeout);
}
/**
* Returns a reactive Publisher emitting the blobs in this container whose tags match the query expression. For more
* information, including information on the query syntax, see the <a href="https:
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.blob.BlobContainerAsyncClient.findBlobsByTag
* <pre>
* client.findBlobsByTags&
* </pre>
* <!-- end com.azure.storage.blob.BlobContainerAsyncClient.findBlobsByTag
*
* @param query Filters the results to return only blobs whose tags match the specified expression.
* @return A reactive response emitting the list of blobs.
*/
@ServiceMethod(returns = ReturnType.COLLECTION)
public PagedFlux<TaggedBlobItem> findBlobsByTags(String query) {
    try {
        // Wrap the raw query in the options type and delegate.
        FindBlobsOptions queryOptions = new FindBlobsOptions(query);
        return this.findBlobsByTags(queryOptions);
    } catch (RuntimeException ex) {
        return pagedFluxError(LOGGER, ex);
    }
}
/**
* Returns a reactive Publisher emitting the blobs in this container whose tags match the query expression. For more
* information, including information on the query syntax, see the <a href="https:
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.blob.BlobContainerAsyncClient.findBlobsByTag
* <pre>
* client.findBlobsByTags&
* .subscribe&
* </pre>
* <!-- end com.azure.storage.blob.BlobContainerAsyncClient.findBlobsByTag
*
* @param options {@link FindBlobsOptions}
* @return A reactive response emitting the list of blobs.
*/
@ServiceMethod(returns = ReturnType.COLLECTION)
public PagedFlux<TaggedBlobItem> findBlobsByTags(FindBlobsOptions options) {
    try {
        // No timeout for the async path; the sync client supplies one via the
        // package-private overload.
        Duration noTimeout = null;
        return findBlobsByTags(options, noTimeout);
    } catch (RuntimeException ex) {
        return pagedFluxError(LOGGER, ex);
    }
}
// Backs findBlobsByTags(FindBlobsOptions) with a PagedFlux whose page fetcher captures
// the reactor Context per subscription.
//
// Fix: the previous implementation always rebuilt the options with
// setMaxResultsPerPage(pageSize), which discarded a caller-supplied maxResultsPerPage
// whenever .byPage(int) was not used (pageSize == null). Now, matching the Context-aware
// overload below, the caller's options are passed through untouched in that case and a
// pageSize from .byPage(int) only overrides when actually present.
PagedFlux<TaggedBlobItem> findBlobsByTags(FindBlobsOptions options, Duration timeout) {
    StorageImplUtils.assertNotNull("options", options);
    BiFunction<String, Integer, Mono<PagedResponse<TaggedBlobItem>>> func =
        (marker, pageSize) -> {
            // Prefer an explicit .byPage(int) size; otherwise keep the caller's options
            // (including any maxResultsPerPage they set).
            FindBlobsOptions finalOptions = (pageSize != null)
                ? new FindBlobsOptions(options.getQuery()).setMaxResultsPerPage(pageSize)
                : options;
            return withContext(context -> this.findBlobsByTags(finalOptions, marker, timeout, context));
        };
    return new PagedFlux<>(pageSize -> func.apply(null, pageSize), func);
}
// Context-aware variant used when the caller already holds a Context; otherwise
// identical paging behavior to findBlobsByTags(FindBlobsOptions, Duration).
PagedFlux<TaggedBlobItem> findBlobsByTags(FindBlobsOptions options, Duration timeout, Context context) {
    StorageImplUtils.assertNotNull("options", options);
    BiFunction<String, Integer, Mono<PagedResponse<TaggedBlobItem>>> func =
        (marker, pageSize) -> {
            FindBlobsOptions finalOptions;
            // A pageSize from .byPage(int) overrides the caller's maxResultsPerPage;
            // otherwise the caller's options are used as-is.
            if (pageSize != null) {
                finalOptions = new FindBlobsOptions(options.getQuery())
                    .setMaxResultsPerPage(pageSize);
            } else {
                finalOptions = options;
            }
            return this.findBlobsByTags(finalOptions, marker, timeout, context);
        };
    return new PagedFlux<>(pageSize -> func.apply(null, pageSize), func);
}
// Fetches one page of tagged-blob results from the filter-blobs service call and maps
// the generated-layer items into public TaggedBlobItem models.
private Mono<PagedResponse<TaggedBlobItem>> findBlobsByTags(
    FindBlobsOptions options, String marker,
    Duration timeout, Context context) {
    StorageImplUtils.assertNotNull("options", options);
    return StorageImplUtils.applyOptionalTimeout(
        this.azureBlobStorage.getContainers().filterBlobsWithResponseAsync(containerName, null, null,
            options.getQuery(), marker, options.getMaxResultsPerPage(),
            context.addData(AZ_TRACING_NAMESPACE_KEY, STORAGE_TRACING_NAMESPACE_VALUE)), timeout)
        .map(response -> {
            // A null blob list means an empty page, not an error.
            List<TaggedBlobItem> value = response.getValue().getBlobs() == null
                ? Collections.emptyList()
                : response.getValue().getBlobs().stream()
                .map(ModelHelper::populateTaggedBlobItem)
                .collect(Collectors.toList());
            return new PagedResponseBase<>(
                response.getRequest(),
                response.getStatusCode(),
                response.getHeaders(),
                value,
                // NextMarker drives continuation for subsequent pages.
                response.getValue().getNextMarker(),
                response.getDeserializedHeaders());
        });
}
/**
* Returns the sku name and account kind for the account. For more information, please see the
* <a href="https:
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.blob.BlobContainerAsyncClient.getAccountInfo -->
* <pre>
* client.getAccountInfo&
* System.out.printf&
* response.getAccountKind&
* response.getSkuName&
* </pre>
* <!-- end com.azure.storage.blob.BlobContainerAsyncClient.getAccountInfo -->
*
* @return A reactive response containing the account info.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<StorageAccountInfo> getAccountInfo() {
    // Unwrap the Response envelope from the WithResponse variant.
    return getAccountInfoWithResponse().flatMap(response -> FluxUtil.toMono(response));
}
/**
* Returns the sku name and account kind for the account. For more information, please see the
* <a href="https:
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.blob.BlobContainerAsyncClient.getAccountInfoWithResponse -->
* <pre>
* client.getAccountInfoWithResponse&
* System.out.printf&
* response.getValue&
* response.getValue&
* </pre>
* <!-- end com.azure.storage.blob.BlobContainerAsyncClient.getAccountInfoWithResponse -->
*
* @return A reactive response containing the account info.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<StorageAccountInfo>> getAccountInfoWithResponse() {
    try {
        // Capture the reactor Context and hand it to the internal overload.
        return withContext(ctx -> this.getAccountInfoWithResponse(ctx));
    } catch (RuntimeException ex) {
        return monoError(LOGGER, ex);
    }
}
// Queries the account-info endpoint and maps the SKU name and account kind headers
// into the public StorageAccountInfo model.
Mono<Response<StorageAccountInfo>> getAccountInfoWithResponse(Context context) {
    context = context == null ? Context.NONE : context;
    return this.azureBlobStorage.getContainers().getAccountInfoWithResponseAsync(containerName,
        context.addData(AZ_TRACING_NAMESPACE_KEY, STORAGE_TRACING_NAMESPACE_VALUE))
        .map(rb -> {
            ContainersGetAccountInfoHeaders hd = rb.getDeserializedHeaders();
            return new SimpleResponse<>(rb, new StorageAccountInfo(hd.getXMsSkuName(), hd.getXMsAccountKind()));
        });
}
/**
 * Generates a user delegation SAS for the container using the specified
 * {@link BlobServiceSasSignatureValues}.
 * <p>See {@link BlobServiceSasSignatureValues} for more information on how to construct a user
 * delegation SAS.</p>
 *
 * @param blobServiceSasSignatureValues {@link BlobServiceSasSignatureValues}
 * @param userDelegationKey A {@link UserDelegationKey} object used to sign the SAS values.
 * @return A {@code String} representing the SAS query parameters.
 */
public String generateUserDelegationSas(BlobServiceSasSignatureValues blobServiceSasSignatureValues,
    UserDelegationKey userDelegationKey) {
    // Delegate to the full overload using this client's account name and an empty context.
    return generateUserDelegationSas(blobServiceSasSignatureValues, userDelegationKey, getAccountName(),
        Context.NONE);
}

/**
 * Generates a user delegation SAS for the container using the specified
 * {@link BlobServiceSasSignatureValues}.
 * <p>See {@link BlobServiceSasSignatureValues} for more information on how to construct a user
 * delegation SAS.</p>
 *
 * @param blobServiceSasSignatureValues {@link BlobServiceSasSignatureValues}
 * @param userDelegationKey A {@link UserDelegationKey} object used to sign the SAS values.
 * @param accountName The account name.
 * @param context Additional context that is passed through the code when generating a SAS.
 * @return A {@code String} representing the SAS query parameters.
 */
public String generateUserDelegationSas(BlobServiceSasSignatureValues blobServiceSasSignatureValues,
    UserDelegationKey userDelegationKey, String accountName, Context context) {
    BlobSasImplUtil sasImplUtil = new BlobSasImplUtil(blobServiceSasSignatureValues, getBlobContainerName());
    return sasImplUtil.generateUserDelegationSas(userDelegationKey, accountName, context);
}
/**
 * Generates a service SAS for the container using the specified {@link BlobServiceSasSignatureValues}.
 * <p>Note : The client must be authenticated via {@link StorageSharedKeyCredential}.</p>
 * <p>See {@link BlobServiceSasSignatureValues} for more information on how to construct a service SAS.</p>
 *
 * @param blobServiceSasSignatureValues {@link BlobServiceSasSignatureValues}
 * @return A {@code String} representing the SAS query parameters.
 */
public String generateSas(BlobServiceSasSignatureValues blobServiceSasSignatureValues) {
    // Delegate to the full overload with an empty context.
    return generateSas(blobServiceSasSignatureValues, Context.NONE);
}

/**
 * Generates a service SAS for the container using the specified {@link BlobServiceSasSignatureValues}.
 * <p>Note : The client must be authenticated via {@link StorageSharedKeyCredential}.</p>
 * <p>See {@link BlobServiceSasSignatureValues} for more information on how to construct a service SAS.</p>
 *
 * @param blobServiceSasSignatureValues {@link BlobServiceSasSignatureValues}
 * @param context Additional context that is passed through the code when generating a SAS.
 * @return A {@code String} representing the SAS query parameters.
 */
public String generateSas(BlobServiceSasSignatureValues blobServiceSasSignatureValues, Context context) {
    BlobSasImplUtil sasImplUtil = new BlobSasImplUtil(blobServiceSasSignatureValues, getBlobContainerName());
    // Signing uses the shared key credential extracted from this client's pipeline.
    return sasImplUtil.generateSas(SasImplUtils.extractSharedKeyCredential(getHttpPipeline()), context);
}
/**
 * Returns {@code true} when the given request conditions carry no ETag-based access conditions
 * (If-Match / If-None-Match), which several container-level APIs do not support.
 *
 * @param modifiedRequestConditions The conditions to inspect; {@code null} trivially passes.
 * @return {@code true} if no ETag conditions are present.
 */
private static boolean validateNoETag(BlobRequestConditions modifiedRequestConditions) {
    return modifiedRequestConditions == null
        || (modifiedRequestConditions.getIfMatch() == null
            && modifiedRequestConditions.getIfNoneMatch() == null);
}
} | class BlobContainerAsyncClient {
/**
 * Special container name for the root container in the Storage account.
 */
public static final String ROOT_CONTAINER_NAME = "$root";
/**
 * Special container name for the static website container in the Storage account.
 */
public static final String STATIC_WEBSITE_CONTAINER_NAME = "$web";
/**
 * Special container name for the logs container in the Storage account.
 */
public static final String LOG_CONTAINER_NAME = "$logs";
private static final ClientLogger LOGGER = new ClientLogger(BlobContainerAsyncClient.class);
// Generated protocol-layer client that performs the actual REST calls.
private final AzureBlobStorageImpl azureBlobStorage;
// Storage account name associated with this client.
private final String accountName;
// Name of the container this client targets.
private final String containerName;
// Service version used for requests issued by this client.
private final BlobServiceVersion serviceVersion;
// Customer-provided encryption key; null means service-managed encryption.
private final CpkInfo customerProvidedKey;
// Encryption scope used for blob data; null means the service default.
private final EncryptionScope encryptionScope;
// Default encryption scope settings applied at container creation time.
private final BlobContainerEncryptionScope blobContainerEncryptionScope;
/**
 * Package-private constructor for use by {@link BlobContainerClientBuilder}.
 *
 * @param pipeline The pipeline used to send and receive service requests.
 * @param url The endpoint where to send service requests.
 * @param serviceVersion The version of the service to receive requests.
 * @param accountName The storage account name.
 * @param containerName The container name.
 * @param customerProvidedKey Customer provided key used during encryption of the blob's data on the server, pass
 * {@code null} to allow the service to use its own encryption.
 * @param encryptionScope Encryption scope used during encryption of the blob's data on the server, pass
 * {@code null} to allow the service to use its own encryption.
 * @param blobContainerEncryptionScope Default encryption scope settings for the container, pass {@code null}
 * to use the service defaults.
 * @throws IllegalArgumentException If the resulting container URL is not a valid URI.
 */
BlobContainerAsyncClient(HttpPipeline pipeline, String url, BlobServiceVersion serviceVersion,
    String accountName, String containerName, CpkInfo customerProvidedKey, EncryptionScope encryptionScope,
    BlobContainerEncryptionScope blobContainerEncryptionScope) {
    this.azureBlobStorage = new AzureBlobStorageImplBuilder()
        .pipeline(pipeline)
        .url(url)
        .version(serviceVersion.getVersion())
        .buildClient();
    this.serviceVersion = serviceVersion;
    this.accountName = accountName;
    this.containerName = containerName;
    this.customerProvidedKey = customerProvidedKey;
    this.encryptionScope = encryptionScope;
    this.blobContainerEncryptionScope = blobContainerEncryptionScope;
    /* Check to make sure the uri is valid. We don't want the error to occur later in the generated layer
    when the sas token has already been applied. */
    try {
        URI.create(getBlobContainerUrl());
    } catch (IllegalArgumentException ex) {
        throw LOGGER.logExceptionAsError(ex);
    }
}
/**
 * Creates a new BlobAsyncClient object by concatenating blobName to the end of ContainerAsyncClient's URL. The new
 * BlobAsyncClient uses the same request policy pipeline as the ContainerAsyncClient.
 *
 * @param blobName A {@code String} representing the name of the blob. If the blob name contains special characters,
 * pass in the url encoded version of the blob name.
 * @return A new {@link BlobAsyncClient} object which references the blob with the specified name in this container.
 */
public BlobAsyncClient getBlobAsyncClient(String blobName) {
    // No snapshot: target the base (current) blob.
    return getBlobAsyncClient(blobName, null);
}
/**
 * Creates a new BlobAsyncClient object by concatenating blobName to the end of ContainerAsyncClient's URL. The new
 * BlobAsyncClient uses the same request policy pipeline as the ContainerAsyncClient.
 *
 * @param blobName A {@code String} representing the name of the blob. If the blob name contains special characters,
 * pass in the url encoded version of the blob name.
 * @param snapshot the snapshot identifier for the blob.
 * @return A new {@link BlobAsyncClient} object which references the blob with the specified name in this container.
 */
public BlobAsyncClient getBlobAsyncClient(String blobName, String snapshot) {
    // The new client inherits this container client's pipeline, version, and encryption settings.
    return new BlobAsyncClient(getHttpPipeline(), getAccountUrl(), getServiceVersion(), getAccountName(),
        getBlobContainerName(), blobName, snapshot, getCustomerProvidedKey(), encryptionScope);
}
/**
 * Creates a new BlobAsyncClient object by concatenating blobName to the end of ContainerAsyncClient's URL. The new
 * BlobAsyncClient uses the same request policy pipeline as the ContainerAsyncClient.
 *
 * @param blobName A {@code String} representing the name of the blob. If the blob name contains special characters,
 * pass in the url encoded version of the blob name.
 * @param versionId the version identifier for the blob, pass {@code null} to interact with the latest blob version.
 * @return A new {@link BlobAsyncClient} object which references the blob with the specified name in this container.
 */
public BlobAsyncClient getBlobVersionAsyncClient(String blobName, String versionId) {
    // snapshot is null here: version and snapshot are mutually exclusive addressing modes.
    return new BlobAsyncClient(getHttpPipeline(), getAccountUrl(), getServiceVersion(), getAccountName(),
        getBlobContainerName(), blobName, null, getCustomerProvidedKey(), encryptionScope, versionId);
}
/**
 * Get the url of the storage account.
 *
 * @return the URL of the storage account
 */
public String getAccountUrl() {
    return azureBlobStorage.getUrl();
}
/**
 * Gets the URL of the container represented by this client.
 *
 * @return the URL.
 */
public String getBlobContainerUrl() {
    // Account URL plus a path segment for the container name.
    return azureBlobStorage.getUrl() + "/" + containerName;
}
/**
 * Get the container name.
 *
 * @return The name of container.
 */
public String getBlobContainerName() {
    return containerName;
}
/**
 * Get associated account name.
 *
 * @return account name associated with this storage resource.
 */
public String getAccountName() {
    return this.accountName;
}
/**
 * Get an async client pointing to the account.
 *
 * @return {@link BlobServiceAsyncClient}
 */
public BlobServiceAsyncClient getServiceAsyncClient() {
    return getServiceClientBuilder().buildAsyncClient();
}
/**
 * Builds a {@link BlobServiceClientBuilder} preconfigured with this client's pipeline, service
 * version, and encryption settings.
 *
 * @return a preconfigured service client builder.
 */
BlobServiceClientBuilder getServiceClientBuilder() {
    // Translate the internal CpkInfo into the public CustomerProvidedKey type the builder expects.
    CustomerProvidedKey encryptionKey = this.customerProvidedKey == null ? null
        : new CustomerProvidedKey(this.customerProvidedKey.getEncryptionKey());
    return new BlobServiceClientBuilder()
        .endpoint(this.getBlobContainerUrl())
        .pipeline(this.getHttpPipeline())
        .serviceVersion(this.serviceVersion)
        .blobContainerEncryptionScope(this.blobContainerEncryptionScope)
        .encryptionScope(this.getEncryptionScope())
        .customerProvidedKey(encryptionKey);
}
/**
 * Gets the service version the client is using.
 *
 * @return the service version the client is using.
 */
public BlobServiceVersion getServiceVersion() {
    return serviceVersion;
}
/**
 * Gets the {@link HttpPipeline} powering this client.
 *
 * @return The pipeline.
 */
public HttpPipeline getHttpPipeline() {
    return azureBlobStorage.getHttpPipeline();
}
/**
 * Gets the {@link CpkInfo} associated with this client that will be passed to
 * {@link BlobAsyncClient BlobAsyncClients} created from this container client.
 *
 * @return the customer provided key used for encryption.
 */
public CpkInfo getCustomerProvidedKey() {
    return customerProvidedKey;
}

/**
 * Gets the {@link EncryptionScope} used to encrypt this blob's content on the server.
 *
 * @return the encryption scope used for encryption, or {@code null} if none is configured.
 */
public String getEncryptionScope() {
    return encryptionScope == null ? null : encryptionScope.getEncryptionScope();
}
/**
 * Gets if the container this client represents exists in the cloud.
 *
 * @return true if the container exists, false if it doesn't
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Boolean> exists() {
    return existsWithResponse().flatMap(response -> FluxUtil.toMono(response));
}

/**
 * Gets if the container this client represents exists in the cloud, along with the full
 * HTTP response.
 *
 * @return true if the container exists, false if it doesn't
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<Boolean>> existsWithResponse() {
    try {
        return withContext(context -> existsWithResponse(context));
    } catch (RuntimeException ex) {
        // Surface synchronous failures through the reactive pipeline instead of throwing.
        return monoError(LOGGER, ex);
    }
}
/**
 * Gets if the container this client represents exists in the cloud.
 * <p>Implemented as a getProperties probe: a successful properties call means the container
 * exists; a 404 is translated into a successful response carrying {@code false}.</p>
 *
 * @param context Additional context passed through the service call.
 * @return true if the container exists, false if it doesn't
 */
Mono<Response<Boolean>> existsWithResponse(Context context) {
    return this.getPropertiesWithResponse(null, context)
        .map(cp -> (Response<Boolean>) new SimpleResponse<>(cp, true))
        // Only 404 from the storage service is treated as "does not exist"; other errors propagate.
        .onErrorResume(t -> t instanceof BlobStorageException && ((BlobStorageException) t).getStatusCode() == 404,
            t -> {
                HttpResponse response = ((BlobStorageException) t).getResponse();
                return Mono.just(new SimpleResponse<>(response.getRequest(), response.getStatusCode(),
                    response.getHeaders(), false));
            });
}
/**
 * Creates a new container within a storage account. If a container with the same name already
 * exists, the operation fails. For more information, see the Azure Docs.
 *
 * @return A reactive response signalling completion.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Void> create() {
    // No metadata and no public access by default.
    return createWithResponse(null, null).flatMap(response -> FluxUtil.toMono(response));
}
/**
 * Creates a new container within a storage account. If a container with the same name already exists, the operation
 * fails. For more information, see the Azure Docs.
 *
 * @param metadata Metadata to associate with the container. If there is leading or trailing whitespace in any
 * metadata key or value, it must be removed or encoded.
 * @param accessType Specifies how the data in this container is available to the public. See the
 * x-ms-blob-public-access header in the Azure Docs for more information. Pass null for no public access.
 * @return A reactive response signalling completion.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<Void>> createWithResponse(Map<String, String> metadata, PublicAccessType accessType) {
    try {
        return withContext(context -> createWithResponse(metadata, accessType, context));
    } catch (RuntimeException ex) {
        // Surface synchronous failures through the reactive pipeline instead of throwing.
        return monoError(LOGGER, ex);
    }
}
/**
 * Service-call implementation of {@code createWithResponse}.
 *
 * @param metadata Metadata to associate with the container.
 * @param accessType Public access level for the container; null for no public access.
 * @param context Additional context passed through the service call; {@code null} is treated as empty.
 * @return A reactive response signalling completion.
 */
Mono<Response<Void>> createWithResponse(Map<String, String> metadata, PublicAccessType accessType,
    Context context) {
    context = context == null ? Context.NONE : context;
    return this.azureBlobStorage.getContainers().createWithResponseAsync(
        containerName, null, metadata, accessType, null, blobContainerEncryptionScope,
        context.addData(AZ_TRACING_NAMESPACE_KEY, STORAGE_TRACING_NAMESPACE_VALUE))
        .map(response -> new SimpleResponse<>(response, null));
}
/**
* Creates a new container within a storage account if it does not exist. For more information, see the
* <a href="https:
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.blob.BlobContainerAsyncClient.createIfNotExists -->
* <pre>
* client.createIfNotExists&
* if &
* System.out.println&
* &
* System.out.println&
* &
* &
* </pre>
* <!-- end com.azure.storage.blob.BlobContainerAsyncClient.createIfNotExists -->
*
* @return A reactive response signaling completion. {@code true} indicates a new container was created,
* {@code true} indicates a container already existed at this location.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
/**
 * Creates a new container within a storage account if it does not exist. For more information, see the
 * Azure Docs.
 *
 * @param options {@link BlobContainerCreateOptions}
 * @return A reactive response signaling completion. If {@link Response}'s status code is 201, a new container was
 * successfully created. If status code is 409, a container already existed at this location.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<Void>> createIfNotExistsWithResponse(BlobContainerCreateOptions options) {
    try {
        return createIfNotExistsWithResponse(options, null);
    } catch (RuntimeException ex) {
        // Surface synchronous failures through the reactive pipeline instead of throwing.
        return monoError(LOGGER, ex);
    }
}
/**
 * Creates the container, translating a 409 (conflict: container already exists) error into a
 * successful response so callers can treat "already exists" as a non-error outcome.
 *
 * @param options {@link BlobContainerCreateOptions}; defaults are used when {@code null}.
 * @param context Additional context passed through the service call.
 * @return A reactive response signaling completion; status 409 means the container already existed.
 */
Mono<Response<Void>> createIfNotExistsWithResponse(BlobContainerCreateOptions options, Context context) {
    try {
        options = options == null ? new BlobContainerCreateOptions() : options;
        return createWithResponse(options.getMetadata(), options.getPublicAccessType(), context)
            // Only 409 is swallowed; any other storage error still propagates to the subscriber.
            .onErrorResume(t -> t instanceof BlobStorageException && ((BlobStorageException) t)
                .getStatusCode() == 409,
                t -> {
                    HttpResponse response = ((BlobStorageException) t).getResponse();
                    return Mono.just(new SimpleResponse<>(response.getRequest(), response.getStatusCode(),
                        response.getHeaders(), null));
                });
    } catch (RuntimeException ex) {
        return monoError(LOGGER, ex);
    }
}
/**
 * Marks the specified container for deletion. The container and any blobs contained within it are
 * later deleted during garbage collection. For more information, see the Azure Docs.
 *
 * @return A reactive response signalling completion.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Void> delete() {
    // No request conditions by default.
    return deleteWithResponse(null).flatMap(response -> FluxUtil.toMono(response));
}
/**
 * Marks the specified container for deletion. The container and any blobs contained within it are later deleted
 * during garbage collection. For more information, see the Azure Docs.
 *
 * @param requestConditions {@link BlobRequestConditions}
 * @return A reactive response signalling completion.
 * @throws UnsupportedOperationException If the request conditions contain ETag (If-Match /
 * If-None-Match) access conditions, which this API does not support.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<Void>> deleteWithResponse(BlobRequestConditions requestConditions) {
    try {
        return withContext(context -> deleteWithResponse(requestConditions, context));
    } catch (RuntimeException ex) {
        // Surface synchronous failures through the reactive pipeline instead of throwing.
        return monoError(LOGGER, ex);
    }
}
/**
 * Service-call implementation of {@code deleteWithResponse}.
 *
 * @param requestConditions Conditions the container must satisfy; defaults are used when {@code null}.
 * @param context Additional context passed through the service call; {@code null} is treated as empty.
 * @return A reactive response signalling completion.
 * @throws UnsupportedOperationException If ETag access conditions are present (not supported here).
 */
Mono<Response<Void>> deleteWithResponse(BlobRequestConditions requestConditions, Context context) {
    requestConditions = requestConditions == null ? new BlobRequestConditions() : requestConditions;
    // Container delete supports lease and modified-time conditions only; reject ETag conditions early.
    if (!validateNoETag(requestConditions)) {
        throw LOGGER.logExceptionAsError(
            new UnsupportedOperationException("ETag access conditions are not supported for this API."));
    }
    context = context == null ? Context.NONE : context;
    return this.azureBlobStorage.getContainers().deleteWithResponseAsync(containerName, null,
        requestConditions.getLeaseId(), requestConditions.getIfModifiedSince(),
        requestConditions.getIfUnmodifiedSince(), null,
        context.addData(AZ_TRACING_NAMESPACE_KEY, STORAGE_TRACING_NAMESPACE_VALUE))
        .map(response -> new SimpleResponse<>(response, null));
}
/**
 * Marks the specified container for deletion if it exists. The container and any blobs contained
 * within it are later deleted during garbage collection. For more information, see the Azure Docs.
 *
 * @return A reactive response signaling completion. {@code true} indicates the container was deleted,
 * {@code false} indicates the container does not exist.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Boolean> deleteIfExists() {
    // deleteIfExistsWithResponse maps a 404 into a non-error response, so a non-404 status
    // here means the container existed and was deleted.
    return deleteIfExistsWithResponse(null).map(response -> 404 != response.getStatusCode());
}
/**
 * Marks the specified container for deletion if it exists. The container and any blobs contained within it are
 * later deleted during garbage collection. For more information, see the Azure Docs.
 *
 * @param requestConditions {@link BlobRequestConditions}
 * @return A reactive response signaling completion. If {@link Response}'s status code is 202, the container was
 * successfully deleted. If status code is 404, the container does not exist.
 * @throws UnsupportedOperationException If the request conditions contain ETag (If-Match /
 * If-None-Match) access conditions, which this API does not support.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<Void>> deleteIfExistsWithResponse(BlobRequestConditions requestConditions) {
    try {
        return deleteIfExistsWithResponse(requestConditions, null);
    } catch (RuntimeException ex) {
        // Surface synchronous failures through the reactive pipeline instead of throwing.
        return monoError(LOGGER, ex);
    }
}
/**
 * Deletes the container, translating a 404 (container does not exist) error into a successful
 * response so callers can treat "does not exist" as a non-error outcome.
 *
 * @param requestConditions Conditions the container must satisfy; defaults are used when {@code null}.
 * @param context Additional context passed through the service call.
 * @return A reactive response signaling completion; status 404 means the container did not exist.
 */
Mono<Response<Void>> deleteIfExistsWithResponse(BlobRequestConditions requestConditions, Context context) {
    // NOTE(review): this defaulting is redundant — deleteWithResponse applies the same default —
    // but it is harmless and kept as-is.
    requestConditions = requestConditions == null ? new BlobRequestConditions() : requestConditions;
    try {
        return deleteWithResponse(requestConditions, context)
            // Only 404 is swallowed; any other storage error still propagates to the subscriber.
            .onErrorResume(t -> t instanceof BlobStorageException && ((BlobStorageException) t).getStatusCode() == 404,
                t -> {
                    HttpResponse response = ((BlobStorageException) t).getResponse();
                    return Mono.just(new SimpleResponse<>(response.getRequest(), response.getStatusCode(),
                        response.getHeaders(), null));
                });
    } catch (RuntimeException ex) {
        return monoError(LOGGER, ex);
    }
}
/**
 * Returns the container's metadata and system properties. For more information, see the Azure Docs.
 *
 * @return A {@link Mono} containing the container properties.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<BlobContainerProperties> getProperties() {
    // No lease condition by default.
    return getPropertiesWithResponse(null).flatMap(response -> FluxUtil.toMono(response));
}
/**
 * Returns the container's metadata and system properties. For more information, see the Azure Docs.
 *
 * @param leaseId The lease ID the active lease on the container must match.
 * @return A reactive response containing the container properties.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<BlobContainerProperties>> getPropertiesWithResponse(String leaseId) {
    try {
        return withContext(context -> getPropertiesWithResponse(leaseId, context));
    } catch (RuntimeException ex) {
        // Surface synchronous failures through the reactive pipeline instead of throwing.
        return monoError(LOGGER, ex);
    }
}
/**
 * Service-call implementation of {@code getPropertiesWithResponse}; translates the protocol-layer
 * response headers into a {@link BlobContainerProperties}.
 *
 * @param leaseId The lease ID the active lease on the container must match.
 * @param context Additional context passed through the service call; {@code null} is treated as empty.
 * @return A reactive response containing the container properties.
 */
Mono<Response<BlobContainerProperties>> getPropertiesWithResponse(String leaseId, Context context) {
    context = context == null ? Context.NONE : context;
    return this.azureBlobStorage.getContainers()
        .getPropertiesWithResponseAsync(containerName, null, leaseId, null,
            context.addData(AZ_TRACING_NAMESPACE_KEY, STORAGE_TRACING_NAMESPACE_VALUE))
        .map(rb -> {
            ContainersGetPropertiesHeaders hd = rb.getDeserializedHeaders();
            // Boolean.TRUE.equals(...) coerces null header values to false for the boolean flags.
            BlobContainerProperties properties = new BlobContainerProperties(hd.getXMsMeta(), hd.getETag(),
                hd.getLastModified(), hd.getXMsLeaseDuration(), hd.getXMsLeaseState(), hd.getXMsLeaseStatus(),
                hd.getXMsBlobPublicAccess(), Boolean.TRUE.equals(hd.isXMsHasImmutabilityPolicy()),
                Boolean.TRUE.equals(hd.isXMsHasLegalHold()), hd.getXMsDefaultEncryptionScope(),
                hd.isXMsDenyEncryptionScopeOverride(), hd.isXMsImmutableStorageWithVersioningEnabled());
            return new SimpleResponse<>(rb, properties);
        });
}
/**
 * Sets the container's metadata. For more information, see the Azure Docs.
 *
 * @param metadata Metadata to associate with the container. If there is leading or trailing
 * whitespace in any metadata key or value, it must be removed or encoded.
 * @return A reactive response signalling completion.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Void> setMetadata(Map<String, String> metadata) {
    // No request conditions by default.
    return setMetadataWithResponse(metadata, null).flatMap(response -> FluxUtil.toMono(response));
}
/**
 * Sets the container's metadata. For more information, see the Azure Docs.
 *
 * @param metadata Metadata to associate with the container. If there is leading or trailing whitespace in any
 * metadata key or value, it must be removed or encoded.
 * @param requestConditions {@link BlobRequestConditions}
 * @return A reactive response signalling completion.
 * @throws UnsupportedOperationException If the request conditions contain an ETag or
 * If-Unmodified-Since condition; only If-Modified-Since is supported by this API.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<Void>> setMetadataWithResponse(Map<String, String> metadata,
    BlobRequestConditions requestConditions) {
    try {
        return withContext(context -> setMetadataWithResponse(metadata, requestConditions, context));
    } catch (RuntimeException ex) {
        // Surface synchronous failures through the reactive pipeline instead of throwing.
        return monoError(LOGGER, ex);
    }
}
/**
 * Service-call implementation of {@code setMetadataWithResponse}.
 *
 * @param metadata Metadata to associate with the container.
 * @param requestConditions Conditions the container must satisfy; defaults are used when {@code null}.
 * @param context Additional context passed through the service call; {@code null} is treated as empty.
 * @return A reactive response signalling completion.
 * @throws UnsupportedOperationException If an ETag or If-Unmodified-Since condition is set.
 */
Mono<Response<Void>> setMetadataWithResponse(Map<String, String> metadata,
    BlobRequestConditions requestConditions, Context context) {
    context = context == null ? Context.NONE : context;
    requestConditions = requestConditions == null ? new BlobRequestConditions() : requestConditions;
    // Set-metadata supports only the If-Modified-Since condition; reject everything else early.
    if (!validateNoETag(requestConditions) || requestConditions.getIfUnmodifiedSince() != null) {
        throw LOGGER.logExceptionAsError(new UnsupportedOperationException(
            "If-Modified-Since is the only HTTP access condition supported for this API"));
    }
    return this.azureBlobStorage.getContainers().setMetadataWithResponseAsync(containerName, null,
        requestConditions.getLeaseId(), metadata, requestConditions.getIfModifiedSince(), null,
        context.addData(AZ_TRACING_NAMESPACE_KEY, STORAGE_TRACING_NAMESPACE_VALUE))
        .map(response -> new SimpleResponse<>(response, null));
}
/**
 * Returns the container's permissions. The permissions indicate whether container's blobs may be
 * accessed publicly. For more information, see the Azure Docs.
 *
 * @return A reactive response containing the container access policy.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<BlobContainerAccessPolicies> getAccessPolicy() {
    // No lease condition by default.
    return getAccessPolicyWithResponse(null).flatMap(response -> FluxUtil.toMono(response));
}
/**
 * Returns the container's permissions. The permissions indicate whether container's blobs may be accessed publicly.
 * For more information, see the Azure Docs.
 *
 * @param leaseId The lease ID the active lease on the container must match.
 * @return A reactive response containing the container access policy.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<BlobContainerAccessPolicies>> getAccessPolicyWithResponse(String leaseId) {
    try {
        return withContext(context -> getAccessPolicyWithResponse(leaseId, context));
    } catch (RuntimeException ex) {
        // Surface synchronous failures through the reactive pipeline instead of throwing.
        return monoError(LOGGER, ex);
    }
}
// Fetches the container's stored access policies together with its public-access level,
// attaching the storage tracing namespace to the pipeline context.
Mono<Response<BlobContainerAccessPolicies>> getAccessPolicyWithResponse(String leaseId, Context context) {
context = context == null ? Context.NONE : context;
// Positional nulls are the generated client's optional timeout/request-id parameters.
return this.azureBlobStorage.getContainers().getAccessPolicyWithResponseAsync(
containerName, null, leaseId, null,
context.addData(AZ_TRACING_NAMESPACE_KEY, STORAGE_TRACING_NAMESPACE_VALUE))
// Combine the public-access header with the signed-identifier body into one model.
.map(response -> new SimpleResponse<>(response,
new BlobContainerAccessPolicies(response.getDeserializedHeaders().getXMsBlobPublicAccess(),
response.getValue())));
}
/**
* Sets the container's permissions. The permissions indicate whether blobs in a container may be accessed publicly.
* Note that, for each signed identifier, we will truncate the start and expiry times to the nearest second to
* ensure the time formatting is compatible with the service. For more information, see the
* <a href="https:
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.blob.BlobContainerAsyncClient.setAccessPolicy
* <pre>
* BlobSignedIdentifier identifier = new BlobSignedIdentifier&
* .setId&
* .setAccessPolicy&
* .setStartsOn&
* .setExpiresOn&
* .setPermissions&
*
* client.setAccessPolicy&
* response -> System.out.printf&
* error -> System.out.printf&
* </pre>
* <!-- end com.azure.storage.blob.BlobContainerAsyncClient.setAccessPolicy
*
* @param accessType Specifies how the data in this container is available to the public. See the
* x-ms-blob-public-access header in the Azure Docs for more information. Pass null for no public access.
* @param identifiers A list of {@link BlobSignedIdentifier} objects that specify the permissions for the container.
* Please see
* <a href="https:
* for more information. Passing null will clear all access policies.
* @return A reactive response signalling completion.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Void> setAccessPolicy(PublicAccessType accessType, List<BlobSignedIdentifier> identifiers) {
    // No request conditions: forward to the Response-returning overload and drop the wrapper.
    Mono<Response<Void>> withResponse = setAccessPolicyWithResponse(accessType, identifiers, null);
    return withResponse.flatMap(FluxUtil::toMono);
}
/**
* Sets the container's permissions. The permissions indicate whether blobs in a container may be accessed publicly.
* Note that, for each signed identifier, we will truncate the start and expiry times to the nearest second to
* ensure the time formatting is compatible with the service. For more information, see the
* <a href="https:
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.blob.BlobContainerAsyncClient.setAccessPolicyWithResponse
* <pre>
* BlobSignedIdentifier identifier = new BlobSignedIdentifier&
* .setId&
* .setAccessPolicy&
* .setStartsOn&
* .setExpiresOn&
* .setPermissions&
*
* BlobRequestConditions requestConditions = new BlobRequestConditions&
* .setLeaseId&
* .setIfUnmodifiedSince&
*
* client.setAccessPolicyWithResponse&
* .subscribe&
* System.out.printf&
* </pre>
* <!-- end com.azure.storage.blob.BlobContainerAsyncClient.setAccessPolicyWithResponse
*
* @param accessType Specifies how the data in this container is available to the public. See the
* x-ms-blob-public-access header in the Azure Docs for more information. Pass null for no public access.
* @param identifiers A list of {@link BlobSignedIdentifier} objects that specify the permissions for the container.
* Please see
* <a href="https:
* for more information. Passing null will clear all access policies.
* @param requestConditions {@link BlobRequestConditions}
* @return A reactive response signalling completion.
* @throws UnsupportedOperationException If either {@link BlobRequestConditions
* {@link BlobRequestConditions
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<Void>> setAccessPolicyWithResponse(PublicAccessType accessType,
    List<BlobSignedIdentifier> identifiers, BlobRequestConditions requestConditions) {
    // Route through withContext so reactor context flows into the pipeline; map synchronous
    // setup failures to an error signal.
    try {
        return withContext(
            ctx -> setAccessPolicyWithResponse(accessType, identifiers, requestConditions, ctx));
    } catch (RuntimeException e) {
        return monoError(LOGGER, e);
    }
}
// Replaces the container's stored access policies and public-access level.
// Throws UnsupportedOperationException when ETag conditions are supplied (the service
// does not accept If-Match/If-None-Match on this operation).
Mono<Response<Void>> setAccessPolicyWithResponse(PublicAccessType accessType,
    List<BlobSignedIdentifier> identifiers, BlobRequestConditions requestConditions, Context context) {
    requestConditions = requestConditions == null ? new BlobRequestConditions() : requestConditions;
    if (!validateNoETag(requestConditions)) {
        throw LOGGER.logExceptionAsError(
            new UnsupportedOperationException("ETag access conditions are not supported for this API."));
    }
    truncateAccessPolicyTimes(identifiers);
    context = context == null ? Context.NONE : context;
    return this.azureBlobStorage.getContainers().setAccessPolicyWithResponseAsync(
        containerName, null, requestConditions.getLeaseId(), accessType, requestConditions.getIfModifiedSince(),
        requestConditions.getIfUnmodifiedSince(), null, identifiers,
        context.addData(AZ_TRACING_NAMESPACE_KEY, STORAGE_TRACING_NAMESPACE_VALUE))
        .map(response -> new SimpleResponse<>(response, null));
}

/*
 * Truncates each signed identifier's start/expiry time to whole seconds.
 *
 * We truncate to seconds because the service only supports nanoseconds or seconds, but doing an
 * OffsetDateTime.now will only give back milliseconds (more precise fields are zeroed and not serialized). This
 * allows for proper serialization with no real detriment to users as sub-second precision on active time for
 * signed identifiers is not really necessary.
 */
private static void truncateAccessPolicyTimes(List<BlobSignedIdentifier> identifiers) {
    if (identifiers == null) {
        return;
    }
    for (BlobSignedIdentifier identifier : identifiers) {
        // Single null check per identifier (the previous code re-checked getAccessPolicy() twice).
        if (identifier.getAccessPolicy() == null) {
            continue;
        }
        if (identifier.getAccessPolicy().getStartsOn() != null) {
            identifier.getAccessPolicy().setStartsOn(
                identifier.getAccessPolicy().getStartsOn().truncatedTo(ChronoUnit.SECONDS));
        }
        if (identifier.getAccessPolicy().getExpiresOn() != null) {
            identifier.getAccessPolicy().setExpiresOn(
                identifier.getAccessPolicy().getExpiresOn().truncatedTo(ChronoUnit.SECONDS));
        }
    }
}
/**
* Returns a reactive Publisher emitting all the blobs in this container lazily as needed. The directories are
* flattened and only actual blobs and no directories are returned.
*
* <p>
* Blob names are returned in lexicographic order. For more information, see the
* <a href="https:
*
* <p>
* E.g. listing a container containing a 'foo' folder, which contains blobs 'foo1' and 'foo2', and a blob on the
* root level 'bar', will return
*
* <ul>
* <li>foo/foo1
* <li>foo/foo2
* <li>bar
* </ul>
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.blob.BlobContainerAsyncClient.listBlobs -->
* <pre>
* client.listBlobs&
* System.out.printf&
* </pre>
* <!-- end com.azure.storage.blob.BlobContainerAsyncClient.listBlobs -->
*
* @return A reactive response emitting the flattened blobs.
*/
@ServiceMethod(returns = ReturnType.COLLECTION)
public PagedFlux<BlobItem> listBlobs() {
    // Default listing: no prefix, no extra details, service-default page size.
    ListBlobsOptions defaultOptions = new ListBlobsOptions();
    return this.listBlobs(defaultOptions);
}
/**
* Returns a reactive Publisher emitting all the blobs in this container lazily as needed. The directories are
* flattened and only actual blobs and no directories are returned.
*
* <p>
* Blob names are returned in lexicographic order. For more information, see the
* <a href="https:
*
* <p>
* E.g. listing a container containing a 'foo' folder, which contains blobs 'foo1' and 'foo2', and a blob on the
* root level 'bar', will return
*
* <ul>
* <li>foo/foo1
* <li>foo/foo2
* <li>bar
* </ul>
*
*
*
* <!-- src_embed com.azure.storage.blob.BlobContainerAsyncClient.listBlobs
* <pre>
* ListBlobsOptions options = new ListBlobsOptions&
* .setPrefix&
* .setDetails&
* .setRetrieveDeletedBlobs&
* .setRetrieveSnapshots&
*
* client.listBlobs&
* System.out.printf&
* blob.getName&
* blob.isPrefix&
* blob.isDeleted&
* blob.getSnapshot&
* </pre>
* <!-- end com.azure.storage.blob.BlobContainerAsyncClient.listBlobs
*
* @param options {@link ListBlobsOptions}
* @return A reactive response emitting the listed blobs, flattened.
*/
@ServiceMethod(returns = ReturnType.COLLECTION)
public PagedFlux<BlobItem> listBlobs(ListBlobsOptions options) {
    // Null continuation token starts enumeration from the beginning of the container.
    return this.listBlobs(options, null);
}
/**
* Returns a reactive Publisher emitting all the blobs in this container lazily as needed. The directories are
* flattened and only actual blobs and no directories are returned.
*
* <p>
* Blob names are returned in lexicographic order. For more information, see the
* <a href="https:
*
* <p>
* E.g. listing a container containing a 'foo' folder, which contains blobs 'foo1' and 'foo2', and a blob on the
* root level 'bar', will return
*
* <ul>
* <li>foo/foo1
* <li>foo/foo2
* <li>bar
* </ul>
*
*
*
* <!-- src_embed com.azure.storage.blob.BlobContainerAsyncClient.listBlobs
* <pre>
* ListBlobsOptions options = new ListBlobsOptions&
* .setPrefix&
* .setDetails&
* .setRetrieveDeletedBlobs&
* .setRetrieveSnapshots&
*
* String continuationToken = "continuationToken";
*
* client.listBlobs&
* System.out.printf&
* blob.getName&
* blob.isPrefix&
* blob.isDeleted&
* blob.getSnapshot&
* </pre>
* <!-- end com.azure.storage.blob.BlobContainerAsyncClient.listBlobs
*
* @param options {@link ListBlobsOptions}
* @param continuationToken Identifies the portion of the list to be returned with the next list operation.
* @return A reactive response emitting the listed blobs, flattened.
*/
@ServiceMethod(returns = ReturnType.COLLECTION)
public PagedFlux<BlobItem> listBlobs(ListBlobsOptions options, String continuationToken) {
    // Map synchronous setup failures to an error-emitting PagedFlux rather than throwing.
    try {
        return listBlobsFlatWithOptionalTimeout(options, continuationToken, null);
    } catch (RuntimeException e) {
        return pagedFluxError(LOGGER, e);
    }
}
/*
* Implementation for this paged listing operation, supporting an optional timeout provided by the synchronous
* ContainerClient. Applies the given timeout to each Mono<ContainersListBlobFlatSegmentResponse> backing the
* PagedFlux.
*
* @param options {@link ListBlobsOptions}.
* @param timeout An optional timeout to be applied to the network asynchronous operations.
* @return A reactive response emitting the listed blobs, flattened.
*/
PagedFlux<BlobItem> listBlobsFlatWithOptionalTimeout(ListBlobsOptions options, String continuationToken,
Duration timeout) {
// Page retriever: (marker, pageSize) -> one segment of the flat listing.
BiFunction<String, Integer, Mono<PagedResponse<BlobItem>>> func =
(marker, pageSize) -> {
ListBlobsOptions finalOptions;
// A page size supplied via .byPage(int) overrides the one in options; prefix and
// details from the caller's options are preserved in that case.
if (pageSize != null) {
if (options == null) {
finalOptions = new ListBlobsOptions().setMaxResultsPerPage(pageSize);
} else {
finalOptions = new ListBlobsOptions()
.setMaxResultsPerPage(pageSize)
.setPrefix(options.getPrefix())
.setDetails(options.getDetails());
}
} else {
finalOptions = options;
}
return listBlobsFlatSegment(marker, finalOptions, timeout)
.map(response -> {
// A null segment means an empty page, not an error.
List<BlobItem> value = response.getValue().getSegment() == null
? Collections.emptyList()
: response.getValue().getSegment().getBlobItems().stream()
.map(ModelHelper::populateBlobItem)
.collect(Collectors.toList());
return new PagedResponseBase<>(
response.getRequest(),
response.getStatusCode(),
response.getHeaders(),
value,
// NextMarker feeds the next page request; null/empty terminates paging.
response.getValue().getNextMarker(),
response.getDeserializedHeaders());
});
};
// First page starts from the caller's continuation token; later pages use the marker.
return new PagedFlux<>(pageSize -> func.apply(continuationToken, pageSize), func);
}
/*
* Returns a single segment of blobs starting from the specified Marker. Use an empty
* marker to start enumeration from the beginning. Blob names are returned in lexicographic order.
* After getting a segment, process it, and then call ListBlobs again (passing the previously-returned
* Marker) to get the next segment. For more information, see the
* <a href="https:
*
* @param marker
* Identifies the portion of the list to be returned with the next list operation.
* This value is returned by the response of a previous list operation as the
* ListBlobsFlatSegmentResponse.body().getNextMarker(). Set to null to list the first segment.
* @param options
* {@link ListBlobsOptions}
*
* @return Emits the successful response.
*/
private Mono<ContainersListBlobFlatSegmentResponse> listBlobsFlatSegment(String marker, ListBlobsOptions options,
Duration timeout) {
options = options == null ? new ListBlobsOptions() : options;
// The service expects null (omit the query parameter) rather than an empty include list.
ArrayList<ListBlobsIncludeItem> include =
options.getDetails().toList().isEmpty() ? null : options.getDetails().toList();
// Optionally bound the network call; a null timeout leaves the call unbounded.
return StorageImplUtils.applyOptionalTimeout(
this.azureBlobStorage.getContainers().listBlobFlatSegmentWithResponseAsync(containerName, options.getPrefix(),
marker, options.getMaxResultsPerPage(), include,
null, null, Context.NONE), timeout);
}
/**
* Returns a reactive Publisher emitting all the blobs and directories (prefixes) under the given directory
* (prefix). Directories will have {@link BlobItem
*
* <p>
* Blob names are returned in lexicographic order. For more information, see the
* <a href="https:
*
* <p>
* E.g. listing a container containing a 'foo' folder, which contains blobs 'foo1' and 'foo2', and a blob on the
* root level 'bar', will return the following results when prefix=null:
*
* <ul>
* <li>foo/ (isPrefix = true)
* <li>bar (isPrefix = false)
* </ul>
* <p>
* will return the following results when prefix="foo/":
*
* <ul>
* <li>foo/foo1 (isPrefix = false)
* <li>foo/foo2 (isPrefix = false)
* </ul>
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.blob.BlobContainerAsyncClient.listBlobsByHierarchy
* <pre>
* client.listBlobsByHierarchy&
* System.out.printf&
* </pre>
* <!-- end com.azure.storage.blob.BlobContainerAsyncClient.listBlobsByHierarchy
*
* @param directory The directory to list blobs underneath
* @return A reactive response emitting the prefixes and blobs.
*/
@ServiceMethod(returns = ReturnType.COLLECTION)
public PagedFlux<BlobItem> listBlobsByHierarchy(String directory) {
    // Use '/' as the hierarchy delimiter and scope the listing to the given directory prefix.
    ListBlobsOptions scopedOptions = new ListBlobsOptions().setPrefix(directory);
    return this.listBlobsByHierarchy("/", scopedOptions);
}
/**
* Returns a reactive Publisher emitting all the blobs and prefixes (directories) under the given prefix
* (directory). Directories will have {@link BlobItem
*
* <p>
* Blob names are returned in lexicographic order. For more information, see the
* <a href="https:
*
* <p>
* E.g. listing a container containing a 'foo' folder, which contains blobs 'foo1' and 'foo2', and a blob on the
* root level 'bar', will return the following results when prefix=null:
*
* <ul>
* <li>foo/ (isPrefix = true)
* <li>bar (isPrefix = false)
* </ul>
* <p>
* will return the following results when prefix="foo/":
*
* <ul>
* <li>foo/foo1 (isPrefix = false)
* <li>foo/foo2 (isPrefix = false)
* </ul>
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.blob.BlobContainerAsyncClient.listBlobsByHierarchy
* <pre>
* ListBlobsOptions options = new ListBlobsOptions&
* .setPrefix&
* .setDetails&
* .setRetrieveDeletedBlobs&
* .setRetrieveSnapshots&
*
* client.listBlobsByHierarchy&
* System.out.printf&
* blob.getName&
* blob.isPrefix&
* blob.isDeleted&
* blob.getSnapshot&
* </pre>
* <!-- end com.azure.storage.blob.BlobContainerAsyncClient.listBlobsByHierarchy
*
* @param delimiter The delimiter for blob hierarchy, "/" for hierarchy based on directories
* @param options {@link ListBlobsOptions}
* @return A reactive response emitting the prefixes and blobs.
*/
@ServiceMethod(returns = ReturnType.COLLECTION)
public PagedFlux<BlobItem> listBlobsByHierarchy(String delimiter, ListBlobsOptions options) {
    // Map synchronous setup failures to an error-emitting PagedFlux rather than throwing.
    try {
        return listBlobsHierarchyWithOptionalTimeout(delimiter, options, null);
    } catch (RuntimeException e) {
        return pagedFluxError(LOGGER, e);
    }
}
/*
* Implementation for this paged listing operation, supporting an optional timeout provided by the synchronous
* ContainerClient. Applies the given timeout to each Mono<ContainersListBlobHierarchySegmentResponse> backing the
* PagedFlux.
*
* @param delimiter The delimiter for blob hierarchy, "/" for hierarchy based on directories
* @param options {@link ListBlobsOptions}
* @param timeout An optional timeout to be applied to the network asynchronous operations.
* @return A reactive response emitting the listed blobs, flattened.
*/
PagedFlux<BlobItem> listBlobsHierarchyWithOptionalTimeout(String delimiter, ListBlobsOptions options,
Duration timeout) {
// Page retriever: (marker, pageSize) -> one segment of the hierarchical listing.
BiFunction<String, Integer, Mono<PagedResponse<BlobItem>>> func =
(marker, pageSize) -> {
ListBlobsOptions finalOptions;
/*
If pageSize was not set in a .byPage(int) method, the page size from options will be preserved.
Otherwise, prefer the new value.
*/
if (pageSize != null) {
if (options == null) {
finalOptions = new ListBlobsOptions().setMaxResultsPerPage(pageSize);
} else {
finalOptions = new ListBlobsOptions()
.setMaxResultsPerPage(pageSize)
.setPrefix(options.getPrefix())
.setDetails(options.getDetails());
}
} else {
finalOptions = options;
}
return listBlobsHierarchySegment(marker, delimiter, finalOptions, timeout)
.map(response -> {
// Merge real blob items with directory prefixes (marked isPrefix = true) into one page.
List<BlobItem> value = response.getValue().getSegment() == null
? Collections.emptyList()
: Stream.concat(
response.getValue().getSegment().getBlobItems().stream().map(ModelHelper::populateBlobItem),
response.getValue().getSegment().getBlobPrefixes().stream()
.map(blobPrefix -> new BlobItem()
.setName(ModelHelper.toBlobNameString(blobPrefix.getName())).setIsPrefix(true))
).collect(Collectors.toList());
return new PagedResponseBase<>(
response.getRequest(),
response.getStatusCode(),
response.getHeaders(),
value,
// NextMarker feeds the next page request; null/empty terminates paging.
response.getValue().getNextMarker(),
response.getDeserializedHeaders());
});
};
return new PagedFlux<>(pageSize -> func.apply(null, pageSize), func);
}
private Mono<ContainersListBlobHierarchySegmentResponse> listBlobsHierarchySegment(String marker, String delimiter,
ListBlobsOptions options, Duration timeout) {
options = options == null ? new ListBlobsOptions() : options;
// The service rejects snapshot inclusion for hierarchical listings, so fail fast here.
if (options.getDetails().getRetrieveSnapshots()) {
throw LOGGER.logExceptionAsError(
new UnsupportedOperationException("Including snapshots in a hierarchical listing is not supported."));
}
// The service expects null (omit the query parameter) rather than an empty include list.
ArrayList<ListBlobsIncludeItem> include =
options.getDetails().toList().isEmpty() ? null : options.getDetails().toList();
// Optionally bound the network call; a null timeout leaves the call unbounded.
return StorageImplUtils.applyOptionalTimeout(
this.azureBlobStorage.getContainers().listBlobHierarchySegmentWithResponseAsync(containerName, delimiter,
options.getPrefix(), marker, options.getMaxResultsPerPage(), include, null, null,
Context.NONE),
timeout);
}
/**
* Returns a reactive Publisher emitting the blobs in this container whose tags match the query expression. For more
* information, including information on the query syntax, see the <a href="https:
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.blob.BlobContainerAsyncClient.findBlobsByTag
* <pre>
* client.findBlobsByTags&
* </pre>
* <!-- end com.azure.storage.blob.BlobContainerAsyncClient.findBlobsByTag
*
* @param query Filters the results to return only blobs whose tags match the specified expression.
* @return A reactive response emitting the list of blobs.
*/
@ServiceMethod(returns = ReturnType.COLLECTION)
public PagedFlux<TaggedBlobItem> findBlobsByTags(String query) {
    try {
        // Wrap the raw query expression in an options object and reuse the options overload.
        FindBlobsOptions queryOptions = new FindBlobsOptions(query);
        return this.findBlobsByTags(queryOptions);
    } catch (RuntimeException e) {
        return pagedFluxError(LOGGER, e);
    }
}
/**
* Returns a reactive Publisher emitting the blobs in this container whose tags match the query expression. For more
* information, including information on the query syntax, see the <a href="https:
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.blob.BlobContainerAsyncClient.findBlobsByTag
* <pre>
* client.findBlobsByTags&
* .subscribe&
* </pre>
* <!-- end com.azure.storage.blob.BlobContainerAsyncClient.findBlobsByTag
*
* @param options {@link FindBlobsOptions}
* @return A reactive response emitting the list of blobs.
*/
@ServiceMethod(returns = ReturnType.COLLECTION)
public PagedFlux<TaggedBlobItem> findBlobsByTags(FindBlobsOptions options) {
    // No timeout bound for the public entry point; map setup failures to an error signal.
    try {
        return findBlobsByTags(options, null);
    } catch (RuntimeException e) {
        return pagedFluxError(LOGGER, e);
    }
}
// Lists blobs whose tags match the options' query expression, paging lazily.
// Fix: previously this always rebuilt the options with setMaxResultsPerPage(pageSize),
// which discarded the caller's options.getMaxResultsPerPage() whenever no explicit
// .byPage(int) page size was supplied (pageSize == null). Now the caller's options are
// used untouched in that case, matching the Context-taking overload.
PagedFlux<TaggedBlobItem> findBlobsByTags(FindBlobsOptions options, Duration timeout) {
    StorageImplUtils.assertNotNull("options", options);
    BiFunction<String, Integer, Mono<PagedResponse<TaggedBlobItem>>> func =
        (marker, pageSize) -> {
            // A page size from .byPage(int) overrides the options; otherwise keep them as-is.
            FindBlobsOptions finalOptions;
            if (pageSize != null) {
                finalOptions = new FindBlobsOptions(options.getQuery()).setMaxResultsPerPage(pageSize);
            } else {
                finalOptions = options;
            }
            return withContext(context -> this.findBlobsByTags(finalOptions, marker, timeout, context));
        };
    return new PagedFlux<>(pageSize -> func.apply(null, pageSize), func);
}
// Lists blobs whose tags match the options' query expression, paging on the given Context.
PagedFlux<TaggedBlobItem> findBlobsByTags(FindBlobsOptions options, Duration timeout, Context context) {
    StorageImplUtils.assertNotNull("options", options);
    BiFunction<String, Integer, Mono<PagedResponse<TaggedBlobItem>>> pageRetriever =
        (marker, pageSize) -> {
            // A page size from .byPage(int) overrides the options; otherwise use them as-is.
            FindBlobsOptions effectiveOptions = (pageSize == null)
                ? options
                : new FindBlobsOptions(options.getQuery()).setMaxResultsPerPage(pageSize);
            return this.findBlobsByTags(effectiveOptions, marker, timeout, context);
        };
    return new PagedFlux<>(pageSize -> pageRetriever.apply(null, pageSize), pageRetriever);
}
// Fetches a single page of the filter-blobs (find-by-tags) listing from the service and
// converts the generated-client response into a PagedResponse of TaggedBlobItem.
private Mono<PagedResponse<TaggedBlobItem>> findBlobsByTags(
FindBlobsOptions options, String marker,
Duration timeout, Context context) {
StorageImplUtils.assertNotNull("options", options);
// Optionally bound the network call; a null timeout leaves the call unbounded.
return StorageImplUtils.applyOptionalTimeout(
this.azureBlobStorage.getContainers().filterBlobsWithResponseAsync(containerName, null, null,
options.getQuery(), marker, options.getMaxResultsPerPage(),
context.addData(AZ_TRACING_NAMESPACE_KEY, STORAGE_TRACING_NAMESPACE_VALUE)), timeout)
.map(response -> {
// A null blob list means an empty page, not an error.
List<TaggedBlobItem> value = response.getValue().getBlobs() == null
? Collections.emptyList()
: response.getValue().getBlobs().stream()
.map(ModelHelper::populateTaggedBlobItem)
.collect(Collectors.toList());
return new PagedResponseBase<>(
response.getRequest(),
response.getStatusCode(),
response.getHeaders(),
value,
// NextMarker feeds the next page request; null/empty terminates paging.
response.getValue().getNextMarker(),
response.getDeserializedHeaders());
});
}
/**
* Returns the sku name and account kind for the account. For more information, please see the
* <a href="https:
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.blob.BlobContainerAsyncClient.getAccountInfo -->
* <pre>
* client.getAccountInfo&
* System.out.printf&
* response.getAccountKind&
* response.getSkuName&
* </pre>
* <!-- end com.azure.storage.blob.BlobContainerAsyncClient.getAccountInfo -->
*
* @return A reactive response containing the account info.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<StorageAccountInfo> getAccountInfo() {
    // Delegate to the Response-returning overload and unwrap the value.
    Mono<Response<StorageAccountInfo>> withResponse = getAccountInfoWithResponse();
    return withResponse.flatMap(FluxUtil::toMono);
}
/**
* Returns the sku name and account kind for the account. For more information, please see the
* <a href="https:
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.blob.BlobContainerAsyncClient.getAccountInfoWithResponse -->
* <pre>
* client.getAccountInfoWithResponse&
* System.out.printf&
* response.getValue&
* response.getValue&
* </pre>
* <!-- end com.azure.storage.blob.BlobContainerAsyncClient.getAccountInfoWithResponse -->
*
* @return A reactive response containing the account info.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<StorageAccountInfo>> getAccountInfoWithResponse() {
    // Surface synchronous setup failures as an error signal instead of a thrown exception.
    try {
        return withContext(ctx -> this.getAccountInfoWithResponse(ctx));
    } catch (RuntimeException e) {
        return monoError(LOGGER, e);
    }
}
// Queries the service for the account's SKU name and kind, attaching the storage tracing
// namespace to the pipeline context.
Mono<Response<StorageAccountInfo>> getAccountInfoWithResponse(Context context) {
context = context == null ? Context.NONE : context;
return this.azureBlobStorage.getContainers().getAccountInfoWithResponseAsync(containerName,
context.addData(AZ_TRACING_NAMESPACE_KEY, STORAGE_TRACING_NAMESPACE_VALUE))
.map(rb -> {
// The account info is carried entirely in response headers; map them into the model.
ContainersGetAccountInfoHeaders hd = rb.getDeserializedHeaders();
return new SimpleResponse<>(rb, new StorageAccountInfo(hd.getXMsSkuName(), hd.getXMsAccountKind()));
});
}
/**
* Generates a user delegation SAS for the container using the specified {@link BlobServiceSasSignatureValues}.
* <p>See {@link BlobServiceSasSignatureValues} for more information on how to construct a user delegation SAS.</p>
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.blob.BlobContainerAsyncClient.generateUserDelegationSas
* <pre>
* OffsetDateTime myExpiryTime = OffsetDateTime.now&
* BlobContainerSasPermission myPermission = new BlobContainerSasPermission&
*
* BlobServiceSasSignatureValues myValues = new BlobServiceSasSignatureValues&
* .setStartTime&
*
* client.generateUserDelegationSas&
* </pre>
* <!-- end com.azure.storage.blob.BlobContainerAsyncClient.generateUserDelegationSas
*
* @param blobServiceSasSignatureValues {@link BlobServiceSasSignatureValues}
* @param userDelegationKey A {@link UserDelegationKey} object used to sign the SAS values.
* See {@link BlobServiceAsyncClient
* how to get a user delegation key.
*
* @return A {@code String} representing the SAS query parameters.
*/
public String generateUserDelegationSas(BlobServiceSasSignatureValues blobServiceSasSignatureValues,
    UserDelegationKey userDelegationKey) {
    // Default to this client's account name and an empty context.
    String accountName = getAccountName();
    return generateUserDelegationSas(blobServiceSasSignatureValues, userDelegationKey, accountName, Context.NONE);
}
/**
* Generates a user delegation SAS for the container using the specified {@link BlobServiceSasSignatureValues}.
* <p>See {@link BlobServiceSasSignatureValues} for more information on how to construct a user delegation SAS.</p>
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.blob.BlobContainerAsyncClient.generateUserDelegationSas
* <pre>
* OffsetDateTime myExpiryTime = OffsetDateTime.now&
* BlobContainerSasPermission myPermission = new BlobContainerSasPermission&
*
* BlobServiceSasSignatureValues myValues = new BlobServiceSasSignatureValues&
* .setStartTime&
*
* client.generateUserDelegationSas&
* </pre>
* <!-- end com.azure.storage.blob.BlobContainerAsyncClient.generateUserDelegationSas
*
* @param blobServiceSasSignatureValues {@link BlobServiceSasSignatureValues}
* @param userDelegationKey A {@link UserDelegationKey} object used to sign the SAS values.
* See {@link BlobServiceAsyncClient
* how to get a user delegation key.
* @param accountName The account name.
* @param context Additional context that is passed through the code when generating a SAS.
*
* @return A {@code String} representing the SAS query parameters.
*/
public String generateUserDelegationSas(BlobServiceSasSignatureValues blobServiceSasSignatureValues,
    UserDelegationKey userDelegationKey, String accountName, Context context) {
    // Build the SAS helper scoped to this container, then sign with the user delegation key.
    BlobSasImplUtil sasImplUtil = new BlobSasImplUtil(blobServiceSasSignatureValues, getBlobContainerName());
    return sasImplUtil.generateUserDelegationSas(userDelegationKey, accountName, context);
}
/**
* Generates a service SAS for the container using the specified {@link BlobServiceSasSignatureValues}
* <p>Note : The client must be authenticated via {@link StorageSharedKeyCredential}
* <p>See {@link BlobServiceSasSignatureValues} for more information on how to construct a service SAS.</p>
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.blob.BlobContainerAsyncClient.generateSas
* <pre>
* OffsetDateTime expiryTime = OffsetDateTime.now&
* BlobContainerSasPermission permission = new BlobContainerSasPermission&
*
* BlobServiceSasSignatureValues values = new BlobServiceSasSignatureValues&
* .setStartTime&
*
* client.generateSas&
* </pre>
* <!-- end com.azure.storage.blob.BlobContainerAsyncClient.generateSas
*
* @param blobServiceSasSignatureValues {@link BlobServiceSasSignatureValues}
*
* @return A {@code String} representing the SAS query parameters.
*/
public String generateSas(BlobServiceSasSignatureValues blobServiceSasSignatureValues) {
    // No caller-supplied context; delegate with the empty one.
    return this.generateSas(blobServiceSasSignatureValues, Context.NONE);
}
/**
* Generates a service SAS for the container using the specified {@link BlobServiceSasSignatureValues}
* <p>Note : The client must be authenticated via {@link StorageSharedKeyCredential}
* <p>See {@link BlobServiceSasSignatureValues} for more information on how to construct a service SAS.</p>
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.blob.BlobContainerAsyncClient.generateSas
* <pre>
* OffsetDateTime expiryTime = OffsetDateTime.now&
* BlobContainerSasPermission permission = new BlobContainerSasPermission&
*
* BlobServiceSasSignatureValues values = new BlobServiceSasSignatureValues&
* .setStartTime&
*
* &
* client.generateSas&
* </pre>
* <!-- end com.azure.storage.blob.BlobContainerAsyncClient.generateSas
*
* @param blobServiceSasSignatureValues {@link BlobServiceSasSignatureValues}
* @param context Additional context that is passed through the code when generating a SAS.
*
* @return A {@code String} representing the SAS query parameters.
*/
public String generateSas(BlobServiceSasSignatureValues blobServiceSasSignatureValues, Context context) {
    // Sign with the shared key credential extracted from this client's HTTP pipeline.
    BlobSasImplUtil sasImplUtil = new BlobSasImplUtil(blobServiceSasSignatureValues, getBlobContainerName());
    return sasImplUtil.generateSas(SasImplUtils.extractSharedKeyCredential(getHttpPipeline()), context);
}
/**
 * Checks that the given request conditions carry no ETag access conditions.
 *
 * @param modifiedRequestConditions the conditions to inspect; may be {@code null}.
 * @return {@code true} when neither an if-match nor an if-none-match condition is present
 * (a {@code null} conditions object trivially has none).
 */
private static boolean validateNoETag(BlobRequestConditions modifiedRequestConditions) {
    return modifiedRequestConditions == null
        || (modifiedRequestConditions.getIfMatch() == null
            && modifiedRequestConditions.getIfNoneMatch() == null);
}
} |
and here. | private String stringToSign(final UserDelegationKey key, String canonicalName) {
// The user-delegation string-to-sign layout is service-version dependent:
//   <= 2019-12-12 : no saoid/suoid/scid segments and no encryption-scope segment.
//   <= 2020-10-02 : adds the preauthorized agent object id (saoid), agent object id
//                   (suoid) and correlation id (scid) segments.
//   >  2020-10-02 : additionally reserves an (empty) encryption-scope segment.
// Emitting a segment the target service version does not define yields a string-to-sign
// that does not match the one the service computes, so the signature is rejected.
if (VERSION.compareTo(DataLakeServiceVersion.V2019_12_12.getVersion()) <= 0) {
    return String.join("\n",
        this.permissions == null ? "" : this.permissions,
        this.startTime == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(this.startTime),
        this.expiryTime == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(this.expiryTime),
        canonicalName,
        key.getSignedObjectId() == null ? "" : key.getSignedObjectId(),
        key.getSignedTenantId() == null ? "" : key.getSignedTenantId(),
        key.getSignedStart() == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(key.getSignedStart()),
        key.getSignedExpiry() == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(key.getSignedExpiry()),
        key.getSignedService() == null ? "" : key.getSignedService(),
        key.getSignedVersion() == null ? "" : key.getSignedVersion(),
        this.sasIpRange == null ? "" : this.sasIpRange.toString(),
        this.protocol == null ? "" : this.protocol.toString(),
        VERSION,
        resource,
        "", /* Version segment. */
        this.cacheControl == null ? "" : this.cacheControl,
        this.contentDisposition == null ? "" : this.contentDisposition,
        this.contentEncoding == null ? "" : this.contentEncoding,
        this.contentLanguage == null ? "" : this.contentLanguage,
        this.contentType == null ? "" : this.contentType
    );
} else if (VERSION.compareTo(DataLakeServiceVersion.V2020_10_02.getVersion()) <= 0) {
    return String.join("\n",
        this.permissions == null ? "" : this.permissions,
        this.startTime == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(this.startTime),
        this.expiryTime == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(this.expiryTime),
        canonicalName,
        key.getSignedObjectId() == null ? "" : key.getSignedObjectId(),
        key.getSignedTenantId() == null ? "" : key.getSignedTenantId(),
        key.getSignedStart() == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(key.getSignedStart()),
        key.getSignedExpiry() == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(key.getSignedExpiry()),
        key.getSignedService() == null ? "" : key.getSignedService(),
        key.getSignedVersion() == null ? "" : key.getSignedVersion(),
        this.authorizedAadObjectId == null ? "" : this.authorizedAadObjectId,
        this.unauthorizedAadObjectId == null ? "" : this.unauthorizedAadObjectId,
        this.correlationId == null ? "" : this.correlationId,
        this.sasIpRange == null ? "" : this.sasIpRange.toString(),
        this.protocol == null ? "" : this.protocol.toString(),
        VERSION,
        resource,
        "", /* Version segment. */
        this.cacheControl == null ? "" : this.cacheControl,
        this.contentDisposition == null ? "" : this.contentDisposition,
        this.contentEncoding == null ? "" : this.contentEncoding,
        this.contentLanguage == null ? "" : this.contentLanguage,
        this.contentType == null ? "" : this.contentType
    );
} else {
    return String.join("\n",
        this.permissions == null ? "" : this.permissions,
        this.startTime == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(this.startTime),
        this.expiryTime == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(this.expiryTime),
        canonicalName,
        key.getSignedObjectId() == null ? "" : key.getSignedObjectId(),
        key.getSignedTenantId() == null ? "" : key.getSignedTenantId(),
        key.getSignedStart() == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(key.getSignedStart()),
        key.getSignedExpiry() == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(key.getSignedExpiry()),
        key.getSignedService() == null ? "" : key.getSignedService(),
        key.getSignedVersion() == null ? "" : key.getSignedVersion(),
        this.authorizedAadObjectId == null ? "" : this.authorizedAadObjectId,
        this.unauthorizedAadObjectId == null ? "" : this.unauthorizedAadObjectId,
        this.correlationId == null ? "" : this.correlationId,
        this.sasIpRange == null ? "" : this.sasIpRange.toString(),
        this.protocol == null ? "" : this.protocol.toString(),
        VERSION,
        resource,
        "", /* Version segment. */
        "", /* Encryption scope. */
        this.cacheControl == null ? "" : this.cacheControl,
        this.contentDisposition == null ? "" : this.contentDisposition,
        this.contentEncoding == null ? "" : this.contentEncoding,
        this.contentLanguage == null ? "" : this.contentLanguage,
        this.contentType == null ? "" : this.contentType
    );
}
} | "", | private String stringToSign(final UserDelegationKey key, String canonicalName) {
// Version-tiered user-delegation string-to-sign. The middle tier was written as a
// bare `} if (...)` after the first branch; that only works because every branch
// returns — make the chain an explicit if / else-if / else so the control flow is
// robust to future edits.
//   <= 2019-12-12 : no saoid/suoid/scid segments and no encryption-scope segment.
//   <= 2020-10-02 : adds saoid/suoid/scid segments.
//   >  2020-10-02 : additionally reserves an (empty) encryption-scope segment.
if (VERSION.compareTo(DataLakeServiceVersion.V2019_12_12.getVersion()) <= 0) {
    return String.join("\n",
        this.permissions == null ? "" : this.permissions,
        this.startTime == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(this.startTime),
        this.expiryTime == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(this.expiryTime),
        canonicalName,
        key.getSignedObjectId() == null ? "" : key.getSignedObjectId(),
        key.getSignedTenantId() == null ? "" : key.getSignedTenantId(),
        key.getSignedStart() == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(key.getSignedStart()),
        key.getSignedExpiry() == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(key.getSignedExpiry()),
        key.getSignedService() == null ? "" : key.getSignedService(),
        key.getSignedVersion() == null ? "" : key.getSignedVersion(),
        this.sasIpRange == null ? "" : this.sasIpRange.toString(),
        this.protocol == null ? "" : this.protocol.toString(),
        VERSION,
        resource,
        "", /* Version segment. */
        this.cacheControl == null ? "" : this.cacheControl,
        this.contentDisposition == null ? "" : this.contentDisposition,
        this.contentEncoding == null ? "" : this.contentEncoding,
        this.contentLanguage == null ? "" : this.contentLanguage,
        this.contentType == null ? "" : this.contentType
    );
} else if (VERSION.compareTo(DataLakeServiceVersion.V2020_10_02.getVersion()) <= 0) {
    return String.join("\n",
        this.permissions == null ? "" : this.permissions,
        this.startTime == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(this.startTime),
        this.expiryTime == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(this.expiryTime),
        canonicalName,
        key.getSignedObjectId() == null ? "" : key.getSignedObjectId(),
        key.getSignedTenantId() == null ? "" : key.getSignedTenantId(),
        key.getSignedStart() == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(key.getSignedStart()),
        key.getSignedExpiry() == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(key.getSignedExpiry()),
        key.getSignedService() == null ? "" : key.getSignedService(),
        key.getSignedVersion() == null ? "" : key.getSignedVersion(),
        this.authorizedAadObjectId == null ? "" : this.authorizedAadObjectId,
        this.unauthorizedAadObjectId == null ? "" : this.unauthorizedAadObjectId,
        this.correlationId == null ? "" : this.correlationId,
        this.sasIpRange == null ? "" : this.sasIpRange.toString(),
        this.protocol == null ? "" : this.protocol.toString(),
        VERSION,
        resource,
        "", /* Version segment. */
        this.cacheControl == null ? "" : this.cacheControl,
        this.contentDisposition == null ? "" : this.contentDisposition,
        this.contentEncoding == null ? "" : this.contentEncoding,
        this.contentLanguage == null ? "" : this.contentLanguage,
        this.contentType == null ? "" : this.contentType
    );
} else {
    return String.join("\n",
        this.permissions == null ? "" : this.permissions,
        this.startTime == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(this.startTime),
        this.expiryTime == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(this.expiryTime),
        canonicalName,
        key.getSignedObjectId() == null ? "" : key.getSignedObjectId(),
        key.getSignedTenantId() == null ? "" : key.getSignedTenantId(),
        key.getSignedStart() == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(key.getSignedStart()),
        key.getSignedExpiry() == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(key.getSignedExpiry()),
        key.getSignedService() == null ? "" : key.getSignedService(),
        key.getSignedVersion() == null ? "" : key.getSignedVersion(),
        this.authorizedAadObjectId == null ? "" : this.authorizedAadObjectId,
        this.unauthorizedAadObjectId == null ? "" : this.unauthorizedAadObjectId,
        this.correlationId == null ? "" : this.correlationId,
        this.sasIpRange == null ? "" : this.sasIpRange.toString(),
        this.protocol == null ? "" : this.protocol.toString(),
        VERSION,
        resource,
        "", /* Version segment. */
        "", /* Encryption scope. */
        this.cacheControl == null ? "" : this.cacheControl,
        this.contentDisposition == null ? "" : this.contentDisposition,
        this.contentEncoding == null ? "" : this.contentEncoding,
        this.contentLanguage == null ? "" : this.contentLanguage,
        this.contentType == null ? "" : this.contentType
    );
}
} | class DataLakeSasImplUtil {
/**
* The SAS blob (datalake file) constant.
*/
private static final String SAS_BLOB_CONSTANT = "b";
/**
* The SAS directory (datalake directory) constant.
*/
private static final String SAS_DIRECTORY_CONSTANT = "d";
/**
* The SAS blob container (datalake file system) constant.
*/
private static final String SAS_CONTAINER_CONSTANT = "c";
private static final ClientLogger LOGGER = new ClientLogger(DataLakeSasImplUtil.class);
private static final String VERSION = Configuration.getGlobalConfiguration()
.get(Constants.PROPERTY_AZURE_STORAGE_SAS_SERVICE_VERSION, DataLakeServiceVersion.getLatest().getVersion());
private SasProtocol protocol;
private OffsetDateTime startTime;
private OffsetDateTime expiryTime;
private String permissions;
private SasIpRange sasIpRange;
private String fileSystemName;
private String pathName;
private String resource;
private String identifier;
private String cacheControl;
private String contentDisposition;
private String contentEncoding;
private String contentLanguage;
private String contentType;
private Boolean isDirectory;
private Integer directoryDepth;
private String authorizedAadObjectId;
private String unauthorizedAadObjectId;
private String correlationId;
/**
* Creates a new {@link DataLakeSasImplUtil} with the specified parameters
*
* @param sasValues {@link DataLakeServiceSasSignatureValues}
* @param fileSystemName The file system name
*/
public DataLakeSasImplUtil(DataLakeServiceSasSignatureValues sasValues, String fileSystemName) {
// File-system (container) scoped SAS: no path name and not a directory.
this(sasValues, fileSystemName, null, false);
}
/**
* Creates a new {@link DataLakeSasImplUtil} with the specified parameters
*
* @param sasValues {@link DataLakeServiceSasSignatureValues}
* @param fileSystemName The file system name
* @param pathName The path name
* @param isDirectory Whether or not the path points to a directory.
*/
public DataLakeSasImplUtil(DataLakeServiceSasSignatureValues sasValues, String fileSystemName, String pathName,
boolean isDirectory) {
// Snapshot the caller-supplied signature values; sasValues itself is required.
Objects.requireNonNull(sasValues);
this.protocol = sasValues.getProtocol();
this.startTime = sasValues.getStartTime();
this.expiryTime = sasValues.getExpiryTime();
this.permissions = sasValues.getPermissions();
this.sasIpRange = sasValues.getSasIpRange();
this.fileSystemName = fileSystemName;
this.pathName = pathName;
this.identifier = sasValues.getIdentifier();
// Response-header overrides carried in the SAS (rscc/rscd/rsce/rscl/rsct).
this.cacheControl = sasValues.getCacheControl();
this.contentDisposition = sasValues.getContentDisposition();
this.contentEncoding = sasValues.getContentEncoding();
this.contentLanguage = sasValues.getContentLanguage();
this.contentType = sasValues.getContentType();
// User-delegation-only fields (saoid / suoid / scid).
this.authorizedAadObjectId = sasValues.getPreauthorizedAgentObjectId();
this.unauthorizedAadObjectId = sasValues.getAgentObjectId();
this.correlationId = sasValues.getCorrelationId();
this.isDirectory = isDirectory;
}
/**
* Generates a Sas signed with a {@link StorageSharedKeyCredential}
*
* @param storageSharedKeyCredentials {@link StorageSharedKeyCredential}
* @param context Additional context that is passed through the code when generating a SAS.
* @return A String representing the Sas
*/
/**
 * Generates a SAS signed with a {@link StorageSharedKeyCredential}.
 *
 * @param storageSharedKeyCredentials {@link StorageSharedKeyCredential}; must not be null.
 * @param context Additional context that is passed through the code when generating a SAS.
 * @return A String representing the Sas
 */
public String generateSas(StorageSharedKeyCredential storageSharedKeyCredentials, Context context) {
    StorageImplUtils.assertNotNull("storageSharedKeyCredentials", storageSharedKeyCredentials);
    // Normalize/validate builder state before signing.
    ensureState();
    String canonicalName = getCanonicalName(storageSharedKeyCredentials.getAccountName());
    String toSign = stringToSign(canonicalName);
    StorageImplUtils.logStringToSign(LOGGER, toSign, context);
    // Shared-key signing: HMAC the canonical string, then encode the query parameters.
    return encode(null /* userDelegationKey */, storageSharedKeyCredentials.computeHmac256(toSign));
}
/**
* Generates a Sas signed with a {@link UserDelegationKey}
*
* @param delegationKey {@link UserDelegationKey}
* @param accountName The account name
* @param context Additional context that is passed through the code when generating a SAS.
* @return A String representing the Sas
*/
/**
 * Generates a SAS signed with a {@link UserDelegationKey}.
 *
 * @param delegationKey {@link UserDelegationKey}; must not be null.
 * @param accountName The account name; must not be null.
 * @param context Additional context that is passed through the code when generating a SAS.
 * @return A String representing the Sas
 */
public String generateUserDelegationSas(UserDelegationKey delegationKey, String accountName, Context context) {
    StorageImplUtils.assertNotNull("delegationKey", delegationKey);
    StorageImplUtils.assertNotNull("accountName", accountName);
    // Normalize/validate builder state before signing.
    ensureState();
    String canonicalName = getCanonicalName(accountName);
    String toSign = stringToSign(delegationKey, canonicalName);
    StorageImplUtils.logStringToSign(LOGGER, toSign, context);
    // Sign with the delegation key's value rather than the account key.
    return encode(delegationKey, StorageImplUtils.computeHMac256(delegationKey.getValue(), toSign));
}
/**
* Encodes a Sas from the values in this type.
* @param userDelegationKey {@link UserDelegationKey}
* @param signature The signature of the Sas.
* @return A String representing the Sas.
*/
private String encode(UserDelegationKey userDelegationKey, String signature) {
/*
We should be url-encoding each key and each value, but because we know all the keys and values will encode to
themselves, we cheat except for the signature value.
*/
StringBuilder sb = new StringBuilder();
// Common SAS query parameters (sv, spr, st, se, sip, si).
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_SERVICE_VERSION, VERSION);
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_PROTOCOL, this.protocol);
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_START_TIME, formatQueryParameterDate(this.startTime));
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_EXPIRY_TIME, formatQueryParameterDate(this.expiryTime));
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_IP_RANGE, this.sasIpRange);
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_SIGNED_IDENTIFIER, this.identifier);
// User-delegation SAS adds the delegation-key fields (skoid, sktid, skt, ske, sks, skv).
if (userDelegationKey != null) {
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_SIGNED_OBJECT_ID,
userDelegationKey.getSignedObjectId());
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_SIGNED_TENANT_ID,
userDelegationKey.getSignedTenantId());
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_SIGNED_KEY_START,
formatQueryParameterDate(userDelegationKey.getSignedStart()));
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_SIGNED_KEY_EXPIRY,
formatQueryParameterDate(userDelegationKey.getSignedExpiry()));
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_SIGNED_KEY_SERVICE,
userDelegationKey.getSignedService());
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_SIGNED_KEY_VERSION,
userDelegationKey.getSignedVersion());
/* Only parameters relevant for user delegation SAS. */
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_PREAUTHORIZED_AGENT_OBJECT_ID, this.authorizedAadObjectId);
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_AGENT_OBJECT_ID, this.unauthorizedAadObjectId);
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_CORRELATION_ID, this.correlationId);
}
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_SIGNED_RESOURCE, this.resource);
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_SIGNED_PERMISSIONS, this.permissions);
// Directory-scoped SAS must advertise the directory depth (sdd).
if (this.isDirectory) {
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_DIRECTORY_DEPTH, this.directoryDepth);
}
// Signature, then the response-header overrides (rscc/rscd/rsce/rscl/rsct).
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_SIGNATURE, signature);
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_CACHE_CONTROL, this.cacheControl);
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_CONTENT_DISPOSITION, this.contentDisposition);
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_CONTENT_ENCODING, this.contentEncoding);
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_CONTENT_LANGUAGE, this.contentLanguage);
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_CONTENT_TYPE, this.contentType);
return sb.toString();
}
/**
* Ensures that the builder's properties are in a consistent state.
* 1. If there is no identifier set, ensure expiryTime and permissions are set.
* 2. Resource name is chosen by:
* a. If "BlobName" is _not_ set, it is a container resource.
* b. Otherwise, if "SnapshotId" is set, it is a blob snapshot resource.
* c. Otherwise, if "VersionId" is set, it is a blob version resource.
* d. Otherwise, it is a blob resource.
* 3. Reparse permissions depending on what the resource is. If it is an unrecognized resource, do nothing.
* 4. Ensure saoid is not set when suoid is set and vice versa.
*
* Taken from:
* https:
* https:
*/
/**
 * Ensures the builder's properties are in a consistent state before signing:
 * expiry/permissions are required when no identifier is set, the signed resource
 * type is derived from the path, permissions are re-parsed for that resource type,
 * and the two agent object ids are mutually exclusive.
 */
private void ensureState() {
    // Without a stored-access-policy identifier, expiry and permissions are mandatory.
    if (identifier == null && (expiryTime == null || permissions == null)) {
        throw LOGGER.logExceptionAsError(new IllegalStateException("If identifier is not set, expiry time "
            + "and permissions must be set"));
    }
    // Derive the signed resource type (sr) from the path.
    if (CoreUtils.isNullOrEmpty(pathName)) {
        resource = SAS_CONTAINER_CONSTANT;
    } else if (isDirectory) {
        resource = SAS_DIRECTORY_CONSTANT;
        this.directoryDepth = pathName.split("/").length;
    } else {
        resource = SAS_BLOB_CONSTANT;
    }
    // Normalize the permission string for the resolved resource type.
    if (permissions != null) {
        if (SAS_BLOB_CONSTANT.equals(resource) || SAS_DIRECTORY_CONSTANT.equals(resource)) {
            permissions = PathSasPermission.parse(permissions).toString();
        } else if (SAS_CONTAINER_CONSTANT.equals(resource)) {
            permissions = FileSystemSasPermission.parse(permissions).toString();
        } else {
            LOGGER.info("Not re-parsing permissions. Resource type '{}' is unknown.", resource);
        }
    }
    // At most one of saoid/suoid may be present on a user-delegation SAS.
    if (this.authorizedAadObjectId != null && this.unauthorizedAadObjectId != null) {
        throw LOGGER.logExceptionAsError(new IllegalStateException("agentObjectId and preauthorizedAgentObjectId "
            + "can not both be set."));
    }
}
/**
* Computes the canonical name for a container or blob resource for SAS signing.
*/
/**
 * Computes the canonical resource name used in the string-to-sign.
 * Data Lake paths sign under the "/blob/" service prefix.
 */
private String getCanonicalName(String account) {
    if (CoreUtils.isNullOrEmpty(pathName)) {
        return String.format("/blob/%s/%s", account, fileSystemName);
    }
    // Normalize Windows-style separators before signing.
    return String.format("/blob/%s/%s/%s", account, fileSystemName, pathName.replace("\\", "/"));
}
/**
 * Builds the service-SAS string-to-sign for a file system / path.
 * <p>
 * The segment layout is service-version dependent: only versions newer than
 * 2020-10-02 reserve an additional (empty) encryption-scope segment. Emitting that
 * segment for an older target version yields a string-to-sign that does not match
 * the one the service computes, so the signature is rejected.
 *
 * @param canonicalName the canonical resource name ("/blob/{account}/{fileSystem}[/{path}]").
 * @return the newline-delimited string to be HMAC-signed.
 */
private String stringToSign(String canonicalName) {
    if (VERSION.compareTo(DataLakeServiceVersion.V2020_10_02.getVersion()) <= 0) {
        return String.join("\n",
            this.permissions == null ? "" : permissions,
            this.startTime == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(this.startTime),
            this.expiryTime == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(this.expiryTime),
            canonicalName,
            this.identifier == null ? "" : this.identifier,
            this.sasIpRange == null ? "" : this.sasIpRange.toString(),
            this.protocol == null ? "" : this.protocol.toString(),
            VERSION,
            resource,
            "", /* Version segment. */
            this.cacheControl == null ? "" : this.cacheControl,
            this.contentDisposition == null ? "" : this.contentDisposition,
            this.contentEncoding == null ? "" : this.contentEncoding,
            this.contentLanguage == null ? "" : this.contentLanguage,
            this.contentType == null ? "" : this.contentType
        );
    } else {
        return String.join("\n",
            this.permissions == null ? "" : permissions,
            this.startTime == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(this.startTime),
            this.expiryTime == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(this.expiryTime),
            canonicalName,
            this.identifier == null ? "" : this.identifier,
            this.sasIpRange == null ? "" : this.sasIpRange.toString(),
            this.protocol == null ? "" : this.protocol.toString(),
            VERSION,
            resource,
            "", /* Version segment. */
            "", /* Encryption scope. */
            this.cacheControl == null ? "" : this.cacheControl,
            this.contentDisposition == null ? "" : this.contentDisposition,
            this.contentEncoding == null ? "" : this.contentEncoding,
            this.contentLanguage == null ? "" : this.contentLanguage,
            this.contentType == null ? "" : this.contentType
        );
    }
}
} | class DataLakeSasImplUtil {
/**
* The SAS blob (datalake file) constant.
*/
private static final String SAS_BLOB_CONSTANT = "b";
/**
* The SAS directory (datalake directory) constant.
*/
private static final String SAS_DIRECTORY_CONSTANT = "d";
/**
* The SAS blob container (datalake file system) constant.
*/
private static final String SAS_CONTAINER_CONSTANT = "c";
private static final ClientLogger LOGGER = new ClientLogger(DataLakeSasImplUtil.class);
private static final String VERSION = Configuration.getGlobalConfiguration()
.get(Constants.PROPERTY_AZURE_STORAGE_SAS_SERVICE_VERSION, DataLakeServiceVersion.getLatest().getVersion());
private SasProtocol protocol;
private OffsetDateTime startTime;
private OffsetDateTime expiryTime;
private String permissions;
private SasIpRange sasIpRange;
private String fileSystemName;
private String pathName;
private String resource;
private String identifier;
private String cacheControl;
private String contentDisposition;
private String contentEncoding;
private String contentLanguage;
private String contentType;
private Boolean isDirectory;
private Integer directoryDepth;
private String authorizedAadObjectId;
private String unauthorizedAadObjectId;
private String correlationId;
/**
* Creates a new {@link DataLakeSasImplUtil} with the specified parameters
*
* @param sasValues {@link DataLakeServiceSasSignatureValues}
* @param fileSystemName The file system name
*/
public DataLakeSasImplUtil(DataLakeServiceSasSignatureValues sasValues, String fileSystemName) {
this(sasValues, fileSystemName, null, false);
}
/**
* Creates a new {@link DataLakeSasImplUtil} with the specified parameters
*
* @param sasValues {@link DataLakeServiceSasSignatureValues}
* @param fileSystemName The file system name
* @param pathName The path name
* @param isDirectory Whether or not the path points to a directory.
*/
public DataLakeSasImplUtil(DataLakeServiceSasSignatureValues sasValues, String fileSystemName, String pathName,
boolean isDirectory) {
Objects.requireNonNull(sasValues);
this.protocol = sasValues.getProtocol();
this.startTime = sasValues.getStartTime();
this.expiryTime = sasValues.getExpiryTime();
this.permissions = sasValues.getPermissions();
this.sasIpRange = sasValues.getSasIpRange();
this.fileSystemName = fileSystemName;
this.pathName = pathName;
this.identifier = sasValues.getIdentifier();
this.cacheControl = sasValues.getCacheControl();
this.contentDisposition = sasValues.getContentDisposition();
this.contentEncoding = sasValues.getContentEncoding();
this.contentLanguage = sasValues.getContentLanguage();
this.contentType = sasValues.getContentType();
this.authorizedAadObjectId = sasValues.getPreauthorizedAgentObjectId();
this.unauthorizedAadObjectId = sasValues.getAgentObjectId();
this.correlationId = sasValues.getCorrelationId();
this.isDirectory = isDirectory;
}
/**
* Generates a Sas signed with a {@link StorageSharedKeyCredential}
*
* @param storageSharedKeyCredentials {@link StorageSharedKeyCredential}
* @param context Additional context that is passed through the code when generating a SAS.
* @return A String representing the Sas
*/
public String generateSas(StorageSharedKeyCredential storageSharedKeyCredentials, Context context) {
StorageImplUtils.assertNotNull("storageSharedKeyCredentials", storageSharedKeyCredentials);
ensureState();
final String canonicalName = getCanonicalName(storageSharedKeyCredentials.getAccountName());
final String stringToSign = stringToSign(canonicalName);
StorageImplUtils.logStringToSign(LOGGER, stringToSign, context);
final String signature = storageSharedKeyCredentials.computeHmac256(stringToSign);
return encode(null /* userDelegationKey */, signature);
}
/**
* Generates a Sas signed with a {@link UserDelegationKey}
*
* @param delegationKey {@link UserDelegationKey}
* @param accountName The account name
* @param context Additional context that is passed through the code when generating a SAS.
* @return A String representing the Sas
*/
public String generateUserDelegationSas(UserDelegationKey delegationKey, String accountName, Context context) {
StorageImplUtils.assertNotNull("delegationKey", delegationKey);
StorageImplUtils.assertNotNull("accountName", accountName);
ensureState();
final String canonicalName = getCanonicalName(accountName);
final String stringToSign = stringToSign(delegationKey, canonicalName);
StorageImplUtils.logStringToSign(LOGGER, stringToSign, context);
String signature = StorageImplUtils.computeHMac256(delegationKey.getValue(), stringToSign);
return encode(delegationKey, signature);
}
/**
* Encodes a Sas from the values in this type.
* @param userDelegationKey {@link UserDelegationKey}
* @param signature The signature of the Sas.
* @return A String representing the Sas.
*/
private String encode(UserDelegationKey userDelegationKey, String signature) {
/*
We should be url-encoding each key and each value, but because we know all the keys and values will encode to
themselves, we cheat except for the signature value.
*/
StringBuilder sb = new StringBuilder();
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_SERVICE_VERSION, VERSION);
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_PROTOCOL, this.protocol);
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_START_TIME, formatQueryParameterDate(this.startTime));
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_EXPIRY_TIME, formatQueryParameterDate(this.expiryTime));
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_IP_RANGE, this.sasIpRange);
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_SIGNED_IDENTIFIER, this.identifier);
if (userDelegationKey != null) {
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_SIGNED_OBJECT_ID,
userDelegationKey.getSignedObjectId());
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_SIGNED_TENANT_ID,
userDelegationKey.getSignedTenantId());
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_SIGNED_KEY_START,
formatQueryParameterDate(userDelegationKey.getSignedStart()));
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_SIGNED_KEY_EXPIRY,
formatQueryParameterDate(userDelegationKey.getSignedExpiry()));
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_SIGNED_KEY_SERVICE,
userDelegationKey.getSignedService());
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_SIGNED_KEY_VERSION,
userDelegationKey.getSignedVersion());
/* Only parameters relevant for user delegation SAS. */
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_PREAUTHORIZED_AGENT_OBJECT_ID, this.authorizedAadObjectId);
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_AGENT_OBJECT_ID, this.unauthorizedAadObjectId);
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_CORRELATION_ID, this.correlationId);
}
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_SIGNED_RESOURCE, this.resource);
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_SIGNED_PERMISSIONS, this.permissions);
if (this.isDirectory) {
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_DIRECTORY_DEPTH, this.directoryDepth);
}
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_SIGNATURE, signature);
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_CACHE_CONTROL, this.cacheControl);
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_CONTENT_DISPOSITION, this.contentDisposition);
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_CONTENT_ENCODING, this.contentEncoding);
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_CONTENT_LANGUAGE, this.contentLanguage);
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_CONTENT_TYPE, this.contentType);
return sb.toString();
}
/**
* Ensures that the builder's properties are in a consistent state.
* 1. If there is no identifier set, ensure expiryTime and permissions are set.
* 2. Resource name is chosen by:
* a. If "BlobName" is _not_ set, it is a container resource.
* b. Otherwise, if "SnapshotId" is set, it is a blob snapshot resource.
* c. Otherwise, if "VersionId" is set, it is a blob version resource.
* d. Otherwise, it is a blob resource.
* 3. Reparse permissions depending on what the resource is. If it is an unrecognized resource, do nothing.
* 4. Ensure saoid is not set when suoid is set and vice versa.
*
* Taken from:
* https:
* https:
*/
/**
 * Validates and normalizes the builder state prior to signing. Fails fast when a
 * required field is missing or when mutually exclusive fields are both set.
 */
private void ensureState() {
    // Expiry and permissions are only optional when a stored access policy (identifier) is used.
    if (identifier == null && (expiryTime == null || permissions == null)) {
        throw LOGGER.logExceptionAsError(new IllegalStateException("If identifier is not set, expiry time "
            + "and permissions must be set"));
    }
    // Pick the signed resource type from the path shape.
    if (CoreUtils.isNullOrEmpty(pathName)) {
        resource = SAS_CONTAINER_CONSTANT;
    } else if (isDirectory) {
        resource = SAS_DIRECTORY_CONSTANT;
        this.directoryDepth = pathName.split("/").length;
    } else {
        resource = SAS_BLOB_CONSTANT;
    }
    // Re-parse the permission string so it is ordered/validated for the resource type.
    if (permissions != null) {
        if (SAS_BLOB_CONSTANT.equals(resource) || SAS_DIRECTORY_CONSTANT.equals(resource)) {
            permissions = PathSasPermission.parse(permissions).toString();
        } else if (SAS_CONTAINER_CONSTANT.equals(resource)) {
            permissions = FileSystemSasPermission.parse(permissions).toString();
        } else {
            LOGGER.info("Not re-parsing permissions. Resource type '{}' is unknown.", resource);
        }
    }
    // saoid and suoid are mutually exclusive on a user-delegation SAS.
    if (this.authorizedAadObjectId != null && this.unauthorizedAadObjectId != null) {
        throw LOGGER.logExceptionAsError(new IllegalStateException("agentObjectId and preauthorizedAgentObjectId "
            + "can not both be set."));
    }
}
/**
* Computes the canonical name for a container or blob resource for SAS signing.
*/
/**
 * Builds the canonical resource name for signing; Data Lake resources are
 * canonicalized under the "/blob/" service prefix.
 */
private String getCanonicalName(String account) {
    if (CoreUtils.isNullOrEmpty(pathName)) {
        return String.format("/blob/%s/%s", account, fileSystemName);
    }
    // Backslashes are normalized to forward slashes before signing.
    return String.format("/blob/%s/%s/%s", account, fileSystemName, pathName.replace("\\", "/"));
}
/**
 * Builds the service-SAS string-to-sign. The layout is version-gated: only service
 * versions newer than 2020-10-02 include the reserved (empty) encryption-scope
 * segment, so older versions must not emit it.
 *
 * @param canonicalName the canonical resource name being signed.
 * @return the newline-delimited string to be HMAC-signed.
 */
private String stringToSign(String canonicalName) {
// Older (<= 2020-10-02) layout: no encryption-scope segment.
if (VERSION.compareTo(DataLakeServiceVersion.V2020_10_02.getVersion()) <= 0) {
return String.join("\n",
this.permissions == null ? "" : permissions,
this.startTime == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(this.startTime),
this.expiryTime == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(this.expiryTime),
canonicalName,
this.identifier == null ? "" : this.identifier,
this.sasIpRange == null ? "" : this.sasIpRange.toString(),
this.protocol == null ? "" : this.protocol.toString(),
VERSION,
resource,
"", /* Version segment. */
this.cacheControl == null ? "" : this.cacheControl,
this.contentDisposition == null ? "" : this.contentDisposition,
this.contentEncoding == null ? "" : this.contentEncoding,
this.contentLanguage == null ? "" : this.contentLanguage,
this.contentType == null ? "" : this.contentType
);
} else {
// Newer layout: identical except for the reserved encryption-scope segment.
return String.join("\n",
this.permissions == null ? "" : permissions,
this.startTime == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(this.startTime),
this.expiryTime == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(this.expiryTime),
canonicalName,
this.identifier == null ? "" : this.identifier,
this.sasIpRange == null ? "" : this.sasIpRange.toString(),
this.protocol == null ? "" : this.protocol.toString(),
VERSION,
resource,
"", /* Version segment. */
"", /* Encryption scope (reserved, empty). */
this.cacheControl == null ? "" : this.cacheControl,
this.contentDisposition == null ? "" : this.contentDisposition,
this.contentEncoding == null ? "" : this.contentEncoding,
this.contentLanguage == null ? "" : this.contentLanguage,
this.contentType == null ? "" : this.contentType
);
}
}
} |
Do we not force SAS generation to be on latest version? | private String stringToSign(String canonicalName) {
// A snapshot id takes precedence over a version id in the signed version segment.
String versionSegment = this.snapshotId == null ? this.versionId : this.snapshotId;
// The encryption-scope segment only exists in the string-to-sign for service versions
// newer than 2020-10-02; emitting it unconditionally makes the string-to-sign diverge
// from what older service versions compute, invalidating the signature.
if (VERSION.compareTo(BlobServiceVersion.V2020_10_02.getVersion()) <= 0) {
    return String.join("\n",
        this.permissions == null ? "" : permissions,
        this.startTime == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(this.startTime),
        this.expiryTime == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(this.expiryTime),
        canonicalName,
        this.identifier == null ? "" : this.identifier,
        this.sasIpRange == null ? "" : this.sasIpRange.toString(),
        this.protocol == null ? "" : this.protocol.toString(),
        VERSION,
        resource,
        versionSegment == null ? "" : versionSegment,
        this.cacheControl == null ? "" : this.cacheControl,
        this.contentDisposition == null ? "" : this.contentDisposition,
        this.contentEncoding == null ? "" : this.contentEncoding,
        this.contentLanguage == null ? "" : this.contentLanguage,
        this.contentType == null ? "" : this.contentType
    );
} else {
    return String.join("\n",
        this.permissions == null ? "" : permissions,
        this.startTime == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(this.startTime),
        this.expiryTime == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(this.expiryTime),
        canonicalName,
        this.identifier == null ? "" : this.identifier,
        this.sasIpRange == null ? "" : this.sasIpRange.toString(),
        this.protocol == null ? "" : this.protocol.toString(),
        VERSION,
        resource,
        versionSegment == null ? "" : versionSegment,
        this.encryptionScope == null ? "" : this.encryptionScope,
        this.cacheControl == null ? "" : this.cacheControl,
        this.contentDisposition == null ? "" : this.contentDisposition,
        this.contentEncoding == null ? "" : this.contentEncoding,
        this.contentLanguage == null ? "" : this.contentLanguage,
        this.contentType == null ? "" : this.contentType
    );
}
} | this.encryptionScope == null ? "" : this.encryptionScope, | private String stringToSign(String canonicalName) {
// A snapshot id takes precedence over a version id in the signed version segment.
String versionSegment = this.snapshotId == null ? this.versionId : this.snapshotId;
// Version-gated layout: only service versions newer than 2020-10-02 carry the
// encryption-scope segment in the string-to-sign.
if (VERSION.compareTo(BlobServiceVersion.V2020_10_02.getVersion()) <= 0) {
return String.join("\n",
this.permissions == null ? "" : permissions,
this.startTime == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(this.startTime),
this.expiryTime == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(this.expiryTime),
canonicalName,
this.identifier == null ? "" : this.identifier,
this.sasIpRange == null ? "" : this.sasIpRange.toString(),
this.protocol == null ? "" : this.protocol.toString(),
VERSION,
resource,
versionSegment == null ? "" : versionSegment,
this.cacheControl == null ? "" : this.cacheControl,
this.contentDisposition == null ? "" : this.contentDisposition,
this.contentEncoding == null ? "" : this.contentEncoding,
this.contentLanguage == null ? "" : this.contentLanguage,
this.contentType == null ? "" : this.contentType
);
} else {
// Newer layout: identical except for the encryption-scope segment after the version segment.
return String.join("\n",
this.permissions == null ? "" : permissions,
this.startTime == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(this.startTime),
this.expiryTime == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(this.expiryTime),
canonicalName,
this.identifier == null ? "" : this.identifier,
this.sasIpRange == null ? "" : this.sasIpRange.toString(),
this.protocol == null ? "" : this.protocol.toString(),
VERSION,
resource,
versionSegment == null ? "" : versionSegment,
this.encryptionScope == null ? "" : this.encryptionScope,
this.cacheControl == null ? "" : this.cacheControl,
this.contentDisposition == null ? "" : this.contentDisposition,
this.contentEncoding == null ? "" : this.contentEncoding,
this.contentLanguage == null ? "" : this.contentLanguage,
this.contentType == null ? "" : this.contentType
);
}
} | class BlobSasImplUtil {
/**
* The SAS blob constant.
*/
private static final String SAS_BLOB_CONSTANT = "b";
/**
* The SAS blob snapshot constant.
*/
private static final String SAS_BLOB_SNAPSHOT_CONSTANT = "bs";
/**
* The SAS blob version constant.
*/
private static final String SAS_BLOB_VERSION_CONSTANT = "bv";
/**
* The SAS blob container constant.
*/
private static final String SAS_CONTAINER_CONSTANT = "c";
private static final ClientLogger LOGGER = new ClientLogger(BlobSasImplUtil.class);
private static final String VERSION = Configuration.getGlobalConfiguration()
.get(Constants.PROPERTY_AZURE_STORAGE_SAS_SERVICE_VERSION, BlobServiceVersion.getLatest().getVersion());
// SAS restriction: https-only or https+http (spr).
private SasProtocol protocol;
// Optional window during which the SAS is valid (st / se).
private OffsetDateTime startTime;
private OffsetDateTime expiryTime;
// Raw permission string; re-parsed into canonical order by ensureState().
private String permissions;
// Optional IP range restriction (sip).
private SasIpRange sasIpRange;
private String containerName;
private String blobName;
// Signed resource type (sr): one of the SAS_*_CONSTANT values, derived in ensureState().
private String resource;
// At most one of snapshotId/versionId may be set; enforced in the constructor.
private String snapshotId;
private String versionId;
// Stored access policy identifier (si); when set, expiry/permissions may be omitted.
private String identifier;
// Response-header overrides carried on the SAS (rscc, rscd, rsce, rscl, rsct).
private String cacheControl;
private String contentDisposition;
private String contentEncoding;
private String contentLanguage;
private String contentType;
// User-delegation-SAS-only parameters (saoid / scid).
private String authorizedAadObjectId;
private String correlationId;
// Encryption scope (ses); may come from the sas values or the client.
private String encryptionScope;
/**
* Creates a new {@link BlobSasImplUtil} with the specified parameters
*
* @param sasValues {@link BlobServiceSasSignatureValues}
* @param containerName The container name
*/
public BlobSasImplUtil(BlobServiceSasSignatureValues sasValues, String containerName) {
// Container-level SAS: delegate with no blob name, snapshot id or version id.
this(sasValues, containerName, null, null, null);
}
/**
* Creates a new {@link BlobSasImplUtil} with the specified parameters
*
* @param sasValues {@link BlobServiceSasSignatureValues}
* @param containerName The container name
* @param blobName The blob name
* @param snapshotId The snapshot id
* @param versionId The version id
*/
public BlobSasImplUtil(BlobServiceSasSignatureValues sasValues, String containerName, String blobName,
String snapshotId, String versionId) {
// Delegate with no client-level encryption scope; any scope on sasValues still applies.
this(sasValues, containerName, blobName, snapshotId, versionId, null);
}
/**
* Creates a new {@link BlobSasImplUtil} with the specified parameters
*
* @param sasValues {@link BlobServiceSasSignatureValues}
* @param containerName The container name
* @param blobName The blob name
* @param snapshotId The snapshot id
* @param versionId The version id
* @param encryptionScope The encryption scope
*/
public BlobSasImplUtil(BlobServiceSasSignatureValues sasValues, String containerName, String blobName,
    String snapshotId, String versionId, String encryptionScope) {
    // Include the parameter name in the NPE message so the failure is self-describing to callers.
    Objects.requireNonNull(sasValues, "'sasValues' cannot be null.");
    // A SAS can target a snapshot or a version, never both; the signed resource type would be ambiguous.
    if (snapshotId != null && versionId != null) {
        throw LOGGER.logExceptionAsError(
            new IllegalArgumentException("'snapshot' and 'versionId' cannot be used at the same time."));
    }
    this.protocol = sasValues.getProtocol();
    this.startTime = sasValues.getStartTime();
    this.expiryTime = sasValues.getExpiryTime();
    this.permissions = sasValues.getPermissions();
    this.sasIpRange = sasValues.getSasIpRange();
    this.containerName = containerName;
    this.blobName = blobName;
    this.snapshotId = snapshotId;
    this.versionId = versionId;
    this.identifier = sasValues.getIdentifier();
    this.cacheControl = sasValues.getCacheControl();
    this.contentDisposition = sasValues.getContentDisposition();
    this.contentEncoding = sasValues.getContentEncoding();
    this.contentLanguage = sasValues.getContentLanguage();
    this.contentType = sasValues.getContentType();
    this.authorizedAadObjectId = sasValues.getPreauthorizedAgentObjectId();
    this.correlationId = sasValues.getCorrelationId();
    /*
    Prefer the encryption scope explicitly set on the sas values. If none present, fallback to the value on the
    client.
    */
    this.encryptionScope = sasValues.getEncryptionScope() == null
        ? encryptionScope : sasValues.getEncryptionScope();
}
/**
* Generates a Sas signed with a {@link StorageSharedKeyCredential}
*
* @param storageSharedKeyCredentials {@link StorageSharedKeyCredential}
* @param context Additional context that is passed through the code when generating a SAS.
* @return A String representing the Sas
*/
public String generateSas(StorageSharedKeyCredential storageSharedKeyCredentials, Context context) {
    // Validate input and normalize the builder state before producing anything signable.
    StorageImplUtils.assertNotNull("storageSharedKeyCredentials", storageSharedKeyCredentials);
    ensureState();

    // Build the string-to-sign for the canonical resource and HMAC it with the account key.
    String toSign = stringToSign(getCanonicalName(storageSharedKeyCredentials.getAccountName()));
    StorageImplUtils.logStringToSign(LOGGER, toSign, context);

    // A shared-key SAS carries no user delegation key.
    return encode(null, storageSharedKeyCredentials.computeHmac256(toSign));
}
/**
* Generates a Sas signed with a {@link UserDelegationKey}
*
* @param delegationKey {@link UserDelegationKey}
* @param accountName The account name
* @param context Additional context that is passed through the code when generating a SAS.
* @return A String representing the Sas
*/
public String generateUserDelegationSas(UserDelegationKey delegationKey, String accountName, Context context) {
    // Both the delegation key and the account name are mandatory for a user delegation SAS.
    StorageImplUtils.assertNotNull("delegationKey", delegationKey);
    StorageImplUtils.assertNotNull("accountName", accountName);
    ensureState();

    // Sign the canonical resource name with the user delegation key's value.
    String toSign = stringToSign(delegationKey, getCanonicalName(accountName));
    StorageImplUtils.logStringToSign(LOGGER, toSign, context);

    return encode(delegationKey, StorageImplUtils.computeHMac256(delegationKey.getValue(), toSign));
}
/**
* Encodes a Sas from the values in this type.
* @param userDelegationKey {@link UserDelegationKey}
* @param signature The signature of the Sas.
* @return A String representing the Sas.
*/
private String encode(UserDelegationKey userDelegationKey, String signature) {
/*
We should be url-encoding each key and each value, but because we know all the keys and values will encode to
themselves, we cheat except for the signature value.
*/
StringBuilder sb = new StringBuilder();
// Parameters common to shared-key and user-delegation SAS tokens.
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_SERVICE_VERSION, VERSION);
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_PROTOCOL, this.protocol);
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_START_TIME, formatQueryParameterDate(this.startTime));
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_EXPIRY_TIME, formatQueryParameterDate(this.expiryTime));
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_IP_RANGE, this.sasIpRange);
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_SIGNED_IDENTIFIER, this.identifier);
// Delegation-key metadata is appended only when this is a user delegation SAS.
if (userDelegationKey != null) {
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_SIGNED_OBJECT_ID,
userDelegationKey.getSignedObjectId());
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_SIGNED_TENANT_ID,
userDelegationKey.getSignedTenantId());
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_SIGNED_KEY_START,
formatQueryParameterDate(userDelegationKey.getSignedStart()));
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_SIGNED_KEY_EXPIRY,
formatQueryParameterDate(userDelegationKey.getSignedExpiry()));
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_SIGNED_KEY_SERVICE,
userDelegationKey.getSignedService());
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_SIGNED_KEY_VERSION,
userDelegationKey.getSignedVersion());
/* Only parameters relevant for user delegation SAS. */
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_PREAUTHORIZED_AGENT_OBJECT_ID, this.authorizedAadObjectId);
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_CORRELATION_ID, this.correlationId);
}
// Resource type, permissions and the signature itself, then response-header overrides.
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_SIGNED_RESOURCE, this.resource);
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_SIGNED_PERMISSIONS, this.permissions);
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_SIGNATURE, signature);
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_ENCRYPTION_SCOPE, this.encryptionScope);
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_CACHE_CONTROL, this.cacheControl);
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_CONTENT_DISPOSITION, this.contentDisposition);
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_CONTENT_ENCODING, this.contentEncoding);
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_CONTENT_LANGUAGE, this.contentLanguage);
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_CONTENT_TYPE, this.contentType);
return sb.toString();
}
/**
* Ensures that the builder's properties are in a consistent state.
* 1. If there is no version, use latest.
* 2. If there is no identifier set, ensure expiryTime and permissions are set.
* 3. Resource name is chosen by:
* a. If "BlobName" is _not_ set, it is a container resource.
* b. Otherwise, if "SnapshotId" is set, it is a blob snapshot resource.
* c. Otherwise, if "VersionId" is set, it is a blob version resource.
* d. Otherwise, it is a blob resource.
* 4. Reparse permissions depending on what the resource is. If it is an unrecognized resource, do nothing.
*
* Taken from:
* the Azure Storage "Create a service SAS" REST API documentation (links lost in extraction).
*/
private void ensureState() {
    // A stored access policy (identifier) may supply expiry and permissions; otherwise both are required.
    if (identifier == null && (expiryTime == null || permissions == null)) {
        throw LOGGER.logExceptionAsError(new IllegalStateException(
            "If identifier is not set, expiry time and permissions must be set"));
    }

    // Choose the signed resource type from the most specific target available.
    if (CoreUtils.isNullOrEmpty(blobName)) {
        resource = SAS_CONTAINER_CONSTANT;
    } else if (snapshotId != null) {
        resource = SAS_BLOB_SNAPSHOT_CONSTANT;
    } else if (versionId != null) {
        resource = SAS_BLOB_VERSION_CONSTANT;
    } else {
        resource = SAS_BLOB_CONSTANT;
    }

    // Re-parse the permission string so it is emitted in the canonical order for the resource type.
    if (permissions != null) {
        if (SAS_CONTAINER_CONSTANT.equals(resource)) {
            permissions = BlobContainerSasPermission.parse(permissions).toString();
        } else if (SAS_BLOB_CONSTANT.equals(resource) || SAS_BLOB_SNAPSHOT_CONSTANT.equals(resource)
            || SAS_BLOB_VERSION_CONSTANT.equals(resource)) {
            permissions = BlobSasPermission.parse(permissions).toString();
        } else {
            LOGGER.info("Not re-parsing permissions. Resource type '{}' is unknown.", resource);
        }
    }
}
/**
* Computes the canonical name for a container or blob resource for SAS signing.
*/
private String getCanonicalName(String account) {
    // No blob name means the SAS targets the container itself.
    if (CoreUtils.isNullOrEmpty(blobName)) {
        return String.format("/blob/%s/%s", account, containerName);
    }
    // Normalize backslash separators in the blob name before it is signed.
    return String.format("/blob/%s/%s/%s", account, containerName, blobName.replace("\\", "/"));
}
// Builds the newline-delimited string-to-sign for a user delegation SAS. The field order must
// match the service's published format for the target service VERSION exactly; unset values are
// represented by empty strings. Do not reorder.
private String stringToSign(final UserDelegationKey key, String canonicalName) {
// A snapshot id takes precedence over a version id for the signed version segment.
String versionSegment = this.snapshotId == null ? this.versionId : this.snapshotId;
// Service versions up to 2019-12-12 omit saoid/suoid/scid and the encryption scope.
if (VERSION.compareTo(BlobServiceVersion.V2019_12_12.getVersion()) <= 0) {
return String.join("\n",
this.permissions == null ? "" : this.permissions,
this.startTime == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(this.startTime),
this.expiryTime == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(this.expiryTime),
canonicalName,
key.getSignedObjectId() == null ? "" : key.getSignedObjectId(),
key.getSignedTenantId() == null ? "" : key.getSignedTenantId(),
key.getSignedStart() == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(key.getSignedStart()),
key.getSignedExpiry() == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(key.getSignedExpiry()),
key.getSignedService() == null ? "" : key.getSignedService(),
key.getSignedVersion() == null ? "" : key.getSignedVersion(),
this.sasIpRange == null ? "" : this.sasIpRange.toString(),
this.protocol == null ? "" : this.protocol.toString(),
VERSION,
resource,
versionSegment == null ? "" : versionSegment,
this.cacheControl == null ? "" : this.cacheControl,
this.contentDisposition == null ? "" : this.contentDisposition,
this.contentEncoding == null ? "" : this.contentEncoding,
this.contentLanguage == null ? "" : this.contentLanguage,
this.contentType == null ? "" : this.contentType
);
} else {
// Later versions insert saoid, an always-empty suoid slot (HNS-only), and scid after the
// delegation-key fields, and the encryption scope after the version segment.
return String.join("\n",
this.permissions == null ? "" : this.permissions,
this.startTime == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(this.startTime),
this.expiryTime == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(this.expiryTime),
canonicalName,
key.getSignedObjectId() == null ? "" : key.getSignedObjectId(),
key.getSignedTenantId() == null ? "" : key.getSignedTenantId(),
key.getSignedStart() == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(key.getSignedStart()),
key.getSignedExpiry() == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(key.getSignedExpiry()),
key.getSignedService() == null ? "" : key.getSignedService(),
key.getSignedVersion() == null ? "" : key.getSignedVersion(),
this.authorizedAadObjectId == null ? "" : this.authorizedAadObjectId,
"", /* suoid - empty since this applies to HNS only accounts. */
this.correlationId == null ? "" : this.correlationId,
this.sasIpRange == null ? "" : this.sasIpRange.toString(),
this.protocol == null ? "" : this.protocol.toString(),
VERSION,
resource,
versionSegment == null ? "" : versionSegment,
this.encryptionScope == null ? "" : this.encryptionScope,
this.cacheControl == null ? "" : this.cacheControl,
this.contentDisposition == null ? "" : this.contentDisposition,
this.contentEncoding == null ? "" : this.contentEncoding,
this.contentLanguage == null ? "" : this.contentLanguage,
this.contentType == null ? "" : this.contentType
);
}
}
} | class BlobSasImplUtil {
/**
* The SAS blob constant.
*/
private static final String SAS_BLOB_CONSTANT = "b";
/**
* The SAS blob snapshot constant.
*/
private static final String SAS_BLOB_SNAPSHOT_CONSTANT = "bs";
/**
* The SAS blob version constant.
*/
private static final String SAS_BLOB_VERSION_CONSTANT = "bv";
/**
* The SAS blob container constant.
*/
private static final String SAS_CONTAINER_CONSTANT = "c";
private static final ClientLogger LOGGER = new ClientLogger(BlobSasImplUtil.class);
private static final String VERSION = Configuration.getGlobalConfiguration()
.get(Constants.PROPERTY_AZURE_STORAGE_SAS_SERVICE_VERSION, BlobServiceVersion.getLatest().getVersion());
private SasProtocol protocol;
private OffsetDateTime startTime;
private OffsetDateTime expiryTime;
private String permissions;
private SasIpRange sasIpRange;
private String containerName;
private String blobName;
private String resource;
private String snapshotId;
private String versionId;
private String identifier;
private String cacheControl;
private String contentDisposition;
private String contentEncoding;
private String contentLanguage;
private String contentType;
private String authorizedAadObjectId;
private String correlationId;
private String encryptionScope;
/**
* Creates a new {@link BlobSasImplUtil} with the specified parameters
*
* @param sasValues {@link BlobServiceSasSignatureValues}
* @param containerName The container name
*/
public BlobSasImplUtil(BlobServiceSasSignatureValues sasValues, String containerName) {
// Container-level SAS: no blob name, snapshot id, version id or encryption scope.
this(sasValues, containerName, null, null, null, null);
}
/**
* Creates a new {@link BlobSasImplUtil} with the specified parameters
*
* @param sasValues {@link BlobServiceSasSignatureValues}
* @param containerName The container name
* @param blobName The blob name
* @param snapshotId The snapshot id
* @param versionId The version id
* @param encryptionScope The encryption scope
*/
public BlobSasImplUtil(BlobServiceSasSignatureValues sasValues, String containerName, String blobName,
String snapshotId, String versionId, String encryptionScope) {
Objects.requireNonNull(sasValues);
// A SAS can target a snapshot or a version, never both.
if (snapshotId != null && versionId != null) {
throw LOGGER.logExceptionAsError(
new IllegalArgumentException("'snapshot' and 'versionId' cannot be used at the same time."));
}
this.protocol = sasValues.getProtocol();
this.startTime = sasValues.getStartTime();
this.expiryTime = sasValues.getExpiryTime();
this.permissions = sasValues.getPermissions();
this.sasIpRange = sasValues.getSasIpRange();
this.containerName = containerName;
this.blobName = blobName;
this.snapshotId = snapshotId;
this.versionId = versionId;
this.identifier = sasValues.getIdentifier();
this.cacheControl = sasValues.getCacheControl();
this.contentDisposition = sasValues.getContentDisposition();
this.contentEncoding = sasValues.getContentEncoding();
this.contentLanguage = sasValues.getContentLanguage();
this.contentType = sasValues.getContentType();
this.authorizedAadObjectId = sasValues.getPreauthorizedAgentObjectId();
this.correlationId = sasValues.getCorrelationId();
// In this version the encryption scope is taken solely from the client-supplied parameter.
this.encryptionScope = encryptionScope;
}
/**
* Generates a Sas signed with a {@link StorageSharedKeyCredential}
*
* @param storageSharedKeyCredentials {@link StorageSharedKeyCredential}
* @param context Additional context that is passed through the code when generating a SAS.
* @return A String representing the Sas
*/
public String generateSas(StorageSharedKeyCredential storageSharedKeyCredentials, Context context) {
StorageImplUtils.assertNotNull("storageSharedKeyCredentials", storageSharedKeyCredentials);
// Normalize the builder state (resource type, canonical permission order) before signing.
ensureState();
final String canonicalName = getCanonicalName(storageSharedKeyCredentials.getAccountName());
final String stringToSign = stringToSign(canonicalName);
StorageImplUtils.logStringToSign(LOGGER, stringToSign, context);
// HMAC-SHA256 of the string-to-sign using the shared account key.
final String signature = storageSharedKeyCredentials.computeHmac256(stringToSign);
return encode(null /* userDelegationKey */, signature);
}
/**
* Generates a Sas signed with a {@link UserDelegationKey}
*
* @param delegationKey {@link UserDelegationKey}
* @param accountName The account name
* @param context Additional context that is passed through the code when generating a SAS.
* @return A String representing the Sas
*/
public String generateUserDelegationSas(UserDelegationKey delegationKey, String accountName, Context context) {
StorageImplUtils.assertNotNull("delegationKey", delegationKey);
StorageImplUtils.assertNotNull("accountName", accountName);
// Normalize the builder state (resource type, canonical permission order) before signing.
ensureState();
final String canonicalName = getCanonicalName(accountName);
final String stringToSign = stringToSign(delegationKey, canonicalName);
StorageImplUtils.logStringToSign(LOGGER, stringToSign, context);
// Sign with the delegation key's value rather than the account key.
String signature = StorageImplUtils.computeHMac256(delegationKey.getValue(), stringToSign);
return encode(delegationKey, signature);
}
/**
* Encodes a Sas from the values in this type.
* @param userDelegationKey {@link UserDelegationKey}
* @param signature The signature of the Sas.
* @return A String representing the Sas.
*/
private String encode(UserDelegationKey userDelegationKey, String signature) {
/*
We should be url-encoding each key and each value, but because we know all the keys and values will encode to
themselves, we cheat except for the signature value.
*/
StringBuilder sb = new StringBuilder();
// Parameters common to shared-key and user-delegation SAS tokens.
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_SERVICE_VERSION, VERSION);
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_PROTOCOL, this.protocol);
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_START_TIME, formatQueryParameterDate(this.startTime));
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_EXPIRY_TIME, formatQueryParameterDate(this.expiryTime));
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_IP_RANGE, this.sasIpRange);
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_SIGNED_IDENTIFIER, this.identifier);
// Delegation-key metadata is appended only when this is a user delegation SAS.
if (userDelegationKey != null) {
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_SIGNED_OBJECT_ID,
userDelegationKey.getSignedObjectId());
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_SIGNED_TENANT_ID,
userDelegationKey.getSignedTenantId());
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_SIGNED_KEY_START,
formatQueryParameterDate(userDelegationKey.getSignedStart()));
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_SIGNED_KEY_EXPIRY,
formatQueryParameterDate(userDelegationKey.getSignedExpiry()));
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_SIGNED_KEY_SERVICE,
userDelegationKey.getSignedService());
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_SIGNED_KEY_VERSION,
userDelegationKey.getSignedVersion());
/* Only parameters relevant for user delegation SAS. */
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_PREAUTHORIZED_AGENT_OBJECT_ID, this.authorizedAadObjectId);
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_CORRELATION_ID, this.correlationId);
}
// Resource type, permissions and the signature itself, then response-header overrides.
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_SIGNED_RESOURCE, this.resource);
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_SIGNED_PERMISSIONS, this.permissions);
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_SIGNATURE, signature);
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_ENCRYPTION_SCOPE, this.encryptionScope);
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_CACHE_CONTROL, this.cacheControl);
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_CONTENT_DISPOSITION, this.contentDisposition);
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_CONTENT_ENCODING, this.contentEncoding);
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_CONTENT_LANGUAGE, this.contentLanguage);
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_CONTENT_TYPE, this.contentType);
return sb.toString();
}
/**
* Ensures that the builder's properties are in a consistent state.
* 1. If there is no version, use latest.
* 2. If there is no identifier set, ensure expiryTime and permissions are set.
* 3. Resource name is chosen by:
* a. If "BlobName" is _not_ set, it is a container resource.
* b. Otherwise, if "SnapshotId" is set, it is a blob snapshot resource.
* c. Otherwise, if "VersionId" is set, it is a blob version resource.
* d. Otherwise, it is a blob resource.
* 4. Reparse permissions depending on what the resource is. If it is an unrecognized resource, do nothing.
*
* Taken from:
* the Azure Storage "Create a service SAS" REST API documentation (links lost in extraction).
*/
private void ensureState() {
// A stored access policy (identifier) may supply expiry/permissions; otherwise both are required.
if (identifier == null) {
if (expiryTime == null || permissions == null) {
throw LOGGER.logExceptionAsError(new IllegalStateException("If identifier is not set, expiry time "
+ "and permissions must be set"));
}
}
// Choose the signed resource type from the most specific target available.
if (CoreUtils.isNullOrEmpty(blobName)) {
resource = SAS_CONTAINER_CONSTANT;
} else if (snapshotId != null) {
resource = SAS_BLOB_SNAPSHOT_CONSTANT;
} else if (versionId != null) {
resource = SAS_BLOB_VERSION_CONSTANT;
} else {
resource = SAS_BLOB_CONSTANT;
}
// Re-parse permissions so they are emitted in the canonical order for the resource type.
if (permissions != null) {
switch (resource) {
case SAS_BLOB_CONSTANT:
case SAS_BLOB_SNAPSHOT_CONSTANT:
case SAS_BLOB_VERSION_CONSTANT:
permissions = BlobSasPermission.parse(permissions).toString();
break;
case SAS_CONTAINER_CONSTANT:
permissions = BlobContainerSasPermission.parse(permissions).toString();
break;
default:
LOGGER.info("Not re-parsing permissions. Resource type '{}' is unknown.", resource);
break;
}
}
}
/**
* Computes the canonical name for a container or blob resource for SAS signing.
*/
private String getCanonicalName(String account) {
    // No blob name means the SAS targets the container itself.
    if (CoreUtils.isNullOrEmpty(blobName)) {
        return String.format("/blob/%s/%s", account, containerName);
    }
    // Normalize backslash separators in the blob name before it is signed.
    return String.format("/blob/%s/%s/%s", account, containerName, blobName.replace("\\", "/"));
}
// Builds the newline-delimited string-to-sign for a user delegation SAS. Field order must match
// the service's published format for the target service VERSION exactly; unset values are
// represented by empty strings. Do not reorder.
private String stringToSign(final UserDelegationKey key, String canonicalName) {
// A snapshot id takes precedence over a version id for the signed version segment.
String versionSegment = this.snapshotId == null ? this.versionId : this.snapshotId;
// Versions up to 2019-12-12 omit saoid/suoid/scid and the encryption scope.
if (VERSION.compareTo(BlobServiceVersion.V2019_12_12.getVersion()) <= 0) {
return String.join("\n",
this.permissions == null ? "" : this.permissions,
this.startTime == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(this.startTime),
this.expiryTime == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(this.expiryTime),
canonicalName,
key.getSignedObjectId() == null ? "" : key.getSignedObjectId(),
key.getSignedTenantId() == null ? "" : key.getSignedTenantId(),
key.getSignedStart() == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(key.getSignedStart()),
key.getSignedExpiry() == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(key.getSignedExpiry()),
key.getSignedService() == null ? "" : key.getSignedService(),
key.getSignedVersion() == null ? "" : key.getSignedVersion(),
this.sasIpRange == null ? "" : this.sasIpRange.toString(),
this.protocol == null ? "" : this.protocol.toString(),
VERSION,
resource,
versionSegment == null ? "" : versionSegment,
this.cacheControl == null ? "" : this.cacheControl,
this.contentDisposition == null ? "" : this.contentDisposition,
this.contentEncoding == null ? "" : this.contentEncoding,
this.contentLanguage == null ? "" : this.contentLanguage,
this.contentType == null ? "" : this.contentType
);
} else if (VERSION.compareTo(BlobServiceVersion.V2020_10_02.getVersion()) <= 0) {
// Versions through 2020-10-02 add saoid, an always-empty suoid slot (HNS-only) and scid,
// but not the encryption scope.
return String.join("\n",
this.permissions == null ? "" : this.permissions,
this.startTime == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(this.startTime),
this.expiryTime == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(this.expiryTime),
canonicalName,
key.getSignedObjectId() == null ? "" : key.getSignedObjectId(),
key.getSignedTenantId() == null ? "" : key.getSignedTenantId(),
key.getSignedStart() == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(key.getSignedStart()),
key.getSignedExpiry() == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(key.getSignedExpiry()),
key.getSignedService() == null ? "" : key.getSignedService(),
key.getSignedVersion() == null ? "" : key.getSignedVersion(),
this.authorizedAadObjectId == null ? "" : this.authorizedAadObjectId,
"", /* suoid - empty since this applies to HNS only accounts. */
this.correlationId == null ? "" : this.correlationId,
this.sasIpRange == null ? "" : this.sasIpRange.toString(),
this.protocol == null ? "" : this.protocol.toString(),
VERSION,
resource,
versionSegment == null ? "" : versionSegment,
this.cacheControl == null ? "" : this.cacheControl,
this.contentDisposition == null ? "" : this.contentDisposition,
this.contentEncoding == null ? "" : this.contentEncoding,
this.contentLanguage == null ? "" : this.contentLanguage,
this.contentType == null ? "" : this.contentType
);
} else {
// Later versions additionally insert the encryption scope after the version segment.
return String.join("\n",
this.permissions == null ? "" : this.permissions,
this.startTime == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(this.startTime),
this.expiryTime == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(this.expiryTime),
canonicalName,
key.getSignedObjectId() == null ? "" : key.getSignedObjectId(),
key.getSignedTenantId() == null ? "" : key.getSignedTenantId(),
key.getSignedStart() == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(key.getSignedStart()),
key.getSignedExpiry() == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(key.getSignedExpiry()),
key.getSignedService() == null ? "" : key.getSignedService(),
key.getSignedVersion() == null ? "" : key.getSignedVersion(),
this.authorizedAadObjectId == null ? "" : this.authorizedAadObjectId,
"", /* suoid - empty since this applies to HNS only accounts. */
this.correlationId == null ? "" : this.correlationId,
this.sasIpRange == null ? "" : this.sasIpRange.toString(),
this.protocol == null ? "" : this.protocol.toString(),
VERSION,
resource,
versionSegment == null ? "" : versionSegment,
this.encryptionScope == null ? "" : this.encryptionScope,
this.cacheControl == null ? "" : this.cacheControl,
this.contentDisposition == null ? "" : this.contentDisposition,
this.contentEncoding == null ? "" : this.contentEncoding,
this.contentLanguage == null ? "" : this.contentLanguage,
this.contentType == null ? "" : this.contentType
);
}
}
} |
we do, however there's a way to override it globally https://github.com/Azure/azure-sdk-for-java/blob/ad3586aef64218360521d72848f2befe6b7f5971/sdk/storage/azure-storage-common/src/main/java/com/azure/storage/common/implementation/Constants.java#L88-L91 . | private String stringToSign(String canonicalName) {
String versionSegment = this.snapshotId == null ? this.versionId : this.snapshotId;
return String.join("\n",
this.permissions == null ? "" : permissions,
this.startTime == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(this.startTime),
this.expiryTime == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(this.expiryTime),
canonicalName,
this.identifier == null ? "" : this.identifier,
this.sasIpRange == null ? "" : this.sasIpRange.toString(),
this.protocol == null ? "" : this.protocol.toString(),
VERSION,
resource,
versionSegment == null ? "" : versionSegment,
this.encryptionScope == null ? "" : this.encryptionScope,
this.cacheControl == null ? "" : this.cacheControl,
this.contentDisposition == null ? "" : this.contentDisposition,
this.contentEncoding == null ? "" : this.contentEncoding,
this.contentLanguage == null ? "" : this.contentLanguage,
this.contentType == null ? "" : this.contentType
);
} | this.encryptionScope == null ? "" : this.encryptionScope, | private String stringToSign(String canonicalName) {
String versionSegment = this.snapshotId == null ? this.versionId : this.snapshotId;
if (VERSION.compareTo(BlobServiceVersion.V2020_10_02.getVersion()) <= 0) {
return String.join("\n",
this.permissions == null ? "" : permissions,
this.startTime == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(this.startTime),
this.expiryTime == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(this.expiryTime),
canonicalName,
this.identifier == null ? "" : this.identifier,
this.sasIpRange == null ? "" : this.sasIpRange.toString(),
this.protocol == null ? "" : this.protocol.toString(),
VERSION,
resource,
versionSegment == null ? "" : versionSegment,
this.cacheControl == null ? "" : this.cacheControl,
this.contentDisposition == null ? "" : this.contentDisposition,
this.contentEncoding == null ? "" : this.contentEncoding,
this.contentLanguage == null ? "" : this.contentLanguage,
this.contentType == null ? "" : this.contentType
);
} else {
return String.join("\n",
this.permissions == null ? "" : permissions,
this.startTime == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(this.startTime),
this.expiryTime == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(this.expiryTime),
canonicalName,
this.identifier == null ? "" : this.identifier,
this.sasIpRange == null ? "" : this.sasIpRange.toString(),
this.protocol == null ? "" : this.protocol.toString(),
VERSION,
resource,
versionSegment == null ? "" : versionSegment,
this.encryptionScope == null ? "" : this.encryptionScope,
this.cacheControl == null ? "" : this.cacheControl,
this.contentDisposition == null ? "" : this.contentDisposition,
this.contentEncoding == null ? "" : this.contentEncoding,
this.contentLanguage == null ? "" : this.contentLanguage,
this.contentType == null ? "" : this.contentType
);
}
} | class BlobSasImplUtil {
/**
* The SAS blob constant.
*/
private static final String SAS_BLOB_CONSTANT = "b";
/**
* The SAS blob snapshot constant.
*/
private static final String SAS_BLOB_SNAPSHOT_CONSTANT = "bs";
/**
* The SAS blob version constant.
*/
private static final String SAS_BLOB_VERSION_CONSTANT = "bv";
/**
* The SAS blob container constant.
*/
private static final String SAS_CONTAINER_CONSTANT = "c";
private static final ClientLogger LOGGER = new ClientLogger(BlobSasImplUtil.class);
private static final String VERSION = Configuration.getGlobalConfiguration()
.get(Constants.PROPERTY_AZURE_STORAGE_SAS_SERVICE_VERSION, BlobServiceVersion.getLatest().getVersion());
private SasProtocol protocol;
private OffsetDateTime startTime;
private OffsetDateTime expiryTime;
private String permissions;
private SasIpRange sasIpRange;
private String containerName;
private String blobName;
private String resource;
private String snapshotId;
private String versionId;
private String identifier;
private String cacheControl;
private String contentDisposition;
private String contentEncoding;
private String contentLanguage;
private String contentType;
private String authorizedAadObjectId;
private String correlationId;
private String encryptionScope;
/**
* Creates a new {@link BlobSasImplUtil} with the specified parameters
*
* @param sasValues {@link BlobServiceSasSignatureValues}
* @param containerName The container name
*/
public BlobSasImplUtil(BlobServiceSasSignatureValues sasValues, String containerName) {
// Container-level SAS: delegate with no blob name, snapshot id or version id.
this(sasValues, containerName, null, null, null);
}
/**
 * Creates a new {@link BlobSasImplUtil} with the specified parameters.
 * <p>
 * Delegates to the full constructor with no client-level encryption scope.
 *
 * @param sasValues {@link BlobServiceSasSignatureValues}
 * @param containerName The container name
 * @param blobName The blob name
 * @param snapshotId The snapshot id
 * @param versionId The version id
 */
public BlobSasImplUtil(BlobServiceSasSignatureValues sasValues, String containerName, String blobName,
    String snapshotId, String versionId) {
    this(sasValues, containerName, blobName, snapshotId, versionId, null);
}
/**
 * Creates a new {@link BlobSasImplUtil} with the specified parameters.
 *
 * @param sasValues {@link BlobServiceSasSignatureValues}
 * @param containerName The container name
 * @param blobName The blob name
 * @param snapshotId The snapshot id
 * @param versionId The version id
 * @param encryptionScope The encryption scope configured on the client; used only when {@code sasValues}
 * carries no encryption scope of its own
 * @throws IllegalArgumentException If both {@code snapshotId} and {@code versionId} are set
 * @throws NullPointerException If {@code sasValues} is null
 */
public BlobSasImplUtil(BlobServiceSasSignatureValues sasValues, String containerName, String blobName,
    String snapshotId, String versionId, String encryptionScope) {
    Objects.requireNonNull(sasValues);
    // A SAS can target a snapshot or a version, never both at once.
    if (snapshotId != null && versionId != null) {
        throw LOGGER.logExceptionAsError(
            new IllegalArgumentException("'snapshot' and 'versionId' cannot be used at the same time."));
    }
    this.protocol = sasValues.getProtocol();
    this.startTime = sasValues.getStartTime();
    this.expiryTime = sasValues.getExpiryTime();
    this.permissions = sasValues.getPermissions();
    this.sasIpRange = sasValues.getSasIpRange();
    this.containerName = containerName;
    this.blobName = blobName;
    this.snapshotId = snapshotId;
    this.versionId = versionId;
    this.identifier = sasValues.getIdentifier();
    this.cacheControl = sasValues.getCacheControl();
    this.contentDisposition = sasValues.getContentDisposition();
    this.contentEncoding = sasValues.getContentEncoding();
    this.contentLanguage = sasValues.getContentLanguage();
    this.contentType = sasValues.getContentType();
    this.authorizedAadObjectId = sasValues.getPreauthorizedAgentObjectId();
    this.correlationId = sasValues.getCorrelationId();
    /*
    Prefer the encryption scope explicitly set on the sas values. If none present, fallback to the value on the
    client.
    */
    this.encryptionScope = sasValues.getEncryptionScope() == null
        ? encryptionScope : sasValues.getEncryptionScope();
}
/**
 * Generates a Sas signed with a {@link StorageSharedKeyCredential}.
 *
 * @param storageSharedKeyCredentials {@link StorageSharedKeyCredential}
 * @param context Additional context that is passed through the code when generating a SAS.
 * @return A String representing the Sas
 */
public String generateSas(StorageSharedKeyCredential storageSharedKeyCredentials, Context context) {
    StorageImplUtils.assertNotNull("storageSharedKeyCredentials", storageSharedKeyCredentials);
    ensureState();
    // The canonical resource name ties the signature to this account/container/blob.
    String resourceName = getCanonicalName(storageSharedKeyCredentials.getAccountName());
    String signableText = stringToSign(resourceName);
    StorageImplUtils.logStringToSign(LOGGER, signableText, context);
    return encode(null /* userDelegationKey */, storageSharedKeyCredentials.computeHmac256(signableText));
}
/**
 * Generates a Sas signed with a {@link UserDelegationKey}.
 *
 * @param delegationKey {@link UserDelegationKey}
 * @param accountName The account name
 * @param context Additional context that is passed through the code when generating a SAS.
 * @return A String representing the Sas
 */
public String generateUserDelegationSas(UserDelegationKey delegationKey, String accountName, Context context) {
    StorageImplUtils.assertNotNull("delegationKey", delegationKey);
    StorageImplUtils.assertNotNull("accountName", accountName);
    ensureState();
    // Sign the canonical resource name plus the delegation key fields using the key's value.
    String signableText = stringToSign(delegationKey, getCanonicalName(accountName));
    StorageImplUtils.logStringToSign(LOGGER, signableText, context);
    return encode(delegationKey, StorageImplUtils.computeHMac256(delegationKey.getValue(), signableText));
}
/**
 * Encodes a Sas from the values in this type.
 * @param userDelegationKey {@link UserDelegationKey}; non-null only when building a user delegation SAS.
 * @param signature The signature of the Sas.
 * @return A String representing the Sas.
 */
private String encode(UserDelegationKey userDelegationKey, String signature) {
    /*
    We should be url-encoding each key and each value, but because we know all the keys and values will encode to
    themselves, we cheat except for the signature value.
    */
    StringBuilder sb = new StringBuilder();
    tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_SERVICE_VERSION, VERSION);
    tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_PROTOCOL, this.protocol);
    tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_START_TIME, formatQueryParameterDate(this.startTime));
    tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_EXPIRY_TIME, formatQueryParameterDate(this.expiryTime));
    tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_IP_RANGE, this.sasIpRange);
    tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_SIGNED_IDENTIFIER, this.identifier);
    // The signed-key parameters below are only emitted for a user delegation SAS.
    if (userDelegationKey != null) {
        tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_SIGNED_OBJECT_ID,
            userDelegationKey.getSignedObjectId());
        tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_SIGNED_TENANT_ID,
            userDelegationKey.getSignedTenantId());
        tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_SIGNED_KEY_START,
            formatQueryParameterDate(userDelegationKey.getSignedStart()));
        tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_SIGNED_KEY_EXPIRY,
            formatQueryParameterDate(userDelegationKey.getSignedExpiry()));
        tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_SIGNED_KEY_SERVICE,
            userDelegationKey.getSignedService());
        tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_SIGNED_KEY_VERSION,
            userDelegationKey.getSignedVersion());
        /* Only parameters relevant for user delegation SAS. */
        tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_PREAUTHORIZED_AGENT_OBJECT_ID, this.authorizedAadObjectId);
        tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_CORRELATION_ID, this.correlationId);
    }
    tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_SIGNED_RESOURCE, this.resource);
    tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_SIGNED_PERMISSIONS, this.permissions);
    // Signature is the one value noted above that may not encode to itself.
    tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_SIGNATURE, signature);
    tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_ENCRYPTION_SCOPE, this.encryptionScope);
    tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_CACHE_CONTROL, this.cacheControl);
    tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_CONTENT_DISPOSITION, this.contentDisposition);
    tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_CONTENT_ENCODING, this.contentEncoding);
    tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_CONTENT_LANGUAGE, this.contentLanguage);
    tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_CONTENT_TYPE, this.contentType);
    return sb.toString();
}
/**
 * Normalizes the builder's properties into a consistent state before signing:
 * <ol>
 * <li>If no identifier is set, expiry time and permissions are required.</li>
 * <li>The signed resource is container ("c") when no blob name is present; otherwise blob snapshot ("bs"),
 * blob version ("bv"), or plain blob ("b") depending on which id is set.</li>
 * <li>Permissions, when present, are re-parsed against the chosen resource type so they are emitted in the
 * canonical form; an unrecognized resource type is left untouched.</li>
 * </ol>
 */
private void ensureState() {
    // Without a stored access policy identifier, the SAS itself must carry expiry and permissions.
    if (identifier == null && (expiryTime == null || permissions == null)) {
        throw LOGGER.logExceptionAsError(new IllegalStateException("If identifier is not set, expiry time "
            + "and permissions must be set"));
    }
    if (CoreUtils.isNullOrEmpty(blobName)) {
        resource = SAS_CONTAINER_CONSTANT;
    } else if (snapshotId != null) {
        resource = SAS_BLOB_SNAPSHOT_CONSTANT;
    } else if (versionId != null) {
        resource = SAS_BLOB_VERSION_CONSTANT;
    } else {
        resource = SAS_BLOB_CONSTANT;
    }
    if (permissions == null) {
        return;
    }
    if (SAS_CONTAINER_CONSTANT.equals(resource)) {
        permissions = BlobContainerSasPermission.parse(permissions).toString();
    } else if (SAS_BLOB_CONSTANT.equals(resource) || SAS_BLOB_SNAPSHOT_CONSTANT.equals(resource)
        || SAS_BLOB_VERSION_CONSTANT.equals(resource)) {
        permissions = BlobSasPermission.parse(permissions).toString();
    } else {
        LOGGER.info("Not re-parsing permissions. Resource type '{}' is unknown.", resource);
    }
}
/**
 * Computes the canonical name for a container or blob resource for SAS signing.
 * Container form: {@code /blob/<account>/<container>}; the blob form appends the blob name with
 * backslashes normalized to forward slashes.
 *
 * @param account The account name.
 * @return The canonical resource name.
 */
private String getCanonicalName(String account) {
    if (CoreUtils.isNullOrEmpty(blobName)) {
        return String.format("/blob/%s/%s", account, containerName);
    }
    return String.format("/blob/%s/%s/%s", account, containerName, blobName.replace("\\", "/"));
}
/**
 * Builds the newline-delimited string-to-sign for a user delegation SAS.
 * NOTE(review): the number and order of fields differ per service version and must match what the service
 * recomputes — do not reorder or insert lines.
 */
private String stringToSign(final UserDelegationKey key, String canonicalName) {
    // A SAS targets at most one of snapshot/version (enforced in the constructor).
    String versionSegment = this.snapshotId == null ? this.versionId : this.snapshotId;
    // Service versions up to 2019-12-12 omit the preauthorized agent, correlation id and encryption scope.
    if (VERSION.compareTo(BlobServiceVersion.V2019_12_12.getVersion()) <= 0) {
        return String.join("\n",
            this.permissions == null ? "" : this.permissions,
            this.startTime == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(this.startTime),
            this.expiryTime == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(this.expiryTime),
            canonicalName,
            key.getSignedObjectId() == null ? "" : key.getSignedObjectId(),
            key.getSignedTenantId() == null ? "" : key.getSignedTenantId(),
            key.getSignedStart() == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(key.getSignedStart()),
            key.getSignedExpiry() == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(key.getSignedExpiry()),
            key.getSignedService() == null ? "" : key.getSignedService(),
            key.getSignedVersion() == null ? "" : key.getSignedVersion(),
            this.sasIpRange == null ? "" : this.sasIpRange.toString(),
            this.protocol == null ? "" : this.protocol.toString(),
            VERSION,
            resource,
            versionSegment == null ? "" : versionSegment,
            this.cacheControl == null ? "" : this.cacheControl,
            this.contentDisposition == null ? "" : this.contentDisposition,
            this.contentEncoding == null ? "" : this.contentEncoding,
            this.contentLanguage == null ? "" : this.contentLanguage,
            this.contentType == null ? "" : this.contentType
        );
    } else {
        // Newer versions add preauthorized agent object id, suoid, correlation id and encryption scope.
        return String.join("\n",
            this.permissions == null ? "" : this.permissions,
            this.startTime == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(this.startTime),
            this.expiryTime == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(this.expiryTime),
            canonicalName,
            key.getSignedObjectId() == null ? "" : key.getSignedObjectId(),
            key.getSignedTenantId() == null ? "" : key.getSignedTenantId(),
            key.getSignedStart() == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(key.getSignedStart()),
            key.getSignedExpiry() == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(key.getSignedExpiry()),
            key.getSignedService() == null ? "" : key.getSignedService(),
            key.getSignedVersion() == null ? "" : key.getSignedVersion(),
            this.authorizedAadObjectId == null ? "" : this.authorizedAadObjectId,
            "", /* suoid - empty since this applies to HNS only accounts. */
            this.correlationId == null ? "" : this.correlationId,
            this.sasIpRange == null ? "" : this.sasIpRange.toString(),
            this.protocol == null ? "" : this.protocol.toString(),
            VERSION,
            resource,
            versionSegment == null ? "" : versionSegment,
            this.encryptionScope == null ? "" : this.encryptionScope,
            this.cacheControl == null ? "" : this.cacheControl,
            this.contentDisposition == null ? "" : this.contentDisposition,
            this.contentEncoding == null ? "" : this.contentEncoding,
            this.contentLanguage == null ? "" : this.contentLanguage,
            this.contentType == null ? "" : this.contentType
        );
    }
}
} | class BlobSasImplUtil {
/**
 * The SAS blob constant.
 */
private static final String SAS_BLOB_CONSTANT = "b";
/**
 * The SAS blob snapshot constant.
 */
private static final String SAS_BLOB_SNAPSHOT_CONSTANT = "bs";
/**
 * The SAS blob version constant.
 */
private static final String SAS_BLOB_VERSION_CONSTANT = "bv";
/**
 * The SAS blob container constant.
 */
private static final String SAS_CONTAINER_CONSTANT = "c";
// Logger used for validation failures and string-to-sign diagnostics.
private static final ClientLogger LOGGER = new ClientLogger(BlobSasImplUtil.class);
// SAS service version: overridable via global configuration, defaults to the latest service version.
private static final String VERSION = Configuration.getGlobalConfiguration()
    .get(Constants.PROPERTY_AZURE_STORAGE_SAS_SERVICE_VERSION, BlobServiceVersion.getLatest().getVersion());
// Values copied from BlobServiceSasSignatureValues plus resource identity; all feed the string-to-sign.
private SasProtocol protocol;
private OffsetDateTime startTime;
private OffsetDateTime expiryTime;
private String permissions;
private SasIpRange sasIpRange;
private String containerName;
private String blobName;
// Signed resource type: one of the SAS_*_CONSTANT values above, derived in ensureState().
private String resource;
private String snapshotId;
private String versionId;
private String identifier;
// Response-header overrides baked into the SAS.
private String cacheControl;
private String contentDisposition;
private String contentEncoding;
private String contentLanguage;
private String contentType;
// Parameters only emitted for a user delegation SAS.
private String authorizedAadObjectId;
private String correlationId;
private String encryptionScope;
/**
 * Creates a new {@link BlobSasImplUtil} with the specified parameters.
 * <p>
 * Convenience overload for a container-level SAS: blob name, snapshot id, version id and encryption scope
 * are all left unset.
 *
 * @param sasValues {@link BlobServiceSasSignatureValues}
 * @param containerName The container name
 */
public BlobSasImplUtil(BlobServiceSasSignatureValues sasValues, String containerName) {
    this(sasValues, containerName, null, null, null, null);
}
/**
 * Creates a new {@link BlobSasImplUtil} with the specified parameters.
 *
 * @param sasValues {@link BlobServiceSasSignatureValues}
 * @param containerName The container name
 * @param blobName The blob name
 * @param snapshotId The snapshot id
 * @param versionId The version id
 * @param encryptionScope The encryption scope
 * @throws IllegalArgumentException If both {@code snapshotId} and {@code versionId} are set
 * @throws NullPointerException If {@code sasValues} is null
 */
public BlobSasImplUtil(BlobServiceSasSignatureValues sasValues, String containerName, String blobName,
    String snapshotId, String versionId, String encryptionScope) {
    Objects.requireNonNull(sasValues);
    // A SAS can target a snapshot or a version, never both at once.
    if (snapshotId != null && versionId != null) {
        throw LOGGER.logExceptionAsError(
            new IllegalArgumentException("'snapshot' and 'versionId' cannot be used at the same time."));
    }
    this.protocol = sasValues.getProtocol();
    this.startTime = sasValues.getStartTime();
    this.expiryTime = sasValues.getExpiryTime();
    this.permissions = sasValues.getPermissions();
    this.sasIpRange = sasValues.getSasIpRange();
    this.containerName = containerName;
    this.blobName = blobName;
    this.snapshotId = snapshotId;
    this.versionId = versionId;
    this.identifier = sasValues.getIdentifier();
    this.cacheControl = sasValues.getCacheControl();
    this.contentDisposition = sasValues.getContentDisposition();
    this.contentEncoding = sasValues.getContentEncoding();
    this.contentLanguage = sasValues.getContentLanguage();
    this.contentType = sasValues.getContentType();
    this.authorizedAadObjectId = sasValues.getPreauthorizedAgentObjectId();
    this.correlationId = sasValues.getCorrelationId();
    this.encryptionScope = encryptionScope;
}
/**
 * Generates a Sas signed with a {@link StorageSharedKeyCredential}.
 *
 * @param storageSharedKeyCredentials {@link StorageSharedKeyCredential}
 * @param context Additional context that is passed through the code when generating a SAS.
 * @return A String representing the Sas
 */
public String generateSas(StorageSharedKeyCredential storageSharedKeyCredentials, Context context) {
    StorageImplUtils.assertNotNull("storageSharedKeyCredentials", storageSharedKeyCredentials);
    ensureState();
    // The canonical resource name ties the signature to this account/container/blob.
    String resourceName = getCanonicalName(storageSharedKeyCredentials.getAccountName());
    String signableText = stringToSign(resourceName);
    StorageImplUtils.logStringToSign(LOGGER, signableText, context);
    return encode(null /* userDelegationKey */, storageSharedKeyCredentials.computeHmac256(signableText));
}
/**
 * Generates a Sas signed with a {@link UserDelegationKey}.
 *
 * @param delegationKey {@link UserDelegationKey}
 * @param accountName The account name
 * @param context Additional context that is passed through the code when generating a SAS.
 * @return A String representing the Sas
 */
public String generateUserDelegationSas(UserDelegationKey delegationKey, String accountName, Context context) {
    StorageImplUtils.assertNotNull("delegationKey", delegationKey);
    StorageImplUtils.assertNotNull("accountName", accountName);
    ensureState();
    // Sign the canonical resource name plus the delegation key fields using the key's value.
    String signableText = stringToSign(delegationKey, getCanonicalName(accountName));
    StorageImplUtils.logStringToSign(LOGGER, signableText, context);
    return encode(delegationKey, StorageImplUtils.computeHMac256(delegationKey.getValue(), signableText));
}
/**
 * Encodes a Sas from the values in this type.
 * @param userDelegationKey {@link UserDelegationKey}; non-null only when building a user delegation SAS.
 * @param signature The signature of the Sas.
 * @return A String representing the Sas.
 */
private String encode(UserDelegationKey userDelegationKey, String signature) {
    /*
    We should be url-encoding each key and each value, but because we know all the keys and values will encode to
    themselves, we cheat except for the signature value.
    */
    StringBuilder sb = new StringBuilder();
    tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_SERVICE_VERSION, VERSION);
    tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_PROTOCOL, this.protocol);
    tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_START_TIME, formatQueryParameterDate(this.startTime));
    tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_EXPIRY_TIME, formatQueryParameterDate(this.expiryTime));
    tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_IP_RANGE, this.sasIpRange);
    tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_SIGNED_IDENTIFIER, this.identifier);
    // The signed-key parameters below are only emitted for a user delegation SAS.
    if (userDelegationKey != null) {
        tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_SIGNED_OBJECT_ID,
            userDelegationKey.getSignedObjectId());
        tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_SIGNED_TENANT_ID,
            userDelegationKey.getSignedTenantId());
        tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_SIGNED_KEY_START,
            formatQueryParameterDate(userDelegationKey.getSignedStart()));
        tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_SIGNED_KEY_EXPIRY,
            formatQueryParameterDate(userDelegationKey.getSignedExpiry()));
        tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_SIGNED_KEY_SERVICE,
            userDelegationKey.getSignedService());
        tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_SIGNED_KEY_VERSION,
            userDelegationKey.getSignedVersion());
        /* Only parameters relevant for user delegation SAS. */
        tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_PREAUTHORIZED_AGENT_OBJECT_ID, this.authorizedAadObjectId);
        tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_CORRELATION_ID, this.correlationId);
    }
    tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_SIGNED_RESOURCE, this.resource);
    tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_SIGNED_PERMISSIONS, this.permissions);
    // Signature is the one value noted above that may not encode to itself.
    tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_SIGNATURE, signature);
    tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_ENCRYPTION_SCOPE, this.encryptionScope);
    tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_CACHE_CONTROL, this.cacheControl);
    tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_CONTENT_DISPOSITION, this.contentDisposition);
    tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_CONTENT_ENCODING, this.contentEncoding);
    tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_CONTENT_LANGUAGE, this.contentLanguage);
    tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_CONTENT_TYPE, this.contentType);
    return sb.toString();
}
/**
 * Normalizes the builder's properties into a consistent state before signing:
 * <ol>
 * <li>If no identifier is set, expiry time and permissions are required.</li>
 * <li>The signed resource is container ("c") when no blob name is present; otherwise blob snapshot ("bs"),
 * blob version ("bv"), or plain blob ("b") depending on which id is set.</li>
 * <li>Permissions, when present, are re-parsed against the chosen resource type so they are emitted in the
 * canonical form; an unrecognized resource type is left untouched.</li>
 * </ol>
 */
private void ensureState() {
    // Without a stored access policy identifier, the SAS itself must carry expiry and permissions.
    if (identifier == null && (expiryTime == null || permissions == null)) {
        throw LOGGER.logExceptionAsError(new IllegalStateException("If identifier is not set, expiry time "
            + "and permissions must be set"));
    }
    if (CoreUtils.isNullOrEmpty(blobName)) {
        resource = SAS_CONTAINER_CONSTANT;
    } else if (snapshotId != null) {
        resource = SAS_BLOB_SNAPSHOT_CONSTANT;
    } else if (versionId != null) {
        resource = SAS_BLOB_VERSION_CONSTANT;
    } else {
        resource = SAS_BLOB_CONSTANT;
    }
    if (permissions == null) {
        return;
    }
    if (SAS_CONTAINER_CONSTANT.equals(resource)) {
        permissions = BlobContainerSasPermission.parse(permissions).toString();
    } else if (SAS_BLOB_CONSTANT.equals(resource) || SAS_BLOB_SNAPSHOT_CONSTANT.equals(resource)
        || SAS_BLOB_VERSION_CONSTANT.equals(resource)) {
        permissions = BlobSasPermission.parse(permissions).toString();
    } else {
        LOGGER.info("Not re-parsing permissions. Resource type '{}' is unknown.", resource);
    }
}
/**
 * Computes the canonical name for a container or blob resource for SAS signing.
 * Container form: {@code /blob/<account>/<container>}; the blob form appends the blob name with
 * backslashes normalized to forward slashes.
 *
 * @param account The account name.
 * @return The canonical resource name.
 */
private String getCanonicalName(String account) {
    if (CoreUtils.isNullOrEmpty(blobName)) {
        return String.format("/blob/%s/%s", account, containerName);
    }
    return String.format("/blob/%s/%s/%s", account, containerName, blobName.replace("\\", "/"));
}
/**
 * Builds the newline-delimited string-to-sign for a user delegation SAS.
 * NOTE(review): the number and order of fields differ per service version and must match what the service
 * recomputes — do not reorder or insert lines.
 */
private String stringToSign(final UserDelegationKey key, String canonicalName) {
    // A SAS targets at most one of snapshot/version (enforced in the constructor).
    String versionSegment = this.snapshotId == null ? this.versionId : this.snapshotId;
    // Service versions up to 2019-12-12 omit the preauthorized agent, correlation id and encryption scope.
    if (VERSION.compareTo(BlobServiceVersion.V2019_12_12.getVersion()) <= 0) {
        return String.join("\n",
            this.permissions == null ? "" : this.permissions,
            this.startTime == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(this.startTime),
            this.expiryTime == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(this.expiryTime),
            canonicalName,
            key.getSignedObjectId() == null ? "" : key.getSignedObjectId(),
            key.getSignedTenantId() == null ? "" : key.getSignedTenantId(),
            key.getSignedStart() == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(key.getSignedStart()),
            key.getSignedExpiry() == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(key.getSignedExpiry()),
            key.getSignedService() == null ? "" : key.getSignedService(),
            key.getSignedVersion() == null ? "" : key.getSignedVersion(),
            this.sasIpRange == null ? "" : this.sasIpRange.toString(),
            this.protocol == null ? "" : this.protocol.toString(),
            VERSION,
            resource,
            versionSegment == null ? "" : versionSegment,
            this.cacheControl == null ? "" : this.cacheControl,
            this.contentDisposition == null ? "" : this.contentDisposition,
            this.contentEncoding == null ? "" : this.contentEncoding,
            this.contentLanguage == null ? "" : this.contentLanguage,
            this.contentType == null ? "" : this.contentType
        );
    } else if (VERSION.compareTo(BlobServiceVersion.V2020_10_02.getVersion()) <= 0) {
        // 2020-02-10 through 2020-10-02 add preauthorized agent object id, suoid and correlation id,
        // but not yet the encryption scope.
        return String.join("\n",
            this.permissions == null ? "" : this.permissions,
            this.startTime == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(this.startTime),
            this.expiryTime == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(this.expiryTime),
            canonicalName,
            key.getSignedObjectId() == null ? "" : key.getSignedObjectId(),
            key.getSignedTenantId() == null ? "" : key.getSignedTenantId(),
            key.getSignedStart() == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(key.getSignedStart()),
            key.getSignedExpiry() == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(key.getSignedExpiry()),
            key.getSignedService() == null ? "" : key.getSignedService(),
            key.getSignedVersion() == null ? "" : key.getSignedVersion(),
            this.authorizedAadObjectId == null ? "" : this.authorizedAadObjectId,
            "", /* suoid - empty since this applies to HNS only accounts. */
            this.correlationId == null ? "" : this.correlationId,
            this.sasIpRange == null ? "" : this.sasIpRange.toString(),
            this.protocol == null ? "" : this.protocol.toString(),
            VERSION,
            resource,
            versionSegment == null ? "" : versionSegment,
            this.cacheControl == null ? "" : this.cacheControl,
            this.contentDisposition == null ? "" : this.contentDisposition,
            this.contentEncoding == null ? "" : this.contentEncoding,
            this.contentLanguage == null ? "" : this.contentLanguage,
            this.contentType == null ? "" : this.contentType
        );
    } else {
        // Versions after 2020-10-02 additionally sign the encryption scope.
        return String.join("\n",
            this.permissions == null ? "" : this.permissions,
            this.startTime == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(this.startTime),
            this.expiryTime == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(this.expiryTime),
            canonicalName,
            key.getSignedObjectId() == null ? "" : key.getSignedObjectId(),
            key.getSignedTenantId() == null ? "" : key.getSignedTenantId(),
            key.getSignedStart() == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(key.getSignedStart()),
            key.getSignedExpiry() == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(key.getSignedExpiry()),
            key.getSignedService() == null ? "" : key.getSignedService(),
            key.getSignedVersion() == null ? "" : key.getSignedVersion(),
            this.authorizedAadObjectId == null ? "" : this.authorizedAadObjectId,
            "", /* suoid - empty since this applies to HNS only accounts. */
            this.correlationId == null ? "" : this.correlationId,
            this.sasIpRange == null ? "" : this.sasIpRange.toString(),
            this.protocol == null ? "" : this.protocol.toString(),
            VERSION,
            resource,
            versionSegment == null ? "" : versionSegment,
            this.encryptionScope == null ? "" : this.encryptionScope,
            this.cacheControl == null ? "" : this.cacheControl,
            this.contentDisposition == null ? "" : this.contentDisposition,
            this.contentEncoding == null ? "" : this.contentEncoding,
            this.contentLanguage == null ? "" : this.contentLanguage,
            this.contentType == null ? "" : this.contentType
        );
    }
}
Discussed offline apparently we no longer force this. | private String stringToSign(String canonicalName) {
String versionSegment = this.snapshotId == null ? this.versionId : this.snapshotId;
return String.join("\n",
this.permissions == null ? "" : permissions,
this.startTime == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(this.startTime),
this.expiryTime == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(this.expiryTime),
canonicalName,
this.identifier == null ? "" : this.identifier,
this.sasIpRange == null ? "" : this.sasIpRange.toString(),
this.protocol == null ? "" : this.protocol.toString(),
VERSION,
resource,
versionSegment == null ? "" : versionSegment,
this.encryptionScope == null ? "" : this.encryptionScope,
this.cacheControl == null ? "" : this.cacheControl,
this.contentDisposition == null ? "" : this.contentDisposition,
this.contentEncoding == null ? "" : this.contentEncoding,
this.contentLanguage == null ? "" : this.contentLanguage,
this.contentType == null ? "" : this.contentType
);
} | this.encryptionScope == null ? "" : this.encryptionScope, | private String stringToSign(String canonicalName) {
String versionSegment = this.snapshotId == null ? this.versionId : this.snapshotId;
if (VERSION.compareTo(BlobServiceVersion.V2020_10_02.getVersion()) <= 0) {
return String.join("\n",
this.permissions == null ? "" : permissions,
this.startTime == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(this.startTime),
this.expiryTime == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(this.expiryTime),
canonicalName,
this.identifier == null ? "" : this.identifier,
this.sasIpRange == null ? "" : this.sasIpRange.toString(),
this.protocol == null ? "" : this.protocol.toString(),
VERSION,
resource,
versionSegment == null ? "" : versionSegment,
this.cacheControl == null ? "" : this.cacheControl,
this.contentDisposition == null ? "" : this.contentDisposition,
this.contentEncoding == null ? "" : this.contentEncoding,
this.contentLanguage == null ? "" : this.contentLanguage,
this.contentType == null ? "" : this.contentType
);
} else {
return String.join("\n",
this.permissions == null ? "" : permissions,
this.startTime == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(this.startTime),
this.expiryTime == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(this.expiryTime),
canonicalName,
this.identifier == null ? "" : this.identifier,
this.sasIpRange == null ? "" : this.sasIpRange.toString(),
this.protocol == null ? "" : this.protocol.toString(),
VERSION,
resource,
versionSegment == null ? "" : versionSegment,
this.encryptionScope == null ? "" : this.encryptionScope,
this.cacheControl == null ? "" : this.cacheControl,
this.contentDisposition == null ? "" : this.contentDisposition,
this.contentEncoding == null ? "" : this.contentEncoding,
this.contentLanguage == null ? "" : this.contentLanguage,
this.contentType == null ? "" : this.contentType
);
}
} | class BlobSasImplUtil {
/**
 * The SAS blob constant.
 */
private static final String SAS_BLOB_CONSTANT = "b";
/**
 * The SAS blob snapshot constant.
 */
private static final String SAS_BLOB_SNAPSHOT_CONSTANT = "bs";
/**
 * The SAS blob version constant.
 */
private static final String SAS_BLOB_VERSION_CONSTANT = "bv";
/**
 * The SAS blob container constant.
 */
private static final String SAS_CONTAINER_CONSTANT = "c";
// Logger used for validation failures and string-to-sign diagnostics.
private static final ClientLogger LOGGER = new ClientLogger(BlobSasImplUtil.class);
// SAS service version: overridable via global configuration, defaults to the latest service version.
private static final String VERSION = Configuration.getGlobalConfiguration()
    .get(Constants.PROPERTY_AZURE_STORAGE_SAS_SERVICE_VERSION, BlobServiceVersion.getLatest().getVersion());
// Values copied from BlobServiceSasSignatureValues plus resource identity; all feed the string-to-sign.
private SasProtocol protocol;
private OffsetDateTime startTime;
private OffsetDateTime expiryTime;
private String permissions;
private SasIpRange sasIpRange;
private String containerName;
private String blobName;
// Signed resource type: one of the SAS_*_CONSTANT values above, derived in ensureState().
private String resource;
private String snapshotId;
private String versionId;
private String identifier;
// Response-header overrides baked into the SAS.
private String cacheControl;
private String contentDisposition;
private String contentEncoding;
private String contentLanguage;
private String contentType;
// Parameters only emitted for a user delegation SAS.
private String authorizedAadObjectId;
private String correlationId;
private String encryptionScope;
/**
 * Creates a new {@link BlobSasImplUtil} with the specified parameters.
 * <p>
 * Convenience overload for a container-level SAS: blob name, snapshot id and version id are all left unset.
 *
 * @param sasValues {@link BlobServiceSasSignatureValues}
 * @param containerName The container name
 */
public BlobSasImplUtil(BlobServiceSasSignatureValues sasValues, String containerName) {
    this(sasValues, containerName, null, null, null);
}
/**
 * Creates a new {@link BlobSasImplUtil} with the specified parameters.
 * <p>
 * Delegates to the full constructor with no client-level encryption scope.
 *
 * @param sasValues {@link BlobServiceSasSignatureValues}
 * @param containerName The container name
 * @param blobName The blob name
 * @param snapshotId The snapshot id
 * @param versionId The version id
 */
public BlobSasImplUtil(BlobServiceSasSignatureValues sasValues, String containerName, String blobName,
    String snapshotId, String versionId) {
    this(sasValues, containerName, blobName, snapshotId, versionId, null);
}
/**
 * Creates a new {@link BlobSasImplUtil} with the specified parameters.
 *
 * @param sasValues {@link BlobServiceSasSignatureValues}
 * @param containerName The container name
 * @param blobName The blob name
 * @param snapshotId The snapshot id
 * @param versionId The version id
 * @param encryptionScope The encryption scope configured on the client; used only when {@code sasValues}
 * carries no encryption scope of its own
 * @throws IllegalArgumentException If both {@code snapshotId} and {@code versionId} are set
 * @throws NullPointerException If {@code sasValues} is null
 */
public BlobSasImplUtil(BlobServiceSasSignatureValues sasValues, String containerName, String blobName,
    String snapshotId, String versionId, String encryptionScope) {
    Objects.requireNonNull(sasValues);
    // A SAS can target a snapshot or a version, never both at once.
    if (snapshotId != null && versionId != null) {
        throw LOGGER.logExceptionAsError(
            new IllegalArgumentException("'snapshot' and 'versionId' cannot be used at the same time."));
    }
    this.protocol = sasValues.getProtocol();
    this.startTime = sasValues.getStartTime();
    this.expiryTime = sasValues.getExpiryTime();
    this.permissions = sasValues.getPermissions();
    this.sasIpRange = sasValues.getSasIpRange();
    this.containerName = containerName;
    this.blobName = blobName;
    this.snapshotId = snapshotId;
    this.versionId = versionId;
    this.identifier = sasValues.getIdentifier();
    this.cacheControl = sasValues.getCacheControl();
    this.contentDisposition = sasValues.getContentDisposition();
    this.contentEncoding = sasValues.getContentEncoding();
    this.contentLanguage = sasValues.getContentLanguage();
    this.contentType = sasValues.getContentType();
    this.authorizedAadObjectId = sasValues.getPreauthorizedAgentObjectId();
    this.correlationId = sasValues.getCorrelationId();
    /*
    Prefer the encryption scope explicitly set on the sas values. If none present, fallback to the value on the
    client.
    */
    this.encryptionScope = sasValues.getEncryptionScope() == null
        ? encryptionScope : sasValues.getEncryptionScope();
}
/**
 * Generates a Sas signed with a {@link StorageSharedKeyCredential}
 *
 * @param storageSharedKeyCredentials {@link StorageSharedKeyCredential}
 * @param context Additional context that is passed through the code when generating a SAS.
 * @return A String representing the Sas
 */
public String generateSas(StorageSharedKeyCredential storageSharedKeyCredentials, Context context) {
    StorageImplUtils.assertNotNull("storageSharedKeyCredentials", storageSharedKeyCredentials);
    ensureState();
    // The canonical resource name ties the signature to this account/container/blob.
    final String canonicalName = getCanonicalName(storageSharedKeyCredentials.getAccountName());
    final String stringToSign = stringToSign(canonicalName);
    StorageImplUtils.logStringToSign(LOGGER, stringToSign, context);
    final String signature = storageSharedKeyCredentials.computeHmac256(stringToSign);
    return encode(null /* userDelegationKey */, signature);
}
/**
* Generates a Sas signed with a {@link UserDelegationKey}
*
* @param delegationKey {@link UserDelegationKey}
* @param accountName The account name
* @param context Additional context that is passed through the code when generating a SAS.
* @return A String representing the Sas
*/
public String generateUserDelegationSas(UserDelegationKey delegationKey, String accountName, Context context) {
StorageImplUtils.assertNotNull("delegationKey", delegationKey);
StorageImplUtils.assertNotNull("accountName", accountName);
ensureState();
final String canonicalName = getCanonicalName(accountName);
final String stringToSign = stringToSign(delegationKey, canonicalName);
StorageImplUtils.logStringToSign(LOGGER, stringToSign, context);
String signature = StorageImplUtils.computeHMac256(delegationKey.getValue(), stringToSign);
return encode(delegationKey, signature);
}
/**
* Encodes a Sas from the values in this type.
* @param userDelegationKey {@link UserDelegationKey}
* @param signature The signature of the Sas.
* @return A String representing the Sas.
*/
private String encode(UserDelegationKey userDelegationKey, String signature) {
/*
We should be url-encoding each key and each value, but because we know all the keys and values will encode to
themselves, we cheat except for the signature value.
*/
StringBuilder sb = new StringBuilder();
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_SERVICE_VERSION, VERSION);
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_PROTOCOL, this.protocol);
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_START_TIME, formatQueryParameterDate(this.startTime));
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_EXPIRY_TIME, formatQueryParameterDate(this.expiryTime));
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_IP_RANGE, this.sasIpRange);
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_SIGNED_IDENTIFIER, this.identifier);
if (userDelegationKey != null) {
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_SIGNED_OBJECT_ID,
userDelegationKey.getSignedObjectId());
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_SIGNED_TENANT_ID,
userDelegationKey.getSignedTenantId());
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_SIGNED_KEY_START,
formatQueryParameterDate(userDelegationKey.getSignedStart()));
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_SIGNED_KEY_EXPIRY,
formatQueryParameterDate(userDelegationKey.getSignedExpiry()));
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_SIGNED_KEY_SERVICE,
userDelegationKey.getSignedService());
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_SIGNED_KEY_VERSION,
userDelegationKey.getSignedVersion());
/* Only parameters relevant for user delegation SAS. */
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_PREAUTHORIZED_AGENT_OBJECT_ID, this.authorizedAadObjectId);
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_CORRELATION_ID, this.correlationId);
}
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_SIGNED_RESOURCE, this.resource);
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_SIGNED_PERMISSIONS, this.permissions);
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_SIGNATURE, signature);
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_ENCRYPTION_SCOPE, this.encryptionScope);
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_CACHE_CONTROL, this.cacheControl);
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_CONTENT_DISPOSITION, this.contentDisposition);
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_CONTENT_ENCODING, this.contentEncoding);
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_CONTENT_LANGUAGE, this.contentLanguage);
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_CONTENT_TYPE, this.contentType);
return sb.toString();
}
/**
* Ensures that the builder's properties are in a consistent state.
* 1. If there is no version, use latest.
* 2. If there is no identifier set, ensure expiryTime and permissions are set.
* 3. Resource name is chosen by:
* a. If "BlobName" is _not_ set, it is a container resource.
* b. Otherwise, if "SnapshotId" is set, it is a blob snapshot resource.
* c. Otherwise, if "VersionId" is set, it is a blob version resource.
* d. Otherwise, it is a blob resource.
* 4. Reparse permissions depending on what the resource is. If it is an unrecognized resource, do nothing.
*
* Taken from:
* https:
* https:
*/
private void ensureState() {
if (identifier == null) {
if (expiryTime == null || permissions == null) {
throw LOGGER.logExceptionAsError(new IllegalStateException("If identifier is not set, expiry time "
+ "and permissions must be set"));
}
}
if (CoreUtils.isNullOrEmpty(blobName)) {
resource = SAS_CONTAINER_CONSTANT;
} else if (snapshotId != null) {
resource = SAS_BLOB_SNAPSHOT_CONSTANT;
} else if (versionId != null) {
resource = SAS_BLOB_VERSION_CONSTANT;
} else {
resource = SAS_BLOB_CONSTANT;
}
if (permissions != null) {
switch (resource) {
case SAS_BLOB_CONSTANT:
case SAS_BLOB_SNAPSHOT_CONSTANT:
case SAS_BLOB_VERSION_CONSTANT:
permissions = BlobSasPermission.parse(permissions).toString();
break;
case SAS_CONTAINER_CONSTANT:
permissions = BlobContainerSasPermission.parse(permissions).toString();
break;
default:
LOGGER.info("Not re-parsing permissions. Resource type '{}' is unknown.", resource);
break;
}
}
}
/**
* Computes the canonical name for a container or blob resource for SAS signing.
*/
private String getCanonicalName(String account) {
return CoreUtils.isNullOrEmpty(blobName)
? String.format("/blob/%s/%s", account, containerName)
: String.format("/blob/%s/%s/%s", account, containerName, blobName.replace("\\", "/"));
}
private String stringToSign(final UserDelegationKey key, String canonicalName) {
String versionSegment = this.snapshotId == null ? this.versionId : this.snapshotId;
if (VERSION.compareTo(BlobServiceVersion.V2019_12_12.getVersion()) <= 0) {
return String.join("\n",
this.permissions == null ? "" : this.permissions,
this.startTime == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(this.startTime),
this.expiryTime == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(this.expiryTime),
canonicalName,
key.getSignedObjectId() == null ? "" : key.getSignedObjectId(),
key.getSignedTenantId() == null ? "" : key.getSignedTenantId(),
key.getSignedStart() == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(key.getSignedStart()),
key.getSignedExpiry() == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(key.getSignedExpiry()),
key.getSignedService() == null ? "" : key.getSignedService(),
key.getSignedVersion() == null ? "" : key.getSignedVersion(),
this.sasIpRange == null ? "" : this.sasIpRange.toString(),
this.protocol == null ? "" : this.protocol.toString(),
VERSION,
resource,
versionSegment == null ? "" : versionSegment,
this.cacheControl == null ? "" : this.cacheControl,
this.contentDisposition == null ? "" : this.contentDisposition,
this.contentEncoding == null ? "" : this.contentEncoding,
this.contentLanguage == null ? "" : this.contentLanguage,
this.contentType == null ? "" : this.contentType
);
} else {
return String.join("\n",
this.permissions == null ? "" : this.permissions,
this.startTime == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(this.startTime),
this.expiryTime == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(this.expiryTime),
canonicalName,
key.getSignedObjectId() == null ? "" : key.getSignedObjectId(),
key.getSignedTenantId() == null ? "" : key.getSignedTenantId(),
key.getSignedStart() == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(key.getSignedStart()),
key.getSignedExpiry() == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(key.getSignedExpiry()),
key.getSignedService() == null ? "" : key.getSignedService(),
key.getSignedVersion() == null ? "" : key.getSignedVersion(),
this.authorizedAadObjectId == null ? "" : this.authorizedAadObjectId,
"", /* suoid - empty since this applies to HNS only accounts. */
this.correlationId == null ? "" : this.correlationId,
this.sasIpRange == null ? "" : this.sasIpRange.toString(),
this.protocol == null ? "" : this.protocol.toString(),
VERSION,
resource,
versionSegment == null ? "" : versionSegment,
this.encryptionScope == null ? "" : this.encryptionScope,
this.cacheControl == null ? "" : this.cacheControl,
this.contentDisposition == null ? "" : this.contentDisposition,
this.contentEncoding == null ? "" : this.contentEncoding,
this.contentLanguage == null ? "" : this.contentLanguage,
this.contentType == null ? "" : this.contentType
);
}
}
} | class BlobSasImplUtil {
/**
* The SAS blob constant.
*/
private static final String SAS_BLOB_CONSTANT = "b";
/**
* The SAS blob snapshot constant.
*/
private static final String SAS_BLOB_SNAPSHOT_CONSTANT = "bs";
/**
* The SAS blob version constant.
*/
private static final String SAS_BLOB_VERSION_CONSTANT = "bv";
/**
* The SAS blob container constant.
*/
private static final String SAS_CONTAINER_CONSTANT = "c";
private static final ClientLogger LOGGER = new ClientLogger(BlobSasImplUtil.class);
private static final String VERSION = Configuration.getGlobalConfiguration()
.get(Constants.PROPERTY_AZURE_STORAGE_SAS_SERVICE_VERSION, BlobServiceVersion.getLatest().getVersion());
private SasProtocol protocol;
private OffsetDateTime startTime;
private OffsetDateTime expiryTime;
private String permissions;
private SasIpRange sasIpRange;
private String containerName;
private String blobName;
private String resource;
private String snapshotId;
private String versionId;
private String identifier;
private String cacheControl;
private String contentDisposition;
private String contentEncoding;
private String contentLanguage;
private String contentType;
private String authorizedAadObjectId;
private String correlationId;
private String encryptionScope;
/**
* Creates a new {@link BlobSasImplUtil} with the specified parameters
*
* @param sasValues {@link BlobServiceSasSignatureValues}
* @param containerName The container name
*/
public BlobSasImplUtil(BlobServiceSasSignatureValues sasValues, String containerName) {
this(sasValues, containerName, null, null, null, null);
}
/**
* Creates a new {@link BlobSasImplUtil} with the specified parameters
*
* @param sasValues {@link BlobServiceSasSignatureValues}
* @param containerName The container name
* @param blobName The blob name
* @param snapshotId The snapshot id
* @param versionId The version id
* @param encryptionScope The encryption scope
*/
public BlobSasImplUtil(BlobServiceSasSignatureValues sasValues, String containerName, String blobName,
String snapshotId, String versionId, String encryptionScope) {
Objects.requireNonNull(sasValues);
if (snapshotId != null && versionId != null) {
throw LOGGER.logExceptionAsError(
new IllegalArgumentException("'snapshot' and 'versionId' cannot be used at the same time."));
}
this.protocol = sasValues.getProtocol();
this.startTime = sasValues.getStartTime();
this.expiryTime = sasValues.getExpiryTime();
this.permissions = sasValues.getPermissions();
this.sasIpRange = sasValues.getSasIpRange();
this.containerName = containerName;
this.blobName = blobName;
this.snapshotId = snapshotId;
this.versionId = versionId;
this.identifier = sasValues.getIdentifier();
this.cacheControl = sasValues.getCacheControl();
this.contentDisposition = sasValues.getContentDisposition();
this.contentEncoding = sasValues.getContentEncoding();
this.contentLanguage = sasValues.getContentLanguage();
this.contentType = sasValues.getContentType();
this.authorizedAadObjectId = sasValues.getPreauthorizedAgentObjectId();
this.correlationId = sasValues.getCorrelationId();
this.encryptionScope = encryptionScope;
}
/**
* Generates a Sas signed with a {@link StorageSharedKeyCredential}
*
* @param storageSharedKeyCredentials {@link StorageSharedKeyCredential}
* @param context Additional context that is passed through the code when generating a SAS.
* @return A String representing the Sas
*/
public String generateSas(StorageSharedKeyCredential storageSharedKeyCredentials, Context context) {
StorageImplUtils.assertNotNull("storageSharedKeyCredentials", storageSharedKeyCredentials);
ensureState();
final String canonicalName = getCanonicalName(storageSharedKeyCredentials.getAccountName());
final String stringToSign = stringToSign(canonicalName);
StorageImplUtils.logStringToSign(LOGGER, stringToSign, context);
final String signature = storageSharedKeyCredentials.computeHmac256(stringToSign);
return encode(null /* userDelegationKey */, signature);
}
/**
* Generates a Sas signed with a {@link UserDelegationKey}
*
* @param delegationKey {@link UserDelegationKey}
* @param accountName The account name
* @param context Additional context that is passed through the code when generating a SAS.
* @return A String representing the Sas
*/
public String generateUserDelegationSas(UserDelegationKey delegationKey, String accountName, Context context) {
StorageImplUtils.assertNotNull("delegationKey", delegationKey);
StorageImplUtils.assertNotNull("accountName", accountName);
ensureState();
final String canonicalName = getCanonicalName(accountName);
final String stringToSign = stringToSign(delegationKey, canonicalName);
StorageImplUtils.logStringToSign(LOGGER, stringToSign, context);
String signature = StorageImplUtils.computeHMac256(delegationKey.getValue(), stringToSign);
return encode(delegationKey, signature);
}
/**
* Encodes a Sas from the values in this type.
* @param userDelegationKey {@link UserDelegationKey}
* @param signature The signature of the Sas.
* @return A String representing the Sas.
*/
private String encode(UserDelegationKey userDelegationKey, String signature) {
/*
We should be url-encoding each key and each value, but because we know all the keys and values will encode to
themselves, we cheat except for the signature value.
*/
StringBuilder sb = new StringBuilder();
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_SERVICE_VERSION, VERSION);
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_PROTOCOL, this.protocol);
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_START_TIME, formatQueryParameterDate(this.startTime));
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_EXPIRY_TIME, formatQueryParameterDate(this.expiryTime));
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_IP_RANGE, this.sasIpRange);
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_SIGNED_IDENTIFIER, this.identifier);
if (userDelegationKey != null) {
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_SIGNED_OBJECT_ID,
userDelegationKey.getSignedObjectId());
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_SIGNED_TENANT_ID,
userDelegationKey.getSignedTenantId());
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_SIGNED_KEY_START,
formatQueryParameterDate(userDelegationKey.getSignedStart()));
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_SIGNED_KEY_EXPIRY,
formatQueryParameterDate(userDelegationKey.getSignedExpiry()));
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_SIGNED_KEY_SERVICE,
userDelegationKey.getSignedService());
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_SIGNED_KEY_VERSION,
userDelegationKey.getSignedVersion());
/* Only parameters relevant for user delegation SAS. */
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_PREAUTHORIZED_AGENT_OBJECT_ID, this.authorizedAadObjectId);
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_CORRELATION_ID, this.correlationId);
}
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_SIGNED_RESOURCE, this.resource);
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_SIGNED_PERMISSIONS, this.permissions);
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_SIGNATURE, signature);
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_ENCRYPTION_SCOPE, this.encryptionScope);
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_CACHE_CONTROL, this.cacheControl);
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_CONTENT_DISPOSITION, this.contentDisposition);
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_CONTENT_ENCODING, this.contentEncoding);
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_CONTENT_LANGUAGE, this.contentLanguage);
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_CONTENT_TYPE, this.contentType);
return sb.toString();
}
/**
* Ensures that the builder's properties are in a consistent state.
* 1. If there is no version, use latest.
* 2. If there is no identifier set, ensure expiryTime and permissions are set.
* 3. Resource name is chosen by:
* a. If "BlobName" is _not_ set, it is a container resource.
* b. Otherwise, if "SnapshotId" is set, it is a blob snapshot resource.
* c. Otherwise, if "VersionId" is set, it is a blob version resource.
* d. Otherwise, it is a blob resource.
* 4. Reparse permissions depending on what the resource is. If it is an unrecognized resource, do nothing.
*
* Taken from:
* https:
* https:
*/
private void ensureState() {
if (identifier == null) {
if (expiryTime == null || permissions == null) {
throw LOGGER.logExceptionAsError(new IllegalStateException("If identifier is not set, expiry time "
+ "and permissions must be set"));
}
}
if (CoreUtils.isNullOrEmpty(blobName)) {
resource = SAS_CONTAINER_CONSTANT;
} else if (snapshotId != null) {
resource = SAS_BLOB_SNAPSHOT_CONSTANT;
} else if (versionId != null) {
resource = SAS_BLOB_VERSION_CONSTANT;
} else {
resource = SAS_BLOB_CONSTANT;
}
if (permissions != null) {
switch (resource) {
case SAS_BLOB_CONSTANT:
case SAS_BLOB_SNAPSHOT_CONSTANT:
case SAS_BLOB_VERSION_CONSTANT:
permissions = BlobSasPermission.parse(permissions).toString();
break;
case SAS_CONTAINER_CONSTANT:
permissions = BlobContainerSasPermission.parse(permissions).toString();
break;
default:
LOGGER.info("Not re-parsing permissions. Resource type '{}' is unknown.", resource);
break;
}
}
}
/**
* Computes the canonical name for a container or blob resource for SAS signing.
*/
private String getCanonicalName(String account) {
return CoreUtils.isNullOrEmpty(blobName)
? String.format("/blob/%s/%s", account, containerName)
: String.format("/blob/%s/%s/%s", account, containerName, blobName.replace("\\", "/"));
}
private String stringToSign(final UserDelegationKey key, String canonicalName) {
String versionSegment = this.snapshotId == null ? this.versionId : this.snapshotId;
if (VERSION.compareTo(BlobServiceVersion.V2019_12_12.getVersion()) <= 0) {
return String.join("\n",
this.permissions == null ? "" : this.permissions,
this.startTime == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(this.startTime),
this.expiryTime == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(this.expiryTime),
canonicalName,
key.getSignedObjectId() == null ? "" : key.getSignedObjectId(),
key.getSignedTenantId() == null ? "" : key.getSignedTenantId(),
key.getSignedStart() == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(key.getSignedStart()),
key.getSignedExpiry() == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(key.getSignedExpiry()),
key.getSignedService() == null ? "" : key.getSignedService(),
key.getSignedVersion() == null ? "" : key.getSignedVersion(),
this.sasIpRange == null ? "" : this.sasIpRange.toString(),
this.protocol == null ? "" : this.protocol.toString(),
VERSION,
resource,
versionSegment == null ? "" : versionSegment,
this.cacheControl == null ? "" : this.cacheControl,
this.contentDisposition == null ? "" : this.contentDisposition,
this.contentEncoding == null ? "" : this.contentEncoding,
this.contentLanguage == null ? "" : this.contentLanguage,
this.contentType == null ? "" : this.contentType
);
} else if (VERSION.compareTo(BlobServiceVersion.V2020_10_02.getVersion()) <= 0) {
return String.join("\n",
this.permissions == null ? "" : this.permissions,
this.startTime == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(this.startTime),
this.expiryTime == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(this.expiryTime),
canonicalName,
key.getSignedObjectId() == null ? "" : key.getSignedObjectId(),
key.getSignedTenantId() == null ? "" : key.getSignedTenantId(),
key.getSignedStart() == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(key.getSignedStart()),
key.getSignedExpiry() == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(key.getSignedExpiry()),
key.getSignedService() == null ? "" : key.getSignedService(),
key.getSignedVersion() == null ? "" : key.getSignedVersion(),
this.authorizedAadObjectId == null ? "" : this.authorizedAadObjectId,
"", /* suoid - empty since this applies to HNS only accounts. */
this.correlationId == null ? "" : this.correlationId,
this.sasIpRange == null ? "" : this.sasIpRange.toString(),
this.protocol == null ? "" : this.protocol.toString(),
VERSION,
resource,
versionSegment == null ? "" : versionSegment,
this.cacheControl == null ? "" : this.cacheControl,
this.contentDisposition == null ? "" : this.contentDisposition,
this.contentEncoding == null ? "" : this.contentEncoding,
this.contentLanguage == null ? "" : this.contentLanguage,
this.contentType == null ? "" : this.contentType
);
} else {
return String.join("\n",
this.permissions == null ? "" : this.permissions,
this.startTime == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(this.startTime),
this.expiryTime == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(this.expiryTime),
canonicalName,
key.getSignedObjectId() == null ? "" : key.getSignedObjectId(),
key.getSignedTenantId() == null ? "" : key.getSignedTenantId(),
key.getSignedStart() == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(key.getSignedStart()),
key.getSignedExpiry() == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(key.getSignedExpiry()),
key.getSignedService() == null ? "" : key.getSignedService(),
key.getSignedVersion() == null ? "" : key.getSignedVersion(),
this.authorizedAadObjectId == null ? "" : this.authorizedAadObjectId,
"", /* suoid - empty since this applies to HNS only accounts. */
this.correlationId == null ? "" : this.correlationId,
this.sasIpRange == null ? "" : this.sasIpRange.toString(),
this.protocol == null ? "" : this.protocol.toString(),
VERSION,
resource,
versionSegment == null ? "" : versionSegment,
this.encryptionScope == null ? "" : this.encryptionScope,
this.cacheControl == null ? "" : this.cacheControl,
this.contentDisposition == null ? "" : this.contentDisposition,
this.contentEncoding == null ? "" : this.contentEncoding,
this.contentLanguage == null ? "" : this.contentLanguage,
this.contentType == null ? "" : this.contentType
);
}
}
} |
Added it. I couldn't remember if that was a one off thing or we do for every string to sign change | private String stringToSign(String canonicalName) {
String versionSegment = this.snapshotId == null ? this.versionId : this.snapshotId;
return String.join("\n",
this.permissions == null ? "" : permissions,
this.startTime == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(this.startTime),
this.expiryTime == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(this.expiryTime),
canonicalName,
this.identifier == null ? "" : this.identifier,
this.sasIpRange == null ? "" : this.sasIpRange.toString(),
this.protocol == null ? "" : this.protocol.toString(),
VERSION,
resource,
versionSegment == null ? "" : versionSegment,
this.encryptionScope == null ? "" : this.encryptionScope,
this.cacheControl == null ? "" : this.cacheControl,
this.contentDisposition == null ? "" : this.contentDisposition,
this.contentEncoding == null ? "" : this.contentEncoding,
this.contentLanguage == null ? "" : this.contentLanguage,
this.contentType == null ? "" : this.contentType
);
} | this.encryptionScope == null ? "" : this.encryptionScope, | private String stringToSign(String canonicalName) {
String versionSegment = this.snapshotId == null ? this.versionId : this.snapshotId;
if (VERSION.compareTo(BlobServiceVersion.V2020_10_02.getVersion()) <= 0) {
return String.join("\n",
this.permissions == null ? "" : permissions,
this.startTime == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(this.startTime),
this.expiryTime == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(this.expiryTime),
canonicalName,
this.identifier == null ? "" : this.identifier,
this.sasIpRange == null ? "" : this.sasIpRange.toString(),
this.protocol == null ? "" : this.protocol.toString(),
VERSION,
resource,
versionSegment == null ? "" : versionSegment,
this.cacheControl == null ? "" : this.cacheControl,
this.contentDisposition == null ? "" : this.contentDisposition,
this.contentEncoding == null ? "" : this.contentEncoding,
this.contentLanguage == null ? "" : this.contentLanguage,
this.contentType == null ? "" : this.contentType
);
} else {
return String.join("\n",
this.permissions == null ? "" : permissions,
this.startTime == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(this.startTime),
this.expiryTime == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(this.expiryTime),
canonicalName,
this.identifier == null ? "" : this.identifier,
this.sasIpRange == null ? "" : this.sasIpRange.toString(),
this.protocol == null ? "" : this.protocol.toString(),
VERSION,
resource,
versionSegment == null ? "" : versionSegment,
this.encryptionScope == null ? "" : this.encryptionScope,
this.cacheControl == null ? "" : this.cacheControl,
this.contentDisposition == null ? "" : this.contentDisposition,
this.contentEncoding == null ? "" : this.contentEncoding,
this.contentLanguage == null ? "" : this.contentLanguage,
this.contentType == null ? "" : this.contentType
);
}
} | class BlobSasImplUtil {
/**
* The SAS blob constant.
*/
private static final String SAS_BLOB_CONSTANT = "b";
/**
* The SAS blob snapshot constant.
*/
private static final String SAS_BLOB_SNAPSHOT_CONSTANT = "bs";
/**
* The SAS blob version constant.
*/
private static final String SAS_BLOB_VERSION_CONSTANT = "bv";
/**
* The SAS blob container constant.
*/
private static final String SAS_CONTAINER_CONSTANT = "c";
private static final ClientLogger LOGGER = new ClientLogger(BlobSasImplUtil.class);
private static final String VERSION = Configuration.getGlobalConfiguration()
.get(Constants.PROPERTY_AZURE_STORAGE_SAS_SERVICE_VERSION, BlobServiceVersion.getLatest().getVersion());
private SasProtocol protocol;
private OffsetDateTime startTime;
private OffsetDateTime expiryTime;
private String permissions;
private SasIpRange sasIpRange;
private String containerName;
private String blobName;
private String resource;
private String snapshotId;
private String versionId;
private String identifier;
private String cacheControl;
private String contentDisposition;
private String contentEncoding;
private String contentLanguage;
private String contentType;
private String authorizedAadObjectId;
private String correlationId;
private String encryptionScope;
/**
* Creates a new {@link BlobSasImplUtil} with the specified parameters
*
* @param sasValues {@link BlobServiceSasSignatureValues}
* @param containerName The container name
*/
public BlobSasImplUtil(BlobServiceSasSignatureValues sasValues, String containerName) {
this(sasValues, containerName, null, null, null);
}
/**
* Creates a new {@link BlobSasImplUtil} with the specified parameters
*
* @param sasValues {@link BlobServiceSasSignatureValues}
* @param containerName The container name
* @param blobName The blob name
* @param snapshotId The snapshot id
* @param versionId The version id
*/
public BlobSasImplUtil(BlobServiceSasSignatureValues sasValues, String containerName, String blobName,
String snapshotId, String versionId) {
this(sasValues, containerName, blobName, snapshotId, versionId, null);
}
/**
* Creates a new {@link BlobSasImplUtil} with the specified parameters
*
* @param sasValues {@link BlobServiceSasSignatureValues}
* @param containerName The container name
* @param blobName The blob name
* @param snapshotId The snapshot id
* @param versionId The version id
* @param encryptionScope The encryption scope
*/
public BlobSasImplUtil(BlobServiceSasSignatureValues sasValues, String containerName, String blobName,
String snapshotId, String versionId, String encryptionScope) {
Objects.requireNonNull(sasValues);
if (snapshotId != null && versionId != null) {
throw LOGGER.logExceptionAsError(
new IllegalArgumentException("'snapshot' and 'versionId' cannot be used at the same time."));
}
this.protocol = sasValues.getProtocol();
this.startTime = sasValues.getStartTime();
this.expiryTime = sasValues.getExpiryTime();
this.permissions = sasValues.getPermissions();
this.sasIpRange = sasValues.getSasIpRange();
this.containerName = containerName;
this.blobName = blobName;
this.snapshotId = snapshotId;
this.versionId = versionId;
this.identifier = sasValues.getIdentifier();
this.cacheControl = sasValues.getCacheControl();
this.contentDisposition = sasValues.getContentDisposition();
this.contentEncoding = sasValues.getContentEncoding();
this.contentLanguage = sasValues.getContentLanguage();
this.contentType = sasValues.getContentType();
this.authorizedAadObjectId = sasValues.getPreauthorizedAgentObjectId();
this.correlationId = sasValues.getCorrelationId();
/*
Prefer the encryption scope explicitly set on the sas values. If none present, fallback to the value on the
client.
*/
this.encryptionScope = sasValues.getEncryptionScope() == null
? encryptionScope : sasValues.getEncryptionScope();
}
/**
* Generates a Sas signed with a {@link StorageSharedKeyCredential}
*
* @param storageSharedKeyCredentials {@link StorageSharedKeyCredential}
* @param context Additional context that is passed through the code when generating a SAS.
* @return A String representing the Sas
*/
public String generateSas(StorageSharedKeyCredential storageSharedKeyCredentials, Context context) {
StorageImplUtils.assertNotNull("storageSharedKeyCredentials", storageSharedKeyCredentials);
ensureState();
final String canonicalName = getCanonicalName(storageSharedKeyCredentials.getAccountName());
final String stringToSign = stringToSign(canonicalName);
StorageImplUtils.logStringToSign(LOGGER, stringToSign, context);
final String signature = storageSharedKeyCredentials.computeHmac256(stringToSign);
return encode(null /* userDelegationKey */, signature);
}
/**
* Generates a Sas signed with a {@link UserDelegationKey}
*
* @param delegationKey {@link UserDelegationKey}
* @param accountName The account name
* @param context Additional context that is passed through the code when generating a SAS.
* @return A String representing the Sas
*/
public String generateUserDelegationSas(UserDelegationKey delegationKey, String accountName, Context context) {
StorageImplUtils.assertNotNull("delegationKey", delegationKey);
StorageImplUtils.assertNotNull("accountName", accountName);
ensureState();
final String canonicalName = getCanonicalName(accountName);
final String stringToSign = stringToSign(delegationKey, canonicalName);
StorageImplUtils.logStringToSign(LOGGER, stringToSign, context);
String signature = StorageImplUtils.computeHMac256(delegationKey.getValue(), stringToSign);
return encode(delegationKey, signature);
}
/**
 * Encodes a Sas from the values in this type.
 * <p>
 * Parameters with {@code null} values are omitted entirely (see {@code tryAppendQueryParameter}),
 * so the resulting query string contains only the fields that were actually configured.
 * @param userDelegationKey {@link UserDelegationKey}; {@code null} for a shared-key SAS.
 * @param signature The signature of the Sas.
 * @return A String representing the Sas.
 */
private String encode(UserDelegationKey userDelegationKey, String signature) {
/*
We should be url-encoding each key and each value, but because we know all the keys and values will encode to
themselves, we cheat except for the signature value.
*/
StringBuilder sb = new StringBuilder();
// Parameters common to both shared-key and user-delegation SAS tokens.
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_SERVICE_VERSION, VERSION);
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_PROTOCOL, this.protocol);
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_START_TIME, formatQueryParameterDate(this.startTime));
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_EXPIRY_TIME, formatQueryParameterDate(this.expiryTime));
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_IP_RANGE, this.sasIpRange);
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_SIGNED_IDENTIFIER, this.identifier);
// The delegation key's components are embedded in the token so the service can re-derive
// the signing key when validating the signature.
if (userDelegationKey != null) {
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_SIGNED_OBJECT_ID,
userDelegationKey.getSignedObjectId());
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_SIGNED_TENANT_ID,
userDelegationKey.getSignedTenantId());
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_SIGNED_KEY_START,
formatQueryParameterDate(userDelegationKey.getSignedStart()));
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_SIGNED_KEY_EXPIRY,
formatQueryParameterDate(userDelegationKey.getSignedExpiry()));
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_SIGNED_KEY_SERVICE,
userDelegationKey.getSignedService());
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_SIGNED_KEY_VERSION,
userDelegationKey.getSignedVersion());
/* Only parameters relevant for user delegation SAS. */
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_PREAUTHORIZED_AGENT_OBJECT_ID, this.authorizedAadObjectId);
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_CORRELATION_ID, this.correlationId);
}
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_SIGNED_RESOURCE, this.resource);
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_SIGNED_PERMISSIONS, this.permissions);
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_SIGNATURE, signature);
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_ENCRYPTION_SCOPE, this.encryptionScope);
// Response-header overrides (cache-control / content-*) requested by the SAS holder.
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_CACHE_CONTROL, this.cacheControl);
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_CONTENT_DISPOSITION, this.contentDisposition);
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_CONTENT_ENCODING, this.contentEncoding);
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_CONTENT_LANGUAGE, this.contentLanguage);
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_CONTENT_TYPE, this.contentType);
return sb.toString();
}
/**
 * Ensures that the builder's properties are in a consistent state.
 * 1. If there is no version, use latest.
 * 2. If there is no identifier set, ensure expiryTime and permissions are set.
 * 3. Resource name is chosen by:
 *    a. If "BlobName" is _not_ set, it is a container resource.
 *    b. Otherwise, if "SnapshotId" is set, it is a blob snapshot resource.
 *    c. Otherwise, if "VersionId" is set, it is a blob version resource.
 *    d. Otherwise, it is a blob resource.
 * 4. Reparse permissions depending on what the resource is. If it is an unrecognized resource, do nothing.
 */
private void ensureState() {
    // An ad-hoc SAS (no stored access policy identifier) must carry its own expiry and permissions.
    boolean adHoc = identifier == null;
    if (adHoc && (expiryTime == null || permissions == null)) {
        throw LOGGER.logExceptionAsError(new IllegalStateException("If identifier is not set, expiry time "
            + "and permissions must be set"));
    }
    // Resolve the signed resource type; a snapshot takes precedence over a version id.
    resource = CoreUtils.isNullOrEmpty(blobName) ? SAS_CONTAINER_CONSTANT
        : snapshotId != null ? SAS_BLOB_SNAPSHOT_CONSTANT
        : versionId != null ? SAS_BLOB_VERSION_CONSTANT
        : SAS_BLOB_CONSTANT;
    // Round-trip the permission string through its parser for the resolved resource type
    // (presumably validating and normalizing it -- see BlobSasPermission/BlobContainerSasPermission).
    if (permissions != null) {
        if (SAS_CONTAINER_CONSTANT.equals(resource)) {
            permissions = BlobContainerSasPermission.parse(permissions).toString();
        } else if (SAS_BLOB_CONSTANT.equals(resource) || SAS_BLOB_SNAPSHOT_CONSTANT.equals(resource)
            || SAS_BLOB_VERSION_CONSTANT.equals(resource)) {
            permissions = BlobSasPermission.parse(permissions).toString();
        } else {
            LOGGER.info("Not re-parsing permissions. Resource type '{}' is unknown.", resource);
        }
    }
}
/**
 * Computes the canonical name for a container or blob resource for SAS signing:
 * {@code /blob/{account}/{container}} or {@code /blob/{account}/{container}/{blob}},
 * with backslashes in the blob name normalized to forward slashes.
 */
private String getCanonicalName(String account) {
    StringBuilder canonical = new StringBuilder("/blob/").append(account).append('/').append(containerName);
    if (!CoreUtils.isNullOrEmpty(blobName)) {
        canonical.append('/').append(blobName.replace("\\", "/"));
    }
    return canonical.toString();
}
/**
 * Assembles the user delegation string-to-sign for the configured service {@code VERSION}.
 * <p>
 * The newline-delimited field list is dictated by the service REST contract and grows with the
 * service version:
 * <ul>
 * <li>versions up to 2019-12-12 omit the preauthorized agent object id (saoid), suoid and
 *     correlation id (scid) fields;</li>
 * <li>versions up to 2020-10-02 include saoid/suoid/scid but NOT the encryption scope;</li>
 * <li>later versions additionally include the encryption scope.</li>
 * </ul>
 * Signing a field the target service version does not expect yields a signature the service
 * rejects, so each branch must match its version's contract exactly.
 *
 * @param key the user delegation key whose components are echoed into the string-to-sign
 * @param canonicalName the canonical resource name, e.g. {@code /blob/account/container/blob}
 * @return the string-to-sign to be HMAC'd with the delegation key's value
 */
private String stringToSign(final UserDelegationKey key, String canonicalName) {
    // A SAS targets at most one of snapshot/version (validated at construction); snapshot wins.
    String versionSegment = this.snapshotId == null ? this.versionId : this.snapshotId;
    if (VERSION.compareTo(BlobServiceVersion.V2019_12_12.getVersion()) <= 0) {
        return String.join("\n",
            this.permissions == null ? "" : this.permissions,
            this.startTime == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(this.startTime),
            this.expiryTime == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(this.expiryTime),
            canonicalName,
            key.getSignedObjectId() == null ? "" : key.getSignedObjectId(),
            key.getSignedTenantId() == null ? "" : key.getSignedTenantId(),
            key.getSignedStart() == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(key.getSignedStart()),
            key.getSignedExpiry() == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(key.getSignedExpiry()),
            key.getSignedService() == null ? "" : key.getSignedService(),
            key.getSignedVersion() == null ? "" : key.getSignedVersion(),
            this.sasIpRange == null ? "" : this.sasIpRange.toString(),
            this.protocol == null ? "" : this.protocol.toString(),
            VERSION,
            resource,
            versionSegment == null ? "" : versionSegment,
            this.cacheControl == null ? "" : this.cacheControl,
            this.contentDisposition == null ? "" : this.contentDisposition,
            this.contentEncoding == null ? "" : this.contentEncoding,
            this.contentLanguage == null ? "" : this.contentLanguage,
            this.contentType == null ? "" : this.contentType
        );
    } else if (VERSION.compareTo(BlobServiceVersion.V2020_10_02.getVersion()) <= 0) {
        // FIX: service versions up to 2020-10-02 do not support the signed encryption scope
        // field; including it in the string-to-sign produced signatures the service rejects.
        return String.join("\n",
            this.permissions == null ? "" : this.permissions,
            this.startTime == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(this.startTime),
            this.expiryTime == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(this.expiryTime),
            canonicalName,
            key.getSignedObjectId() == null ? "" : key.getSignedObjectId(),
            key.getSignedTenantId() == null ? "" : key.getSignedTenantId(),
            key.getSignedStart() == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(key.getSignedStart()),
            key.getSignedExpiry() == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(key.getSignedExpiry()),
            key.getSignedService() == null ? "" : key.getSignedService(),
            key.getSignedVersion() == null ? "" : key.getSignedVersion(),
            this.authorizedAadObjectId == null ? "" : this.authorizedAadObjectId,
            "", /* suoid - empty since this applies to HNS only accounts. */
            this.correlationId == null ? "" : this.correlationId,
            this.sasIpRange == null ? "" : this.sasIpRange.toString(),
            this.protocol == null ? "" : this.protocol.toString(),
            VERSION,
            resource,
            versionSegment == null ? "" : versionSegment,
            this.cacheControl == null ? "" : this.cacheControl,
            this.contentDisposition == null ? "" : this.contentDisposition,
            this.contentEncoding == null ? "" : this.contentEncoding,
            this.contentLanguage == null ? "" : this.contentLanguage,
            this.contentType == null ? "" : this.contentType
        );
    } else {
        return String.join("\n",
            this.permissions == null ? "" : this.permissions,
            this.startTime == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(this.startTime),
            this.expiryTime == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(this.expiryTime),
            canonicalName,
            key.getSignedObjectId() == null ? "" : key.getSignedObjectId(),
            key.getSignedTenantId() == null ? "" : key.getSignedTenantId(),
            key.getSignedStart() == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(key.getSignedStart()),
            key.getSignedExpiry() == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(key.getSignedExpiry()),
            key.getSignedService() == null ? "" : key.getSignedService(),
            key.getSignedVersion() == null ? "" : key.getSignedVersion(),
            this.authorizedAadObjectId == null ? "" : this.authorizedAadObjectId,
            "", /* suoid - empty since this applies to HNS only accounts. */
            this.correlationId == null ? "" : this.correlationId,
            this.sasIpRange == null ? "" : this.sasIpRange.toString(),
            this.protocol == null ? "" : this.protocol.toString(),
            VERSION,
            resource,
            versionSegment == null ? "" : versionSegment,
            this.encryptionScope == null ? "" : this.encryptionScope,
            this.cacheControl == null ? "" : this.cacheControl,
            this.contentDisposition == null ? "" : this.contentDisposition,
            this.contentEncoding == null ? "" : this.contentEncoding,
            this.contentLanguage == null ? "" : this.contentLanguage,
            this.contentType == null ? "" : this.contentType
        );
    }
}
} | class BlobSasImplUtil {
/**
 * The SAS blob constant.
 */
private static final String SAS_BLOB_CONSTANT = "b";
/**
 * The SAS blob snapshot constant.
 */
private static final String SAS_BLOB_SNAPSHOT_CONSTANT = "bs";
/**
 * The SAS blob version constant.
 */
private static final String SAS_BLOB_VERSION_CONSTANT = "bv";
/**
 * The SAS blob container constant.
 */
private static final String SAS_CONTAINER_CONSTANT = "c";
private static final ClientLogger LOGGER = new ClientLogger(BlobSasImplUtil.class);
// Service version stamped into the SAS ("sv="); overridable through configuration.
private static final String VERSION = Configuration.getGlobalConfiguration()
.get(Constants.PROPERTY_AZURE_STORAGE_SAS_SERVICE_VERSION, BlobServiceVersion.getLatest().getVersion());
// Signature values snapshotted from BlobServiceSasSignatureValues at construction time.
private SasProtocol protocol;
private OffsetDateTime startTime;
private OffsetDateTime expiryTime;
private String permissions;
private SasIpRange sasIpRange;
private String containerName;
private String blobName;
// Derived signed-resource type ("b"/"bs"/"bv"/"c"); set by ensureState(), not caller-supplied.
private String resource;
private String snapshotId;
private String versionId;
// Stored access policy identifier; when set, expiry/permissions may be omitted.
private String identifier;
// Response-header overrides encoded into the token.
private String cacheControl;
private String contentDisposition;
private String contentEncoding;
private String contentLanguage;
private String contentType;
// User-delegation-only parameters (saoid / scid).
private String authorizedAadObjectId;
private String correlationId;
private String encryptionScope;
/**
 * Creates a new {@link BlobSasImplUtil} with the specified parameters
 * <p>
 * Container-level convenience overload: no blob name, snapshot id, version id or encryption scope.
 *
 * @param sasValues {@link BlobServiceSasSignatureValues}
 * @param containerName The container name
 */
public BlobSasImplUtil(BlobServiceSasSignatureValues sasValues, String containerName) {
this(sasValues, containerName, null, null, null, null);
}
/**
 * Creates a new {@link BlobSasImplUtil} with the specified parameters
 *
 * @param sasValues {@link BlobServiceSasSignatureValues}
 * @param containerName The container name
 * @param blobName The blob name
 * @param snapshotId The snapshot id
 * @param versionId The version id
 * @param encryptionScope The encryption scope
 * @throws IllegalArgumentException if both {@code snapshotId} and {@code versionId} are supplied
 * @throws NullPointerException if {@code sasValues} is null
 */
public BlobSasImplUtil(BlobServiceSasSignatureValues sasValues, String containerName, String blobName,
String snapshotId, String versionId, String encryptionScope) {
Objects.requireNonNull(sasValues);
// A SAS can address a snapshot or a version, but never both at once.
if (snapshotId != null && versionId != null) {
throw LOGGER.logExceptionAsError(
new IllegalArgumentException("'snapshot' and 'versionId' cannot be used at the same time."));
}
this.protocol = sasValues.getProtocol();
this.startTime = sasValues.getStartTime();
this.expiryTime = sasValues.getExpiryTime();
this.permissions = sasValues.getPermissions();
this.sasIpRange = sasValues.getSasIpRange();
this.containerName = containerName;
this.blobName = blobName;
this.snapshotId = snapshotId;
this.versionId = versionId;
this.identifier = sasValues.getIdentifier();
this.cacheControl = sasValues.getCacheControl();
this.contentDisposition = sasValues.getContentDisposition();
this.contentEncoding = sasValues.getContentEncoding();
this.contentLanguage = sasValues.getContentLanguage();
this.contentType = sasValues.getContentType();
this.authorizedAadObjectId = sasValues.getPreauthorizedAgentObjectId();
this.correlationId = sasValues.getCorrelationId();
this.encryptionScope = encryptionScope;
}
/**
 * Creates the SAS token by signing this instance's string-to-sign with the account's shared key.
 *
 * @param storageSharedKeyCredentials {@link StorageSharedKeyCredential} providing the account name
 *     and the HMAC-SHA256 signing key.
 * @param context Additional context that is passed through the code when generating a SAS.
 * @return A String representing the Sas
 */
public String generateSas(StorageSharedKeyCredential storageSharedKeyCredentials, Context context) {
    StorageImplUtils.assertNotNull("storageSharedKeyCredentials", storageSharedKeyCredentials);
    ensureState();
    String accountName = storageSharedKeyCredentials.getAccountName();
    String stringToSign = stringToSign(getCanonicalName(accountName));
    StorageImplUtils.logStringToSign(LOGGER, stringToSign, context);
    String signature = storageSharedKeyCredentials.computeHmac256(stringToSign);
    // Shared-key tokens carry no user delegation key components.
    return encode(null, signature);
}
/**
 * Creates the SAS token by signing this instance's string-to-sign with a user delegation key.
 *
 * @param delegationKey {@link UserDelegationKey} obtained from the service; both signs the token
 *     and has its components embedded in it.
 * @param accountName The account name
 * @param context Additional context that is passed through the code when generating a SAS.
 * @return A String representing the Sas
 */
public String generateUserDelegationSas(UserDelegationKey delegationKey, String accountName, Context context) {
    StorageImplUtils.assertNotNull("delegationKey", delegationKey);
    StorageImplUtils.assertNotNull("accountName", accountName);
    ensureState();
    String canonicalName = getCanonicalName(accountName);
    String stringToSign = stringToSign(delegationKey, canonicalName);
    StorageImplUtils.logStringToSign(LOGGER, stringToSign, context);
    return encode(delegationKey, StorageImplUtils.computeHMac256(delegationKey.getValue(), stringToSign));
}
/**
 * Encodes a Sas from the values in this type.
 * <p>
 * {@code null}-valued parameters are skipped by {@code tryAppendQueryParameter}, so only the
 * configured fields appear in the output.
 * @param userDelegationKey {@link UserDelegationKey}; {@code null} when signing with a shared key.
 * @param signature The signature of the Sas.
 * @return A String representing the Sas.
 */
private String encode(UserDelegationKey userDelegationKey, String signature) {
/*
We should be url-encoding each key and each value, but because we know all the keys and values will encode to
themselves, we cheat except for the signature value.
*/
StringBuilder sb = new StringBuilder();
// Fields shared by both SAS flavors.
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_SERVICE_VERSION, VERSION);
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_PROTOCOL, this.protocol);
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_START_TIME, formatQueryParameterDate(this.startTime));
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_EXPIRY_TIME, formatQueryParameterDate(this.expiryTime));
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_IP_RANGE, this.sasIpRange);
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_SIGNED_IDENTIFIER, this.identifier);
// Echo the delegation key's components so the service can reconstruct the signing key.
if (userDelegationKey != null) {
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_SIGNED_OBJECT_ID,
userDelegationKey.getSignedObjectId());
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_SIGNED_TENANT_ID,
userDelegationKey.getSignedTenantId());
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_SIGNED_KEY_START,
formatQueryParameterDate(userDelegationKey.getSignedStart()));
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_SIGNED_KEY_EXPIRY,
formatQueryParameterDate(userDelegationKey.getSignedExpiry()));
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_SIGNED_KEY_SERVICE,
userDelegationKey.getSignedService());
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_SIGNED_KEY_VERSION,
userDelegationKey.getSignedVersion());
/* Only parameters relevant for user delegation SAS. */
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_PREAUTHORIZED_AGENT_OBJECT_ID, this.authorizedAadObjectId);
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_CORRELATION_ID, this.correlationId);
}
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_SIGNED_RESOURCE, this.resource);
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_SIGNED_PERMISSIONS, this.permissions);
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_SIGNATURE, signature);
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_ENCRYPTION_SCOPE, this.encryptionScope);
// Optional response-header overrides.
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_CACHE_CONTROL, this.cacheControl);
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_CONTENT_DISPOSITION, this.contentDisposition);
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_CONTENT_ENCODING, this.contentEncoding);
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_CONTENT_LANGUAGE, this.contentLanguage);
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_CONTENT_TYPE, this.contentType);
return sb.toString();
}
/**
 * Ensures that the builder's properties are in a consistent state.
 * 1. If there is no version, use latest.
 * 2. If there is no identifier set, ensure expiryTime and permissions are set.
 * 3. Resource name is chosen by:
 * a. If "BlobName" is _not_ set, it is a container resource.
 * b. Otherwise, if "SnapshotId" is set, it is a blob snapshot resource.
 * c. Otherwise, if "VersionId" is set, it is a blob version resource.
 * d. Otherwise, it is a blob resource.
 * 4. Reparse permissions depending on what the resource is. If it is an unrecognized resource, do nothing.
 *
 * Taken from:
 * https:
 * https:
 * (NOTE(review): the source links above were truncated during import -- restore from the
 * original file if needed.)
 */
private void ensureState() {
// An ad-hoc SAS (no stored access policy identifier) must carry its own expiry and permissions.
if (identifier == null) {
if (expiryTime == null || permissions == null) {
throw LOGGER.logExceptionAsError(new IllegalStateException("If identifier is not set, expiry time "
+ "and permissions must be set"));
}
}
// Derive the signed resource type; snapshot takes precedence over version id.
if (CoreUtils.isNullOrEmpty(blobName)) {
resource = SAS_CONTAINER_CONSTANT;
} else if (snapshotId != null) {
resource = SAS_BLOB_SNAPSHOT_CONSTANT;
} else if (versionId != null) {
resource = SAS_BLOB_VERSION_CONSTANT;
} else {
resource = SAS_BLOB_CONSTANT;
}
// Round-trip the permission string through the resource-appropriate parser (presumably to
// validate and normalize it -- see BlobSasPermission / BlobContainerSasPermission).
if (permissions != null) {
switch (resource) {
case SAS_BLOB_CONSTANT:
case SAS_BLOB_SNAPSHOT_CONSTANT:
case SAS_BLOB_VERSION_CONSTANT:
permissions = BlobSasPermission.parse(permissions).toString();
break;
case SAS_CONTAINER_CONSTANT:
permissions = BlobContainerSasPermission.parse(permissions).toString();
break;
default:
LOGGER.info("Not re-parsing permissions. Resource type '{}' is unknown.", resource);
break;
}
}
}
/**
 * Computes the canonical name for a container or blob resource for SAS signing.
 * Produces {@code /blob/{account}/{container}} for container SAS, or
 * {@code /blob/{account}/{container}/{blob}} for blob-level SAS (backslashes in the
 * blob name are normalized to forward slashes).
 */
private String getCanonicalName(String account) {
    if (CoreUtils.isNullOrEmpty(blobName)) {
        return String.format("/blob/%s/%s", account, containerName);
    }
    return String.format("/blob/%s/%s/%s", account, containerName, blobName.replace("\\", "/"));
}
/**
 * Builds the user delegation string-to-sign for the configured service {@code VERSION}.
 * The field order/count is the service REST contract and differs per version; every branch
 * must stay byte-exact or the resulting signature is rejected.
 *
 * @param key the user delegation key whose components are echoed into the string-to-sign
 * @param canonicalName the canonical resource name ("/blob/account/...")
 * @return the newline-delimited string-to-sign
 */
private String stringToSign(final UserDelegationKey key, String canonicalName) {
// At most one of snapshot/version is set (validated at construction); snapshot wins.
String versionSegment = this.snapshotId == null ? this.versionId : this.snapshotId;
// Versions up to 2019-12-12: no saoid/suoid/scid fields.
if (VERSION.compareTo(BlobServiceVersion.V2019_12_12.getVersion()) <= 0) {
return String.join("\n",
this.permissions == null ? "" : this.permissions,
this.startTime == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(this.startTime),
this.expiryTime == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(this.expiryTime),
canonicalName,
key.getSignedObjectId() == null ? "" : key.getSignedObjectId(),
key.getSignedTenantId() == null ? "" : key.getSignedTenantId(),
key.getSignedStart() == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(key.getSignedStart()),
key.getSignedExpiry() == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(key.getSignedExpiry()),
key.getSignedService() == null ? "" : key.getSignedService(),
key.getSignedVersion() == null ? "" : key.getSignedVersion(),
this.sasIpRange == null ? "" : this.sasIpRange.toString(),
this.protocol == null ? "" : this.protocol.toString(),
VERSION,
resource,
versionSegment == null ? "" : versionSegment,
this.cacheControl == null ? "" : this.cacheControl,
this.contentDisposition == null ? "" : this.contentDisposition,
this.contentEncoding == null ? "" : this.contentEncoding,
this.contentLanguage == null ? "" : this.contentLanguage,
this.contentType == null ? "" : this.contentType
);
// Versions up to 2020-10-02: saoid/suoid/scid present, but NO encryption scope field.
} else if (VERSION.compareTo(BlobServiceVersion.V2020_10_02.getVersion()) <= 0) {
return String.join("\n",
this.permissions == null ? "" : this.permissions,
this.startTime == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(this.startTime),
this.expiryTime == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(this.expiryTime),
canonicalName,
key.getSignedObjectId() == null ? "" : key.getSignedObjectId(),
key.getSignedTenantId() == null ? "" : key.getSignedTenantId(),
key.getSignedStart() == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(key.getSignedStart()),
key.getSignedExpiry() == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(key.getSignedExpiry()),
key.getSignedService() == null ? "" : key.getSignedService(),
key.getSignedVersion() == null ? "" : key.getSignedVersion(),
this.authorizedAadObjectId == null ? "" : this.authorizedAadObjectId,
"", /* suoid - empty since this applies to HNS only accounts. */
this.correlationId == null ? "" : this.correlationId,
this.sasIpRange == null ? "" : this.sasIpRange.toString(),
this.protocol == null ? "" : this.protocol.toString(),
VERSION,
resource,
versionSegment == null ? "" : versionSegment,
this.cacheControl == null ? "" : this.cacheControl,
this.contentDisposition == null ? "" : this.contentDisposition,
this.contentEncoding == null ? "" : this.contentEncoding,
this.contentLanguage == null ? "" : this.contentLanguage,
this.contentType == null ? "" : this.contentType
);
// Later versions: additionally sign the encryption scope.
} else {
return String.join("\n",
this.permissions == null ? "" : this.permissions,
this.startTime == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(this.startTime),
this.expiryTime == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(this.expiryTime),
canonicalName,
key.getSignedObjectId() == null ? "" : key.getSignedObjectId(),
key.getSignedTenantId() == null ? "" : key.getSignedTenantId(),
key.getSignedStart() == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(key.getSignedStart()),
key.getSignedExpiry() == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(key.getSignedExpiry()),
key.getSignedService() == null ? "" : key.getSignedService(),
key.getSignedVersion() == null ? "" : key.getSignedVersion(),
this.authorizedAadObjectId == null ? "" : this.authorizedAadObjectId,
"", /* suoid - empty since this applies to HNS only accounts. */
this.correlationId == null ? "" : this.correlationId,
this.sasIpRange == null ? "" : this.sasIpRange.toString(),
this.protocol == null ? "" : this.protocol.toString(),
VERSION,
resource,
versionSegment == null ? "" : versionSegment,
this.encryptionScope == null ? "" : this.encryptionScope,
this.cacheControl == null ? "" : this.cacheControl,
this.contentDisposition == null ? "" : this.contentDisposition,
this.contentEncoding == null ? "" : this.contentEncoding,
this.contentLanguage == null ? "" : this.contentLanguage,
this.contentType == null ? "" : this.contentType
);
}
}
} |
done | private String stringToSign(final UserDelegationKey key, String canonicalName) {
String versionSegment = this.snapshotId == null ? this.versionId : this.snapshotId;
if (VERSION.compareTo(BlobServiceVersion.V2019_12_12.getVersion()) <= 0) {
return String.join("\n",
this.permissions == null ? "" : this.permissions,
this.startTime == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(this.startTime),
this.expiryTime == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(this.expiryTime),
canonicalName,
key.getSignedObjectId() == null ? "" : key.getSignedObjectId(),
key.getSignedTenantId() == null ? "" : key.getSignedTenantId(),
key.getSignedStart() == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(key.getSignedStart()),
key.getSignedExpiry() == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(key.getSignedExpiry()),
key.getSignedService() == null ? "" : key.getSignedService(),
key.getSignedVersion() == null ? "" : key.getSignedVersion(),
this.sasIpRange == null ? "" : this.sasIpRange.toString(),
this.protocol == null ? "" : this.protocol.toString(),
VERSION,
resource,
versionSegment == null ? "" : versionSegment,
this.cacheControl == null ? "" : this.cacheControl,
this.contentDisposition == null ? "" : this.contentDisposition,
this.contentEncoding == null ? "" : this.contentEncoding,
this.contentLanguage == null ? "" : this.contentLanguage,
this.contentType == null ? "" : this.contentType
);
} else {
return String.join("\n",
this.permissions == null ? "" : this.permissions,
this.startTime == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(this.startTime),
this.expiryTime == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(this.expiryTime),
canonicalName,
key.getSignedObjectId() == null ? "" : key.getSignedObjectId(),
key.getSignedTenantId() == null ? "" : key.getSignedTenantId(),
key.getSignedStart() == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(key.getSignedStart()),
key.getSignedExpiry() == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(key.getSignedExpiry()),
key.getSignedService() == null ? "" : key.getSignedService(),
key.getSignedVersion() == null ? "" : key.getSignedVersion(),
this.authorizedAadObjectId == null ? "" : this.authorizedAadObjectId,
"", /* suoid - empty since this applies to HNS only accounts. */
this.correlationId == null ? "" : this.correlationId,
this.sasIpRange == null ? "" : this.sasIpRange.toString(),
this.protocol == null ? "" : this.protocol.toString(),
VERSION,
resource,
versionSegment == null ? "" : versionSegment,
this.encryptionScope == null ? "" : this.encryptionScope,
this.cacheControl == null ? "" : this.cacheControl,
this.contentDisposition == null ? "" : this.contentDisposition,
this.contentEncoding == null ? "" : this.contentEncoding,
this.contentLanguage == null ? "" : this.contentLanguage,
this.contentType == null ? "" : this.contentType
);
}
} | this.encryptionScope == null ? "" : this.encryptionScope, | private String stringToSign(final UserDelegationKey key, String canonicalName) {
String versionSegment = this.snapshotId == null ? this.versionId : this.snapshotId;
if (VERSION.compareTo(BlobServiceVersion.V2019_12_12.getVersion()) <= 0) {
return String.join("\n",
this.permissions == null ? "" : this.permissions,
this.startTime == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(this.startTime),
this.expiryTime == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(this.expiryTime),
canonicalName,
key.getSignedObjectId() == null ? "" : key.getSignedObjectId(),
key.getSignedTenantId() == null ? "" : key.getSignedTenantId(),
key.getSignedStart() == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(key.getSignedStart()),
key.getSignedExpiry() == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(key.getSignedExpiry()),
key.getSignedService() == null ? "" : key.getSignedService(),
key.getSignedVersion() == null ? "" : key.getSignedVersion(),
this.sasIpRange == null ? "" : this.sasIpRange.toString(),
this.protocol == null ? "" : this.protocol.toString(),
VERSION,
resource,
versionSegment == null ? "" : versionSegment,
this.cacheControl == null ? "" : this.cacheControl,
this.contentDisposition == null ? "" : this.contentDisposition,
this.contentEncoding == null ? "" : this.contentEncoding,
this.contentLanguage == null ? "" : this.contentLanguage,
this.contentType == null ? "" : this.contentType
);
} else if (VERSION.compareTo(BlobServiceVersion.V2020_10_02.getVersion()) <= 0) {
return String.join("\n",
this.permissions == null ? "" : this.permissions,
this.startTime == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(this.startTime),
this.expiryTime == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(this.expiryTime),
canonicalName,
key.getSignedObjectId() == null ? "" : key.getSignedObjectId(),
key.getSignedTenantId() == null ? "" : key.getSignedTenantId(),
key.getSignedStart() == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(key.getSignedStart()),
key.getSignedExpiry() == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(key.getSignedExpiry()),
key.getSignedService() == null ? "" : key.getSignedService(),
key.getSignedVersion() == null ? "" : key.getSignedVersion(),
this.authorizedAadObjectId == null ? "" : this.authorizedAadObjectId,
"", /* suoid - empty since this applies to HNS only accounts. */
this.correlationId == null ? "" : this.correlationId,
this.sasIpRange == null ? "" : this.sasIpRange.toString(),
this.protocol == null ? "" : this.protocol.toString(),
VERSION,
resource,
versionSegment == null ? "" : versionSegment,
this.cacheControl == null ? "" : this.cacheControl,
this.contentDisposition == null ? "" : this.contentDisposition,
this.contentEncoding == null ? "" : this.contentEncoding,
this.contentLanguage == null ? "" : this.contentLanguage,
this.contentType == null ? "" : this.contentType
);
} else {
return String.join("\n",
this.permissions == null ? "" : this.permissions,
this.startTime == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(this.startTime),
this.expiryTime == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(this.expiryTime),
canonicalName,
key.getSignedObjectId() == null ? "" : key.getSignedObjectId(),
key.getSignedTenantId() == null ? "" : key.getSignedTenantId(),
key.getSignedStart() == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(key.getSignedStart()),
key.getSignedExpiry() == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(key.getSignedExpiry()),
key.getSignedService() == null ? "" : key.getSignedService(),
key.getSignedVersion() == null ? "" : key.getSignedVersion(),
this.authorizedAadObjectId == null ? "" : this.authorizedAadObjectId,
"", /* suoid - empty since this applies to HNS only accounts. */
this.correlationId == null ? "" : this.correlationId,
this.sasIpRange == null ? "" : this.sasIpRange.toString(),
this.protocol == null ? "" : this.protocol.toString(),
VERSION,
resource,
versionSegment == null ? "" : versionSegment,
this.encryptionScope == null ? "" : this.encryptionScope,
this.cacheControl == null ? "" : this.cacheControl,
this.contentDisposition == null ? "" : this.contentDisposition,
this.contentEncoding == null ? "" : this.contentEncoding,
this.contentLanguage == null ? "" : this.contentLanguage,
this.contentType == null ? "" : this.contentType
);
}
} | class BlobSasImplUtil {
/**
 * The SAS blob constant.
 */
private static final String SAS_BLOB_CONSTANT = "b";
/**
 * The SAS blob snapshot constant.
 */
private static final String SAS_BLOB_SNAPSHOT_CONSTANT = "bs";
/**
 * The SAS blob version constant.
 */
private static final String SAS_BLOB_VERSION_CONSTANT = "bv";
/**
 * The SAS blob container constant.
 */
private static final String SAS_CONTAINER_CONSTANT = "c";
private static final ClientLogger LOGGER = new ClientLogger(BlobSasImplUtil.class);
// Service version used for signing; overridable via configuration.
private static final String VERSION = Configuration.getGlobalConfiguration()
.get(Constants.PROPERTY_AZURE_STORAGE_SAS_SERVICE_VERSION, BlobServiceVersion.getLatest().getVersion());
// Values captured from BlobServiceSasSignatureValues at construction; 'resource' is derived
// later by ensureState().
private SasProtocol protocol;
private OffsetDateTime startTime;
private OffsetDateTime expiryTime;
private String permissions;
private SasIpRange sasIpRange;
private String containerName;
private String blobName;
private String resource;
private String snapshotId;
private String versionId;
private String identifier;
private String cacheControl;
private String contentDisposition;
private String contentEncoding;
private String contentLanguage;
private String contentType;
// User-delegation-only parameters.
private String authorizedAadObjectId;
private String correlationId;
private String encryptionScope;
/**
 * Creates a new {@link BlobSasImplUtil} with the specified parameters
 * <p>
 * Container-level convenience overload.
 *
 * @param sasValues {@link BlobServiceSasSignatureValues}
 * @param containerName The container name
 */
public BlobSasImplUtil(BlobServiceSasSignatureValues sasValues, String containerName) {
this(sasValues, containerName, null, null, null);
}
/**
 * Creates a new {@link BlobSasImplUtil} with the specified parameters
 * <p>
 * Delegates to the full constructor with no encryption scope.
 *
 * @param sasValues {@link BlobServiceSasSignatureValues}
 * @param containerName The container name
 * @param blobName The blob name
 * @param snapshotId The snapshot id
 * @param versionId The version id
 */
public BlobSasImplUtil(BlobServiceSasSignatureValues sasValues, String containerName, String blobName,
String snapshotId, String versionId) {
this(sasValues, containerName, blobName, snapshotId, versionId, null);
}
/**
 * Creates a new {@link BlobSasImplUtil} with the specified parameters
 *
 * @param sasValues {@link BlobServiceSasSignatureValues}
 * @param containerName The container name
 * @param blobName The blob name
 * @param snapshotId The snapshot id
 * @param versionId The version id
 * @param encryptionScope The encryption scope
 * @throws IllegalArgumentException if both {@code snapshotId} and {@code versionId} are supplied
 * @throws NullPointerException if {@code sasValues} is null
 */
public BlobSasImplUtil(BlobServiceSasSignatureValues sasValues, String containerName, String blobName,
String snapshotId, String versionId, String encryptionScope) {
Objects.requireNonNull(sasValues);
// A SAS can address a snapshot or a version, but never both at once.
if (snapshotId != null && versionId != null) {
throw LOGGER.logExceptionAsError(
new IllegalArgumentException("'snapshot' and 'versionId' cannot be used at the same time."));
}
this.protocol = sasValues.getProtocol();
this.startTime = sasValues.getStartTime();
this.expiryTime = sasValues.getExpiryTime();
this.permissions = sasValues.getPermissions();
this.sasIpRange = sasValues.getSasIpRange();
this.containerName = containerName;
this.blobName = blobName;
this.snapshotId = snapshotId;
this.versionId = versionId;
this.identifier = sasValues.getIdentifier();
this.cacheControl = sasValues.getCacheControl();
this.contentDisposition = sasValues.getContentDisposition();
this.contentEncoding = sasValues.getContentEncoding();
this.contentLanguage = sasValues.getContentLanguage();
this.contentType = sasValues.getContentType();
this.authorizedAadObjectId = sasValues.getPreauthorizedAgentObjectId();
this.correlationId = sasValues.getCorrelationId();
/*
Prefer the encryption scope explicitly set on the sas values. If none present, fallback to the value on the
client.
*/
this.encryptionScope = sasValues.getEncryptionScope() == null
? encryptionScope : sasValues.getEncryptionScope();
}
/**
 * Generates a Sas signed with a {@link StorageSharedKeyCredential}
 *
 * @param storageSharedKeyCredentials {@link StorageSharedKeyCredential}
 * @param context Additional context that is passed through the code when generating a SAS.
 * @return A String representing the Sas
 */
public String generateSas(StorageSharedKeyCredential storageSharedKeyCredentials, Context context) {
    StorageImplUtils.assertNotNull("storageSharedKeyCredentials", storageSharedKeyCredentials);
    ensureState();
    // Build the canonical string-to-sign, sign it with the account key, and package it as a SAS token.
    String toSign = stringToSign(getCanonicalName(storageSharedKeyCredentials.getAccountName()));
    StorageImplUtils.logStringToSign(LOGGER, toSign, context);
    return encode(null /* userDelegationKey */, storageSharedKeyCredentials.computeHmac256(toSign));
}
/**
 * Generates a Sas signed with a {@link UserDelegationKey}
 *
 * @param delegationKey {@link UserDelegationKey}
 * @param accountName The account name
 * @param context Additional context that is passed through the code when generating a SAS.
 * @return A String representing the Sas
 */
public String generateUserDelegationSas(UserDelegationKey delegationKey, String accountName, Context context) {
    StorageImplUtils.assertNotNull("delegationKey", delegationKey);
    StorageImplUtils.assertNotNull("accountName", accountName);
    ensureState();
    // Sign the canonical string-to-sign with the user delegation key rather than the account key.
    String toSign = stringToSign(delegationKey, getCanonicalName(accountName));
    StorageImplUtils.logStringToSign(LOGGER, toSign, context);
    return encode(delegationKey, StorageImplUtils.computeHMac256(delegationKey.getValue(), toSign));
}
/**
 * Encodes a Sas from the values in this type.
 * @param userDelegationKey {@link UserDelegationKey}
 * @param signature The signature of the Sas.
 * @return A String representing the Sas.
 */
private String encode(UserDelegationKey userDelegationKey, String signature) {
    /*
    We should be url-encoding each key and each value, but because we know all the keys and values will encode to
    themselves, we cheat except for the signature value.
    */
    StringBuilder sb = new StringBuilder();
    // Parameters common to every SAS flavor; null/absent values are skipped by tryAppendQueryParameter.
    tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_SERVICE_VERSION, VERSION);
    tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_PROTOCOL, this.protocol);
    tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_START_TIME, formatQueryParameterDate(this.startTime));
    tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_EXPIRY_TIME, formatQueryParameterDate(this.expiryTime));
    tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_IP_RANGE, this.sasIpRange);
    tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_SIGNED_IDENTIFIER, this.identifier);
    // User-delegation SAS additionally carries the delegation key metadata.
    if (userDelegationKey != null) {
        tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_SIGNED_OBJECT_ID,
            userDelegationKey.getSignedObjectId());
        tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_SIGNED_TENANT_ID,
            userDelegationKey.getSignedTenantId());
        tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_SIGNED_KEY_START,
            formatQueryParameterDate(userDelegationKey.getSignedStart()));
        tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_SIGNED_KEY_EXPIRY,
            formatQueryParameterDate(userDelegationKey.getSignedExpiry()));
        tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_SIGNED_KEY_SERVICE,
            userDelegationKey.getSignedService());
        tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_SIGNED_KEY_VERSION,
            userDelegationKey.getSignedVersion());
        /* Only parameters relevant for user delegation SAS. */
        tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_PREAUTHORIZED_AGENT_OBJECT_ID, this.authorizedAadObjectId);
        tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_CORRELATION_ID, this.correlationId);
    }
    tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_SIGNED_RESOURCE, this.resource);
    tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_SIGNED_PERMISSIONS, this.permissions);
    tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_SIGNATURE, signature);
    tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_ENCRYPTION_SCOPE, this.encryptionScope);
    // Response-header override parameters.
    tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_CACHE_CONTROL, this.cacheControl);
    tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_CONTENT_DISPOSITION, this.contentDisposition);
    tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_CONTENT_ENCODING, this.contentEncoding);
    tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_CONTENT_LANGUAGE, this.contentLanguage);
    tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_CONTENT_TYPE, this.contentType);
    return sb.toString();
}
/**
 * Ensures that the builder's properties are in a consistent state.
 * 1. If there is no version, use latest.
 * 2. If there is no identifier set, ensure expiryTime and permissions are set.
 * 3. Resource name is chosen by:
 *    a. If "BlobName" is _not_ set, it is a container resource.
 *    b. Otherwise, if "SnapshotId" is set, it is a blob snapshot resource.
 *    c. Otherwise, if "VersionId" is set, it is a blob version resource.
 *    d. Otherwise, it is a blob resource.
 * 4. Reparse permissions depending on what the resource is. If it is an unrecognized resource, do nothing.
 */
private void ensureState() {
    // Expiry and permissions are mandatory unless an access-policy identifier supplies them.
    if (identifier == null && (expiryTime == null || permissions == null)) {
        throw LOGGER.logExceptionAsError(new IllegalStateException(
            "If identifier is not set, expiry time and permissions must be set"));
    }
    // Derive the signed resource type from which coordinates were provided.
    if (CoreUtils.isNullOrEmpty(blobName)) {
        resource = SAS_CONTAINER_CONSTANT;
    } else if (snapshotId != null) {
        resource = SAS_BLOB_SNAPSHOT_CONSTANT;
    } else if (versionId != null) {
        resource = SAS_BLOB_VERSION_CONSTANT;
    } else {
        resource = SAS_BLOB_CONSTANT;
    }
    if (permissions == null) {
        return;
    }
    // Re-parse so permissions are normalized into the canonical order for the resource type.
    if (SAS_CONTAINER_CONSTANT.equals(resource)) {
        permissions = BlobContainerSasPermission.parse(permissions).toString();
    } else if (SAS_BLOB_CONSTANT.equals(resource) || SAS_BLOB_SNAPSHOT_CONSTANT.equals(resource)
        || SAS_BLOB_VERSION_CONSTANT.equals(resource)) {
        permissions = BlobSasPermission.parse(permissions).toString();
    } else {
        LOGGER.info("Not re-parsing permissions. Resource type '{}' is unknown.", resource);
    }
}
/**
 * Computes the canonical name for a container or blob resource for SAS signing.
 */
private String getCanonicalName(String account) {
    StringBuilder canonical = new StringBuilder("/blob/").append(account).append('/').append(containerName);
    if (!CoreUtils.isNullOrEmpty(blobName)) {
        // Normalize backslashes to forward slashes in the blob path segment.
        canonical.append('/').append(blobName.replace("\\", "/"));
    }
    return canonical.toString();
}
/* String-to-sign for a service SAS. The field order and count are a service-side
 * contract — do not reorder. Absent optional values are signed as empty strings. */
private String stringToSign(String canonicalName) {
    // At most one of snapshotId/versionId is set (enforced at construction).
    String versionSegment = this.snapshotId == null ? this.versionId : this.snapshotId;
    return String.join("\n",
        this.permissions == null ? "" : permissions,
        this.startTime == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(this.startTime),
        this.expiryTime == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(this.expiryTime),
        canonicalName,
        this.identifier == null ? "" : this.identifier,
        this.sasIpRange == null ? "" : this.sasIpRange.toString(),
        this.protocol == null ? "" : this.protocol.toString(),
        VERSION,
        resource,
        versionSegment == null ? "" : versionSegment,
        // NOTE(review): the encryption scope is signed unconditionally here; elsewhere in
        // this codebase the field is gated on service version > 2020-10-02 — confirm this
        // overload targets a service version that expects the field.
        this.encryptionScope == null ? "" : this.encryptionScope,
        this.cacheControl == null ? "" : this.cacheControl,
        this.contentDisposition == null ? "" : this.contentDisposition,
        this.contentEncoding == null ? "" : this.contentEncoding,
        this.contentLanguage == null ? "" : this.contentLanguage,
        this.contentType == null ? "" : this.contentType
    );
}
} | class BlobSasImplUtil {
/**
 * The SAS blob constant.
 */
private static final String SAS_BLOB_CONSTANT = "b";
/**
 * The SAS blob snapshot constant.
 */
private static final String SAS_BLOB_SNAPSHOT_CONSTANT = "bs";
/**
 * The SAS blob version constant.
 */
private static final String SAS_BLOB_VERSION_CONSTANT = "bv";
/**
 * The SAS blob container constant.
 */
private static final String SAS_CONTAINER_CONSTANT = "c";
private static final ClientLogger LOGGER = new ClientLogger(BlobSasImplUtil.class);
// Service version stamped into the SAS ("sv"); overridable via global configuration.
private static final String VERSION = Configuration.getGlobalConfiguration()
.get(Constants.PROPERTY_AZURE_STORAGE_SAS_SERVICE_VERSION, BlobServiceVersion.getLatest().getVersion());
// Values captured from BlobServiceSasSignatureValues at construction time.
private SasProtocol protocol;
private OffsetDateTime startTime;
private OffsetDateTime expiryTime;
private String permissions;
private SasIpRange sasIpRange;
private String containerName;
private String blobName;
// Derived in ensureState() from blobName/snapshotId/versionId.
private String resource;
private String snapshotId;
private String versionId;
private String identifier;
// Response-header overrides returned when the SAS is used.
private String cacheControl;
private String contentDisposition;
private String contentEncoding;
private String contentLanguage;
private String contentType;
// Used only for user-delegation SAS tokens.
private String authorizedAadObjectId;
private String correlationId;
private String encryptionScope;
/**
 * Creates a new {@link BlobSasImplUtil} with the specified parameters
 *
 * @param sasValues {@link BlobServiceSasSignatureValues}
 * @param containerName The container name
 */
public BlobSasImplUtil(BlobServiceSasSignatureValues sasValues, String containerName) {
    // Container-level SAS: no blob, snapshot, version, or encryption scope.
    this(sasValues, containerName, null, null, null, null);
}
/**
 * Creates a new {@link BlobSasImplUtil} with the specified parameters
 *
 * @param sasValues {@link BlobServiceSasSignatureValues}
 * @param containerName The container name
 * @param blobName The blob name
 * @param snapshotId The snapshot id
 * @param versionId The version id
 * @param encryptionScope The encryption scope
 */
public BlobSasImplUtil(BlobServiceSasSignatureValues sasValues, String containerName, String blobName,
    String snapshotId, String versionId, String encryptionScope) {
    Objects.requireNonNull(sasValues);
    // A SAS may target a snapshot or a version, never both at once.
    if (snapshotId != null && versionId != null) {
        throw LOGGER.logExceptionAsError(
            new IllegalArgumentException("'snapshot' and 'versionId' cannot be used at the same time."));
    }
    this.protocol = sasValues.getProtocol();
    this.startTime = sasValues.getStartTime();
    this.expiryTime = sasValues.getExpiryTime();
    this.permissions = sasValues.getPermissions();
    this.sasIpRange = sasValues.getSasIpRange();
    this.containerName = containerName;
    this.blobName = blobName;
    this.snapshotId = snapshotId;
    this.versionId = versionId;
    this.identifier = sasValues.getIdentifier();
    this.cacheControl = sasValues.getCacheControl();
    this.contentDisposition = sasValues.getContentDisposition();
    this.contentEncoding = sasValues.getContentEncoding();
    this.contentLanguage = sasValues.getContentLanguage();
    this.contentType = sasValues.getContentType();
    this.authorizedAadObjectId = sasValues.getPreauthorizedAgentObjectId();
    this.correlationId = sasValues.getCorrelationId();
    // NOTE(review): assigns the client-level scope directly; confirm sasValues cannot
    // carry its own encryption scope in this version of the signature values type.
    this.encryptionScope = encryptionScope;
}
/**
 * Generates a Sas signed with a {@link StorageSharedKeyCredential}
 *
 * @param storageSharedKeyCredentials {@link StorageSharedKeyCredential}
 * @param context Additional context that is passed through the code when generating a SAS.
 * @return A String representing the Sas
 */
public String generateSas(StorageSharedKeyCredential storageSharedKeyCredentials, Context context) {
    StorageImplUtils.assertNotNull("storageSharedKeyCredentials", storageSharedKeyCredentials);
    ensureState();
    // Canonicalize, sign with the shared account key, then emit as query parameters.
    String canonical = getCanonicalName(storageSharedKeyCredentials.getAccountName());
    String signedString = stringToSign(canonical);
    StorageImplUtils.logStringToSign(LOGGER, signedString, context);
    return encode(null /* userDelegationKey */, storageSharedKeyCredentials.computeHmac256(signedString));
}
/**
 * Generates a Sas signed with a {@link UserDelegationKey}
 *
 * @param delegationKey {@link UserDelegationKey}
 * @param accountName The account name
 * @param context Additional context that is passed through the code when generating a SAS.
 * @return A String representing the Sas
 */
public String generateUserDelegationSas(UserDelegationKey delegationKey, String accountName, Context context) {
    StorageImplUtils.assertNotNull("delegationKey", delegationKey);
    StorageImplUtils.assertNotNull("accountName", accountName);
    ensureState();
    // Same flow as generateSas, but signed with the delegation key's value.
    String canonical = getCanonicalName(accountName);
    String signedString = stringToSign(delegationKey, canonical);
    StorageImplUtils.logStringToSign(LOGGER, signedString, context);
    return encode(delegationKey, StorageImplUtils.computeHMac256(delegationKey.getValue(), signedString));
}
/**
 * Encodes a Sas from the values in this type.
 * @param userDelegationKey {@link UserDelegationKey}
 * @param signature The signature of the Sas.
 * @return A String representing the Sas.
 */
private String encode(UserDelegationKey userDelegationKey, String signature) {
    /*
    We should be url-encoding each key and each value, but because we know all the keys and values will encode to
    themselves, we cheat except for the signature value.
    */
    StringBuilder sb = new StringBuilder();
    // Common SAS query parameters; null/absent values are skipped by tryAppendQueryParameter.
    tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_SERVICE_VERSION, VERSION);
    tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_PROTOCOL, this.protocol);
    tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_START_TIME, formatQueryParameterDate(this.startTime));
    tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_EXPIRY_TIME, formatQueryParameterDate(this.expiryTime));
    tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_IP_RANGE, this.sasIpRange);
    tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_SIGNED_IDENTIFIER, this.identifier);
    // Delegation key metadata is only emitted for user-delegation SAS.
    if (userDelegationKey != null) {
        tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_SIGNED_OBJECT_ID,
            userDelegationKey.getSignedObjectId());
        tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_SIGNED_TENANT_ID,
            userDelegationKey.getSignedTenantId());
        tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_SIGNED_KEY_START,
            formatQueryParameterDate(userDelegationKey.getSignedStart()));
        tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_SIGNED_KEY_EXPIRY,
            formatQueryParameterDate(userDelegationKey.getSignedExpiry()));
        tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_SIGNED_KEY_SERVICE,
            userDelegationKey.getSignedService());
        tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_SIGNED_KEY_VERSION,
            userDelegationKey.getSignedVersion());
        /* Only parameters relevant for user delegation SAS. */
        tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_PREAUTHORIZED_AGENT_OBJECT_ID, this.authorizedAadObjectId);
        tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_CORRELATION_ID, this.correlationId);
    }
    tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_SIGNED_RESOURCE, this.resource);
    tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_SIGNED_PERMISSIONS, this.permissions);
    tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_SIGNATURE, signature);
    tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_ENCRYPTION_SCOPE, this.encryptionScope);
    // Response-header override parameters.
    tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_CACHE_CONTROL, this.cacheControl);
    tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_CONTENT_DISPOSITION, this.contentDisposition);
    tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_CONTENT_ENCODING, this.contentEncoding);
    tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_CONTENT_LANGUAGE, this.contentLanguage);
    tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_CONTENT_TYPE, this.contentType);
    return sb.toString();
}
/**
 * Ensures that the builder's properties are in a consistent state.
 * 1. If there is no version, use latest.
 * 2. If there is no identifier set, ensure expiryTime and permissions are set.
 * 3. Resource name is chosen by:
 * a. If "BlobName" is _not_ set, it is a container resource.
 * b. Otherwise, if "SnapshotId" is set, it is a blob snapshot resource.
 * c. Otherwise, if "VersionId" is set, it is a blob version resource.
 * d. Otherwise, it is a blob resource.
 * 4. Reparse permissions depending on what the resource is. If it is an unrecognized resource, do nothing.
 *
 * Taken from:
 * https:
 * https:
 */
private void ensureState() {
    // Expiry and permissions are mandatory unless an access-policy identifier supplies them.
    if (identifier == null) {
        if (expiryTime == null || permissions == null) {
            throw LOGGER.logExceptionAsError(new IllegalStateException("If identifier is not set, expiry time "
                + "and permissions must be set"));
        }
    }
    // Derive the signed resource type from which coordinates were provided.
    if (CoreUtils.isNullOrEmpty(blobName)) {
        resource = SAS_CONTAINER_CONSTANT;
    } else if (snapshotId != null) {
        resource = SAS_BLOB_SNAPSHOT_CONSTANT;
    } else if (versionId != null) {
        resource = SAS_BLOB_VERSION_CONSTANT;
    } else {
        resource = SAS_BLOB_CONSTANT;
    }
    // Re-parse so permissions are normalized into the canonical order for the resource type.
    if (permissions != null) {
        switch (resource) {
            case SAS_BLOB_CONSTANT:
            case SAS_BLOB_SNAPSHOT_CONSTANT:
            case SAS_BLOB_VERSION_CONSTANT:
                permissions = BlobSasPermission.parse(permissions).toString();
                break;
            case SAS_CONTAINER_CONSTANT:
                permissions = BlobContainerSasPermission.parse(permissions).toString();
                break;
            default:
                LOGGER.info("Not re-parsing permissions. Resource type '{}' is unknown.", resource);
                break;
        }
    }
}
/**
 * Computes the canonical name for a container or blob resource for SAS signing.
 */
private String getCanonicalName(String account) {
    // Normalize backslashes to forward slashes in the blob path segment.
    String base = "/blob/" + account + "/" + containerName;
    return CoreUtils.isNullOrEmpty(blobName) ? base : base + "/" + blobName.replace("\\", "/");
}
/**
 * Builds the service-side string-to-sign for this SAS.
 * <p>
 * The field order is a wire contract with the service and must not change. The
 * {@code signedEncryptionScope} field participates only for service versions newer
 * than 2020-10-02. The previous implementation duplicated the entire field list in
 * both version branches, differing by a single line — this builds the shared head
 * and tail once and splices the scope field in only when required, producing
 * byte-identical output for both branches.
 *
 * @param canonicalName The canonical resource name being signed.
 * @return The newline-delimited string-to-sign.
 */
private String stringToSign(String canonicalName) {
    // At most one of snapshotId/versionId is set (enforced at construction).
    String versionSegment = this.snapshotId == null ? this.versionId : this.snapshotId;
    // Fields preceding the (optional) encryption scope, in contract order.
    String head = String.join("\n",
        this.permissions == null ? "" : permissions,
        this.startTime == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(this.startTime),
        this.expiryTime == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(this.expiryTime),
        canonicalName,
        this.identifier == null ? "" : this.identifier,
        this.sasIpRange == null ? "" : this.sasIpRange.toString(),
        this.protocol == null ? "" : this.protocol.toString(),
        VERSION,
        resource,
        versionSegment == null ? "" : versionSegment);
    // Response-header override fields that close out the string-to-sign.
    String tail = String.join("\n",
        this.cacheControl == null ? "" : this.cacheControl,
        this.contentDisposition == null ? "" : this.contentDisposition,
        this.contentEncoding == null ? "" : this.contentEncoding,
        this.contentLanguage == null ? "" : this.contentLanguage,
        this.contentType == null ? "" : this.contentType);
    if (VERSION.compareTo(BlobServiceVersion.V2020_10_02.getVersion()) <= 0) {
        // Older service versions do not sign an encryption scope field at all.
        return head + "\n" + tail;
    }
    return head + "\n" + (this.encryptionScope == null ? "" : this.encryptionScope) + "\n" + tail;
}
} |
We don't have something like BlobServiceVersion available in the common package to compare against. I can create a similar enum for common, or I can drop a string literal in the conditional statement. Any preference? | private String stringToSign(final StorageSharedKeyCredential storageSharedKeyCredentials) {
return String.join("\n",
storageSharedKeyCredentials.getAccountName(),
AccountSasPermission.parse(this.permissions).toString(),
this.services,
resourceTypes,
this.startTime == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(this.startTime),
Constants.ISO_8601_UTC_DATE_FORMATTER.format(this.expiryTime),
this.sasIpRange == null ? "" : this.sasIpRange.toString(),
this.protocol == null ? "" : this.protocol.toString(),
VERSION,
this.encryptionScope == null ? "" : this.encryptionScope,
""
);
} | this.encryptionScope == null ? "" : this.encryptionScope, | private String stringToSign(final StorageSharedKeyCredential storageSharedKeyCredentials) {
return String.join("\n",
storageSharedKeyCredentials.getAccountName(),
AccountSasPermission.parse(this.permissions).toString(),
this.services,
resourceTypes,
this.startTime == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(this.startTime),
Constants.ISO_8601_UTC_DATE_FORMATTER.format(this.expiryTime),
this.sasIpRange == null ? "" : this.sasIpRange.toString(),
this.protocol == null ? "" : this.protocol.toString(),
VERSION,
""
);
} | class level JavaDocs.</p>
*
* @see <a href=https:
*
* @param storageSharedKeyCredentials Credentials for the storage account.
* @return A new {@link AccountSasQueryParameters} | class level JavaDocs.</p>
*
* @see <a href=https:
*
* @param storageSharedKeyCredentials Credentials for the storage account.
* @return A new {@link AccountSasQueryParameters} |
inline string should be fine. | private String stringToSign(final StorageSharedKeyCredential storageSharedKeyCredentials) {
return String.join("\n",
storageSharedKeyCredentials.getAccountName(),
AccountSasPermission.parse(this.permissions).toString(),
this.services,
resourceTypes,
this.startTime == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(this.startTime),
Constants.ISO_8601_UTC_DATE_FORMATTER.format(this.expiryTime),
this.sasIpRange == null ? "" : this.sasIpRange.toString(),
this.protocol == null ? "" : this.protocol.toString(),
VERSION,
this.encryptionScope == null ? "" : this.encryptionScope,
""
);
} | this.encryptionScope == null ? "" : this.encryptionScope, | private String stringToSign(final StorageSharedKeyCredential storageSharedKeyCredentials) {
return String.join("\n",
storageSharedKeyCredentials.getAccountName(),
AccountSasPermission.parse(this.permissions).toString(),
this.services,
resourceTypes,
this.startTime == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(this.startTime),
Constants.ISO_8601_UTC_DATE_FORMATTER.format(this.expiryTime),
this.sasIpRange == null ? "" : this.sasIpRange.toString(),
this.protocol == null ? "" : this.protocol.toString(),
VERSION,
""
);
} | class level JavaDocs.</p>
*
* @see <a href=https:
*
* @param storageSharedKeyCredentials Credentials for the storage account.
* @return A new {@link AccountSasQueryParameters} | class level JavaDocs.</p>
*
* @see <a href=https:
*
* @param storageSharedKeyCredentials Credentials for the storage account.
* @return A new {@link AccountSasQueryParameters} |
Done | private String stringToSign(String canonicalName) {
return String.join("\n",
this.permissions == null ? "" : permissions,
this.startTime == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(this.startTime),
this.expiryTime == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(this.expiryTime),
canonicalName,
this.identifier == null ? "" : this.identifier,
this.sasIpRange == null ? "" : this.sasIpRange.toString(),
this.protocol == null ? "" : this.protocol.toString(),
VERSION,
resource,
"", /* Version segment. */
"",
this.cacheControl == null ? "" : this.cacheControl,
this.contentDisposition == null ? "" : this.contentDisposition,
this.contentEncoding == null ? "" : this.contentEncoding,
this.contentLanguage == null ? "" : this.contentLanguage,
this.contentType == null ? "" : this.contentType
);
} | "", | private String stringToSign(String canonicalName) {
if (VERSION.compareTo(DataLakeServiceVersion.V2020_10_02.getVersion()) <= 0) {
return String.join("\n",
this.permissions == null ? "" : permissions,
this.startTime == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(this.startTime),
this.expiryTime == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(this.expiryTime),
canonicalName,
this.identifier == null ? "" : this.identifier,
this.sasIpRange == null ? "" : this.sasIpRange.toString(),
this.protocol == null ? "" : this.protocol.toString(),
VERSION,
resource,
"", /* Version segment. */
this.cacheControl == null ? "" : this.cacheControl,
this.contentDisposition == null ? "" : this.contentDisposition,
this.contentEncoding == null ? "" : this.contentEncoding,
this.contentLanguage == null ? "" : this.contentLanguage,
this.contentType == null ? "" : this.contentType
);
} else {
return String.join("\n",
this.permissions == null ? "" : permissions,
this.startTime == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(this.startTime),
this.expiryTime == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(this.expiryTime),
canonicalName,
this.identifier == null ? "" : this.identifier,
this.sasIpRange == null ? "" : this.sasIpRange.toString(),
this.protocol == null ? "" : this.protocol.toString(),
VERSION,
resource,
"", /* Version segment. */
"",
this.cacheControl == null ? "" : this.cacheControl,
this.contentDisposition == null ? "" : this.contentDisposition,
this.contentEncoding == null ? "" : this.contentEncoding,
this.contentLanguage == null ? "" : this.contentLanguage,
this.contentType == null ? "" : this.contentType
);
}
} | class DataLakeSasImplUtil {
/**
 * The SAS blob (datalake file) constant.
 */
private static final String SAS_BLOB_CONSTANT = "b";
/**
 * The SAS directory (datalake directory) constant.
 */
private static final String SAS_DIRECTORY_CONSTANT = "d";
/**
 * The SAS blob container (datalake file system) constant.
 */
private static final String SAS_CONTAINER_CONSTANT = "c";
private static final ClientLogger LOGGER = new ClientLogger(DataLakeSasImplUtil.class);
// Service version stamped into the SAS ("sv"); overridable via global configuration.
private static final String VERSION = Configuration.getGlobalConfiguration()
.get(Constants.PROPERTY_AZURE_STORAGE_SAS_SERVICE_VERSION, DataLakeServiceVersion.getLatest().getVersion());
// Values captured from DataLakeServiceSasSignatureValues at construction time.
private SasProtocol protocol;
private OffsetDateTime startTime;
private OffsetDateTime expiryTime;
private String permissions;
private SasIpRange sasIpRange;
private String fileSystemName;
private String pathName;
// Signed resource type; one of the constants above.
private String resource;
private String identifier;
// Response-header overrides returned when the SAS is used.
private String cacheControl;
private String contentDisposition;
private String contentEncoding;
private String contentLanguage;
private String contentType;
// Directory-targeted SAS details.
private Boolean isDirectory;
private Integer directoryDepth;
// Used only for user-delegation SAS tokens.
private String authorizedAadObjectId;
private String unauthorizedAadObjectId;
private String correlationId;
/**
 * Creates a new {@link DataLakeSasImplUtil} with the specified parameters
 *
 * @param sasValues {@link DataLakeServiceSasSignatureValues}
 * @param fileSystemName The file system name
 */
public DataLakeSasImplUtil(DataLakeServiceSasSignatureValues sasValues, String fileSystemName) {
    // File-system-level SAS: no path target.
    this(sasValues, fileSystemName, null, false);
}
/**
 * Creates a new {@link DataLakeSasImplUtil} with the specified parameters
 *
 * @param sasValues {@link DataLakeServiceSasSignatureValues}
 * @param fileSystemName The file system name
 * @param pathName The path name
 * @param isDirectory Whether or not the path points to a directory.
 */
public DataLakeSasImplUtil(DataLakeServiceSasSignatureValues sasValues, String fileSystemName, String pathName,
    boolean isDirectory) {
    Objects.requireNonNull(sasValues);
    this.protocol = sasValues.getProtocol();
    this.startTime = sasValues.getStartTime();
    this.expiryTime = sasValues.getExpiryTime();
    this.permissions = sasValues.getPermissions();
    this.sasIpRange = sasValues.getSasIpRange();
    this.fileSystemName = fileSystemName;
    this.pathName = pathName;
    this.identifier = sasValues.getIdentifier();
    this.cacheControl = sasValues.getCacheControl();
    this.contentDisposition = sasValues.getContentDisposition();
    this.contentEncoding = sasValues.getContentEncoding();
    this.contentLanguage = sasValues.getContentLanguage();
    this.contentType = sasValues.getContentType();
    // Delegation-SAS-only values.
    this.authorizedAadObjectId = sasValues.getPreauthorizedAgentObjectId();
    this.unauthorizedAadObjectId = sasValues.getAgentObjectId();
    this.correlationId = sasValues.getCorrelationId();
    this.isDirectory = isDirectory;
}
/**
 * Generates a Sas signed with a {@link StorageSharedKeyCredential}
 *
 * @param storageSharedKeyCredentials {@link StorageSharedKeyCredential}
 * @param context Additional context that is passed through the code when generating a SAS.
 * @return A String representing the Sas
 */
public String generateSas(StorageSharedKeyCredential storageSharedKeyCredentials, Context context) {
    StorageImplUtils.assertNotNull("storageSharedKeyCredentials", storageSharedKeyCredentials);
    ensureState();
    // Canonicalize, sign with the shared account key, then emit as query parameters.
    String signedString = stringToSign(getCanonicalName(storageSharedKeyCredentials.getAccountName()));
    StorageImplUtils.logStringToSign(LOGGER, signedString, context);
    return encode(null /* userDelegationKey */, storageSharedKeyCredentials.computeHmac256(signedString));
}
/**
 * Generates a Sas signed with a {@link UserDelegationKey}
 *
 * @param delegationKey {@link UserDelegationKey}
 * @param accountName The account name
 * @param context Additional context that is passed through the code when generating a SAS.
 * @return A String representing the Sas
 */
public String generateUserDelegationSas(UserDelegationKey delegationKey, String accountName, Context context) {
    StorageImplUtils.assertNotNull("delegationKey", delegationKey);
    StorageImplUtils.assertNotNull("accountName", accountName);
    ensureState();
    // Same flow as generateSas, but signed with the delegation key's value.
    String signedString = stringToSign(delegationKey, getCanonicalName(accountName));
    StorageImplUtils.logStringToSign(LOGGER, signedString, context);
    return encode(delegationKey, StorageImplUtils.computeHMac256(delegationKey.getValue(), signedString));
}
/**
 * Encodes a Sas from the values in this type.
 * @param userDelegationKey {@link UserDelegationKey}
 * @param signature The signature of the Sas.
 * @return A String representing the Sas.
 */
private String encode(UserDelegationKey userDelegationKey, String signature) {
    /*
    We should be url-encoding each key and each value, but because we know all the keys and values will encode to
    themselves, we cheat except for the signature value.
    */
    StringBuilder sb = new StringBuilder();
    tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_SERVICE_VERSION, VERSION);
    tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_PROTOCOL, this.protocol);
    tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_START_TIME, formatQueryParameterDate(this.startTime));
    tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_EXPIRY_TIME, formatQueryParameterDate(this.expiryTime));
    tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_IP_RANGE, this.sasIpRange);
    tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_SIGNED_IDENTIFIER, this.identifier);
    // User delegation SAS additionally carries the delegation-key metadata plus saoid/suoid/scid.
    if (userDelegationKey != null) {
        tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_SIGNED_OBJECT_ID,
            userDelegationKey.getSignedObjectId());
        tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_SIGNED_TENANT_ID,
            userDelegationKey.getSignedTenantId());
        tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_SIGNED_KEY_START,
            formatQueryParameterDate(userDelegationKey.getSignedStart()));
        tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_SIGNED_KEY_EXPIRY,
            formatQueryParameterDate(userDelegationKey.getSignedExpiry()));
        tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_SIGNED_KEY_SERVICE,
            userDelegationKey.getSignedService());
        tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_SIGNED_KEY_VERSION,
            userDelegationKey.getSignedVersion());
        /* Only parameters relevant for user delegation SAS. */
        tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_PREAUTHORIZED_AGENT_OBJECT_ID, this.authorizedAadObjectId);
        tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_AGENT_OBJECT_ID, this.unauthorizedAadObjectId);
        tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_CORRELATION_ID, this.correlationId);
    }
    tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_SIGNED_RESOURCE, this.resource);
    tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_SIGNED_PERMISSIONS, this.permissions);
    // Directory depth is only emitted for directory-scoped SAS (resource set in ensureState()).
    if (this.isDirectory) {
        tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_DIRECTORY_DEPTH, this.directoryDepth);
    }
    tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_SIGNATURE, signature);
    tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_CACHE_CONTROL, this.cacheControl);
    tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_CONTENT_DISPOSITION, this.contentDisposition);
    tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_CONTENT_ENCODING, this.contentEncoding);
    tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_CONTENT_LANGUAGE, this.contentLanguage);
    tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_CONTENT_TYPE, this.contentType);
    return sb.toString();
}
/**
 * Ensures that the builder's properties are in a consistent state.
 * 1. If there is no identifier set, ensure expiryTime and permissions are set.
 * 2. Resource name is chosen by:
 *    a. If "BlobName" is _not_ set, it is a container resource.
 *    b. Otherwise, if "SnapshotId" is set, it is a blob snapshot resource.
 *    c. Otherwise, if "VersionId" is set, it is a blob version resource.
 *    d. Otherwise, it is a blob resource.
 * 3. Reparse permissions depending on what the resource is. If it is an unrecognized resource, do nothing.
 * 4. Ensure saoid is not set when suoid is set and vice versa.
 *
 * Taken from:
 * https://docs.microsoft.com/en-us/rest/api/storageservices/constructing-a-service-sas
 * https://docs.microsoft.com/en-us/rest/api/storageservices/create-user-delegation-sas
 */
private void ensureState() {
    if (identifier == null) {
        if (expiryTime == null || permissions == null) {
            throw LOGGER.logExceptionAsError(new IllegalStateException("If identifier is not set, expiry time "
                + "and permissions must be set"));
        }
    }
    // Derive the signed resource type: container when no path, directory or blob otherwise.
    if (CoreUtils.isNullOrEmpty(pathName)) {
        resource = SAS_CONTAINER_CONSTANT;
    } else {
        if (isDirectory) {
            resource = SAS_DIRECTORY_CONSTANT;
            // sdd: the number of path segments, used by the service to scope directory SAS.
            this.directoryDepth = pathName.split("/").length;
        } else {
            resource = SAS_BLOB_CONSTANT;
        }
    }
    // Re-parse permissions so they serialize in the canonical order the service expects.
    if (permissions != null) {
        switch (resource) {
            case SAS_BLOB_CONSTANT:
            case SAS_DIRECTORY_CONSTANT:
                permissions = PathSasPermission.parse(permissions).toString();
                break;
            case SAS_CONTAINER_CONSTANT:
                permissions = FileSystemSasPermission.parse(permissions).toString();
                break;
            default:
                LOGGER.info("Not re-parsing permissions. Resource type '{}' is unknown.", resource);
                break;
        }
    }
    if (this.authorizedAadObjectId != null && this.unauthorizedAadObjectId != null) {
        throw LOGGER.logExceptionAsError(new IllegalStateException("agentObjectId and preauthorizedAgentObjectId "
            + "can not both be set."));
    }
}
/**
 * Computes the canonical name for a container or blob resource for SAS signing.
 * Backslashes in the path are normalized to forward slashes before signing.
 */
private String getCanonicalName(String account) {
    if (CoreUtils.isNullOrEmpty(pathName)) {
        return String.format("/blob/%s/%s", account, fileSystemName);
    }
    return String.format("/blob/%s/%s/%s", account, fileSystemName, pathName.replace("\\", "/"));
}
/**
 * Builds the user-delegation string-to-sign. Field count and order are dictated by the service's
 * REST signing contract for the signing {@code VERSION}; they must not be reordered or removed.
 */
private String stringToSign(final UserDelegationKey key, String canonicalName) {
    if (VERSION.compareTo(DataLakeServiceVersion.V2019_12_12.getVersion()) <= 0) {
        // Pre-2020 format: no saoid/suoid/scid fields.
        return String.join("\n",
            this.permissions == null ? "" : this.permissions,
            this.startTime == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(this.startTime),
            this.expiryTime == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(this.expiryTime),
            canonicalName,
            key.getSignedObjectId() == null ? "" : key.getSignedObjectId(),
            key.getSignedTenantId() == null ? "" : key.getSignedTenantId(),
            key.getSignedStart() == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(key.getSignedStart()),
            key.getSignedExpiry() == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(key.getSignedExpiry()),
            key.getSignedService() == null ? "" : key.getSignedService(),
            key.getSignedVersion() == null ? "" : key.getSignedVersion(),
            this.sasIpRange == null ? "" : this.sasIpRange.toString(),
            this.protocol == null ? "" : this.protocol.toString(),
            VERSION,
            resource,
            "", /* Version segment. */
            "", // NOTE(review): extra blank field in this branch only — presumably a reserved slot; confirm against the 2019-12-12 signing spec.
            this.cacheControl == null ? "" : this.cacheControl,
            this.contentDisposition == null ? "" : this.contentDisposition,
            this.contentEncoding == null ? "" : this.contentEncoding,
            this.contentLanguage == null ? "" : this.contentLanguage,
            this.contentType == null ? "" : this.contentType
        );
    } else {
        // Post-2019-12-12 format: adds saoid/suoid/scid between the key fields and the IP range.
        return String.join("\n",
            this.permissions == null ? "" : this.permissions,
            this.startTime == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(this.startTime),
            this.expiryTime == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(this.expiryTime),
            canonicalName,
            key.getSignedObjectId() == null ? "" : key.getSignedObjectId(),
            key.getSignedTenantId() == null ? "" : key.getSignedTenantId(),
            key.getSignedStart() == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(key.getSignedStart()),
            key.getSignedExpiry() == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(key.getSignedExpiry()),
            key.getSignedService() == null ? "" : key.getSignedService(),
            key.getSignedVersion() == null ? "" : key.getSignedVersion(),
            this.authorizedAadObjectId == null ? "" : this.authorizedAadObjectId,
            this.unauthorizedAadObjectId == null ? "" : this.unauthorizedAadObjectId,
            this.correlationId == null ? "" : this.correlationId,
            this.sasIpRange == null ? "" : this.sasIpRange.toString(),
            this.protocol == null ? "" : this.protocol.toString(),
            VERSION,
            resource,
            "", /* Version segment. */
            this.cacheControl == null ? "" : this.cacheControl,
            this.contentDisposition == null ? "" : this.contentDisposition,
            this.contentEncoding == null ? "" : this.contentEncoding,
            this.contentLanguage == null ? "" : this.contentLanguage,
            this.contentType == null ? "" : this.contentType
        );
    }
}
} | class DataLakeSasImplUtil {
/**
 * The SAS blob (datalake file) constant.
 */
private static final String SAS_BLOB_CONSTANT = "b";
/**
 * The SAS directory (datalake directory) constant.
 */
private static final String SAS_DIRECTORY_CONSTANT = "d";
/**
 * The SAS blob container (datalake file system) constant.
 */
private static final String SAS_CONTAINER_CONSTANT = "c";
private static final ClientLogger LOGGER = new ClientLogger(DataLakeSasImplUtil.class);
// Service version used for signing; can be overridden through global configuration.
private static final String VERSION = Configuration.getGlobalConfiguration()
    .get(Constants.PROPERTY_AZURE_STORAGE_SAS_SERVICE_VERSION, DataLakeServiceVersion.getLatest().getVersion());
private SasProtocol protocol;
private OffsetDateTime startTime;
private OffsetDateTime expiryTime;
private String permissions;
private SasIpRange sasIpRange;
private String fileSystemName;
private String pathName;
// Signed resource type ("b", "d" or "c"); derived in ensureState(), never supplied by callers.
private String resource;
private String identifier;
private String cacheControl;
private String contentDisposition;
private String contentEncoding;
private String contentLanguage;
private String contentType;
private Boolean isDirectory;
// Number of path segments; only populated for directory SAS.
private Integer directoryDepth;
// saoid: pre-authorized AAD object id (user delegation SAS only); mutually exclusive with suoid.
private String authorizedAadObjectId;
// suoid: unauthorized AAD object id (user delegation SAS only); mutually exclusive with saoid.
private String unauthorizedAadObjectId;
private String correlationId;
/**
 * Creates a new {@link DataLakeSasImplUtil} with the specified parameters
 *
 * @param sasValues {@link DataLakeServiceSasSignatureValues}
 * @param fileSystemName The file system name
 */
public DataLakeSasImplUtil(DataLakeServiceSasSignatureValues sasValues, String fileSystemName) {
    this(sasValues, fileSystemName, null, false);
}
/**
 * Creates a new {@link DataLakeSasImplUtil} with the specified parameters
 *
 * @param sasValues {@link DataLakeServiceSasSignatureValues}
 * @param fileSystemName The file system name
 * @param pathName The path name
 * @param isDirectory Whether or not the path points to a directory.
 */
public DataLakeSasImplUtil(DataLakeServiceSasSignatureValues sasValues, String fileSystemName, String pathName,
    boolean isDirectory) {
    Objects.requireNonNull(sasValues);
    this.protocol = sasValues.getProtocol();
    this.startTime = sasValues.getStartTime();
    this.expiryTime = sasValues.getExpiryTime();
    this.permissions = sasValues.getPermissions();
    this.sasIpRange = sasValues.getSasIpRange();
    this.fileSystemName = fileSystemName;
    this.pathName = pathName;
    this.identifier = sasValues.getIdentifier();
    this.cacheControl = sasValues.getCacheControl();
    this.contentDisposition = sasValues.getContentDisposition();
    this.contentEncoding = sasValues.getContentEncoding();
    this.contentLanguage = sasValues.getContentLanguage();
    this.contentType = sasValues.getContentType();
    this.authorizedAadObjectId = sasValues.getPreauthorizedAgentObjectId();
    this.unauthorizedAadObjectId = sasValues.getAgentObjectId();
    this.correlationId = sasValues.getCorrelationId();
    this.isDirectory = isDirectory;
}
/**
 * Generates a SAS token signed with a {@link StorageSharedKeyCredential}.
 *
 * @param storageSharedKeyCredentials {@link StorageSharedKeyCredential}
 * @param context Additional context that is passed through the code when generating a SAS.
 * @return A String representing the Sas
 */
public String generateSas(StorageSharedKeyCredential storageSharedKeyCredentials, Context context) {
    StorageImplUtils.assertNotNull("storageSharedKeyCredentials", storageSharedKeyCredentials);
    ensureState();
    // Build the string-to-sign from the canonical resource name, then HMAC it with the account key.
    final String toSign = stringToSign(getCanonicalName(storageSharedKeyCredentials.getAccountName()));
    StorageImplUtils.logStringToSign(LOGGER, toSign, context);
    return encode(null /* userDelegationKey */, storageSharedKeyCredentials.computeHmac256(toSign));
}
/**
 * Generates a SAS token signed with a {@link UserDelegationKey}.
 *
 * @param delegationKey {@link UserDelegationKey}
 * @param accountName The account name
 * @param context Additional context that is passed through the code when generating a SAS.
 * @return A String representing the Sas
 */
public String generateUserDelegationSas(UserDelegationKey delegationKey, String accountName, Context context) {
    StorageImplUtils.assertNotNull("delegationKey", delegationKey);
    StorageImplUtils.assertNotNull("accountName", accountName);
    ensureState();
    // The delegation key (not the account key) signs the canonical resource name.
    final String toSign = stringToSign(delegationKey, getCanonicalName(accountName));
    StorageImplUtils.logStringToSign(LOGGER, toSign, context);
    return encode(delegationKey, StorageImplUtils.computeHMac256(delegationKey.getValue(), toSign));
}
/**
 * Encodes a Sas from the values in this type.
 * @param userDelegationKey {@link UserDelegationKey}
 * @param signature The signature of the Sas.
 * @return A String representing the Sas.
 */
private String encode(UserDelegationKey userDelegationKey, String signature) {
    /*
    We should be url-encoding each key and each value, but because we know all the keys and values will encode to
    themselves, we cheat except for the signature value.
    */
    StringBuilder sb = new StringBuilder();
    tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_SERVICE_VERSION, VERSION);
    tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_PROTOCOL, this.protocol);
    tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_START_TIME, formatQueryParameterDate(this.startTime));
    tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_EXPIRY_TIME, formatQueryParameterDate(this.expiryTime));
    tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_IP_RANGE, this.sasIpRange);
    tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_SIGNED_IDENTIFIER, this.identifier);
    // User delegation SAS additionally carries the delegation-key metadata plus saoid/suoid/scid.
    if (userDelegationKey != null) {
        tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_SIGNED_OBJECT_ID,
            userDelegationKey.getSignedObjectId());
        tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_SIGNED_TENANT_ID,
            userDelegationKey.getSignedTenantId());
        tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_SIGNED_KEY_START,
            formatQueryParameterDate(userDelegationKey.getSignedStart()));
        tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_SIGNED_KEY_EXPIRY,
            formatQueryParameterDate(userDelegationKey.getSignedExpiry()));
        tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_SIGNED_KEY_SERVICE,
            userDelegationKey.getSignedService());
        tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_SIGNED_KEY_VERSION,
            userDelegationKey.getSignedVersion());
        /* Only parameters relevant for user delegation SAS. */
        tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_PREAUTHORIZED_AGENT_OBJECT_ID, this.authorizedAadObjectId);
        tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_AGENT_OBJECT_ID, this.unauthorizedAadObjectId);
        tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_CORRELATION_ID, this.correlationId);
    }
    tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_SIGNED_RESOURCE, this.resource);
    tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_SIGNED_PERMISSIONS, this.permissions);
    // Directory depth is only emitted for directory-scoped SAS (resource set in ensureState()).
    if (this.isDirectory) {
        tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_DIRECTORY_DEPTH, this.directoryDepth);
    }
    tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_SIGNATURE, signature);
    tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_CACHE_CONTROL, this.cacheControl);
    tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_CONTENT_DISPOSITION, this.contentDisposition);
    tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_CONTENT_ENCODING, this.contentEncoding);
    tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_CONTENT_LANGUAGE, this.contentLanguage);
    tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_CONTENT_TYPE, this.contentType);
    return sb.toString();
}
/**
 * Ensures that the builder's properties are in a consistent state.
 * 1. If there is no identifier set, ensure expiryTime and permissions are set.
 * 2. Resource name is chosen by:
 *    a. If "BlobName" is _not_ set, it is a container resource.
 *    b. Otherwise, if "SnapshotId" is set, it is a blob snapshot resource.
 *    c. Otherwise, if "VersionId" is set, it is a blob version resource.
 *    d. Otherwise, it is a blob resource.
 * 3. Reparse permissions depending on what the resource is. If it is an unrecognized resource, do nothing.
 * 4. Ensure saoid is not set when suoid is set and vice versa.
 *
 * Taken from:
 * https://docs.microsoft.com/en-us/rest/api/storageservices/constructing-a-service-sas
 * https://docs.microsoft.com/en-us/rest/api/storageservices/create-user-delegation-sas
 */
private void ensureState() {
    if (identifier == null) {
        if (expiryTime == null || permissions == null) {
            throw LOGGER.logExceptionAsError(new IllegalStateException("If identifier is not set, expiry time "
                + "and permissions must be set"));
        }
    }
    // Derive the signed resource type: container when no path, directory or blob otherwise.
    if (CoreUtils.isNullOrEmpty(pathName)) {
        resource = SAS_CONTAINER_CONSTANT;
    } else {
        if (isDirectory) {
            resource = SAS_DIRECTORY_CONSTANT;
            // sdd: the number of path segments, used by the service to scope directory SAS.
            this.directoryDepth = pathName.split("/").length;
        } else {
            resource = SAS_BLOB_CONSTANT;
        }
    }
    // Re-parse permissions so they serialize in the canonical order the service expects.
    if (permissions != null) {
        switch (resource) {
            case SAS_BLOB_CONSTANT:
            case SAS_DIRECTORY_CONSTANT:
                permissions = PathSasPermission.parse(permissions).toString();
                break;
            case SAS_CONTAINER_CONSTANT:
                permissions = FileSystemSasPermission.parse(permissions).toString();
                break;
            default:
                LOGGER.info("Not re-parsing permissions. Resource type '{}' is unknown.", resource);
                break;
        }
    }
    if (this.authorizedAadObjectId != null && this.unauthorizedAadObjectId != null) {
        throw LOGGER.logExceptionAsError(new IllegalStateException("agentObjectId and preauthorizedAgentObjectId "
            + "can not both be set."));
    }
}
/**
 * Computes the canonical name for a container or blob resource for SAS signing.
 * Backslashes in the path are normalized to forward slashes before signing.
 */
private String getCanonicalName(String account) {
    if (CoreUtils.isNullOrEmpty(pathName)) {
        return String.format("/blob/%s/%s", account, fileSystemName);
    }
    return String.format("/blob/%s/%s/%s", account, fileSystemName, pathName.replace("\\", "/"));
}
/**
 * Builds the user-delegation string-to-sign. The field list and its order are dictated by the
 * Azure Storage REST signing contract for the signing {@code VERSION}; fields must not be
 * reordered or removed. Three formats exist:
 * - versions up to 2019-12-12: no saoid/suoid/scid fields;
 * - versions up to 2020-10-02: adds saoid/suoid/scid;
 * - later versions: additionally reserves an (empty) encryption-scope field.
 *
 * @param key the user delegation key whose signed metadata is embedded in the string-to-sign
 * @param canonicalName the canonical resource name being signed
 * @return the newline-joined string-to-sign
 */
private String stringToSign(final UserDelegationKey key, String canonicalName) {
    // Fix: the original used a bare "} if (...)" fall-through, which only behaved like "else if"
    // because every branch returns; made the chain explicit. Behavior is unchanged.
    if (VERSION.compareTo(DataLakeServiceVersion.V2019_12_12.getVersion()) <= 0) {
        return String.join("\n",
            this.permissions == null ? "" : this.permissions,
            this.startTime == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(this.startTime),
            this.expiryTime == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(this.expiryTime),
            canonicalName,
            key.getSignedObjectId() == null ? "" : key.getSignedObjectId(),
            key.getSignedTenantId() == null ? "" : key.getSignedTenantId(),
            key.getSignedStart() == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(key.getSignedStart()),
            key.getSignedExpiry() == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(key.getSignedExpiry()),
            key.getSignedService() == null ? "" : key.getSignedService(),
            key.getSignedVersion() == null ? "" : key.getSignedVersion(),
            this.sasIpRange == null ? "" : this.sasIpRange.toString(),
            this.protocol == null ? "" : this.protocol.toString(),
            VERSION,
            resource,
            "", /* Version segment. */
            this.cacheControl == null ? "" : this.cacheControl,
            this.contentDisposition == null ? "" : this.contentDisposition,
            this.contentEncoding == null ? "" : this.contentEncoding,
            this.contentLanguage == null ? "" : this.contentLanguage,
            this.contentType == null ? "" : this.contentType
        );
    } else if (VERSION.compareTo(DataLakeServiceVersion.V2020_10_02.getVersion()) <= 0) {
        return String.join("\n",
            this.permissions == null ? "" : this.permissions,
            this.startTime == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(this.startTime),
            this.expiryTime == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(this.expiryTime),
            canonicalName,
            key.getSignedObjectId() == null ? "" : key.getSignedObjectId(),
            key.getSignedTenantId() == null ? "" : key.getSignedTenantId(),
            key.getSignedStart() == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(key.getSignedStart()),
            key.getSignedExpiry() == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(key.getSignedExpiry()),
            key.getSignedService() == null ? "" : key.getSignedService(),
            key.getSignedVersion() == null ? "" : key.getSignedVersion(),
            this.authorizedAadObjectId == null ? "" : this.authorizedAadObjectId,
            this.unauthorizedAadObjectId == null ? "" : this.unauthorizedAadObjectId,
            this.correlationId == null ? "" : this.correlationId,
            this.sasIpRange == null ? "" : this.sasIpRange.toString(),
            this.protocol == null ? "" : this.protocol.toString(),
            VERSION,
            resource,
            "", /* Version segment. */
            this.cacheControl == null ? "" : this.cacheControl,
            this.contentDisposition == null ? "" : this.contentDisposition,
            this.contentEncoding == null ? "" : this.contentEncoding,
            this.contentLanguage == null ? "" : this.contentLanguage,
            this.contentType == null ? "" : this.contentType
        );
    } else {
        return String.join("\n",
            this.permissions == null ? "" : this.permissions,
            this.startTime == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(this.startTime),
            this.expiryTime == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(this.expiryTime),
            canonicalName,
            key.getSignedObjectId() == null ? "" : key.getSignedObjectId(),
            key.getSignedTenantId() == null ? "" : key.getSignedTenantId(),
            key.getSignedStart() == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(key.getSignedStart()),
            key.getSignedExpiry() == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(key.getSignedExpiry()),
            key.getSignedService() == null ? "" : key.getSignedService(),
            key.getSignedVersion() == null ? "" : key.getSignedVersion(),
            this.authorizedAadObjectId == null ? "" : this.authorizedAadObjectId,
            this.unauthorizedAadObjectId == null ? "" : this.unauthorizedAadObjectId,
            this.correlationId == null ? "" : this.correlationId,
            this.sasIpRange == null ? "" : this.sasIpRange.toString(),
            this.protocol == null ? "" : this.protocol.toString(),
            VERSION,
            resource,
            "", /* Version segment. */
            "", /* Encryption scope. */
            this.cacheControl == null ? "" : this.cacheControl,
            this.contentDisposition == null ? "" : this.contentDisposition,
            this.contentEncoding == null ? "" : this.contentEncoding,
            this.contentLanguage == null ? "" : this.contentLanguage,
            this.contentType == null ? "" : this.contentType
        );
    }
}
} |
Done | private String stringToSign(final UserDelegationKey key, String canonicalName) {
if (VERSION.compareTo(DataLakeServiceVersion.V2019_12_12.getVersion()) <= 0) {
return String.join("\n",
this.permissions == null ? "" : this.permissions,
this.startTime == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(this.startTime),
this.expiryTime == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(this.expiryTime),
canonicalName,
key.getSignedObjectId() == null ? "" : key.getSignedObjectId(),
key.getSignedTenantId() == null ? "" : key.getSignedTenantId(),
key.getSignedStart() == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(key.getSignedStart()),
key.getSignedExpiry() == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(key.getSignedExpiry()),
key.getSignedService() == null ? "" : key.getSignedService(),
key.getSignedVersion() == null ? "" : key.getSignedVersion(),
this.sasIpRange == null ? "" : this.sasIpRange.toString(),
this.protocol == null ? "" : this.protocol.toString(),
VERSION,
resource,
"", /* Version segment. */
"",
this.cacheControl == null ? "" : this.cacheControl,
this.contentDisposition == null ? "" : this.contentDisposition,
this.contentEncoding == null ? "" : this.contentEncoding,
this.contentLanguage == null ? "" : this.contentLanguage,
this.contentType == null ? "" : this.contentType
);
} else {
return String.join("\n",
this.permissions == null ? "" : this.permissions,
this.startTime == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(this.startTime),
this.expiryTime == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(this.expiryTime),
canonicalName,
key.getSignedObjectId() == null ? "" : key.getSignedObjectId(),
key.getSignedTenantId() == null ? "" : key.getSignedTenantId(),
key.getSignedStart() == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(key.getSignedStart()),
key.getSignedExpiry() == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(key.getSignedExpiry()),
key.getSignedService() == null ? "" : key.getSignedService(),
key.getSignedVersion() == null ? "" : key.getSignedVersion(),
this.authorizedAadObjectId == null ? "" : this.authorizedAadObjectId,
this.unauthorizedAadObjectId == null ? "" : this.unauthorizedAadObjectId,
this.correlationId == null ? "" : this.correlationId,
this.sasIpRange == null ? "" : this.sasIpRange.toString(),
this.protocol == null ? "" : this.protocol.toString(),
VERSION,
resource,
"", /* Version segment. */
this.cacheControl == null ? "" : this.cacheControl,
this.contentDisposition == null ? "" : this.contentDisposition,
this.contentEncoding == null ? "" : this.contentEncoding,
this.contentLanguage == null ? "" : this.contentLanguage,
this.contentType == null ? "" : this.contentType
);
}
} | "", | private String stringToSign(final UserDelegationKey key, String canonicalName) {
if (VERSION.compareTo(DataLakeServiceVersion.V2019_12_12.getVersion()) <= 0) {
return String.join("\n",
this.permissions == null ? "" : this.permissions,
this.startTime == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(this.startTime),
this.expiryTime == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(this.expiryTime),
canonicalName,
key.getSignedObjectId() == null ? "" : key.getSignedObjectId(),
key.getSignedTenantId() == null ? "" : key.getSignedTenantId(),
key.getSignedStart() == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(key.getSignedStart()),
key.getSignedExpiry() == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(key.getSignedExpiry()),
key.getSignedService() == null ? "" : key.getSignedService(),
key.getSignedVersion() == null ? "" : key.getSignedVersion(),
this.sasIpRange == null ? "" : this.sasIpRange.toString(),
this.protocol == null ? "" : this.protocol.toString(),
VERSION,
resource,
"", /* Version segment. */
this.cacheControl == null ? "" : this.cacheControl,
this.contentDisposition == null ? "" : this.contentDisposition,
this.contentEncoding == null ? "" : this.contentEncoding,
this.contentLanguage == null ? "" : this.contentLanguage,
this.contentType == null ? "" : this.contentType
);
} if (VERSION.compareTo(DataLakeServiceVersion.V2020_10_02.getVersion()) <= 0) {
return String.join("\n",
this.permissions == null ? "" : this.permissions,
this.startTime == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(this.startTime),
this.expiryTime == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(this.expiryTime),
canonicalName,
key.getSignedObjectId() == null ? "" : key.getSignedObjectId(),
key.getSignedTenantId() == null ? "" : key.getSignedTenantId(),
key.getSignedStart() == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(key.getSignedStart()),
key.getSignedExpiry() == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(key.getSignedExpiry()),
key.getSignedService() == null ? "" : key.getSignedService(),
key.getSignedVersion() == null ? "" : key.getSignedVersion(),
this.authorizedAadObjectId == null ? "" : this.authorizedAadObjectId,
this.unauthorizedAadObjectId == null ? "" : this.unauthorizedAadObjectId,
this.correlationId == null ? "" : this.correlationId,
this.sasIpRange == null ? "" : this.sasIpRange.toString(),
this.protocol == null ? "" : this.protocol.toString(),
VERSION,
resource,
"", /* Version segment. */
this.cacheControl == null ? "" : this.cacheControl,
this.contentDisposition == null ? "" : this.contentDisposition,
this.contentEncoding == null ? "" : this.contentEncoding,
this.contentLanguage == null ? "" : this.contentLanguage,
this.contentType == null ? "" : this.contentType
);
}
else {
return String.join("\n",
this.permissions == null ? "" : this.permissions,
this.startTime == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(this.startTime),
this.expiryTime == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(this.expiryTime),
canonicalName,
key.getSignedObjectId() == null ? "" : key.getSignedObjectId(),
key.getSignedTenantId() == null ? "" : key.getSignedTenantId(),
key.getSignedStart() == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(key.getSignedStart()),
key.getSignedExpiry() == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(key.getSignedExpiry()),
key.getSignedService() == null ? "" : key.getSignedService(),
key.getSignedVersion() == null ? "" : key.getSignedVersion(),
this.authorizedAadObjectId == null ? "" : this.authorizedAadObjectId,
this.unauthorizedAadObjectId == null ? "" : this.unauthorizedAadObjectId,
this.correlationId == null ? "" : this.correlationId,
this.sasIpRange == null ? "" : this.sasIpRange.toString(),
this.protocol == null ? "" : this.protocol.toString(),
VERSION,
resource,
"", /* Version segment. */
"", /* Encryption scope. */
this.cacheControl == null ? "" : this.cacheControl,
this.contentDisposition == null ? "" : this.contentDisposition,
this.contentEncoding == null ? "" : this.contentEncoding,
this.contentLanguage == null ? "" : this.contentLanguage,
this.contentType == null ? "" : this.contentType
);
}
} | class DataLakeSasImplUtil {
/**
 * The SAS blob (datalake file) constant.
 */
private static final String SAS_BLOB_CONSTANT = "b";
/**
 * The SAS directory (datalake directory) constant.
 */
private static final String SAS_DIRECTORY_CONSTANT = "d";
/**
 * The SAS blob container (datalake file system) constant.
 */
private static final String SAS_CONTAINER_CONSTANT = "c";
private static final ClientLogger LOGGER = new ClientLogger(DataLakeSasImplUtil.class);
// Service version used for signing; can be overridden through global configuration.
private static final String VERSION = Configuration.getGlobalConfiguration()
    .get(Constants.PROPERTY_AZURE_STORAGE_SAS_SERVICE_VERSION, DataLakeServiceVersion.getLatest().getVersion());
private SasProtocol protocol;
private OffsetDateTime startTime;
private OffsetDateTime expiryTime;
private String permissions;
private SasIpRange sasIpRange;
private String fileSystemName;
private String pathName;
// Signed resource type ("b", "d" or "c"); derived in ensureState(), never supplied by callers.
private String resource;
private String identifier;
private String cacheControl;
private String contentDisposition;
private String contentEncoding;
private String contentLanguage;
private String contentType;
private Boolean isDirectory;
// Number of path segments; only populated for directory SAS.
private Integer directoryDepth;
// saoid: pre-authorized AAD object id (user delegation SAS only); mutually exclusive with suoid.
private String authorizedAadObjectId;
// suoid: unauthorized AAD object id (user delegation SAS only); mutually exclusive with saoid.
private String unauthorizedAadObjectId;
private String correlationId;
/**
 * Creates a new {@link DataLakeSasImplUtil} with the specified parameters
 *
 * @param sasValues {@link DataLakeServiceSasSignatureValues}
 * @param fileSystemName The file system name
 */
public DataLakeSasImplUtil(DataLakeServiceSasSignatureValues sasValues, String fileSystemName) {
    this(sasValues, fileSystemName, null, false);
}
/**
 * Creates a new {@link DataLakeSasImplUtil} with the specified parameters
 *
 * @param sasValues {@link DataLakeServiceSasSignatureValues}
 * @param fileSystemName The file system name
 * @param pathName The path name
 * @param isDirectory Whether or not the path points to a directory.
 */
public DataLakeSasImplUtil(DataLakeServiceSasSignatureValues sasValues, String fileSystemName, String pathName,
    boolean isDirectory) {
    Objects.requireNonNull(sasValues);
    this.protocol = sasValues.getProtocol();
    this.startTime = sasValues.getStartTime();
    this.expiryTime = sasValues.getExpiryTime();
    this.permissions = sasValues.getPermissions();
    this.sasIpRange = sasValues.getSasIpRange();
    this.fileSystemName = fileSystemName;
    this.pathName = pathName;
    this.identifier = sasValues.getIdentifier();
    this.cacheControl = sasValues.getCacheControl();
    this.contentDisposition = sasValues.getContentDisposition();
    this.contentEncoding = sasValues.getContentEncoding();
    this.contentLanguage = sasValues.getContentLanguage();
    this.contentType = sasValues.getContentType();
    this.authorizedAadObjectId = sasValues.getPreauthorizedAgentObjectId();
    this.unauthorizedAadObjectId = sasValues.getAgentObjectId();
    this.correlationId = sasValues.getCorrelationId();
    this.isDirectory = isDirectory;
}
/**
 * Generates a Sas signed with a {@link StorageSharedKeyCredential}
 *
 * @param storageSharedKeyCredentials {@link StorageSharedKeyCredential}
 * @param context Additional context that is passed through the code when generating a SAS.
 * @return A String representing the Sas
 */
public String generateSas(StorageSharedKeyCredential storageSharedKeyCredentials, Context context) {
    StorageImplUtils.assertNotNull("storageSharedKeyCredentials", storageSharedKeyCredentials);
    ensureState();
    // The canonical name identifies the signed resource; the account key produces the HMAC signature.
    final String canonicalName = getCanonicalName(storageSharedKeyCredentials.getAccountName());
    final String stringToSign = stringToSign(canonicalName);
    StorageImplUtils.logStringToSign(LOGGER, stringToSign, context);
    final String signature = storageSharedKeyCredentials.computeHmac256(stringToSign);
    return encode(null /* userDelegationKey */, signature);
}
/**
* Generates a Sas signed with a {@link UserDelegationKey}
*
* @param delegationKey {@link UserDelegationKey}
* @param accountName The account name
* @param context Additional context that is passed through the code when generating a SAS.
* @return A String representing the Sas
*/
public String generateUserDelegationSas(UserDelegationKey delegationKey, String accountName, Context context) {
StorageImplUtils.assertNotNull("delegationKey", delegationKey);
StorageImplUtils.assertNotNull("accountName", accountName);
ensureState();
final String canonicalName = getCanonicalName(accountName);
final String stringToSign = stringToSign(delegationKey, canonicalName);
StorageImplUtils.logStringToSign(LOGGER, stringToSign, context);
String signature = StorageImplUtils.computeHMac256(delegationKey.getValue(), stringToSign);
return encode(delegationKey, signature);
}
/**
* Encodes a Sas from the values in this type.
* @param userDelegationKey {@link UserDelegationKey}
* @param signature The signature of the Sas.
* @return A String representing the Sas.
*/
private String encode(UserDelegationKey userDelegationKey, String signature) {
/*
We should be url-encoding each key and each value, but because we know all the keys and values will encode to
themselves, we cheat except for the signature value.
*/
StringBuilder sb = new StringBuilder();
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_SERVICE_VERSION, VERSION);
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_PROTOCOL, this.protocol);
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_START_TIME, formatQueryParameterDate(this.startTime));
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_EXPIRY_TIME, formatQueryParameterDate(this.expiryTime));
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_IP_RANGE, this.sasIpRange);
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_SIGNED_IDENTIFIER, this.identifier);
if (userDelegationKey != null) {
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_SIGNED_OBJECT_ID,
userDelegationKey.getSignedObjectId());
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_SIGNED_TENANT_ID,
userDelegationKey.getSignedTenantId());
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_SIGNED_KEY_START,
formatQueryParameterDate(userDelegationKey.getSignedStart()));
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_SIGNED_KEY_EXPIRY,
formatQueryParameterDate(userDelegationKey.getSignedExpiry()));
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_SIGNED_KEY_SERVICE,
userDelegationKey.getSignedService());
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_SIGNED_KEY_VERSION,
userDelegationKey.getSignedVersion());
/* Only parameters relevant for user delegation SAS. */
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_PREAUTHORIZED_AGENT_OBJECT_ID, this.authorizedAadObjectId);
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_AGENT_OBJECT_ID, this.unauthorizedAadObjectId);
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_CORRELATION_ID, this.correlationId);
}
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_SIGNED_RESOURCE, this.resource);
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_SIGNED_PERMISSIONS, this.permissions);
if (this.isDirectory) {
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_DIRECTORY_DEPTH, this.directoryDepth);
}
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_SIGNATURE, signature);
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_CACHE_CONTROL, this.cacheControl);
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_CONTENT_DISPOSITION, this.contentDisposition);
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_CONTENT_ENCODING, this.contentEncoding);
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_CONTENT_LANGUAGE, this.contentLanguage);
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_CONTENT_TYPE, this.contentType);
return sb.toString();
}
/**
* Ensures that the builder's properties are in a consistent state.
* 1. If there is no identifier set, ensure expiryTime and permissions are set.
* 2. Resource name is chosen by:
* a. If "BlobName" is _not_ set, it is a container resource.
* b. Otherwise, if "SnapshotId" is set, it is a blob snapshot resource.
* c. Otherwise, if "VersionId" is set, it is a blob version resource.
* d. Otherwise, it is a blob resource.
* 3. Reparse permissions depending on what the resource is. If it is an unrecognized resource, do nothing.
* 4. Ensure saoid is not set when suoid is set and vice versa.
*
* Taken from:
* https:
* https:
*/
private void ensureState() {
if (identifier == null) {
if (expiryTime == null || permissions == null) {
throw LOGGER.logExceptionAsError(new IllegalStateException("If identifier is not set, expiry time "
+ "and permissions must be set"));
}
}
if (CoreUtils.isNullOrEmpty(pathName)) {
resource = SAS_CONTAINER_CONSTANT;
} else {
if (isDirectory) {
resource = SAS_DIRECTORY_CONSTANT;
this.directoryDepth = pathName.split("/").length;
} else {
resource = SAS_BLOB_CONSTANT;
}
}
if (permissions != null) {
switch (resource) {
case SAS_BLOB_CONSTANT:
case SAS_DIRECTORY_CONSTANT:
permissions = PathSasPermission.parse(permissions).toString();
break;
case SAS_CONTAINER_CONSTANT:
permissions = FileSystemSasPermission.parse(permissions).toString();
break;
default:
LOGGER.info("Not re-parsing permissions. Resource type '{}' is unknown.", resource);
break;
}
}
if (this.authorizedAadObjectId != null && this.unauthorizedAadObjectId != null) {
throw LOGGER.logExceptionAsError(new IllegalStateException("agentObjectId and preauthorizedAgentObjectId "
+ "can not both be set."));
}
}
/**
* Computes the canonical name for a container or blob resource for SAS signing.
*/
private String getCanonicalName(String account) {
return CoreUtils.isNullOrEmpty(pathName)
? String.format("/blob/%s/%s", account, fileSystemName)
: String.format("/blob/%s/%s/%s", account, fileSystemName, pathName.replace("\\", "/"));
}
private String stringToSign(String canonicalName) {
return String.join("\n",
this.permissions == null ? "" : permissions,
this.startTime == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(this.startTime),
this.expiryTime == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(this.expiryTime),
canonicalName,
this.identifier == null ? "" : this.identifier,
this.sasIpRange == null ? "" : this.sasIpRange.toString(),
this.protocol == null ? "" : this.protocol.toString(),
VERSION,
resource,
"", /* Version segment. */
"",
this.cacheControl == null ? "" : this.cacheControl,
this.contentDisposition == null ? "" : this.contentDisposition,
this.contentEncoding == null ? "" : this.contentEncoding,
this.contentLanguage == null ? "" : this.contentLanguage,
this.contentType == null ? "" : this.contentType
);
}
} | class DataLakeSasImplUtil {
/**
* The SAS blob (datalake file) constant.
*/
private static final String SAS_BLOB_CONSTANT = "b";
/**
* The SAS directory (datalake directory) constant.
*/
private static final String SAS_DIRECTORY_CONSTANT = "d";
/**
* The SAS blob container (datalake file system) constant.
*/
private static final String SAS_CONTAINER_CONSTANT = "c";
private static final ClientLogger LOGGER = new ClientLogger(DataLakeSasImplUtil.class);
private static final String VERSION = Configuration.getGlobalConfiguration()
.get(Constants.PROPERTY_AZURE_STORAGE_SAS_SERVICE_VERSION, DataLakeServiceVersion.getLatest().getVersion());
private SasProtocol protocol;
private OffsetDateTime startTime;
private OffsetDateTime expiryTime;
private String permissions;
private SasIpRange sasIpRange;
private String fileSystemName;
private String pathName;
private String resource;
private String identifier;
private String cacheControl;
private String contentDisposition;
private String contentEncoding;
private String contentLanguage;
private String contentType;
private Boolean isDirectory;
private Integer directoryDepth;
private String authorizedAadObjectId;
private String unauthorizedAadObjectId;
private String correlationId;
/**
* Creates a new {@link DataLakeSasImplUtil} with the specified parameters
*
* @param sasValues {@link DataLakeServiceSasSignatureValues}
* @param fileSystemName The file system name
*/
public DataLakeSasImplUtil(DataLakeServiceSasSignatureValues sasValues, String fileSystemName) {
this(sasValues, fileSystemName, null, false);
}
/**
* Creates a new {@link DataLakeSasImplUtil} with the specified parameters
*
* @param sasValues {@link DataLakeServiceSasSignatureValues}
* @param fileSystemName The file system name
* @param pathName The path name
* @param isDirectory Whether or not the path points to a directory.
*/
public DataLakeSasImplUtil(DataLakeServiceSasSignatureValues sasValues, String fileSystemName, String pathName,
boolean isDirectory) {
Objects.requireNonNull(sasValues);
this.protocol = sasValues.getProtocol();
this.startTime = sasValues.getStartTime();
this.expiryTime = sasValues.getExpiryTime();
this.permissions = sasValues.getPermissions();
this.sasIpRange = sasValues.getSasIpRange();
this.fileSystemName = fileSystemName;
this.pathName = pathName;
this.identifier = sasValues.getIdentifier();
this.cacheControl = sasValues.getCacheControl();
this.contentDisposition = sasValues.getContentDisposition();
this.contentEncoding = sasValues.getContentEncoding();
this.contentLanguage = sasValues.getContentLanguage();
this.contentType = sasValues.getContentType();
this.authorizedAadObjectId = sasValues.getPreauthorizedAgentObjectId();
this.unauthorizedAadObjectId = sasValues.getAgentObjectId();
this.correlationId = sasValues.getCorrelationId();
this.isDirectory = isDirectory;
}
/**
* Generates a Sas signed with a {@link StorageSharedKeyCredential}
*
* @param storageSharedKeyCredentials {@link StorageSharedKeyCredential}
* @param context Additional context that is passed through the code when generating a SAS.
* @return A String representing the Sas
*/
public String generateSas(StorageSharedKeyCredential storageSharedKeyCredentials, Context context) {
StorageImplUtils.assertNotNull("storageSharedKeyCredentials", storageSharedKeyCredentials);
ensureState();
final String canonicalName = getCanonicalName(storageSharedKeyCredentials.getAccountName());
final String stringToSign = stringToSign(canonicalName);
StorageImplUtils.logStringToSign(LOGGER, stringToSign, context);
final String signature = storageSharedKeyCredentials.computeHmac256(stringToSign);
return encode(null /* userDelegationKey */, signature);
}
/**
* Generates a Sas signed with a {@link UserDelegationKey}
*
* @param delegationKey {@link UserDelegationKey}
* @param accountName The account name
* @param context Additional context that is passed through the code when generating a SAS.
* @return A String representing the Sas
*/
public String generateUserDelegationSas(UserDelegationKey delegationKey, String accountName, Context context) {
StorageImplUtils.assertNotNull("delegationKey", delegationKey);
StorageImplUtils.assertNotNull("accountName", accountName);
ensureState();
final String canonicalName = getCanonicalName(accountName);
final String stringToSign = stringToSign(delegationKey, canonicalName);
StorageImplUtils.logStringToSign(LOGGER, stringToSign, context);
String signature = StorageImplUtils.computeHMac256(delegationKey.getValue(), stringToSign);
return encode(delegationKey, signature);
}
/**
* Encodes a Sas from the values in this type.
* @param userDelegationKey {@link UserDelegationKey}
* @param signature The signature of the Sas.
* @return A String representing the Sas.
*/
private String encode(UserDelegationKey userDelegationKey, String signature) {
/*
We should be url-encoding each key and each value, but because we know all the keys and values will encode to
themselves, we cheat except for the signature value.
*/
StringBuilder sb = new StringBuilder();
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_SERVICE_VERSION, VERSION);
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_PROTOCOL, this.protocol);
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_START_TIME, formatQueryParameterDate(this.startTime));
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_EXPIRY_TIME, formatQueryParameterDate(this.expiryTime));
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_IP_RANGE, this.sasIpRange);
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_SIGNED_IDENTIFIER, this.identifier);
if (userDelegationKey != null) {
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_SIGNED_OBJECT_ID,
userDelegationKey.getSignedObjectId());
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_SIGNED_TENANT_ID,
userDelegationKey.getSignedTenantId());
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_SIGNED_KEY_START,
formatQueryParameterDate(userDelegationKey.getSignedStart()));
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_SIGNED_KEY_EXPIRY,
formatQueryParameterDate(userDelegationKey.getSignedExpiry()));
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_SIGNED_KEY_SERVICE,
userDelegationKey.getSignedService());
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_SIGNED_KEY_VERSION,
userDelegationKey.getSignedVersion());
/* Only parameters relevant for user delegation SAS. */
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_PREAUTHORIZED_AGENT_OBJECT_ID, this.authorizedAadObjectId);
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_AGENT_OBJECT_ID, this.unauthorizedAadObjectId);
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_CORRELATION_ID, this.correlationId);
}
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_SIGNED_RESOURCE, this.resource);
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_SIGNED_PERMISSIONS, this.permissions);
if (this.isDirectory) {
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_DIRECTORY_DEPTH, this.directoryDepth);
}
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_SIGNATURE, signature);
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_CACHE_CONTROL, this.cacheControl);
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_CONTENT_DISPOSITION, this.contentDisposition);
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_CONTENT_ENCODING, this.contentEncoding);
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_CONTENT_LANGUAGE, this.contentLanguage);
tryAppendQueryParameter(sb, Constants.UrlConstants.SAS_CONTENT_TYPE, this.contentType);
return sb.toString();
}
/**
* Ensures that the builder's properties are in a consistent state.
* 1. If there is no identifier set, ensure expiryTime and permissions are set.
* 2. Resource name is chosen by:
* a. If "BlobName" is _not_ set, it is a container resource.
* b. Otherwise, if "SnapshotId" is set, it is a blob snapshot resource.
* c. Otherwise, if "VersionId" is set, it is a blob version resource.
* d. Otherwise, it is a blob resource.
* 3. Reparse permissions depending on what the resource is. If it is an unrecognized resource, do nothing.
* 4. Ensure saoid is not set when suoid is set and vice versa.
*
* Taken from:
* https:
* https:
*/
private void ensureState() {
if (identifier == null) {
if (expiryTime == null || permissions == null) {
throw LOGGER.logExceptionAsError(new IllegalStateException("If identifier is not set, expiry time "
+ "and permissions must be set"));
}
}
if (CoreUtils.isNullOrEmpty(pathName)) {
resource = SAS_CONTAINER_CONSTANT;
} else {
if (isDirectory) {
resource = SAS_DIRECTORY_CONSTANT;
this.directoryDepth = pathName.split("/").length;
} else {
resource = SAS_BLOB_CONSTANT;
}
}
if (permissions != null) {
switch (resource) {
case SAS_BLOB_CONSTANT:
case SAS_DIRECTORY_CONSTANT:
permissions = PathSasPermission.parse(permissions).toString();
break;
case SAS_CONTAINER_CONSTANT:
permissions = FileSystemSasPermission.parse(permissions).toString();
break;
default:
LOGGER.info("Not re-parsing permissions. Resource type '{}' is unknown.", resource);
break;
}
}
if (this.authorizedAadObjectId != null && this.unauthorizedAadObjectId != null) {
throw LOGGER.logExceptionAsError(new IllegalStateException("agentObjectId and preauthorizedAgentObjectId "
+ "can not both be set."));
}
}
/**
* Computes the canonical name for a container or blob resource for SAS signing.
*/
private String getCanonicalName(String account) {
return CoreUtils.isNullOrEmpty(pathName)
? String.format("/blob/%s/%s", account, fileSystemName)
: String.format("/blob/%s/%s/%s", account, fileSystemName, pathName.replace("\\", "/"));
}
private String stringToSign(String canonicalName) {
if (VERSION.compareTo(DataLakeServiceVersion.V2020_10_02.getVersion()) <= 0) {
return String.join("\n",
this.permissions == null ? "" : permissions,
this.startTime == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(this.startTime),
this.expiryTime == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(this.expiryTime),
canonicalName,
this.identifier == null ? "" : this.identifier,
this.sasIpRange == null ? "" : this.sasIpRange.toString(),
this.protocol == null ? "" : this.protocol.toString(),
VERSION,
resource,
"", /* Version segment. */
this.cacheControl == null ? "" : this.cacheControl,
this.contentDisposition == null ? "" : this.contentDisposition,
this.contentEncoding == null ? "" : this.contentEncoding,
this.contentLanguage == null ? "" : this.contentLanguage,
this.contentType == null ? "" : this.contentType
);
} else {
return String.join("\n",
this.permissions == null ? "" : permissions,
this.startTime == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(this.startTime),
this.expiryTime == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(this.expiryTime),
canonicalName,
this.identifier == null ? "" : this.identifier,
this.sasIpRange == null ? "" : this.sasIpRange.toString(),
this.protocol == null ? "" : this.protocol.toString(),
VERSION,
resource,
"", /* Version segment. */
"",
this.cacheControl == null ? "" : this.cacheControl,
this.contentDisposition == null ? "" : this.contentDisposition,
this.contentEncoding == null ? "" : this.contentEncoding,
this.contentLanguage == null ? "" : this.contentLanguage,
this.contentType == null ? "" : this.contentType
);
}
}
} |
Cool. Then this is done. | private String stringToSign(final StorageSharedKeyCredential storageSharedKeyCredentials) {
return String.join("\n",
storageSharedKeyCredentials.getAccountName(),
AccountSasPermission.parse(this.permissions).toString(),
this.services,
resourceTypes,
this.startTime == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(this.startTime),
Constants.ISO_8601_UTC_DATE_FORMATTER.format(this.expiryTime),
this.sasIpRange == null ? "" : this.sasIpRange.toString(),
this.protocol == null ? "" : this.protocol.toString(),
VERSION,
this.encryptionScope == null ? "" : this.encryptionScope,
""
);
} | this.encryptionScope == null ? "" : this.encryptionScope, | private String stringToSign(final StorageSharedKeyCredential storageSharedKeyCredentials) {
return String.join("\n",
storageSharedKeyCredentials.getAccountName(),
AccountSasPermission.parse(this.permissions).toString(),
this.services,
resourceTypes,
this.startTime == null ? "" : Constants.ISO_8601_UTC_DATE_FORMATTER.format(this.startTime),
Constants.ISO_8601_UTC_DATE_FORMATTER.format(this.expiryTime),
this.sasIpRange == null ? "" : this.sasIpRange.toString(),
this.protocol == null ? "" : this.protocol.toString(),
VERSION,
""
);
} | class level JavaDocs.</p>
*
* @see <a href=https:
*
* @param storageSharedKeyCredentials Credentials for the storage account.
* @return A new {@link AccountSasQueryParameters} | class level JavaDocs.</p>
*
* @see <a href=https:
*
* @param storageSharedKeyCredentials Credentials for the storage account.
* @return A new {@link AccountSasQueryParameters} |
are we 100% sure that the endpoint will always have `https`? | private String getHostNameFromEndpoint() {
return endpoint.replace("https:
} | return endpoint.replace("https: | private String getHostNameFromEndpoint() throws MalformedURLException {
return new URL(endpoint).getHost();
} | class CallingServerClientBuilder {
private static final String SDK_NAME = "name";
private static final String SDK_VERSION = "version";
private static final String APP_CONFIG_PROPERTIES = "azure-communication-callingserver.properties";
private final ClientLogger logger = new ClientLogger(CallingServerClientBuilder.class);
private String connectionString;
private String endpoint;
private String hostName;
private AzureKeyCredential azureKeyCredential;
private TokenCredential tokenCredential;
private HttpClient httpClient;
private HttpLogOptions httpLogOptions = new HttpLogOptions();
private HttpPipeline pipeline;
private Configuration configuration;
private final Map<String, String> properties = CoreUtils.getProperties(APP_CONFIG_PROPERTIES);
private final List<HttpPipelinePolicy> customPolicies = new ArrayList<>();
private ClientOptions clientOptions;
private RetryPolicy retryPolicy;
/**
* Set endpoint of the service.
*
* @param endpoint url of the service.
* @return CallingServerClientBuilder object.
*/
public CallingServerClientBuilder endpoint(String endpoint) {
this.endpoint = Objects.requireNonNull(endpoint, "'endpoint' cannot be null.");
return this;
}
/**
* Set endpoint of the service.
*
* @param pipeline HttpPipeline to use, if a pipeline is not supplied, the
* credential and httpClient fields must be set.
* @return CallingServerClientBuilder object.
*/
public CallingServerClientBuilder pipeline(HttpPipeline pipeline) {
this.pipeline = Objects.requireNonNull(pipeline, "'pipeline' cannot be null.");
return this;
}
/**
* Sets the {@link TokenCredential} used to authenticate HTTP requests.
*
* @param tokenCredential {@link TokenCredential} used to authenticate HTTP
* requests.
* @return Updated {@link CallingServerClientBuilder} object.
* @throws NullPointerException If {@code tokenCredential} is null.
*/
public CallingServerClientBuilder credential(TokenCredential tokenCredential) {
this.tokenCredential = Objects.requireNonNull(tokenCredential, "'tokenCredential' cannot be null.");
return this;
}
/**
* Sets the {@link AzureKeyCredential} used to authenticate HTTP requests.
*
* @param keyCredential The {@link AzureKeyCredential} used to authenticate HTTP
* requests.
* @return Updated {@link CallingServerClientBuilder} object.
* @throws NullPointerException If {@code keyCredential} is null.
*/
CallingServerClientBuilder credential(AzureKeyCredential keyCredential) {
this.azureKeyCredential = Objects.requireNonNull(keyCredential, "'keyCredential' cannot be null.");
return this;
}
/**
* Set connectionString to use.
*
* @param connectionString connection string to set.
* @return Updated {@link CallingServerClientBuilder} object.
*/
public CallingServerClientBuilder connectionString(String connectionString) {
Objects.requireNonNull(connectionString, "'connectionString' cannot be null.");
this.connectionString = connectionString;
return this;
}
/**
* Sets the retry policy to use (using the RetryPolicy type).
*
* @param retryPolicy object to be applied
* @return Updated {@link CallingServerClientBuilder} object.
*/
public CallingServerClientBuilder retryPolicy(RetryPolicy retryPolicy) {
this.retryPolicy = Objects.requireNonNull(retryPolicy, "'retryPolicy' cannot be null.");
return this;
}
/**
* Sets the configuration object used to retrieve environment configuration
* values during building of the client.
*
* @param configuration Configuration store used to retrieve environment
* configurations.
* @return Updated {@link CallingServerClientBuilder} object.
*/
public CallingServerClientBuilder configuration(Configuration configuration) {
this.configuration = Objects.requireNonNull(configuration, "'configuration' cannot be null.");
return this;
}
/**
* Sets the {@link HttpLogOptions} for service requests.
*
* @param logOptions The logging configuration to use when sending and receiving
* HTTP requests/responses.
* @return The updated {@link CallingServerClientBuilder} object.
*/
public CallingServerClientBuilder httpLogOptions(HttpLogOptions logOptions) {
this.httpLogOptions = Objects.requireNonNull(logOptions, "'logOptions' cannot be null.");
return this;
}
/**
* Sets the {@link CallingServerServiceVersion} that is used when making API requests.
* <p>
* If a service version is not provided, the service version that will be used will be the latest known service
* version based on the version of the client library being used. If no service version is specified, updating to a
* newer version of the client library will have the result of potentially moving to a newer service version.
* <p>
* Targeting a specific service version may also mean that the service will return an error for newer APIs.
*
* @param version {@link CallingServerServiceVersion} of the service to be used when making requests.
* @return Updated CallingServerClientBuilder object
*/
public CallingServerClientBuilder serviceVersion(CallingServerServiceVersion version) {
return this;
}
/**
* Set httpClient to use
*
* @param httpClient httpClient to use, overridden by the pipeline field.
* @return Updated {@link CallingServerClientBuilder} object.
*/
public CallingServerClientBuilder httpClient(HttpClient httpClient) {
this.httpClient = Objects.requireNonNull(httpClient, "'httpClient' cannot be null.");
return this;
}
/**
* Apply additional HttpPipelinePolicy
*
* @param customPolicy HttpPipelinePolicy object to be applied after
* AzureKeyCredentialPolicy, UserAgentPolicy, RetryPolicy, and CookiePolicy.
* @return Updated {@link CallingServerClientBuilder} object.
*/
public CallingServerClientBuilder addPolicy(HttpPipelinePolicy customPolicy) {
this.customPolicies.add(Objects.requireNonNull(customPolicy, "'customPolicy' cannot be null."));
return this;
}
/**
* Create asynchronous client applying HMACAuthenticationPolicy,
* UserAgentPolicy, RetryPolicy, and CookiePolicy. Additional HttpPolicies
* specified by additionalPolicies will be applied after them
*
* @return The updated {@link CallingServerClientBuilder} object.
*/
public CallingServerAsyncClient buildAsyncClient() {
return new CallingServerAsyncClient(createServiceImpl());
}
/**
* Create synchronous client applying HmacAuthenticationPolicy, UserAgentPolicy,
* RetryPolicy, and CookiePolicy. Additional HttpPolicies specified by
* additionalPolicies will be applied after them.
*
* @return Updated {@link CallingServerClientBuilder} object.
*/
public CallingServerClient buildClient() {
return new CallingServerClient(buildAsyncClient());
}
private AzureCommunicationCallingServerServiceImpl createServiceImpl() {
boolean isConnectionStringSet = connectionString != null && !connectionString.trim().isEmpty();
boolean isEndpointSet = endpoint != null && !endpoint.trim().isEmpty();
boolean isAzureKeyCredentialSet = azureKeyCredential != null;
boolean isTokenCredentialSet = tokenCredential != null;
if (isConnectionStringSet && isEndpointSet) {
throw logger.logExceptionAsError(new IllegalArgumentException(
"Both 'connectionString' and 'endpoint' are set. Just one may be used."));
}
if (isConnectionStringSet && isAzureKeyCredentialSet) {
throw logger.logExceptionAsError(new IllegalArgumentException(
"Both 'connectionString' and 'keyCredential' are set. Just one may be used."));
}
if (isConnectionStringSet && isTokenCredentialSet) {
throw logger.logExceptionAsError(new IllegalArgumentException(
"Both 'connectionString' and 'tokenCredential' are set. Just one may be used."));
}
if (isAzureKeyCredentialSet && isTokenCredentialSet) {
throw logger.logExceptionAsError(new IllegalArgumentException(
"Both 'tokenCredential' and 'keyCredential' are set. Just one may be used."));
}
if (isConnectionStringSet) {
CommunicationConnectionString connectionStringObject = new CommunicationConnectionString(connectionString);
String endpoint = connectionStringObject.getEndpoint();
String accessKey = connectionStringObject.getAccessKey();
endpoint(endpoint).credential(new AzureKeyCredential(accessKey));
}
Objects.requireNonNull(endpoint);
if (isTokenCredentialSet) {
hostName = getHostNameFromEndpoint();
}
if (pipeline == null) {
Objects.requireNonNull(httpClient);
}
HttpPipeline builderPipeline = pipeline;
if (pipeline == null) {
builderPipeline = createHttpPipeline(httpClient);
}
AzureCommunicationCallingServerServiceImplBuilder clientBuilder = new AzureCommunicationCallingServerServiceImplBuilder();
clientBuilder.endpoint(endpoint).pipeline(builderPipeline);
return clientBuilder.buildClient();
}
/**
* Allows the user to set a variety of client-related options, such as
* user-agent string, headers, etc.
*
* @param clientOptions object to be applied.
* @return Updated {@link CallingServerClientBuilder} object.
*/
public CallingServerClientBuilder clientOptions(ClientOptions clientOptions) {
this.clientOptions = clientOptions;
return this;
}
private List<HttpPipelinePolicy> createHttpPipelineAuthPolicies() {
if (tokenCredential != null && azureKeyCredential != null) {
throw logger.logExceptionAsError(new IllegalArgumentException(
"Both 'credential' and 'keyCredential' are set. Just one may be used."));
}
List<HttpPipelinePolicy> pipelinePolicies = new ArrayList<>();
if (tokenCredential != null) {
pipelinePolicies.add(new BearerTokenAuthenticationPolicy(tokenCredential,
"https:
pipelinePolicies.add(new TokenCredentialAddHostHeaderPolicy(hostName));
} else if (azureKeyCredential != null) {
pipelinePolicies.add(new HmacAuthenticationPolicy(azureKeyCredential));
} else {
throw logger.logExceptionAsError(
new IllegalArgumentException("Missing credential information while building a client."));
}
return pipelinePolicies;
}
private HttpPipeline createHttpPipeline(HttpClient httpClient) {
if (pipeline != null) {
return pipeline;
}
List<HttpPipelinePolicy> policyList = new ArrayList<>();
ClientOptions buildClientOptions = (clientOptions == null) ? new ClientOptions() : clientOptions;
HttpLogOptions buildLogOptions = (httpLogOptions == null) ? new HttpLogOptions() : httpLogOptions;
String applicationId = null;
if (!CoreUtils.isNullOrEmpty(buildClientOptions.getApplicationId())) {
applicationId = buildClientOptions.getApplicationId();
} else if (!CoreUtils.isNullOrEmpty(buildLogOptions.getApplicationId())) {
applicationId = buildLogOptions.getApplicationId();
}
String clientName = properties.getOrDefault(SDK_NAME, "UnknownName");
String clientVersion = properties.getOrDefault(SDK_VERSION, "UnknownVersion");
policyList.add(new UserAgentPolicy(applicationId, clientName, clientVersion, configuration));
policyList.add(new RequestIdPolicy());
policyList.add((retryPolicy == null) ? new RetryPolicy() : retryPolicy);
policyList.add(new RedirectPolicy());
policyList.addAll(createHttpPipelineAuthPolicies());
policyList.add(new CookiePolicy());
if (!customPolicies.isEmpty()) {
policyList.addAll(customPolicies);
}
policyList.add(new HttpLoggingPolicy(getHttpLogOptions()));
return new HttpPipelineBuilder().policies(policyList.toArray(new HttpPipelinePolicy[0])).httpClient(httpClient)
.build();
}
private HttpLogOptions getHttpLogOptions() {
if (httpLogOptions == null) {
httpLogOptions = new HttpLogOptions();
}
return httpLogOptions;
}
} | class CallingServerClientBuilder {
private static final String SDK_NAME = "name";
private static final String SDK_VERSION = "version";
private static final String APP_CONFIG_PROPERTIES = "azure-communication-callingserver.properties";
private final ClientLogger logger = new ClientLogger(CallingServerClientBuilder.class);
private String connectionString;
private String endpoint;
private String hostName;
private AzureKeyCredential azureKeyCredential;
private TokenCredential tokenCredential;
private HttpClient httpClient;
private HttpLogOptions httpLogOptions = new HttpLogOptions();
private HttpPipeline pipeline;
private Configuration configuration;
private final Map<String, String> properties = CoreUtils.getProperties(APP_CONFIG_PROPERTIES);
private final List<HttpPipelinePolicy> customPolicies = new ArrayList<>();
private ClientOptions clientOptions;
private RetryPolicy retryPolicy;
/**
* Set endpoint of the service.
*
* @param endpoint url of the service.
* @return CallingServerClientBuilder object.
*/
public CallingServerClientBuilder endpoint(String endpoint) {
this.endpoint = Objects.requireNonNull(endpoint, "'endpoint' cannot be null.");
return this;
}
/**
* Set endpoint of the service.
*
* @param pipeline HttpPipeline to use, if a pipeline is not supplied, the
* credential and httpClient fields must be set.
* @return CallingServerClientBuilder object.
*/
public CallingServerClientBuilder pipeline(HttpPipeline pipeline) {
this.pipeline = Objects.requireNonNull(pipeline, "'pipeline' cannot be null.");
return this;
}
/**
* Sets the {@link TokenCredential} used to authenticate HTTP requests.
*
* @param tokenCredential {@link TokenCredential} used to authenticate HTTP
* requests.
* @return Updated {@link CallingServerClientBuilder} object.
* @throws NullPointerException If {@code tokenCredential} is null.
*/
public CallingServerClientBuilder credential(TokenCredential tokenCredential) {
this.tokenCredential = Objects.requireNonNull(tokenCredential, "'tokenCredential' cannot be null.");
return this;
}
/**
* Sets the {@link AzureKeyCredential} used to authenticate HTTP requests.
*
* @param keyCredential The {@link AzureKeyCredential} used to authenticate HTTP
* requests.
* @return Updated {@link CallingServerClientBuilder} object.
* @throws NullPointerException If {@code keyCredential} is null.
*/
CallingServerClientBuilder credential(AzureKeyCredential keyCredential) {
this.azureKeyCredential = Objects.requireNonNull(keyCredential, "'keyCredential' cannot be null.");
return this;
}
/**
* Set connectionString to use.
*
* @param connectionString connection string to set.
* @return Updated {@link CallingServerClientBuilder} object.
*/
public CallingServerClientBuilder connectionString(String connectionString) {
Objects.requireNonNull(connectionString, "'connectionString' cannot be null.");
this.connectionString = connectionString;
return this;
}
/**
* Sets the retry policy to use (using the RetryPolicy type).
*
* @param retryPolicy object to be applied
* @return Updated {@link CallingServerClientBuilder} object.
*/
public CallingServerClientBuilder retryPolicy(RetryPolicy retryPolicy) {
this.retryPolicy = Objects.requireNonNull(retryPolicy, "'retryPolicy' cannot be null.");
return this;
}
/**
* Sets the configuration object used to retrieve environment configuration
* values during building of the client.
*
* @param configuration Configuration store used to retrieve environment
* configurations.
* @return Updated {@link CallingServerClientBuilder} object.
*/
public CallingServerClientBuilder configuration(Configuration configuration) {
this.configuration = Objects.requireNonNull(configuration, "'configuration' cannot be null.");
return this;
}
/**
* Sets the {@link HttpLogOptions} for service requests.
*
* @param logOptions The logging configuration to use when sending and receiving
* HTTP requests/responses.
* @return The updated {@link CallingServerClientBuilder} object.
*/
public CallingServerClientBuilder httpLogOptions(HttpLogOptions logOptions) {
this.httpLogOptions = Objects.requireNonNull(logOptions, "'logOptions' cannot be null.");
return this;
}
/**
* Sets the {@link CallingServerServiceVersion} that is used when making API requests.
* <p>
* If a service version is not provided, the service version that will be used will be the latest known service
* version based on the version of the client library being used. If no service version is specified, updating to a
* newer version of the client library will have the result of potentially moving to a newer service version.
* <p>
* Targeting a specific service version may also mean that the service will return an error for newer APIs.
*
* @param version {@link CallingServerServiceVersion} of the service to be used when making requests.
* @return Updated CallingServerClientBuilder object
*/
public CallingServerClientBuilder serviceVersion(CallingServerServiceVersion version) {
return this;
}
/**
* Set httpClient to use
*
* @param httpClient httpClient to use, overridden by the pipeline field.
* @return Updated {@link CallingServerClientBuilder} object.
*/
public CallingServerClientBuilder httpClient(HttpClient httpClient) {
this.httpClient = Objects.requireNonNull(httpClient, "'httpClient' cannot be null.");
return this;
}
/**
* Apply additional HttpPipelinePolicy
*
* @param customPolicy HttpPipelinePolicy object to be applied after
* AzureKeyCredentialPolicy, UserAgentPolicy, RetryPolicy, and CookiePolicy.
* @return Updated {@link CallingServerClientBuilder} object.
*/
public CallingServerClientBuilder addPolicy(HttpPipelinePolicy customPolicy) {
this.customPolicies.add(Objects.requireNonNull(customPolicy, "'customPolicy' cannot be null."));
return this;
}
/**
* Create asynchronous client applying HMACAuthenticationPolicy,
* UserAgentPolicy, RetryPolicy, and CookiePolicy. Additional HttpPolicies
* specified by additionalPolicies will be applied after them
*
* @return The updated {@link CallingServerClientBuilder} object.
*/
public CallingServerAsyncClient buildAsyncClient() {
return new CallingServerAsyncClient(createServiceImpl());
}
/**
* Create synchronous client applying HmacAuthenticationPolicy, UserAgentPolicy,
* RetryPolicy, and CookiePolicy. Additional HttpPolicies specified by
* additionalPolicies will be applied after them.
*
* @return Updated {@link CallingServerClientBuilder} object.
*/
public CallingServerClient buildClient() {
return new CallingServerClient(buildAsyncClient());
}
private AzureCommunicationCallingServerServiceImpl createServiceImpl() {
boolean isConnectionStringSet = connectionString != null && !connectionString.trim().isEmpty();
boolean isEndpointSet = endpoint != null && !endpoint.trim().isEmpty();
boolean isAzureKeyCredentialSet = azureKeyCredential != null;
boolean isTokenCredentialSet = tokenCredential != null;
if (isConnectionStringSet && isEndpointSet) {
throw logger.logExceptionAsError(new IllegalArgumentException(
"Both 'connectionString' and 'endpoint' are set. Just one may be used."));
}
if (isConnectionStringSet && isAzureKeyCredentialSet) {
throw logger.logExceptionAsError(new IllegalArgumentException(
"Both 'connectionString' and 'keyCredential' are set. Just one may be used."));
}
if (isConnectionStringSet && isTokenCredentialSet) {
throw logger.logExceptionAsError(new IllegalArgumentException(
"Both 'connectionString' and 'tokenCredential' are set. Just one may be used."));
}
if (isAzureKeyCredentialSet && isTokenCredentialSet) {
throw logger.logExceptionAsError(new IllegalArgumentException(
"Both 'tokenCredential' and 'keyCredential' are set. Just one may be used."));
}
if (isConnectionStringSet) {
CommunicationConnectionString connectionStringObject = new CommunicationConnectionString(connectionString);
String endpoint = connectionStringObject.getEndpoint();
String accessKey = connectionStringObject.getAccessKey();
endpoint(endpoint).credential(new AzureKeyCredential(accessKey));
}
Objects.requireNonNull(endpoint);
if (isTokenCredentialSet) {
try {
hostName = getHostNameFromEndpoint();
} catch (MalformedURLException e) {
throw logger.logExceptionAsError(new RuntimeException(e.getMessage()));
}
}
if (pipeline == null) {
Objects.requireNonNull(httpClient);
}
HttpPipeline builderPipeline = pipeline;
if (pipeline == null) {
builderPipeline = createHttpPipeline(httpClient);
}
AzureCommunicationCallingServerServiceImplBuilder clientBuilder = new AzureCommunicationCallingServerServiceImplBuilder();
clientBuilder.endpoint(endpoint).pipeline(builderPipeline);
return clientBuilder.buildClient();
}
/**
* Allows the user to set a variety of client-related options, such as
* user-agent string, headers, etc.
*
* @param clientOptions object to be applied.
* @return Updated {@link CallingServerClientBuilder} object.
*/
public CallingServerClientBuilder clientOptions(ClientOptions clientOptions) {
this.clientOptions = clientOptions;
return this;
}
private List<HttpPipelinePolicy> createHttpPipelineAuthPolicies() {
if (tokenCredential != null && azureKeyCredential != null) {
throw logger.logExceptionAsError(new IllegalArgumentException(
"Both 'credential' and 'keyCredential' are set. Just one may be used."));
}
List<HttpPipelinePolicy> pipelinePolicies = new ArrayList<>();
if (tokenCredential != null) {
pipelinePolicies.add(new BearerTokenAuthenticationPolicy(tokenCredential,
"https:
pipelinePolicies.add(new TokenCredentialAddHostHeaderPolicy(hostName));
} else if (azureKeyCredential != null) {
pipelinePolicies.add(new HmacAuthenticationPolicy(azureKeyCredential));
} else {
throw logger.logExceptionAsError(
new IllegalArgumentException("Missing credential information while building a client."));
}
return pipelinePolicies;
}
private HttpPipeline createHttpPipeline(HttpClient httpClient) {
if (pipeline != null) {
return pipeline;
}
List<HttpPipelinePolicy> policyList = new ArrayList<>();
ClientOptions buildClientOptions = (clientOptions == null) ? new ClientOptions() : clientOptions;
HttpLogOptions buildLogOptions = (httpLogOptions == null) ? new HttpLogOptions() : httpLogOptions;
String applicationId = null;
if (!CoreUtils.isNullOrEmpty(buildClientOptions.getApplicationId())) {
applicationId = buildClientOptions.getApplicationId();
} else if (!CoreUtils.isNullOrEmpty(buildLogOptions.getApplicationId())) {
applicationId = buildLogOptions.getApplicationId();
}
String clientName = properties.getOrDefault(SDK_NAME, "UnknownName");
String clientVersion = properties.getOrDefault(SDK_VERSION, "UnknownVersion");
policyList.add(new UserAgentPolicy(applicationId, clientName, clientVersion, configuration));
policyList.add(new RequestIdPolicy());
policyList.add((retryPolicy == null) ? new RetryPolicy() : retryPolicy);
policyList.add(new RedirectPolicy());
policyList.addAll(createHttpPipelineAuthPolicies());
policyList.add(new CookiePolicy());
if (!customPolicies.isEmpty()) {
policyList.addAll(customPolicies);
}
policyList.add(new HttpLoggingPolicy(getHttpLogOptions()));
return new HttpPipelineBuilder().policies(policyList.toArray(new HttpPipelinePolicy[0])).httpClient(httpClient)
.build();
}
private HttpLogOptions getHttpLogOptions() {
if (httpLogOptions == null) {
httpLogOptions = new HttpLogOptions();
}
return httpLogOptions;
}
} |
Good point, we probably also want to check for http. Anything else to look for? | private String getHostNameFromEndpoint() {
return endpoint.replace("https:
} | return endpoint.replace("https: | private String getHostNameFromEndpoint() throws MalformedURLException {
return new URL(endpoint).getHost();
} | class CallingServerClientBuilder {
private static final String SDK_NAME = "name";
private static final String SDK_VERSION = "version";
private static final String APP_CONFIG_PROPERTIES = "azure-communication-callingserver.properties";
private final ClientLogger logger = new ClientLogger(CallingServerClientBuilder.class);
private String connectionString;
private String endpoint;
private String hostName;
private AzureKeyCredential azureKeyCredential;
private TokenCredential tokenCredential;
private HttpClient httpClient;
private HttpLogOptions httpLogOptions = new HttpLogOptions();
private HttpPipeline pipeline;
private Configuration configuration;
private final Map<String, String> properties = CoreUtils.getProperties(APP_CONFIG_PROPERTIES);
private final List<HttpPipelinePolicy> customPolicies = new ArrayList<>();
private ClientOptions clientOptions;
private RetryPolicy retryPolicy;
/**
* Set endpoint of the service.
*
* @param endpoint url of the service.
* @return CallingServerClientBuilder object.
*/
public CallingServerClientBuilder endpoint(String endpoint) {
this.endpoint = Objects.requireNonNull(endpoint, "'endpoint' cannot be null.");
return this;
}
/**
* Set endpoint of the service.
*
* @param pipeline HttpPipeline to use, if a pipeline is not supplied, the
* credential and httpClient fields must be set.
* @return CallingServerClientBuilder object.
*/
public CallingServerClientBuilder pipeline(HttpPipeline pipeline) {
this.pipeline = Objects.requireNonNull(pipeline, "'pipeline' cannot be null.");
return this;
}
/**
* Sets the {@link TokenCredential} used to authenticate HTTP requests.
*
* @param tokenCredential {@link TokenCredential} used to authenticate HTTP
* requests.
* @return Updated {@link CallingServerClientBuilder} object.
* @throws NullPointerException If {@code tokenCredential} is null.
*/
public CallingServerClientBuilder credential(TokenCredential tokenCredential) {
this.tokenCredential = Objects.requireNonNull(tokenCredential, "'tokenCredential' cannot be null.");
return this;
}
/**
* Sets the {@link AzureKeyCredential} used to authenticate HTTP requests.
*
* @param keyCredential The {@link AzureKeyCredential} used to authenticate HTTP
* requests.
* @return Updated {@link CallingServerClientBuilder} object.
* @throws NullPointerException If {@code keyCredential} is null.
*/
CallingServerClientBuilder credential(AzureKeyCredential keyCredential) {
this.azureKeyCredential = Objects.requireNonNull(keyCredential, "'keyCredential' cannot be null.");
return this;
}
/**
* Set connectionString to use.
*
* @param connectionString connection string to set.
* @return Updated {@link CallingServerClientBuilder} object.
*/
public CallingServerClientBuilder connectionString(String connectionString) {
Objects.requireNonNull(connectionString, "'connectionString' cannot be null.");
this.connectionString = connectionString;
return this;
}
/**
* Sets the retry policy to use (using the RetryPolicy type).
*
* @param retryPolicy object to be applied
* @return Updated {@link CallingServerClientBuilder} object.
*/
public CallingServerClientBuilder retryPolicy(RetryPolicy retryPolicy) {
this.retryPolicy = Objects.requireNonNull(retryPolicy, "'retryPolicy' cannot be null.");
return this;
}
/**
* Sets the configuration object used to retrieve environment configuration
* values during building of the client.
*
* @param configuration Configuration store used to retrieve environment
* configurations.
* @return Updated {@link CallingServerClientBuilder} object.
*/
public CallingServerClientBuilder configuration(Configuration configuration) {
this.configuration = Objects.requireNonNull(configuration, "'configuration' cannot be null.");
return this;
}
/**
* Sets the {@link HttpLogOptions} for service requests.
*
* @param logOptions The logging configuration to use when sending and receiving
* HTTP requests/responses.
* @return The updated {@link CallingServerClientBuilder} object.
*/
public CallingServerClientBuilder httpLogOptions(HttpLogOptions logOptions) {
this.httpLogOptions = Objects.requireNonNull(logOptions, "'logOptions' cannot be null.");
return this;
}
/**
* Sets the {@link CallingServerServiceVersion} that is used when making API requests.
* <p>
* If a service version is not provided, the service version that will be used will be the latest known service
* version based on the version of the client library being used. If no service version is specified, updating to a
* newer version of the client library will have the result of potentially moving to a newer service version.
* <p>
* Targeting a specific service version may also mean that the service will return an error for newer APIs.
*
* @param version {@link CallingServerServiceVersion} of the service to be used when making requests.
* @return Updated CallingServerClientBuilder object
*/
public CallingServerClientBuilder serviceVersion(CallingServerServiceVersion version) {
return this;
}
/**
* Set httpClient to use
*
* @param httpClient httpClient to use, overridden by the pipeline field.
* @return Updated {@link CallingServerClientBuilder} object.
*/
public CallingServerClientBuilder httpClient(HttpClient httpClient) {
this.httpClient = Objects.requireNonNull(httpClient, "'httpClient' cannot be null.");
return this;
}
/**
* Apply additional HttpPipelinePolicy
*
* @param customPolicy HttpPipelinePolicy object to be applied after
* AzureKeyCredentialPolicy, UserAgentPolicy, RetryPolicy, and CookiePolicy.
* @return Updated {@link CallingServerClientBuilder} object.
*/
public CallingServerClientBuilder addPolicy(HttpPipelinePolicy customPolicy) {
this.customPolicies.add(Objects.requireNonNull(customPolicy, "'customPolicy' cannot be null."));
return this;
}
/**
* Create asynchronous client applying HMACAuthenticationPolicy,
* UserAgentPolicy, RetryPolicy, and CookiePolicy. Additional HttpPolicies
* specified by additionalPolicies will be applied after them
*
* @return The updated {@link CallingServerClientBuilder} object.
*/
public CallingServerAsyncClient buildAsyncClient() {
return new CallingServerAsyncClient(createServiceImpl());
}
/**
* Create synchronous client applying HmacAuthenticationPolicy, UserAgentPolicy,
* RetryPolicy, and CookiePolicy. Additional HttpPolicies specified by
* additionalPolicies will be applied after them.
*
* @return Updated {@link CallingServerClientBuilder} object.
*/
public CallingServerClient buildClient() {
return new CallingServerClient(buildAsyncClient());
}
private AzureCommunicationCallingServerServiceImpl createServiceImpl() {
boolean isConnectionStringSet = connectionString != null && !connectionString.trim().isEmpty();
boolean isEndpointSet = endpoint != null && !endpoint.trim().isEmpty();
boolean isAzureKeyCredentialSet = azureKeyCredential != null;
boolean isTokenCredentialSet = tokenCredential != null;
if (isConnectionStringSet && isEndpointSet) {
throw logger.logExceptionAsError(new IllegalArgumentException(
"Both 'connectionString' and 'endpoint' are set. Just one may be used."));
}
if (isConnectionStringSet && isAzureKeyCredentialSet) {
throw logger.logExceptionAsError(new IllegalArgumentException(
"Both 'connectionString' and 'keyCredential' are set. Just one may be used."));
}
if (isConnectionStringSet && isTokenCredentialSet) {
throw logger.logExceptionAsError(new IllegalArgumentException(
"Both 'connectionString' and 'tokenCredential' are set. Just one may be used."));
}
if (isAzureKeyCredentialSet && isTokenCredentialSet) {
throw logger.logExceptionAsError(new IllegalArgumentException(
"Both 'tokenCredential' and 'keyCredential' are set. Just one may be used."));
}
if (isConnectionStringSet) {
CommunicationConnectionString connectionStringObject = new CommunicationConnectionString(connectionString);
String endpoint = connectionStringObject.getEndpoint();
String accessKey = connectionStringObject.getAccessKey();
endpoint(endpoint).credential(new AzureKeyCredential(accessKey));
}
Objects.requireNonNull(endpoint);
if (isTokenCredentialSet) {
hostName = getHostNameFromEndpoint();
}
if (pipeline == null) {
Objects.requireNonNull(httpClient);
}
HttpPipeline builderPipeline = pipeline;
if (pipeline == null) {
builderPipeline = createHttpPipeline(httpClient);
}
AzureCommunicationCallingServerServiceImplBuilder clientBuilder = new AzureCommunicationCallingServerServiceImplBuilder();
clientBuilder.endpoint(endpoint).pipeline(builderPipeline);
return clientBuilder.buildClient();
}
/**
* Allows the user to set a variety of client-related options, such as
* user-agent string, headers, etc.
*
* @param clientOptions object to be applied.
* @return Updated {@link CallingServerClientBuilder} object.
*/
public CallingServerClientBuilder clientOptions(ClientOptions clientOptions) {
this.clientOptions = clientOptions;
return this;
}
private List<HttpPipelinePolicy> createHttpPipelineAuthPolicies() {
if (tokenCredential != null && azureKeyCredential != null) {
throw logger.logExceptionAsError(new IllegalArgumentException(
"Both 'credential' and 'keyCredential' are set. Just one may be used."));
}
List<HttpPipelinePolicy> pipelinePolicies = new ArrayList<>();
if (tokenCredential != null) {
pipelinePolicies.add(new BearerTokenAuthenticationPolicy(tokenCredential,
"https:
pipelinePolicies.add(new TokenCredentialAddHostHeaderPolicy(hostName));
} else if (azureKeyCredential != null) {
pipelinePolicies.add(new HmacAuthenticationPolicy(azureKeyCredential));
} else {
throw logger.logExceptionAsError(
new IllegalArgumentException("Missing credential information while building a client."));
}
return pipelinePolicies;
}
private HttpPipeline createHttpPipeline(HttpClient httpClient) {
if (pipeline != null) {
return pipeline;
}
List<HttpPipelinePolicy> policyList = new ArrayList<>();
ClientOptions buildClientOptions = (clientOptions == null) ? new ClientOptions() : clientOptions;
HttpLogOptions buildLogOptions = (httpLogOptions == null) ? new HttpLogOptions() : httpLogOptions;
String applicationId = null;
if (!CoreUtils.isNullOrEmpty(buildClientOptions.getApplicationId())) {
applicationId = buildClientOptions.getApplicationId();
} else if (!CoreUtils.isNullOrEmpty(buildLogOptions.getApplicationId())) {
applicationId = buildLogOptions.getApplicationId();
}
String clientName = properties.getOrDefault(SDK_NAME, "UnknownName");
String clientVersion = properties.getOrDefault(SDK_VERSION, "UnknownVersion");
policyList.add(new UserAgentPolicy(applicationId, clientName, clientVersion, configuration));
policyList.add(new RequestIdPolicy());
policyList.add((retryPolicy == null) ? new RetryPolicy() : retryPolicy);
policyList.add(new RedirectPolicy());
policyList.addAll(createHttpPipelineAuthPolicies());
policyList.add(new CookiePolicy());
if (!customPolicies.isEmpty()) {
policyList.addAll(customPolicies);
}
policyList.add(new HttpLoggingPolicy(getHttpLogOptions()));
return new HttpPipelineBuilder().policies(policyList.toArray(new HttpPipelinePolicy[0])).httpClient(httpClient)
.build();
}
private HttpLogOptions getHttpLogOptions() {
if (httpLogOptions == null) {
httpLogOptions = new HttpLogOptions();
}
return httpLogOptions;
}
} | class CallingServerClientBuilder {
private static final String SDK_NAME = "name";
private static final String SDK_VERSION = "version";
private static final String APP_CONFIG_PROPERTIES = "azure-communication-callingserver.properties";
private final ClientLogger logger = new ClientLogger(CallingServerClientBuilder.class);
private String connectionString;
private String endpoint;
private String hostName;
private AzureKeyCredential azureKeyCredential;
private TokenCredential tokenCredential;
private HttpClient httpClient;
private HttpLogOptions httpLogOptions = new HttpLogOptions();
private HttpPipeline pipeline;
private Configuration configuration;
private final Map<String, String> properties = CoreUtils.getProperties(APP_CONFIG_PROPERTIES);
private final List<HttpPipelinePolicy> customPolicies = new ArrayList<>();
private ClientOptions clientOptions;
private RetryPolicy retryPolicy;
/**
* Set endpoint of the service.
*
* @param endpoint url of the service.
* @return CallingServerClientBuilder object.
*/
public CallingServerClientBuilder endpoint(String endpoint) {
this.endpoint = Objects.requireNonNull(endpoint, "'endpoint' cannot be null.");
return this;
}
/**
* Set endpoint of the service.
*
* @param pipeline HttpPipeline to use, if a pipeline is not supplied, the
* credential and httpClient fields must be set.
* @return CallingServerClientBuilder object.
*/
public CallingServerClientBuilder pipeline(HttpPipeline pipeline) {
this.pipeline = Objects.requireNonNull(pipeline, "'pipeline' cannot be null.");
return this;
}
/**
* Sets the {@link TokenCredential} used to authenticate HTTP requests.
*
* @param tokenCredential {@link TokenCredential} used to authenticate HTTP
* requests.
* @return Updated {@link CallingServerClientBuilder} object.
* @throws NullPointerException If {@code tokenCredential} is null.
*/
public CallingServerClientBuilder credential(TokenCredential tokenCredential) {
this.tokenCredential = Objects.requireNonNull(tokenCredential, "'tokenCredential' cannot be null.");
return this;
}
/**
* Sets the {@link AzureKeyCredential} used to authenticate HTTP requests.
*
* @param keyCredential The {@link AzureKeyCredential} used to authenticate HTTP
* requests.
* @return Updated {@link CallingServerClientBuilder} object.
* @throws NullPointerException If {@code keyCredential} is null.
*/
CallingServerClientBuilder credential(AzureKeyCredential keyCredential) {
this.azureKeyCredential = Objects.requireNonNull(keyCredential, "'keyCredential' cannot be null.");
return this;
}
/**
* Set connectionString to use.
*
* @param connectionString connection string to set.
* @return Updated {@link CallingServerClientBuilder} object.
*/
public CallingServerClientBuilder connectionString(String connectionString) {
Objects.requireNonNull(connectionString, "'connectionString' cannot be null.");
this.connectionString = connectionString;
return this;
}
/**
* Sets the retry policy to use (using the RetryPolicy type).
*
* @param retryPolicy object to be applied
* @return Updated {@link CallingServerClientBuilder} object.
*/
public CallingServerClientBuilder retryPolicy(RetryPolicy retryPolicy) {
this.retryPolicy = Objects.requireNonNull(retryPolicy, "'retryPolicy' cannot be null.");
return this;
}
/**
* Sets the configuration object used to retrieve environment configuration
* values during building of the client.
*
* @param configuration Configuration store used to retrieve environment
* configurations.
* @return Updated {@link CallingServerClientBuilder} object.
*/
public CallingServerClientBuilder configuration(Configuration configuration) {
this.configuration = Objects.requireNonNull(configuration, "'configuration' cannot be null.");
return this;
}
/**
* Sets the {@link HttpLogOptions} for service requests.
*
* @param logOptions The logging configuration to use when sending and receiving
* HTTP requests/responses.
* @return The updated {@link CallingServerClientBuilder} object.
*/
public CallingServerClientBuilder httpLogOptions(HttpLogOptions logOptions) {
this.httpLogOptions = Objects.requireNonNull(logOptions, "'logOptions' cannot be null.");
return this;
}
/**
* Sets the {@link CallingServerServiceVersion} that is used when making API requests.
* <p>
* If a service version is not provided, the service version that will be used will be the latest known service
* version based on the version of the client library being used. If no service version is specified, updating to a
* newer version of the client library will have the result of potentially moving to a newer service version.
* <p>
* Targeting a specific service version may also mean that the service will return an error for newer APIs.
*
* @param version {@link CallingServerServiceVersion} of the service to be used when making requests.
* @return Updated CallingServerClientBuilder object
*/
public CallingServerClientBuilder serviceVersion(CallingServerServiceVersion version) {
return this;
}
/**
* Set httpClient to use
*
* @param httpClient httpClient to use, overridden by the pipeline field.
* @return Updated {@link CallingServerClientBuilder} object.
*/
public CallingServerClientBuilder httpClient(HttpClient httpClient) {
this.httpClient = Objects.requireNonNull(httpClient, "'httpClient' cannot be null.");
return this;
}
/**
* Apply additional HttpPipelinePolicy
*
* @param customPolicy HttpPipelinePolicy object to be applied after
* AzureKeyCredentialPolicy, UserAgentPolicy, RetryPolicy, and CookiePolicy.
* @return Updated {@link CallingServerClientBuilder} object.
*/
public CallingServerClientBuilder addPolicy(HttpPipelinePolicy customPolicy) {
this.customPolicies.add(Objects.requireNonNull(customPolicy, "'customPolicy' cannot be null."));
return this;
}
/**
* Create asynchronous client applying HMACAuthenticationPolicy,
* UserAgentPolicy, RetryPolicy, and CookiePolicy. Additional HttpPolicies
* specified by additionalPolicies will be applied after them
*
* @return The updated {@link CallingServerClientBuilder} object.
*/
public CallingServerAsyncClient buildAsyncClient() {
return new CallingServerAsyncClient(createServiceImpl());
}
/**
* Create synchronous client applying HmacAuthenticationPolicy, UserAgentPolicy,
* RetryPolicy, and CookiePolicy. Additional HttpPolicies specified by
* additionalPolicies will be applied after them.
*
* @return Updated {@link CallingServerClientBuilder} object.
*/
public CallingServerClient buildClient() {
return new CallingServerClient(buildAsyncClient());
}
private AzureCommunicationCallingServerServiceImpl createServiceImpl() {
boolean isConnectionStringSet = connectionString != null && !connectionString.trim().isEmpty();
boolean isEndpointSet = endpoint != null && !endpoint.trim().isEmpty();
boolean isAzureKeyCredentialSet = azureKeyCredential != null;
boolean isTokenCredentialSet = tokenCredential != null;
if (isConnectionStringSet && isEndpointSet) {
throw logger.logExceptionAsError(new IllegalArgumentException(
"Both 'connectionString' and 'endpoint' are set. Just one may be used."));
}
if (isConnectionStringSet && isAzureKeyCredentialSet) {
throw logger.logExceptionAsError(new IllegalArgumentException(
"Both 'connectionString' and 'keyCredential' are set. Just one may be used."));
}
if (isConnectionStringSet && isTokenCredentialSet) {
throw logger.logExceptionAsError(new IllegalArgumentException(
"Both 'connectionString' and 'tokenCredential' are set. Just one may be used."));
}
if (isAzureKeyCredentialSet && isTokenCredentialSet) {
throw logger.logExceptionAsError(new IllegalArgumentException(
"Both 'tokenCredential' and 'keyCredential' are set. Just one may be used."));
}
if (isConnectionStringSet) {
CommunicationConnectionString connectionStringObject = new CommunicationConnectionString(connectionString);
String endpoint = connectionStringObject.getEndpoint();
String accessKey = connectionStringObject.getAccessKey();
endpoint(endpoint).credential(new AzureKeyCredential(accessKey));
}
Objects.requireNonNull(endpoint);
if (isTokenCredentialSet) {
try {
hostName = getHostNameFromEndpoint();
} catch (MalformedURLException e) {
throw logger.logExceptionAsError(new RuntimeException(e.getMessage()));
}
}
if (pipeline == null) {
Objects.requireNonNull(httpClient);
}
HttpPipeline builderPipeline = pipeline;
if (pipeline == null) {
builderPipeline = createHttpPipeline(httpClient);
}
AzureCommunicationCallingServerServiceImplBuilder clientBuilder = new AzureCommunicationCallingServerServiceImplBuilder();
clientBuilder.endpoint(endpoint).pipeline(builderPipeline);
return clientBuilder.buildClient();
}
/**
* Allows the user to set a variety of client-related options, such as
* user-agent string, headers, etc.
*
* @param clientOptions object to be applied.
* @return Updated {@link CallingServerClientBuilder} object.
*/
public CallingServerClientBuilder clientOptions(ClientOptions clientOptions) {
this.clientOptions = clientOptions;
return this;
}
private List<HttpPipelinePolicy> createHttpPipelineAuthPolicies() {
if (tokenCredential != null && azureKeyCredential != null) {
throw logger.logExceptionAsError(new IllegalArgumentException(
"Both 'credential' and 'keyCredential' are set. Just one may be used."));
}
List<HttpPipelinePolicy> pipelinePolicies = new ArrayList<>();
if (tokenCredential != null) {
pipelinePolicies.add(new BearerTokenAuthenticationPolicy(tokenCredential,
"https:
pipelinePolicies.add(new TokenCredentialAddHostHeaderPolicy(hostName));
} else if (azureKeyCredential != null) {
pipelinePolicies.add(new HmacAuthenticationPolicy(azureKeyCredential));
} else {
throw logger.logExceptionAsError(
new IllegalArgumentException("Missing credential information while building a client."));
}
return pipelinePolicies;
}
/**
* Assembles the HTTP pipeline used by the generated client: user agent, request id,
* retry, redirect, authentication, cookie, any caller-supplied policies, and logging.
* If an explicit pipeline was supplied to the builder it is returned unchanged.
*
* @param httpClient transport to attach to the pipeline.
* @return the pipeline to hand to the generated service client.
*/
private HttpPipeline createHttpPipeline(HttpClient httpClient) {
    if (pipeline != null) {
        return pipeline;
    }
    ClientOptions resolvedClientOptions = clientOptions != null ? clientOptions : new ClientOptions();
    HttpLogOptions resolvedLogOptions = httpLogOptions != null ? httpLogOptions : new HttpLogOptions();
    // The application id comes from ClientOptions, falling back to HttpLogOptions.
    String applicationId = null;
    String fromClientOptions = resolvedClientOptions.getApplicationId();
    String fromLogOptions = resolvedLogOptions.getApplicationId();
    if (!CoreUtils.isNullOrEmpty(fromClientOptions)) {
        applicationId = fromClientOptions;
    } else if (!CoreUtils.isNullOrEmpty(fromLogOptions)) {
        applicationId = fromLogOptions;
    }
    List<HttpPipelinePolicy> policies = new ArrayList<>();
    policies.add(new UserAgentPolicy(applicationId,
        properties.getOrDefault(SDK_NAME, "UnknownName"),
        properties.getOrDefault(SDK_VERSION, "UnknownVersion"),
        configuration));
    policies.add(new RequestIdPolicy());
    policies.add(retryPolicy != null ? retryPolicy : new RetryPolicy());
    policies.add(new RedirectPolicy());
    policies.addAll(createHttpPipelineAuthPolicies());
    policies.add(new CookiePolicy());
    if (!customPolicies.isEmpty()) {
        policies.addAll(customPolicies);
    }
    // getHttpLogOptions() also lazily initializes the field, matching prior behavior.
    policies.add(new HttpLoggingPolicy(getHttpLogOptions()));
    return new HttpPipelineBuilder()
        .policies(policies.toArray(new HttpPipelinePolicy[0]))
        .httpClient(httpClient)
        .build();
}
/**
* Lazily initializes and returns the {@link HttpLogOptions} for this builder.
* A default instance is created (and cached on the field) the first time this is
* called with no options configured.
*
* @return the log options, never {@code null}.
*/
private HttpLogOptions getHttpLogOptions() {
    HttpLogOptions options = this.httpLogOptions;
    if (options == null) {
        options = new HttpLogOptions();
        this.httpLogOptions = options;
    }
    return options;
}
} |
is there a lib in java like URL in C# that does this for us? | private String getHostNameFromEndpoint() {
return endpoint.replace("https:
} | return endpoint.replace("https: | private String getHostNameFromEndpoint() throws MalformedURLException {
return new URL(endpoint).getHost();
} | class CallingServerClientBuilder {
// Keys into the SDK properties file used to stamp the User-Agent header.
private static final String SDK_NAME = "name";
private static final String SDK_VERSION = "version";
// Resource bundle carrying the SDK name/version values.
private static final String APP_CONFIG_PROPERTIES = "azure-communication-callingserver.properties";
private final ClientLogger logger = new ClientLogger(CallingServerClientBuilder.class);
// Mutually exclusive connection settings: either a connection string, or an
// endpoint plus exactly one credential (validated in createServiceImpl).
private String connectionString;
private String endpoint;
// Host extracted from the endpoint; only populated for the token-credential path.
private String hostName;
private AzureKeyCredential azureKeyCredential;
private TokenCredential tokenCredential;
private HttpClient httpClient;
private HttpLogOptions httpLogOptions = new HttpLogOptions();
// When supplied, this pipeline is used verbatim and createHttpPipeline is skipped.
private HttpPipeline pipeline;
private Configuration configuration;
private final Map<String, String> properties = CoreUtils.getProperties(APP_CONFIG_PROPERTIES);
// Extra caller-supplied policies appended after the built-in ones.
private final List<HttpPipelinePolicy> customPolicies = new ArrayList<>();
private ClientOptions clientOptions;
private RetryPolicy retryPolicy;
/**
* Sets the service endpoint URL.
*
* @param endpoint url of the service.
* @return CallingServerClientBuilder object.
* @throws NullPointerException if {@code endpoint} is {@code null}.
*/
public CallingServerClientBuilder endpoint(String endpoint) {
    Objects.requireNonNull(endpoint, "'endpoint' cannot be null.");
    this.endpoint = endpoint;
    return this;
}
/**
* Sets the HTTP pipeline to use for service requests.
*
* @param pipeline HttpPipeline to use, if a pipeline is not supplied, the
* credential and httpClient fields must be set.
* @return CallingServerClientBuilder object.
* @throws NullPointerException if {@code pipeline} is {@code null}.
*/
public CallingServerClientBuilder pipeline(HttpPipeline pipeline) {
this.pipeline = Objects.requireNonNull(pipeline, "'pipeline' cannot be null.");
return this;
}
/**
* Sets the {@link TokenCredential} used to authenticate HTTP requests.
*
* @param tokenCredential {@link TokenCredential} used to authenticate HTTP
* requests.
* @return Updated {@link CallingServerClientBuilder} object.
* @throws NullPointerException If {@code tokenCredential} is null.
*/
public CallingServerClientBuilder credential(TokenCredential tokenCredential) {
    Objects.requireNonNull(tokenCredential, "'tokenCredential' cannot be null.");
    this.tokenCredential = tokenCredential;
    return this;
}
/**
* Sets the {@link AzureKeyCredential} used to authenticate HTTP requests.
* Package-private on purpose; key credentials normally arrive via the
* connection string.
*
* @param keyCredential The {@link AzureKeyCredential} used to authenticate HTTP
* requests.
* @return Updated {@link CallingServerClientBuilder} object.
* @throws NullPointerException If {@code keyCredential} is null.
*/
CallingServerClientBuilder credential(AzureKeyCredential keyCredential) {
    Objects.requireNonNull(keyCredential, "'keyCredential' cannot be null.");
    this.azureKeyCredential = keyCredential;
    return this;
}
/**
* Sets the connection string used to reach the service.
*
* @param connectionString connection string to set.
* @return Updated {@link CallingServerClientBuilder} object.
* @throws NullPointerException If {@code connectionString} is null.
*/
public CallingServerClientBuilder connectionString(String connectionString) {
    this.connectionString = Objects.requireNonNull(connectionString, "'connectionString' cannot be null.");
    return this;
}
/**
* Sets the retry policy to use (using the RetryPolicy type).
*
* @param retryPolicy object to be applied
* @return Updated {@link CallingServerClientBuilder} object.
* @throws NullPointerException If {@code retryPolicy} is null.
*/
public CallingServerClientBuilder retryPolicy(RetryPolicy retryPolicy) {
    Objects.requireNonNull(retryPolicy, "'retryPolicy' cannot be null.");
    this.retryPolicy = retryPolicy;
    return this;
}
/**
* Sets the configuration object used to retrieve environment configuration
* values during building of the client.
*
* @param configuration Configuration store used to retrieve environment
* configurations.
* @return Updated {@link CallingServerClientBuilder} object.
* @throws NullPointerException If {@code configuration} is null.
*/
public CallingServerClientBuilder configuration(Configuration configuration) {
    Objects.requireNonNull(configuration, "'configuration' cannot be null.");
    this.configuration = configuration;
    return this;
}
/**
* Sets the {@link HttpLogOptions} for service requests.
*
* @param logOptions The logging configuration to use when sending and receiving
* HTTP requests/responses.
* @return The updated {@link CallingServerClientBuilder} object.
* @throws NullPointerException If {@code logOptions} is null.
*/
public CallingServerClientBuilder httpLogOptions(HttpLogOptions logOptions) {
    Objects.requireNonNull(logOptions, "'logOptions' cannot be null.");
    this.httpLogOptions = logOptions;
    return this;
}
/**
* Sets the {@link CallingServerServiceVersion} that is used when making API requests.
* <p>
* If a service version is not provided, the service version that will be used will be the latest known service
* version based on the version of the client library being used. If no service version is specified, updating to a
* newer version of the client library will have the result of potentially moving to a newer service version.
* <p>
* Targeting a specific service version may also mean that the service will return an error for newer APIs.
* <p>
* NOTE: the supplied {@code version} is currently ignored by this builder (the
* method body discards it); the latest known service version is always used.
*
* @param version {@link CallingServerServiceVersion} of the service to be used when making requests.
* @return Updated CallingServerClientBuilder object
*/
public CallingServerClientBuilder serviceVersion(CallingServerServiceVersion version) {
return this;
}
/**
* Sets the HttpClient to use for service requests.
*
* @param httpClient httpClient to use, overridden by the pipeline field.
* @return Updated {@link CallingServerClientBuilder} object.
* @throws NullPointerException If {@code httpClient} is null.
*/
public CallingServerClientBuilder httpClient(HttpClient httpClient) {
    Objects.requireNonNull(httpClient, "'httpClient' cannot be null.");
    this.httpClient = httpClient;
    return this;
}
/**
* Applies an additional {@link HttpPipelinePolicy}.
*
* @param customPolicy HttpPipelinePolicy object to be applied after
* AzureKeyCredentialPolicy, UserAgentPolicy, RetryPolicy, and CookiePolicy.
* @return Updated {@link CallingServerClientBuilder} object.
* @throws NullPointerException If {@code customPolicy} is null.
*/
public CallingServerClientBuilder addPolicy(HttpPipelinePolicy customPolicy) {
    Objects.requireNonNull(customPolicy, "'customPolicy' cannot be null.");
    this.customPolicies.add(customPolicy);
    return this;
}
/**
* Create asynchronous client applying HMACAuthenticationPolicy,
* UserAgentPolicy, RetryPolicy, and CookiePolicy. Additional HttpPolicies
* specified by additionalPolicies will be applied after them
*
* @return a new {@link CallingServerAsyncClient} built from this builder's settings.
*/
public CallingServerAsyncClient buildAsyncClient() {
return new CallingServerAsyncClient(createServiceImpl());
}
/**
* Create synchronous client applying HmacAuthenticationPolicy, UserAgentPolicy,
* RetryPolicy, and CookiePolicy. Additional HttpPolicies specified by
* additionalPolicies will be applied after them.
*
* @return a new {@link CallingServerClient} wrapping the async client.
*/
public CallingServerClient buildClient() {
return new CallingServerClient(buildAsyncClient());
}
/**
* Validates the configured options and builds the generated service client.
*
* <p>Configuration rules enforced below: a connection string is mutually exclusive
* with an explicit endpoint, key credential, or token credential; the two credential
* kinds are mutually exclusive with each other. A connection string is expanded into
* an endpoint plus an {@code AzureKeyCredential}.</p>
*
* @return the configured {@code AzureCommunicationCallingServerServiceImpl}.
*/
private AzureCommunicationCallingServerServiceImpl createServiceImpl() {
boolean isConnectionStringSet = connectionString != null && !connectionString.trim().isEmpty();
boolean isEndpointSet = endpoint != null && !endpoint.trim().isEmpty();
boolean isAzureKeyCredentialSet = azureKeyCredential != null;
boolean isTokenCredentialSet = tokenCredential != null;
// Reject every ambiguous combination up front, with a logged exception.
if (isConnectionStringSet && isEndpointSet) {
throw logger.logExceptionAsError(new IllegalArgumentException(
"Both 'connectionString' and 'endpoint' are set. Just one may be used."));
}
if (isConnectionStringSet && isAzureKeyCredentialSet) {
throw logger.logExceptionAsError(new IllegalArgumentException(
"Both 'connectionString' and 'keyCredential' are set. Just one may be used."));
}
if (isConnectionStringSet && isTokenCredentialSet) {
throw logger.logExceptionAsError(new IllegalArgumentException(
"Both 'connectionString' and 'tokenCredential' are set. Just one may be used."));
}
if (isAzureKeyCredentialSet && isTokenCredentialSet) {
throw logger.logExceptionAsError(new IllegalArgumentException(
"Both 'tokenCredential' and 'keyCredential' are set. Just one may be used."));
}
if (isConnectionStringSet) {
// Expand the connection string into endpoint + access-key credential.
// The local 'endpoint' deliberately shadows the field; the endpoint(...) call sets the field.
CommunicationConnectionString connectionStringObject = new CommunicationConnectionString(connectionString);
String endpoint = connectionStringObject.getEndpoint();
String accessKey = connectionStringObject.getAccessKey();
endpoint(endpoint).credential(new AzureKeyCredential(accessKey));
}
Objects.requireNonNull(endpoint);
if (isTokenCredentialSet) {
// NOTE(review): another revision of this file wraps this call in try/catch for
// MalformedURLException (getHostNameFromEndpoint built on java.net.URL) - confirm
// which signature of getHostNameFromEndpoint is current before editing.
hostName = getHostNameFromEndpoint();
}
// Without an explicit pipeline an HttpClient is mandatory to build one.
if (pipeline == null) {
Objects.requireNonNull(httpClient);
}
HttpPipeline builderPipeline = pipeline;
if (pipeline == null) {
builderPipeline = createHttpPipeline(httpClient);
}
AzureCommunicationCallingServerServiceImplBuilder clientBuilder = new AzureCommunicationCallingServerServiceImplBuilder();
clientBuilder.endpoint(endpoint).pipeline(builderPipeline);
return clientBuilder.buildClient();
}
/**
* Allows the user to set a variety of client-related options, such as
* user-agent string, headers, etc.
*
* <p>Unlike the other setters on this builder there is deliberately no
* {@code Objects.requireNonNull} check here: a {@code null} value is tolerated and
* replaced with a default {@code new ClientOptions()} when the pipeline is
* assembled (see {@code createHttpPipeline}).</p>
*
* @param clientOptions object to be applied; may be {@code null}.
* @return Updated {@link CallingServerClientBuilder} object.
*/
public CallingServerClientBuilder clientOptions(ClientOptions clientOptions) {
this.clientOptions = clientOptions;
return this;
}
/**
* Builds the authentication-related pipeline policies from whichever credential
* was configured on this builder.
*
* <p>Exactly one of {@code tokenCredential} or {@code azureKeyCredential} must be
* set: both set, or neither set, is rejected with an
* {@link IllegalArgumentException} (logged through {@code logger} before being
* thrown).</p>
*
* @return the list of policies to splice into the HTTP pipeline.
*/
private List<HttpPipelinePolicy> createHttpPipelineAuthPolicies() {
// Guard: the two credential kinds are mutually exclusive.
if (tokenCredential != null && azureKeyCredential != null) {
throw logger.logExceptionAsError(new IllegalArgumentException(
"Both 'credential' and 'keyCredential' are set. Just one may be used."));
}
List<HttpPipelinePolicy> pipelinePolicies = new ArrayList<>();
if (tokenCredential != null) {
// AAD path: bearer-token auth plus a policy that pins the Host header to the
// resource host (hostName is derived from the endpoint during client build).
pipelinePolicies.add(new BearerTokenAuthenticationPolicy(tokenCredential,
// NOTE(review): this string literal appears truncated (extraction artifact);
// presumably the full AAD scope URL belongs here - confirm against the repo.
"https:
pipelinePolicies.add(new TokenCredentialAddHostHeaderPolicy(hostName));
} else if (azureKeyCredential != null) {
// Access-key path: HMAC request signing.
pipelinePolicies.add(new HmacAuthenticationPolicy(azureKeyCredential));
} else {
throw logger.logExceptionAsError(
new IllegalArgumentException("Missing credential information while building a client."));
}
return pipelinePolicies;
}
/**
* Assembles the HTTP pipeline used by the generated client: user agent, request id,
* retry, redirect, authentication, cookie, any caller-supplied policies, and logging.
* If an explicit pipeline was supplied to the builder it is returned unchanged.
*
* @param httpClient transport to attach to the pipeline.
* @return the pipeline to hand to the generated service client.
*/
private HttpPipeline createHttpPipeline(HttpClient httpClient) {
    if (pipeline != null) {
        return pipeline;
    }
    ClientOptions resolvedClientOptions = clientOptions != null ? clientOptions : new ClientOptions();
    HttpLogOptions resolvedLogOptions = httpLogOptions != null ? httpLogOptions : new HttpLogOptions();
    // The application id comes from ClientOptions, falling back to HttpLogOptions.
    String applicationId = null;
    String fromClientOptions = resolvedClientOptions.getApplicationId();
    String fromLogOptions = resolvedLogOptions.getApplicationId();
    if (!CoreUtils.isNullOrEmpty(fromClientOptions)) {
        applicationId = fromClientOptions;
    } else if (!CoreUtils.isNullOrEmpty(fromLogOptions)) {
        applicationId = fromLogOptions;
    }
    List<HttpPipelinePolicy> policies = new ArrayList<>();
    policies.add(new UserAgentPolicy(applicationId,
        properties.getOrDefault(SDK_NAME, "UnknownName"),
        properties.getOrDefault(SDK_VERSION, "UnknownVersion"),
        configuration));
    policies.add(new RequestIdPolicy());
    policies.add(retryPolicy != null ? retryPolicy : new RetryPolicy());
    policies.add(new RedirectPolicy());
    policies.addAll(createHttpPipelineAuthPolicies());
    policies.add(new CookiePolicy());
    if (!customPolicies.isEmpty()) {
        policies.addAll(customPolicies);
    }
    // getHttpLogOptions() also lazily initializes the field, matching prior behavior.
    policies.add(new HttpLoggingPolicy(getHttpLogOptions()));
    return new HttpPipelineBuilder()
        .policies(policies.toArray(new HttpPipelinePolicy[0]))
        .httpClient(httpClient)
        .build();
}
/**
* Lazily initializes and returns the {@link HttpLogOptions} for this builder.
* A default instance is created (and cached on the field) the first time this is
* called with no options configured.
*
* @return the log options, never {@code null}.
*/
private HttpLogOptions getHttpLogOptions() {
    HttpLogOptions options = this.httpLogOptions;
    if (options == null) {
        options = new HttpLogOptions();
        this.httpLogOptions = options;
    }
    return options;
}
} | class CallingServerClientBuilder {
private static final String SDK_NAME = "name";
private static final String SDK_VERSION = "version";
private static final String APP_CONFIG_PROPERTIES = "azure-communication-callingserver.properties";
private final ClientLogger logger = new ClientLogger(CallingServerClientBuilder.class);
private String connectionString;
private String endpoint;
private String hostName;
private AzureKeyCredential azureKeyCredential;
private TokenCredential tokenCredential;
private HttpClient httpClient;
private HttpLogOptions httpLogOptions = new HttpLogOptions();
private HttpPipeline pipeline;
private Configuration configuration;
private final Map<String, String> properties = CoreUtils.getProperties(APP_CONFIG_PROPERTIES);
private final List<HttpPipelinePolicy> customPolicies = new ArrayList<>();
private ClientOptions clientOptions;
private RetryPolicy retryPolicy;
/**
* Set endpoint of the service.
*
* @param endpoint url of the service.
* @return CallingServerClientBuilder object.
*/
public CallingServerClientBuilder endpoint(String endpoint) {
this.endpoint = Objects.requireNonNull(endpoint, "'endpoint' cannot be null.");
return this;
}
/**
* Set endpoint of the service.
*
* @param pipeline HttpPipeline to use, if a pipeline is not supplied, the
* credential and httpClient fields must be set.
* @return CallingServerClientBuilder object.
*/
public CallingServerClientBuilder pipeline(HttpPipeline pipeline) {
this.pipeline = Objects.requireNonNull(pipeline, "'pipeline' cannot be null.");
return this;
}
/**
* Sets the {@link TokenCredential} used to authenticate HTTP requests.
*
* @param tokenCredential {@link TokenCredential} used to authenticate HTTP
* requests.
* @return Updated {@link CallingServerClientBuilder} object.
* @throws NullPointerException If {@code tokenCredential} is null.
*/
public CallingServerClientBuilder credential(TokenCredential tokenCredential) {
this.tokenCredential = Objects.requireNonNull(tokenCredential, "'tokenCredential' cannot be null.");
return this;
}
/**
* Sets the {@link AzureKeyCredential} used to authenticate HTTP requests.
*
* @param keyCredential The {@link AzureKeyCredential} used to authenticate HTTP
* requests.
* @return Updated {@link CallingServerClientBuilder} object.
* @throws NullPointerException If {@code keyCredential} is null.
*/
CallingServerClientBuilder credential(AzureKeyCredential keyCredential) {
this.azureKeyCredential = Objects.requireNonNull(keyCredential, "'keyCredential' cannot be null.");
return this;
}
/**
* Set connectionString to use.
*
* @param connectionString connection string to set.
* @return Updated {@link CallingServerClientBuilder} object.
*/
public CallingServerClientBuilder connectionString(String connectionString) {
Objects.requireNonNull(connectionString, "'connectionString' cannot be null.");
this.connectionString = connectionString;
return this;
}
/**
* Sets the retry policy to use (using the RetryPolicy type).
*
* @param retryPolicy object to be applied
* @return Updated {@link CallingServerClientBuilder} object.
*/
public CallingServerClientBuilder retryPolicy(RetryPolicy retryPolicy) {
this.retryPolicy = Objects.requireNonNull(retryPolicy, "'retryPolicy' cannot be null.");
return this;
}
/**
* Sets the configuration object used to retrieve environment configuration
* values during building of the client.
*
* @param configuration Configuration store used to retrieve environment
* configurations.
* @return Updated {@link CallingServerClientBuilder} object.
*/
public CallingServerClientBuilder configuration(Configuration configuration) {
this.configuration = Objects.requireNonNull(configuration, "'configuration' cannot be null.");
return this;
}
/**
* Sets the {@link HttpLogOptions} for service requests.
*
* @param logOptions The logging configuration to use when sending and receiving
* HTTP requests/responses.
* @return The updated {@link CallingServerClientBuilder} object.
*/
public CallingServerClientBuilder httpLogOptions(HttpLogOptions logOptions) {
this.httpLogOptions = Objects.requireNonNull(logOptions, "'logOptions' cannot be null.");
return this;
}
/**
* Sets the {@link CallingServerServiceVersion} that is used when making API requests.
* <p>
* If a service version is not provided, the service version that will be used will be the latest known service
* version based on the version of the client library being used. If no service version is specified, updating to a
* newer version of the client library will have the result of potentially moving to a newer service version.
* <p>
* Targeting a specific service version may also mean that the service will return an error for newer APIs.
*
* @param version {@link CallingServerServiceVersion} of the service to be used when making requests.
* @return Updated CallingServerClientBuilder object
*/
public CallingServerClientBuilder serviceVersion(CallingServerServiceVersion version) {
return this;
}
/**
* Set httpClient to use
*
* @param httpClient httpClient to use, overridden by the pipeline field.
* @return Updated {@link CallingServerClientBuilder} object.
*/
public CallingServerClientBuilder httpClient(HttpClient httpClient) {
this.httpClient = Objects.requireNonNull(httpClient, "'httpClient' cannot be null.");
return this;
}
/**
* Apply additional HttpPipelinePolicy
*
* @param customPolicy HttpPipelinePolicy object to be applied after
* AzureKeyCredentialPolicy, UserAgentPolicy, RetryPolicy, and CookiePolicy.
* @return Updated {@link CallingServerClientBuilder} object.
*/
public CallingServerClientBuilder addPolicy(HttpPipelinePolicy customPolicy) {
this.customPolicies.add(Objects.requireNonNull(customPolicy, "'customPolicy' cannot be null."));
return this;
}
/**
* Create asynchronous client applying HMACAuthenticationPolicy,
* UserAgentPolicy, RetryPolicy, and CookiePolicy. Additional HttpPolicies
* specified by additionalPolicies will be applied after them
*
* @return The updated {@link CallingServerClientBuilder} object.
*/
public CallingServerAsyncClient buildAsyncClient() {
return new CallingServerAsyncClient(createServiceImpl());
}
/**
* Create synchronous client applying HmacAuthenticationPolicy, UserAgentPolicy,
* RetryPolicy, and CookiePolicy. Additional HttpPolicies specified by
* additionalPolicies will be applied after them.
*
* @return Updated {@link CallingServerClientBuilder} object.
*/
public CallingServerClient buildClient() {
return new CallingServerClient(buildAsyncClient());
}
/**
* Validates the configured options and builds the generated service client.
*
* <p>Configuration rules: a connection string is mutually exclusive with an explicit
* endpoint, key credential, or token credential; the two credential kinds are
* mutually exclusive with each other. A connection string is expanded into an
* endpoint plus an {@code AzureKeyCredential}.</p>
*
* <p>Fix: the {@link RuntimeException} rethrown for a malformed endpoint URL now
* chains the original {@link MalformedURLException} as its cause instead of keeping
* only its message, so the full stack trace is preserved for diagnostics.</p>
*
* @return the configured {@code AzureCommunicationCallingServerServiceImpl}.
*/
private AzureCommunicationCallingServerServiceImpl createServiceImpl() {
    boolean isConnectionStringSet = connectionString != null && !connectionString.trim().isEmpty();
    boolean isEndpointSet = endpoint != null && !endpoint.trim().isEmpty();
    boolean isAzureKeyCredentialSet = azureKeyCredential != null;
    boolean isTokenCredentialSet = tokenCredential != null;
    // Reject every ambiguous combination up front, with a logged exception.
    if (isConnectionStringSet && isEndpointSet) {
        throw logger.logExceptionAsError(new IllegalArgumentException(
            "Both 'connectionString' and 'endpoint' are set. Just one may be used."));
    }
    if (isConnectionStringSet && isAzureKeyCredentialSet) {
        throw logger.logExceptionAsError(new IllegalArgumentException(
            "Both 'connectionString' and 'keyCredential' are set. Just one may be used."));
    }
    if (isConnectionStringSet && isTokenCredentialSet) {
        throw logger.logExceptionAsError(new IllegalArgumentException(
            "Both 'connectionString' and 'tokenCredential' are set. Just one may be used."));
    }
    if (isAzureKeyCredentialSet && isTokenCredentialSet) {
        throw logger.logExceptionAsError(new IllegalArgumentException(
            "Both 'tokenCredential' and 'keyCredential' are set. Just one may be used."));
    }
    if (isConnectionStringSet) {
        // Expand the connection string into endpoint + access-key credential.
        // The local 'endpoint' shadows the field; the endpoint(...) call sets the field.
        CommunicationConnectionString connectionStringObject = new CommunicationConnectionString(connectionString);
        String endpoint = connectionStringObject.getEndpoint();
        String accessKey = connectionStringObject.getAccessKey();
        endpoint(endpoint).credential(new AzureKeyCredential(accessKey));
    }
    Objects.requireNonNull(endpoint);
    if (isTokenCredentialSet) {
        try {
            hostName = getHostNameFromEndpoint();
        } catch (MalformedURLException e) {
            // Chain the cause so callers see where the malformed URL came from.
            throw logger.logExceptionAsError(new RuntimeException(e.getMessage(), e));
        }
    }
    // Without an explicit pipeline an HttpClient is mandatory to build one.
    HttpPipeline builderPipeline = pipeline;
    if (builderPipeline == null) {
        Objects.requireNonNull(httpClient);
        builderPipeline = createHttpPipeline(httpClient);
    }
    AzureCommunicationCallingServerServiceImplBuilder clientBuilder = new AzureCommunicationCallingServerServiceImplBuilder();
    clientBuilder.endpoint(endpoint).pipeline(builderPipeline);
    return clientBuilder.buildClient();
}
/**
* Allows the user to set a variety of client-related options, such as
* user-agent string, headers, etc.
*
* @param clientOptions object to be applied.
* @return Updated {@link CallingServerClientBuilder} object.
*/
public CallingServerClientBuilder clientOptions(ClientOptions clientOptions) {
this.clientOptions = clientOptions;
return this;
}
private List<HttpPipelinePolicy> createHttpPipelineAuthPolicies() {
if (tokenCredential != null && azureKeyCredential != null) {
throw logger.logExceptionAsError(new IllegalArgumentException(
"Both 'credential' and 'keyCredential' are set. Just one may be used."));
}
List<HttpPipelinePolicy> pipelinePolicies = new ArrayList<>();
if (tokenCredential != null) {
pipelinePolicies.add(new BearerTokenAuthenticationPolicy(tokenCredential,
"https:
pipelinePolicies.add(new TokenCredentialAddHostHeaderPolicy(hostName));
} else if (azureKeyCredential != null) {
pipelinePolicies.add(new HmacAuthenticationPolicy(azureKeyCredential));
} else {
throw logger.logExceptionAsError(
new IllegalArgumentException("Missing credential information while building a client."));
}
return pipelinePolicies;
}
private HttpPipeline createHttpPipeline(HttpClient httpClient) {
if (pipeline != null) {
return pipeline;
}
List<HttpPipelinePolicy> policyList = new ArrayList<>();
ClientOptions buildClientOptions = (clientOptions == null) ? new ClientOptions() : clientOptions;
HttpLogOptions buildLogOptions = (httpLogOptions == null) ? new HttpLogOptions() : httpLogOptions;
String applicationId = null;
if (!CoreUtils.isNullOrEmpty(buildClientOptions.getApplicationId())) {
applicationId = buildClientOptions.getApplicationId();
} else if (!CoreUtils.isNullOrEmpty(buildLogOptions.getApplicationId())) {
applicationId = buildLogOptions.getApplicationId();
}
String clientName = properties.getOrDefault(SDK_NAME, "UnknownName");
String clientVersion = properties.getOrDefault(SDK_VERSION, "UnknownVersion");
policyList.add(new UserAgentPolicy(applicationId, clientName, clientVersion, configuration));
policyList.add(new RequestIdPolicy());
policyList.add((retryPolicy == null) ? new RetryPolicy() : retryPolicy);
policyList.add(new RedirectPolicy());
policyList.addAll(createHttpPipelineAuthPolicies());
policyList.add(new CookiePolicy());
if (!customPolicies.isEmpty()) {
policyList.addAll(customPolicies);
}
policyList.add(new HttpLoggingPolicy(getHttpLogOptions()));
return new HttpPipelineBuilder().policies(policyList.toArray(new HttpPipelinePolicy[0])).httpClient(httpClient)
.build();
}
private HttpLogOptions getHttpLogOptions() {
if (httpLogOptions == null) {
httpLogOptions = new HttpLogOptions();
}
return httpLogOptions;
}
} |
Yeah, I recommend you to do as Bharat suggests. Let the library do the heavy lifting. There is URL library where you can create the URL from a String and call `getHost()` to get the host name. | private String getHostNameFromEndpoint() {
return endpoint.replace("https:
} | return endpoint.replace("https: | private String getHostNameFromEndpoint() throws MalformedURLException {
return new URL(endpoint).getHost();
} | class CallingServerClientBuilder {
private static final String SDK_NAME = "name";
private static final String SDK_VERSION = "version";
private static final String APP_CONFIG_PROPERTIES = "azure-communication-callingserver.properties";
private final ClientLogger logger = new ClientLogger(CallingServerClientBuilder.class);
private String connectionString;
private String endpoint;
private String hostName;
private AzureKeyCredential azureKeyCredential;
private TokenCredential tokenCredential;
private HttpClient httpClient;
private HttpLogOptions httpLogOptions = new HttpLogOptions();
private HttpPipeline pipeline;
private Configuration configuration;
private final Map<String, String> properties = CoreUtils.getProperties(APP_CONFIG_PROPERTIES);
private final List<HttpPipelinePolicy> customPolicies = new ArrayList<>();
private ClientOptions clientOptions;
private RetryPolicy retryPolicy;
/**
* Set endpoint of the service.
*
* @param endpoint url of the service.
* @return CallingServerClientBuilder object.
*/
public CallingServerClientBuilder endpoint(String endpoint) {
this.endpoint = Objects.requireNonNull(endpoint, "'endpoint' cannot be null.");
return this;
}
/**
* Set endpoint of the service.
*
* @param pipeline HttpPipeline to use, if a pipeline is not supplied, the
* credential and httpClient fields must be set.
* @return CallingServerClientBuilder object.
*/
public CallingServerClientBuilder pipeline(HttpPipeline pipeline) {
this.pipeline = Objects.requireNonNull(pipeline, "'pipeline' cannot be null.");
return this;
}
/**
* Sets the {@link TokenCredential} used to authenticate HTTP requests.
*
* @param tokenCredential {@link TokenCredential} used to authenticate HTTP
* requests.
* @return Updated {@link CallingServerClientBuilder} object.
* @throws NullPointerException If {@code tokenCredential} is null.
*/
public CallingServerClientBuilder credential(TokenCredential tokenCredential) {
this.tokenCredential = Objects.requireNonNull(tokenCredential, "'tokenCredential' cannot be null.");
return this;
}
/**
* Sets the {@link AzureKeyCredential} used to authenticate HTTP requests.
*
* @param keyCredential The {@link AzureKeyCredential} used to authenticate HTTP
* requests.
* @return Updated {@link CallingServerClientBuilder} object.
* @throws NullPointerException If {@code keyCredential} is null.
*/
CallingServerClientBuilder credential(AzureKeyCredential keyCredential) {
this.azureKeyCredential = Objects.requireNonNull(keyCredential, "'keyCredential' cannot be null.");
return this;
}
/**
* Set connectionString to use.
*
* @param connectionString connection string to set.
* @return Updated {@link CallingServerClientBuilder} object.
*/
public CallingServerClientBuilder connectionString(String connectionString) {
Objects.requireNonNull(connectionString, "'connectionString' cannot be null.");
this.connectionString = connectionString;
return this;
}
/**
* Sets the retry policy to use (using the RetryPolicy type).
*
* @param retryPolicy object to be applied
* @return Updated {@link CallingServerClientBuilder} object.
*/
public CallingServerClientBuilder retryPolicy(RetryPolicy retryPolicy) {
this.retryPolicy = Objects.requireNonNull(retryPolicy, "'retryPolicy' cannot be null.");
return this;
}
/**
* Sets the configuration object used to retrieve environment configuration
* values during building of the client.
*
* @param configuration Configuration store used to retrieve environment
* configurations.
* @return Updated {@link CallingServerClientBuilder} object.
*/
public CallingServerClientBuilder configuration(Configuration configuration) {
this.configuration = Objects.requireNonNull(configuration, "'configuration' cannot be null.");
return this;
}
/**
* Sets the {@link HttpLogOptions} for service requests.
*
* @param logOptions The logging configuration to use when sending and receiving
* HTTP requests/responses.
* @return The updated {@link CallingServerClientBuilder} object.
*/
public CallingServerClientBuilder httpLogOptions(HttpLogOptions logOptions) {
this.httpLogOptions = Objects.requireNonNull(logOptions, "'logOptions' cannot be null.");
return this;
}
/**
* Sets the {@link CallingServerServiceVersion} that is used when making API requests.
* <p>
* If a service version is not provided, the service version that will be used will be the latest known service
* version based on the version of the client library being used. If no service version is specified, updating to a
* newer version of the client library will have the result of potentially moving to a newer service version.
* <p>
* Targeting a specific service version may also mean that the service will return an error for newer APIs.
*
* @param version {@link CallingServerServiceVersion} of the service to be used when making requests.
* @return Updated CallingServerClientBuilder object
*/
public CallingServerClientBuilder serviceVersion(CallingServerServiceVersion version) {
return this;
}
/**
* Set httpClient to use
*
* @param httpClient httpClient to use, overridden by the pipeline field.
* @return Updated {@link CallingServerClientBuilder} object.
*/
public CallingServerClientBuilder httpClient(HttpClient httpClient) {
this.httpClient = Objects.requireNonNull(httpClient, "'httpClient' cannot be null.");
return this;
}
/**
* Apply additional HttpPipelinePolicy
*
* @param customPolicy HttpPipelinePolicy object to be applied after
* AzureKeyCredentialPolicy, UserAgentPolicy, RetryPolicy, and CookiePolicy.
* @return Updated {@link CallingServerClientBuilder} object.
*/
public CallingServerClientBuilder addPolicy(HttpPipelinePolicy customPolicy) {
this.customPolicies.add(Objects.requireNonNull(customPolicy, "'customPolicy' cannot be null."));
return this;
}
/**
* Create asynchronous client applying HMACAuthenticationPolicy,
* UserAgentPolicy, RetryPolicy, and CookiePolicy. Additional HttpPolicies
* specified by additionalPolicies will be applied after them
*
* @return The updated {@link CallingServerClientBuilder} object.
*/
public CallingServerAsyncClient buildAsyncClient() {
return new CallingServerAsyncClient(createServiceImpl());
}
/**
* Create synchronous client applying HmacAuthenticationPolicy, UserAgentPolicy,
* RetryPolicy, and CookiePolicy. Additional HttpPolicies specified by
* additionalPolicies will be applied after them.
*
* @return Updated {@link CallingServerClientBuilder} object.
*/
public CallingServerClient buildClient() {
return new CallingServerClient(buildAsyncClient());
}
private AzureCommunicationCallingServerServiceImpl createServiceImpl() {
boolean isConnectionStringSet = connectionString != null && !connectionString.trim().isEmpty();
boolean isEndpointSet = endpoint != null && !endpoint.trim().isEmpty();
boolean isAzureKeyCredentialSet = azureKeyCredential != null;
boolean isTokenCredentialSet = tokenCredential != null;
if (isConnectionStringSet && isEndpointSet) {
throw logger.logExceptionAsError(new IllegalArgumentException(
"Both 'connectionString' and 'endpoint' are set. Just one may be used."));
}
if (isConnectionStringSet && isAzureKeyCredentialSet) {
throw logger.logExceptionAsError(new IllegalArgumentException(
"Both 'connectionString' and 'keyCredential' are set. Just one may be used."));
}
if (isConnectionStringSet && isTokenCredentialSet) {
throw logger.logExceptionAsError(new IllegalArgumentException(
"Both 'connectionString' and 'tokenCredential' are set. Just one may be used."));
}
if (isAzureKeyCredentialSet && isTokenCredentialSet) {
throw logger.logExceptionAsError(new IllegalArgumentException(
"Both 'tokenCredential' and 'keyCredential' are set. Just one may be used."));
}
if (isConnectionStringSet) {
CommunicationConnectionString connectionStringObject = new CommunicationConnectionString(connectionString);
String endpoint = connectionStringObject.getEndpoint();
String accessKey = connectionStringObject.getAccessKey();
endpoint(endpoint).credential(new AzureKeyCredential(accessKey));
}
Objects.requireNonNull(endpoint);
if (isTokenCredentialSet) {
hostName = getHostNameFromEndpoint();
}
if (pipeline == null) {
Objects.requireNonNull(httpClient);
}
HttpPipeline builderPipeline = pipeline;
if (pipeline == null) {
builderPipeline = createHttpPipeline(httpClient);
}
AzureCommunicationCallingServerServiceImplBuilder clientBuilder = new AzureCommunicationCallingServerServiceImplBuilder();
clientBuilder.endpoint(endpoint).pipeline(builderPipeline);
return clientBuilder.buildClient();
}
/**
* Allows the user to set a variety of client-related options, such as
* user-agent string, headers, etc.
*
* @param clientOptions object to be applied.
* @return Updated {@link CallingServerClientBuilder} object.
*/
public CallingServerClientBuilder clientOptions(ClientOptions clientOptions) {
this.clientOptions = clientOptions;
return this;
}
private List<HttpPipelinePolicy> createHttpPipelineAuthPolicies() {
if (tokenCredential != null && azureKeyCredential != null) {
throw logger.logExceptionAsError(new IllegalArgumentException(
"Both 'credential' and 'keyCredential' are set. Just one may be used."));
}
List<HttpPipelinePolicy> pipelinePolicies = new ArrayList<>();
if (tokenCredential != null) {
pipelinePolicies.add(new BearerTokenAuthenticationPolicy(tokenCredential,
"https:
pipelinePolicies.add(new TokenCredentialAddHostHeaderPolicy(hostName));
} else if (azureKeyCredential != null) {
pipelinePolicies.add(new HmacAuthenticationPolicy(azureKeyCredential));
} else {
throw logger.logExceptionAsError(
new IllegalArgumentException("Missing credential information while building a client."));
}
return pipelinePolicies;
}
private HttpPipeline createHttpPipeline(HttpClient httpClient) {
if (pipeline != null) {
return pipeline;
}
List<HttpPipelinePolicy> policyList = new ArrayList<>();
ClientOptions buildClientOptions = (clientOptions == null) ? new ClientOptions() : clientOptions;
HttpLogOptions buildLogOptions = (httpLogOptions == null) ? new HttpLogOptions() : httpLogOptions;
String applicationId = null;
if (!CoreUtils.isNullOrEmpty(buildClientOptions.getApplicationId())) {
applicationId = buildClientOptions.getApplicationId();
} else if (!CoreUtils.isNullOrEmpty(buildLogOptions.getApplicationId())) {
applicationId = buildLogOptions.getApplicationId();
}
String clientName = properties.getOrDefault(SDK_NAME, "UnknownName");
String clientVersion = properties.getOrDefault(SDK_VERSION, "UnknownVersion");
policyList.add(new UserAgentPolicy(applicationId, clientName, clientVersion, configuration));
policyList.add(new RequestIdPolicy());
policyList.add((retryPolicy == null) ? new RetryPolicy() : retryPolicy);
policyList.add(new RedirectPolicy());
policyList.addAll(createHttpPipelineAuthPolicies());
policyList.add(new CookiePolicy());
if (!customPolicies.isEmpty()) {
policyList.addAll(customPolicies);
}
policyList.add(new HttpLoggingPolicy(getHttpLogOptions()));
return new HttpPipelineBuilder().policies(policyList.toArray(new HttpPipelinePolicy[0])).httpClient(httpClient)
.build();
}
private HttpLogOptions getHttpLogOptions() {
if (httpLogOptions == null) {
httpLogOptions = new HttpLogOptions();
}
return httpLogOptions;
}
} | class CallingServerClientBuilder {
private static final String SDK_NAME = "name";
private static final String SDK_VERSION = "version";
private static final String APP_CONFIG_PROPERTIES = "azure-communication-callingserver.properties";
private final ClientLogger logger = new ClientLogger(CallingServerClientBuilder.class);
private String connectionString;
private String endpoint;
private String hostName;
private AzureKeyCredential azureKeyCredential;
private TokenCredential tokenCredential;
private HttpClient httpClient;
private HttpLogOptions httpLogOptions = new HttpLogOptions();
private HttpPipeline pipeline;
private Configuration configuration;
private final Map<String, String> properties = CoreUtils.getProperties(APP_CONFIG_PROPERTIES);
private final List<HttpPipelinePolicy> customPolicies = new ArrayList<>();
private ClientOptions clientOptions;
private RetryPolicy retryPolicy;
/**
* Set endpoint of the service.
*
* @param endpoint url of the service.
* @return CallingServerClientBuilder object.
*/
public CallingServerClientBuilder endpoint(String endpoint) {
this.endpoint = Objects.requireNonNull(endpoint, "'endpoint' cannot be null.");
return this;
}
/**
* Set endpoint of the service.
*
* @param pipeline HttpPipeline to use, if a pipeline is not supplied, the
* credential and httpClient fields must be set.
* @return CallingServerClientBuilder object.
*/
public CallingServerClientBuilder pipeline(HttpPipeline pipeline) {
this.pipeline = Objects.requireNonNull(pipeline, "'pipeline' cannot be null.");
return this;
}
/**
* Sets the {@link TokenCredential} used to authenticate HTTP requests.
*
* @param tokenCredential {@link TokenCredential} used to authenticate HTTP
* requests.
* @return Updated {@link CallingServerClientBuilder} object.
* @throws NullPointerException If {@code tokenCredential} is null.
*/
public CallingServerClientBuilder credential(TokenCredential tokenCredential) {
this.tokenCredential = Objects.requireNonNull(tokenCredential, "'tokenCredential' cannot be null.");
return this;
}
/**
* Sets the {@link AzureKeyCredential} used to authenticate HTTP requests.
*
* @param keyCredential The {@link AzureKeyCredential} used to authenticate HTTP
* requests.
* @return Updated {@link CallingServerClientBuilder} object.
* @throws NullPointerException If {@code keyCredential} is null.
*/
CallingServerClientBuilder credential(AzureKeyCredential keyCredential) {
this.azureKeyCredential = Objects.requireNonNull(keyCredential, "'keyCredential' cannot be null.");
return this;
}
/**
* Set connectionString to use.
*
* @param connectionString connection string to set.
* @return Updated {@link CallingServerClientBuilder} object.
*/
public CallingServerClientBuilder connectionString(String connectionString) {
Objects.requireNonNull(connectionString, "'connectionString' cannot be null.");
this.connectionString = connectionString;
return this;
}
/**
* Sets the retry policy to use (using the RetryPolicy type).
*
* @param retryPolicy object to be applied
* @return Updated {@link CallingServerClientBuilder} object.
*/
public CallingServerClientBuilder retryPolicy(RetryPolicy retryPolicy) {
this.retryPolicy = Objects.requireNonNull(retryPolicy, "'retryPolicy' cannot be null.");
return this;
}
/**
* Sets the configuration object used to retrieve environment configuration
* values during building of the client.
*
* @param configuration Configuration store used to retrieve environment
* configurations.
* @return Updated {@link CallingServerClientBuilder} object.
*/
public CallingServerClientBuilder configuration(Configuration configuration) {
this.configuration = Objects.requireNonNull(configuration, "'configuration' cannot be null.");
return this;
}
/**
* Sets the {@link HttpLogOptions} for service requests.
*
* @param logOptions The logging configuration to use when sending and receiving
* HTTP requests/responses.
* @return The updated {@link CallingServerClientBuilder} object.
*/
public CallingServerClientBuilder httpLogOptions(HttpLogOptions logOptions) {
this.httpLogOptions = Objects.requireNonNull(logOptions, "'logOptions' cannot be null.");
return this;
}
/**
* Sets the {@link CallingServerServiceVersion} that is used when making API requests.
* <p>
* If a service version is not provided, the service version that will be used will be the latest known service
* version based on the version of the client library being used. If no service version is specified, updating to a
* newer version of the client library will have the result of potentially moving to a newer service version.
* <p>
* Targeting a specific service version may also mean that the service will return an error for newer APIs.
*
* @param version {@link CallingServerServiceVersion} of the service to be used when making requests.
* @return Updated CallingServerClientBuilder object
*/
public CallingServerClientBuilder serviceVersion(CallingServerServiceVersion version) {
return this;
}
/**
* Set httpClient to use
*
* @param httpClient httpClient to use, overridden by the pipeline field.
* @return Updated {@link CallingServerClientBuilder} object.
*/
public CallingServerClientBuilder httpClient(HttpClient httpClient) {
this.httpClient = Objects.requireNonNull(httpClient, "'httpClient' cannot be null.");
return this;
}
/**
* Apply additional HttpPipelinePolicy
*
* @param customPolicy HttpPipelinePolicy object to be applied after
* AzureKeyCredentialPolicy, UserAgentPolicy, RetryPolicy, and CookiePolicy.
* @return Updated {@link CallingServerClientBuilder} object.
*/
public CallingServerClientBuilder addPolicy(HttpPipelinePolicy customPolicy) {
this.customPolicies.add(Objects.requireNonNull(customPolicy, "'customPolicy' cannot be null."));
return this;
}
/**
* Create asynchronous client applying HMACAuthenticationPolicy,
* UserAgentPolicy, RetryPolicy, and CookiePolicy. Additional HttpPolicies
* specified by additionalPolicies will be applied after them
*
* @return The updated {@link CallingServerClientBuilder} object.
*/
public CallingServerAsyncClient buildAsyncClient() {
return new CallingServerAsyncClient(createServiceImpl());
}
/**
* Create synchronous client applying HmacAuthenticationPolicy, UserAgentPolicy,
* RetryPolicy, and CookiePolicy. Additional HttpPolicies specified by
* additionalPolicies will be applied after them.
*
* @return Updated {@link CallingServerClientBuilder} object.
*/
public CallingServerClient buildClient() {
return new CallingServerClient(buildAsyncClient());
}
private AzureCommunicationCallingServerServiceImpl createServiceImpl() {
boolean isConnectionStringSet = connectionString != null && !connectionString.trim().isEmpty();
boolean isEndpointSet = endpoint != null && !endpoint.trim().isEmpty();
boolean isAzureKeyCredentialSet = azureKeyCredential != null;
boolean isTokenCredentialSet = tokenCredential != null;
if (isConnectionStringSet && isEndpointSet) {
throw logger.logExceptionAsError(new IllegalArgumentException(
"Both 'connectionString' and 'endpoint' are set. Just one may be used."));
}
if (isConnectionStringSet && isAzureKeyCredentialSet) {
throw logger.logExceptionAsError(new IllegalArgumentException(
"Both 'connectionString' and 'keyCredential' are set. Just one may be used."));
}
if (isConnectionStringSet && isTokenCredentialSet) {
throw logger.logExceptionAsError(new IllegalArgumentException(
"Both 'connectionString' and 'tokenCredential' are set. Just one may be used."));
}
if (isAzureKeyCredentialSet && isTokenCredentialSet) {
throw logger.logExceptionAsError(new IllegalArgumentException(
"Both 'tokenCredential' and 'keyCredential' are set. Just one may be used."));
}
if (isConnectionStringSet) {
CommunicationConnectionString connectionStringObject = new CommunicationConnectionString(connectionString);
String endpoint = connectionStringObject.getEndpoint();
String accessKey = connectionStringObject.getAccessKey();
endpoint(endpoint).credential(new AzureKeyCredential(accessKey));
}
Objects.requireNonNull(endpoint);
if (isTokenCredentialSet) {
try {
hostName = getHostNameFromEndpoint();
} catch (MalformedURLException e) {
throw logger.logExceptionAsError(new RuntimeException(e.getMessage()));
}
}
if (pipeline == null) {
Objects.requireNonNull(httpClient);
}
HttpPipeline builderPipeline = pipeline;
if (pipeline == null) {
builderPipeline = createHttpPipeline(httpClient);
}
AzureCommunicationCallingServerServiceImplBuilder clientBuilder = new AzureCommunicationCallingServerServiceImplBuilder();
clientBuilder.endpoint(endpoint).pipeline(builderPipeline);
return clientBuilder.buildClient();
}
/**
* Allows the user to set a variety of client-related options, such as
* user-agent string, headers, etc.
*
* @param clientOptions object to be applied.
* @return Updated {@link CallingServerClientBuilder} object.
*/
public CallingServerClientBuilder clientOptions(ClientOptions clientOptions) {
this.clientOptions = clientOptions;
return this;
}
private List<HttpPipelinePolicy> createHttpPipelineAuthPolicies() {
if (tokenCredential != null && azureKeyCredential != null) {
throw logger.logExceptionAsError(new IllegalArgumentException(
"Both 'credential' and 'keyCredential' are set. Just one may be used."));
}
List<HttpPipelinePolicy> pipelinePolicies = new ArrayList<>();
if (tokenCredential != null) {
pipelinePolicies.add(new BearerTokenAuthenticationPolicy(tokenCredential,
"https:
pipelinePolicies.add(new TokenCredentialAddHostHeaderPolicy(hostName));
} else if (azureKeyCredential != null) {
pipelinePolicies.add(new HmacAuthenticationPolicy(azureKeyCredential));
} else {
throw logger.logExceptionAsError(
new IllegalArgumentException("Missing credential information while building a client."));
}
return pipelinePolicies;
}
private HttpPipeline createHttpPipeline(HttpClient httpClient) {
if (pipeline != null) {
return pipeline;
}
List<HttpPipelinePolicy> policyList = new ArrayList<>();
ClientOptions buildClientOptions = (clientOptions == null) ? new ClientOptions() : clientOptions;
HttpLogOptions buildLogOptions = (httpLogOptions == null) ? new HttpLogOptions() : httpLogOptions;
String applicationId = null;
if (!CoreUtils.isNullOrEmpty(buildClientOptions.getApplicationId())) {
applicationId = buildClientOptions.getApplicationId();
} else if (!CoreUtils.isNullOrEmpty(buildLogOptions.getApplicationId())) {
applicationId = buildLogOptions.getApplicationId();
}
String clientName = properties.getOrDefault(SDK_NAME, "UnknownName");
String clientVersion = properties.getOrDefault(SDK_VERSION, "UnknownVersion");
policyList.add(new UserAgentPolicy(applicationId, clientName, clientVersion, configuration));
policyList.add(new RequestIdPolicy());
policyList.add((retryPolicy == null) ? new RetryPolicy() : retryPolicy);
policyList.add(new RedirectPolicy());
policyList.addAll(createHttpPipelineAuthPolicies());
policyList.add(new CookiePolicy());
if (!customPolicies.isEmpty()) {
policyList.addAll(customPolicies);
}
policyList.add(new HttpLoggingPolicy(getHttpLogOptions()));
return new HttpPipelineBuilder().policies(policyList.toArray(new HttpPipelinePolicy[0])).httpClient(httpClient)
.build();
}
private HttpLogOptions getHttpLogOptions() {
if (httpLogOptions == null) {
httpLogOptions = new HttpLogOptions();
}
return httpLogOptions;
}
} |
Thanks, updated. | private String getHostNameFromEndpoint() {
return endpoint.replace("https:
} | return endpoint.replace("https: | private String getHostNameFromEndpoint() throws MalformedURLException {
return new URL(endpoint).getHost();
} | class CallingServerClientBuilder {
private static final String SDK_NAME = "name";
private static final String SDK_VERSION = "version";
private static final String APP_CONFIG_PROPERTIES = "azure-communication-callingserver.properties";
private final ClientLogger logger = new ClientLogger(CallingServerClientBuilder.class);
private String connectionString;
private String endpoint;
private String hostName;
private AzureKeyCredential azureKeyCredential;
private TokenCredential tokenCredential;
private HttpClient httpClient;
private HttpLogOptions httpLogOptions = new HttpLogOptions();
private HttpPipeline pipeline;
private Configuration configuration;
private final Map<String, String> properties = CoreUtils.getProperties(APP_CONFIG_PROPERTIES);
private final List<HttpPipelinePolicy> customPolicies = new ArrayList<>();
private ClientOptions clientOptions;
private RetryPolicy retryPolicy;
/**
* Set endpoint of the service.
*
* @param endpoint url of the service.
* @return CallingServerClientBuilder object.
*/
public CallingServerClientBuilder endpoint(String endpoint) {
this.endpoint = Objects.requireNonNull(endpoint, "'endpoint' cannot be null.");
return this;
}
/**
* Set endpoint of the service.
*
* @param pipeline HttpPipeline to use, if a pipeline is not supplied, the
* credential and httpClient fields must be set.
* @return CallingServerClientBuilder object.
*/
public CallingServerClientBuilder pipeline(HttpPipeline pipeline) {
this.pipeline = Objects.requireNonNull(pipeline, "'pipeline' cannot be null.");
return this;
}
/**
* Sets the {@link TokenCredential} used to authenticate HTTP requests.
*
* @param tokenCredential {@link TokenCredential} used to authenticate HTTP
* requests.
* @return Updated {@link CallingServerClientBuilder} object.
* @throws NullPointerException If {@code tokenCredential} is null.
*/
public CallingServerClientBuilder credential(TokenCredential tokenCredential) {
this.tokenCredential = Objects.requireNonNull(tokenCredential, "'tokenCredential' cannot be null.");
return this;
}
/**
* Sets the {@link AzureKeyCredential} used to authenticate HTTP requests.
*
* @param keyCredential The {@link AzureKeyCredential} used to authenticate HTTP
* requests.
* @return Updated {@link CallingServerClientBuilder} object.
* @throws NullPointerException If {@code keyCredential} is null.
*/
CallingServerClientBuilder credential(AzureKeyCredential keyCredential) {
this.azureKeyCredential = Objects.requireNonNull(keyCredential, "'keyCredential' cannot be null.");
return this;
}
/**
* Set connectionString to use.
*
* @param connectionString connection string to set.
* @return Updated {@link CallingServerClientBuilder} object.
*/
public CallingServerClientBuilder connectionString(String connectionString) {
Objects.requireNonNull(connectionString, "'connectionString' cannot be null.");
this.connectionString = connectionString;
return this;
}
/**
* Sets the retry policy to use (using the RetryPolicy type).
*
* @param retryPolicy object to be applied
* @return Updated {@link CallingServerClientBuilder} object.
*/
public CallingServerClientBuilder retryPolicy(RetryPolicy retryPolicy) {
this.retryPolicy = Objects.requireNonNull(retryPolicy, "'retryPolicy' cannot be null.");
return this;
}
/**
* Sets the configuration object used to retrieve environment configuration
* values during building of the client.
*
* @param configuration Configuration store used to retrieve environment
* configurations.
* @return Updated {@link CallingServerClientBuilder} object.
*/
public CallingServerClientBuilder configuration(Configuration configuration) {
this.configuration = Objects.requireNonNull(configuration, "'configuration' cannot be null.");
return this;
}
/**
* Sets the {@link HttpLogOptions} for service requests.
*
* @param logOptions The logging configuration to use when sending and receiving
* HTTP requests/responses.
* @return The updated {@link CallingServerClientBuilder} object.
*/
public CallingServerClientBuilder httpLogOptions(HttpLogOptions logOptions) {
this.httpLogOptions = Objects.requireNonNull(logOptions, "'logOptions' cannot be null.");
return this;
}
/**
* Sets the {@link CallingServerServiceVersion} that is used when making API requests.
* <p>
* If a service version is not provided, the service version that will be used will be the latest known service
* version based on the version of the client library being used. If no service version is specified, updating to a
* newer version of the client library will have the result of potentially moving to a newer service version.
* <p>
* Targeting a specific service version may also mean that the service will return an error for newer APIs.
*
* @param version {@link CallingServerServiceVersion} of the service to be used when making requests.
* @return Updated CallingServerClientBuilder object
*/
public CallingServerClientBuilder serviceVersion(CallingServerServiceVersion version) {
return this;
}
/**
* Set httpClient to use
*
* @param httpClient httpClient to use, overridden by the pipeline field.
* @return Updated {@link CallingServerClientBuilder} object.
*/
public CallingServerClientBuilder httpClient(HttpClient httpClient) {
this.httpClient = Objects.requireNonNull(httpClient, "'httpClient' cannot be null.");
return this;
}
/**
* Apply additional HttpPipelinePolicy
*
* @param customPolicy HttpPipelinePolicy object to be applied after
* AzureKeyCredentialPolicy, UserAgentPolicy, RetryPolicy, and CookiePolicy.
* @return Updated {@link CallingServerClientBuilder} object.
*/
public CallingServerClientBuilder addPolicy(HttpPipelinePolicy customPolicy) {
this.customPolicies.add(Objects.requireNonNull(customPolicy, "'customPolicy' cannot be null."));
return this;
}
/**
* Create asynchronous client applying HMACAuthenticationPolicy,
* UserAgentPolicy, RetryPolicy, and CookiePolicy. Additional HttpPolicies
* specified by additionalPolicies will be applied after them
*
* @return The updated {@link CallingServerClientBuilder} object.
*/
public CallingServerAsyncClient buildAsyncClient() {
return new CallingServerAsyncClient(createServiceImpl());
}
/**
* Create synchronous client applying HmacAuthenticationPolicy, UserAgentPolicy,
* RetryPolicy, and CookiePolicy. Additional HttpPolicies specified by
* additionalPolicies will be applied after them.
*
* @return Updated {@link CallingServerClientBuilder} object.
*/
public CallingServerClient buildClient() {
return new CallingServerClient(buildAsyncClient());
}
private AzureCommunicationCallingServerServiceImpl createServiceImpl() {
boolean isConnectionStringSet = connectionString != null && !connectionString.trim().isEmpty();
boolean isEndpointSet = endpoint != null && !endpoint.trim().isEmpty();
boolean isAzureKeyCredentialSet = azureKeyCredential != null;
boolean isTokenCredentialSet = tokenCredential != null;
if (isConnectionStringSet && isEndpointSet) {
throw logger.logExceptionAsError(new IllegalArgumentException(
"Both 'connectionString' and 'endpoint' are set. Just one may be used."));
}
if (isConnectionStringSet && isAzureKeyCredentialSet) {
throw logger.logExceptionAsError(new IllegalArgumentException(
"Both 'connectionString' and 'keyCredential' are set. Just one may be used."));
}
if (isConnectionStringSet && isTokenCredentialSet) {
throw logger.logExceptionAsError(new IllegalArgumentException(
"Both 'connectionString' and 'tokenCredential' are set. Just one may be used."));
}
if (isAzureKeyCredentialSet && isTokenCredentialSet) {
throw logger.logExceptionAsError(new IllegalArgumentException(
"Both 'tokenCredential' and 'keyCredential' are set. Just one may be used."));
}
if (isConnectionStringSet) {
CommunicationConnectionString connectionStringObject = new CommunicationConnectionString(connectionString);
String endpoint = connectionStringObject.getEndpoint();
String accessKey = connectionStringObject.getAccessKey();
endpoint(endpoint).credential(new AzureKeyCredential(accessKey));
}
Objects.requireNonNull(endpoint);
if (isTokenCredentialSet) {
hostName = getHostNameFromEndpoint();
}
if (pipeline == null) {
Objects.requireNonNull(httpClient);
}
HttpPipeline builderPipeline = pipeline;
if (pipeline == null) {
builderPipeline = createHttpPipeline(httpClient);
}
AzureCommunicationCallingServerServiceImplBuilder clientBuilder = new AzureCommunicationCallingServerServiceImplBuilder();
clientBuilder.endpoint(endpoint).pipeline(builderPipeline);
return clientBuilder.buildClient();
}
/**
* Allows the user to set a variety of client-related options, such as
* user-agent string, headers, etc.
*
* @param clientOptions object to be applied.
* @return Updated {@link CallingServerClientBuilder} object.
*/
public CallingServerClientBuilder clientOptions(ClientOptions clientOptions) {
this.clientOptions = clientOptions;
return this;
}
private List<HttpPipelinePolicy> createHttpPipelineAuthPolicies() {
if (tokenCredential != null && azureKeyCredential != null) {
throw logger.logExceptionAsError(new IllegalArgumentException(
"Both 'credential' and 'keyCredential' are set. Just one may be used."));
}
List<HttpPipelinePolicy> pipelinePolicies = new ArrayList<>();
if (tokenCredential != null) {
pipelinePolicies.add(new BearerTokenAuthenticationPolicy(tokenCredential,
"https:
pipelinePolicies.add(new TokenCredentialAddHostHeaderPolicy(hostName));
} else if (azureKeyCredential != null) {
pipelinePolicies.add(new HmacAuthenticationPolicy(azureKeyCredential));
} else {
throw logger.logExceptionAsError(
new IllegalArgumentException("Missing credential information while building a client."));
}
return pipelinePolicies;
}
private HttpPipeline createHttpPipeline(HttpClient httpClient) {
if (pipeline != null) {
return pipeline;
}
List<HttpPipelinePolicy> policyList = new ArrayList<>();
ClientOptions buildClientOptions = (clientOptions == null) ? new ClientOptions() : clientOptions;
HttpLogOptions buildLogOptions = (httpLogOptions == null) ? new HttpLogOptions() : httpLogOptions;
String applicationId = null;
if (!CoreUtils.isNullOrEmpty(buildClientOptions.getApplicationId())) {
applicationId = buildClientOptions.getApplicationId();
} else if (!CoreUtils.isNullOrEmpty(buildLogOptions.getApplicationId())) {
applicationId = buildLogOptions.getApplicationId();
}
String clientName = properties.getOrDefault(SDK_NAME, "UnknownName");
String clientVersion = properties.getOrDefault(SDK_VERSION, "UnknownVersion");
policyList.add(new UserAgentPolicy(applicationId, clientName, clientVersion, configuration));
policyList.add(new RequestIdPolicy());
policyList.add((retryPolicy == null) ? new RetryPolicy() : retryPolicy);
policyList.add(new RedirectPolicy());
policyList.addAll(createHttpPipelineAuthPolicies());
policyList.add(new CookiePolicy());
if (!customPolicies.isEmpty()) {
policyList.addAll(customPolicies);
}
policyList.add(new HttpLoggingPolicy(getHttpLogOptions()));
return new HttpPipelineBuilder().policies(policyList.toArray(new HttpPipelinePolicy[0])).httpClient(httpClient)
.build();
}
private HttpLogOptions getHttpLogOptions() {
if (httpLogOptions == null) {
httpLogOptions = new HttpLogOptions();
}
return httpLogOptions;
}
} | class CallingServerClientBuilder {
private static final String SDK_NAME = "name";
private static final String SDK_VERSION = "version";
private static final String APP_CONFIG_PROPERTIES = "azure-communication-callingserver.properties";
private final ClientLogger logger = new ClientLogger(CallingServerClientBuilder.class);
private String connectionString;
private String endpoint;
private String hostName;
private AzureKeyCredential azureKeyCredential;
private TokenCredential tokenCredential;
private HttpClient httpClient;
private HttpLogOptions httpLogOptions = new HttpLogOptions();
private HttpPipeline pipeline;
private Configuration configuration;
private final Map<String, String> properties = CoreUtils.getProperties(APP_CONFIG_PROPERTIES);
private final List<HttpPipelinePolicy> customPolicies = new ArrayList<>();
private ClientOptions clientOptions;
private RetryPolicy retryPolicy;
/**
* Set endpoint of the service.
*
* @param endpoint url of the service.
* @return CallingServerClientBuilder object.
*/
public CallingServerClientBuilder endpoint(String endpoint) {
this.endpoint = Objects.requireNonNull(endpoint, "'endpoint' cannot be null.");
return this;
}
/**
* Set endpoint of the service.
*
* @param pipeline HttpPipeline to use, if a pipeline is not supplied, the
* credential and httpClient fields must be set.
* @return CallingServerClientBuilder object.
*/
public CallingServerClientBuilder pipeline(HttpPipeline pipeline) {
this.pipeline = Objects.requireNonNull(pipeline, "'pipeline' cannot be null.");
return this;
}
/**
* Sets the {@link TokenCredential} used to authenticate HTTP requests.
*
* @param tokenCredential {@link TokenCredential} used to authenticate HTTP
* requests.
* @return Updated {@link CallingServerClientBuilder} object.
* @throws NullPointerException If {@code tokenCredential} is null.
*/
public CallingServerClientBuilder credential(TokenCredential tokenCredential) {
this.tokenCredential = Objects.requireNonNull(tokenCredential, "'tokenCredential' cannot be null.");
return this;
}
/**
* Sets the {@link AzureKeyCredential} used to authenticate HTTP requests.
*
* @param keyCredential The {@link AzureKeyCredential} used to authenticate HTTP
* requests.
* @return Updated {@link CallingServerClientBuilder} object.
* @throws NullPointerException If {@code keyCredential} is null.
*/
CallingServerClientBuilder credential(AzureKeyCredential keyCredential) {
this.azureKeyCredential = Objects.requireNonNull(keyCredential, "'keyCredential' cannot be null.");
return this;
}
/**
* Set connectionString to use.
*
* @param connectionString connection string to set.
* @return Updated {@link CallingServerClientBuilder} object.
*/
public CallingServerClientBuilder connectionString(String connectionString) {
Objects.requireNonNull(connectionString, "'connectionString' cannot be null.");
this.connectionString = connectionString;
return this;
}
/**
* Sets the retry policy to use (using the RetryPolicy type).
*
* @param retryPolicy object to be applied
* @return Updated {@link CallingServerClientBuilder} object.
*/
public CallingServerClientBuilder retryPolicy(RetryPolicy retryPolicy) {
this.retryPolicy = Objects.requireNonNull(retryPolicy, "'retryPolicy' cannot be null.");
return this;
}
/**
* Sets the configuration object used to retrieve environment configuration
* values during building of the client.
*
* @param configuration Configuration store used to retrieve environment
* configurations.
* @return Updated {@link CallingServerClientBuilder} object.
*/
public CallingServerClientBuilder configuration(Configuration configuration) {
this.configuration = Objects.requireNonNull(configuration, "'configuration' cannot be null.");
return this;
}
/**
* Sets the {@link HttpLogOptions} for service requests.
*
* @param logOptions The logging configuration to use when sending and receiving
* HTTP requests/responses.
* @return The updated {@link CallingServerClientBuilder} object.
*/
public CallingServerClientBuilder httpLogOptions(HttpLogOptions logOptions) {
this.httpLogOptions = Objects.requireNonNull(logOptions, "'logOptions' cannot be null.");
return this;
}
/**
 * Sets the {@link CallingServerServiceVersion} that is used when making API requests.
 * <p>
 * If a service version is not provided, the service version that will be used will be the latest known service
 * version based on the version of the client library being used. If no service version is specified, updating to a
 * newer version of the client library will have the result of potentially moving to a newer service version.
 * <p>
 * Targeting a specific service version may also mean that the service will return an error for newer APIs.
 *
 * @param version {@link CallingServerServiceVersion} of the service to be used when making requests.
 * @return Updated CallingServerClientBuilder object
 */
public CallingServerClientBuilder serviceVersion(CallingServerServiceVersion version) {
    // NOTE(review): 'version' is never stored or used — the builder always falls back to the
    // latest known service version regardless of what callers pass here. Confirm this no-op
    // is intentional (it matches the javadoc's "latest known version" default behavior).
    return this;
}
/**
 * Sets the {@link HttpClient} used to send requests. Ignored when a full
 * pipeline has been supplied via the pipeline field.
 *
 * @param httpClient httpClient to use, overridden by the pipeline field; must not be {@code null}.
 * @return Updated {@link CallingServerClientBuilder} object.
 */
public CallingServerClientBuilder httpClient(HttpClient httpClient) {
    Objects.requireNonNull(httpClient, "'httpClient' cannot be null.");
    this.httpClient = httpClient;
    return this;
}
/**
 * Registers an additional {@link HttpPipelinePolicy} to run after the built-in
 * AzureKeyCredentialPolicy, UserAgentPolicy, RetryPolicy, and CookiePolicy.
 *
 * @param customPolicy HttpPipelinePolicy object to be applied; must not be {@code null}.
 * @return Updated {@link CallingServerClientBuilder} object.
 */
public CallingServerClientBuilder addPolicy(HttpPipelinePolicy customPolicy) {
    Objects.requireNonNull(customPolicy, "'customPolicy' cannot be null.");
    this.customPolicies.add(customPolicy);
    return this;
}
/**
 * Builds the asynchronous client, applying HMACAuthenticationPolicy,
 * UserAgentPolicy, RetryPolicy, and CookiePolicy. Any policies registered via
 * addPolicy are applied after the built-in ones.
 *
 * @return A new {@link CallingServerAsyncClient} instance.
 */
public CallingServerAsyncClient buildAsyncClient() {
    AzureCommunicationCallingServerServiceImpl serviceImpl = createServiceImpl();
    return new CallingServerAsyncClient(serviceImpl);
}
/**
 * Builds the synchronous client by wrapping a freshly built asynchronous
 * client. The same HmacAuthenticationPolicy, UserAgentPolicy, RetryPolicy,
 * CookiePolicy, and custom policies apply.
 *
 * @return A new {@link CallingServerClient} instance.
 */
public CallingServerClient buildClient() {
    CallingServerAsyncClient asyncClient = buildAsyncClient();
    return new CallingServerClient(asyncClient);
}
/**
 * Validates the mutually-exclusive credential/endpoint configuration and builds the
 * generated service implementation backing both client flavors.
 * <p>
 * Exactly one of connection string, key credential, or token credential may be set.
 * A connection string is expanded into an endpoint plus an {@link AzureKeyCredential}.
 *
 * @return The configured {@link AzureCommunicationCallingServerServiceImpl}.
 * @throws IllegalArgumentException when conflicting authentication options are set.
 * @throws NullPointerException when neither an endpoint nor (absent a pipeline) an HttpClient is available.
 */
private AzureCommunicationCallingServerServiceImpl createServiceImpl() {
    boolean isConnectionStringSet = connectionString != null && !connectionString.trim().isEmpty();
    boolean isEndpointSet = endpoint != null && !endpoint.trim().isEmpty();
    boolean isAzureKeyCredentialSet = azureKeyCredential != null;
    boolean isTokenCredentialSet = tokenCredential != null;
    if (isConnectionStringSet && isEndpointSet) {
        throw logger.logExceptionAsError(new IllegalArgumentException(
            "Both 'connectionString' and 'endpoint' are set. Just one may be used."));
    }
    if (isConnectionStringSet && isAzureKeyCredentialSet) {
        throw logger.logExceptionAsError(new IllegalArgumentException(
            "Both 'connectionString' and 'keyCredential' are set. Just one may be used."));
    }
    if (isConnectionStringSet && isTokenCredentialSet) {
        throw logger.logExceptionAsError(new IllegalArgumentException(
            "Both 'connectionString' and 'tokenCredential' are set. Just one may be used."));
    }
    if (isAzureKeyCredentialSet && isTokenCredentialSet) {
        throw logger.logExceptionAsError(new IllegalArgumentException(
            "Both 'tokenCredential' and 'keyCredential' are set. Just one may be used."));
    }
    if (isConnectionStringSet) {
        // Expand the connection string into the endpoint + key credential it encodes.
        CommunicationConnectionString connectionStringObject = new CommunicationConnectionString(connectionString);
        endpoint(connectionStringObject.getEndpoint())
            .credential(new AzureKeyCredential(connectionStringObject.getAccessKey()));
    }
    Objects.requireNonNull(endpoint, "'endpoint' cannot be null.");
    if (isTokenCredentialSet) {
        try {
            hostName = getHostNameFromEndpoint();
        } catch (MalformedURLException e) {
            // Preserve the original exception as the cause instead of discarding it.
            throw logger.logExceptionAsError(new RuntimeException(e.getMessage(), e));
        }
    }
    // A caller-supplied pipeline wins; otherwise an HttpClient is mandatory so we can build one.
    HttpPipeline builderPipeline = pipeline;
    if (builderPipeline == null) {
        Objects.requireNonNull(httpClient, "'httpClient' cannot be null.");
        builderPipeline = createHttpPipeline(httpClient);
    }
    AzureCommunicationCallingServerServiceImplBuilder clientBuilder = new AzureCommunicationCallingServerServiceImplBuilder();
    clientBuilder.endpoint(endpoint).pipeline(builderPipeline);
    return clientBuilder.buildClient();
}
/**
 * Allows the user to set a variety of client-related options, such as
 * user-agent string, headers, etc.
 *
 * @param clientOptions object to be applied.
 * @return Updated {@link CallingServerClientBuilder} object.
 */
public CallingServerClientBuilder clientOptions(ClientOptions clientOptions) {
    // Intentionally no null check: createHttpPipeline substitutes a default
    // ClientOptions when this field is null, so null simply resets to defaults.
    this.clientOptions = clientOptions;
    return this;
}
/**
 * Builds the authentication policies for the pipeline: bearer-token auth (plus a
 * host-header policy) when a token credential is set, or HMAC auth when a key
 * credential is set. Exactly one credential kind must be configured.
 *
 * @return The list of authentication-related pipeline policies.
 * @throws IllegalArgumentException when both or neither credential kind is set.
 */
private List<HttpPipelinePolicy> createHttpPipelineAuthPolicies() {
if (tokenCredential != null && azureKeyCredential != null) {
throw logger.logExceptionAsError(new IllegalArgumentException(
"Both 'credential' and 'keyCredential' are set. Just one may be used."));
}
List<HttpPipelinePolicy> pipelinePolicies = new ArrayList<>();
if (tokenCredential != null) {
pipelinePolicies.add(new BearerTokenAuthenticationPolicy(tokenCredential,
// NOTE(review): the scope string below appears truncated in this copy of the
// source — confirm the full "https://..." ".default" scope URL against upstream.
"https:
pipelinePolicies.add(new TokenCredentialAddHostHeaderPolicy(hostName));
} else if (azureKeyCredential != null) {
pipelinePolicies.add(new HmacAuthenticationPolicy(azureKeyCredential));
} else {
throw logger.logExceptionAsError(
new IllegalArgumentException("Missing credential information while building a client."));
}
return pipelinePolicies;
}
/**
 * Assembles the HTTP pipeline: user-agent, request-id, retry, redirect,
 * authentication, cookie, any custom policies, and finally HTTP logging.
 * A caller-supplied pipeline is returned unchanged.
 *
 * @param httpClient The transport to attach to the pipeline.
 * @return The assembled {@link HttpPipeline}.
 */
private HttpPipeline createHttpPipeline(HttpClient httpClient) {
    if (pipeline != null) {
        return pipeline;
    }
    ClientOptions resolvedClientOptions = (clientOptions == null) ? new ClientOptions() : clientOptions;
    HttpLogOptions resolvedLogOptions = (httpLogOptions == null) ? new HttpLogOptions() : httpLogOptions;
    // Client options take precedence over log options for the application id.
    String applicationId = null;
    if (!CoreUtils.isNullOrEmpty(resolvedClientOptions.getApplicationId())) {
        applicationId = resolvedClientOptions.getApplicationId();
    } else if (!CoreUtils.isNullOrEmpty(resolvedLogOptions.getApplicationId())) {
        applicationId = resolvedLogOptions.getApplicationId();
    }
    String clientName = properties.getOrDefault(SDK_NAME, "UnknownName");
    String clientVersion = properties.getOrDefault(SDK_VERSION, "UnknownVersion");
    List<HttpPipelinePolicy> policies = new ArrayList<>();
    policies.add(new UserAgentPolicy(applicationId, clientName, clientVersion, configuration));
    policies.add(new RequestIdPolicy());
    policies.add((retryPolicy == null) ? new RetryPolicy() : retryPolicy);
    policies.add(new RedirectPolicy());
    policies.addAll(createHttpPipelineAuthPolicies());
    policies.add(new CookiePolicy());
    if (!customPolicies.isEmpty()) {
        policies.addAll(customPolicies);
    }
    policies.add(new HttpLoggingPolicy(getHttpLogOptions()));
    return new HttpPipelineBuilder()
        .policies(policies.toArray(new HttpPipelinePolicy[0]))
        .httpClient(httpClient)
        .build();
}
/**
 * Returns the configured {@link HttpLogOptions}, lazily creating and caching a
 * default instance on first use.
 *
 * @return The (possibly freshly created) {@link HttpLogOptions}.
 */
private HttpLogOptions getHttpLogOptions() {
    if (httpLogOptions != null) {
        return httpLogOptions;
    }
    httpLogOptions = new HttpLogOptions();
    return httpLogOptions;
}
} |
Do we not use builders for this? `new KeyRotationLifetimeAction.Builder().setTimeAfterCreate("P90D").build()`? Again, just asking so I can better learn how we do things in Java | public void updateKeyRotationPolicy() {
KeyClient keyClient = createClient();
List<KeyRotationLifetimeAction> lifetimeActions = new ArrayList<>();
KeyRotationLifetimeAction rotateLifetimeAction = new KeyRotationLifetimeAction(KeyRotationPolicyAction.ROTATE)
.setTimeAfterCreate("P90D")
.setTimeBeforeExpiry("P45D");
KeyRotationLifetimeAction notifyLifetimeAction = new KeyRotationLifetimeAction(KeyRotationPolicyAction.NOTIFY)
.setTimeAfterCreate("P90D")
.setTimeBeforeExpiry("P45D");
lifetimeActions.add(rotateLifetimeAction);
lifetimeActions.add(notifyLifetimeAction);
KeyRotationPolicyProperties policyProperties = new KeyRotationPolicyProperties()
.setLifetimeActions(lifetimeActions)
.setExpiryTime("P6M");
KeyRotationPolicy keyRotationPolicy =
keyClient.updateKeyRotationPolicy("keyName", policyProperties);
System.out.printf("Updated key rotation policy with id: %s%n", keyRotationPolicy.getId());
List<KeyRotationLifetimeAction> myLifetimeActions = new ArrayList<>();
KeyRotationLifetimeAction myRotateLifetimeAction = new KeyRotationLifetimeAction(KeyRotationPolicyAction.ROTATE)
.setTimeAfterCreate("P90D")
.setTimeBeforeExpiry("P45D");
KeyRotationLifetimeAction myNotifyLifetimeAction = new KeyRotationLifetimeAction(KeyRotationPolicyAction.NOTIFY)
.setTimeAfterCreate("P90D")
.setTimeBeforeExpiry("P45D");
myLifetimeActions.add(myRotateLifetimeAction);
myLifetimeActions.add(myNotifyLifetimeAction);
KeyRotationPolicyProperties myPolicyProperties = new KeyRotationPolicyProperties()
.setLifetimeActions(myLifetimeActions)
.setExpiryTime("P6M");
Response<KeyRotationPolicy> keyRotationPolicyResponse = keyClient.updateKeyRotationPolicyWithResponse(
"keyName", myPolicyProperties, new Context("key1", "value1"));
System.out.printf("Response received successfully with status code: %d. Updated key rotation policy"
+ "with id: %s%n", keyRotationPolicyResponse.getStatusCode(), keyRotationPolicyResponse.getValue().getId());
} | KeyRotationLifetimeAction myRotateLifetimeAction = new KeyRotationLifetimeAction(KeyRotationPolicyAction.ROTATE) | public void updateKeyRotationPolicy() {
KeyClient keyClient = createClient();
List<KeyRotationLifetimeAction> lifetimeActions = new ArrayList<>();
KeyRotationLifetimeAction rotateLifetimeAction = new KeyRotationLifetimeAction(KeyRotationPolicyAction.ROTATE)
.setTimeAfterCreate("P90D")
.setTimeBeforeExpiry("P45D");
KeyRotationLifetimeAction notifyLifetimeAction = new KeyRotationLifetimeAction(KeyRotationPolicyAction.NOTIFY)
.setTimeAfterCreate("P90D")
.setTimeBeforeExpiry("P45D");
lifetimeActions.add(rotateLifetimeAction);
lifetimeActions.add(notifyLifetimeAction);
KeyRotationPolicyProperties policyProperties = new KeyRotationPolicyProperties()
.setLifetimeActions(lifetimeActions)
.setExpiryTime("P6M");
KeyRotationPolicy keyRotationPolicy =
keyClient.updateKeyRotationPolicy("keyName", policyProperties);
System.out.printf("Updated key rotation policy with id: %s%n", keyRotationPolicy.getId());
List<KeyRotationLifetimeAction> myLifetimeActions = new ArrayList<>();
KeyRotationLifetimeAction myRotateLifetimeAction = new KeyRotationLifetimeAction(KeyRotationPolicyAction.ROTATE)
.setTimeAfterCreate("P90D")
.setTimeBeforeExpiry("P45D");
KeyRotationLifetimeAction myNotifyLifetimeAction = new KeyRotationLifetimeAction(KeyRotationPolicyAction.NOTIFY)
.setTimeAfterCreate("P90D")
.setTimeBeforeExpiry("P45D");
myLifetimeActions.add(myRotateLifetimeAction);
myLifetimeActions.add(myNotifyLifetimeAction);
KeyRotationPolicyProperties myPolicyProperties = new KeyRotationPolicyProperties()
.setLifetimeActions(myLifetimeActions)
.setExpiryTime("P6M");
Response<KeyRotationPolicy> keyRotationPolicyResponse = keyClient.updateKeyRotationPolicyWithResponse(
"keyName", myPolicyProperties, new Context("key1", "value1"));
System.out.printf("Response received successfully with status code: %d. Updated key rotation policy"
+ "with id: %s%n", keyRotationPolicyResponse.getStatusCode(), keyRotationPolicyResponse.getValue().getId());
} | class KeyClientJavaDocCodeSnippets {
/**
 * Generates a code sample for creating a {@link KeyClient}.
 *
 * @return An instance of {@link KeyClient}.
 */
public KeyClient createClient() {
KeyClient keyClient = new KeyClientBuilder()
// NOTE(review): the vault URL literal below appears truncated in this copy of the
// source — confirm the full "https://<your-vault>.vault.azure.net/" URL upstream.
.vaultUrl("https:
.credential(new DefaultAzureCredentialBuilder().build())
.buildClient();
return keyClient;
}
/**
* Generates a code sample for using {@link KeyClient
* {@link KeyClient
* {@link KeyClient
* {@link KeyClient
*/
public void createKey() {
KeyClient keyClient = createClient();
KeyVaultKey key = keyClient.createKey("keyName", KeyType.EC);
System.out.printf("Created key with name: %s and id: %s%n", key.getName(), key.getId());
CreateKeyOptions createKeyOptions = new CreateKeyOptions("keyName", KeyType.RSA)
.setNotBefore(OffsetDateTime.now().plusDays(1))
.setExpiresOn(OffsetDateTime.now().plusYears(1));
KeyVaultKey optionsKey = keyClient.createKey(createKeyOptions);
System.out.printf("Created key with name: %s and id: %s%n", optionsKey.getName(), optionsKey.getId());
CreateRsaKeyOptions createRsaKeyOptions = new CreateRsaKeyOptions("keyName")
.setKeySize(2048)
.setNotBefore(OffsetDateTime.now().plusDays(1))
.setExpiresOn(OffsetDateTime.now().plusYears(1));
KeyVaultKey rsaKey = keyClient.createRsaKey(createRsaKeyOptions);
System.out.printf("Created key with name: %s and id: %s%n", rsaKey.getName(), rsaKey.getId());
CreateEcKeyOptions createEcKeyOptions = new CreateEcKeyOptions("keyName")
.setCurveName(KeyCurveName.P_384)
.setNotBefore(OffsetDateTime.now().plusDays(1))
.setExpiresOn(OffsetDateTime.now().plusYears(1));
KeyVaultKey ecKey = keyClient.createEcKey(createEcKeyOptions);
System.out.printf("Created key with name: %s and id: %s%n", ecKey.getName(), ecKey.getId());
CreateOctKeyOptions createOctKeyOptions = new CreateOctKeyOptions("keyName")
.setNotBefore(OffsetDateTime.now().plusDays(1))
.setExpiresOn(OffsetDateTime.now().plusYears(1));
KeyVaultKey octKey = keyClient.createOctKey(createOctKeyOptions);
System.out.printf("Created key with name: %s and id: %s%n", octKey.getName(), octKey.getId());
}
/**
* Generates code samples for using {@link KeyAsyncClient
* {@link KeyAsyncClient
* {@link KeyAsyncClient
*/
public void importKey() {
KeyClient keyClient = createClient();
JsonWebKey jsonWebKeyToImport = new JsonWebKey();
KeyVaultKey key = keyClient.importKey("keyName", jsonWebKeyToImport);
System.out.printf("Imported key with name: %s and id: %s%n", key.getName(), key.getId());
ImportKeyOptions options = new ImportKeyOptions("keyName", jsonWebKeyToImport)
.setHardwareProtected(false);
KeyVaultKey importedKey = keyClient.importKey(options);
System.out.printf("Imported key with name: %s and id: %s%n", importedKey.getName(),
importedKey.getId());
ImportKeyOptions importKeyOptions = new ImportKeyOptions("keyName", jsonWebKeyToImport)
.setHardwareProtected(false);
Response<KeyVaultKey> response =
keyClient.importKeyWithResponse(importKeyOptions, new Context("key1", "value1"));
System.out.printf("Imported key with name: %s and id: %s%n", response.getValue().getName(),
response.getValue().getId());
}
/**
 * Generates a code sample for using {@link KeyClient}'s beginDeleteKey long-running operation:
 * start the deletion, poll once for intermediate state, then wait for completion.
 */
public void beginDeleteKey() {
    KeyClient keyClient = createClient();
    SyncPoller<DeletedKey, Void> deleteKeyPoller = keyClient.beginDeleteKey("keyName");
    PollResponse<DeletedKey> deleteKeyPollResponse = deleteKeyPoller.poll();
    DeletedKey deletedKey = deleteKeyPollResponse.getValue();
    // BUG FIX: the date was concatenated onto the format string ('+' instead of ','),
    // so the %s placeholder was never substituted; pass it as a printf argument.
    System.out.printf("Key delete date: %s%n", deletedKey.getDeletedOn());
    System.out.printf("Deleted key's recovery id: %s%n", deletedKey.getRecoveryId());
    deleteKeyPoller.waitForCompletion();
}
/**
 * Generates a code sample for retrieving a deleted key with {@link KeyClient}.
 */
public void getDeletedKey() {
    KeyClient keyClient = createClient();
    DeletedKey retrievedDeletedKey = keyClient.getDeletedKey("keyName");
    System.out.printf("Deleted key's recovery id: %s%n", retrievedDeletedKey.getRecoveryId());
}
/**
* Generates a code sample for using {@link KeyClient
* {@link KeyClient
* {@link KeyClient
* {@link KeyClient
*/
public void createKeyWithResponse() {
KeyClient keyClient = createClient();
CreateKeyOptions createKeyOptions = new CreateKeyOptions("keyName", KeyType.RSA)
.setNotBefore(OffsetDateTime.now().plusDays(1))
.setExpiresOn(OffsetDateTime.now().plusYears(1));
Response<KeyVaultKey> createKeyResponse =
keyClient.createKeyWithResponse(createKeyOptions, new Context("key1", "value1"));
System.out.printf("Created key with name: %s and: id %s%n", createKeyResponse.getValue().getName(),
createKeyResponse.getValue().getId());
CreateRsaKeyOptions createRsaKeyOptions = new CreateRsaKeyOptions("keyName")
.setKeySize(2048)
.setNotBefore(OffsetDateTime.now().plusDays(1))
.setExpiresOn(OffsetDateTime.now().plusYears(1));
Response<KeyVaultKey> createRsaKeyResponse =
keyClient.createRsaKeyWithResponse(createRsaKeyOptions, new Context("key1", "value1"));
System.out.printf("Created key with name: %s and: id %s%n", createRsaKeyResponse.getValue().getName(),
createRsaKeyResponse.getValue().getId());
CreateEcKeyOptions createEcKeyOptions = new CreateEcKeyOptions("keyName")
.setCurveName(KeyCurveName.P_384)
.setNotBefore(OffsetDateTime.now().plusDays(1))
.setExpiresOn(OffsetDateTime.now().plusYears(1));
Response<KeyVaultKey> createEcKeyResponse =
keyClient.createEcKeyWithResponse(createEcKeyOptions, new Context("key1", "value1"));
System.out.printf("Created key with name: %s and: id %s%n", createEcKeyResponse.getValue().getName(),
createEcKeyResponse.getValue().getId());
CreateOctKeyOptions createOctKeyOptions = new CreateOctKeyOptions("keyName")
.setNotBefore(OffsetDateTime.now().plusDays(1))
.setExpiresOn(OffsetDateTime.now().plusYears(1));
Response<KeyVaultKey> createOctKeyResponse =
keyClient.createOctKeyWithResponse(createOctKeyOptions, new Context("key1", "value1"));
System.out.printf("Created key with name: %s and: id %s%n", createOctKeyResponse.getValue().getName(),
createOctKeyResponse.getValue().getId());
}
/**
* Generates a code sample for using {@link KeyClient
*/
public void getKeyWithResponse() {
KeyClient keyClient = createClient();
String keyVersion = "6A385B124DEF4096AF1361A85B16C204";
Response<KeyVaultKey> getKeyResponse =
keyClient.getKeyWithResponse("keyName", keyVersion, new Context("key1", "value1"));
System.out.printf("Retrieved key with name: %s and: id %s%n", getKeyResponse.getValue().getName(),
getKeyResponse.getValue().getId());
}
/**
* Generates a code sample for using {@link KeyClient
* {@link KeyClient
*/
public void getKey() {
KeyClient keyClient = createClient();
KeyVaultKey keyWithVersionValue = keyClient.getKey("keyName");
System.out.printf("Retrieved key with name: %s and: id %s%n", keyWithVersionValue.getName(),
keyWithVersionValue.getId());
String keyVersion = "6A385B124DEF4096AF1361A85B16C204";
KeyVaultKey keyWithVersion = keyClient.getKey("keyName", keyVersion);
System.out.printf("Retrieved key with name: %s and: id %s%n", keyWithVersion.getName(),
keyWithVersion.getId());
}
/**
* Generates a code sample for using
* {@link KeyClient
*/
public void updateKeyPropertiesWithResponse() {
KeyClient keyClient = createClient();
KeyVaultKey key = keyClient.getKey("keyName");
key.getProperties().setExpiresOn(OffsetDateTime.now().plusDays(60));
Response<KeyVaultKey> updateKeyResponse =
keyClient.updateKeyPropertiesWithResponse(key.getProperties(), new Context("key1", "value1"),
KeyOperation.ENCRYPT, KeyOperation.DECRYPT);
System.out.printf("Updated key with name: %s and id: %s%n", updateKeyResponse.getValue().getName(),
updateKeyResponse.getValue().getId());
}
/**
* Generates a code sample for using {@link KeyClient
*/
public void updateKeyProperties() {
KeyClient keyClient = createClient();
KeyVaultKey key = keyClient.getKey("keyName");
key.getProperties().setExpiresOn(OffsetDateTime.now().plusDays(60));
KeyVaultKey updatedKey = keyClient.updateKeyProperties(key.getProperties(), KeyOperation.ENCRYPT,
KeyOperation.DECRYPT);
System.out.printf("Key is updated with name %s and id %s %n", updatedKey.getName(), updatedKey.getId());
}
/**
* Generates a code sample for using {@link KeyClient
*/
public void getDeletedKeyWithResponse() {
KeyClient keyClient = createClient();
Response<DeletedKey> deletedKeyResponse =
keyClient.getDeletedKeyWithResponse("keyName", new Context("key1", "value1"));
System.out.printf("Deleted key with recovery id: %s%n", deletedKeyResponse.getValue().getRecoveryId());
}
/**
 * Generates a code sample for permanently purging a deleted key with {@link KeyClient}.
 */
public void purgeDeletedKey() {
    KeyClient client = createClient();
    client.purgeDeletedKey("deletedKeyName");
}
/**
* Generates a code sample for using {@link KeyClient
*/
public void purgeDeletedKeyWithResponse() {
KeyClient keyClient = createClient();
Response<Void> purgeDeletedKeyResponse = keyClient.purgeDeletedKeyWithResponse("deletedKeyName",
new Context("key1", "value1"));
System.out.printf("Purge response status code: %d%n", purgeDeletedKeyResponse.getStatusCode());
}
/**
* Generates a code sample for using {@link KeyClient
*/
public void beginRecoverDeletedKey() {
KeyClient keyClient = createClient();
SyncPoller<KeyVaultKey, Void> recoverKeyPoller = keyClient.beginRecoverDeletedKey("deletedKeyName");
PollResponse<KeyVaultKey> recoverKeyPollResponse = recoverKeyPoller.poll();
KeyVaultKey recoveredKey = recoverKeyPollResponse.getValue();
System.out.printf("Recovered key name: %s%n", recoveredKey.getName());
System.out.printf("Recovered key id: %s%n", recoveredKey.getId());
recoverKeyPoller.waitForCompletion();
}
/**
 * Generates a code sample for backing up a key with {@link KeyClient}.
 */
public void backupKey() {
    KeyClient keyClient = createClient();
    byte[] backupBlob = keyClient.backupKey("keyName");
    System.out.printf("Key backup byte array length: %s%n", backupBlob.length);
}
/**
* Generates a code sample for using {@link KeyClient
*/
public void backupKeyWithResponse() {
KeyClient keyClient = createClient();
Response<byte[]> backupKeyResponse = keyClient.backupKeyWithResponse("keyName", new Context("key1", "value1"));
System.out.printf("Key backup byte array length: %s%n", backupKeyResponse.getValue().length);
}
/**
* Generates a code sample for using {@link KeyClient
*/
public void restoreKeyBackup() {
KeyClient keyClient = createClient();
byte[] keyBackupByteArray = {};
KeyVaultKey keyResponse = keyClient.restoreKeyBackup(keyBackupByteArray);
System.out.printf("Restored key with name: %s and: id %s%n", keyResponse.getName(), keyResponse.getId());
}
/**
* Generates a code sample for using {@link KeyClient
*/
public void restoreKeyBackupWithResponse() {
KeyClient keyClient = createClient();
byte[] keyBackupByteArray = {};
Response<KeyVaultKey> keyResponse = keyClient.restoreKeyBackupWithResponse(keyBackupByteArray,
new Context("key1", "value1"));
System.out.printf("Restored key with name: %s and: id %s%n",
keyResponse.getValue().getName(), keyResponse.getValue().getId());
}
/**
* Generates a code sample for using {@link KeyClient
* {@link KeyClient
*/
public void listPropertiesOfKeys() {
KeyClient keyClient = createClient();
for (KeyProperties keyProperties : keyClient.listPropertiesOfKeys()) {
KeyVaultKey key = keyClient.getKey(keyProperties.getName(), keyProperties.getVersion());
System.out.printf("Retrieved key with name: %s and type: %s%n", key.getName(), key.getKeyType());
}
for (KeyProperties keyProperties : keyClient.listPropertiesOfKeys(new Context("key1", "value1"))) {
KeyVaultKey key = keyClient.getKey(keyProperties.getName(), keyProperties.getVersion());
System.out.printf("Retrieved key with name: %s and type: %s%n", key.getName(),
key.getKeyType());
}
keyClient.listPropertiesOfKeys().iterableByPage().forEach(pagedResponse -> {
System.out.printf("Got response details. Url: %s. Status code: %d.%n",
pagedResponse.getRequest().getUrl(), pagedResponse.getStatusCode());
pagedResponse.getElements().forEach(keyProperties -> {
KeyVaultKey key = keyClient.getKey(keyProperties.getName(), keyProperties.getVersion());
System.out.printf("Retrieved key with name: %s and type: %s%n", key.getName(),
key.getKeyType());
});
});
}
/**
* Generates a code sample for using {@link KeyClient
* {@link KeyClient
*/
public void listDeletedKeys() {
KeyClient keyClient = createClient();
for (DeletedKey deletedKey : keyClient.listDeletedKeys()) {
System.out.printf("Deleted key's recovery id:%s%n", deletedKey.getRecoveryId());
}
for (DeletedKey deletedKey : keyClient.listDeletedKeys(new Context("key1", "value1"))) {
System.out.printf("Deleted key's recovery id:%s%n", deletedKey.getRecoveryId());
}
keyClient.listDeletedKeys().iterableByPage().forEach(pagedResponse -> {
System.out.printf("Got response details. Url: %s. Status code: %d.%n",
pagedResponse.getRequest().getUrl(), pagedResponse.getStatusCode());
pagedResponse.getElements().forEach(deletedKey ->
System.out.printf("Deleted key's recovery id:%s%n", deletedKey.getRecoveryId()));
});
}
/**
* Generates code sample for using {@link KeyClient
* {@link KeyClient
*/
public void listPropertiesOfKeyVersions() {
KeyClient keyClient = createClient();
for (KeyProperties keyProperties : keyClient.listPropertiesOfKeyVersions("keyName")) {
KeyVaultKey key = keyClient.getKey(keyProperties.getName(), keyProperties.getVersion());
System.out.printf("Retrieved key version: %s with name: %s and type: %s%n",
key.getProperties().getVersion(), key.getName(), key.getKeyType());
}
for (KeyProperties keyProperties : keyClient.listPropertiesOfKeyVersions("keyName", new Context("key1", "value1"))) {
KeyVaultKey key = keyClient.getKey(keyProperties.getName(), keyProperties.getVersion());
System.out.printf("Retrieved key version: %s with name: %s and type: %s%n",
key.getProperties().getVersion(), key.getName(), key.getKeyType());
}
keyClient.listPropertiesOfKeyVersions("keyName").iterableByPage().forEach(pagedResponse -> {
System.out.printf("Got response details. Url: %s. Status code: %d.%n",
pagedResponse.getRequest().getUrl(), pagedResponse.getStatusCode());
pagedResponse.getElements().forEach(keyProperties ->
System.out.printf("Key name: %s. Key version: %s.%n", keyProperties.getName(),
keyProperties.getVersion()));
});
}
/**
* Generates code samples for using {@link KeyClient
* {@link KeyClient
*/
public void getRandomBytes() {
KeyClient keyClient = createClient();
int amount = 16;
RandomBytes randomBytes = keyClient.getRandomBytes(amount);
System.out.printf("Retrieved %d random bytes: %s%n", amount, Arrays.toString(randomBytes.getBytes()));
int amountOfBytes = 16;
Response<RandomBytes> response =
keyClient.getRandomBytesWithResponse(amountOfBytes, new Context("key1", "value1"));
System.out.printf("Response received successfully with status code: %d. Retrieved %d random bytes: %s%n",
response.getStatusCode(), amountOfBytes, Arrays.toString(response.getValue().getBytes()));
}
/**
 * Generates code samples for {@link KeyClient}'s releaseKey overloads: by name,
 * by name + version, and the WithResponse variant taking {@link ReleaseKeyOptions}.
 */
public void releaseKey() {
    KeyClient keyClient = createClient();
    String target = "someAttestationToken";
    ReleaseKeyResult releaseKeyResult = keyClient.releaseKey("keyName", target);
    System.out.printf("Signed object containing released key: %s%n", releaseKeyResult);
    String myKeyVersion = "6A385B124DEF4096AF1361A85B16C204";
    String myTarget = "someAttestationToken";
    ReleaseKeyResult releaseKeyVersionResult = keyClient.releaseKey("keyName", myKeyVersion, myTarget);
    System.out.printf("Signed object containing released key: %s%n", releaseKeyVersionResult);
    String releaseKeyVersion = "6A385B124DEF4096AF1361A85B16C204";
    String releaseTarget = "someAttestationToken";
    ReleaseKeyOptions releaseKeyOptions = new ReleaseKeyOptions()
        .setAlgorithm(KeyExportEncryptionAlgorithm.RSA_AES_KEY_WRAP_256)
        .setNonce("someNonce");
    Response<ReleaseKeyResult> releaseKeyResultResponse =
        keyClient.releaseKeyWithResponse("keyName", releaseKeyVersion, releaseTarget, releaseKeyOptions,
            new Context("key1", "value1"));
    // BUG FIX: the concatenated message segments were missing a space between
    // "containing" and "released", printing "containingreleased".
    System.out.printf("Response received successfully with status code: %d. Signed object containing"
        + " released key: %s%n", releaseKeyResultResponse.getStatusCode(),
        releaseKeyResultResponse.getValue().getValue());
}
/**
 * Generates code samples for {@link KeyClient}'s rotateKey and
 * rotateKeyWithResponse operations.
 */
public void rotateKey() {
    KeyClient keyClient = createClient();
    KeyVaultKey key = keyClient.rotateKey("keyName");
    System.out.printf("Rotated key with name: %s and version:%s%n", key.getName(),
        key.getProperties().getVersion());
    Response<KeyVaultKey> keyResponse = keyClient.rotateKeyWithResponse("keyName", new Context("key1", "value1"));
    // BUG FIX: the concatenated message segments were missing a space between
    // "and" and "version", printing "andversion".
    System.out.printf("Response received successfully with status code: %d. Rotated key with name: %s and"
        + " version: %s%n", keyResponse.getStatusCode(), keyResponse.getValue().getName(),
        keyResponse.getValue().getProperties().getVersion());
}
/**
 * Generates code samples for {@link KeyClient}'s getKeyRotationPolicy and
 * getKeyRotationPolicyWithResponse operations.
 */
public void getKeyRotationPolicy() {
    KeyClient keyClient = createClient();
    KeyRotationPolicy keyRotationPolicy = keyClient.getKeyRotationPolicy("keyName");
    System.out.printf("Retrieved key rotation policy with id: %s%n", keyRotationPolicy.getId());
    Response<KeyRotationPolicy> keyRotationPolicyResponse =
        keyClient.getKeyRotationPolicyWithResponse("keyName", new Context("key1", "value1"));
    // BUG FIX: the concatenated message segments were missing a space between
    // "policy" and "with", printing "policywith".
    System.out.printf("Response received successfully with status code: %d. Retrieved key rotation policy"
        + " with id: %s%n", keyRotationPolicyResponse.getStatusCode(), keyRotationPolicyResponse.getValue().getId());
}
/**
* Generates code samples for using {@link KeyClient
* and {@link KeyClient
*/
} | class KeyClientJavaDocCodeSnippets {
/**
* Generates a code sample for creating a {@link KeyClient}.
*
* @return An instance of {@link KeyClient}.
*/
public KeyClient createClient() {
KeyClient keyClient = new KeyClientBuilder()
.vaultUrl("https:
.credential(new DefaultAzureCredentialBuilder().build())
.buildClient();
return keyClient;
}
/**
* Generates a code sample for using {@link KeyClient
* {@link KeyClient
* {@link KeyClient
* {@link KeyClient
*/
public void createKey() {
KeyClient keyClient = createClient();
KeyVaultKey key = keyClient.createKey("keyName", KeyType.EC);
System.out.printf("Created key with name: %s and id: %s%n", key.getName(), key.getId());
CreateKeyOptions createKeyOptions = new CreateKeyOptions("keyName", KeyType.RSA)
.setNotBefore(OffsetDateTime.now().plusDays(1))
.setExpiresOn(OffsetDateTime.now().plusYears(1));
KeyVaultKey optionsKey = keyClient.createKey(createKeyOptions);
System.out.printf("Created key with name: %s and id: %s%n", optionsKey.getName(), optionsKey.getId());
CreateRsaKeyOptions createRsaKeyOptions = new CreateRsaKeyOptions("keyName")
.setKeySize(2048)
.setNotBefore(OffsetDateTime.now().plusDays(1))
.setExpiresOn(OffsetDateTime.now().plusYears(1));
KeyVaultKey rsaKey = keyClient.createRsaKey(createRsaKeyOptions);
System.out.printf("Created key with name: %s and id: %s%n", rsaKey.getName(), rsaKey.getId());
CreateEcKeyOptions createEcKeyOptions = new CreateEcKeyOptions("keyName")
.setCurveName(KeyCurveName.P_384)
.setNotBefore(OffsetDateTime.now().plusDays(1))
.setExpiresOn(OffsetDateTime.now().plusYears(1));
KeyVaultKey ecKey = keyClient.createEcKey(createEcKeyOptions);
System.out.printf("Created key with name: %s and id: %s%n", ecKey.getName(), ecKey.getId());
CreateOctKeyOptions createOctKeyOptions = new CreateOctKeyOptions("keyName")
.setNotBefore(OffsetDateTime.now().plusDays(1))
.setExpiresOn(OffsetDateTime.now().plusYears(1));
KeyVaultKey octKey = keyClient.createOctKey(createOctKeyOptions);
System.out.printf("Created key with name: %s and id: %s%n", octKey.getName(), octKey.getId());
}
/**
* Generates code samples for using {@link KeyAsyncClient
* {@link KeyAsyncClient
* {@link KeyAsyncClient
*/
public void importKey() {
KeyClient keyClient = createClient();
JsonWebKey jsonWebKeyToImport = new JsonWebKey();
KeyVaultKey key = keyClient.importKey("keyName", jsonWebKeyToImport);
System.out.printf("Imported key with name: %s and id: %s%n", key.getName(), key.getId());
ImportKeyOptions options = new ImportKeyOptions("keyName", jsonWebKeyToImport)
.setHardwareProtected(false);
KeyVaultKey importedKey = keyClient.importKey(options);
System.out.printf("Imported key with name: %s and id: %s%n", importedKey.getName(),
importedKey.getId());
ImportKeyOptions importKeyOptions = new ImportKeyOptions("keyName", jsonWebKeyToImport)
.setHardwareProtected(false);
Response<KeyVaultKey> response =
keyClient.importKeyWithResponse(importKeyOptions, new Context("key1", "value1"));
System.out.printf("Imported key with name: %s and id: %s%n", response.getValue().getName(),
response.getValue().getId());
}
/**
* Generates a code sample for using {@link KeyClient
*/
public void beginDeleteKey() {
KeyClient keyClient = createClient();
SyncPoller<DeletedKey, Void> deleteKeyPoller = keyClient.beginDeleteKey("keyName");
PollResponse<DeletedKey> deleteKeyPollResponse = deleteKeyPoller.poll();
DeletedKey deletedKey = deleteKeyPollResponse.getValue();
System.out.printf("Key delete date: %s%n" + deletedKey.getDeletedOn());
System.out.printf("Deleted key's recovery id: %s%n", deletedKey.getRecoveryId());
deleteKeyPoller.waitForCompletion();
}
/**
* Generates a code sample for using {@link KeyClient
*/
public void getDeletedKey() {
KeyClient keyClient = createClient();
DeletedKey deletedKey = keyClient.getDeletedKey("keyName");
System.out.printf("Deleted key's recovery id: %s%n", deletedKey.getRecoveryId());
}
/**
* Generates a code sample for using {@link KeyClient
* {@link KeyClient
* {@link KeyClient
* {@link KeyClient
*/
/**
 * Code sample: creates keys via the {@code WithResponse} overloads for each key kind —
 * generic, RSA, EC and OCT (symmetric) — each returning {@code Response<KeyVaultKey>}.
 */
public void createKeyWithResponse() {
    KeyClient keyClient = createClient();
    // Generic key: type chosen via KeyType; activation/expiry set on the options.
    CreateKeyOptions createKeyOptions = new CreateKeyOptions("keyName", KeyType.RSA)
        .setNotBefore(OffsetDateTime.now().plusDays(1))
        .setExpiresOn(OffsetDateTime.now().plusYears(1));
    Response<KeyVaultKey> createKeyResponse =
        keyClient.createKeyWithResponse(createKeyOptions, new Context("key1", "value1"));
    System.out.printf("Created key with name: %s and: id %s%n", createKeyResponse.getValue().getName(),
        createKeyResponse.getValue().getId());
    // RSA key with explicit modulus size.
    CreateRsaKeyOptions createRsaKeyOptions = new CreateRsaKeyOptions("keyName")
        .setKeySize(2048)
        .setNotBefore(OffsetDateTime.now().plusDays(1))
        .setExpiresOn(OffsetDateTime.now().plusYears(1));
    Response<KeyVaultKey> createRsaKeyResponse =
        keyClient.createRsaKeyWithResponse(createRsaKeyOptions, new Context("key1", "value1"));
    System.out.printf("Created key with name: %s and: id %s%n", createRsaKeyResponse.getValue().getName(),
        createRsaKeyResponse.getValue().getId());
    // Elliptic-curve key with an explicit curve.
    CreateEcKeyOptions createEcKeyOptions = new CreateEcKeyOptions("keyName")
        .setCurveName(KeyCurveName.P_384)
        .setNotBefore(OffsetDateTime.now().plusDays(1))
        .setExpiresOn(OffsetDateTime.now().plusYears(1));
    Response<KeyVaultKey> createEcKeyResponse =
        keyClient.createEcKeyWithResponse(createEcKeyOptions, new Context("key1", "value1"));
    System.out.printf("Created key with name: %s and: id %s%n", createEcKeyResponse.getValue().getName(),
        createEcKeyResponse.getValue().getId());
    // Symmetric (OCT) key.
    CreateOctKeyOptions createOctKeyOptions = new CreateOctKeyOptions("keyName")
        .setNotBefore(OffsetDateTime.now().plusDays(1))
        .setExpiresOn(OffsetDateTime.now().plusYears(1));
    Response<KeyVaultKey> createOctKeyResponse =
        keyClient.createOctKeyWithResponse(createOctKeyOptions, new Context("key1", "value1"));
    System.out.printf("Created key with name: %s and: id %s%n", createOctKeyResponse.getValue().getName(),
        createOctKeyResponse.getValue().getId());
}
/**
* Generates a code sample for using {@link KeyClient
*/
/**
 * Code sample: fetches a specific key version with the {@code WithResponse} overload.
 */
public void getKeyWithResponse() {
    KeyClient keyClient = createClient();
    String keyVersion = "6A385B124DEF4096AF1361A85B16C204";
    Response<KeyVaultKey> getKeyResponse =
        keyClient.getKeyWithResponse("keyName", keyVersion, new Context("key1", "value1"));
    System.out.printf("Retrieved key with name: %s and: id %s%n", getKeyResponse.getValue().getName(),
        getKeyResponse.getValue().getId());
}
/**
* Generates a code sample for using {@link KeyClient
* {@link KeyClient
*/
/**
 * Code sample: fetches the latest version of a key, then a specific version.
 */
public void getKey() {
    KeyClient keyClient = createClient();
    // Omitting the version returns the latest version of the key.
    KeyVaultKey keyWithVersionValue = keyClient.getKey("keyName");
    System.out.printf("Retrieved key with name: %s and: id %s%n", keyWithVersionValue.getName(),
        keyWithVersionValue.getId());
    // Passing a version string pins the retrieval to that version.
    String keyVersion = "6A385B124DEF4096AF1361A85B16C204";
    KeyVaultKey keyWithVersion = keyClient.getKey("keyName", keyVersion);
    System.out.printf("Retrieved key with name: %s and: id %s%n", keyWithVersion.getName(),
        keyWithVersion.getId());
}
/**
* Generates a code sample for using
* {@link KeyClient
*/
/**
 * Code sample: mutates a fetched key's properties (expiry) and pushes the update,
 * restricting the key to the listed operations, via the {@code WithResponse} overload.
 */
public void updateKeyPropertiesWithResponse() {
    KeyClient keyClient = createClient();
    KeyVaultKey key = keyClient.getKey("keyName");
    // Extend the expiry locally; the change is only persisted by the update call below.
    key.getProperties().setExpiresOn(OffsetDateTime.now().plusDays(60));
    Response<KeyVaultKey> updateKeyResponse =
        keyClient.updateKeyPropertiesWithResponse(key.getProperties(), new Context("key1", "value1"),
            KeyOperation.ENCRYPT, KeyOperation.DECRYPT);
    System.out.printf("Updated key with name: %s and id: %s%n", updateKeyResponse.getValue().getName(),
        updateKeyResponse.getValue().getId());
}
/**
* Generates a code sample for using {@link KeyClient
*/
/**
 * Code sample: same update flow as above but with the plain (non-Response) overload.
 */
public void updateKeyProperties() {
    KeyClient keyClient = createClient();
    KeyVaultKey key = keyClient.getKey("keyName");
    key.getProperties().setExpiresOn(OffsetDateTime.now().plusDays(60));
    KeyVaultKey updatedKey = keyClient.updateKeyProperties(key.getProperties(), KeyOperation.ENCRYPT,
        KeyOperation.DECRYPT);
    System.out.printf("Key is updated with name %s and id %s %n", updatedKey.getName(), updatedKey.getId());
}
/**
* Generates a code sample for using {@link KeyClient
*/
/**
 * Code sample: retrieves a soft-deleted key with the {@code WithResponse} overload.
 */
public void getDeletedKeyWithResponse() {
    KeyClient keyClient = createClient();
    Response<DeletedKey> deletedKeyResponse =
        keyClient.getDeletedKeyWithResponse("keyName", new Context("key1", "value1"));
    System.out.printf("Deleted key with recovery id: %s%n", deletedKeyResponse.getValue().getRecoveryId());
}
/**
* Generates a code sample for using {@link KeyClient
*/
/**
 * Code sample: permanently purges a soft-deleted key; the call returns nothing.
 */
public void purgeDeletedKey() {
    createClient().purgeDeletedKey("deletedKeyName");
}
/**
* Generates a code sample for using {@link KeyClient
*/
/**
 * Code sample: purges a soft-deleted key via {@code WithResponse} to inspect the HTTP status.
 */
public void purgeDeletedKeyWithResponse() {
    KeyClient keyClient = createClient();
    Response<Void> purgeDeletedKeyResponse = keyClient.purgeDeletedKeyWithResponse("deletedKeyName",
        new Context("key1", "value1"));
    System.out.printf("Purge response status code: %d%n", purgeDeletedKeyResponse.getStatusCode());
}
/**
* Generates a code sample for using {@link KeyClient
*/
/**
 * Code sample: starts a long-running recover operation for a soft-deleted key,
 * inspects one poll result, then waits for completion.
 */
public void beginRecoverDeletedKey() {
    KeyClient keyClient = createClient();
    SyncPoller<KeyVaultKey, Void> recoverKeyPoller = keyClient.beginRecoverDeletedKey("deletedKeyName");
    // A single poll returns the recovered key's in-progress state.
    PollResponse<KeyVaultKey> recoverKeyPollResponse = recoverKeyPoller.poll();
    KeyVaultKey recoveredKey = recoverKeyPollResponse.getValue();
    System.out.printf("Recovered key name: %s%n", recoveredKey.getName());
    System.out.printf("Recovered key id: %s%n", recoveredKey.getId());
    recoverKeyPoller.waitForCompletion();
}
/**
* Generates a code sample for using {@link KeyClient
*/
/**
 * Code sample: downloads a backup blob for a key and reports its length.
 */
public void backupKey() {
    KeyClient client = createClient();
    byte[] backupBlob = client.backupKey("keyName");
    System.out.printf("Key backup byte array length: %s%n", backupBlob.length);
}
/**
* Generates a code sample for using {@link KeyClient
*/
/**
 * Code sample: downloads a key backup via the {@code WithResponse} overload.
 */
public void backupKeyWithResponse() {
    KeyClient keyClient = createClient();
    Response<byte[]> backupKeyResponse = keyClient.backupKeyWithResponse("keyName", new Context("key1", "value1"));
    System.out.printf("Key backup byte array length: %s%n", backupKeyResponse.getValue().length);
}
/**
* Generates a code sample for using {@link KeyClient
*/
/**
 * Code sample: restores a key from a previously downloaded backup blob.
 * The empty array is a placeholder; a real caller supplies the bytes from backupKey.
 */
public void restoreKeyBackup() {
    KeyClient keyClient = createClient();
    byte[] keyBackupByteArray = {};
    KeyVaultKey keyResponse = keyClient.restoreKeyBackup(keyBackupByteArray);
    System.out.printf("Restored key with name: %s and: id %s%n", keyResponse.getName(), keyResponse.getId());
}
/**
* Generates a code sample for using {@link KeyClient
*/
/**
 * Code sample: restores a key from a backup blob via the {@code WithResponse} overload.
 */
public void restoreKeyBackupWithResponse() {
    KeyClient keyClient = createClient();
    byte[] keyBackupByteArray = {};
    Response<KeyVaultKey> keyResponse = keyClient.restoreKeyBackupWithResponse(keyBackupByteArray,
        new Context("key1", "value1"));
    System.out.printf("Restored key with name: %s and: id %s%n",
        keyResponse.getValue().getName(), keyResponse.getValue().getId());
}
/**
* Generates a code sample for using {@link KeyClient
* {@link KeyClient
*/
/**
 * Code sample: lists key properties three ways — plain iteration, iteration with a
 * per-call {@code Context}, and page-wise iteration that also shows response details.
 * Listing returns properties only, so the full key is re-fetched for each item.
 */
public void listPropertiesOfKeys() {
    KeyClient keyClient = createClient();
    for (KeyProperties keyProperties : keyClient.listPropertiesOfKeys()) {
        KeyVaultKey key = keyClient.getKey(keyProperties.getName(), keyProperties.getVersion());
        System.out.printf("Retrieved key with name: %s and type: %s%n", key.getName(), key.getKeyType());
    }
    // Same listing with call-scoped Context metadata.
    for (KeyProperties keyProperties : keyClient.listPropertiesOfKeys(new Context("key1", "value1"))) {
        KeyVaultKey key = keyClient.getKey(keyProperties.getName(), keyProperties.getVersion());
        System.out.printf("Retrieved key with name: %s and type: %s%n", key.getName(),
            key.getKeyType());
    }
    // Page-wise iteration exposes the underlying HTTP request/response per page.
    keyClient.listPropertiesOfKeys().iterableByPage().forEach(pagedResponse -> {
        System.out.printf("Got response details. Url: %s. Status code: %d.%n",
            pagedResponse.getRequest().getUrl(), pagedResponse.getStatusCode());
        pagedResponse.getElements().forEach(keyProperties -> {
            KeyVaultKey key = keyClient.getKey(keyProperties.getName(), keyProperties.getVersion());
            System.out.printf("Retrieved key with name: %s and type: %s%n", key.getName(),
                key.getKeyType());
        });
    });
}
/**
* Generates a code sample for using {@link KeyClient
* {@link KeyClient
*/
/**
 * Code sample: lists soft-deleted keys — plain iteration, iteration with a
 * per-call {@code Context}, and page-wise iteration with response details.
 */
public void listDeletedKeys() {
    KeyClient keyClient = createClient();
    for (DeletedKey deletedKey : keyClient.listDeletedKeys()) {
        System.out.printf("Deleted key's recovery id:%s%n", deletedKey.getRecoveryId());
    }
    for (DeletedKey deletedKey : keyClient.listDeletedKeys(new Context("key1", "value1"))) {
        System.out.printf("Deleted key's recovery id:%s%n", deletedKey.getRecoveryId());
    }
    keyClient.listDeletedKeys().iterableByPage().forEach(pagedResponse -> {
        System.out.printf("Got response details. Url: %s. Status code: %d.%n",
            pagedResponse.getRequest().getUrl(), pagedResponse.getStatusCode());
        pagedResponse.getElements().forEach(deletedKey ->
            System.out.printf("Deleted key's recovery id:%s%n", deletedKey.getRecoveryId()));
    });
}
/**
* Generates code sample for using {@link KeyClient
* {@link KeyClient
*/
/**
 * Code sample: lists all versions of a named key — plain iteration, iteration with a
 * per-call {@code Context}, and page-wise iteration with response details.
 */
public void listPropertiesOfKeyVersions() {
    KeyClient keyClient = createClient();
    for (KeyProperties keyProperties : keyClient.listPropertiesOfKeyVersions("keyName")) {
        KeyVaultKey key = keyClient.getKey(keyProperties.getName(), keyProperties.getVersion());
        System.out.printf("Retrieved key version: %s with name: %s and type: %s%n",
            key.getProperties().getVersion(), key.getName(), key.getKeyType());
    }
    for (KeyProperties keyProperties : keyClient.listPropertiesOfKeyVersions("keyName", new Context("key1", "value1"))) {
        KeyVaultKey key = keyClient.getKey(keyProperties.getName(), keyProperties.getVersion());
        System.out.printf("Retrieved key version: %s with name: %s and type: %s%n",
            key.getProperties().getVersion(), key.getName(), key.getKeyType());
    }
    keyClient.listPropertiesOfKeyVersions("keyName").iterableByPage().forEach(pagedResponse -> {
        System.out.printf("Got response details. Url: %s. Status code: %d.%n",
            pagedResponse.getRequest().getUrl(), pagedResponse.getStatusCode());
        pagedResponse.getElements().forEach(keyProperties ->
            System.out.printf("Key name: %s. Key version: %s.%n", keyProperties.getName(),
                keyProperties.getVersion()));
    });
}
/**
* Generates code samples for using {@link KeyClient
* {@link KeyClient
*/
/**
 * Code sample: requests random bytes from the service (Managed HSM feature),
 * first with the plain overload, then with {@code WithResponse}.
 */
public void getRandomBytes() {
    KeyClient keyClient = createClient();
    int amount = 16;
    RandomBytes randomBytes = keyClient.getRandomBytes(amount);
    System.out.printf("Retrieved %d random bytes: %s%n", amount, Arrays.toString(randomBytes.getBytes()));
    int amountOfBytes = 16;
    Response<RandomBytes> response =
        keyClient.getRandomBytesWithResponse(amountOfBytes, new Context("key1", "value1"));
    System.out.printf("Response received successfully with status code: %d. Retrieved %d random bytes: %s%n",
        response.getStatusCode(), amountOfBytes, Arrays.toString(response.getValue().getBytes()));
}
/**
* Generates code samples for using {@link KeyClient
* {@link KeyClient
* {@link KeyClient
*/
/**
 * Code sample: releases a key to a caller that presents an attestation token —
 * latest version, specific version, and the {@code WithResponse} overload with
 * {@code ReleaseKeyOptions} (wrap algorithm and nonce).
 * NOTE(review): "someAttestationToken" is a placeholder; real calls pass a token
 * from an attestation service.
 */
public void releaseKey() {
    KeyClient keyClient = createClient();
    String target = "someAttestationToken";
    ReleaseKeyResult releaseKeyResult = keyClient.releaseKey("keyName", target);
    System.out.printf("Signed object containing released key: %s%n", releaseKeyResult);
    String myKeyVersion = "6A385B124DEF4096AF1361A85B16C204";
    String myTarget = "someAttestationToken";
    ReleaseKeyResult releaseKeyVersionResult = keyClient.releaseKey("keyName", myKeyVersion, myTarget);
    System.out.printf("Signed object containing released key: %s%n", releaseKeyVersionResult);
    String releaseKeyVersion = "6A385B124DEF4096AF1361A85B16C204";
    String releaseTarget = "someAttestationToken";
    ReleaseKeyOptions releaseKeyOptions = new ReleaseKeyOptions()
        .setAlgorithm(KeyExportEncryptionAlgorithm.RSA_AES_KEY_WRAP_256)
        .setNonce("someNonce");
    Response<ReleaseKeyResult> releaseKeyResultResponse =
        keyClient.releaseKeyWithResponse("keyName", releaseKeyVersion, releaseTarget, releaseKeyOptions,
            new Context("key1", "value1"));
    System.out.printf("Response received successfully with status code: %d. Signed object containing"
        + "released key: %s%n", releaseKeyResultResponse.getStatusCode(),
        releaseKeyResultResponse.getValue().getValue());
}
/**
* Generates code samples for using {@link KeyClient
* {@link KeyClient
*/
/**
 * Code sample: rotates a key on demand (creates a new version), with both the plain
 * and the {@code WithResponse} overload.
 */
public void rotateKey() {
    KeyClient keyClient = createClient();
    KeyVaultKey key = keyClient.rotateKey("keyName");
    System.out.printf("Rotated key with name: %s and version:%s%n", key.getName(),
        key.getProperties().getVersion());
    Response<KeyVaultKey> keyResponse = keyClient.rotateKeyWithResponse("keyName", new Context("key1", "value1"));
    System.out.printf("Response received successfully with status code: %d. Rotated key with name: %s and"
        + "version: %s%n", keyResponse.getStatusCode(), keyResponse.getValue().getName(),
        keyResponse.getValue().getProperties().getVersion());
}
/**
* Generates code samples for using {@link KeyClient
* {@link KeyClient
*/
/**
 * Code sample: fetches the rotation policy attached to a key, with both the plain
 * and the {@code WithResponse} overload.
 */
public void getKeyRotationPolicy() {
    KeyClient keyClient = createClient();
    KeyRotationPolicy keyRotationPolicy = keyClient.getKeyRotationPolicy("keyName");
    System.out.printf("Retrieved key rotation policy with id: %s%n", keyRotationPolicy.getId());
    Response<KeyRotationPolicy> keyRotationPolicyResponse =
        keyClient.getKeyRotationPolicyWithResponse("keyName", new Context("key1", "value1"));
    System.out.printf("Response received successfully with status code: %d. Retrieved key rotation policy"
        + "with id: %s%n", keyRotationPolicyResponse.getStatusCode(), keyRotationPolicyResponse.getValue().getId());
}
/**
* Generates code samples for using {@link KeyClient
* and {@link KeyClient
*/
} |
We don't use builders for models such as this, but we do use a fluent pattern for all of them so we can chain set operations, for example: ``` java ModelClass myModel = new ModelClass(requiredProperty1, requiredProperty2) .setOptionalProperty("some-value") .setOtherOptionalProperty("other-value"); ``` | public void updateKeyRotationPolicy() {
KeyClient keyClient = createClient();
List<KeyRotationLifetimeAction> lifetimeActions = new ArrayList<>();
KeyRotationLifetimeAction rotateLifetimeAction = new KeyRotationLifetimeAction(KeyRotationPolicyAction.ROTATE)
.setTimeAfterCreate("P90D")
.setTimeBeforeExpiry("P45D");
KeyRotationLifetimeAction notifyLifetimeAction = new KeyRotationLifetimeAction(KeyRotationPolicyAction.NOTIFY)
.setTimeAfterCreate("P90D")
.setTimeBeforeExpiry("P45D");
lifetimeActions.add(rotateLifetimeAction);
lifetimeActions.add(notifyLifetimeAction);
KeyRotationPolicyProperties policyProperties = new KeyRotationPolicyProperties()
.setLifetimeActions(lifetimeActions)
.setExpiryTime("P6M");
KeyRotationPolicy keyRotationPolicy =
keyClient.updateKeyRotationPolicy("keyName", policyProperties);
System.out.printf("Updated key rotation policy with id: %s%n", keyRotationPolicy.getId());
List<KeyRotationLifetimeAction> myLifetimeActions = new ArrayList<>();
KeyRotationLifetimeAction myRotateLifetimeAction = new KeyRotationLifetimeAction(KeyRotationPolicyAction.ROTATE)
.setTimeAfterCreate("P90D")
.setTimeBeforeExpiry("P45D");
KeyRotationLifetimeAction myNotifyLifetimeAction = new KeyRotationLifetimeAction(KeyRotationPolicyAction.NOTIFY)
.setTimeAfterCreate("P90D")
.setTimeBeforeExpiry("P45D");
myLifetimeActions.add(myRotateLifetimeAction);
myLifetimeActions.add(myNotifyLifetimeAction);
KeyRotationPolicyProperties myPolicyProperties = new KeyRotationPolicyProperties()
.setLifetimeActions(myLifetimeActions)
.setExpiryTime("P6M");
Response<KeyRotationPolicy> keyRotationPolicyResponse = keyClient.updateKeyRotationPolicyWithResponse(
"keyName", myPolicyProperties, new Context("key1", "value1"));
System.out.printf("Response received successfully with status code: %d. Updated key rotation policy"
+ "with id: %s%n", keyRotationPolicyResponse.getStatusCode(), keyRotationPolicyResponse.getValue().getId());
} | KeyRotationLifetimeAction myRotateLifetimeAction = new KeyRotationLifetimeAction(KeyRotationPolicyAction.ROTATE) | public void updateKeyRotationPolicy() {
KeyClient keyClient = createClient();
List<KeyRotationLifetimeAction> lifetimeActions = new ArrayList<>();
KeyRotationLifetimeAction rotateLifetimeAction = new KeyRotationLifetimeAction(KeyRotationPolicyAction.ROTATE)
.setTimeAfterCreate("P90D")
.setTimeBeforeExpiry("P45D");
KeyRotationLifetimeAction notifyLifetimeAction = new KeyRotationLifetimeAction(KeyRotationPolicyAction.NOTIFY)
.setTimeAfterCreate("P90D")
.setTimeBeforeExpiry("P45D");
lifetimeActions.add(rotateLifetimeAction);
lifetimeActions.add(notifyLifetimeAction);
KeyRotationPolicyProperties policyProperties = new KeyRotationPolicyProperties()
.setLifetimeActions(lifetimeActions)
.setExpiryTime("P6M");
KeyRotationPolicy keyRotationPolicy =
keyClient.updateKeyRotationPolicy("keyName", policyProperties);
System.out.printf("Updated key rotation policy with id: %s%n", keyRotationPolicy.getId());
List<KeyRotationLifetimeAction> myLifetimeActions = new ArrayList<>();
KeyRotationLifetimeAction myRotateLifetimeAction = new KeyRotationLifetimeAction(KeyRotationPolicyAction.ROTATE)
.setTimeAfterCreate("P90D")
.setTimeBeforeExpiry("P45D");
KeyRotationLifetimeAction myNotifyLifetimeAction = new KeyRotationLifetimeAction(KeyRotationPolicyAction.NOTIFY)
.setTimeAfterCreate("P90D")
.setTimeBeforeExpiry("P45D");
myLifetimeActions.add(myRotateLifetimeAction);
myLifetimeActions.add(myNotifyLifetimeAction);
KeyRotationPolicyProperties myPolicyProperties = new KeyRotationPolicyProperties()
.setLifetimeActions(myLifetimeActions)
.setExpiryTime("P6M");
Response<KeyRotationPolicy> keyRotationPolicyResponse = keyClient.updateKeyRotationPolicyWithResponse(
"keyName", myPolicyProperties, new Context("key1", "value1"));
System.out.printf("Response received successfully with status code: %d. Updated key rotation policy"
+ "with id: %s%n", keyRotationPolicyResponse.getStatusCode(), keyRotationPolicyResponse.getValue().getId());
} | class KeyClientJavaDocCodeSnippets {
/**
* Generates a code sample for creating a {@link KeyClient}.
*
* @return An instance of {@link KeyClient}.
*/
public KeyClient createClient() {
    // NOTE(review): the vault URL literal below was truncated to `"https:` by the
    // extraction that produced this text; the original snippet supplies a full
    // vault URL (and the line ends that string) — restore before compiling.
    KeyClient keyClient = new KeyClientBuilder()
        .vaultUrl("https:
        .credential(new DefaultAzureCredentialBuilder().build())
        .buildClient();
    return keyClient;
}
/**
* Generates a code sample for using {@link KeyClient
* {@link KeyClient
* {@link KeyClient
* {@link KeyClient
*/
/**
 * Code sample: creates keys of each kind with the plain overloads — generic (EC),
 * generic with options (RSA), RSA with key size, EC with curve, and OCT (symmetric).
 */
public void createKey() {
    KeyClient keyClient = createClient();
    // Simplest form: name + key type.
    KeyVaultKey key = keyClient.createKey("keyName", KeyType.EC);
    System.out.printf("Created key with name: %s and id: %s%n", key.getName(), key.getId());
    // Options object adds activation (notBefore) and expiry.
    CreateKeyOptions createKeyOptions = new CreateKeyOptions("keyName", KeyType.RSA)
        .setNotBefore(OffsetDateTime.now().plusDays(1))
        .setExpiresOn(OffsetDateTime.now().plusYears(1));
    KeyVaultKey optionsKey = keyClient.createKey(createKeyOptions);
    System.out.printf("Created key with name: %s and id: %s%n", optionsKey.getName(), optionsKey.getId());
    // RSA-specific options: modulus size.
    CreateRsaKeyOptions createRsaKeyOptions = new CreateRsaKeyOptions("keyName")
        .setKeySize(2048)
        .setNotBefore(OffsetDateTime.now().plusDays(1))
        .setExpiresOn(OffsetDateTime.now().plusYears(1));
    KeyVaultKey rsaKey = keyClient.createRsaKey(createRsaKeyOptions);
    System.out.printf("Created key with name: %s and id: %s%n", rsaKey.getName(), rsaKey.getId());
    // EC-specific options: curve name.
    CreateEcKeyOptions createEcKeyOptions = new CreateEcKeyOptions("keyName")
        .setCurveName(KeyCurveName.P_384)
        .setNotBefore(OffsetDateTime.now().plusDays(1))
        .setExpiresOn(OffsetDateTime.now().plusYears(1));
    KeyVaultKey ecKey = keyClient.createEcKey(createEcKeyOptions);
    System.out.printf("Created key with name: %s and id: %s%n", ecKey.getName(), ecKey.getId());
    // Symmetric (OCT) key.
    CreateOctKeyOptions createOctKeyOptions = new CreateOctKeyOptions("keyName")
        .setNotBefore(OffsetDateTime.now().plusDays(1))
        .setExpiresOn(OffsetDateTime.now().plusYears(1));
    KeyVaultKey octKey = keyClient.createOctKey(createOctKeyOptions);
    System.out.printf("Created key with name: %s and id: %s%n", octKey.getName(), octKey.getId());
}
/**
* Generates code samples for using {@link KeyAsyncClient
* {@link KeyAsyncClient
* {@link KeyAsyncClient
*/
public void importKey() {
KeyClient keyClient = createClient();
JsonWebKey jsonWebKeyToImport = new JsonWebKey();
KeyVaultKey key = keyClient.importKey("keyName", jsonWebKeyToImport);
System.out.printf("Imported key with name: %s and id: %s%n", key.getName(), key.getId());
ImportKeyOptions options = new ImportKeyOptions("keyName", jsonWebKeyToImport)
.setHardwareProtected(false);
KeyVaultKey importedKey = keyClient.importKey(options);
System.out.printf("Imported key with name: %s and id: %s%n", importedKey.getName(),
importedKey.getId());
ImportKeyOptions importKeyOptions = new ImportKeyOptions("keyName", jsonWebKeyToImport)
.setHardwareProtected(false);
Response<KeyVaultKey> response =
keyClient.importKeyWithResponse(importKeyOptions, new Context("key1", "value1"));
System.out.printf("Imported key with name: %s and id: %s%n", response.getValue().getName(),
response.getValue().getId());
}
/**
* Generates a code sample for using {@link KeyClient
*/
public void beginDeleteKey() {
KeyClient keyClient = createClient();
SyncPoller<DeletedKey, Void> deleteKeyPoller = keyClient.beginDeleteKey("keyName");
PollResponse<DeletedKey> deleteKeyPollResponse = deleteKeyPoller.poll();
DeletedKey deletedKey = deleteKeyPollResponse.getValue();
System.out.printf("Key delete date: %s%n" + deletedKey.getDeletedOn());
System.out.printf("Deleted key's recovery id: %s%n", deletedKey.getRecoveryId());
deleteKeyPoller.waitForCompletion();
}
/**
* Generates a code sample for using {@link KeyClient
*/
public void getDeletedKey() {
KeyClient keyClient = createClient();
DeletedKey deletedKey = keyClient.getDeletedKey("keyName");
System.out.printf("Deleted key's recovery id: %s%n", deletedKey.getRecoveryId());
}
/**
* Generates a code sample for using {@link KeyClient
* {@link KeyClient
* {@link KeyClient
* {@link KeyClient
*/
public void createKeyWithResponse() {
KeyClient keyClient = createClient();
CreateKeyOptions createKeyOptions = new CreateKeyOptions("keyName", KeyType.RSA)
.setNotBefore(OffsetDateTime.now().plusDays(1))
.setExpiresOn(OffsetDateTime.now().plusYears(1));
Response<KeyVaultKey> createKeyResponse =
keyClient.createKeyWithResponse(createKeyOptions, new Context("key1", "value1"));
System.out.printf("Created key with name: %s and: id %s%n", createKeyResponse.getValue().getName(),
createKeyResponse.getValue().getId());
CreateRsaKeyOptions createRsaKeyOptions = new CreateRsaKeyOptions("keyName")
.setKeySize(2048)
.setNotBefore(OffsetDateTime.now().plusDays(1))
.setExpiresOn(OffsetDateTime.now().plusYears(1));
Response<KeyVaultKey> createRsaKeyResponse =
keyClient.createRsaKeyWithResponse(createRsaKeyOptions, new Context("key1", "value1"));
System.out.printf("Created key with name: %s and: id %s%n", createRsaKeyResponse.getValue().getName(),
createRsaKeyResponse.getValue().getId());
CreateEcKeyOptions createEcKeyOptions = new CreateEcKeyOptions("keyName")
.setCurveName(KeyCurveName.P_384)
.setNotBefore(OffsetDateTime.now().plusDays(1))
.setExpiresOn(OffsetDateTime.now().plusYears(1));
Response<KeyVaultKey> createEcKeyResponse =
keyClient.createEcKeyWithResponse(createEcKeyOptions, new Context("key1", "value1"));
System.out.printf("Created key with name: %s and: id %s%n", createEcKeyResponse.getValue().getName(),
createEcKeyResponse.getValue().getId());
CreateOctKeyOptions createOctKeyOptions = new CreateOctKeyOptions("keyName")
.setNotBefore(OffsetDateTime.now().plusDays(1))
.setExpiresOn(OffsetDateTime.now().plusYears(1));
Response<KeyVaultKey> createOctKeyResponse =
keyClient.createOctKeyWithResponse(createOctKeyOptions, new Context("key1", "value1"));
System.out.printf("Created key with name: %s and: id %s%n", createOctKeyResponse.getValue().getName(),
createOctKeyResponse.getValue().getId());
}
/**
* Generates a code sample for using {@link KeyClient
*/
public void getKeyWithResponse() {
KeyClient keyClient = createClient();
String keyVersion = "6A385B124DEF4096AF1361A85B16C204";
Response<KeyVaultKey> getKeyResponse =
keyClient.getKeyWithResponse("keyName", keyVersion, new Context("key1", "value1"));
System.out.printf("Retrieved key with name: %s and: id %s%n", getKeyResponse.getValue().getName(),
getKeyResponse.getValue().getId());
}
/**
* Generates a code sample for using {@link KeyClient
* {@link KeyClient
*/
public void getKey() {
KeyClient keyClient = createClient();
KeyVaultKey keyWithVersionValue = keyClient.getKey("keyName");
System.out.printf("Retrieved key with name: %s and: id %s%n", keyWithVersionValue.getName(),
keyWithVersionValue.getId());
String keyVersion = "6A385B124DEF4096AF1361A85B16C204";
KeyVaultKey keyWithVersion = keyClient.getKey("keyName", keyVersion);
System.out.printf("Retrieved key with name: %s and: id %s%n", keyWithVersion.getName(),
keyWithVersion.getId());
}
/**
* Generates a code sample for using
* {@link KeyClient
*/
public void updateKeyPropertiesWithResponse() {
KeyClient keyClient = createClient();
KeyVaultKey key = keyClient.getKey("keyName");
key.getProperties().setExpiresOn(OffsetDateTime.now().plusDays(60));
Response<KeyVaultKey> updateKeyResponse =
keyClient.updateKeyPropertiesWithResponse(key.getProperties(), new Context("key1", "value1"),
KeyOperation.ENCRYPT, KeyOperation.DECRYPT);
System.out.printf("Updated key with name: %s and id: %s%n", updateKeyResponse.getValue().getName(),
updateKeyResponse.getValue().getId());
}
/**
* Generates a code sample for using {@link KeyClient
*/
public void updateKeyProperties() {
KeyClient keyClient = createClient();
KeyVaultKey key = keyClient.getKey("keyName");
key.getProperties().setExpiresOn(OffsetDateTime.now().plusDays(60));
KeyVaultKey updatedKey = keyClient.updateKeyProperties(key.getProperties(), KeyOperation.ENCRYPT,
KeyOperation.DECRYPT);
System.out.printf("Key is updated with name %s and id %s %n", updatedKey.getName(), updatedKey.getId());
}
/**
* Generates a code sample for using {@link KeyClient
*/
public void getDeletedKeyWithResponse() {
KeyClient keyClient = createClient();
Response<DeletedKey> deletedKeyResponse =
keyClient.getDeletedKeyWithResponse("keyName", new Context("key1", "value1"));
System.out.printf("Deleted key with recovery id: %s%n", deletedKeyResponse.getValue().getRecoveryId());
}
/**
* Generates a code sample for using {@link KeyClient
*/
public void purgeDeletedKey() {
KeyClient keyClient = createClient();
keyClient.purgeDeletedKey("deletedKeyName");
}
/**
* Generates a code sample for using {@link KeyClient
*/
public void purgeDeletedKeyWithResponse() {
KeyClient keyClient = createClient();
Response<Void> purgeDeletedKeyResponse = keyClient.purgeDeletedKeyWithResponse("deletedKeyName",
new Context("key1", "value1"));
System.out.printf("Purge response status code: %d%n", purgeDeletedKeyResponse.getStatusCode());
}
/**
* Generates a code sample for using {@link KeyClient
*/
public void beginRecoverDeletedKey() {
KeyClient keyClient = createClient();
SyncPoller<KeyVaultKey, Void> recoverKeyPoller = keyClient.beginRecoverDeletedKey("deletedKeyName");
PollResponse<KeyVaultKey> recoverKeyPollResponse = recoverKeyPoller.poll();
KeyVaultKey recoveredKey = recoverKeyPollResponse.getValue();
System.out.printf("Recovered key name: %s%n", recoveredKey.getName());
System.out.printf("Recovered key id: %s%n", recoveredKey.getId());
recoverKeyPoller.waitForCompletion();
}
/**
* Generates a code sample for using {@link KeyClient
*/
public void backupKey() {
KeyClient keyClient = createClient();
byte[] keyBackup = keyClient.backupKey("keyName");
System.out.printf("Key backup byte array length: %s%n", keyBackup.length);
}
/**
* Generates a code sample for using {@link KeyClient
*/
public void backupKeyWithResponse() {
KeyClient keyClient = createClient();
Response<byte[]> backupKeyResponse = keyClient.backupKeyWithResponse("keyName", new Context("key1", "value1"));
System.out.printf("Key backup byte array length: %s%n", backupKeyResponse.getValue().length);
}
/**
* Generates a code sample for using {@link KeyClient
*/
public void restoreKeyBackup() {
KeyClient keyClient = createClient();
byte[] keyBackupByteArray = {};
KeyVaultKey keyResponse = keyClient.restoreKeyBackup(keyBackupByteArray);
System.out.printf("Restored key with name: %s and: id %s%n", keyResponse.getName(), keyResponse.getId());
}
/**
* Generates a code sample for using {@link KeyClient
*/
public void restoreKeyBackupWithResponse() {
KeyClient keyClient = createClient();
byte[] keyBackupByteArray = {};
Response<KeyVaultKey> keyResponse = keyClient.restoreKeyBackupWithResponse(keyBackupByteArray,
new Context("key1", "value1"));
System.out.printf("Restored key with name: %s and: id %s%n",
keyResponse.getValue().getName(), keyResponse.getValue().getId());
}
/**
* Generates a code sample for using {@link KeyClient
* {@link KeyClient
*/
public void listPropertiesOfKeys() {
KeyClient keyClient = createClient();
for (KeyProperties keyProperties : keyClient.listPropertiesOfKeys()) {
KeyVaultKey key = keyClient.getKey(keyProperties.getName(), keyProperties.getVersion());
System.out.printf("Retrieved key with name: %s and type: %s%n", key.getName(), key.getKeyType());
}
for (KeyProperties keyProperties : keyClient.listPropertiesOfKeys(new Context("key1", "value1"))) {
KeyVaultKey key = keyClient.getKey(keyProperties.getName(), keyProperties.getVersion());
System.out.printf("Retrieved key with name: %s and type: %s%n", key.getName(),
key.getKeyType());
}
keyClient.listPropertiesOfKeys().iterableByPage().forEach(pagedResponse -> {
System.out.printf("Got response details. Url: %s. Status code: %d.%n",
pagedResponse.getRequest().getUrl(), pagedResponse.getStatusCode());
pagedResponse.getElements().forEach(keyProperties -> {
KeyVaultKey key = keyClient.getKey(keyProperties.getName(), keyProperties.getVersion());
System.out.printf("Retrieved key with name: %s and type: %s%n", key.getName(),
key.getKeyType());
});
});
}
/**
* Generates a code sample for using {@link KeyClient
* {@link KeyClient
*/
public void listDeletedKeys() {
KeyClient keyClient = createClient();
for (DeletedKey deletedKey : keyClient.listDeletedKeys()) {
System.out.printf("Deleted key's recovery id:%s%n", deletedKey.getRecoveryId());
}
for (DeletedKey deletedKey : keyClient.listDeletedKeys(new Context("key1", "value1"))) {
System.out.printf("Deleted key's recovery id:%s%n", deletedKey.getRecoveryId());
}
keyClient.listDeletedKeys().iterableByPage().forEach(pagedResponse -> {
System.out.printf("Got response details. Url: %s. Status code: %d.%n",
pagedResponse.getRequest().getUrl(), pagedResponse.getStatusCode());
pagedResponse.getElements().forEach(deletedKey ->
System.out.printf("Deleted key's recovery id:%s%n", deletedKey.getRecoveryId()));
});
}
/**
* Generates code sample for using {@link KeyClient
* {@link KeyClient
*/
public void listPropertiesOfKeyVersions() {
KeyClient keyClient = createClient();
for (KeyProperties keyProperties : keyClient.listPropertiesOfKeyVersions("keyName")) {
KeyVaultKey key = keyClient.getKey(keyProperties.getName(), keyProperties.getVersion());
System.out.printf("Retrieved key version: %s with name: %s and type: %s%n",
key.getProperties().getVersion(), key.getName(), key.getKeyType());
}
for (KeyProperties keyProperties : keyClient.listPropertiesOfKeyVersions("keyName", new Context("key1", "value1"))) {
KeyVaultKey key = keyClient.getKey(keyProperties.getName(), keyProperties.getVersion());
System.out.printf("Retrieved key version: %s with name: %s and type: %s%n",
key.getProperties().getVersion(), key.getName(), key.getKeyType());
}
keyClient.listPropertiesOfKeyVersions("keyName").iterableByPage().forEach(pagedResponse -> {
System.out.printf("Got response details. Url: %s. Status code: %d.%n",
pagedResponse.getRequest().getUrl(), pagedResponse.getStatusCode());
pagedResponse.getElements().forEach(keyProperties ->
System.out.printf("Key name: %s. Key version: %s.%n", keyProperties.getName(),
keyProperties.getVersion()));
});
}
/**
* Generates code samples for using {@link KeyClient
* {@link KeyClient
*/
/** Sample: request random bytes from the vault, with and without the raw HTTP response. */
public void getRandomBytes() {
    KeyClient client = createClient();
    // Simple form: just the byte count.
    int count = 16;
    RandomBytes randomBytes = client.getRandomBytes(count);
    System.out.printf("Retrieved %d random bytes: %s%n", count, Arrays.toString(randomBytes.getBytes()));
    // WithResponse form: also surfaces the HTTP status code.
    int countWithResponse = 16;
    Response<RandomBytes> randomBytesResponse =
        client.getRandomBytesWithResponse(countWithResponse, new Context("key1", "value1"));
    System.out.printf("Response received successfully with status code: %d. Retrieved %d random bytes: %s%n",
        randomBytesResponse.getStatusCode(), countWithResponse,
        Arrays.toString(randomBytesResponse.getValue().getBytes()));
}
/**
* Generates code samples for using {@link KeyClient
* {@link KeyClient
* {@link KeyClient
*/
// Sample: release a key to a trusted execution environment — latest version, a specific
// version, and the WithResponse overload that takes ReleaseKeyOptions.
public void releaseKey() {
KeyClient keyClient = createClient();
// NOTE(review): "target" is the attestation assertion presented to the service —
// confirm the expected token format against the Key Vault release docs.
String target = "someAttestationToken";
ReleaseKeyResult releaseKeyResult = keyClient.releaseKey("keyName", target);
System.out.printf("Signed object containing released key: %s%n", releaseKeyResult);
// Release a specific key version.
String myKeyVersion = "6A385B124DEF4096AF1361A85B16C204";
String myTarget = "someAttestationToken";
ReleaseKeyResult releaseKeyVersionResult = keyClient.releaseKey("keyName", myKeyVersion, myTarget);
System.out.printf("Signed object containing released key: %s%n", releaseKeyVersionResult);
// Full control: pick the export encryption algorithm and a nonce, get the raw response.
String releaseKeyVersion = "6A385B124DEF4096AF1361A85B16C204";
String releaseTarget = "someAttestationToken";
ReleaseKeyOptions releaseKeyOptions = new ReleaseKeyOptions()
.setAlgorithm(KeyExportEncryptionAlgorithm.RSA_AES_KEY_WRAP_256)
.setNonce("someNonce");
Response<ReleaseKeyResult> releaseKeyResultResponse =
keyClient.releaseKeyWithResponse("keyName", releaseKeyVersion, releaseTarget, releaseKeyOptions,
new Context("key1", "value1"));
System.out.printf("Response received successfully with status code: %d. Signed object containing"
+ "released key: %s%n", releaseKeyResultResponse.getStatusCode(),
releaseKeyResultResponse.getValue().getValue());
}
/**
* Generates code samples for using {@link KeyClient
* {@link KeyClient
*/
/** Sample: rotate a key on demand, with and without the raw HTTP response. */
public void rotateKey() {
    KeyClient client = createClient();
    KeyVaultKey rotated = client.rotateKey("keyName");
    System.out.printf("Rotated key with name: %s and version:%s%n", rotated.getName(),
        rotated.getProperties().getVersion());
    Response<KeyVaultKey> rotateResponse = client.rotateKeyWithResponse("keyName", new Context("key1", "value1"));
    KeyVaultKey rotatedFromResponse = rotateResponse.getValue();
    System.out.printf("Response received successfully with status code: %d. Rotated key with name: %s and"
        + "version: %s%n", rotateResponse.getStatusCode(), rotatedFromResponse.getName(),
        rotatedFromResponse.getProperties().getVersion());
}
/**
* Generates code samples for using {@link KeyClient
* {@link KeyClient
*/
/** Sample: read a key's rotation policy, with and without the raw HTTP response. */
public void getKeyRotationPolicy() {
    KeyClient client = createClient();
    KeyRotationPolicy policy = client.getKeyRotationPolicy("keyName");
    System.out.printf("Retrieved key rotation policy with id: %s%n", policy.getId());
    Response<KeyRotationPolicy> policyResponse =
        client.getKeyRotationPolicyWithResponse("keyName", new Context("key1", "value1"));
    System.out.printf("Response received successfully with status code: %d. Retrieved key rotation policy"
        + "with id: %s%n", policyResponse.getStatusCode(), policyResponse.getValue().getId());
}
/**
* Generates code samples for using {@link KeyClient
* and {@link KeyClient
*/
} | class KeyClientJavaDocCodeSnippets {
/**
* Generates a code sample for creating a {@link KeyClient}.
*
* @return An instance of {@link KeyClient}.
*/
// Sample: construct a synchronous KeyClient authenticated with DefaultAzureCredential.
public KeyClient createClient() {
KeyClient keyClient = new KeyClientBuilder()
// NOTE(review): the vault URL literal appears truncated here ("https:") — likely an
// extraction artifact; confirm against the original snippet source.
.vaultUrl("https:
.credential(new DefaultAzureCredentialBuilder().build())
.buildClient();
return keyClient;
}
/**
* Generates a code sample for using {@link KeyClient
* {@link KeyClient
* {@link KeyClient
* {@link KeyClient
*/
// Sample: create keys of each kind — generic EC, RSA via options, EC with an explicit
// curve, and a symmetric (oct) key.
public void createKey() {
KeyClient keyClient = createClient();
// Minimal form: name plus key type.
KeyVaultKey key = keyClient.createKey("keyName", KeyType.EC);
System.out.printf("Created key with name: %s and id: %s%n", key.getName(), key.getId());
// CreateKeyOptions lets the caller set activation/expiry windows.
CreateKeyOptions createKeyOptions = new CreateKeyOptions("keyName", KeyType.RSA)
.setNotBefore(OffsetDateTime.now().plusDays(1))
.setExpiresOn(OffsetDateTime.now().plusYears(1));
KeyVaultKey optionsKey = keyClient.createKey(createKeyOptions);
System.out.printf("Created key with name: %s and id: %s%n", optionsKey.getName(), optionsKey.getId());
// RSA key with an explicit key size.
CreateRsaKeyOptions createRsaKeyOptions = new CreateRsaKeyOptions("keyName")
.setKeySize(2048)
.setNotBefore(OffsetDateTime.now().plusDays(1))
.setExpiresOn(OffsetDateTime.now().plusYears(1));
KeyVaultKey rsaKey = keyClient.createRsaKey(createRsaKeyOptions);
System.out.printf("Created key with name: %s and id: %s%n", rsaKey.getName(), rsaKey.getId());
// EC key on the P-384 curve.
CreateEcKeyOptions createEcKeyOptions = new CreateEcKeyOptions("keyName")
.setCurveName(KeyCurveName.P_384)
.setNotBefore(OffsetDateTime.now().plusDays(1))
.setExpiresOn(OffsetDateTime.now().plusYears(1));
KeyVaultKey ecKey = keyClient.createEcKey(createEcKeyOptions);
System.out.printf("Created key with name: %s and id: %s%n", ecKey.getName(), ecKey.getId());
// Symmetric (octet) key.
CreateOctKeyOptions createOctKeyOptions = new CreateOctKeyOptions("keyName")
.setNotBefore(OffsetDateTime.now().plusDays(1))
.setExpiresOn(OffsetDateTime.now().plusYears(1));
KeyVaultKey octKey = keyClient.createOctKey(createOctKeyOptions);
System.out.printf("Created key with name: %s and id: %s%n", octKey.getName(), octKey.getId());
}
/**
* Generates code samples for using {@link KeyAsyncClient
* {@link KeyAsyncClient
* {@link KeyAsyncClient
*/
// Sample: import an externally created JsonWebKey — simple form, with ImportKeyOptions,
// and the WithResponse overload.
public void importKey() {
KeyClient keyClient = createClient();
// NOTE(review): an empty JsonWebKey is a placeholder; real callers populate the key
// material before importing.
JsonWebKey jsonWebKeyToImport = new JsonWebKey();
KeyVaultKey key = keyClient.importKey("keyName", jsonWebKeyToImport);
System.out.printf("Imported key with name: %s and id: %s%n", key.getName(), key.getId());
// Options form: control whether the key is HSM-protected.
ImportKeyOptions options = new ImportKeyOptions("keyName", jsonWebKeyToImport)
.setHardwareProtected(false);
KeyVaultKey importedKey = keyClient.importKey(options);
System.out.printf("Imported key with name: %s and id: %s%n", importedKey.getName(),
importedKey.getId());
// WithResponse form: also surfaces the full HTTP response.
ImportKeyOptions importKeyOptions = new ImportKeyOptions("keyName", jsonWebKeyToImport)
.setHardwareProtected(false);
Response<KeyVaultKey> response =
keyClient.importKeyWithResponse(importKeyOptions, new Context("key1", "value1"));
System.out.printf("Imported key with name: %s and id: %s%n", response.getValue().getName(),
response.getValue().getId());
}
/**
* Generates a code sample for using {@link KeyClient
*/
/**
 * Sample: start a long-running key deletion, print the deletion details from the first
 * poll, then block until the operation completes.
 */
public void beginDeleteKey() {
    KeyClient keyClient = createClient();
    SyncPoller<DeletedKey, Void> deleteKeyPoller = keyClient.beginDeleteKey("keyName");
    PollResponse<DeletedKey> deleteKeyPollResponse = deleteKeyPoller.poll();
    DeletedKey deletedKey = deleteKeyPollResponse.getValue();
    // BUG FIX: the date was string-concatenated onto the format string ("…%s%n" + date),
    // leaving the "%s" specifier with no argument — printf would throw
    // MissingFormatArgumentException at runtime. Pass the date as an argument instead.
    System.out.printf("Key delete date: %s%n", deletedKey.getDeletedOn());
    System.out.printf("Deleted key's recovery id: %s%n", deletedKey.getRecoveryId());
    deleteKeyPoller.waitForCompletion();
}
/**
* Generates a code sample for using {@link KeyClient
*/
/** Sample: fetch a soft-deleted key and print its recovery id. */
public void getDeletedKey() {
    KeyClient client = createClient();
    DeletedKey softDeletedKey = client.getDeletedKey("keyName");
    String recoveryId = softDeletedKey.getRecoveryId();
    System.out.printf("Deleted key's recovery id: %s%n", recoveryId);
}
/**
* Generates a code sample for using {@link KeyClient
* {@link KeyClient
* {@link KeyClient
* {@link KeyClient
*/
// Sample: create generic/RSA/EC/oct keys via the WithResponse overloads, which surface
// the full HTTP response alongside the created key.
public void createKeyWithResponse() {
KeyClient keyClient = createClient();
// Generic key with activation/expiry windows.
CreateKeyOptions createKeyOptions = new CreateKeyOptions("keyName", KeyType.RSA)
.setNotBefore(OffsetDateTime.now().plusDays(1))
.setExpiresOn(OffsetDateTime.now().plusYears(1));
Response<KeyVaultKey> createKeyResponse =
keyClient.createKeyWithResponse(createKeyOptions, new Context("key1", "value1"));
System.out.printf("Created key with name: %s and: id %s%n", createKeyResponse.getValue().getName(),
createKeyResponse.getValue().getId());
// RSA key with an explicit key size.
CreateRsaKeyOptions createRsaKeyOptions = new CreateRsaKeyOptions("keyName")
.setKeySize(2048)
.setNotBefore(OffsetDateTime.now().plusDays(1))
.setExpiresOn(OffsetDateTime.now().plusYears(1));
Response<KeyVaultKey> createRsaKeyResponse =
keyClient.createRsaKeyWithResponse(createRsaKeyOptions, new Context("key1", "value1"));
System.out.printf("Created key with name: %s and: id %s%n", createRsaKeyResponse.getValue().getName(),
createRsaKeyResponse.getValue().getId());
// EC key on the P-384 curve.
CreateEcKeyOptions createEcKeyOptions = new CreateEcKeyOptions("keyName")
.setCurveName(KeyCurveName.P_384)
.setNotBefore(OffsetDateTime.now().plusDays(1))
.setExpiresOn(OffsetDateTime.now().plusYears(1));
Response<KeyVaultKey> createEcKeyResponse =
keyClient.createEcKeyWithResponse(createEcKeyOptions, new Context("key1", "value1"));
System.out.printf("Created key with name: %s and: id %s%n", createEcKeyResponse.getValue().getName(),
createEcKeyResponse.getValue().getId());
// Symmetric (octet) key.
CreateOctKeyOptions createOctKeyOptions = new CreateOctKeyOptions("keyName")
.setNotBefore(OffsetDateTime.now().plusDays(1))
.setExpiresOn(OffsetDateTime.now().plusYears(1));
Response<KeyVaultKey> createOctKeyResponse =
keyClient.createOctKeyWithResponse(createOctKeyOptions, new Context("key1", "value1"));
System.out.printf("Created key with name: %s and: id %s%n", createOctKeyResponse.getValue().getName(),
createOctKeyResponse.getValue().getId());
}
/**
* Generates a code sample for using {@link KeyClient
*/
/** Sample: fetch a specific key version along with the full HTTP response. */
public void getKeyWithResponse() {
    KeyClient client = createClient();
    String version = "6A385B124DEF4096AF1361A85B16C204";
    Response<KeyVaultKey> keyResponse =
        client.getKeyWithResponse("keyName", version, new Context("key1", "value1"));
    KeyVaultKey retrieved = keyResponse.getValue();
    System.out.printf("Retrieved key with name: %s and: id %s%n", retrieved.getName(),
        retrieved.getId());
}
/**
* Generates a code sample for using {@link KeyClient
* {@link KeyClient
*/
/** Sample: fetch the latest version of a key, then a specific version. */
public void getKey() {
    KeyClient client = createClient();
    // Latest version (no version argument).
    KeyVaultKey latest = client.getKey("keyName");
    System.out.printf("Retrieved key with name: %s and: id %s%n", latest.getName(),
        latest.getId());
    // A specific, pinned version.
    String version = "6A385B124DEF4096AF1361A85B16C204";
    KeyVaultKey pinned = client.getKey("keyName", version);
    System.out.printf("Retrieved key with name: %s and: id %s%n", pinned.getName(),
        pinned.getId());
}
/**
* Generates a code sample for using
* {@link KeyClient
*/
/** Sample: extend a key's expiry and restrict its operations, returning the raw response. */
public void updateKeyPropertiesWithResponse() {
    KeyClient client = createClient();
    KeyVaultKey existing = client.getKey("keyName");
    existing.getProperties().setExpiresOn(OffsetDateTime.now().plusDays(60));
    Response<KeyVaultKey> updateResponse = client.updateKeyPropertiesWithResponse(
        existing.getProperties(), new Context("key1", "value1"),
        KeyOperation.ENCRYPT, KeyOperation.DECRYPT);
    KeyVaultKey updated = updateResponse.getValue();
    System.out.printf("Updated key with name: %s and id: %s%n", updated.getName(),
        updated.getId());
}
/**
* Generates a code sample for using {@link KeyClient
*/
/** Sample: extend a key's expiry and restrict its allowed operations. */
public void updateKeyProperties() {
    KeyClient client = createClient();
    KeyVaultKey existing = client.getKey("keyName");
    existing.getProperties().setExpiresOn(OffsetDateTime.now().plusDays(60));
    KeyVaultKey updated = client.updateKeyProperties(existing.getProperties(),
        KeyOperation.ENCRYPT, KeyOperation.DECRYPT);
    System.out.printf("Key is updated with name %s and id %s %n", updated.getName(), updated.getId());
}
/**
* Generates a code sample for using {@link KeyClient
*/
/** Sample: fetch a soft-deleted key together with the raw HTTP response. */
public void getDeletedKeyWithResponse() {
    KeyClient client = createClient();
    Response<DeletedKey> deletedKeyResponse =
        client.getDeletedKeyWithResponse("keyName", new Context("key1", "value1"));
    DeletedKey softDeletedKey = deletedKeyResponse.getValue();
    System.out.printf("Deleted key with recovery id: %s%n", softDeletedKey.getRecoveryId());
}
/**
* Generates a code sample for using {@link KeyClient
*/
/** Sample: permanently remove a soft-deleted key (irreversible). */
public void purgeDeletedKey() {
    KeyClient client = createClient();
    client.purgeDeletedKey("deletedKeyName");
}
/**
* Generates a code sample for using {@link KeyClient
*/
/** Sample: purge a soft-deleted key and inspect the HTTP status code. */
public void purgeDeletedKeyWithResponse() {
    KeyClient client = createClient();
    Response<Void> purgeResponse =
        client.purgeDeletedKeyWithResponse("deletedKeyName", new Context("key1", "value1"));
    System.out.printf("Purge response status code: %d%n", purgeResponse.getStatusCode());
}
/**
* Generates a code sample for using {@link KeyClient
*/
/** Sample: start recovery of a soft-deleted key and wait until it finishes. */
public void beginRecoverDeletedKey() {
    KeyClient client = createClient();
    SyncPoller<KeyVaultKey, Void> recoveryPoller = client.beginRecoverDeletedKey("deletedKeyName");
    PollResponse<KeyVaultKey> firstPoll = recoveryPoller.poll();
    KeyVaultKey recovered = firstPoll.getValue();
    System.out.printf("Recovered key name: %s%n", recovered.getName());
    System.out.printf("Recovered key id: %s%n", recovered.getId());
    recoveryPoller.waitForCompletion();
}
/**
* Generates a code sample for using {@link KeyClient
*/
/** Sample: download a backup blob of a key. */
public void backupKey() {
    KeyClient client = createClient();
    byte[] backupBlob = client.backupKey("keyName");
    System.out.printf("Key backup byte array length: %s%n", backupBlob.length);
}
/**
* Generates a code sample for using {@link KeyClient
*/
/** Sample: download a key backup together with the raw HTTP response. */
public void backupKeyWithResponse() {
    KeyClient client = createClient();
    Response<byte[]> backupResponse =
        client.backupKeyWithResponse("keyName", new Context("key1", "value1"));
    byte[] backupBlob = backupResponse.getValue();
    System.out.printf("Key backup byte array length: %s%n", backupBlob.length);
}
/**
* Generates a code sample for using {@link KeyClient
*/
/** Sample: restore a key from a previously downloaded backup blob. */
public void restoreKeyBackup() {
    KeyClient client = createClient();
    byte[] backupBlob = {};
    KeyVaultKey restored = client.restoreKeyBackup(backupBlob);
    System.out.printf("Restored key with name: %s and: id %s%n", restored.getName(), restored.getId());
}
/**
* Generates a code sample for using {@link KeyClient
*/
/** Sample: restore a key from a backup blob, returning the raw HTTP response. */
public void restoreKeyBackupWithResponse() {
    KeyClient client = createClient();
    byte[] backupBlob = {};
    Response<KeyVaultKey> restoreResponse =
        client.restoreKeyBackupWithResponse(backupBlob, new Context("key1", "value1"));
    KeyVaultKey restored = restoreResponse.getValue();
    System.out.printf("Restored key with name: %s and: id %s%n",
        restored.getName(), restored.getId());
}
/**
* Generates a code sample for using {@link KeyClient
* {@link KeyClient
*/
// Sample: enumerate all keys in the vault three ways — plain iteration, iteration with a
// Context, and page-by-page iteration exposing HTTP details.
public void listPropertiesOfKeys() {
KeyClient keyClient = createClient();
// KeyProperties carries no key material, so fetch the full key for its type.
for (KeyProperties keyProperties : keyClient.listPropertiesOfKeys()) {
KeyVaultKey key = keyClient.getKey(keyProperties.getName(), keyProperties.getVersion());
System.out.printf("Retrieved key with name: %s and type: %s%n", key.getName(), key.getKeyType());
}
// Same listing, propagating a Context through the HTTP pipeline.
for (KeyProperties keyProperties : keyClient.listPropertiesOfKeys(new Context("key1", "value1"))) {
KeyVaultKey key = keyClient.getKey(keyProperties.getName(), keyProperties.getVersion());
System.out.printf("Retrieved key with name: %s and type: %s%n", key.getName(),
key.getKeyType());
}
// Page-level iteration: each PagedResponse exposes the request URL and status code.
keyClient.listPropertiesOfKeys().iterableByPage().forEach(pagedResponse -> {
System.out.printf("Got response details. Url: %s. Status code: %d.%n",
pagedResponse.getRequest().getUrl(), pagedResponse.getStatusCode());
pagedResponse.getElements().forEach(keyProperties -> {
KeyVaultKey key = keyClient.getKey(keyProperties.getName(), keyProperties.getVersion());
System.out.printf("Retrieved key with name: %s and type: %s%n", key.getName(),
key.getKeyType());
});
});
}
/**
* Generates a code sample for using {@link KeyClient
* {@link KeyClient
*/
// Sample: enumerate soft-deleted keys three ways — plain iteration, iteration with a
// Context, and page-by-page iteration exposing HTTP details.
public void listDeletedKeys() {
KeyClient keyClient = createClient();
for (DeletedKey deletedKey : keyClient.listDeletedKeys()) {
System.out.printf("Deleted key's recovery id:%s%n", deletedKey.getRecoveryId());
}
// Same listing, propagating a Context through the HTTP pipeline.
for (DeletedKey deletedKey : keyClient.listDeletedKeys(new Context("key1", "value1"))) {
System.out.printf("Deleted key's recovery id:%s%n", deletedKey.getRecoveryId());
}
// Page-level iteration: each PagedResponse exposes the request URL and status code.
keyClient.listDeletedKeys().iterableByPage().forEach(pagedResponse -> {
System.out.printf("Got response details. Url: %s. Status code: %d.%n",
pagedResponse.getRequest().getUrl(), pagedResponse.getStatusCode());
pagedResponse.getElements().forEach(deletedKey ->
System.out.printf("Deleted key's recovery id:%s%n", deletedKey.getRecoveryId()));
});
}
/**
* Generates code sample for using {@link KeyClient
* {@link KeyClient
*/
// Sample: list every version of a key three ways — plain iteration, iteration with a
// caller-supplied Context, and page-by-page iteration that also exposes HTTP details.
public void listPropertiesOfKeyVersions() {
KeyClient keyClient = createClient();
// KeyProperties carries no key material, so fetch the full key for its type.
for (KeyProperties keyProperties : keyClient.listPropertiesOfKeyVersions("keyName")) {
KeyVaultKey key = keyClient.getKey(keyProperties.getName(), keyProperties.getVersion());
System.out.printf("Retrieved key version: %s with name: %s and type: %s%n",
key.getProperties().getVersion(), key.getName(), key.getKeyType());
}
// Same listing, propagating a Context through the HTTP pipeline.
for (KeyProperties keyProperties : keyClient.listPropertiesOfKeyVersions("keyName", new Context("key1", "value1"))) {
KeyVaultKey key = keyClient.getKey(keyProperties.getName(), keyProperties.getVersion());
System.out.printf("Retrieved key version: %s with name: %s and type: %s%n",
key.getProperties().getVersion(), key.getName(), key.getKeyType());
}
// Page-level iteration: each PagedResponse exposes the request URL and status code.
keyClient.listPropertiesOfKeyVersions("keyName").iterableByPage().forEach(pagedResponse -> {
System.out.printf("Got response details. Url: %s. Status code: %d.%n",
pagedResponse.getRequest().getUrl(), pagedResponse.getStatusCode());
pagedResponse.getElements().forEach(keyProperties ->
System.out.printf("Key name: %s. Key version: %s.%n", keyProperties.getName(),
keyProperties.getVersion()));
});
}
/**
* Generates code samples for using {@link KeyClient
* {@link KeyClient
*/
/** Sample: request random bytes from the vault, with and without the raw HTTP response. */
public void getRandomBytes() {
    KeyClient client = createClient();
    // Simple form: just the byte count.
    int count = 16;
    RandomBytes randomBytes = client.getRandomBytes(count);
    System.out.printf("Retrieved %d random bytes: %s%n", count, Arrays.toString(randomBytes.getBytes()));
    // WithResponse form: also surfaces the HTTP status code.
    int countWithResponse = 16;
    Response<RandomBytes> randomBytesResponse =
        client.getRandomBytesWithResponse(countWithResponse, new Context("key1", "value1"));
    System.out.printf("Response received successfully with status code: %d. Retrieved %d random bytes: %s%n",
        randomBytesResponse.getStatusCode(), countWithResponse,
        Arrays.toString(randomBytesResponse.getValue().getBytes()));
}
/**
* Generates code samples for using {@link KeyClient
* {@link KeyClient
* {@link KeyClient
*/
// Sample: release a key to a trusted execution environment — latest version, a specific
// version, and the WithResponse overload that takes ReleaseKeyOptions.
public void releaseKey() {
KeyClient keyClient = createClient();
// NOTE(review): "target" is the attestation assertion presented to the service —
// confirm the expected token format against the Key Vault release docs.
String target = "someAttestationToken";
ReleaseKeyResult releaseKeyResult = keyClient.releaseKey("keyName", target);
System.out.printf("Signed object containing released key: %s%n", releaseKeyResult);
// Release a specific key version.
String myKeyVersion = "6A385B124DEF4096AF1361A85B16C204";
String myTarget = "someAttestationToken";
ReleaseKeyResult releaseKeyVersionResult = keyClient.releaseKey("keyName", myKeyVersion, myTarget);
System.out.printf("Signed object containing released key: %s%n", releaseKeyVersionResult);
// Full control: pick the export encryption algorithm and a nonce, get the raw response.
String releaseKeyVersion = "6A385B124DEF4096AF1361A85B16C204";
String releaseTarget = "someAttestationToken";
ReleaseKeyOptions releaseKeyOptions = new ReleaseKeyOptions()
.setAlgorithm(KeyExportEncryptionAlgorithm.RSA_AES_KEY_WRAP_256)
.setNonce("someNonce");
Response<ReleaseKeyResult> releaseKeyResultResponse =
keyClient.releaseKeyWithResponse("keyName", releaseKeyVersion, releaseTarget, releaseKeyOptions,
new Context("key1", "value1"));
System.out.printf("Response received successfully with status code: %d. Signed object containing"
+ "released key: %s%n", releaseKeyResultResponse.getStatusCode(),
releaseKeyResultResponse.getValue().getValue());
}
/**
* Generates code samples for using {@link KeyClient
* {@link KeyClient
*/
/** Sample: rotate a key on demand, with and without the raw HTTP response. */
public void rotateKey() {
    KeyClient client = createClient();
    KeyVaultKey rotated = client.rotateKey("keyName");
    System.out.printf("Rotated key with name: %s and version:%s%n", rotated.getName(),
        rotated.getProperties().getVersion());
    Response<KeyVaultKey> rotateResponse = client.rotateKeyWithResponse("keyName", new Context("key1", "value1"));
    KeyVaultKey rotatedFromResponse = rotateResponse.getValue();
    System.out.printf("Response received successfully with status code: %d. Rotated key with name: %s and"
        + "version: %s%n", rotateResponse.getStatusCode(), rotatedFromResponse.getName(),
        rotatedFromResponse.getProperties().getVersion());
}
/**
* Generates code samples for using {@link KeyClient
* {@link KeyClient
*/
/** Sample: read a key's rotation policy, with and without the raw HTTP response. */
public void getKeyRotationPolicy() {
    KeyClient client = createClient();
    KeyRotationPolicy policy = client.getKeyRotationPolicy("keyName");
    System.out.printf("Retrieved key rotation policy with id: %s%n", policy.getId());
    Response<KeyRotationPolicy> policyResponse =
        client.getKeyRotationPolicyWithResponse("keyName", new Context("key1", "value1"));
    System.out.printf("Response received successfully with status code: %d. Retrieved key rotation policy"
        + "with id: %s%n", policyResponse.getStatusCode(), policyResponse.getValue().getId());
}
/**
* Generates code samples for using {@link KeyClient
* and {@link KeyClient
*/
} |
maybe remove custom here since this is prebuilt sample | public void analyzePrebuiltDocument() {
String documentUrl = "{document-url}";
String modelId = "prebuilt-document";
SyncPoller<DocumentOperationResult, AnalyzeResult> analyzeDocumentPoller =
documentAnalysisClient.beginAnalyzeDocumentFromUrl(modelId, documentUrl);
AnalyzeResult analyzeResult = analyzeDocumentPoller.getFinalResult();
for (int i = 0; i < analyzeResult.getDocuments().size(); i++) {
final AnalyzedDocument analyzedDocument = analyzeResult.getDocuments().get(i);
// FIX: this sample runs the prebuilt model, so the banner should not say "custom".
System.out.printf("----------- Analyzing document %d -----------%n", i);
System.out.printf("Analyzed document has doc type %s with confidence : %.2f%n",
analyzedDocument.getDocType(), analyzedDocument.getConfidence());
}
analyzeResult.getPages().forEach(documentPage -> {
System.out.printf("Page has width: %.2f and height: %.2f, measured with unit: %s%n",
documentPage.getWidth(),
documentPage.getHeight(),
documentPage.getUnit());
documentPage.getLines().forEach(documentLine ->
System.out.printf("Line %s is within a bounding box %s.%n",
documentLine.getContent(),
documentLine.getBoundingBox().toString()));
// NOTE(review): "%n." prints the period after the newline — the '.' likely belongs
// before %n; left unchanged here to keep this fix minimal.
documentPage.getWords().forEach(documentWord ->
System.out.printf("Word %s has a confidence score of %.2f%n.",
documentWord.getContent(),
documentWord.getConfidence()));
});
List<DocumentTable> tables = analyzeResult.getTables();
for (int i = 0; i < tables.size(); i++) {
DocumentTable documentTable = tables.get(i);
System.out.printf("Table %d has %d rows and %d columns.%n", i, documentTable.getRowCount(),
documentTable.getColumnCount());
documentTable.getCells().forEach(documentTableCell -> {
System.out.printf("Cell '%s', has row index %d and column index %d.%n",
documentTableCell.getContent(),
documentTableCell.getRowIndex(), documentTableCell.getColumnIndex());
});
System.out.println();
}
analyzeResult.getEntities().forEach(documentEntity -> {
System.out.printf("Entity category : %s, sub-category %s%n: ",
documentEntity.getCategory(), documentEntity.getSubCategory());
System.out.printf("Entity content: %s%n: ", documentEntity.getContent());
System.out.printf("Entity confidence: %.2f%n", documentEntity.getConfidence());
});
analyzeResult.getKeyValuePairs().forEach(documentKeyValuePair -> {
System.out.printf("Key content: %s%n", documentKeyValuePair.getKey().getContent());
System.out.printf("Key content bounding region: %s%n",
documentKeyValuePair.getKey().getBoundingRegions().toString());
System.out.printf("Value content: %s%n", documentKeyValuePair.getValue().getContent());
System.out.printf("Value content bounding region: %s%n", documentKeyValuePair.getValue().getBoundingRegions().toString());
});
} | System.out.printf("----------- Analyzing custom document %d -----------%n", i); | public void analyzePrebuiltDocument() {
// ("after" variant of the prebuilt-document sample; the method signature sits on the
// previous dump line.)
String documentUrl = "{document-url}";
String modelId = "prebuilt-document";
// Long-running operation: poll until the analysis completes.
SyncPoller<DocumentOperationResult, AnalyzeResult> analyzeDocumentPoller =
documentAnalysisClient.beginAnalyzeDocumentFromUrl(modelId, documentUrl);
AnalyzeResult analyzeResult = analyzeDocumentPoller.getFinalResult();
for (int i = 0; i < analyzeResult.getDocuments().size(); i++) {
final AnalyzedDocument analyzedDocument = analyzeResult.getDocuments().get(i);
System.out.printf("----------- Analyzing document %d -----------%n", i);
System.out.printf("Analyzed document has doc type %s with confidence : %.2f%n",
analyzedDocument.getDocType(), analyzedDocument.getConfidence());
}
// Per-page geometry, text lines, and word confidences.
analyzeResult.getPages().forEach(documentPage -> {
System.out.printf("Page has width: %.2f and height: %.2f, measured with unit: %s%n",
documentPage.getWidth(),
documentPage.getHeight(),
documentPage.getUnit());
documentPage.getLines().forEach(documentLine ->
System.out.printf("Line %s is within a bounding box %s.%n",
documentLine.getContent(),
documentLine.getBoundingBox().toString()));
// NOTE(review): "%n." prints the period after the newline — the '.' likely belongs
// before %n.
documentPage.getWords().forEach(documentWord ->
System.out.printf("Word %s has a confidence score of %.2f%n.",
documentWord.getContent(),
documentWord.getConfidence()));
});
// Tables with their cell grid positions.
List<DocumentTable> tables = analyzeResult.getTables();
for (int i = 0; i < tables.size(); i++) {
DocumentTable documentTable = tables.get(i);
System.out.printf("Table %d has %d rows and %d columns.%n", i, documentTable.getRowCount(),
documentTable.getColumnCount());
documentTable.getCells().forEach(documentTableCell -> {
System.out.printf("Cell '%s', has row index %d and column index %d.%n",
documentTableCell.getContent(),
documentTableCell.getRowIndex(), documentTableCell.getColumnIndex());
});
System.out.println();
}
// Recognized entities and key/value pairs.
analyzeResult.getEntities().forEach(documentEntity -> {
System.out.printf("Entity category : %s, sub-category %s%n: ",
documentEntity.getCategory(), documentEntity.getSubCategory());
System.out.printf("Entity content: %s%n: ", documentEntity.getContent());
System.out.printf("Entity confidence: %.2f%n", documentEntity.getConfidence());
});
analyzeResult.getKeyValuePairs().forEach(documentKeyValuePair -> {
System.out.printf("Key content: %s%n", documentKeyValuePair.getKey().getContent());
System.out.printf("Key content bounding region: %s%n",
documentKeyValuePair.getKey().getBoundingRegions().toString());
System.out.printf("Value content: %s%n", documentKeyValuePair.getValue().getContent());
System.out.printf("Value content bounding region: %s%n", documentKeyValuePair.getValue().getBoundingRegions().toString());
});
} | class ReadmeSamples {
private final DocumentAnalysisClient documentAnalysisClient = new DocumentAnalysisClientBuilder().buildClient();
private final DocumentModelAdministrationClient documentModelAdminClient =
new DocumentModelAdministrationClientBuilder().buildClient();
/**
* Code snippet for getting sync client using the AzureKeyCredential authentication.
*/
/** Sample: build a sync DocumentAnalysisClient authenticated with an API key. */
public void useAzureKeyCredentialSyncClient() {
    DocumentAnalysisClientBuilder builder = new DocumentAnalysisClientBuilder();
    builder.credential(new AzureKeyCredential("{key}"));
    builder.endpoint("{endpoint}");
    DocumentAnalysisClient documentAnalysisClient = builder.buildClient();
}
/**
* Code snippet for getting sync DocumentModelAdministration client using the AzureKeyCredential authentication.
*/
/** Sample: build a sync DocumentModelAdministrationClient authenticated with an API key. */
public void useAzureKeyCredentialDocumentModelAdministrationClient() {
    DocumentModelAdministrationClientBuilder builder = new DocumentModelAdministrationClientBuilder();
    builder.credential(new AzureKeyCredential("{key}"));
    builder.endpoint("{endpoint}");
    DocumentModelAdministrationClient documentModelAdminClient = builder.buildClient();
}
/**
* Code snippet for getting async client using AAD authentication.
*/
// Sample: build a client authenticated via Azure Active Directory (DefaultAzureCredential).
// NOTE(review): despite the method name saying "async", buildClient() returns the
// synchronous DocumentAnalysisClient — confirm whether buildAsyncClient() was intended.
public void useAadAsyncClient() {
TokenCredential credential = new DefaultAzureCredentialBuilder().build();
DocumentAnalysisClient documentAnalysisClient = new DocumentAnalysisClientBuilder()
.endpoint("{endpoint}")
.credential(credential)
.buildClient();
}
/**
* Extract layout data for provided document.
*
* @throws IOException Exception thrown when there is an error in reading all the bytes from the File.
*/
// Sample: run the prebuilt layout model over a local file and print pages, text lines,
// selection marks, and tables.
//
// @throws IOException if the local file cannot be read into memory.
public void analyzeLayout() throws IOException {
File layoutDocument = new File("local/file_path/filename.png");
byte[] fileContent = Files.readAllBytes(layoutDocument.toPath());
InputStream fileStream = new ByteArrayInputStream(fileContent);
// Long-running operation: poll until the layout analysis completes.
SyncPoller<DocumentOperationResult, AnalyzeResult> analyzeLayoutResultPoller =
documentAnalysisClient.beginAnalyzeDocument("prebuilt-layout", fileStream, layoutDocument.length());
AnalyzeResult analyzeLayoutResult = analyzeLayoutResultPoller.getFinalResult();
// Per-page geometry, text lines, and selection marks (checkboxes etc.).
analyzeLayoutResult.getPages().forEach(documentPage -> {
System.out.printf("Page has width: %.2f and height: %.2f, measured with unit: %s%n",
documentPage.getWidth(),
documentPage.getHeight(),
documentPage.getUnit());
documentPage.getLines().forEach(documentLine ->
System.out.printf("Line %s is within a bounding box %s.%n",
documentLine.getContent(),
documentLine.getBoundingBox().toString()));
documentPage.getSelectionMarks().forEach(documentSelectionMark ->
System.out.printf("Selection mark is %s and is within a bounding box %s with confidence %.2f.%n",
documentSelectionMark.getState().toString(),
documentSelectionMark.getBoundingBox().toString(),
documentSelectionMark.getConfidence()));
});
// Tables with their cell grid positions.
List<DocumentTable> tables = analyzeLayoutResult.getTables();
for (int i = 0; i < tables.size(); i++) {
DocumentTable documentTable = tables.get(i);
System.out.printf("Table %d has %d rows and %d columns.%n", i, documentTable.getRowCount(),
documentTable.getColumnCount());
documentTable.getCells().forEach(documentTableCell -> {
System.out.printf("Cell '%s', has row index %d and column index %d.%n", documentTableCell.getContent(),
documentTableCell.getRowIndex(), documentTableCell.getColumnIndex());
});
System.out.println();
}
}
/**
* Code snippet for analyzing receipt data using prebuilt receipt models.
*/
// Sample: analyze a receipt image by URL with the prebuilt receipt model and print
// selected typed fields (merchant, phone, date, line items).
public void analyzeReceiptFromUrl() {
// NOTE(review): the URL literal appears truncated at "https:" — likely an extraction
// artifact; confirm against the original snippet source.
String receiptUrl = "https:
+ "/azure-ai-formrecognizer/src/samples/resources/sample-documents/receipts/contoso-allinone.jpg";
SyncPoller<DocumentOperationResult, AnalyzeResult> analyzeReceiptPoller =
documentAnalysisClient.beginAnalyzeDocumentFromUrl("prebuilt-receipt", receiptUrl);
AnalyzeResult receiptResults = analyzeReceiptPoller.getFinalResult();
for (int i = 0; i < receiptResults.getDocuments().size(); i++) {
AnalyzedDocument analyzedReceipt = receiptResults.getDocuments().get(i);
Map<String, DocumentField> receiptFields = analyzedReceipt.getFields();
System.out.printf("----------- Analyzing receipt info %d -----------%n", i);
// Each field is typed; check the DocumentFieldType before reading the typed value.
DocumentField merchantNameField = receiptFields.get("MerchantName");
if (merchantNameField != null) {
if (DocumentFieldType.STRING == merchantNameField.getType()) {
String merchantName = merchantNameField.getValueString();
System.out.printf("Merchant Name: %s, confidence: %.2f%n",
merchantName, merchantNameField.getConfidence());
}
}
DocumentField merchantPhoneNumberField = receiptFields.get("MerchantPhoneNumber");
if (merchantPhoneNumberField != null) {
if (DocumentFieldType.PHONE_NUMBER == merchantPhoneNumberField.getType()) {
String merchantAddress = merchantPhoneNumberField.getValuePhoneNumber();
System.out.printf("Merchant Phone number: %s, confidence: %.2f%n",
merchantAddress, merchantPhoneNumberField.getConfidence());
}
}
DocumentField transactionDateField = receiptFields.get("TransactionDate");
if (transactionDateField != null) {
if (DocumentFieldType.DATE == transactionDateField.getType()) {
LocalDate transactionDate = transactionDateField.getValueDate();
System.out.printf("Transaction Date: %s, confidence: %.2f%n",
transactionDate, transactionDateField.getConfidence());
}
}
// "Items" is a LIST of MAP fields; drill into each item's Name and Quantity.
DocumentField receiptItemsField = receiptFields.get("Items");
if (receiptItemsField != null) {
System.out.printf("Receipt Items: %n");
if (DocumentFieldType.LIST == receiptItemsField.getType()) {
List<DocumentField> receiptItems = receiptItemsField.getValueList();
receiptItems.stream()
.filter(receiptItem -> DocumentFieldType.MAP == receiptItem.getType())
.map(documentField -> documentField.getValueMap())
.forEach(documentFieldMap -> documentFieldMap.forEach((key, documentField) -> {
if ("Name".equals(key)) {
if (DocumentFieldType.STRING == documentField.getType()) {
String name = documentField.getValueString();
System.out.printf("Name: %s, confidence: %.2fs%n",
name, documentField.getConfidence());
}
}
if ("Quantity".equals(key)) {
if (DocumentFieldType.FLOAT == documentField.getType()) {
Float quantity = documentField.getValueFloat();
System.out.printf("Quantity: %f, confidence: %.2f%n",
quantity, documentField.getConfidence());
}
}
}));
}
}
}
}
/**
 * Code snippet for building a custom document analysis model from training documents.
 */
public void buildModel() {
    // SAS URL of the blob container that holds the training documents.
    String trainingFilesUrl = "{SAS_URL_of_your_container_in_blob_storage}";
    // Kick off the long-running build operation and block until it completes.
    SyncPoller<DocumentOperationResult, DocumentModel> buildPoller =
        documentModelAdminClient.beginBuildModel(trainingFilesUrl,
            "my-build-model",
            new BuildModelOptions().setDescription("model desc"),
            Context.NONE);
    DocumentModel builtModel = buildPoller.getFinalResult();
    System.out.printf("Model ID: %s%n", builtModel.getModelId());
    System.out.printf("Model Description: %s%n", builtModel.getDescription());
    System.out.printf("Model created on: %s%n%n", builtModel.getCreatedOn());
    // Walk each document type the model recognizes and print its field schema.
    builtModel.getDocTypes().forEach((docTypeName, docTypeInfo) -> {
        System.out.printf("Document type: %s%n", docTypeName);
        docTypeInfo.getFieldSchema().forEach((fieldName, fieldSchema) -> {
            System.out.printf("Document field: %s%n", fieldName);
            System.out.printf("Document field type: %s%n", fieldSchema.getType().toString());
            System.out.printf("Document field confidence: %.2f%n", docTypeInfo.getFieldConfidence().get(fieldName));
        });
    });
}
/**
 * Code snippet for analyzing custom documents using custom-built models.
 */
public void analyzeCustomDocument() {
    String documentUrl = "{document-url}";
    String modelId = "{custom-built-model-ID}";
    SyncPoller<DocumentOperationResult, AnalyzeResult> analyzeDocumentPoller =
        documentAnalysisClient.beginAnalyzeDocumentFromUrl(modelId, documentUrl);
    AnalyzeResult analyzeResult = analyzeDocumentPoller.getFinalResult();
    for (int i = 0; i < analyzeResult.getDocuments().size(); i++) {
        final AnalyzedDocument analyzedDocument = analyzeResult.getDocuments().get(i);
        System.out.printf("----------- Analyzing custom document %d -----------%n", i);
        System.out.printf("Analyzed document has doc type %s with confidence : %.2f%n",
            analyzedDocument.getDocType(), analyzedDocument.getConfidence());
        analyzedDocument.getFields().forEach((key, documentField) -> {
            System.out.printf("Document Field content: %s%n", documentField.getContent());
            System.out.printf("Document Field confidence: %.2f%n", documentField.getConfidence());
            // BUGFIX: getType().toString() is a String — the original "%.2f" conversion
            // would throw IllegalFormatConversionException at runtime; "%s" is correct.
            System.out.printf("Document Field Type: %s%n", documentField.getType().toString());
            System.out.printf("Document Field found within bounding region: %s%n",
                documentField.getBoundingRegions().toString());
        });
    }
    // Page-level output: dimensions, lines, and words with confidence scores.
    analyzeResult.getPages().forEach(documentPage -> {
        System.out.printf("Page has width: %.2f and height: %.2f, measured with unit: %s%n",
            documentPage.getWidth(),
            documentPage.getHeight(),
            documentPage.getUnit());
        documentPage.getLines().forEach(documentLine ->
            System.out.printf("Line %s is within a bounding box %s.%n",
                documentLine.getContent(),
                documentLine.getBoundingBox().toString()));
        documentPage.getWords().forEach(documentWord ->
            System.out.printf("Word %s has a confidence score of %.2f%n.",
                documentWord.getContent(),
                documentWord.getConfidence()));
    });
    // Table output: dimensions plus each cell's position within the table.
    List<DocumentTable> tables = analyzeResult.getTables();
    for (int i = 0; i < tables.size(); i++) {
        DocumentTable documentTable = tables.get(i);
        System.out.printf("Table %d has %d rows and %d columns.%n", i, documentTable.getRowCount(),
            documentTable.getColumnCount());
        documentTable.getCells().forEach(documentTableCell -> {
            System.out.printf("Cell '%s', has row index %d and column index %d.%n",
                documentTableCell.getContent(),
                documentTableCell.getRowIndex(), documentTableCell.getColumnIndex());
        });
        System.out.println();
    }
}
/**
* Code snippet for analyzing general documents using "prebuilt-document" models.
*/
/**
 * Code snippet for managing models in a Form Recognizer account: lists account limits,
 * enumerates the models, prints each model's schema, and deletes the last model seen.
 */
public void manageModels() {
    // Tracks the most recently listed model ID so it can be deleted afterwards.
    String lastModelId = null;
    AccountProperties accountProperties = documentModelAdminClient.getAccountProperties();
    System.out.printf("The account has %s models, and we can have at most %s models",
        accountProperties.getDocumentModelCount(), accountProperties.getDocumentModelLimit());
    PagedIterable<DocumentModelInfo> customDocumentModels = documentModelAdminClient.listModels();
    System.out.println("We have following models in the account:");
    // PagedIterable is Iterable, so a plain for-each replaces the forEach/AtomicReference pair.
    for (DocumentModelInfo modelInfo : customDocumentModels) {
        System.out.printf("Model ID: %s%n", modelInfo.getModelId());
        lastModelId = modelInfo.getModelId();
        DocumentModel model = documentModelAdminClient.getModel(modelInfo.getModelId());
        System.out.printf("Model ID: %s%n", model.getModelId());
        System.out.printf("Model Description: %s%n", model.getDescription());
        System.out.printf("Model created on: %s%n", model.getCreatedOn());
        model.getDocTypes().forEach((docTypeName, docTypeInfo) -> {
            docTypeInfo.getFieldSchema().forEach((fieldName, fieldSchema) -> {
                System.out.printf("Field: %s", fieldName);
                System.out.printf("Field type: %s", fieldSchema.getType());
                System.out.printf("Field confidence: %.2f", docTypeInfo.getFieldConfidence().get(fieldName));
            });
        });
    }
    documentModelAdminClient.deleteModel(lastModelId);
}
/**
 * Code snippet demonstrating how to handle a service error surfaced by the client.
 */
public void handlingException() {
    try {
        documentAnalysisClient.beginAnalyzeDocumentFromUrl("prebuilt-receipt", "invalidSourceUrl");
    } catch (HttpResponseException ex) {
        // The service rejected the request (here: the document source URL is invalid).
        System.out.println(ex.getMessage());
    }
}
/**
 * Code snippet for creating an asynchronous client authenticated with an {@code AzureKeyCredential}.
 */
public void useAzureKeyCredentialAsyncClient() {
    DocumentAnalysisAsyncClient documentAnalysisAsyncClient = new DocumentAnalysisClientBuilder()
        .endpoint("{endpoint}")
        .credential(new AzureKeyCredential("{key}"))
        .buildAsyncClient();
}
} | class ReadmeSamples {
private final DocumentAnalysisClient documentAnalysisClient = new DocumentAnalysisClientBuilder().buildClient();
private final DocumentModelAdministrationClient documentModelAdminClient =
new DocumentModelAdministrationClientBuilder().buildClient();
/**
* Code snippet for getting sync client using the AzureKeyCredential authentication.
*/
public void useAzureKeyCredentialSyncClient() {
DocumentAnalysisClient documentAnalysisClient = new DocumentAnalysisClientBuilder()
.credential(new AzureKeyCredential("{key}"))
.endpoint("{endpoint}")
.buildClient();
}
/**
* Code snippet for getting sync DocumentModelAdministration client using the AzureKeyCredential authentication.
*/
public void useAzureKeyCredentialDocumentModelAdministrationClient() {
DocumentModelAdministrationClient documentModelAdminClient = new DocumentModelAdministrationClientBuilder()
.credential(new AzureKeyCredential("{key}"))
.endpoint("{endpoint}")
.buildClient();
}
/**
* Code snippet for getting async client using AAD authentication.
*/
public void useAadAsyncClient() {
TokenCredential credential = new DefaultAzureCredentialBuilder().build();
DocumentAnalysisClient documentAnalysisClient = new DocumentAnalysisClientBuilder()
.endpoint("{endpoint}")
.credential(credential)
.buildClient();
}
/**
* Extract layout data for provided document.
*
* @throws IOException Exception thrown when there is an error in reading all the bytes from the File.
*/
public void analyzeLayout() throws IOException {
File layoutDocument = new File("local/file_path/filename.png");
byte[] fileContent = Files.readAllBytes(layoutDocument.toPath());
InputStream fileStream = new ByteArrayInputStream(fileContent);
SyncPoller<DocumentOperationResult, AnalyzeResult> analyzeLayoutResultPoller =
documentAnalysisClient.beginAnalyzeDocument("prebuilt-layout", fileStream, layoutDocument.length());
AnalyzeResult analyzeLayoutResult = analyzeLayoutResultPoller.getFinalResult();
analyzeLayoutResult.getPages().forEach(documentPage -> {
System.out.printf("Page has width: %.2f and height: %.2f, measured with unit: %s%n",
documentPage.getWidth(),
documentPage.getHeight(),
documentPage.getUnit());
documentPage.getLines().forEach(documentLine ->
System.out.printf("Line %s is within a bounding box %s.%n",
documentLine.getContent(),
documentLine.getBoundingBox().toString()));
documentPage.getSelectionMarks().forEach(documentSelectionMark ->
System.out.printf("Selection mark is %s and is within a bounding box %s with confidence %.2f.%n",
documentSelectionMark.getState().toString(),
documentSelectionMark.getBoundingBox().toString(),
documentSelectionMark.getConfidence()));
});
List<DocumentTable> tables = analyzeLayoutResult.getTables();
for (int i = 0; i < tables.size(); i++) {
DocumentTable documentTable = tables.get(i);
System.out.printf("Table %d has %d rows and %d columns.%n", i, documentTable.getRowCount(),
documentTable.getColumnCount());
documentTable.getCells().forEach(documentTableCell -> {
System.out.printf("Cell '%s', has row index %d and column index %d.%n", documentTableCell.getContent(),
documentTableCell.getRowIndex(), documentTableCell.getColumnIndex());
});
System.out.println();
}
}
/**
* Code snippet for analyzing receipt data using prebuilt receipt models.
*/
public void analyzeReceiptFromUrl() {
String receiptUrl = "https:
+ "/azure-ai-formrecognizer/src/samples/resources/sample-documents/receipts/contoso-allinone.jpg";
SyncPoller<DocumentOperationResult, AnalyzeResult> analyzeReceiptPoller =
documentAnalysisClient.beginAnalyzeDocumentFromUrl("prebuilt-receipt", receiptUrl);
AnalyzeResult receiptResults = analyzeReceiptPoller.getFinalResult();
for (int i = 0; i < receiptResults.getDocuments().size(); i++) {
AnalyzedDocument analyzedReceipt = receiptResults.getDocuments().get(i);
Map<String, DocumentField> receiptFields = analyzedReceipt.getFields();
System.out.printf("----------- Analyzing receipt info %d -----------%n", i);
DocumentField merchantNameField = receiptFields.get("MerchantName");
if (merchantNameField != null) {
if (DocumentFieldType.STRING == merchantNameField.getType()) {
String merchantName = merchantNameField.getValueString();
System.out.printf("Merchant Name: %s, confidence: %.2f%n",
merchantName, merchantNameField.getConfidence());
}
}
DocumentField merchantPhoneNumberField = receiptFields.get("MerchantPhoneNumber");
if (merchantPhoneNumberField != null) {
if (DocumentFieldType.PHONE_NUMBER == merchantPhoneNumberField.getType()) {
String merchantAddress = merchantPhoneNumberField.getValuePhoneNumber();
System.out.printf("Merchant Phone number: %s, confidence: %.2f%n",
merchantAddress, merchantPhoneNumberField.getConfidence());
}
}
DocumentField transactionDateField = receiptFields.get("TransactionDate");
if (transactionDateField != null) {
if (DocumentFieldType.DATE == transactionDateField.getType()) {
LocalDate transactionDate = transactionDateField.getValueDate();
System.out.printf("Transaction Date: %s, confidence: %.2f%n",
transactionDate, transactionDateField.getConfidence());
}
}
DocumentField receiptItemsField = receiptFields.get("Items");
if (receiptItemsField != null) {
System.out.printf("Receipt Items: %n");
if (DocumentFieldType.LIST == receiptItemsField.getType()) {
List<DocumentField> receiptItems = receiptItemsField.getValueList();
receiptItems.stream()
.filter(receiptItem -> DocumentFieldType.MAP == receiptItem.getType())
.map(documentField -> documentField.getValueMap())
.forEach(documentFieldMap -> documentFieldMap.forEach((key, documentField) -> {
if ("Name".equals(key)) {
if (DocumentFieldType.STRING == documentField.getType()) {
String name = documentField.getValueString();
System.out.printf("Name: %s, confidence: %.2fs%n",
name, documentField.getConfidence());
}
}
if ("Quantity".equals(key)) {
if (DocumentFieldType.FLOAT == documentField.getType()) {
Float quantity = documentField.getValueFloat();
System.out.printf("Quantity: %f, confidence: %.2f%n",
quantity, documentField.getConfidence());
}
}
}));
}
}
}
}
/**
* Code snippet for building custom document analysis models using training data.
*/
public void buildModel() {
String trainingFilesUrl = "{SAS_URL_of_your_container_in_blob_storage}";
SyncPoller<DocumentOperationResult, DocumentModel> buildOperationPoller =
documentModelAdminClient.beginBuildModel(trainingFilesUrl,
"my-build-model",
new BuildModelOptions().setDescription("model desc"),
Context.NONE);
DocumentModel documentModel = buildOperationPoller.getFinalResult();
System.out.printf("Model ID: %s%n", documentModel.getModelId());
System.out.printf("Model Description: %s%n", documentModel.getDescription());
System.out.printf("Model created on: %s%n%n", documentModel.getCreatedOn());
documentModel.getDocTypes().forEach((key, docTypeInfo) -> {
System.out.printf("Document type: %s%n", key);
docTypeInfo.getFieldSchema().forEach((name, documentFieldSchema) -> {
System.out.printf("Document field: %s%n", name);
System.out.printf("Document field type: %s%n", documentFieldSchema.getType().toString());
System.out.printf("Document field confidence: %.2f%n", docTypeInfo.getFieldConfidence().get(name));
});
});
}
/**
 * Code snippet for analyzing custom documents using custom-built models.
 */
public void analyzeCustomDocument() {
    String documentUrl = "{document-url}";
    String modelId = "{custom-built-model-ID}";
    SyncPoller<DocumentOperationResult, AnalyzeResult> analyzeDocumentPoller =
        documentAnalysisClient.beginAnalyzeDocumentFromUrl(modelId, documentUrl);
    AnalyzeResult analyzeResult = analyzeDocumentPoller.getFinalResult();
    for (int i = 0; i < analyzeResult.getDocuments().size(); i++) {
        final AnalyzedDocument analyzedDocument = analyzeResult.getDocuments().get(i);
        System.out.printf("----------- Analyzing custom document %d -----------%n", i);
        System.out.printf("Analyzed document has doc type %s with confidence : %.2f%n",
            analyzedDocument.getDocType(), analyzedDocument.getConfidence());
        analyzedDocument.getFields().forEach((key, documentField) -> {
            System.out.printf("Document Field content: %s%n", documentField.getContent());
            System.out.printf("Document Field confidence: %.2f%n", documentField.getConfidence());
            // BUGFIX: getType().toString() is a String — the original "%.2f" conversion
            // would throw IllegalFormatConversionException at runtime; "%s" is correct.
            System.out.printf("Document Field Type: %s%n", documentField.getType().toString());
            System.out.printf("Document Field found within bounding region: %s%n",
                documentField.getBoundingRegions().toString());
        });
    }
    // Page-level output: dimensions, lines, and words with confidence scores.
    analyzeResult.getPages().forEach(documentPage -> {
        System.out.printf("Page has width: %.2f and height: %.2f, measured with unit: %s%n",
            documentPage.getWidth(),
            documentPage.getHeight(),
            documentPage.getUnit());
        documentPage.getLines().forEach(documentLine ->
            System.out.printf("Line %s is within a bounding box %s.%n",
                documentLine.getContent(),
                documentLine.getBoundingBox().toString()));
        documentPage.getWords().forEach(documentWord ->
            System.out.printf("Word %s has a confidence score of %.2f%n.",
                documentWord.getContent(),
                documentWord.getConfidence()));
    });
    // Table output: dimensions plus each cell's position within the table.
    List<DocumentTable> tables = analyzeResult.getTables();
    for (int i = 0; i < tables.size(); i++) {
        DocumentTable documentTable = tables.get(i);
        System.out.printf("Table %d has %d rows and %d columns.%n", i, documentTable.getRowCount(),
            documentTable.getColumnCount());
        documentTable.getCells().forEach(documentTableCell -> {
            System.out.printf("Cell '%s', has row index %d and column index %d.%n",
                documentTableCell.getContent(),
                documentTableCell.getRowIndex(), documentTableCell.getColumnIndex());
        });
        System.out.println();
    }
}
/**
* Code snippet for analyzing general documents using "prebuilt-document" models.
*/
/**
* Code snippet for managing models in form recognizer account.
*/
public void manageModels() {
AtomicReference<String> modelId = new AtomicReference<>();
AccountProperties accountProperties = documentModelAdminClient.getAccountProperties();
System.out.printf("The account has %s models, and we can have at most %s models",
accountProperties.getDocumentModelCount(), accountProperties.getDocumentModelLimit());
PagedIterable<DocumentModelInfo> customDocumentModels = documentModelAdminClient.listModels();
System.out.println("We have following models in the account:");
customDocumentModels.forEach(documentModelInfo -> {
System.out.printf("Model ID: %s%n", documentModelInfo.getModelId());
modelId.set(documentModelInfo.getModelId());
DocumentModel documentModel = documentModelAdminClient.getModel(documentModelInfo.getModelId());
System.out.printf("Model ID: %s%n", documentModel.getModelId());
System.out.printf("Model Description: %s%n", documentModel.getDescription());
System.out.printf("Model created on: %s%n", documentModel.getCreatedOn());
documentModel.getDocTypes().forEach((key, docTypeInfo) -> {
docTypeInfo.getFieldSchema().forEach((field, documentFieldSchema) -> {
System.out.printf("Field: %s", field);
System.out.printf("Field type: %s", documentFieldSchema.getType());
System.out.printf("Field confidence: %.2f", docTypeInfo.getFieldConfidence().get(field));
});
});
});
documentModelAdminClient.deleteModel(modelId.get());
}
/**
* Code snippet for handling exception
*/
public void handlingException() {
try {
documentAnalysisClient.beginAnalyzeDocumentFromUrl("prebuilt-receipt", "invalidSourceUrl");
} catch (HttpResponseException e) {
System.out.println(e.getMessage());
}
}
/**
* Code snippet for getting async client using the AzureKeyCredential authentication.
*/
public void useAzureKeyCredentialAsyncClient() {
DocumentAnalysisAsyncClient documentAnalysisAsyncClient = new DocumentAnalysisClientBuilder()
.credential(new AzureKeyCredential("{key}"))
.endpoint("{endpoint}")
.buildAsyncClient();
}
} |
Was this property removed? #Resolved | Mono<Response<ContainerRepositoryProperties>> updatePropertiesWithResponse(ContainerRepositoryProperties repositoryProperties, Context context) {
try {
if (repositoryProperties == null) {
return monoError(logger, new NullPointerException("'value' cannot be null."));
}
RepositoryWriteableProperties writableProperties = new RepositoryWriteableProperties()
.setDeleteEnabled(repositoryProperties.isDeleteEnabled())
.setListEnabled(repositoryProperties.isListEnabled())
.setWriteEnabled(repositoryProperties.isWriteEnabled())
.setReadEnabled(repositoryProperties.isReadEnabled());
return this.serviceClient.updatePropertiesWithResponseAsync(repositoryName, writableProperties, context)
.onErrorMap(Utils::mapException);
} catch (RuntimeException e) {
return monoError(logger, e);
}
} | return monoError(logger, new NullPointerException("'value' cannot be null."));
}
RepositoryWriteableProperties writableProperties = new RepositoryWriteableProperties()
.setDeleteEnabled(repositoryProperties.isDeleteEnabled())
.setListEnabled(repositoryProperties.isListEnabled())
.setWriteEnabled(repositoryProperties.isWriteEnabled())
.setReadEnabled(repositoryProperties.isReadEnabled());
return this.serviceClient.updatePropertiesWithResponseAsync(repositoryName, writableProperties, context)
.onErrorMap(Utils::mapException);
} catch (RuntimeException e) {
return monoError(logger, e);
} | class ContainerRepositoryAsync {
private final ContainerRegistriesImpl serviceClient;
private final String repositoryName;
private final String endpoint;
private final String apiVersion;
private final HttpPipeline httpPipeline;
private final String registryLoginServer;
private final ClientLogger logger = new ClientLogger(ContainerRepositoryAsync.class);
/**
* Creates a ContainerRepositoryAsyncClient that sends requests to the given repository in the container registry service at {@code endpoint}.
* Each service call goes through the {@code pipeline}.
* @param repositoryName The name of the repository on which the service operations are performed.
* @param endpoint The URL string for the Azure Container Registry service.
* @param httpPipeline HttpPipeline that the HTTP requests and responses flow through.
* @param version {@link ContainerRegistryServiceVersion} of the service to be used when making requests.
*/
ContainerRepositoryAsync(String repositoryName, HttpPipeline httpPipeline, String endpoint, String version) {
if (repositoryName == null) {
throw logger.logExceptionAsError(new NullPointerException("'repositoryName' can't be null."));
}
if (repositoryName.isEmpty()) {
throw logger.logExceptionAsError(new IllegalArgumentException("'repositoryName' can't be empty."));
}
AzureContainerRegistryImpl registryImpl = new AzureContainerRegistryImplBuilder()
.pipeline(httpPipeline)
.url(endpoint)
.apiVersion(version)
.buildClient();
this.endpoint = endpoint;
this.repositoryName = repositoryName;
this.serviceClient = registryImpl.getContainerRegistries();
this.apiVersion = version;
this.httpPipeline = httpPipeline;
try {
URL endpointUrl = new URL(endpoint);
this.registryLoginServer = endpointUrl.getHost();
} catch (MalformedURLException ex) {
throw logger.logExceptionAsWarning(new IllegalArgumentException("'endpoint' must be a valid URL"));
}
}
/**
* Gets the Azure Container Registry service endpoint for the current instance.
* @return The service endpoint for the current instance.
*/
public String getName() {
return this.repositoryName;
}
/**
* Gets the Azure Container Registry name for the current instance.
* @return Return the registry name.
*/
public String getRegistryEndpoint() {
return this.endpoint;
}
/**
* Delete the repository in the Azure Container Registry for the given {@link
*
* <p><strong>Code Samples</strong></p>
*
* <p>Delete the repository.</p>
*
* {@codesnippet com.azure.containers.containerregistry.ContainerRepositoryAsync.deleteRepositoryWithResponse}
*
* @return A REST response containing the result of the repository delete operation. It returns the count of the tags and
* artifacts that are deleted as part of the repository delete.
* @throws ClientAuthenticationException thrown if the client does not have access to the repository.
* @throws HttpResponseException thrown if any other unexpected exception is returned by the service.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<Void>> deleteWithResponse() {
return withContext(context -> deleteWithResponse(context));
}
Mono<Response<Void>> deleteWithResponse(Context context) {
try {
return this.serviceClient.deleteRepositoryWithResponseAsync(repositoryName, context)
.flatMap(Utils::deleteResponseToSuccess)
.onErrorMap(Utils::mapException);
} catch (RuntimeException ex) {
return monoError(logger, ex);
}
}
/**
* Delete the repository in the Azure Container Registry for the given {@link
*
* <p><strong>Code Samples</strong></p>
*
* <p>Delete the repository.</p>
*
* {@codesnippet com.azure.containers.containerregistry.ContainerRepositoryAsync.deleteRepository}
*
* @return It returns the count of the tags and artifacts that are deleted as part of the repository delete.
* @throws ClientAuthenticationException thrown if the client does not have access to the repository.
* @throws HttpResponseException thrown if any other unexpected exception is returned by the service.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Void> delete() {
return this.deleteWithResponse().flatMap(FluxUtil::toMono);
}
/**
* Creates a new instance of {@link RegistryArtifactAsync} object for the specified artifact.
*
* @param digest Either a tag or digest that uniquely identifies the artifact.
* @return A new {@link RegistryArtifactAsync} object for the desired repository.
* @throws NullPointerException if {@code digest} is null.
* @throws IllegalArgumentException if {@code digest} is empty.
*/
public RegistryArtifactAsync getArtifact(String digest) {
return new RegistryArtifactAsync(repositoryName, digest, httpPipeline, endpoint, apiVersion);
}
/**
* Fetches all the artifacts associated with the given {@link
*
* <p> If you would like to specify the order in which the tags are returned please
* use the overload that takes in the options parameter {@link | class ContainerRepositoryAsync {
private final ContainerRegistriesImpl serviceClient;
private final String repositoryName;
private final String endpoint;
private final String apiVersion;
private final HttpPipeline httpPipeline;
private final String registryLoginServer;
private final ClientLogger logger = new ClientLogger(ContainerRepositoryAsync.class);
/**
* Creates a ContainerRepositoryAsyncClient that sends requests to the given repository in the container registry service at {@code endpoint}.
* Each service call goes through the {@code pipeline}.
* @param repositoryName The name of the repository on which the service operations are performed.
* @param endpoint The URL string for the Azure Container Registry service.
* @param httpPipeline HttpPipeline that the HTTP requests and responses flow through.
* @param version {@link ContainerRegistryServiceVersion} of the service to be used when making requests.
*/
ContainerRepositoryAsync(String repositoryName, HttpPipeline httpPipeline, String endpoint, String version) {
if (repositoryName == null) {
throw logger.logExceptionAsError(new NullPointerException("'repositoryName' can't be null."));
}
if (repositoryName.isEmpty()) {
throw logger.logExceptionAsError(new IllegalArgumentException("'repositoryName' can't be empty."));
}
AzureContainerRegistryImpl registryImpl = new AzureContainerRegistryImplBuilder()
.pipeline(httpPipeline)
.url(endpoint)
.apiVersion(version)
.buildClient();
this.endpoint = endpoint;
this.repositoryName = repositoryName;
this.serviceClient = registryImpl.getContainerRegistries();
this.apiVersion = version;
this.httpPipeline = httpPipeline;
try {
URL endpointUrl = new URL(endpoint);
this.registryLoginServer = endpointUrl.getHost();
} catch (MalformedURLException ex) {
throw logger.logExceptionAsWarning(new IllegalArgumentException("'endpoint' must be a valid URL"));
}
}
/**
* Gets the Azure Container Registry service endpoint for the current instance.
* @return The service endpoint for the current instance.
*/
public String getName() {
return this.repositoryName;
}
/**
* Gets the Azure Container Registry name for the current instance.
* @return Return the registry name.
*/
public String getRegistryEndpoint() {
return this.endpoint;
}
/**
* Delete the repository in the Azure Container Registry for the given {@link
*
* <p><strong>Code Samples</strong></p>
*
* <p>Delete the repository.</p>
*
* {@codesnippet com.azure.containers.containerregistry.ContainerRepositoryAsync.deleteRepositoryWithResponse}
*
* @return A REST response containing the result of the repository delete operation. It returns the count of the tags and
* artifacts that are deleted as part of the repository delete.
* @throws ClientAuthenticationException thrown if the client does not have access to the repository.
* @throws HttpResponseException thrown if any other unexpected exception is returned by the service.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<Void>> deleteWithResponse() {
return withContext(context -> deleteWithResponse(context));
}
Mono<Response<Void>> deleteWithResponse(Context context) {
try {
return this.serviceClient.deleteRepositoryWithResponseAsync(repositoryName, context)
.flatMap(Utils::deleteResponseToSuccess)
.onErrorMap(Utils::mapException);
} catch (RuntimeException ex) {
return monoError(logger, ex);
}
}
/**
* Delete the repository in the Azure Container Registry for the given {@link
*
* <p><strong>Code Samples</strong></p>
*
* <p>Delete the repository.</p>
*
* {@codesnippet com.azure.containers.containerregistry.ContainerRepositoryAsync.deleteRepository}
*
* @return It returns the count of the tags and artifacts that are deleted as part of the repository delete.
* @throws ClientAuthenticationException thrown if the client does not have access to the repository.
* @throws HttpResponseException thrown if any other unexpected exception is returned by the service.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Void> delete() {
return this.deleteWithResponse().flatMap(FluxUtil::toMono);
}
/**
* Creates a new instance of {@link RegistryArtifactAsync} object for the specified artifact.
*
* @param digest Either a tag or digest that uniquely identifies the artifact.
* @return A new {@link RegistryArtifactAsync} object for the desired repository.
* @throws NullPointerException if {@code digest} is null.
* @throws IllegalArgumentException if {@code digest} is empty.
*/
public RegistryArtifactAsync getArtifact(String digest) {
return new RegistryArtifactAsync(repositoryName, digest, httpPipeline, endpoint, apiVersion);
}
/**
* Fetches all the artifacts associated with the given {@link
*
* <p> If you would like to specify the order in which the tags are returned please
* use the overload that takes in the options parameter {@link | |
Yes, the service decided to remove it for this api-version. hence, I chose to comment rather than completely remove. | Mono<Response<ContainerRepositoryProperties>> updatePropertiesWithResponse(ContainerRepositoryProperties repositoryProperties, Context context) {
try {
if (repositoryProperties == null) {
return monoError(logger, new NullPointerException("'value' cannot be null."));
}
RepositoryWriteableProperties writableProperties = new RepositoryWriteableProperties()
.setDeleteEnabled(repositoryProperties.isDeleteEnabled())
.setListEnabled(repositoryProperties.isListEnabled())
.setWriteEnabled(repositoryProperties.isWriteEnabled())
.setReadEnabled(repositoryProperties.isReadEnabled());
return this.serviceClient.updatePropertiesWithResponseAsync(repositoryName, writableProperties, context)
.onErrorMap(Utils::mapException);
} catch (RuntimeException e) {
return monoError(logger, e);
}
} | return monoError(logger, new NullPointerException("'value' cannot be null."));
}
RepositoryWriteableProperties writableProperties = new RepositoryWriteableProperties()
.setDeleteEnabled(repositoryProperties.isDeleteEnabled())
.setListEnabled(repositoryProperties.isListEnabled())
.setWriteEnabled(repositoryProperties.isWriteEnabled())
.setReadEnabled(repositoryProperties.isReadEnabled());
return this.serviceClient.updatePropertiesWithResponseAsync(repositoryName, writableProperties, context)
.onErrorMap(Utils::mapException);
} catch (RuntimeException e) {
return monoError(logger, e);
} | class ContainerRepositoryAsync {
private final ContainerRegistriesImpl serviceClient;
private final String repositoryName;
private final String endpoint;
private final String apiVersion;
private final HttpPipeline httpPipeline;
private final String registryLoginServer;
private final ClientLogger logger = new ClientLogger(ContainerRepositoryAsync.class);
/**
* Creates a ContainerRepositoryAsyncClient that sends requests to the given repository in the container registry service at {@code endpoint}.
* Each service call goes through the {@code pipeline}.
* @param repositoryName The name of the repository on which the service operations are performed.
* @param endpoint The URL string for the Azure Container Registry service.
* @param httpPipeline HttpPipeline that the HTTP requests and responses flow through.
* @param version {@link ContainerRegistryServiceVersion} of the service to be used when making requests.
*/
ContainerRepositoryAsync(String repositoryName, HttpPipeline httpPipeline, String endpoint, String version) {
if (repositoryName == null) {
throw logger.logExceptionAsError(new NullPointerException("'repositoryName' can't be null."));
}
if (repositoryName.isEmpty()) {
throw logger.logExceptionAsError(new IllegalArgumentException("'repositoryName' can't be empty."));
}
AzureContainerRegistryImpl registryImpl = new AzureContainerRegistryImplBuilder()
.pipeline(httpPipeline)
.url(endpoint)
.apiVersion(version)
.buildClient();
this.endpoint = endpoint;
this.repositoryName = repositoryName;
this.serviceClient = registryImpl.getContainerRegistries();
this.apiVersion = version;
this.httpPipeline = httpPipeline;
try {
URL endpointUrl = new URL(endpoint);
this.registryLoginServer = endpointUrl.getHost();
} catch (MalformedURLException ex) {
throw logger.logExceptionAsWarning(new IllegalArgumentException("'endpoint' must be a valid URL"));
}
}
/**
* Gets the Azure Container Registry service endpoint for the current instance.
* @return The service endpoint for the current instance.
*/
public String getName() {
return this.repositoryName;
}
/**
* Gets the Azure Container Registry name for the current instance.
* @return Return the registry name.
*/
public String getRegistryEndpoint() {
return this.endpoint;
}
/**
* Delete the repository in the Azure Container Registry for the given {@link
*
* <p><strong>Code Samples</strong></p>
*
* <p>Delete the repository.</p>
*
* {@codesnippet com.azure.containers.containerregistry.ContainerRepositoryAsync.deleteRepositoryWithResponse}
*
* @return A REST response containing the result of the repository delete operation. It returns the count of the tags and
* artifacts that are deleted as part of the repository delete.
* @throws ClientAuthenticationException thrown if the client does not have access to the repository.
* @throws HttpResponseException thrown if any other unexpected exception is returned by the service.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<Void>> deleteWithResponse() {
    // Capture the reactive Context and delegate to the Context-accepting overload.
    return withContext(this::deleteWithResponse);
}
// Context-accepting variant used by the public overload via withContext().
Mono<Response<Void>> deleteWithResponse(Context context) {
    try {
        return this.serviceClient.deleteRepositoryWithResponseAsync(repositoryName, context)
            // Map the service's delete response to the public success type.
            .flatMap(Utils::deleteResponseToSuccess)
            // Translate service exceptions into the public exception types.
            .onErrorMap(Utils::mapException);
    } catch (RuntimeException ex) {
        // Surface synchronous assembly-time failures as an error Mono instead of throwing.
        return monoError(logger, ex);
    }
}
/**
* Delete the repository in the Azure Container Registry for the given {@link
*
* <p><strong>Code Samples</strong></p>
*
* <p>Delete the repository.</p>
*
* {@codesnippet com.azure.containers.containerregistry.ContainerRepositoryAsync.deleteRepository}
*
* @return It returns the count of the tags and artifacts that are deleted as part of the repository delete.
* @throws ClientAuthenticationException thrown if the client does not have access to the repository.
* @throws HttpResponseException thrown if any other unexpected exception is returned by the service.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Void> delete() {
    // Delegate to deleteWithResponse() and unwrap the Response to its (Void) value.
    return this.deleteWithResponse().flatMap(FluxUtil::toMono);
}
/**
* Creates a new instance of {@link RegistryArtifactAsync} object for the specified artifact.
*
* @param digest Either a tag or digest that uniquely identifies the artifact.
* @return A new {@link RegistryArtifactAsync} object for the desired repository.
* @throws NullPointerException if {@code digest} is null.
* @throws IllegalArgumentException if {@code digest} is empty.
*/
public RegistryArtifactAsync getArtifact(String digest) {
    // NOTE(review): the documented NullPointerException/IllegalArgumentException for 'digest' are
    // presumably thrown by the RegistryArtifactAsync constructor; no validation happens here — confirm.
    return new RegistryArtifactAsync(repositoryName, digest, httpPipeline, endpoint, apiVersion);
}
/**
* Fetches all the artifacts associated with the given {@link
*
* <p> If you would like to specify the order in which the tags are returned please
* use the overload that takes in the options parameter {@link | class ContainerRepositoryAsync {
// Generated client that performs the REST operations for this repository.
private final ContainerRegistriesImpl serviceClient;
// Name of the repository all operations on this instance target.
private final String repositoryName;
// Service endpoint URL for the registry.
private final String endpoint;
private final String apiVersion;
private final HttpPipeline httpPipeline;
// Host portion of the endpoint URL.
private final String registryLoginServer;
private final ClientLogger logger = new ClientLogger(ContainerRepositoryAsync.class);

/**
 * Creates a ContainerRepositoryAsyncClient that sends requests to the given repository in the
 * container registry service at {@code endpoint}. Each service call goes through the {@code pipeline}.
 *
 * @param repositoryName The name of the repository on which the service operations are performed.
 * @param endpoint The URL string for the Azure Container Registry service.
 * @param httpPipeline HttpPipeline that the HTTP requests and responses flow through.
 * @param version {@link ContainerRegistryServiceVersion} of the service to be used when making requests.
 */
ContainerRepositoryAsync(String repositoryName, HttpPipeline httpPipeline, String endpoint, String version) {
    if (repositoryName == null) {
        throw logger.logExceptionAsError(new NullPointerException("'repositoryName' can't be null."));
    }
    if (repositoryName.isEmpty()) {
        throw logger.logExceptionAsError(new IllegalArgumentException("'repositoryName' can't be empty."));
    }
    AzureContainerRegistryImpl registryImpl = new AzureContainerRegistryImplBuilder()
        .pipeline(httpPipeline)
        .url(endpoint)
        .apiVersion(version)
        .buildClient();
    this.endpoint = endpoint;
    this.repositoryName = repositoryName;
    this.serviceClient = registryImpl.getContainerRegistries();
    this.apiVersion = version;
    this.httpPipeline = httpPipeline;
    // Derive the login server (host) from the endpoint; reject endpoints that are not valid URLs.
    try {
        URL endpointUrl = new URL(endpoint);
        this.registryLoginServer = endpointUrl.getHost();
    } catch (MalformedURLException ex) {
        throw logger.logExceptionAsWarning(new IllegalArgumentException("'endpoint' must be a valid URL"));
    }
}

/**
 * Gets the name of the repository that this instance targets.
 * NOTE(review): the previous Javadoc described the service endpoint here; the descriptions of this
 * method and {@link #getRegistryEndpoint()} were swapped.
 *
 * @return The repository name for the current instance.
 */
public String getName() {
    return this.repositoryName;
}

/**
 * Gets the Azure Container Registry service endpoint for the current instance.
 *
 * @return The service endpoint for the current instance.
 */
public String getRegistryEndpoint() {
    return this.endpoint;
}
/**
* Delete the repository in the Azure Container Registry for the given {@link
*
* <p><strong>Code Samples</strong></p>
*
* <p>Delete the repository.</p>
*
* {@codesnippet com.azure.containers.containerregistry.ContainerRepositoryAsync.deleteRepositoryWithResponse}
*
* @return A REST response containing the result of the repository delete operation. It returns the count of the tags and
* artifacts that are deleted as part of the repository delete.
* @throws ClientAuthenticationException thrown if the client does not have access to the repository.
* @throws HttpResponseException thrown if any other unexpected exception is returned by the service.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<Void>> deleteWithResponse() {
    // Hand the reactive Context to the package-private overload via a method reference.
    return withContext(this::deleteWithResponse);
}
// Context-accepting variant used by the public overload via withContext().
Mono<Response<Void>> deleteWithResponse(Context context) {
    try {
        return this.serviceClient.deleteRepositoryWithResponseAsync(repositoryName, context)
            // Map the service's delete response to the public success type.
            .flatMap(Utils::deleteResponseToSuccess)
            // Translate service exceptions into the public exception types.
            .onErrorMap(Utils::mapException);
    } catch (RuntimeException ex) {
        // Surface synchronous assembly-time failures as an error Mono instead of throwing.
        return monoError(logger, ex);
    }
}
/**
* Delete the repository in the Azure Container Registry for the given {@link
*
* <p><strong>Code Samples</strong></p>
*
* <p>Delete the repository.</p>
*
* {@codesnippet com.azure.containers.containerregistry.ContainerRepositoryAsync.deleteRepository}
*
* @return It returns the count of the tags and artifacts that are deleted as part of the repository delete.
* @throws ClientAuthenticationException thrown if the client does not have access to the repository.
* @throws HttpResponseException thrown if any other unexpected exception is returned by the service.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Void> delete() {
    // Delegate to deleteWithResponse() and unwrap the Response to its (Void) value.
    return this.deleteWithResponse().flatMap(FluxUtil::toMono);
}
/**
* Creates a new instance of {@link RegistryArtifactAsync} object for the specified artifact.
*
* @param digest Either a tag or digest that uniquely identifies the artifact.
* @return A new {@link RegistryArtifactAsync} object for the desired repository.
* @throws NullPointerException if {@code digest} is null.
* @throws IllegalArgumentException if {@code digest} is empty.
*/
public RegistryArtifactAsync getArtifact(String digest) {
    // NOTE(review): argument validation (documented NPE/IAE) is presumably performed by the
    // RegistryArtifactAsync constructor — confirm; nothing is validated here.
    return new RegistryArtifactAsync(repositoryName, digest, httpPipeline, endpoint, apiVersion);
}
/**
* Fetches all the artifacts associated with the given {@link
*
* <p> If you would like to specify the order in which the tags are returned please
* use the overload that takes in the options parameter {@link | |
Curious what kind of configurations do we support here? Never mind, found it later in the code. #Resolved | public T configuration(Configuration configuration) {
identityClientOptions.setConfiguration(configuration);
return (T) this;
} | identityClientOptions.setConfiguration(configuration); | public T configuration(Configuration configuration) {
identityClientOptions.setConfiguration(configuration);
return (T) this;
} | class CredentialBuilderBase<T extends CredentialBuilderBase<T>> {
// Shared options bag that concrete credential builders pass to the identity client they construct.
IdentityClientOptions identityClientOptions;

CredentialBuilderBase() {
    this.identityClientOptions = new IdentityClientOptions();
}

/**
 * Specifies the max number of retries when an authentication request fails.
 *
 * @param maxRetry the number of retries
 * @return An updated instance of this builder with the max retry set as specified.
 */
@SuppressWarnings("unchecked")
public T maxRetry(int maxRetry) {
    this.identityClientOptions.setMaxRetry(maxRetry);
    return (T) this;
}

/**
 * Specifies a Function to calculate the timeout to apply on every retried request.
 * NOTE(review): the original description said "seconds ... given the number of retry", but the
 * signature maps a {@link Duration} to a {@link Duration} — confirm the intended contract.
 *
 * @param retryTimeout the Function that returns a timeout given a duration
 * @return An updated instance of this builder with the retry timeout set as specified.
 */
@SuppressWarnings("unchecked")
public T retryTimeout(Function<Duration, Duration> retryTimeout) {
    this.identityClientOptions.setRetryTimeout(retryTimeout);
    return (T) this;
}

/**
 * Specifies the options for proxy configuration.
 *
 * @deprecated Configure the proxy options on the {@link HttpClient} instead and then set that
 * client on the credential using {@link #httpClient(HttpClient)}.
 *
 * @param proxyOptions the options for proxy configuration
 * @return An updated instance of this builder with the proxy options set as specified.
 */
@Deprecated
@SuppressWarnings("unchecked")
public T proxyOptions(ProxyOptions proxyOptions) {
    this.identityClientOptions.setProxyOptions(proxyOptions);
    return (T) this;
}

/**
 * Specifies the HttpPipeline to send all requests. This setting overrides the others.
 *
 * @param httpPipeline the HttpPipeline to send all requests
 * @return An updated instance of this builder with the http pipeline set as specified.
 */
@SuppressWarnings("unchecked")
public T httpPipeline(HttpPipeline httpPipeline) {
    this.identityClientOptions.setHttpPipeline(httpPipeline);
    return (T) this;
}

/**
 * Sets the HTTP client to use for sending and receiving requests to and from the service.
 *
 * @param client The HTTP client to use for requests.
 * @return An updated instance of this builder with the http client set as specified.
 * @throws NullPointerException If {@code client} is {@code null}.
 */
@SuppressWarnings("unchecked")
public T httpClient(HttpClient client) {
    Objects.requireNonNull(client);
    this.identityClientOptions.setHttpClient(client);
    return (T) this;
}
/**
* Sets the configuration store that is used during construction of the credential.
*
* The default configuration store is a clone of the {@link Configuration
* configuration store}.
*
* @param configuration The configuration store used to load Env variables and/or properties from.
*
* @return An updated instance of this builder with the configuration store set as specified.
*/
@SuppressWarnings("unchecked")
} | class CredentialBuilderBase<T extends CredentialBuilderBase<T>> {
// Shared options bag that concrete credential builders pass to the identity client they construct.
IdentityClientOptions identityClientOptions;

CredentialBuilderBase() {
    this.identityClientOptions = new IdentityClientOptions();
}

/**
 * Specifies the max number of retries when an authentication request fails.
 *
 * @param maxRetry the number of retries
 * @return An updated instance of this builder with the max retry set as specified.
 */
@SuppressWarnings("unchecked")
public T maxRetry(int maxRetry) {
    this.identityClientOptions.setMaxRetry(maxRetry);
    return (T) this;
}

/**
 * Specifies a Function to calculate the timeout to apply on every retried request.
 * NOTE(review): the original description said "seconds ... given the number of retry", but the
 * signature maps a {@link Duration} to a {@link Duration} — confirm the intended contract.
 *
 * @param retryTimeout the Function that returns a timeout given a duration
 * @return An updated instance of this builder with the retry timeout set as specified.
 */
@SuppressWarnings("unchecked")
public T retryTimeout(Function<Duration, Duration> retryTimeout) {
    this.identityClientOptions.setRetryTimeout(retryTimeout);
    return (T) this;
}

/**
 * Specifies the options for proxy configuration.
 *
 * @deprecated Configure the proxy options on the {@link HttpClient} instead and then set that
 * client on the credential using {@link #httpClient(HttpClient)}.
 *
 * @param proxyOptions the options for proxy configuration
 * @return An updated instance of this builder with the proxy options set as specified.
 */
@Deprecated
@SuppressWarnings("unchecked")
public T proxyOptions(ProxyOptions proxyOptions) {
    this.identityClientOptions.setProxyOptions(proxyOptions);
    return (T) this;
}

/**
 * Specifies the HttpPipeline to send all requests. This setting overrides the others.
 *
 * @param httpPipeline the HttpPipeline to send all requests
 * @return An updated instance of this builder with the http pipeline set as specified.
 */
@SuppressWarnings("unchecked")
public T httpPipeline(HttpPipeline httpPipeline) {
    this.identityClientOptions.setHttpPipeline(httpPipeline);
    return (T) this;
}

/**
 * Sets the HTTP client to use for sending and receiving requests to and from the service.
 *
 * @param client The HTTP client to use for requests.
 * @return An updated instance of this builder with the http client set as specified.
 * @throws NullPointerException If {@code client} is {@code null}.
 */
@SuppressWarnings("unchecked")
public T httpClient(HttpClient client) {
    Objects.requireNonNull(client);
    this.identityClientOptions.setHttpClient(client);
    return (T) this;
}
/**
* Sets the configuration store that is used during construction of the credential.
*
* The default configuration store is a clone of the {@link Configuration
* configuration store}.
*
* @param configuration The configuration store used to load Env variables and/or properties from.
*
* @return An updated instance of this builder with the configuration store set as specified.
*/
@SuppressWarnings("unchecked")
} |
nit: use the same name for instance property too ```suggestion this.schemaDefinition = schemaDefinition; ``` | public SchemaRegistrySchema(SchemaProperties properties, String schemaDefinition) {
this.properties = properties;
this.content = schemaDefinition;
} | this.content = schemaDefinition; | public SchemaRegistrySchema(SchemaProperties properties, String schemaDefinition) {
this.properties = properties;
this.schemaDefinition = schemaDefinition;
} | class SchemaRegistrySchema {
// Metadata describing the schema.
private final SchemaProperties properties;
// Schema definition text. NOTE(review): field is named 'content' while the constructor parameter
// and public getter use 'schemaDefinition'; consider renaming for consistency.
private final String content;

/**
 * Creates a new instance.
 * (Constructor Javadoc; the constructor itself is not shown in this fragment.)
 *
 * @param properties Schema's properties.
 * @param schemaDefinition The definition of the schema.
 */
/**
 * Gets properties related to the schema.
 *
 * @return Properties of the schema.
 */
public SchemaProperties getProperties() {
    return properties;
}

/**
 * Gets the content of the schema.
 *
 * @return The content of the schema.
 */
public String getSchemaDefinition() {
    return content;
}
} | class SchemaRegistrySchema {
// Metadata describing the schema.
private final SchemaProperties properties;
// The schema definition text returned by getSchemaDefinition().
private final String schemaDefinition;

/**
 * Creates a new instance.
 * (Constructor Javadoc; the constructor itself is not shown in this fragment.)
 *
 * @param properties Schema's properties.
 * @param schemaDefinition The definition of the schema.
 */
/**
 * Gets properties related to the schema.
 *
 * @return Properties of the schema.
 */
public SchemaProperties getProperties() {
    return properties;
}

/**
 * Gets the content of the schema.
 *
 * @return The content of the schema.
 */
public String getSchemaDefinition() {
    return schemaDefinition;
}
} |
If supported by versions of Java we support, have you looked at https://docs.oracle.com/javase/7/docs/api/java/net/URL.html#getAuthority()? It will return both host and port (if port is provided) ```java String endpoint = url.getProtocol() + "://" + url.getAuthority(); ``` | private void unpackAndValidateId(String keyId) {
// Validates the key identifier and extracts the key collection segment from its path.
if (CoreUtils.isNullOrEmpty(keyId)) {
    throw logger.logExceptionAsError(new IllegalArgumentException("'keyId' cannot be null or empty."));
}
try {
    URL url = new URL(keyId);
    String[] tokens = url.getPath().split("/");
    // getAuthority() yields "host" or "host:port", so no separate port handling is needed.
    String endpoint = url.getProtocol() + "://" + url.getAuthority();
    String keyName = (tokens.length >= 3 ? tokens[2] : null);
    this.keyCollection = (tokens.length >= 2 ? tokens[1] : null);
    if (Strings.isNullOrEmpty(endpoint)) {
        throw logger.logExceptionAsError(
            new IllegalArgumentException("Key endpoint in key identifier is invalid."));
    } else if (Strings.isNullOrEmpty(keyName)) {
        throw logger.logExceptionAsError(
            new IllegalArgumentException("Key name in key identifier is invalid."));
    }
} catch (MalformedURLException e) {
    throw logger.logExceptionAsError(new IllegalArgumentException("The key identifier is malformed.", e));
}
}
// Validates the key identifier and extracts the key collection segment from its path.
if (CoreUtils.isNullOrEmpty(keyId)) {
    throw logger.logExceptionAsError(new IllegalArgumentException("'keyId' cannot be null or empty."));
}
try {
    URL url = new URL(keyId);
    String[] tokens = url.getPath().split("/");
    // getAuthority() yields "host" or "host:port", so no separate port handling is needed.
    String endpoint = url.getProtocol() + "://" + url.getAuthority();
    String keyName = (tokens.length >= 3 ? tokens[2] : null);
    this.keyCollection = (tokens.length >= 2 ? tokens[1] : null);
    if (Strings.isNullOrEmpty(endpoint)) {
        throw logger.logExceptionAsError(
            new IllegalArgumentException("Key endpoint in key identifier is invalid."));
    } else if (Strings.isNullOrEmpty(keyName)) {
        throw logger.logExceptionAsError(
            new IllegalArgumentException("Key name in key identifier is invalid."));
    }
} catch (MalformedURLException e) {
    throw logger.logExceptionAsError(new IllegalArgumentException("The key identifier is malformed.", e));
}
} | class CryptographyAsyncClient {
static final String KEYVAULT_TRACING_NAMESPACE_VALUE = "Microsoft.KeyVault";
static final String SECRETS_COLLECTION = "secrets";
// Cached key material; null until fetched from the service or supplied via the local-mode constructor.
JsonWebKey key;
private final ClientLogger logger = new ClientLogger(CryptographyAsyncClient.class);
private final CryptographyService service;
private final HttpPipeline pipeline;
private final String keyId;
private CryptographyServiceClient cryptographyServiceClient;
private LocalKeyCryptographyClient localKeyCryptographyClient;
// Collection segment extracted from the key identifier (e.g. "secrets") — see unpackAndValidateId.
private String keyCollection;

/**
 * Creates a {@link CryptographyAsyncClient} that uses a given {@link HttpPipeline pipeline} to service requests.
 *
 * @param keyId The Azure Key Vault key identifier to use for cryptography operations.
 * @param pipeline {@link HttpPipeline} that the HTTP requests and responses flow through.
 * @param version {@link CryptographyServiceVersion} of the service to be used when making requests.
 */
CryptographyAsyncClient(String keyId, HttpPipeline pipeline, CryptographyServiceVersion version) {
    // Validates the identifier and extracts the key collection from its path.
    unpackAndValidateId(keyId);
    this.keyId = keyId;
    this.pipeline = pipeline;
    this.service = RestProxy.create(CryptographyService.class, pipeline);
    this.cryptographyServiceClient = new CryptographyServiceClient(keyId, service, version);
    // Key material is fetched lazily from the service on first use.
    this.key = null;
}
/**
 * Creates a {@link CryptographyAsyncClient} that uses a {@link JsonWebKey} to perform local cryptography
 * operations.
 *
 * @param jsonWebKey The {@link JsonWebKey} to use for local cryptography operations.
 * @throws NullPointerException If {@code jsonWebKey} is {@code null}.
 * @throws IllegalArgumentException If {@code jsonWebKey} is invalid or not fully configured.
 */
CryptographyAsyncClient(JsonWebKey jsonWebKey) {
    Objects.requireNonNull(jsonWebKey, "The JSON Web Key is required.");
    // Route validation failures through the logger so they appear in client logs, matching the
    // error-handling convention used elsewhere in this class (e.g. initializeCryptoClients()).
    if (!jsonWebKey.isValid()) {
        throw logger.logExceptionAsError(new IllegalArgumentException("The JSON Web Key is not valid."));
    }
    if (jsonWebKey.getKeyOps() == null) {
        throw logger.logExceptionAsError(
            new IllegalArgumentException("The JSON Web Key's key operations property is not configured."));
    }
    if (jsonWebKey.getKeyType() == null) {
        throw logger.logExceptionAsError(
            new IllegalArgumentException("The JSON Web Key's key type property is not configured."));
    }
    // Local-only mode: no HTTP pipeline or service client is configured.
    this.key = jsonWebKey;
    this.keyId = jsonWebKey.getId();
    this.pipeline = null;
    this.service = null;
    this.cryptographyServiceClient = null;
    initializeCryptoClients();
}
// Chooses the local cryptography implementation matching the key's type. Safe to call repeatedly:
// returns immediately once a local client has been created.
private void initializeCryptoClients() {
    if (localKeyCryptographyClient != null) {
        return;
    }
    if (key.getKeyType().equals(RSA) || key.getKeyType().equals(RSA_HSM)) {
        this.localKeyCryptographyClient = new RsaKeyCryptographyClient(this.key, this.cryptographyServiceClient);
    } else if (key.getKeyType().equals(EC) || key.getKeyType().equals(EC_HSM)) {
        this.localKeyCryptographyClient = new EcKeyCryptographyClient(this.key, this.cryptographyServiceClient);
    } else if (key.getKeyType().equals(OCT) || key.getKeyType().equals(OCT_HSM)) {
        this.localKeyCryptographyClient = new AesKeyCryptographyClient(this.key, this.cryptographyServiceClient);
    } else {
        // Key types without a local implementation cannot be served.
        throw logger.logExceptionAsError(new IllegalArgumentException(String.format(
            "The JSON Web Key type: %s is not supported.", this.key.getKeyType().toString())));
    }
}
/**
 * Gets the {@link HttpPipeline} powering this client.
 *
 * @return The pipeline, or {@code null} when the client was created in local-only mode.
 */
HttpPipeline getHttpPipeline() {
    return this.pipeline;
}

// Defers reading keyId until subscription time.
Mono<String> getKeyId() {
    return Mono.defer(() -> Mono.just(this.keyId));
}
/**
* Gets the public part of the configured key. The get key operation is applicable to all key types and it requires
* the {@code keys/get} permission for non-local operations.
*
* <p><strong>Code Samples</strong></p>
* <p>Gets the configured key in the client. Subscribes to the call asynchronously and prints out the returned key
* details when a response has been received.</p>
*
* {@codesnippet com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.getKey}
*
* @return A {@link Mono} containing the requested {@link KeyVaultKey key}.
*
* @throws ResourceNotFoundException When the configured key doesn't exist in the key vault.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<KeyVaultKey> getKey() {
    try {
        // Delegate to the Response-returning variant and unwrap its value.
        return getKeyWithResponse().flatMap(FluxUtil::toMono);
    } catch (RuntimeException ex) {
        // Assembly-time failures are surfaced as an error Mono rather than thrown.
        return monoError(logger, ex);
    }
}
/**
* Gets the public part of the configured key. The get key operation is applicable to all key types and it requires
* the {@code keys/get} permission for non-local operations.
*
* <p><strong>Code Samples</strong></p>
* <p>Gets the configured key in the client. Subscribes to the call asynchronously and prints out the returned key
* details when a response has been received.</p>
*
* {@codesnippet com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.getKeyWithResponse}
*
* @return A {@link Mono} containing a {@link Response} whose {@link Response
* requested {@link KeyVaultKey key}.
*
* @throws ResourceNotFoundException When the configured key doesn't exist in the key vault.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<KeyVaultKey>> getKeyWithResponse() {
    try {
        // Capture the reactive Context and delegate to the Context-accepting overload.
        return withContext(this::getKeyWithResponse);
    } catch (RuntimeException ex) {
        return monoError(logger, ex);
    }
}
// Context-accepting variant. A service client only exists when this client was created with a key
// identifier; in local-only mode (created from a JsonWebKey) there is no vault to fetch from.
Mono<Response<KeyVaultKey>> getKeyWithResponse(Context context) {
    if (cryptographyServiceClient != null) {
        return cryptographyServiceClient.getKey(context);
    } else {
        // Fixed garbled message ("when in operating local-only mode").
        throw logger.logExceptionAsError(new UnsupportedOperationException(
            "Operation not supported when operating in local-only mode"));
    }
}
// Presumably fetches key material stored in the vault's secrets collection (cf. SECRETS_COLLECTION);
// NOTE(review): cryptographyServiceClient is null in local-only mode — confirm callers guard this.
Mono<JsonWebKey> getSecretKey() {
    try {
        return withContext(context -> cryptographyServiceClient.getSecretKey(context))
            .flatMap(FluxUtil::toMono);
    } catch (RuntimeException ex) {
        return monoError(logger, ex);
    }
}
/**
* Encrypts an arbitrary sequence of bytes using the configured key. Note that the encrypt operation only supports
* a single block of data, the size of which is dependent on the target key and the encryption algorithm to be used.
* The encrypt operation is supported for both symmetric keys and asymmetric keys. In case of asymmetric keys, the
* public portion of the key is used for encryption. This operation requires the {@code keys/encrypt} permission
* for non-local operations.
*
* <p>The {@link EncryptionAlgorithm encryption algorithm} indicates the type of algorithm to use for encrypting the
* specified {@code plaintext}. Possible values for asymmetric keys include:
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
*
* Possible values for symmetric keys include: {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
*
* <p><strong>Code Samples</strong></p>
* <p>Encrypts the content. Subscribes to the call asynchronously and prints out the encrypted content details when
* a response has been received.</p>
*
* {@codesnippet com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.encrypt
*
* @param algorithm The algorithm to be used for encryption.
* @param plaintext The content to be encrypted.
*
* @return A {@link Mono} containing a {@link EncryptResult} whose {@link EncryptResult
* contains the encrypted content.
*
* @throws NullPointerException If {@code algorithm} or {@code plaintext} are {@code null}.
* @throws ResourceNotFoundException If the key cannot be found for encryption.
* @throws UnsupportedOperationException If the encrypt operation is not supported or configured on the key.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<EncryptResult> encrypt(EncryptionAlgorithm algorithm, byte[] plaintext) {
    Objects.requireNonNull(algorithm, "'algorithm' cannot be null.");
    Objects.requireNonNull(plaintext, "'plaintext' cannot be null.");
    // Delegates with a null Context; the overload decides between local and service execution.
    return encrypt(algorithm, plaintext, null);
}
/**
* Encrypts an arbitrary sequence of bytes using the configured key. Note that the encrypt operation only supports
* a single block of data, the size of which is dependent on the target key and the encryption algorithm to be used.
* The encrypt operation is supported for both symmetric keys and asymmetric keys. In case of asymmetric keys, the
* public portion of the key is used for encryption. This operation requires the {@code keys/encrypt} permission
* for non-local operations.
*
* <p>The {@link EncryptionAlgorithm encryption algorithm} indicates the type of algorithm to use for encrypting the
* specified {@code plaintext}. Possible values for asymmetric keys include:
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
*
* Possible values for symmetric keys include: {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
*
* <p><strong>Code Samples</strong></p>
* <p>Encrypts the content. Subscribes to the call asynchronously and prints out the encrypted content details when
* a response has been received.</p>
*
* {@codesnippet com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.encrypt
*
* @param encryptParameters The parameters to use in the encryption operation.
*
* @return A {@link Mono} containing a {@link EncryptResult} whose {@link EncryptResult
* contains the encrypted content.
*
* @throws NullPointerException If {@code algorithm} or {@code plaintext} are {@code null}.
* @throws ResourceNotFoundException If the key cannot be found for encryption.
* @throws UnsupportedOperationException If the encrypt operation is not supported or configured on the key.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<EncryptResult> encrypt(EncryptParameters encryptParameters) {
    Objects.requireNonNull(encryptParameters, "'encryptParameters' cannot be null.");
    try {
        // Capture the reactive Context and delegate to the Context-accepting overload.
        return withContext(context -> encrypt(encryptParameters, context));
    } catch (RuntimeException ex) {
        return monoError(logger, ex);
    }
}
// Dispatches between local and service-side encryption once key availability is known.
Mono<EncryptResult> encrypt(EncryptionAlgorithm algorithm, byte[] plaintext, Context context) {
    return ensureValidKeyAvailable().flatMap(available -> {
        // No usable local key material: fall back to the service.
        if (!available) {
            return cryptographyServiceClient.encrypt(algorithm, plaintext, context);
        }
        // Local key exists; verify it permits the ENCRYPT operation before using it.
        if (!checkKeyPermissions(this.key.getKeyOps(), KeyOperation.ENCRYPT)) {
            return Mono.error(logger.logExceptionAsError(new UnsupportedOperationException(String.format(
                "Encrypt operation is missing permission/not supported for key with id: %s", key.getId()))));
        }
        return localKeyCryptographyClient.encryptAsync(algorithm, plaintext, context, key);
    });
}

// Same dispatch as above, driven by a full EncryptParameters bag.
Mono<EncryptResult> encrypt(EncryptParameters encryptParameters, Context context) {
    return ensureValidKeyAvailable().flatMap(available -> {
        if (!available) {
            return cryptographyServiceClient.encrypt(encryptParameters, context);
        }
        if (!checkKeyPermissions(this.key.getKeyOps(), KeyOperation.ENCRYPT)) {
            return Mono.error(logger.logExceptionAsError(new UnsupportedOperationException(String.format(
                "Encrypt operation is missing permission/not supported for key with id: %s", key.getId()))));
        }
        return localKeyCryptographyClient.encryptAsync(encryptParameters, context, key);
    });
}
/**
* Decrypts a single block of encrypted data using the configured key and specified algorithm. Note that only a
* single block of data may be decrypted, the size of this block is dependent on the target key and the algorithm
* to be used. The decrypt operation is supported for both asymmetric and symmetric keys. This operation requires
* the {@code keys/decrypt} permission for non-local operations.
*
* <p>The {@link EncryptionAlgorithm encryption algorithm} indicates the type of algorithm to use for decrypting
* the specified encrypted content. Possible values for asymmetric keys include:
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
*
* Possible values for symmetric keys include: {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
*
* <p><strong>Code Samples</strong></p>
* <p>Decrypts the encrypted content. Subscribes to the call asynchronously and prints out the decrypted content
* details when a response has been received.</p>
*
* {@codesnippet com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.decrypt
*
* @param algorithm The algorithm to be used for decryption.
* @param ciphertext The content to be decrypted.
*
* @return A {@link Mono} containing the decrypted blob.
*
* @throws NullPointerException If {@code algorithm} or {@code ciphertext} are {@code null}.
* @throws ResourceNotFoundException If the key cannot be found for decryption.
* @throws UnsupportedOperationException If the decrypt operation is not supported or configured on the key.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<DecryptResult> decrypt(EncryptionAlgorithm algorithm, byte[] ciphertext) {
    Objects.requireNonNull(algorithm, "'algorithm' cannot be null.");
    // Fixed copy/paste bug: this previously re-checked 'algorithm' while reporting "'ciphertext'",
    // so a null ciphertext slipped past validation.
    Objects.requireNonNull(ciphertext, "'ciphertext' cannot be null.");
    // Delegates with a null Context; the overload decides between local and service execution.
    return decrypt(algorithm, ciphertext, null);
}
/**
* Decrypts a single block of encrypted data using the configured key and specified algorithm. Note that only a
* single block of data may be decrypted, the size of this block is dependent on the target key and the algorithm
* to be used. The decrypt operation is supported for both asymmetric and symmetric keys. This operation requires
* the {@code keys/decrypt} permission for non-local operations.
*
* <p>The {@link EncryptionAlgorithm encryption algorithm} indicates the type of algorithm to use for decrypting
* the specified encrypted content. Possible values for asymmetric keys include:
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
*
* Possible values for symmetric keys include: {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
*
* <p><strong>Code Samples</strong></p>
* <p>Decrypts the encrypted content. Subscribes to the call asynchronously and prints out the decrypted content
* details when a response has been received.</p>
*
* {@codesnippet com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.decrypt
*
* @param decryptParameters The parameters to use in the decryption operation.
*
* @return A {@link Mono} containing the decrypted blob.
*
* @throws NullPointerException If {@code algorithm} or {@code ciphertext} are {@code null}.
* @throws ResourceNotFoundException If the key cannot be found for decryption.
* @throws UnsupportedOperationException If the decrypt operation is not supported or configured on the key.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<DecryptResult> decrypt(DecryptParameters decryptParameters) {
    Objects.requireNonNull(decryptParameters, "'decryptParameters' cannot be null.");
    try {
        // Capture the reactive Context and delegate to the Context-accepting overload.
        return withContext(context -> decrypt(decryptParameters, context));
    } catch (RuntimeException ex) {
        return monoError(logger, ex);
    }
}
/** Context-aware decrypt: uses local key material when valid, otherwise calls the service. */
Mono<DecryptResult> decrypt(EncryptionAlgorithm algorithm, byte[] ciphertext, Context context) {
    return ensureValidKeyAvailable().flatMap(useLocal -> {
        if (useLocal) {
            if (checkKeyPermissions(this.key.getKeyOps(), KeyOperation.DECRYPT)) {
                return localKeyCryptographyClient.decryptAsync(algorithm, ciphertext, context, key);
            }
            return Mono.error(logger.logExceptionAsError(new UnsupportedOperationException(String.format(
                "Decrypt operation is not allowed for key with id: %s", key.getId()))));
        }
        // No valid local key material; route the call to the Key Vault service.
        return cryptographyServiceClient.decrypt(algorithm, ciphertext, context);
    });
}
/** Context-aware decrypt (parameter-object form): local when possible, service otherwise. */
Mono<DecryptResult> decrypt(DecryptParameters decryptParameters, Context context) {
    return ensureValidKeyAvailable().flatMap(useLocal -> {
        if (useLocal) {
            if (checkKeyPermissions(this.key.getKeyOps(), KeyOperation.DECRYPT)) {
                return localKeyCryptographyClient.decryptAsync(decryptParameters, context, key);
            }
            return Mono.error(logger.logExceptionAsError(new UnsupportedOperationException(String.format(
                "Decrypt operation is not allowed for key with id: %s", key.getId()))));
        }
        // No valid local key material; route the call to the Key Vault service.
        return cryptographyServiceClient.decrypt(decryptParameters, context);
    });
}
/**
 * Creates a signature from a digest using the configured key. The sign operation supports both asymmetric and
 * symmetric keys. This operation requires the {@code keys/sign} permission for non-local operations.
 *
 * @param algorithm The algorithm to use for signing.
 * @param digest The content from which the signature is to be created.
 * @return A {@link Mono} containing a {@link SignResult} with the created signature.
 * @throws NullPointerException If {@code algorithm} or {@code digest} is {@code null}.
 * @throws ResourceNotFoundException If the key cannot be found for signing.
 * @throws UnsupportedOperationException If the sign operation is not supported or configured on the key.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<SignResult> sign(SignatureAlgorithm algorithm, byte[] digest) {
    try {
        // Capture the subscriber context and delegate to the context-aware overload.
        return withContext(ctx -> sign(algorithm, digest, ctx));
    } catch (RuntimeException e) {
        return monoError(logger, e);
    }
}
/** Context-aware sign: validates inputs, then signs locally when possible, else via the service. */
Mono<SignResult> sign(SignatureAlgorithm algorithm, byte[] digest, Context context) {
    // Validate up front so the NPE surfaces synchronously.
    Objects.requireNonNull(algorithm, "Signature algorithm cannot be null.");
    Objects.requireNonNull(digest, "Digest content to be signed cannot be null.");
    return ensureValidKeyAvailable().flatMap(useLocal -> {
        if (useLocal) {
            if (checkKeyPermissions(this.key.getKeyOps(), KeyOperation.SIGN)) {
                return localKeyCryptographyClient.signAsync(algorithm, digest, context, key);
            }
            return Mono.error(logger.logExceptionAsError(new UnsupportedOperationException(String.format(
                "Sign operation is not allowed for key with id: %s", key.getId()))));
        }
        // No valid local key material; route the call to the Key Vault service.
        return cryptographyServiceClient.sign(algorithm, digest, context);
    });
}
/**
 * Verifies a signature against a digest using the configured key. Supported for both symmetric and asymmetric
 * keys; for asymmetric keys the public portion of the key is used. This operation requires the
 * {@code keys/verify} permission for non-local operations.
 *
 * @param algorithm The algorithm used to create the signature.
 * @param digest The content from which the signature was created.
 * @param signature The signature to be verified.
 * @return A {@link Mono} containing a {@link VerifyResult} with the verification outcome.
 * @throws NullPointerException If {@code algorithm}, {@code digest} or {@code signature} is {@code null}.
 * @throws ResourceNotFoundException If the key cannot be found for verifying.
 * @throws UnsupportedOperationException If the verify operation is not supported or configured on the key.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<VerifyResult> verify(SignatureAlgorithm algorithm, byte[] digest, byte[] signature) {
    try {
        // Capture the subscriber context and delegate to the context-aware overload.
        return withContext(ctx -> verify(algorithm, digest, signature, ctx));
    } catch (RuntimeException e) {
        return monoError(logger, e);
    }
}
/** Context-aware verify: validates inputs, then verifies locally when possible, else via the service. */
Mono<VerifyResult> verify(SignatureAlgorithm algorithm, byte[] digest, byte[] signature, Context context) {
    // Validate up front so the NPE surfaces synchronously.
    Objects.requireNonNull(algorithm, "Signature algorithm cannot be null.");
    Objects.requireNonNull(digest, "Digest content cannot be null.");
    Objects.requireNonNull(signature, "Signature to be verified cannot be null.");
    return ensureValidKeyAvailable().flatMap(useLocal -> {
        if (useLocal) {
            if (checkKeyPermissions(this.key.getKeyOps(), KeyOperation.VERIFY)) {
                return localKeyCryptographyClient.verifyAsync(algorithm, digest, signature, context, key);
            }
            return Mono.error(logger.logExceptionAsError(new UnsupportedOperationException(String.format(
                "Verify operation is not allowed for key with id: %s", key.getId()))));
        }
        // No valid local key material; route the call to the Key Vault service.
        return cryptographyServiceClient.verify(algorithm, digest, signature, context);
    });
}
/**
 * Wraps a symmetric key using the configured key. Wrapping is supported with both symmetric and asymmetric
 * keys. This operation requires the {@code keys/wrapKey} permission for non-local operations.
 *
 * @param algorithm The encryption algorithm to use for wrapping the key.
 * @param key The key content to be wrapped.
 * @return A {@link Mono} containing a {@link WrapResult} with the wrapped key.
 * @throws NullPointerException If {@code algorithm} or {@code key} are {@code null}.
 * @throws ResourceNotFoundException If the key cannot be found for wrap operation.
 * @throws UnsupportedOperationException If the wrap operation is not supported or configured on the key.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<WrapResult> wrapKey(KeyWrapAlgorithm algorithm, byte[] key) {
    try {
        // Capture the subscriber context and delegate to the context-aware overload.
        return withContext(ctx -> wrapKey(algorithm, key, ctx));
    } catch (RuntimeException e) {
        return monoError(logger, e);
    }
}
/**
 * Context-aware wrapKey. NOTE: the {@code key} parameter (content to wrap) shadows the {@code key} field
 * (the JSON Web Key), so the field is always referenced as {@code this.key} here.
 */
Mono<WrapResult> wrapKey(KeyWrapAlgorithm algorithm, byte[] key, Context context) {
    // Validate up front so the NPE surfaces synchronously.
    Objects.requireNonNull(algorithm, "Key wrap algorithm cannot be null.");
    Objects.requireNonNull(key, "Key content to be wrapped cannot be null.");
    return ensureValidKeyAvailable().flatMap(useLocal -> {
        if (useLocal) {
            if (checkKeyPermissions(this.key.getKeyOps(), KeyOperation.WRAP_KEY)) {
                return localKeyCryptographyClient.wrapKeyAsync(algorithm, key, context, this.key);
            }
            return Mono.error(logger.logExceptionAsError(new UnsupportedOperationException(String.format(
                "Wrap Key operation is not allowed for key with id: %s", this.key.getId()))));
        }
        // No valid local key material; route the call to the Key Vault service.
        return cryptographyServiceClient.wrapKey(algorithm, key, context);
    });
}
/**
 * Unwraps a symmetric key using the configured key that was originally used for wrapping it; the reverse of the
 * wrap operation. Supported for both asymmetric and symmetric keys. This operation requires the
 * {@code keys/unwrapKey} permission for non-local operations.
 *
 * @param algorithm The encryption algorithm that was used for wrapping the key.
 * @param encryptedKey The encrypted key content to unwrap.
 * @return A {@link Mono} containing an {@link UnwrapResult} with the unwrapped key.
 * @throws NullPointerException If {@code algorithm} or {@code encryptedKey} are {@code null}.
 * @throws ResourceNotFoundException If the key cannot be found for wrap operation.
 * @throws UnsupportedOperationException If the unwrap operation is not supported or configured on the key.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<UnwrapResult> unwrapKey(KeyWrapAlgorithm algorithm, byte[] encryptedKey) {
    try {
        // Capture the subscriber context and delegate to the context-aware overload.
        return withContext(ctx -> unwrapKey(algorithm, encryptedKey, ctx));
    } catch (RuntimeException e) {
        return monoError(logger, e);
    }
}
/** Context-aware unwrapKey: validates inputs, then unwraps locally when possible, else via the service. */
Mono<UnwrapResult> unwrapKey(KeyWrapAlgorithm algorithm, byte[] encryptedKey, Context context) {
    // Validate up front so the NPE surfaces synchronously.
    Objects.requireNonNull(algorithm, "Key wrap algorithm cannot be null.");
    Objects.requireNonNull(encryptedKey, "Encrypted key content to be unwrapped cannot be null.");
    return ensureValidKeyAvailable().flatMap(useLocal -> {
        if (useLocal) {
            if (checkKeyPermissions(this.key.getKeyOps(), KeyOperation.UNWRAP_KEY)) {
                return localKeyCryptographyClient.unwrapKeyAsync(algorithm, encryptedKey, context, key);
            }
            return Mono.error(logger.logExceptionAsError(new UnsupportedOperationException(String.format(
                "Unwrap Key operation is not allowed for key with id: %s", this.key.getId()))));
        }
        // No valid local key material; route the call to the Key Vault service.
        return cryptographyServiceClient.unwrapKey(algorithm, encryptedKey, context);
    });
}
/**
 * Creates a signature from raw data using the configured key; the data is digested before signing. Supported for
 * both asymmetric and symmetric keys. This operation requires the {@code keys/sign} permission for non-local
 * operations.
 *
 * @param algorithm The algorithm to use for signing.
 * @param data The content from which the signature is to be created.
 * @return A {@link Mono} containing a {@link SignResult} with the created signature.
 * @throws NullPointerException If {@code algorithm} or {@code data} is {@code null}.
 * @throws ResourceNotFoundException If the key cannot be found for signing.
 * @throws UnsupportedOperationException If the sign operation is not supported or configured on the key.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<SignResult> signData(SignatureAlgorithm algorithm, byte[] data) {
    try {
        // Capture the subscriber context and delegate to the context-aware overload.
        return withContext(ctx -> signData(algorithm, data, ctx));
    } catch (RuntimeException e) {
        return monoError(logger, e);
    }
}
/** Context-aware signData: validates inputs, then signs locally when possible, else via the service. */
Mono<SignResult> signData(SignatureAlgorithm algorithm, byte[] data, Context context) {
    // Validate up front so the NPE surfaces synchronously.
    Objects.requireNonNull(algorithm, "Signature algorithm cannot be null.");
    Objects.requireNonNull(data, "Data to be signed cannot be null.");
    return ensureValidKeyAvailable().flatMap(useLocal -> {
        if (useLocal) {
            if (checkKeyPermissions(this.key.getKeyOps(), KeyOperation.SIGN)) {
                return localKeyCryptographyClient.signDataAsync(algorithm, data, context, key);
            }
            return Mono.error(logger.logExceptionAsError(new UnsupportedOperationException(String.format(
                "Sign Operation is not allowed for key with id: %s", this.key.getId()))));
        }
        // No valid local key material; route the call to the Key Vault service.
        return cryptographyServiceClient.signData(algorithm, data, context);
    });
}
/**
 * Verifies a signature against raw data using the configured key. Supported for both symmetric and asymmetric
 * keys; for asymmetric keys the public portion of the key is used. This operation requires the
 * {@code keys/verify} permission for non-local operations.
 *
 * @param algorithm The algorithm used to create the signature.
 * @param data The raw content against which the signature is to be verified.
 * @param signature The signature to be verified.
 * @return A {@link Mono} containing a {@link VerifyResult} with the verification outcome.
 * @throws NullPointerException If {@code algorithm}, {@code data} or {@code signature} is {@code null}.
 * @throws ResourceNotFoundException If the key cannot be found for verifying.
 * @throws UnsupportedOperationException If the verify operation is not supported or configured on the key.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<VerifyResult> verifyData(SignatureAlgorithm algorithm, byte[] data, byte[] signature) {
    try {
        // Capture the subscriber context and delegate to the context-aware overload.
        return withContext(ctx -> verifyData(algorithm, data, signature, ctx));
    } catch (RuntimeException e) {
        return monoError(logger, e);
    }
}
/** Context-aware verifyData: validates inputs, then verifies locally when possible, else via the service. */
Mono<VerifyResult> verifyData(SignatureAlgorithm algorithm, byte[] data, byte[] signature, Context context) {
    // Validate up front so the NPE surfaces synchronously.
    Objects.requireNonNull(algorithm, "Signature algorithm cannot be null.");
    Objects.requireNonNull(data, "Data cannot be null.");
    Objects.requireNonNull(signature, "Signature to be verified cannot be null.");
    return ensureValidKeyAvailable().flatMap(useLocal -> {
        if (useLocal) {
            if (checkKeyPermissions(this.key.getKeyOps(), KeyOperation.VERIFY)) {
                return localKeyCryptographyClient.verifyDataAsync(algorithm, data, signature, context, key);
            }
            return Mono.error(logger.logExceptionAsError(new UnsupportedOperationException(String.format(
                "Verify operation is not allowed for key with id: %s", this.key.getId()))));
        }
        // No valid local key material; route the call to the Key Vault service.
        return cryptographyServiceClient.verifyData(algorithm, data, signature, context);
    });
}
// Returns true when the requested operation is among the key's permitted operations (JWK key_ops).
private boolean checkKeyPermissions(List<KeyOperation> operations, KeyOperation keyOperation) {
    return operations.contains(keyOperation);
}
/**
 * Ensures local key material is available, fetching it from the service on first use or when the cached key is
 * invalid.
 *
 * @return A {@link Mono} emitting {@code true} when valid local key material is available (local crypto clients
 * are then initialized), or {@code false} when operations must be served remotely.
 */
private Mono<Boolean> ensureValidKeyAvailable() {
    boolean keyNotAvailable = (key == null && keyCollection != null);
    boolean keyNotValid = (key != null && !key.isValid());
    if (!(keyNotAvailable || keyNotValid)) {
        // Cached key is present and valid (or this is a local-only client); nothing to fetch.
        return Mono.defer(() -> Mono.just(true));
    }
    // Bug fix: constant-first equals avoids an NPE when the cached key is invalid but
    // keyCollection was never set (previously keyCollection.equals(...) could throw).
    if (SECRETS_COLLECTION.equals(keyCollection)) {
        return getSecretKey().map(jsonWebKey -> {
            key = jsonWebKey;
            if (key.isValid()) {
                initializeCryptoClients();
                return true;
            }
            return false;
        });
    }
    return getKey().map(keyVaultKey -> {
        key = keyVaultKey.getKey();
        if (key.isValid()) {
            initializeCryptoClients();
            return true;
        }
        return false;
    });
}
// Package-private accessor for the service-backed client (used by sibling clients and tests).
CryptographyServiceClient getCryptographyServiceClient() {
    return cryptographyServiceClient;
}
// Package-private mutator for the service-backed client (used by sibling clients and tests).
void setCryptographyServiceClient(CryptographyServiceClient serviceClient) {
    this.cryptographyServiceClient = serviceClient;
}
} | class CryptographyAsyncClient {
// Tracing namespace attribute value for Azure Key Vault spans.
static final String KEYVAULT_TRACING_NAMESPACE_VALUE = "Microsoft.KeyVault";
// Collection segment of a Key Vault identifier indicating the key is stored as a secret.
static final String SECRETS_COLLECTION = "secrets";
// Cached key material; null until fetched (see ensureValidKeyAvailable) or supplied at construction.
JsonWebKey key;
private final ClientLogger logger = new ClientLogger(CryptographyAsyncClient.class);
// service/pipeline are null for local-only clients constructed from a JsonWebKey.
private final CryptographyService service;
private final HttpPipeline pipeline;
private final String keyId;
private CryptographyServiceClient cryptographyServiceClient;
// Set lazily by initializeCryptoClients() once valid key material is available.
private LocalKeyCryptographyClient localKeyCryptographyClient;
// Collection segment parsed from the key identifier; may be null. TODO confirm who sets this.
private String keyCollection;
/**
 * Creates a {@link CryptographyAsyncClient} that uses a given {@link HttpPipeline pipeline} to service requests.
 * Key material is not fetched here; it is retrieved lazily on the first cryptographic operation.
 *
 * @param keyId The Azure Key Vault key identifier to use for cryptography operations.
 * @param pipeline {@link HttpPipeline} that the HTTP requests and responses flow through.
 * @param version {@link CryptographyServiceVersion} of the service to be used when making requests.
 */
CryptographyAsyncClient(String keyId, HttpPipeline pipeline, CryptographyServiceVersion version) {
    // Validates the identifier before any field is assigned.
    unpackAndValidateId(keyId);
    this.keyId = keyId;
    this.pipeline = pipeline;
    this.service = RestProxy.create(CryptographyService.class, pipeline);
    this.cryptographyServiceClient = new CryptographyServiceClient(keyId, service, version);
    // No local key material yet; operations fall back to the service until it is fetched.
    this.key = null;
}
/**
 * Creates a {@link CryptographyAsyncClient} that uses a {@link JsonWebKey} to perform local cryptography
 * operations only. Service-backed members remain null, so service-only calls (e.g. getKey) are unsupported.
 *
 * @param jsonWebKey The {@link JsonWebKey} to use for local cryptography operations.
 * @throws IllegalArgumentException If the key is invalid or missing its key operations or key type.
 */
CryptographyAsyncClient(JsonWebKey jsonWebKey) {
    Objects.requireNonNull(jsonWebKey, "The JSON Web Key is required.");
    // Validation order is deliberate: validity first, then key_ops, then kty.
    if (!jsonWebKey.isValid()) {
        throw new IllegalArgumentException("The JSON Web Key is not valid.");
    }
    if (jsonWebKey.getKeyOps() == null) {
        throw new IllegalArgumentException("The JSON Web Key's key operations property is not configured.");
    }
    if (jsonWebKey.getKeyType() == null) {
        throw new IllegalArgumentException("The JSON Web Key's key type property is not configured.");
    }
    this.key = jsonWebKey;
    this.keyId = jsonWebKey.getId();
    // Local-only mode: no pipeline or service-backed client.
    this.pipeline = null;
    this.service = null;
    this.cryptographyServiceClient = null;
    initializeCryptoClients();
}
// Selects the local crypto implementation matching the key's type (RSA, EC or symmetric/OCT).
// Idempotent: returns immediately if a local client has already been created.
private void initializeCryptoClients() {
    if (localKeyCryptographyClient != null) {
        return;
    }
    if (key.getKeyType().equals(RSA) || key.getKeyType().equals(RSA_HSM)) {
        this.localKeyCryptographyClient = new RsaKeyCryptographyClient(this.key, this.cryptographyServiceClient);
    } else if (key.getKeyType().equals(EC) || key.getKeyType().equals(EC_HSM)) {
        this.localKeyCryptographyClient = new EcKeyCryptographyClient(this.key, this.cryptographyServiceClient);
    } else if (key.getKeyType().equals(OCT) || key.getKeyType().equals(OCT_HSM)) {
        this.localKeyCryptographyClient = new AesKeyCryptographyClient(this.key, this.cryptographyServiceClient);
    } else {
        // Unknown key type: local operations cannot be supported.
        throw logger.logExceptionAsError(new IllegalArgumentException(String.format(
            "The JSON Web Key type: %s is not supported.", this.key.getKeyType().toString())));
    }
}
/**
 * Gets the {@link HttpPipeline} powering this client.
 *
 * @return The pipeline, or {@code null} for a local-only client constructed from a {@link JsonWebKey}.
 */
HttpPipeline getHttpPipeline() {
    return this.pipeline;
}
/** Emits the identifier of the key backing this client. */
Mono<String> getKeyId() {
    // Defer so the value is produced at subscription time.
    return Mono.defer(() -> Mono.just(keyId));
}
/**
 * Gets the public part of the configured key. Applicable to all key types; requires the {@code keys/get}
 * permission for non-local operations.
 *
 * @return A {@link Mono} containing the requested {@link KeyVaultKey key}.
 * @throws ResourceNotFoundException When the configured key doesn't exist in the key vault.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<KeyVaultKey> getKey() {
    try {
        // Unwrap the Response-returning variant down to just the key.
        return getKeyWithResponse().flatMap(FluxUtil::toMono);
    } catch (RuntimeException e) {
        return monoError(logger, e);
    }
}
/**
 * Gets the public part of the configured key along with its full HTTP response. Applicable to all key types;
 * requires the {@code keys/get} permission for non-local operations.
 *
 * @return A {@link Mono} containing a {@link Response} whose value is the requested {@link KeyVaultKey key}.
 * @throws ResourceNotFoundException When the configured key doesn't exist in the key vault.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<KeyVaultKey>> getKeyWithResponse() {
    try {
        // Capture the subscriber context and delegate to the context-aware overload.
        return withContext(ctx -> getKeyWithResponse(ctx));
    } catch (RuntimeException e) {
        return monoError(logger, e);
    }
}
/** Context-aware getKeyWithResponse; only supported for service-backed clients. */
Mono<Response<KeyVaultKey>> getKeyWithResponse(Context context) {
    if (cryptographyServiceClient == null) {
        // Local-only clients have no service to fetch the key from.
        throw logger.logExceptionAsError(new UnsupportedOperationException(
            "Operation not supported when in operating local-only mode"));
    }
    return cryptographyServiceClient.getKey(context);
}
/** Fetches key material stored as a secret (used when the key lives in the secrets collection). */
Mono<JsonWebKey> getSecretKey() {
    try {
        return withContext(ctx -> cryptographyServiceClient.getSecretKey(ctx))
            .flatMap(FluxUtil::toMono);
    } catch (RuntimeException e) {
        return monoError(logger, e);
    }
}
/**
 * Encrypts an arbitrary sequence of bytes using the configured key. Only a single block of data may be
 * encrypted; its size depends on the target key and algorithm. Supported for both symmetric and asymmetric keys;
 * for asymmetric keys the public portion of the key is used. Requires the {@code keys/encrypt} permission for
 * non-local operations.
 *
 * @param algorithm The algorithm to be used for encryption.
 * @param plaintext The content to be encrypted.
 * @return A {@link Mono} containing an {@link EncryptResult} with the encrypted content.
 * @throws NullPointerException If {@code algorithm} or {@code plaintext} are {@code null}.
 * @throws ResourceNotFoundException If the key cannot be found for encryption.
 * @throws UnsupportedOperationException If the encrypt operation is not supported or configured on the key.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<EncryptResult> encrypt(EncryptionAlgorithm algorithm, byte[] plaintext) {
    // Fail fast on null arguments before entering the reactive pipeline.
    Objects.requireNonNull(algorithm, "'algorithm' cannot be null.");
    Objects.requireNonNull(plaintext, "'plaintext' cannot be null.");
    // Null Context: the context-aware overload tolerates it for this code path.
    return encrypt(algorithm, plaintext, null);
}
/**
 * Encrypts an arbitrary sequence of bytes using the configured key and the algorithm carried by the given
 * parameters. Only a single block of data may be encrypted; its size depends on the target key and algorithm.
 * Supported for both symmetric and asymmetric keys. Requires the {@code keys/encrypt} permission for non-local
 * operations.
 *
 * @param encryptParameters The parameters to use in the encryption operation.
 * @return A {@link Mono} containing an {@link EncryptResult} with the encrypted content.
 * @throws NullPointerException If {@code encryptParameters} is {@code null}.
 * @throws ResourceNotFoundException If the key cannot be found for encryption.
 * @throws UnsupportedOperationException If the encrypt operation is not supported or configured on the key.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<EncryptResult> encrypt(EncryptParameters encryptParameters) {
    // Fail fast on a null argument before entering the reactive pipeline.
    Objects.requireNonNull(encryptParameters, "'encryptParameters' cannot be null.");
    try {
        // Capture the subscriber context and delegate to the context-aware overload.
        return withContext(ctx -> encrypt(encryptParameters, ctx));
    } catch (RuntimeException e) {
        return monoError(logger, e);
    }
}
/** Context-aware encrypt: uses local key material when valid, otherwise calls the service. */
Mono<EncryptResult> encrypt(EncryptionAlgorithm algorithm, byte[] plaintext, Context context) {
    return ensureValidKeyAvailable().flatMap(useLocal -> {
        if (useLocal) {
            if (checkKeyPermissions(this.key.getKeyOps(), KeyOperation.ENCRYPT)) {
                return localKeyCryptographyClient.encryptAsync(algorithm, plaintext, context, key);
            }
            return Mono.error(logger.logExceptionAsError(new UnsupportedOperationException(String.format(
                "Encrypt operation is missing permission/not supported for key with id: %s", key.getId()))));
        }
        // No valid local key material; route the call to the Key Vault service.
        return cryptographyServiceClient.encrypt(algorithm, plaintext, context);
    });
}
/** Context-aware encrypt (parameter-object form): local when possible, service otherwise. */
Mono<EncryptResult> encrypt(EncryptParameters encryptParameters, Context context) {
    return ensureValidKeyAvailable().flatMap(useLocal -> {
        if (useLocal) {
            if (checkKeyPermissions(this.key.getKeyOps(), KeyOperation.ENCRYPT)) {
                return localKeyCryptographyClient.encryptAsync(encryptParameters, context, key);
            }
            return Mono.error(logger.logExceptionAsError(new UnsupportedOperationException(String.format(
                "Encrypt operation is missing permission/not supported for key with id: %s", key.getId()))));
        }
        // No valid local key material; route the call to the Key Vault service.
        return cryptographyServiceClient.encrypt(encryptParameters, context);
    });
}
/**
 * Decrypts a single block of encrypted data using the configured key and specified algorithm. Only a single
 * block of data may be decrypted; its size depends on the target key and algorithm. Supported for both
 * asymmetric and symmetric keys. Requires the {@code keys/decrypt} permission for non-local operations.
 *
 * @param algorithm The algorithm to be used for decryption.
 * @param ciphertext The content to be decrypted.
 * @return A {@link Mono} containing the decrypted blob.
 * @throws NullPointerException If {@code algorithm} or {@code ciphertext} are {@code null}.
 * @throws ResourceNotFoundException If the key cannot be found for decryption.
 * @throws UnsupportedOperationException If the decrypt operation is not supported or configured on the key.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<DecryptResult> decrypt(EncryptionAlgorithm algorithm, byte[] ciphertext) {
    Objects.requireNonNull(algorithm, "'algorithm' cannot be null.");
    // Bug fix: this line previously re-validated 'algorithm', leaving 'ciphertext' unchecked.
    Objects.requireNonNull(ciphertext, "'ciphertext' cannot be null.");
    // Null Context: the context-aware overload tolerates it for this code path.
    return decrypt(algorithm, ciphertext, null);
}
/**
 * Decrypts a single block of encrypted data using the configured key and the algorithm carried by the given
 * parameters. Only a single block may be decrypted; its size depends on the target key and algorithm. Supported
 * for both asymmetric and symmetric keys. Requires the {@code keys/decrypt} permission for non-local operations.
 *
 * @param decryptParameters The parameters to use in the decryption operation.
 * @return A {@link Mono} containing the decrypted blob.
 * @throws NullPointerException If {@code decryptParameters} is {@code null}.
 * @throws ResourceNotFoundException If the key cannot be found for decryption.
 * @throws UnsupportedOperationException If the decrypt operation is not supported or configured on the key.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<DecryptResult> decrypt(DecryptParameters decryptParameters) {
    // Fail fast on a null argument before entering the reactive pipeline.
    Objects.requireNonNull(decryptParameters, "'decryptParameters' cannot be null.");
    try {
        // Capture the subscriber context and delegate to the context-aware overload.
        return withContext(ctx -> decrypt(decryptParameters, ctx));
    } catch (RuntimeException e) {
        return monoError(logger, e);
    }
}
Mono<DecryptResult> decrypt(EncryptionAlgorithm algorithm, byte[] ciphertext, Context context) {
    // Prefer local decryption when valid key material is loaded and allows it;
    // otherwise fall back to the Key Vault service.
    return ensureValidKeyAvailable().flatMap(localKeyUsable -> {
        if (localKeyUsable) {
            if (checkKeyPermissions(this.key.getKeyOps(), KeyOperation.DECRYPT)) {
                return localKeyCryptographyClient.decryptAsync(algorithm, ciphertext, context, key);
            }
            String message =
                String.format("Decrypt operation is not allowed for key with id: %s", key.getId());
            return Mono.error(logger.logExceptionAsError(new UnsupportedOperationException(message)));
        }
        return cryptographyServiceClient.decrypt(algorithm, ciphertext, context);
    });
}
Mono<DecryptResult> decrypt(DecryptParameters decryptParameters, Context context) {
    // Same local-first strategy as the algorithm/ciphertext overload, driven by a parameter bag.
    return ensureValidKeyAvailable().flatMap(localKeyUsable -> {
        if (localKeyUsable) {
            if (checkKeyPermissions(this.key.getKeyOps(), KeyOperation.DECRYPT)) {
                return localKeyCryptographyClient.decryptAsync(decryptParameters, context, key);
            }
            String message =
                String.format("Decrypt operation is not allowed for key with id: %s", key.getId());
            return Mono.error(logger.logExceptionAsError(new UnsupportedOperationException(message)));
        }
        return cryptographyServiceClient.decrypt(decryptParameters, context);
    });
}
/**
* Creates a signature from a digest using the configured key. The sign operation supports both asymmetric and
* symmetric keys. This operation requires the {@code keys/sign} permission for non-local operations.
*
* <p>The {@link SignatureAlgorithm signature algorithm} indicates the type of algorithm to use to create the
* signature from the digest. Possible values include:
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
*
* <p><strong>Code Samples</strong></p>
* <p>Sings the digest. Subscribes to the call asynchronously and prints out the signature details when a response
* has been received.</p>
*
* {@codesnippet com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.sign
*
* @param algorithm The algorithm to use for signing.
* @param digest The content from which signature is to be created.
*
* @return A {@link Mono} containing a {@link SignResult} whose {@link SignResult
* the created signature.
*
* @throws NullPointerException If {@code algorithm} or {@code digest} is {@code null}.
* @throws ResourceNotFoundException If the key cannot be found for signing.
* @throws UnsupportedOperationException If the sign operation is not supported or configured on the key.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<SignResult> sign(SignatureAlgorithm algorithm, byte[] digest) {
    try {
        // Delegate to the Context-aware overload with the subscriber context attached.
        return withContext(ctx -> sign(algorithm, digest, ctx));
    } catch (RuntimeException e) {
        return monoError(logger, e);
    }
}
Mono<SignResult> sign(SignatureAlgorithm algorithm, byte[] digest, Context context) {
    Objects.requireNonNull(algorithm, "Signature algorithm cannot be null.");
    Objects.requireNonNull(digest, "Digest content to be signed cannot be null.");
    // Sign locally when key material is present and permits it; otherwise call the service.
    return ensureValidKeyAvailable().flatMap(localKeyUsable -> {
        if (localKeyUsable) {
            if (checkKeyPermissions(this.key.getKeyOps(), KeyOperation.SIGN)) {
                return localKeyCryptographyClient.signAsync(algorithm, digest, context, key);
            }
            String message =
                String.format("Sign operation is not allowed for key with id: %s", key.getId());
            return Mono.error(logger.logExceptionAsError(new UnsupportedOperationException(message)));
        }
        return cryptographyServiceClient.sign(algorithm, digest, context);
    });
}
/**
* Verifies a signature using the configured key. The verify operation supports both symmetric keys and asymmetric
* keys. In case of asymmetric keys public portion of the key is used to verify the signature. This operation
* requires the {@code keys/verify} permission for non-local operations.
*
* <p>The {@link SignatureAlgorithm signature algorithm} indicates the type of algorithm to use to verify the
* signature. Possible values include: {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
*
* <p><strong>Code Samples</strong></p>
* <p>Verifies the signature against the specified digest. Subscribes to the call asynchronously and prints out the
* verification details when a response has been received.</p>
*
* {@codesnippet com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.verify
*
* @param algorithm The algorithm to use for signing.
* @param digest The content from which signature was created.
* @param signature The signature to be verified.
*
* @return A {@link Mono} containing a {@link VerifyResult}
* {@link VerifyResult
*
* @throws NullPointerException If {@code algorithm}, {@code digest} or {@code signature} is {@code null}.
* @throws ResourceNotFoundException If the key cannot be found for verifying.
* @throws UnsupportedOperationException If the verify operation is not supported or configured on the key.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<VerifyResult> verify(SignatureAlgorithm algorithm, byte[] digest, byte[] signature) {
    try {
        // Delegate to the Context-aware overload with the subscriber context attached.
        return withContext(ctx -> verify(algorithm, digest, signature, ctx));
    } catch (RuntimeException e) {
        return monoError(logger, e);
    }
}
Mono<VerifyResult> verify(SignatureAlgorithm algorithm, byte[] digest, byte[] signature, Context context) {
    Objects.requireNonNull(algorithm, "Signature algorithm cannot be null.");
    Objects.requireNonNull(digest, "Digest content cannot be null.");
    Objects.requireNonNull(signature, "Signature to be verified cannot be null.");
    // Verify locally when key material is present and permits it; otherwise call the service.
    return ensureValidKeyAvailable().flatMap(localKeyUsable -> {
        if (localKeyUsable) {
            if (checkKeyPermissions(this.key.getKeyOps(), KeyOperation.VERIFY)) {
                return localKeyCryptographyClient.verifyAsync(algorithm, digest, signature, context, key);
            }
            String message =
                String.format("Verify operation is not allowed for key with id: %s", key.getId());
            return Mono.error(logger.logExceptionAsError(new UnsupportedOperationException(message)));
        }
        return cryptographyServiceClient.verify(algorithm, digest, signature, context);
    });
}
/**
* Wraps a symmetric key using the configured key. The wrap operation supports wrapping a symmetric key with both
* symmetric and asymmetric keys. This operation requires the {@code keys/wrapKey} permission for non-local
* operations.
*
* <p>The {@link KeyWrapAlgorithm wrap algorithm} indicates the type of algorithm to use for wrapping the specified
* key content. Possible values include:
* {@link KeyWrapAlgorithm
* {@link KeyWrapAlgorithm
*
* Possible values for symmetric keys include: {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
*
* <p><strong>Code Samples</strong></p>
* <p>Wraps the key content. Subscribes to the call asynchronously and prints out the wrapped key details when a
* response has been received.</p>
*
* {@codesnippet com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.wrapKey
*
* @param algorithm The encryption algorithm to use for wrapping the key.
* @param key The key content to be wrapped.
*
* @return A {@link Mono} containing a {@link WrapResult} whose {@link WrapResult
* contains the wrapped key result.
*
* @throws NullPointerException If {@code algorithm} or {@code key} are {@code null}.
* @throws ResourceNotFoundException If the key cannot be found for wrap operation.
* @throws UnsupportedOperationException If the wrap operation is not supported or configured on the key.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<WrapResult> wrapKey(KeyWrapAlgorithm algorithm, byte[] key) {
    try {
        // Delegate to the Context-aware overload with the subscriber context attached.
        return withContext(ctx -> wrapKey(algorithm, key, ctx));
    } catch (RuntimeException e) {
        return monoError(logger, e);
    }
}
Mono<WrapResult> wrapKey(KeyWrapAlgorithm algorithm, byte[] key, Context context) {
    Objects.requireNonNull(algorithm, "Key wrap algorithm cannot be null.");
    Objects.requireNonNull(key, "Key content to be wrapped cannot be null.");
    // NOTE: the 'key' parameter is the material being wrapped; 'this.key' is the wrapping key.
    return ensureValidKeyAvailable().flatMap(localKeyUsable -> {
        if (localKeyUsable) {
            if (checkKeyPermissions(this.key.getKeyOps(), KeyOperation.WRAP_KEY)) {
                return localKeyCryptographyClient.wrapKeyAsync(algorithm, key, context, this.key);
            }
            String message =
                String.format("Wrap Key operation is not allowed for key with id: %s", this.key.getId());
            return Mono.error(logger.logExceptionAsError(new UnsupportedOperationException(message)));
        }
        return cryptographyServiceClient.wrapKey(algorithm, key, context);
    });
}
/**
* Unwraps a symmetric key using the configured key that was initially used for wrapping that key. This operation
* is the reverse of the wrap operation. The unwrap operation supports asymmetric and symmetric keys to unwrap. This
* operation requires the {@code keys/unwrapKey} permission for non-local operations.
*
* <p>The {@link KeyWrapAlgorithm wrap algorithm} indicates the type of algorithm to use for unwrapping the
* specified encrypted key content. Possible values for asymmetric keys include:
* {@link KeyWrapAlgorithm
* {@link KeyWrapAlgorithm
*
* Possible values for symmetric keys include: {@link KeyWrapAlgorithm
* {@link KeyWrapAlgorithm
*
* <p><strong>Code Samples</strong></p>
* <p>Unwraps the key content. Subscribes to the call asynchronously and prints out the unwrapped key details when
* a response has been received.</p>
*
* {@codesnippet com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.unwrapKey
*
* @param algorithm The encryption algorithm to use for wrapping the key.
* @param encryptedKey The encrypted key content to unwrap.
*
* @return A {@link Mono} containing an {@link UnwrapResult} whose {@link UnwrapResult
* key} contains the unwrapped key result.
*
* @throws NullPointerException If {@code algorithm} or {@code encryptedKey} are {@code null}.
* @throws ResourceNotFoundException If the key cannot be found for wrap operation.
* @throws UnsupportedOperationException If the unwrap operation is not supported or configured on the key.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<UnwrapResult> unwrapKey(KeyWrapAlgorithm algorithm, byte[] encryptedKey) {
    try {
        // Delegate to the Context-aware overload with the subscriber context attached.
        return withContext(ctx -> unwrapKey(algorithm, encryptedKey, ctx));
    } catch (RuntimeException e) {
        return monoError(logger, e);
    }
}
Mono<UnwrapResult> unwrapKey(KeyWrapAlgorithm algorithm, byte[] encryptedKey, Context context) {
    Objects.requireNonNull(algorithm, "Key wrap algorithm cannot be null.");
    Objects.requireNonNull(encryptedKey, "Encrypted key content to be unwrapped cannot be null.");
    // Unwrap locally when key material is present and permits it; otherwise call the service.
    return ensureValidKeyAvailable().flatMap(localKeyUsable -> {
        if (localKeyUsable) {
            if (checkKeyPermissions(this.key.getKeyOps(), KeyOperation.UNWRAP_KEY)) {
                return localKeyCryptographyClient.unwrapKeyAsync(algorithm, encryptedKey, context, key);
            }
            String message =
                String.format("Unwrap Key operation is not allowed for key with id: %s", this.key.getId());
            return Mono.error(logger.logExceptionAsError(new UnsupportedOperationException(message)));
        }
        return cryptographyServiceClient.unwrapKey(algorithm, encryptedKey, context);
    });
}
/**
* Creates a signature from the raw data using the configured key. The sign data operation supports both asymmetric
* and symmetric keys. This operation requires the {@code keys/sign} permission for non-local operations.
*
* <p>The {@link SignatureAlgorithm signature algorithm} indicates the type of algorithm to use to sign the digest.
* Possible values include:
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
*
* <p><strong>Code Samples</strong></p>
* <p>Signs the raw data. Subscribes to the call asynchronously and prints out the signature details when a
* response has been received.</p>
*
* {@codesnippet com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.signData
*
* @param algorithm The algorithm to use for signing.
* @param data The content from which signature is to be created.
*
* @return A {@link Mono} containing a {@link SignResult} whose {@link SignResult
* the created signature.
*
* @throws NullPointerException If {@code algorithm} or {@code data} is {@code null}.
* @throws ResourceNotFoundException If the key cannot be found for signing.
* @throws UnsupportedOperationException If the sign operation is not supported or configured on the key.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<SignResult> signData(SignatureAlgorithm algorithm, byte[] data) {
    try {
        // Delegate to the Context-aware overload with the subscriber context attached.
        return withContext(ctx -> signData(algorithm, data, ctx));
    } catch (RuntimeException e) {
        return monoError(logger, e);
    }
}
Mono<SignResult> signData(SignatureAlgorithm algorithm, byte[] data, Context context) {
    Objects.requireNonNull(algorithm, "Signature algorithm cannot be null.");
    Objects.requireNonNull(data, "Data to be signed cannot be null.");
    // Sign the raw data locally when key material is present and permits it.
    return ensureValidKeyAvailable().flatMap(localKeyUsable -> {
        if (localKeyUsable) {
            if (checkKeyPermissions(this.key.getKeyOps(), KeyOperation.SIGN)) {
                return localKeyCryptographyClient.signDataAsync(algorithm, data, context, key);
            }
            String message =
                String.format("Sign Operation is not allowed for key with id: %s", this.key.getId());
            return Mono.error(logger.logExceptionAsError(new UnsupportedOperationException(message)));
        }
        return cryptographyServiceClient.signData(algorithm, data, context);
    });
}
/**
* Verifies a signature against the raw data using the configured key. The verify operation supports both symmetric
* keys and asymmetric keys. In case of asymmetric keys public portion of the key is used to verify the signature.
* This operation requires the {@code keys/verify} permission for non-local operations.
*
* <p>The {@link SignatureAlgorithm signature algorithm} indicates the type of algorithm to use to verify the
* signature. Possible values include:
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
*
* <p><strong>Code Samples</strong></p>
* <p>Verifies the signature against the raw data. Subscribes to the call asynchronously and prints out the
* verification details when a response has been received.</p>
*
* {@codesnippet com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.verifyData
*
* @param algorithm The algorithm to use for signing.
* @param data The raw content against which signature is to be verified.
* @param signature The signature to be verified.
*
* @return A {@link Mono} containing a {@link VerifyResult}
* {@link VerifyResult
*
* @throws NullPointerException If {@code algorithm}, {@code data} or {@code signature} is {@code null}.
* @throws ResourceNotFoundException If the key cannot be found for verifying.
* @throws UnsupportedOperationException If the verify operation is not supported or configured on the key.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<VerifyResult> verifyData(SignatureAlgorithm algorithm, byte[] data, byte[] signature) {
    try {
        // Delegate to the Context-aware overload with the subscriber context attached.
        return withContext(ctx -> verifyData(algorithm, data, signature, ctx));
    } catch (RuntimeException e) {
        return monoError(logger, e);
    }
}
Mono<VerifyResult> verifyData(SignatureAlgorithm algorithm, byte[] data, byte[] signature, Context context) {
    Objects.requireNonNull(algorithm, "Signature algorithm cannot be null.");
    Objects.requireNonNull(data, "Data cannot be null.");
    Objects.requireNonNull(signature, "Signature to be verified cannot be null.");
    // Verify against raw data locally when key material is present and permits it.
    return ensureValidKeyAvailable().flatMap(localKeyUsable -> {
        if (localKeyUsable) {
            if (checkKeyPermissions(this.key.getKeyOps(), KeyOperation.VERIFY)) {
                return localKeyCryptographyClient.verifyDataAsync(algorithm, data, signature, context, key);
            }
            String message =
                String.format("Verify operation is not allowed for key with id: %s", this.key.getId());
            return Mono.error(logger.logExceptionAsError(new UnsupportedOperationException(message)));
        }
        return cryptographyServiceClient.verifyData(algorithm, data, signature, context);
    });
}
// Returns true when the requested operation is among the key's allowed operations.
// NOTE(review): callers always pass this.key.getKeyOps(); a null list would NPE here — TODO confirm
// the key operations list is guaranteed non-null after ensureValidKeyAvailable().
private boolean checkKeyPermissions(List<KeyOperation> operations, KeyOperation keyOperation) {
    return operations.contains(keyOperation);
}
/**
 * Ensures local key material is loaded and usable, fetching it from the service on first use or
 * after it was found invalid.
 *
 * @return A {@link Mono} emitting {@code true} when local cryptography can be used, or
 * {@code false} when operations must fall back to the service client.
 */
private Mono<Boolean> ensureValidKeyAvailable() {
    boolean keyNotAvailable = (key == null && keyCollection != null);
    boolean keyNotValid = (key != null && !key.isValid());
    if (keyNotAvailable || keyNotValid) {
        // Constant-first comparison: keyCollection may be null when an invalid key was supplied
        // directly; the previous keyCollection.equals(...) would throw NPE in that case.
        if (SECRETS_COLLECTION.equals(keyCollection)) {
            // Keys stored as secrets expose their material through the secrets endpoint.
            return getSecretKey().map(jsonWebKey -> {
                key = jsonWebKey;
                if (key.isValid()) {
                    initializeCryptoClients();
                    return true;
                }
                return false;
            });
        } else {
            return getKey().map(keyVaultKey -> {
                key = keyVaultKey.getKey();
                if (key.isValid()) {
                    initializeCryptoClients();
                    return true;
                }
                return false;
            });
        }
    } else {
        return Mono.defer(() -> Mono.just(true));
    }
}
// Package-private accessor for the service client used by non-local operations (test hook).
CryptographyServiceClient getCryptographyServiceClient() {
    return cryptographyServiceClient;
}
// Package-private mutator for the service client (test hook); replaces the client wholesale.
void setCryptographyServiceClient(CryptographyServiceClient serviceClient) {
    this.cryptographyServiceClient = serviceClient;
}
} |
Great suggestion, thank you Maor! | private void unpackAndValidateId(String keyId) {
if (CoreUtils.isNullOrEmpty(keyId)) {
throw logger.logExceptionAsError(new IllegalArgumentException("'keyId' cannot be null or empty."));
}
try {
URL url = new URL(keyId);
String[] tokens = url.getPath().split("/");
String endpoint = url.getProtocol() + ":
if (url.getPort() != -1) {
endpoint += ":" + url.getPort();
}
String keyName = (tokens.length >= 3 ? tokens[2] : null);
this.keyCollection = (tokens.length >= 2 ? tokens[1] : null);
if (Strings.isNullOrEmpty(endpoint)) {
throw logger.logExceptionAsError(
new IllegalArgumentException("Key endpoint in key identifier is invalid."));
} else if (Strings.isNullOrEmpty(keyName)) {
throw logger.logExceptionAsError(
new IllegalArgumentException("Key name in key identifier is invalid."));
}
} catch (MalformedURLException e) {
throw logger.logExceptionAsError(new IllegalArgumentException("The key identifier is malformed.", e));
}
} | endpoint += ":" + url.getPort(); | private void unpackAndValidateId(String keyId) {
if (CoreUtils.isNullOrEmpty(keyId)) {
throw logger.logExceptionAsError(new IllegalArgumentException("'keyId' cannot be null or empty."));
}
try {
URL url = new URL(keyId);
String[] tokens = url.getPath().split("/");
String endpoint = url.getProtocol() + ":
if (url.getPort() != -1) {
endpoint += ":" + url.getPort();
}
String keyName = (tokens.length >= 3 ? tokens[2] : null);
this.keyCollection = (tokens.length >= 2 ? tokens[1] : null);
if (Strings.isNullOrEmpty(endpoint)) {
throw logger.logExceptionAsError(
new IllegalArgumentException("Key endpoint in key identifier is invalid."));
} else if (Strings.isNullOrEmpty(keyName)) {
throw logger.logExceptionAsError(
new IllegalArgumentException("Key name in key identifier is invalid."));
}
} catch (MalformedURLException e) {
throw logger.logExceptionAsError(new IllegalArgumentException("The key identifier is malformed.", e));
}
} | class CryptographyAsyncClient {
static final String KEYVAULT_TRACING_NAMESPACE_VALUE = "Microsoft.KeyVault";
static final String SECRETS_COLLECTION = "secrets";
JsonWebKey key;
private final ClientLogger logger = new ClientLogger(CryptographyAsyncClient.class);
private final CryptographyService service;
private final HttpPipeline pipeline;
private final String keyId;
private CryptographyServiceClient cryptographyServiceClient;
private LocalKeyCryptographyClient localKeyCryptographyClient;
private String keyCollection;
/**
* Creates a {@link CryptographyAsyncClient} that uses a given {@link HttpPipeline pipeline} to service requests.
*
* @param keyId The Azure Key Vault key identifier to use for cryptography operations.
* @param pipeline {@link HttpPipeline} that the HTTP requests and responses flow through.
* @param version {@link CryptographyServiceVersion} of the service to be used when making requests.
*/
CryptographyAsyncClient(String keyId, HttpPipeline pipeline, CryptographyServiceVersion version) {
    unpackAndValidateId(keyId); // Also captures the collection segment into keyCollection.
    this.keyId = keyId;
    this.pipeline = pipeline;
    this.service = RestProxy.create(CryptographyService.class, pipeline);
    this.cryptographyServiceClient = new CryptographyServiceClient(keyId, service, version);
    // Key material is fetched lazily by ensureValidKeyAvailable() on first local operation.
    this.key = null;
}
/**
* Creates a {@link CryptographyAsyncClient} that uses a {@link JsonWebKey} to perform local cryptography
* operations.
*
* @param jsonWebKey The {@link JsonWebKey} to use for local cryptography operations.
*/
/**
 * Creates a {@link CryptographyAsyncClient} that performs cryptography operations locally with
 * the given {@link JsonWebKey}; no service client or pipeline is configured.
 *
 * @param jsonWebKey The {@link JsonWebKey} to use for local cryptography operations.
 * @throws NullPointerException If {@code jsonWebKey} is {@code null}.
 * @throws IllegalArgumentException If the key is invalid or its key type/operations are missing.
 */
CryptographyAsyncClient(JsonWebKey jsonWebKey) {
    Objects.requireNonNull(jsonWebKey, "The JSON Web Key is required.");
    // Route all thrown exceptions through the client logger, consistent with the rest of
    // this class (previously these were thrown raw and never logged).
    if (!jsonWebKey.isValid()) {
        throw logger.logExceptionAsError(new IllegalArgumentException("The JSON Web Key is not valid."));
    }
    if (jsonWebKey.getKeyOps() == null) {
        throw logger.logExceptionAsError(
            new IllegalArgumentException("The JSON Web Key's key operations property is not configured."));
    }
    if (jsonWebKey.getKeyType() == null) {
        throw logger.logExceptionAsError(
            new IllegalArgumentException("The JSON Web Key's key type property is not configured."));
    }
    this.key = jsonWebKey;
    this.keyId = jsonWebKey.getId();
    this.pipeline = null;
    this.service = null;
    this.cryptographyServiceClient = null;
    initializeCryptoClients();
}
// Lazily creates the local cryptography client matching the key's type (RSA, EC, or AES/oct).
// Idempotent: a second call is a no-op once a client exists.
private void initializeCryptoClients() {
    if (localKeyCryptographyClient != null) {
        return;
    }
    if (key.getKeyType().equals(RSA) || key.getKeyType().equals(RSA_HSM)) {
        localKeyCryptographyClient = new RsaKeyCryptographyClient(key, cryptographyServiceClient);
    } else if (key.getKeyType().equals(EC) || key.getKeyType().equals(EC_HSM)) {
        localKeyCryptographyClient = new EcKeyCryptographyClient(key, cryptographyServiceClient);
    } else if (key.getKeyType().equals(OCT) || key.getKeyType().equals(OCT_HSM)) {
        localKeyCryptographyClient = new AesKeyCryptographyClient(key, cryptographyServiceClient);
    } else {
        String message =
            String.format("The JSON Web Key type: %s is not supported.", key.getKeyType().toString());
        throw logger.logExceptionAsError(new IllegalArgumentException(message));
    }
}
/**
* Gets the {@link HttpPipeline} powering this client.
*
* @return The pipeline.
*/
HttpPipeline getHttpPipeline() {
    // Null when the client was built from a JsonWebKey (local-only mode).
    return this.pipeline;
}
// Emits the configured key identifier; deferred so each subscription reads the current value.
Mono<String> getKeyId() {
    return Mono.defer(() -> Mono.just(this.keyId));
}
/**
* Gets the public part of the configured key. The get key operation is applicable to all key types and it requires
* the {@code keys/get} permission for non-local operations.
*
* <p><strong>Code Samples</strong></p>
* <p>Gets the configured key in the client. Subscribes to the call asynchronously and prints out the returned key
* details when a response has been received.</p>
*
* {@codesnippet com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.getKey}
*
* @return A {@link Mono} containing the requested {@link KeyVaultKey key}.
*
* @throws ResourceNotFoundException When the configured key doesn't exist in the key vault.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<KeyVaultKey> getKey() {
    try {
        // Unwrap the body of the with-response variant.
        return getKeyWithResponse().flatMap(FluxUtil::toMono);
    } catch (RuntimeException e) {
        return monoError(logger, e);
    }
}
/**
* Gets the public part of the configured key. The get key operation is applicable to all key types and it requires
* the {@code keys/get} permission for non-local operations.
*
* <p><strong>Code Samples</strong></p>
* <p>Gets the configured key in the client. Subscribes to the call asynchronously and prints out the returned key
* details when a response has been received.</p>
*
* {@codesnippet com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.getKeyWithResponse}
*
* @return A {@link Mono} containing a {@link Response} whose {@link Response
* requested {@link KeyVaultKey key}.
*
* @throws ResourceNotFoundException When the configured key doesn't exist in the key vault.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<KeyVaultKey>> getKeyWithResponse() {
    try {
        // Attach the subscriber context before delegating to the Context-aware overload.
        return withContext(ctx -> getKeyWithResponse(ctx));
    } catch (RuntimeException e) {
        return monoError(logger, e);
    }
}
Mono<Response<KeyVaultKey>> getKeyWithResponse(Context context) {
    // Guard clause: a client built from a JsonWebKey has no service client to call.
    if (cryptographyServiceClient == null) {
        throw logger.logExceptionAsError(new UnsupportedOperationException(
            "Operation not supported when in operating local-only mode"));
    }
    return cryptographyServiceClient.getKey(context);
}
// Fetches the secret backing this key and unwraps it into a JSON Web Key.
Mono<JsonWebKey> getSecretKey() {
    try {
        return withContext(ctx -> cryptographyServiceClient.getSecretKey(ctx)).flatMap(FluxUtil::toMono);
    } catch (RuntimeException e) {
        return monoError(logger, e);
    }
}
/**
* Encrypts an arbitrary sequence of bytes using the configured key. Note that the encrypt operation only supports
* a single block of data, the size of which is dependent on the target key and the encryption algorithm to be used.
* The encrypt operation is supported for both symmetric keys and asymmetric keys. In case of asymmetric keys, the
* public portion of the key is used for encryption. This operation requires the {@code keys/encrypt} permission
* for non-local operations.
*
* <p>The {@link EncryptionAlgorithm encryption algorithm} indicates the type of algorithm to use for encrypting the
* specified {@code plaintext}. Possible values for asymmetric keys include:
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
*
* Possible values for symmetric keys include: {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
*
* <p><strong>Code Samples</strong></p>
* <p>Encrypts the content. Subscribes to the call asynchronously and prints out the encrypted content details when
* a response has been received.</p>
*
* {@codesnippet com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.encrypt
*
* @param algorithm The algorithm to be used for encryption.
* @param plaintext The content to be encrypted.
*
* @return A {@link Mono} containing a {@link EncryptResult} whose {@link EncryptResult
* contains the encrypted content.
*
* @throws NullPointerException If {@code algorithm} or {@code plaintext} are {@code null}.
* @throws ResourceNotFoundException If the key cannot be found for encryption.
* @throws UnsupportedOperationException If the encrypt operation is not supported or configured on the key.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<EncryptResult> encrypt(EncryptionAlgorithm algorithm, byte[] plaintext) {
    // Validate eagerly so callers get an immediate NPE; the Context overload does the work
    // (null context lets azure-core substitute Context.NONE downstream).
    Objects.requireNonNull(algorithm, "'algorithm' cannot be null.");
    Objects.requireNonNull(plaintext, "'plaintext' cannot be null.");
    return encrypt(algorithm, plaintext, null);
}
/**
* Encrypts an arbitrary sequence of bytes using the configured key. Note that the encrypt operation only supports
* a single block of data, the size of which is dependent on the target key and the encryption algorithm to be used.
* The encrypt operation is supported for both symmetric keys and asymmetric keys. In case of asymmetric keys, the
* public portion of the key is used for encryption. This operation requires the {@code keys/encrypt} permission
* for non-local operations.
*
* <p>The {@link EncryptionAlgorithm encryption algorithm} indicates the type of algorithm to use for encrypting the
* specified {@code plaintext}. Possible values for asymmetric keys include:
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
*
* Possible values for symmetric keys include: {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
*
* <p><strong>Code Samples</strong></p>
* <p>Encrypts the content. Subscribes to the call asynchronously and prints out the encrypted content details when
* a response has been received.</p>
*
* {@codesnippet com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.encrypt
*
* @param encryptParameters The parameters to use in the encryption operation.
*
* @return A {@link Mono} containing a {@link EncryptResult} whose {@link EncryptResult
* contains the encrypted content.
*
* @throws NullPointerException If {@code algorithm} or {@code plaintext} are {@code null}.
* @throws ResourceNotFoundException If the key cannot be found for encryption.
* @throws UnsupportedOperationException If the encrypt operation is not supported or configured on the key.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<EncryptResult> encrypt(EncryptParameters encryptParameters) {
    // Fail fast on a null parameter bag before entering the reactive pipeline.
    Objects.requireNonNull(encryptParameters, "'encryptParameters' cannot be null.");
    try {
        return withContext(ctx -> encrypt(encryptParameters, ctx));
    } catch (RuntimeException e) {
        return monoError(logger, e);
    }
}
Mono<EncryptResult> encrypt(EncryptionAlgorithm algorithm, byte[] plaintext, Context context) {
    // Encrypt locally when valid key material is loaded and allows it; otherwise use the service.
    return ensureValidKeyAvailable().flatMap(localKeyUsable -> {
        if (localKeyUsable) {
            if (checkKeyPermissions(this.key.getKeyOps(), KeyOperation.ENCRYPT)) {
                return localKeyCryptographyClient.encryptAsync(algorithm, plaintext, context, key);
            }
            String message = String.format(
                "Encrypt operation is missing permission/not supported for key with id: %s", key.getId());
            return Mono.error(logger.logExceptionAsError(new UnsupportedOperationException(message)));
        }
        return cryptographyServiceClient.encrypt(algorithm, plaintext, context);
    });
}
Mono<EncryptResult> encrypt(EncryptParameters encryptParameters, Context context) {
    // Same local-first strategy as the algorithm/plaintext overload, driven by a parameter bag.
    return ensureValidKeyAvailable().flatMap(localKeyUsable -> {
        if (localKeyUsable) {
            if (checkKeyPermissions(this.key.getKeyOps(), KeyOperation.ENCRYPT)) {
                return localKeyCryptographyClient.encryptAsync(encryptParameters, context, key);
            }
            String message = String.format(
                "Encrypt operation is missing permission/not supported for key with id: %s", key.getId());
            return Mono.error(logger.logExceptionAsError(new UnsupportedOperationException(message)));
        }
        return cryptographyServiceClient.encrypt(encryptParameters, context);
    });
}
/**
* Decrypts a single block of encrypted data using the configured key and specified algorithm. Note that only a
* single block of data may be decrypted, the size of this block is dependent on the target key and the algorithm
* to be used. The decrypt operation is supported for both asymmetric and symmetric keys. This operation requires
* the {@code keys/decrypt} permission for non-local operations.
*
* <p>The {@link EncryptionAlgorithm encryption algorithm} indicates the type of algorithm to use for decrypting
* the specified encrypted content. Possible values for asymmetric keys include:
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
*
* Possible values for symmetric keys include: {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
*
* <p><strong>Code Samples</strong></p>
* <p>Decrypts the encrypted content. Subscribes to the call asynchronously and prints out the decrypted content
* details when a response has been received.</p>
*
* {@codesnippet com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.decrypt
*
* @param algorithm The algorithm to be used for decryption.
* @param ciphertext The content to be decrypted.
*
* @return A {@link Mono} containing the decrypted blob.
*
* @throws NullPointerException If {@code algorithm} or {@code ciphertext} are {@code null}.
* @throws ResourceNotFoundException If the key cannot be found for decryption.
* @throws UnsupportedOperationException If the decrypt operation is not supported or configured on the key.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<DecryptResult> decrypt(EncryptionAlgorithm algorithm, byte[] ciphertext) {
    Objects.requireNonNull(algorithm, "'algorithm' cannot be null.");
    // Bug fix: this check previously re-validated 'algorithm' while its message claimed to
    // validate 'ciphertext', so a null ciphertext was never rejected here.
    Objects.requireNonNull(ciphertext, "'ciphertext' cannot be null.");
    return decrypt(algorithm, ciphertext, null);
}
/**
 * Decrypts a single block of encrypted data using the configured key and the algorithm carried by the given
 * parameters. Only a single block of data may be decrypted; its maximum size depends on the target key and the
 * algorithm used. Supported for both asymmetric and symmetric keys. This operation requires the
 * {@code keys/decrypt} permission for non-local operations.
 *
 * @param decryptParameters The parameters to use in the decryption operation.
 *
 * @return A {@link Mono} containing the decrypted blob.
 *
 * @throws NullPointerException If {@code decryptParameters} is {@code null}.
 * @throws ResourceNotFoundException If the key cannot be found for decryption.
 * @throws UnsupportedOperationException If the decrypt operation is not supported or configured on the key.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<DecryptResult> decrypt(DecryptParameters decryptParameters) {
    Objects.requireNonNull(decryptParameters, "'decryptParameters' cannot be null.");

    try {
        // Capture the reactor context and delegate to the context-aware overload.
        return withContext(ctx -> decrypt(decryptParameters, ctx));
    } catch (RuntimeException ex) {
        return monoError(logger, ex);
    }
}
// Context-aware decrypt: prefers the local key when it is available and permits DECRYPT,
// otherwise falls back to the Key Vault service client.
Mono<DecryptResult> decrypt(EncryptionAlgorithm algorithm, byte[] ciphertext, Context context) {
    return ensureValidKeyAvailable().flatMap(localKeyUsable -> {
        if (localKeyUsable) {
            if (checkKeyPermissions(this.key.getKeyOps(), KeyOperation.DECRYPT)) {
                return localKeyCryptographyClient.decryptAsync(algorithm, ciphertext, context, key);
            }

            return Mono.error(logger.logExceptionAsError(new UnsupportedOperationException(String.format(
                "Decrypt operation is not allowed for key with id: %s", key.getId()))));
        }

        return cryptographyServiceClient.decrypt(algorithm, ciphertext, context);
    });
}
// Context-aware decrypt using rich parameters: local key first when usable, service fallback otherwise.
Mono<DecryptResult> decrypt(DecryptParameters decryptParameters, Context context) {
    return ensureValidKeyAvailable().flatMap(localKeyUsable -> {
        if (localKeyUsable) {
            if (checkKeyPermissions(this.key.getKeyOps(), KeyOperation.DECRYPT)) {
                return localKeyCryptographyClient.decryptAsync(decryptParameters, context, key);
            }

            return Mono.error(logger.logExceptionAsError(new UnsupportedOperationException(String.format(
                "Decrypt operation is not allowed for key with id: %s", key.getId()))));
        }

        return cryptographyServiceClient.decrypt(decryptParameters, context);
    });
}
/**
 * Creates a signature from a digest using the configured key. The sign operation supports both asymmetric and
 * symmetric keys. This operation requires the {@code keys/sign} permission for non-local operations.
 *
 * @param algorithm The algorithm to use for signing.
 * @param digest The content from which signature is to be created.
 *
 * @return A {@link Mono} containing a {@link SignResult} with the created signature.
 *
 * @throws NullPointerException If {@code algorithm} or {@code digest} is {@code null}.
 * @throws ResourceNotFoundException If the key cannot be found for signing.
 * @throws UnsupportedOperationException If the sign operation is not supported or configured on the key.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<SignResult> sign(SignatureAlgorithm algorithm, byte[] digest) {
    try {
        // Capture the reactor context and delegate to the context-aware overload.
        return withContext(ctx -> sign(algorithm, digest, ctx));
    } catch (RuntimeException ex) {
        return monoError(logger, ex);
    }
}
// Context-aware sign: validates inputs, then uses the local key when usable and SIGN is permitted,
// otherwise delegates to the Key Vault service.
Mono<SignResult> sign(SignatureAlgorithm algorithm, byte[] digest, Context context) {
    Objects.requireNonNull(algorithm, "Signature algorithm cannot be null.");
    Objects.requireNonNull(digest, "Digest content to be signed cannot be null.");

    return ensureValidKeyAvailable().flatMap(localKeyUsable -> {
        if (localKeyUsable) {
            if (checkKeyPermissions(this.key.getKeyOps(), KeyOperation.SIGN)) {
                return localKeyCryptographyClient.signAsync(algorithm, digest, context, key);
            }

            return Mono.error(logger.logExceptionAsError(new UnsupportedOperationException(String.format(
                "Sign operation is not allowed for key with id: %s", key.getId()))));
        }

        return cryptographyServiceClient.sign(algorithm, digest, context);
    });
}
/**
 * Verifies a signature using the configured key. The verify operation supports both symmetric keys and asymmetric
 * keys. In case of asymmetric keys the public portion of the key is used to verify the signature. This operation
 * requires the {@code keys/verify} permission for non-local operations.
 *
 * @param algorithm The algorithm to use for signing.
 * @param digest The content from which signature was created.
 * @param signature The signature to be verified.
 *
 * @return A {@link Mono} containing a {@link VerifyResult} indicating the verification outcome.
 *
 * @throws NullPointerException If {@code algorithm}, {@code digest} or {@code signature} is {@code null}.
 * @throws ResourceNotFoundException If the key cannot be found for verifying.
 * @throws UnsupportedOperationException If the verify operation is not supported or configured on the key.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<VerifyResult> verify(SignatureAlgorithm algorithm, byte[] digest, byte[] signature) {
    try {
        // Capture the reactor context and delegate to the context-aware overload.
        return withContext(ctx -> verify(algorithm, digest, signature, ctx));
    } catch (RuntimeException ex) {
        return monoError(logger, ex);
    }
}
// Context-aware verify: validates inputs, prefers the local key when usable and VERIFY is permitted,
// otherwise delegates to the Key Vault service.
Mono<VerifyResult> verify(SignatureAlgorithm algorithm, byte[] digest, byte[] signature, Context context) {
    Objects.requireNonNull(algorithm, "Signature algorithm cannot be null.");
    Objects.requireNonNull(digest, "Digest content cannot be null.");
    Objects.requireNonNull(signature, "Signature to be verified cannot be null.");

    return ensureValidKeyAvailable().flatMap(localKeyUsable -> {
        if (localKeyUsable) {
            if (checkKeyPermissions(this.key.getKeyOps(), KeyOperation.VERIFY)) {
                return localKeyCryptographyClient.verifyAsync(algorithm, digest, signature, context, key);
            }

            return Mono.error(logger.logExceptionAsError(new UnsupportedOperationException(String.format(
                "Verify operation is not allowed for key with id: %s", key.getId()))));
        }

        return cryptographyServiceClient.verify(algorithm, digest, signature, context);
    });
}
/**
 * Wraps a symmetric key using the configured key. The wrap operation supports wrapping a symmetric key with both
 * symmetric and asymmetric keys. This operation requires the {@code keys/wrapKey} permission for non-local
 * operations.
 *
 * @param algorithm The encryption algorithm to use for wrapping the key.
 * @param key The key content to be wrapped.
 *
 * @return A {@link Mono} containing a {@link WrapResult} with the wrapped key.
 *
 * @throws NullPointerException If {@code algorithm} or {@code key} are {@code null}.
 * @throws ResourceNotFoundException If the key cannot be found for wrap operation.
 * @throws UnsupportedOperationException If the wrap operation is not supported or configured on the key.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<WrapResult> wrapKey(KeyWrapAlgorithm algorithm, byte[] key) {
    try {
        // Capture the reactor context and delegate to the context-aware overload.
        return withContext(ctx -> wrapKey(algorithm, key, ctx));
    } catch (RuntimeException ex) {
        return monoError(logger, ex);
    }
}
// Context-aware wrapKey. NOTE: the 'key' parameter (raw key material) shadows the JsonWebKey field,
// so the field is always accessed as 'this.key' here.
Mono<WrapResult> wrapKey(KeyWrapAlgorithm algorithm, byte[] key, Context context) {
    Objects.requireNonNull(algorithm, "Key wrap algorithm cannot be null.");
    Objects.requireNonNull(key, "Key content to be wrapped cannot be null.");

    return ensureValidKeyAvailable().flatMap(localKeyUsable -> {
        if (localKeyUsable) {
            if (checkKeyPermissions(this.key.getKeyOps(), KeyOperation.WRAP_KEY)) {
                return localKeyCryptographyClient.wrapKeyAsync(algorithm, key, context, this.key);
            }

            return Mono.error(logger.logExceptionAsError(new UnsupportedOperationException(String.format(
                "Wrap Key operation is not allowed for key with id: %s", this.key.getId()))));
        }

        return cryptographyServiceClient.wrapKey(algorithm, key, context);
    });
}
/**
 * Unwraps a symmetric key using the configured key that was initially used for wrapping that key. This operation
 * is the reverse of the wrap operation. The unwrap operation supports asymmetric and symmetric keys to unwrap.
 * This operation requires the {@code keys/unwrapKey} permission for non-local operations.
 *
 * @param algorithm The encryption algorithm to use for wrapping the key.
 * @param encryptedKey The encrypted key content to unwrap.
 *
 * @return A {@link Mono} containing an {@link UnwrapResult} with the unwrapped key.
 *
 * @throws NullPointerException If {@code algorithm} or {@code encryptedKey} are {@code null}.
 * @throws ResourceNotFoundException If the key cannot be found for wrap operation.
 * @throws UnsupportedOperationException If the unwrap operation is not supported or configured on the key.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<UnwrapResult> unwrapKey(KeyWrapAlgorithm algorithm, byte[] encryptedKey) {
    try {
        // Capture the reactor context and delegate to the context-aware overload.
        return withContext(ctx -> unwrapKey(algorithm, encryptedKey, ctx));
    } catch (RuntimeException ex) {
        return monoError(logger, ex);
    }
}
// Context-aware unwrapKey: local key first when usable and UNWRAP_KEY is permitted, service fallback otherwise.
Mono<UnwrapResult> unwrapKey(KeyWrapAlgorithm algorithm, byte[] encryptedKey, Context context) {
    Objects.requireNonNull(algorithm, "Key wrap algorithm cannot be null.");
    Objects.requireNonNull(encryptedKey, "Encrypted key content to be unwrapped cannot be null.");

    return ensureValidKeyAvailable().flatMap(localKeyUsable -> {
        if (localKeyUsable) {
            if (checkKeyPermissions(this.key.getKeyOps(), KeyOperation.UNWRAP_KEY)) {
                return localKeyCryptographyClient.unwrapKeyAsync(algorithm, encryptedKey, context, key);
            }

            return Mono.error(logger.logExceptionAsError(new UnsupportedOperationException(String.format(
                "Unwrap Key operation is not allowed for key with id: %s", this.key.getId()))));
        }

        return cryptographyServiceClient.unwrapKey(algorithm, encryptedKey, context);
    });
}
/**
 * Creates a signature from the raw data using the configured key. The sign data operation supports both asymmetric
 * and symmetric keys. This operation requires the {@code keys/sign} permission for non-local operations.
 *
 * @param algorithm The algorithm to use for signing.
 * @param data The content from which signature is to be created.
 *
 * @return A {@link Mono} containing a {@link SignResult} with the created signature.
 *
 * @throws NullPointerException If {@code algorithm} or {@code data} is {@code null}.
 * @throws ResourceNotFoundException If the key cannot be found for signing.
 * @throws UnsupportedOperationException If the sign operation is not supported or configured on the key.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<SignResult> signData(SignatureAlgorithm algorithm, byte[] data) {
    try {
        // Capture the reactor context and delegate to the context-aware overload.
        return withContext(ctx -> signData(algorithm, data, ctx));
    } catch (RuntimeException ex) {
        return monoError(logger, ex);
    }
}
// Context-aware signData: hashes-and-signs raw data locally when the key is usable and SIGN is permitted,
// otherwise delegates to the Key Vault service.
Mono<SignResult> signData(SignatureAlgorithm algorithm, byte[] data, Context context) {
    Objects.requireNonNull(algorithm, "Signature algorithm cannot be null.");
    Objects.requireNonNull(data, "Data to be signed cannot be null.");

    return ensureValidKeyAvailable().flatMap(localKeyUsable -> {
        if (localKeyUsable) {
            if (checkKeyPermissions(this.key.getKeyOps(), KeyOperation.SIGN)) {
                return localKeyCryptographyClient.signDataAsync(algorithm, data, context, key);
            }

            return Mono.error(logger.logExceptionAsError(new UnsupportedOperationException(String.format(
                "Sign Operation is not allowed for key with id: %s", this.key.getId()))));
        }

        return cryptographyServiceClient.signData(algorithm, data, context);
    });
}
/**
 * Verifies a signature against the raw data using the configured key. The verify operation supports both symmetric
 * keys and asymmetric keys. In case of asymmetric keys the public portion of the key is used to verify the
 * signature. This operation requires the {@code keys/verify} permission for non-local operations.
 *
 * @param algorithm The algorithm to use for signing.
 * @param data The raw content against which signature is to be verified.
 * @param signature The signature to be verified.
 *
 * @return A {@link Mono} containing a {@link VerifyResult} indicating the verification outcome.
 *
 * @throws NullPointerException If {@code algorithm}, {@code data} or {@code signature} is {@code null}.
 * @throws ResourceNotFoundException If the key cannot be found for verifying.
 * @throws UnsupportedOperationException If the verify operation is not supported or configured on the key.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<VerifyResult> verifyData(SignatureAlgorithm algorithm, byte[] data, byte[] signature) {
    try {
        // Capture the reactor context and delegate to the context-aware overload.
        return withContext(ctx -> verifyData(algorithm, data, signature, ctx));
    } catch (RuntimeException ex) {
        return monoError(logger, ex);
    }
}
// Context-aware verifyData: local key first when usable and VERIFY is permitted, service fallback otherwise.
Mono<VerifyResult> verifyData(SignatureAlgorithm algorithm, byte[] data, byte[] signature, Context context) {
    Objects.requireNonNull(algorithm, "Signature algorithm cannot be null.");
    Objects.requireNonNull(data, "Data cannot be null.");
    Objects.requireNonNull(signature, "Signature to be verified cannot be null.");

    return ensureValidKeyAvailable().flatMap(localKeyUsable -> {
        if (localKeyUsable) {
            if (checkKeyPermissions(this.key.getKeyOps(), KeyOperation.VERIFY)) {
                return localKeyCryptographyClient.verifyDataAsync(algorithm, data, signature, context, key);
            }

            return Mono.error(logger.logExceptionAsError(new UnsupportedOperationException(String.format(
                "Verify operation is not allowed for key with id: %s", this.key.getId()))));
        }

        return cryptographyServiceClient.verifyData(algorithm, data, signature, context);
    });
}
// True when the requested operation appears in the key's permitted key-operations list.
private boolean checkKeyPermissions(List<KeyOperation> operations, KeyOperation keyOperation) {
    return operations.indexOf(keyOperation) >= 0;
}
/**
 * Ensures a usable local {@link JsonWebKey} is present, fetching it from the service if needed.
 *
 * @return A {@link Mono} emitting {@code true} when local crypto clients are initialized and usable,
 * {@code false} when the fetched key is not valid for local use (callers then fall back to the service).
 */
private Mono<Boolean> ensureValidKeyAvailable() {
    boolean keyNotAvailable = (key == null && keyCollection != null);
    boolean keyNotValid = (key != null && !key.isValid());

    if (!keyNotAvailable && !keyNotValid) {
        // Key already present and valid (or this client was built from a JsonWebKey) - nothing to fetch.
        return Mono.defer(() -> Mono.just(true));
    }

    // FIX: use a null-safe comparison - 'keyCollection' may be null when the client was constructed
    // directly from a JsonWebKey, and the original 'keyCollection.equals(...)' would NPE.
    Mono<JsonWebKey> webKeyMono = SECRETS_COLLECTION.equals(keyCollection)
        ? getSecretKey()
        : getKey().map(KeyVaultKey::getKey);

    // Single shared continuation replaces the two duplicated map lambdas of the original.
    return webKeyMono.map(jsonWebKey -> {
        key = jsonWebKey;

        if (key.isValid()) {
            initializeCryptoClients();
            return true;
        }

        return false;
    });
}
// Package-private accessor for the REST-backed service client.
// NOTE(review): may be null when the client was built from a JsonWebKey (local-only mode).
CryptographyServiceClient getCryptographyServiceClient() {
    return cryptographyServiceClient;
}
// Package-private mutator for the REST-backed service client (used internally/by tests to swap the transport).
void setCryptographyServiceClient(CryptographyServiceClient serviceClient) {
    this.cryptographyServiceClient = serviceClient;
}
}
class CryptographyAsyncClient {
// Tracing namespace attribute value recorded on spans for Key Vault operations.
static final String KEYVAULT_TRACING_NAMESPACE_VALUE = "Microsoft.KeyVault";
// Collection segment in a Key Vault identifier that marks a secret-backed key.
static final String SECRETS_COLLECTION = "secrets";
// Local key material; null until fetched when the client was built from a key identifier.
JsonWebKey key;
private final ClientLogger logger = new ClientLogger(CryptographyAsyncClient.class);
// REST proxy and pipeline; null in local-only mode (client built from a JsonWebKey).
private final CryptographyService service;
private final HttpPipeline pipeline;
private final String keyId;
// Remote fallback client; null in local-only mode.
private CryptographyServiceClient cryptographyServiceClient;
// Algorithm-specific local implementation (RSA/EC/AES), created once the key type is known.
private LocalKeyCryptographyClient localKeyCryptographyClient;
// Collection parsed from the key identifier ("keys" or "secrets"); null in local-only mode.
private String keyCollection;
/**
 * Creates a {@link CryptographyAsyncClient} that performs cryptography operations through the Key Vault REST
 * service, lazily downloading the key material for local operations where possible.
 *
 * @param keyId The Azure Key Vault key identifier to use for cryptography operations.
 * @param pipeline {@link HttpPipeline} that the HTTP requests and responses flow through.
 * @param version {@link CryptographyServiceVersion} of the service to be used when making requests.
 */
CryptographyAsyncClient(String keyId, HttpPipeline pipeline, CryptographyServiceVersion version) {
    // Validate the identifier (and derive the key collection) before touching any state.
    unpackAndValidateId(keyId);

    this.key = null;
    this.keyId = keyId;
    this.pipeline = pipeline;
    this.service = RestProxy.create(CryptographyService.class, pipeline);
    this.cryptographyServiceClient = new CryptographyServiceClient(keyId, service, version);
}
/**
 * Creates a {@link CryptographyAsyncClient} that performs every cryptography operation locally using the
 * provided {@link JsonWebKey}; no HTTP pipeline or service client is configured.
 *
 * @param jsonWebKey The {@link JsonWebKey} to use for local cryptography operations.
 */
CryptographyAsyncClient(JsonWebKey jsonWebKey) {
    Objects.requireNonNull(jsonWebKey, "The JSON Web Key is required.");

    // A local-only client must be handed a complete, valid key up front - there is no service to fall back to.
    if (!jsonWebKey.isValid()) {
        throw new IllegalArgumentException("The JSON Web Key is not valid.");
    }
    if (jsonWebKey.getKeyOps() == null) {
        throw new IllegalArgumentException("The JSON Web Key's key operations property is not configured.");
    }
    if (jsonWebKey.getKeyType() == null) {
        throw new IllegalArgumentException("The JSON Web Key's key type property is not configured.");
    }

    this.key = jsonWebKey;
    this.keyId = jsonWebKey.getId();
    this.pipeline = null;
    this.service = null;
    this.cryptographyServiceClient = null;

    initializeCryptoClients();
}
/**
 * Creates the algorithm-specific local crypto client matching the key's type. Idempotent: a no-op when the
 * local client is already initialized.
 *
 * @throws IllegalArgumentException If the key type is not RSA, EC or OCT (including their HSM variants).
 */
private void initializeCryptoClients() {
    if (localKeyCryptographyClient != null) {
        return; // Already initialized.
    }

    if (key.getKeyType().equals(RSA) || key.getKeyType().equals(RSA_HSM)) {
        this.localKeyCryptographyClient = new RsaKeyCryptographyClient(this.key, this.cryptographyServiceClient);
    } else if (key.getKeyType().equals(EC) || key.getKeyType().equals(EC_HSM)) {
        this.localKeyCryptographyClient = new EcKeyCryptographyClient(this.key, this.cryptographyServiceClient);
    } else if (key.getKeyType().equals(OCT) || key.getKeyType().equals(OCT_HSM)) {
        this.localKeyCryptographyClient = new AesKeyCryptographyClient(this.key, this.cryptographyServiceClient);
    } else {
        // FIX: dropped the redundant '.toString()' - '%s' formats via toString() already,
        // and the explicit call could only add an NPE failure mode.
        throw logger.logExceptionAsError(new IllegalArgumentException(String.format(
            "The JSON Web Key type: %s is not supported.", this.key.getKeyType())));
    }
}
/**
 * Gets the {@link HttpPipeline} powering this client.
 * NOTE(review): null when the client was constructed from a {@link JsonWebKey} (local-only mode).
 *
 * @return The pipeline.
 */
HttpPipeline getHttpPipeline() {
    return this.pipeline;
}
// Emits the configured key identifier; defer keeps evaluation at subscription time.
Mono<String> getKeyId() {
    return Mono.defer(() -> Mono.just(this.keyId));
}
/**
 * Gets the public part of the configured key. The get key operation is applicable to all key types and it
 * requires the {@code keys/get} permission for non-local operations.
 *
 * @return A {@link Mono} containing the requested {@link KeyVaultKey key}.
 *
 * @throws ResourceNotFoundException When the configured key doesn't exist in the key vault.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<KeyVaultKey> getKey() {
    try {
        // Unwrap the Response into its value (empty if the service returned no body).
        return getKeyWithResponse().flatMap(response -> FluxUtil.toMono(response));
    } catch (RuntimeException ex) {
        return monoError(logger, ex);
    }
}
/**
 * Gets the public part of the configured key along with the full HTTP response. The get key operation is
 * applicable to all key types and it requires the {@code keys/get} permission for non-local operations.
 *
 * @return A {@link Mono} containing a {@link Response} whose value is the requested {@link KeyVaultKey key}.
 *
 * @throws ResourceNotFoundException When the configured key doesn't exist in the key vault.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<KeyVaultKey>> getKeyWithResponse() {
    try {
        // Capture the reactor context and delegate to the context-aware overload.
        return withContext(context -> getKeyWithResponse(context));
    } catch (RuntimeException ex) {
        return monoError(logger, ex);
    }
}
// Context-aware key retrieval; only possible when a service client exists (not in local-only mode).
Mono<Response<KeyVaultKey>> getKeyWithResponse(Context context) {
    if (cryptographyServiceClient == null) {
        throw logger.logExceptionAsError(new UnsupportedOperationException(
            "Operation not supported when in operating local-only mode"));
    }

    return cryptographyServiceClient.getKey(context);
}
// Fetches the JsonWebKey for a secret-backed key ("secrets" collection) through the service client.
Mono<JsonWebKey> getSecretKey() {
    try {
        return withContext(ctx -> cryptographyServiceClient.getSecretKey(ctx)).flatMap(FluxUtil::toMono);
    } catch (RuntimeException ex) {
        return monoError(logger, ex);
    }
}
/**
 * Encrypts an arbitrary sequence of bytes using the configured key. Note that the encrypt operation only supports
 * a single block of data, the size of which is dependent on the target key and the encryption algorithm to be
 * used. The encrypt operation is supported for both symmetric keys and asymmetric keys. In case of asymmetric
 * keys, the public portion of the key is used for encryption. This operation requires the {@code keys/encrypt}
 * permission for non-local operations.
 *
 * @param algorithm The algorithm to be used for encryption.
 * @param plaintext The content to be encrypted.
 *
 * @return A {@link Mono} containing a {@link EncryptResult} with the encrypted content.
 *
 * @throws NullPointerException If {@code algorithm} or {@code plaintext} are {@code null}.
 * @throws ResourceNotFoundException If the key cannot be found for encryption.
 * @throws UnsupportedOperationException If the encrypt operation is not supported or configured on the key.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<EncryptResult> encrypt(EncryptionAlgorithm algorithm, byte[] plaintext) {
    Objects.requireNonNull(algorithm, "'algorithm' cannot be null.");
    Objects.requireNonNull(plaintext, "'plaintext' cannot be null.");
    // Null Context: the context-aware overload treats it as "no caller context".
    return encrypt(algorithm, plaintext, null);
}
/**
 * Encrypts an arbitrary sequence of bytes using the configured key and the algorithm carried by the given
 * parameters. Only a single block of data may be encrypted; its maximum size depends on the target key and the
 * algorithm used. Supported for both symmetric and asymmetric keys (public portion used for asymmetric). This
 * operation requires the {@code keys/encrypt} permission for non-local operations.
 *
 * @param encryptParameters The parameters to use in the encryption operation.
 *
 * @return A {@link Mono} containing a {@link EncryptResult} with the encrypted content.
 *
 * @throws NullPointerException If {@code encryptParameters} is {@code null}.
 * @throws ResourceNotFoundException If the key cannot be found for encryption.
 * @throws UnsupportedOperationException If the encrypt operation is not supported or configured on the key.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<EncryptResult> encrypt(EncryptParameters encryptParameters) {
    Objects.requireNonNull(encryptParameters, "'encryptParameters' cannot be null.");

    try {
        // Capture the reactor context and delegate to the context-aware overload.
        return withContext(ctx -> encrypt(encryptParameters, ctx));
    } catch (RuntimeException ex) {
        return monoError(logger, ex);
    }
}
// Context-aware encrypt: local key first when usable and ENCRYPT is permitted, service fallback otherwise.
Mono<EncryptResult> encrypt(EncryptionAlgorithm algorithm, byte[] plaintext, Context context) {
    return ensureValidKeyAvailable().flatMap(localKeyUsable -> {
        if (localKeyUsable) {
            if (checkKeyPermissions(this.key.getKeyOps(), KeyOperation.ENCRYPT)) {
                return localKeyCryptographyClient.encryptAsync(algorithm, plaintext, context, key);
            }

            return Mono.error(logger.logExceptionAsError(new UnsupportedOperationException(String.format(
                "Encrypt operation is missing permission/not supported for key with id: %s", key.getId()))));
        }

        return cryptographyServiceClient.encrypt(algorithm, plaintext, context);
    });
}
// Context-aware encrypt using rich parameters: local key first when usable, service fallback otherwise.
Mono<EncryptResult> encrypt(EncryptParameters encryptParameters, Context context) {
    return ensureValidKeyAvailable().flatMap(localKeyUsable -> {
        if (localKeyUsable) {
            if (checkKeyPermissions(this.key.getKeyOps(), KeyOperation.ENCRYPT)) {
                return localKeyCryptographyClient.encryptAsync(encryptParameters, context, key);
            }

            return Mono.error(logger.logExceptionAsError(new UnsupportedOperationException(String.format(
                "Encrypt operation is missing permission/not supported for key with id: %s", key.getId()))));
        }

        return cryptographyServiceClient.encrypt(encryptParameters, context);
    });
}
/**
 * Decrypts a single block of encrypted data using the configured key and specified algorithm. Note that only a
 * single block of data may be decrypted, the size of this block is dependent on the target key and the algorithm
 * to be used. The decrypt operation is supported for both asymmetric and symmetric keys. This operation requires
 * the {@code keys/decrypt} permission for non-local operations.
 *
 * @param algorithm The algorithm to be used for decryption.
 * @param ciphertext The content to be decrypted.
 *
 * @return A {@link Mono} containing the decrypted blob.
 *
 * @throws NullPointerException If {@code algorithm} or {@code ciphertext} are {@code null}.
 * @throws ResourceNotFoundException If the key cannot be found for decryption.
 * @throws UnsupportedOperationException If the decrypt operation is not supported or configured on the key.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<DecryptResult> decrypt(EncryptionAlgorithm algorithm, byte[] ciphertext) {
    Objects.requireNonNull(algorithm, "'algorithm' cannot be null.");
    // FIX: the original checked 'algorithm' twice and never null-checked 'ciphertext',
    // even though its message referred to 'ciphertext'.
    Objects.requireNonNull(ciphertext, "'ciphertext' cannot be null.");
    return decrypt(algorithm, ciphertext, null);
}
/**
* Decrypts a single block of encrypted data using the configured key and specified algorithm. Note that only a
* single block of data may be decrypted, the size of this block is dependent on the target key and the algorithm
* to be used. The decrypt operation is supported for both asymmetric and symmetric keys. This operation requires
* the {@code keys/decrypt} permission for non-local operations.
*
* <p>The {@link EncryptionAlgorithm encryption algorithm} indicates the type of algorithm to use for decrypting
* the specified encrypted content. Possible values for asymmetric keys include:
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
*
* Possible values for symmetric keys include: {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
*
* <p><strong>Code Samples</strong></p>
* <p>Decrypts the encrypted content. Subscribes to the call asynchronously and prints out the decrypted content
* details when a response has been received.</p>
*
* {@codesnippet com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.decrypt
*
* @param decryptParameters The parameters to use in the decryption operation.
*
* @return A {@link Mono} containing the decrypted blob.
*
* @throws NullPointerException If {@code algorithm} or {@code ciphertext} are {@code null}.
* @throws ResourceNotFoundException If the key cannot be found for decryption.
* @throws UnsupportedOperationException If the decrypt operation is not supported or configured on the key.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<DecryptResult> decrypt(DecryptParameters decryptParameters) {
Objects.requireNonNull(decryptParameters, "'decryptParameters' cannot be null.");
// Route through the Context-aware overload; any synchronous failure raised while building
// the pipeline is converted into an error signal on the returned Mono.
try {
return withContext(ctx -> decrypt(decryptParameters, ctx));
} catch (RuntimeException e) {
return monoError(logger, e);
}
}
// Context-aware decrypt. Falls back to the remote service when no valid local key is cached;
// otherwise checks the key's allowed operations before decrypting locally.
Mono<DecryptResult> decrypt(EncryptionAlgorithm algorithm, byte[] ciphertext, Context context) {
return ensureValidKeyAvailable().flatMap(available -> {
// No usable local key: let the Key Vault service perform the operation.
if (!available) {
return cryptographyServiceClient.decrypt(algorithm, ciphertext, context);
}
if (!checkKeyPermissions(this.key.getKeyOps(), KeyOperation.DECRYPT)) {
return Mono.error(logger.logExceptionAsError(new UnsupportedOperationException(String.format(
"Decrypt operation is not allowed for key with id: %s", key.getId()))));
}
return localKeyCryptographyClient.decryptAsync(algorithm, ciphertext, context, key);
});
}
// Context-aware decrypt taking full DecryptParameters (algorithm + ciphertext + optional IV/AAD/tag).
// Same local-vs-service routing as the (algorithm, ciphertext) overload above.
Mono<DecryptResult> decrypt(DecryptParameters decryptParameters, Context context) {
return ensureValidKeyAvailable().flatMap(available -> {
if (!available) {
return cryptographyServiceClient.decrypt(decryptParameters, context);
}
if (!checkKeyPermissions(this.key.getKeyOps(), KeyOperation.DECRYPT)) {
return Mono.error(logger.logExceptionAsError(new UnsupportedOperationException(String.format(
"Decrypt operation is not allowed for key with id: %s", key.getId()))));
}
return localKeyCryptographyClient.decryptAsync(decryptParameters, context, key);
});
}
/**
* Creates a signature from a digest using the configured key. The sign operation supports both asymmetric and
* symmetric keys. This operation requires the {@code keys/sign} permission for non-local operations.
*
* <p>The {@link SignatureAlgorithm signature algorithm} indicates the type of algorithm to use to create the
* signature from the digest. Possible values include:
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
*
* <p><strong>Code Samples</strong></p>
 * <p>Signs the digest. Subscribes to the call asynchronously and prints out the signature details when a response
* has been received.</p>
*
* {@codesnippet com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.sign
*
* @param algorithm The algorithm to use for signing.
* @param digest The content from which signature is to be created.
*
* @return A {@link Mono} containing a {@link SignResult} whose {@link SignResult
* the created signature.
*
* @throws NullPointerException If {@code algorithm} or {@code digest} is {@code null}.
* @throws ResourceNotFoundException If the key cannot be found for signing.
* @throws UnsupportedOperationException If the sign operation is not supported or configured on the key.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<SignResult> sign(SignatureAlgorithm algorithm, byte[] digest) {
// Defer to the Context-aware overload; synchronous failures are surfaced through the
// returned Mono rather than thrown to the caller.
try {
return withContext(ctx -> sign(algorithm, digest, ctx));
} catch (RuntimeException e) {
return monoError(logger, e);
}
}
// Context-aware sign. Validates arguments eagerly, then routes to the service when no valid
// local key is cached, or signs locally after checking the key's allowed operations.
Mono<SignResult> sign(SignatureAlgorithm algorithm, byte[] digest, Context context) {
Objects.requireNonNull(algorithm, "Signature algorithm cannot be null.");
Objects.requireNonNull(digest, "Digest content to be signed cannot be null.");
return ensureValidKeyAvailable().flatMap(available -> {
if (!available) {
return cryptographyServiceClient.sign(algorithm, digest, context);
}
if (!checkKeyPermissions(this.key.getKeyOps(), KeyOperation.SIGN)) {
return Mono.error(logger.logExceptionAsError(new UnsupportedOperationException(String.format(
"Sign operation is not allowed for key with id: %s", key.getId()))));
}
return localKeyCryptographyClient.signAsync(algorithm, digest, context, key);
});
}
/**
* Verifies a signature using the configured key. The verify operation supports both symmetric keys and asymmetric
* keys. In case of asymmetric keys public portion of the key is used to verify the signature. This operation
* requires the {@code keys/verify} permission for non-local operations.
*
* <p>The {@link SignatureAlgorithm signature algorithm} indicates the type of algorithm to use to verify the
* signature. Possible values include: {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
*
* <p><strong>Code Samples</strong></p>
* <p>Verifies the signature against the specified digest. Subscribes to the call asynchronously and prints out the
* verification details when a response has been received.</p>
*
* {@codesnippet com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.verify
*
* @param algorithm The algorithm to use for signing.
* @param digest The content from which signature was created.
* @param signature The signature to be verified.
*
* @return A {@link Mono} containing a {@link VerifyResult}
* {@link VerifyResult
*
* @throws NullPointerException If {@code algorithm}, {@code digest} or {@code signature} is {@code null}.
* @throws ResourceNotFoundException If the key cannot be found for verifying.
* @throws UnsupportedOperationException If the verify operation is not supported or configured on the key.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<VerifyResult> verify(SignatureAlgorithm algorithm, byte[] digest, byte[] signature) {
// Defer to the Context-aware overload; synchronous failures become error signals on the Mono.
try {
return withContext(ctx -> verify(algorithm, digest, signature, ctx));
} catch (RuntimeException e) {
return monoError(logger, e);
}
}
// Context-aware verify. Routes to the service when no valid local key is cached, otherwise
// verifies locally (using the public key material) after checking the allowed operations.
Mono<VerifyResult> verify(SignatureAlgorithm algorithm, byte[] digest, byte[] signature, Context context) {
Objects.requireNonNull(algorithm, "Signature algorithm cannot be null.");
Objects.requireNonNull(digest, "Digest content cannot be null.");
Objects.requireNonNull(signature, "Signature to be verified cannot be null.");
return ensureValidKeyAvailable().flatMap(available -> {
if (!available) {
return cryptographyServiceClient.verify(algorithm, digest, signature, context);
}
if (!checkKeyPermissions(this.key.getKeyOps(), KeyOperation.VERIFY)) {
return Mono.error(logger.logExceptionAsError(new UnsupportedOperationException(String.format(
"Verify operation is not allowed for key with id: %s", key.getId()))));
}
return localKeyCryptographyClient.verifyAsync(algorithm, digest, signature, context, key);
});
}
/**
* Wraps a symmetric key using the configured key. The wrap operation supports wrapping a symmetric key with both
* symmetric and asymmetric keys. This operation requires the {@code keys/wrapKey} permission for non-local
* operations.
*
* <p>The {@link KeyWrapAlgorithm wrap algorithm} indicates the type of algorithm to use for wrapping the specified
* key content. Possible values include:
* {@link KeyWrapAlgorithm
* {@link KeyWrapAlgorithm
*
* Possible values for symmetric keys include: {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
*
* <p><strong>Code Samples</strong></p>
* <p>Wraps the key content. Subscribes to the call asynchronously and prints out the wrapped key details when a
* response has been received.</p>
*
* {@codesnippet com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.wrapKey
*
* @param algorithm The encryption algorithm to use for wrapping the key.
* @param key The key content to be wrapped.
*
* @return A {@link Mono} containing a {@link WrapResult} whose {@link WrapResult
* contains the wrapped key result.
*
* @throws NullPointerException If {@code algorithm} or {@code key} are {@code null}.
* @throws ResourceNotFoundException If the key cannot be found for wrap operation.
* @throws UnsupportedOperationException If the wrap operation is not supported or configured on the key.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<WrapResult> wrapKey(KeyWrapAlgorithm algorithm, byte[] key) {
// Defer to the Context-aware overload; synchronous failures become error signals on the Mono.
try {
return withContext(ctx -> wrapKey(algorithm, key, ctx));
} catch (RuntimeException e) {
return monoError(logger, e);
}
}
// Context-aware wrapKey. Note the 'key' parameter here is the key material to wrap; 'this.key'
// is the configured wrapping key.
Mono<WrapResult> wrapKey(KeyWrapAlgorithm algorithm, byte[] key, Context context) {
Objects.requireNonNull(algorithm, "Key wrap algorithm cannot be null.");
Objects.requireNonNull(key, "Key content to be wrapped cannot be null.");
return ensureValidKeyAvailable().flatMap(available -> {
if (!available) {
return cryptographyServiceClient.wrapKey(algorithm, key, context);
}
if (!checkKeyPermissions(this.key.getKeyOps(), KeyOperation.WRAP_KEY)) {
return Mono.error(logger.logExceptionAsError(new UnsupportedOperationException(String.format(
"Wrap Key operation is not allowed for key with id: %s", this.key.getId()))));
}
return localKeyCryptographyClient.wrapKeyAsync(algorithm, key, context, this.key);
});
}
/**
* Unwraps a symmetric key using the configured key that was initially used for wrapping that key. This operation
* is the reverse of the wrap operation. The unwrap operation supports asymmetric and symmetric keys to unwrap. This
* operation requires the {@code keys/unwrapKey} permission for non-local operations.
*
* <p>The {@link KeyWrapAlgorithm wrap algorithm} indicates the type of algorithm to use for unwrapping the
* specified encrypted key content. Possible values for asymmetric keys include:
* {@link KeyWrapAlgorithm
* {@link KeyWrapAlgorithm
*
* Possible values for symmetric keys include: {@link KeyWrapAlgorithm
* {@link KeyWrapAlgorithm
*
* <p><strong>Code Samples</strong></p>
* <p>Unwraps the key content. Subscribes to the call asynchronously and prints out the unwrapped key details when
* a response has been received.</p>
*
* {@codesnippet com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.unwrapKey
*
* @param algorithm The encryption algorithm to use for wrapping the key.
* @param encryptedKey The encrypted key content to unwrap.
*
* @return A {@link Mono} containing an {@link UnwrapResult} whose {@link UnwrapResult
* key} contains the unwrapped key result.
*
* @throws NullPointerException If {@code algorithm} or {@code encryptedKey} are {@code null}.
* @throws ResourceNotFoundException If the key cannot be found for wrap operation.
* @throws UnsupportedOperationException If the unwrap operation is not supported or configured on the key.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<UnwrapResult> unwrapKey(KeyWrapAlgorithm algorithm, byte[] encryptedKey) {
// Defer to the Context-aware overload; synchronous failures become error signals on the Mono.
try {
return withContext(ctx -> unwrapKey(algorithm, encryptedKey, ctx));
} catch (RuntimeException e) {
return monoError(logger, e);
}
}
// Context-aware unwrapKey. Reverse of wrapKey: recovers the original key material using the
// configured key, locally when possible, otherwise via the service.
Mono<UnwrapResult> unwrapKey(KeyWrapAlgorithm algorithm, byte[] encryptedKey, Context context) {
Objects.requireNonNull(algorithm, "Key wrap algorithm cannot be null.");
Objects.requireNonNull(encryptedKey, "Encrypted key content to be unwrapped cannot be null.");
return ensureValidKeyAvailable().flatMap(available -> {
if (!available) {
return cryptographyServiceClient.unwrapKey(algorithm, encryptedKey, context);
}
if (!checkKeyPermissions(this.key.getKeyOps(), KeyOperation.UNWRAP_KEY)) {
return Mono.error(logger.logExceptionAsError(new UnsupportedOperationException(String.format(
"Unwrap Key operation is not allowed for key with id: %s", this.key.getId()))));
}
return localKeyCryptographyClient.unwrapKeyAsync(algorithm, encryptedKey, context, key);
});
}
/**
* Creates a signature from the raw data using the configured key. The sign data operation supports both asymmetric
* and symmetric keys. This operation requires the {@code keys/sign} permission for non-local operations.
*
* <p>The {@link SignatureAlgorithm signature algorithm} indicates the type of algorithm to use to sign the digest.
* Possible values include:
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
*
* <p><strong>Code Samples</strong></p>
* <p>Signs the raw data. Subscribes to the call asynchronously and prints out the signature details when a
* response has been received.</p>
*
* {@codesnippet com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.signData
*
* @param algorithm The algorithm to use for signing.
* @param data The content from which signature is to be created.
*
* @return A {@link Mono} containing a {@link SignResult} whose {@link SignResult
* the created signature.
*
* @throws NullPointerException If {@code algorithm} or {@code data} is {@code null}.
* @throws ResourceNotFoundException If the key cannot be found for signing.
* @throws UnsupportedOperationException If the sign operation is not supported or configured on the key.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<SignResult> signData(SignatureAlgorithm algorithm, byte[] data) {
// Defer to the Context-aware overload; synchronous failures become error signals on the Mono.
try {
return withContext(ctx -> signData(algorithm, data, ctx));
} catch (RuntimeException e) {
return monoError(logger, e);
}
}
// Context-aware signData. Unlike sign(), this takes raw data (the digest is produced as part
// of the operation) — routing logic is otherwise identical.
Mono<SignResult> signData(SignatureAlgorithm algorithm, byte[] data, Context context) {
Objects.requireNonNull(algorithm, "Signature algorithm cannot be null.");
Objects.requireNonNull(data, "Data to be signed cannot be null.");
return ensureValidKeyAvailable().flatMap(available -> {
if (!available) {
return cryptographyServiceClient.signData(algorithm, data, context);
}
if (!checkKeyPermissions(this.key.getKeyOps(), KeyOperation.SIGN)) {
return Mono.error(logger.logExceptionAsError(new UnsupportedOperationException(String.format(
"Sign Operation is not allowed for key with id: %s", this.key.getId()))));
}
return localKeyCryptographyClient.signDataAsync(algorithm, data, context, key);
});
}
/**
* Verifies a signature against the raw data using the configured key. The verify operation supports both symmetric
* keys and asymmetric keys. In case of asymmetric keys public portion of the key is used to verify the signature.
* This operation requires the {@code keys/verify} permission for non-local operations.
*
* <p>The {@link SignatureAlgorithm signature algorithm} indicates the type of algorithm to use to verify the
* signature. Possible values include:
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
*
* <p><strong>Code Samples</strong></p>
* <p>Verifies the signature against the raw data. Subscribes to the call asynchronously and prints out the
* verification details when a response has been received.</p>
*
* {@codesnippet com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.verifyData
*
* @param algorithm The algorithm to use for signing.
* @param data The raw content against which signature is to be verified.
* @param signature The signature to be verified.
*
* @return A {@link Mono} containing a {@link VerifyResult}
* {@link VerifyResult
*
* @throws NullPointerException If {@code algorithm}, {@code data} or {@code signature} is {@code null}.
* @throws ResourceNotFoundException If the key cannot be found for verifying.
* @throws UnsupportedOperationException If the verify operation is not supported or configured on the key.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<VerifyResult> verifyData(SignatureAlgorithm algorithm, byte[] data, byte[] signature) {
// Defer to the Context-aware overload; synchronous failures become error signals on the Mono.
try {
return withContext(ctx -> verifyData(algorithm, data, signature, ctx));
} catch (RuntimeException e) {
return monoError(logger, e);
}
}
// Context-aware verifyData. Verifies a signature against raw data (not a precomputed digest);
// routing mirrors verify().
Mono<VerifyResult> verifyData(SignatureAlgorithm algorithm, byte[] data, byte[] signature, Context context) {
Objects.requireNonNull(algorithm, "Signature algorithm cannot be null.");
Objects.requireNonNull(data, "Data cannot be null.");
Objects.requireNonNull(signature, "Signature to be verified cannot be null.");
return ensureValidKeyAvailable().flatMap(available -> {
if (!available) {
return cryptographyServiceClient.verifyData(algorithm, data, signature, context);
}
if (!checkKeyPermissions(this.key.getKeyOps(), KeyOperation.VERIFY)) {
return Mono.error(logger.logExceptionAsError(new UnsupportedOperationException(String.format(
"Verify operation is not allowed for key with id: %s", this.key.getId()))));
}
return localKeyCryptographyClient.verifyDataAsync(algorithm, data, signature, context, key);
});
}
// True when the requested operation appears in the key's allowed key-operations list.
private boolean checkKeyPermissions(List<KeyOperation> operations, KeyOperation keyOperation) {
return operations.stream().anyMatch(keyOperation::equals);
}
/**
 * Ensures a valid {@link JsonWebKey} is cached locally, fetching it from the service when it is
 * missing or invalid. Returns {@code true} when local cryptography is possible afterwards,
 * {@code false} when operations must be serviced remotely.
 */
private Mono<Boolean> ensureValidKeyAvailable() {
boolean keyNotAvailable = (key == null && keyCollection != null);
boolean keyNotValid = (key != null && !key.isValid());
if (keyNotAvailable || keyNotValid) {
// Constant-first comparison: when the cached key is invalid and keyCollection is null
// (local-only construction), the original keyCollection.equals(...) call threw a
// NullPointerException; this form falls through to the keys-collection path instead.
if (SECRETS_COLLECTION.equals(keyCollection)) {
return getSecretKey().map(jsonWebKey -> {
key = (jsonWebKey);
if (key.isValid()) {
// Key material is usable locally; pick the matching local crypto client.
initializeCryptoClients();
return true;
} else {
return false;
}
});
} else {
return getKey().map(keyVaultKey -> {
key = (keyVaultKey.getKey());
if (key.isValid()) {
initializeCryptoClients();
return true;
} else {
return false;
}
});
}
} else {
// Defer so subscribers always observe the state at subscription time.
return Mono.defer(() -> Mono.just(true));
}
}
// Package-private accessor for the service-backed client (may be null in local-only mode).
CryptographyServiceClient getCryptographyServiceClient() {
return cryptographyServiceClient;
}
// Package-private mutator — presumably used by builders/tests to inject a service client; TODO confirm callers.
void setCryptographyServiceClient(CryptographyServiceClient serviceClient) {
this.cryptographyServiceClient = serviceClient;
}
} |
@maorleger Now that I think about it, `getAuthority()` also includes username and password information if present on the URL, would we want to include any of that in the key identifier in case somebody actually includes it? | private void unpackAndValidateId(String keyId) {
if (CoreUtils.isNullOrEmpty(keyId)) {
throw logger.logExceptionAsError(new IllegalArgumentException("'keyId' cannot be null or empty."));
}
try {
URL url = new URL(keyId);
String[] tokens = url.getPath().split("/");
String endpoint = url.getProtocol() + ":
if (url.getPort() != -1) {
endpoint += ":" + url.getPort();
}
String keyName = (tokens.length >= 3 ? tokens[2] : null);
this.keyCollection = (tokens.length >= 2 ? tokens[1] : null);
if (Strings.isNullOrEmpty(endpoint)) {
throw logger.logExceptionAsError(
new IllegalArgumentException("Key endpoint in key identifier is invalid."));
} else if (Strings.isNullOrEmpty(keyName)) {
throw logger.logExceptionAsError(
new IllegalArgumentException("Key name in key identifier is invalid."));
}
} catch (MalformedURLException e) {
throw logger.logExceptionAsError(new IllegalArgumentException("The key identifier is malformed.", e));
}
} | endpoint += ":" + url.getPort(); | private void unpackAndValidateId(String keyId) {
if (CoreUtils.isNullOrEmpty(keyId)) {
throw logger.logExceptionAsError(new IllegalArgumentException("'keyId' cannot be null or empty."));
}
try {
URL url = new URL(keyId);
String[] tokens = url.getPath().split("/");
String endpoint = url.getProtocol() + ":
if (url.getPort() != -1) {
endpoint += ":" + url.getPort();
}
String keyName = (tokens.length >= 3 ? tokens[2] : null);
this.keyCollection = (tokens.length >= 2 ? tokens[1] : null);
if (Strings.isNullOrEmpty(endpoint)) {
throw logger.logExceptionAsError(
new IllegalArgumentException("Key endpoint in key identifier is invalid."));
} else if (Strings.isNullOrEmpty(keyName)) {
throw logger.logExceptionAsError(
new IllegalArgumentException("Key name in key identifier is invalid."));
}
} catch (MalformedURLException e) {
throw logger.logExceptionAsError(new IllegalArgumentException("The key identifier is malformed.", e));
}
} | class CryptographyAsyncClient {
static final String KEYVAULT_TRACING_NAMESPACE_VALUE = "Microsoft.KeyVault";
static final String SECRETS_COLLECTION = "secrets";
JsonWebKey key;
private final ClientLogger logger = new ClientLogger(CryptographyAsyncClient.class);
private final CryptographyService service;
private final HttpPipeline pipeline;
private final String keyId;
private CryptographyServiceClient cryptographyServiceClient;
private LocalKeyCryptographyClient localKeyCryptographyClient;
private String keyCollection;
/**
* Creates a {@link CryptographyAsyncClient} that uses a given {@link HttpPipeline pipeline} to service requests.
*
* @param keyId The Azure Key Vault key identifier to use for cryptography operations.
* @param pipeline {@link HttpPipeline} that the HTTP requests and responses flow through.
* @param version {@link CryptographyServiceVersion} of the service to be used when making requests.
*/
CryptographyAsyncClient(String keyId, HttpPipeline pipeline, CryptographyServiceVersion version) {
// Validates the identifier and populates keyCollection as a side effect.
unpackAndValidateId(keyId);
this.keyId = keyId;
this.pipeline = pipeline;
this.service = RestProxy.create(CryptographyService.class, pipeline);
this.cryptographyServiceClient = new CryptographyServiceClient(keyId, service, version);
// Key material is fetched lazily (see ensureValidKeyAvailable) rather than at construction.
this.key = null;
}
/**
* Creates a {@link CryptographyAsyncClient} that uses a {@link JsonWebKey} to perform local cryptography
* operations.
*
* @param jsonWebKey The {@link JsonWebKey} to use for local cryptography operations.
*/
CryptographyAsyncClient(JsonWebKey jsonWebKey) {
Objects.requireNonNull(jsonWebKey, "The JSON Web Key is required.");
// Log-and-throw via the ClientLogger for consistency with every other throw in this class.
if (!jsonWebKey.isValid()) {
throw logger.logExceptionAsError(new IllegalArgumentException("The JSON Web Key is not valid."));
}
if (jsonWebKey.getKeyOps() == null) {
throw logger.logExceptionAsError(
new IllegalArgumentException("The JSON Web Key's key operations property is not configured."));
}
if (jsonWebKey.getKeyType() == null) {
throw logger.logExceptionAsError(
new IllegalArgumentException("The JSON Web Key's key type property is not configured."));
}
this.key = jsonWebKey;
this.keyId = jsonWebKey.getId();
// Local-only mode: no HTTP pipeline or service client; remote operations will be unsupported.
this.pipeline = null;
this.service = null;
this.cryptographyServiceClient = null;
initializeCryptoClients();
}
// Selects the local cryptography client implementation matching the key's type.
// Idempotent: a no-op once a local client has been created.
private void initializeCryptoClients() {
if (localKeyCryptographyClient != null) {
return;
}
if (key.getKeyType().equals(RSA) || key.getKeyType().equals(RSA_HSM)) {
this.localKeyCryptographyClient = new RsaKeyCryptographyClient(this.key, this.cryptographyServiceClient);
} else if (key.getKeyType().equals(EC) || key.getKeyType().equals(EC_HSM)) {
this.localKeyCryptographyClient = new EcKeyCryptographyClient(this.key, this.cryptographyServiceClient);
} else if (key.getKeyType().equals(OCT) || key.getKeyType().equals(OCT_HSM)) {
this.localKeyCryptographyClient = new AesKeyCryptographyClient(this.key, this.cryptographyServiceClient);
} else {
throw logger.logExceptionAsError(new IllegalArgumentException(String.format(
"The JSON Web Key type: %s is not supported.", this.key.getKeyType().toString())));
}
}
/**
* Gets the {@link HttpPipeline} powering this client.
*
* @return The pipeline.
*/
HttpPipeline getHttpPipeline() {
// Null when the client was constructed from a JsonWebKey (local-only mode).
return this.pipeline;
}
Mono<String> getKeyId() {
// Deferred so the identifier is read at subscription time.
return Mono.defer(() -> Mono.just(this.keyId));
}
/**
* Gets the public part of the configured key. The get key operation is applicable to all key types and it requires
* the {@code keys/get} permission for non-local operations.
*
* <p><strong>Code Samples</strong></p>
* <p>Gets the configured key in the client. Subscribes to the call asynchronously and prints out the returned key
* details when a response has been received.</p>
*
* {@codesnippet com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.getKey}
*
* @return A {@link Mono} containing the requested {@link KeyVaultKey key}.
*
* @throws ResourceNotFoundException When the configured key doesn't exist in the key vault.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<KeyVaultKey> getKey() {
try {
// Delegate to the Response-returning variant and unwrap the value.
return getKeyWithResponse().flatMap(FluxUtil::toMono);
} catch (RuntimeException ex) {
return monoError(logger, ex);
}
}
/**
* Gets the public part of the configured key. The get key operation is applicable to all key types and it requires
* the {@code keys/get} permission for non-local operations.
*
* <p><strong>Code Samples</strong></p>
* <p>Gets the configured key in the client. Subscribes to the call asynchronously and prints out the returned key
* details when a response has been received.</p>
*
* {@codesnippet com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.getKeyWithResponse}
*
* @return A {@link Mono} containing a {@link Response} whose {@link Response
* requested {@link KeyVaultKey key}.
*
* @throws ResourceNotFoundException When the configured key doesn't exist in the key vault.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<KeyVaultKey>> getKeyWithResponse() {
try {
// Capture the reactor Context and delegate to the Context-aware variant.
return withContext(this::getKeyWithResponse);
} catch (RuntimeException ex) {
return monoError(logger, ex);
}
}
// Fetching the key requires a service-backed client; a client built from a JsonWebKey
// (local-only mode) has no service connection and cannot support this.
Mono<Response<KeyVaultKey>> getKeyWithResponse(Context context) {
if (cryptographyServiceClient != null) {
return cryptographyServiceClient.getKey(context);
} else {
throw logger.logExceptionAsError(new UnsupportedOperationException(
"Operation not supported when in operating local-only mode"));
}
}
// Fetches key material stored in the secrets collection and unwraps it to a JsonWebKey.
Mono<JsonWebKey> getSecretKey() {
try {
return withContext(context -> cryptographyServiceClient.getSecretKey(context))
.flatMap(FluxUtil::toMono);
} catch (RuntimeException ex) {
return monoError(logger, ex);
}
}
/**
* Encrypts an arbitrary sequence of bytes using the configured key. Note that the encrypt operation only supports
* a single block of data, the size of which is dependent on the target key and the encryption algorithm to be used.
* The encrypt operation is supported for both symmetric keys and asymmetric keys. In case of asymmetric keys, the
* public portion of the key is used for encryption. This operation requires the {@code keys/encrypt} permission
* for non-local operations.
*
* <p>The {@link EncryptionAlgorithm encryption algorithm} indicates the type of algorithm to use for encrypting the
* specified {@code plaintext}. Possible values for asymmetric keys include:
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
*
* Possible values for symmetric keys include: {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
*
* <p><strong>Code Samples</strong></p>
* <p>Encrypts the content. Subscribes to the call asynchronously and prints out the encrypted content details when
* a response has been received.</p>
*
* {@codesnippet com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.encrypt
*
* @param algorithm The algorithm to be used for encryption.
* @param plaintext The content to be encrypted.
*
* @return A {@link Mono} containing a {@link EncryptResult} whose {@link EncryptResult
* contains the encrypted content.
*
* @throws NullPointerException If {@code algorithm} or {@code plaintext} are {@code null}.
* @throws ResourceNotFoundException If the key cannot be found for encryption.
* @throws UnsupportedOperationException If the encrypt operation is not supported or configured on the key.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<EncryptResult> encrypt(EncryptionAlgorithm algorithm, byte[] plaintext) {
Objects.requireNonNull(algorithm, "'algorithm' cannot be null.");
Objects.requireNonNull(plaintext, "'plaintext' cannot be null.");
// Delegate to the Context-aware overload with no caller-provided Context.
return encrypt(algorithm, plaintext, null);
}
/**
* Encrypts an arbitrary sequence of bytes using the configured key. Note that the encrypt operation only supports
* a single block of data, the size of which is dependent on the target key and the encryption algorithm to be used.
* The encrypt operation is supported for both symmetric keys and asymmetric keys. In case of asymmetric keys, the
* public portion of the key is used for encryption. This operation requires the {@code keys/encrypt} permission
* for non-local operations.
*
* <p>The {@link EncryptionAlgorithm encryption algorithm} indicates the type of algorithm to use for encrypting the
* specified {@code plaintext}. Possible values for asymmetric keys include:
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
*
* Possible values for symmetric keys include: {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
*
* <p><strong>Code Samples</strong></p>
* <p>Encrypts the content. Subscribes to the call asynchronously and prints out the encrypted content details when
* a response has been received.</p>
*
* {@codesnippet com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.encrypt
*
* @param encryptParameters The parameters to use in the encryption operation.
*
* @return A {@link Mono} containing a {@link EncryptResult} whose {@link EncryptResult
* contains the encrypted content.
*
* @throws NullPointerException If {@code algorithm} or {@code plaintext} are {@code null}.
* @throws ResourceNotFoundException If the key cannot be found for encryption.
* @throws UnsupportedOperationException If the encrypt operation is not supported or configured on the key.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<EncryptResult> encrypt(EncryptParameters encryptParameters) {
Objects.requireNonNull(encryptParameters, "'encryptParameters' cannot be null.");
try {
// Capture the reactor Context and delegate; synchronous failures become Mono errors.
return withContext(context -> encrypt(encryptParameters, context));
} catch (RuntimeException ex) {
return monoError(logger, ex);
}
}
// Context-aware encrypt. Falls back to the remote service when no valid local key is cached;
// otherwise checks the key's allowed operations before encrypting locally.
Mono<EncryptResult> encrypt(EncryptionAlgorithm algorithm, byte[] plaintext, Context context) {
return ensureValidKeyAvailable().flatMap(available -> {
if (!available) {
return cryptographyServiceClient.encrypt(algorithm, plaintext, context);
}
if (!checkKeyPermissions(this.key.getKeyOps(), KeyOperation.ENCRYPT)) {
return Mono.error(logger.logExceptionAsError(new UnsupportedOperationException(String.format(
"Encrypt operation is missing permission/not supported for key with id: %s", key.getId()))));
}
return localKeyCryptographyClient.encryptAsync(algorithm, plaintext, context, key);
});
}
// Context-aware encrypt taking full EncryptParameters (algorithm + plaintext + optional IV/AAD).
// Same local-vs-service routing as the (algorithm, plaintext) overload above.
Mono<EncryptResult> encrypt(EncryptParameters encryptParameters, Context context) {
return ensureValidKeyAvailable().flatMap(available -> {
if (!available) {
return cryptographyServiceClient.encrypt(encryptParameters, context);
}
if (!checkKeyPermissions(this.key.getKeyOps(), KeyOperation.ENCRYPT)) {
return Mono.error(logger.logExceptionAsError(new UnsupportedOperationException(String.format(
"Encrypt operation is missing permission/not supported for key with id: %s", key.getId()))));
}
return localKeyCryptographyClient.encryptAsync(encryptParameters, context, key);
});
}
/**
* Decrypts a single block of encrypted data using the configured key and specified algorithm. Note that only a
* single block of data may be decrypted, the size of this block is dependent on the target key and the algorithm
* to be used. The decrypt operation is supported for both asymmetric and symmetric keys. This operation requires
* the {@code keys/decrypt} permission for non-local operations.
*
* <p>The {@link EncryptionAlgorithm encryption algorithm} indicates the type of algorithm to use for decrypting
* the specified encrypted content. Possible values for asymmetric keys include:
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
*
* Possible values for symmetric keys include: {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
*
* <p><strong>Code Samples</strong></p>
* <p>Decrypts the encrypted content. Subscribes to the call asynchronously and prints out the decrypted content
* details when a response has been received.</p>
*
* {@codesnippet com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.decrypt
*
* @param algorithm The algorithm to be used for decryption.
* @param ciphertext The content to be decrypted.
*
* @return A {@link Mono} containing the decrypted blob.
*
* @throws NullPointerException If {@code algorithm} or {@code ciphertext} are {@code null}.
* @throws ResourceNotFoundException If the key cannot be found for decryption.
* @throws UnsupportedOperationException If the decrypt operation is not supported or configured on the key.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<DecryptResult> decrypt(EncryptionAlgorithm algorithm, byte[] ciphertext) {
    Objects.requireNonNull(algorithm, "'algorithm' cannot be null.");
    // Fix: this previously re-validated 'algorithm' (copy/paste bug), so a null 'ciphertext' slipped
    // through here and failed later with a less helpful error.
    Objects.requireNonNull(ciphertext, "'ciphertext' cannot be null.");

    // A null Context is tolerated by the package-private overload.
    return decrypt(algorithm, ciphertext, null);
}
/**
* Decrypts a single block of encrypted data using the configured key and specified algorithm. Note that only a
* single block of data may be decrypted, the size of this block is dependent on the target key and the algorithm
* to be used. The decrypt operation is supported for both asymmetric and symmetric keys. This operation requires
* the {@code keys/decrypt} permission for non-local operations.
*
* <p>The {@link EncryptionAlgorithm encryption algorithm} indicates the type of algorithm to use for decrypting
* the specified encrypted content. Possible values for asymmetric keys include:
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
*
* Possible values for symmetric keys include: {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
*
* <p><strong>Code Samples</strong></p>
* <p>Decrypts the encrypted content. Subscribes to the call asynchronously and prints out the decrypted content
* details when a response has been received.</p>
*
* {@codesnippet com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.decrypt
*
* @param decryptParameters The parameters to use in the decryption operation.
*
* @return A {@link Mono} containing the decrypted blob.
*
* @throws NullPointerException If {@code decryptParameters} is {@code null}.
* @throws ResourceNotFoundException If the key cannot be found for decryption.
* @throws UnsupportedOperationException If the decrypt operation is not supported or configured on the key.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<DecryptResult> decrypt(DecryptParameters decryptParameters) {
    // Fail fast at assembly time instead of deferring the NPE to subscription time.
    Objects.requireNonNull(decryptParameters, "'decryptParameters' cannot be null.");
    try {
        // withContext captures the subscriber's Reactor context and forwards it to the service call.
        return withContext(context -> decrypt(decryptParameters, context));
    } catch (RuntimeException ex) {
        // Surface synchronous failures through the returned Mono rather than throwing to the caller.
        return monoError(logger, ex);
    }
}
/**
 * Decrypts the given ciphertext, preferring local key material when it is available and permits the
 * decrypt operation; otherwise the call is delegated to the Key Vault service.
 */
Mono<DecryptResult> decrypt(EncryptionAlgorithm algorithm, byte[] ciphertext, Context context) {
    return ensureValidKeyAvailable().flatMap(hasLocalKey -> {
        if (!hasLocalKey) {
            // No usable local key material: let the service perform the decryption.
            return cryptographyServiceClient.decrypt(algorithm, ciphertext, context);
        }

        boolean decryptAllowed = checkKeyPermissions(this.key.getKeyOps(), KeyOperation.DECRYPT);

        if (!decryptAllowed) {
            return Mono.error(logger.logExceptionAsError(new UnsupportedOperationException(String.format(
                "Decrypt operation is not allowed for key with id: %s", key.getId()))));
        }

        return localKeyCryptographyClient.decryptAsync(algorithm, ciphertext, context, key);
    });
}
Mono<DecryptResult> decrypt(DecryptParameters decryptParameters, Context context) {
    return ensureValidKeyAvailable().flatMap(available -> {
        // No valid local key material: delegate the whole operation to the Key Vault service.
        if (!available) {
            return cryptographyServiceClient.decrypt(decryptParameters, context);
        }
        // The cached key must explicitly permit the decrypt operation.
        if (!checkKeyPermissions(this.key.getKeyOps(), KeyOperation.DECRYPT)) {
            return Mono.error(logger.logExceptionAsError(new UnsupportedOperationException(String.format(
                "Decrypt operation is not allowed for key with id: %s", key.getId()))));
        }
        // Decrypt locally using the cached key material.
        return localKeyCryptographyClient.decryptAsync(decryptParameters, context, key);
    });
}
/**
* Creates a signature from a digest using the configured key. The sign operation supports both asymmetric and
* symmetric keys. This operation requires the {@code keys/sign} permission for non-local operations.
*
* <p>The {@link SignatureAlgorithm signature algorithm} indicates the type of algorithm to use to create the
* signature from the digest. Possible values include:
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
*
* <p><strong>Code Samples</strong></p>
* <p>Signs the digest. Subscribes to the call asynchronously and prints out the signature details when a response
* has been received.</p>
*
* {@codesnippet com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.sign
*
* @param algorithm The algorithm to use for signing.
* @param digest The content from which signature is to be created.
*
* @return A {@link Mono} containing a {@link SignResult} whose {@link SignResult
* the created signature.
*
* @throws NullPointerException If {@code algorithm} or {@code digest} is {@code null}.
* @throws ResourceNotFoundException If the key cannot be found for signing.
* @throws UnsupportedOperationException If the sign operation is not supported or configured on the key.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<SignResult> sign(SignatureAlgorithm algorithm, byte[] digest) {
    try {
        // Argument null-checks happen in the Context overload; this wrapper only captures the
        // subscriber's Reactor context.
        return withContext(context -> sign(algorithm, digest, context));
    } catch (RuntimeException ex) {
        // Surface synchronous failures through the returned Mono.
        return monoError(logger, ex);
    }
}
/**
 * Signs the digest, preferring local key material when it is available and permits the sign operation;
 * otherwise the call is delegated to the Key Vault service.
 */
Mono<SignResult> sign(SignatureAlgorithm algorithm, byte[] digest, Context context) {
    Objects.requireNonNull(algorithm, "Signature algorithm cannot be null.");
    Objects.requireNonNull(digest, "Digest content to be signed cannot be null.");

    return ensureValidKeyAvailable().flatMap(hasLocalKey -> {
        if (!hasLocalKey) {
            // No usable local key material: let the service perform the signing.
            return cryptographyServiceClient.sign(algorithm, digest, context);
        }

        boolean signingAllowed = checkKeyPermissions(this.key.getKeyOps(), KeyOperation.SIGN);

        if (!signingAllowed) {
            return Mono.error(logger.logExceptionAsError(new UnsupportedOperationException(String.format(
                "Sign operation is not allowed for key with id: %s", key.getId()))));
        }

        return localKeyCryptographyClient.signAsync(algorithm, digest, context, key);
    });
}
/**
* Verifies a signature using the configured key. The verify operation supports both symmetric keys and asymmetric
* keys. In case of asymmetric keys public portion of the key is used to verify the signature. This operation
* requires the {@code keys/verify} permission for non-local operations.
*
* <p>The {@link SignatureAlgorithm signature algorithm} indicates the type of algorithm to use to verify the
* signature. Possible values include: {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
*
* <p><strong>Code Samples</strong></p>
* <p>Verifies the signature against the specified digest. Subscribes to the call asynchronously and prints out the
* verification details when a response has been received.</p>
*
* {@codesnippet com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.verify
*
* @param algorithm The algorithm to use for signing.
* @param digest The content from which signature was created.
* @param signature The signature to be verified.
*
* @return A {@link Mono} containing a {@link VerifyResult}
* {@link VerifyResult
*
* @throws NullPointerException If {@code algorithm}, {@code digest} or {@code signature} is {@code null}.
* @throws ResourceNotFoundException If the key cannot be found for verifying.
* @throws UnsupportedOperationException If the verify operation is not supported or configured on the key.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<VerifyResult> verify(SignatureAlgorithm algorithm, byte[] digest, byte[] signature) {
    try {
        // Argument null-checks happen in the Context overload; this wrapper only captures the
        // subscriber's Reactor context.
        return withContext(context -> verify(algorithm, digest, signature, context));
    } catch (RuntimeException ex) {
        // Surface synchronous failures through the returned Mono.
        return monoError(logger, ex);
    }
}
Mono<VerifyResult> verify(SignatureAlgorithm algorithm, byte[] digest, byte[] signature, Context context) {
    Objects.requireNonNull(algorithm, "Signature algorithm cannot be null.");
    Objects.requireNonNull(digest, "Digest content cannot be null.");
    Objects.requireNonNull(signature, "Signature to be verified cannot be null.");
    return ensureValidKeyAvailable().flatMap(available -> {
        // No valid local key material: delegate verification to the Key Vault service.
        if (!available) {
            return cryptographyServiceClient.verify(algorithm, digest, signature, context);
        }
        // The cached key must explicitly permit the verify operation.
        if (!checkKeyPermissions(this.key.getKeyOps(), KeyOperation.VERIFY)) {
            return Mono.error(logger.logExceptionAsError(new UnsupportedOperationException(String.format(
                "Verify operation is not allowed for key with id: %s", key.getId()))));
        }
        // Verify locally using the cached key material.
        return localKeyCryptographyClient.verifyAsync(algorithm, digest, signature, context, key);
    });
}
/**
* Wraps a symmetric key using the configured key. The wrap operation supports wrapping a symmetric key with both
* symmetric and asymmetric keys. This operation requires the {@code keys/wrapKey} permission for non-local
* operations.
*
* <p>The {@link KeyWrapAlgorithm wrap algorithm} indicates the type of algorithm to use for wrapping the specified
* key content. Possible values include:
* {@link KeyWrapAlgorithm
* {@link KeyWrapAlgorithm
*
* Possible values for symmetric keys include: {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
*
* <p><strong>Code Samples</strong></p>
* <p>Wraps the key content. Subscribes to the call asynchronously and prints out the wrapped key details when a
* response has been received.</p>
*
* {@codesnippet com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.wrapKey
*
* @param algorithm The encryption algorithm to use for wrapping the key.
* @param key The key content to be wrapped.
*
* @return A {@link Mono} containing a {@link WrapResult} whose {@link WrapResult
* contains the wrapped key result.
*
* @throws NullPointerException If {@code algorithm} or {@code key} are {@code null}.
* @throws ResourceNotFoundException If the key cannot be found for wrap operation.
* @throws UnsupportedOperationException If the wrap operation is not supported or configured on the key.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<WrapResult> wrapKey(KeyWrapAlgorithm algorithm, byte[] key) {
    try {
        // Argument null-checks happen in the Context overload; this wrapper only captures the
        // subscriber's Reactor context.
        return withContext(context -> wrapKey(algorithm, key, context));
    } catch (RuntimeException ex) {
        // Surface synchronous failures through the returned Mono.
        return monoError(logger, ex);
    }
}
/**
 * Wraps the given key content, preferring local key material when it is available and permits the
 * wrap operation; otherwise the call is delegated to the Key Vault service.
 */
Mono<WrapResult> wrapKey(KeyWrapAlgorithm algorithm, byte[] key, Context context) {
    Objects.requireNonNull(algorithm, "Key wrap algorithm cannot be null.");
    Objects.requireNonNull(key, "Key content to be wrapped cannot be null.");

    return ensureValidKeyAvailable().flatMap(hasLocalKey -> {
        if (!hasLocalKey) {
            // No usable local key material: let the service perform the wrapping.
            return cryptographyServiceClient.wrapKey(algorithm, key, context);
        }

        // 'key' is the content being wrapped; 'this.key' is the vault key doing the wrapping.
        boolean wrapAllowed = checkKeyPermissions(this.key.getKeyOps(), KeyOperation.WRAP_KEY);

        if (!wrapAllowed) {
            return Mono.error(logger.logExceptionAsError(new UnsupportedOperationException(String.format(
                "Wrap Key operation is not allowed for key with id: %s", this.key.getId()))));
        }

        return localKeyCryptographyClient.wrapKeyAsync(algorithm, key, context, this.key);
    });
}
/**
* Unwraps a symmetric key using the configured key that was initially used for wrapping that key. This operation
* is the reverse of the wrap operation. The unwrap operation supports asymmetric and symmetric keys to unwrap. This
* operation requires the {@code keys/unwrapKey} permission for non-local operations.
*
* <p>The {@link KeyWrapAlgorithm wrap algorithm} indicates the type of algorithm to use for unwrapping the
* specified encrypted key content. Possible values for asymmetric keys include:
* {@link KeyWrapAlgorithm
* {@link KeyWrapAlgorithm
*
* Possible values for symmetric keys include: {@link KeyWrapAlgorithm
* {@link KeyWrapAlgorithm
*
* <p><strong>Code Samples</strong></p>
* <p>Unwraps the key content. Subscribes to the call asynchronously and prints out the unwrapped key details when
* a response has been received.</p>
*
* {@codesnippet com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.unwrapKey
*
* @param algorithm The encryption algorithm to use for wrapping the key.
* @param encryptedKey The encrypted key content to unwrap.
*
* @return A {@link Mono} containing an {@link UnwrapResult} whose {@link UnwrapResult
* key} contains the unwrapped key result.
*
* @throws NullPointerException If {@code algorithm} or {@code encryptedKey} are {@code null}.
* @throws ResourceNotFoundException If the key cannot be found for wrap operation.
* @throws UnsupportedOperationException If the unwrap operation is not supported or configured on the key.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<UnwrapResult> unwrapKey(KeyWrapAlgorithm algorithm, byte[] encryptedKey) {
    try {
        // Argument null-checks happen in the Context overload; this wrapper only captures the
        // subscriber's Reactor context.
        return withContext(context -> unwrapKey(algorithm, encryptedKey, context));
    } catch (RuntimeException ex) {
        // Surface synchronous failures through the returned Mono.
        return monoError(logger, ex);
    }
}
Mono<UnwrapResult> unwrapKey(KeyWrapAlgorithm algorithm, byte[] encryptedKey, Context context) {
    Objects.requireNonNull(algorithm, "Key wrap algorithm cannot be null.");
    Objects.requireNonNull(encryptedKey, "Encrypted key content to be unwrapped cannot be null.");
    return ensureValidKeyAvailable().flatMap(available -> {
        // No valid local key material: delegate unwrapping to the Key Vault service.
        if (!available) {
            return cryptographyServiceClient.unwrapKey(algorithm, encryptedKey, context);
        }
        // The cached key must explicitly permit the unwrap operation.
        if (!checkKeyPermissions(this.key.getKeyOps(), KeyOperation.UNWRAP_KEY)) {
            return Mono.error(logger.logExceptionAsError(new UnsupportedOperationException(String.format(
                "Unwrap Key operation is not allowed for key with id: %s", this.key.getId()))));
        }
        // Unwrap locally using the cached key material.
        return localKeyCryptographyClient.unwrapKeyAsync(algorithm, encryptedKey, context, key);
    });
}
/**
* Creates a signature from the raw data using the configured key. The sign data operation supports both asymmetric
* and symmetric keys. This operation requires the {@code keys/sign} permission for non-local operations.
*
* <p>The {@link SignatureAlgorithm signature algorithm} indicates the type of algorithm to use to sign the digest.
* Possible values include:
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
*
* <p><strong>Code Samples</strong></p>
* <p>Signs the raw data. Subscribes to the call asynchronously and prints out the signature details when a
* response has been received.</p>
*
* {@codesnippet com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.signData
*
* @param algorithm The algorithm to use for signing.
* @param data The content from which signature is to be created.
*
* @return A {@link Mono} containing a {@link SignResult} whose {@link SignResult
* the created signature.
*
* @throws NullPointerException If {@code algorithm} or {@code data} is {@code null}.
* @throws ResourceNotFoundException If the key cannot be found for signing.
* @throws UnsupportedOperationException If the sign operation is not supported or configured on the key.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<SignResult> signData(SignatureAlgorithm algorithm, byte[] data) {
    try {
        // Argument null-checks happen in the Context overload; this wrapper only captures the
        // subscriber's Reactor context.
        return withContext(context -> signData(algorithm, data, context));
    } catch (RuntimeException ex) {
        // Surface synchronous failures through the returned Mono.
        return monoError(logger, ex);
    }
}
Mono<SignResult> signData(SignatureAlgorithm algorithm, byte[] data, Context context) {
    Objects.requireNonNull(algorithm, "Signature algorithm cannot be null.");
    Objects.requireNonNull(data, "Data to be signed cannot be null.");
    return ensureValidKeyAvailable().flatMap(available -> {
        // No valid local key material: delegate signing to the Key Vault service.
        if (!available) {
            return cryptographyServiceClient.signData(algorithm, data, context);
        }
        // The cached key must explicitly permit the sign operation.
        if (!checkKeyPermissions(this.key.getKeyOps(), KeyOperation.SIGN)) {
            return Mono.error(logger.logExceptionAsError(new UnsupportedOperationException(String.format(
                "Sign Operation is not allowed for key with id: %s", this.key.getId()))));
        }
        // Hash-and-sign locally using the cached key material.
        return localKeyCryptographyClient.signDataAsync(algorithm, data, context, key);
    });
}
/**
* Verifies a signature against the raw data using the configured key. The verify operation supports both symmetric
* keys and asymmetric keys. In case of asymmetric keys public portion of the key is used to verify the signature.
* This operation requires the {@code keys/verify} permission for non-local operations.
*
* <p>The {@link SignatureAlgorithm signature algorithm} indicates the type of algorithm to use to verify the
* signature. Possible values include:
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
*
* <p><strong>Code Samples</strong></p>
* <p>Verifies the signature against the raw data. Subscribes to the call asynchronously and prints out the
* verification details when a response has been received.</p>
*
* {@codesnippet com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.verifyData
*
* @param algorithm The algorithm to use for signing.
* @param data The raw content against which signature is to be verified.
* @param signature The signature to be verified.
*
* @return A {@link Mono} containing a {@link VerifyResult}
* {@link VerifyResult
*
* @throws NullPointerException If {@code algorithm}, {@code data} or {@code signature} is {@code null}.
* @throws ResourceNotFoundException If the key cannot be found for verifying.
* @throws UnsupportedOperationException If the verify operation is not supported or configured on the key.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<VerifyResult> verifyData(SignatureAlgorithm algorithm, byte[] data, byte[] signature) {
    try {
        // Argument null-checks happen in the Context overload; this wrapper only captures the
        // subscriber's Reactor context.
        return withContext(context -> verifyData(algorithm, data, signature, context));
    } catch (RuntimeException ex) {
        // Surface synchronous failures through the returned Mono.
        return monoError(logger, ex);
    }
}
/**
 * Verifies a signature over raw data, preferring local key material when it is available and permits
 * the verify operation; otherwise the call is delegated to the Key Vault service.
 */
Mono<VerifyResult> verifyData(SignatureAlgorithm algorithm, byte[] data, byte[] signature, Context context) {
    Objects.requireNonNull(algorithm, "Signature algorithm cannot be null.");
    Objects.requireNonNull(data, "Data cannot be null.");
    Objects.requireNonNull(signature, "Signature to be verified cannot be null.");

    return ensureValidKeyAvailable().flatMap(hasLocalKey -> {
        if (!hasLocalKey) {
            // No usable local key material: let the service perform the verification.
            return cryptographyServiceClient.verifyData(algorithm, data, signature, context);
        }

        boolean verifyAllowed = checkKeyPermissions(this.key.getKeyOps(), KeyOperation.VERIFY);

        if (!verifyAllowed) {
            return Mono.error(logger.logExceptionAsError(new UnsupportedOperationException(String.format(
                "Verify operation is not allowed for key with id: %s", this.key.getId()))));
        }

        return localKeyCryptographyClient.verifyDataAsync(algorithm, data, signature, context, key);
    });
}
// True when the requested operation appears in the key's allowed key_ops list.
private boolean checkKeyPermissions(List<KeyOperation> operations, KeyOperation keyOperation) {
    return operations.indexOf(keyOperation) >= 0;
}
/**
 * Ensures valid key material is available locally for cryptographic operations, fetching (and
 * caching) it from the service when necessary.
 *
 * @return A {@link Mono} emitting {@code true} when a valid key is cached locally (and local crypto
 * clients are initialized), or {@code false} when operations must fall back to the service client.
 */
private Mono<Boolean> ensureValidKeyAvailable() {
    // Key has not been retrieved yet but we know which collection to fetch it from.
    boolean keyNotAvailable = (key == null && keyCollection != null);
    // A key is present but lacks the material required for local operations.
    boolean keyNotValid = (key != null && !key.isValid());

    if (keyNotAvailable || keyNotValid) {
        // Fix: compare with equals() on the constant so a null keyCollection (possible on the
        // keyNotValid path) cannot throw an NPE.
        if (SECRETS_COLLECTION.equals(keyCollection)) {
            return getSecretKey().map(this::cacheKeyAndInitializeClients);
        } else {
            return getKey().map(keyVaultKey -> cacheKeyAndInitializeClients(keyVaultKey.getKey()));
        }
    } else {
        return Mono.defer(() -> Mono.just(true));
    }
}

/**
 * Caches the retrieved key and, when it is valid, initializes the local cryptography clients.
 *
 * @param jsonWebKey The key material retrieved from the service.
 * @return {@code true} if the key is valid for local use, {@code false} otherwise.
 */
private boolean cacheKeyAndInitializeClients(JsonWebKey jsonWebKey) {
    key = jsonWebKey;

    if (key.isValid()) {
        initializeCryptoClients();
        return true;
    } else {
        return false;
    }
}
// Returns the service-backed client used for remote operations (null in local-only mode).
CryptographyServiceClient getCryptographyServiceClient() {
    return cryptographyServiceClient;
}
// Replaces the service-backed client. NOTE(review): package-private — presumably a test hook; confirm.
void setCryptographyServiceClient(CryptographyServiceClient serviceClient) {
    this.cryptographyServiceClient = serviceClient;
}
} | class CryptographyAsyncClient {
// Tracing namespace value reported for Azure Key Vault service calls.
static final String KEYVAULT_TRACING_NAMESPACE_VALUE = "Microsoft.KeyVault";
// Collection segment used to detect secret-backed keys (see ensureValidKeyAvailable).
static final String SECRETS_COLLECTION = "secrets";
// Cached key material for local cryptography; null until retrieved from the service or supplied directly.
JsonWebKey key;
private final ClientLogger logger = new ClientLogger(CryptographyAsyncClient.class);
// REST proxy for the Key Vault API; null when operating in local-only mode.
private final CryptographyService service;
// HTTP pipeline backing service calls; null when operating in local-only mode.
private final HttpPipeline pipeline;
// Identifier of the Azure Key Vault key this client operates on.
private final String keyId;
private CryptographyServiceClient cryptographyServiceClient;
// Local implementation selected by key type (RSA/EC/AES); initialized lazily by initializeCryptoClients().
private LocalKeyCryptographyClient localKeyCryptographyClient;
// Collection parsed from the key identifier. NOTE(review): presumably "keys" or "secrets" — confirm
// against unpackAndValidateId.
private String keyCollection;
/**
* Creates a {@link CryptographyAsyncClient} that uses a given {@link HttpPipeline pipeline} to service requests.
*
* @param keyId The Azure Key Vault key identifier to use for cryptography operations.
* @param pipeline {@link HttpPipeline} that the HTTP requests and responses flow through.
* @param version {@link CryptographyServiceVersion} of the service to be used when making requests.
*/
CryptographyAsyncClient(String keyId, HttpPipeline pipeline, CryptographyServiceVersion version) {
    // NOTE(review): unpackAndValidateId is defined elsewhere in this class — presumably it parses and
    // validates the key identifier, throwing on malformed input; confirm.
    unpackAndValidateId(keyId);
    this.keyId = keyId;
    this.pipeline = pipeline;
    this.service = RestProxy.create(CryptographyService.class, pipeline);
    this.cryptographyServiceClient = new CryptographyServiceClient(keyId, service, version);
    // Key material is fetched lazily from the service on first use (see ensureValidKeyAvailable).
    this.key = null;
}
/**
* Creates a {@link CryptographyAsyncClient} that uses a {@link JsonWebKey} to perform local cryptography
* operations.
*
* @param jsonWebKey The {@link JsonWebKey} to use for local cryptography operations.
*/
CryptographyAsyncClient(JsonWebKey jsonWebKey) {
    Objects.requireNonNull(jsonWebKey, "The JSON Web Key is required.");
    // Local-only operation requires a structurally valid, fully specified key up front.
    if (!jsonWebKey.isValid()) {
        throw new IllegalArgumentException("The JSON Web Key is not valid.");
    }
    if (jsonWebKey.getKeyOps() == null) {
        throw new IllegalArgumentException("The JSON Web Key's key operations property is not configured.");
    }
    if (jsonWebKey.getKeyType() == null) {
        throw new IllegalArgumentException("The JSON Web Key's key type property is not configured.");
    }
    this.key = jsonWebKey;
    this.keyId = jsonWebKey.getId();
    // Local-only mode: no HTTP pipeline or service client is available.
    this.pipeline = null;
    this.service = null;
    this.cryptographyServiceClient = null;
    // Safe to initialize immediately since the key material was validated above.
    initializeCryptoClients();
}
/**
 * Lazily selects and creates the local cryptography client matching the cached key's type.
 * Idempotent: a local client is only ever created once.
 *
 * @throws IllegalArgumentException If the key type is not RSA, EC or OCT (plain or HSM).
 */
private void initializeCryptoClients() {
    if (localKeyCryptographyClient != null) {
        return;
    }

    if (RSA.equals(key.getKeyType()) || RSA_HSM.equals(key.getKeyType())) {
        this.localKeyCryptographyClient = new RsaKeyCryptographyClient(this.key, this.cryptographyServiceClient);
    } else if (EC.equals(key.getKeyType()) || EC_HSM.equals(key.getKeyType())) {
        this.localKeyCryptographyClient = new EcKeyCryptographyClient(this.key, this.cryptographyServiceClient);
    } else if (OCT.equals(key.getKeyType()) || OCT_HSM.equals(key.getKeyType())) {
        this.localKeyCryptographyClient = new AesKeyCryptographyClient(this.key, this.cryptographyServiceClient);
    } else {
        throw logger.logExceptionAsError(new IllegalArgumentException(String.format(
            "The JSON Web Key type: %s is not supported.", this.key.getKeyType().toString())));
    }
}
/**
* Gets the {@link HttpPipeline} powering this client.
*
* @return The pipeline.
*/
HttpPipeline getHttpPipeline() {
    return this.pipeline;
}
// Emits the configured key identifier; defer() reads the field at subscription time.
Mono<String> getKeyId() {
    return Mono.defer(() -> Mono.just(this.keyId));
}
/**
* Gets the public part of the configured key. The get key operation is applicable to all key types and it requires
* the {@code keys/get} permission for non-local operations.
*
* <p><strong>Code Samples</strong></p>
* <p>Gets the configured key in the client. Subscribes to the call asynchronously and prints out the returned key
* details when a response has been received.</p>
*
* {@codesnippet com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.getKey}
*
* @return A {@link Mono} containing the requested {@link KeyVaultKey key}.
*
* @throws ResourceNotFoundException When the configured key doesn't exist in the key vault.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<KeyVaultKey> getKey() {
    try {
        // Unwrap the Response<KeyVaultKey> down to just the key payload.
        return getKeyWithResponse().flatMap(FluxUtil::toMono);
    } catch (RuntimeException ex) {
        // Surface synchronous failures through the returned Mono.
        return monoError(logger, ex);
    }
}
/**
* Gets the public part of the configured key. The get key operation is applicable to all key types and it requires
* the {@code keys/get} permission for non-local operations.
*
* <p><strong>Code Samples</strong></p>
* <p>Gets the configured key in the client. Subscribes to the call asynchronously and prints out the returned key
* details when a response has been received.</p>
*
* {@codesnippet com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.getKeyWithResponse}
*
* @return A {@link Mono} containing a {@link Response} whose {@link Response
* requested {@link KeyVaultKey key}.
*
* @throws ResourceNotFoundException When the configured key doesn't exist in the key vault.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<KeyVaultKey>> getKeyWithResponse() {
    try {
        // Capture the subscriber's Reactor context for the service call.
        return withContext(this::getKeyWithResponse);
    } catch (RuntimeException ex) {
        // Surface synchronous failures through the returned Mono.
        return monoError(logger, ex);
    }
}
/**
 * Retrieves the key from the service. Not supported in local-only mode, where no service client exists.
 */
Mono<Response<KeyVaultKey>> getKeyWithResponse(Context context) {
    // Guard clause: in local-only mode there is no service to fetch the key from.
    if (cryptographyServiceClient == null) {
        throw logger.logExceptionAsError(new UnsupportedOperationException(
            "Operation not supported when in operating local-only mode"));
    }

    return cryptographyServiceClient.getKey(context);
}
Mono<JsonWebKey> getSecretKey() {
    try {
        // For secret-backed keys, retrieve the key material via the secrets endpoint and unwrap
        // the response to the raw JsonWebKey.
        return withContext(context -> cryptographyServiceClient.getSecretKey(context))
            .flatMap(FluxUtil::toMono);
    } catch (RuntimeException ex) {
        // Surface synchronous failures through the returned Mono.
        return monoError(logger, ex);
    }
}
/**
* Encrypts an arbitrary sequence of bytes using the configured key. Note that the encrypt operation only supports
* a single block of data, the size of which is dependent on the target key and the encryption algorithm to be used.
* The encrypt operation is supported for both symmetric keys and asymmetric keys. In case of asymmetric keys, the
* public portion of the key is used for encryption. This operation requires the {@code keys/encrypt} permission
* for non-local operations.
*
* <p>The {@link EncryptionAlgorithm encryption algorithm} indicates the type of algorithm to use for encrypting the
* specified {@code plaintext}. Possible values for asymmetric keys include:
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
*
* Possible values for symmetric keys include: {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
*
* <p><strong>Code Samples</strong></p>
* <p>Encrypts the content. Subscribes to the call asynchronously and prints out the encrypted content details when
* a response has been received.</p>
*
* {@codesnippet com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.encrypt
*
* @param algorithm The algorithm to be used for encryption.
* @param plaintext The content to be encrypted.
*
* @return A {@link Mono} containing a {@link EncryptResult} whose {@link EncryptResult
* contains the encrypted content.
*
* @throws NullPointerException If {@code algorithm} or {@code plaintext} are {@code null}.
* @throws ResourceNotFoundException If the key cannot be found for encryption.
* @throws UnsupportedOperationException If the encrypt operation is not supported or configured on the key.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<EncryptResult> encrypt(EncryptionAlgorithm algorithm, byte[] plaintext) {
    // Validate eagerly so callers get an immediate NPE rather than a subscription-time error.
    Objects.requireNonNull(algorithm, "'algorithm' cannot be null.");
    Objects.requireNonNull(plaintext, "'plaintext' cannot be null.");
    // A null Context is tolerated by the package-private overload.
    return encrypt(algorithm, plaintext, null);
}
/**
* Encrypts an arbitrary sequence of bytes using the configured key. Note that the encrypt operation only supports
* a single block of data, the size of which is dependent on the target key and the encryption algorithm to be used.
* The encrypt operation is supported for both symmetric keys and asymmetric keys. In case of asymmetric keys, the
* public portion of the key is used for encryption. This operation requires the {@code keys/encrypt} permission
* for non-local operations.
*
* <p>The {@link EncryptionAlgorithm encryption algorithm} indicates the type of algorithm to use for encrypting the
* specified {@code plaintext}. Possible values for asymmetric keys include:
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
*
* Possible values for symmetric keys include: {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
*
* <p><strong>Code Samples</strong></p>
* <p>Encrypts the content. Subscribes to the call asynchronously and prints out the encrypted content details when
* a response has been received.</p>
*
* {@codesnippet com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.encrypt
*
* @param encryptParameters The parameters to use in the encryption operation.
*
* @return A {@link Mono} containing a {@link EncryptResult} whose {@link EncryptResult
* contains the encrypted content.
*
* @throws NullPointerException If {@code encryptParameters} is {@code null}.
* @throws ResourceNotFoundException If the key cannot be found for encryption.
* @throws UnsupportedOperationException If the encrypt operation is not supported or configured on the key.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<EncryptResult> encrypt(EncryptParameters encryptParameters) {
    // Fail fast at assembly time instead of deferring the NPE to subscription time.
    Objects.requireNonNull(encryptParameters, "'encryptParameters' cannot be null.");
    try {
        // withContext captures the subscriber's Reactor context and forwards it to the service call.
        return withContext(context -> encrypt(encryptParameters, context));
    } catch (RuntimeException ex) {
        // Surface synchronous failures through the returned Mono.
        return monoError(logger, ex);
    }
}
Mono<EncryptResult> encrypt(EncryptionAlgorithm algorithm, byte[] plaintext, Context context) {
    return ensureValidKeyAvailable().flatMap(available -> {
        // No valid local key material: delegate encryption to the Key Vault service.
        if (!available) {
            return cryptographyServiceClient.encrypt(algorithm, plaintext, context);
        }
        // The cached key must explicitly permit the encrypt operation.
        if (!checkKeyPermissions(this.key.getKeyOps(), KeyOperation.ENCRYPT)) {
            return Mono.error(logger.logExceptionAsError(new UnsupportedOperationException(String.format(
                "Encrypt operation is missing permission/not supported for key with id: %s", key.getId()))));
        }
        // Encrypt locally using the cached key material.
        return localKeyCryptographyClient.encryptAsync(algorithm, plaintext, context, key);
    });
}
Mono<EncryptResult> encrypt(EncryptParameters encryptParameters, Context context) {
    return ensureValidKeyAvailable().flatMap(available -> {
        // No valid local key material: delegate encryption to the Key Vault service.
        if (!available) {
            return cryptographyServiceClient.encrypt(encryptParameters, context);
        }
        // The cached key must explicitly permit the encrypt operation.
        if (!checkKeyPermissions(this.key.getKeyOps(), KeyOperation.ENCRYPT)) {
            return Mono.error(logger.logExceptionAsError(new UnsupportedOperationException(String.format(
                "Encrypt operation is missing permission/not supported for key with id: %s", key.getId()))));
        }
        // Encrypt locally using the cached key material.
        return localKeyCryptographyClient.encryptAsync(encryptParameters, context, key);
    });
}
/**
* Decrypts a single block of encrypted data using the configured key and specified algorithm. Note that only a
* single block of data may be decrypted, the size of this block is dependent on the target key and the algorithm
* to be used. The decrypt operation is supported for both asymmetric and symmetric keys. This operation requires
* the {@code keys/decrypt} permission for non-local operations.
*
* <p>The {@link EncryptionAlgorithm encryption algorithm} indicates the type of algorithm to use for decrypting
* the specified encrypted content. Possible values for asymmetric keys include:
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
*
* Possible values for symmetric keys include: {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
*
* <p><strong>Code Samples</strong></p>
* <p>Decrypts the encrypted content. Subscribes to the call asynchronously and prints out the decrypted content
* details when a response has been received.</p>
*
* {@codesnippet com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.decrypt
*
* @param algorithm The algorithm to be used for decryption.
* @param ciphertext The content to be decrypted.
*
* @return A {@link Mono} containing the decrypted blob.
*
* @throws NullPointerException If {@code algorithm} or {@code ciphertext} are {@code null}.
* @throws ResourceNotFoundException If the key cannot be found for decryption.
* @throws UnsupportedOperationException If the decrypt operation is not supported or configured on the key.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<DecryptResult> decrypt(EncryptionAlgorithm algorithm, byte[] ciphertext) {
    Objects.requireNonNull(algorithm, "'algorithm' cannot be null.");
    // Fix: this previously re-validated 'algorithm' (copy/paste bug), so a null 'ciphertext' slipped
    // through here and failed later with a less helpful error.
    Objects.requireNonNull(ciphertext, "'ciphertext' cannot be null.");

    // A null Context is tolerated by the package-private overload.
    return decrypt(algorithm, ciphertext, null);
}
/**
 * Decrypts a single block of encrypted data using the configured key and the algorithm carried by the supplied
 * parameters. Only a single block may be decrypted; its size depends on the target key and algorithm. Supported
 * for both asymmetric and symmetric keys. Requires the {@code keys/decrypt} permission for non-local operations.
 *
 * @param decryptParameters The parameters to use in the decryption operation.
 *
 * @return A {@link Mono} containing the decrypted blob.
 *
 * @throws NullPointerException If {@code decryptParameters} is {@code null}.
 * @throws ResourceNotFoundException If the key cannot be found for decryption.
 * @throws UnsupportedOperationException If the decrypt operation is not supported or configured on the key.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<DecryptResult> decrypt(DecryptParameters decryptParameters) {
    Objects.requireNonNull(decryptParameters, "'decryptParameters' cannot be null.");
    try {
        // Resolve the reactive Context from the subscriber and forward to the package-private overload.
        return withContext(ctx -> decrypt(decryptParameters, ctx));
    } catch (RuntimeException runtimeException) {
        return monoError(logger, runtimeException);
    }
}
/**
 * Decrypts locally when valid key material is available and permitted, otherwise delegates to the service.
 *
 * @param algorithm The algorithm to be used for decryption.
 * @param ciphertext The content to be decrypted.
 * @param context Additional context propagated through the call chain.
 * @return A {@link Mono} containing the decrypted blob.
 */
Mono<DecryptResult> decrypt(EncryptionAlgorithm algorithm, byte[] ciphertext, Context context) {
    return ensureValidKeyAvailable().flatMap(localKeyUsable -> {
        if (!localKeyUsable) {
            // No usable local key material; fall back to the Key Vault service.
            return cryptographyServiceClient.decrypt(algorithm, ciphertext, context);
        }
        if (checkKeyPermissions(this.key.getKeyOps(), KeyOperation.DECRYPT)) {
            return localKeyCryptographyClient.decryptAsync(algorithm, ciphertext, context, key);
        }
        return Mono.error(logger.logExceptionAsError(new UnsupportedOperationException(String.format(
            "Decrypt operation is not allowed for key with id: %s", key.getId()))));
    });
}
/**
 * Parameter-object variant: decrypts locally when valid key material is available and permitted, otherwise
 * delegates to the service.
 *
 * @param decryptParameters The parameters to use in the decryption operation.
 * @param context Additional context propagated through the call chain.
 * @return A {@link Mono} containing the decrypted blob.
 */
Mono<DecryptResult> decrypt(DecryptParameters decryptParameters, Context context) {
    return ensureValidKeyAvailable().flatMap(localKeyUsable -> {
        if (!localKeyUsable) {
            // No usable local key material; fall back to the Key Vault service.
            return cryptographyServiceClient.decrypt(decryptParameters, context);
        }
        if (checkKeyPermissions(this.key.getKeyOps(), KeyOperation.DECRYPT)) {
            return localKeyCryptographyClient.decryptAsync(decryptParameters, context, key);
        }
        return Mono.error(logger.logExceptionAsError(new UnsupportedOperationException(String.format(
            "Decrypt operation is not allowed for key with id: %s", key.getId()))));
    });
}
/**
 * Creates a signature from a digest using the configured key. The sign operation supports both asymmetric and
 * symmetric keys. This operation requires the {@code keys/sign} permission for non-local operations.
 *
 * <p><strong>Code Samples</strong></p>
 * <p>Signs the digest. Subscribes to the call asynchronously and prints out the signature details when a response
 * has been received.</p>
 *
 * @param algorithm The algorithm to use for signing.
 * @param digest The content from which signature is to be created.
 *
 * @return A {@link Mono} containing a {@link SignResult} with the created signature.
 *
 * @throws NullPointerException If {@code algorithm} or {@code digest} is {@code null}.
 * @throws ResourceNotFoundException If the key cannot be found for signing.
 * @throws UnsupportedOperationException If the sign operation is not supported or configured on the key.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<SignResult> sign(SignatureAlgorithm algorithm, byte[] digest) {
    try {
        // Resolve the reactive Context from the subscriber and forward to the package-private overload.
        return withContext(ctx -> sign(algorithm, digest, ctx));
    } catch (RuntimeException runtimeException) {
        return monoError(logger, runtimeException);
    }
}
/**
 * Signs locally when valid key material is available and permitted, otherwise delegates to the service.
 *
 * @param algorithm The algorithm to use for signing.
 * @param digest The content from which signature is to be created.
 * @param context Additional context propagated through the call chain.
 * @return A {@link Mono} containing a {@link SignResult} with the created signature.
 */
Mono<SignResult> sign(SignatureAlgorithm algorithm, byte[] digest, Context context) {
    Objects.requireNonNull(algorithm, "Signature algorithm cannot be null.");
    Objects.requireNonNull(digest, "Digest content to be signed cannot be null.");
    return ensureValidKeyAvailable().flatMap(localKeyUsable -> {
        if (!localKeyUsable) {
            // No usable local key material; fall back to the Key Vault service.
            return cryptographyServiceClient.sign(algorithm, digest, context);
        }
        if (checkKeyPermissions(this.key.getKeyOps(), KeyOperation.SIGN)) {
            return localKeyCryptographyClient.signAsync(algorithm, digest, context, key);
        }
        return Mono.error(logger.logExceptionAsError(new UnsupportedOperationException(String.format(
            "Sign operation is not allowed for key with id: %s", key.getId()))));
    });
}
/**
 * Verifies a signature using the configured key. The verify operation supports both symmetric keys and asymmetric
 * keys. In case of asymmetric keys the public portion of the key is used to verify the signature. This operation
 * requires the {@code keys/verify} permission for non-local operations.
 *
 * <p><strong>Code Samples</strong></p>
 * <p>Verifies the signature against the specified digest. Subscribes to the call asynchronously and prints out the
 * verification details when a response has been received.</p>
 *
 * @param algorithm The algorithm to use for signing.
 * @param digest The content from which signature was created.
 * @param signature The signature to be verified.
 *
 * @return A {@link Mono} containing a {@link VerifyResult} with the verification outcome.
 *
 * @throws NullPointerException If {@code algorithm}, {@code digest} or {@code signature} is {@code null}.
 * @throws ResourceNotFoundException If the key cannot be found for verifying.
 * @throws UnsupportedOperationException If the verify operation is not supported or configured on the key.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<VerifyResult> verify(SignatureAlgorithm algorithm, byte[] digest, byte[] signature) {
    try {
        // Resolve the reactive Context from the subscriber and forward to the package-private overload.
        return withContext(ctx -> verify(algorithm, digest, signature, ctx));
    } catch (RuntimeException runtimeException) {
        return monoError(logger, runtimeException);
    }
}
/**
 * Verifies locally when valid key material is available and permitted, otherwise delegates to the service.
 *
 * @param algorithm The algorithm used when the signature was created.
 * @param digest The content from which the signature was created.
 * @param signature The signature to be verified.
 * @param context Additional context propagated through the call chain.
 * @return A {@link Mono} containing a {@link VerifyResult} with the verification outcome.
 */
Mono<VerifyResult> verify(SignatureAlgorithm algorithm, byte[] digest, byte[] signature, Context context) {
    Objects.requireNonNull(algorithm, "Signature algorithm cannot be null.");
    Objects.requireNonNull(digest, "Digest content cannot be null.");
    Objects.requireNonNull(signature, "Signature to be verified cannot be null.");
    return ensureValidKeyAvailable().flatMap(localKeyUsable -> {
        if (!localKeyUsable) {
            // No usable local key material; fall back to the Key Vault service.
            return cryptographyServiceClient.verify(algorithm, digest, signature, context);
        }
        if (checkKeyPermissions(this.key.getKeyOps(), KeyOperation.VERIFY)) {
            return localKeyCryptographyClient.verifyAsync(algorithm, digest, signature, context, key);
        }
        return Mono.error(logger.logExceptionAsError(new UnsupportedOperationException(String.format(
            "Verify operation is not allowed for key with id: %s", this.key.getId()))));
    });
}
/**
 * Wraps a symmetric key using the configured key. The wrap operation supports wrapping a symmetric key with both
 * symmetric and asymmetric keys. This operation requires the {@code keys/wrapKey} permission for non-local
 * operations.
 *
 * <p><strong>Code Samples</strong></p>
 * <p>Wraps the key content. Subscribes to the call asynchronously and prints out the wrapped key details when a
 * response has been received.</p>
 *
 * @param algorithm The encryption algorithm to use for wrapping the key.
 * @param key The key content to be wrapped.
 *
 * @return A {@link Mono} containing a {@link WrapResult} with the wrapped key result.
 *
 * @throws NullPointerException If {@code algorithm} or {@code key} are {@code null}.
 * @throws ResourceNotFoundException If the key cannot be found for wrap operation.
 * @throws UnsupportedOperationException If the wrap operation is not supported or configured on the key.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<WrapResult> wrapKey(KeyWrapAlgorithm algorithm, byte[] key) {
    try {
        // Resolve the reactive Context from the subscriber and forward to the package-private overload.
        return withContext(ctx -> wrapKey(algorithm, key, ctx));
    } catch (RuntimeException runtimeException) {
        return monoError(logger, runtimeException);
    }
}
/**
 * Wraps locally when valid key material is available and permitted, otherwise delegates to the service.
 * Note: the {@code key} parameter (material to wrap) shadows the {@code this.key} field (the wrapping key).
 *
 * @param algorithm The encryption algorithm to use for wrapping the key.
 * @param key The key content to be wrapped.
 * @param context Additional context propagated through the call chain.
 * @return A {@link Mono} containing a {@link WrapResult} with the wrapped key result.
 */
Mono<WrapResult> wrapKey(KeyWrapAlgorithm algorithm, byte[] key, Context context) {
    Objects.requireNonNull(algorithm, "Key wrap algorithm cannot be null.");
    Objects.requireNonNull(key, "Key content to be wrapped cannot be null.");
    return ensureValidKeyAvailable().flatMap(localKeyUsable -> {
        if (!localKeyUsable) {
            // No usable local key material; fall back to the Key Vault service.
            return cryptographyServiceClient.wrapKey(algorithm, key, context);
        }
        if (checkKeyPermissions(this.key.getKeyOps(), KeyOperation.WRAP_KEY)) {
            return localKeyCryptographyClient.wrapKeyAsync(algorithm, key, context, this.key);
        }
        return Mono.error(logger.logExceptionAsError(new UnsupportedOperationException(String.format(
            "Wrap Key operation is not allowed for key with id: %s", this.key.getId()))));
    });
}
/**
 * Unwraps a symmetric key using the configured key that was initially used for wrapping that key. This operation
 * is the reverse of the wrap operation. The unwrap operation supports asymmetric and symmetric keys to unwrap.
 * This operation requires the {@code keys/unwrapKey} permission for non-local operations.
 *
 * <p><strong>Code Samples</strong></p>
 * <p>Unwraps the key content. Subscribes to the call asynchronously and prints out the unwrapped key details when
 * a response has been received.</p>
 *
 * @param algorithm The encryption algorithm to use for wrapping the key.
 * @param encryptedKey The encrypted key content to unwrap.
 *
 * @return A {@link Mono} containing an {@link UnwrapResult} with the unwrapped key result.
 *
 * @throws NullPointerException If {@code algorithm} or {@code encryptedKey} are {@code null}.
 * @throws ResourceNotFoundException If the key cannot be found for wrap operation.
 * @throws UnsupportedOperationException If the unwrap operation is not supported or configured on the key.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<UnwrapResult> unwrapKey(KeyWrapAlgorithm algorithm, byte[] encryptedKey) {
    try {
        // Resolve the reactive Context from the subscriber and forward to the package-private overload.
        return withContext(ctx -> unwrapKey(algorithm, encryptedKey, ctx));
    } catch (RuntimeException runtimeException) {
        return monoError(logger, runtimeException);
    }
}
/**
 * Unwraps locally when valid key material is available and permitted, otherwise delegates to the service.
 *
 * @param algorithm The algorithm originally used to wrap the key.
 * @param encryptedKey The encrypted key content to unwrap.
 * @param context Additional context propagated through the call chain.
 * @return A {@link Mono} containing an {@link UnwrapResult} with the unwrapped key result.
 */
Mono<UnwrapResult> unwrapKey(KeyWrapAlgorithm algorithm, byte[] encryptedKey, Context context) {
    Objects.requireNonNull(algorithm, "Key wrap algorithm cannot be null.");
    Objects.requireNonNull(encryptedKey, "Encrypted key content to be unwrapped cannot be null.");
    return ensureValidKeyAvailable().flatMap(localKeyUsable -> {
        if (!localKeyUsable) {
            // No usable local key material; fall back to the Key Vault service.
            return cryptographyServiceClient.unwrapKey(algorithm, encryptedKey, context);
        }
        if (checkKeyPermissions(this.key.getKeyOps(), KeyOperation.UNWRAP_KEY)) {
            return localKeyCryptographyClient.unwrapKeyAsync(algorithm, encryptedKey, context, key);
        }
        return Mono.error(logger.logExceptionAsError(new UnsupportedOperationException(String.format(
            "Unwrap Key operation is not allowed for key with id: %s", this.key.getId()))));
    });
}
/**
 * Creates a signature from the raw data using the configured key. The sign data operation supports both asymmetric
 * and symmetric keys. This operation requires the {@code keys/sign} permission for non-local operations.
 *
 * <p><strong>Code Samples</strong></p>
 * <p>Signs the raw data. Subscribes to the call asynchronously and prints out the signature details when a
 * response has been received.</p>
 *
 * @param algorithm The algorithm to use for signing.
 * @param data The content from which signature is to be created.
 *
 * @return A {@link Mono} containing a {@link SignResult} with the created signature.
 *
 * @throws NullPointerException If {@code algorithm} or {@code data} is {@code null}.
 * @throws ResourceNotFoundException If the key cannot be found for signing.
 * @throws UnsupportedOperationException If the sign operation is not supported or configured on the key.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<SignResult> signData(SignatureAlgorithm algorithm, byte[] data) {
    try {
        // Resolve the reactive Context from the subscriber and forward to the package-private overload.
        return withContext(ctx -> signData(algorithm, data, ctx));
    } catch (RuntimeException runtimeException) {
        return monoError(logger, runtimeException);
    }
}
/**
 * Signs raw data locally when valid key material is available and permitted, otherwise delegates to the service.
 *
 * @param algorithm The algorithm to use for signing.
 * @param data The content from which signature is to be created.
 * @param context Additional context propagated through the call chain.
 * @return A {@link Mono} containing a {@link SignResult} with the created signature.
 */
Mono<SignResult> signData(SignatureAlgorithm algorithm, byte[] data, Context context) {
    Objects.requireNonNull(algorithm, "Signature algorithm cannot be null.");
    Objects.requireNonNull(data, "Data to be signed cannot be null.");
    return ensureValidKeyAvailable().flatMap(localKeyUsable -> {
        if (!localKeyUsable) {
            // No usable local key material; fall back to the Key Vault service.
            return cryptographyServiceClient.signData(algorithm, data, context);
        }
        if (checkKeyPermissions(this.key.getKeyOps(), KeyOperation.SIGN)) {
            return localKeyCryptographyClient.signDataAsync(algorithm, data, context, key);
        }
        return Mono.error(logger.logExceptionAsError(new UnsupportedOperationException(String.format(
            "Sign Operation is not allowed for key with id: %s", this.key.getId()))));
    });
}
/**
 * Verifies a signature against the raw data using the configured key. The verify operation supports both symmetric
 * keys and asymmetric keys. In case of asymmetric keys the public portion of the key is used to verify the
 * signature. This operation requires the {@code keys/verify} permission for non-local operations.
 *
 * <p><strong>Code Samples</strong></p>
 * <p>Verifies the signature against the raw data. Subscribes to the call asynchronously and prints out the
 * verification details when a response has been received.</p>
 *
 * @param algorithm The algorithm to use for signing.
 * @param data The raw content against which signature is to be verified.
 * @param signature The signature to be verified.
 *
 * @return A {@link Mono} containing a {@link VerifyResult} with the verification outcome.
 *
 * @throws NullPointerException If {@code algorithm}, {@code data} or {@code signature} is {@code null}.
 * @throws ResourceNotFoundException If the key cannot be found for verifying.
 * @throws UnsupportedOperationException If the verify operation is not supported or configured on the key.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<VerifyResult> verifyData(SignatureAlgorithm algorithm, byte[] data, byte[] signature) {
    try {
        // Resolve the reactive Context from the subscriber and forward to the package-private overload.
        return withContext(ctx -> verifyData(algorithm, data, signature, ctx));
    } catch (RuntimeException runtimeException) {
        return monoError(logger, runtimeException);
    }
}
/**
 * Verifies raw data locally when valid key material is available and permitted, otherwise delegates to the
 * service.
 *
 * @param algorithm The algorithm used when the signature was created.
 * @param data The raw content against which the signature is to be verified.
 * @param signature The signature to be verified.
 * @param context Additional context propagated through the call chain.
 * @return A {@link Mono} containing a {@link VerifyResult} with the verification outcome.
 */
Mono<VerifyResult> verifyData(SignatureAlgorithm algorithm, byte[] data, byte[] signature, Context context) {
    Objects.requireNonNull(algorithm, "Signature algorithm cannot be null.");
    Objects.requireNonNull(data, "Data cannot be null.");
    Objects.requireNonNull(signature, "Signature to be verified cannot be null.");
    return ensureValidKeyAvailable().flatMap(localKeyUsable -> {
        if (!localKeyUsable) {
            // No usable local key material; fall back to the Key Vault service.
            return cryptographyServiceClient.verifyData(algorithm, data, signature, context);
        }
        if (checkKeyPermissions(this.key.getKeyOps(), KeyOperation.VERIFY)) {
            return localKeyCryptographyClient.verifyDataAsync(algorithm, data, signature, context, key);
        }
        return Mono.error(logger.logExceptionAsError(new UnsupportedOperationException(String.format(
            "Verify operation is not allowed for key with id: %s", this.key.getId()))));
    });
}
/**
 * Determines whether the requested operation appears in the key's allowed-operations list.
 *
 * @param operations The operations permitted for the key (from {@code JsonWebKey.getKeyOps()}).
 * @param keyOperation The operation being requested.
 * @return {@code true} when the requested operation is permitted.
 */
private boolean checkKeyPermissions(List<KeyOperation> operations, KeyOperation keyOperation) {
    for (KeyOperation allowed : operations) {
        // Objects.equals mirrors List.contains' element comparison, including null handling.
        if (Objects.equals(keyOperation, allowed)) {
            return true;
        }
    }
    return false;
}
/**
 * Ensures usable key material is present before a local operation is attempted, fetching it from the service
 * (secrets or keys collection) when missing or invalid.
 *
 * @return A {@link Mono} emitting {@code true} when valid local key material is available and the local crypto
 * clients are initialized, {@code false} when callers must fall back to the service client.
 */
private Mono<Boolean> ensureValidKeyAvailable() {
    boolean keyNotAvailable = (key == null && keyCollection != null);
    boolean keyNotValid = (key != null && !key.isValid());
    if (!keyNotAvailable && !keyNotValid) {
        return Mono.defer(() -> Mono.just(true));
    }
    // Fix: compare with the constant as the receiver. 'keyCollection' can be null (e.g. a client built from a
    // local JsonWebKey whose key later reports invalid), and keyCollection.equals(...) threw a NullPointerException.
    Mono<JsonWebKey> fetchedKey = SECRETS_COLLECTION.equals(keyCollection)
        ? getSecretKey()
        : getKey().map(KeyVaultKey::getKey);
    return fetchedKey.map(jsonWebKey -> {
        key = jsonWebKey;
        if (key.isValid()) {
            initializeCryptoClients();
            return true;
        }
        return false;
    });
}
// Returns the service-backed client used for remote operations; null when this client was
// constructed from a local JsonWebKey (local-only mode).
CryptographyServiceClient getCryptographyServiceClient() {
    return cryptographyServiceClient;
}
// Replaces the service-backed client. NOTE(review): presumably used to inject a substitute in tests — confirm.
void setCryptographyServiceClient(CryptographyServiceClient serviceClient) {
    this.cryptographyServiceClient = serviceClient;
}
} |
I'll play it safe and use `getHost()` and `getPort()` for now. | private void unpackAndValidateId(String keyId) {
if (CoreUtils.isNullOrEmpty(keyId)) {
throw logger.logExceptionAsError(new IllegalArgumentException("'keyId' cannot be null or empty."));
}
try {
URL url = new URL(keyId);
String[] tokens = url.getPath().split("/");
String endpoint = url.getProtocol() + ":
if (url.getPort() != -1) {
endpoint += ":" + url.getPort();
}
String keyName = (tokens.length >= 3 ? tokens[2] : null);
this.keyCollection = (tokens.length >= 2 ? tokens[1] : null);
if (Strings.isNullOrEmpty(endpoint)) {
throw logger.logExceptionAsError(
new IllegalArgumentException("Key endpoint in key identifier is invalid."));
} else if (Strings.isNullOrEmpty(keyName)) {
throw logger.logExceptionAsError(
new IllegalArgumentException("Key name in key identifier is invalid."));
}
} catch (MalformedURLException e) {
throw logger.logExceptionAsError(new IllegalArgumentException("The key identifier is malformed.", e));
}
}

private void unpackAndValidateId(String keyId) {
if (CoreUtils.isNullOrEmpty(keyId)) {
throw logger.logExceptionAsError(new IllegalArgumentException("'keyId' cannot be null or empty."));
}
try {
URL url = new URL(keyId);
String[] tokens = url.getPath().split("/");
String endpoint = url.getProtocol() + ":
if (url.getPort() != -1) {
endpoint += ":" + url.getPort();
}
String keyName = (tokens.length >= 3 ? tokens[2] : null);
this.keyCollection = (tokens.length >= 2 ? tokens[1] : null);
if (Strings.isNullOrEmpty(endpoint)) {
throw logger.logExceptionAsError(
new IllegalArgumentException("Key endpoint in key identifier is invalid."));
} else if (Strings.isNullOrEmpty(keyName)) {
throw logger.logExceptionAsError(
new IllegalArgumentException("Key name in key identifier is invalid."));
}
} catch (MalformedURLException e) {
throw logger.logExceptionAsError(new IllegalArgumentException("The key identifier is malformed.", e));
}
}

class CryptographyAsyncClient {
static final String KEYVAULT_TRACING_NAMESPACE_VALUE = "Microsoft.KeyVault";
static final String SECRETS_COLLECTION = "secrets";
// Key material for local cryptography; null until fetched (see ensureValidKeyAvailable) for service-backed clients.
JsonWebKey key;
private final ClientLogger logger = new ClientLogger(CryptographyAsyncClient.class);
private final CryptographyService service;
private final HttpPipeline pipeline;
// Full Azure Key Vault key identifier this client operates on.
private final String keyId;
// Null in local-only mode (constructed from a JsonWebKey).
private CryptographyServiceClient cryptographyServiceClient;
// Lazily created per key type by initializeCryptoClients().
private LocalKeyCryptographyClient localKeyCryptographyClient;
// Collection segment parsed from the key id (e.g. "keys" or "secrets"); set by unpackAndValidateId.
private String keyCollection;
/**
 * Creates a {@link CryptographyAsyncClient} that uses a given {@link HttpPipeline pipeline} to service requests.
 *
 * @param keyId The Azure Key Vault key identifier to use for cryptography operations.
 * @param pipeline {@link HttpPipeline} that the HTTP requests and responses flow through.
 * @param version {@link CryptographyServiceVersion} of the service to be used when making requests.
 */
CryptographyAsyncClient(String keyId, HttpPipeline pipeline, CryptographyServiceVersion version) {
    // Validates the id and caches the collection segment; throws IllegalArgumentException on malformed ids.
    unpackAndValidateId(keyId);
    this.keyId = keyId;
    this.pipeline = pipeline;
    this.service = RestProxy.create(CryptographyService.class, pipeline);
    this.cryptographyServiceClient = new CryptographyServiceClient(keyId, service, version);
    // Key material is fetched lazily on first use (see ensureValidKeyAvailable).
    this.key = null;
}
/**
 * Creates a {@link CryptographyAsyncClient} that uses a {@link JsonWebKey} to perform local cryptography
 * operations.
 *
 * @param jsonWebKey The {@link JsonWebKey} to use for local cryptography operations.
 * @throws NullPointerException If {@code jsonWebKey} is {@code null}.
 * @throws IllegalArgumentException If the key is invalid or is missing its key operations or key type.
 */
CryptographyAsyncClient(JsonWebKey jsonWebKey) {
    Objects.requireNonNull(jsonWebKey, "The JSON Web Key is required.");
    // Consistency fix: log-and-throw via the client logger like the rest of this class, instead of throwing
    // bare IllegalArgumentExceptions that bypass logging.
    if (!jsonWebKey.isValid()) {
        throw logger.logExceptionAsError(new IllegalArgumentException("The JSON Web Key is not valid."));
    }
    if (jsonWebKey.getKeyOps() == null) {
        throw logger.logExceptionAsError(
            new IllegalArgumentException("The JSON Web Key's key operations property is not configured."));
    }
    if (jsonWebKey.getKeyType() == null) {
        throw logger.logExceptionAsError(
            new IllegalArgumentException("The JSON Web Key's key type property is not configured."));
    }
    this.key = jsonWebKey;
    this.keyId = jsonWebKey.getId();
    // Local-only mode: no HTTP pipeline or service-backed client.
    this.pipeline = null;
    this.service = null;
    this.cryptographyServiceClient = null;
    initializeCryptoClients();
}
/**
 * Creates the local cryptography client matching the key's type (RSA, EC, or symmetric AES/oct). Idempotent: a
 * second call is a no-op. Throws {@link IllegalArgumentException} for unsupported key types.
 */
private void initializeCryptoClients() {
    if (this.localKeyCryptographyClient != null) {
        // Already initialized; nothing to do.
        return;
    }
    if (this.key.getKeyType().equals(RSA) || this.key.getKeyType().equals(RSA_HSM)) {
        this.localKeyCryptographyClient = new RsaKeyCryptographyClient(this.key, this.cryptographyServiceClient);
        return;
    }
    if (this.key.getKeyType().equals(EC) || this.key.getKeyType().equals(EC_HSM)) {
        this.localKeyCryptographyClient = new EcKeyCryptographyClient(this.key, this.cryptographyServiceClient);
        return;
    }
    if (this.key.getKeyType().equals(OCT) || this.key.getKeyType().equals(OCT_HSM)) {
        this.localKeyCryptographyClient = new AesKeyCryptographyClient(this.key, this.cryptographyServiceClient);
        return;
    }
    throw logger.logExceptionAsError(new IllegalArgumentException(String.format(
        "The JSON Web Key type: %s is not supported.", this.key.getKeyType().toString())));
}
/**
 * Gets the {@link HttpPipeline} powering this client.
 *
 * @return The pipeline. May be {@code null} when the client operates in local-only mode (constructed from a
 * {@link JsonWebKey}).
 */
HttpPipeline getHttpPipeline() {
    return this.pipeline;
}
// Defers reading the key identifier until subscription time.
Mono<String> getKeyId() {
    return Mono.defer(() -> Mono.just(this.keyId));
}
/**
 * Gets the public part of the configured key. The get key operation is applicable to all key types and it
 * requires the {@code keys/get} permission for non-local operations.
 *
 * @return A {@link Mono} containing the requested {@link KeyVaultKey key}.
 *
 * @throws ResourceNotFoundException When the configured key doesn't exist in the key vault.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<KeyVaultKey> getKey() {
    try {
        // Unwrap the Response<KeyVaultKey> to just its value.
        return getKeyWithResponse().flatMap(FluxUtil::toMono);
    } catch (RuntimeException runtimeException) {
        return monoError(logger, runtimeException);
    }
}
/**
 * Gets the public part of the configured key, along with the full HTTP response. Applicable to all key types;
 * requires the {@code keys/get} permission for non-local operations.
 *
 * @return A {@link Mono} containing a {@link Response} whose value is the requested {@link KeyVaultKey key}.
 *
 * @throws ResourceNotFoundException When the configured key doesn't exist in the key vault.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<KeyVaultKey>> getKeyWithResponse() {
    try {
        // Resolve the reactive Context from the subscriber and forward to the package-private overload.
        return withContext(this::getKeyWithResponse);
    } catch (RuntimeException runtimeException) {
        return monoError(logger, runtimeException);
    }
}
/**
 * Retrieves the key from the service; unsupported when this client was constructed from a local JsonWebKey.
 *
 * @param context Additional context propagated through the call chain.
 * @return A {@link Mono} containing a {@link Response} with the requested {@link KeyVaultKey key}.
 */
Mono<Response<KeyVaultKey>> getKeyWithResponse(Context context) {
    // Guard clause: no service-backed client means this client is local-only.
    if (cryptographyServiceClient == null) {
        throw logger.logExceptionAsError(new UnsupportedOperationException(
            "Operation not supported when in operating local-only mode"));
    }
    return cryptographyServiceClient.getKey(context);
}
/**
 * Fetches key material stored in the secrets collection via the service client.
 *
 * @return A {@link Mono} containing the retrieved {@link JsonWebKey}.
 */
Mono<JsonWebKey> getSecretKey() {
    try {
        return withContext(ctx -> cryptographyServiceClient.getSecretKey(ctx))
            .flatMap(FluxUtil::toMono);
    } catch (RuntimeException runtimeException) {
        return monoError(logger, runtimeException);
    }
}
/**
 * Encrypts an arbitrary sequence of bytes using the configured key. Note that the encrypt operation only supports
 * a single block of data, the size of which is dependent on the target key and the encryption algorithm to be
 * used. Supported for both symmetric and asymmetric keys; for asymmetric keys the public portion of the key is
 * used. This operation requires the {@code keys/encrypt} permission for non-local operations.
 *
 * @param algorithm The algorithm to be used for encryption.
 * @param plaintext The content to be encrypted.
 *
 * @return A {@link Mono} containing a {@link EncryptResult} with the encrypted content.
 *
 * @throws NullPointerException If {@code algorithm} or {@code plaintext} are {@code null}.
 * @throws ResourceNotFoundException If the key cannot be found for encryption.
 * @throws UnsupportedOperationException If the encrypt operation is not supported or configured on the key.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<EncryptResult> encrypt(EncryptionAlgorithm algorithm, byte[] plaintext) {
    Objects.requireNonNull(algorithm, "'algorithm' cannot be null.");
    Objects.requireNonNull(plaintext, "'plaintext' cannot be null.");
    // No caller-supplied Context; pass null through to the shared implementation.
    return encrypt(algorithm, plaintext, null);
}
/**
 * Encrypts an arbitrary sequence of bytes using the configured key and the algorithm carried by the supplied
 * parameters. Only a single block of data is supported; its size depends on the target key and algorithm.
 * Supported for both symmetric and asymmetric keys; for asymmetric keys the public portion of the key is used.
 * This operation requires the {@code keys/encrypt} permission for non-local operations.
 *
 * @param encryptParameters The parameters to use in the encryption operation.
 *
 * @return A {@link Mono} containing a {@link EncryptResult} with the encrypted content.
 *
 * @throws NullPointerException If {@code encryptParameters} is {@code null}.
 * @throws ResourceNotFoundException If the key cannot be found for encryption.
 * @throws UnsupportedOperationException If the encrypt operation is not supported or configured on the key.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<EncryptResult> encrypt(EncryptParameters encryptParameters) {
    Objects.requireNonNull(encryptParameters, "'encryptParameters' cannot be null.");
    try {
        // Resolve the reactive Context from the subscriber and forward to the package-private overload.
        return withContext(ctx -> encrypt(encryptParameters, ctx));
    } catch (RuntimeException runtimeException) {
        return monoError(logger, runtimeException);
    }
}
/**
 * Encrypts locally when valid key material is available and permitted, otherwise delegates to the service.
 *
 * @param algorithm The algorithm to be used for encryption.
 * @param plaintext The content to be encrypted.
 * @param context Additional context propagated through the call chain.
 * @return A {@link Mono} containing a {@link EncryptResult} with the encrypted content.
 */
Mono<EncryptResult> encrypt(EncryptionAlgorithm algorithm, byte[] plaintext, Context context) {
    return ensureValidKeyAvailable().flatMap(localKeyUsable -> {
        if (!localKeyUsable) {
            // No usable local key material; fall back to the Key Vault service.
            return cryptographyServiceClient.encrypt(algorithm, plaintext, context);
        }
        if (checkKeyPermissions(this.key.getKeyOps(), KeyOperation.ENCRYPT)) {
            return localKeyCryptographyClient.encryptAsync(algorithm, plaintext, context, key);
        }
        return Mono.error(logger.logExceptionAsError(new UnsupportedOperationException(String.format(
            "Encrypt operation is missing permission/not supported for key with id: %s", key.getId()))));
    });
}
/**
 * Parameter-object variant: encrypts locally when valid key material is available and permitted, otherwise
 * delegates to the service.
 *
 * @param encryptParameters The parameters to use in the encryption operation.
 * @param context Additional context propagated through the call chain.
 * @return A {@link Mono} containing a {@link EncryptResult} with the encrypted content.
 */
Mono<EncryptResult> encrypt(EncryptParameters encryptParameters, Context context) {
    return ensureValidKeyAvailable().flatMap(localKeyUsable -> {
        if (!localKeyUsable) {
            // No usable local key material; fall back to the Key Vault service.
            return cryptographyServiceClient.encrypt(encryptParameters, context);
        }
        if (checkKeyPermissions(this.key.getKeyOps(), KeyOperation.ENCRYPT)) {
            return localKeyCryptographyClient.encryptAsync(encryptParameters, context, key);
        }
        return Mono.error(logger.logExceptionAsError(new UnsupportedOperationException(String.format(
            "Encrypt operation is missing permission/not supported for key with id: %s", key.getId()))));
    });
}
/**
* Decrypts a single block of encrypted data using the configured key and specified algorithm. Note that only a
* single block of data may be decrypted, the size of this block is dependent on the target key and the algorithm
* to be used. The decrypt operation is supported for both asymmetric and symmetric keys. This operation requires
* the {@code keys/decrypt} permission for non-local operations.
*
* <p>The {@link EncryptionAlgorithm encryption algorithm} indicates the type of algorithm to use for decrypting
* the specified encrypted content. Possible values for asymmetric keys include:
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
*
* Possible values for symmetric keys include: {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
*
* <p><strong>Code Samples</strong></p>
* <p>Decrypts the encrypted content. Subscribes to the call asynchronously and prints out the decrypted content
* details when a response has been received.</p>
*
* {@codesnippet com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.decrypt
*
* @param algorithm The algorithm to be used for decryption.
* @param ciphertext The content to be decrypted.
*
* @return A {@link Mono} containing the decrypted blob.
*
* @throws NullPointerException If {@code algorithm} or {@code ciphertext} are {@code null}.
* @throws ResourceNotFoundException If the key cannot be found for decryption.
* @throws UnsupportedOperationException If the decrypt operation is not supported or configured on the key.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<DecryptResult> decrypt(EncryptionAlgorithm algorithm, byte[] ciphertext) {
    Objects.requireNonNull(algorithm, "'algorithm' cannot be null.");
    // BUG FIX: the original validated 'algorithm' twice (with the 'ciphertext' message),
    // so a null 'ciphertext' slipped past this guard and failed later with an unclear NPE.
    Objects.requireNonNull(ciphertext, "'ciphertext' cannot be null.");
    // Delegate to the Context-aware overload; null means no caller-supplied context.
    return decrypt(algorithm, ciphertext, null);
}
/**
* Decrypts a single block of encrypted data using the configured key and specified algorithm. Note that only a
* single block of data may be decrypted, the size of this block is dependent on the target key and the algorithm
* to be used. The decrypt operation is supported for both asymmetric and symmetric keys. This operation requires
* the {@code keys/decrypt} permission for non-local operations.
*
* <p>The {@link EncryptionAlgorithm encryption algorithm} indicates the type of algorithm to use for decrypting
* the specified encrypted content. Possible values for asymmetric keys include:
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
*
* Possible values for symmetric keys include: {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
*
* <p><strong>Code Samples</strong></p>
* <p>Decrypts the encrypted content. Subscribes to the call asynchronously and prints out the decrypted content
* details when a response has been received.</p>
*
* {@codesnippet com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.decrypt
*
* @param decryptParameters The parameters to use in the decryption operation.
*
* @return A {@link Mono} containing the decrypted blob.
*
* @throws NullPointerException If {@code algorithm} or {@code ciphertext} are {@code null}.
* @throws ResourceNotFoundException If the key cannot be found for decryption.
* @throws UnsupportedOperationException If the decrypt operation is not supported or configured on the key.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<DecryptResult> decrypt(DecryptParameters decryptParameters) {
    Objects.requireNonNull(decryptParameters, "'decryptParameters' cannot be null.");
    try {
        // Capture the subscriber's reactor context and hand off to the context-aware overload.
        return withContext(ctx -> decrypt(decryptParameters, ctx));
    } catch (RuntimeException e) {
        // Surface synchronous failures through the returned Mono instead of throwing.
        return monoError(logger, e);
    }
}
Mono<DecryptResult> decrypt(EncryptionAlgorithm algorithm, byte[] ciphertext, Context context) {
    // Prefer local decryption when valid key material is cached; otherwise call the service.
    return ensureValidKeyAvailable().flatMap(localKeyUsable -> {
        if (localKeyUsable) {
            if (checkKeyPermissions(this.key.getKeyOps(), KeyOperation.DECRYPT)) {
                return localKeyCryptographyClient.decryptAsync(algorithm, ciphertext, context, key);
            }
            return Mono.error(logger.logExceptionAsError(new UnsupportedOperationException(String.format(
                "Decrypt operation is not allowed for key with id: %s", key.getId()))));
        }
        return cryptographyServiceClient.decrypt(algorithm, ciphertext, context);
    });
}
Mono<DecryptResult> decrypt(DecryptParameters decryptParameters, Context context) {
    // Prefer local decryption when valid key material is cached; otherwise call the service.
    return ensureValidKeyAvailable().flatMap(localKeyUsable -> {
        if (localKeyUsable) {
            if (checkKeyPermissions(this.key.getKeyOps(), KeyOperation.DECRYPT)) {
                return localKeyCryptographyClient.decryptAsync(decryptParameters, context, key);
            }
            return Mono.error(logger.logExceptionAsError(new UnsupportedOperationException(String.format(
                "Decrypt operation is not allowed for key with id: %s", key.getId()))));
        }
        return cryptographyServiceClient.decrypt(decryptParameters, context);
    });
}
/**
* Creates a signature from a digest using the configured key. The sign operation supports both asymmetric and
* symmetric keys. This operation requires the {@code keys/sign} permission for non-local operations.
*
* <p>The {@link SignatureAlgorithm signature algorithm} indicates the type of algorithm to use to create the
* signature from the digest. Possible values include:
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
*
* <p><strong>Code Samples</strong></p>
* <p>Sings the digest. Subscribes to the call asynchronously and prints out the signature details when a response
* has been received.</p>
*
* {@codesnippet com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.sign
*
* @param algorithm The algorithm to use for signing.
* @param digest The content from which signature is to be created.
*
* @return A {@link Mono} containing a {@link SignResult} whose {@link SignResult
* the created signature.
*
* @throws NullPointerException If {@code algorithm} or {@code digest} is {@code null}.
* @throws ResourceNotFoundException If the key cannot be found for signing.
* @throws UnsupportedOperationException If the sign operation is not supported or configured on the key.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<SignResult> sign(SignatureAlgorithm algorithm, byte[] digest) {
    try {
        // Capture the subscriber's reactor context and hand off to the context-aware overload.
        return withContext(ctx -> sign(algorithm, digest, ctx));
    } catch (RuntimeException e) {
        return monoError(logger, e);
    }
}
Mono<SignResult> sign(SignatureAlgorithm algorithm, byte[] digest, Context context) {
    Objects.requireNonNull(algorithm, "Signature algorithm cannot be null.");
    Objects.requireNonNull(digest, "Digest content to be signed cannot be null.");
    // Sign locally when valid key material is cached; otherwise call the service.
    return ensureValidKeyAvailable().flatMap(localKeyUsable -> {
        if (localKeyUsable) {
            if (checkKeyPermissions(this.key.getKeyOps(), KeyOperation.SIGN)) {
                return localKeyCryptographyClient.signAsync(algorithm, digest, context, key);
            }
            return Mono.error(logger.logExceptionAsError(new UnsupportedOperationException(String.format(
                "Sign operation is not allowed for key with id: %s", key.getId()))));
        }
        return cryptographyServiceClient.sign(algorithm, digest, context);
    });
}
/**
* Verifies a signature using the configured key. The verify operation supports both symmetric keys and asymmetric
* keys. In case of asymmetric keys public portion of the key is used to verify the signature. This operation
* requires the {@code keys/verify} permission for non-local operations.
*
* <p>The {@link SignatureAlgorithm signature algorithm} indicates the type of algorithm to use to verify the
* signature. Possible values include: {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
*
* <p><strong>Code Samples</strong></p>
* <p>Verifies the signature against the specified digest. Subscribes to the call asynchronously and prints out the
* verification details when a response has been received.</p>
*
* {@codesnippet com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.verify
*
* @param algorithm The algorithm to use for signing.
* @param digest The content from which signature was created.
* @param signature The signature to be verified.
*
* @return A {@link Mono} containing a {@link VerifyResult}
* {@link VerifyResult
*
* @throws NullPointerException If {@code algorithm}, {@code digest} or {@code signature} is {@code null}.
* @throws ResourceNotFoundException If the key cannot be found for verifying.
* @throws UnsupportedOperationException If the verify operation is not supported or configured on the key.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<VerifyResult> verify(SignatureAlgorithm algorithm, byte[] digest, byte[] signature) {
    try {
        // Capture the subscriber's reactor context and hand off to the context-aware overload.
        return withContext(ctx -> verify(algorithm, digest, signature, ctx));
    } catch (RuntimeException e) {
        return monoError(logger, e);
    }
}
Mono<VerifyResult> verify(SignatureAlgorithm algorithm, byte[] digest, byte[] signature, Context context) {
    Objects.requireNonNull(algorithm, "Signature algorithm cannot be null.");
    Objects.requireNonNull(digest, "Digest content cannot be null.");
    Objects.requireNonNull(signature, "Signature to be verified cannot be null.");
    // Verify locally when valid key material is cached; otherwise call the service.
    return ensureValidKeyAvailable().flatMap(localKeyUsable -> {
        if (localKeyUsable) {
            if (checkKeyPermissions(this.key.getKeyOps(), KeyOperation.VERIFY)) {
                return localKeyCryptographyClient.verifyAsync(algorithm, digest, signature, context, key);
            }
            return Mono.error(logger.logExceptionAsError(new UnsupportedOperationException(String.format(
                "Verify operation is not allowed for key with id: %s", key.getId()))));
        }
        return cryptographyServiceClient.verify(algorithm, digest, signature, context);
    });
}
/**
* Wraps a symmetric key using the configured key. The wrap operation supports wrapping a symmetric key with both
* symmetric and asymmetric keys. This operation requires the {@code keys/wrapKey} permission for non-local
* operations.
*
* <p>The {@link KeyWrapAlgorithm wrap algorithm} indicates the type of algorithm to use for wrapping the specified
* key content. Possible values include:
* {@link KeyWrapAlgorithm
* {@link KeyWrapAlgorithm
*
* Possible values for symmetric keys include: {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
*
* <p><strong>Code Samples</strong></p>
* <p>Wraps the key content. Subscribes to the call asynchronously and prints out the wrapped key details when a
* response has been received.</p>
*
* {@codesnippet com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.wrapKey
*
* @param algorithm The encryption algorithm to use for wrapping the key.
* @param key The key content to be wrapped.
*
* @return A {@link Mono} containing a {@link WrapResult} whose {@link WrapResult
* contains the wrapped key result.
*
* @throws NullPointerException If {@code algorithm} or {@code key} are {@code null}.
* @throws ResourceNotFoundException If the key cannot be found for wrap operation.
* @throws UnsupportedOperationException If the wrap operation is not supported or configured on the key.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<WrapResult> wrapKey(KeyWrapAlgorithm algorithm, byte[] key) {
    try {
        // Capture the subscriber's reactor context and hand off to the context-aware overload.
        return withContext(ctx -> wrapKey(algorithm, key, ctx));
    } catch (RuntimeException e) {
        return monoError(logger, e);
    }
}
Mono<WrapResult> wrapKey(KeyWrapAlgorithm algorithm, byte[] key, Context context) {
    Objects.requireNonNull(algorithm, "Key wrap algorithm cannot be null.");
    Objects.requireNonNull(key, "Key content to be wrapped cannot be null.");
    // NOTE: the 'key' parameter shadows this client's JsonWebKey field, hence 'this.key' below.
    return ensureValidKeyAvailable().flatMap(localKeyUsable -> {
        if (localKeyUsable) {
            if (checkKeyPermissions(this.key.getKeyOps(), KeyOperation.WRAP_KEY)) {
                return localKeyCryptographyClient.wrapKeyAsync(algorithm, key, context, this.key);
            }
            return Mono.error(logger.logExceptionAsError(new UnsupportedOperationException(String.format(
                "Wrap Key operation is not allowed for key with id: %s", this.key.getId()))));
        }
        return cryptographyServiceClient.wrapKey(algorithm, key, context);
    });
}
/**
* Unwraps a symmetric key using the configured key that was initially used for wrapping that key. This operation
* is the reverse of the wrap operation. The unwrap operation supports asymmetric and symmetric keys to unwrap. This
* operation requires the {@code keys/unwrapKey} permission for non-local operations.
*
* <p>The {@link KeyWrapAlgorithm wrap algorithm} indicates the type of algorithm to use for unwrapping the
* specified encrypted key content. Possible values for asymmetric keys include:
* {@link KeyWrapAlgorithm
* {@link KeyWrapAlgorithm
*
* Possible values for symmetric keys include: {@link KeyWrapAlgorithm
* {@link KeyWrapAlgorithm
*
* <p><strong>Code Samples</strong></p>
* <p>Unwraps the key content. Subscribes to the call asynchronously and prints out the unwrapped key details when
* a response has been received.</p>
*
* {@codesnippet com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.unwrapKey
*
* @param algorithm The encryption algorithm to use for wrapping the key.
* @param encryptedKey The encrypted key content to unwrap.
*
* @return A {@link Mono} containing an {@link UnwrapResult} whose {@link UnwrapResult
* key} contains the unwrapped key result.
*
* @throws NullPointerException If {@code algorithm} or {@code encryptedKey} are {@code null}.
* @throws ResourceNotFoundException If the key cannot be found for wrap operation.
* @throws UnsupportedOperationException If the unwrap operation is not supported or configured on the key.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<UnwrapResult> unwrapKey(KeyWrapAlgorithm algorithm, byte[] encryptedKey) {
    try {
        // Capture the subscriber's reactor context and hand off to the context-aware overload.
        return withContext(ctx -> unwrapKey(algorithm, encryptedKey, ctx));
    } catch (RuntimeException e) {
        return monoError(logger, e);
    }
}
Mono<UnwrapResult> unwrapKey(KeyWrapAlgorithm algorithm, byte[] encryptedKey, Context context) {
    Objects.requireNonNull(algorithm, "Key wrap algorithm cannot be null.");
    Objects.requireNonNull(encryptedKey, "Encrypted key content to be unwrapped cannot be null.");
    // Unwrap locally when valid key material is cached; otherwise call the service.
    return ensureValidKeyAvailable().flatMap(localKeyUsable -> {
        if (localKeyUsable) {
            if (checkKeyPermissions(this.key.getKeyOps(), KeyOperation.UNWRAP_KEY)) {
                return localKeyCryptographyClient.unwrapKeyAsync(algorithm, encryptedKey, context, key);
            }
            return Mono.error(logger.logExceptionAsError(new UnsupportedOperationException(String.format(
                "Unwrap Key operation is not allowed for key with id: %s", this.key.getId()))));
        }
        return cryptographyServiceClient.unwrapKey(algorithm, encryptedKey, context);
    });
}
/**
* Creates a signature from the raw data using the configured key. The sign data operation supports both asymmetric
* and symmetric keys. This operation requires the {@code keys/sign} permission for non-local operations.
*
* <p>The {@link SignatureAlgorithm signature algorithm} indicates the type of algorithm to use to sign the digest.
* Possible values include:
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
*
* <p><strong>Code Samples</strong></p>
* <p>Signs the raw data. Subscribes to the call asynchronously and prints out the signature details when a
* response has been received.</p>
*
* {@codesnippet com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.signData
*
* @param algorithm The algorithm to use for signing.
* @param data The content from which signature is to be created.
*
* @return A {@link Mono} containing a {@link SignResult} whose {@link SignResult
* the created signature.
*
* @throws NullPointerException If {@code algorithm} or {@code data} is {@code null}.
* @throws ResourceNotFoundException If the key cannot be found for signing.
* @throws UnsupportedOperationException If the sign operation is not supported or configured on the key.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<SignResult> signData(SignatureAlgorithm algorithm, byte[] data) {
    try {
        // Capture the subscriber's reactor context and hand off to the context-aware overload.
        return withContext(ctx -> signData(algorithm, data, ctx));
    } catch (RuntimeException e) {
        return monoError(logger, e);
    }
}
Mono<SignResult> signData(SignatureAlgorithm algorithm, byte[] data, Context context) {
    Objects.requireNonNull(algorithm, "Signature algorithm cannot be null.");
    Objects.requireNonNull(data, "Data to be signed cannot be null.");
    // Sign locally when valid key material is cached; otherwise call the service.
    return ensureValidKeyAvailable().flatMap(localKeyUsable -> {
        if (localKeyUsable) {
            if (checkKeyPermissions(this.key.getKeyOps(), KeyOperation.SIGN)) {
                return localKeyCryptographyClient.signDataAsync(algorithm, data, context, key);
            }
            return Mono.error(logger.logExceptionAsError(new UnsupportedOperationException(String.format(
                "Sign Operation is not allowed for key with id: %s", this.key.getId()))));
        }
        return cryptographyServiceClient.signData(algorithm, data, context);
    });
}
/**
* Verifies a signature against the raw data using the configured key. The verify operation supports both symmetric
* keys and asymmetric keys. In case of asymmetric keys public portion of the key is used to verify the signature.
* This operation requires the {@code keys/verify} permission for non-local operations.
*
* <p>The {@link SignatureAlgorithm signature algorithm} indicates the type of algorithm to use to verify the
* signature. Possible values include:
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
*
* <p><strong>Code Samples</strong></p>
* <p>Verifies the signature against the raw data. Subscribes to the call asynchronously and prints out the
* verification details when a response has been received.</p>
*
* {@codesnippet com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.verifyData
*
* @param algorithm The algorithm to use for signing.
* @param data The raw content against which signature is to be verified.
* @param signature The signature to be verified.
*
* @return A {@link Mono} containing a {@link VerifyResult}
* {@link VerifyResult
*
* @throws NullPointerException If {@code algorithm}, {@code data} or {@code signature} is {@code null}.
* @throws ResourceNotFoundException If the key cannot be found for verifying.
* @throws UnsupportedOperationException If the verify operation is not supported or configured on the key.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<VerifyResult> verifyData(SignatureAlgorithm algorithm, byte[] data, byte[] signature) {
    try {
        // Capture the subscriber's reactor context and hand off to the context-aware overload.
        return withContext(ctx -> verifyData(algorithm, data, signature, ctx));
    } catch (RuntimeException e) {
        return monoError(logger, e);
    }
}
Mono<VerifyResult> verifyData(SignatureAlgorithm algorithm, byte[] data, byte[] signature, Context context) {
    Objects.requireNonNull(algorithm, "Signature algorithm cannot be null.");
    Objects.requireNonNull(data, "Data cannot be null.");
    Objects.requireNonNull(signature, "Signature to be verified cannot be null.");
    // Verify locally when valid key material is cached; otherwise call the service.
    return ensureValidKeyAvailable().flatMap(localKeyUsable -> {
        if (localKeyUsable) {
            if (checkKeyPermissions(this.key.getKeyOps(), KeyOperation.VERIFY)) {
                return localKeyCryptographyClient.verifyDataAsync(algorithm, data, signature, context, key);
            }
            return Mono.error(logger.logExceptionAsError(new UnsupportedOperationException(String.format(
                "Verify operation is not allowed for key with id: %s", this.key.getId()))));
        }
        return cryptographyServiceClient.verifyData(algorithm, data, signature, context);
    });
}
private boolean checkKeyPermissions(List<KeyOperation> operations, KeyOperation keyOperation) {
    // True when the requested operation appears in the key's permitted operations.
    return operations.stream().anyMatch(keyOperation::equals);
}
/**
 * Ensures that valid local key material is cached before a local crypto operation.
 *
 * @return A {@link Mono} emitting {@code true} when a valid {@link JsonWebKey} is available for
 * local operations, or {@code false} when the caller should fall back to the service client.
 */
private Mono<Boolean> ensureValidKeyAvailable() {
    boolean keyNotAvailable = (key == null && keyCollection != null);
    boolean keyNotValid = (key != null && !key.isValid());
    if (!keyNotAvailable && !keyNotValid) {
        // A valid key is already cached; nothing to fetch.
        return Mono.defer(() -> Mono.just(true));
    }
    // BUG FIX: constant-first equals avoids an NPE when 'keyCollection' is null (e.g. a client
    // whose cached key became invalid but that was never given a collection).
    // Also deduplicates the previously copy-pasted map lambda of the two branches.
    Mono<JsonWebKey> retrievedKey = SECRETS_COLLECTION.equals(keyCollection)
        ? getSecretKey()
        : getKey().map(KeyVaultKey::getKey);
    return retrievedKey.map(jsonWebKey -> {
        key = jsonWebKey;
        if (key.isValid()) {
            initializeCryptoClients();
            return true;
        }
        return false;
    });
}
// Package-private accessor for the backing service client (null for local-only clients).
CryptographyServiceClient getCryptographyServiceClient() {
    return cryptographyServiceClient;
}
// Package-private mutator allowing the backing service client to be swapped.
void setCryptographyServiceClient(CryptographyServiceClient serviceClient) {
    this.cryptographyServiceClient = serviceClient;
}
} | class CryptographyAsyncClient {
// Tracing namespace value for Azure Key Vault.
static final String KEYVAULT_TRACING_NAMESPACE_VALUE = "Microsoft.KeyVault";
// Collection name used when the identifier points at a secret rather than a key.
static final String SECRETS_COLLECTION = "secrets";
// Cached key material; null until retrieved for service-backed clients.
JsonWebKey key;
private final ClientLogger logger = new ClientLogger(CryptographyAsyncClient.class);
// Service proxy; null for local-only clients built from a JsonWebKey.
private final CryptographyService service;
// HTTP pipeline; null for local-only clients built from a JsonWebKey.
private final HttpPipeline pipeline;
private final String keyId;
private CryptographyServiceClient cryptographyServiceClient;
// Lazily initialized local crypto implementation matching the key type.
private LocalKeyCryptographyClient localKeyCryptographyClient;
// Collection segment parsed from the key identifier (e.g. "secrets"); may be null.
private String keyCollection;
/**
* Creates a {@link CryptographyAsyncClient} that uses a given {@link HttpPipeline pipeline} to service requests.
*
* @param keyId The Azure Key Vault key identifier to use for cryptography operations.
* @param pipeline {@link HttpPipeline} that the HTTP requests and responses flow through.
* @param version {@link CryptographyServiceVersion} of the service to be used when making requests.
*/
CryptographyAsyncClient(String keyId, HttpPipeline pipeline, CryptographyServiceVersion version) {
    // Validates the identifier first so an invalid id fails before any state is set.
    unpackAndValidateId(keyId);
    this.keyId = keyId;
    this.pipeline = pipeline;
    this.service = RestProxy.create(CryptographyService.class, pipeline);
    this.cryptographyServiceClient = new CryptographyServiceClient(keyId, service, version);
    // Key material is fetched lazily on first use (see ensureValidKeyAvailable).
    this.key = null;
}
/**
* Creates a {@link CryptographyAsyncClient} that uses a {@link JsonWebKey} to perform local cryptography
* operations.
*
* @param jsonWebKey The {@link JsonWebKey} to use for local cryptography operations.
*/
CryptographyAsyncClient(JsonWebKey jsonWebKey) {
    // Validation order is intentional: null check, then validity, then required properties.
    Objects.requireNonNull(jsonWebKey, "The JSON Web Key is required.");
    if (!jsonWebKey.isValid()) {
        throw new IllegalArgumentException("The JSON Web Key is not valid.");
    }
    if (jsonWebKey.getKeyOps() == null) {
        throw new IllegalArgumentException("The JSON Web Key's key operations property is not configured.");
    }
    if (jsonWebKey.getKeyType() == null) {
        throw new IllegalArgumentException("The JSON Web Key's key type property is not configured.");
    }
    this.key = jsonWebKey;
    this.keyId = jsonWebKey.getId();
    // Local-only mode: no pipeline/service, so service-backed operations are unsupported.
    this.pipeline = null;
    this.service = null;
    this.cryptographyServiceClient = null;
    initializeCryptoClients();
}
private void initializeCryptoClients() {
    // Idempotent: keep an already-created local client.
    if (localKeyCryptographyClient != null) {
        return;
    }
    // Pick the local implementation matching the key type; early-return per family.
    if (key.getKeyType().equals(RSA) || key.getKeyType().equals(RSA_HSM)) {
        this.localKeyCryptographyClient = new RsaKeyCryptographyClient(this.key, this.cryptographyServiceClient);
        return;
    }
    if (key.getKeyType().equals(EC) || key.getKeyType().equals(EC_HSM)) {
        this.localKeyCryptographyClient = new EcKeyCryptographyClient(this.key, this.cryptographyServiceClient);
        return;
    }
    if (key.getKeyType().equals(OCT) || key.getKeyType().equals(OCT_HSM)) {
        this.localKeyCryptographyClient = new AesKeyCryptographyClient(this.key, this.cryptographyServiceClient);
        return;
    }
    throw logger.logExceptionAsError(new IllegalArgumentException(String.format(
        "The JSON Web Key type: %s is not supported.", this.key.getKeyType().toString())));
}
/**
* Gets the {@link HttpPipeline} powering this client.
*
* @return The pipeline.
*/
HttpPipeline getHttpPipeline() {
    // May be null for local-only clients created from a JsonWebKey.
    return this.pipeline;
}
Mono<String> getKeyId() {
    // Deferred so the field is read at subscription time rather than at assembly time.
    return Mono.defer(() -> Mono.just(this.keyId));
}
/**
* Gets the public part of the configured key. The get key operation is applicable to all key types and it requires
* the {@code keys/get} permission for non-local operations.
*
* <p><strong>Code Samples</strong></p>
* <p>Gets the configured key in the client. Subscribes to the call asynchronously and prints out the returned key
* details when a response has been received.</p>
*
* {@codesnippet com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.getKey}
*
* @return A {@link Mono} containing the requested {@link KeyVaultKey key}.
*
* @throws ResourceNotFoundException When the configured key doesn't exist in the key vault.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<KeyVaultKey> getKey() {
    try {
        // Unwrap the Response<KeyVaultKey> into its value.
        return getKeyWithResponse().flatMap(response -> FluxUtil.toMono(response));
    } catch (RuntimeException e) {
        return monoError(logger, e);
    }
}
/**
* Gets the public part of the configured key. The get key operation is applicable to all key types and it requires
* the {@code keys/get} permission for non-local operations.
*
* <p><strong>Code Samples</strong></p>
* <p>Gets the configured key in the client. Subscribes to the call asynchronously and prints out the returned key
* details when a response has been received.</p>
*
* {@codesnippet com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.getKeyWithResponse}
*
* @return A {@link Mono} containing a {@link Response} whose {@link Response
* requested {@link KeyVaultKey key}.
*
* @throws ResourceNotFoundException When the configured key doesn't exist in the key vault.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<KeyVaultKey>> getKeyWithResponse() {
    try {
        // Capture the subscriber's reactor context and hand off to the context-aware overload.
        return withContext(ctx -> getKeyWithResponse(ctx));
    } catch (RuntimeException e) {
        return monoError(logger, e);
    }
}
Mono<Response<KeyVaultKey>> getKeyWithResponse(Context context) {
    // Guard clause: local-only clients have no service client and cannot fetch the key.
    if (cryptographyServiceClient == null) {
        throw logger.logExceptionAsError(new UnsupportedOperationException(
            "Operation not supported when in operating local-only mode"));
    }
    return cryptographyServiceClient.getKey(context);
}
Mono<JsonWebKey> getSecretKey() {
    try {
        // Fetch the secret-backed key and unwrap the response into its value.
        return withContext(ctx -> cryptographyServiceClient.getSecretKey(ctx))
            .flatMap(response -> FluxUtil.toMono(response));
    } catch (RuntimeException e) {
        return monoError(logger, e);
    }
}
/**
* Encrypts an arbitrary sequence of bytes using the configured key. Note that the encrypt operation only supports
* a single block of data, the size of which is dependent on the target key and the encryption algorithm to be used.
* The encrypt operation is supported for both symmetric keys and asymmetric keys. In case of asymmetric keys, the
* public portion of the key is used for encryption. This operation requires the {@code keys/encrypt} permission
* for non-local operations.
*
* <p>The {@link EncryptionAlgorithm encryption algorithm} indicates the type of algorithm to use for encrypting the
* specified {@code plaintext}. Possible values for asymmetric keys include:
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
*
* Possible values for symmetric keys include: {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
*
* <p><strong>Code Samples</strong></p>
* <p>Encrypts the content. Subscribes to the call asynchronously and prints out the encrypted content details when
* a response has been received.</p>
*
* {@codesnippet com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.encrypt
*
* @param algorithm The algorithm to be used for encryption.
* @param plaintext The content to be encrypted.
*
* @return A {@link Mono} containing a {@link EncryptResult} whose {@link EncryptResult
* contains the encrypted content.
*
* @throws NullPointerException If {@code algorithm} or {@code plaintext} are {@code null}.
* @throws ResourceNotFoundException If the key cannot be found for encryption.
* @throws UnsupportedOperationException If the encrypt operation is not supported or configured on the key.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<EncryptResult> encrypt(EncryptionAlgorithm algorithm, byte[] plaintext) {
    Objects.requireNonNull(algorithm, "'algorithm' cannot be null.");
    Objects.requireNonNull(plaintext, "'plaintext' cannot be null.");
    // Delegates to the Context-aware overload; null means no caller-supplied context.
    return encrypt(algorithm, plaintext, null);
}
/**
* Encrypts an arbitrary sequence of bytes using the configured key. Note that the encrypt operation only supports
* a single block of data, the size of which is dependent on the target key and the encryption algorithm to be used.
* The encrypt operation is supported for both symmetric keys and asymmetric keys. In case of asymmetric keys, the
* public portion of the key is used for encryption. This operation requires the {@code keys/encrypt} permission
* for non-local operations.
*
* <p>The {@link EncryptionAlgorithm encryption algorithm} indicates the type of algorithm to use for encrypting the
* specified {@code plaintext}. Possible values for asymmetric keys include:
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
*
* Possible values for symmetric keys include: {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
*
* <p><strong>Code Samples</strong></p>
* <p>Encrypts the content. Subscribes to the call asynchronously and prints out the encrypted content details when
* a response has been received.</p>
*
* {@codesnippet com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.encrypt
*
* @param encryptParameters The parameters to use in the encryption operation.
*
* @return A {@link Mono} containing a {@link EncryptResult} whose {@link EncryptResult
* contains the encrypted content.
*
* @throws NullPointerException If {@code algorithm} or {@code plaintext} are {@code null}.
* @throws ResourceNotFoundException If the key cannot be found for encryption.
* @throws UnsupportedOperationException If the encrypt operation is not supported or configured on the key.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<EncryptResult> encrypt(EncryptParameters encryptParameters) {
    Objects.requireNonNull(encryptParameters, "'encryptParameters' cannot be null.");
    try {
        // Capture the subscriber's reactor context and hand off to the context-aware overload.
        return withContext(ctx -> encrypt(encryptParameters, ctx));
    } catch (RuntimeException e) {
        return monoError(logger, e);
    }
}
Mono<EncryptResult> encrypt(EncryptionAlgorithm algorithm, byte[] plaintext, Context context) {
    // Encrypt locally when valid key material is cached; otherwise call the service.
    return ensureValidKeyAvailable().flatMap(localKeyUsable -> {
        if (localKeyUsable) {
            if (checkKeyPermissions(this.key.getKeyOps(), KeyOperation.ENCRYPT)) {
                return localKeyCryptographyClient.encryptAsync(algorithm, plaintext, context, key);
            }
            return Mono.error(logger.logExceptionAsError(new UnsupportedOperationException(String.format(
                "Encrypt operation is missing permission/not supported for key with id: %s", key.getId()))));
        }
        return cryptographyServiceClient.encrypt(algorithm, plaintext, context);
    });
}
Mono<EncryptResult> encrypt(EncryptParameters encryptParameters, Context context) {
    // Encrypt locally when valid key material is cached; otherwise call the service.
    return ensureValidKeyAvailable().flatMap(localKeyUsable -> {
        if (localKeyUsable) {
            if (checkKeyPermissions(this.key.getKeyOps(), KeyOperation.ENCRYPT)) {
                return localKeyCryptographyClient.encryptAsync(encryptParameters, context, key);
            }
            return Mono.error(logger.logExceptionAsError(new UnsupportedOperationException(String.format(
                "Encrypt operation is missing permission/not supported for key with id: %s", key.getId()))));
        }
        return cryptographyServiceClient.encrypt(encryptParameters, context);
    });
}
/**
* Decrypts a single block of encrypted data using the configured key and specified algorithm. Note that only a
* single block of data may be decrypted, the size of this block is dependent on the target key and the algorithm
* to be used. The decrypt operation is supported for both asymmetric and symmetric keys. This operation requires
* the {@code keys/decrypt} permission for non-local operations.
*
* <p>The {@link EncryptionAlgorithm encryption algorithm} indicates the type of algorithm to use for decrypting
* the specified encrypted content. Possible values for asymmetric keys include:
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
*
* Possible values for symmetric keys include: {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
*
* <p><strong>Code Samples</strong></p>
* <p>Decrypts the encrypted content. Subscribes to the call asynchronously and prints out the decrypted content
* details when a response has been received.</p>
*
* {@codesnippet com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.decrypt
*
* @param algorithm The algorithm to be used for decryption.
* @param ciphertext The content to be decrypted.
*
* @return A {@link Mono} containing the decrypted blob.
*
* @throws NullPointerException If {@code algorithm} or {@code ciphertext} are {@code null}.
* @throws ResourceNotFoundException If the key cannot be found for decryption.
* @throws UnsupportedOperationException If the decrypt operation is not supported or configured on the key.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<DecryptResult> decrypt(EncryptionAlgorithm algorithm, byte[] ciphertext) {
    Objects.requireNonNull(algorithm, "'algorithm' cannot be null.");
    // BUG FIX: the original validated 'algorithm' twice (with the 'ciphertext' message),
    // so a null 'ciphertext' slipped past this guard and failed later with an unclear NPE.
    Objects.requireNonNull(ciphertext, "'ciphertext' cannot be null.");
    // Delegate to the Context-aware overload; null means no caller-supplied context.
    return decrypt(algorithm, ciphertext, null);
}
/**
* Decrypts a single block of encrypted data using the configured key and specified algorithm. Note that only a
* single block of data may be decrypted, the size of this block is dependent on the target key and the algorithm
* to be used. The decrypt operation is supported for both asymmetric and symmetric keys. This operation requires
* the {@code keys/decrypt} permission for non-local operations.
*
* <p>The {@link EncryptionAlgorithm encryption algorithm} indicates the type of algorithm to use for decrypting
* the specified encrypted content. Possible values for asymmetric keys include:
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
*
* Possible values for symmetric keys include: {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
*
* <p><strong>Code Samples</strong></p>
* <p>Decrypts the encrypted content. Subscribes to the call asynchronously and prints out the decrypted content
* details when a response has been received.</p>
*
* {@codesnippet com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.decrypt
*
* @param decryptParameters The parameters to use in the decryption operation.
*
* @return A {@link Mono} containing the decrypted blob.
*
* @throws NullPointerException If {@code algorithm} or {@code ciphertext} are {@code null}.
* @throws ResourceNotFoundException If the key cannot be found for decryption.
* @throws UnsupportedOperationException If the decrypt operation is not supported or configured on the key.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<DecryptResult> decrypt(DecryptParameters decryptParameters) {
Objects.requireNonNull(decryptParameters, "'decryptParameters' cannot be null.");
try {
return withContext(context -> decrypt(decryptParameters, context));
} catch (RuntimeException ex) {
return monoError(logger, ex);
}
}
Mono<DecryptResult> decrypt(EncryptionAlgorithm algorithm, byte[] ciphertext, Context context) {
return ensureValidKeyAvailable().flatMap(available -> {
if (!available) {
return cryptographyServiceClient.decrypt(algorithm, ciphertext, context);
}
if (!checkKeyPermissions(this.key.getKeyOps(), KeyOperation.DECRYPT)) {
return Mono.error(logger.logExceptionAsError(new UnsupportedOperationException(String.format(
"Decrypt operation is not allowed for key with id: %s", key.getId()))));
}
return localKeyCryptographyClient.decryptAsync(algorithm, ciphertext, context, key);
});
}
Mono<DecryptResult> decrypt(DecryptParameters decryptParameters, Context context) {
return ensureValidKeyAvailable().flatMap(available -> {
if (!available) {
return cryptographyServiceClient.decrypt(decryptParameters, context);
}
if (!checkKeyPermissions(this.key.getKeyOps(), KeyOperation.DECRYPT)) {
return Mono.error(logger.logExceptionAsError(new UnsupportedOperationException(String.format(
"Decrypt operation is not allowed for key with id: %s", key.getId()))));
}
return localKeyCryptographyClient.decryptAsync(decryptParameters, context, key);
});
}
/**
* Creates a signature from a digest using the configured key. The sign operation supports both asymmetric and
* symmetric keys. This operation requires the {@code keys/sign} permission for non-local operations.
*
* <p>The {@link SignatureAlgorithm signature algorithm} indicates the type of algorithm to use to create the
* signature from the digest. Possible values include:
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
*
* <p><strong>Code Samples</strong></p>
* <p>Sings the digest. Subscribes to the call asynchronously and prints out the signature details when a response
* has been received.</p>
*
* {@codesnippet com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.sign
*
* @param algorithm The algorithm to use for signing.
* @param digest The content from which signature is to be created.
*
* @return A {@link Mono} containing a {@link SignResult} whose {@link SignResult
* the created signature.
*
* @throws NullPointerException If {@code algorithm} or {@code digest} is {@code null}.
* @throws ResourceNotFoundException If the key cannot be found for signing.
* @throws UnsupportedOperationException If the sign operation is not supported or configured on the key.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<SignResult> sign(SignatureAlgorithm algorithm, byte[] digest) {
try {
return withContext(context -> sign(algorithm, digest, context));
} catch (RuntimeException ex) {
return monoError(logger, ex);
}
}
Mono<SignResult> sign(SignatureAlgorithm algorithm, byte[] digest, Context context) {
Objects.requireNonNull(algorithm, "Signature algorithm cannot be null.");
Objects.requireNonNull(digest, "Digest content to be signed cannot be null.");
return ensureValidKeyAvailable().flatMap(available -> {
if (!available) {
return cryptographyServiceClient.sign(algorithm, digest, context);
}
if (!checkKeyPermissions(this.key.getKeyOps(), KeyOperation.SIGN)) {
return Mono.error(logger.logExceptionAsError(new UnsupportedOperationException(String.format(
"Sign operation is not allowed for key with id: %s", key.getId()))));
}
return localKeyCryptographyClient.signAsync(algorithm, digest, context, key);
});
}
/**
* Verifies a signature using the configured key. The verify operation supports both symmetric keys and asymmetric
* keys. In case of asymmetric keys public portion of the key is used to verify the signature. This operation
* requires the {@code keys/verify} permission for non-local operations.
*
* <p>The {@link SignatureAlgorithm signature algorithm} indicates the type of algorithm to use to verify the
* signature. Possible values include: {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
*
* <p><strong>Code Samples</strong></p>
* <p>Verifies the signature against the specified digest. Subscribes to the call asynchronously and prints out the
* verification details when a response has been received.</p>
*
* {@codesnippet com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.verify
*
* @param algorithm The algorithm to use for signing.
* @param digest The content from which signature was created.
* @param signature The signature to be verified.
*
* @return A {@link Mono} containing a {@link VerifyResult}
* {@link VerifyResult
*
* @throws NullPointerException If {@code algorithm}, {@code digest} or {@code signature} is {@code null}.
* @throws ResourceNotFoundException If the key cannot be found for verifying.
* @throws UnsupportedOperationException If the verify operation is not supported or configured on the key.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<VerifyResult> verify(SignatureAlgorithm algorithm, byte[] digest, byte[] signature) {
try {
return withContext(context -> verify(algorithm, digest, signature, context));
} catch (RuntimeException ex) {
return monoError(logger, ex);
}
}
Mono<VerifyResult> verify(SignatureAlgorithm algorithm, byte[] digest, byte[] signature, Context context) {
Objects.requireNonNull(algorithm, "Signature algorithm cannot be null.");
Objects.requireNonNull(digest, "Digest content cannot be null.");
Objects.requireNonNull(signature, "Signature to be verified cannot be null.");
return ensureValidKeyAvailable().flatMap(available -> {
if (!available) {
return cryptographyServiceClient.verify(algorithm, digest, signature, context);
}
if (!checkKeyPermissions(this.key.getKeyOps(), KeyOperation.VERIFY)) {
return Mono.error(logger.logExceptionAsError(new UnsupportedOperationException(String.format(
"Verify operation is not allowed for key with id: %s", key.getId()))));
}
return localKeyCryptographyClient.verifyAsync(algorithm, digest, signature, context, key);
});
}
/**
* Wraps a symmetric key using the configured key. The wrap operation supports wrapping a symmetric key with both
* symmetric and asymmetric keys. This operation requires the {@code keys/wrapKey} permission for non-local
* operations.
*
* <p>The {@link KeyWrapAlgorithm wrap algorithm} indicates the type of algorithm to use for wrapping the specified
* key content. Possible values include:
* {@link KeyWrapAlgorithm
* {@link KeyWrapAlgorithm
*
* Possible values for symmetric keys include: {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
*
* <p><strong>Code Samples</strong></p>
* <p>Wraps the key content. Subscribes to the call asynchronously and prints out the wrapped key details when a
* response has been received.</p>
*
* {@codesnippet com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.wrapKey
*
* @param algorithm The encryption algorithm to use for wrapping the key.
* @param key The key content to be wrapped.
*
* @return A {@link Mono} containing a {@link WrapResult} whose {@link WrapResult
* contains the wrapped key result.
*
* @throws NullPointerException If {@code algorithm} or {@code key} are {@code null}.
* @throws ResourceNotFoundException If the key cannot be found for wrap operation.
* @throws UnsupportedOperationException If the wrap operation is not supported or configured on the key.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<WrapResult> wrapKey(KeyWrapAlgorithm algorithm, byte[] key) {
try {
return withContext(context -> wrapKey(algorithm, key, context));
} catch (RuntimeException ex) {
return monoError(logger, ex);
}
}
Mono<WrapResult> wrapKey(KeyWrapAlgorithm algorithm, byte[] key, Context context) {
Objects.requireNonNull(algorithm, "Key wrap algorithm cannot be null.");
Objects.requireNonNull(key, "Key content to be wrapped cannot be null.");
return ensureValidKeyAvailable().flatMap(available -> {
if (!available) {
return cryptographyServiceClient.wrapKey(algorithm, key, context);
}
if (!checkKeyPermissions(this.key.getKeyOps(), KeyOperation.WRAP_KEY)) {
return Mono.error(logger.logExceptionAsError(new UnsupportedOperationException(String.format(
"Wrap Key operation is not allowed for key with id: %s", this.key.getId()))));
}
return localKeyCryptographyClient.wrapKeyAsync(algorithm, key, context, this.key);
});
}
/**
* Unwraps a symmetric key using the configured key that was initially used for wrapping that key. This operation
* is the reverse of the wrap operation. The unwrap operation supports asymmetric and symmetric keys to unwrap. This
* operation requires the {@code keys/unwrapKey} permission for non-local operations.
*
* <p>The {@link KeyWrapAlgorithm wrap algorithm} indicates the type of algorithm to use for unwrapping the
* specified encrypted key content. Possible values for asymmetric keys include:
* {@link KeyWrapAlgorithm
* {@link KeyWrapAlgorithm
*
* Possible values for symmetric keys include: {@link KeyWrapAlgorithm
* {@link KeyWrapAlgorithm
*
* <p><strong>Code Samples</strong></p>
* <p>Unwraps the key content. Subscribes to the call asynchronously and prints out the unwrapped key details when
* a response has been received.</p>
*
* {@codesnippet com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.unwrapKey
*
* @param algorithm The encryption algorithm to use for wrapping the key.
* @param encryptedKey The encrypted key content to unwrap.
*
* @return A {@link Mono} containing an {@link UnwrapResult} whose {@link UnwrapResult
* key} contains the unwrapped key result.
*
* @throws NullPointerException If {@code algorithm} or {@code encryptedKey} are {@code null}.
* @throws ResourceNotFoundException If the key cannot be found for wrap operation.
* @throws UnsupportedOperationException If the unwrap operation is not supported or configured on the key.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<UnwrapResult> unwrapKey(KeyWrapAlgorithm algorithm, byte[] encryptedKey) {
try {
return withContext(context -> unwrapKey(algorithm, encryptedKey, context));
} catch (RuntimeException ex) {
return monoError(logger, ex);
}
}
Mono<UnwrapResult> unwrapKey(KeyWrapAlgorithm algorithm, byte[] encryptedKey, Context context) {
Objects.requireNonNull(algorithm, "Key wrap algorithm cannot be null.");
Objects.requireNonNull(encryptedKey, "Encrypted key content to be unwrapped cannot be null.");
return ensureValidKeyAvailable().flatMap(available -> {
if (!available) {
return cryptographyServiceClient.unwrapKey(algorithm, encryptedKey, context);
}
if (!checkKeyPermissions(this.key.getKeyOps(), KeyOperation.UNWRAP_KEY)) {
return Mono.error(logger.logExceptionAsError(new UnsupportedOperationException(String.format(
"Unwrap Key operation is not allowed for key with id: %s", this.key.getId()))));
}
return localKeyCryptographyClient.unwrapKeyAsync(algorithm, encryptedKey, context, key);
});
}
/**
* Creates a signature from the raw data using the configured key. The sign data operation supports both asymmetric
* and symmetric keys. This operation requires the {@code keys/sign} permission for non-local operations.
*
* <p>The {@link SignatureAlgorithm signature algorithm} indicates the type of algorithm to use to sign the digest.
* Possible values include:
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
*
* <p><strong>Code Samples</strong></p>
* <p>Signs the raw data. Subscribes to the call asynchronously and prints out the signature details when a
* response has been received.</p>
*
* {@codesnippet com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.signData
*
* @param algorithm The algorithm to use for signing.
* @param data The content from which signature is to be created.
*
* @return A {@link Mono} containing a {@link SignResult} whose {@link SignResult
* the created signature.
*
* @throws NullPointerException If {@code algorithm} or {@code data} is {@code null}.
* @throws ResourceNotFoundException If the key cannot be found for signing.
* @throws UnsupportedOperationException If the sign operation is not supported or configured on the key.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<SignResult> signData(SignatureAlgorithm algorithm, byte[] data) {
try {
return withContext(context -> signData(algorithm, data, context));
} catch (RuntimeException ex) {
return monoError(logger, ex);
}
}
Mono<SignResult> signData(SignatureAlgorithm algorithm, byte[] data, Context context) {
Objects.requireNonNull(algorithm, "Signature algorithm cannot be null.");
Objects.requireNonNull(data, "Data to be signed cannot be null.");
return ensureValidKeyAvailable().flatMap(available -> {
if (!available) {
return cryptographyServiceClient.signData(algorithm, data, context);
}
if (!checkKeyPermissions(this.key.getKeyOps(), KeyOperation.SIGN)) {
return Mono.error(logger.logExceptionAsError(new UnsupportedOperationException(String.format(
"Sign Operation is not allowed for key with id: %s", this.key.getId()))));
}
return localKeyCryptographyClient.signDataAsync(algorithm, data, context, key);
});
}
/**
* Verifies a signature against the raw data using the configured key. The verify operation supports both symmetric
* keys and asymmetric keys. In case of asymmetric keys public portion of the key is used to verify the signature.
* This operation requires the {@code keys/verify} permission for non-local operations.
*
* <p>The {@link SignatureAlgorithm signature algorithm} indicates the type of algorithm to use to verify the
* signature. Possible values include:
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
*
* <p><strong>Code Samples</strong></p>
* <p>Verifies the signature against the raw data. Subscribes to the call asynchronously and prints out the
* verification details when a response has been received.</p>
*
* {@codesnippet com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.verifyData
*
* @param algorithm The algorithm to use for signing.
* @param data The raw content against which signature is to be verified.
* @param signature The signature to be verified.
*
* @return A {@link Mono} containing a {@link VerifyResult}
* {@link VerifyResult
*
* @throws NullPointerException If {@code algorithm}, {@code data} or {@code signature} is {@code null}.
* @throws ResourceNotFoundException If the key cannot be found for verifying.
* @throws UnsupportedOperationException If the verify operation is not supported or configured on the key.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<VerifyResult> verifyData(SignatureAlgorithm algorithm, byte[] data, byte[] signature) {
try {
return withContext(context -> verifyData(algorithm, data, signature, context));
} catch (RuntimeException ex) {
return monoError(logger, ex);
}
}
Mono<VerifyResult> verifyData(SignatureAlgorithm algorithm, byte[] data, byte[] signature, Context context) {
Objects.requireNonNull(algorithm, "Signature algorithm cannot be null.");
Objects.requireNonNull(data, "Data cannot be null.");
Objects.requireNonNull(signature, "Signature to be verified cannot be null.");
return ensureValidKeyAvailable().flatMap(available -> {
if (!available) {
return cryptographyServiceClient.verifyData(algorithm, data, signature, context);
}
if (!checkKeyPermissions(this.key.getKeyOps(), KeyOperation.VERIFY)) {
return Mono.error(logger.logExceptionAsError(new UnsupportedOperationException(String.format(
"Verify operation is not allowed for key with id: %s", this.key.getId()))));
}
return localKeyCryptographyClient.verifyDataAsync(algorithm, data, signature, context, key);
});
}
private boolean checkKeyPermissions(List<KeyOperation> operations, KeyOperation keyOperation) {
return operations.contains(keyOperation);
}
private Mono<Boolean> ensureValidKeyAvailable() {
boolean keyNotAvailable = (key == null && keyCollection != null);
boolean keyNotValid = (key != null && !key.isValid());
if (keyNotAvailable || keyNotValid) {
if (keyCollection.equals(SECRETS_COLLECTION)) {
return getSecretKey().map(jsonWebKey -> {
key = (jsonWebKey);
if (key.isValid()) {
initializeCryptoClients();
return true;
} else {
return false;
}
});
} else {
return getKey().map(keyVaultKey -> {
key = (keyVaultKey.getKey());
if (key.isValid()) {
initializeCryptoClients();
return true;
} else {
return false;
}
});
}
} else {
return Mono.defer(() -> Mono.just(true));
}
}
CryptographyServiceClient getCryptographyServiceClient() {
return cryptographyServiceClient;
}
void setCryptographyServiceClient(CryptographyServiceClient serviceClient) {
this.cryptographyServiceClient = serviceClient;
}
} |
That makes sense! Sorry, was afk | private void unpackAndValidateId(String keyId) {
if (CoreUtils.isNullOrEmpty(keyId)) {
throw logger.logExceptionAsError(new IllegalArgumentException("'keyId' cannot be null or empty."));
}
try {
URL url = new URL(keyId);
String[] tokens = url.getPath().split("/");
String endpoint = url.getProtocol() + ":
if (url.getPort() != -1) {
endpoint += ":" + url.getPort();
}
String keyName = (tokens.length >= 3 ? tokens[2] : null);
this.keyCollection = (tokens.length >= 2 ? tokens[1] : null);
if (Strings.isNullOrEmpty(endpoint)) {
throw logger.logExceptionAsError(
new IllegalArgumentException("Key endpoint in key identifier is invalid."));
} else if (Strings.isNullOrEmpty(keyName)) {
throw logger.logExceptionAsError(
new IllegalArgumentException("Key name in key identifier is invalid."));
}
} catch (MalformedURLException e) {
throw logger.logExceptionAsError(new IllegalArgumentException("The key identifier is malformed.", e));
}
} | endpoint += ":" + url.getPort(); | private void unpackAndValidateId(String keyId) {
if (CoreUtils.isNullOrEmpty(keyId)) {
throw logger.logExceptionAsError(new IllegalArgumentException("'keyId' cannot be null or empty."));
}
try {
URL url = new URL(keyId);
String[] tokens = url.getPath().split("/");
String endpoint = url.getProtocol() + ":
if (url.getPort() != -1) {
endpoint += ":" + url.getPort();
}
String keyName = (tokens.length >= 3 ? tokens[2] : null);
this.keyCollection = (tokens.length >= 2 ? tokens[1] : null);
if (Strings.isNullOrEmpty(endpoint)) {
throw logger.logExceptionAsError(
new IllegalArgumentException("Key endpoint in key identifier is invalid."));
} else if (Strings.isNullOrEmpty(keyName)) {
throw logger.logExceptionAsError(
new IllegalArgumentException("Key name in key identifier is invalid."));
}
} catch (MalformedURLException e) {
throw logger.logExceptionAsError(new IllegalArgumentException("The key identifier is malformed.", e));
}
} | class CryptographyAsyncClient {
static final String KEYVAULT_TRACING_NAMESPACE_VALUE = "Microsoft.KeyVault";
static final String SECRETS_COLLECTION = "secrets";
JsonWebKey key;
private final ClientLogger logger = new ClientLogger(CryptographyAsyncClient.class);
private final CryptographyService service;
private final HttpPipeline pipeline;
private final String keyId;
private CryptographyServiceClient cryptographyServiceClient;
private LocalKeyCryptographyClient localKeyCryptographyClient;
private String keyCollection;
/**
* Creates a {@link CryptographyAsyncClient} that uses a given {@link HttpPipeline pipeline} to service requests.
*
* @param keyId The Azure Key Vault key identifier to use for cryptography operations.
* @param pipeline {@link HttpPipeline} that the HTTP requests and responses flow through.
* @param version {@link CryptographyServiceVersion} of the service to be used when making requests.
*/
CryptographyAsyncClient(String keyId, HttpPipeline pipeline, CryptographyServiceVersion version) {
unpackAndValidateId(keyId);
this.keyId = keyId;
this.pipeline = pipeline;
this.service = RestProxy.create(CryptographyService.class, pipeline);
this.cryptographyServiceClient = new CryptographyServiceClient(keyId, service, version);
this.key = null;
}
/**
* Creates a {@link CryptographyAsyncClient} that uses a {@link JsonWebKey} to perform local cryptography
* operations.
*
* @param jsonWebKey The {@link JsonWebKey} to use for local cryptography operations.
*/
CryptographyAsyncClient(JsonWebKey jsonWebKey) {
Objects.requireNonNull(jsonWebKey, "The JSON Web Key is required.");
if (!jsonWebKey.isValid()) {
throw new IllegalArgumentException("The JSON Web Key is not valid.");
}
if (jsonWebKey.getKeyOps() == null) {
throw new IllegalArgumentException("The JSON Web Key's key operations property is not configured.");
}
if (jsonWebKey.getKeyType() == null) {
throw new IllegalArgumentException("The JSON Web Key's key type property is not configured.");
}
this.key = jsonWebKey;
this.keyId = jsonWebKey.getId();
this.pipeline = null;
this.service = null;
this.cryptographyServiceClient = null;
initializeCryptoClients();
}
private void initializeCryptoClients() {
if (localKeyCryptographyClient != null) {
return;
}
if (key.getKeyType().equals(RSA) || key.getKeyType().equals(RSA_HSM)) {
this.localKeyCryptographyClient = new RsaKeyCryptographyClient(this.key, this.cryptographyServiceClient);
} else if (key.getKeyType().equals(EC) || key.getKeyType().equals(EC_HSM)) {
this.localKeyCryptographyClient = new EcKeyCryptographyClient(this.key, this.cryptographyServiceClient);
} else if (key.getKeyType().equals(OCT) || key.getKeyType().equals(OCT_HSM)) {
this.localKeyCryptographyClient = new AesKeyCryptographyClient(this.key, this.cryptographyServiceClient);
} else {
throw logger.logExceptionAsError(new IllegalArgumentException(String.format(
"The JSON Web Key type: %s is not supported.", this.key.getKeyType().toString())));
}
}
/**
* Gets the {@link HttpPipeline} powering this client.
*
* @return The pipeline.
*/
HttpPipeline getHttpPipeline() {
return this.pipeline;
}
Mono<String> getKeyId() {
return Mono.defer(() -> Mono.just(this.keyId));
}
/**
* Gets the public part of the configured key. The get key operation is applicable to all key types and it requires
* the {@code keys/get} permission for non-local operations.
*
* <p><strong>Code Samples</strong></p>
* <p>Gets the configured key in the client. Subscribes to the call asynchronously and prints out the returned key
* details when a response has been received.</p>
*
* {@codesnippet com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.getKey}
*
* @return A {@link Mono} containing the requested {@link KeyVaultKey key}.
*
* @throws ResourceNotFoundException When the configured key doesn't exist in the key vault.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<KeyVaultKey> getKey() {
try {
return getKeyWithResponse().flatMap(FluxUtil::toMono);
} catch (RuntimeException ex) {
return monoError(logger, ex);
}
}
/**
* Gets the public part of the configured key. The get key operation is applicable to all key types and it requires
* the {@code keys/get} permission for non-local operations.
*
* <p><strong>Code Samples</strong></p>
* <p>Gets the configured key in the client. Subscribes to the call asynchronously and prints out the returned key
* details when a response has been received.</p>
*
* {@codesnippet com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.getKeyWithResponse}
*
* @return A {@link Mono} containing a {@link Response} whose {@link Response
* requested {@link KeyVaultKey key}.
*
* @throws ResourceNotFoundException When the configured key doesn't exist in the key vault.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<KeyVaultKey>> getKeyWithResponse() {
try {
return withContext(this::getKeyWithResponse);
} catch (RuntimeException ex) {
return monoError(logger, ex);
}
}
Mono<Response<KeyVaultKey>> getKeyWithResponse(Context context) {
if (cryptographyServiceClient != null) {
return cryptographyServiceClient.getKey(context);
} else {
throw logger.logExceptionAsError(new UnsupportedOperationException(
"Operation not supported when in operating local-only mode"));
}
}
Mono<JsonWebKey> getSecretKey() {
try {
return withContext(context -> cryptographyServiceClient.getSecretKey(context))
.flatMap(FluxUtil::toMono);
} catch (RuntimeException ex) {
return monoError(logger, ex);
}
}
/**
* Encrypts an arbitrary sequence of bytes using the configured key. Note that the encrypt operation only supports
* a single block of data, the size of which is dependent on the target key and the encryption algorithm to be used.
* The encrypt operation is supported for both symmetric keys and asymmetric keys. In case of asymmetric keys, the
* public portion of the key is used for encryption. This operation requires the {@code keys/encrypt} permission
* for non-local operations.
*
* <p>The {@link EncryptionAlgorithm encryption algorithm} indicates the type of algorithm to use for encrypting the
* specified {@code plaintext}. Possible values for asymmetric keys include:
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
*
* Possible values for symmetric keys include: {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
*
* <p><strong>Code Samples</strong></p>
* <p>Encrypts the content. Subscribes to the call asynchronously and prints out the encrypted content details when
* a response has been received.</p>
*
* {@codesnippet com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.encrypt
*
* @param algorithm The algorithm to be used for encryption.
* @param plaintext The content to be encrypted.
*
* @return A {@link Mono} containing a {@link EncryptResult} whose {@link EncryptResult
* contains the encrypted content.
*
* @throws NullPointerException If {@code algorithm} or {@code plaintext} are {@code null}.
* @throws ResourceNotFoundException If the key cannot be found for encryption.
* @throws UnsupportedOperationException If the encrypt operation is not supported or configured on the key.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<EncryptResult> encrypt(EncryptionAlgorithm algorithm, byte[] plaintext) {
Objects.requireNonNull(algorithm, "'algorithm' cannot be null.");
Objects.requireNonNull(plaintext, "'plaintext' cannot be null.");
return encrypt(algorithm, plaintext, null);
}
/**
* Encrypts an arbitrary sequence of bytes using the configured key. Note that the encrypt operation only supports
* a single block of data, the size of which is dependent on the target key and the encryption algorithm to be used.
* The encrypt operation is supported for both symmetric keys and asymmetric keys. In case of asymmetric keys, the
* public portion of the key is used for encryption. This operation requires the {@code keys/encrypt} permission
* for non-local operations.
*
* <p>The {@link EncryptionAlgorithm encryption algorithm} indicates the type of algorithm to use for encrypting the
* specified {@code plaintext}. Possible values for asymmetric keys include:
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
*
* Possible values for symmetric keys include: {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
*
* <p><strong>Code Samples</strong></p>
* <p>Encrypts the content. Subscribes to the call asynchronously and prints out the encrypted content details when
* a response has been received.</p>
*
* {@codesnippet com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.encrypt
*
* @param encryptParameters The parameters to use in the encryption operation.
*
* @return A {@link Mono} containing a {@link EncryptResult} whose {@link EncryptResult
* contains the encrypted content.
*
* @throws NullPointerException If {@code algorithm} or {@code plaintext} are {@code null}.
* @throws ResourceNotFoundException If the key cannot be found for encryption.
* @throws UnsupportedOperationException If the encrypt operation is not supported or configured on the key.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<EncryptResult> encrypt(EncryptParameters encryptParameters) {
Objects.requireNonNull(encryptParameters, "'encryptParameters' cannot be null.");
try {
return withContext(context -> encrypt(encryptParameters, context));
} catch (RuntimeException ex) {
return monoError(logger, ex);
}
}
Mono<EncryptResult> encrypt(EncryptionAlgorithm algorithm, byte[] plaintext, Context context) {
return ensureValidKeyAvailable().flatMap(available -> {
if (!available) {
return cryptographyServiceClient.encrypt(algorithm, plaintext, context);
}
if (!checkKeyPermissions(this.key.getKeyOps(), KeyOperation.ENCRYPT)) {
return Mono.error(logger.logExceptionAsError(new UnsupportedOperationException(String.format(
"Encrypt operation is missing permission/not supported for key with id: %s", key.getId()))));
}
return localKeyCryptographyClient.encryptAsync(algorithm, plaintext, context, key);
});
}
Mono<EncryptResult> encrypt(EncryptParameters encryptParameters, Context context) {
return ensureValidKeyAvailable().flatMap(available -> {
if (!available) {
return cryptographyServiceClient.encrypt(encryptParameters, context);
}
if (!checkKeyPermissions(this.key.getKeyOps(), KeyOperation.ENCRYPT)) {
return Mono.error(logger.logExceptionAsError(new UnsupportedOperationException(String.format(
"Encrypt operation is missing permission/not supported for key with id: %s", key.getId()))));
}
return localKeyCryptographyClient.encryptAsync(encryptParameters, context, key);
});
}
/**
* Decrypts a single block of encrypted data using the configured key and specified algorithm. Note that only a
* single block of data may be decrypted, the size of this block is dependent on the target key and the algorithm
* to be used. The decrypt operation is supported for both asymmetric and symmetric keys. This operation requires
* the {@code keys/decrypt} permission for non-local operations.
*
* <p>The {@link EncryptionAlgorithm encryption algorithm} indicates the type of algorithm to use for decrypting
* the specified encrypted content. Possible values for asymmetric keys include:
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
*
* Possible values for symmetric keys include: {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
*
* <p><strong>Code Samples</strong></p>
* <p>Decrypts the encrypted content. Subscribes to the call asynchronously and prints out the decrypted content
* details when a response has been received.</p>
*
* {@codesnippet com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.decrypt
*
* @param algorithm The algorithm to be used for decryption.
* @param ciphertext The content to be decrypted.
*
* @return A {@link Mono} containing the decrypted blob.
*
* @throws NullPointerException If {@code algorithm} or {@code ciphertext} are {@code null}.
* @throws ResourceNotFoundException If the key cannot be found for decryption.
* @throws UnsupportedOperationException If the decrypt operation is not supported or configured on the key.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<DecryptResult> decrypt(EncryptionAlgorithm algorithm, byte[] ciphertext) {
    Objects.requireNonNull(algorithm, "'algorithm' cannot be null.");
    // Bug fix: this previously re-validated 'algorithm', so a null 'ciphertext' slipped through
    // and surfaced later with a confusing error instead of the documented NullPointerException.
    Objects.requireNonNull(ciphertext, "'ciphertext' cannot be null.");
    // Delegate to the context-aware overload with no caller-supplied context.
    return decrypt(algorithm, ciphertext, null);
}
/**
* Decrypts a single block of encrypted data using the configured key and specified algorithm. Note that only a
* single block of data may be decrypted, the size of this block is dependent on the target key and the algorithm
* to be used. The decrypt operation is supported for both asymmetric and symmetric keys. This operation requires
* the {@code keys/decrypt} permission for non-local operations.
*
* <p>The {@link EncryptionAlgorithm encryption algorithm} indicates the type of algorithm to use for decrypting
* the specified encrypted content. Possible values for asymmetric keys include:
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
*
* Possible values for symmetric keys include: {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
*
* <p><strong>Code Samples</strong></p>
* <p>Decrypts the encrypted content. Subscribes to the call asynchronously and prints out the decrypted content
* details when a response has been received.</p>
*
* {@codesnippet com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.decrypt
*
* @param decryptParameters The parameters to use in the decryption operation.
*
* @return A {@link Mono} containing the decrypted blob.
*
 * @throws NullPointerException If {@code decryptParameters} is {@code null}.
* @throws ResourceNotFoundException If the key cannot be found for decryption.
* @throws UnsupportedOperationException If the decrypt operation is not supported or configured on the key.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<DecryptResult> decrypt(DecryptParameters decryptParameters) {
    Objects.requireNonNull(decryptParameters, "'decryptParameters' cannot be null.");
    // Delegate to the context-aware overload; surface synchronous failures as an error Mono.
    try {
        return withContext(ctx -> decrypt(decryptParameters, ctx));
    } catch (RuntimeException e) {
        return monoError(logger, e);
    }
}
Mono<DecryptResult> decrypt(EncryptionAlgorithm algorithm, byte[] ciphertext, Context context) {
    // Decrypt locally when usable key material with the right permission is present,
    // otherwise fall back to the Key Vault service.
    return ensureValidKeyAvailable().flatMap(localKeyUsable -> {
        if (!localKeyUsable) {
            return cryptographyServiceClient.decrypt(algorithm, ciphertext, context);
        }
        if (checkKeyPermissions(this.key.getKeyOps(), KeyOperation.DECRYPT)) {
            return localKeyCryptographyClient.decryptAsync(algorithm, ciphertext, context, key);
        }
        return Mono.error(logger.logExceptionAsError(new UnsupportedOperationException(
            String.format("Decrypt operation is not allowed for key with id: %s", key.getId()))));
    });
}
Mono<DecryptResult> decrypt(DecryptParameters decryptParameters, Context context) {
    // Same dispatch as the (algorithm, ciphertext) overload, but with a parameter object.
    return ensureValidKeyAvailable().flatMap(localKeyUsable -> {
        if (!localKeyUsable) {
            return cryptographyServiceClient.decrypt(decryptParameters, context);
        }
        if (checkKeyPermissions(this.key.getKeyOps(), KeyOperation.DECRYPT)) {
            return localKeyCryptographyClient.decryptAsync(decryptParameters, context, key);
        }
        return Mono.error(logger.logExceptionAsError(new UnsupportedOperationException(
            String.format("Decrypt operation is not allowed for key with id: %s", key.getId()))));
    });
}
/**
* Creates a signature from a digest using the configured key. The sign operation supports both asymmetric and
* symmetric keys. This operation requires the {@code keys/sign} permission for non-local operations.
*
* <p>The {@link SignatureAlgorithm signature algorithm} indicates the type of algorithm to use to create the
* signature from the digest. Possible values include:
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
*
* <p><strong>Code Samples</strong></p>
* <p>Sings the digest. Subscribes to the call asynchronously and prints out the signature details when a response
* has been received.</p>
*
* {@codesnippet com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.sign
*
* @param algorithm The algorithm to use for signing.
* @param digest The content from which signature is to be created.
*
* @return A {@link Mono} containing a {@link SignResult} whose {@link SignResult
* the created signature.
*
* @throws NullPointerException If {@code algorithm} or {@code digest} is {@code null}.
* @throws ResourceNotFoundException If the key cannot be found for signing.
* @throws UnsupportedOperationException If the sign operation is not supported or configured on the key.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<SignResult> sign(SignatureAlgorithm algorithm, byte[] digest) {
    // Delegate to the context-aware overload; surface synchronous failures as an error Mono.
    try {
        return withContext(ctx -> sign(algorithm, digest, ctx));
    } catch (RuntimeException e) {
        return monoError(logger, e);
    }
}
Mono<SignResult> sign(SignatureAlgorithm algorithm, byte[] digest, Context context) {
    Objects.requireNonNull(algorithm, "Signature algorithm cannot be null.");
    Objects.requireNonNull(digest, "Digest content to be signed cannot be null.");
    // Sign locally when permitted; otherwise delegate to the service.
    return ensureValidKeyAvailable().flatMap(localKeyUsable -> {
        if (!localKeyUsable) {
            return cryptographyServiceClient.sign(algorithm, digest, context);
        }
        if (checkKeyPermissions(this.key.getKeyOps(), KeyOperation.SIGN)) {
            return localKeyCryptographyClient.signAsync(algorithm, digest, context, key);
        }
        return Mono.error(logger.logExceptionAsError(new UnsupportedOperationException(
            String.format("Sign operation is not allowed for key with id: %s", key.getId()))));
    });
}
/**
* Verifies a signature using the configured key. The verify operation supports both symmetric keys and asymmetric
* keys. In case of asymmetric keys public portion of the key is used to verify the signature. This operation
* requires the {@code keys/verify} permission for non-local operations.
*
* <p>The {@link SignatureAlgorithm signature algorithm} indicates the type of algorithm to use to verify the
* signature. Possible values include: {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
*
* <p><strong>Code Samples</strong></p>
* <p>Verifies the signature against the specified digest. Subscribes to the call asynchronously and prints out the
* verification details when a response has been received.</p>
*
* {@codesnippet com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.verify
*
* @param algorithm The algorithm to use for signing.
* @param digest The content from which signature was created.
* @param signature The signature to be verified.
*
* @return A {@link Mono} containing a {@link VerifyResult}
* {@link VerifyResult
*
* @throws NullPointerException If {@code algorithm}, {@code digest} or {@code signature} is {@code null}.
* @throws ResourceNotFoundException If the key cannot be found for verifying.
* @throws UnsupportedOperationException If the verify operation is not supported or configured on the key.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<VerifyResult> verify(SignatureAlgorithm algorithm, byte[] digest, byte[] signature) {
    // Delegate to the context-aware overload; surface synchronous failures as an error Mono.
    try {
        return withContext(ctx -> verify(algorithm, digest, signature, ctx));
    } catch (RuntimeException e) {
        return monoError(logger, e);
    }
}
Mono<VerifyResult> verify(SignatureAlgorithm algorithm, byte[] digest, byte[] signature, Context context) {
    Objects.requireNonNull(algorithm, "Signature algorithm cannot be null.");
    Objects.requireNonNull(digest, "Digest content cannot be null.");
    Objects.requireNonNull(signature, "Signature to be verified cannot be null.");
    // Verify locally when permitted; otherwise delegate to the service.
    return ensureValidKeyAvailable().flatMap(localKeyUsable -> {
        if (!localKeyUsable) {
            return cryptographyServiceClient.verify(algorithm, digest, signature, context);
        }
        if (checkKeyPermissions(this.key.getKeyOps(), KeyOperation.VERIFY)) {
            return localKeyCryptographyClient.verifyAsync(algorithm, digest, signature, context, key);
        }
        return Mono.error(logger.logExceptionAsError(new UnsupportedOperationException(
            String.format("Verify operation is not allowed for key with id: %s", this.key.getId()))));
    });
}
/**
* Wraps a symmetric key using the configured key. The wrap operation supports wrapping a symmetric key with both
* symmetric and asymmetric keys. This operation requires the {@code keys/wrapKey} permission for non-local
* operations.
*
* <p>The {@link KeyWrapAlgorithm wrap algorithm} indicates the type of algorithm to use for wrapping the specified
* key content. Possible values include:
* {@link KeyWrapAlgorithm
* {@link KeyWrapAlgorithm
*
* Possible values for symmetric keys include: {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
*
* <p><strong>Code Samples</strong></p>
* <p>Wraps the key content. Subscribes to the call asynchronously and prints out the wrapped key details when a
* response has been received.</p>
*
* {@codesnippet com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.wrapKey
*
* @param algorithm The encryption algorithm to use for wrapping the key.
* @param key The key content to be wrapped.
*
* @return A {@link Mono} containing a {@link WrapResult} whose {@link WrapResult
* contains the wrapped key result.
*
* @throws NullPointerException If {@code algorithm} or {@code key} are {@code null}.
* @throws ResourceNotFoundException If the key cannot be found for wrap operation.
* @throws UnsupportedOperationException If the wrap operation is not supported or configured on the key.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<WrapResult> wrapKey(KeyWrapAlgorithm algorithm, byte[] key) {
    // Delegate to the context-aware overload; surface synchronous failures as an error Mono.
    try {
        return withContext(ctx -> wrapKey(algorithm, key, ctx));
    } catch (RuntimeException e) {
        return monoError(logger, e);
    }
}
Mono<WrapResult> wrapKey(KeyWrapAlgorithm algorithm, byte[] key, Context context) {
    Objects.requireNonNull(algorithm, "Key wrap algorithm cannot be null.");
    Objects.requireNonNull(key, "Key content to be wrapped cannot be null.");
    // Wrap locally when permitted; otherwise delegate to the service.
    return ensureValidKeyAvailable().flatMap(localKeyUsable -> {
        if (!localKeyUsable) {
            return cryptographyServiceClient.wrapKey(algorithm, key, context);
        }
        if (checkKeyPermissions(this.key.getKeyOps(), KeyOperation.WRAP_KEY)) {
            return localKeyCryptographyClient.wrapKeyAsync(algorithm, key, context, this.key);
        }
        return Mono.error(logger.logExceptionAsError(new UnsupportedOperationException(
            String.format("Wrap Key operation is not allowed for key with id: %s", this.key.getId()))));
    });
}
/**
* Unwraps a symmetric key using the configured key that was initially used for wrapping that key. This operation
* is the reverse of the wrap operation. The unwrap operation supports asymmetric and symmetric keys to unwrap. This
* operation requires the {@code keys/unwrapKey} permission for non-local operations.
*
* <p>The {@link KeyWrapAlgorithm wrap algorithm} indicates the type of algorithm to use for unwrapping the
* specified encrypted key content. Possible values for asymmetric keys include:
* {@link KeyWrapAlgorithm
* {@link KeyWrapAlgorithm
*
* Possible values for symmetric keys include: {@link KeyWrapAlgorithm
* {@link KeyWrapAlgorithm
*
* <p><strong>Code Samples</strong></p>
* <p>Unwraps the key content. Subscribes to the call asynchronously and prints out the unwrapped key details when
* a response has been received.</p>
*
* {@codesnippet com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.unwrapKey
*
* @param algorithm The encryption algorithm to use for wrapping the key.
* @param encryptedKey The encrypted key content to unwrap.
*
* @return A {@link Mono} containing an {@link UnwrapResult} whose {@link UnwrapResult
* key} contains the unwrapped key result.
*
* @throws NullPointerException If {@code algorithm} or {@code encryptedKey} are {@code null}.
* @throws ResourceNotFoundException If the key cannot be found for wrap operation.
* @throws UnsupportedOperationException If the unwrap operation is not supported or configured on the key.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<UnwrapResult> unwrapKey(KeyWrapAlgorithm algorithm, byte[] encryptedKey) {
    // Delegate to the context-aware overload; surface synchronous failures as an error Mono.
    try {
        return withContext(ctx -> unwrapKey(algorithm, encryptedKey, ctx));
    } catch (RuntimeException e) {
        return monoError(logger, e);
    }
}
Mono<UnwrapResult> unwrapKey(KeyWrapAlgorithm algorithm, byte[] encryptedKey, Context context) {
    Objects.requireNonNull(algorithm, "Key wrap algorithm cannot be null.");
    Objects.requireNonNull(encryptedKey, "Encrypted key content to be unwrapped cannot be null.");
    // Unwrap locally when permitted; otherwise delegate to the service.
    return ensureValidKeyAvailable().flatMap(localKeyUsable -> {
        if (!localKeyUsable) {
            return cryptographyServiceClient.unwrapKey(algorithm, encryptedKey, context);
        }
        if (checkKeyPermissions(this.key.getKeyOps(), KeyOperation.UNWRAP_KEY)) {
            return localKeyCryptographyClient.unwrapKeyAsync(algorithm, encryptedKey, context, key);
        }
        return Mono.error(logger.logExceptionAsError(new UnsupportedOperationException(
            String.format("Unwrap Key operation is not allowed for key with id: %s", this.key.getId()))));
    });
}
/**
* Creates a signature from the raw data using the configured key. The sign data operation supports both asymmetric
* and symmetric keys. This operation requires the {@code keys/sign} permission for non-local operations.
*
* <p>The {@link SignatureAlgorithm signature algorithm} indicates the type of algorithm to use to sign the digest.
* Possible values include:
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
*
* <p><strong>Code Samples</strong></p>
* <p>Signs the raw data. Subscribes to the call asynchronously and prints out the signature details when a
* response has been received.</p>
*
* {@codesnippet com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.signData
*
* @param algorithm The algorithm to use for signing.
* @param data The content from which signature is to be created.
*
* @return A {@link Mono} containing a {@link SignResult} whose {@link SignResult
* the created signature.
*
* @throws NullPointerException If {@code algorithm} or {@code data} is {@code null}.
* @throws ResourceNotFoundException If the key cannot be found for signing.
* @throws UnsupportedOperationException If the sign operation is not supported or configured on the key.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<SignResult> signData(SignatureAlgorithm algorithm, byte[] data) {
    // Delegate to the context-aware overload; surface synchronous failures as an error Mono.
    try {
        return withContext(ctx -> signData(algorithm, data, ctx));
    } catch (RuntimeException e) {
        return monoError(logger, e);
    }
}
Mono<SignResult> signData(SignatureAlgorithm algorithm, byte[] data, Context context) {
    Objects.requireNonNull(algorithm, "Signature algorithm cannot be null.");
    Objects.requireNonNull(data, "Data to be signed cannot be null.");
    // Sign locally when permitted; otherwise delegate to the service.
    return ensureValidKeyAvailable().flatMap(localKeyUsable -> {
        if (!localKeyUsable) {
            return cryptographyServiceClient.signData(algorithm, data, context);
        }
        if (checkKeyPermissions(this.key.getKeyOps(), KeyOperation.SIGN)) {
            return localKeyCryptographyClient.signDataAsync(algorithm, data, context, key);
        }
        return Mono.error(logger.logExceptionAsError(new UnsupportedOperationException(
            String.format("Sign Operation is not allowed for key with id: %s", this.key.getId()))));
    });
}
/**
* Verifies a signature against the raw data using the configured key. The verify operation supports both symmetric
* keys and asymmetric keys. In case of asymmetric keys public portion of the key is used to verify the signature.
* This operation requires the {@code keys/verify} permission for non-local operations.
*
* <p>The {@link SignatureAlgorithm signature algorithm} indicates the type of algorithm to use to verify the
* signature. Possible values include:
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
*
* <p><strong>Code Samples</strong></p>
* <p>Verifies the signature against the raw data. Subscribes to the call asynchronously and prints out the
* verification details when a response has been received.</p>
*
* {@codesnippet com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.verifyData
*
* @param algorithm The algorithm to use for signing.
* @param data The raw content against which signature is to be verified.
* @param signature The signature to be verified.
*
* @return A {@link Mono} containing a {@link VerifyResult}
* {@link VerifyResult
*
* @throws NullPointerException If {@code algorithm}, {@code data} or {@code signature} is {@code null}.
* @throws ResourceNotFoundException If the key cannot be found for verifying.
* @throws UnsupportedOperationException If the verify operation is not supported or configured on the key.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<VerifyResult> verifyData(SignatureAlgorithm algorithm, byte[] data, byte[] signature) {
    // Delegate to the context-aware overload; surface synchronous failures as an error Mono.
    try {
        return withContext(ctx -> verifyData(algorithm, data, signature, ctx));
    } catch (RuntimeException e) {
        return monoError(logger, e);
    }
}
Mono<VerifyResult> verifyData(SignatureAlgorithm algorithm, byte[] data, byte[] signature, Context context) {
    Objects.requireNonNull(algorithm, "Signature algorithm cannot be null.");
    Objects.requireNonNull(data, "Data cannot be null.");
    Objects.requireNonNull(signature, "Signature to be verified cannot be null.");
    // Verify locally when permitted; otherwise delegate to the service.
    return ensureValidKeyAvailable().flatMap(localKeyUsable -> {
        if (!localKeyUsable) {
            return cryptographyServiceClient.verifyData(algorithm, data, signature, context);
        }
        if (checkKeyPermissions(this.key.getKeyOps(), KeyOperation.VERIFY)) {
            return localKeyCryptographyClient.verifyDataAsync(algorithm, data, signature, context, key);
        }
        return Mono.error(logger.logExceptionAsError(new UnsupportedOperationException(
            String.format("Verify operation is not allowed for key with id: %s", this.key.getId()))));
    });
}
/**
 * Returns whether {@code keyOperation} is present in the key's allowed operations list.
 * Note: callers pass {@code key.getKeyOps()}; a null list would throw here — TODO confirm key_ops
 * is always populated for keys reaching this point.
 */
private boolean checkKeyPermissions(List<KeyOperation> operations, KeyOperation keyOperation) {
    return operations.contains(keyOperation);
}
/**
 * Ensures usable local key material is present, fetching it from the service when needed.
 *
 * @return A {@link Mono} emitting {@code true} when a valid local key is available (local crypto
 * clients initialized), {@code false} when the retrieved key is not usable locally (callers then
 * fall back to the service client).
 */
private Mono<Boolean> ensureValidKeyAvailable() {
    // Refresh is needed when the key was never retrieved (service-backed client) or is invalid.
    boolean keyNotAvailable = (key == null && keyCollection != null);
    boolean keyNotValid = (key != null && !key.isValid());
    if (!(keyNotAvailable || keyNotValid)) {
        return Mono.defer(() -> Mono.just(true));
    }
    // Null-safe comparison: keyCollection may be null for clients created directly from a
    // JsonWebKey; the previous keyCollection.equals(...) order could throw NullPointerException.
    Mono<JsonWebKey> retrievedKey = SECRETS_COLLECTION.equals(keyCollection)
        ? getSecretKey()
        : getKey().map(KeyVaultKey::getKey);
    // Single mapping shared by both retrieval paths (previously duplicated verbatim).
    return retrievedKey.map(jsonWebKey -> {
        key = jsonWebKey;
        if (key.isValid()) {
            initializeCryptoClients();
            return true;
        }
        return false;
    });
}
// Package-private accessor for the service-backed client (used by wrapper clients and tests).
CryptographyServiceClient getCryptographyServiceClient() {
    return cryptographyServiceClient;
}
// Package-private mutator for the service-backed client (test hook / internal wiring).
void setCryptographyServiceClient(CryptographyServiceClient serviceClient) {
    this.cryptographyServiceClient = serviceClient;
}
} | class CryptographyAsyncClient {
// Tracing namespace attribute value for Azure Key Vault operations.
static final String KEYVAULT_TRACING_NAMESPACE_VALUE = "Microsoft.KeyVault";
// Collection segment in a secret identifier; selects the secret-backed key retrieval path.
static final String SECRETS_COLLECTION = "secrets";
// Cached key material; null until retrieved when the client is service-backed.
JsonWebKey key;
private final ClientLogger logger = new ClientLogger(CryptographyAsyncClient.class);
// service/pipeline are null when the client was constructed from a JsonWebKey (local-only mode).
private final CryptographyService service;
private final HttpPipeline pipeline;
private final String keyId;
// Null in local-only mode; otherwise performs service-side crypto and key retrieval.
private CryptographyServiceClient cryptographyServiceClient;
// Lazily initialized once valid key material is available (see initializeCryptoClients).
private LocalKeyCryptographyClient localKeyCryptographyClient;
// Collection parsed from the key identifier ("keys" or "secrets"); may be null in local-only mode.
private String keyCollection;
/**
* Creates a {@link CryptographyAsyncClient} that uses a given {@link HttpPipeline pipeline} to service requests.
*
* @param keyId The Azure Key Vault key identifier to use for cryptography operations.
* @param pipeline {@link HttpPipeline} that the HTTP requests and responses flow through.
* @param version {@link CryptographyServiceVersion} of the service to be used when making requests.
*/
CryptographyAsyncClient(String keyId, HttpPipeline pipeline, CryptographyServiceVersion version) {
    // Validates the identifier format (and records the collection) before wiring anything up.
    unpackAndValidateId(keyId);
    this.keyId = keyId;
    this.pipeline = pipeline;
    this.service = RestProxy.create(CryptographyService.class, pipeline);
    this.cryptographyServiceClient = new CryptographyServiceClient(keyId, service, version);
    // Key material is fetched lazily on first use (see ensureValidKeyAvailable).
    this.key = null;
}
/**
* Creates a {@link CryptographyAsyncClient} that uses a {@link JsonWebKey} to perform local cryptography
* operations.
*
* @param jsonWebKey The {@link JsonWebKey} to use for local cryptography operations.
*/
CryptographyAsyncClient(JsonWebKey jsonWebKey) {
    Objects.requireNonNull(jsonWebKey, "The JSON Web Key is required.");
    // Log-and-throw for consistency with the rest of this client's error handling
    // (see initializeCryptoClients); exception types and messages are unchanged.
    if (!jsonWebKey.isValid()) {
        throw logger.logExceptionAsError(new IllegalArgumentException("The JSON Web Key is not valid."));
    }
    if (jsonWebKey.getKeyOps() == null) {
        throw logger.logExceptionAsError(
            new IllegalArgumentException("The JSON Web Key's key operations property is not configured."));
    }
    if (jsonWebKey.getKeyType() == null) {
        throw logger.logExceptionAsError(
            new IllegalArgumentException("The JSON Web Key's key type property is not configured."));
    }
    this.key = jsonWebKey;
    this.keyId = jsonWebKey.getId();
    // Local-only mode: no pipeline or service client is available.
    this.pipeline = null;
    this.service = null;
    this.cryptographyServiceClient = null;
    initializeCryptoClients();
}
// Selects the local crypto implementation matching the key's type. Idempotent.
private void initializeCryptoClients() {
    if (localKeyCryptographyClient != null) {
        // Already initialized.
        return;
    }
    if (RSA.equals(key.getKeyType()) || RSA_HSM.equals(key.getKeyType())) {
        this.localKeyCryptographyClient = new RsaKeyCryptographyClient(this.key, this.cryptographyServiceClient);
    } else if (EC.equals(key.getKeyType()) || EC_HSM.equals(key.getKeyType())) {
        this.localKeyCryptographyClient = new EcKeyCryptographyClient(this.key, this.cryptographyServiceClient);
    } else if (OCT.equals(key.getKeyType()) || OCT_HSM.equals(key.getKeyType())) {
        this.localKeyCryptographyClient = new AesKeyCryptographyClient(this.key, this.cryptographyServiceClient);
    } else {
        throw logger.logExceptionAsError(new IllegalArgumentException(String.format(
            "The JSON Web Key type: %s is not supported.", this.key.getKeyType().toString())));
    }
}
/**
* Gets the {@link HttpPipeline} powering this client.
*
* @return The pipeline.
*/
// Returns the pipeline backing this client; null when in local-only mode.
HttpPipeline getHttpPipeline() {
    return this.pipeline;
}
// Emits the key identifier; deferred so the field is read at subscription time.
Mono<String> getKeyId() {
    return Mono.defer(() -> Mono.just(this.keyId));
}
/**
* Gets the public part of the configured key. The get key operation is applicable to all key types and it requires
* the {@code keys/get} permission for non-local operations.
*
* <p><strong>Code Samples</strong></p>
* <p>Gets the configured key in the client. Subscribes to the call asynchronously and prints out the returned key
* details when a response has been received.</p>
*
* {@codesnippet com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.getKey}
*
* @return A {@link Mono} containing the requested {@link KeyVaultKey key}.
*
* @throws ResourceNotFoundException When the configured key doesn't exist in the key vault.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<KeyVaultKey> getKey() {
    // Fetch with full response, then unwrap to just the key value.
    try {
        return getKeyWithResponse().flatMap(FluxUtil::toMono);
    } catch (RuntimeException e) {
        return monoError(logger, e);
    }
}
/**
* Gets the public part of the configured key. The get key operation is applicable to all key types and it requires
* the {@code keys/get} permission for non-local operations.
*
* <p><strong>Code Samples</strong></p>
* <p>Gets the configured key in the client. Subscribes to the call asynchronously and prints out the returned key
* details when a response has been received.</p>
*
* {@codesnippet com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.getKeyWithResponse}
*
* @return A {@link Mono} containing a {@link Response} whose {@link Response
* requested {@link KeyVaultKey key}.
*
* @throws ResourceNotFoundException When the configured key doesn't exist in the key vault.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<KeyVaultKey>> getKeyWithResponse() {
    // Delegate to the context-aware overload; surface synchronous failures as an error Mono.
    try {
        return withContext(ctx -> getKeyWithResponse(ctx));
    } catch (RuntimeException e) {
        return monoError(logger, e);
    }
}
// Retrieves the key from the service; unsupported for clients built directly from a JsonWebKey.
Mono<Response<KeyVaultKey>> getKeyWithResponse(Context context) {
    // Guard clause: a local-only client has no service client to call.
    if (cryptographyServiceClient == null) {
        // Message fix: was the garbled "when in operating local-only mode".
        throw logger.logExceptionAsError(new UnsupportedOperationException(
            "Operation not supported when operating in local-only mode"));
    }
    return cryptographyServiceClient.getKey(context);
}
// Retrieves key material stored as a secret (secret-backed keys, e.g. from certificates).
Mono<JsonWebKey> getSecretKey() {
    try {
        return withContext(ctx -> cryptographyServiceClient.getSecretKey(ctx)).flatMap(FluxUtil::toMono);
    } catch (RuntimeException e) {
        return monoError(logger, e);
    }
}
/**
* Encrypts an arbitrary sequence of bytes using the configured key. Note that the encrypt operation only supports
* a single block of data, the size of which is dependent on the target key and the encryption algorithm to be used.
* The encrypt operation is supported for both symmetric keys and asymmetric keys. In case of asymmetric keys, the
* public portion of the key is used for encryption. This operation requires the {@code keys/encrypt} permission
* for non-local operations.
*
* <p>The {@link EncryptionAlgorithm encryption algorithm} indicates the type of algorithm to use for encrypting the
* specified {@code plaintext}. Possible values for asymmetric keys include:
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
*
* Possible values for symmetric keys include: {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
*
* <p><strong>Code Samples</strong></p>
* <p>Encrypts the content. Subscribes to the call asynchronously and prints out the encrypted content details when
* a response has been received.</p>
*
* {@codesnippet com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.encrypt
*
* @param algorithm The algorithm to be used for encryption.
* @param plaintext The content to be encrypted.
*
* @return A {@link Mono} containing a {@link EncryptResult} whose {@link EncryptResult
* contains the encrypted content.
*
* @throws NullPointerException If {@code algorithm} or {@code plaintext} are {@code null}.
* @throws ResourceNotFoundException If the key cannot be found for encryption.
* @throws UnsupportedOperationException If the encrypt operation is not supported or configured on the key.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<EncryptResult> encrypt(EncryptionAlgorithm algorithm, byte[] plaintext) {
    // Validate eagerly, then delegate with no caller-supplied context.
    Objects.requireNonNull(algorithm, "'algorithm' cannot be null.");
    Objects.requireNonNull(plaintext, "'plaintext' cannot be null.");
    return encrypt(algorithm, plaintext, null);
}
/**
* Encrypts an arbitrary sequence of bytes using the configured key. Note that the encrypt operation only supports
* a single block of data, the size of which is dependent on the target key and the encryption algorithm to be used.
* The encrypt operation is supported for both symmetric keys and asymmetric keys. In case of asymmetric keys, the
* public portion of the key is used for encryption. This operation requires the {@code keys/encrypt} permission
* for non-local operations.
*
* <p>The {@link EncryptionAlgorithm encryption algorithm} indicates the type of algorithm to use for encrypting the
* specified {@code plaintext}. Possible values for asymmetric keys include:
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
*
* Possible values for symmetric keys include: {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
*
* <p><strong>Code Samples</strong></p>
* <p>Encrypts the content. Subscribes to the call asynchronously and prints out the encrypted content details when
* a response has been received.</p>
*
* {@codesnippet com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.encrypt
*
* @param encryptParameters The parameters to use in the encryption operation.
*
* @return A {@link Mono} containing a {@link EncryptResult} whose {@link EncryptResult
* contains the encrypted content.
*
 * @throws NullPointerException If {@code encryptParameters} is {@code null}.
* @throws ResourceNotFoundException If the key cannot be found for encryption.
* @throws UnsupportedOperationException If the encrypt operation is not supported or configured on the key.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<EncryptResult> encrypt(EncryptParameters encryptParameters) {
    Objects.requireNonNull(encryptParameters, "'encryptParameters' cannot be null.");
    // Delegate to the context-aware overload; surface synchronous failures as an error Mono.
    try {
        return withContext(ctx -> encrypt(encryptParameters, ctx));
    } catch (RuntimeException e) {
        return monoError(logger, e);
    }
}
Mono<EncryptResult> encrypt(EncryptionAlgorithm algorithm, byte[] plaintext, Context context) {
    // Encrypt locally when usable key material with the right permission is present,
    // otherwise fall back to the Key Vault service.
    return ensureValidKeyAvailable().flatMap(localKeyUsable -> {
        if (!localKeyUsable) {
            return cryptographyServiceClient.encrypt(algorithm, plaintext, context);
        }
        if (checkKeyPermissions(this.key.getKeyOps(), KeyOperation.ENCRYPT)) {
            return localKeyCryptographyClient.encryptAsync(algorithm, plaintext, context, key);
        }
        return Mono.error(logger.logExceptionAsError(new UnsupportedOperationException(String.format(
            "Encrypt operation is missing permission/not supported for key with id: %s", key.getId()))));
    });
}
Mono<EncryptResult> encrypt(EncryptParameters encryptParameters, Context context) {
    // Same dispatch as the (algorithm, plaintext) overload, but with a parameter object.
    return ensureValidKeyAvailable().flatMap(localKeyUsable -> {
        if (!localKeyUsable) {
            return cryptographyServiceClient.encrypt(encryptParameters, context);
        }
        if (checkKeyPermissions(this.key.getKeyOps(), KeyOperation.ENCRYPT)) {
            return localKeyCryptographyClient.encryptAsync(encryptParameters, context, key);
        }
        return Mono.error(logger.logExceptionAsError(new UnsupportedOperationException(String.format(
            "Encrypt operation is missing permission/not supported for key with id: %s", key.getId()))));
    });
}
/**
* Decrypts a single block of encrypted data using the configured key and specified algorithm. Note that only a
* single block of data may be decrypted, the size of this block is dependent on the target key and the algorithm
* to be used. The decrypt operation is supported for both asymmetric and symmetric keys. This operation requires
* the {@code keys/decrypt} permission for non-local operations.
*
* <p>The {@link EncryptionAlgorithm encryption algorithm} indicates the type of algorithm to use for decrypting
* the specified encrypted content. Possible values for asymmetric keys include:
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
*
* Possible values for symmetric keys include: {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
*
* <p><strong>Code Samples</strong></p>
* <p>Decrypts the encrypted content. Subscribes to the call asynchronously and prints out the decrypted content
* details when a response has been received.</p>
*
* {@codesnippet com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.decrypt
*
* @param algorithm The algorithm to be used for decryption.
* @param ciphertext The content to be decrypted.
*
* @return A {@link Mono} containing the decrypted blob.
*
* @throws NullPointerException If {@code algorithm} or {@code ciphertext} are {@code null}.
* @throws ResourceNotFoundException If the key cannot be found for decryption.
* @throws UnsupportedOperationException If the decrypt operation is not supported or configured on the key.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<DecryptResult> decrypt(EncryptionAlgorithm algorithm, byte[] ciphertext) {
    Objects.requireNonNull(algorithm, "'algorithm' cannot be null.");
    // Bug fix: this previously re-validated 'algorithm', letting a null 'ciphertext' through.
    Objects.requireNonNull(ciphertext, "'ciphertext' cannot be null.");
    // Delegate to the context-aware overload with no caller-supplied context.
    return decrypt(algorithm, ciphertext, null);
}
/**
* Decrypts a single block of encrypted data using the configured key and specified algorithm. Note that only a
* single block of data may be decrypted, the size of this block is dependent on the target key and the algorithm
* to be used. The decrypt operation is supported for both asymmetric and symmetric keys. This operation requires
* the {@code keys/decrypt} permission for non-local operations.
*
* <p>The {@link EncryptionAlgorithm encryption algorithm} indicates the type of algorithm to use for decrypting
* the specified encrypted content. Possible values for asymmetric keys include:
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
*
* Possible values for symmetric keys include: {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
*
* <p><strong>Code Samples</strong></p>
* <p>Decrypts the encrypted content. Subscribes to the call asynchronously and prints out the decrypted content
* details when a response has been received.</p>
*
* {@codesnippet com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.decrypt
*
* @param decryptParameters The parameters to use in the decryption operation.
*
* @return A {@link Mono} containing the decrypted blob.
*
 * @throws NullPointerException If {@code decryptParameters} is {@code null}.
* @throws ResourceNotFoundException If the key cannot be found for decryption.
* @throws UnsupportedOperationException If the decrypt operation is not supported or configured on the key.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<DecryptResult> decrypt(DecryptParameters decryptParameters) {
    // Fail fast, synchronously, on a null argument (before any reactive machinery runs).
    Objects.requireNonNull(decryptParameters, "'decryptParameters' cannot be null.");
    try {
        return withContext(ctx -> decrypt(decryptParameters, ctx));
    } catch (RuntimeException failure) {
        // Surface setup failures through the returned Mono instead of throwing to the caller.
        return monoError(logger, failure);
    }
}
/*
 * Decrypts the ciphertext, preferring local key material when available and permitted;
 * otherwise delegates to the Key Vault service.
 */
Mono<DecryptResult> decrypt(EncryptionAlgorithm algorithm, byte[] ciphertext, Context context) {
    // Validate here as well so direct (package-private) callers get the same guarantees as the
    // public overloads; mirrors the sign/verify/wrapKey context methods, which all validate.
    Objects.requireNonNull(algorithm, "Encryption algorithm cannot be null.");
    Objects.requireNonNull(ciphertext, "Ciphertext content to be decrypted cannot be null.");
    return ensureValidKeyAvailable().flatMap(available -> {
        if (!available) {
            // No usable local key: fall back to the remote service client.
            return cryptographyServiceClient.decrypt(algorithm, ciphertext, context);
        }
        if (!checkKeyPermissions(this.key.getKeyOps(), KeyOperation.DECRYPT)) {
            return Mono.error(logger.logExceptionAsError(new UnsupportedOperationException(String.format(
                "Decrypt operation is not allowed for key with id: %s", key.getId()))));
        }
        return localKeyCryptographyClient.decryptAsync(algorithm, ciphertext, context, key);
    });
}
/*
 * Decrypts using the supplied parameters, preferring local key material when it is
 * available and the key grants the DECRYPT permission; otherwise uses the service.
 */
Mono<DecryptResult> decrypt(DecryptParameters decryptParameters, Context context) {
    return ensureValidKeyAvailable().flatMap(localKeyReady -> {
        if (localKeyReady) {
            if (checkKeyPermissions(this.key.getKeyOps(), KeyOperation.DECRYPT)) {
                return localKeyCryptographyClient.decryptAsync(decryptParameters, context, key);
            }
            return Mono.error(logger.logExceptionAsError(new UnsupportedOperationException(String.format(
                "Decrypt operation is not allowed for key with id: %s", key.getId()))));
        }
        // No usable local key: fall back to the remote service client.
        return cryptographyServiceClient.decrypt(decryptParameters, context);
    });
}
/**
* Creates a signature from a digest using the configured key. The sign operation supports both asymmetric and
* symmetric keys. This operation requires the {@code keys/sign} permission for non-local operations.
*
* <p>The {@link SignatureAlgorithm signature algorithm} indicates the type of algorithm to use to create the
* signature from the digest. Possible values include:
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
*
* <p><strong>Code Samples</strong></p>
* <p>Sings the digest. Subscribes to the call asynchronously and prints out the signature details when a response
* has been received.</p>
*
* {@codesnippet com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.sign
*
* @param algorithm The algorithm to use for signing.
* @param digest The content from which signature is to be created.
*
* @return A {@link Mono} containing a {@link SignResult} whose {@link SignResult
* the created signature.
*
* @throws NullPointerException If {@code algorithm} or {@code digest} is {@code null}.
* @throws ResourceNotFoundException If the key cannot be found for signing.
* @throws UnsupportedOperationException If the sign operation is not supported or configured on the key.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<SignResult> sign(SignatureAlgorithm algorithm, byte[] digest) {
    try {
        // Capture the caller's Context and delegate to the context-aware overload.
        return withContext(ctx -> sign(algorithm, digest, ctx));
    } catch (RuntimeException failure) {
        // Report synchronous setup failures through the returned Mono.
        return monoError(logger, failure);
    }
}
/*
 * Signs the digest, preferring local key material when it is available and the key
 * grants the SIGN permission; otherwise delegates to the Key Vault service.
 */
Mono<SignResult> sign(SignatureAlgorithm algorithm, byte[] digest, Context context) {
    Objects.requireNonNull(algorithm, "Signature algorithm cannot be null.");
    Objects.requireNonNull(digest, "Digest content to be signed cannot be null.");
    return ensureValidKeyAvailable().flatMap(localKeyReady -> {
        if (localKeyReady) {
            if (checkKeyPermissions(this.key.getKeyOps(), KeyOperation.SIGN)) {
                return localKeyCryptographyClient.signAsync(algorithm, digest, context, key);
            }
            return Mono.error(logger.logExceptionAsError(new UnsupportedOperationException(String.format(
                "Sign operation is not allowed for key with id: %s", key.getId()))));
        }
        // No usable local key: fall back to the remote service client.
        return cryptographyServiceClient.sign(algorithm, digest, context);
    });
}
/**
* Verifies a signature using the configured key. The verify operation supports both symmetric keys and asymmetric
* keys. In case of asymmetric keys public portion of the key is used to verify the signature. This operation
* requires the {@code keys/verify} permission for non-local operations.
*
* <p>The {@link SignatureAlgorithm signature algorithm} indicates the type of algorithm to use to verify the
* signature. Possible values include: {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
*
* <p><strong>Code Samples</strong></p>
* <p>Verifies the signature against the specified digest. Subscribes to the call asynchronously and prints out the
* verification details when a response has been received.</p>
*
* {@codesnippet com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.verify
*
* @param algorithm The algorithm to use for signing.
* @param digest The content from which signature was created.
* @param signature The signature to be verified.
*
* @return A {@link Mono} containing a {@link VerifyResult}
* {@link VerifyResult
*
* @throws NullPointerException If {@code algorithm}, {@code digest} or {@code signature} is {@code null}.
* @throws ResourceNotFoundException If the key cannot be found for verifying.
* @throws UnsupportedOperationException If the verify operation is not supported or configured on the key.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<VerifyResult> verify(SignatureAlgorithm algorithm, byte[] digest, byte[] signature) {
    try {
        // Capture the caller's Context and delegate to the context-aware overload.
        return withContext(ctx -> verify(algorithm, digest, signature, ctx));
    } catch (RuntimeException failure) {
        // Report synchronous setup failures through the returned Mono.
        return monoError(logger, failure);
    }
}
/*
 * Verifies a signature over the digest, preferring local key material when it is
 * available and the key grants the VERIFY permission; otherwise uses the service.
 */
Mono<VerifyResult> verify(SignatureAlgorithm algorithm, byte[] digest, byte[] signature, Context context) {
    Objects.requireNonNull(algorithm, "Signature algorithm cannot be null.");
    Objects.requireNonNull(digest, "Digest content cannot be null.");
    Objects.requireNonNull(signature, "Signature to be verified cannot be null.");
    return ensureValidKeyAvailable().flatMap(localKeyReady -> {
        if (localKeyReady) {
            if (checkKeyPermissions(this.key.getKeyOps(), KeyOperation.VERIFY)) {
                return localKeyCryptographyClient.verifyAsync(algorithm, digest, signature, context, key);
            }
            return Mono.error(logger.logExceptionAsError(new UnsupportedOperationException(String.format(
                "Verify operation is not allowed for key with id: %s", this.key.getId()))));
        }
        // No usable local key: fall back to the remote service client.
        return cryptographyServiceClient.verify(algorithm, digest, signature, context);
    });
}
/**
* Wraps a symmetric key using the configured key. The wrap operation supports wrapping a symmetric key with both
* symmetric and asymmetric keys. This operation requires the {@code keys/wrapKey} permission for non-local
* operations.
*
* <p>The {@link KeyWrapAlgorithm wrap algorithm} indicates the type of algorithm to use for wrapping the specified
* key content. Possible values include:
* {@link KeyWrapAlgorithm
* {@link KeyWrapAlgorithm
*
* Possible values for symmetric keys include: {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
*
* <p><strong>Code Samples</strong></p>
* <p>Wraps the key content. Subscribes to the call asynchronously and prints out the wrapped key details when a
* response has been received.</p>
*
* {@codesnippet com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.wrapKey
*
* @param algorithm The encryption algorithm to use for wrapping the key.
* @param key The key content to be wrapped.
*
* @return A {@link Mono} containing a {@link WrapResult} whose {@link WrapResult
* contains the wrapped key result.
*
* @throws NullPointerException If {@code algorithm} or {@code key} are {@code null}.
* @throws ResourceNotFoundException If the key cannot be found for wrap operation.
* @throws UnsupportedOperationException If the wrap operation is not supported or configured on the key.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<WrapResult> wrapKey(KeyWrapAlgorithm algorithm, byte[] key) {
    try {
        // Capture the caller's Context and delegate to the context-aware overload.
        return withContext(ctx -> wrapKey(algorithm, key, ctx));
    } catch (RuntimeException failure) {
        // Report synchronous setup failures through the returned Mono.
        return monoError(logger, failure);
    }
}
/*
 * Wraps the given key content, preferring local key material when it is available and
 * the key grants the WRAP_KEY permission; otherwise delegates to the service.
 * Note: the 'key' parameter shadows the cached key field, hence this.key below.
 */
Mono<WrapResult> wrapKey(KeyWrapAlgorithm algorithm, byte[] key, Context context) {
    Objects.requireNonNull(algorithm, "Key wrap algorithm cannot be null.");
    Objects.requireNonNull(key, "Key content to be wrapped cannot be null.");
    return ensureValidKeyAvailable().flatMap(localKeyReady -> {
        if (localKeyReady) {
            if (checkKeyPermissions(this.key.getKeyOps(), KeyOperation.WRAP_KEY)) {
                return localKeyCryptographyClient.wrapKeyAsync(algorithm, key, context, this.key);
            }
            return Mono.error(logger.logExceptionAsError(new UnsupportedOperationException(String.format(
                "Wrap Key operation is not allowed for key with id: %s", this.key.getId()))));
        }
        // No usable local key: fall back to the remote service client.
        return cryptographyServiceClient.wrapKey(algorithm, key, context);
    });
}
/**
* Unwraps a symmetric key using the configured key that was initially used for wrapping that key. This operation
* is the reverse of the wrap operation. The unwrap operation supports asymmetric and symmetric keys to unwrap. This
* operation requires the {@code keys/unwrapKey} permission for non-local operations.
*
* <p>The {@link KeyWrapAlgorithm wrap algorithm} indicates the type of algorithm to use for unwrapping the
* specified encrypted key content. Possible values for asymmetric keys include:
* {@link KeyWrapAlgorithm
* {@link KeyWrapAlgorithm
*
* Possible values for symmetric keys include: {@link KeyWrapAlgorithm
* {@link KeyWrapAlgorithm
*
* <p><strong>Code Samples</strong></p>
* <p>Unwraps the key content. Subscribes to the call asynchronously and prints out the unwrapped key details when
* a response has been received.</p>
*
* {@codesnippet com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.unwrapKey
*
* @param algorithm The encryption algorithm to use for wrapping the key.
* @param encryptedKey The encrypted key content to unwrap.
*
* @return A {@link Mono} containing an {@link UnwrapResult} whose {@link UnwrapResult
* key} contains the unwrapped key result.
*
* @throws NullPointerException If {@code algorithm} or {@code encryptedKey} are {@code null}.
* @throws ResourceNotFoundException If the key cannot be found for wrap operation.
* @throws UnsupportedOperationException If the unwrap operation is not supported or configured on the key.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<UnwrapResult> unwrapKey(KeyWrapAlgorithm algorithm, byte[] encryptedKey) {
    try {
        // Capture the caller's Context and delegate to the context-aware overload.
        return withContext(ctx -> unwrapKey(algorithm, encryptedKey, ctx));
    } catch (RuntimeException failure) {
        // Report synchronous setup failures through the returned Mono.
        return monoError(logger, failure);
    }
}
/*
 * Unwraps the encrypted key content, preferring local key material when it is available
 * and the key grants the UNWRAP_KEY permission; otherwise delegates to the service.
 */
Mono<UnwrapResult> unwrapKey(KeyWrapAlgorithm algorithm, byte[] encryptedKey, Context context) {
    Objects.requireNonNull(algorithm, "Key wrap algorithm cannot be null.");
    Objects.requireNonNull(encryptedKey, "Encrypted key content to be unwrapped cannot be null.");
    return ensureValidKeyAvailable().flatMap(localKeyReady -> {
        if (localKeyReady) {
            if (checkKeyPermissions(this.key.getKeyOps(), KeyOperation.UNWRAP_KEY)) {
                return localKeyCryptographyClient.unwrapKeyAsync(algorithm, encryptedKey, context, key);
            }
            return Mono.error(logger.logExceptionAsError(new UnsupportedOperationException(String.format(
                "Unwrap Key operation is not allowed for key with id: %s", this.key.getId()))));
        }
        // No usable local key: fall back to the remote service client.
        return cryptographyServiceClient.unwrapKey(algorithm, encryptedKey, context);
    });
}
/**
* Creates a signature from the raw data using the configured key. The sign data operation supports both asymmetric
* and symmetric keys. This operation requires the {@code keys/sign} permission for non-local operations.
*
* <p>The {@link SignatureAlgorithm signature algorithm} indicates the type of algorithm to use to sign the digest.
* Possible values include:
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
*
* <p><strong>Code Samples</strong></p>
* <p>Signs the raw data. Subscribes to the call asynchronously and prints out the signature details when a
* response has been received.</p>
*
* {@codesnippet com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.signData
*
* @param algorithm The algorithm to use for signing.
* @param data The content from which signature is to be created.
*
* @return A {@link Mono} containing a {@link SignResult} whose {@link SignResult
* the created signature.
*
* @throws NullPointerException If {@code algorithm} or {@code data} is {@code null}.
* @throws ResourceNotFoundException If the key cannot be found for signing.
* @throws UnsupportedOperationException If the sign operation is not supported or configured on the key.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<SignResult> signData(SignatureAlgorithm algorithm, byte[] data) {
    try {
        // Capture the caller's Context and delegate to the context-aware overload.
        return withContext(ctx -> signData(algorithm, data, ctx));
    } catch (RuntimeException failure) {
        // Report synchronous setup failures through the returned Mono.
        return monoError(logger, failure);
    }
}
/*
 * Signs raw data (digesting happens downstream), preferring local key material when it
 * is available and the key grants the SIGN permission; otherwise uses the service.
 */
Mono<SignResult> signData(SignatureAlgorithm algorithm, byte[] data, Context context) {
    Objects.requireNonNull(algorithm, "Signature algorithm cannot be null.");
    Objects.requireNonNull(data, "Data to be signed cannot be null.");
    return ensureValidKeyAvailable().flatMap(localKeyReady -> {
        if (localKeyReady) {
            if (checkKeyPermissions(this.key.getKeyOps(), KeyOperation.SIGN)) {
                return localKeyCryptographyClient.signDataAsync(algorithm, data, context, key);
            }
            return Mono.error(logger.logExceptionAsError(new UnsupportedOperationException(String.format(
                "Sign Operation is not allowed for key with id: %s", this.key.getId()))));
        }
        // No usable local key: fall back to the remote service client.
        return cryptographyServiceClient.signData(algorithm, data, context);
    });
}
/**
* Verifies a signature against the raw data using the configured key. The verify operation supports both symmetric
* keys and asymmetric keys. In case of asymmetric keys public portion of the key is used to verify the signature.
* This operation requires the {@code keys/verify} permission for non-local operations.
*
* <p>The {@link SignatureAlgorithm signature algorithm} indicates the type of algorithm to use to verify the
* signature. Possible values include:
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
*
* <p><strong>Code Samples</strong></p>
* <p>Verifies the signature against the raw data. Subscribes to the call asynchronously and prints out the
* verification details when a response has been received.</p>
*
* {@codesnippet com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.verifyData
*
* @param algorithm The algorithm to use for signing.
* @param data The raw content against which signature is to be verified.
* @param signature The signature to be verified.
*
* @return A {@link Mono} containing a {@link VerifyResult}
* {@link VerifyResult
*
* @throws NullPointerException If {@code algorithm}, {@code data} or {@code signature} is {@code null}.
* @throws ResourceNotFoundException If the key cannot be found for verifying.
* @throws UnsupportedOperationException If the verify operation is not supported or configured on the key.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<VerifyResult> verifyData(SignatureAlgorithm algorithm, byte[] data, byte[] signature) {
    try {
        // Capture the caller's Context and delegate to the context-aware overload.
        return withContext(ctx -> verifyData(algorithm, data, signature, ctx));
    } catch (RuntimeException failure) {
        // Report synchronous setup failures through the returned Mono.
        return monoError(logger, failure);
    }
}
/*
 * Verifies a signature over raw data, preferring local key material when it is
 * available and the key grants the VERIFY permission; otherwise uses the service.
 */
Mono<VerifyResult> verifyData(SignatureAlgorithm algorithm, byte[] data, byte[] signature, Context context) {
    Objects.requireNonNull(algorithm, "Signature algorithm cannot be null.");
    Objects.requireNonNull(data, "Data cannot be null.");
    Objects.requireNonNull(signature, "Signature to be verified cannot be null.");
    return ensureValidKeyAvailable().flatMap(localKeyReady -> {
        if (localKeyReady) {
            if (checkKeyPermissions(this.key.getKeyOps(), KeyOperation.VERIFY)) {
                return localKeyCryptographyClient.verifyDataAsync(algorithm, data, signature, context, key);
            }
            return Mono.error(logger.logExceptionAsError(new UnsupportedOperationException(String.format(
                "Verify operation is not allowed for key with id: %s", this.key.getId()))));
        }
        // No usable local key: fall back to the remote service client.
        return cryptographyServiceClient.verifyData(algorithm, data, signature, context);
    });
}
// Returns true when the requested operation appears in the key's permitted operations list.
// NOTE(review): assumes 'operations' is never null — callers pass this.key.getKeyOps();
// confirm a valid JsonWebKey always carries a non-null operations list.
private boolean checkKeyPermissions(List<KeyOperation> operations, KeyOperation keyOperation) {
    return operations.contains(keyOperation);
}
/*
 * Ensures local key material is present and valid, fetching it lazily from the service
 * when necessary. Emits true when a valid local key is available (local crypto clients
 * are initialized as a side effect), false when operations must fall back to the service.
 */
private Mono<Boolean> ensureValidKeyAvailable() {
    boolean keyNotAvailable = (key == null && keyCollection != null);
    boolean keyNotValid = (key != null && !key.isValid());
    if (!(keyNotAvailable || keyNotValid)) {
        // Key is already cached and valid (or nothing can be fetched): nothing to do.
        return Mono.defer(() -> Mono.just(true));
    }
    // Fix: constant-first equals() avoids an NPE when the cached key is present-but-invalid
    // while keyCollection is null (the original 'keyCollection.equals(...)' could throw).
    if (SECRETS_COLLECTION.equals(keyCollection)) {
        return getSecretKey().map(jsonWebKey -> {
            key = jsonWebKey;
            if (key.isValid()) {
                initializeCryptoClients();
                return true;
            }
            return false;
        });
    }
    return getKey().map(keyVaultKey -> {
        key = keyVaultKey.getKey();
        if (key.isValid()) {
            initializeCryptoClients();
            return true;
        }
        return false;
    });
}
// Package-private accessor exposing the service-backed client (used internally, e.g. by tests).
CryptographyServiceClient getCryptographyServiceClient() {
    return cryptographyServiceClient;
}
// Package-private hook allowing the service-backed client to be replaced (e.g. for testing).
void setCryptographyServiceClient(CryptographyServiceClient serviceClient) {
    this.cryptographyServiceClient = serviceClient;
}
} |
I wonder if this should return the non-generic `HashMap.class` | public void createGenericTypeReference() {
final TypeReference<HashMap<String, Object>> typeReference = new TypeReference<HashMap<String, Object>>() {};
final Map<String, Object> expectedJavaType = new HashMap<String, Object>() {};
assertEquals(expectedJavaType.getClass().getGenericSuperclass(), typeReference.getJavaType());
assertNull(typeReference.getClazz());
} | assertNull(typeReference.getClazz()); | public void createGenericTypeReference() {
final TypeReference<HashMap<String, Object>> typeReference = new TypeReference<HashMap<String, Object>>() { };
final Map<String, Object> expectedJavaType = new HashMap<String, Object>() { };
assertEquals(expectedJavaType.getClass().getGenericSuperclass(), typeReference.getJavaType());
assertEquals(HashMap.class, typeReference.getJavaClass());
} | class TypeReferenceTests {
@Test
@Test
public void createFactoryInstance() {
TypeReference<Integer> typeReference = TypeReference.createInstance(int.class);
assertEquals(int.class, typeReference.getJavaType());
assertEquals(int.class, typeReference.getClazz());
}
@Test
public void createTypeReferenceWithoutType() {
IllegalArgumentException thrown
= assertThrows(IllegalArgumentException.class, () -> new TypeReference() {});
assertEquals("Type constructed without type information.", thrown.getMessage());
}
} | class TypeReferenceTests {
@Test
@Test
public void createFactoryInstance() {
TypeReference<Integer> typeReference = TypeReference.createInstance(int.class);
assertEquals(int.class, typeReference.getJavaType());
assertEquals(int.class, typeReference.getJavaClass());
}
@SuppressWarnings("rawtypes")
@Test
public void createTypeReferenceWithoutType() {
IllegalArgumentException thrown
= assertThrows(IllegalArgumentException.class, () -> new TypeReference() { });
assertEquals("Type constructed without type information.", thrown.getMessage());
}
} |
I wonder if this should be made into a method on this class to make it easier to override the implementation | public Mono<PollResponse<T>> poll(PollingContext<T> pollingContext, TypeReference<T> pollResponseType) {
HttpRequest request = new HttpRequest(HttpMethod.GET, pollingContext.getData(PollingConstants.LOCATION));
return httpPipeline.send(request).flatMap(response -> {
HttpHeader locationHeader = response.getHeaders().get(PollingConstants.LOCATION);
if (locationHeader != null) {
pollingContext.setData(PollingConstants.LOCATION, locationHeader.getValue());
}
LongRunningOperationStatus status;
if (response.getStatusCode() == 202) {
status = LongRunningOperationStatus.IN_PROGRESS;
} else if (response.getStatusCode() >= 200 && response.getStatusCode() <= 204) {
status = LongRunningOperationStatus.SUCCESSFULLY_COMPLETED;
} else {
status = LongRunningOperationStatus.FAILED;
}
return response.getBodyAsByteArray().map(BinaryData::fromBytes).flatMap(binaryData -> {
pollingContext.setData(PollingConstants.POLL_RESPONSE_BODY, binaryData.toString());
String retryAfterValue = response.getHeaders().getValue(PollingConstants.RETRY_AFTER);
Duration retryAfter = retryAfterValue == null ? null
: Duration.ofSeconds(Long.parseLong(retryAfterValue));
return PollingUtils.deserializeResponse(binaryData, serializer, pollResponseType)
.map(value -> new PollResponse<>(status, value, retryAfter));
});
});
} | status = LongRunningOperationStatus.FAILED; | new HttpRequest(HttpMethod.GET, pollingContext.getData(PollingConstants.LOCATION));
return httpPipeline.send(request).flatMap(response -> {
HttpHeader locationHeader = response.getHeaders().get(PollingConstants.LOCATION);
if (locationHeader != null) {
pollingContext.setData(PollingConstants.LOCATION, locationHeader.getValue());
}
LongRunningOperationStatus status;
if (response.getStatusCode() == 202) {
status = LongRunningOperationStatus.IN_PROGRESS;
} else if (response.getStatusCode() >= 200 && response.getStatusCode() <= 204) {
status = LongRunningOperationStatus.SUCCESSFULLY_COMPLETED;
} else {
status = LongRunningOperationStatus.FAILED;
}
return response.getBodyAsByteArray().map(BinaryData::fromBytes).flatMap(binaryData -> {
pollingContext.setData(PollingConstants.POLL_RESPONSE_BODY, binaryData.toString());
Duration retryAfter = ImplUtils.getRetryAfterFromHeaders(response.getHeaders(), OffsetDateTime::now);
return PollingUtils.deserializeResponse(binaryData, serializer, pollResponseType)
.map(value -> new PollResponse<>(status, value, retryAfter));
});
} | class LocationPollingStrategy<T, U> implements PollingStrategy<T, U> {
private final HttpPipeline httpPipeline;
private final ObjectSerializer serializer;
/**
* Creates an instance of the location polling strategy using a JSON serializer.
*
* @param httpPipeline an instance of {@link HttpPipeline} to send requests with
*/
public LocationPollingStrategy(HttpPipeline httpPipeline) {
this(httpPipeline, new DefaultJsonSerializer());
}
/**
* Creates an instance of the location polling strategy.
*
* @param httpPipeline an instance of {@link HttpPipeline} to send requests with
* @param serializer a custom serializer for serializing and deserializing polling responses
*/
public LocationPollingStrategy(HttpPipeline httpPipeline, ObjectSerializer serializer) {
this.httpPipeline = Objects.requireNonNull(httpPipeline, "'httpPipeline' cannot be null");
this.serializer = Objects.requireNonNull(serializer, "'serializer' cannot be null");
}
@Override
public Mono<Boolean> canPoll(Response<?> initialResponse) {
HttpHeader locationHeader = initialResponse.getHeaders().get(PollingConstants.LOCATION);
if (locationHeader != null) {
try {
new URL(locationHeader.getValue());
return Mono.just(true);
} catch (MalformedURLException e) {
return Mono.just(false);
}
}
return Mono.just(false);
}
@Override
public Mono<PollResponse<T>> onInitialResponse(Response<?> response, PollingContext<T> pollingContext,
TypeReference<T> pollResponseType) {
HttpHeader locationHeader = response.getHeaders().get(PollingConstants.LOCATION);
if (locationHeader != null) {
pollingContext.setData(PollingConstants.LOCATION, locationHeader.getValue());
}
pollingContext.setData(PollingConstants.HTTP_METHOD, response.getRequest().getHttpMethod().name());
pollingContext.setData(PollingConstants.REQUEST_URL, response.getRequest().getUrl().toString());
if (response.getStatusCode() == 200
|| response.getStatusCode() == 201
|| response.getStatusCode() == 202
|| response.getStatusCode() == 204) {
String retryAfterValue = response.getHeaders().getValue(PollingConstants.RETRY_AFTER);
Duration retryAfter = retryAfterValue == null ? null : Duration.ofSeconds(Long.parseLong(retryAfterValue));
return PollingUtils.convertResponse(response.getValue(), serializer, pollResponseType)
.map(value -> new PollResponse<>(LongRunningOperationStatus.IN_PROGRESS, value, retryAfter))
.switchIfEmpty(Mono.defer(() -> Mono.just(new PollResponse<>(
LongRunningOperationStatus.IN_PROGRESS, null, retryAfter))));
} else {
return Mono.error(new AzureException(String.format("Operation failed or cancelled with status code %d,"
+ ", 'Location' header: %s, and response body: %s", response.getStatusCode(), locationHeader,
PollingUtils.serializeResponse(response.getValue(), serializer))));
}
}
@Override
public Mono<PollResponse<T>> poll(PollingContext<T> pollingContext, TypeReference<T> pollResponseType) {
HttpRequest request = );
}
@Override
public Mono<U> getResult(PollingContext<T> pollingContext, TypeReference<U> resultType) {
if (pollingContext.getLatestResponse().getStatus() == LongRunningOperationStatus.FAILED) {
return Mono.error(new AzureException("Long running operation failed."));
} else if (pollingContext.getLatestResponse().getStatus() == LongRunningOperationStatus.USER_CANCELLED) {
return Mono.error(new AzureException("Long running operation cancelled."));
}
String finalGetUrl;
String httpMethod = pollingContext.getData(PollingConstants.HTTP_METHOD);
if (HttpMethod.PUT.name().equalsIgnoreCase(httpMethod)
|| HttpMethod.PATCH.name().equalsIgnoreCase(httpMethod)) {
finalGetUrl = pollingContext.getData(PollingConstants.REQUEST_URL);
} else if (HttpMethod.POST.name().equalsIgnoreCase(httpMethod)
&& pollingContext.getData(PollingConstants.LOCATION) != null) {
finalGetUrl = pollingContext.getData(PollingConstants.LOCATION);
} else {
return Mono.error(new AzureException("Cannot get final result"));
}
if (finalGetUrl == null) {
String latestResponseBody = pollingContext.getData(PollingConstants.POLL_RESPONSE_BODY);
return PollingUtils.deserializeResponse(BinaryData.fromString(latestResponseBody), serializer, resultType);
} else {
HttpRequest request = new HttpRequest(HttpMethod.GET, finalGetUrl);
return httpPipeline.send(request)
.flatMap(HttpResponse::getBodyAsByteArray)
.map(BinaryData::fromBytes)
.flatMap(binaryData -> PollingUtils.deserializeResponse(binaryData, serializer, resultType));
}
}
} | class LocationPollingStrategy<T, U> implements PollingStrategy<T, U> {
private static final ObjectSerializer DEFAULT_SERIALIZER = new DefaultJsonSerializer();
private final ClientLogger logger = new ClientLogger(LocationPollingStrategy.class);
private final HttpPipeline httpPipeline;
private final ObjectSerializer serializer;
/**
* Creates an instance of the location polling strategy using a JSON serializer.
*
* @param httpPipeline an instance of {@link HttpPipeline} to send requests with
* @throws NullPointerException If {@code httpPipeline} is null.
*/
public LocationPollingStrategy(HttpPipeline httpPipeline) {
this(httpPipeline, DEFAULT_SERIALIZER);
}
/**
* Creates an instance of the location polling strategy.
*
* @param httpPipeline an instance of {@link HttpPipeline} to send requests with
* @param serializer a custom serializer for serializing and deserializing polling responses
* @throws NullPointerException If {@code httpPipeline} is null.
*/
public LocationPollingStrategy(HttpPipeline httpPipeline, ObjectSerializer serializer) {
this.httpPipeline = Objects.requireNonNull(httpPipeline, "'httpPipeline' cannot be null");
this.serializer = (serializer == null) ? DEFAULT_SERIALIZER : serializer;
}
@Override
public Mono<Boolean> canPoll(Response<?> initialResponse) {
HttpHeader locationHeader = initialResponse.getHeaders().get(PollingConstants.LOCATION);
if (locationHeader != null) {
try {
new URL(locationHeader.getValue());
return Mono.just(true);
} catch (MalformedURLException e) {
logger.info("Failed to parse Location header into a URL.", e);
return Mono.just(false);
}
}
return Mono.just(false);
}
@Override
public Mono<PollResponse<T>> onInitialResponse(Response<?> response, PollingContext<T> pollingContext,
TypeReference<T> pollResponseType) {
HttpHeader locationHeader = response.getHeaders().get(PollingConstants.LOCATION);
if (locationHeader != null) {
pollingContext.setData(PollingConstants.LOCATION, locationHeader.getValue());
}
pollingContext.setData(PollingConstants.HTTP_METHOD, response.getRequest().getHttpMethod().name());
pollingContext.setData(PollingConstants.REQUEST_URL, response.getRequest().getUrl().toString());
if (response.getStatusCode() == 200
|| response.getStatusCode() == 201
|| response.getStatusCode() == 202
|| response.getStatusCode() == 204) {
String retryAfterValue = response.getHeaders().getValue(PollingConstants.RETRY_AFTER);
Duration retryAfter = retryAfterValue == null ? null : Duration.ofSeconds(Long.parseLong(retryAfterValue));
return PollingUtils.convertResponse(response.getValue(), serializer, pollResponseType)
.map(value -> new PollResponse<>(LongRunningOperationStatus.IN_PROGRESS, value, retryAfter))
.switchIfEmpty(Mono.defer(() -> Mono.just(new PollResponse<>(
LongRunningOperationStatus.IN_PROGRESS, null, retryAfter))));
} else {
return Mono.error(new AzureException(String.format("Operation failed or cancelled with status code %d,"
+ ", 'Location' header: %s, and response body: %s", response.getStatusCode(), locationHeader,
PollingUtils.serializeResponse(response.getValue(), serializer))));
}
}
@Override
public Mono<PollResponse<T>> poll(PollingContext<T> pollingContext, TypeReference<T> pollResponseType) {
HttpRequest request = );
}
@Override
public Mono<U> getResult(PollingContext<T> pollingContext, TypeReference<U> resultType) {
if (pollingContext.getLatestResponse().getStatus() == LongRunningOperationStatus.FAILED) {
return Mono.error(new AzureException("Long running operation failed."));
} else if (pollingContext.getLatestResponse().getStatus() == LongRunningOperationStatus.USER_CANCELLED) {
return Mono.error(new AzureException("Long running operation cancelled."));
}
String finalGetUrl;
String httpMethod = pollingContext.getData(PollingConstants.HTTP_METHOD);
if (HttpMethod.PUT.name().equalsIgnoreCase(httpMethod)
|| HttpMethod.PATCH.name().equalsIgnoreCase(httpMethod)) {
finalGetUrl = pollingContext.getData(PollingConstants.REQUEST_URL);
} else if (HttpMethod.POST.name().equalsIgnoreCase(httpMethod)
&& pollingContext.getData(PollingConstants.LOCATION) != null) {
finalGetUrl = pollingContext.getData(PollingConstants.LOCATION);
} else {
return Mono.error(new AzureException("Cannot get final result"));
}
if (finalGetUrl == null) {
String latestResponseBody = pollingContext.getData(PollingConstants.POLL_RESPONSE_BODY);
return PollingUtils.deserializeResponse(BinaryData.fromString(latestResponseBody), serializer, resultType);
} else {
HttpRequest request = new HttpRequest(HttpMethod.GET, finalGetUrl);
return httpPipeline.send(request)
.flatMap(HttpResponse::getBodyAsByteArray)
.map(BinaryData::fromBytes)
.flatMap(binaryData -> PollingUtils.deserializeResponse(binaryData, serializer, resultType));
}
}
} |
Should there be any guards here to prevent multiple threads from attempting to determine whether polling is possible? Additionally, once a polling strategy has been determined is it safe to use that pattern for all future polling attempts? | public Mono<Boolean> canPoll(Response<?> initialResponse) {
return Flux.fromIterable(pollingStrategies)
.concatMap(strategy -> strategy.canPoll(initialResponse)
.map(canPoll -> Tuples.of(strategy, canPoll)))
.takeUntil(Tuple2::getT2)
.last()
.map(tuple2 -> {
this.pollableStrategy = tuple2.getT1();
return true;
})
.defaultIfEmpty(false);
} | return Flux.fromIterable(pollingStrategies) | public Mono<Boolean> canPoll(Response<?> initialResponse) {
return Flux.fromIterable(pollingStrategies)
.concatMap(strategy -> strategy.canPoll(initialResponse)
.map(canPoll -> Tuples.of(strategy, canPoll)))
.takeUntil(Tuple2::getT2)
.last()
.map(tuple2 -> {
this.pollableStrategy = tuple2.getT1();
return true;
})
.defaultIfEmpty(false);
} | class ChainedPollingStrategy<T, U> implements PollingStrategy<T, U> {
private final List<PollingStrategy<T, U>> pollingStrategies;
private PollingStrategy<T, U> pollableStrategy = null;
/**
* Creates a chained polling strategy with a list of polling strategies.
* @param strategies the list of polling strategies
*/
public ChainedPollingStrategy(List<PollingStrategy<T, U>> strategies) {
this.pollingStrategies = Collections.unmodifiableList(strategies);
}
@Override
/**
* {@inheritDoc}
*
* @throws NullPointerException if {@link
*/
@Override
public Mono<U> getResult(PollingContext<T> context, TypeReference<U> resultType) {
return pollableStrategy.getResult(context, resultType);
}
/**
* {@inheritDoc}
*
* @throws NullPointerException if {@link
*/
@Override
public Mono<PollResponse<T>> onInitialResponse(Response<?> response, PollingContext<T> pollingContext,
TypeReference<T> pollResponseType) {
return pollableStrategy.onInitialResponse(response, pollingContext, pollResponseType);
}
/**
* {@inheritDoc}
*
* @throws NullPointerException if {@link
*/
@Override
public Mono<PollResponse<T>> poll(PollingContext<T> context, TypeReference<T> pollResponseType) {
return pollableStrategy.poll(context, pollResponseType);
}
/**
* {@inheritDoc}
*
* @throws NullPointerException if {@link
*/
@Override
public Mono<T> cancel(PollingContext<T> pollingContext, PollResponse<T> initialResponse) {
return pollableStrategy.cancel(pollingContext, initialResponse);
}
} | class ChainedPollingStrategy<T, U> implements PollingStrategy<T, U> {
private final ClientLogger logger = new ClientLogger(ChainedPollingStrategy.class);
private final List<PollingStrategy<T, U>> pollingStrategies;
private PollingStrategy<T, U> pollableStrategy = null;
/**
* Creates a chained polling strategy with a list of polling strategies.
* @param strategies the list of polling strategies
* @throws NullPointerException If {@code strategies} is null.
* @throws IllegalArgumentException If {@code strategies} is an empty list.
*/
public ChainedPollingStrategy(List<PollingStrategy<T, U>> strategies) {
Objects.requireNonNull(strategies, "'strategies' cannot be null.");
if (strategies.isEmpty()) {
throw logger.logExceptionAsError(new IllegalArgumentException("'strategies' cannot be empty."));
}
this.pollingStrategies = Collections.unmodifiableList(strategies);
}
@Override
/**
* {@inheritDoc}
*
* @throws NullPointerException if {@link
*/
@Override
public Mono<U> getResult(PollingContext<T> context, TypeReference<U> resultType) {
return pollableStrategy.getResult(context, resultType);
}
/**
* {@inheritDoc}
*
* @throws NullPointerException if {@link
*/
@Override
public Mono<PollResponse<T>> onInitialResponse(Response<?> response, PollingContext<T> pollingContext,
TypeReference<T> pollResponseType) {
return pollableStrategy.onInitialResponse(response, pollingContext, pollResponseType);
}
/**
* {@inheritDoc}
*
* @throws NullPointerException if {@link
*/
@Override
public Mono<PollResponse<T>> poll(PollingContext<T> context, TypeReference<T> pollResponseType) {
return pollableStrategy.poll(context, pollResponseType);
}
/**
* {@inheritDoc}
*
* @throws NullPointerException if {@link
*/
@Override
public Mono<T> cancel(PollingContext<T> pollingContext, PollResponse<T> initialResponse) {
return pollableStrategy.cancel(pollingContext, initialResponse);
}
} |
Should this throw if `strategies` is null or empty? | public ChainedPollingStrategy(List<PollingStrategy<T, U>> strategies) {
this.pollingStrategies = Collections.unmodifiableList(strategies);
} | this.pollingStrategies = Collections.unmodifiableList(strategies); | public ChainedPollingStrategy(List<PollingStrategy<T, U>> strategies) {
Objects.requireNonNull(strategies, "'strategies' cannot be null.");
if (strategies.isEmpty()) {
throw logger.logExceptionAsError(new IllegalArgumentException("'strategies' cannot be empty."));
}
this.pollingStrategies = Collections.unmodifiableList(strategies);
} | class ChainedPollingStrategy<T, U> implements PollingStrategy<T, U> {
private final List<PollingStrategy<T, U>> pollingStrategies;
private PollingStrategy<T, U> pollableStrategy = null;
/**
* Creates a chained polling strategy with a list of polling strategies.
* @param strategies the list of polling strategies
*/
@Override
public Mono<Boolean> canPoll(Response<?> initialResponse) {
return Flux.fromIterable(pollingStrategies)
.concatMap(strategy -> strategy.canPoll(initialResponse)
.map(canPoll -> Tuples.of(strategy, canPoll)))
.takeUntil(Tuple2::getT2)
.last()
.map(tuple2 -> {
this.pollableStrategy = tuple2.getT1();
return true;
})
.defaultIfEmpty(false);
}
/**
* {@inheritDoc}
*
* @throws NullPointerException if {@link
*/
@Override
public Mono<U> getResult(PollingContext<T> context, TypeReference<U> resultType) {
return pollableStrategy.getResult(context, resultType);
}
/**
* {@inheritDoc}
*
* @throws NullPointerException if {@link
*/
@Override
public Mono<PollResponse<T>> onInitialResponse(Response<?> response, PollingContext<T> pollingContext,
TypeReference<T> pollResponseType) {
return pollableStrategy.onInitialResponse(response, pollingContext, pollResponseType);
}
/**
* {@inheritDoc}
*
* @throws NullPointerException if {@link
*/
@Override
public Mono<PollResponse<T>> poll(PollingContext<T> context, TypeReference<T> pollResponseType) {
return pollableStrategy.poll(context, pollResponseType);
}
/**
* {@inheritDoc}
*
* @throws NullPointerException if {@link
*/
@Override
public Mono<T> cancel(PollingContext<T> pollingContext, PollResponse<T> initialResponse) {
return pollableStrategy.cancel(pollingContext, initialResponse);
}
} | class ChainedPollingStrategy<T, U> implements PollingStrategy<T, U> {
private final ClientLogger logger = new ClientLogger(ChainedPollingStrategy.class);
private final List<PollingStrategy<T, U>> pollingStrategies;
private PollingStrategy<T, U> pollableStrategy = null;
/**
* Creates a chained polling strategy with a list of polling strategies.
* @param strategies the list of polling strategies
* @throws NullPointerException If {@code strategies} is null.
* @throws IllegalArgumentException If {@code strategies} is an empty list.
*/
@Override
public Mono<Boolean> canPoll(Response<?> initialResponse) {
return Flux.fromIterable(pollingStrategies)
.concatMap(strategy -> strategy.canPoll(initialResponse)
.map(canPoll -> Tuples.of(strategy, canPoll)))
.takeUntil(Tuple2::getT2)
.last()
.map(tuple2 -> {
this.pollableStrategy = tuple2.getT1();
return true;
})
.defaultIfEmpty(false);
}
/**
* {@inheritDoc}
*
* @throws NullPointerException if {@link
*/
@Override
public Mono<U> getResult(PollingContext<T> context, TypeReference<U> resultType) {
return pollableStrategy.getResult(context, resultType);
}
/**
* {@inheritDoc}
*
* @throws NullPointerException if {@link
*/
@Override
public Mono<PollResponse<T>> onInitialResponse(Response<?> response, PollingContext<T> pollingContext,
TypeReference<T> pollResponseType) {
return pollableStrategy.onInitialResponse(response, pollingContext, pollResponseType);
}
/**
* {@inheritDoc}
*
* @throws NullPointerException if {@link
*/
@Override
public Mono<PollResponse<T>> poll(PollingContext<T> context, TypeReference<T> pollResponseType) {
return pollableStrategy.poll(context, pollResponseType);
}
/**
* {@inheritDoc}
*
* @throws NullPointerException if {@link
*/
@Override
public Mono<T> cancel(PollingContext<T> pollingContext, PollResponse<T> initialResponse) {
return pollableStrategy.cancel(pollingContext, initialResponse);
}
} |
Missing `@throws` in the Javadocs | public LocationPollingStrategy(HttpPipeline httpPipeline, ObjectSerializer serializer) {
this.httpPipeline = Objects.requireNonNull(httpPipeline, "'httpPipeline' cannot be null");
this.serializer = Objects.requireNonNull(serializer, "'serializer' cannot be null");
} | this.serializer = Objects.requireNonNull(serializer, "'serializer' cannot be null"); | public LocationPollingStrategy(HttpPipeline httpPipeline, ObjectSerializer serializer) {
this.httpPipeline = Objects.requireNonNull(httpPipeline, "'httpPipeline' cannot be null");
this.serializer = (serializer == null) ? DEFAULT_SERIALIZER : serializer;
} | class LocationPollingStrategy<T, U> implements PollingStrategy<T, U> {
private final HttpPipeline httpPipeline;
private final ObjectSerializer serializer;
/**
* Creates an instance of the location polling strategy using a JSON serializer.
*
* @param httpPipeline an instance of {@link HttpPipeline} to send requests with
*/
public LocationPollingStrategy(HttpPipeline httpPipeline) {
this(httpPipeline, new DefaultJsonSerializer());
}
/**
* Creates an instance of the location polling strategy.
*
* @param httpPipeline an instance of {@link HttpPipeline} to send requests with
* @param serializer a custom serializer for serializing and deserializing polling responses
*/
@Override
public Mono<Boolean> canPoll(Response<?> initialResponse) {
HttpHeader locationHeader = initialResponse.getHeaders().get(PollingConstants.LOCATION);
if (locationHeader != null) {
try {
new URL(locationHeader.getValue());
return Mono.just(true);
} catch (MalformedURLException e) {
return Mono.just(false);
}
}
return Mono.just(false);
}
@Override
public Mono<PollResponse<T>> onInitialResponse(Response<?> response, PollingContext<T> pollingContext,
TypeReference<T> pollResponseType) {
HttpHeader locationHeader = response.getHeaders().get(PollingConstants.LOCATION);
if (locationHeader != null) {
pollingContext.setData(PollingConstants.LOCATION, locationHeader.getValue());
}
pollingContext.setData(PollingConstants.HTTP_METHOD, response.getRequest().getHttpMethod().name());
pollingContext.setData(PollingConstants.REQUEST_URL, response.getRequest().getUrl().toString());
if (response.getStatusCode() == 200
|| response.getStatusCode() == 201
|| response.getStatusCode() == 202
|| response.getStatusCode() == 204) {
String retryAfterValue = response.getHeaders().getValue(PollingConstants.RETRY_AFTER);
Duration retryAfter = retryAfterValue == null ? null : Duration.ofSeconds(Long.parseLong(retryAfterValue));
return PollingUtils.convertResponse(response.getValue(), serializer, pollResponseType)
.map(value -> new PollResponse<>(LongRunningOperationStatus.IN_PROGRESS, value, retryAfter))
.switchIfEmpty(Mono.defer(() -> Mono.just(new PollResponse<>(
LongRunningOperationStatus.IN_PROGRESS, null, retryAfter))));
} else {
return Mono.error(new AzureException(String.format("Operation failed or cancelled with status code %d,"
+ ", 'Location' header: %s, and response body: %s", response.getStatusCode(), locationHeader,
PollingUtils.serializeResponse(response.getValue(), serializer))));
}
}
@Override
public Mono<PollResponse<T>> poll(PollingContext<T> pollingContext, TypeReference<T> pollResponseType) {
HttpRequest request = new HttpRequest(HttpMethod.GET, pollingContext.getData(PollingConstants.LOCATION));
return httpPipeline.send(request).flatMap(response -> {
HttpHeader locationHeader = response.getHeaders().get(PollingConstants.LOCATION);
if (locationHeader != null) {
pollingContext.setData(PollingConstants.LOCATION, locationHeader.getValue());
}
LongRunningOperationStatus status;
if (response.getStatusCode() == 202) {
status = LongRunningOperationStatus.IN_PROGRESS;
} else if (response.getStatusCode() >= 200 && response.getStatusCode() <= 204) {
status = LongRunningOperationStatus.SUCCESSFULLY_COMPLETED;
} else {
status = LongRunningOperationStatus.FAILED;
}
return response.getBodyAsByteArray().map(BinaryData::fromBytes).flatMap(binaryData -> {
pollingContext.setData(PollingConstants.POLL_RESPONSE_BODY, binaryData.toString());
String retryAfterValue = response.getHeaders().getValue(PollingConstants.RETRY_AFTER);
Duration retryAfter = retryAfterValue == null ? null
: Duration.ofSeconds(Long.parseLong(retryAfterValue));
return PollingUtils.deserializeResponse(binaryData, serializer, pollResponseType)
.map(value -> new PollResponse<>(status, value, retryAfter));
});
});
}
@Override
public Mono<U> getResult(PollingContext<T> pollingContext, TypeReference<U> resultType) {
if (pollingContext.getLatestResponse().getStatus() == LongRunningOperationStatus.FAILED) {
return Mono.error(new AzureException("Long running operation failed."));
} else if (pollingContext.getLatestResponse().getStatus() == LongRunningOperationStatus.USER_CANCELLED) {
return Mono.error(new AzureException("Long running operation cancelled."));
}
String finalGetUrl;
String httpMethod = pollingContext.getData(PollingConstants.HTTP_METHOD);
if (HttpMethod.PUT.name().equalsIgnoreCase(httpMethod)
|| HttpMethod.PATCH.name().equalsIgnoreCase(httpMethod)) {
finalGetUrl = pollingContext.getData(PollingConstants.REQUEST_URL);
} else if (HttpMethod.POST.name().equalsIgnoreCase(httpMethod)
&& pollingContext.getData(PollingConstants.LOCATION) != null) {
finalGetUrl = pollingContext.getData(PollingConstants.LOCATION);
} else {
return Mono.error(new AzureException("Cannot get final result"));
}
if (finalGetUrl == null) {
String latestResponseBody = pollingContext.getData(PollingConstants.POLL_RESPONSE_BODY);
return PollingUtils.deserializeResponse(BinaryData.fromString(latestResponseBody), serializer, resultType);
} else {
HttpRequest request = new HttpRequest(HttpMethod.GET, finalGetUrl);
return httpPipeline.send(request)
.flatMap(HttpResponse::getBodyAsByteArray)
.map(BinaryData::fromBytes)
.flatMap(binaryData -> PollingUtils.deserializeResponse(binaryData, serializer, resultType));
}
}
} | class LocationPollingStrategy<T, U> implements PollingStrategy<T, U> {
private static final ObjectSerializer DEFAULT_SERIALIZER = new DefaultJsonSerializer();
private final ClientLogger logger = new ClientLogger(LocationPollingStrategy.class);
private final HttpPipeline httpPipeline;
private final ObjectSerializer serializer;
/**
* Creates an instance of the location polling strategy using a JSON serializer.
*
* @param httpPipeline an instance of {@link HttpPipeline} to send requests with
* @throws NullPointerException If {@code httpPipeline} is null.
*/
public LocationPollingStrategy(HttpPipeline httpPipeline) {
this(httpPipeline, DEFAULT_SERIALIZER);
}
/**
* Creates an instance of the location polling strategy.
*
* @param httpPipeline an instance of {@link HttpPipeline} to send requests with
* @param serializer a custom serializer for serializing and deserializing polling responses
* @throws NullPointerException If {@code httpPipeline} is null.
*/
@Override
public Mono<Boolean> canPoll(Response<?> initialResponse) {
HttpHeader locationHeader = initialResponse.getHeaders().get(PollingConstants.LOCATION);
if (locationHeader != null) {
try {
new URL(locationHeader.getValue());
return Mono.just(true);
} catch (MalformedURLException e) {
logger.info("Failed to parse Location header into a URL.", e);
return Mono.just(false);
}
}
return Mono.just(false);
}
@Override
public Mono<PollResponse<T>> onInitialResponse(Response<?> response, PollingContext<T> pollingContext,
TypeReference<T> pollResponseType) {
HttpHeader locationHeader = response.getHeaders().get(PollingConstants.LOCATION);
if (locationHeader != null) {
pollingContext.setData(PollingConstants.LOCATION, locationHeader.getValue());
}
pollingContext.setData(PollingConstants.HTTP_METHOD, response.getRequest().getHttpMethod().name());
pollingContext.setData(PollingConstants.REQUEST_URL, response.getRequest().getUrl().toString());
if (response.getStatusCode() == 200
|| response.getStatusCode() == 201
|| response.getStatusCode() == 202
|| response.getStatusCode() == 204) {
String retryAfterValue = response.getHeaders().getValue(PollingConstants.RETRY_AFTER);
Duration retryAfter = retryAfterValue == null ? null : Duration.ofSeconds(Long.parseLong(retryAfterValue));
return PollingUtils.convertResponse(response.getValue(), serializer, pollResponseType)
.map(value -> new PollResponse<>(LongRunningOperationStatus.IN_PROGRESS, value, retryAfter))
.switchIfEmpty(Mono.defer(() -> Mono.just(new PollResponse<>(
LongRunningOperationStatus.IN_PROGRESS, null, retryAfter))));
} else {
return Mono.error(new AzureException(String.format("Operation failed or cancelled with status code %d,"
+ ", 'Location' header: %s, and response body: %s", response.getStatusCode(), locationHeader,
PollingUtils.serializeResponse(response.getValue(), serializer))));
}
}
@Override
public Mono<PollResponse<T>> poll(PollingContext<T> pollingContext, TypeReference<T> pollResponseType) {
HttpRequest request = new HttpRequest(HttpMethod.GET, pollingContext.getData(PollingConstants.LOCATION));
return httpPipeline.send(request).flatMap(response -> {
HttpHeader locationHeader = response.getHeaders().get(PollingConstants.LOCATION);
if (locationHeader != null) {
pollingContext.setData(PollingConstants.LOCATION, locationHeader.getValue());
}
LongRunningOperationStatus status;
if (response.getStatusCode() == 202) {
status = LongRunningOperationStatus.IN_PROGRESS;
} else if (response.getStatusCode() >= 200 && response.getStatusCode() <= 204) {
status = LongRunningOperationStatus.SUCCESSFULLY_COMPLETED;
} else {
status = LongRunningOperationStatus.FAILED;
}
return response.getBodyAsByteArray().map(BinaryData::fromBytes).flatMap(binaryData -> {
pollingContext.setData(PollingConstants.POLL_RESPONSE_BODY, binaryData.toString());
Duration retryAfter = ImplUtils.getRetryAfterFromHeaders(response.getHeaders(), OffsetDateTime::now);
return PollingUtils.deserializeResponse(binaryData, serializer, pollResponseType)
.map(value -> new PollResponse<>(status, value, retryAfter));
});
});
}
@Override
public Mono<U> getResult(PollingContext<T> pollingContext, TypeReference<U> resultType) {
if (pollingContext.getLatestResponse().getStatus() == LongRunningOperationStatus.FAILED) {
return Mono.error(new AzureException("Long running operation failed."));
} else if (pollingContext.getLatestResponse().getStatus() == LongRunningOperationStatus.USER_CANCELLED) {
return Mono.error(new AzureException("Long running operation cancelled."));
}
String finalGetUrl;
String httpMethod = pollingContext.getData(PollingConstants.HTTP_METHOD);
if (HttpMethod.PUT.name().equalsIgnoreCase(httpMethod)
|| HttpMethod.PATCH.name().equalsIgnoreCase(httpMethod)) {
finalGetUrl = pollingContext.getData(PollingConstants.REQUEST_URL);
} else if (HttpMethod.POST.name().equalsIgnoreCase(httpMethod)
&& pollingContext.getData(PollingConstants.LOCATION) != null) {
finalGetUrl = pollingContext.getData(PollingConstants.LOCATION);
} else {
return Mono.error(new AzureException("Cannot get final result"));
}
if (finalGetUrl == null) {
String latestResponseBody = pollingContext.getData(PollingConstants.POLL_RESPONSE_BODY);
return PollingUtils.deserializeResponse(BinaryData.fromString(latestResponseBody), serializer, resultType);
} else {
HttpRequest request = new HttpRequest(HttpMethod.GET, finalGetUrl);
return httpPipeline.send(request)
.flatMap(HttpResponse::getBodyAsByteArray)
.map(BinaryData::fromBytes)
.flatMap(binaryData -> PollingUtils.deserializeResponse(binaryData, serializer, resultType));
}
}
} |
Should log a warning here, this feels like it could be a service bug if an invalid polling location is returned. | public Mono<Boolean> canPoll(Response<?> initialResponse) {
HttpHeader locationHeader = initialResponse.getHeaders().get(PollingConstants.LOCATION);
if (locationHeader != null) {
try {
new URL(locationHeader.getValue());
return Mono.just(true);
} catch (MalformedURLException e) {
return Mono.just(false);
}
}
return Mono.just(false);
} | return Mono.just(false); | public Mono<Boolean> canPoll(Response<?> initialResponse) {
HttpHeader locationHeader = initialResponse.getHeaders().get(PollingConstants.LOCATION);
if (locationHeader != null) {
try {
new URL(locationHeader.getValue());
return Mono.just(true);
} catch (MalformedURLException e) {
logger.info("Failed to parse Location header into a URL.", e);
return Mono.just(false);
}
}
return Mono.just(false);
} | class LocationPollingStrategy<T, U> implements PollingStrategy<T, U> {
private final HttpPipeline httpPipeline;
private final ObjectSerializer serializer;
/**
* Creates an instance of the location polling strategy using a JSON serializer.
*
* @param httpPipeline an instance of {@link HttpPipeline} to send requests with
*/
public LocationPollingStrategy(HttpPipeline httpPipeline) {
this(httpPipeline, new DefaultJsonSerializer());
}
/**
* Creates an instance of the location polling strategy.
*
* @param httpPipeline an instance of {@link HttpPipeline} to send requests with
* @param serializer a custom serializer for serializing and deserializing polling responses
*/
public LocationPollingStrategy(HttpPipeline httpPipeline, ObjectSerializer serializer) {
this.httpPipeline = Objects.requireNonNull(httpPipeline, "'httpPipeline' cannot be null");
this.serializer = Objects.requireNonNull(serializer, "'serializer' cannot be null");
}
@Override
@Override
public Mono<PollResponse<T>> onInitialResponse(Response<?> response, PollingContext<T> pollingContext,
TypeReference<T> pollResponseType) {
HttpHeader locationHeader = response.getHeaders().get(PollingConstants.LOCATION);
if (locationHeader != null) {
pollingContext.setData(PollingConstants.LOCATION, locationHeader.getValue());
}
pollingContext.setData(PollingConstants.HTTP_METHOD, response.getRequest().getHttpMethod().name());
pollingContext.setData(PollingConstants.REQUEST_URL, response.getRequest().getUrl().toString());
if (response.getStatusCode() == 200
|| response.getStatusCode() == 201
|| response.getStatusCode() == 202
|| response.getStatusCode() == 204) {
String retryAfterValue = response.getHeaders().getValue(PollingConstants.RETRY_AFTER);
Duration retryAfter = retryAfterValue == null ? null : Duration.ofSeconds(Long.parseLong(retryAfterValue));
return PollingUtils.convertResponse(response.getValue(), serializer, pollResponseType)
.map(value -> new PollResponse<>(LongRunningOperationStatus.IN_PROGRESS, value, retryAfter))
.switchIfEmpty(Mono.defer(() -> Mono.just(new PollResponse<>(
LongRunningOperationStatus.IN_PROGRESS, null, retryAfter))));
} else {
return Mono.error(new AzureException(String.format("Operation failed or cancelled with status code %d,"
+ ", 'Location' header: %s, and response body: %s", response.getStatusCode(), locationHeader,
PollingUtils.serializeResponse(response.getValue(), serializer))));
}
}
@Override
public Mono<PollResponse<T>> poll(PollingContext<T> pollingContext, TypeReference<T> pollResponseType) {
HttpRequest request = new HttpRequest(HttpMethod.GET, pollingContext.getData(PollingConstants.LOCATION));
return httpPipeline.send(request).flatMap(response -> {
HttpHeader locationHeader = response.getHeaders().get(PollingConstants.LOCATION);
if (locationHeader != null) {
pollingContext.setData(PollingConstants.LOCATION, locationHeader.getValue());
}
LongRunningOperationStatus status;
if (response.getStatusCode() == 202) {
status = LongRunningOperationStatus.IN_PROGRESS;
} else if (response.getStatusCode() >= 200 && response.getStatusCode() <= 204) {
status = LongRunningOperationStatus.SUCCESSFULLY_COMPLETED;
} else {
status = LongRunningOperationStatus.FAILED;
}
return response.getBodyAsByteArray().map(BinaryData::fromBytes).flatMap(binaryData -> {
pollingContext.setData(PollingConstants.POLL_RESPONSE_BODY, binaryData.toString());
String retryAfterValue = response.getHeaders().getValue(PollingConstants.RETRY_AFTER);
Duration retryAfter = retryAfterValue == null ? null
: Duration.ofSeconds(Long.parseLong(retryAfterValue));
return PollingUtils.deserializeResponse(binaryData, serializer, pollResponseType)
.map(value -> new PollResponse<>(status, value, retryAfter));
});
});
}
@Override
public Mono<U> getResult(PollingContext<T> pollingContext, TypeReference<U> resultType) {
if (pollingContext.getLatestResponse().getStatus() == LongRunningOperationStatus.FAILED) {
return Mono.error(new AzureException("Long running operation failed."));
} else if (pollingContext.getLatestResponse().getStatus() == LongRunningOperationStatus.USER_CANCELLED) {
return Mono.error(new AzureException("Long running operation cancelled."));
}
String finalGetUrl;
String httpMethod = pollingContext.getData(PollingConstants.HTTP_METHOD);
if (HttpMethod.PUT.name().equalsIgnoreCase(httpMethod)
|| HttpMethod.PATCH.name().equalsIgnoreCase(httpMethod)) {
finalGetUrl = pollingContext.getData(PollingConstants.REQUEST_URL);
} else if (HttpMethod.POST.name().equalsIgnoreCase(httpMethod)
&& pollingContext.getData(PollingConstants.LOCATION) != null) {
finalGetUrl = pollingContext.getData(PollingConstants.LOCATION);
} else {
return Mono.error(new AzureException("Cannot get final result"));
}
if (finalGetUrl == null) {
String latestResponseBody = pollingContext.getData(PollingConstants.POLL_RESPONSE_BODY);
return PollingUtils.deserializeResponse(BinaryData.fromString(latestResponseBody), serializer, resultType);
} else {
HttpRequest request = new HttpRequest(HttpMethod.GET, finalGetUrl);
return httpPipeline.send(request)
.flatMap(HttpResponse::getBodyAsByteArray)
.map(BinaryData::fromBytes)
.flatMap(binaryData -> PollingUtils.deserializeResponse(binaryData, serializer, resultType));
}
}
} | class LocationPollingStrategy<T, U> implements PollingStrategy<T, U> {
private static final ObjectSerializer DEFAULT_SERIALIZER = new DefaultJsonSerializer();
private final ClientLogger logger = new ClientLogger(LocationPollingStrategy.class);
private final HttpPipeline httpPipeline;
private final ObjectSerializer serializer;
/**
* Creates an instance of the location polling strategy using a JSON serializer.
*
* @param httpPipeline an instance of {@link HttpPipeline} to send requests with
* @throws NullPointerException If {@code httpPipeline} is null.
*/
public LocationPollingStrategy(HttpPipeline httpPipeline) {
this(httpPipeline, DEFAULT_SERIALIZER);
}
/**
* Creates an instance of the location polling strategy.
*
* @param httpPipeline an instance of {@link HttpPipeline} to send requests with
* @param serializer a custom serializer for serializing and deserializing polling responses
* @throws NullPointerException If {@code httpPipeline} is null.
*/
public LocationPollingStrategy(HttpPipeline httpPipeline, ObjectSerializer serializer) {
this.httpPipeline = Objects.requireNonNull(httpPipeline, "'httpPipeline' cannot be null");
this.serializer = (serializer == null) ? DEFAULT_SERIALIZER : serializer;
}
@Override
@Override
public Mono<PollResponse<T>> onInitialResponse(Response<?> response, PollingContext<T> pollingContext,
TypeReference<T> pollResponseType) {
HttpHeader locationHeader = response.getHeaders().get(PollingConstants.LOCATION);
if (locationHeader != null) {
pollingContext.setData(PollingConstants.LOCATION, locationHeader.getValue());
}
pollingContext.setData(PollingConstants.HTTP_METHOD, response.getRequest().getHttpMethod().name());
pollingContext.setData(PollingConstants.REQUEST_URL, response.getRequest().getUrl().toString());
if (response.getStatusCode() == 200
|| response.getStatusCode() == 201
|| response.getStatusCode() == 202
|| response.getStatusCode() == 204) {
String retryAfterValue = response.getHeaders().getValue(PollingConstants.RETRY_AFTER);
Duration retryAfter = retryAfterValue == null ? null : Duration.ofSeconds(Long.parseLong(retryAfterValue));
return PollingUtils.convertResponse(response.getValue(), serializer, pollResponseType)
.map(value -> new PollResponse<>(LongRunningOperationStatus.IN_PROGRESS, value, retryAfter))
.switchIfEmpty(Mono.defer(() -> Mono.just(new PollResponse<>(
LongRunningOperationStatus.IN_PROGRESS, null, retryAfter))));
} else {
return Mono.error(new AzureException(String.format("Operation failed or cancelled with status code %d,"
+ ", 'Location' header: %s, and response body: %s", response.getStatusCode(), locationHeader,
PollingUtils.serializeResponse(response.getValue(), serializer))));
}
}
@Override
public Mono<PollResponse<T>> poll(PollingContext<T> pollingContext, TypeReference<T> pollResponseType) {
HttpRequest request = new HttpRequest(HttpMethod.GET, pollingContext.getData(PollingConstants.LOCATION));
return httpPipeline.send(request).flatMap(response -> {
HttpHeader locationHeader = response.getHeaders().get(PollingConstants.LOCATION);
if (locationHeader != null) {
pollingContext.setData(PollingConstants.LOCATION, locationHeader.getValue());
}
LongRunningOperationStatus status;
if (response.getStatusCode() == 202) {
status = LongRunningOperationStatus.IN_PROGRESS;
} else if (response.getStatusCode() >= 200 && response.getStatusCode() <= 204) {
status = LongRunningOperationStatus.SUCCESSFULLY_COMPLETED;
} else {
status = LongRunningOperationStatus.FAILED;
}
return response.getBodyAsByteArray().map(BinaryData::fromBytes).flatMap(binaryData -> {
pollingContext.setData(PollingConstants.POLL_RESPONSE_BODY, binaryData.toString());
Duration retryAfter = ImplUtils.getRetryAfterFromHeaders(response.getHeaders(), OffsetDateTime::now);
return PollingUtils.deserializeResponse(binaryData, serializer, pollResponseType)
.map(value -> new PollResponse<>(status, value, retryAfter));
});
});
}
@Override
public Mono<U> getResult(PollingContext<T> pollingContext, TypeReference<U> resultType) {
    // Terminal-but-unsuccessful states never produce a final resource.
    LongRunningOperationStatus latestStatus = pollingContext.getLatestResponse().getStatus();
    if (latestStatus == LongRunningOperationStatus.FAILED) {
        return Mono.error(new AzureException("Long running operation failed."));
    }
    if (latestStatus == LongRunningOperationStatus.USER_CANCELLED) {
        return Mono.error(new AzureException("Long running operation cancelled."));
    }
    String initialMethod = pollingContext.getData(PollingConstants.HTTP_METHOD);
    String finalResultUrl;
    if (HttpMethod.PUT.name().equalsIgnoreCase(initialMethod)
        || HttpMethod.PATCH.name().equalsIgnoreCase(initialMethod)) {
        // PUT/PATCH: the created/updated resource lives at the original request URL.
        finalResultUrl = pollingContext.getData(PollingConstants.REQUEST_URL);
    } else if (HttpMethod.POST.name().equalsIgnoreCase(initialMethod)
        && pollingContext.getData(PollingConstants.LOCATION) != null) {
        // POST: the result is fetched from the Location URL, when one was provided.
        finalResultUrl = pollingContext.getData(PollingConstants.LOCATION);
    } else {
        return Mono.error(new AzureException("Cannot get final result"));
    }
    if (finalResultUrl != null) {
        HttpRequest finalGet = new HttpRequest(HttpMethod.GET, finalResultUrl);
        return httpPipeline.send(finalGet)
            .flatMap(HttpResponse::getBodyAsByteArray)
            .map(BinaryData::fromBytes)
            .flatMap(body -> PollingUtils.deserializeResponse(body, serializer, resultType));
    }
    // No URL to fetch from: fall back to the body captured during the last poll.
    String lastPollBody = pollingContext.getData(PollingConstants.POLL_RESPONSE_BODY);
    return PollingUtils.deserializeResponse(BinaryData.fromString(lastPollBody), serializer, resultType);
}
} |
Why not use `response.getBodyAsString(Charset)`? Getting the bytes and converting to `BinaryData` and then to String may result in more overhead | public Mono<PollResponse<T>> poll(PollingContext<T> pollingContext, TypeReference<T> pollResponseType) {
HttpRequest request = new HttpRequest(HttpMethod.GET, pollingContext.getData(PollingConstants.LOCATION));
return httpPipeline.send(request).flatMap(response -> {
HttpHeader locationHeader = response.getHeaders().get(PollingConstants.LOCATION);
if (locationHeader != null) {
pollingContext.setData(PollingConstants.LOCATION, locationHeader.getValue());
}
LongRunningOperationStatus status;
if (response.getStatusCode() == 202) {
status = LongRunningOperationStatus.IN_PROGRESS;
} else if (response.getStatusCode() >= 200 && response.getStatusCode() <= 204) {
status = LongRunningOperationStatus.SUCCESSFULLY_COMPLETED;
} else {
status = LongRunningOperationStatus.FAILED;
}
return response.getBodyAsByteArray().map(BinaryData::fromBytes).flatMap(binaryData -> {
pollingContext.setData(PollingConstants.POLL_RESPONSE_BODY, binaryData.toString());
String retryAfterValue = response.getHeaders().getValue(PollingConstants.RETRY_AFTER);
Duration retryAfter = retryAfterValue == null ? null
: Duration.ofSeconds(Long.parseLong(retryAfterValue));
return PollingUtils.deserializeResponse(binaryData, serializer, pollResponseType)
.map(value -> new PollResponse<>(status, value, retryAfter));
});
});
} | pollingContext.setData(PollingConstants.POLL_RESPONSE_BODY, binaryData.toString()); | new HttpRequest(HttpMethod.GET, pollingContext.getData(PollingConstants.LOCATION));
return httpPipeline.send(request).flatMap(response -> {
HttpHeader locationHeader = response.getHeaders().get(PollingConstants.LOCATION);
if (locationHeader != null) {
pollingContext.setData(PollingConstants.LOCATION, locationHeader.getValue());
}
LongRunningOperationStatus status;
if (response.getStatusCode() == 202) {
status = LongRunningOperationStatus.IN_PROGRESS;
} else if (response.getStatusCode() >= 200 && response.getStatusCode() <= 204) {
status = LongRunningOperationStatus.SUCCESSFULLY_COMPLETED;
} else {
status = LongRunningOperationStatus.FAILED;
}
return response.getBodyAsByteArray().map(BinaryData::fromBytes).flatMap(binaryData -> {
pollingContext.setData(PollingConstants.POLL_RESPONSE_BODY, binaryData.toString());
Duration retryAfter = ImplUtils.getRetryAfterFromHeaders(response.getHeaders(), OffsetDateTime::now);
return PollingUtils.deserializeResponse(binaryData, serializer, pollResponseType)
.map(value -> new PollResponse<>(status, value, retryAfter));
});
} | class LocationPollingStrategy<T, U> implements PollingStrategy<T, U> {
private final HttpPipeline httpPipeline;
private final ObjectSerializer serializer;
/**
 * Creates an instance of the location polling strategy using a JSON serializer.
 *
 * @param httpPipeline an instance of {@link HttpPipeline} to send requests with
 * @throws NullPointerException if {@code httpPipeline} is null (enforced by the delegated constructor)
 */
public LocationPollingStrategy(HttpPipeline httpPipeline) {
    this(httpPipeline, new DefaultJsonSerializer());
}
/**
 * Creates an instance of the location polling strategy.
 *
 * @param httpPipeline an instance of {@link HttpPipeline} to send requests with
 * @param serializer a custom serializer for serializing and deserializing polling responses
 * @throws NullPointerException if {@code httpPipeline} or {@code serializer} is null
 */
public LocationPollingStrategy(HttpPipeline httpPipeline, ObjectSerializer serializer) {
    this.httpPipeline = Objects.requireNonNull(httpPipeline, "'httpPipeline' cannot be null");
    this.serializer = Objects.requireNonNull(serializer, "'serializer' cannot be null");
}
@Override
public Mono<Boolean> canPoll(Response<?> initialResponse) {
    // This strategy applies only when the activation response carries a Location
    // header whose value parses as a well-formed URL.
    HttpHeader location = initialResponse.getHeaders().get(PollingConstants.LOCATION);
    if (location == null) {
        return Mono.just(false);
    }
    boolean parsable;
    try {
        new URL(location.getValue()); // parsed purely for validation; result discarded
        parsable = true;
    } catch (MalformedURLException e) {
        parsable = false;
    }
    return Mono.just(parsable);
}
/**
 * Inspects the activation response, seeds the polling context with the Location URL,
 * HTTP method, and request URL (all needed later by {@code getResult}), and emits the
 * initial in-progress poll response.
 *
 * @param response the activation operation's response
 * @param pollingContext mutable per-operation state shared across polling calls
 * @param pollResponseType the type the poll response body deserializes into
 * @return the initial {@link PollResponse}, or an error for activation status codes
 * other than 200/201/202/204
 */
@Override
public Mono<PollResponse<T>> onInitialResponse(Response<?> response, PollingContext<T> pollingContext,
    TypeReference<T> pollResponseType) {
    HttpHeader locationHeader = response.getHeaders().get(PollingConstants.LOCATION);
    if (locationHeader != null) {
        pollingContext.setData(PollingConstants.LOCATION, locationHeader.getValue());
    }
    pollingContext.setData(PollingConstants.HTTP_METHOD, response.getRequest().getHttpMethod().name());
    pollingContext.setData(PollingConstants.REQUEST_URL, response.getRequest().getUrl().toString());
    int statusCode = response.getStatusCode();
    if (statusCode == 200 || statusCode == 201 || statusCode == 202 || statusCode == 204) {
        // NOTE(review): a non-numeric Retry-After value surfaces as a NumberFormatException;
        // assumed server-controlled — confirm whether defensive parsing is desired.
        String retryAfterValue = response.getHeaders().getValue(PollingConstants.RETRY_AFTER);
        Duration retryAfter = retryAfterValue == null ? null : Duration.ofSeconds(Long.parseLong(retryAfterValue));
        return PollingUtils.convertResponse(response.getValue(), serializer, pollResponseType)
            .map(value -> new PollResponse<>(LongRunningOperationStatus.IN_PROGRESS, value, retryAfter))
            .switchIfEmpty(Mono.defer(() -> Mono.just(new PollResponse<>(
                LongRunningOperationStatus.IN_PROGRESS, null, retryAfter))));
    } else {
        // Fixed: the previous format string concatenation ("%d," + ", 'Location'...")
        // rendered a doubled comma after the status code in the error message.
        return Mono.error(new AzureException(String.format("Operation failed or cancelled with status code %d, "
            + "'Location' header: %s, and response body: %s", statusCode, locationHeader,
            PollingUtils.serializeResponse(response.getValue(), serializer))));
    }
}
@Override
public Mono<PollResponse<T>> poll(PollingContext<T> pollingContext, TypeReference<T> pollResponseType) {
HttpRequest request = );
}
/**
 * Retrieves the final result of the long-running operation.
 *
 * @param pollingContext state accumulated while polling (HTTP method, request URL, Location, last poll body)
 * @param resultType the type the final payload deserializes into
 * @return the deserialized final result, or an error when the operation failed, was
 * cancelled, or no final-result URL can be derived from the initial HTTP method
 */
@Override
public Mono<U> getResult(PollingContext<T> pollingContext, TypeReference<U> resultType) {
    // Failed/cancelled operations carry no final payload.
    if (pollingContext.getLatestResponse().getStatus() == LongRunningOperationStatus.FAILED) {
        return Mono.error(new AzureException("Long running operation failed."));
    } else if (pollingContext.getLatestResponse().getStatus() == LongRunningOperationStatus.USER_CANCELLED) {
        return Mono.error(new AzureException("Long running operation cancelled."));
    }
    String finalGetUrl;
    String httpMethod = pollingContext.getData(PollingConstants.HTTP_METHOD);
    if (HttpMethod.PUT.name().equalsIgnoreCase(httpMethod)
        || HttpMethod.PATCH.name().equalsIgnoreCase(httpMethod)) {
        // PUT/PATCH: the final resource is at the original request URL.
        finalGetUrl = pollingContext.getData(PollingConstants.REQUEST_URL);
    } else if (HttpMethod.POST.name().equalsIgnoreCase(httpMethod)
        && pollingContext.getData(PollingConstants.LOCATION) != null) {
        // POST: the final resource is at the Location URL, when one exists.
        finalGetUrl = pollingContext.getData(PollingConstants.LOCATION);
    } else {
        return Mono.error(new AzureException("Cannot get final result"));
    }
    if (finalGetUrl == null) {
        // No URL to fetch: reuse the body captured during the last poll.
        String latestResponseBody = pollingContext.getData(PollingConstants.POLL_RESPONSE_BODY);
        return PollingUtils.deserializeResponse(BinaryData.fromString(latestResponseBody), serializer, resultType);
    } else {
        HttpRequest request = new HttpRequest(HttpMethod.GET, finalGetUrl);
        return httpPipeline.send(request)
            .flatMap(HttpResponse::getBodyAsByteArray)
            .map(BinaryData::fromBytes)
            .flatMap(binaryData -> PollingUtils.deserializeResponse(binaryData, serializer, resultType));
    }
}
} | class LocationPollingStrategy<T, U> implements PollingStrategy<T, U> {
private static final ObjectSerializer DEFAULT_SERIALIZER = new DefaultJsonSerializer();
private final ClientLogger logger = new ClientLogger(LocationPollingStrategy.class);
private final HttpPipeline httpPipeline;
private final ObjectSerializer serializer;
/**
* Creates an instance of the location polling strategy using a JSON serializer.
*
* @param httpPipeline an instance of {@link HttpPipeline} to send requests with
* @throws NullPointerException If {@code httpPipeline} is null.
*/
public LocationPollingStrategy(HttpPipeline httpPipeline) {
this(httpPipeline, DEFAULT_SERIALIZER);
}
/**
* Creates an instance of the location polling strategy.
*
* @param httpPipeline an instance of {@link HttpPipeline} to send requests with
* @param serializer a custom serializer for serializing and deserializing polling responses
* @throws NullPointerException If {@code httpPipeline} is null.
*/
public LocationPollingStrategy(HttpPipeline httpPipeline, ObjectSerializer serializer) {
this.httpPipeline = Objects.requireNonNull(httpPipeline, "'httpPipeline' cannot be null");
this.serializer = (serializer == null) ? DEFAULT_SERIALIZER : serializer;
}
@Override
public Mono<Boolean> canPoll(Response<?> initialResponse) {
    // Applicable only when the activation response carries a Location header
    // that parses as a well-formed URL.
    HttpHeader locationHeader = initialResponse.getHeaders().get(PollingConstants.LOCATION);
    if (locationHeader != null) {
        try {
            new URL(locationHeader.getValue()); // validation only; parsed value is discarded
            return Mono.just(true);
        } catch (MalformedURLException e) {
            logger.info("Failed to parse Location header into a URL.", e);
            return Mono.just(false);
        }
    }
    return Mono.just(false);
}
/**
 * Inspects the activation response, seeds the polling context with the Location URL,
 * HTTP method, and request URL (all needed later by {@code getResult}), and emits the
 * initial in-progress poll response.
 *
 * @param response the activation operation's response
 * @param pollingContext mutable per-operation state shared across polling calls
 * @param pollResponseType the type the poll response body deserializes into
 * @return the initial {@link PollResponse}, or an error for activation status codes
 * other than 200/201/202/204
 */
@Override
public Mono<PollResponse<T>> onInitialResponse(Response<?> response, PollingContext<T> pollingContext,
    TypeReference<T> pollResponseType) {
    HttpHeader locationHeader = response.getHeaders().get(PollingConstants.LOCATION);
    if (locationHeader != null) {
        pollingContext.setData(PollingConstants.LOCATION, locationHeader.getValue());
    }
    pollingContext.setData(PollingConstants.HTTP_METHOD, response.getRequest().getHttpMethod().name());
    pollingContext.setData(PollingConstants.REQUEST_URL, response.getRequest().getUrl().toString());
    int statusCode = response.getStatusCode();
    if (statusCode == 200 || statusCode == 201 || statusCode == 202 || statusCode == 204) {
        // NOTE(review): a non-numeric Retry-After value surfaces as a NumberFormatException;
        // assumed server-controlled — confirm whether defensive parsing is desired.
        String retryAfterValue = response.getHeaders().getValue(PollingConstants.RETRY_AFTER);
        Duration retryAfter = retryAfterValue == null ? null : Duration.ofSeconds(Long.parseLong(retryAfterValue));
        return PollingUtils.convertResponse(response.getValue(), serializer, pollResponseType)
            .map(value -> new PollResponse<>(LongRunningOperationStatus.IN_PROGRESS, value, retryAfter))
            .switchIfEmpty(Mono.defer(() -> Mono.just(new PollResponse<>(
                LongRunningOperationStatus.IN_PROGRESS, null, retryAfter))));
    } else {
        // Fixed: the previous format string concatenation ("%d," + ", 'Location'...")
        // rendered a doubled comma after the status code in the error message.
        return Mono.error(new AzureException(String.format("Operation failed or cancelled with status code %d, "
            + "'Location' header: %s, and response body: %s", statusCode, locationHeader,
            PollingUtils.serializeResponse(response.getValue(), serializer))));
    }
}
@Override
public Mono<PollResponse<T>> poll(PollingContext<T> pollingContext, TypeReference<T> pollResponseType) {
HttpRequest request = );
}
@Override
public Mono<U> getResult(PollingContext<T> pollingContext, TypeReference<U> resultType) {
if (pollingContext.getLatestResponse().getStatus() == LongRunningOperationStatus.FAILED) {
return Mono.error(new AzureException("Long running operation failed."));
} else if (pollingContext.getLatestResponse().getStatus() == LongRunningOperationStatus.USER_CANCELLED) {
return Mono.error(new AzureException("Long running operation cancelled."));
}
String finalGetUrl;
String httpMethod = pollingContext.getData(PollingConstants.HTTP_METHOD);
if (HttpMethod.PUT.name().equalsIgnoreCase(httpMethod)
|| HttpMethod.PATCH.name().equalsIgnoreCase(httpMethod)) {
finalGetUrl = pollingContext.getData(PollingConstants.REQUEST_URL);
} else if (HttpMethod.POST.name().equalsIgnoreCase(httpMethod)
&& pollingContext.getData(PollingConstants.LOCATION) != null) {
finalGetUrl = pollingContext.getData(PollingConstants.LOCATION);
} else {
return Mono.error(new AzureException("Cannot get final result"));
}
if (finalGetUrl == null) {
String latestResponseBody = pollingContext.getData(PollingConstants.POLL_RESPONSE_BODY);
return PollingUtils.deserializeResponse(BinaryData.fromString(latestResponseBody), serializer, resultType);
} else {
HttpRequest request = new HttpRequest(HttpMethod.GET, finalGetUrl);
return httpPipeline.send(request)
.flatMap(HttpResponse::getBodyAsByteArray)
.map(BinaryData::fromBytes)
.flatMap(binaryData -> PollingUtils.deserializeResponse(binaryData, serializer, resultType));
}
}
} |
See other comments about retry headers | public Mono<PollResponse<T>> poll(PollingContext<T> pollingContext, TypeReference<T> pollResponseType) {
HttpRequest request = new HttpRequest(HttpMethod.GET, pollingContext.getData(PollingConstants.LOCATION));
return httpPipeline.send(request).flatMap(response -> {
HttpHeader locationHeader = response.getHeaders().get(PollingConstants.LOCATION);
if (locationHeader != null) {
pollingContext.setData(PollingConstants.LOCATION, locationHeader.getValue());
}
LongRunningOperationStatus status;
if (response.getStatusCode() == 202) {
status = LongRunningOperationStatus.IN_PROGRESS;
} else if (response.getStatusCode() >= 200 && response.getStatusCode() <= 204) {
status = LongRunningOperationStatus.SUCCESSFULLY_COMPLETED;
} else {
status = LongRunningOperationStatus.FAILED;
}
return response.getBodyAsByteArray().map(BinaryData::fromBytes).flatMap(binaryData -> {
pollingContext.setData(PollingConstants.POLL_RESPONSE_BODY, binaryData.toString());
String retryAfterValue = response.getHeaders().getValue(PollingConstants.RETRY_AFTER);
Duration retryAfter = retryAfterValue == null ? null
: Duration.ofSeconds(Long.parseLong(retryAfterValue));
return PollingUtils.deserializeResponse(binaryData, serializer, pollResponseType)
.map(value -> new PollResponse<>(status, value, retryAfter));
});
});
} | : Duration.ofSeconds(Long.parseLong(retryAfterValue)); | new HttpRequest(HttpMethod.GET, pollingContext.getData(PollingConstants.LOCATION));
return httpPipeline.send(request).flatMap(response -> {
HttpHeader locationHeader = response.getHeaders().get(PollingConstants.LOCATION);
if (locationHeader != null) {
pollingContext.setData(PollingConstants.LOCATION, locationHeader.getValue());
}
LongRunningOperationStatus status;
if (response.getStatusCode() == 202) {
status = LongRunningOperationStatus.IN_PROGRESS;
} else if (response.getStatusCode() >= 200 && response.getStatusCode() <= 204) {
status = LongRunningOperationStatus.SUCCESSFULLY_COMPLETED;
} else {
status = LongRunningOperationStatus.FAILED;
}
return response.getBodyAsByteArray().map(BinaryData::fromBytes).flatMap(binaryData -> {
pollingContext.setData(PollingConstants.POLL_RESPONSE_BODY, binaryData.toString());
Duration retryAfter = ImplUtils.getRetryAfterFromHeaders(response.getHeaders(), OffsetDateTime::now);
return PollingUtils.deserializeResponse(binaryData, serializer, pollResponseType)
.map(value -> new PollResponse<>(status, value, retryAfter));
});
} | class LocationPollingStrategy<T, U> implements PollingStrategy<T, U> {
private final HttpPipeline httpPipeline;
private final ObjectSerializer serializer;
/**
* Creates an instance of the location polling strategy using a JSON serializer.
*
* @param httpPipeline an instance of {@link HttpPipeline} to send requests with
*/
public LocationPollingStrategy(HttpPipeline httpPipeline) {
this(httpPipeline, new DefaultJsonSerializer());
}
/**
* Creates an instance of the location polling strategy.
*
* @param httpPipeline an instance of {@link HttpPipeline} to send requests with
* @param serializer a custom serializer for serializing and deserializing polling responses
*/
public LocationPollingStrategy(HttpPipeline httpPipeline, ObjectSerializer serializer) {
this.httpPipeline = Objects.requireNonNull(httpPipeline, "'httpPipeline' cannot be null");
this.serializer = Objects.requireNonNull(serializer, "'serializer' cannot be null");
}
@Override
public Mono<Boolean> canPoll(Response<?> initialResponse) {
HttpHeader locationHeader = initialResponse.getHeaders().get(PollingConstants.LOCATION);
if (locationHeader != null) {
try {
new URL(locationHeader.getValue());
return Mono.just(true);
} catch (MalformedURLException e) {
return Mono.just(false);
}
}
return Mono.just(false);
}
/**
 * Inspects the activation response, seeds the polling context with the Location URL,
 * HTTP method, and request URL (all needed later by {@code getResult}), and emits the
 * initial in-progress poll response.
 *
 * @param response the activation operation's response
 * @param pollingContext mutable per-operation state shared across polling calls
 * @param pollResponseType the type the poll response body deserializes into
 * @return the initial {@link PollResponse}, or an error for activation status codes
 * other than 200/201/202/204
 */
@Override
public Mono<PollResponse<T>> onInitialResponse(Response<?> response, PollingContext<T> pollingContext,
    TypeReference<T> pollResponseType) {
    HttpHeader locationHeader = response.getHeaders().get(PollingConstants.LOCATION);
    if (locationHeader != null) {
        pollingContext.setData(PollingConstants.LOCATION, locationHeader.getValue());
    }
    pollingContext.setData(PollingConstants.HTTP_METHOD, response.getRequest().getHttpMethod().name());
    pollingContext.setData(PollingConstants.REQUEST_URL, response.getRequest().getUrl().toString());
    int statusCode = response.getStatusCode();
    if (statusCode == 200 || statusCode == 201 || statusCode == 202 || statusCode == 204) {
        // NOTE(review): a non-numeric Retry-After value surfaces as a NumberFormatException;
        // assumed server-controlled — confirm whether defensive parsing is desired.
        String retryAfterValue = response.getHeaders().getValue(PollingConstants.RETRY_AFTER);
        Duration retryAfter = retryAfterValue == null ? null : Duration.ofSeconds(Long.parseLong(retryAfterValue));
        return PollingUtils.convertResponse(response.getValue(), serializer, pollResponseType)
            .map(value -> new PollResponse<>(LongRunningOperationStatus.IN_PROGRESS, value, retryAfter))
            .switchIfEmpty(Mono.defer(() -> Mono.just(new PollResponse<>(
                LongRunningOperationStatus.IN_PROGRESS, null, retryAfter))));
    } else {
        // Fixed: the previous format string concatenation ("%d," + ", 'Location'...")
        // rendered a doubled comma after the status code in the error message.
        return Mono.error(new AzureException(String.format("Operation failed or cancelled with status code %d, "
            + "'Location' header: %s, and response body: %s", statusCode, locationHeader,
            PollingUtils.serializeResponse(response.getValue(), serializer))));
    }
}
@Override
public Mono<PollResponse<T>> poll(PollingContext<T> pollingContext, TypeReference<T> pollResponseType) {
HttpRequest request = );
}
@Override
public Mono<U> getResult(PollingContext<T> pollingContext, TypeReference<U> resultType) {
if (pollingContext.getLatestResponse().getStatus() == LongRunningOperationStatus.FAILED) {
return Mono.error(new AzureException("Long running operation failed."));
} else if (pollingContext.getLatestResponse().getStatus() == LongRunningOperationStatus.USER_CANCELLED) {
return Mono.error(new AzureException("Long running operation cancelled."));
}
String finalGetUrl;
String httpMethod = pollingContext.getData(PollingConstants.HTTP_METHOD);
if (HttpMethod.PUT.name().equalsIgnoreCase(httpMethod)
|| HttpMethod.PATCH.name().equalsIgnoreCase(httpMethod)) {
finalGetUrl = pollingContext.getData(PollingConstants.REQUEST_URL);
} else if (HttpMethod.POST.name().equalsIgnoreCase(httpMethod)
&& pollingContext.getData(PollingConstants.LOCATION) != null) {
finalGetUrl = pollingContext.getData(PollingConstants.LOCATION);
} else {
return Mono.error(new AzureException("Cannot get final result"));
}
if (finalGetUrl == null) {
String latestResponseBody = pollingContext.getData(PollingConstants.POLL_RESPONSE_BODY);
return PollingUtils.deserializeResponse(BinaryData.fromString(latestResponseBody), serializer, resultType);
} else {
HttpRequest request = new HttpRequest(HttpMethod.GET, finalGetUrl);
return httpPipeline.send(request)
.flatMap(HttpResponse::getBodyAsByteArray)
.map(BinaryData::fromBytes)
.flatMap(binaryData -> PollingUtils.deserializeResponse(binaryData, serializer, resultType));
}
}
} | class LocationPollingStrategy<T, U> implements PollingStrategy<T, U> {
private static final ObjectSerializer DEFAULT_SERIALIZER = new DefaultJsonSerializer();
private final ClientLogger logger = new ClientLogger(LocationPollingStrategy.class);
private final HttpPipeline httpPipeline;
private final ObjectSerializer serializer;
/**
* Creates an instance of the location polling strategy using a JSON serializer.
*
* @param httpPipeline an instance of {@link HttpPipeline} to send requests with
* @throws NullPointerException If {@code httpPipeline} is null.
*/
public LocationPollingStrategy(HttpPipeline httpPipeline) {
this(httpPipeline, DEFAULT_SERIALIZER);
}
/**
* Creates an instance of the location polling strategy.
*
* @param httpPipeline an instance of {@link HttpPipeline} to send requests with
* @param serializer a custom serializer for serializing and deserializing polling responses
* @throws NullPointerException If {@code httpPipeline} is null.
*/
public LocationPollingStrategy(HttpPipeline httpPipeline, ObjectSerializer serializer) {
this.httpPipeline = Objects.requireNonNull(httpPipeline, "'httpPipeline' cannot be null");
this.serializer = (serializer == null) ? DEFAULT_SERIALIZER : serializer;
}
@Override
public Mono<Boolean> canPoll(Response<?> initialResponse) {
HttpHeader locationHeader = initialResponse.getHeaders().get(PollingConstants.LOCATION);
if (locationHeader != null) {
try {
new URL(locationHeader.getValue());
return Mono.just(true);
} catch (MalformedURLException e) {
logger.info("Failed to parse Location header into a URL.", e);
return Mono.just(false);
}
}
return Mono.just(false);
}
/**
 * Inspects the activation response, seeds the polling context with the Location URL,
 * HTTP method, and request URL (all needed later by {@code getResult}), and emits the
 * initial in-progress poll response.
 *
 * @param response the activation operation's response
 * @param pollingContext mutable per-operation state shared across polling calls
 * @param pollResponseType the type the poll response body deserializes into
 * @return the initial {@link PollResponse}, or an error for activation status codes
 * other than 200/201/202/204
 */
@Override
public Mono<PollResponse<T>> onInitialResponse(Response<?> response, PollingContext<T> pollingContext,
    TypeReference<T> pollResponseType) {
    HttpHeader locationHeader = response.getHeaders().get(PollingConstants.LOCATION);
    if (locationHeader != null) {
        pollingContext.setData(PollingConstants.LOCATION, locationHeader.getValue());
    }
    pollingContext.setData(PollingConstants.HTTP_METHOD, response.getRequest().getHttpMethod().name());
    pollingContext.setData(PollingConstants.REQUEST_URL, response.getRequest().getUrl().toString());
    int statusCode = response.getStatusCode();
    if (statusCode == 200 || statusCode == 201 || statusCode == 202 || statusCode == 204) {
        // NOTE(review): a non-numeric Retry-After value surfaces as a NumberFormatException;
        // assumed server-controlled — confirm whether defensive parsing is desired.
        String retryAfterValue = response.getHeaders().getValue(PollingConstants.RETRY_AFTER);
        Duration retryAfter = retryAfterValue == null ? null : Duration.ofSeconds(Long.parseLong(retryAfterValue));
        return PollingUtils.convertResponse(response.getValue(), serializer, pollResponseType)
            .map(value -> new PollResponse<>(LongRunningOperationStatus.IN_PROGRESS, value, retryAfter))
            .switchIfEmpty(Mono.defer(() -> Mono.just(new PollResponse<>(
                LongRunningOperationStatus.IN_PROGRESS, null, retryAfter))));
    } else {
        // Fixed: the previous format string concatenation ("%d," + ", 'Location'...")
        // rendered a doubled comma after the status code in the error message.
        return Mono.error(new AzureException(String.format("Operation failed or cancelled with status code %d, "
            + "'Location' header: %s, and response body: %s", statusCode, locationHeader,
            PollingUtils.serializeResponse(response.getValue(), serializer))));
    }
}
@Override
public Mono<PollResponse<T>> poll(PollingContext<T> pollingContext, TypeReference<T> pollResponseType) {
HttpRequest request = );
}
@Override
public Mono<U> getResult(PollingContext<T> pollingContext, TypeReference<U> resultType) {
if (pollingContext.getLatestResponse().getStatus() == LongRunningOperationStatus.FAILED) {
return Mono.error(new AzureException("Long running operation failed."));
} else if (pollingContext.getLatestResponse().getStatus() == LongRunningOperationStatus.USER_CANCELLED) {
return Mono.error(new AzureException("Long running operation cancelled."));
}
String finalGetUrl;
String httpMethod = pollingContext.getData(PollingConstants.HTTP_METHOD);
if (HttpMethod.PUT.name().equalsIgnoreCase(httpMethod)
|| HttpMethod.PATCH.name().equalsIgnoreCase(httpMethod)) {
finalGetUrl = pollingContext.getData(PollingConstants.REQUEST_URL);
} else if (HttpMethod.POST.name().equalsIgnoreCase(httpMethod)
&& pollingContext.getData(PollingConstants.LOCATION) != null) {
finalGetUrl = pollingContext.getData(PollingConstants.LOCATION);
} else {
return Mono.error(new AzureException("Cannot get final result"));
}
if (finalGetUrl == null) {
String latestResponseBody = pollingContext.getData(PollingConstants.POLL_RESPONSE_BODY);
return PollingUtils.deserializeResponse(BinaryData.fromString(latestResponseBody), serializer, resultType);
} else {
HttpRequest request = new HttpRequest(HttpMethod.GET, finalGetUrl);
return httpPipeline.send(request)
.flatMap(HttpResponse::getBodyAsByteArray)
.map(BinaryData::fromBytes)
.flatMap(binaryData -> PollingUtils.deserializeResponse(binaryData, serializer, resultType));
}
}
} |
@alzimmermsft: I am creating a second PR to fix the sync `block()` issue we discussed. But does this change look OK from the reactor perspective from the async codepath? | public Mono<Void> recordAndStartPlaybackAsync() {
return runSyncOrAsync()
.then(startRecordingAsync())
.doOnSuccess(x -> {
testProxyPolicy.setRecordingId(recordingId);
testProxyPolicy.setMode("record");
})
.then(Mono.defer(() -> runSyncOrAsync()))
.then(Mono.defer(() -> stopRecordingAsync()))
.then(Mono.defer(() -> startPlaybackAsync()))
.doOnSuccess(x -> {
testProxyPolicy.setRecordingId(recordingId);
testProxyPolicy.setMode("playback");
});
} | return runSyncOrAsync() | public Mono<Void> recordAndStartPlaybackAsync() {
return runSyncOrAsync()
.then(startRecordingAsync())
.doOnSuccess(x -> {
testProxyPolicy.setRecordingId(recordingId);
testProxyPolicy.setMode("record");
})
.then(Mono.defer(() -> runSyncOrAsync()))
.then(Mono.defer(() -> stopRecordingAsync()))
.then(Mono.defer(() -> startPlaybackAsync()))
.doOnSuccess(x -> {
testProxyPolicy.setRecordingId(recordingId);
testProxyPolicy.setMode("playback");
});
} | class PerfStressTest<TOptions extends PerfStressOptions> {
private final reactor.netty.http.client.HttpClient recordPlaybackHttpClient;
private final TestProxyPolicy testProxyPolicy;
private String recordingId;
protected final TOptions options;
protected final HttpClient httpClient;
protected final Iterable<HttpPipelinePolicy> policies;
/**
* Creates an instance of performance test.
* @param options the options configured for the test.
* @throws IllegalStateException if SSL context cannot be created.
*/
public PerfStressTest(TOptions options) {
this.options = options;
final SslContext sslContext;
if (options.isInsecure()) {
try {
sslContext = SslContextBuilder.forClient()
.trustManager(InsecureTrustManagerFactory.INSTANCE)
.build();
} catch (SSLException e) {
throw new IllegalStateException(e);
}
reactor.netty.http.client.HttpClient nettyHttpClient = reactor.netty.http.client.HttpClient.create()
.secure(sslContextSpec -> sslContextSpec.sslContext(sslContext));
httpClient = new NettyAsyncHttpClientBuilder(nettyHttpClient).build();
} else {
sslContext = null;
httpClient = null;
}
if (options.getTestProxy() != null) {
if (options.isInsecure()) {
recordPlaybackHttpClient = reactor.netty.http.client.HttpClient.create()
.secure(sslContextSpec -> sslContextSpec.sslContext(sslContext));
} else {
recordPlaybackHttpClient = reactor.netty.http.client.HttpClient.create();
}
testProxyPolicy = new TestProxyPolicy(options.getTestProxy());
policies = Arrays.asList(testProxyPolicy);
} else {
recordPlaybackHttpClient = null;
testProxyPolicy = null;
policies = null;
}
}
/**
 * Attempts to configure a ClientBuilder using reflection. If a ClientBuilder does not follow the standard convention,
 * it can be configured manually using the "httpClient" and "policies" fields.
 * @param clientBuilder The client builder.
 * @throws IllegalStateException If reflective access to get httpClient or addPolicy methods fail.
 */
protected void configureClientBuilder(Object clientBuilder) {
    // Nothing to do unless the test was constructed with a custom client or proxy policies.
    if (httpClient != null || policies != null) {
        Class<?> clientBuilderClass = clientBuilder.getClass();
        try {
            if (httpClient != null) {
                // Convention: builders expose httpClient(HttpClient) for client injection.
                Method httpClientMethod = clientBuilderClass.getMethod("httpClient", HttpClient.class);
                httpClientMethod.invoke(clientBuilder, httpClient);
            }
            if (policies != null) {
                // Convention: builders expose addPolicy(HttpPipelinePolicy), invoked once per policy.
                Method addPolicyMethod = clientBuilderClass.getMethod("addPolicy", HttpPipelinePolicy.class);
                for (HttpPipelinePolicy policy : policies) {
                    addPolicyMethod.invoke(clientBuilder, policy);
                }
            }
        } catch (ReflectiveOperationException e) {
            // NoSuchMethod/IllegalAccess/InvocationTarget all surface as IllegalStateException.
            throw new IllegalStateException(e);
        }
    }
}
/**
* Runs the setup required prior to running the performance test.
* @return An empty {@link Mono}
*/
public Mono<Void> globalSetupAsync() {
return Mono.empty();
}
/**
* Runs the setup required prior to running an individual thread in the performance test.
* @return An empty {@link Mono}
*/
public Mono<Void> setupAsync() {
return Mono.empty();
}
/**
 * Runs a single test pass, dispatching to the synchronous {@link #run()} or the
 * asynchronous {@link #runAsync()} implementation based on the configured options.
 * (The previous javadoc, copied from the playback method, did not describe this method.)
 *
 * @return An empty {@link Mono} that completes when the pass finishes.
 */
private Mono<Void> runSyncOrAsync() {
    if (options.isSync()) {
        // Defer the blocking run() until subscription; clearer than hanging it off
        // an empty Mono's doOnSuccess callback.
        return Mono.fromRunnable(this::run);
    } else {
        return runAsync();
    }
}
/**
* Runs the performance test.
*/
public abstract void run();
/**
* Runs the performance test asynchronously.
* @return An empty {@link Mono}
*/
public abstract Mono<Void> runAsync();
/**
 * Stops playback tests.
 * Signals the test proxy to stop the current playback session and purge its
 * in-memory recording, then clears the proxy policy's mode and recording id.
 * @return An empty {@link Mono}.
 */
public Mono<Void> stopPlaybackAsync() {
    return recordPlaybackHttpClient
        .headers(h -> {
            h.set("x-recording-id", recordingId);
            // Free proxy memory: the in-memory recording is no longer needed.
            h.set("x-purge-inmemory-recording", Boolean.toString(true));
        })
        .post()
        .uri(options.getTestProxy().resolve("/playback/stop"))
        .response()
        .doOnSuccess(response -> {
            // Reset the policy so subsequent requests bypass record/playback routing.
            testProxyPolicy.setMode(null);
            testProxyPolicy.setRecordingId(null);
        })
        .then();
}
/**
* Runs the cleanup logic after an individual thread finishes in the performance test.
* @return An empty {@link Mono}
*/
public Mono<Void> cleanupAsync() {
return Mono.empty();
}
/**
* Runs the cleanup logic after the performance test finishes.
* @return An empty {@link Mono}
*/
public Mono<Void> globalCleanupAsync() {
return Mono.empty();
}
// Asks the test proxy to start a new recording session and captures the session
// id from the "x-recording-id" response header for use by later proxy calls.
private Mono<Void> startRecordingAsync() {
    return recordPlaybackHttpClient
        .post()
        .uri(options.getTestProxy().resolve("/record/start"))
        .response()
        .doOnNext(response -> {
            recordingId = response.responseHeaders().get("x-recording-id");
        })
        .then();
}
// Tells the test proxy to stop the recording session identified by recordingId.
private Mono<Void> stopRecordingAsync() {
    return recordPlaybackHttpClient
        .headers(h -> h.set("x-recording-id", recordingId))
        .post()
        .uri(options.getTestProxy().resolve("/record/stop"))
        .response()
        .then();
}
// Starts playback of the previously recorded session; the proxy returns a new
// playback session id in "x-recording-id", which replaces the recording id.
private Mono<Void> startPlaybackAsync() {
    return recordPlaybackHttpClient
        .headers(h -> h.set("x-recording-id", recordingId))
        .post()
        .uri(options.getTestProxy().resolve("/playback/start"))
        .response()
        .doOnNext(response -> {
            recordingId = response.responseHeaders().get("x-recording-id");
        })
        .then();
}
} | class PerfStressTest<TOptions extends PerfStressOptions> {
private final reactor.netty.http.client.HttpClient recordPlaybackHttpClient;
private final TestProxyPolicy testProxyPolicy;
private String recordingId;
protected final TOptions options;
protected final HttpClient httpClient;
protected final Iterable<HttpPipelinePolicy> policies;
/**
* Creates an instance of performance test.
* @param options the options configured for the test.
* @throws IllegalStateException if SSL context cannot be created.
*/
public PerfStressTest(TOptions options) {
this.options = options;
final SslContext sslContext;
if (options.isInsecure()) {
try {
sslContext = SslContextBuilder.forClient()
.trustManager(InsecureTrustManagerFactory.INSTANCE)
.build();
} catch (SSLException e) {
throw new IllegalStateException(e);
}
reactor.netty.http.client.HttpClient nettyHttpClient = reactor.netty.http.client.HttpClient.create()
.secure(sslContextSpec -> sslContextSpec.sslContext(sslContext));
httpClient = new NettyAsyncHttpClientBuilder(nettyHttpClient).build();
} else {
sslContext = null;
httpClient = null;
}
if (options.getTestProxy() != null) {
if (options.isInsecure()) {
recordPlaybackHttpClient = reactor.netty.http.client.HttpClient.create()
.secure(sslContextSpec -> sslContextSpec.sslContext(sslContext));
} else {
recordPlaybackHttpClient = reactor.netty.http.client.HttpClient.create();
}
testProxyPolicy = new TestProxyPolicy(options.getTestProxy());
policies = Arrays.asList(testProxyPolicy);
} else {
recordPlaybackHttpClient = null;
testProxyPolicy = null;
policies = null;
}
}
/**
 * Attempts to configure a ClientBuilder using reflection. If a ClientBuilder does not follow
 * the standard convention (an {@code httpClient} setter and an {@code addPolicy} method), it
 * can be configured manually using the "httpClient" and "policies" fields instead.
 * @param clientBuilder The client builder.
 * @throws IllegalStateException If reflective access to get httpClient or addPolicy methods fail.
 */
protected void configureClientBuilder(Object clientBuilder) {
    if (httpClient == null && policies == null) {
        // Nothing to apply to the builder.
        return;
    }
    Class<?> builderType = clientBuilder.getClass();
    try {
        if (httpClient != null) {
            // Invoke builder.httpClient(HttpClient) reflectively.
            builderType.getMethod("httpClient", HttpClient.class).invoke(clientBuilder, httpClient);
        }
        if (policies != null) {
            // Invoke builder.addPolicy(HttpPipelinePolicy) once per policy.
            Method addPolicy = builderType.getMethod("addPolicy", HttpPipelinePolicy.class);
            for (HttpPipelinePolicy pipelinePolicy : policies) {
                addPolicy.invoke(clientBuilder, pipelinePolicy);
            }
        }
    } catch (ReflectiveOperationException e) {
        throw new IllegalStateException(e);
    }
}
/**
 * Runs the setup required prior to running the performance test.
 * Override to perform one-time, test-wide setup; the base implementation is a no-op.
 * @return An empty {@link Mono}
 */
public Mono<Void> globalSetupAsync() {
return Mono.empty();
}
/**
 * Runs the setup required prior to running an individual thread in the performance test.
 * Override to perform per-instance setup; the base implementation is a no-op.
 * @return An empty {@link Mono}
 */
public Mono<Void> setupAsync() {
return Mono.empty();
}
/**
 * Runs the test a single time, dispatching to the synchronous {@link #run()} or the
 * asynchronous {@link #runAsync()} implementation based on {@code options.isSync()}.
 * (The previous javadoc, "Records responses and starts async tests in playback mode",
 * was copy-pasted from the record/playback logic and did not describe this method.)
 * @return A {@link Mono} that completes once the single run has finished.
 */
private Mono<Void> runSyncOrAsync() {
    if (options.isSync()) {
        // fromRunnable defers the blocking run() until subscription, matching the
        // laziness of the former Mono.empty().then().doOnSuccess(x -> run()) chain
        // in a single, direct operator.
        return Mono.<Void>fromRunnable(this::run);
    } else {
        return runAsync();
    }
}
/**
* Runs the performance test.
*/
public abstract void run();
/**
* Runs the performance test asynchronously.
* @return An empty {@link Mono}
*/
public abstract Mono<Void> runAsync();
/**
 * Stops playback tests.
 * Sends a POST to {@code /playback/stop} with the current recording id, asks the proxy to
 * purge the in-memory recording, and resets the {@code TestProxyPolicy} so subsequent
 * requests are no longer routed through record/playback.
 * @return An empty {@link Mono}.
 */
public Mono<Void> stopPlaybackAsync() {
return recordPlaybackHttpClient
.headers(h -> {
h.set("x-recording-id", recordingId);
// Free the proxy's memory once playback is done.
h.set("x-purge-inmemory-recording", Boolean.toString(true));
})
.post()
.uri(options.getTestProxy().resolve("/playback/stop"))
.response()
.doOnSuccess(response -> {
// Return the policy to pass-through mode.
testProxyPolicy.setMode(null);
testProxyPolicy.setRecordingId(null);
})
.then();
}
/**
 * Runs the cleanup logic after an individual thread finishes in the performance test.
 * Override to release per-instance resources; the base implementation is a no-op.
 * @return An empty {@link Mono}
 */
public Mono<Void> cleanupAsync() {
return Mono.empty();
}
/**
 * Runs the cleanup logic after the performance test finishes.
 * Override to release test-wide resources; the base implementation is a no-op.
 * @return An empty {@link Mono}
 */
public Mono<Void> globalCleanupAsync() {
return Mono.empty();
}
// Tells the test proxy to begin recording (POST /record/start) and stores the
// recording id returned in the "x-recording-id" response header.
private Mono<Void> startRecordingAsync() {
return recordPlaybackHttpClient
.post()
.uri(options.getTestProxy().resolve("/record/start"))
.response()
.doOnNext(response -> {
recordingId = response.responseHeaders().get("x-recording-id");
})
.then();
}
// Tells the test proxy to stop recording (POST /record/stop) the session
// identified by the stored recording id.
private Mono<Void> stopRecordingAsync() {
return recordPlaybackHttpClient
.headers(h -> h.set("x-recording-id", recordingId))
.post()
.uri(options.getTestProxy().resolve("/record/stop"))
.response()
.then();
}
// Starts playback of the recorded session (POST /playback/start). The proxy
// responds with a playback recording id in "x-recording-id", which replaces
// the stored id for subsequent playback requests.
private Mono<Void> startPlaybackAsync() {
return recordPlaybackHttpClient
.headers(h -> h.set("x-recording-id", recordingId))
.post()
.uri(options.getTestProxy().resolve("/playback/start"))
.response()
.doOnNext(response -> {
recordingId = response.responseHeaders().get("x-recording-id");
})
.then();
}
} |
Does this code path need to be guaranteed to only happen once? | public Mono<Void> runAsync() {
if (_firstRun) {
_firstRun = false;
return sendRequest().repeat(options.getFirstRunExtraRequests()).then();
}
else {
return sendRequest();
}
} | _firstRun = false; | public Mono<Void> runAsync() {
if (firstRun) {
firstRun = false;
return sendRequest().repeat(options.getFirstRunExtraRequests()).then();
}
else {
return sendRequest();
}
} | class HttpPipelineTest extends PerfStressTest<HttpPipelineOptions> {
private static final int BUFFER_SIZE = 16 * 1024 * 1024;
private final HttpPipeline httpPipeline;
private final byte[] buffer = new byte[BUFFER_SIZE];
private boolean _firstRun = true;
/**
 * Creates an instance of the {@link HttpPipelineTest}, building the pipeline under test
 * from the HTTP client and policies supplied by the base class when present.
 * @param options options to configure the HTTP pipeline.
 */
public HttpPipelineTest(HttpPipelineOptions options) {
    super(options);
    HttpPipelineBuilder pipelineBuilder = new HttpPipelineBuilder();
    if (httpClient != null) {
        pipelineBuilder = pipelineBuilder.httpClient(httpClient);
    }
    if (policies != null) {
        // Collect the Iterable into a list so it can be passed as a varargs array.
        ArrayList<HttpPipelinePolicy> collected = new ArrayList<>();
        policies.forEach(collected::add);
        pipelineBuilder.policies(collected.toArray(new HttpPipelinePolicy[0]));
    }
    httpPipeline = pipelineBuilder.build();
}
@Override
public void run() {
// The synchronous variant simply blocks on the async implementation.
runAsync().block();
}
@Override
public Mono<Void> sendRequest() {
// Issues a GET to the configured URL and fully drains the response body
// into a shared scratch buffer, avoiding per-request allocation.
// NOTE(review): `buffer` is an instance field, so this method assumes the
// test instance is not invoked concurrently — confirm against the framework's
// one-call-at-a-time-per-instance guarantee.
HttpRequest request = new HttpRequest(HttpMethod.GET, options.getUrl());
return httpPipeline
.send(request)
.flatMapMany(HttpResponse::getBody)
.map(b -> {
int readCount = 0;
int remaining = b.remaining();
// Copy the ByteBuffer out in BUFFER_SIZE-sized chunks until exhausted.
while (readCount < remaining) {
int expectedReadCount = Math.min(remaining - readCount, BUFFER_SIZE);
b.get(buffer, 0, expectedReadCount);
readCount += expectedReadCount;
}
return 1;
})
.then();
}
} | class HttpPipelineTest extends PerfStressTest<HttpPipelineOptions> {
private static final int BUFFER_SIZE = 16 * 1024 * 1024;
private final HttpPipeline httpPipeline;
private final byte[] buffer = new byte[BUFFER_SIZE];
private boolean firstRun = true;
/**
* Creates an instance of the {@link HttpPipelineTest}.
* @param options options to configure the HTTP pipeline.
*/
public HttpPipelineTest(HttpPipelineOptions options) {
super(options);
HttpPipelineBuilder builder = new HttpPipelineBuilder();
if (httpClient != null) {
builder = builder.httpClient(httpClient);
}
if (policies != null) {
ArrayList<HttpPipelinePolicy> policyList = new ArrayList<>();
for (HttpPipelinePolicy policy : policies) {
policyList.add(policy);
}
builder.policies(policyList.toArray(new HttpPipelinePolicy[0]));
}
httpPipeline = builder.build();
}
@Override
public void run() {
runAsync().block();
}
@Override
public Mono<Void> sendRequest() {
HttpRequest request = new HttpRequest(HttpMethod.GET, options.getUrl());
return httpPipeline
.send(request)
.flatMapMany(HttpResponse::getBody)
.map(b -> {
int readCount = 0;
int remaining = b.remaining();
while (readCount < remaining) {
int expectedReadCount = Math.min(remaining - readCount, BUFFER_SIZE);
b.get(buffer, 0, expectedReadCount);
readCount += expectedReadCount;
}
return 1;
})
.then();
}
} |
Yes, but should this path only be taken if the first run requires extra requests? | public Mono<Void> recordAndStartPlaybackAsync() {
return runSyncOrAsync()
.then(startRecordingAsync())
.doOnSuccess(x -> {
testProxyPolicy.setRecordingId(recordingId);
testProxyPolicy.setMode("record");
})
.then(Mono.defer(() -> runSyncOrAsync()))
.then(Mono.defer(() -> stopRecordingAsync()))
.then(Mono.defer(() -> startPlaybackAsync()))
.doOnSuccess(x -> {
testProxyPolicy.setRecordingId(recordingId);
testProxyPolicy.setMode("playback");
});
} | return runSyncOrAsync() | public Mono<Void> recordAndStartPlaybackAsync() {
return runSyncOrAsync()
.then(startRecordingAsync())
.doOnSuccess(x -> {
testProxyPolicy.setRecordingId(recordingId);
testProxyPolicy.setMode("record");
})
.then(Mono.defer(() -> runSyncOrAsync()))
.then(Mono.defer(() -> stopRecordingAsync()))
.then(Mono.defer(() -> startPlaybackAsync()))
.doOnSuccess(x -> {
testProxyPolicy.setRecordingId(recordingId);
testProxyPolicy.setMode("playback");
});
} | class PerfStressTest<TOptions extends PerfStressOptions> {
private final reactor.netty.http.client.HttpClient recordPlaybackHttpClient;
private final TestProxyPolicy testProxyPolicy;
private String recordingId;
protected final TOptions options;
protected final HttpClient httpClient;
protected final Iterable<HttpPipelinePolicy> policies;
/**
* Creates an instance of performance test.
* @param options the options configured for the test.
* @throws IllegalStateException if SSL context cannot be created.
*/
public PerfStressTest(TOptions options) {
this.options = options;
final SslContext sslContext;
if (options.isInsecure()) {
try {
sslContext = SslContextBuilder.forClient()
.trustManager(InsecureTrustManagerFactory.INSTANCE)
.build();
} catch (SSLException e) {
throw new IllegalStateException(e);
}
reactor.netty.http.client.HttpClient nettyHttpClient = reactor.netty.http.client.HttpClient.create()
.secure(sslContextSpec -> sslContextSpec.sslContext(sslContext));
httpClient = new NettyAsyncHttpClientBuilder(nettyHttpClient).build();
} else {
sslContext = null;
httpClient = null;
}
if (options.getTestProxy() != null) {
if (options.isInsecure()) {
recordPlaybackHttpClient = reactor.netty.http.client.HttpClient.create()
.secure(sslContextSpec -> sslContextSpec.sslContext(sslContext));
} else {
recordPlaybackHttpClient = reactor.netty.http.client.HttpClient.create();
}
testProxyPolicy = new TestProxyPolicy(options.getTestProxy());
policies = Arrays.asList(testProxyPolicy);
} else {
recordPlaybackHttpClient = null;
testProxyPolicy = null;
policies = null;
}
}
/**
* Attempts to configure a ClientBuilder using reflection. If a ClientBuilder does not follow the standard convention,
* it can be configured manually using the "httpClient" and "policies" fields.
* @param clientBuilder The client builder.
* @throws IllegalStateException If reflective access to get httpClient or addPolicy methods fail.
*/
protected void configureClientBuilder(Object clientBuilder) {
if (httpClient != null || policies != null) {
Class<?> clientBuilderClass = clientBuilder.getClass();
try {
if (httpClient != null) {
Method httpClientMethod = clientBuilderClass.getMethod("httpClient", HttpClient.class);
httpClientMethod.invoke(clientBuilder, httpClient);
}
if (policies != null) {
Method addPolicyMethod = clientBuilderClass.getMethod("addPolicy", HttpPipelinePolicy.class);
for (HttpPipelinePolicy policy : policies) {
addPolicyMethod.invoke(clientBuilder, policy);
}
}
} catch (ReflectiveOperationException e) {
throw new IllegalStateException(e);
}
}
}
/**
* Runs the setup required prior to running the performance test.
* @return An empty {@link Mono}
*/
public Mono<Void> globalSetupAsync() {
return Mono.empty();
}
/**
* Runs the setup required prior to running an individual thread in the performance test.
* @return An empty {@link Mono}
*/
public Mono<Void> setupAsync() {
return Mono.empty();
}
/**
 * Runs the test a single time, dispatching to the synchronous {@link #run()} or the
 * asynchronous {@link #runAsync()} implementation based on {@code options.isSync()}.
 * (The previous javadoc, "Records responses and starts async tests in playback mode",
 * was copy-pasted from the record/playback logic and did not describe this method.)
 * @return A {@link Mono} that completes once the single run has finished.
 */
private Mono<Void> runSyncOrAsync() {
    if (options.isSync()) {
        // fromRunnable defers the blocking run() until subscription, matching the
        // laziness of the former Mono.empty().then().doOnSuccess(x -> run()) chain
        // in a single, direct operator.
        return Mono.<Void>fromRunnable(this::run);
    } else {
        return runAsync();
    }
}
/**
* Runs the performance test.
*/
public abstract void run();
/**
* Runs the performance test asynchronously.
* @return An empty {@link Mono}
*/
public abstract Mono<Void> runAsync();
/**
* Stops playback tests.
* @return An empty {@link Mono}.
*/
public Mono<Void> stopPlaybackAsync() {
return recordPlaybackHttpClient
.headers(h -> {
h.set("x-recording-id", recordingId);
h.set("x-purge-inmemory-recording", Boolean.toString(true));
})
.post()
.uri(options.getTestProxy().resolve("/playback/stop"))
.response()
.doOnSuccess(response -> {
testProxyPolicy.setMode(null);
testProxyPolicy.setRecordingId(null);
})
.then();
}
/**
* Runs the cleanup logic after an individual thread finishes in the performance test.
* @return An empty {@link Mono}
*/
public Mono<Void> cleanupAsync() {
return Mono.empty();
}
/**
* Runs the cleanup logic after the performance test finishes.
* @return An empty {@link Mono}
*/
public Mono<Void> globalCleanupAsync() {
return Mono.empty();
}
private Mono<Void> startRecordingAsync() {
return recordPlaybackHttpClient
.post()
.uri(options.getTestProxy().resolve("/record/start"))
.response()
.doOnNext(response -> {
recordingId = response.responseHeaders().get("x-recording-id");
})
.then();
}
private Mono<Void> stopRecordingAsync() {
return recordPlaybackHttpClient
.headers(h -> h.set("x-recording-id", recordingId))
.post()
.uri(options.getTestProxy().resolve("/record/stop"))
.response()
.then();
}
private Mono<Void> startPlaybackAsync() {
return recordPlaybackHttpClient
.headers(h -> h.set("x-recording-id", recordingId))
.post()
.uri(options.getTestProxy().resolve("/playback/start"))
.response()
.doOnNext(response -> {
recordingId = response.responseHeaders().get("x-recording-id");
})
.then();
}
} | class PerfStressTest<TOptions extends PerfStressOptions> {
private final reactor.netty.http.client.HttpClient recordPlaybackHttpClient;
private final TestProxyPolicy testProxyPolicy;
private String recordingId;
protected final TOptions options;
protected final HttpClient httpClient;
protected final Iterable<HttpPipelinePolicy> policies;
/**
* Creates an instance of performance test.
* @param options the options configured for the test.
* @throws IllegalStateException if SSL context cannot be created.
*/
public PerfStressTest(TOptions options) {
this.options = options;
final SslContext sslContext;
if (options.isInsecure()) {
try {
sslContext = SslContextBuilder.forClient()
.trustManager(InsecureTrustManagerFactory.INSTANCE)
.build();
} catch (SSLException e) {
throw new IllegalStateException(e);
}
reactor.netty.http.client.HttpClient nettyHttpClient = reactor.netty.http.client.HttpClient.create()
.secure(sslContextSpec -> sslContextSpec.sslContext(sslContext));
httpClient = new NettyAsyncHttpClientBuilder(nettyHttpClient).build();
} else {
sslContext = null;
httpClient = null;
}
if (options.getTestProxy() != null) {
if (options.isInsecure()) {
recordPlaybackHttpClient = reactor.netty.http.client.HttpClient.create()
.secure(sslContextSpec -> sslContextSpec.sslContext(sslContext));
} else {
recordPlaybackHttpClient = reactor.netty.http.client.HttpClient.create();
}
testProxyPolicy = new TestProxyPolicy(options.getTestProxy());
policies = Arrays.asList(testProxyPolicy);
} else {
recordPlaybackHttpClient = null;
testProxyPolicy = null;
policies = null;
}
}
/**
* Attempts to configure a ClientBuilder using reflection. If a ClientBuilder does not follow the standard convention,
* it can be configured manually using the "httpClient" and "policies" fields.
* @param clientBuilder The client builder.
* @throws IllegalStateException If reflective access to get httpClient or addPolicy methods fail.
*/
protected void configureClientBuilder(Object clientBuilder) {
if (httpClient != null || policies != null) {
Class<?> clientBuilderClass = clientBuilder.getClass();
try {
if (httpClient != null) {
Method httpClientMethod = clientBuilderClass.getMethod("httpClient", HttpClient.class);
httpClientMethod.invoke(clientBuilder, httpClient);
}
if (policies != null) {
Method addPolicyMethod = clientBuilderClass.getMethod("addPolicy", HttpPipelinePolicy.class);
for (HttpPipelinePolicy policy : policies) {
addPolicyMethod.invoke(clientBuilder, policy);
}
}
} catch (ReflectiveOperationException e) {
throw new IllegalStateException(e);
}
}
}
/**
* Runs the setup required prior to running the performance test.
* @return An empty {@link Mono}
*/
public Mono<Void> globalSetupAsync() {
return Mono.empty();
}
/**
* Runs the setup required prior to running an individual thread in the performance test.
* @return An empty {@link Mono}
*/
public Mono<Void> setupAsync() {
return Mono.empty();
}
/**
 * Runs the test a single time, dispatching to the synchronous {@link #run()} or the
 * asynchronous {@link #runAsync()} implementation based on {@code options.isSync()}.
 * (The previous javadoc, "Records responses and starts async tests in playback mode",
 * was copy-pasted from the record/playback logic and did not describe this method.)
 * @return A {@link Mono} that completes once the single run has finished.
 */
private Mono<Void> runSyncOrAsync() {
    if (options.isSync()) {
        // fromRunnable defers the blocking run() until subscription, matching the
        // laziness of the former Mono.empty().then().doOnSuccess(x -> run()) chain
        // in a single, direct operator.
        return Mono.<Void>fromRunnable(this::run);
    } else {
        return runAsync();
    }
}
/**
* Runs the performance test.
*/
public abstract void run();
/**
* Runs the performance test asynchronously.
* @return An empty {@link Mono}
*/
public abstract Mono<Void> runAsync();
/**
* Stops playback tests.
* @return An empty {@link Mono}.
*/
public Mono<Void> stopPlaybackAsync() {
return recordPlaybackHttpClient
.headers(h -> {
h.set("x-recording-id", recordingId);
h.set("x-purge-inmemory-recording", Boolean.toString(true));
})
.post()
.uri(options.getTestProxy().resolve("/playback/stop"))
.response()
.doOnSuccess(response -> {
testProxyPolicy.setMode(null);
testProxyPolicy.setRecordingId(null);
})
.then();
}
/**
* Runs the cleanup logic after an individual thread finishes in the performance test.
* @return An empty {@link Mono}
*/
public Mono<Void> cleanupAsync() {
return Mono.empty();
}
/**
* Runs the cleanup logic after the performance test finishes.
* @return An empty {@link Mono}
*/
public Mono<Void> globalCleanupAsync() {
return Mono.empty();
}
private Mono<Void> startRecordingAsync() {
return recordPlaybackHttpClient
.post()
.uri(options.getTestProxy().resolve("/record/start"))
.response()
.doOnNext(response -> {
recordingId = response.responseHeaders().get("x-recording-id");
})
.then();
}
private Mono<Void> stopRecordingAsync() {
return recordPlaybackHttpClient
.headers(h -> h.set("x-recording-id", recordingId))
.post()
.uri(options.getTestProxy().resolve("/record/stop"))
.response()
.then();
}
private Mono<Void> startPlaybackAsync() {
return recordPlaybackHttpClient
.headers(h -> h.set("x-recording-id", recordingId))
.post()
.uri(options.getTestProxy().resolve("/playback/start"))
.response()
.doOnNext(response -> {
recordingId = response.responseHeaders().get("x-recording-id");
})
.then();
}
} |
Once per parallel instance of the test. The perf framework guarantees that `runAsync()` is called sequentially per instance of the test class, so there should be no race condition here, if that's what you were suggesting. | public Mono<Void> runAsync() {
if (_firstRun) {
_firstRun = false;
return sendRequest().repeat(options.getFirstRunExtraRequests()).then();
}
else {
return sendRequest();
}
} | _firstRun = false; | public Mono<Void> runAsync() {
if (firstRun) {
firstRun = false;
return sendRequest().repeat(options.getFirstRunExtraRequests()).then();
}
else {
return sendRequest();
}
} | class HttpPipelineTest extends PerfStressTest<HttpPipelineOptions> {
private static final int BUFFER_SIZE = 16 * 1024 * 1024;
private final HttpPipeline httpPipeline;
private final byte[] buffer = new byte[BUFFER_SIZE];
private boolean _firstRun = true;
/**
* Creates an instance of the {@link HttpPipelineTest}.
* @param options options to configure the HTTP pipeline.
*/
public HttpPipelineTest(HttpPipelineOptions options) {
super(options);
HttpPipelineBuilder builder = new HttpPipelineBuilder();
if (httpClient != null) {
builder = builder.httpClient(httpClient);
}
if (policies != null) {
ArrayList<HttpPipelinePolicy> policyList = new ArrayList<>();
for (HttpPipelinePolicy policy : policies) {
policyList.add(policy);
}
builder.policies(policyList.toArray(new HttpPipelinePolicy[0]));
}
httpPipeline = builder.build();
}
@Override
public void run() {
runAsync().block();
}
@Override
public Mono<Void> sendRequest() {
HttpRequest request = new HttpRequest(HttpMethod.GET, options.getUrl());
return httpPipeline
.send(request)
.flatMapMany(HttpResponse::getBody)
.map(b -> {
int readCount = 0;
int remaining = b.remaining();
while (readCount < remaining) {
int expectedReadCount = Math.min(remaining - readCount, BUFFER_SIZE);
b.get(buffer, 0, expectedReadCount);
readCount += expectedReadCount;
}
return 1;
})
.then();
}
} | class HttpPipelineTest extends PerfStressTest<HttpPipelineOptions> {
private static final int BUFFER_SIZE = 16 * 1024 * 1024;
private final HttpPipeline httpPipeline;
private final byte[] buffer = new byte[BUFFER_SIZE];
private boolean firstRun = true;
/**
* Creates an instance of the {@link HttpPipelineTest}.
* @param options options to configure the HTTP pipeline.
*/
public HttpPipelineTest(HttpPipelineOptions options) {
super(options);
HttpPipelineBuilder builder = new HttpPipelineBuilder();
if (httpClient != null) {
builder = builder.httpClient(httpClient);
}
if (policies != null) {
ArrayList<HttpPipelinePolicy> policyList = new ArrayList<>();
for (HttpPipelinePolicy policy : policies) {
policyList.add(policy);
}
builder.policies(policyList.toArray(new HttpPipelinePolicy[0]));
}
httpPipeline = builder.build();
}
@Override
public void run() {
runAsync().block();
}
@Override
public Mono<Void> sendRequest() {
HttpRequest request = new HttpRequest(HttpMethod.GET, options.getUrl());
return httpPipeline
.send(request)
.flatMapMany(HttpResponse::getBody)
.map(b -> {
int readCount = 0;
int remaining = b.remaining();
while (readCount < remaining) {
int expectedReadCount = Math.min(remaining - readCount, BUFFER_SIZE);
b.get(buffer, 0, expectedReadCount);
readCount += expectedReadCount;
}
return 1;
})
.then();
}
} |
The change to the perf framework itself is to **always** call `run()/runAsync()` once before starting recording. The reason is that the first call to `run()/runAsync()` might perform some initial requests like authorization, and we'd prefer to record the steady-state behavior rather than the unique behavior on the first run. If the first call to `run()/runAsync()` doesn't perform extra requests, this is an unnecessary waste of time, but it should be harmless and relatively fast. The "first run requires extra requests" is just a feature of `HttpPipelineTest` I am using to test this code change. | public Mono<Void> recordAndStartPlaybackAsync() {
return runSyncOrAsync()
.then(startRecordingAsync())
.doOnSuccess(x -> {
testProxyPolicy.setRecordingId(recordingId);
testProxyPolicy.setMode("record");
})
.then(Mono.defer(() -> runSyncOrAsync()))
.then(Mono.defer(() -> stopRecordingAsync()))
.then(Mono.defer(() -> startPlaybackAsync()))
.doOnSuccess(x -> {
testProxyPolicy.setRecordingId(recordingId);
testProxyPolicy.setMode("playback");
});
} | return runSyncOrAsync() | public Mono<Void> recordAndStartPlaybackAsync() {
return runSyncOrAsync()
.then(startRecordingAsync())
.doOnSuccess(x -> {
testProxyPolicy.setRecordingId(recordingId);
testProxyPolicy.setMode("record");
})
.then(Mono.defer(() -> runSyncOrAsync()))
.then(Mono.defer(() -> stopRecordingAsync()))
.then(Mono.defer(() -> startPlaybackAsync()))
.doOnSuccess(x -> {
testProxyPolicy.setRecordingId(recordingId);
testProxyPolicy.setMode("playback");
});
} | class PerfStressTest<TOptions extends PerfStressOptions> {
private final reactor.netty.http.client.HttpClient recordPlaybackHttpClient;
private final TestProxyPolicy testProxyPolicy;
private String recordingId;
protected final TOptions options;
protected final HttpClient httpClient;
protected final Iterable<HttpPipelinePolicy> policies;
/**
* Creates an instance of performance test.
* @param options the options configured for the test.
* @throws IllegalStateException if SSL context cannot be created.
*/
public PerfStressTest(TOptions options) {
this.options = options;
final SslContext sslContext;
if (options.isInsecure()) {
try {
sslContext = SslContextBuilder.forClient()
.trustManager(InsecureTrustManagerFactory.INSTANCE)
.build();
} catch (SSLException e) {
throw new IllegalStateException(e);
}
reactor.netty.http.client.HttpClient nettyHttpClient = reactor.netty.http.client.HttpClient.create()
.secure(sslContextSpec -> sslContextSpec.sslContext(sslContext));
httpClient = new NettyAsyncHttpClientBuilder(nettyHttpClient).build();
} else {
sslContext = null;
httpClient = null;
}
if (options.getTestProxy() != null) {
if (options.isInsecure()) {
recordPlaybackHttpClient = reactor.netty.http.client.HttpClient.create()
.secure(sslContextSpec -> sslContextSpec.sslContext(sslContext));
} else {
recordPlaybackHttpClient = reactor.netty.http.client.HttpClient.create();
}
testProxyPolicy = new TestProxyPolicy(options.getTestProxy());
policies = Arrays.asList(testProxyPolicy);
} else {
recordPlaybackHttpClient = null;
testProxyPolicy = null;
policies = null;
}
}
/**
* Attempts to configure a ClientBuilder using reflection. If a ClientBuilder does not follow the standard convention,
* it can be configured manually using the "httpClient" and "policies" fields.
* @param clientBuilder The client builder.
* @throws IllegalStateException If reflective access to get httpClient or addPolicy methods fail.
*/
protected void configureClientBuilder(Object clientBuilder) {
if (httpClient != null || policies != null) {
Class<?> clientBuilderClass = clientBuilder.getClass();
try {
if (httpClient != null) {
Method httpClientMethod = clientBuilderClass.getMethod("httpClient", HttpClient.class);
httpClientMethod.invoke(clientBuilder, httpClient);
}
if (policies != null) {
Method addPolicyMethod = clientBuilderClass.getMethod("addPolicy", HttpPipelinePolicy.class);
for (HttpPipelinePolicy policy : policies) {
addPolicyMethod.invoke(clientBuilder, policy);
}
}
} catch (ReflectiveOperationException e) {
throw new IllegalStateException(e);
}
}
}
/**
* Runs the setup required prior to running the performance test.
* @return An empty {@link Mono}
*/
public Mono<Void> globalSetupAsync() {
return Mono.empty();
}
/**
* Runs the setup required prior to running an individual thread in the performance test.
* @return An empty {@link Mono}
*/
public Mono<Void> setupAsync() {
return Mono.empty();
}
/**
 * Runs the test a single time, dispatching to the synchronous {@link #run()} or the
 * asynchronous {@link #runAsync()} implementation based on {@code options.isSync()}.
 * (The previous javadoc, "Records responses and starts async tests in playback mode",
 * was copy-pasted from the record/playback logic and did not describe this method.)
 * @return A {@link Mono} that completes once the single run has finished.
 */
private Mono<Void> runSyncOrAsync() {
    if (options.isSync()) {
        // fromRunnable defers the blocking run() until subscription, matching the
        // laziness of the former Mono.empty().then().doOnSuccess(x -> run()) chain
        // in a single, direct operator.
        return Mono.<Void>fromRunnable(this::run);
    } else {
        return runAsync();
    }
}
/**
* Runs the performance test.
*/
public abstract void run();
/**
* Runs the performance test asynchronously.
* @return An empty {@link Mono}
*/
public abstract Mono<Void> runAsync();
/**
* Stops playback tests.
* @return An empty {@link Mono}.
*/
public Mono<Void> stopPlaybackAsync() {
return recordPlaybackHttpClient
.headers(h -> {
h.set("x-recording-id", recordingId);
h.set("x-purge-inmemory-recording", Boolean.toString(true));
})
.post()
.uri(options.getTestProxy().resolve("/playback/stop"))
.response()
.doOnSuccess(response -> {
testProxyPolicy.setMode(null);
testProxyPolicy.setRecordingId(null);
})
.then();
}
/**
* Runs the cleanup logic after an individual thread finishes in the performance test.
* @return An empty {@link Mono}
*/
public Mono<Void> cleanupAsync() {
return Mono.empty();
}
/**
* Runs the cleanup logic after the performance test finishes.
* @return An empty {@link Mono}
*/
public Mono<Void> globalCleanupAsync() {
return Mono.empty();
}
private Mono<Void> startRecordingAsync() {
return recordPlaybackHttpClient
.post()
.uri(options.getTestProxy().resolve("/record/start"))
.response()
.doOnNext(response -> {
recordingId = response.responseHeaders().get("x-recording-id");
})
.then();
}
private Mono<Void> stopRecordingAsync() {
return recordPlaybackHttpClient
.headers(h -> h.set("x-recording-id", recordingId))
.post()
.uri(options.getTestProxy().resolve("/record/stop"))
.response()
.then();
}
private Mono<Void> startPlaybackAsync() {
return recordPlaybackHttpClient
.headers(h -> h.set("x-recording-id", recordingId))
.post()
.uri(options.getTestProxy().resolve("/playback/start"))
.response()
.doOnNext(response -> {
recordingId = response.responseHeaders().get("x-recording-id");
})
.then();
}
} | class PerfStressTest<TOptions extends PerfStressOptions> {
private final reactor.netty.http.client.HttpClient recordPlaybackHttpClient;
private final TestProxyPolicy testProxyPolicy;
private String recordingId;
protected final TOptions options;
protected final HttpClient httpClient;
protected final Iterable<HttpPipelinePolicy> policies;
/**
* Creates an instance of performance test.
* @param options the options configured for the test.
* @throws IllegalStateException if SSL context cannot be created.
*/
public PerfStressTest(TOptions options) {
this.options = options;
final SslContext sslContext;
if (options.isInsecure()) {
try {
sslContext = SslContextBuilder.forClient()
.trustManager(InsecureTrustManagerFactory.INSTANCE)
.build();
} catch (SSLException e) {
throw new IllegalStateException(e);
}
reactor.netty.http.client.HttpClient nettyHttpClient = reactor.netty.http.client.HttpClient.create()
.secure(sslContextSpec -> sslContextSpec.sslContext(sslContext));
httpClient = new NettyAsyncHttpClientBuilder(nettyHttpClient).build();
} else {
sslContext = null;
httpClient = null;
}
if (options.getTestProxy() != null) {
if (options.isInsecure()) {
recordPlaybackHttpClient = reactor.netty.http.client.HttpClient.create()
.secure(sslContextSpec -> sslContextSpec.sslContext(sslContext));
} else {
recordPlaybackHttpClient = reactor.netty.http.client.HttpClient.create();
}
testProxyPolicy = new TestProxyPolicy(options.getTestProxy());
policies = Arrays.asList(testProxyPolicy);
} else {
recordPlaybackHttpClient = null;
testProxyPolicy = null;
policies = null;
}
}
/**
* Attempts to configure a ClientBuilder using reflection. If a ClientBuilder does not follow the standard convention,
* it can be configured manually using the "httpClient" and "policies" fields.
* @param clientBuilder The client builder.
* @throws IllegalStateException If reflective access to get httpClient or addPolicy methods fail.
*/
protected void configureClientBuilder(Object clientBuilder) {
if (httpClient != null || policies != null) {
Class<?> clientBuilderClass = clientBuilder.getClass();
try {
if (httpClient != null) {
Method httpClientMethod = clientBuilderClass.getMethod("httpClient", HttpClient.class);
httpClientMethod.invoke(clientBuilder, httpClient);
}
if (policies != null) {
Method addPolicyMethod = clientBuilderClass.getMethod("addPolicy", HttpPipelinePolicy.class);
for (HttpPipelinePolicy policy : policies) {
addPolicyMethod.invoke(clientBuilder, policy);
}
}
} catch (ReflectiveOperationException e) {
throw new IllegalStateException(e);
}
}
}
/**
* Runs the setup required prior to running the performance test.
* @return An empty {@link Mono}
*/
public Mono<Void> globalSetupAsync() {
return Mono.empty();
}
/**
* Runs the setup required prior to running an individual thread in the performance test.
* @return An empty {@link Mono}
*/
public Mono<Void> setupAsync() {
return Mono.empty();
}
/**
 * Runs the test a single time, dispatching to the synchronous {@link #run()} or the
 * asynchronous {@link #runAsync()} implementation based on {@code options.isSync()}.
 * (The previous javadoc, "Records responses and starts async tests in playback mode",
 * was copy-pasted from the record/playback logic and did not describe this method.)
 * @return A {@link Mono} that completes once the single run has finished.
 */
private Mono<Void> runSyncOrAsync() {
    if (options.isSync()) {
        // fromRunnable defers the blocking run() until subscription, matching the
        // laziness of the former Mono.empty().then().doOnSuccess(x -> run()) chain
        // in a single, direct operator.
        return Mono.<Void>fromRunnable(this::run);
    } else {
        return runAsync();
    }
}
/**
* Runs the performance test.
*/
public abstract void run();
/**
* Runs the performance test asynchronously.
* @return An empty {@link Mono}
*/
public abstract Mono<Void> runAsync();
/**
* Stops playback tests.
* @return An empty {@link Mono}.
*/
public Mono<Void> stopPlaybackAsync() {
return recordPlaybackHttpClient
.headers(h -> {
h.set("x-recording-id", recordingId);
h.set("x-purge-inmemory-recording", Boolean.toString(true));
})
.post()
.uri(options.getTestProxy().resolve("/playback/stop"))
.response()
.doOnSuccess(response -> {
testProxyPolicy.setMode(null);
testProxyPolicy.setRecordingId(null);
})
.then();
}
/**
* Runs the cleanup logic after an individual thread finishes in the performance test.
* @return An empty {@link Mono}
*/
public Mono<Void> cleanupAsync() {
return Mono.empty();
}
/**
* Runs the cleanup logic after the performance test finishes.
* @return An empty {@link Mono}
*/
public Mono<Void> globalCleanupAsync() {
return Mono.empty();
}
private Mono<Void> startRecordingAsync() {
return recordPlaybackHttpClient
.post()
.uri(options.getTestProxy().resolve("/record/start"))
.response()
.doOnNext(response -> {
recordingId = response.responseHeaders().get("x-recording-id");
})
.then();
}
private Mono<Void> stopRecordingAsync() {
return recordPlaybackHttpClient
.headers(h -> h.set("x-recording-id", recordingId))
.post()
.uri(options.getTestProxy().resolve("/record/stop"))
.response()
.then();
}
private Mono<Void> startPlaybackAsync() {
return recordPlaybackHttpClient
.headers(h -> h.set("x-recording-id", recordingId))
.post()
.uri(options.getTestProxy().resolve("/playback/start"))
.response()
.doOnNext(response -> {
recordingId = response.responseHeaders().get("x-recording-id");
})
.then();
}
} |
seems this constructor is introduced to only be used in the test. we should try to avoid adding code in the main code which is only used in the test. In the test you can either use the other constructor or the better approach is to use mockito to instantiate an instance of `RetryWithException` ```suggestion RetryWithException retryWithException = Mockito.mock(RetryWithException.class) ``` | public void retryWithDefaultTimeouts() {
int defaultInitialDelayInMs = 10;
int defaultSalt = 5;
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(
mockDiagnosticsClientContext(),
OperationType.Create,
ResourceType.Document);
GoneAndRetryWithRetryPolicy goneAndRetryWithRetryPolicy = new GoneAndRetryWithRetryPolicy(request, 30);
RetryWithException retryWithException = new RetryWithException();
Mono<ShouldRetryResult> singleShouldRetry = goneAndRetryWithRetryPolicy.shouldRetry(retryWithException);
ShouldRetryResult shouldRetryResult = singleShouldRetry.block();
assertThat(shouldRetryResult.policyArg.getValue3()).isEqualTo(1);
validateRetryWithTimeRange(defaultInitialDelayInMs, shouldRetryResult, defaultSalt);
singleShouldRetry = goneAndRetryWithRetryPolicy.shouldRetry(retryWithException);
shouldRetryResult = singleShouldRetry.block();
assertThat(shouldRetryResult.policyArg.getValue3()).isEqualTo(2);
defaultInitialDelayInMs = defaultInitialDelayInMs * 2;
validateRetryWithTimeRange(defaultInitialDelayInMs, shouldRetryResult, defaultSalt);
singleShouldRetry = goneAndRetryWithRetryPolicy.shouldRetry(retryWithException);
shouldRetryResult = singleShouldRetry.block();
assertThat(shouldRetryResult.policyArg.getValue3()).isEqualTo(3);
defaultInitialDelayInMs = defaultInitialDelayInMs * 2;
validateRetryWithTimeRange(defaultInitialDelayInMs, shouldRetryResult, defaultSalt);
} | RetryWithException retryWithException = new RetryWithException(); | public void retryWithDefaultTimeouts() {
int defaultInitialDelayInMs = 10;
int defaultSalt = 5;
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(
mockDiagnosticsClientContext(),
OperationType.Create,
ResourceType.Document);
GoneAndRetryWithRetryPolicy goneAndRetryWithRetryPolicy = new GoneAndRetryWithRetryPolicy(request, 30);
RetryWithException retryWithException = Mockito.mock(RetryWithException.class);
Mono<ShouldRetryResult> singleShouldRetry = goneAndRetryWithRetryPolicy.shouldRetry(retryWithException);
ShouldRetryResult shouldRetryResult = singleShouldRetry.block();
assertThat(shouldRetryResult.policyArg.getValue3()).isEqualTo(1);
validateRetryWithTimeRange(defaultInitialDelayInMs, shouldRetryResult, defaultSalt);
singleShouldRetry = goneAndRetryWithRetryPolicy.shouldRetry(retryWithException);
shouldRetryResult = singleShouldRetry.block();
assertThat(shouldRetryResult.policyArg.getValue3()).isEqualTo(2);
defaultInitialDelayInMs = defaultInitialDelayInMs * 2;
validateRetryWithTimeRange(defaultInitialDelayInMs, shouldRetryResult, defaultSalt);
singleShouldRetry = goneAndRetryWithRetryPolicy.shouldRetry(retryWithException);
shouldRetryResult = singleShouldRetry.block();
assertThat(shouldRetryResult.policyArg.getValue3()).isEqualTo(3);
defaultInitialDelayInMs = defaultInitialDelayInMs * 2;
validateRetryWithTimeRange(defaultInitialDelayInMs, shouldRetryResult, defaultSalt);
} | class GoneAndRetryWithRetryPolicyTest {
protected static final int TIMEOUT = 60000;
/**
* Retry with GoneException for read, retried 4 times and verified the returned
* shouldRetryResult. ShouldRetryResult
*/
@Test(groups = { "unit" }, timeOut = TIMEOUT)
public void shouldRetryReadWithGoneException() {
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(
mockDiagnosticsClientContext(),
OperationType.Read,
ResourceType.Document);
GoneAndRetryWithRetryPolicy goneAndRetryWithRetryPolicy = new GoneAndRetryWithRetryPolicy(request, 30);
Mono<ShouldRetryResult> singleShouldRetry = goneAndRetryWithRetryPolicy
.shouldRetry(new GoneException());
ShouldRetryResult shouldRetryResult = singleShouldRetry.block();
assertThat(shouldRetryResult.shouldRetry).isTrue();
assertThat(shouldRetryResult.policyArg.getValue0()).isTrue();
assertThat(shouldRetryResult.policyArg.getValue3()).isEqualTo(1);
assertThat(shouldRetryResult.backOffTime.getSeconds()).isEqualTo(0);
singleShouldRetry = goneAndRetryWithRetryPolicy.shouldRetry(new GoneException());
shouldRetryResult = singleShouldRetry.block();
assertThat(shouldRetryResult.shouldRetry).isTrue();
assertThat(shouldRetryResult.policyArg.getValue0()).isTrue();
assertThat(shouldRetryResult.policyArg.getValue3()).isEqualTo(2);
assertThat(shouldRetryResult.backOffTime.getSeconds()).isEqualTo(1);
singleShouldRetry = goneAndRetryWithRetryPolicy.shouldRetry(new GoneException());
shouldRetryResult = singleShouldRetry.block();
assertThat(shouldRetryResult.shouldRetry).isTrue();
assertThat(shouldRetryResult.policyArg.getValue0()).isTrue();
assertThat(shouldRetryResult.policyArg.getValue3()).isEqualTo(3);
assertThat(shouldRetryResult.backOffTime.getSeconds()).isEqualTo(2);
singleShouldRetry = goneAndRetryWithRetryPolicy.shouldRetry(new GoneException());
shouldRetryResult = singleShouldRetry.block();
assertThat(shouldRetryResult.shouldRetry).isTrue();
assertThat(shouldRetryResult.policyArg.getValue0()).isTrue();
assertThat(shouldRetryResult.policyArg.getValue3()).isEqualTo(4);
assertThat(shouldRetryResult.backOffTime.getSeconds()).isEqualTo(4);
}
/**
* Retry with GoneException for write which is not yet sent to the wire,
* retried 4 times and verified the returned
* shouldRetryResult. ShouldRetryResult
*/
@Test(groups = { "unit" }, timeOut = TIMEOUT)
public void shouldRetryNotYetFlushedWriteWithGoneException() {
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(
mockDiagnosticsClientContext(),
OperationType.Create,
ResourceType.Document);
GoneAndRetryWithRetryPolicy goneAndRetryWithRetryPolicy = new GoneAndRetryWithRetryPolicy(request, 30);
Supplier<GoneException> goneExceptionForNotYetFlushedRequestSupplier = () -> {
GoneException goneExceptionForNotYetFlushedRequest = new GoneException();
BridgeInternal.setSendingRequestStarted(goneExceptionForNotYetFlushedRequest, false);
return goneExceptionForNotYetFlushedRequest;
};
Mono<ShouldRetryResult> singleShouldRetry = goneAndRetryWithRetryPolicy
.shouldRetry(goneExceptionForNotYetFlushedRequestSupplier.get());
ShouldRetryResult shouldRetryResult = singleShouldRetry.block();
assertThat(shouldRetryResult.shouldRetry).isTrue();
assertThat(shouldRetryResult.policyArg.getValue0()).isTrue();
assertThat(shouldRetryResult.policyArg.getValue3()).isEqualTo(1);
assertThat(shouldRetryResult.backOffTime.getSeconds()).isEqualTo(0);
singleShouldRetry = goneAndRetryWithRetryPolicy.shouldRetry(goneExceptionForNotYetFlushedRequestSupplier.get());
shouldRetryResult = singleShouldRetry.block();
assertThat(shouldRetryResult.shouldRetry).isTrue();
assertThat(shouldRetryResult.policyArg.getValue0()).isTrue();
assertThat(shouldRetryResult.policyArg.getValue3()).isEqualTo(2);
assertThat(shouldRetryResult.backOffTime.getSeconds()).isEqualTo(1);
singleShouldRetry = goneAndRetryWithRetryPolicy.shouldRetry(goneExceptionForNotYetFlushedRequestSupplier.get());
shouldRetryResult = singleShouldRetry.block();
assertThat(shouldRetryResult.shouldRetry).isTrue();
assertThat(shouldRetryResult.policyArg.getValue0()).isTrue();
assertThat(shouldRetryResult.policyArg.getValue3()).isEqualTo(3);
assertThat(shouldRetryResult.backOffTime.getSeconds()).isEqualTo(2);
singleShouldRetry = goneAndRetryWithRetryPolicy.shouldRetry(goneExceptionForNotYetFlushedRequestSupplier.get());
shouldRetryResult = singleShouldRetry.block();
assertThat(shouldRetryResult.shouldRetry).isTrue();
assertThat(shouldRetryResult.policyArg.getValue0()).isTrue();
assertThat(shouldRetryResult.policyArg.getValue3()).isEqualTo(4);
assertThat(shouldRetryResult.backOffTime.getSeconds()).isEqualTo(4);
}
/**
* GoneException for write which is already sent to the wire, should not result in retry,
* but an address refresh should be triggered
* shouldRetryResult. ShouldRetryResult
*/
@Test(groups = { "unit" }, timeOut = TIMEOUT)
public void shouldNotRetryFlushedWriteWithGoneExceptionButForceAddressRefresh() {
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(
mockDiagnosticsClientContext(),
OperationType.Create,
ResourceType.Document);
Supplier<GoneException> goneExceptionForFlushedRequestSupplier = () -> {
GoneException goneExceptionForFlushedRequest = new GoneException();
BridgeInternal.setSendingRequestStarted(goneExceptionForFlushedRequest, true);
return goneExceptionForFlushedRequest;
};
GoneAndRetryWithRetryPolicy goneAndRetryWithRetryPolicy = new GoneAndRetryWithRetryPolicy(request, 30);
Mono<ShouldRetryResult> singleShouldRetry = goneAndRetryWithRetryPolicy
.shouldRetry(goneExceptionForFlushedRequestSupplier.get());
ShouldRetryResult shouldRetryResult = singleShouldRetry.block();
assertThat(shouldRetryResult.shouldRetry).isFalse();
assertThat(shouldRetryResult.policyArg).isNotNull();
assertThat(shouldRetryResult.policyArg.getValue0()).isTrue();
assertThat(shouldRetryResult.backOffTime).isNull();
}
/**
* GoneException for write which is already sent to the wire but based on receiving
* an actual response from the Service with 410 Status Code and SubStatusCode 0
* should result in retry
* shouldRetryResult. ShouldRetryResult
*/
@Test(groups = { "unit" }, timeOut = TIMEOUT)
public void shouldRetryFlushedWriteWithGoneExceptionFromService() {
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(
mockDiagnosticsClientContext(),
OperationType.Create,
ResourceType.Document);
GoneAndRetryWithRetryPolicy goneAndRetryWithRetryPolicy =
new GoneAndRetryWithRetryPolicy(request, 30);
Supplier<GoneException> goneExceptionForFlushedRequestSupplier = () -> {
GoneException goneExceptionForFlushedRequest = new GoneException();
BridgeInternal.setSendingRequestStarted(goneExceptionForFlushedRequest, true);
goneExceptionForFlushedRequest.setIsBasedOn410ResponseFromService();
return goneExceptionForFlushedRequest;
};
Mono<ShouldRetryResult> singleShouldRetry = goneAndRetryWithRetryPolicy
.shouldRetry(goneExceptionForFlushedRequestSupplier.get());
ShouldRetryResult shouldRetryResult = singleShouldRetry.block();
assertThat(shouldRetryResult.shouldRetry).isTrue();
assertThat(shouldRetryResult.policyArg.getValue0()).isTrue();
assertThat(shouldRetryResult.policyArg.getValue3()).isEqualTo(1);
assertThat(shouldRetryResult.backOffTime.getSeconds()).isEqualTo(0);
singleShouldRetry = goneAndRetryWithRetryPolicy.shouldRetry(goneExceptionForFlushedRequestSupplier.get());
shouldRetryResult = singleShouldRetry.block();
assertThat(shouldRetryResult.shouldRetry).isTrue();
assertThat(shouldRetryResult.policyArg.getValue0()).isTrue();
assertThat(shouldRetryResult.policyArg.getValue3()).isEqualTo(2);
assertThat(shouldRetryResult.backOffTime.getSeconds()).isEqualTo(1);
singleShouldRetry = goneAndRetryWithRetryPolicy.shouldRetry(goneExceptionForFlushedRequestSupplier.get());
shouldRetryResult = singleShouldRetry.block();
assertThat(shouldRetryResult.shouldRetry).isTrue();
assertThat(shouldRetryResult.policyArg.getValue0()).isTrue();
assertThat(shouldRetryResult.policyArg.getValue3()).isEqualTo(3);
assertThat(shouldRetryResult.backOffTime.getSeconds()).isEqualTo(2);
singleShouldRetry = goneAndRetryWithRetryPolicy.shouldRetry(goneExceptionForFlushedRequestSupplier.get());
shouldRetryResult = singleShouldRetry.block();
assertThat(shouldRetryResult.shouldRetry).isTrue();
assertThat(shouldRetryResult.policyArg.getValue0()).isTrue();
assertThat(shouldRetryResult.policyArg.getValue3()).isEqualTo(4);
assertThat(shouldRetryResult.backOffTime.getSeconds()).isEqualTo(4);
}
/**
* RequestTimeoutExceptions should not be retried for read or write - no address cache refresh expected
* shouldRetryResult. ShouldRetryResult
*/
@Test(groups = { "unit" }, timeOut = TIMEOUT)
public void shouldNotRetryRequestTimeoutException() {
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(
mockDiagnosticsClientContext(),
OperationType.Read,
ResourceType.Document);
GoneAndRetryWithRetryPolicy goneAndRetryWithRetryPolicy = new GoneAndRetryWithRetryPolicy(request, 30);
Mono<ShouldRetryResult> singleShouldRetry = goneAndRetryWithRetryPolicy
.shouldRetry(new RequestTimeoutException());
ShouldRetryResult shouldRetryResult = singleShouldRetry.block();
assertThat(shouldRetryResult.shouldRetry).isFalse();
assertThat(shouldRetryResult.policyArg).isNull();
assertThat(shouldRetryResult.backOffTime).isNull();
request = RxDocumentServiceRequest.create(
mockDiagnosticsClientContext(),
OperationType.Create,
ResourceType.Document);
goneAndRetryWithRetryPolicy = new GoneAndRetryWithRetryPolicy(request, 30);
singleShouldRetry = goneAndRetryWithRetryPolicy
.shouldRetry(new RequestTimeoutException());
shouldRetryResult = singleShouldRetry.block();
assertThat(shouldRetryResult.shouldRetry).isFalse();
assertThat(shouldRetryResult.policyArg).isNull();
assertThat(shouldRetryResult.backOffTime).isNull();
}
/**
* Retry with PartitionIsMigratingException
*/
@Test(groups = { "unit" }, timeOut = TIMEOUT)
public void shouldRetryWithPartitionIsMigratingException() {
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(
mockDiagnosticsClientContext(),
OperationType.Read,
ResourceType.Document);
GoneAndRetryWithRetryPolicy goneAndRetryWithRetryPolicy = new GoneAndRetryWithRetryPolicy(request, 30);
Mono<ShouldRetryResult> singleShouldRetry = goneAndRetryWithRetryPolicy
.shouldRetry(new PartitionIsMigratingException());
ShouldRetryResult shouldRetryResult = singleShouldRetry.block();
assertThat(shouldRetryResult.shouldRetry).isTrue();
assertThat(request.forceCollectionRoutingMapRefresh).isTrue();
assertThat(shouldRetryResult.policyArg.getValue0()).isTrue();
}
/**
* Retry with InvalidPartitionException
*/
@Test(groups = { "unit" }, timeOut = TIMEOUT)
public void shouldRetryWithInvalidPartitionException() {
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(
mockDiagnosticsClientContext(),
OperationType.Read,
ResourceType.Document);
GoneAndRetryWithRetryPolicy goneAndRetryWithRetryPolicy = new GoneAndRetryWithRetryPolicy(request, 30);
Mono<ShouldRetryResult> singleShouldRetry = goneAndRetryWithRetryPolicy
.shouldRetry(new InvalidPartitionException());
ShouldRetryResult shouldRetryResult = singleShouldRetry.block();
assertThat(shouldRetryResult.shouldRetry).isTrue();
assertThat(request.requestContext.quorumSelectedLSN).isEqualTo(-1);
assertThat(request.requestContext.resolvedPartitionKeyRange).isNull();
assertThat(request.requestContext.globalCommittedSelectedLSN).isEqualTo(-1);
assertThat(shouldRetryResult.policyArg.getValue0()).isFalse();
goneAndRetryWithRetryPolicy.shouldRetry(new InvalidPartitionException()).block();
shouldRetryResult = goneAndRetryWithRetryPolicy.shouldRetry(new InvalidPartitionException()).block();
assertThat(shouldRetryResult.shouldRetry).isFalse();
CosmosException clientException = (CosmosException) shouldRetryResult.exception;
assertThat(clientException.getStatusCode()).isEqualTo(HttpConstants.StatusCodes.SERVICE_UNAVAILABLE);
}
/**
* Retry with PartitionKeyRangeIsSplittingException
*/
@Test(groups = { "unit" }, timeOut = TIMEOUT)
public void shouldRetryWithPartitionKeyRangeIsSplittingException() {
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(
mockDiagnosticsClientContext(),
OperationType.Read,
ResourceType.Document);
GoneAndRetryWithRetryPolicy goneAndRetryWithRetryPolicy = new GoneAndRetryWithRetryPolicy(request, 30);
Mono<ShouldRetryResult> singleShouldRetry = goneAndRetryWithRetryPolicy
.shouldRetry(new PartitionKeyRangeIsSplittingException());
ShouldRetryResult shouldRetryResult = singleShouldRetry.block();
assertThat(shouldRetryResult.shouldRetry).isTrue();
assertThat(request.forcePartitionKeyRangeRefresh).isTrue();
assertThat(request.requestContext.resolvedPartitionKeyRange).isNull();
assertThat(request.requestContext.quorumSelectedLSN).isEqualTo(-1);
assertThat(shouldRetryResult.policyArg.getValue0()).isFalse();
}
/**
* No retry on bad request exception
*/
@Test(groups = { "unit" }, timeOut = TIMEOUT)
public void shouldRetryWithGenericException() {
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(
mockDiagnosticsClientContext(),
OperationType.Read,
ResourceType.Document);
GoneAndRetryWithRetryPolicy goneAndRetryWithRetryPolicy = new GoneAndRetryWithRetryPolicy(request, 30);
Mono<ShouldRetryResult> singleShouldRetry = goneAndRetryWithRetryPolicy
.shouldRetry(new BadRequestException());
ShouldRetryResult shouldRetryResult = singleShouldRetry.block();
assertThat(shouldRetryResult.shouldRetry).isFalse();
}
/**
* Test for custom retryWith values
*/
@Test(groups = { "unit" }, timeOut = TIMEOUT)
private static void validateRetryWithTimeRange(
int expectedDelayInMs,
ShouldRetryResult retryResult,
Integer saltValueInMs) {
assertThat(retryResult.shouldRetry).isTrue();
assertThat(retryResult.backOffTime.toMillis() >= 0).isTrue();
assertThat(retryResult.backOffTime.toMillis() > expectedDelayInMs - saltValueInMs).isTrue();
assertThat(retryResult.backOffTime.toMillis() < expectedDelayInMs + saltValueInMs).isTrue();
}
} | class GoneAndRetryWithRetryPolicyTest {
protected static final int TIMEOUT = 60000;
/**
* Retry with GoneException for read, retried 4 times and verified the returned
* shouldRetryResult. ShouldRetryResult
*/
@Test(groups = { "unit" }, timeOut = TIMEOUT)
public void shouldRetryReadWithGoneException() {
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(
mockDiagnosticsClientContext(),
OperationType.Read,
ResourceType.Document);
GoneAndRetryWithRetryPolicy goneAndRetryWithRetryPolicy = new GoneAndRetryWithRetryPolicy(request, 30);
Mono<ShouldRetryResult> singleShouldRetry = goneAndRetryWithRetryPolicy
.shouldRetry(new GoneException());
ShouldRetryResult shouldRetryResult = singleShouldRetry.block();
assertThat(shouldRetryResult.shouldRetry).isTrue();
assertThat(shouldRetryResult.policyArg.getValue0()).isTrue();
assertThat(shouldRetryResult.policyArg.getValue3()).isEqualTo(1);
assertThat(shouldRetryResult.backOffTime.getSeconds()).isEqualTo(0);
singleShouldRetry = goneAndRetryWithRetryPolicy.shouldRetry(new GoneException());
shouldRetryResult = singleShouldRetry.block();
assertThat(shouldRetryResult.shouldRetry).isTrue();
assertThat(shouldRetryResult.policyArg.getValue0()).isTrue();
assertThat(shouldRetryResult.policyArg.getValue3()).isEqualTo(2);
assertThat(shouldRetryResult.backOffTime.getSeconds()).isEqualTo(1);
singleShouldRetry = goneAndRetryWithRetryPolicy.shouldRetry(new GoneException());
shouldRetryResult = singleShouldRetry.block();
assertThat(shouldRetryResult.shouldRetry).isTrue();
assertThat(shouldRetryResult.policyArg.getValue0()).isTrue();
assertThat(shouldRetryResult.policyArg.getValue3()).isEqualTo(3);
assertThat(shouldRetryResult.backOffTime.getSeconds()).isEqualTo(2);
singleShouldRetry = goneAndRetryWithRetryPolicy.shouldRetry(new GoneException());
shouldRetryResult = singleShouldRetry.block();
assertThat(shouldRetryResult.shouldRetry).isTrue();
assertThat(shouldRetryResult.policyArg.getValue0()).isTrue();
assertThat(shouldRetryResult.policyArg.getValue3()).isEqualTo(4);
assertThat(shouldRetryResult.backOffTime.getSeconds()).isEqualTo(4);
}
/**
* Retry with GoneException for write which is not yet sent to the wire,
* retried 4 times and verified the returned
* shouldRetryResult. ShouldRetryResult
*/
@Test(groups = { "unit" }, timeOut = TIMEOUT)
public void shouldRetryNotYetFlushedWriteWithGoneException() {
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(
mockDiagnosticsClientContext(),
OperationType.Create,
ResourceType.Document);
GoneAndRetryWithRetryPolicy goneAndRetryWithRetryPolicy = new GoneAndRetryWithRetryPolicy(request, 30);
Supplier<GoneException> goneExceptionForNotYetFlushedRequestSupplier = () -> {
GoneException goneExceptionForNotYetFlushedRequest = new GoneException();
BridgeInternal.setSendingRequestStarted(goneExceptionForNotYetFlushedRequest, false);
return goneExceptionForNotYetFlushedRequest;
};
Mono<ShouldRetryResult> singleShouldRetry = goneAndRetryWithRetryPolicy
.shouldRetry(goneExceptionForNotYetFlushedRequestSupplier.get());
ShouldRetryResult shouldRetryResult = singleShouldRetry.block();
assertThat(shouldRetryResult.shouldRetry).isTrue();
assertThat(shouldRetryResult.policyArg.getValue0()).isTrue();
assertThat(shouldRetryResult.policyArg.getValue3()).isEqualTo(1);
assertThat(shouldRetryResult.backOffTime.getSeconds()).isEqualTo(0);
singleShouldRetry = goneAndRetryWithRetryPolicy.shouldRetry(goneExceptionForNotYetFlushedRequestSupplier.get());
shouldRetryResult = singleShouldRetry.block();
assertThat(shouldRetryResult.shouldRetry).isTrue();
assertThat(shouldRetryResult.policyArg.getValue0()).isTrue();
assertThat(shouldRetryResult.policyArg.getValue3()).isEqualTo(2);
assertThat(shouldRetryResult.backOffTime.getSeconds()).isEqualTo(1);
singleShouldRetry = goneAndRetryWithRetryPolicy.shouldRetry(goneExceptionForNotYetFlushedRequestSupplier.get());
shouldRetryResult = singleShouldRetry.block();
assertThat(shouldRetryResult.shouldRetry).isTrue();
assertThat(shouldRetryResult.policyArg.getValue0()).isTrue();
assertThat(shouldRetryResult.policyArg.getValue3()).isEqualTo(3);
assertThat(shouldRetryResult.backOffTime.getSeconds()).isEqualTo(2);
singleShouldRetry = goneAndRetryWithRetryPolicy.shouldRetry(goneExceptionForNotYetFlushedRequestSupplier.get());
shouldRetryResult = singleShouldRetry.block();
assertThat(shouldRetryResult.shouldRetry).isTrue();
assertThat(shouldRetryResult.policyArg.getValue0()).isTrue();
assertThat(shouldRetryResult.policyArg.getValue3()).isEqualTo(4);
assertThat(shouldRetryResult.backOffTime.getSeconds()).isEqualTo(4);
}
/**
* GoneException for write which is already sent to the wire, should not result in retry,
* but an address refresh should be triggered
* shouldRetryResult. ShouldRetryResult
*/
@Test(groups = { "unit" }, timeOut = TIMEOUT)
public void shouldNotRetryFlushedWriteWithGoneExceptionButForceAddressRefresh() {
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(
mockDiagnosticsClientContext(),
OperationType.Create,
ResourceType.Document);
Supplier<GoneException> goneExceptionForFlushedRequestSupplier = () -> {
GoneException goneExceptionForFlushedRequest = new GoneException();
BridgeInternal.setSendingRequestStarted(goneExceptionForFlushedRequest, true);
return goneExceptionForFlushedRequest;
};
GoneAndRetryWithRetryPolicy goneAndRetryWithRetryPolicy = new GoneAndRetryWithRetryPolicy(request, 30);
Mono<ShouldRetryResult> singleShouldRetry = goneAndRetryWithRetryPolicy
.shouldRetry(goneExceptionForFlushedRequestSupplier.get());
ShouldRetryResult shouldRetryResult = singleShouldRetry.block();
assertThat(shouldRetryResult.shouldRetry).isFalse();
assertThat(shouldRetryResult.policyArg).isNotNull();
assertThat(shouldRetryResult.policyArg.getValue0()).isTrue();
assertThat(shouldRetryResult.backOffTime).isNull();
}
/**
* GoneException for write which is already sent to the wire but based on receiving
* an actual response from the Service with 410 Status Code and SubStatusCode 0
* should result in retry
* shouldRetryResult. ShouldRetryResult
*/
@Test(groups = { "unit" }, timeOut = TIMEOUT)
public void shouldRetryFlushedWriteWithGoneExceptionFromService() {
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(
mockDiagnosticsClientContext(),
OperationType.Create,
ResourceType.Document);
GoneAndRetryWithRetryPolicy goneAndRetryWithRetryPolicy =
new GoneAndRetryWithRetryPolicy(request, 30);
Supplier<GoneException> goneExceptionForFlushedRequestSupplier = () -> {
GoneException goneExceptionForFlushedRequest = new GoneException();
BridgeInternal.setSendingRequestStarted(goneExceptionForFlushedRequest, true);
goneExceptionForFlushedRequest.setIsBasedOn410ResponseFromService();
return goneExceptionForFlushedRequest;
};
Mono<ShouldRetryResult> singleShouldRetry = goneAndRetryWithRetryPolicy
.shouldRetry(goneExceptionForFlushedRequestSupplier.get());
ShouldRetryResult shouldRetryResult = singleShouldRetry.block();
assertThat(shouldRetryResult.shouldRetry).isTrue();
assertThat(shouldRetryResult.policyArg.getValue0()).isTrue();
assertThat(shouldRetryResult.policyArg.getValue3()).isEqualTo(1);
assertThat(shouldRetryResult.backOffTime.getSeconds()).isEqualTo(0);
singleShouldRetry = goneAndRetryWithRetryPolicy.shouldRetry(goneExceptionForFlushedRequestSupplier.get());
shouldRetryResult = singleShouldRetry.block();
assertThat(shouldRetryResult.shouldRetry).isTrue();
assertThat(shouldRetryResult.policyArg.getValue0()).isTrue();
assertThat(shouldRetryResult.policyArg.getValue3()).isEqualTo(2);
assertThat(shouldRetryResult.backOffTime.getSeconds()).isEqualTo(1);
singleShouldRetry = goneAndRetryWithRetryPolicy.shouldRetry(goneExceptionForFlushedRequestSupplier.get());
shouldRetryResult = singleShouldRetry.block();
assertThat(shouldRetryResult.shouldRetry).isTrue();
assertThat(shouldRetryResult.policyArg.getValue0()).isTrue();
assertThat(shouldRetryResult.policyArg.getValue3()).isEqualTo(3);
assertThat(shouldRetryResult.backOffTime.getSeconds()).isEqualTo(2);
singleShouldRetry = goneAndRetryWithRetryPolicy.shouldRetry(goneExceptionForFlushedRequestSupplier.get());
shouldRetryResult = singleShouldRetry.block();
assertThat(shouldRetryResult.shouldRetry).isTrue();
assertThat(shouldRetryResult.policyArg.getValue0()).isTrue();
assertThat(shouldRetryResult.policyArg.getValue3()).isEqualTo(4);
assertThat(shouldRetryResult.backOffTime.getSeconds()).isEqualTo(4);
}
/**
* RequestTimeoutExceptions should not be retried for read or write - no address cache refresh expected
* shouldRetryResult. ShouldRetryResult
*/
@Test(groups = { "unit" }, timeOut = TIMEOUT)
public void shouldNotRetryRequestTimeoutException() {
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(
mockDiagnosticsClientContext(),
OperationType.Read,
ResourceType.Document);
GoneAndRetryWithRetryPolicy goneAndRetryWithRetryPolicy = new GoneAndRetryWithRetryPolicy(request, 30);
Mono<ShouldRetryResult> singleShouldRetry = goneAndRetryWithRetryPolicy
.shouldRetry(new RequestTimeoutException());
ShouldRetryResult shouldRetryResult = singleShouldRetry.block();
assertThat(shouldRetryResult.shouldRetry).isFalse();
assertThat(shouldRetryResult.policyArg).isNull();
assertThat(shouldRetryResult.backOffTime).isNull();
request = RxDocumentServiceRequest.create(
mockDiagnosticsClientContext(),
OperationType.Create,
ResourceType.Document);
goneAndRetryWithRetryPolicy = new GoneAndRetryWithRetryPolicy(request, 30);
singleShouldRetry = goneAndRetryWithRetryPolicy
.shouldRetry(new RequestTimeoutException());
shouldRetryResult = singleShouldRetry.block();
assertThat(shouldRetryResult.shouldRetry).isFalse();
assertThat(shouldRetryResult.policyArg).isNull();
assertThat(shouldRetryResult.backOffTime).isNull();
}
/**
* Retry with PartitionIsMigratingException
*/
@Test(groups = { "unit" }, timeOut = TIMEOUT)
public void shouldRetryWithPartitionIsMigratingException() {
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(
mockDiagnosticsClientContext(),
OperationType.Read,
ResourceType.Document);
GoneAndRetryWithRetryPolicy goneAndRetryWithRetryPolicy = new GoneAndRetryWithRetryPolicy(request, 30);
Mono<ShouldRetryResult> singleShouldRetry = goneAndRetryWithRetryPolicy
.shouldRetry(new PartitionIsMigratingException());
ShouldRetryResult shouldRetryResult = singleShouldRetry.block();
assertThat(shouldRetryResult.shouldRetry).isTrue();
assertThat(request.forceCollectionRoutingMapRefresh).isTrue();
assertThat(shouldRetryResult.policyArg.getValue0()).isTrue();
}
/**
* Retry with InvalidPartitionException
*/
@Test(groups = { "unit" }, timeOut = TIMEOUT)
public void shouldRetryWithInvalidPartitionException() {
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(
mockDiagnosticsClientContext(),
OperationType.Read,
ResourceType.Document);
GoneAndRetryWithRetryPolicy goneAndRetryWithRetryPolicy = new GoneAndRetryWithRetryPolicy(request, 30);
Mono<ShouldRetryResult> singleShouldRetry = goneAndRetryWithRetryPolicy
.shouldRetry(new InvalidPartitionException());
ShouldRetryResult shouldRetryResult = singleShouldRetry.block();
assertThat(shouldRetryResult.shouldRetry).isTrue();
assertThat(request.requestContext.quorumSelectedLSN).isEqualTo(-1);
assertThat(request.requestContext.resolvedPartitionKeyRange).isNull();
assertThat(request.requestContext.globalCommittedSelectedLSN).isEqualTo(-1);
assertThat(shouldRetryResult.policyArg.getValue0()).isFalse();
goneAndRetryWithRetryPolicy.shouldRetry(new InvalidPartitionException()).block();
shouldRetryResult = goneAndRetryWithRetryPolicy.shouldRetry(new InvalidPartitionException()).block();
assertThat(shouldRetryResult.shouldRetry).isFalse();
CosmosException clientException = (CosmosException) shouldRetryResult.exception;
assertThat(clientException.getStatusCode()).isEqualTo(HttpConstants.StatusCodes.SERVICE_UNAVAILABLE);
}
/**
* Retry with PartitionKeyRangeIsSplittingException
*/
@Test(groups = { "unit" }, timeOut = TIMEOUT)
public void shouldRetryWithPartitionKeyRangeIsSplittingException() {
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(
mockDiagnosticsClientContext(),
OperationType.Read,
ResourceType.Document);
GoneAndRetryWithRetryPolicy goneAndRetryWithRetryPolicy = new GoneAndRetryWithRetryPolicy(request, 30);
Mono<ShouldRetryResult> singleShouldRetry = goneAndRetryWithRetryPolicy
.shouldRetry(new PartitionKeyRangeIsSplittingException());
ShouldRetryResult shouldRetryResult = singleShouldRetry.block();
assertThat(shouldRetryResult.shouldRetry).isTrue();
assertThat(request.forcePartitionKeyRangeRefresh).isTrue();
assertThat(request.requestContext.resolvedPartitionKeyRange).isNull();
assertThat(request.requestContext.quorumSelectedLSN).isEqualTo(-1);
assertThat(shouldRetryResult.policyArg.getValue0()).isFalse();
}
/**
* No retry on bad request exception
*/
@Test(groups = { "unit" }, timeOut = TIMEOUT)
public void shouldRetryWithGenericException() {
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(
mockDiagnosticsClientContext(),
OperationType.Read,
ResourceType.Document);
GoneAndRetryWithRetryPolicy goneAndRetryWithRetryPolicy = new GoneAndRetryWithRetryPolicy(request, 30);
Mono<ShouldRetryResult> singleShouldRetry = goneAndRetryWithRetryPolicy
.shouldRetry(new BadRequestException());
ShouldRetryResult shouldRetryResult = singleShouldRetry.block();
assertThat(shouldRetryResult.shouldRetry).isFalse();
}
/**
* Test for custom retryWith values
*/
@Test(groups = { "unit" }, timeOut = TIMEOUT)
private static void validateRetryWithTimeRange(
int expectedDelayInMs,
ShouldRetryResult retryResult,
Integer saltValueInMs) {
assertThat(retryResult.shouldRetry).isTrue();
assertThat(retryResult.backOffTime.toMillis() >= 0).isTrue();
assertThat(retryResult.backOffTime.toMillis() > expectedDelayInMs - saltValueInMs).isTrue();
assertThat(retryResult.backOffTime.toMillis() < expectedDelayInMs + saltValueInMs).isTrue();
}
/**
* After waitTimeInSeconds exhausted, retryWithException will not be retried.
*/
@Test(groups = { "unit" }, timeOut = TIMEOUT)
public void shouldRetryWithRetryWithException() {
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(
mockDiagnosticsClientContext(),
OperationType.Read,
ResourceType.Document);
GoneAndRetryWithRetryPolicy goneAndRetryWithRetryPolicy = new GoneAndRetryWithRetryPolicy(request, 1);
ShouldRetryResult shouldRetryResult = Mono.delay(Duration.ofSeconds(1))
.flatMap(t -> goneAndRetryWithRetryPolicy.shouldRetry(new RetryWithException("Test", null, null)))
.block();
assertThat(shouldRetryResult.shouldRetry).isFalse();
assertThat(shouldRetryResult.nonRelatedException).isFalse();
}
} |
We should instead catch NumberFormatException when Long.parseLong throws and return null and not depend on `n/a` equality. Any non-numeric value should result in null duration. | public Duration getChannelLatency() {
if ("n/a".equals(this.channelLatency)) {
return null;
}
return Duration.ofMillis(Long.parseLong(this.channelLatency));
} | return Duration.ofMillis(Long.parseLong(this.channelLatency)); | public Duration getChannelLatency() {
if ("n/a".equals(this.channelLatencyMs)) {
return null;
}
Long channelLatencyMsLong;
try {
channelLatencyMsLong = Long.parseLong(this.channelLatencyMs);
} catch (NumberFormatException ex) {
LOGGER.logExceptionAsError(ex);
return null;
}
return Duration.ofMillis(channelLatencyMsLong);
} | class MediaLiveEventChannelArchiveHeartbeatEventData {
/*
* Gets the channel latency in ms.
*/
@JsonProperty(value = "channelLatencyMs", required = true, access = JsonProperty.Access.WRITE_ONLY)
private String channelLatency;
/*
* Gets the latency result code.
*/
@JsonProperty(value = "latencyResultCode", required = true, access = JsonProperty.Access.WRITE_ONLY)
private String latencyResultCode;
/**
* Get the channelLatencyMs property: Gets the channel latency in ms.
*
* @return the channelLatencyMs value.
*/
/**
* Get the latencyResultCode property: Gets the latency result code.
*
* @return the latencyResultCode value.
*/
public String getLatencyResultCode() {
return this.latencyResultCode;
}
} | class MediaLiveEventChannelArchiveHeartbeatEventData {
static final ClientLogger LOGGER = new ClientLogger(MediaLiveEventChannelArchiveHeartbeatEventData.class);
/*
* Gets the channel latency in ms.
*/
@JsonProperty(value = "channelLatencyMs", required = true, access = JsonProperty.Access.WRITE_ONLY)
private String channelLatencyMs;
/*
* Gets the latency result code.
*/
@JsonProperty(value = "latencyResultCode", required = true, access = JsonProperty.Access.WRITE_ONLY)
private String latencyResultCode;
/**
* Gets the duration of channel latency.
*
* @return the duration of channel latency.
*/
/**
* Get the latencyResultCode property: Gets the latency result code.
*
* @return the latencyResultCode value.
*/
public String getLatencyResultCode() {
return this.latencyResultCode;
}
} |
Same here, instead of relying on `n/a` equality check, catch NumberFormatException when you parseInt and return null. | public Integer getIngestDriftValue() {
if ("n/a".equals(this.ingestDriftValue)) {
return null;
}
return Integer.parseInt(this.ingestDriftValue);
} | return Integer.parseInt(this.ingestDriftValue); | public Integer getIngestDriftValue() {
if ("n/a".equals(this.ingestDriftValue)) {
return null;
}
try {
return Integer.parseInt(this.ingestDriftValue);
} catch (NumberFormatException ex) {
LOGGER.logExceptionAsError(ex);
return null;
}
} | class MediaLiveEventIngestHeartbeatEventData {
/*
* Gets the type of the track (Audio / Video).
*/
@JsonProperty(value = "trackType", access = JsonProperty.Access.WRITE_ONLY)
private String trackType;
/*
* Gets the track name.
*/
@JsonProperty(value = "trackName", access = JsonProperty.Access.WRITE_ONLY)
private String trackName;
/*
* Gets the Live Transcription language.
*/
@JsonProperty(value = "transcriptionLanguage", access = JsonProperty.Access.WRITE_ONLY)
private String transcriptionLanguage;
/*
* Gets the Live Transcription state.
*/
@JsonProperty(value = "transcriptionState", access = JsonProperty.Access.WRITE_ONLY)
private String transcriptionState;
/*
* Gets the bitrate of the track.
*/
@JsonProperty(value = "bitrate", access = JsonProperty.Access.WRITE_ONLY)
private Long bitrate;
/*
* Gets the incoming bitrate.
*/
@JsonProperty(value = "incomingBitrate", access = JsonProperty.Access.WRITE_ONLY)
private Long incomingBitrate;
/*
* Gets the track ingest drift value.
*/
@JsonProperty(value = "ingestDriftValue", access = JsonProperty.Access.WRITE_ONLY)
private String ingestDriftValue;
/*
* Gets the arrival UTC time of the last fragment.
*/
@JsonProperty(value = "lastFragmentArrivalTime", access = JsonProperty.Access.WRITE_ONLY)
private String lastFragmentArrivalTime;
/*
* Gets the last timestamp.
*/
@JsonProperty(value = "lastTimestamp", access = JsonProperty.Access.WRITE_ONLY)
private String lastTimestamp;
/*
* Gets the timescale of the last timestamp.
*/
@JsonProperty(value = "timescale", access = JsonProperty.Access.WRITE_ONLY)
private String timescale;
/*
* Gets the fragment Overlap count.
*/
@JsonProperty(value = "overlapCount", access = JsonProperty.Access.WRITE_ONLY)
private Long overlapCount;
/*
* Gets the fragment Discontinuity count.
*/
@JsonProperty(value = "discontinuityCount", access = JsonProperty.Access.WRITE_ONLY)
private Long discontinuityCount;
/*
* Gets Non increasing count.
*/
@JsonProperty(value = "nonincreasingCount", access = JsonProperty.Access.WRITE_ONLY)
private Long nonincreasingCount;
/*
* Gets a value indicating whether unexpected bitrate is present or not.
*/
@JsonProperty(value = "unexpectedBitrate", access = JsonProperty.Access.WRITE_ONLY)
private Boolean unexpectedBitrate;
/*
* Gets the state of the live event.
*/
@JsonProperty(value = "state", access = JsonProperty.Access.WRITE_ONLY)
private String state;
/*
* Gets a value indicating whether preview is healthy or not.
*/
@JsonProperty(value = "healthy", access = JsonProperty.Access.WRITE_ONLY)
private Boolean healthy;
/**
* Get the trackType property: Gets the type of the track (Audio / Video).
*
* @return the trackType value.
*/
public String getTrackType() {
return this.trackType;
}
/**
* Get the trackName property: Gets the track name.
*
* @return the trackName value.
*/
public String getTrackName() {
return this.trackName;
}
/**
* Get the transcriptionLanguage property: Gets the Live Transcription language.
*
* @return the transcriptionLanguage value.
*/
public String getTranscriptionLanguage() {
return this.transcriptionLanguage;
}
/**
* Get the transcriptionState property: Gets the Live Transcription state.
*
* @return the transcriptionState value.
*/
public String getTranscriptionState() {
return this.transcriptionState;
}
/**
* Get the bitrate property: Gets the bitrate of the track.
*
* @return the bitrate value.
*/
public Long getBitrate() {
return this.bitrate;
}
/**
* Get the incomingBitrate property: Gets the incoming bitrate.
*
* @return the incomingBitrate value.
*/
public Long getIncomingBitrate() {
return this.incomingBitrate;
}
/**
* Get the ingestDriftValue property: Gets the track ingest drift value.
*
* @return the ingestDriftValue value.
*/
/**
* Get the lastFragmentArrivalTime property: Gets the arrival UTC time of the last fragment.
*
* @return the lastFragmentArrivalTime value.
*/
public OffsetDateTime getLastFragmentArrivalTime() {
return OffsetDateTime.parse(this.lastFragmentArrivalTime);
}
/**
* Get the lastTimestamp property: Gets the last timestamp.
*
* @return the lastTimestamp value.
*/
public String getLastTimestamp() {
return this.lastTimestamp;
}
/**
* Get the timescale property: Gets the timescale of the last timestamp.
*
* @return the timescale value.
*/
public String getTimescale() {
return this.timescale;
}
/**
* Get the overlapCount property: Gets the fragment Overlap count.
*
* @return the overlapCount value.
*/
public Long getOverlapCount() {
return this.overlapCount;
}
/**
* Get the discontinuityCount property: Gets the fragment Discontinuity count.
*
* @return the discontinuityCount value.
*/
public Long getDiscontinuityCount() {
return this.discontinuityCount;
}
/**
* Get the nonincreasingCount property: Gets Non increasing count.
*
* @return the nonincreasingCount value.
*/
public Long getNonincreasingCount() {
return this.nonincreasingCount;
}
/**
* Get the unexpectedBitrate property: Gets a value indicating whether unexpected bitrate is present or not.
*
* @return the unexpectedBitrate value.
*/
public Boolean isUnexpectedBitrate() {
return this.unexpectedBitrate;
}
/**
* Get the state property: Gets the state of the live event.
*
* @return the state value.
*/
public String getState() {
return this.state;
}
/**
* Get the healthy property: Gets a value indicating whether preview is healthy or not.
*
* @return the healthy value.
*/
public Boolean isHealthy() {
return this.healthy;
}
} | class MediaLiveEventIngestHeartbeatEventData {
static final ClientLogger LOGGER = new ClientLogger(MediaLiveEventIngestHeartbeatEventData.class);
/*
* Gets the type of the track (Audio / Video).
*/
@JsonProperty(value = "trackType", access = JsonProperty.Access.WRITE_ONLY)
private String trackType;
/*
* Gets the track name.
*/
@JsonProperty(value = "trackName", access = JsonProperty.Access.WRITE_ONLY)
private String trackName;
/*
* Gets the Live Transcription language.
*/
@JsonProperty(value = "transcriptionLanguage", access = JsonProperty.Access.WRITE_ONLY)
private String transcriptionLanguage;
/*
* Gets the Live Transcription state.
*/
@JsonProperty(value = "transcriptionState", access = JsonProperty.Access.WRITE_ONLY)
private String transcriptionState;
/*
* Gets the bitrate of the track.
*/
@JsonProperty(value = "bitrate", access = JsonProperty.Access.WRITE_ONLY)
private Long bitrate;
/*
* Gets the incoming bitrate.
*/
@JsonProperty(value = "incomingBitrate", access = JsonProperty.Access.WRITE_ONLY)
private Long incomingBitrate;
/*
* Gets the track ingest drift value.
*/
@JsonProperty(value = "ingestDriftValue", access = JsonProperty.Access.WRITE_ONLY)
private String ingestDriftValue;
/*
* Gets the arrival UTC time of the last fragment.
*/
@JsonProperty(value = "lastFragmentArrivalTime", access = JsonProperty.Access.WRITE_ONLY)
private String lastFragmentArrivalTime;
/*
* Gets the last timestamp.
*/
@JsonProperty(value = "lastTimestamp", access = JsonProperty.Access.WRITE_ONLY)
private String lastTimestamp;
/*
* Gets the timescale of the last timestamp.
*/
@JsonProperty(value = "timescale", access = JsonProperty.Access.WRITE_ONLY)
private String timescale;
/*
* Gets the fragment Overlap count.
*/
@JsonProperty(value = "overlapCount", access = JsonProperty.Access.WRITE_ONLY)
private Long overlapCount;
/*
* Gets the fragment Discontinuity count.
*/
@JsonProperty(value = "discontinuityCount", access = JsonProperty.Access.WRITE_ONLY)
private Long discontinuityCount;
/*
* Gets Non increasing count.
*/
@JsonProperty(value = "nonincreasingCount", access = JsonProperty.Access.WRITE_ONLY)
private Long nonincreasingCount;
/*
* Gets a value indicating whether unexpected bitrate is present or not.
*/
@JsonProperty(value = "unexpectedBitrate", access = JsonProperty.Access.WRITE_ONLY)
private Boolean unexpectedBitrate;
/*
* Gets the state of the live event.
*/
@JsonProperty(value = "state", access = JsonProperty.Access.WRITE_ONLY)
private String state;
/*
* Gets a value indicating whether preview is healthy or not.
*/
@JsonProperty(value = "healthy", access = JsonProperty.Access.WRITE_ONLY)
private Boolean healthy;
/**
* Get the trackType property: Gets the type of the track (Audio / Video).
*
* @return the trackType value.
*/
public String getTrackType() {
return this.trackType;
}
/**
* Get the trackName property: Gets the track name.
*
* @return the trackName value.
*/
public String getTrackName() {
return this.trackName;
}
/**
* Get the transcriptionLanguage property: Gets the Live Transcription language.
*
* @return the transcriptionLanguage value.
*/
public String getTranscriptionLanguage() {
return this.transcriptionLanguage;
}
/**
* Get the transcriptionState property: Gets the Live Transcription state.
*
* @return the transcriptionState value.
*/
public String getTranscriptionState() {
return this.transcriptionState;
}
/**
* Get the bitrate property: Gets the bitrate of the track.
*
* @return the bitrate value.
*/
public Long getBitrate() {
return this.bitrate;
}
/**
* Get the incomingBitrate property: Gets the incoming bitrate.
*
* @return the incomingBitrate value.
*/
public Long getIncomingBitrate() {
return this.incomingBitrate;
}
/**
* Get the ingestDriftValue property: Gets the track ingest drift value.
*
* @return the ingestDriftValue value.
*/
/**
* Get the lastFragmentArrivalTime property: Gets the arrival UTC time of the last fragment.
*
* @return the lastFragmentArrivalTime value.
*/
public OffsetDateTime getLastFragmentArrivalTime() {
return OffsetDateTime.parse(this.lastFragmentArrivalTime);
}
/**
* Get the lastTimestamp property: Gets the last timestamp.
*
* @return the lastTimestamp value.
*/
public String getLastTimestamp() {
return this.lastTimestamp;
}
/**
* Get the timescale property: Gets the timescale of the last timestamp.
*
* @return the timescale value.
*/
public String getTimescale() {
return this.timescale;
}
/**
* Get the overlapCount property: Gets the fragment Overlap count.
*
* @return the overlapCount value.
*/
public Long getOverlapCount() {
return this.overlapCount;
}
/**
* Get the discontinuityCount property: Gets the fragment Discontinuity count.
*
* @return the discontinuityCount value.
*/
public Long getDiscontinuityCount() {
return this.discontinuityCount;
}
/**
* Get the nonincreasingCount property: Gets Non increasing count.
*
* @return the nonincreasingCount value.
*/
public Long getNonincreasingCount() {
return this.nonincreasingCount;
}
/**
* Get the unexpectedBitrate property: Gets a value indicating whether unexpected bitrate is present or not.
*
* @return the unexpectedBitrate value.
*/
public Boolean isUnexpectedBitrate() {
return this.unexpectedBitrate;
}
/**
* Get the state property: Gets the state of the live event.
*
* @return the state value.
*/
public String getState() {
return this.state;
}
/**
* Get the healthy property: Gets a value indicating whether preview is healthy or not.
*
* @return the healthy value.
*/
public Boolean isHealthy() {
return this.healthy;
}
} |
Due to bug in azure-core, RECORD fails, hence I cannot add the test. I've tested LIVE, and it is pass. | public void canCRUDSqlServerWithFirewallRule() throws Exception {
String sqlServerAdminName = "sqladmin";
String id = generateRandomUuid();
SqlServer sqlServer =
sqlServerManager
.sqlServers()
.define(sqlServerName)
.withRegion(Region.US_EAST)
.withNewResourceGroup(rgName)
.withAdministratorLogin(sqlServerAdminName)
.withAdministratorPassword(password())
.withActiveDirectoryAdministrator("DSEng", id)
.withoutAccessFromAzureServices()
.defineFirewallRule("somefirewallrule1")
.withIpAddress("0.0.0.1")
.attach()
.withTag("tag1", "value1")
.create();
Assertions.assertEquals(sqlServerAdminName, sqlServer.administratorLogin());
Assertions.assertEquals("v12.0", sqlServer.kind());
Assertions.assertEquals("12.0", sqlServer.version());
sqlServer = sqlServerManager.sqlServers().getByResourceGroup(rgName, sqlServerName);
Assertions.assertEquals(sqlServerAdminName, sqlServer.administratorLogin());
Assertions.assertEquals("v12.0", sqlServer.kind());
Assertions.assertEquals("12.0", sqlServer.version());
SqlActiveDirectoryAdministrator sqlADAdmin = sqlServer.getActiveDirectoryAdministrator();
Assertions.assertNotNull(sqlADAdmin);
Assertions.assertEquals("DSEng", sqlADAdmin.signInName());
Assertions.assertNotNull(sqlADAdmin.id());
Assertions.assertEquals(AdministratorType.ACTIVE_DIRECTORY, sqlADAdmin.administratorType());
sqlADAdmin = sqlServer.setActiveDirectoryAdministrator("DSEngAll", id);
Assertions.assertNotNull(sqlADAdmin);
Assertions.assertEquals("DSEngAll", sqlADAdmin.signInName());
Assertions.assertNotNull(sqlADAdmin.id());
Assertions.assertEquals(AdministratorType.ACTIVE_DIRECTORY, sqlADAdmin.administratorType());
sqlServer.removeActiveDirectoryAdministrator();
final SqlServer finalSqlServer = sqlServer;
validateResourceNotFound(() -> finalSqlServer.getActiveDirectoryAdministrator());
SqlFirewallRule firewallRule =
sqlServerManager.sqlServers().firewallRules().getBySqlServer(rgName, sqlServerName, "somefirewallrule1");
Assertions.assertEquals("0.0.0.1", firewallRule.startIpAddress());
Assertions.assertEquals("0.0.0.1", firewallRule.endIpAddress());
validateResourceNotFound(
() ->
sqlServerManager
.sqlServers()
.firewallRules()
.getBySqlServer(rgName, sqlServerName, "AllowAllWindowsAzureIps"));
sqlServer.enableAccessFromAzureServices();
firewallRule =
sqlServerManager
.sqlServers()
.firewallRules()
.getBySqlServer(rgName, sqlServerName, "AllowAllWindowsAzureIps");
Assertions.assertEquals("0.0.0.0", firewallRule.startIpAddress());
Assertions.assertEquals("0.0.0.0", firewallRule.endIpAddress());
sqlServer.update().defineFirewallRule("newFirewallRule1")
.withIpAddress("0.0.0.2")
.attach()
.apply();
sqlServer.firewallRules().delete("newFirewallRule2");
final SqlServer finalSqlServer1 = sqlServer;
validateResourceNotFound(() -> finalSqlServer1.firewallRules().get("newFirewallRule2"));
firewallRule =
sqlServerManager
.sqlServers()
.firewallRules()
.define("newFirewallRule2")
.withExistingSqlServer(rgName, sqlServerName)
.withIpAddress("0.0.0.3")
.create();
Assertions.assertEquals("0.0.0.3", firewallRule.startIpAddress());
Assertions.assertEquals("0.0.0.3", firewallRule.endIpAddress());
firewallRule = firewallRule.update().withStartIpAddress("0.0.0.1").apply();
Assertions.assertEquals("0.0.0.1", firewallRule.startIpAddress());
Assertions.assertEquals("0.0.0.3", firewallRule.endIpAddress());
sqlServer.firewallRules().delete("somefirewallrule1");
validateResourceNotFound(
() ->
sqlServerManager
.sqlServers()
.firewallRules()
.getBySqlServer(rgName, sqlServerName, "somefirewallrule1"));
firewallRule = sqlServer.firewallRules().define("somefirewallrule2").withIpAddress("0.0.0.4").create();
Assertions.assertEquals("0.0.0.4", firewallRule.startIpAddress());
Assertions.assertEquals("0.0.0.4", firewallRule.endIpAddress());
firewallRule.delete();
} | public void canCRUDSqlServerWithFirewallRule() throws Exception {
String sqlServerAdminName = "sqladmin";
String id = generateRandomUuid();
SqlServer sqlServer =
sqlServerManager
.sqlServers()
.define(sqlServerName)
.withRegion(Region.US_EAST)
.withNewResourceGroup(rgName)
.withAdministratorLogin(sqlServerAdminName)
.withAdministratorPassword(password())
.withActiveDirectoryAdministrator("DSEng", id)
.withoutAccessFromAzureServices()
.defineFirewallRule("somefirewallrule1")
.withIpAddress("0.0.0.1")
.attach()
.withTag("tag1", "value1")
.create();
Assertions.assertEquals(sqlServerAdminName, sqlServer.administratorLogin());
Assertions.assertEquals("v12.0", sqlServer.kind());
Assertions.assertEquals("12.0", sqlServer.version());
sqlServer = sqlServerManager.sqlServers().getByResourceGroup(rgName, sqlServerName);
Assertions.assertEquals(sqlServerAdminName, sqlServer.administratorLogin());
Assertions.assertEquals("v12.0", sqlServer.kind());
Assertions.assertEquals("12.0", sqlServer.version());
SqlActiveDirectoryAdministrator sqlADAdmin = sqlServer.getActiveDirectoryAdministrator();
Assertions.assertNotNull(sqlADAdmin);
Assertions.assertEquals("DSEng", sqlADAdmin.signInName());
Assertions.assertNotNull(sqlADAdmin.id());
Assertions.assertEquals(AdministratorType.ACTIVE_DIRECTORY, sqlADAdmin.administratorType());
sqlADAdmin = sqlServer.setActiveDirectoryAdministrator("DSEngAll", id);
Assertions.assertNotNull(sqlADAdmin);
Assertions.assertEquals("DSEngAll", sqlADAdmin.signInName());
Assertions.assertNotNull(sqlADAdmin.id());
Assertions.assertEquals(AdministratorType.ACTIVE_DIRECTORY, sqlADAdmin.administratorType());
sqlServer.removeActiveDirectoryAdministrator();
final SqlServer finalSqlServer = sqlServer;
validateResourceNotFound(() -> finalSqlServer.getActiveDirectoryAdministrator());
SqlFirewallRule firewallRule =
sqlServerManager.sqlServers().firewallRules().getBySqlServer(rgName, sqlServerName, "somefirewallrule1");
Assertions.assertEquals("0.0.0.1", firewallRule.startIpAddress());
Assertions.assertEquals("0.0.0.1", firewallRule.endIpAddress());
validateResourceNotFound(
() ->
sqlServerManager
.sqlServers()
.firewallRules()
.getBySqlServer(rgName, sqlServerName, "AllowAllWindowsAzureIps"));
sqlServer.enableAccessFromAzureServices();
firewallRule =
sqlServerManager
.sqlServers()
.firewallRules()
.getBySqlServer(rgName, sqlServerName, "AllowAllWindowsAzureIps");
Assertions.assertEquals("0.0.0.0", firewallRule.startIpAddress());
Assertions.assertEquals("0.0.0.0", firewallRule.endIpAddress());
sqlServer.update().defineFirewallRule("newFirewallRule1")
.withIpAddress("0.0.0.2")
.attach()
.apply();
sqlServer.firewallRules().delete("newFirewallRule2");
final SqlServer finalSqlServer1 = sqlServer;
validateResourceNotFound(() -> finalSqlServer1.firewallRules().get("newFirewallRule2"));
firewallRule =
sqlServerManager
.sqlServers()
.firewallRules()
.define("newFirewallRule2")
.withExistingSqlServer(rgName, sqlServerName)
.withIpAddress("0.0.0.3")
.create();
Assertions.assertEquals("0.0.0.3", firewallRule.startIpAddress());
Assertions.assertEquals("0.0.0.3", firewallRule.endIpAddress());
firewallRule = firewallRule.update().withStartIpAddress("0.0.0.1").apply();
Assertions.assertEquals("0.0.0.1", firewallRule.startIpAddress());
Assertions.assertEquals("0.0.0.3", firewallRule.endIpAddress());
sqlServer.firewallRules().delete("somefirewallrule1");
validateResourceNotFound(
() ->
sqlServerManager
.sqlServers()
.firewallRules()
.getBySqlServer(rgName, sqlServerName, "somefirewallrule1"));
firewallRule = sqlServer.firewallRules().define("somefirewallrule2").withIpAddress("0.0.0.4").create();
Assertions.assertEquals("0.0.0.4", firewallRule.startIpAddress());
Assertions.assertEquals("0.0.0.4", firewallRule.endIpAddress());
firewallRule.delete();
} | class SqlServerOperationsTests extends SqlServerTest {
private static final String SQL_DATABASE_NAME = "myTestDatabase2";
private static final String COLLATION = "SQL_Latin1_General_CP1_CI_AS";
private static final String SQL_ELASTIC_POOL_NAME = "testElasticPool";
private static final String SQL_FIREWALLRULE_NAME = "firewallrule1";
private static final String START_IPADDRESS = "10.102.1.10";
private static final String END_IPADDRESS = "10.102.1.12";
@Test
public void canCRUDSqlSyncMember() throws Exception {
final String dbName = "dbSample";
final String dbSyncName = "dbSync";
final String dbMemberName = "dbMember";
final String syncGroupName = "groupName";
final String syncMemberName = "memberName";
final String administratorLogin = "sqladmin";
final String administratorPassword = password();
SqlServer sqlPrimaryServer =
sqlServerManager
.sqlServers()
.define(sqlServerName)
.withRegion(Region.US_EAST)
.withNewResourceGroup(rgName)
.withAdministratorLogin(administratorLogin)
.withAdministratorPassword(administratorPassword)
.defineDatabase(dbName)
.fromSample(SampleName.ADVENTURE_WORKS_LT)
.withStandardEdition(SqlDatabaseStandardServiceObjective.S0)
.attach()
.defineDatabase(dbSyncName)
.withStandardEdition(SqlDatabaseStandardServiceObjective.S0)
.attach()
.defineDatabase(dbMemberName)
.withStandardEdition(SqlDatabaseStandardServiceObjective.S0)
.attach()
.create();
SqlDatabase dbSource = sqlPrimaryServer.databases().get(dbName);
SqlDatabase dbSync = sqlPrimaryServer.databases().get(dbSyncName);
SqlDatabase dbMember = sqlPrimaryServer.databases().get(dbMemberName);
SqlSyncGroup sqlSyncGroup =
dbSync
.syncGroups()
.define(syncGroupName)
.withSyncDatabaseId(dbSource.id())
.withDatabaseUserName(administratorLogin)
.withDatabasePassword(administratorPassword)
.withConflictResolutionPolicyHubWins()
.withInterval(-1)
.create();
Assertions.assertNotNull(sqlSyncGroup);
SqlSyncMember sqlSyncMember =
sqlSyncGroup
.syncMembers()
.define(syncMemberName)
.withMemberSqlDatabase(dbMember)
.withMemberUserName(administratorLogin)
.withMemberPassword(administratorPassword)
.withMemberDatabaseType(SyncMemberDbType.AZURE_SQL_DATABASE)
.withDatabaseType(SyncDirection.ONE_WAY_MEMBER_TO_HUB)
.create();
Assertions.assertNotNull(sqlSyncMember);
sqlSyncMember
.update()
.withDatabaseType(SyncDirection.BIDIRECTIONAL)
.withMemberUserName(administratorLogin)
.withMemberPassword(administratorPassword)
.withMemberDatabaseType(SyncMemberDbType.AZURE_SQL_DATABASE)
.apply();
Assertions.assertFalse(sqlSyncGroup.syncMembers().list().isEmpty());
sqlSyncMember =
sqlServerManager
.sqlServers()
.syncMembers()
.getBySqlServer(rgName, sqlServerName, dbSyncName, syncGroupName, syncMemberName);
Assertions.assertNotNull(sqlSyncMember);
sqlSyncMember.delete();
sqlSyncGroup.delete();
}
@Test
public void canCRUDSqlSyncGroup() throws Exception {
final String dbName = "dbSample";
final String dbSyncName = "dbSync";
final String syncGroupName = "groupName";
final String administratorLogin = "sqladmin";
final String administratorPassword = password();
SqlServer sqlPrimaryServer =
sqlServerManager
.sqlServers()
.define(sqlServerName)
.withRegion(Region.US_EAST)
.withNewResourceGroup(rgName)
.withAdministratorLogin(administratorLogin)
.withAdministratorPassword(administratorPassword)
.defineDatabase(dbName)
.fromSample(SampleName.ADVENTURE_WORKS_LT)
.withStandardEdition(SqlDatabaseStandardServiceObjective.S0)
.attach()
.defineDatabase(dbSyncName)
.withStandardEdition(SqlDatabaseStandardServiceObjective.S0)
.attach()
.create();
SqlDatabase dbSource = sqlPrimaryServer.databases().get(dbName);
SqlDatabase dbSync = sqlPrimaryServer.databases().get(dbSyncName);
SqlSyncGroup sqlSyncGroup =
dbSync
.syncGroups()
.define(syncGroupName)
.withSyncDatabaseId(dbSource.id())
.withDatabaseUserName(administratorLogin)
.withDatabasePassword(administratorPassword)
.withConflictResolutionPolicyHubWins()
.withInterval(-1)
.create();
Assertions.assertNotNull(sqlSyncGroup);
sqlSyncGroup.update().withInterval(600).withConflictResolutionPolicyMemberWins().apply();
Assertions
.assertTrue(
sqlServerManager
.sqlServers()
.syncGroups()
.listSyncDatabaseIds(Region.US_EAST)
.stream()
.findAny()
.isPresent());
Assertions.assertFalse(dbSync.syncGroups().list().isEmpty());
sqlSyncGroup =
sqlServerManager.sqlServers().syncGroups().getBySqlServer(rgName, sqlServerName, dbSyncName, syncGroupName);
Assertions.assertNotNull(sqlSyncGroup);
sqlSyncGroup.delete();
}
@Test
public void canCopySqlDatabase() throws Exception {
final String sqlPrimaryServerName = generateRandomResourceName("sqlpri", 22);
final String sqlSecondaryServerName = generateRandomResourceName("sqlsec", 22);
final String epName = "epSample";
final String dbName = "dbSample";
final String administratorLogin = "sqladmin";
final String administratorPassword = password();
SqlServer sqlPrimaryServer =
sqlServerManager
.sqlServers()
.define(sqlPrimaryServerName)
.withRegion(Region.US_EAST2)
.withNewResourceGroup(rgName)
.withAdministratorLogin(administratorLogin)
.withAdministratorPassword(administratorPassword)
.defineElasticPool(epName)
.withPremiumPool()
.attach()
.defineDatabase(dbName)
.withExistingElasticPool(epName)
.fromSample(SampleName.ADVENTURE_WORKS_LT)
.attach()
.create();
SqlServer sqlSecondaryServer =
sqlServerManager
.sqlServers()
.define(sqlSecondaryServerName)
.withRegion(Region.US_EAST)
.withExistingResourceGroup(rgName)
.withAdministratorLogin(administratorLogin)
.withAdministratorPassword(administratorPassword)
.create();
SqlDatabase dbSample = sqlPrimaryServer.databases().get(dbName);
SqlDatabase dbCopy =
sqlSecondaryServer
.databases()
.define("dbCopy")
.withSourceDatabase(dbSample)
.withMode(CreateMode.COPY)
.withPremiumEdition(SqlDatabasePremiumServiceObjective.P1)
.create();
Assertions.assertNotNull(dbCopy);
}
@Test
public void canCRUDSqlFailoverGroup() throws Exception {
final String sqlPrimaryServerName = generateRandomResourceName("sqlpri", 22);
final String sqlSecondaryServerName = generateRandomResourceName("sqlsec", 22);
final String sqlOtherServerName = generateRandomResourceName("sql000", 22);
final String failoverGroupName = generateRandomResourceName("fg", 22);
final String failoverGroupName2 = generateRandomResourceName("fg2", 22);
final String dbName = "dbSample";
final String administratorLogin = "sqladmin";
final String administratorPassword = password();
SqlServer sqlPrimaryServer =
sqlServerManager
.sqlServers()
.define(sqlPrimaryServerName)
.withRegion(Region.US_EAST)
.withNewResourceGroup(rgName)
.withAdministratorLogin(administratorLogin)
.withAdministratorPassword(administratorPassword)
.defineDatabase(dbName)
.fromSample(SampleName.ADVENTURE_WORKS_LT)
.withStandardEdition(SqlDatabaseStandardServiceObjective.S0)
.attach()
.create();
SqlServer sqlSecondaryServer =
sqlServerManager
.sqlServers()
.define(sqlSecondaryServerName)
.withRegion(Region.US_EAST2)
.withExistingResourceGroup(rgName)
.withAdministratorLogin(administratorLogin)
.withAdministratorPassword(administratorPassword)
.create();
SqlServer sqlOtherServer =
sqlServerManager
.sqlServers()
.define(sqlOtherServerName)
.withRegion(Region.US_SOUTH_CENTRAL)
.withExistingResourceGroup(rgName)
.withAdministratorLogin(administratorLogin)
.withAdministratorPassword(administratorPassword)
.create();
SqlFailoverGroup failoverGroup =
sqlPrimaryServer
.failoverGroups()
.define(failoverGroupName)
.withManualReadWriteEndpointPolicy()
.withPartnerServerId(sqlSecondaryServer.id())
.withReadOnlyEndpointPolicyDisabled()
.create();
Assertions.assertNotNull(failoverGroup);
Assertions.assertEquals(failoverGroupName, failoverGroup.name());
Assertions.assertEquals(rgName, failoverGroup.resourceGroupName());
Assertions.assertEquals(sqlPrimaryServerName, failoverGroup.sqlServerName());
Assertions.assertEquals(FailoverGroupReplicationRole.PRIMARY, failoverGroup.replicationRole());
Assertions.assertEquals(1, failoverGroup.partnerServers().size());
Assertions.assertEquals(sqlSecondaryServer.id(), failoverGroup.partnerServers().get(0).id());
Assertions
.assertEquals(
FailoverGroupReplicationRole.SECONDARY, failoverGroup.partnerServers().get(0).replicationRole());
Assertions.assertEquals(0, failoverGroup.databases().size());
Assertions.assertEquals(0, failoverGroup.readWriteEndpointDataLossGracePeriodMinutes());
Assertions.assertEquals(ReadWriteEndpointFailoverPolicy.MANUAL, failoverGroup.readWriteEndpointPolicy());
Assertions.assertEquals(ReadOnlyEndpointFailoverPolicy.DISABLED, failoverGroup.readOnlyEndpointPolicy());
SqlFailoverGroup failoverGroupOnPartner = sqlSecondaryServer.failoverGroups().get(failoverGroup.name());
Assertions.assertEquals(failoverGroupName, failoverGroupOnPartner.name());
Assertions.assertEquals(rgName, failoverGroupOnPartner.resourceGroupName());
Assertions.assertEquals(sqlSecondaryServerName, failoverGroupOnPartner.sqlServerName());
Assertions.assertEquals(FailoverGroupReplicationRole.SECONDARY, failoverGroupOnPartner.replicationRole());
Assertions.assertEquals(1, failoverGroupOnPartner.partnerServers().size());
Assertions.assertEquals(sqlPrimaryServer.id(), failoverGroupOnPartner.partnerServers().get(0).id());
Assertions
.assertEquals(
FailoverGroupReplicationRole.PRIMARY, failoverGroupOnPartner.partnerServers().get(0).replicationRole());
Assertions.assertEquals(0, failoverGroupOnPartner.databases().size());
Assertions.assertEquals(0, failoverGroupOnPartner.readWriteEndpointDataLossGracePeriodMinutes());
Assertions
.assertEquals(ReadWriteEndpointFailoverPolicy.MANUAL, failoverGroupOnPartner.readWriteEndpointPolicy());
Assertions
.assertEquals(ReadOnlyEndpointFailoverPolicy.DISABLED, failoverGroupOnPartner.readOnlyEndpointPolicy());
SqlFailoverGroup failoverGroup2 =
sqlPrimaryServer
.failoverGroups()
.define(failoverGroupName2)
.withAutomaticReadWriteEndpointPolicyAndDataLossGracePeriod(120)
.withPartnerServerId(sqlOtherServer.id())
.withReadOnlyEndpointPolicyEnabled()
.create();
Assertions.assertNotNull(failoverGroup2);
Assertions.assertEquals(failoverGroupName2, failoverGroup2.name());
Assertions.assertEquals(rgName, failoverGroup2.resourceGroupName());
Assertions.assertEquals(sqlPrimaryServerName, failoverGroup2.sqlServerName());
Assertions.assertEquals(FailoverGroupReplicationRole.PRIMARY, failoverGroup2.replicationRole());
Assertions.assertEquals(1, failoverGroup2.partnerServers().size());
Assertions.assertEquals(sqlOtherServer.id(), failoverGroup2.partnerServers().get(0).id());
Assertions
.assertEquals(
FailoverGroupReplicationRole.SECONDARY, failoverGroup2.partnerServers().get(0).replicationRole());
Assertions.assertEquals(0, failoverGroup2.databases().size());
Assertions.assertEquals(120, failoverGroup2.readWriteEndpointDataLossGracePeriodMinutes());
Assertions.assertEquals(ReadWriteEndpointFailoverPolicy.AUTOMATIC, failoverGroup2.readWriteEndpointPolicy());
Assertions.assertEquals(ReadOnlyEndpointFailoverPolicy.ENABLED, failoverGroup2.readOnlyEndpointPolicy());
failoverGroup
.update()
.withAutomaticReadWriteEndpointPolicyAndDataLossGracePeriod(120)
.withReadOnlyEndpointPolicyEnabled()
.withTag("tag1", "value1")
.apply();
Assertions.assertEquals(120, failoverGroup.readWriteEndpointDataLossGracePeriodMinutes());
Assertions.assertEquals(ReadWriteEndpointFailoverPolicy.AUTOMATIC, failoverGroup.readWriteEndpointPolicy());
Assertions.assertEquals(ReadOnlyEndpointFailoverPolicy.ENABLED, failoverGroup.readOnlyEndpointPolicy());
SqlDatabase db = sqlPrimaryServer.databases().get(dbName);
failoverGroup
.update()
.withManualReadWriteEndpointPolicy()
.withReadOnlyEndpointPolicyDisabled()
.withNewDatabaseId(db.id())
.apply();
Assertions.assertEquals(1, failoverGroup.databases().size());
Assertions.assertEquals(db.id(), failoverGroup.databases().get(0));
Assertions.assertEquals(0, failoverGroup.readWriteEndpointDataLossGracePeriodMinutes());
Assertions.assertEquals(ReadWriteEndpointFailoverPolicy.MANUAL, failoverGroup.readWriteEndpointPolicy());
Assertions.assertEquals(ReadOnlyEndpointFailoverPolicy.DISABLED, failoverGroup.readOnlyEndpointPolicy());
List<SqlFailoverGroup> failoverGroupsList = sqlPrimaryServer.failoverGroups().list();
Assertions.assertEquals(2, failoverGroupsList.size());
failoverGroupsList = sqlSecondaryServer.failoverGroups().list();
Assertions.assertEquals(1, failoverGroupsList.size());
sqlPrimaryServer.failoverGroups().delete(failoverGroup2.name());
}
/**
 * Verifies that automatic-tuning settings can be read and updated at both the
 * server and the database scope, and that desired/actual option states round-trip.
 */
@Test
public void canChangeSqlServerAndDatabaseAutomaticTuning() throws Exception {
    String sqlServerAdminName = "sqladmin";
    String sqlServerAdminPassword = password();
    String databaseName = "db-from-sample";
    // NOTE: the previously declared locals `id` and `storageName` were unused and have been removed.
    // Create a server with a sample database on the Basic tier.
    SqlServer sqlServer =
        sqlServerManager
            .sqlServers()
            .define(sqlServerName)
            .withRegion(Region.US_EAST)
            .withNewResourceGroup(rgName)
            .withAdministratorLogin(sqlServerAdminName)
            .withAdministratorPassword(sqlServerAdminPassword)
            .defineDatabase(databaseName)
            .fromSample(SampleName.ADVENTURE_WORKS_LT)
            .withBasicEdition()
            .attach()
            .create();
    SqlDatabase dbFromSample = sqlServer.databases().get(databaseName);
    Assertions.assertNotNull(dbFromSample);
    Assertions.assertEquals(DatabaseEdition.BASIC, dbFromSample.edition());
    // Server-scope tuning: defaults to AUTO with four tuning options.
    SqlServerAutomaticTuning serverAutomaticTuning = sqlServer.getServerAutomaticTuning();
    Assertions.assertEquals(AutomaticTuningServerMode.AUTO, serverAutomaticTuning.desiredState());
    Assertions.assertEquals(AutomaticTuningServerMode.AUTO, serverAutomaticTuning.actualState());
    Assertions.assertEquals(4, serverAutomaticTuning.tuningOptions().size());
    // Flip individual options and confirm both desired and actual states reflect the change.
    serverAutomaticTuning
        .update()
        .withAutomaticTuningMode(AutomaticTuningServerMode.AUTO)
        .withAutomaticTuningOption("createIndex", AutomaticTuningOptionModeDesired.OFF)
        .withAutomaticTuningOption("dropIndex", AutomaticTuningOptionModeDesired.ON)
        .withAutomaticTuningOption("forceLastGoodPlan", AutomaticTuningOptionModeDesired.DEFAULT)
        .apply();
    Assertions.assertEquals(AutomaticTuningServerMode.AUTO, serverAutomaticTuning.desiredState());
    Assertions.assertEquals(AutomaticTuningServerMode.AUTO, serverAutomaticTuning.actualState());
    Assertions
        .assertEquals(
            AutomaticTuningOptionModeDesired.OFF,
            serverAutomaticTuning.tuningOptions().get("createIndex").desiredState());
    Assertions
        .assertEquals(
            AutomaticTuningOptionModeActual.OFF,
            serverAutomaticTuning.tuningOptions().get("createIndex").actualState());
    Assertions
        .assertEquals(
            AutomaticTuningOptionModeDesired.ON,
            serverAutomaticTuning.tuningOptions().get("dropIndex").desiredState());
    Assertions
        .assertEquals(
            AutomaticTuningOptionModeActual.ON,
            serverAutomaticTuning.tuningOptions().get("dropIndex").actualState());
    Assertions
        .assertEquals(
            AutomaticTuningOptionModeDesired.DEFAULT,
            serverAutomaticTuning.tuningOptions().get("forceLastGoodPlan").desiredState());
    // Database-scope tuning: same option set, updated independently of the server.
    SqlDatabaseAutomaticTuning databaseAutomaticTuning = dbFromSample.getDatabaseAutomaticTuning();
    Assertions.assertEquals(4, databaseAutomaticTuning.tuningOptions().size());
    databaseAutomaticTuning
        .update()
        .withAutomaticTuningMode(AutomaticTuningMode.AUTO)
        .withAutomaticTuningOption("createIndex", AutomaticTuningOptionModeDesired.OFF)
        .withAutomaticTuningOption("dropIndex", AutomaticTuningOptionModeDesired.ON)
        .withAutomaticTuningOption("forceLastGoodPlan", AutomaticTuningOptionModeDesired.DEFAULT)
        .apply();
    Assertions.assertEquals(AutomaticTuningMode.AUTO, databaseAutomaticTuning.desiredState());
    Assertions.assertEquals(AutomaticTuningMode.AUTO, databaseAutomaticTuning.actualState());
    Assertions
        .assertEquals(
            AutomaticTuningOptionModeDesired.OFF,
            databaseAutomaticTuning.tuningOptions().get("createIndex").desiredState());
    Assertions
        .assertEquals(
            AutomaticTuningOptionModeActual.OFF,
            databaseAutomaticTuning.tuningOptions().get("createIndex").actualState());
    Assertions
        .assertEquals(
            AutomaticTuningOptionModeDesired.ON,
            databaseAutomaticTuning.tuningOptions().get("dropIndex").desiredState());
    Assertions
        .assertEquals(
            AutomaticTuningOptionModeActual.ON,
            databaseAutomaticTuning.tuningOptions().get("dropIndex").actualState());
    Assertions
        .assertEquals(
            AutomaticTuningOptionModeDesired.DEFAULT,
            databaseAutomaticTuning.tuningOptions().get("forceLastGoodPlan").desiredState());
    // Clean up the database and server created by this test.
    dbFromSample.delete();
    sqlServerManager.sqlServers().deleteByResourceGroup(rgName, sqlServerName);
}
// Verifies a server DNS alias can be created on one SQL server and then
// acquired (transferred) by a second server. The "Aquire" typo in the test
// name is historical; renaming would churn test history.
@Test
public void canCreateAndAquireServerDnsAlias() throws Exception {
    String sqlServerName1 = sqlServerName + "1";
    String sqlServerName2 = sqlServerName + "2";
    String sqlServerAdminName = "sqladmin";
    String sqlServerAdminPassword = password();
    // First server initially owns the alias.
    SqlServer sqlServer1 =
        sqlServerManager
            .sqlServers()
            .define(sqlServerName1)
            .withRegion(Region.US_EAST)
            .withNewResourceGroup(rgName)
            .withAdministratorLogin(sqlServerAdminName)
            .withAdministratorPassword(sqlServerAdminPassword)
            .create();
    Assertions.assertNotNull(sqlServer1);
    SqlServerDnsAlias dnsAlias = sqlServer1.dnsAliases().define(sqlServerName).create();
    Assertions.assertNotNull(dnsAlias);
    Assertions.assertEquals(rgName, dnsAlias.resourceGroupName());
    Assertions.assertEquals(sqlServerName1, dnsAlias.sqlServerName());
    // Re-fetch through the top-level API to confirm the alias round-trips.
    dnsAlias = sqlServerManager.sqlServers().dnsAliases().getBySqlServer(rgName, sqlServerName1, sqlServerName);
    Assertions.assertNotNull(dnsAlias);
    Assertions.assertEquals(rgName, dnsAlias.resourceGroupName());
    Assertions.assertEquals(sqlServerName1, dnsAlias.sqlServerName());
    Assertions.assertEquals(1, sqlServer1.databases().list().size());
    SqlServer sqlServer2 =
        sqlServerManager
            .sqlServers()
            .define(sqlServerName2)
            .withRegion(Region.US_EAST)
            .withNewResourceGroup(rgName)
            .withAdministratorLogin(sqlServerAdminName)
            .withAdministratorPassword(sqlServerAdminPassword)
            .create();
    Assertions.assertNotNull(sqlServer2);
    // Acquire moves the alias from server1 to server2.
    sqlServer2.dnsAliases().acquire(sqlServerName, sqlServer1.id());
    // Alias transfer is not instantaneous on the service side; wait before re-reading.
    ResourceManagerUtils.sleep(Duration.ofMinutes(3));
    dnsAlias = sqlServer2.dnsAliases().get(sqlServerName);
    Assertions.assertNotNull(dnsAlias);
    Assertions.assertEquals(rgName, dnsAlias.resourceGroupName());
    Assertions.assertEquals(sqlServerName2, dnsAlias.sqlServerName());
    // Clean up alias and both servers.
    dnsAlias.delete();
    sqlServerManager.sqlServers().deleteByResourceGroup(rgName, sqlServerName1);
    sqlServerManager.sqlServers().deleteByResourceGroup(rgName, sqlServerName2);
}
// Verifies region capability queries and system-assigned managed service identity (MSI)
// on a SQL server, both at creation time and via update.
@Test
public void canGetSqlServerCapabilitiesAndCreateIdentity() throws Exception {
    String sqlServerAdminName = "sqladmin";
    String sqlServerAdminPassword = password();
    String databaseName = "db-from-sample";
    // Region capabilities are keyed by server version; "12.0" is the current SQL server version.
    RegionCapabilities regionCapabilities = sqlServerManager.sqlServers().getCapabilitiesByRegion(Region.US_EAST);
    Assertions.assertNotNull(regionCapabilities);
    Assertions.assertNotNull(regionCapabilities.supportedCapabilitiesByServerVersion().get("12.0"));
    Assertions
        .assertTrue(
            regionCapabilities.supportedCapabilitiesByServerVersion().get("12.0").supportedEditions().size() > 0);
    Assertions
        .assertTrue(
            regionCapabilities
                .supportedCapabilitiesByServerVersion()
                .get("12.0")
                .supportedElasticPoolEditions()
                .size()
                > 0);
    // Create the server with system-assigned MSI enabled from the start.
    SqlServer sqlServer =
        sqlServerManager
            .sqlServers()
            .define(sqlServerName)
            .withRegion(Region.US_EAST)
            .withNewResourceGroup(rgName)
            .withAdministratorLogin(sqlServerAdminName)
            .withAdministratorPassword(sqlServerAdminPassword)
            .withSystemAssignedManagedServiceIdentity()
            .defineDatabase(databaseName)
            .fromSample(SampleName.ADVENTURE_WORKS_LT)
            .withBasicEdition()
            .attach()
            .create();
    SqlDatabase dbFromSample = sqlServer.databases().get(databaseName);
    Assertions.assertNotNull(dbFromSample);
    Assertions.assertEquals(DatabaseEdition.BASIC, dbFromSample.edition());
    // MSI should be active with the manager's tenant and a non-null principal id.
    Assertions.assertTrue(sqlServer.isManagedServiceIdentityEnabled());
    Assertions.assertEquals(sqlServerManager.tenantId(), sqlServer.systemAssignedManagedServiceIdentityTenantId());
    Assertions.assertNotNull(sqlServer.systemAssignedManagedServiceIdentityPrincipalId());
    // Re-applying MSI through update must be idempotent.
    sqlServer.update().withSystemAssignedManagedServiceIdentity().apply();
    Assertions.assertTrue(sqlServer.isManagedServiceIdentityEnabled());
    Assertions.assertEquals(sqlServerManager.tenantId(), sqlServer.systemAssignedManagedServiceIdentityTenantId());
    Assertions.assertNotNull(sqlServer.systemAssignedManagedServiceIdentityPrincipalId());
    dbFromSample.delete();
    sqlServerManager.sqlServers().deleteByResourceGroup(rgName, sqlServerName);
}
// Exports a sample database to blob storage as a bacpac, then imports it into a new
// database inside an elastic pool. Skipped in playback because it requires live storage.
@Test
public void canCRUDSqlServerWithImportDatabase() throws Exception {
    if (isPlaybackMode()) {
        return;
    }
    String sqlServerAdminName = "sqladmin";
    String sqlServerAdminPassword = password();
    String id = generateRandomUuid();
    String storageName = generateRandomResourceName(sqlServerName, 22);
    SqlServer sqlServer =
        sqlServerManager
            .sqlServers()
            .define(sqlServerName)
            .withRegion(Region.US_EAST)
            .withNewResourceGroup(rgName)
            .withAdministratorLogin(sqlServerAdminName)
            .withAdministratorPassword(sqlServerAdminPassword)
            .withActiveDirectoryAdministrator("DSEng", id)
            .create();
    SqlDatabase dbFromSample =
        sqlServer
            .databases()
            .define("db-from-sample")
            .fromSample(SampleName.ADVENTURE_WORKS_LT)
            .withBasicEdition()
            .withTag("tag1", "value1")
            .create();
    Assertions.assertNotNull(dbFromSample);
    Assertions.assertEquals(DatabaseEdition.BASIC, dbFromSample.edition());
    SqlDatabaseImportExportResponse exportedDB;
    StorageAccount storageAccount = null;
    // The storage account may or may not exist yet; a 404 here means it must be created.
    try {
        storageAccount =
            storageManager.storageAccounts().getByResourceGroup(sqlServer.resourceGroupName(), storageName);
    } catch (ManagementException e) {
        Assertions.assertEquals(404, e.getResponse().getStatusCode());
    }
    if (storageAccount == null) {
        // Export can create the storage account on the fly from a Creatable.
        Creatable<StorageAccount> storageAccountCreatable =
            storageManager
                .storageAccounts()
                .define(storageName)
                .withRegion(sqlServer.regionName())
                .withExistingResourceGroup(sqlServer.resourceGroupName());
        exportedDB =
            dbFromSample
                .exportTo(storageAccountCreatable, "from-sample", "dbfromsample.bacpac")
                .withSqlAdministratorLoginAndPassword(sqlServerAdminName, sqlServerAdminPassword)
                .execute();
        storageAccount =
            storageManager.storageAccounts().getByResourceGroup(sqlServer.resourceGroupName(), storageName);
    } else {
        // Storage account already present: export directly into it.
        exportedDB =
            dbFromSample
                .exportTo(storageAccount, "from-sample", "dbfromsample.bacpac")
                .withSqlAdministratorLoginAndPassword(sqlServerAdminName, sqlServerAdminPassword)
                .execute();
    }
    // Import the exported bacpac into a new database placed in a new Basic elastic pool.
    SqlDatabase dbFromImport =
        sqlServer
            .databases()
            .define("db-from-import")
            .defineElasticPool("ep1")
            .withBasicPool()
            .attach()
            .importFrom(storageAccount, "from-sample", "dbfromsample.bacpac")
            .withSqlAdministratorLoginAndPassword(sqlServerAdminName, sqlServerAdminPassword)
            .withTag("tag2", "value2")
            .create();
    Assertions.assertNotNull(dbFromImport);
    Assertions.assertEquals("ep1", dbFromImport.elasticPoolName());
    // Clean up: databases first, then the pool, then the server.
    dbFromImport.delete();
    dbFromSample.delete();
    sqlServer.elasticPools().delete("ep1");
    sqlServerManager.sqlServers().deleteByResourceGroup(rgName, sqlServerName);
}
/**
 * Lists recommended elastic pools for a pre-existing SQL server ("ans/ans-secondary").
 * Disabled because it depends on live resources not provisioned by this test run.
 *
 * <p>Fix: the method previously carried TWO {@code @Test} annotations (one before and one
 * after {@code @Disabled}); {@code @Test} is not a repeatable annotation, so the duplicate
 * is a compile error and has been removed.
 */
@Test
@Disabled("Depends on the existing SQL server")
public void canListRecommendedElasticPools() throws Exception {
    SqlServer sqlServer = sqlServerManager.sqlServers().getByResourceGroup("ans", "ans-secondary");
    // Touch a service-tier-advisor metric to exercise the deep navigation chain.
    sqlServer
        .databases()
        .list()
        .get(0)
        .listServiceTierAdvisors()
        .values()
        .iterator()
        .next()
        .serviceLevelObjectiveUsageMetric();
    Map<String, RecommendedElasticPool> recommendedElasticPools = sqlServer.listRecommendedElasticPools();
    Assertions.assertNotNull(recommendedElasticPools);
}
/**
 * Exercises the full create/read/update/delete lifecycle of a SQL server,
 * including name-availability checks before and after creation.
 */
@Test
public void canCRUDSqlServer() throws Exception {
    // The server name must be free before the server exists...
    CheckNameAvailabilityResult nameCheck =
        sqlServerManager.sqlServers().checkNameAvailability(sqlServerName);
    Assertions.assertTrue(nameCheck.isAvailable());

    SqlServer sqlServer = createSqlServer();
    validateSqlServer(sqlServer);

    // ...and reported as taken afterwards, with the matching reason.
    nameCheck = sqlServerManager.sqlServers().checkNameAvailability(sqlServerName);
    Assertions.assertFalse(nameCheck.isAvailable());
    Assertions
        .assertEquals(
            CheckNameAvailabilityReason.ALREADY_EXISTS.toString(), nameCheck.unavailabilityReason());

    // Service objectives are listable, refreshable, and retrievable by id.
    List<ServiceObjective> objectives = sqlServer.listServiceObjectives();
    Assertions.assertNotEquals(objectives.size(), 0);
    Assertions.assertNotNull(objectives.get(0).refresh());
    Assertions.assertNotNull(sqlServer.getServiceObjective("d1737d22-a8ea-4de7-9bd0-33395d2a7419"));

    // Update path: rotate the administrator password.
    sqlServer.update().withAdministratorPassword("P@ssword~2").apply();

    // The server must appear when listing the resource group.
    boolean serverListed = false;
    for (SqlServer candidate : sqlServerManager.sqlServers().listByResourceGroup(rgName)) {
        if (candidate.name().equals(sqlServerName)) {
            serverListed = true;
        }
    }
    Assertions.assertTrue(serverListed);

    sqlServer = sqlServerManager.sqlServers().getByResourceGroup(rgName, sqlServerName);
    Assertions.assertNotNull(sqlServer);

    // Delete and confirm it is gone.
    sqlServerManager.sqlServers().deleteByResourceGroup(sqlServer.resourceGroupName(), sqlServer.name());
    validateSqlServerNotFound(sqlServer);
}
// Verifies that databases, elastic pools, and firewall rules can all be declared inline
// ("cool shortcuts") within a single server create() and again within an update() apply(),
// then validates both creations and tears everything down.
@Test
public void canUseCoolShortcutsForResourceCreation() throws Exception {
    String database2Name = "database2";
    String database1InEPName = "database1InEP";
    String database2InEPName = "database2InEP";
    String elasticPool2Name = "elasticPool2";
    String elasticPool3Name = "elasticPool3";
    String elasticPool1Name = SQL_ELASTIC_POOL_NAME;
    // Single create() call provisioning 4 databases, 3 pools, and 3 firewall rules.
    SqlServer sqlServer =
        sqlServerManager
            .sqlServers()
            .define(sqlServerName)
            .withRegion(Region.US_EAST)
            .withNewResourceGroup(rgName)
            .withAdministratorLogin("userName")
            .withAdministratorPassword("Password~1")
            .withoutAccessFromAzureServices()
            .defineDatabase(SQL_DATABASE_NAME).attach()
            .defineDatabase(database2Name).attach()
            .defineElasticPool(elasticPool1Name).withStandardPool().attach()
            .defineElasticPool(elasticPool2Name).withPremiumPool().attach()
            .defineElasticPool(elasticPool3Name).withStandardPool().attach()
            .defineDatabase(database1InEPName).withExistingElasticPool(elasticPool2Name).attach()
            .defineDatabase(database2InEPName).withExistingElasticPool(elasticPool2Name).attach()
            .defineFirewallRule(SQL_FIREWALLRULE_NAME).withIpAddressRange(START_IPADDRESS, END_IPADDRESS).attach()
            .defineFirewallRule(generateRandomResourceName("firewall_", 15)).withIpAddressRange(START_IPADDRESS, END_IPADDRESS).attach()
            .defineFirewallRule(generateRandomResourceName("firewall_", 15)).withIpAddress(START_IPADDRESS).attach()
            .create();
    // Validate + delete child resources via direct deletes (deleteUsingUpdate = false).
    validateMultiCreation(
        database2Name,
        database1InEPName,
        database2InEPName,
        elasticPool1Name,
        elasticPool2Name,
        elasticPool3Name,
        sqlServer,
        false);
    // Fresh names for the update round ("U" suffix) to avoid colliding with the first batch.
    elasticPool1Name = SQL_ELASTIC_POOL_NAME + " U";
    database2Name = "database2U";
    database1InEPName = "database1InEPU";
    database2InEPName = "database2InEPU";
    elasticPool2Name = "elasticPool2U";
    elasticPool3Name = "elasticPool3U";
    // Same shortcut definitions, this time applied through update().
    sqlServer =
        sqlServer
            .update()
            .defineDatabase(SQL_DATABASE_NAME).attach()
            .defineDatabase(database2Name).attach()
            .defineElasticPool(elasticPool1Name).withStandardPool().attach()
            .defineElasticPool(elasticPool2Name).withPremiumPool().attach()
            .defineElasticPool(elasticPool3Name).withStandardPool().attach()
            .defineDatabase(database1InEPName).withExistingElasticPool(elasticPool2Name).attach()
            .defineDatabase(database2InEPName).withExistingElasticPool(elasticPool2Name).attach()
            .defineFirewallRule(SQL_FIREWALLRULE_NAME).withIpAddressRange(START_IPADDRESS, END_IPADDRESS).attach()
            .defineFirewallRule(generateRandomResourceName("firewall_", 15)).withIpAddressRange(START_IPADDRESS, END_IPADDRESS).attach()
            .defineFirewallRule(generateRandomResourceName("firewall_", 15)).withIpAddress(START_IPADDRESS).attach()
            .withTag("tag2", "value2")
            .apply();
    // Validate + delete child resources via withoutXxx() on update (deleteUsingUpdate = true).
    validateMultiCreation(
        database2Name,
        database1InEPName,
        database2InEPName,
        elasticPool1Name,
        elasticPool2Name,
        elasticPool3Name,
        sqlServer,
        true);
    sqlServer.refresh();
    // After both cleanup rounds no elastic pools should remain.
    Assertions.assertEquals(sqlServer.elasticPools().list().size(), 0);
    PagedIterable<SqlServer> sqlServers = sqlServerManager.sqlServers().listByResourceGroup(rgName);
    boolean found = false;
    for (SqlServer server : sqlServers) {
        if (server.name().equals(sqlServerName)) {
            found = true;
        }
    }
    Assertions.assertTrue(found);
    sqlServer = sqlServerManager.sqlServers().getByResourceGroup(rgName, sqlServerName);
    Assertions.assertNotNull(sqlServer);
    sqlServerManager.sqlServers().deleteByResourceGroup(sqlServer.resourceGroupName(), sqlServer.name());
    validateSqlServerNotFound(sqlServer);
}
// Full CRUD over a SQL database: async create, transparent data encryption toggling,
// service-tier advisors, copy into an elastic pool, rename, and deletion.
@Test
public void canCRUDSqlDatabase() throws Exception {
    SqlServer sqlServer = createSqlServer();
    // Create via the reactive API and block for the result.
    Mono<SqlDatabase> resourceStream =
        sqlServer.databases().define(SQL_DATABASE_NAME).withStandardEdition(SqlDatabaseStandardServiceObjective.S0).createAsync();
    SqlDatabase sqlDatabase = resourceStream.block();
    validateSqlDatabase(sqlDatabase, SQL_DATABASE_NAME);
    Assertions.assertTrue(sqlServer.databases().list().size() > 0);
    // Toggle transparent data encryption ENABLED -> DISABLED and verify status round-trips.
    TransparentDataEncryption transparentDataEncryption = sqlDatabase.getTransparentDataEncryption();
    Assertions.assertNotNull(transparentDataEncryption.status());
    List<TransparentDataEncryptionActivity> transparentDataEncryptionActivities =
        transparentDataEncryption.listActivities();
    Assertions.assertNotNull(transparentDataEncryptionActivities);
    transparentDataEncryption = transparentDataEncryption.updateStatus(TransparentDataEncryptionStatus.ENABLED);
    Assertions.assertNotNull(transparentDataEncryption);
    Assertions.assertEquals(transparentDataEncryption.status(), TransparentDataEncryptionStatus.ENABLED);
    transparentDataEncryptionActivities = transparentDataEncryption.listActivities();
    Assertions.assertNotNull(transparentDataEncryptionActivities);
    // Brief pause before flipping TDE back off; the service needs time to settle.
    ResourceManagerUtils.sleep(Duration.ofSeconds(10));
    transparentDataEncryption =
        sqlDatabase.getTransparentDataEncryption().updateStatus(TransparentDataEncryptionStatus.DISABLED);
    Assertions.assertNotNull(transparentDataEncryption);
    Assertions.assertEquals(transparentDataEncryption.status(), TransparentDataEncryptionStatus.DISABLED);
    Assertions.assertEquals(transparentDataEncryption.sqlServerName(), sqlServerName);
    Assertions.assertEquals(transparentDataEncryption.databaseName(), SQL_DATABASE_NAME);
    Assertions.assertNotNull(transparentDataEncryption.name());
    Assertions.assertNotNull(transparentDataEncryption.id());
    // Service-tier advisors should be present, refreshable, and expose usage metrics.
    Map<String, ServiceTierAdvisor> serviceTierAdvisors = sqlDatabase.listServiceTierAdvisors();
    Assertions.assertNotNull(serviceTierAdvisors);
    Assertions.assertNotNull(serviceTierAdvisors.values().iterator().next().serviceLevelObjectiveUsageMetric());
    Assertions.assertNotEquals(serviceTierAdvisors.size(), 0);
    Assertions.assertNotNull(serviceTierAdvisors.values().iterator().next().refresh());
    Assertions.assertNotNull(serviceTierAdvisors.values().iterator().next().serviceLevelObjectiveUsageMetric());
    sqlServer = sqlServerManager.sqlServers().getByResourceGroup(rgName, sqlServerName);
    validateSqlServer(sqlServer);
    // COPY the database into a database that lives in a new elastic pool.
    Creatable<SqlElasticPool> sqlElasticPoolCreatable =
        sqlServer.elasticPools().define(SQL_ELASTIC_POOL_NAME).withStandardPool();
    String anotherDatabaseName = "anotherDatabase";
    SqlDatabase anotherDatabase =
        sqlServer
            .databases()
            .define(anotherDatabaseName)
            .withNewElasticPool(sqlElasticPoolCreatable)
            .withSourceDatabase(sqlDatabase.id())
            .withMode(CreateMode.COPY)
            .create();
    validateSqlDatabaseWithElasticPool(anotherDatabase, anotherDatabaseName);
    sqlServer.databases().delete(anotherDatabase.name());
    validateSqlDatabase(sqlServer.databases().get(SQL_DATABASE_NAME), SQL_DATABASE_NAME);
    validateListSqlDatabase(sqlServer.databases().list());
    sqlServer.databases().delete(SQL_DATABASE_NAME);
    validateSqlDatabaseNotFound(SQL_DATABASE_NAME);
    // Create another database, then rename it and verify the rename stuck.
    resourceStream =
        sqlServer
            .databases()
            .define("newDatabase")
            .withCollation(COLLATION)
            .withStandardEdition(SqlDatabaseStandardServiceObjective.S0)
            .createAsync();
    sqlDatabase = resourceStream.block();
    sqlDatabase = sqlDatabase.rename("renamedDatabase");
    validateSqlDatabase(sqlDatabase, "renamedDatabase");
    sqlServer.databases().delete(sqlDatabase.name());
    sqlServerManager.sqlServers().deleteByResourceGroup(sqlServer.resourceGroupName(), sqlServer.name());
    validateSqlServerNotFound(sqlServer);
}
// Verifies geo-replication links between two servers: create an online secondary,
// inspect links from both ends, fail over in both directions, and delete the link.
// Statement order and the sleeps between failovers are load-bearing; do not reorder.
@Test
public void canManageReplicationLinks() throws Exception {
    String anotherSqlServerName = sqlServerName + "another";
    SqlServer sqlServer1 = createSqlServer();
    SqlServer sqlServer2 = createSqlServer(anotherSqlServerName);
    // Primary database on server1.
    Mono<SqlDatabase> resourceStream =
        sqlServer1
            .databases()
            .define(SQL_DATABASE_NAME)
            .withCollation(COLLATION)
            .withStandardEdition(SqlDatabaseStandardServiceObjective.S0)
            .createAsync();
    SqlDatabase databaseInServer1 = resourceStream.block();
    validateSqlDatabase(databaseInServer1, SQL_DATABASE_NAME);
    // Secondary on server2, created as an online replica of the primary.
    SqlDatabase databaseInServer2 =
        sqlServer2
            .databases()
            .define(SQL_DATABASE_NAME)
            .withSourceDatabase(databaseInServer1.id())
            .withMode(CreateMode.ONLINE_SECONDARY)
            .create();
    ResourceManagerUtils.sleep(Duration.ofSeconds(2));
    // Each side should see exactly one link pointing at the other.
    List<ReplicationLink> replicationLinksInDb1 =
        new ArrayList<>(databaseInServer1.listReplicationLinks().values());
    Assertions.assertEquals(replicationLinksInDb1.size(), 1);
    Assertions.assertEquals(replicationLinksInDb1.get(0).partnerDatabase(), databaseInServer2.name());
    Assertions.assertEquals(replicationLinksInDb1.get(0).partnerServer(), databaseInServer2.sqlServerName());
    List<ReplicationLink> replicationLinksInDb2 =
        new ArrayList<>(databaseInServer2.listReplicationLinks().values());
    Assertions.assertEquals(replicationLinksInDb2.size(), 1);
    Assertions.assertEquals(replicationLinksInDb2.get(0).partnerDatabase(), databaseInServer1.name());
    Assertions.assertEquals(replicationLinksInDb2.get(0).partnerServer(), databaseInServer1.sqlServerName());
    Assertions.assertNotNull(replicationLinksInDb1.get(0).refresh());
    // Planned failover to the secondary, then a forced (data-loss-allowed) failback.
    replicationLinksInDb2.get(0).failover();
    replicationLinksInDb2.get(0).refresh();
    ResourceManagerUtils.sleep(Duration.ofSeconds(30));
    replicationLinksInDb1.get(0).forceFailoverAllowDataLoss();
    replicationLinksInDb1.get(0).refresh();
    ResourceManagerUtils.sleep(Duration.ofSeconds(30));
    // Deleting the link from one side removes it for both.
    replicationLinksInDb2.get(0).delete();
    Assertions.assertEquals(databaseInServer2.listReplicationLinks().size(), 0);
    sqlServer1.databases().delete(databaseInServer1.name());
    sqlServer2.databases().delete(databaseInServer2.name());
    sqlServerManager.sqlServers().deleteByResourceGroup(sqlServer2.resourceGroupName(), sqlServer2.name());
    validateSqlServerNotFound(sqlServer2);
    sqlServerManager.sqlServers().deleteByResourceGroup(sqlServer1.resourceGroupName(), sqlServer1.name());
    validateSqlServerNotFound(sqlServer1);
}
/**
 * Round-trips a data-warehouse database: create on a DW SKU via the async API,
 * read it back as a {@code SqlWarehouse}, pause/resume compute, then clean up.
 */
@Test
public void canDoOperationsOnDataWarehouse() throws Exception {
    SqlServer server = createSqlServer();
    validateSqlServer(server);
    Assertions.assertNotNull(server.listUsageMetrics());

    // Provision the database on a data-warehouse SKU through the reactive path.
    SqlDatabase warehouseDb =
        server
            .databases()
            .define(SQL_DATABASE_NAME)
            .withCollation(COLLATION)
            .withSku(DatabaseSku.DATAWAREHOUSE_DW1000C)
            .createAsync()
            .block();
    Assertions.assertNotNull(warehouseDb);

    // Re-read it and confirm the warehouse flag is set.
    warehouseDb = server.databases().get(SQL_DATABASE_NAME);
    Assertions.assertNotNull(warehouseDb);
    Assertions.assertTrue(warehouseDb.isDataWarehouse());

    // The warehouse view exposes warehouse-only operations (restore points, pause/resume).
    SqlWarehouse warehouse = server.databases().get(SQL_DATABASE_NAME).asWarehouse();
    Assertions.assertNotNull(warehouse);
    Assertions.assertEquals(warehouse.name(), SQL_DATABASE_NAME);
    Assertions.assertEquals(warehouse.edition(), DatabaseEdition.DATA_WAREHOUSE);
    Assertions.assertNotNull(warehouse.listRestorePoints());
    Assertions.assertNotNull(warehouse.listUsageMetrics());
    warehouse.pauseDataWarehouse();
    warehouse.resumeDataWarehouse();

    // Tear down the database and the server.
    server.databases().delete(SQL_DATABASE_NAME);
    sqlServerManager.sqlServers().deleteByResourceGroup(server.resourceGroupName(), server.name());
    validateSqlServerNotFound(server);
}
// CRUD for a database created inside an elastic pool, including moving it out of the
// pool, walking through edition/objective/size updates, and moving it back in.
@Test
public void canCRUDSqlDatabaseWithElasticPool() throws Exception {
    SqlServer sqlServer = createSqlServer();
    Creatable<SqlElasticPool> sqlElasticPoolCreatable =
        sqlServer
            .elasticPools()
            .define(SQL_ELASTIC_POOL_NAME)
            .withStandardPool()
            .withTag("tag1", "value1");
    // Create the database directly into the (new) pool via the reactive API.
    Mono<SqlDatabase> resourceStream =
        sqlServer
            .databases()
            .define(SQL_DATABASE_NAME)
            .withNewElasticPool(sqlElasticPoolCreatable)
            .withCollation(COLLATION)
            .createAsync();
    SqlDatabase sqlDatabase = resourceStream.block();
    validateSqlDatabase(sqlDatabase, SQL_DATABASE_NAME);
    sqlServer = sqlServerManager.sqlServers().getByResourceGroup(rgName, sqlServerName);
    validateSqlServer(sqlServer);
    SqlElasticPool elasticPool = sqlServer.elasticPools().get(SQL_ELASTIC_POOL_NAME);
    validateSqlElasticPool(elasticPool);
    validateSqlDatabaseWithElasticPool(sqlServer.databases().get(SQL_DATABASE_NAME), SQL_DATABASE_NAME);
    validateListSqlDatabase(sqlServer.databases().list());
    // Detach from the pool onto Standard S3; elasticPoolName() must then be null.
    sqlDatabase
        .update()
        .withoutElasticPool()
        .withStandardEdition(SqlDatabaseStandardServiceObjective.S3)
        .apply();
    sqlDatabase = sqlServer.databases().get(SQL_DATABASE_NAME);
    Assertions.assertNull(sqlDatabase.elasticPoolName());
    // Walk through Premium P1 -> P2 and verify current/requested objectives track.
    sqlDatabase.update().withPremiumEdition(SqlDatabasePremiumServiceObjective.P1).apply();
    sqlDatabase = sqlServer.databases().get(SQL_DATABASE_NAME);
    Assertions.assertEquals(sqlDatabase.edition(), DatabaseEdition.PREMIUM);
    Assertions.assertEquals(sqlDatabase.currentServiceObjectiveName(), ServiceObjectiveName.P1.toString());
    sqlDatabase.update().withPremiumEdition(SqlDatabasePremiumServiceObjective.P2).apply();
    sqlDatabase = sqlServer.databases().get(SQL_DATABASE_NAME);
    Assertions.assertEquals(sqlDatabase.currentServiceObjectiveName(), ServiceObjectiveName.P2.toString());
    Assertions.assertEquals(sqlDatabase.requestedServiceObjectiveName(), ServiceObjectiveName.P2.toString());
    // 250 GB max size.
    sqlDatabase.update().withMaxSizeBytes(268435456000L).apply();
    sqlDatabase = sqlServer.databases().get(SQL_DATABASE_NAME);
    Assertions.assertEquals(sqlDatabase.maxSizeBytes(), 268435456000L);
    // Re-attach the database to the existing pool.
    sqlDatabase.update().withExistingElasticPool(SQL_ELASTIC_POOL_NAME).apply();
    sqlDatabase = sqlServer.databases().get(SQL_DATABASE_NAME);
    Assertions.assertEquals(sqlDatabase.elasticPoolName(), SQL_ELASTIC_POOL_NAME);
    Assertions.assertNotNull(elasticPool.listActivities());
    Assertions.assertNotNull(elasticPool.listDatabaseActivities());
    List<SqlDatabase> databasesInElasticPool = elasticPool.listDatabases();
    Assertions.assertNotNull(databasesInElasticPool);
    Assertions.assertEquals(databasesInElasticPool.size(), 1);
    SqlDatabase databaseInElasticPool = elasticPool.getDatabase(SQL_DATABASE_NAME);
    validateSqlDatabase(databaseInElasticPool, SQL_DATABASE_NAME);
    databaseInElasticPool.refresh();
    // Missing databases must surface as not-found, not as null.
    validateResourceNotFound(() -> elasticPool.getDatabase("does_not_exist"));
    sqlServer.databases().delete(SQL_DATABASE_NAME);
    validateSqlDatabaseNotFound(SQL_DATABASE_NAME);
    // Create one more database on the (still existing) pool, then clean everything up.
    SqlElasticPool sqlElasticPool = sqlServer.elasticPools().get(SQL_ELASTIC_POOL_NAME);
    resourceStream =
        sqlServer
            .databases()
            .define("newDatabase")
            .withExistingElasticPool(sqlElasticPool)
            .withCollation(COLLATION)
            .createAsync();
    sqlDatabase = resourceStream.block();
    sqlServer.databases().delete(sqlDatabase.name());
    validateSqlDatabaseNotFound("newDatabase");
    sqlServer.elasticPools().delete(SQL_ELASTIC_POOL_NAME);
    sqlServerManager.sqlServers().deleteByResourceGroup(sqlServer.resourceGroupName(), sqlServer.name());
    validateSqlServerNotFound(sqlServer);
}
// CRUD for elastic pools: async create, update of DTU/capacity/storage plus a new
// database attached through the pool update, then deletion of pool and database.
@Test
public void canCRUDSqlElasticPool() throws Exception {
    SqlServer sqlServer = createSqlServer();
    sqlServer = sqlServerManager.sqlServers().getByResourceGroup(rgName, sqlServerName);
    validateSqlServer(sqlServer);
    // Create a Standard pool via the reactive API.
    Mono<SqlElasticPool> resourceStream =
        sqlServer
            .elasticPools()
            .define(SQL_ELASTIC_POOL_NAME)
            .withStandardPool()
            .withTag("tag1", "value1")
            .createAsync();
    SqlElasticPool sqlElasticPool = resourceStream.block();
    validateSqlElasticPool(sqlElasticPool);
    Assertions.assertEquals(sqlElasticPool.listDatabases().size(), 0);
    // Update capacity settings and attach a brand-new database in the same apply().
    sqlElasticPool =
        sqlElasticPool
            .update()
            .withReservedDtu(SqlElasticPoolBasicEDTUs.eDTU_100)
            .withDatabaseMaxCapacity(20)
            .withDatabaseMinCapacity(10)
            .withStorageCapacity(102400 * 1024 * 1024L)
            .withNewDatabase(SQL_DATABASE_NAME)
            .withTag("tag2", "value2")
            .apply();
    validateSqlElasticPool(sqlElasticPool);
    Assertions.assertEquals(sqlElasticPool.listDatabases().size(), 1);
    Assertions.assertNotNull(sqlElasticPool.getDatabase(SQL_DATABASE_NAME));
    validateSqlElasticPool(sqlServer.elasticPools().get(SQL_ELASTIC_POOL_NAME));
    validateListSqlElasticPool(sqlServer.elasticPools().list());
    // The contained database must be deleted before the pool can go.
    sqlServer.databases().delete(SQL_DATABASE_NAME);
    sqlServer.elasticPools().delete(SQL_ELASTIC_POOL_NAME);
    validateSqlElasticPoolNotFound(sqlServer, SQL_ELASTIC_POOL_NAME);
    // Quick second create/delete cycle for a pool with no databases.
    resourceStream =
        sqlServer.elasticPools().define("newElasticPool").withStandardPool().createAsync();
    sqlElasticPool = resourceStream.block();
    sqlServer.elasticPools().delete(sqlElasticPool.name());
    validateSqlElasticPoolNotFound(sqlServer, "newElasticPool");
    sqlServerManager.sqlServers().deleteByResourceGroup(sqlServer.resourceGroupName(), sqlServer.name());
    validateSqlServerNotFound(sqlServer);
}
// CRUD for firewall rules: range-based and single-address rules, endpoint updates,
// listing, and not-found behavior after delete.
@Test
public void canCRUDSqlFirewallRule() throws Exception {
    SqlServer sqlServer = createSqlServer();
    sqlServer = sqlServerManager.sqlServers().getByResourceGroup(rgName, sqlServerName);
    validateSqlServer(sqlServer);
    // Create a rule with an explicit start/end IP range via the reactive API.
    Mono<SqlFirewallRule> resourceStream =
        sqlServer
            .firewallRules()
            .define(SQL_FIREWALLRULE_NAME)
            .withIpAddressRange(START_IPADDRESS, END_IPADDRESS)
            .createAsync();
    SqlFirewallRule sqlFirewallRule = resourceStream.block();
    validateSqlFirewallRule(sqlFirewallRule, SQL_FIREWALLRULE_NAME);
    validateSqlFirewallRule(sqlServer.firewallRules().get(SQL_FIREWALLRULE_NAME), SQL_FIREWALLRULE_NAME);
    // A single-address rule collapses start and end to the same IP.
    String secondFirewallRuleName = "secondFireWallRule";
    SqlFirewallRule secondFirewallRule =
        sqlServer.firewallRules().define(secondFirewallRuleName).withIpAddress(START_IPADDRESS).create();
    Assertions.assertNotNull(secondFirewallRule);
    secondFirewallRule = sqlServer.firewallRules().get(secondFirewallRuleName);
    Assertions.assertNotNull(secondFirewallRule);
    Assertions.assertEquals(START_IPADDRESS, secondFirewallRule.endIpAddress());
    // Widen the single-address rule into a range, then delete it.
    secondFirewallRule = secondFirewallRule.update().withEndIpAddress(END_IPADDRESS).apply();
    validateSqlFirewallRule(secondFirewallRule, secondFirewallRuleName);
    sqlServer.firewallRules().delete(secondFirewallRuleName);
    // Effectively-final copy required for use inside the lambda below.
    final SqlServer finalSqlServer = sqlServer;
    validateResourceNotFound(() -> finalSqlServer.firewallRules().get(secondFirewallRuleName));
    sqlFirewallRule = sqlServer.firewallRules().get(SQL_FIREWALLRULE_NAME);
    validateSqlFirewallRule(sqlFirewallRule, SQL_FIREWALLRULE_NAME);
    // Shrink the first rule's range down to a single address.
    sqlFirewallRule.update().withEndIpAddress(START_IPADDRESS).apply();
    sqlFirewallRule = sqlServer.firewallRules().get(SQL_FIREWALLRULE_NAME);
    Assertions.assertEquals(sqlFirewallRule.endIpAddress(), START_IPADDRESS);
    validateListSqlFirewallRule(sqlServer.firewallRules().list());
    sqlServer.firewallRules().delete(sqlFirewallRule.name());
    validateSqlFirewallRuleNotFound();
    sqlServerManager.sqlServers().deleteByResourceGroup(sqlServer.resourceGroupName(), sqlServer.name());
    validateSqlServerNotFound(sqlServer);
}
/**
 * Validates a SQL server that was created together with several databases, elastic pools and
 * firewall rules in one multi-child creation, then tears the children down either by direct
 * delete calls or through a single {@code update()} on the server.
 *
 * @param deleteUsingUpdate when {@code true}, children are removed via the server's update()
 *     fluent chain; otherwise each child is deleted individually
 */
private void validateMultiCreation(
    String database2Name,
    String database1InEPName,
    String database2InEPName,
    String elasticPool1Name,
    String elasticPool2Name,
    String elasticPool3Name,
    SqlServer sqlServer,
    boolean deleteUsingUpdate) {
    validateSqlServer(sqlServer);
    validateSqlServer(sqlServerManager.sqlServers().getByResourceGroup(rgName, sqlServerName));
    validateSqlDatabase(sqlServer.databases().get(SQL_DATABASE_NAME), SQL_DATABASE_NAME);
    validateSqlFirewallRule(sqlServer.firewallRules().get(SQL_FIREWALLRULE_NAME), SQL_FIREWALLRULE_NAME);
    // Exactly three firewall rules were created; besides the canonical one there must be one
    // rule ending at START_IPADDRESS and one ending at END_IPADDRESS.
    List<SqlFirewallRule> firewalls = sqlServer.firewallRules().list();
    Assertions.assertEquals(3, firewalls.size());
    int startIPAddress = 0;
    int endIPAddress = 0;
    for (SqlFirewallRule firewall : firewalls) {
        if (!firewall.name().equalsIgnoreCase(SQL_FIREWALLRULE_NAME)) {
            Assertions.assertEquals(firewall.startIpAddress(), START_IPADDRESS);
            if (firewall.endIpAddress().equalsIgnoreCase(START_IPADDRESS)) {
                startIPAddress++;
            } else if (firewall.endIpAddress().equalsIgnoreCase(END_IPADDRESS)) {
                endIPAddress++;
            }
        }
    }
    Assertions.assertEquals(startIPAddress, 1);
    Assertions.assertEquals(endIPAddress, 1);
    Assertions.assertNotNull(sqlServer.databases().get(database2Name));
    Assertions.assertNotNull(sqlServer.databases().get(database1InEPName));
    Assertions.assertNotNull(sqlServer.databases().get(database2InEPName));
    SqlElasticPool ep1 = sqlServer.elasticPools().get(elasticPool1Name);
    validateSqlElasticPool(ep1, elasticPool1Name);
    // ep2 is PREMIUM and must contain both "InEP" databases; ep3 is STANDARD and empty.
    SqlElasticPool ep2 = sqlServer.elasticPools().get(elasticPool2Name);
    Assertions.assertNotNull(ep2);
    Assertions.assertEquals(ep2.edition(), ElasticPoolEdition.PREMIUM);
    Assertions.assertEquals(ep2.listDatabases().size(), 2);
    Assertions.assertNotNull(ep2.getDatabase(database1InEPName));
    Assertions.assertNotNull(ep2.getDatabase(database2InEPName));
    SqlElasticPool ep3 = sqlServer.elasticPools().get(elasticPool3Name);
    Assertions.assertNotNull(ep3);
    Assertions.assertEquals(ep3.edition(), ElasticPoolEdition.STANDARD);
    if (!deleteUsingUpdate) {
        // Direct-delete path: databases first (pools must be empty before pool deletion).
        sqlServer.databases().delete(database2Name);
        sqlServer.databases().delete(database1InEPName);
        sqlServer.databases().delete(database2InEPName);
        sqlServer.databases().delete(SQL_DATABASE_NAME);
        Assertions.assertEquals(ep1.listDatabases().size(), 0);
        Assertions.assertEquals(ep2.listDatabases().size(), 0);
        Assertions.assertEquals(ep3.listDatabases().size(), 0);
        sqlServer.elasticPools().delete(elasticPool1Name);
        sqlServer.elasticPools().delete(elasticPool2Name);
        sqlServer.elasticPools().delete(elasticPool3Name);
        firewalls = sqlServer.firewallRules().list();
        for (SqlFirewallRule firewallRule : firewalls) {
            firewallRule.delete();
        }
    } else {
        // Update path: one fluent apply() removes databases, pools and the canonical firewall
        // rule; the remaining two rules are then deleted individually.
        sqlServer
            .update()
            .withoutDatabase(database2Name)
            .withoutElasticPool(elasticPool1Name)
            .withoutElasticPool(elasticPool2Name)
            .withoutElasticPool(elasticPool3Name)
            .withoutDatabase(database1InEPName)
            .withoutDatabase(SQL_DATABASE_NAME)
            .withoutDatabase(database2InEPName)
            .withoutFirewallRule(SQL_FIREWALLRULE_NAME)
            .apply();
        Assertions.assertEquals(sqlServer.elasticPools().list().size(), 0);
        firewalls = sqlServer.firewallRules().list();
        Assertions.assertEquals(firewalls.size(), 2);
        for (SqlFirewallRule firewallRule : firewalls) {
            firewallRule.delete();
        }
    }
    Assertions.assertEquals(sqlServer.elasticPools().list().size(), 0);
    // Only the server's built-in "master" database is expected to remain.
    Assertions.assertEquals(sqlServer.databases().list().size(), 1);
}
/** Asserts that the canonical test firewall rule can no longer be fetched from the server. */
private void validateSqlFirewallRuleNotFound() {
    // The whole lookup stays inside the supplier so a 404 at any step is caught
    // by validateResourceNotFound.
    validateResourceNotFound(
        () -> {
            SqlServer server = sqlServerManager.sqlServers().getByResourceGroup(rgName, sqlServerName);
            return server.firewallRules().get(SQL_FIREWALLRULE_NAME);
        });
}
/** Asserts that the given elastic pool can no longer be fetched from the server. */
private void validateSqlElasticPoolNotFound(SqlServer sqlServer, String elasticPoolName) {
    Supplier<Object> fetchPool = () -> sqlServer.elasticPools().get(elasticPoolName);
    validateResourceNotFound(fetchPool);
}
/** Asserts that the named database can no longer be fetched from a freshly-loaded server. */
private void validateSqlDatabaseNotFound(String newDatabase) {
    // The whole lookup stays inside the supplier so a 404 at any step is caught
    // by validateResourceNotFound.
    validateResourceNotFound(
        () -> {
            SqlServer server = sqlServerManager.sqlServers().getByResourceGroup(rgName, sqlServerName);
            return server.databases().get(newDatabase);
        });
}
/** Asserts that the server can no longer be resolved by its resource id. */
private void validateSqlServerNotFound(SqlServer sqlServer) {
    String serverId = sqlServer.id();
    validateResourceNotFound(() -> sqlServerManager.sqlServers().getById(serverId));
}
/**
 * Runs the supplier and accepts either outcome that signals a missing resource:
 * a {@code null} result, or a ManagementException carrying HTTP 404.
 */
private void validateResourceNotFound(Supplier<Object> fetchResource) {
    try {
        Assertions.assertNull(fetchResource.get());
    } catch (ManagementException e) {
        Assertions.assertEquals(404, e.getResponse().getStatusCode());
    }
}
// Convenience overload: creates the SQL server under the test's default server name.
private SqlServer createSqlServer() {
    return createSqlServer(sqlServerName);
}
// Creates a minimal SQL server in US East inside a new resource group, with fixed
// administrator credentials used throughout these tests.
private SqlServer createSqlServer(String sqlServerName) {
    return sqlServerManager
        .sqlServers()
        .define(sqlServerName)
        .withRegion(Region.US_EAST)
        .withNewResourceGroup(rgName)
        .withAdministratorLogin("userName")
        .withAdministratorPassword("P@ssword~1")
        .create();
}
/** Asserts that the canonical test firewall rule is present in the listing. */
private static void validateListSqlFirewallRule(List<SqlFirewallRule> sqlFirewallRules) {
    boolean found =
        sqlFirewallRules.stream().anyMatch(rule -> rule.name().equals(SQL_FIREWALLRULE_NAME));
    Assertions.assertTrue(found);
}
/**
 * Asserts the firewall rule matches the expected name, parent server, resource group, region
 * and the test's canonical start/end IP addresses.
 */
private void validateSqlFirewallRule(SqlFirewallRule sqlFirewallRule, String firewallName) {
    Assertions.assertNotNull(sqlFirewallRule);
    Assertions.assertEquals(firewallName, sqlFirewallRule.name());
    // The sqlServerName assertion was previously duplicated; asserted once here.
    Assertions.assertEquals(sqlServerName, sqlFirewallRule.sqlServerName());
    Assertions.assertEquals(START_IPADDRESS, sqlFirewallRule.startIpAddress());
    Assertions.assertEquals(END_IPADDRESS, sqlFirewallRule.endIpAddress());
    Assertions.assertEquals(rgName, sqlFirewallRule.resourceGroupName());
    Assertions.assertEquals(Region.US_EAST, sqlFirewallRule.region());
}
/** Asserts that the canonical test elastic pool is present in the listing. */
private static void validateListSqlElasticPool(List<SqlElasticPool> sqlElasticPools) {
    boolean found =
        sqlElasticPools.stream().anyMatch(pool -> pool.name().equals(SQL_ELASTIC_POOL_NAME));
    Assertions.assertTrue(found);
}
// Convenience overload: validates the pool against the canonical test pool name.
private void validateSqlElasticPool(SqlElasticPool sqlElasticPool) {
    validateSqlElasticPool(sqlElasticPool, SQL_ELASTIC_POOL_NAME);
}
/**
 * Asserts the elastic pool matches the expected name/server/resource group, is STANDARD
 * edition, and carries non-trivial capacity values.
 */
private void validateSqlElasticPool(SqlElasticPool sqlElasticPool, String elasticPoolName) {
    Assertions.assertNotNull(sqlElasticPool);
    Assertions.assertEquals(rgName, sqlElasticPool.resourceGroupName());
    Assertions.assertEquals(elasticPoolName, sqlElasticPool.name());
    Assertions.assertEquals(sqlServerName, sqlElasticPool.sqlServerName());
    Assertions.assertEquals(ElasticPoolEdition.STANDARD, sqlElasticPool.edition());
    Assertions.assertNotNull(sqlElasticPool.creationDate());
    // Capacity values must be populated by the service (non-zero).
    Assertions.assertNotEquals(0, sqlElasticPool.databaseDtuMax());
    Assertions.assertNotEquals(0, sqlElasticPool.dtu());
}
/** Asserts that the canonical test database is present in the listing. */
private static void validateListSqlDatabase(List<SqlDatabase> sqlDatabases) {
    boolean found =
        sqlDatabases.stream().anyMatch(database -> database.name().equals(SQL_DATABASE_NAME));
    Assertions.assertTrue(found);
}
// Asserts the server exists, sits in the expected resource group, has a FQDN assigned by
// the service, and kept the configured administrator login.
private void validateSqlServer(SqlServer sqlServer) {
    Assertions.assertNotNull(sqlServer);
    Assertions.assertEquals(rgName, sqlServer.resourceGroupName());
    Assertions.assertNotNull(sqlServer.fullyQualifiedDomainName());
    Assertions.assertEquals("userName", sqlServer.administratorLogin());
}
// Asserts the database has the expected name and parent server, the canonical collation,
// and STANDARD edition.
private void validateSqlDatabase(SqlDatabase sqlDatabase, String databaseName) {
    Assertions.assertNotNull(sqlDatabase);
    Assertions.assertEquals(sqlDatabase.name(), databaseName);
    Assertions.assertEquals(sqlServerName, sqlDatabase.sqlServerName());
    Assertions.assertEquals(sqlDatabase.collation(), COLLATION);
    Assertions.assertEquals(sqlDatabase.edition(), DatabaseEdition.STANDARD);
}
// Same checks as validateSqlDatabase, plus membership in the canonical elastic pool.
private void validateSqlDatabaseWithElasticPool(SqlDatabase sqlDatabase, String databaseName) {
    validateSqlDatabase(sqlDatabase, databaseName);
    Assertions.assertEquals(SQL_ELASTIC_POOL_NAME, sqlDatabase.elasticPoolName());
}
// Picks random database and elastic-pool SKUs that the region actually supports, then
// creates five databases and five elastic pools concurrently with those SKUs.
@Test
public void testRandomSku() {
    List<DatabaseSku> databaseSkus = new LinkedList<>(Arrays.asList(DatabaseSku.getAll().toArray(new DatabaseSku[0])));
    Collections.shuffle(databaseSkus);
    List<ElasticPoolSku> elasticPoolSkus = new LinkedList<>(Arrays.asList(ElasticPoolSku.getAll().toArray(new ElasticPoolSku[0])));
    Collections.shuffle(elasticPoolSkus);
    // Remove SKUs the region reports as neither AVAILABLE nor DEFAULT so creation won't fail.
    sqlServerManager.sqlServers().getCapabilitiesByRegion(Region.US_EAST).supportedCapabilitiesByServerVersion()
        .forEach((x, serverVersionCapability) -> {
            serverVersionCapability.supportedEditions().forEach(edition -> {
                edition.supportedServiceLevelObjectives().forEach(serviceObjective -> {
                    if (serviceObjective.status() != CapabilityStatus.AVAILABLE && serviceObjective.status() != CapabilityStatus.DEFAULT) {
                        databaseSkus.remove(DatabaseSku.fromSku(serviceObjective.sku()));
                    }
                });
            });
            serverVersionCapability.supportedElasticPoolEditions().forEach(edition -> {
                edition.supportedElasticPoolPerformanceLevels().forEach(performance -> {
                    if (performance.status() != CapabilityStatus.AVAILABLE && performance.status() != CapabilityStatus.DEFAULT) {
                        elasticPoolSkus.remove(ElasticPoolSku.fromSku(performance.sku()));
                    }
                });
            });
        });
    SqlServer sqlServer = sqlServerManager.sqlServers().define(sqlServerName)
        .withRegion(Region.US_EAST)
        .withNewResourceGroup(rgName)
        .withAdministratorLogin("userName")
        .withAdministratorPassword(password())
        .create();
    // Create 5 databases and 5 elastic pools in parallel; blockLast() waits for all of them.
    Flux.merge(
        Flux.range(0, 5)
            .flatMap(i -> sqlServer.databases().define("database" + i).withSku(databaseSkus.get(i)).createAsync().cast(Indexable.class)),
        Flux.range(0, 5)
            .flatMap(i -> sqlServer.elasticPools().define("elasticPool" + i).withSku(elasticPoolSkus.get(i)).createAsync().cast(Indexable.class))
    )
        .blockLast();
}
// Code-generation utility (disabled by default): queries the region's SKU capabilities and
// rewrites the static SKU constant sections of DatabaseSku.java and ElasticPoolSku.java by
// substituting the generated text into the "<Generated>" placeholder of each template.
@Test
@Disabled("Only run for updating sku")
public void generateSku() throws IOException {
    StringBuilder databaseSkuBuilder = new StringBuilder();
    StringBuilder elasticPoolSkuBuilder = new StringBuilder();
    sqlServerManager.sqlServers().getCapabilitiesByRegion(Region.US_EAST).supportedCapabilitiesByServerVersion()
        .forEach((x, serverVersionCapability) -> {
            serverVersionCapability.supportedEditions().forEach(edition -> {
                // The internal "System" edition is not user-creatable; skip it.
                if (edition.name().equalsIgnoreCase("System")) {
                    return;
                }
                edition.supportedServiceLevelObjectives().forEach(serviceObjective -> {
                    addStaticSkuDefinition(databaseSkuBuilder, edition.name(), serviceObjective.name(), serviceObjective.sku(), "DatabaseSku");
                });
            });
            serverVersionCapability.supportedElasticPoolEditions().forEach(edition -> {
                if (edition.name().equalsIgnoreCase("System")) {
                    return;
                }
                edition.supportedElasticPoolPerformanceLevels().forEach(performance -> {
                    String detailName = String.format("%s_%d", performance.sku().name(), performance.sku().capacity());
                    addStaticSkuDefinition(elasticPoolSkuBuilder, edition.name(), detailName, performance.sku(), "ElasticPoolSku");
                });
            });
        });
    String databaseSku = new String(readAllBytes(getClass().getResourceAsStream("/DatabaseSku.java")), StandardCharsets.UTF_8);
    databaseSku = databaseSku.replace("<Generated>", databaseSkuBuilder.toString());
    Files.write(new File("src/main/java/com/azure/resourcemanager/sql/models/DatabaseSku.java").toPath(), databaseSku.getBytes(StandardCharsets.UTF_8));
    String elasticPoolSku = new String(readAllBytes(getClass().getResourceAsStream("/ElasticPoolSku.java")), StandardCharsets.UTF_8);
    elasticPoolSku = elasticPoolSku.replace("<Generated>", elasticPoolSkuBuilder.toString());
    Files.write(new File("src/main/java/com/azure/resourcemanager/sql/models/ElasticPoolSku.java").toPath(), elasticPoolSku.getBytes(StandardCharsets.UTF_8));
    // Creates the resource group so the recorded session has a consistent request sequence.
    sqlServerManager.resourceManager().resourceGroups().define(rgName).withRegion(Region.US_EAST).create();
}
/**
 * Drains the given stream into memory and returns its full contents.
 * The input stream itself is not closed by this method.
 */
private byte[] readAllBytes(InputStream inputStream) throws IOException {
    try (ByteArrayOutputStream outputStream = new ByteArrayOutputStream()) {
        byte[] buffer = new byte[4096];
        int read;
        // Idiomatic read loop: -1 signals end of stream.
        while ((read = inputStream.read(buffer)) != -1) {
            outputStream.write(buffer, 0, read);
        }
        return outputStream.toByteArray();
    }
}
/**
 * Appends one generated "public static final <className> EDITION_DETAIL = new <className>(...)"
 * constant declaration (with a Javadoc line) to the builder, used by {@link
 * SqlServerOperationsTests
 * quoted only when non-null, so a null component is emitted as the literal {@code null}.
 */
private void addStaticSkuDefinition(StringBuilder builder, String edition, String detailName, Sku sku, String className) {
    builder
        .append("\n    ").append("/** ").append(edition).append(" Edition with ").append(detailName).append(" sku. */")
        .append("\n    ").append("public static final ").append(className).append(" ").append(String.format("%s_%s", edition.toUpperCase(Locale.ROOT), detailName.toUpperCase(Locale.ROOT))).append(" =")
        .append("\n        new ").append(className).append("(")
        .append(sku.name() == null ? null : "\"" + sku.name() + "\"")
        .append(", ")
        .append(sku.tier() == null ? null : "\"" + sku.tier() + "\"")
        .append(", ")
        .append(sku.family() == null ? null : "\"" + sku.family() + "\"")
        .append(", ")
        .append(sku.capacity())
        .append(", ")
        .append(sku.size() == null ? null : "\"" + sku.size() + "\"")
        .append(");");
}
} | class SqlServerOperationsTests extends SqlServerTest {
// Canonical child-resource names and firewall IP range reused across the tests below.
private static final String SQL_DATABASE_NAME = "myTestDatabase2";
private static final String COLLATION = "SQL_Latin1_General_CP1_CI_AS";
private static final String SQL_ELASTIC_POOL_NAME = "testElasticPool";
private static final String SQL_FIREWALLRULE_NAME = "firewallrule1";
private static final String START_IPADDRESS = "10.102.1.10";
private static final String END_IPADDRESS = "10.102.1.12";
// Creates one server with a hub, a sync and a member database, then exercises create,
// update, list, get-by-parent-path and delete for a SQL sync member inside a sync group.
@Test
public void canCRUDSqlSyncMember() throws Exception {
    final String dbName = "dbSample";
    final String dbSyncName = "dbSync";
    final String dbMemberName = "dbMember";
    final String syncGroupName = "groupName";
    final String syncMemberName = "memberName";
    final String administratorLogin = "sqladmin";
    final String administratorPassword = password();
    SqlServer sqlPrimaryServer =
        sqlServerManager
            .sqlServers()
            .define(sqlServerName)
            .withRegion(Region.US_EAST)
            .withNewResourceGroup(rgName)
            .withAdministratorLogin(administratorLogin)
            .withAdministratorPassword(administratorPassword)
            .defineDatabase(dbName)
            .fromSample(SampleName.ADVENTURE_WORKS_LT)
            .withStandardEdition(SqlDatabaseStandardServiceObjective.S0)
            .attach()
            .defineDatabase(dbSyncName)
            .withStandardEdition(SqlDatabaseStandardServiceObjective.S0)
            .attach()
            .defineDatabase(dbMemberName)
            .withStandardEdition(SqlDatabaseStandardServiceObjective.S0)
            .attach()
            .create();
    SqlDatabase dbSource = sqlPrimaryServer.databases().get(dbName);
    SqlDatabase dbSync = sqlPrimaryServer.databases().get(dbSyncName);
    SqlDatabase dbMember = sqlPrimaryServer.databases().get(dbMemberName);
    // Sync group on the hub database; interval -1 disables automatic sync.
    SqlSyncGroup sqlSyncGroup =
        dbSync
            .syncGroups()
            .define(syncGroupName)
            .withSyncDatabaseId(dbSource.id())
            .withDatabaseUserName(administratorLogin)
            .withDatabasePassword(administratorPassword)
            .withConflictResolutionPolicyHubWins()
            .withInterval(-1)
            .create();
    Assertions.assertNotNull(sqlSyncGroup);
    SqlSyncMember sqlSyncMember =
        sqlSyncGroup
            .syncMembers()
            .define(syncMemberName)
            .withMemberSqlDatabase(dbMember)
            .withMemberUserName(administratorLogin)
            .withMemberPassword(administratorPassword)
            .withMemberDatabaseType(SyncMemberDbType.AZURE_SQL_DATABASE)
            .withDatabaseType(SyncDirection.ONE_WAY_MEMBER_TO_HUB)
            .create();
    Assertions.assertNotNull(sqlSyncMember);
    // Flip the sync direction to BIDIRECTIONAL via update().
    sqlSyncMember
        .update()
        .withDatabaseType(SyncDirection.BIDIRECTIONAL)
        .withMemberUserName(administratorLogin)
        .withMemberPassword(administratorPassword)
        .withMemberDatabaseType(SyncMemberDbType.AZURE_SQL_DATABASE)
        .apply();
    Assertions.assertFalse(sqlSyncGroup.syncMembers().list().isEmpty());
    // Re-fetch the member through the full parent path (server/db/group/member).
    sqlSyncMember =
        sqlServerManager
            .sqlServers()
            .syncMembers()
            .getBySqlServer(rgName, sqlServerName, dbSyncName, syncGroupName, syncMemberName);
    Assertions.assertNotNull(sqlSyncMember);
    sqlSyncMember.delete();
    sqlSyncGroup.delete();
}
// Creates a server with hub and sync databases, then exercises create, update, list,
// get-by-parent-path and delete for a SQL sync group.
@Test
public void canCRUDSqlSyncGroup() throws Exception {
    final String dbName = "dbSample";
    final String dbSyncName = "dbSync";
    final String syncGroupName = "groupName";
    final String administratorLogin = "sqladmin";
    final String administratorPassword = password();
    SqlServer sqlPrimaryServer =
        sqlServerManager
            .sqlServers()
            .define(sqlServerName)
            .withRegion(Region.US_EAST)
            .withNewResourceGroup(rgName)
            .withAdministratorLogin(administratorLogin)
            .withAdministratorPassword(administratorPassword)
            .defineDatabase(dbName)
            .fromSample(SampleName.ADVENTURE_WORKS_LT)
            .withStandardEdition(SqlDatabaseStandardServiceObjective.S0)
            .attach()
            .defineDatabase(dbSyncName)
            .withStandardEdition(SqlDatabaseStandardServiceObjective.S0)
            .attach()
            .create();
    SqlDatabase dbSource = sqlPrimaryServer.databases().get(dbName);
    SqlDatabase dbSync = sqlPrimaryServer.databases().get(dbSyncName);
    // Interval -1 disables automatic sync at creation time.
    SqlSyncGroup sqlSyncGroup =
        dbSync
            .syncGroups()
            .define(syncGroupName)
            .withSyncDatabaseId(dbSource.id())
            .withDatabaseUserName(administratorLogin)
            .withDatabasePassword(administratorPassword)
            .withConflictResolutionPolicyHubWins()
            .withInterval(-1)
            .create();
    Assertions.assertNotNull(sqlSyncGroup);
    // Switch to a 600-second interval and member-wins conflict resolution.
    sqlSyncGroup.update().withInterval(600).withConflictResolutionPolicyMemberWins().apply();
    Assertions
        .assertTrue(
            sqlServerManager
                .sqlServers()
                .syncGroups()
                .listSyncDatabaseIds(Region.US_EAST)
                .stream()
                .findAny()
                .isPresent());
    Assertions.assertFalse(dbSync.syncGroups().list().isEmpty());
    sqlSyncGroup =
        sqlServerManager.sqlServers().syncGroups().getBySqlServer(rgName, sqlServerName, dbSyncName, syncGroupName);
    Assertions.assertNotNull(sqlSyncGroup);
    sqlSyncGroup.delete();
}
// Copies a sample database from a primary server (US East 2) to a secondary server
// (US East) in the same resource group using CreateMode.COPY.
@Test
public void canCopySqlDatabase() throws Exception {
    final String sqlPrimaryServerName = generateRandomResourceName("sqlpri", 22);
    final String sqlSecondaryServerName = generateRandomResourceName("sqlsec", 22);
    final String epName = "epSample";
    final String dbName = "dbSample";
    final String administratorLogin = "sqladmin";
    final String administratorPassword = password();
    SqlServer sqlPrimaryServer =
        sqlServerManager
            .sqlServers()
            .define(sqlPrimaryServerName)
            .withRegion(Region.US_EAST2)
            .withNewResourceGroup(rgName)
            .withAdministratorLogin(administratorLogin)
            .withAdministratorPassword(administratorPassword)
            .defineElasticPool(epName)
            .withPremiumPool()
            .attach()
            .defineDatabase(dbName)
            .withExistingElasticPool(epName)
            .fromSample(SampleName.ADVENTURE_WORKS_LT)
            .attach()
            .create();
    SqlServer sqlSecondaryServer =
        sqlServerManager
            .sqlServers()
            .define(sqlSecondaryServerName)
            .withRegion(Region.US_EAST)
            .withExistingResourceGroup(rgName)
            .withAdministratorLogin(administratorLogin)
            .withAdministratorPassword(administratorPassword)
            .create();
    SqlDatabase dbSample = sqlPrimaryServer.databases().get(dbName);
    // The copy targets Premium P1 on the secondary server.
    SqlDatabase dbCopy =
        sqlSecondaryServer
            .databases()
            .define("dbCopy")
            .withSourceDatabase(dbSample)
            .withMode(CreateMode.COPY)
            .withPremiumEdition(SqlDatabasePremiumServiceObjective.P1)
            .create();
    Assertions.assertNotNull(dbCopy);
}
// Creates three servers in different regions and exercises failover groups end to end:
// create (manual and automatic read-write policies), verify both primary and secondary
// views of the group, update policies/tags, add a database, list on both servers, delete.
@Test
public void canCRUDSqlFailoverGroup() throws Exception {
    final String sqlPrimaryServerName = generateRandomResourceName("sqlpri", 22);
    final String sqlSecondaryServerName = generateRandomResourceName("sqlsec", 22);
    final String sqlOtherServerName = generateRandomResourceName("sql000", 22);
    final String failoverGroupName = generateRandomResourceName("fg", 22);
    final String failoverGroupName2 = generateRandomResourceName("fg2", 22);
    final String dbName = "dbSample";
    final String administratorLogin = "sqladmin";
    final String administratorPassword = password();
    SqlServer sqlPrimaryServer =
        sqlServerManager
            .sqlServers()
            .define(sqlPrimaryServerName)
            .withRegion(Region.US_EAST)
            .withNewResourceGroup(rgName)
            .withAdministratorLogin(administratorLogin)
            .withAdministratorPassword(administratorPassword)
            .defineDatabase(dbName)
            .fromSample(SampleName.ADVENTURE_WORKS_LT)
            .withStandardEdition(SqlDatabaseStandardServiceObjective.S0)
            .attach()
            .create();
    SqlServer sqlSecondaryServer =
        sqlServerManager
            .sqlServers()
            .define(sqlSecondaryServerName)
            .withRegion(Region.US_EAST2)
            .withExistingResourceGroup(rgName)
            .withAdministratorLogin(administratorLogin)
            .withAdministratorPassword(administratorPassword)
            .create();
    SqlServer sqlOtherServer =
        sqlServerManager
            .sqlServers()
            .define(sqlOtherServerName)
            .withRegion(Region.US_SOUTH_CENTRAL)
            .withExistingResourceGroup(rgName)
            .withAdministratorLogin(administratorLogin)
            .withAdministratorPassword(administratorPassword)
            .create();
    // Group 1: manual read-write policy, read-only endpoint disabled, secondary partner.
    SqlFailoverGroup failoverGroup =
        sqlPrimaryServer
            .failoverGroups()
            .define(failoverGroupName)
            .withManualReadWriteEndpointPolicy()
            .withPartnerServerId(sqlSecondaryServer.id())
            .withReadOnlyEndpointPolicyDisabled()
            .create();
    Assertions.assertNotNull(failoverGroup);
    Assertions.assertEquals(failoverGroupName, failoverGroup.name());
    Assertions.assertEquals(rgName, failoverGroup.resourceGroupName());
    Assertions.assertEquals(sqlPrimaryServerName, failoverGroup.sqlServerName());
    Assertions.assertEquals(FailoverGroupReplicationRole.PRIMARY, failoverGroup.replicationRole());
    Assertions.assertEquals(1, failoverGroup.partnerServers().size());
    Assertions.assertEquals(sqlSecondaryServer.id(), failoverGroup.partnerServers().get(0).id());
    Assertions
        .assertEquals(
            FailoverGroupReplicationRole.SECONDARY, failoverGroup.partnerServers().get(0).replicationRole());
    Assertions.assertEquals(0, failoverGroup.databases().size());
    Assertions.assertEquals(0, failoverGroup.readWriteEndpointDataLossGracePeriodMinutes());
    Assertions.assertEquals(ReadWriteEndpointFailoverPolicy.MANUAL, failoverGroup.readWriteEndpointPolicy());
    Assertions.assertEquals(ReadOnlyEndpointFailoverPolicy.DISABLED, failoverGroup.readOnlyEndpointPolicy());
    // The same group seen from the secondary server must mirror the roles.
    SqlFailoverGroup failoverGroupOnPartner = sqlSecondaryServer.failoverGroups().get(failoverGroup.name());
    Assertions.assertEquals(failoverGroupName, failoverGroupOnPartner.name());
    Assertions.assertEquals(rgName, failoverGroupOnPartner.resourceGroupName());
    Assertions.assertEquals(sqlSecondaryServerName, failoverGroupOnPartner.sqlServerName());
    Assertions.assertEquals(FailoverGroupReplicationRole.SECONDARY, failoverGroupOnPartner.replicationRole());
    Assertions.assertEquals(1, failoverGroupOnPartner.partnerServers().size());
    Assertions.assertEquals(sqlPrimaryServer.id(), failoverGroupOnPartner.partnerServers().get(0).id());
    Assertions
        .assertEquals(
            FailoverGroupReplicationRole.PRIMARY, failoverGroupOnPartner.partnerServers().get(0).replicationRole());
    Assertions.assertEquals(0, failoverGroupOnPartner.databases().size());
    Assertions.assertEquals(0, failoverGroupOnPartner.readWriteEndpointDataLossGracePeriodMinutes());
    Assertions
        .assertEquals(ReadWriteEndpointFailoverPolicy.MANUAL, failoverGroupOnPartner.readWriteEndpointPolicy());
    Assertions
        .assertEquals(ReadOnlyEndpointFailoverPolicy.DISABLED, failoverGroupOnPartner.readOnlyEndpointPolicy());
    // Group 2: automatic read-write policy with a 120-minute grace period, read-only enabled.
    SqlFailoverGroup failoverGroup2 =
        sqlPrimaryServer
            .failoverGroups()
            .define(failoverGroupName2)
            .withAutomaticReadWriteEndpointPolicyAndDataLossGracePeriod(120)
            .withPartnerServerId(sqlOtherServer.id())
            .withReadOnlyEndpointPolicyEnabled()
            .create();
    Assertions.assertNotNull(failoverGroup2);
    Assertions.assertEquals(failoverGroupName2, failoverGroup2.name());
    Assertions.assertEquals(rgName, failoverGroup2.resourceGroupName());
    Assertions.assertEquals(sqlPrimaryServerName, failoverGroup2.sqlServerName());
    Assertions.assertEquals(FailoverGroupReplicationRole.PRIMARY, failoverGroup2.replicationRole());
    Assertions.assertEquals(1, failoverGroup2.partnerServers().size());
    Assertions.assertEquals(sqlOtherServer.id(), failoverGroup2.partnerServers().get(0).id());
    Assertions
        .assertEquals(
            FailoverGroupReplicationRole.SECONDARY, failoverGroup2.partnerServers().get(0).replicationRole());
    Assertions.assertEquals(0, failoverGroup2.databases().size());
    Assertions.assertEquals(120, failoverGroup2.readWriteEndpointDataLossGracePeriodMinutes());
    Assertions.assertEquals(ReadWriteEndpointFailoverPolicy.AUTOMATIC, failoverGroup2.readWriteEndpointPolicy());
    Assertions.assertEquals(ReadOnlyEndpointFailoverPolicy.ENABLED, failoverGroup2.readOnlyEndpointPolicy());
    // Update group 1 to automatic policy + tag, verify, then back to manual while adding a DB.
    failoverGroup
        .update()
        .withAutomaticReadWriteEndpointPolicyAndDataLossGracePeriod(120)
        .withReadOnlyEndpointPolicyEnabled()
        .withTag("tag1", "value1")
        .apply();
    Assertions.assertEquals(120, failoverGroup.readWriteEndpointDataLossGracePeriodMinutes());
    Assertions.assertEquals(ReadWriteEndpointFailoverPolicy.AUTOMATIC, failoverGroup.readWriteEndpointPolicy());
    Assertions.assertEquals(ReadOnlyEndpointFailoverPolicy.ENABLED, failoverGroup.readOnlyEndpointPolicy());
    SqlDatabase db = sqlPrimaryServer.databases().get(dbName);
    failoverGroup
        .update()
        .withManualReadWriteEndpointPolicy()
        .withReadOnlyEndpointPolicyDisabled()
        .withNewDatabaseId(db.id())
        .apply();
    Assertions.assertEquals(1, failoverGroup.databases().size());
    Assertions.assertEquals(db.id(), failoverGroup.databases().get(0));
    Assertions.assertEquals(0, failoverGroup.readWriteEndpointDataLossGracePeriodMinutes());
    Assertions.assertEquals(ReadWriteEndpointFailoverPolicy.MANUAL, failoverGroup.readWriteEndpointPolicy());
    Assertions.assertEquals(ReadOnlyEndpointFailoverPolicy.DISABLED, failoverGroup.readOnlyEndpointPolicy());
    // Primary sees both groups; the secondary only participates in group 1.
    List<SqlFailoverGroup> failoverGroupsList = sqlPrimaryServer.failoverGroups().list();
    Assertions.assertEquals(2, failoverGroupsList.size());
    failoverGroupsList = sqlSecondaryServer.failoverGroups().list();
    Assertions.assertEquals(1, failoverGroupsList.size());
    sqlPrimaryServer.failoverGroups().delete(failoverGroup2.name());
}
// Exercises automatic-tuning configuration at both server and database level: reads the
// defaults, updates individual tuning options, and verifies desired vs. actual states.
@Test
public void canChangeSqlServerAndDatabaseAutomaticTuning() throws Exception {
    String sqlServerAdminName = "sqladmin";
    String sqlServerAdminPassword = password();
    String databaseName = "db-from-sample";
    // NOTE(review): id and storageName appear unused in this method — likely leftovers.
    // Removing the generateRandom* calls may shift the recorded random-name sequence in
    // playback mode, so confirm against the session records before cleaning up.
    String id = generateRandomUuid();
    String storageName = generateRandomResourceName(sqlServerName, 22);
    SqlServer sqlServer =
        sqlServerManager
            .sqlServers()
            .define(sqlServerName)
            .withRegion(Region.US_EAST)
            .withNewResourceGroup(rgName)
            .withAdministratorLogin(sqlServerAdminName)
            .withAdministratorPassword(sqlServerAdminPassword)
            .defineDatabase(databaseName)
            .fromSample(SampleName.ADVENTURE_WORKS_LT)
            .withBasicEdition()
            .attach()
            .create();
    SqlDatabase dbFromSample = sqlServer.databases().get(databaseName);
    Assertions.assertNotNull(dbFromSample);
    Assertions.assertEquals(DatabaseEdition.BASIC, dbFromSample.edition());
    // Server-level tuning defaults: AUTO mode with four tuning options.
    SqlServerAutomaticTuning serverAutomaticTuning = sqlServer.getServerAutomaticTuning();
    Assertions.assertEquals(AutomaticTuningServerMode.AUTO, serverAutomaticTuning.desiredState());
    Assertions.assertEquals(AutomaticTuningServerMode.AUTO, serverAutomaticTuning.actualState());
    Assertions.assertEquals(4, serverAutomaticTuning.tuningOptions().size());
    serverAutomaticTuning
        .update()
        .withAutomaticTuningMode(AutomaticTuningServerMode.AUTO)
        .withAutomaticTuningOption("createIndex", AutomaticTuningOptionModeDesired.OFF)
        .withAutomaticTuningOption("dropIndex", AutomaticTuningOptionModeDesired.ON)
        .withAutomaticTuningOption("forceLastGoodPlan", AutomaticTuningOptionModeDesired.DEFAULT)
        .apply();
    Assertions.assertEquals(AutomaticTuningServerMode.AUTO, serverAutomaticTuning.desiredState());
    Assertions.assertEquals(AutomaticTuningServerMode.AUTO, serverAutomaticTuning.actualState());
    // Desired and actual states of each updated option must both reflect the change.
    Assertions
        .assertEquals(
            AutomaticTuningOptionModeDesired.OFF,
            serverAutomaticTuning.tuningOptions().get("createIndex").desiredState());
    Assertions
        .assertEquals(
            AutomaticTuningOptionModeActual.OFF,
            serverAutomaticTuning.tuningOptions().get("createIndex").actualState());
    Assertions
        .assertEquals(
            AutomaticTuningOptionModeDesired.ON,
            serverAutomaticTuning.tuningOptions().get("dropIndex").desiredState());
    Assertions
        .assertEquals(
            AutomaticTuningOptionModeActual.ON,
            serverAutomaticTuning.tuningOptions().get("dropIndex").actualState());
    Assertions
        .assertEquals(
            AutomaticTuningOptionModeDesired.DEFAULT,
            serverAutomaticTuning.tuningOptions().get("forceLastGoodPlan").desiredState());
    // Database-level tuning: same option updates, verified the same way.
    SqlDatabaseAutomaticTuning databaseAutomaticTuning = dbFromSample.getDatabaseAutomaticTuning();
    Assertions.assertEquals(4, databaseAutomaticTuning.tuningOptions().size());
    databaseAutomaticTuning
        .update()
        .withAutomaticTuningMode(AutomaticTuningMode.AUTO)
        .withAutomaticTuningOption("createIndex", AutomaticTuningOptionModeDesired.OFF)
        .withAutomaticTuningOption("dropIndex", AutomaticTuningOptionModeDesired.ON)
        .withAutomaticTuningOption("forceLastGoodPlan", AutomaticTuningOptionModeDesired.DEFAULT)
        .apply();
    Assertions.assertEquals(AutomaticTuningMode.AUTO, databaseAutomaticTuning.desiredState());
    Assertions.assertEquals(AutomaticTuningMode.AUTO, databaseAutomaticTuning.actualState());
    Assertions
        .assertEquals(
            AutomaticTuningOptionModeDesired.OFF,
            databaseAutomaticTuning.tuningOptions().get("createIndex").desiredState());
    Assertions
        .assertEquals(
            AutomaticTuningOptionModeActual.OFF,
            databaseAutomaticTuning.tuningOptions().get("createIndex").actualState());
    Assertions
        .assertEquals(
            AutomaticTuningOptionModeDesired.ON,
            databaseAutomaticTuning.tuningOptions().get("dropIndex").desiredState());
    Assertions
        .assertEquals(
            AutomaticTuningOptionModeActual.ON,
            databaseAutomaticTuning.tuningOptions().get("dropIndex").actualState());
    Assertions
        .assertEquals(
            AutomaticTuningOptionModeDesired.DEFAULT,
            databaseAutomaticTuning.tuningOptions().get("forceLastGoodPlan").desiredState());
    dbFromSample.delete();
    sqlServerManager.sqlServers().deleteByResourceGroup(rgName, sqlServerName);
}
// Creates a DNS alias on one server, then acquires it onto a second server and verifies
// the alias moved. (Method name keeps the historical "Aquire" spelling; renaming a public
// test method would break recorded sessions keyed by it.)
@Test
public void canCreateAndAquireServerDnsAlias() throws Exception {
    String sqlServerName1 = sqlServerName + "1";
    String sqlServerName2 = sqlServerName + "2";
    String sqlServerAdminName = "sqladmin";
    String sqlServerAdminPassword = password();
    SqlServer sqlServer1 =
        sqlServerManager
            .sqlServers()
            .define(sqlServerName1)
            .withRegion(Region.US_EAST)
            .withNewResourceGroup(rgName)
            .withAdministratorLogin(sqlServerAdminName)
            .withAdministratorPassword(sqlServerAdminPassword)
            .create();
    Assertions.assertNotNull(sqlServer1);
    SqlServerDnsAlias dnsAlias = sqlServer1.dnsAliases().define(sqlServerName).create();
    Assertions.assertNotNull(dnsAlias);
    Assertions.assertEquals(rgName, dnsAlias.resourceGroupName());
    Assertions.assertEquals(sqlServerName1, dnsAlias.sqlServerName());
    dnsAlias = sqlServerManager.sqlServers().dnsAliases().getBySqlServer(rgName, sqlServerName1, sqlServerName);
    Assertions.assertNotNull(dnsAlias);
    Assertions.assertEquals(rgName, dnsAlias.resourceGroupName());
    Assertions.assertEquals(sqlServerName1, dnsAlias.sqlServerName());
    Assertions.assertEquals(1, sqlServer1.databases().list().size());
    SqlServer sqlServer2 =
        sqlServerManager
            .sqlServers()
            .define(sqlServerName2)
            .withRegion(Region.US_EAST)
            .withNewResourceGroup(rgName)
            .withAdministratorLogin(sqlServerAdminName)
            .withAdministratorPassword(sqlServerAdminPassword)
            .create();
    Assertions.assertNotNull(sqlServer2);
    // Acquire moves the alias from server 1 to server 2; the transfer is asynchronous,
    // hence the sleep before re-reading the alias.
    sqlServer2.dnsAliases().acquire(sqlServerName, sqlServer1.id());
    ResourceManagerUtils.sleep(Duration.ofMinutes(3));
    dnsAlias = sqlServer2.dnsAliases().get(sqlServerName);
    Assertions.assertNotNull(dnsAlias);
    Assertions.assertEquals(rgName, dnsAlias.resourceGroupName());
    Assertions.assertEquals(sqlServerName2, dnsAlias.sqlServerName());
    dnsAlias.delete();
    sqlServerManager.sqlServers().deleteByResourceGroup(rgName, sqlServerName1);
    sqlServerManager.sqlServers().deleteByResourceGroup(rgName, sqlServerName2);
}
// Verifies region capability queries for server version 12.0, then creates a server with a
// system-assigned managed identity and confirms the identity survives an update().
@Test
public void canGetSqlServerCapabilitiesAndCreateIdentity() throws Exception {
    String sqlServerAdminName = "sqladmin";
    String sqlServerAdminPassword = password();
    String databaseName = "db-from-sample";
    RegionCapabilities regionCapabilities = sqlServerManager.sqlServers().getCapabilitiesByRegion(Region.US_EAST);
    Assertions.assertNotNull(regionCapabilities);
    Assertions.assertNotNull(regionCapabilities.supportedCapabilitiesByServerVersion().get("12.0"));
    Assertions
        .assertTrue(
            regionCapabilities.supportedCapabilitiesByServerVersion().get("12.0").supportedEditions().size() > 0);
    Assertions
        .assertTrue(
            regionCapabilities
                .supportedCapabilitiesByServerVersion()
                .get("12.0")
                .supportedElasticPoolEditions()
                .size()
                > 0);
    SqlServer sqlServer =
        sqlServerManager
            .sqlServers()
            .define(sqlServerName)
            .withRegion(Region.US_EAST)
            .withNewResourceGroup(rgName)
            .withAdministratorLogin(sqlServerAdminName)
            .withAdministratorPassword(sqlServerAdminPassword)
            .withSystemAssignedManagedServiceIdentity()
            .defineDatabase(databaseName)
            .fromSample(SampleName.ADVENTURE_WORKS_LT)
            .withBasicEdition()
            .attach()
            .create();
    SqlDatabase dbFromSample = sqlServer.databases().get(databaseName);
    Assertions.assertNotNull(dbFromSample);
    Assertions.assertEquals(DatabaseEdition.BASIC, dbFromSample.edition());
    Assertions.assertTrue(sqlServer.isManagedServiceIdentityEnabled());
    Assertions.assertEquals(sqlServerManager.tenantId(), sqlServer.systemAssignedManagedServiceIdentityTenantId());
    Assertions.assertNotNull(sqlServer.systemAssignedManagedServiceIdentityPrincipalId());
    // Re-applying the identity through update() must keep it enabled with the same tenant.
    sqlServer.update().withSystemAssignedManagedServiceIdentity().apply();
    Assertions.assertTrue(sqlServer.isManagedServiceIdentityEnabled());
    Assertions.assertEquals(sqlServerManager.tenantId(), sqlServer.systemAssignedManagedServiceIdentityTenantId());
    Assertions.assertNotNull(sqlServer.systemAssignedManagedServiceIdentityPrincipalId());
    dbFromSample.delete();
    sqlServerManager.sqlServers().deleteByResourceGroup(rgName, sqlServerName);
}
    /**
     * Exercises database export to a storage account and re-import into a new database inside an
     * elastic pool. Skipped in playback mode — presumably because the storage interaction cannot be
     * replayed from recordings (TODO confirm).
     */
    @Test
    public void canCRUDSqlServerWithImportDatabase() throws Exception {
        if (isPlaybackMode()) {
            return;
        }
        String sqlServerAdminName = "sqladmin";
        String sqlServerAdminPassword = password();
        String id = generateRandomUuid();
        String storageName = generateRandomResourceName(sqlServerName, 22);
        // Server with an Active Directory administrator set at creation time.
        SqlServer sqlServer =
            sqlServerManager
                .sqlServers()
                .define(sqlServerName)
                .withRegion(Region.US_EAST)
                .withNewResourceGroup(rgName)
                .withAdministratorLogin(sqlServerAdminName)
                .withAdministratorPassword(sqlServerAdminPassword)
                .withActiveDirectoryAdministrator("DSEng", id)
                .create();
        // Source database to export: Basic edition, seeded from the AdventureWorksLT sample.
        SqlDatabase dbFromSample =
            sqlServer
                .databases()
                .define("db-from-sample")
                .fromSample(SampleName.ADVENTURE_WORKS_LT)
                .withBasicEdition()
                .withTag("tag1", "value1")
                .create();
        Assertions.assertNotNull(dbFromSample);
        Assertions.assertEquals(DatabaseEdition.BASIC, dbFromSample.edition());
        SqlDatabaseImportExportResponse exportedDB;
        StorageAccount storageAccount = null;
        // A 404 here simply means the storage account does not exist yet; any other error fails.
        try {
            storageAccount =
                storageManager.storageAccounts().getByResourceGroup(sqlServer.resourceGroupName(), storageName);
        } catch (ManagementException e) {
            Assertions.assertEquals(404, e.getResponse().getStatusCode());
        }
        if (storageAccount == null) {
            // Export can create the storage account on the fly via a Creatable.
            Creatable<StorageAccount> storageAccountCreatable =
                storageManager
                    .storageAccounts()
                    .define(storageName)
                    .withRegion(sqlServer.regionName())
                    .withExistingResourceGroup(sqlServer.resourceGroupName());
            exportedDB =
                dbFromSample
                    .exportTo(storageAccountCreatable, "from-sample", "dbfromsample.bacpac")
                    .withSqlAdministratorLoginAndPassword(sqlServerAdminName, sqlServerAdminPassword)
                    .execute();
            storageAccount =
                storageManager.storageAccounts().getByResourceGroup(sqlServer.resourceGroupName(), storageName);
        } else {
            // Storage account already exists; export directly into it.
            exportedDB =
                dbFromSample
                    .exportTo(storageAccount, "from-sample", "dbfromsample.bacpac")
                    .withSqlAdministratorLoginAndPassword(sqlServerAdminName, sqlServerAdminPassword)
                    .execute();
        }
        // Import the exported bacpac into a new database placed in a newly defined elastic pool.
        SqlDatabase dbFromImport =
            sqlServer
                .databases()
                .define("db-from-import")
                .defineElasticPool("ep1")
                .withBasicPool()
                .attach()
                .importFrom(storageAccount, "from-sample", "dbfromsample.bacpac")
                .withSqlAdministratorLoginAndPassword(sqlServerAdminName, sqlServerAdminPassword)
                .withTag("tag2", "value2")
                .create();
        Assertions.assertNotNull(dbFromImport);
        Assertions.assertEquals("ep1", dbFromImport.elasticPoolName());
        // Clean up databases, the pool, then the server.
        dbFromImport.delete();
        dbFromSample.delete();
        sqlServer.elasticPools().delete("ep1");
        sqlServerManager.sqlServers().deleteByResourceGroup(rgName, sqlServerName);
    }
@Test
@Disabled("Depends on the existing SQL server")
@Test
public void canListRecommendedElasticPools() throws Exception {
SqlServer sqlServer = sqlServerManager.sqlServers().getByResourceGroup("ans", "ans-secondary");
sqlServer
.databases()
.list()
.get(0)
.listServiceTierAdvisors()
.values()
.iterator()
.next()
.serviceLevelObjectiveUsageMetric();
Map<String, RecommendedElasticPool> recommendedElasticPools = sqlServer.listRecommendedElasticPools();
Assertions.assertNotNull(recommendedElasticPools);
}
    /**
     * Full server lifecycle: name-availability check, create, service-objective lookup, password
     * update, list-by-resource-group, get, delete, and not-found verification.
     */
    @Test
    public void canCRUDSqlServer() throws Exception {
        // Name must be free before create and taken (ALREADY_EXISTS) afterwards.
        CheckNameAvailabilityResult checkNameResult =
            sqlServerManager.sqlServers().checkNameAvailability(sqlServerName);
        Assertions.assertTrue(checkNameResult.isAvailable());
        SqlServer sqlServer = createSqlServer();
        validateSqlServer(sqlServer);
        checkNameResult = sqlServerManager.sqlServers().checkNameAvailability(sqlServerName);
        Assertions.assertFalse(checkNameResult.isAvailable());
        Assertions
            .assertEquals(
                CheckNameAvailabilityReason.ALREADY_EXISTS.toString(), checkNameResult.unavailabilityReason());
        // Service objectives must be listable, refreshable, and retrievable by id.
        List<ServiceObjective> serviceObjectives = sqlServer.listServiceObjectives();
        Assertions.assertNotEquals(serviceObjectives.size(), 0);
        Assertions.assertNotNull(serviceObjectives.get(0).refresh());
        Assertions.assertNotNull(sqlServer.getServiceObjective("d1737d22-a8ea-4de7-9bd0-33395d2a7419"));
        sqlServer.update().withAdministratorPassword("P@ssword~2").apply();
        // The server must show up when listing its resource group.
        PagedIterable<SqlServer> sqlServers = sqlServerManager.sqlServers().listByResourceGroup(rgName);
        boolean found = false;
        for (SqlServer server : sqlServers) {
            if (server.name().equals(sqlServerName)) {
                found = true;
            }
        }
        Assertions.assertTrue(found);
        sqlServer = sqlServerManager.sqlServers().getByResourceGroup(rgName, sqlServerName);
        Assertions.assertNotNull(sqlServer);
        // Delete and confirm the server is gone.
        sqlServerManager.sqlServers().deleteByResourceGroup(sqlServer.resourceGroupName(), sqlServer.name());
        validateSqlServerNotFound(sqlServer);
    }
    /**
     * Creates databases, elastic pools, and firewall rules inline as part of a single server
     * define/create call, then repeats the same shapes via update(), validating both paths with
     * {@link #validateMultiCreation}.
     */
    @Test
    public void canUseCoolShortcutsForResourceCreation() throws Exception {
        String database2Name = "database2";
        String database1InEPName = "database1InEP";
        String database2InEPName = "database2InEP";
        String elasticPool2Name = "elasticPool2";
        String elasticPool3Name = "elasticPool3";
        String elasticPool1Name = SQL_ELASTIC_POOL_NAME;
        // One create() call provisions the server plus 4 databases, 3 pools, and 3 firewall rules.
        SqlServer sqlServer =
            sqlServerManager
                .sqlServers()
                .define(sqlServerName)
                .withRegion(Region.US_EAST)
                .withNewResourceGroup(rgName)
                .withAdministratorLogin("userName")
                .withAdministratorPassword("Password~1")
                .withoutAccessFromAzureServices()
                .defineDatabase(SQL_DATABASE_NAME).attach()
                .defineDatabase(database2Name).attach()
                .defineElasticPool(elasticPool1Name).withStandardPool().attach()
                .defineElasticPool(elasticPool2Name).withPremiumPool().attach()
                .defineElasticPool(elasticPool3Name).withStandardPool().attach()
                .defineDatabase(database1InEPName).withExistingElasticPool(elasticPool2Name).attach()
                .defineDatabase(database2InEPName).withExistingElasticPool(elasticPool2Name).attach()
                .defineFirewallRule(SQL_FIREWALLRULE_NAME).withIpAddressRange(START_IPADDRESS, END_IPADDRESS).attach()
                .defineFirewallRule(generateRandomResourceName("firewall_", 15)).withIpAddressRange(START_IPADDRESS, END_IPADDRESS).attach()
                .defineFirewallRule(generateRandomResourceName("firewall_", 15)).withIpAddress(START_IPADDRESS).attach()
                .create();
        validateMultiCreation(
            database2Name,
            database1InEPName,
            database2InEPName,
            elasticPool1Name,
            elasticPool2Name,
            elasticPool3Name,
            sqlServer,
            false);
        // Second round with fresh names exercised through update() instead of create().
        // NOTE(review): elasticPool1Name intentionally(?) contains a space (" U") — confirm the
        // service accepts this; the other round-two names use a plain "U" suffix.
        elasticPool1Name = SQL_ELASTIC_POOL_NAME + " U";
        database2Name = "database2U";
        database1InEPName = "database1InEPU";
        database2InEPName = "database2InEPU";
        elasticPool2Name = "elasticPool2U";
        elasticPool3Name = "elasticPool3U";
        sqlServer =
            sqlServer
                .update()
                .defineDatabase(SQL_DATABASE_NAME).attach()
                .defineDatabase(database2Name).attach()
                .defineElasticPool(elasticPool1Name).withStandardPool().attach()
                .defineElasticPool(elasticPool2Name).withPremiumPool().attach()
                .defineElasticPool(elasticPool3Name).withStandardPool().attach()
                .defineDatabase(database1InEPName).withExistingElasticPool(elasticPool2Name).attach()
                .defineDatabase(database2InEPName).withExistingElasticPool(elasticPool2Name).attach()
                .defineFirewallRule(SQL_FIREWALLRULE_NAME).withIpAddressRange(START_IPADDRESS, END_IPADDRESS).attach()
                .defineFirewallRule(generateRandomResourceName("firewall_", 15)).withIpAddressRange(START_IPADDRESS, END_IPADDRESS).attach()
                .defineFirewallRule(generateRandomResourceName("firewall_", 15)).withIpAddress(START_IPADDRESS).attach()
                .withTag("tag2", "value2")
                .apply();
        validateMultiCreation(
            database2Name,
            database1InEPName,
            database2InEPName,
            elasticPool1Name,
            elasticPool2Name,
            elasticPool3Name,
            sqlServer,
            true);
        // validateMultiCreation deletes the child resources; after refresh no pools remain.
        sqlServer.refresh();
        Assertions.assertEquals(sqlServer.elasticPools().list().size(), 0);
        PagedIterable<SqlServer> sqlServers = sqlServerManager.sqlServers().listByResourceGroup(rgName);
        boolean found = false;
        for (SqlServer server : sqlServers) {
            if (server.name().equals(sqlServerName)) {
                found = true;
            }
        }
        Assertions.assertTrue(found);
        sqlServer = sqlServerManager.sqlServers().getByResourceGroup(rgName, sqlServerName);
        Assertions.assertNotNull(sqlServer);
        sqlServerManager.sqlServers().deleteByResourceGroup(sqlServer.resourceGroupName(), sqlServer.name());
        validateSqlServerNotFound(sqlServer);
    }
    /**
     * Database lifecycle: create, transparent data encryption toggle, service-tier advisors,
     * copy into a new elastic pool, rename, and delete.
     */
    @Test
    public void canCRUDSqlDatabase() throws Exception {
        SqlServer sqlServer = createSqlServer();
        Mono<SqlDatabase> resourceStream =
            sqlServer.databases().define(SQL_DATABASE_NAME).withStandardEdition(SqlDatabaseStandardServiceObjective.S0).createAsync();
        SqlDatabase sqlDatabase = resourceStream.block();
        validateSqlDatabase(sqlDatabase, SQL_DATABASE_NAME);
        Assertions.assertTrue(sqlServer.databases().list().size() > 0);
        // Transparent data encryption: enable, list activities, then disable again.
        TransparentDataEncryption transparentDataEncryption = sqlDatabase.getTransparentDataEncryption();
        Assertions.assertNotNull(transparentDataEncryption.status());
        List<TransparentDataEncryptionActivity> transparentDataEncryptionActivities =
            transparentDataEncryption.listActivities();
        Assertions.assertNotNull(transparentDataEncryptionActivities);
        transparentDataEncryption = transparentDataEncryption.updateStatus(TransparentDataEncryptionStatus.ENABLED);
        Assertions.assertNotNull(transparentDataEncryption);
        Assertions.assertEquals(transparentDataEncryption.status(), TransparentDataEncryptionStatus.ENABLED);
        transparentDataEncryptionActivities = transparentDataEncryption.listActivities();
        Assertions.assertNotNull(transparentDataEncryptionActivities);
        // Brief pause before flipping the status back, to let the previous update settle.
        ResourceManagerUtils.sleep(Duration.ofSeconds(10));
        transparentDataEncryption =
            sqlDatabase.getTransparentDataEncryption().updateStatus(TransparentDataEncryptionStatus.DISABLED);
        Assertions.assertNotNull(transparentDataEncryption);
        Assertions.assertEquals(transparentDataEncryption.status(), TransparentDataEncryptionStatus.DISABLED);
        Assertions.assertEquals(transparentDataEncryption.sqlServerName(), sqlServerName);
        Assertions.assertEquals(transparentDataEncryption.databaseName(), SQL_DATABASE_NAME);
        Assertions.assertNotNull(transparentDataEncryption.name());
        Assertions.assertNotNull(transparentDataEncryption.id());
        // Service-tier advisors must be listable and refreshable.
        Map<String, ServiceTierAdvisor> serviceTierAdvisors = sqlDatabase.listServiceTierAdvisors();
        Assertions.assertNotNull(serviceTierAdvisors);
        Assertions.assertNotNull(serviceTierAdvisors.values().iterator().next().serviceLevelObjectiveUsageMetric());
        Assertions.assertNotEquals(serviceTierAdvisors.size(), 0);
        Assertions.assertNotNull(serviceTierAdvisors.values().iterator().next().refresh());
        Assertions.assertNotNull(serviceTierAdvisors.values().iterator().next().serviceLevelObjectiveUsageMetric());
        sqlServer = sqlServerManager.sqlServers().getByResourceGroup(rgName, sqlServerName);
        validateSqlServer(sqlServer);
        // Copy the database into a newly created elastic pool (CreateMode.COPY).
        Creatable<SqlElasticPool> sqlElasticPoolCreatable =
            sqlServer.elasticPools().define(SQL_ELASTIC_POOL_NAME).withStandardPool();
        String anotherDatabaseName = "anotherDatabase";
        SqlDatabase anotherDatabase =
            sqlServer
                .databases()
                .define(anotherDatabaseName)
                .withNewElasticPool(sqlElasticPoolCreatable)
                .withSourceDatabase(sqlDatabase.id())
                .withMode(CreateMode.COPY)
                .create();
        validateSqlDatabaseWithElasticPool(anotherDatabase, anotherDatabaseName);
        sqlServer.databases().delete(anotherDatabase.name());
        validateSqlDatabase(sqlServer.databases().get(SQL_DATABASE_NAME), SQL_DATABASE_NAME);
        validateListSqlDatabase(sqlServer.databases().list());
        sqlServer.databases().delete(SQL_DATABASE_NAME);
        validateSqlDatabaseNotFound(SQL_DATABASE_NAME);
        // Create one more database asynchronously and verify rename works.
        resourceStream =
            sqlServer
                .databases()
                .define("newDatabase")
                .withCollation(COLLATION)
                .withStandardEdition(SqlDatabaseStandardServiceObjective.S0)
                .createAsync();
        sqlDatabase = resourceStream.block();
        sqlDatabase = sqlDatabase.rename("renamedDatabase");
        validateSqlDatabase(sqlDatabase, "renamedDatabase");
        sqlServer.databases().delete(sqlDatabase.name());
        sqlServerManager.sqlServers().deleteByResourceGroup(sqlServer.resourceGroupName(), sqlServer.name());
        validateSqlServerNotFound(sqlServer);
    }
    /**
     * Creates a primary database on one server and an online secondary on another, then verifies
     * replication links on both sides, performs failover and forced failover, and deletes the link.
     */
    @Test
    public void canManageReplicationLinks() throws Exception {
        String anotherSqlServerName = sqlServerName + "another";
        SqlServer sqlServer1 = createSqlServer();
        SqlServer sqlServer2 = createSqlServer(anotherSqlServerName);
        Mono<SqlDatabase> resourceStream =
            sqlServer1
                .databases()
                .define(SQL_DATABASE_NAME)
                .withCollation(COLLATION)
                .withStandardEdition(SqlDatabaseStandardServiceObjective.S0)
                .createAsync();
        SqlDatabase databaseInServer1 = resourceStream.block();
        validateSqlDatabase(databaseInServer1, SQL_DATABASE_NAME);
        // Secondary database replicated from the primary (ONLINE_SECONDARY).
        SqlDatabase databaseInServer2 =
            sqlServer2
                .databases()
                .define(SQL_DATABASE_NAME)
                .withSourceDatabase(databaseInServer1.id())
                .withMode(CreateMode.ONLINE_SECONDARY)
                .create();
        ResourceManagerUtils.sleep(Duration.ofSeconds(2));
        // Each side must report exactly one link pointing at the other.
        List<ReplicationLink> replicationLinksInDb1 =
            new ArrayList<>(databaseInServer1.listReplicationLinks().values());
        Assertions.assertEquals(replicationLinksInDb1.size(), 1);
        Assertions.assertEquals(replicationLinksInDb1.get(0).partnerDatabase(), databaseInServer2.name());
        Assertions.assertEquals(replicationLinksInDb1.get(0).partnerServer(), databaseInServer2.sqlServerName());
        List<ReplicationLink> replicationLinksInDb2 =
            new ArrayList<>(databaseInServer2.listReplicationLinks().values());
        Assertions.assertEquals(replicationLinksInDb2.size(), 1);
        Assertions.assertEquals(replicationLinksInDb2.get(0).partnerDatabase(), databaseInServer1.name());
        Assertions.assertEquals(replicationLinksInDb2.get(0).partnerServer(), databaseInServer1.sqlServerName());
        Assertions.assertNotNull(replicationLinksInDb1.get(0).refresh());
        // Planned failover from the secondary, then a forced (data-loss-allowed) failover back.
        replicationLinksInDb2.get(0).failover();
        replicationLinksInDb2.get(0).refresh();
        ResourceManagerUtils.sleep(Duration.ofSeconds(30));
        replicationLinksInDb1.get(0).forceFailoverAllowDataLoss();
        replicationLinksInDb1.get(0).refresh();
        ResourceManagerUtils.sleep(Duration.ofSeconds(30));
        // Deleting the link removes replication entirely.
        replicationLinksInDb2.get(0).delete();
        Assertions.assertEquals(databaseInServer2.listReplicationLinks().size(), 0);
        sqlServer1.databases().delete(databaseInServer1.name());
        sqlServer2.databases().delete(databaseInServer2.name());
        sqlServerManager.sqlServers().deleteByResourceGroup(sqlServer2.resourceGroupName(), sqlServer2.name());
        validateSqlServerNotFound(sqlServer2);
        sqlServerManager.sqlServers().deleteByResourceGroup(sqlServer1.resourceGroupName(), sqlServer1.name());
        validateSqlServerNotFound(sqlServer1);
    }
    /**
     * Creates a data-warehouse-SKU database, casts it to {@code SqlWarehouse}, lists restore
     * points and usage metrics, and pauses/resumes the warehouse.
     */
    @Test
    public void canDoOperationsOnDataWarehouse() throws Exception {
        SqlServer sqlServer = createSqlServer();
        validateSqlServer(sqlServer);
        Assertions.assertNotNull(sqlServer.listUsageMetrics());
        // The DW SKU makes this a data warehouse rather than a regular database.
        Mono<SqlDatabase> resourceStream =
            sqlServer
                .databases()
                .define(SQL_DATABASE_NAME)
                .withCollation(COLLATION)
                .withSku(DatabaseSku.DATAWAREHOUSE_DW1000C)
                .createAsync();
        SqlDatabase sqlDatabase = resourceStream.block();
        Assertions.assertNotNull(sqlDatabase);
        sqlDatabase = sqlServer.databases().get(SQL_DATABASE_NAME);
        Assertions.assertNotNull(sqlDatabase);
        Assertions.assertTrue(sqlDatabase.isDataWarehouse());
        SqlWarehouse dataWarehouse = sqlServer.databases().get(SQL_DATABASE_NAME).asWarehouse();
        Assertions.assertNotNull(dataWarehouse);
        Assertions.assertEquals(dataWarehouse.name(), SQL_DATABASE_NAME);
        Assertions.assertEquals(dataWarehouse.edition(), DatabaseEdition.DATA_WAREHOUSE);
        Assertions.assertNotNull(dataWarehouse.listRestorePoints());
        Assertions.assertNotNull(dataWarehouse.listUsageMetrics());
        // Pause and resume are warehouse-specific operations.
        dataWarehouse.pauseDataWarehouse();
        dataWarehouse.resumeDataWarehouse();
        sqlServer.databases().delete(SQL_DATABASE_NAME);
        sqlServerManager.sqlServers().deleteByResourceGroup(sqlServer.resourceGroupName(), sqlServer.name());
        validateSqlServerNotFound(sqlServer);
    }
    /**
     * Database-in-elastic-pool lifecycle: create with a new pool, detach/re-attach, edition and
     * service-objective upgrades, max-size update, and pool database listing.
     */
    @Test
    public void canCRUDSqlDatabaseWithElasticPool() throws Exception {
        SqlServer sqlServer = createSqlServer();
        Creatable<SqlElasticPool> sqlElasticPoolCreatable =
            sqlServer
                .elasticPools()
                .define(SQL_ELASTIC_POOL_NAME)
                .withStandardPool()
                .withTag("tag1", "value1");
        // Database and its pool are created together through the Creatable.
        Mono<SqlDatabase> resourceStream =
            sqlServer
                .databases()
                .define(SQL_DATABASE_NAME)
                .withNewElasticPool(sqlElasticPoolCreatable)
                .withCollation(COLLATION)
                .createAsync();
        SqlDatabase sqlDatabase = resourceStream.block();
        validateSqlDatabase(sqlDatabase, SQL_DATABASE_NAME);
        sqlServer = sqlServerManager.sqlServers().getByResourceGroup(rgName, sqlServerName);
        validateSqlServer(sqlServer);
        SqlElasticPool elasticPool = sqlServer.elasticPools().get(SQL_ELASTIC_POOL_NAME);
        validateSqlElasticPool(elasticPool);
        validateSqlDatabaseWithElasticPool(sqlServer.databases().get(SQL_DATABASE_NAME), SQL_DATABASE_NAME);
        validateListSqlDatabase(sqlServer.databases().list());
        // Detach from the pool and move to Standard S3; the pool name must become null.
        sqlDatabase
            .update()
            .withoutElasticPool()
            .withStandardEdition(SqlDatabaseStandardServiceObjective.S3)
            .apply();
        sqlDatabase = sqlServer.databases().get(SQL_DATABASE_NAME);
        Assertions.assertNull(sqlDatabase.elasticPoolName());
        // Upgrade to Premium P1, then P2, checking current/requested service objectives.
        sqlDatabase.update().withPremiumEdition(SqlDatabasePremiumServiceObjective.P1).apply();
        sqlDatabase = sqlServer.databases().get(SQL_DATABASE_NAME);
        Assertions.assertEquals(sqlDatabase.edition(), DatabaseEdition.PREMIUM);
        Assertions.assertEquals(sqlDatabase.currentServiceObjectiveName(), ServiceObjectiveName.P1.toString());
        sqlDatabase.update().withPremiumEdition(SqlDatabasePremiumServiceObjective.P2).apply();
        sqlDatabase = sqlServer.databases().get(SQL_DATABASE_NAME);
        Assertions.assertEquals(sqlDatabase.currentServiceObjectiveName(), ServiceObjectiveName.P2.toString());
        Assertions.assertEquals(sqlDatabase.requestedServiceObjectiveName(), ServiceObjectiveName.P2.toString());
        // 250 GB max size.
        sqlDatabase.update().withMaxSizeBytes(268435456000L).apply();
        sqlDatabase = sqlServer.databases().get(SQL_DATABASE_NAME);
        Assertions.assertEquals(sqlDatabase.maxSizeBytes(), 268435456000L);
        // Re-attach to the existing pool.
        sqlDatabase.update().withExistingElasticPool(SQL_ELASTIC_POOL_NAME).apply();
        sqlDatabase = sqlServer.databases().get(SQL_DATABASE_NAME);
        Assertions.assertEquals(sqlDatabase.elasticPoolName(), SQL_ELASTIC_POOL_NAME);
        Assertions.assertNotNull(elasticPool.listActivities());
        Assertions.assertNotNull(elasticPool.listDatabaseActivities());
        List<SqlDatabase> databasesInElasticPool = elasticPool.listDatabases();
        Assertions.assertNotNull(databasesInElasticPool);
        Assertions.assertEquals(databasesInElasticPool.size(), 1);
        SqlDatabase databaseInElasticPool = elasticPool.getDatabase(SQL_DATABASE_NAME);
        validateSqlDatabase(databaseInElasticPool, SQL_DATABASE_NAME);
        databaseInElasticPool.refresh();
        validateResourceNotFound(() -> elasticPool.getDatabase("does_not_exist"));
        sqlServer.databases().delete(SQL_DATABASE_NAME);
        validateSqlDatabaseNotFound(SQL_DATABASE_NAME);
        // A second database can be placed in the (now empty) existing pool.
        SqlElasticPool sqlElasticPool = sqlServer.elasticPools().get(SQL_ELASTIC_POOL_NAME);
        resourceStream =
            sqlServer
                .databases()
                .define("newDatabase")
                .withExistingElasticPool(sqlElasticPool)
                .withCollation(COLLATION)
                .createAsync();
        sqlDatabase = resourceStream.block();
        sqlServer.databases().delete(sqlDatabase.name());
        validateSqlDatabaseNotFound("newDatabase");
        sqlServer.elasticPools().delete(SQL_ELASTIC_POOL_NAME);
        sqlServerManager.sqlServers().deleteByResourceGroup(sqlServer.resourceGroupName(), sqlServer.name());
        validateSqlServerNotFound(sqlServer);
    }
    /**
     * Elastic pool lifecycle: create (standard tier), update DTU/capacity/storage, attach a new
     * database during update, list, and delete.
     */
    @Test
    public void canCRUDSqlElasticPool() throws Exception {
        SqlServer sqlServer = createSqlServer();
        sqlServer = sqlServerManager.sqlServers().getByResourceGroup(rgName, sqlServerName);
        validateSqlServer(sqlServer);
        Mono<SqlElasticPool> resourceStream =
            sqlServer
                .elasticPools()
                .define(SQL_ELASTIC_POOL_NAME)
                .withStandardPool()
                .withTag("tag1", "value1")
                .createAsync();
        SqlElasticPool sqlElasticPool = resourceStream.block();
        validateSqlElasticPool(sqlElasticPool);
        Assertions.assertEquals(sqlElasticPool.listDatabases().size(), 0);
        // Update resizes the pool and simultaneously creates a database inside it.
        sqlElasticPool =
            sqlElasticPool
                .update()
                .withReservedDtu(SqlElasticPoolBasicEDTUs.eDTU_100)
                .withDatabaseMaxCapacity(20)
                .withDatabaseMinCapacity(10)
                .withStorageCapacity(102400 * 1024 * 1024L)
                .withNewDatabase(SQL_DATABASE_NAME)
                .withTag("tag2", "value2")
                .apply();
        validateSqlElasticPool(sqlElasticPool);
        Assertions.assertEquals(sqlElasticPool.listDatabases().size(), 1);
        Assertions.assertNotNull(sqlElasticPool.getDatabase(SQL_DATABASE_NAME));
        validateSqlElasticPool(sqlServer.elasticPools().get(SQL_ELASTIC_POOL_NAME));
        validateListSqlElasticPool(sqlServer.elasticPools().list());
        // The contained database must be removed before the pool can be deleted.
        sqlServer.databases().delete(SQL_DATABASE_NAME);
        sqlServer.elasticPools().delete(SQL_ELASTIC_POOL_NAME);
        validateSqlElasticPoolNotFound(sqlServer, SQL_ELASTIC_POOL_NAME);
        resourceStream =
            sqlServer.elasticPools().define("newElasticPool").withStandardPool().createAsync();
        sqlElasticPool = resourceStream.block();
        sqlServer.elasticPools().delete(sqlElasticPool.name());
        validateSqlElasticPoolNotFound(sqlServer, "newElasticPool");
        sqlServerManager.sqlServers().deleteByResourceGroup(sqlServer.resourceGroupName(), sqlServer.name());
        validateSqlServerNotFound(sqlServer);
    }
    /**
     * Firewall rule lifecycle: create with a range, create a single-IP rule (start == end),
     * update end addresses, list, and delete.
     */
    @Test
    public void canCRUDSqlFirewallRule() throws Exception {
        SqlServer sqlServer = createSqlServer();
        sqlServer = sqlServerManager.sqlServers().getByResourceGroup(rgName, sqlServerName);
        validateSqlServer(sqlServer);
        Mono<SqlFirewallRule> resourceStream =
            sqlServer
                .firewallRules()
                .define(SQL_FIREWALLRULE_NAME)
                .withIpAddressRange(START_IPADDRESS, END_IPADDRESS)
                .createAsync();
        SqlFirewallRule sqlFirewallRule = resourceStream.block();
        validateSqlFirewallRule(sqlFirewallRule, SQL_FIREWALLRULE_NAME);
        validateSqlFirewallRule(sqlServer.firewallRules().get(SQL_FIREWALLRULE_NAME), SQL_FIREWALLRULE_NAME);
        // A single-IP rule reports its start address as the end address too.
        String secondFirewallRuleName = "secondFireWallRule";
        SqlFirewallRule secondFirewallRule =
            sqlServer.firewallRules().define(secondFirewallRuleName).withIpAddress(START_IPADDRESS).create();
        Assertions.assertNotNull(secondFirewallRule);
        secondFirewallRule = sqlServer.firewallRules().get(secondFirewallRuleName);
        Assertions.assertNotNull(secondFirewallRule);
        Assertions.assertEquals(START_IPADDRESS, secondFirewallRule.endIpAddress());
        // Widening the end address turns it into a range rule.
        secondFirewallRule = secondFirewallRule.update().withEndIpAddress(END_IPADDRESS).apply();
        validateSqlFirewallRule(secondFirewallRule, secondFirewallRuleName);
        sqlServer.firewallRules().delete(secondFirewallRuleName);
        final SqlServer finalSqlServer = sqlServer;
        validateResourceNotFound(() -> finalSqlServer.firewallRules().get(secondFirewallRuleName));
        sqlFirewallRule = sqlServer.firewallRules().get(SQL_FIREWALLRULE_NAME);
        validateSqlFirewallRule(sqlFirewallRule, SQL_FIREWALLRULE_NAME);
        // Narrowing the range back down to a single address.
        sqlFirewallRule.update().withEndIpAddress(START_IPADDRESS).apply();
        sqlFirewallRule = sqlServer.firewallRules().get(SQL_FIREWALLRULE_NAME);
        Assertions.assertEquals(sqlFirewallRule.endIpAddress(), START_IPADDRESS);
        validateListSqlFirewallRule(sqlServer.firewallRules().list());
        sqlServer.firewallRules().delete(sqlFirewallRule.name());
        validateSqlFirewallRuleNotFound();
        sqlServerManager.sqlServers().deleteByResourceGroup(sqlServer.resourceGroupName(), sqlServer.name());
        validateSqlServerNotFound(sqlServer);
    }
    /**
     * Validates the server state produced by {@code canUseCoolShortcutsForResourceCreation}
     * (3 firewall rules, 3 elastic pools, 4 databases), then tears the child resources down —
     * either by direct deletes or via a single server update(), depending on
     * {@code deleteUsingUpdate}.
     *
     * @param deleteUsingUpdate when true, delete children through {@code sqlServer.update()}
     */
    private void validateMultiCreation(
        String database2Name,
        String database1InEPName,
        String database2InEPName,
        String elasticPool1Name,
        String elasticPool2Name,
        String elasticPool3Name,
        SqlServer sqlServer,
        boolean deleteUsingUpdate) {
        validateSqlServer(sqlServer);
        validateSqlServer(sqlServerManager.sqlServers().getByResourceGroup(rgName, sqlServerName));
        validateSqlDatabase(sqlServer.databases().get(SQL_DATABASE_NAME), SQL_DATABASE_NAME);
        validateSqlFirewallRule(sqlServer.firewallRules().get(SQL_FIREWALLRULE_NAME), SQL_FIREWALLRULE_NAME);
        List<SqlFirewallRule> firewalls = sqlServer.firewallRules().list();
        Assertions.assertEquals(3, firewalls.size());
        // Of the two randomly named rules: one ends at START_IPADDRESS, one at END_IPADDRESS.
        int startIPAddress = 0;
        int endIPAddress = 0;
        for (SqlFirewallRule firewall : firewalls) {
            if (!firewall.name().equalsIgnoreCase(SQL_FIREWALLRULE_NAME)) {
                Assertions.assertEquals(firewall.startIpAddress(), START_IPADDRESS);
                if (firewall.endIpAddress().equalsIgnoreCase(START_IPADDRESS)) {
                    startIPAddress++;
                } else if (firewall.endIpAddress().equalsIgnoreCase(END_IPADDRESS)) {
                    endIPAddress++;
                }
            }
        }
        Assertions.assertEquals(startIPAddress, 1);
        Assertions.assertEquals(endIPAddress, 1);
        Assertions.assertNotNull(sqlServer.databases().get(database2Name));
        Assertions.assertNotNull(sqlServer.databases().get(database1InEPName));
        Assertions.assertNotNull(sqlServer.databases().get(database2InEPName));
        // Pool 1: standard (validated fully); pool 2: premium with both EP databases; pool 3: standard.
        SqlElasticPool ep1 = sqlServer.elasticPools().get(elasticPool1Name);
        validateSqlElasticPool(ep1, elasticPool1Name);
        SqlElasticPool ep2 = sqlServer.elasticPools().get(elasticPool2Name);
        Assertions.assertNotNull(ep2);
        Assertions.assertEquals(ep2.edition(), ElasticPoolEdition.PREMIUM);
        Assertions.assertEquals(ep2.listDatabases().size(), 2);
        Assertions.assertNotNull(ep2.getDatabase(database1InEPName));
        Assertions.assertNotNull(ep2.getDatabase(database2InEPName));
        SqlElasticPool ep3 = sqlServer.elasticPools().get(elasticPool3Name);
        Assertions.assertNotNull(ep3);
        Assertions.assertEquals(ep3.edition(), ElasticPoolEdition.STANDARD);
        if (!deleteUsingUpdate) {
            // Direct deletes: databases first so the pools drain, then pools, then all rules.
            sqlServer.databases().delete(database2Name);
            sqlServer.databases().delete(database1InEPName);
            sqlServer.databases().delete(database2InEPName);
            sqlServer.databases().delete(SQL_DATABASE_NAME);
            Assertions.assertEquals(ep1.listDatabases().size(), 0);
            Assertions.assertEquals(ep2.listDatabases().size(), 0);
            Assertions.assertEquals(ep3.listDatabases().size(), 0);
            sqlServer.elasticPools().delete(elasticPool1Name);
            sqlServer.elasticPools().delete(elasticPool2Name);
            sqlServer.elasticPools().delete(elasticPool3Name);
            firewalls = sqlServer.firewallRules().list();
            for (SqlFirewallRule firewallRule : firewalls) {
                firewallRule.delete();
            }
        } else {
            // Batch removal through a single update(); the named firewall rule goes with it,
            // leaving the two randomly named rules to be deleted individually.
            sqlServer
                .update()
                .withoutDatabase(database2Name)
                .withoutElasticPool(elasticPool1Name)
                .withoutElasticPool(elasticPool2Name)
                .withoutElasticPool(elasticPool3Name)
                .withoutDatabase(database1InEPName)
                .withoutDatabase(SQL_DATABASE_NAME)
                .withoutDatabase(database2InEPName)
                .withoutFirewallRule(SQL_FIREWALLRULE_NAME)
                .apply();
            Assertions.assertEquals(sqlServer.elasticPools().list().size(), 0);
            firewalls = sqlServer.firewallRules().list();
            Assertions.assertEquals(firewalls.size(), 2);
            for (SqlFirewallRule firewallRule : firewalls) {
                firewallRule.delete();
            }
        }
        Assertions.assertEquals(sqlServer.elasticPools().list().size(), 0);
        // The "master" database remains, hence size 1 — TODO confirm which database this is.
        Assertions.assertEquals(sqlServer.databases().list().size(), 1);
    }
private void validateSqlFirewallRuleNotFound() {
validateResourceNotFound(
() ->
sqlServerManager
.sqlServers()
.getByResourceGroup(rgName, sqlServerName)
.firewallRules()
.get(SQL_FIREWALLRULE_NAME));
}
private void validateSqlElasticPoolNotFound(SqlServer sqlServer, String elasticPoolName) {
validateResourceNotFound(() -> sqlServer.elasticPools().get(elasticPoolName));
}
private void validateSqlDatabaseNotFound(String newDatabase) {
validateResourceNotFound(
() -> sqlServerManager.sqlServers().getByResourceGroup(rgName, sqlServerName).databases().get(newDatabase));
}
private void validateSqlServerNotFound(SqlServer sqlServer) {
validateResourceNotFound(() -> sqlServerManager.sqlServers().getById(sqlServer.id()));
}
private void validateResourceNotFound(Supplier<Object> fetchResource) {
try {
Object result = fetchResource.get();
Assertions.assertNull(result);
} catch (ManagementException e) {
Assertions.assertEquals(404, e.getResponse().getStatusCode());
}
}
private SqlServer createSqlServer() {
return createSqlServer(sqlServerName);
}
private SqlServer createSqlServer(String sqlServerName) {
return sqlServerManager
.sqlServers()
.define(sqlServerName)
.withRegion(Region.US_EAST)
.withNewResourceGroup(rgName)
.withAdministratorLogin("userName")
.withAdministratorPassword("P@ssword~1")
.create();
}
private static void validateListSqlFirewallRule(List<SqlFirewallRule> sqlFirewallRules) {
boolean found = false;
for (SqlFirewallRule firewallRule : sqlFirewallRules) {
if (firewallRule.name().equals(SQL_FIREWALLRULE_NAME)) {
found = true;
}
}
Assertions.assertTrue(found);
}
private void validateSqlFirewallRule(SqlFirewallRule sqlFirewallRule, String firewallName) {
Assertions.assertNotNull(sqlFirewallRule);
Assertions.assertEquals(firewallName, sqlFirewallRule.name());
Assertions.assertEquals(sqlServerName, sqlFirewallRule.sqlServerName());
Assertions.assertEquals(START_IPADDRESS, sqlFirewallRule.startIpAddress());
Assertions.assertEquals(END_IPADDRESS, sqlFirewallRule.endIpAddress());
Assertions.assertEquals(rgName, sqlFirewallRule.resourceGroupName());
Assertions.assertEquals(sqlServerName, sqlFirewallRule.sqlServerName());
Assertions.assertEquals(Region.US_EAST, sqlFirewallRule.region());
}
private static void validateListSqlElasticPool(List<SqlElasticPool> sqlElasticPools) {
boolean found = false;
for (SqlElasticPool elasticPool : sqlElasticPools) {
if (elasticPool.name().equals(SQL_ELASTIC_POOL_NAME)) {
found = true;
}
}
Assertions.assertTrue(found);
}
private void validateSqlElasticPool(SqlElasticPool sqlElasticPool) {
validateSqlElasticPool(sqlElasticPool, SQL_ELASTIC_POOL_NAME);
}
    /**
     * Asserts core properties of an elastic pool: name, owning server and resource group,
     * STANDARD edition, non-null creation date, and non-zero DTU figures.
     *
     * @param sqlElasticPool the pool to validate; must not be null
     * @param elasticPoolName the expected pool name
     */
    private void validateSqlElasticPool(SqlElasticPool sqlElasticPool, String elasticPoolName) {
        Assertions.assertNotNull(sqlElasticPool);
        Assertions.assertEquals(rgName, sqlElasticPool.resourceGroupName());
        Assertions.assertEquals(elasticPoolName, sqlElasticPool.name());
        Assertions.assertEquals(sqlServerName, sqlElasticPool.sqlServerName());
        Assertions.assertEquals(ElasticPoolEdition.STANDARD, sqlElasticPool.edition());
        Assertions.assertNotNull(sqlElasticPool.creationDate());
        Assertions.assertNotEquals(0, sqlElasticPool.databaseDtuMax());
        Assertions.assertNotEquals(0, sqlElasticPool.dtu());
    }
private static void validateListSqlDatabase(List<SqlDatabase> sqlDatabases) {
boolean found = false;
for (SqlDatabase database : sqlDatabases) {
if (database.name().equals(SQL_DATABASE_NAME)) {
found = true;
}
}
Assertions.assertTrue(found);
}
    /**
     * Asserts core server properties: resource group, a populated FQDN, and the fixed admin
     * login used by {@link #createSqlServer(String)}.
     */
    private void validateSqlServer(SqlServer sqlServer) {
        Assertions.assertNotNull(sqlServer);
        Assertions.assertEquals(rgName, sqlServer.resourceGroupName());
        Assertions.assertNotNull(sqlServer.fullyQualifiedDomainName());
        Assertions.assertEquals("userName", sqlServer.administratorLogin());
    }
    /**
     * Asserts core database properties: name, owning server, the shared test collation, and
     * STANDARD edition.
     *
     * @param sqlDatabase the database to validate; must not be null
     * @param databaseName the expected database name
     */
    private void validateSqlDatabase(SqlDatabase sqlDatabase, String databaseName) {
        Assertions.assertNotNull(sqlDatabase);
        Assertions.assertEquals(sqlDatabase.name(), databaseName);
        Assertions.assertEquals(sqlServerName, sqlDatabase.sqlServerName());
        Assertions.assertEquals(sqlDatabase.collation(), COLLATION);
        Assertions.assertEquals(sqlDatabase.edition(), DatabaseEdition.STANDARD);
    }
    /**
     * Validates the database like {@link #validateSqlDatabase} and additionally asserts it is
     * attached to the shared test elastic pool.
     */
    private void validateSqlDatabaseWithElasticPool(SqlDatabase sqlDatabase, String databaseName) {
        validateSqlDatabase(sqlDatabase, databaseName);
        Assertions.assertEquals(SQL_ELASTIC_POOL_NAME, sqlDatabase.elasticPoolName());
    }
@Test
public void testRandomSku() {
List<DatabaseSku> databaseSkus = new LinkedList<>(Arrays.asList(DatabaseSku.getAll().toArray(new DatabaseSku[0])));
Collections.shuffle(databaseSkus);
List<ElasticPoolSku> elasticPoolSkus = new LinkedList<>(Arrays.asList(ElasticPoolSku.getAll().toArray(new ElasticPoolSku[0])));
Collections.shuffle(elasticPoolSkus);
sqlServerManager.sqlServers().getCapabilitiesByRegion(Region.US_EAST).supportedCapabilitiesByServerVersion()
.forEach((x, serverVersionCapability) -> {
serverVersionCapability.supportedEditions().forEach(edition -> {
edition.supportedServiceLevelObjectives().forEach(serviceObjective -> {
if (serviceObjective.status() != CapabilityStatus.AVAILABLE && serviceObjective.status() != CapabilityStatus.DEFAULT) {
databaseSkus.remove(DatabaseSku.fromSku(serviceObjective.sku()));
}
});
});
serverVersionCapability.supportedElasticPoolEditions().forEach(edition -> {
edition.supportedElasticPoolPerformanceLevels().forEach(performance -> {
if (performance.status() != CapabilityStatus.AVAILABLE && performance.status() != CapabilityStatus.DEFAULT) {
elasticPoolSkus.remove(ElasticPoolSku.fromSku(performance.sku()));
}
});
});
});
SqlServer sqlServer = sqlServerManager.sqlServers().define(sqlServerName)
.withRegion(Region.US_EAST)
.withNewResourceGroup(rgName)
.withAdministratorLogin("userName")
.withAdministratorPassword(password())
.create();
Flux.merge(
Flux.range(0, 5)
.flatMap(i -> sqlServer.databases().define("database" + i).withSku(databaseSkus.get(i)).createAsync().cast(Indexable.class)),
Flux.range(0, 5)
.flatMap(i -> sqlServer.elasticPools().define("elasticPool" + i).withSku(elasticPoolSkus.get(i)).createAsync().cast(Indexable.class))
)
.blockLast();
}
    /**
     * Code generator, not a real test (disabled): queries the region's supported SKUs and
     * rewrites {@code DatabaseSku.java} and {@code ElasticPoolSku.java} by substituting the
     * generated constant definitions into template resources at the {@code <Generated>} marker.
     */
    @Test
    @Disabled("Only run for updating sku")
    public void generateSku() throws IOException {
        StringBuilder databaseSkuBuilder = new StringBuilder();
        StringBuilder elasticPoolSkuBuilder = new StringBuilder();
        sqlServerManager.sqlServers().getCapabilitiesByRegion(Region.US_EAST).supportedCapabilitiesByServerVersion()
            .forEach((x, serverVersionCapability) -> {
                serverVersionCapability.supportedEditions().forEach(edition -> {
                    // The internal "System" edition is not exposed as a public SKU constant.
                    if (edition.name().equalsIgnoreCase("System")) {
                        return;
                    }
                    edition.supportedServiceLevelObjectives().forEach(serviceObjective -> {
                        addStaticSkuDefinition(databaseSkuBuilder, edition.name(), serviceObjective.name(), serviceObjective.sku(), "DatabaseSku");
                    });
                });
                serverVersionCapability.supportedElasticPoolEditions().forEach(edition -> {
                    if (edition.name().equalsIgnoreCase("System")) {
                        return;
                    }
                    edition.supportedElasticPoolPerformanceLevels().forEach(performance -> {
                        // Pool constants are named "<sku>_<capacity>".
                        String detailName = String.format("%s_%d", performance.sku().name(), performance.sku().capacity());
                        addStaticSkuDefinition(elasticPoolSkuBuilder, edition.name(), detailName, performance.sku(), "ElasticPoolSku");
                    });
                });
            });
        // Splice the generated constants into the template resources and write the sources.
        String databaseSku = new String(readAllBytes(getClass().getResourceAsStream("/DatabaseSku.java")), StandardCharsets.UTF_8);
        databaseSku = databaseSku.replace("<Generated>", databaseSkuBuilder.toString());
        Files.write(new File("src/main/java/com/azure/resourcemanager/sql/models/DatabaseSku.java").toPath(), databaseSku.getBytes(StandardCharsets.UTF_8));
        String elasticPoolSku = new String(readAllBytes(getClass().getResourceAsStream("/ElasticPoolSku.java")), StandardCharsets.UTF_8);
        elasticPoolSku = elasticPoolSku.replace("<Generated>", elasticPoolSkuBuilder.toString());
        Files.write(new File("src/main/java/com/azure/resourcemanager/sql/models/ElasticPoolSku.java").toPath(), elasticPoolSku.getBytes(StandardCharsets.UTF_8));
        // Resource group is created so the test framework's teardown has something to delete —
        // TODO confirm this is the intent.
        sqlServerManager.resourceManager().resourceGroups().define(rgName).withRegion(Region.US_EAST).create();
    }
private byte[] readAllBytes(InputStream inputStream) throws IOException {
try (ByteArrayOutputStream outputStream = new ByteArrayOutputStream()) {
byte[] data = new byte[4096];
while (true) {
int size = inputStream.read(data);
if (size > 0) {
outputStream.write(data, 0, size);
} else {
return outputStream.toByteArray();
}
}
}
}
    /**
     * Appends one generated constant definition of the form
     * {@code public static final <className> EDITION_DETAIL = new <className>(name, tier, family, capacity, size);}
     * (preceded by a one-line Javadoc) to {@code builder}.
     *
     * @param builder accumulator for the generated source text
     * @param edition SQL edition name; upper-cased into the constant name
     * @param detailName objective/performance-level detail; upper-cased into the constant name
     * @param sku source SKU whose fields are emitted as constructor arguments
     * @param className generated class name ("DatabaseSku" or "ElasticPoolSku")
     */
    private void addStaticSkuDefinition(StringBuilder builder, String edition, String detailName, Sku sku, String className) {
        builder
            // Javadoc line for the generated constant.
            .append("\n ").append("/** ").append(edition).append(" Edition with ").append(detailName).append(" sku. */")
            // Constant name is EDITION_DETAIL; Locale.ROOT keeps casing locale-independent.
            .append("\n ").append("public static final ").append(className).append(" ").append(String.format("%s_%s", edition.toUpperCase(Locale.ROOT), detailName.toUpperCase(Locale.ROOT))).append(" =")
            .append("\n new ").append(className).append("(")
            // Null SKU fields are emitted as the literal null (unquoted); strings are quoted.
            .append(sku.name() == null ? null : "\"" + sku.name() + "\"")
            .append(", ")
            .append(sku.tier() == null ? null : "\"" + sku.tier() + "\"")
            .append(", ")
            .append(sku.family() == null ? null : "\"" + sku.family() + "\"")
            .append(", ")
            .append(sku.capacity())
            .append(", ")
            .append(sku.size() == null ? null : "\"" + sku.size() + "\"")
            .append(");");
    }
} | |
    // TODO: re-enable this test and add session records once RECORD mode succeeds.
    public void canCRUDSqlServerWithFirewallRule() throws Exception {
        // Create the server with an AAD admin, an inline firewall rule, and Azure-services
        // access deliberately disabled (so no implicit AllowAllWindowsAzureIps rule exists).
        String sqlServerAdminName = "sqladmin";
        String id = generateRandomUuid();
        SqlServer sqlServer =
            sqlServerManager
                .sqlServers()
                .define(sqlServerName)
                .withRegion(Region.US_EAST)
                .withNewResourceGroup(rgName)
                .withAdministratorLogin(sqlServerAdminName)
                .withAdministratorPassword(password())
                .withActiveDirectoryAdministrator("DSEng", id)
                .withoutAccessFromAzureServices()
                .defineFirewallRule("somefirewallrule1")
                .withIpAddress("0.0.0.1")
                .attach()
                .withTag("tag1", "value1")
                .create();
        Assertions.assertEquals(sqlServerAdminName, sqlServer.administratorLogin());
        Assertions.assertEquals("v12.0", sqlServer.kind());
        Assertions.assertEquals("12.0", sqlServer.version());
        // Re-fetch via GET and verify the same properties round-trip.
        sqlServer = sqlServerManager.sqlServers().getByResourceGroup(rgName, sqlServerName);
        Assertions.assertEquals(sqlServerAdminName, sqlServer.administratorLogin());
        Assertions.assertEquals("v12.0", sqlServer.kind());
        Assertions.assertEquals("12.0", sqlServer.version());
        // AAD administrator lifecycle: read, replace, remove.
        SqlActiveDirectoryAdministrator sqlADAdmin = sqlServer.getActiveDirectoryAdministrator();
        Assertions.assertNotNull(sqlADAdmin);
        Assertions.assertEquals("DSEng", sqlADAdmin.signInName());
        Assertions.assertNotNull(sqlADAdmin.id());
        Assertions.assertEquals(AdministratorType.ACTIVE_DIRECTORY, sqlADAdmin.administratorType());
        sqlADAdmin = sqlServer.setActiveDirectoryAdministrator("DSEngAll", id);
        Assertions.assertNotNull(sqlADAdmin);
        Assertions.assertEquals("DSEngAll", sqlADAdmin.signInName());
        Assertions.assertNotNull(sqlADAdmin.id());
        Assertions.assertEquals(AdministratorType.ACTIVE_DIRECTORY, sqlADAdmin.administratorType());
        sqlServer.removeActiveDirectoryAdministrator();
        // Local must be effectively final to be captured by the lambda below.
        final SqlServer finalSqlServer = sqlServer;
        validateResourceNotFound(() -> finalSqlServer.getActiveDirectoryAdministrator());
        // The inline rule created with the server is readable via the top-level API.
        SqlFirewallRule firewallRule =
            sqlServerManager.sqlServers().firewallRules().getBySqlServer(rgName, sqlServerName, "somefirewallrule1");
        Assertions.assertEquals("0.0.0.1", firewallRule.startIpAddress());
        Assertions.assertEquals("0.0.0.1", firewallRule.endIpAddress());
        // Azure-services access was disabled, so the well-known rule must not exist yet...
        validateResourceNotFound(
            () ->
                sqlServerManager
                    .sqlServers()
                    .firewallRules()
                    .getBySqlServer(rgName, sqlServerName, "AllowAllWindowsAzureIps"));
        // ...and enabling it creates the 0.0.0.0-0.0.0.0 rule.
        sqlServer.enableAccessFromAzureServices();
        firewallRule =
            sqlServerManager
                .sqlServers()
                .firewallRules()
                .getBySqlServer(rgName, sqlServerName, "AllowAllWindowsAzureIps");
        Assertions.assertEquals("0.0.0.0", firewallRule.startIpAddress());
        Assertions.assertEquals("0.0.0.0", firewallRule.endIpAddress());
        // Rules can also be added through a server update...
        sqlServer.update().defineFirewallRule("newFirewallRule1")
            .withIpAddress("0.0.0.2")
            .attach()
            .apply();
        // ...and deleting a rule that does not exist is tolerated by the delete call.
        sqlServer.firewallRules().delete("newFirewallRule2");
        final SqlServer finalSqlServer1 = sqlServer;
        validateResourceNotFound(() -> finalSqlServer1.firewallRules().get("newFirewallRule2"));
        // Standalone create through the top-level firewallRules() entry point.
        firewallRule =
            sqlServerManager
                .sqlServers()
                .firewallRules()
                .define("newFirewallRule2")
                .withExistingSqlServer(rgName, sqlServerName)
                .withIpAddress("0.0.0.3")
                .create();
        Assertions.assertEquals("0.0.0.3", firewallRule.startIpAddress());
        Assertions.assertEquals("0.0.0.3", firewallRule.endIpAddress());
        // Update only the start address; the end address must be preserved.
        firewallRule = firewallRule.update().withStartIpAddress("0.0.0.1").apply();
        Assertions.assertEquals("0.0.0.1", firewallRule.startIpAddress());
        Assertions.assertEquals("0.0.0.3", firewallRule.endIpAddress());
        // Delete and verify the rule is gone.
        sqlServer.firewallRules().delete("somefirewallrule1");
        validateResourceNotFound(
            () ->
                sqlServerManager
                    .sqlServers()
                    .firewallRules()
                    .getBySqlServer(rgName, sqlServerName, "somefirewallrule1"));
        // Single-IP rule: start and end address are both set to the given IP.
        firewallRule = sqlServer.firewallRules().define("somefirewallrule2").withIpAddress("0.0.0.4").create();
        Assertions.assertEquals("0.0.0.4", firewallRule.startIpAddress());
        Assertions.assertEquals("0.0.0.4", firewallRule.endIpAddress());
        firewallRule.delete();
} | public void canCRUDSqlServerWithFirewallRule() throws Exception {
String sqlServerAdminName = "sqladmin";
String id = generateRandomUuid();
SqlServer sqlServer =
sqlServerManager
.sqlServers()
.define(sqlServerName)
.withRegion(Region.US_EAST)
.withNewResourceGroup(rgName)
.withAdministratorLogin(sqlServerAdminName)
.withAdministratorPassword(password())
.withActiveDirectoryAdministrator("DSEng", id)
.withoutAccessFromAzureServices()
.defineFirewallRule("somefirewallrule1")
.withIpAddress("0.0.0.1")
.attach()
.withTag("tag1", "value1")
.create();
Assertions.assertEquals(sqlServerAdminName, sqlServer.administratorLogin());
Assertions.assertEquals("v12.0", sqlServer.kind());
Assertions.assertEquals("12.0", sqlServer.version());
sqlServer = sqlServerManager.sqlServers().getByResourceGroup(rgName, sqlServerName);
Assertions.assertEquals(sqlServerAdminName, sqlServer.administratorLogin());
Assertions.assertEquals("v12.0", sqlServer.kind());
Assertions.assertEquals("12.0", sqlServer.version());
SqlActiveDirectoryAdministrator sqlADAdmin = sqlServer.getActiveDirectoryAdministrator();
Assertions.assertNotNull(sqlADAdmin);
Assertions.assertEquals("DSEng", sqlADAdmin.signInName());
Assertions.assertNotNull(sqlADAdmin.id());
Assertions.assertEquals(AdministratorType.ACTIVE_DIRECTORY, sqlADAdmin.administratorType());
sqlADAdmin = sqlServer.setActiveDirectoryAdministrator("DSEngAll", id);
Assertions.assertNotNull(sqlADAdmin);
Assertions.assertEquals("DSEngAll", sqlADAdmin.signInName());
Assertions.assertNotNull(sqlADAdmin.id());
Assertions.assertEquals(AdministratorType.ACTIVE_DIRECTORY, sqlADAdmin.administratorType());
sqlServer.removeActiveDirectoryAdministrator();
final SqlServer finalSqlServer = sqlServer;
validateResourceNotFound(() -> finalSqlServer.getActiveDirectoryAdministrator());
SqlFirewallRule firewallRule =
sqlServerManager.sqlServers().firewallRules().getBySqlServer(rgName, sqlServerName, "somefirewallrule1");
Assertions.assertEquals("0.0.0.1", firewallRule.startIpAddress());
Assertions.assertEquals("0.0.0.1", firewallRule.endIpAddress());
validateResourceNotFound(
() ->
sqlServerManager
.sqlServers()
.firewallRules()
.getBySqlServer(rgName, sqlServerName, "AllowAllWindowsAzureIps"));
sqlServer.enableAccessFromAzureServices();
firewallRule =
sqlServerManager
.sqlServers()
.firewallRules()
.getBySqlServer(rgName, sqlServerName, "AllowAllWindowsAzureIps");
Assertions.assertEquals("0.0.0.0", firewallRule.startIpAddress());
Assertions.assertEquals("0.0.0.0", firewallRule.endIpAddress());
sqlServer.update().defineFirewallRule("newFirewallRule1")
.withIpAddress("0.0.0.2")
.attach()
.apply();
sqlServer.firewallRules().delete("newFirewallRule2");
final SqlServer finalSqlServer1 = sqlServer;
validateResourceNotFound(() -> finalSqlServer1.firewallRules().get("newFirewallRule2"));
firewallRule =
sqlServerManager
.sqlServers()
.firewallRules()
.define("newFirewallRule2")
.withExistingSqlServer(rgName, sqlServerName)
.withIpAddress("0.0.0.3")
.create();
Assertions.assertEquals("0.0.0.3", firewallRule.startIpAddress());
Assertions.assertEquals("0.0.0.3", firewallRule.endIpAddress());
firewallRule = firewallRule.update().withStartIpAddress("0.0.0.1").apply();
Assertions.assertEquals("0.0.0.1", firewallRule.startIpAddress());
Assertions.assertEquals("0.0.0.3", firewallRule.endIpAddress());
sqlServer.firewallRules().delete("somefirewallrule1");
validateResourceNotFound(
() ->
sqlServerManager
.sqlServers()
.firewallRules()
.getBySqlServer(rgName, sqlServerName, "somefirewallrule1"));
firewallRule = sqlServer.firewallRules().define("somefirewallrule2").withIpAddress("0.0.0.4").create();
Assertions.assertEquals("0.0.0.4", firewallRule.startIpAddress());
Assertions.assertEquals("0.0.0.4", firewallRule.endIpAddress());
firewallRule.delete();
} | class SqlServerOperationsTests extends SqlServerTest {
    // Shared fixture names for the SQL server CRUD tests in this class.
    private static final String SQL_DATABASE_NAME = "myTestDatabase2";
    // Default collation used when tests create databases explicitly.
    private static final String COLLATION = "SQL_Latin1_General_CP1_CI_AS";
    private static final String SQL_ELASTIC_POOL_NAME = "testElasticPool";
    private static final String SQL_FIREWALLRULE_NAME = "firewallrule1";
    // IP range bounds for firewall-rule scenarios (presumably consumed by tests
    // outside this chunk — confirm before removing).
    private static final String START_IPADDRESS = "10.102.1.10";
    private static final String END_IPADDRESS = "10.102.1.12";
    /**
     * End-to-end CRUD coverage for SQL sync members: one server hosts a sync metadata
     * database, a hub database and a member database; a sync group and sync member are
     * created, the member's sync direction is updated, and both are deleted.
     */
    @Test
    public void canCRUDSqlSyncMember() throws Exception {
        final String dbName = "dbSample";
        final String dbSyncName = "dbSync";
        final String dbMemberName = "dbMember";
        final String syncGroupName = "groupName";
        final String syncMemberName = "memberName";
        final String administratorLogin = "sqladmin";
        final String administratorPassword = password();
        // One server hosts all three databases involved in the sync topology.
        SqlServer sqlPrimaryServer =
            sqlServerManager
                .sqlServers()
                .define(sqlServerName)
                .withRegion(Region.US_EAST)
                .withNewResourceGroup(rgName)
                .withAdministratorLogin(administratorLogin)
                .withAdministratorPassword(administratorPassword)
                .defineDatabase(dbName)
                .fromSample(SampleName.ADVENTURE_WORKS_LT)
                .withStandardEdition(SqlDatabaseStandardServiceObjective.S0)
                .attach()
                .defineDatabase(dbSyncName)
                .withStandardEdition(SqlDatabaseStandardServiceObjective.S0)
                .attach()
                .defineDatabase(dbMemberName)
                .withStandardEdition(SqlDatabaseStandardServiceObjective.S0)
                .attach()
                .create();
        SqlDatabase dbSource = sqlPrimaryServer.databases().get(dbName);
        SqlDatabase dbSync = sqlPrimaryServer.databases().get(dbSyncName);
        SqlDatabase dbMember = sqlPrimaryServer.databases().get(dbMemberName);
        // Sync group on the hub database; interval -1 disables automatic sync.
        SqlSyncGroup sqlSyncGroup =
            dbSync
                .syncGroups()
                .define(syncGroupName)
                .withSyncDatabaseId(dbSource.id())
                .withDatabaseUserName(administratorLogin)
                .withDatabasePassword(administratorPassword)
                .withConflictResolutionPolicyHubWins()
                .withInterval(-1)
                .create();
        Assertions.assertNotNull(sqlSyncGroup);
        // Member initially syncs one-way from member to hub.
        SqlSyncMember sqlSyncMember =
            sqlSyncGroup
                .syncMembers()
                .define(syncMemberName)
                .withMemberSqlDatabase(dbMember)
                .withMemberUserName(administratorLogin)
                .withMemberPassword(administratorPassword)
                .withMemberDatabaseType(SyncMemberDbType.AZURE_SQL_DATABASE)
                .withDatabaseType(SyncDirection.ONE_WAY_MEMBER_TO_HUB)
                .create();
        Assertions.assertNotNull(sqlSyncMember);
        // Update: switch the member to bidirectional sync.
        sqlSyncMember
            .update()
            .withDatabaseType(SyncDirection.BIDIRECTIONAL)
            .withMemberUserName(administratorLogin)
            .withMemberPassword(administratorPassword)
            .withMemberDatabaseType(SyncMemberDbType.AZURE_SQL_DATABASE)
            .apply();
        Assertions.assertFalse(sqlSyncGroup.syncMembers().list().isEmpty());
        // Read the member back through the top-level sync-members entry point.
        sqlSyncMember =
            sqlServerManager
                .sqlServers()
                .syncMembers()
                .getBySqlServer(rgName, sqlServerName, dbSyncName, syncGroupName, syncMemberName);
        Assertions.assertNotNull(sqlSyncMember);
        // Delete the member first, then the group.
        sqlSyncMember.delete();
        sqlSyncGroup.delete();
    }
    /**
     * CRUD coverage for SQL sync groups: create a group between a sync metadata
     * database and a hub database, update its interval and conflict policy, list and
     * read it back, then delete it.
     */
    @Test
    public void canCRUDSqlSyncGroup() throws Exception {
        final String dbName = "dbSample";
        final String dbSyncName = "dbSync";
        final String syncGroupName = "groupName";
        final String administratorLogin = "sqladmin";
        final String administratorPassword = password();
        SqlServer sqlPrimaryServer =
            sqlServerManager
                .sqlServers()
                .define(sqlServerName)
                .withRegion(Region.US_EAST)
                .withNewResourceGroup(rgName)
                .withAdministratorLogin(administratorLogin)
                .withAdministratorPassword(administratorPassword)
                .defineDatabase(dbName)
                .fromSample(SampleName.ADVENTURE_WORKS_LT)
                .withStandardEdition(SqlDatabaseStandardServiceObjective.S0)
                .attach()
                .defineDatabase(dbSyncName)
                .withStandardEdition(SqlDatabaseStandardServiceObjective.S0)
                .attach()
                .create();
        SqlDatabase dbSource = sqlPrimaryServer.databases().get(dbName);
        SqlDatabase dbSync = sqlPrimaryServer.databases().get(dbSyncName);
        // Interval -1 disables automatic sync at creation time.
        SqlSyncGroup sqlSyncGroup =
            dbSync
                .syncGroups()
                .define(syncGroupName)
                .withSyncDatabaseId(dbSource.id())
                .withDatabaseUserName(administratorLogin)
                .withDatabasePassword(administratorPassword)
                .withConflictResolutionPolicyHubWins()
                .withInterval(-1)
                .create();
        Assertions.assertNotNull(sqlSyncGroup);
        // Update: enable a 600-second sync interval and flip the conflict policy.
        sqlSyncGroup.update().withInterval(600).withConflictResolutionPolicyMemberWins().apply();
        // The sync metadata database must show up in the region-wide listing.
        Assertions
            .assertTrue(
                sqlServerManager
                    .sqlServers()
                    .syncGroups()
                    .listSyncDatabaseIds(Region.US_EAST)
                    .stream()
                    .findAny()
                    .isPresent());
        Assertions.assertFalse(dbSync.syncGroups().list().isEmpty());
        // Read back through the top-level sync-groups entry point.
        sqlSyncGroup =
            sqlServerManager.sqlServers().syncGroups().getBySqlServer(rgName, sqlServerName, dbSyncName, syncGroupName);
        Assertions.assertNotNull(sqlSyncGroup);
        sqlSyncGroup.delete();
    }
@Test
public void canCopySqlDatabase() throws Exception {
final String sqlPrimaryServerName = generateRandomResourceName("sqlpri", 22);
final String sqlSecondaryServerName = generateRandomResourceName("sqlsec", 22);
final String epName = "epSample";
final String dbName = "dbSample";
final String administratorLogin = "sqladmin";
final String administratorPassword = password();
SqlServer sqlPrimaryServer =
sqlServerManager
.sqlServers()
.define(sqlPrimaryServerName)
.withRegion(Region.US_EAST2)
.withNewResourceGroup(rgName)
.withAdministratorLogin(administratorLogin)
.withAdministratorPassword(administratorPassword)
.defineElasticPool(epName)
.withPremiumPool()
.attach()
.defineDatabase(dbName)
.withExistingElasticPool(epName)
.fromSample(SampleName.ADVENTURE_WORKS_LT)
.attach()
.create();
SqlServer sqlSecondaryServer =
sqlServerManager
.sqlServers()
.define(sqlSecondaryServerName)
.withRegion(Region.US_EAST)
.withExistingResourceGroup(rgName)
.withAdministratorLogin(administratorLogin)
.withAdministratorPassword(administratorPassword)
.create();
SqlDatabase dbSample = sqlPrimaryServer.databases().get(dbName);
SqlDatabase dbCopy =
sqlSecondaryServer
.databases()
.define("dbCopy")
.withSourceDatabase(dbSample)
.withMode(CreateMode.COPY)
.withPremiumEdition(SqlDatabasePremiumServiceObjective.P1)
.create();
Assertions.assertNotNull(dbCopy);
}
    /**
     * CRUD coverage for SQL failover groups across three servers in three regions:
     * creation with manual and automatic read-write failover policies, verifying the
     * mirrored view from the partner server, updating the policies, attaching a
     * database, listing from both sides, and deletion.
     */
    @Test
    public void canCRUDSqlFailoverGroup() throws Exception {
        final String sqlPrimaryServerName = generateRandomResourceName("sqlpri", 22);
        final String sqlSecondaryServerName = generateRandomResourceName("sqlsec", 22);
        final String sqlOtherServerName = generateRandomResourceName("sql000", 22);
        final String failoverGroupName = generateRandomResourceName("fg", 22);
        final String failoverGroupName2 = generateRandomResourceName("fg2", 22);
        final String dbName = "dbSample";
        final String administratorLogin = "sqladmin";
        final String administratorPassword = password();
        // Primary server carries the sample database that is later added to group 1.
        SqlServer sqlPrimaryServer =
            sqlServerManager
                .sqlServers()
                .define(sqlPrimaryServerName)
                .withRegion(Region.US_EAST)
                .withNewResourceGroup(rgName)
                .withAdministratorLogin(administratorLogin)
                .withAdministratorPassword(administratorPassword)
                .defineDatabase(dbName)
                .fromSample(SampleName.ADVENTURE_WORKS_LT)
                .withStandardEdition(SqlDatabaseStandardServiceObjective.S0)
                .attach()
                .create();
        SqlServer sqlSecondaryServer =
            sqlServerManager
                .sqlServers()
                .define(sqlSecondaryServerName)
                .withRegion(Region.US_EAST2)
                .withExistingResourceGroup(rgName)
                .withAdministratorLogin(administratorLogin)
                .withAdministratorPassword(administratorPassword)
                .create();
        SqlServer sqlOtherServer =
            sqlServerManager
                .sqlServers()
                .define(sqlOtherServerName)
                .withRegion(Region.US_SOUTH_CENTRAL)
                .withExistingResourceGroup(rgName)
                .withAdministratorLogin(administratorLogin)
                .withAdministratorPassword(administratorPassword)
                .create();
        // Group 1: manual read-write failover policy, read-only endpoint disabled.
        SqlFailoverGroup failoverGroup =
            sqlPrimaryServer
                .failoverGroups()
                .define(failoverGroupName)
                .withManualReadWriteEndpointPolicy()
                .withPartnerServerId(sqlSecondaryServer.id())
                .withReadOnlyEndpointPolicyDisabled()
                .create();
        Assertions.assertNotNull(failoverGroup);
        Assertions.assertEquals(failoverGroupName, failoverGroup.name());
        Assertions.assertEquals(rgName, failoverGroup.resourceGroupName());
        Assertions.assertEquals(sqlPrimaryServerName, failoverGroup.sqlServerName());
        Assertions.assertEquals(FailoverGroupReplicationRole.PRIMARY, failoverGroup.replicationRole());
        Assertions.assertEquals(1, failoverGroup.partnerServers().size());
        Assertions.assertEquals(sqlSecondaryServer.id(), failoverGroup.partnerServers().get(0).id());
        Assertions
            .assertEquals(
                FailoverGroupReplicationRole.SECONDARY, failoverGroup.partnerServers().get(0).replicationRole());
        Assertions.assertEquals(0, failoverGroup.databases().size());
        Assertions.assertEquals(0, failoverGroup.readWriteEndpointDataLossGracePeriodMinutes());
        Assertions.assertEquals(ReadWriteEndpointFailoverPolicy.MANUAL, failoverGroup.readWriteEndpointPolicy());
        Assertions.assertEquals(ReadOnlyEndpointFailoverPolicy.DISABLED, failoverGroup.readOnlyEndpointPolicy());
        // The same group viewed from the partner server reports the mirrored roles.
        SqlFailoverGroup failoverGroupOnPartner = sqlSecondaryServer.failoverGroups().get(failoverGroup.name());
        Assertions.assertEquals(failoverGroupName, failoverGroupOnPartner.name());
        Assertions.assertEquals(rgName, failoverGroupOnPartner.resourceGroupName());
        Assertions.assertEquals(sqlSecondaryServerName, failoverGroupOnPartner.sqlServerName());
        Assertions.assertEquals(FailoverGroupReplicationRole.SECONDARY, failoverGroupOnPartner.replicationRole());
        Assertions.assertEquals(1, failoverGroupOnPartner.partnerServers().size());
        Assertions.assertEquals(sqlPrimaryServer.id(), failoverGroupOnPartner.partnerServers().get(0).id());
        Assertions
            .assertEquals(
                FailoverGroupReplicationRole.PRIMARY, failoverGroupOnPartner.partnerServers().get(0).replicationRole());
        Assertions.assertEquals(0, failoverGroupOnPartner.databases().size());
        Assertions.assertEquals(0, failoverGroupOnPartner.readWriteEndpointDataLossGracePeriodMinutes());
        Assertions
            .assertEquals(ReadWriteEndpointFailoverPolicy.MANUAL, failoverGroupOnPartner.readWriteEndpointPolicy());
        Assertions
            .assertEquals(ReadOnlyEndpointFailoverPolicy.DISABLED, failoverGroupOnPartner.readOnlyEndpointPolicy());
        // Group 2: automatic failover with a 120-minute data-loss grace period.
        SqlFailoverGroup failoverGroup2 =
            sqlPrimaryServer
                .failoverGroups()
                .define(failoverGroupName2)
                .withAutomaticReadWriteEndpointPolicyAndDataLossGracePeriod(120)
                .withPartnerServerId(sqlOtherServer.id())
                .withReadOnlyEndpointPolicyEnabled()
                .create();
        Assertions.assertNotNull(failoverGroup2);
        Assertions.assertEquals(failoverGroupName2, failoverGroup2.name());
        Assertions.assertEquals(rgName, failoverGroup2.resourceGroupName());
        Assertions.assertEquals(sqlPrimaryServerName, failoverGroup2.sqlServerName());
        Assertions.assertEquals(FailoverGroupReplicationRole.PRIMARY, failoverGroup2.replicationRole());
        Assertions.assertEquals(1, failoverGroup2.partnerServers().size());
        Assertions.assertEquals(sqlOtherServer.id(), failoverGroup2.partnerServers().get(0).id());
        Assertions
            .assertEquals(
                FailoverGroupReplicationRole.SECONDARY, failoverGroup2.partnerServers().get(0).replicationRole());
        Assertions.assertEquals(0, failoverGroup2.databases().size());
        Assertions.assertEquals(120, failoverGroup2.readWriteEndpointDataLossGracePeriodMinutes());
        Assertions.assertEquals(ReadWriteEndpointFailoverPolicy.AUTOMATIC, failoverGroup2.readWriteEndpointPolicy());
        Assertions.assertEquals(ReadOnlyEndpointFailoverPolicy.ENABLED, failoverGroup2.readOnlyEndpointPolicy());
        // Update group 1 to the automatic policy and tag it.
        failoverGroup
            .update()
            .withAutomaticReadWriteEndpointPolicyAndDataLossGracePeriod(120)
            .withReadOnlyEndpointPolicyEnabled()
            .withTag("tag1", "value1")
            .apply();
        Assertions.assertEquals(120, failoverGroup.readWriteEndpointDataLossGracePeriodMinutes());
        Assertions.assertEquals(ReadWriteEndpointFailoverPolicy.AUTOMATIC, failoverGroup.readWriteEndpointPolicy());
        Assertions.assertEquals(ReadOnlyEndpointFailoverPolicy.ENABLED, failoverGroup.readOnlyEndpointPolicy());
        // Revert to manual policy while attaching the sample database to group 1.
        SqlDatabase db = sqlPrimaryServer.databases().get(dbName);
        failoverGroup
            .update()
            .withManualReadWriteEndpointPolicy()
            .withReadOnlyEndpointPolicyDisabled()
            .withNewDatabaseId(db.id())
            .apply();
        Assertions.assertEquals(1, failoverGroup.databases().size());
        Assertions.assertEquals(db.id(), failoverGroup.databases().get(0));
        Assertions.assertEquals(0, failoverGroup.readWriteEndpointDataLossGracePeriodMinutes());
        Assertions.assertEquals(ReadWriteEndpointFailoverPolicy.MANUAL, failoverGroup.readWriteEndpointPolicy());
        Assertions.assertEquals(ReadOnlyEndpointFailoverPolicy.DISABLED, failoverGroup.readOnlyEndpointPolicy());
        // Listing: primary sees both groups, the secondary only the one it partners in.
        List<SqlFailoverGroup> failoverGroupsList = sqlPrimaryServer.failoverGroups().list();
        Assertions.assertEquals(2, failoverGroupsList.size());
        failoverGroupsList = sqlSecondaryServer.failoverGroups().list();
        Assertions.assertEquals(1, failoverGroupsList.size());
        sqlPrimaryServer.failoverGroups().delete(failoverGroup2.name());
    }
@Test
public void canChangeSqlServerAndDatabaseAutomaticTuning() throws Exception {
String sqlServerAdminName = "sqladmin";
String sqlServerAdminPassword = password();
String databaseName = "db-from-sample";
String id = generateRandomUuid();
String storageName = generateRandomResourceName(sqlServerName, 22);
SqlServer sqlServer =
sqlServerManager
.sqlServers()
.define(sqlServerName)
.withRegion(Region.US_EAST)
.withNewResourceGroup(rgName)
.withAdministratorLogin(sqlServerAdminName)
.withAdministratorPassword(sqlServerAdminPassword)
.defineDatabase(databaseName)
.fromSample(SampleName.ADVENTURE_WORKS_LT)
.withBasicEdition()
.attach()
.create();
SqlDatabase dbFromSample = sqlServer.databases().get(databaseName);
Assertions.assertNotNull(dbFromSample);
Assertions.assertEquals(DatabaseEdition.BASIC, dbFromSample.edition());
SqlServerAutomaticTuning serverAutomaticTuning = sqlServer.getServerAutomaticTuning();
Assertions.assertEquals(AutomaticTuningServerMode.AUTO, serverAutomaticTuning.desiredState());
Assertions.assertEquals(AutomaticTuningServerMode.AUTO, serverAutomaticTuning.actualState());
Assertions.assertEquals(4, serverAutomaticTuning.tuningOptions().size());
serverAutomaticTuning
.update()
.withAutomaticTuningMode(AutomaticTuningServerMode.AUTO)
.withAutomaticTuningOption("createIndex", AutomaticTuningOptionModeDesired.OFF)
.withAutomaticTuningOption("dropIndex", AutomaticTuningOptionModeDesired.ON)
.withAutomaticTuningOption("forceLastGoodPlan", AutomaticTuningOptionModeDesired.DEFAULT)
.apply();
Assertions.assertEquals(AutomaticTuningServerMode.AUTO, serverAutomaticTuning.desiredState());
Assertions.assertEquals(AutomaticTuningServerMode.AUTO, serverAutomaticTuning.actualState());
Assertions
.assertEquals(
AutomaticTuningOptionModeDesired.OFF,
serverAutomaticTuning.tuningOptions().get("createIndex").desiredState());
Assertions
.assertEquals(
AutomaticTuningOptionModeActual.OFF,
serverAutomaticTuning.tuningOptions().get("createIndex").actualState());
Assertions
.assertEquals(
AutomaticTuningOptionModeDesired.ON,
serverAutomaticTuning.tuningOptions().get("dropIndex").desiredState());
Assertions
.assertEquals(
AutomaticTuningOptionModeActual.ON,
serverAutomaticTuning.tuningOptions().get("dropIndex").actualState());
Assertions
.assertEquals(
AutomaticTuningOptionModeDesired.DEFAULT,
serverAutomaticTuning.tuningOptions().get("forceLastGoodPlan").desiredState());
SqlDatabaseAutomaticTuning databaseAutomaticTuning = dbFromSample.getDatabaseAutomaticTuning();
Assertions.assertEquals(4, databaseAutomaticTuning.tuningOptions().size());
databaseAutomaticTuning
.update()
.withAutomaticTuningMode(AutomaticTuningMode.AUTO)
.withAutomaticTuningOption("createIndex", AutomaticTuningOptionModeDesired.OFF)
.withAutomaticTuningOption("dropIndex", AutomaticTuningOptionModeDesired.ON)
.withAutomaticTuningOption("forceLastGoodPlan", AutomaticTuningOptionModeDesired.DEFAULT)
.apply();
Assertions.assertEquals(AutomaticTuningMode.AUTO, databaseAutomaticTuning.desiredState());
Assertions.assertEquals(AutomaticTuningMode.AUTO, databaseAutomaticTuning.actualState());
Assertions
.assertEquals(
AutomaticTuningOptionModeDesired.OFF,
databaseAutomaticTuning.tuningOptions().get("createIndex").desiredState());
Assertions
.assertEquals(
AutomaticTuningOptionModeActual.OFF,
databaseAutomaticTuning.tuningOptions().get("createIndex").actualState());
Assertions
.assertEquals(
AutomaticTuningOptionModeDesired.ON,
databaseAutomaticTuning.tuningOptions().get("dropIndex").desiredState());
Assertions
.assertEquals(
AutomaticTuningOptionModeActual.ON,
databaseAutomaticTuning.tuningOptions().get("dropIndex").actualState());
Assertions
.assertEquals(
AutomaticTuningOptionModeDesired.DEFAULT,
databaseAutomaticTuning.tuningOptions().get("forceLastGoodPlan").desiredState());
dbFromSample.delete();
sqlServerManager.sqlServers().deleteByResourceGroup(rgName, sqlServerName);
}
    /**
     * Creates a server DNS alias on one server, then acquires (moves) it onto a second
     * server and verifies the alias follows the move.
     *
     * <p>NOTE(review): the method name misspells "Acquire"; renaming would break
     * external references and recorded sessions, so it is intentionally left as-is.
     */
    @Test
    public void canCreateAndAquireServerDnsAlias() throws Exception {
        String sqlServerName1 = sqlServerName + "1";
        String sqlServerName2 = sqlServerName + "2";
        String sqlServerAdminName = "sqladmin";
        String sqlServerAdminPassword = password();
        SqlServer sqlServer1 =
            sqlServerManager
                .sqlServers()
                .define(sqlServerName1)
                .withRegion(Region.US_EAST)
                .withNewResourceGroup(rgName)
                .withAdministratorLogin(sqlServerAdminName)
                .withAdministratorPassword(sqlServerAdminPassword)
                .create();
        Assertions.assertNotNull(sqlServer1);
        // Alias initially points at server 1.
        SqlServerDnsAlias dnsAlias = sqlServer1.dnsAliases().define(sqlServerName).create();
        Assertions.assertNotNull(dnsAlias);
        Assertions.assertEquals(rgName, dnsAlias.resourceGroupName());
        Assertions.assertEquals(sqlServerName1, dnsAlias.sqlServerName());
        dnsAlias = sqlServerManager.sqlServers().dnsAliases().getBySqlServer(rgName, sqlServerName1, sqlServerName);
        Assertions.assertNotNull(dnsAlias);
        Assertions.assertEquals(rgName, dnsAlias.resourceGroupName());
        Assertions.assertEquals(sqlServerName1, dnsAlias.sqlServerName());
        // NOTE(review): expects exactly one database on a fresh server — presumably an
        // implicit/system entry; confirm against current service behavior.
        Assertions.assertEquals(1, sqlServer1.databases().list().size());
        SqlServer sqlServer2 =
            sqlServerManager
                .sqlServers()
                .define(sqlServerName2)
                .withRegion(Region.US_EAST)
                .withNewResourceGroup(rgName)
                .withAdministratorLogin(sqlServerAdminName)
                .withAdministratorPassword(sqlServerAdminPassword)
                .create();
        Assertions.assertNotNull(sqlServer2);
        // Acquire moves the alias from server 1 onto server 2.
        sqlServer2.dnsAliases().acquire(sqlServerName, sqlServer1.id());
        // Give the service time to propagate the alias move before reading it back.
        ResourceManagerUtils.sleep(Duration.ofMinutes(3));
        dnsAlias = sqlServer2.dnsAliases().get(sqlServerName);
        Assertions.assertNotNull(dnsAlias);
        Assertions.assertEquals(rgName, dnsAlias.resourceGroupName());
        Assertions.assertEquals(sqlServerName2, dnsAlias.sqlServerName());
        // Clean up the alias and both servers.
        dnsAlias.delete();
        sqlServerManager.sqlServers().deleteByResourceGroup(rgName, sqlServerName1);
        sqlServerManager.sqlServers().deleteByResourceGroup(rgName, sqlServerName2);
    }
@Test
public void canGetSqlServerCapabilitiesAndCreateIdentity() throws Exception {
String sqlServerAdminName = "sqladmin";
String sqlServerAdminPassword = password();
String databaseName = "db-from-sample";
RegionCapabilities regionCapabilities = sqlServerManager.sqlServers().getCapabilitiesByRegion(Region.US_EAST);
Assertions.assertNotNull(regionCapabilities);
Assertions.assertNotNull(regionCapabilities.supportedCapabilitiesByServerVersion().get("12.0"));
Assertions
.assertTrue(
regionCapabilities.supportedCapabilitiesByServerVersion().get("12.0").supportedEditions().size() > 0);
Assertions
.assertTrue(
regionCapabilities
.supportedCapabilitiesByServerVersion()
.get("12.0")
.supportedElasticPoolEditions()
.size()
> 0);
SqlServer sqlServer =
sqlServerManager
.sqlServers()
.define(sqlServerName)
.withRegion(Region.US_EAST)
.withNewResourceGroup(rgName)
.withAdministratorLogin(sqlServerAdminName)
.withAdministratorPassword(sqlServerAdminPassword)
.withSystemAssignedManagedServiceIdentity()
.defineDatabase(databaseName)
.fromSample(SampleName.ADVENTURE_WORKS_LT)
.withBasicEdition()
.attach()
.create();
SqlDatabase dbFromSample = sqlServer.databases().get(databaseName);
Assertions.assertNotNull(dbFromSample);
Assertions.assertEquals(DatabaseEdition.BASIC, dbFromSample.edition());
Assertions.assertTrue(sqlServer.isManagedServiceIdentityEnabled());
Assertions.assertEquals(sqlServerManager.tenantId(), sqlServer.systemAssignedManagedServiceIdentityTenantId());
Assertions.assertNotNull(sqlServer.systemAssignedManagedServiceIdentityPrincipalId());
sqlServer.update().withSystemAssignedManagedServiceIdentity().apply();
Assertions.assertTrue(sqlServer.isManagedServiceIdentityEnabled());
Assertions.assertEquals(sqlServerManager.tenantId(), sqlServer.systemAssignedManagedServiceIdentityTenantId());
Assertions.assertNotNull(sqlServer.systemAssignedManagedServiceIdentityPrincipalId());
dbFromSample.delete();
sqlServerManager.sqlServers().deleteByResourceGroup(rgName, sqlServerName);
}
/**
 * Integration test: creates a SQL server with an AAD administrator, exports a sample
 * database to a storage account as a .bacpac, then imports it into a new database that
 * lives inside a newly-defined elastic pool.
 *
 * <p>Skipped in playback mode — the export/import round-trip cannot be replayed from
 * recorded sessions.
 */
@Test
public void canCRUDSqlServerWithImportDatabase() throws Exception {
    if (isPlaybackMode()) {
        return;
    }
    String sqlServerAdminName = "sqladmin";
    String sqlServerAdminPassword = password();
    String id = generateRandomUuid();
    String storageName = generateRandomResourceName(sqlServerName, 22);
    SqlServer sqlServer =
        sqlServerManager
            .sqlServers()
            .define(sqlServerName)
            .withRegion(Region.US_EAST)
            .withNewResourceGroup(rgName)
            .withAdministratorLogin(sqlServerAdminName)
            .withAdministratorPassword(sqlServerAdminPassword)
            .withActiveDirectoryAdministrator("DSEng", id)
            .create();
    SqlDatabase dbFromSample =
        sqlServer
            .databases()
            .define("db-from-sample")
            .fromSample(SampleName.ADVENTURE_WORKS_LT)
            .withBasicEdition()
            .withTag("tag1", "value1")
            .create();
    Assertions.assertNotNull(dbFromSample);
    Assertions.assertEquals(DatabaseEdition.BASIC, dbFromSample.edition());
    SqlDatabaseImportExportResponse exportedDB;
    StorageAccount storageAccount = null;
    // The storage account may or may not exist from a previous run; a 404 simply
    // means we need to create it as part of the export.
    try {
        storageAccount =
            storageManager.storageAccounts().getByResourceGroup(sqlServer.resourceGroupName(), storageName);
    } catch (ManagementException e) {
        Assertions.assertEquals(404, e.getResponse().getStatusCode());
    }
    if (storageAccount == null) {
        // Export to a storage account created on the fly via a Creatable.
        Creatable<StorageAccount> storageAccountCreatable =
            storageManager
                .storageAccounts()
                .define(storageName)
                .withRegion(sqlServer.regionName())
                .withExistingResourceGroup(sqlServer.resourceGroupName());
        exportedDB =
            dbFromSample
                .exportTo(storageAccountCreatable, "from-sample", "dbfromsample.bacpac")
                .withSqlAdministratorLoginAndPassword(sqlServerAdminName, sqlServerAdminPassword)
                .execute();
        storageAccount =
            storageManager.storageAccounts().getByResourceGroup(sqlServer.resourceGroupName(), storageName);
    } else {
        // Export to the already-existing storage account.
        exportedDB =
            dbFromSample
                .exportTo(storageAccount, "from-sample", "dbfromsample.bacpac")
                .withSqlAdministratorLoginAndPassword(sqlServerAdminName, sqlServerAdminPassword)
                .execute();
    }
    // Import the exported bacpac into a new database placed inside a new elastic pool.
    SqlDatabase dbFromImport =
        sqlServer
            .databases()
            .define("db-from-import")
            .defineElasticPool("ep1")
            .withBasicPool()
            .attach()
            .importFrom(storageAccount, "from-sample", "dbfromsample.bacpac")
            .withSqlAdministratorLoginAndPassword(sqlServerAdminName, sqlServerAdminPassword)
            .withTag("tag2", "value2")
            .create();
    Assertions.assertNotNull(dbFromImport);
    Assertions.assertEquals("ep1", dbFromImport.elasticPoolName());
    // Clean up all created resources.
    dbFromImport.delete();
    dbFromSample.delete();
    sqlServer.elasticPools().delete("ep1");
    sqlServerManager.sqlServers().deleteByResourceGroup(rgName, sqlServerName);
}
/**
 * Lists the recommended elastic pools of a pre-existing SQL server ("ans/ans-secondary")
 * and touches a service-tier-advisor metric on the first database.
 *
 * <p>Disabled: depends on a manually-provisioned server that is not created by this suite.
 */
// NOTE(review): the original declared @Test twice on this method. @Test is not a
// repeatable annotation, so the duplicate annotation is a compile error; one was removed.
@Test
@Disabled("Depends on the existing SQL server")
public void canListRecommendedElasticPools() throws Exception {
    SqlServer sqlServer = sqlServerManager.sqlServers().getByResourceGroup("ans", "ans-secondary");
    // Exercise the service-tier advisor of the first database (result is not asserted;
    // the call itself validates the advisor pipeline does not throw).
    sqlServer
        .databases()
        .list()
        .get(0)
        .listServiceTierAdvisors()
        .values()
        .iterator()
        .next()
        .serviceLevelObjectiveUsageMetric();
    Map<String, RecommendedElasticPool> recommendedElasticPools = sqlServer.listRecommendedElasticPools();
    Assertions.assertNotNull(recommendedElasticPools);
}
/**
 * Integration test: exercises the SQL server lifecycle — name availability check,
 * create, service-objective listing, password update, list-by-group, get, delete.
 */
@Test
public void canCRUDSqlServer() throws Exception {
    // The name is available before creation, and unavailable (AlreadyExists) after.
    CheckNameAvailabilityResult checkNameResult =
        sqlServerManager.sqlServers().checkNameAvailability(sqlServerName);
    Assertions.assertTrue(checkNameResult.isAvailable());
    SqlServer sqlServer = createSqlServer();
    validateSqlServer(sqlServer);
    checkNameResult = sqlServerManager.sqlServers().checkNameAvailability(sqlServerName);
    Assertions.assertFalse(checkNameResult.isAvailable());
    Assertions
        .assertEquals(
            CheckNameAvailabilityReason.ALREADY_EXISTS.toString(), checkNameResult.unavailabilityReason());
    // Service objectives are listable, refreshable, and fetchable by id.
    List<ServiceObjective> serviceObjectives = sqlServer.listServiceObjectives();
    Assertions.assertNotEquals(serviceObjectives.size(), 0);
    Assertions.assertNotNull(serviceObjectives.get(0).refresh());
    Assertions.assertNotNull(sqlServer.getServiceObjective("d1737d22-a8ea-4de7-9bd0-33395d2a7419"));
    // Update the admin password, then confirm the server appears in the group listing.
    sqlServer.update().withAdministratorPassword("P@ssword~2").apply();
    PagedIterable<SqlServer> sqlServers = sqlServerManager.sqlServers().listByResourceGroup(rgName);
    boolean found = false;
    for (SqlServer server : sqlServers) {
        if (server.name().equals(sqlServerName)) {
            found = true;
        }
    }
    Assertions.assertTrue(found);
    sqlServer = sqlServerManager.sqlServers().getByResourceGroup(rgName, sqlServerName);
    Assertions.assertNotNull(sqlServer);
    // Delete and verify the server is gone (404 on getById).
    sqlServerManager.sqlServers().deleteByResourceGroup(sqlServer.resourceGroupName(), sqlServer.name());
    validateSqlServerNotFound(sqlServer);
}
/**
 * Integration test: creates many child resources (databases, elastic pools, firewall
 * rules) in a single fluent create() call, validates them, then repeats the same batch
 * creation through update() and validates deletion via update() as well.
 */
@Test
public void canUseCoolShortcutsForResourceCreation() throws Exception {
    String database2Name = "database2";
    String database1InEPName = "database1InEP";
    String database2InEPName = "database2InEP";
    String elasticPool2Name = "elasticPool2";
    String elasticPool3Name = "elasticPool3";
    String elasticPool1Name = SQL_ELASTIC_POOL_NAME;
    // Batch-create server + 4 databases + 3 elastic pools + 3 firewall rules in one call.
    SqlServer sqlServer =
        sqlServerManager
            .sqlServers()
            .define(sqlServerName)
            .withRegion(Region.US_EAST)
            .withNewResourceGroup(rgName)
            .withAdministratorLogin("userName")
            .withAdministratorPassword("Password~1")
            .withoutAccessFromAzureServices()
            .defineDatabase(SQL_DATABASE_NAME).attach()
            .defineDatabase(database2Name).attach()
            .defineElasticPool(elasticPool1Name).withStandardPool().attach()
            .defineElasticPool(elasticPool2Name).withPremiumPool().attach()
            .defineElasticPool(elasticPool3Name).withStandardPool().attach()
            .defineDatabase(database1InEPName).withExistingElasticPool(elasticPool2Name).attach()
            .defineDatabase(database2InEPName).withExistingElasticPool(elasticPool2Name).attach()
            .defineFirewallRule(SQL_FIREWALLRULE_NAME).withIpAddressRange(START_IPADDRESS, END_IPADDRESS).attach()
            .defineFirewallRule(generateRandomResourceName("firewall_", 15)).withIpAddressRange(START_IPADDRESS, END_IPADDRESS).attach()
            .defineFirewallRule(generateRandomResourceName("firewall_", 15)).withIpAddress(START_IPADDRESS).attach()
            .create();
    // deleteUsingUpdate=false: children are removed via direct delete() calls.
    validateMultiCreation(
        database2Name,
        database1InEPName,
        database2InEPName,
        elasticPool1Name,
        elasticPool2Name,
        elasticPool3Name,
        sqlServer,
        false);
    // Second round with fresh names, this time batch-created through update().
    elasticPool1Name = SQL_ELASTIC_POOL_NAME + " U";
    database2Name = "database2U";
    database1InEPName = "database1InEPU";
    database2InEPName = "database2InEPU";
    elasticPool2Name = "elasticPool2U";
    elasticPool3Name = "elasticPool3U";
    sqlServer =
        sqlServer
            .update()
            .defineDatabase(SQL_DATABASE_NAME).attach()
            .defineDatabase(database2Name).attach()
            .defineElasticPool(elasticPool1Name).withStandardPool().attach()
            .defineElasticPool(elasticPool2Name).withPremiumPool().attach()
            .defineElasticPool(elasticPool3Name).withStandardPool().attach()
            .defineDatabase(database1InEPName).withExistingElasticPool(elasticPool2Name).attach()
            .defineDatabase(database2InEPName).withExistingElasticPool(elasticPool2Name).attach()
            .defineFirewallRule(SQL_FIREWALLRULE_NAME).withIpAddressRange(START_IPADDRESS, END_IPADDRESS).attach()
            .defineFirewallRule(generateRandomResourceName("firewall_", 15)).withIpAddressRange(START_IPADDRESS, END_IPADDRESS).attach()
            .defineFirewallRule(generateRandomResourceName("firewall_", 15)).withIpAddress(START_IPADDRESS).attach()
            .withTag("tag2", "value2")
            .apply();
    // deleteUsingUpdate=true: children are removed via withoutXxx() in an update().
    validateMultiCreation(
        database2Name,
        database1InEPName,
        database2InEPName,
        elasticPool1Name,
        elasticPool2Name,
        elasticPool3Name,
        sqlServer,
        true);
    sqlServer.refresh();
    Assertions.assertEquals(sqlServer.elasticPools().list().size(), 0);
    PagedIterable<SqlServer> sqlServers = sqlServerManager.sqlServers().listByResourceGroup(rgName);
    boolean found = false;
    for (SqlServer server : sqlServers) {
        if (server.name().equals(sqlServerName)) {
            found = true;
        }
    }
    Assertions.assertTrue(found);
    sqlServer = sqlServerManager.sqlServers().getByResourceGroup(rgName, sqlServerName);
    Assertions.assertNotNull(sqlServer);
    sqlServerManager.sqlServers().deleteByResourceGroup(sqlServer.resourceGroupName(), sqlServer.name());
    validateSqlServerNotFound(sqlServer);
}
/**
 * Integration test: database lifecycle — create, transparent data encryption (TDE)
 * enable/disable, service-tier advisors, copy into a new elastic pool, rename, delete.
 */
@Test
public void canCRUDSqlDatabase() throws Exception {
    SqlServer sqlServer = createSqlServer();
    Mono<SqlDatabase> resourceStream =
        sqlServer.databases().define(SQL_DATABASE_NAME).withStandardEdition(SqlDatabaseStandardServiceObjective.S0).createAsync();
    SqlDatabase sqlDatabase = resourceStream.block();
    validateSqlDatabase(sqlDatabase, SQL_DATABASE_NAME);
    Assertions.assertTrue(sqlServer.databases().list().size() > 0);
    // Toggle TDE on, verify status and activities, then toggle it back off.
    TransparentDataEncryption transparentDataEncryption = sqlDatabase.getTransparentDataEncryption();
    Assertions.assertNotNull(transparentDataEncryption.status());
    List<TransparentDataEncryptionActivity> transparentDataEncryptionActivities =
        transparentDataEncryption.listActivities();
    Assertions.assertNotNull(transparentDataEncryptionActivities);
    transparentDataEncryption = transparentDataEncryption.updateStatus(TransparentDataEncryptionStatus.ENABLED);
    Assertions.assertNotNull(transparentDataEncryption);
    Assertions.assertEquals(transparentDataEncryption.status(), TransparentDataEncryptionStatus.ENABLED);
    transparentDataEncryptionActivities = transparentDataEncryption.listActivities();
    Assertions.assertNotNull(transparentDataEncryptionActivities);
    // Brief pause before flipping TDE back off, to let the previous change settle.
    ResourceManagerUtils.sleep(Duration.ofSeconds(10));
    transparentDataEncryption =
        sqlDatabase.getTransparentDataEncryption().updateStatus(TransparentDataEncryptionStatus.DISABLED);
    Assertions.assertNotNull(transparentDataEncryption);
    Assertions.assertEquals(transparentDataEncryption.status(), TransparentDataEncryptionStatus.DISABLED);
    Assertions.assertEquals(transparentDataEncryption.sqlServerName(), sqlServerName);
    Assertions.assertEquals(transparentDataEncryption.databaseName(), SQL_DATABASE_NAME);
    Assertions.assertNotNull(transparentDataEncryption.name());
    Assertions.assertNotNull(transparentDataEncryption.id());
    // Service-tier advisors must be present and refreshable.
    Map<String, ServiceTierAdvisor> serviceTierAdvisors = sqlDatabase.listServiceTierAdvisors();
    Assertions.assertNotNull(serviceTierAdvisors);
    Assertions.assertNotNull(serviceTierAdvisors.values().iterator().next().serviceLevelObjectiveUsageMetric());
    Assertions.assertNotEquals(serviceTierAdvisors.size(), 0);
    Assertions.assertNotNull(serviceTierAdvisors.values().iterator().next().refresh());
    Assertions.assertNotNull(serviceTierAdvisors.values().iterator().next().serviceLevelObjectiveUsageMetric());
    sqlServer = sqlServerManager.sqlServers().getByResourceGroup(rgName, sqlServerName);
    validateSqlServer(sqlServer);
    // Copy the database into a new elastic pool (CreateMode.COPY).
    Creatable<SqlElasticPool> sqlElasticPoolCreatable =
        sqlServer.elasticPools().define(SQL_ELASTIC_POOL_NAME).withStandardPool();
    String anotherDatabaseName = "anotherDatabase";
    SqlDatabase anotherDatabase =
        sqlServer
            .databases()
            .define(anotherDatabaseName)
            .withNewElasticPool(sqlElasticPoolCreatable)
            .withSourceDatabase(sqlDatabase.id())
            .withMode(CreateMode.COPY)
            .create();
    validateSqlDatabaseWithElasticPool(anotherDatabase, anotherDatabaseName);
    sqlServer.databases().delete(anotherDatabase.name());
    validateSqlDatabase(sqlServer.databases().get(SQL_DATABASE_NAME), SQL_DATABASE_NAME);
    validateListSqlDatabase(sqlServer.databases().list());
    sqlServer.databases().delete(SQL_DATABASE_NAME);
    validateSqlDatabaseNotFound(SQL_DATABASE_NAME);
    // Create another database asynchronously and rename it.
    resourceStream =
        sqlServer
            .databases()
            .define("newDatabase")
            .withCollation(COLLATION)
            .withStandardEdition(SqlDatabaseStandardServiceObjective.S0)
            .createAsync();
    sqlDatabase = resourceStream.block();
    sqlDatabase = sqlDatabase.rename("renamedDatabase");
    validateSqlDatabase(sqlDatabase, "renamedDatabase");
    // Clean up.
    sqlServer.databases().delete(sqlDatabase.name());
    sqlServerManager.sqlServers().deleteByResourceGroup(sqlServer.resourceGroupName(), sqlServer.name());
    validateSqlServerNotFound(sqlServer);
}
/**
 * Integration test: sets up geo-replication between databases on two SQL servers,
 * validates the link from both sides, exercises planned and forced failover, then
 * deletes the link and both servers.
 */
@Test
public void canManageReplicationLinks() throws Exception {
    String anotherSqlServerName = sqlServerName + "another";
    SqlServer sqlServer1 = createSqlServer();
    SqlServer sqlServer2 = createSqlServer(anotherSqlServerName);
    Mono<SqlDatabase> resourceStream =
        sqlServer1
            .databases()
            .define(SQL_DATABASE_NAME)
            .withCollation(COLLATION)
            .withStandardEdition(SqlDatabaseStandardServiceObjective.S0)
            .createAsync();
    SqlDatabase databaseInServer1 = resourceStream.block();
    validateSqlDatabase(databaseInServer1, SQL_DATABASE_NAME);
    // Create the secondary on server 2 from the primary on server 1.
    SqlDatabase databaseInServer2 =
        sqlServer2
            .databases()
            .define(SQL_DATABASE_NAME)
            .withSourceDatabase(databaseInServer1.id())
            .withMode(CreateMode.ONLINE_SECONDARY)
            .create();
    ResourceManagerUtils.sleep(Duration.ofSeconds(2));
    // Each side must report exactly one replication link pointing to the other.
    List<ReplicationLink> replicationLinksInDb1 =
        new ArrayList<>(databaseInServer1.listReplicationLinks().values());
    Assertions.assertEquals(replicationLinksInDb1.size(), 1);
    Assertions.assertEquals(replicationLinksInDb1.get(0).partnerDatabase(), databaseInServer2.name());
    Assertions.assertEquals(replicationLinksInDb1.get(0).partnerServer(), databaseInServer2.sqlServerName());
    List<ReplicationLink> replicationLinksInDb2 =
        new ArrayList<>(databaseInServer2.listReplicationLinks().values());
    Assertions.assertEquals(replicationLinksInDb2.size(), 1);
    Assertions.assertEquals(replicationLinksInDb2.get(0).partnerDatabase(), databaseInServer1.name());
    Assertions.assertEquals(replicationLinksInDb2.get(0).partnerServer(), databaseInServer1.sqlServerName());
    Assertions.assertNotNull(replicationLinksInDb1.get(0).refresh());
    // Planned failover to the secondary, then forced failover (allowing data loss) back.
    replicationLinksInDb2.get(0).failover();
    replicationLinksInDb2.get(0).refresh();
    ResourceManagerUtils.sleep(Duration.ofSeconds(30));
    replicationLinksInDb1.get(0).forceFailoverAllowDataLoss();
    replicationLinksInDb1.get(0).refresh();
    ResourceManagerUtils.sleep(Duration.ofSeconds(30));
    // Dropping the link from the secondary side removes it entirely.
    replicationLinksInDb2.get(0).delete();
    Assertions.assertEquals(databaseInServer2.listReplicationLinks().size(), 0);
    // Clean up databases and both servers.
    sqlServer1.databases().delete(databaseInServer1.name());
    sqlServer2.databases().delete(databaseInServer2.name());
    sqlServerManager.sqlServers().deleteByResourceGroup(sqlServer2.resourceGroupName(), sqlServer2.name());
    validateSqlServerNotFound(sqlServer2);
    sqlServerManager.sqlServers().deleteByResourceGroup(sqlServer1.resourceGroupName(), sqlServer1.name());
    validateSqlServerNotFound(sqlServer1);
}
/**
 * Integration test: creates a data-warehouse database, casts it to {@code SqlWarehouse},
 * reads restore points and usage metrics, and exercises pause/resume.
 */
@Test
public void canDoOperationsOnDataWarehouse() throws Exception {
    SqlServer sqlServer = createSqlServer();
    validateSqlServer(sqlServer);
    Assertions.assertNotNull(sqlServer.listUsageMetrics());
    // Create a database with a data-warehouse SKU.
    Mono<SqlDatabase> resourceStream =
        sqlServer
            .databases()
            .define(SQL_DATABASE_NAME)
            .withCollation(COLLATION)
            .withSku(DatabaseSku.DATAWAREHOUSE_DW1000C)
            .createAsync();
    SqlDatabase sqlDatabase = resourceStream.block();
    Assertions.assertNotNull(sqlDatabase);
    sqlDatabase = sqlServer.databases().get(SQL_DATABASE_NAME);
    Assertions.assertNotNull(sqlDatabase);
    Assertions.assertTrue(sqlDatabase.isDataWarehouse());
    // The database is viewable as a warehouse with the DATA_WAREHOUSE edition.
    SqlWarehouse dataWarehouse = sqlServer.databases().get(SQL_DATABASE_NAME).asWarehouse();
    Assertions.assertNotNull(dataWarehouse);
    Assertions.assertEquals(dataWarehouse.name(), SQL_DATABASE_NAME);
    Assertions.assertEquals(dataWarehouse.edition(), DatabaseEdition.DATA_WAREHOUSE);
    Assertions.assertNotNull(dataWarehouse.listRestorePoints());
    Assertions.assertNotNull(dataWarehouse.listUsageMetrics());
    // Pause and resume must complete without throwing.
    dataWarehouse.pauseDataWarehouse();
    dataWarehouse.resumeDataWarehouse();
    // Clean up.
    sqlServer.databases().delete(SQL_DATABASE_NAME);
    sqlServerManager.sqlServers().deleteByResourceGroup(sqlServer.resourceGroupName(), sqlServer.name());
    validateSqlServerNotFound(sqlServer);
}
/**
 * Integration test: creates a database inside a new elastic pool, then moves it out,
 * walks it through Standard → Premium editions and size changes, moves it back into the
 * pool, and validates pool activities and database listings along the way.
 */
@Test
public void canCRUDSqlDatabaseWithElasticPool() throws Exception {
    SqlServer sqlServer = createSqlServer();
    Creatable<SqlElasticPool> sqlElasticPoolCreatable =
        sqlServer
            .elasticPools()
            .define(SQL_ELASTIC_POOL_NAME)
            .withStandardPool()
            .withTag("tag1", "value1");
    Mono<SqlDatabase> resourceStream =
        sqlServer
            .databases()
            .define(SQL_DATABASE_NAME)
            .withNewElasticPool(sqlElasticPoolCreatable)
            .withCollation(COLLATION)
            .createAsync();
    SqlDatabase sqlDatabase = resourceStream.block();
    validateSqlDatabase(sqlDatabase, SQL_DATABASE_NAME);
    sqlServer = sqlServerManager.sqlServers().getByResourceGroup(rgName, sqlServerName);
    validateSqlServer(sqlServer);
    SqlElasticPool elasticPool = sqlServer.elasticPools().get(SQL_ELASTIC_POOL_NAME);
    validateSqlElasticPool(elasticPool);
    validateSqlDatabaseWithElasticPool(sqlServer.databases().get(SQL_DATABASE_NAME), SQL_DATABASE_NAME);
    validateListSqlDatabase(sqlServer.databases().list());
    // Detach the database from the pool and give it a standalone Standard S3 objective.
    sqlDatabase
        .update()
        .withoutElasticPool()
        .withStandardEdition(SqlDatabaseStandardServiceObjective.S3)
        .apply();
    sqlDatabase = sqlServer.databases().get(SQL_DATABASE_NAME);
    Assertions.assertNull(sqlDatabase.elasticPoolName());
    // Upgrade to Premium P1, then P2, verifying current/requested objectives.
    sqlDatabase.update().withPremiumEdition(SqlDatabasePremiumServiceObjective.P1).apply();
    sqlDatabase = sqlServer.databases().get(SQL_DATABASE_NAME);
    Assertions.assertEquals(sqlDatabase.edition(), DatabaseEdition.PREMIUM);
    Assertions.assertEquals(sqlDatabase.currentServiceObjectiveName(), ServiceObjectiveName.P1.toString());
    sqlDatabase.update().withPremiumEdition(SqlDatabasePremiumServiceObjective.P2).apply();
    sqlDatabase = sqlServer.databases().get(SQL_DATABASE_NAME);
    Assertions.assertEquals(sqlDatabase.currentServiceObjectiveName(), ServiceObjectiveName.P2.toString());
    Assertions.assertEquals(sqlDatabase.requestedServiceObjectiveName(), ServiceObjectiveName.P2.toString());
    // Grow max size to 250 GB.
    sqlDatabase.update().withMaxSizeBytes(268435456000L).apply();
    sqlDatabase = sqlServer.databases().get(SQL_DATABASE_NAME);
    Assertions.assertEquals(sqlDatabase.maxSizeBytes(), 268435456000L);
    // Re-attach the database to the elastic pool.
    sqlDatabase.update().withExistingElasticPool(SQL_ELASTIC_POOL_NAME).apply();
    sqlDatabase = sqlServer.databases().get(SQL_DATABASE_NAME);
    Assertions.assertEquals(sqlDatabase.elasticPoolName(), SQL_ELASTIC_POOL_NAME);
    // The pool reports activities and exactly one member database.
    Assertions.assertNotNull(elasticPool.listActivities());
    Assertions.assertNotNull(elasticPool.listDatabaseActivities());
    List<SqlDatabase> databasesInElasticPool = elasticPool.listDatabases();
    Assertions.assertNotNull(databasesInElasticPool);
    Assertions.assertEquals(databasesInElasticPool.size(), 1);
    SqlDatabase databaseInElasticPool = elasticPool.getDatabase(SQL_DATABASE_NAME);
    validateSqlDatabase(databaseInElasticPool, SQL_DATABASE_NAME);
    databaseInElasticPool.refresh();
    validateResourceNotFound(() -> elasticPool.getDatabase("does_not_exist"));
    sqlServer.databases().delete(SQL_DATABASE_NAME);
    validateSqlDatabaseNotFound(SQL_DATABASE_NAME);
    // Create another database in the (now empty) existing pool, then clean everything up.
    SqlElasticPool sqlElasticPool = sqlServer.elasticPools().get(SQL_ELASTIC_POOL_NAME);
    resourceStream =
        sqlServer
            .databases()
            .define("newDatabase")
            .withExistingElasticPool(sqlElasticPool)
            .withCollation(COLLATION)
            .createAsync();
    sqlDatabase = resourceStream.block();
    sqlServer.databases().delete(sqlDatabase.name());
    validateSqlDatabaseNotFound("newDatabase");
    sqlServer.elasticPools().delete(SQL_ELASTIC_POOL_NAME);
    sqlServerManager.sqlServers().deleteByResourceGroup(sqlServer.resourceGroupName(), sqlServer.name());
    validateSqlServerNotFound(sqlServer);
}
/**
 * Integration test: elastic pool lifecycle — async create, update of DTU/capacity/storage
 * with a new member database, listing, and deletion.
 */
@Test
public void canCRUDSqlElasticPool() throws Exception {
    SqlServer sqlServer = createSqlServer();
    sqlServer = sqlServerManager.sqlServers().getByResourceGroup(rgName, sqlServerName);
    validateSqlServer(sqlServer);
    Mono<SqlElasticPool> resourceStream =
        sqlServer
            .elasticPools()
            .define(SQL_ELASTIC_POOL_NAME)
            .withStandardPool()
            .withTag("tag1", "value1")
            .createAsync();
    SqlElasticPool sqlElasticPool = resourceStream.block();
    validateSqlElasticPool(sqlElasticPool);
    // A freshly-created pool has no member databases.
    Assertions.assertEquals(sqlElasticPool.listDatabases().size(), 0);
    // Update capacities and add one new database in the same call.
    sqlElasticPool =
        sqlElasticPool
            .update()
            .withReservedDtu(SqlElasticPoolBasicEDTUs.eDTU_100)
            .withDatabaseMaxCapacity(20)
            .withDatabaseMinCapacity(10)
            .withStorageCapacity(102400 * 1024 * 1024L)
            .withNewDatabase(SQL_DATABASE_NAME)
            .withTag("tag2", "value2")
            .apply();
    validateSqlElasticPool(sqlElasticPool);
    Assertions.assertEquals(sqlElasticPool.listDatabases().size(), 1);
    Assertions.assertNotNull(sqlElasticPool.getDatabase(SQL_DATABASE_NAME));
    validateSqlElasticPool(sqlServer.elasticPools().get(SQL_ELASTIC_POOL_NAME));
    validateListSqlElasticPool(sqlServer.elasticPools().list());
    // Delete the member database first, then the pool itself.
    sqlServer.databases().delete(SQL_DATABASE_NAME);
    sqlServer.elasticPools().delete(SQL_ELASTIC_POOL_NAME);
    validateSqlElasticPoolNotFound(sqlServer, SQL_ELASTIC_POOL_NAME);
    // Create/delete another pool asynchronously, then tear down the server.
    resourceStream =
        sqlServer.elasticPools().define("newElasticPool").withStandardPool().createAsync();
    sqlElasticPool = resourceStream.block();
    sqlServer.elasticPools().delete(sqlElasticPool.name());
    validateSqlElasticPoolNotFound(sqlServer, "newElasticPool");
    sqlServerManager.sqlServers().deleteByResourceGroup(sqlServer.resourceGroupName(), sqlServer.name());
    validateSqlServerNotFound(sqlServer);
}
/**
 * Integration test: firewall rule lifecycle — async create with an IP range, create with
 * a single IP, update end address, list, and delete.
 */
@Test
public void canCRUDSqlFirewallRule() throws Exception {
    SqlServer sqlServer = createSqlServer();
    sqlServer = sqlServerManager.sqlServers().getByResourceGroup(rgName, sqlServerName);
    validateSqlServer(sqlServer);
    Mono<SqlFirewallRule> resourceStream =
        sqlServer
            .firewallRules()
            .define(SQL_FIREWALLRULE_NAME)
            .withIpAddressRange(START_IPADDRESS, END_IPADDRESS)
            .createAsync();
    SqlFirewallRule sqlFirewallRule = resourceStream.block();
    validateSqlFirewallRule(sqlFirewallRule, SQL_FIREWALLRULE_NAME);
    validateSqlFirewallRule(sqlServer.firewallRules().get(SQL_FIREWALLRULE_NAME), SQL_FIREWALLRULE_NAME);
    // A single-IP rule has identical start and end addresses until updated.
    String secondFirewallRuleName = "secondFireWallRule";
    SqlFirewallRule secondFirewallRule =
        sqlServer.firewallRules().define(secondFirewallRuleName).withIpAddress(START_IPADDRESS).create();
    Assertions.assertNotNull(secondFirewallRule);
    secondFirewallRule = sqlServer.firewallRules().get(secondFirewallRuleName);
    Assertions.assertNotNull(secondFirewallRule);
    Assertions.assertEquals(START_IPADDRESS, secondFirewallRule.endIpAddress());
    secondFirewallRule = secondFirewallRule.update().withEndIpAddress(END_IPADDRESS).apply();
    validateSqlFirewallRule(secondFirewallRule, secondFirewallRuleName);
    sqlServer.firewallRules().delete(secondFirewallRuleName);
    // sqlServer must be effectively final to be captured by the lambda below.
    final SqlServer finalSqlServer = sqlServer;
    validateResourceNotFound(() -> finalSqlServer.firewallRules().get(secondFirewallRuleName));
    // Shrink the first rule's range to a single address, verify, then clean up.
    sqlFirewallRule = sqlServer.firewallRules().get(SQL_FIREWALLRULE_NAME);
    validateSqlFirewallRule(sqlFirewallRule, SQL_FIREWALLRULE_NAME);
    sqlFirewallRule.update().withEndIpAddress(START_IPADDRESS).apply();
    sqlFirewallRule = sqlServer.firewallRules().get(SQL_FIREWALLRULE_NAME);
    Assertions.assertEquals(sqlFirewallRule.endIpAddress(), START_IPADDRESS);
    validateListSqlFirewallRule(sqlServer.firewallRules().list());
    sqlServer.firewallRules().delete(sqlFirewallRule.name());
    validateSqlFirewallRuleNotFound();
    sqlServerManager.sqlServers().deleteByResourceGroup(sqlServer.resourceGroupName(), sqlServer.name());
    validateSqlServerNotFound(sqlServer);
}
/**
 * Validates the resources created by the batch create/update in
 * {@code canUseCoolShortcutsForResourceCreation}, then deletes them.
 *
 * @param database2Name standalone database name
 * @param database1InEPName first database expected inside elastic pool 2
 * @param database2InEPName second database expected inside elastic pool 2
 * @param elasticPool1Name standard pool name
 * @param elasticPool2Name premium pool name
 * @param elasticPool3Name standard pool name
 * @param sqlServer server owning all the children
 * @param deleteUsingUpdate when true, children are removed via withoutXxx() in an
 *     update(); when false, via direct delete() calls
 */
private void validateMultiCreation(
    String database2Name,
    String database1InEPName,
    String database2InEPName,
    String elasticPool1Name,
    String elasticPool2Name,
    String elasticPool3Name,
    SqlServer sqlServer,
    boolean deleteUsingUpdate) {
    validateSqlServer(sqlServer);
    validateSqlServer(sqlServerManager.sqlServers().getByResourceGroup(rgName, sqlServerName));
    validateSqlDatabase(sqlServer.databases().get(SQL_DATABASE_NAME), SQL_DATABASE_NAME);
    validateSqlFirewallRule(sqlServer.firewallRules().get(SQL_FIREWALLRULE_NAME), SQL_FIREWALLRULE_NAME);
    // Of the three firewall rules, exactly one extra rule ends at START_IPADDRESS
    // (single-IP rule) and one ends at END_IPADDRESS (range rule).
    List<SqlFirewallRule> firewalls = sqlServer.firewallRules().list();
    Assertions.assertEquals(3, firewalls.size());
    int startIPAddress = 0;
    int endIPAddress = 0;
    for (SqlFirewallRule firewall : firewalls) {
        if (!firewall.name().equalsIgnoreCase(SQL_FIREWALLRULE_NAME)) {
            Assertions.assertEquals(firewall.startIpAddress(), START_IPADDRESS);
            if (firewall.endIpAddress().equalsIgnoreCase(START_IPADDRESS)) {
                startIPAddress++;
            } else if (firewall.endIpAddress().equalsIgnoreCase(END_IPADDRESS)) {
                endIPAddress++;
            }
        }
    }
    Assertions.assertEquals(startIPAddress, 1);
    Assertions.assertEquals(endIPAddress, 1);
    Assertions.assertNotNull(sqlServer.databases().get(database2Name));
    Assertions.assertNotNull(sqlServer.databases().get(database1InEPName));
    Assertions.assertNotNull(sqlServer.databases().get(database2InEPName));
    // Pool 1: standard; pool 2: premium with the two EP databases; pool 3: standard.
    SqlElasticPool ep1 = sqlServer.elasticPools().get(elasticPool1Name);
    validateSqlElasticPool(ep1, elasticPool1Name);
    SqlElasticPool ep2 = sqlServer.elasticPools().get(elasticPool2Name);
    Assertions.assertNotNull(ep2);
    Assertions.assertEquals(ep2.edition(), ElasticPoolEdition.PREMIUM);
    Assertions.assertEquals(ep2.listDatabases().size(), 2);
    Assertions.assertNotNull(ep2.getDatabase(database1InEPName));
    Assertions.assertNotNull(ep2.getDatabase(database2InEPName));
    SqlElasticPool ep3 = sqlServer.elasticPools().get(elasticPool3Name);
    Assertions.assertNotNull(ep3);
    Assertions.assertEquals(ep3.edition(), ElasticPoolEdition.STANDARD);
    if (!deleteUsingUpdate) {
        // Tear-down via direct delete() calls: databases first, then pools, then rules.
        sqlServer.databases().delete(database2Name);
        sqlServer.databases().delete(database1InEPName);
        sqlServer.databases().delete(database2InEPName);
        sqlServer.databases().delete(SQL_DATABASE_NAME);
        Assertions.assertEquals(ep1.listDatabases().size(), 0);
        Assertions.assertEquals(ep2.listDatabases().size(), 0);
        Assertions.assertEquals(ep3.listDatabases().size(), 0);
        sqlServer.elasticPools().delete(elasticPool1Name);
        sqlServer.elasticPools().delete(elasticPool2Name);
        sqlServer.elasticPools().delete(elasticPool3Name);
        firewalls = sqlServer.firewallRules().list();
        for (SqlFirewallRule firewallRule : firewalls) {
            firewallRule.delete();
        }
    } else {
        // Tear-down via a single update() with withoutXxx() removals; the named
        // firewall rule is removed here, the remaining two are deleted directly.
        sqlServer
            .update()
            .withoutDatabase(database2Name)
            .withoutElasticPool(elasticPool1Name)
            .withoutElasticPool(elasticPool2Name)
            .withoutElasticPool(elasticPool3Name)
            .withoutDatabase(database1InEPName)
            .withoutDatabase(SQL_DATABASE_NAME)
            .withoutDatabase(database2InEPName)
            .withoutFirewallRule(SQL_FIREWALLRULE_NAME)
            .apply();
        Assertions.assertEquals(sqlServer.elasticPools().list().size(), 0);
        firewalls = sqlServer.firewallRules().list();
        Assertions.assertEquals(firewalls.size(), 2);
        for (SqlFirewallRule firewallRule : firewalls) {
            firewallRule.delete();
        }
    }
    // Only the "master" database remains after either tear-down path.
    // NOTE(review): assumes databases().list() includes the master database — confirm.
    Assertions.assertEquals(sqlServer.elasticPools().list().size(), 0);
    Assertions.assertEquals(sqlServer.databases().list().size(), 1);
}
/** Asserts that the shared firewall rule can no longer be fetched from the test server. */
private void validateSqlFirewallRuleNotFound() {
    Supplier<Object> fetchRule = () -> {
        SqlServer server = sqlServerManager.sqlServers().getByResourceGroup(rgName, sqlServerName);
        return server.firewallRules().get(SQL_FIREWALLRULE_NAME);
    };
    validateResourceNotFound(fetchRule);
}
/** Asserts that {@code elasticPoolName} can no longer be fetched from {@code sqlServer}. */
private void validateSqlElasticPoolNotFound(SqlServer sqlServer, String elasticPoolName) {
    Supplier<Object> fetchPool = () -> sqlServer.elasticPools().get(elasticPoolName);
    validateResourceNotFound(fetchPool);
}
/** Asserts that {@code newDatabase} can no longer be fetched from the test server. */
private void validateSqlDatabaseNotFound(String newDatabase) {
    Supplier<Object> fetchDatabase = () -> {
        SqlServer server = sqlServerManager.sqlServers().getByResourceGroup(rgName, sqlServerName);
        return server.databases().get(newDatabase);
    };
    validateResourceNotFound(fetchDatabase);
}
/** Asserts that {@code sqlServer} can no longer be fetched by its resource id. */
private void validateSqlServerNotFound(SqlServer sqlServer) {
    Supplier<Object> fetchServer = () -> sqlServerManager.sqlServers().getById(sqlServer.id());
    validateResourceNotFound(fetchServer);
}
/**
 * Asserts that fetching a resource reports "not found": either the supplier returns
 * {@code null}, or it throws a {@link ManagementException} with HTTP status 404.
 *
 * @param fetchResource supplier that attempts to retrieve the resource
 */
private void validateResourceNotFound(Supplier<Object> fetchResource) {
    try {
        // A null result is an acceptable "not found" signal.
        Assertions.assertNull(fetchResource.get());
    } catch (ManagementException e) {
        // Otherwise the service must have answered 404.
        Assertions.assertEquals(404, e.getResponse().getStatusCode());
    }
}
/** Creates a SQL server with the default test server name. */
private SqlServer createSqlServer() {
    return createSqlServer(sqlServerName);
}
/**
 * Creates a SQL server with fixed test credentials in US East, inside a new resource
 * group named {@code rgName}.
 *
 * @param sqlServerName name for the new server
 * @return the created server
 */
private SqlServer createSqlServer(String sqlServerName) {
    return sqlServerManager.sqlServers()
        .define(sqlServerName)
        .withRegion(Region.US_EAST)
        .withNewResourceGroup(rgName)
        .withAdministratorLogin("userName")
        .withAdministratorPassword("P@ssword~1")
        .create();
}
/** Asserts that the list contains the shared test firewall rule by name. */
private static void validateListSqlFirewallRule(List<SqlFirewallRule> sqlFirewallRules) {
    boolean found =
        sqlFirewallRules.stream().anyMatch(rule -> rule.name().equals(SQL_FIREWALLRULE_NAME));
    Assertions.assertTrue(found);
}
/**
 * Validates a firewall rule's identity and IP range against the shared test constants.
 *
 * @param sqlFirewallRule rule under test (must not be null)
 * @param firewallName expected rule name
 */
private void validateSqlFirewallRule(SqlFirewallRule sqlFirewallRule, String firewallName) {
    Assertions.assertNotNull(sqlFirewallRule);
    Assertions.assertEquals(firewallName, sqlFirewallRule.name());
    // NOTE(review): the original asserted sqlServerName twice; the duplicate was removed.
    Assertions.assertEquals(sqlServerName, sqlFirewallRule.sqlServerName());
    Assertions.assertEquals(START_IPADDRESS, sqlFirewallRule.startIpAddress());
    Assertions.assertEquals(END_IPADDRESS, sqlFirewallRule.endIpAddress());
    Assertions.assertEquals(rgName, sqlFirewallRule.resourceGroupName());
    Assertions.assertEquals(Region.US_EAST, sqlFirewallRule.region());
}
/** Asserts that the list contains the shared test elastic pool by name. */
private static void validateListSqlElasticPool(List<SqlElasticPool> sqlElasticPools) {
    boolean found =
        sqlElasticPools.stream().anyMatch(pool -> pool.name().equals(SQL_ELASTIC_POOL_NAME));
    Assertions.assertTrue(found);
}
/** Validates an elastic pool against the default shared pool name. */
private void validateSqlElasticPool(SqlElasticPool sqlElasticPool) {
    validateSqlElasticPool(sqlElasticPool, SQL_ELASTIC_POOL_NAME);
}
/**
 * Validates an elastic pool's identity, edition, and non-zero DTU figures.
 *
 * @param sqlElasticPool pool under test (must not be null)
 * @param elasticPoolName expected pool name
 */
private void validateSqlElasticPool(SqlElasticPool sqlElasticPool, String elasticPoolName) {
    Assertions.assertNotNull(sqlElasticPool);
    Assertions.assertEquals(rgName, sqlElasticPool.resourceGroupName());
    Assertions.assertEquals(elasticPoolName, sqlElasticPool.name());
    Assertions.assertEquals(sqlServerName, sqlElasticPool.sqlServerName());
    // All pools in this suite are created as STANDARD edition.
    Assertions.assertEquals(ElasticPoolEdition.STANDARD, sqlElasticPool.edition());
    Assertions.assertNotNull(sqlElasticPool.creationDate());
    Assertions.assertNotEquals(0, sqlElasticPool.databaseDtuMax());
    Assertions.assertNotEquals(0, sqlElasticPool.dtu());
}
/** Asserts that the list contains the shared test database by name. */
private static void validateListSqlDatabase(List<SqlDatabase> sqlDatabases) {
    boolean found =
        sqlDatabases.stream().anyMatch(database -> database.name().equals(SQL_DATABASE_NAME));
    Assertions.assertTrue(found);
}
/**
 * Validates a server's resource group, FQDN presence, and admin login.
 *
 * @param sqlServer server under test (must not be null)
 */
private void validateSqlServer(SqlServer sqlServer) {
    Assertions.assertNotNull(sqlServer);
    Assertions.assertEquals(rgName, sqlServer.resourceGroupName());
    Assertions.assertNotNull(sqlServer.fullyQualifiedDomainName());
    // "userName" is the login used by createSqlServer(String).
    Assertions.assertEquals("userName", sqlServer.administratorLogin());
}
/**
 * Validates a database's identity, collation, and STANDARD edition.
 *
 * <p>NOTE(review): the original passed the actual value as the first argument to
 * {@code assertEquals}; JUnit 5 expects (expected, actual), so the arguments were
 * swapped to make failure messages report the right expected value.
 *
 * @param sqlDatabase database under test (must not be null)
 * @param databaseName expected database name
 */
private void validateSqlDatabase(SqlDatabase sqlDatabase, String databaseName) {
    Assertions.assertNotNull(sqlDatabase);
    Assertions.assertEquals(databaseName, sqlDatabase.name());
    Assertions.assertEquals(sqlServerName, sqlDatabase.sqlServerName());
    Assertions.assertEquals(COLLATION, sqlDatabase.collation());
    Assertions.assertEquals(DatabaseEdition.STANDARD, sqlDatabase.edition());
}
/**
 * Validates a database and additionally that it belongs to the shared test elastic pool.
 *
 * @param sqlDatabase database under test
 * @param databaseName expected database name
 */
private void validateSqlDatabaseWithElasticPool(SqlDatabase sqlDatabase, String databaseName) {
    validateSqlDatabase(sqlDatabase, databaseName);
    Assertions.assertEquals(SQL_ELASTIC_POOL_NAME, sqlDatabase.elasticPoolName());
}
/**
 * Integration test: filters the full SKU catalogs down to those the region reports as
 * AVAILABLE/DEFAULT, then concurrently creates five databases and five elastic pools
 * with randomly-shuffled SKUs from the filtered lists.
 */
@Test
public void testRandomSku() {
    // Start from the complete SKU lists, shuffled so each run picks different SKUs.
    List<DatabaseSku> databaseSkus = new LinkedList<>(Arrays.asList(DatabaseSku.getAll().toArray(new DatabaseSku[0])));
    Collections.shuffle(databaseSkus);
    List<ElasticPoolSku> elasticPoolSkus = new LinkedList<>(Arrays.asList(ElasticPoolSku.getAll().toArray(new ElasticPoolSku[0])));
    Collections.shuffle(elasticPoolSkus);
    // Drop any SKU the region does not report as AVAILABLE or DEFAULT.
    sqlServerManager.sqlServers().getCapabilitiesByRegion(Region.US_EAST).supportedCapabilitiesByServerVersion()
        .forEach((x, serverVersionCapability) -> {
            serverVersionCapability.supportedEditions().forEach(edition -> {
                edition.supportedServiceLevelObjectives().forEach(serviceObjective -> {
                    if (serviceObjective.status() != CapabilityStatus.AVAILABLE && serviceObjective.status() != CapabilityStatus.DEFAULT) {
                        databaseSkus.remove(DatabaseSku.fromSku(serviceObjective.sku()));
                    }
                });
            });
            serverVersionCapability.supportedElasticPoolEditions().forEach(edition -> {
                edition.supportedElasticPoolPerformanceLevels().forEach(performance -> {
                    if (performance.status() != CapabilityStatus.AVAILABLE && performance.status() != CapabilityStatus.DEFAULT) {
                        elasticPoolSkus.remove(ElasticPoolSku.fromSku(performance.sku()));
                    }
                });
            });
        });
    SqlServer sqlServer = sqlServerManager.sqlServers().define(sqlServerName)
        .withRegion(Region.US_EAST)
        .withNewResourceGroup(rgName)
        .withAdministratorLogin("userName")
        .withAdministratorPassword(password())
        .create();
    // Create 5 databases and 5 elastic pools concurrently; blockLast() waits for all.
    Flux.merge(
        Flux.range(0, 5)
            .flatMap(i -> sqlServer.databases().define("database" + i).withSku(databaseSkus.get(i)).createAsync().cast(Indexable.class)),
        Flux.range(0, 5)
            .flatMap(i -> sqlServer.elasticPools().define("elasticPool" + i).withSku(elasticPoolSkus.get(i)).createAsync().cast(Indexable.class))
    )
        .blockLast();
}
/**
 * Code generator, not a real test: queries the live SKU catalog for US East and
 * regenerates the static {@code DatabaseSku} / {@code ElasticPoolSku} constant classes
 * by substituting into template files on the classpath and writing the results into
 * the source tree. Disabled — run manually only when the SKU constants need updating.
 */
@Test
@Disabled("Only run for updating sku")
public void generateSku() throws IOException {
    StringBuilder databaseSkuBuilder = new StringBuilder();
    StringBuilder elasticPoolSkuBuilder = new StringBuilder();
    sqlServerManager.sqlServers().getCapabilitiesByRegion(Region.US_EAST).supportedCapabilitiesByServerVersion()
        .forEach((x, serverVersionCapability) -> {
            serverVersionCapability.supportedEditions().forEach(edition -> {
                // The internal "System" edition is not exposed as a public SKU constant.
                if (edition.name().equalsIgnoreCase("System")) {
                    return;
                }
                edition.supportedServiceLevelObjectives().forEach(serviceObjective -> {
                    addStaticSkuDefinition(databaseSkuBuilder, edition.name(), serviceObjective.name(), serviceObjective.sku(), "DatabaseSku");
                });
            });
            serverVersionCapability.supportedElasticPoolEditions().forEach(edition -> {
                if (edition.name().equalsIgnoreCase("System")) {
                    return;
                }
                edition.supportedElasticPoolPerformanceLevels().forEach(performance -> {
                    // Elastic pool constants are named "<sku>_<capacity>".
                    String detailName = String.format("%s_%d", performance.sku().name(), performance.sku().capacity());
                    addStaticSkuDefinition(elasticPoolSkuBuilder, edition.name(), detailName, performance.sku(), "ElasticPoolSku");
                });
            });
        });
    // Substitute the generated constant blocks into the classpath templates and write
    // the regenerated source files into the main source tree.
    String databaseSku = new String(readAllBytes(getClass().getResourceAsStream("/DatabaseSku.java")), StandardCharsets.UTF_8);
    databaseSku = databaseSku.replace("<Generated>", databaseSkuBuilder.toString());
    Files.write(new File("src/main/java/com/azure/resourcemanager/sql/models/DatabaseSku.java").toPath(), databaseSku.getBytes(StandardCharsets.UTF_8));
    String elasticPoolSku = new String(readAllBytes(getClass().getResourceAsStream("/ElasticPoolSku.java")), StandardCharsets.UTF_8);
    elasticPoolSku = elasticPoolSku.replace("<Generated>", elasticPoolSkuBuilder.toString());
    Files.write(new File("src/main/java/com/azure/resourcemanager/sql/models/ElasticPoolSku.java").toPath(), elasticPoolSku.getBytes(StandardCharsets.UTF_8));
    // Create the resource group so session recording has at least one operation.
    // NOTE(review): presumably needed for the test-proxy recording — confirm.
    sqlServerManager.resourceManager().resourceGroups().define(rgName).withRegion(Region.US_EAST).create();
}
private byte[] readAllBytes(InputStream inputStream) throws IOException {
try (ByteArrayOutputStream outputStream = new ByteArrayOutputStream()) {
byte[] data = new byte[4096];
while (true) {
int size = inputStream.read(data);
if (size > 0) {
outputStream.write(data, 0, size);
} else {
return outputStream.toByteArray();
}
}
}
}
    /**
     * Appends one generated "public static final {className} EDITION_DETAIL = new {className}(...);"
     * constant definition (plus its Javadoc line) to {@code builder}.
     *
     * <p>String-valued sku fields are emitted double-quoted; when a field is
     * {@code null} the ternary yields a null String and
     * {@code StringBuilder.append(String)} renders it as the literal text
     * "null", which is exactly the source form wanted in the generated file.
     *
     * @param builder accumulator for the generated source text
     * @param edition sku edition name, upper-cased into the constant name
     * @param detailName objective/performance-level name, upper-cased into the constant name
     * @param sku the sku whose name/tier/family/capacity/size are emitted as constructor args
     * @param className the generated class ("DatabaseSku" or "ElasticPoolSku")
     */
    private void addStaticSkuDefinition(StringBuilder builder, String edition, String detailName, Sku sku, String className) {
        builder
            // Javadoc line for the generated constant.
            .append("\n ").append("/** ").append(edition).append(" Edition with ").append(detailName).append(" sku. */")
            // Constant name is EDITION_DETAIL, upper-cased with Locale.ROOT to stay
            // locale-independent (e.g. avoids the Turkish dotless-i problem).
            .append("\n ").append("public static final ").append(className).append(" ").append(String.format("%s_%s", edition.toUpperCase(Locale.ROOT), detailName.toUpperCase(Locale.ROOT))).append(" =")
            .append("\n new ").append(className).append("(")
            .append(sku.name() == null ? null : "\"" + sku.name() + "\"")
            .append(", ")
            .append(sku.tier() == null ? null : "\"" + sku.tier() + "\"")
            .append(", ")
            .append(sku.family() == null ? null : "\"" + sku.family() + "\"")
            .append(", ")
            .append(sku.capacity())
            .append(", ")
            .append(sku.size() == null ? null : "\"" + sku.size() + "\"")
            .append(");");
    }
} | class SqlServerOperationsTests extends SqlServerTest {
private static final String SQL_DATABASE_NAME = "myTestDatabase2";
private static final String COLLATION = "SQL_Latin1_General_CP1_CI_AS";
private static final String SQL_ELASTIC_POOL_NAME = "testElasticPool";
private static final String SQL_FIREWALLRULE_NAME = "firewallrule1";
private static final String START_IPADDRESS = "10.102.1.10";
private static final String END_IPADDRESS = "10.102.1.12";
@Test
public void canCRUDSqlSyncMember() throws Exception {
final String dbName = "dbSample";
final String dbSyncName = "dbSync";
final String dbMemberName = "dbMember";
final String syncGroupName = "groupName";
final String syncMemberName = "memberName";
final String administratorLogin = "sqladmin";
final String administratorPassword = password();
SqlServer sqlPrimaryServer =
sqlServerManager
.sqlServers()
.define(sqlServerName)
.withRegion(Region.US_EAST)
.withNewResourceGroup(rgName)
.withAdministratorLogin(administratorLogin)
.withAdministratorPassword(administratorPassword)
.defineDatabase(dbName)
.fromSample(SampleName.ADVENTURE_WORKS_LT)
.withStandardEdition(SqlDatabaseStandardServiceObjective.S0)
.attach()
.defineDatabase(dbSyncName)
.withStandardEdition(SqlDatabaseStandardServiceObjective.S0)
.attach()
.defineDatabase(dbMemberName)
.withStandardEdition(SqlDatabaseStandardServiceObjective.S0)
.attach()
.create();
SqlDatabase dbSource = sqlPrimaryServer.databases().get(dbName);
SqlDatabase dbSync = sqlPrimaryServer.databases().get(dbSyncName);
SqlDatabase dbMember = sqlPrimaryServer.databases().get(dbMemberName);
SqlSyncGroup sqlSyncGroup =
dbSync
.syncGroups()
.define(syncGroupName)
.withSyncDatabaseId(dbSource.id())
.withDatabaseUserName(administratorLogin)
.withDatabasePassword(administratorPassword)
.withConflictResolutionPolicyHubWins()
.withInterval(-1)
.create();
Assertions.assertNotNull(sqlSyncGroup);
SqlSyncMember sqlSyncMember =
sqlSyncGroup
.syncMembers()
.define(syncMemberName)
.withMemberSqlDatabase(dbMember)
.withMemberUserName(administratorLogin)
.withMemberPassword(administratorPassword)
.withMemberDatabaseType(SyncMemberDbType.AZURE_SQL_DATABASE)
.withDatabaseType(SyncDirection.ONE_WAY_MEMBER_TO_HUB)
.create();
Assertions.assertNotNull(sqlSyncMember);
sqlSyncMember
.update()
.withDatabaseType(SyncDirection.BIDIRECTIONAL)
.withMemberUserName(administratorLogin)
.withMemberPassword(administratorPassword)
.withMemberDatabaseType(SyncMemberDbType.AZURE_SQL_DATABASE)
.apply();
Assertions.assertFalse(sqlSyncGroup.syncMembers().list().isEmpty());
sqlSyncMember =
sqlServerManager
.sqlServers()
.syncMembers()
.getBySqlServer(rgName, sqlServerName, dbSyncName, syncGroupName, syncMemberName);
Assertions.assertNotNull(sqlSyncMember);
sqlSyncMember.delete();
sqlSyncGroup.delete();
}
@Test
public void canCRUDSqlSyncGroup() throws Exception {
final String dbName = "dbSample";
final String dbSyncName = "dbSync";
final String syncGroupName = "groupName";
final String administratorLogin = "sqladmin";
final String administratorPassword = password();
SqlServer sqlPrimaryServer =
sqlServerManager
.sqlServers()
.define(sqlServerName)
.withRegion(Region.US_EAST)
.withNewResourceGroup(rgName)
.withAdministratorLogin(administratorLogin)
.withAdministratorPassword(administratorPassword)
.defineDatabase(dbName)
.fromSample(SampleName.ADVENTURE_WORKS_LT)
.withStandardEdition(SqlDatabaseStandardServiceObjective.S0)
.attach()
.defineDatabase(dbSyncName)
.withStandardEdition(SqlDatabaseStandardServiceObjective.S0)
.attach()
.create();
SqlDatabase dbSource = sqlPrimaryServer.databases().get(dbName);
SqlDatabase dbSync = sqlPrimaryServer.databases().get(dbSyncName);
SqlSyncGroup sqlSyncGroup =
dbSync
.syncGroups()
.define(syncGroupName)
.withSyncDatabaseId(dbSource.id())
.withDatabaseUserName(administratorLogin)
.withDatabasePassword(administratorPassword)
.withConflictResolutionPolicyHubWins()
.withInterval(-1)
.create();
Assertions.assertNotNull(sqlSyncGroup);
sqlSyncGroup.update().withInterval(600).withConflictResolutionPolicyMemberWins().apply();
Assertions
.assertTrue(
sqlServerManager
.sqlServers()
.syncGroups()
.listSyncDatabaseIds(Region.US_EAST)
.stream()
.findAny()
.isPresent());
Assertions.assertFalse(dbSync.syncGroups().list().isEmpty());
sqlSyncGroup =
sqlServerManager.sqlServers().syncGroups().getBySqlServer(rgName, sqlServerName, dbSyncName, syncGroupName);
Assertions.assertNotNull(sqlSyncGroup);
sqlSyncGroup.delete();
}
@Test
public void canCopySqlDatabase() throws Exception {
final String sqlPrimaryServerName = generateRandomResourceName("sqlpri", 22);
final String sqlSecondaryServerName = generateRandomResourceName("sqlsec", 22);
final String epName = "epSample";
final String dbName = "dbSample";
final String administratorLogin = "sqladmin";
final String administratorPassword = password();
SqlServer sqlPrimaryServer =
sqlServerManager
.sqlServers()
.define(sqlPrimaryServerName)
.withRegion(Region.US_EAST2)
.withNewResourceGroup(rgName)
.withAdministratorLogin(administratorLogin)
.withAdministratorPassword(administratorPassword)
.defineElasticPool(epName)
.withPremiumPool()
.attach()
.defineDatabase(dbName)
.withExistingElasticPool(epName)
.fromSample(SampleName.ADVENTURE_WORKS_LT)
.attach()
.create();
SqlServer sqlSecondaryServer =
sqlServerManager
.sqlServers()
.define(sqlSecondaryServerName)
.withRegion(Region.US_EAST)
.withExistingResourceGroup(rgName)
.withAdministratorLogin(administratorLogin)
.withAdministratorPassword(administratorPassword)
.create();
SqlDatabase dbSample = sqlPrimaryServer.databases().get(dbName);
SqlDatabase dbCopy =
sqlSecondaryServer
.databases()
.define("dbCopy")
.withSourceDatabase(dbSample)
.withMode(CreateMode.COPY)
.withPremiumEdition(SqlDatabasePremiumServiceObjective.P1)
.create();
Assertions.assertNotNull(dbCopy);
}
@Test
public void canCRUDSqlFailoverGroup() throws Exception {
final String sqlPrimaryServerName = generateRandomResourceName("sqlpri", 22);
final String sqlSecondaryServerName = generateRandomResourceName("sqlsec", 22);
final String sqlOtherServerName = generateRandomResourceName("sql000", 22);
final String failoverGroupName = generateRandomResourceName("fg", 22);
final String failoverGroupName2 = generateRandomResourceName("fg2", 22);
final String dbName = "dbSample";
final String administratorLogin = "sqladmin";
final String administratorPassword = password();
SqlServer sqlPrimaryServer =
sqlServerManager
.sqlServers()
.define(sqlPrimaryServerName)
.withRegion(Region.US_EAST)
.withNewResourceGroup(rgName)
.withAdministratorLogin(administratorLogin)
.withAdministratorPassword(administratorPassword)
.defineDatabase(dbName)
.fromSample(SampleName.ADVENTURE_WORKS_LT)
.withStandardEdition(SqlDatabaseStandardServiceObjective.S0)
.attach()
.create();
SqlServer sqlSecondaryServer =
sqlServerManager
.sqlServers()
.define(sqlSecondaryServerName)
.withRegion(Region.US_EAST2)
.withExistingResourceGroup(rgName)
.withAdministratorLogin(administratorLogin)
.withAdministratorPassword(administratorPassword)
.create();
SqlServer sqlOtherServer =
sqlServerManager
.sqlServers()
.define(sqlOtherServerName)
.withRegion(Region.US_SOUTH_CENTRAL)
.withExistingResourceGroup(rgName)
.withAdministratorLogin(administratorLogin)
.withAdministratorPassword(administratorPassword)
.create();
SqlFailoverGroup failoverGroup =
sqlPrimaryServer
.failoverGroups()
.define(failoverGroupName)
.withManualReadWriteEndpointPolicy()
.withPartnerServerId(sqlSecondaryServer.id())
.withReadOnlyEndpointPolicyDisabled()
.create();
Assertions.assertNotNull(failoverGroup);
Assertions.assertEquals(failoverGroupName, failoverGroup.name());
Assertions.assertEquals(rgName, failoverGroup.resourceGroupName());
Assertions.assertEquals(sqlPrimaryServerName, failoverGroup.sqlServerName());
Assertions.assertEquals(FailoverGroupReplicationRole.PRIMARY, failoverGroup.replicationRole());
Assertions.assertEquals(1, failoverGroup.partnerServers().size());
Assertions.assertEquals(sqlSecondaryServer.id(), failoverGroup.partnerServers().get(0).id());
Assertions
.assertEquals(
FailoverGroupReplicationRole.SECONDARY, failoverGroup.partnerServers().get(0).replicationRole());
Assertions.assertEquals(0, failoverGroup.databases().size());
Assertions.assertEquals(0, failoverGroup.readWriteEndpointDataLossGracePeriodMinutes());
Assertions.assertEquals(ReadWriteEndpointFailoverPolicy.MANUAL, failoverGroup.readWriteEndpointPolicy());
Assertions.assertEquals(ReadOnlyEndpointFailoverPolicy.DISABLED, failoverGroup.readOnlyEndpointPolicy());
SqlFailoverGroup failoverGroupOnPartner = sqlSecondaryServer.failoverGroups().get(failoverGroup.name());
Assertions.assertEquals(failoverGroupName, failoverGroupOnPartner.name());
Assertions.assertEquals(rgName, failoverGroupOnPartner.resourceGroupName());
Assertions.assertEquals(sqlSecondaryServerName, failoverGroupOnPartner.sqlServerName());
Assertions.assertEquals(FailoverGroupReplicationRole.SECONDARY, failoverGroupOnPartner.replicationRole());
Assertions.assertEquals(1, failoverGroupOnPartner.partnerServers().size());
Assertions.assertEquals(sqlPrimaryServer.id(), failoverGroupOnPartner.partnerServers().get(0).id());
Assertions
.assertEquals(
FailoverGroupReplicationRole.PRIMARY, failoverGroupOnPartner.partnerServers().get(0).replicationRole());
Assertions.assertEquals(0, failoverGroupOnPartner.databases().size());
Assertions.assertEquals(0, failoverGroupOnPartner.readWriteEndpointDataLossGracePeriodMinutes());
Assertions
.assertEquals(ReadWriteEndpointFailoverPolicy.MANUAL, failoverGroupOnPartner.readWriteEndpointPolicy());
Assertions
.assertEquals(ReadOnlyEndpointFailoverPolicy.DISABLED, failoverGroupOnPartner.readOnlyEndpointPolicy());
SqlFailoverGroup failoverGroup2 =
sqlPrimaryServer
.failoverGroups()
.define(failoverGroupName2)
.withAutomaticReadWriteEndpointPolicyAndDataLossGracePeriod(120)
.withPartnerServerId(sqlOtherServer.id())
.withReadOnlyEndpointPolicyEnabled()
.create();
Assertions.assertNotNull(failoverGroup2);
Assertions.assertEquals(failoverGroupName2, failoverGroup2.name());
Assertions.assertEquals(rgName, failoverGroup2.resourceGroupName());
Assertions.assertEquals(sqlPrimaryServerName, failoverGroup2.sqlServerName());
Assertions.assertEquals(FailoverGroupReplicationRole.PRIMARY, failoverGroup2.replicationRole());
Assertions.assertEquals(1, failoverGroup2.partnerServers().size());
Assertions.assertEquals(sqlOtherServer.id(), failoverGroup2.partnerServers().get(0).id());
Assertions
.assertEquals(
FailoverGroupReplicationRole.SECONDARY, failoverGroup2.partnerServers().get(0).replicationRole());
Assertions.assertEquals(0, failoverGroup2.databases().size());
Assertions.assertEquals(120, failoverGroup2.readWriteEndpointDataLossGracePeriodMinutes());
Assertions.assertEquals(ReadWriteEndpointFailoverPolicy.AUTOMATIC, failoverGroup2.readWriteEndpointPolicy());
Assertions.assertEquals(ReadOnlyEndpointFailoverPolicy.ENABLED, failoverGroup2.readOnlyEndpointPolicy());
failoverGroup
.update()
.withAutomaticReadWriteEndpointPolicyAndDataLossGracePeriod(120)
.withReadOnlyEndpointPolicyEnabled()
.withTag("tag1", "value1")
.apply();
Assertions.assertEquals(120, failoverGroup.readWriteEndpointDataLossGracePeriodMinutes());
Assertions.assertEquals(ReadWriteEndpointFailoverPolicy.AUTOMATIC, failoverGroup.readWriteEndpointPolicy());
Assertions.assertEquals(ReadOnlyEndpointFailoverPolicy.ENABLED, failoverGroup.readOnlyEndpointPolicy());
SqlDatabase db = sqlPrimaryServer.databases().get(dbName);
failoverGroup
.update()
.withManualReadWriteEndpointPolicy()
.withReadOnlyEndpointPolicyDisabled()
.withNewDatabaseId(db.id())
.apply();
Assertions.assertEquals(1, failoverGroup.databases().size());
Assertions.assertEquals(db.id(), failoverGroup.databases().get(0));
Assertions.assertEquals(0, failoverGroup.readWriteEndpointDataLossGracePeriodMinutes());
Assertions.assertEquals(ReadWriteEndpointFailoverPolicy.MANUAL, failoverGroup.readWriteEndpointPolicy());
Assertions.assertEquals(ReadOnlyEndpointFailoverPolicy.DISABLED, failoverGroup.readOnlyEndpointPolicy());
List<SqlFailoverGroup> failoverGroupsList = sqlPrimaryServer.failoverGroups().list();
Assertions.assertEquals(2, failoverGroupsList.size());
failoverGroupsList = sqlSecondaryServer.failoverGroups().list();
Assertions.assertEquals(1, failoverGroupsList.size());
sqlPrimaryServer.failoverGroups().delete(failoverGroup2.name());
}
@Test
public void canChangeSqlServerAndDatabaseAutomaticTuning() throws Exception {
String sqlServerAdminName = "sqladmin";
String sqlServerAdminPassword = password();
String databaseName = "db-from-sample";
String id = generateRandomUuid();
String storageName = generateRandomResourceName(sqlServerName, 22);
SqlServer sqlServer =
sqlServerManager
.sqlServers()
.define(sqlServerName)
.withRegion(Region.US_EAST)
.withNewResourceGroup(rgName)
.withAdministratorLogin(sqlServerAdminName)
.withAdministratorPassword(sqlServerAdminPassword)
.defineDatabase(databaseName)
.fromSample(SampleName.ADVENTURE_WORKS_LT)
.withBasicEdition()
.attach()
.create();
SqlDatabase dbFromSample = sqlServer.databases().get(databaseName);
Assertions.assertNotNull(dbFromSample);
Assertions.assertEquals(DatabaseEdition.BASIC, dbFromSample.edition());
SqlServerAutomaticTuning serverAutomaticTuning = sqlServer.getServerAutomaticTuning();
Assertions.assertEquals(AutomaticTuningServerMode.AUTO, serverAutomaticTuning.desiredState());
Assertions.assertEquals(AutomaticTuningServerMode.AUTO, serverAutomaticTuning.actualState());
Assertions.assertEquals(4, serverAutomaticTuning.tuningOptions().size());
serverAutomaticTuning
.update()
.withAutomaticTuningMode(AutomaticTuningServerMode.AUTO)
.withAutomaticTuningOption("createIndex", AutomaticTuningOptionModeDesired.OFF)
.withAutomaticTuningOption("dropIndex", AutomaticTuningOptionModeDesired.ON)
.withAutomaticTuningOption("forceLastGoodPlan", AutomaticTuningOptionModeDesired.DEFAULT)
.apply();
Assertions.assertEquals(AutomaticTuningServerMode.AUTO, serverAutomaticTuning.desiredState());
Assertions.assertEquals(AutomaticTuningServerMode.AUTO, serverAutomaticTuning.actualState());
Assertions
.assertEquals(
AutomaticTuningOptionModeDesired.OFF,
serverAutomaticTuning.tuningOptions().get("createIndex").desiredState());
Assertions
.assertEquals(
AutomaticTuningOptionModeActual.OFF,
serverAutomaticTuning.tuningOptions().get("createIndex").actualState());
Assertions
.assertEquals(
AutomaticTuningOptionModeDesired.ON,
serverAutomaticTuning.tuningOptions().get("dropIndex").desiredState());
Assertions
.assertEquals(
AutomaticTuningOptionModeActual.ON,
serverAutomaticTuning.tuningOptions().get("dropIndex").actualState());
Assertions
.assertEquals(
AutomaticTuningOptionModeDesired.DEFAULT,
serverAutomaticTuning.tuningOptions().get("forceLastGoodPlan").desiredState());
SqlDatabaseAutomaticTuning databaseAutomaticTuning = dbFromSample.getDatabaseAutomaticTuning();
Assertions.assertEquals(4, databaseAutomaticTuning.tuningOptions().size());
databaseAutomaticTuning
.update()
.withAutomaticTuningMode(AutomaticTuningMode.AUTO)
.withAutomaticTuningOption("createIndex", AutomaticTuningOptionModeDesired.OFF)
.withAutomaticTuningOption("dropIndex", AutomaticTuningOptionModeDesired.ON)
.withAutomaticTuningOption("forceLastGoodPlan", AutomaticTuningOptionModeDesired.DEFAULT)
.apply();
Assertions.assertEquals(AutomaticTuningMode.AUTO, databaseAutomaticTuning.desiredState());
Assertions.assertEquals(AutomaticTuningMode.AUTO, databaseAutomaticTuning.actualState());
Assertions
.assertEquals(
AutomaticTuningOptionModeDesired.OFF,
databaseAutomaticTuning.tuningOptions().get("createIndex").desiredState());
Assertions
.assertEquals(
AutomaticTuningOptionModeActual.OFF,
databaseAutomaticTuning.tuningOptions().get("createIndex").actualState());
Assertions
.assertEquals(
AutomaticTuningOptionModeDesired.ON,
databaseAutomaticTuning.tuningOptions().get("dropIndex").desiredState());
Assertions
.assertEquals(
AutomaticTuningOptionModeActual.ON,
databaseAutomaticTuning.tuningOptions().get("dropIndex").actualState());
Assertions
.assertEquals(
AutomaticTuningOptionModeDesired.DEFAULT,
databaseAutomaticTuning.tuningOptions().get("forceLastGoodPlan").desiredState());
dbFromSample.delete();
sqlServerManager.sqlServers().deleteByResourceGroup(rgName, sqlServerName);
}
@Test
public void canCreateAndAquireServerDnsAlias() throws Exception {
String sqlServerName1 = sqlServerName + "1";
String sqlServerName2 = sqlServerName + "2";
String sqlServerAdminName = "sqladmin";
String sqlServerAdminPassword = password();
SqlServer sqlServer1 =
sqlServerManager
.sqlServers()
.define(sqlServerName1)
.withRegion(Region.US_EAST)
.withNewResourceGroup(rgName)
.withAdministratorLogin(sqlServerAdminName)
.withAdministratorPassword(sqlServerAdminPassword)
.create();
Assertions.assertNotNull(sqlServer1);
SqlServerDnsAlias dnsAlias = sqlServer1.dnsAliases().define(sqlServerName).create();
Assertions.assertNotNull(dnsAlias);
Assertions.assertEquals(rgName, dnsAlias.resourceGroupName());
Assertions.assertEquals(sqlServerName1, dnsAlias.sqlServerName());
dnsAlias = sqlServerManager.sqlServers().dnsAliases().getBySqlServer(rgName, sqlServerName1, sqlServerName);
Assertions.assertNotNull(dnsAlias);
Assertions.assertEquals(rgName, dnsAlias.resourceGroupName());
Assertions.assertEquals(sqlServerName1, dnsAlias.sqlServerName());
Assertions.assertEquals(1, sqlServer1.databases().list().size());
SqlServer sqlServer2 =
sqlServerManager
.sqlServers()
.define(sqlServerName2)
.withRegion(Region.US_EAST)
.withNewResourceGroup(rgName)
.withAdministratorLogin(sqlServerAdminName)
.withAdministratorPassword(sqlServerAdminPassword)
.create();
Assertions.assertNotNull(sqlServer2);
sqlServer2.dnsAliases().acquire(sqlServerName, sqlServer1.id());
ResourceManagerUtils.sleep(Duration.ofMinutes(3));
dnsAlias = sqlServer2.dnsAliases().get(sqlServerName);
Assertions.assertNotNull(dnsAlias);
Assertions.assertEquals(rgName, dnsAlias.resourceGroupName());
Assertions.assertEquals(sqlServerName2, dnsAlias.sqlServerName());
dnsAlias.delete();
sqlServerManager.sqlServers().deleteByResourceGroup(rgName, sqlServerName1);
sqlServerManager.sqlServers().deleteByResourceGroup(rgName, sqlServerName2);
}
@Test
public void canGetSqlServerCapabilitiesAndCreateIdentity() throws Exception {
String sqlServerAdminName = "sqladmin";
String sqlServerAdminPassword = password();
String databaseName = "db-from-sample";
RegionCapabilities regionCapabilities = sqlServerManager.sqlServers().getCapabilitiesByRegion(Region.US_EAST);
Assertions.assertNotNull(regionCapabilities);
Assertions.assertNotNull(regionCapabilities.supportedCapabilitiesByServerVersion().get("12.0"));
Assertions
.assertTrue(
regionCapabilities.supportedCapabilitiesByServerVersion().get("12.0").supportedEditions().size() > 0);
Assertions
.assertTrue(
regionCapabilities
.supportedCapabilitiesByServerVersion()
.get("12.0")
.supportedElasticPoolEditions()
.size()
> 0);
SqlServer sqlServer =
sqlServerManager
.sqlServers()
.define(sqlServerName)
.withRegion(Region.US_EAST)
.withNewResourceGroup(rgName)
.withAdministratorLogin(sqlServerAdminName)
.withAdministratorPassword(sqlServerAdminPassword)
.withSystemAssignedManagedServiceIdentity()
.defineDatabase(databaseName)
.fromSample(SampleName.ADVENTURE_WORKS_LT)
.withBasicEdition()
.attach()
.create();
SqlDatabase dbFromSample = sqlServer.databases().get(databaseName);
Assertions.assertNotNull(dbFromSample);
Assertions.assertEquals(DatabaseEdition.BASIC, dbFromSample.edition());
Assertions.assertTrue(sqlServer.isManagedServiceIdentityEnabled());
Assertions.assertEquals(sqlServerManager.tenantId(), sqlServer.systemAssignedManagedServiceIdentityTenantId());
Assertions.assertNotNull(sqlServer.systemAssignedManagedServiceIdentityPrincipalId());
sqlServer.update().withSystemAssignedManagedServiceIdentity().apply();
Assertions.assertTrue(sqlServer.isManagedServiceIdentityEnabled());
Assertions.assertEquals(sqlServerManager.tenantId(), sqlServer.systemAssignedManagedServiceIdentityTenantId());
Assertions.assertNotNull(sqlServer.systemAssignedManagedServiceIdentityPrincipalId());
dbFromSample.delete();
sqlServerManager.sqlServers().deleteByResourceGroup(rgName, sqlServerName);
}
@Test
public void canCRUDSqlServerWithImportDatabase() throws Exception {
if (isPlaybackMode()) {
return;
}
String sqlServerAdminName = "sqladmin";
String sqlServerAdminPassword = password();
String id = generateRandomUuid();
String storageName = generateRandomResourceName(sqlServerName, 22);
SqlServer sqlServer =
sqlServerManager
.sqlServers()
.define(sqlServerName)
.withRegion(Region.US_EAST)
.withNewResourceGroup(rgName)
.withAdministratorLogin(sqlServerAdminName)
.withAdministratorPassword(sqlServerAdminPassword)
.withActiveDirectoryAdministrator("DSEng", id)
.create();
SqlDatabase dbFromSample =
sqlServer
.databases()
.define("db-from-sample")
.fromSample(SampleName.ADVENTURE_WORKS_LT)
.withBasicEdition()
.withTag("tag1", "value1")
.create();
Assertions.assertNotNull(dbFromSample);
Assertions.assertEquals(DatabaseEdition.BASIC, dbFromSample.edition());
SqlDatabaseImportExportResponse exportedDB;
StorageAccount storageAccount = null;
try {
storageAccount =
storageManager.storageAccounts().getByResourceGroup(sqlServer.resourceGroupName(), storageName);
} catch (ManagementException e) {
Assertions.assertEquals(404, e.getResponse().getStatusCode());
}
if (storageAccount == null) {
Creatable<StorageAccount> storageAccountCreatable =
storageManager
.storageAccounts()
.define(storageName)
.withRegion(sqlServer.regionName())
.withExistingResourceGroup(sqlServer.resourceGroupName());
exportedDB =
dbFromSample
.exportTo(storageAccountCreatable, "from-sample", "dbfromsample.bacpac")
.withSqlAdministratorLoginAndPassword(sqlServerAdminName, sqlServerAdminPassword)
.execute();
storageAccount =
storageManager.storageAccounts().getByResourceGroup(sqlServer.resourceGroupName(), storageName);
} else {
exportedDB =
dbFromSample
.exportTo(storageAccount, "from-sample", "dbfromsample.bacpac")
.withSqlAdministratorLoginAndPassword(sqlServerAdminName, sqlServerAdminPassword)
.execute();
}
SqlDatabase dbFromImport =
sqlServer
.databases()
.define("db-from-import")
.defineElasticPool("ep1")
.withBasicPool()
.attach()
.importFrom(storageAccount, "from-sample", "dbfromsample.bacpac")
.withSqlAdministratorLoginAndPassword(sqlServerAdminName, sqlServerAdminPassword)
.withTag("tag2", "value2")
.create();
Assertions.assertNotNull(dbFromImport);
Assertions.assertEquals("ep1", dbFromImport.elasticPoolName());
dbFromImport.delete();
dbFromSample.delete();
sqlServer.elasticPools().delete("ep1");
sqlServerManager.sqlServers().deleteByResourceGroup(rgName, sqlServerName);
}
@Test
@Disabled("Depends on the existing SQL server")
@Test
public void canListRecommendedElasticPools() throws Exception {
SqlServer sqlServer = sqlServerManager.sqlServers().getByResourceGroup("ans", "ans-secondary");
sqlServer
.databases()
.list()
.get(0)
.listServiceTierAdvisors()
.values()
.iterator()
.next()
.serviceLevelObjectiveUsageMetric();
Map<String, RecommendedElasticPool> recommendedElasticPools = sqlServer.listRecommendedElasticPools();
Assertions.assertNotNull(recommendedElasticPools);
}
@Test
public void canCRUDSqlServer() throws Exception {
CheckNameAvailabilityResult checkNameResult =
sqlServerManager.sqlServers().checkNameAvailability(sqlServerName);
Assertions.assertTrue(checkNameResult.isAvailable());
SqlServer sqlServer = createSqlServer();
validateSqlServer(sqlServer);
checkNameResult = sqlServerManager.sqlServers().checkNameAvailability(sqlServerName);
Assertions.assertFalse(checkNameResult.isAvailable());
Assertions
.assertEquals(
CheckNameAvailabilityReason.ALREADY_EXISTS.toString(), checkNameResult.unavailabilityReason());
List<ServiceObjective> serviceObjectives = sqlServer.listServiceObjectives();
Assertions.assertNotEquals(serviceObjectives.size(), 0);
Assertions.assertNotNull(serviceObjectives.get(0).refresh());
Assertions.assertNotNull(sqlServer.getServiceObjective("d1737d22-a8ea-4de7-9bd0-33395d2a7419"));
sqlServer.update().withAdministratorPassword("P@ssword~2").apply();
PagedIterable<SqlServer> sqlServers = sqlServerManager.sqlServers().listByResourceGroup(rgName);
boolean found = false;
for (SqlServer server : sqlServers) {
if (server.name().equals(sqlServerName)) {
found = true;
}
}
Assertions.assertTrue(found);
sqlServer = sqlServerManager.sqlServers().getByResourceGroup(rgName, sqlServerName);
Assertions.assertNotNull(sqlServer);
sqlServerManager.sqlServers().deleteByResourceGroup(sqlServer.resourceGroupName(), sqlServer.name());
validateSqlServerNotFound(sqlServer);
}
@Test
public void canUseCoolShortcutsForResourceCreation() throws Exception {
String database2Name = "database2";
String database1InEPName = "database1InEP";
String database2InEPName = "database2InEP";
String elasticPool2Name = "elasticPool2";
String elasticPool3Name = "elasticPool3";
String elasticPool1Name = SQL_ELASTIC_POOL_NAME;
SqlServer sqlServer =
sqlServerManager
.sqlServers()
.define(sqlServerName)
.withRegion(Region.US_EAST)
.withNewResourceGroup(rgName)
.withAdministratorLogin("userName")
.withAdministratorPassword("Password~1")
.withoutAccessFromAzureServices()
.defineDatabase(SQL_DATABASE_NAME).attach()
.defineDatabase(database2Name).attach()
.defineElasticPool(elasticPool1Name).withStandardPool().attach()
.defineElasticPool(elasticPool2Name).withPremiumPool().attach()
.defineElasticPool(elasticPool3Name).withStandardPool().attach()
.defineDatabase(database1InEPName).withExistingElasticPool(elasticPool2Name).attach()
.defineDatabase(database2InEPName).withExistingElasticPool(elasticPool2Name).attach()
.defineFirewallRule(SQL_FIREWALLRULE_NAME).withIpAddressRange(START_IPADDRESS, END_IPADDRESS).attach()
.defineFirewallRule(generateRandomResourceName("firewall_", 15)).withIpAddressRange(START_IPADDRESS, END_IPADDRESS).attach()
.defineFirewallRule(generateRandomResourceName("firewall_", 15)).withIpAddress(START_IPADDRESS).attach()
.create();
validateMultiCreation(
database2Name,
database1InEPName,
database2InEPName,
elasticPool1Name,
elasticPool2Name,
elasticPool3Name,
sqlServer,
false);
elasticPool1Name = SQL_ELASTIC_POOL_NAME + " U";
database2Name = "database2U";
database1InEPName = "database1InEPU";
database2InEPName = "database2InEPU";
elasticPool2Name = "elasticPool2U";
elasticPool3Name = "elasticPool3U";
sqlServer =
sqlServer
.update()
.defineDatabase(SQL_DATABASE_NAME).attach()
.defineDatabase(database2Name).attach()
.defineElasticPool(elasticPool1Name).withStandardPool().attach()
.defineElasticPool(elasticPool2Name).withPremiumPool().attach()
.defineElasticPool(elasticPool3Name).withStandardPool().attach()
.defineDatabase(database1InEPName).withExistingElasticPool(elasticPool2Name).attach()
.defineDatabase(database2InEPName).withExistingElasticPool(elasticPool2Name).attach()
.defineFirewallRule(SQL_FIREWALLRULE_NAME).withIpAddressRange(START_IPADDRESS, END_IPADDRESS).attach()
.defineFirewallRule(generateRandomResourceName("firewall_", 15)).withIpAddressRange(START_IPADDRESS, END_IPADDRESS).attach()
.defineFirewallRule(generateRandomResourceName("firewall_", 15)).withIpAddress(START_IPADDRESS).attach()
.withTag("tag2", "value2")
.apply();
validateMultiCreation(
database2Name,
database1InEPName,
database2InEPName,
elasticPool1Name,
elasticPool2Name,
elasticPool3Name,
sqlServer,
true);
sqlServer.refresh();
Assertions.assertEquals(sqlServer.elasticPools().list().size(), 0);
PagedIterable<SqlServer> sqlServers = sqlServerManager.sqlServers().listByResourceGroup(rgName);
boolean found = false;
for (SqlServer server : sqlServers) {
if (server.name().equals(sqlServerName)) {
found = true;
}
}
Assertions.assertTrue(found);
sqlServer = sqlServerManager.sqlServers().getByResourceGroup(rgName, sqlServerName);
Assertions.assertNotNull(sqlServer);
sqlServerManager.sqlServers().deleteByResourceGroup(sqlServer.resourceGroupName(), sqlServer.name());
validateSqlServerNotFound(sqlServer);
}
@Test
public void canCRUDSqlDatabase() throws Exception {
SqlServer sqlServer = createSqlServer();
Mono<SqlDatabase> resourceStream =
sqlServer.databases().define(SQL_DATABASE_NAME).withStandardEdition(SqlDatabaseStandardServiceObjective.S0).createAsync();
SqlDatabase sqlDatabase = resourceStream.block();
validateSqlDatabase(sqlDatabase, SQL_DATABASE_NAME);
Assertions.assertTrue(sqlServer.databases().list().size() > 0);
TransparentDataEncryption transparentDataEncryption = sqlDatabase.getTransparentDataEncryption();
Assertions.assertNotNull(transparentDataEncryption.status());
List<TransparentDataEncryptionActivity> transparentDataEncryptionActivities =
transparentDataEncryption.listActivities();
Assertions.assertNotNull(transparentDataEncryptionActivities);
transparentDataEncryption = transparentDataEncryption.updateStatus(TransparentDataEncryptionStatus.ENABLED);
Assertions.assertNotNull(transparentDataEncryption);
Assertions.assertEquals(transparentDataEncryption.status(), TransparentDataEncryptionStatus.ENABLED);
transparentDataEncryptionActivities = transparentDataEncryption.listActivities();
Assertions.assertNotNull(transparentDataEncryptionActivities);
ResourceManagerUtils.sleep(Duration.ofSeconds(10));
transparentDataEncryption =
sqlDatabase.getTransparentDataEncryption().updateStatus(TransparentDataEncryptionStatus.DISABLED);
Assertions.assertNotNull(transparentDataEncryption);
Assertions.assertEquals(transparentDataEncryption.status(), TransparentDataEncryptionStatus.DISABLED);
Assertions.assertEquals(transparentDataEncryption.sqlServerName(), sqlServerName);
Assertions.assertEquals(transparentDataEncryption.databaseName(), SQL_DATABASE_NAME);
Assertions.assertNotNull(transparentDataEncryption.name());
Assertions.assertNotNull(transparentDataEncryption.id());
Map<String, ServiceTierAdvisor> serviceTierAdvisors = sqlDatabase.listServiceTierAdvisors();
Assertions.assertNotNull(serviceTierAdvisors);
Assertions.assertNotNull(serviceTierAdvisors.values().iterator().next().serviceLevelObjectiveUsageMetric());
Assertions.assertNotEquals(serviceTierAdvisors.size(), 0);
Assertions.assertNotNull(serviceTierAdvisors.values().iterator().next().refresh());
Assertions.assertNotNull(serviceTierAdvisors.values().iterator().next().serviceLevelObjectiveUsageMetric());
sqlServer = sqlServerManager.sqlServers().getByResourceGroup(rgName, sqlServerName);
validateSqlServer(sqlServer);
Creatable<SqlElasticPool> sqlElasticPoolCreatable =
sqlServer.elasticPools().define(SQL_ELASTIC_POOL_NAME).withStandardPool();
String anotherDatabaseName = "anotherDatabase";
SqlDatabase anotherDatabase =
sqlServer
.databases()
.define(anotherDatabaseName)
.withNewElasticPool(sqlElasticPoolCreatable)
.withSourceDatabase(sqlDatabase.id())
.withMode(CreateMode.COPY)
.create();
validateSqlDatabaseWithElasticPool(anotherDatabase, anotherDatabaseName);
sqlServer.databases().delete(anotherDatabase.name());
validateSqlDatabase(sqlServer.databases().get(SQL_DATABASE_NAME), SQL_DATABASE_NAME);
validateListSqlDatabase(sqlServer.databases().list());
sqlServer.databases().delete(SQL_DATABASE_NAME);
validateSqlDatabaseNotFound(SQL_DATABASE_NAME);
resourceStream =
sqlServer
.databases()
.define("newDatabase")
.withCollation(COLLATION)
.withStandardEdition(SqlDatabaseStandardServiceObjective.S0)
.createAsync();
sqlDatabase = resourceStream.block();
sqlDatabase = sqlDatabase.rename("renamedDatabase");
validateSqlDatabase(sqlDatabase, "renamedDatabase");
sqlServer.databases().delete(sqlDatabase.name());
sqlServerManager.sqlServers().deleteByResourceGroup(sqlServer.resourceGroupName(), sqlServer.name());
validateSqlServerNotFound(sqlServer);
}
@Test
public void canManageReplicationLinks() throws Exception {
String anotherSqlServerName = sqlServerName + "another";
SqlServer sqlServer1 = createSqlServer();
SqlServer sqlServer2 = createSqlServer(anotherSqlServerName);
Mono<SqlDatabase> resourceStream =
sqlServer1
.databases()
.define(SQL_DATABASE_NAME)
.withCollation(COLLATION)
.withStandardEdition(SqlDatabaseStandardServiceObjective.S0)
.createAsync();
SqlDatabase databaseInServer1 = resourceStream.block();
validateSqlDatabase(databaseInServer1, SQL_DATABASE_NAME);
SqlDatabase databaseInServer2 =
sqlServer2
.databases()
.define(SQL_DATABASE_NAME)
.withSourceDatabase(databaseInServer1.id())
.withMode(CreateMode.ONLINE_SECONDARY)
.create();
ResourceManagerUtils.sleep(Duration.ofSeconds(2));
List<ReplicationLink> replicationLinksInDb1 =
new ArrayList<>(databaseInServer1.listReplicationLinks().values());
Assertions.assertEquals(replicationLinksInDb1.size(), 1);
Assertions.assertEquals(replicationLinksInDb1.get(0).partnerDatabase(), databaseInServer2.name());
Assertions.assertEquals(replicationLinksInDb1.get(0).partnerServer(), databaseInServer2.sqlServerName());
List<ReplicationLink> replicationLinksInDb2 =
new ArrayList<>(databaseInServer2.listReplicationLinks().values());
Assertions.assertEquals(replicationLinksInDb2.size(), 1);
Assertions.assertEquals(replicationLinksInDb2.get(0).partnerDatabase(), databaseInServer1.name());
Assertions.assertEquals(replicationLinksInDb2.get(0).partnerServer(), databaseInServer1.sqlServerName());
Assertions.assertNotNull(replicationLinksInDb1.get(0).refresh());
replicationLinksInDb2.get(0).failover();
replicationLinksInDb2.get(0).refresh();
ResourceManagerUtils.sleep(Duration.ofSeconds(30));
replicationLinksInDb1.get(0).forceFailoverAllowDataLoss();
replicationLinksInDb1.get(0).refresh();
ResourceManagerUtils.sleep(Duration.ofSeconds(30));
replicationLinksInDb2.get(0).delete();
Assertions.assertEquals(databaseInServer2.listReplicationLinks().size(), 0);
sqlServer1.databases().delete(databaseInServer1.name());
sqlServer2.databases().delete(databaseInServer2.name());
sqlServerManager.sqlServers().deleteByResourceGroup(sqlServer2.resourceGroupName(), sqlServer2.name());
validateSqlServerNotFound(sqlServer2);
sqlServerManager.sqlServers().deleteByResourceGroup(sqlServer1.resourceGroupName(), sqlServer1.name());
validateSqlServerNotFound(sqlServer1);
}
@Test
public void canDoOperationsOnDataWarehouse() throws Exception {
SqlServer sqlServer = createSqlServer();
validateSqlServer(sqlServer);
Assertions.assertNotNull(sqlServer.listUsageMetrics());
Mono<SqlDatabase> resourceStream =
sqlServer
.databases()
.define(SQL_DATABASE_NAME)
.withCollation(COLLATION)
.withSku(DatabaseSku.DATAWAREHOUSE_DW1000C)
.createAsync();
SqlDatabase sqlDatabase = resourceStream.block();
Assertions.assertNotNull(sqlDatabase);
sqlDatabase = sqlServer.databases().get(SQL_DATABASE_NAME);
Assertions.assertNotNull(sqlDatabase);
Assertions.assertTrue(sqlDatabase.isDataWarehouse());
SqlWarehouse dataWarehouse = sqlServer.databases().get(SQL_DATABASE_NAME).asWarehouse();
Assertions.assertNotNull(dataWarehouse);
Assertions.assertEquals(dataWarehouse.name(), SQL_DATABASE_NAME);
Assertions.assertEquals(dataWarehouse.edition(), DatabaseEdition.DATA_WAREHOUSE);
Assertions.assertNotNull(dataWarehouse.listRestorePoints());
Assertions.assertNotNull(dataWarehouse.listUsageMetrics());
dataWarehouse.pauseDataWarehouse();
dataWarehouse.resumeDataWarehouse();
sqlServer.databases().delete(SQL_DATABASE_NAME);
sqlServerManager.sqlServers().deleteByResourceGroup(sqlServer.resourceGroupName(), sqlServer.name());
validateSqlServerNotFound(sqlServer);
}
@Test
public void canCRUDSqlDatabaseWithElasticPool() throws Exception {
SqlServer sqlServer = createSqlServer();
Creatable<SqlElasticPool> sqlElasticPoolCreatable =
sqlServer
.elasticPools()
.define(SQL_ELASTIC_POOL_NAME)
.withStandardPool()
.withTag("tag1", "value1");
Mono<SqlDatabase> resourceStream =
sqlServer
.databases()
.define(SQL_DATABASE_NAME)
.withNewElasticPool(sqlElasticPoolCreatable)
.withCollation(COLLATION)
.createAsync();
SqlDatabase sqlDatabase = resourceStream.block();
validateSqlDatabase(sqlDatabase, SQL_DATABASE_NAME);
sqlServer = sqlServerManager.sqlServers().getByResourceGroup(rgName, sqlServerName);
validateSqlServer(sqlServer);
SqlElasticPool elasticPool = sqlServer.elasticPools().get(SQL_ELASTIC_POOL_NAME);
validateSqlElasticPool(elasticPool);
validateSqlDatabaseWithElasticPool(sqlServer.databases().get(SQL_DATABASE_NAME), SQL_DATABASE_NAME);
validateListSqlDatabase(sqlServer.databases().list());
sqlDatabase
.update()
.withoutElasticPool()
.withStandardEdition(SqlDatabaseStandardServiceObjective.S3)
.apply();
sqlDatabase = sqlServer.databases().get(SQL_DATABASE_NAME);
Assertions.assertNull(sqlDatabase.elasticPoolName());
sqlDatabase.update().withPremiumEdition(SqlDatabasePremiumServiceObjective.P1).apply();
sqlDatabase = sqlServer.databases().get(SQL_DATABASE_NAME);
Assertions.assertEquals(sqlDatabase.edition(), DatabaseEdition.PREMIUM);
Assertions.assertEquals(sqlDatabase.currentServiceObjectiveName(), ServiceObjectiveName.P1.toString());
sqlDatabase.update().withPremiumEdition(SqlDatabasePremiumServiceObjective.P2).apply();
sqlDatabase = sqlServer.databases().get(SQL_DATABASE_NAME);
Assertions.assertEquals(sqlDatabase.currentServiceObjectiveName(), ServiceObjectiveName.P2.toString());
Assertions.assertEquals(sqlDatabase.requestedServiceObjectiveName(), ServiceObjectiveName.P2.toString());
sqlDatabase.update().withMaxSizeBytes(268435456000L).apply();
sqlDatabase = sqlServer.databases().get(SQL_DATABASE_NAME);
Assertions.assertEquals(sqlDatabase.maxSizeBytes(), 268435456000L);
sqlDatabase.update().withExistingElasticPool(SQL_ELASTIC_POOL_NAME).apply();
sqlDatabase = sqlServer.databases().get(SQL_DATABASE_NAME);
Assertions.assertEquals(sqlDatabase.elasticPoolName(), SQL_ELASTIC_POOL_NAME);
Assertions.assertNotNull(elasticPool.listActivities());
Assertions.assertNotNull(elasticPool.listDatabaseActivities());
List<SqlDatabase> databasesInElasticPool = elasticPool.listDatabases();
Assertions.assertNotNull(databasesInElasticPool);
Assertions.assertEquals(databasesInElasticPool.size(), 1);
SqlDatabase databaseInElasticPool = elasticPool.getDatabase(SQL_DATABASE_NAME);
validateSqlDatabase(databaseInElasticPool, SQL_DATABASE_NAME);
databaseInElasticPool.refresh();
validateResourceNotFound(() -> elasticPool.getDatabase("does_not_exist"));
sqlServer.databases().delete(SQL_DATABASE_NAME);
validateSqlDatabaseNotFound(SQL_DATABASE_NAME);
SqlElasticPool sqlElasticPool = sqlServer.elasticPools().get(SQL_ELASTIC_POOL_NAME);
resourceStream =
sqlServer
.databases()
.define("newDatabase")
.withExistingElasticPool(sqlElasticPool)
.withCollation(COLLATION)
.createAsync();
sqlDatabase = resourceStream.block();
sqlServer.databases().delete(sqlDatabase.name());
validateSqlDatabaseNotFound("newDatabase");
sqlServer.elasticPools().delete(SQL_ELASTIC_POOL_NAME);
sqlServerManager.sqlServers().deleteByResourceGroup(sqlServer.resourceGroupName(), sqlServer.name());
validateSqlServerNotFound(sqlServer);
}
@Test
public void canCRUDSqlElasticPool() throws Exception {
SqlServer sqlServer = createSqlServer();
sqlServer = sqlServerManager.sqlServers().getByResourceGroup(rgName, sqlServerName);
validateSqlServer(sqlServer);
Mono<SqlElasticPool> resourceStream =
sqlServer
.elasticPools()
.define(SQL_ELASTIC_POOL_NAME)
.withStandardPool()
.withTag("tag1", "value1")
.createAsync();
SqlElasticPool sqlElasticPool = resourceStream.block();
validateSqlElasticPool(sqlElasticPool);
Assertions.assertEquals(sqlElasticPool.listDatabases().size(), 0);
sqlElasticPool =
sqlElasticPool
.update()
.withReservedDtu(SqlElasticPoolBasicEDTUs.eDTU_100)
.withDatabaseMaxCapacity(20)
.withDatabaseMinCapacity(10)
.withStorageCapacity(102400 * 1024 * 1024L)
.withNewDatabase(SQL_DATABASE_NAME)
.withTag("tag2", "value2")
.apply();
validateSqlElasticPool(sqlElasticPool);
Assertions.assertEquals(sqlElasticPool.listDatabases().size(), 1);
Assertions.assertNotNull(sqlElasticPool.getDatabase(SQL_DATABASE_NAME));
validateSqlElasticPool(sqlServer.elasticPools().get(SQL_ELASTIC_POOL_NAME));
validateListSqlElasticPool(sqlServer.elasticPools().list());
sqlServer.databases().delete(SQL_DATABASE_NAME);
sqlServer.elasticPools().delete(SQL_ELASTIC_POOL_NAME);
validateSqlElasticPoolNotFound(sqlServer, SQL_ELASTIC_POOL_NAME);
resourceStream =
sqlServer.elasticPools().define("newElasticPool").withStandardPool().createAsync();
sqlElasticPool = resourceStream.block();
sqlServer.elasticPools().delete(sqlElasticPool.name());
validateSqlElasticPoolNotFound(sqlServer, "newElasticPool");
sqlServerManager.sqlServers().deleteByResourceGroup(sqlServer.resourceGroupName(), sqlServer.name());
validateSqlServerNotFound(sqlServer);
}
@Test
public void canCRUDSqlFirewallRule() throws Exception {
SqlServer sqlServer = createSqlServer();
sqlServer = sqlServerManager.sqlServers().getByResourceGroup(rgName, sqlServerName);
validateSqlServer(sqlServer);
Mono<SqlFirewallRule> resourceStream =
sqlServer
.firewallRules()
.define(SQL_FIREWALLRULE_NAME)
.withIpAddressRange(START_IPADDRESS, END_IPADDRESS)
.createAsync();
SqlFirewallRule sqlFirewallRule = resourceStream.block();
validateSqlFirewallRule(sqlFirewallRule, SQL_FIREWALLRULE_NAME);
validateSqlFirewallRule(sqlServer.firewallRules().get(SQL_FIREWALLRULE_NAME), SQL_FIREWALLRULE_NAME);
String secondFirewallRuleName = "secondFireWallRule";
SqlFirewallRule secondFirewallRule =
sqlServer.firewallRules().define(secondFirewallRuleName).withIpAddress(START_IPADDRESS).create();
Assertions.assertNotNull(secondFirewallRule);
secondFirewallRule = sqlServer.firewallRules().get(secondFirewallRuleName);
Assertions.assertNotNull(secondFirewallRule);
Assertions.assertEquals(START_IPADDRESS, secondFirewallRule.endIpAddress());
secondFirewallRule = secondFirewallRule.update().withEndIpAddress(END_IPADDRESS).apply();
validateSqlFirewallRule(secondFirewallRule, secondFirewallRuleName);
sqlServer.firewallRules().delete(secondFirewallRuleName);
final SqlServer finalSqlServer = sqlServer;
validateResourceNotFound(() -> finalSqlServer.firewallRules().get(secondFirewallRuleName));
sqlFirewallRule = sqlServer.firewallRules().get(SQL_FIREWALLRULE_NAME);
validateSqlFirewallRule(sqlFirewallRule, SQL_FIREWALLRULE_NAME);
sqlFirewallRule.update().withEndIpAddress(START_IPADDRESS).apply();
sqlFirewallRule = sqlServer.firewallRules().get(SQL_FIREWALLRULE_NAME);
Assertions.assertEquals(sqlFirewallRule.endIpAddress(), START_IPADDRESS);
validateListSqlFirewallRule(sqlServer.firewallRules().list());
sqlServer.firewallRules().delete(sqlFirewallRule.name());
validateSqlFirewallRuleNotFound();
sqlServerManager.sqlServers().deleteByResourceGroup(sqlServer.resourceGroupName(), sqlServer.name());
validateSqlServerNotFound(sqlServer);
}
private void validateMultiCreation(
String database2Name,
String database1InEPName,
String database2InEPName,
String elasticPool1Name,
String elasticPool2Name,
String elasticPool3Name,
SqlServer sqlServer,
boolean deleteUsingUpdate) {
validateSqlServer(sqlServer);
validateSqlServer(sqlServerManager.sqlServers().getByResourceGroup(rgName, sqlServerName));
validateSqlDatabase(sqlServer.databases().get(SQL_DATABASE_NAME), SQL_DATABASE_NAME);
validateSqlFirewallRule(sqlServer.firewallRules().get(SQL_FIREWALLRULE_NAME), SQL_FIREWALLRULE_NAME);
List<SqlFirewallRule> firewalls = sqlServer.firewallRules().list();
Assertions.assertEquals(3, firewalls.size());
int startIPAddress = 0;
int endIPAddress = 0;
for (SqlFirewallRule firewall : firewalls) {
if (!firewall.name().equalsIgnoreCase(SQL_FIREWALLRULE_NAME)) {
Assertions.assertEquals(firewall.startIpAddress(), START_IPADDRESS);
if (firewall.endIpAddress().equalsIgnoreCase(START_IPADDRESS)) {
startIPAddress++;
} else if (firewall.endIpAddress().equalsIgnoreCase(END_IPADDRESS)) {
endIPAddress++;
}
}
}
Assertions.assertEquals(startIPAddress, 1);
Assertions.assertEquals(endIPAddress, 1);
Assertions.assertNotNull(sqlServer.databases().get(database2Name));
Assertions.assertNotNull(sqlServer.databases().get(database1InEPName));
Assertions.assertNotNull(sqlServer.databases().get(database2InEPName));
SqlElasticPool ep1 = sqlServer.elasticPools().get(elasticPool1Name);
validateSqlElasticPool(ep1, elasticPool1Name);
SqlElasticPool ep2 = sqlServer.elasticPools().get(elasticPool2Name);
Assertions.assertNotNull(ep2);
Assertions.assertEquals(ep2.edition(), ElasticPoolEdition.PREMIUM);
Assertions.assertEquals(ep2.listDatabases().size(), 2);
Assertions.assertNotNull(ep2.getDatabase(database1InEPName));
Assertions.assertNotNull(ep2.getDatabase(database2InEPName));
SqlElasticPool ep3 = sqlServer.elasticPools().get(elasticPool3Name);
Assertions.assertNotNull(ep3);
Assertions.assertEquals(ep3.edition(), ElasticPoolEdition.STANDARD);
if (!deleteUsingUpdate) {
sqlServer.databases().delete(database2Name);
sqlServer.databases().delete(database1InEPName);
sqlServer.databases().delete(database2InEPName);
sqlServer.databases().delete(SQL_DATABASE_NAME);
Assertions.assertEquals(ep1.listDatabases().size(), 0);
Assertions.assertEquals(ep2.listDatabases().size(), 0);
Assertions.assertEquals(ep3.listDatabases().size(), 0);
sqlServer.elasticPools().delete(elasticPool1Name);
sqlServer.elasticPools().delete(elasticPool2Name);
sqlServer.elasticPools().delete(elasticPool3Name);
firewalls = sqlServer.firewallRules().list();
for (SqlFirewallRule firewallRule : firewalls) {
firewallRule.delete();
}
} else {
sqlServer
.update()
.withoutDatabase(database2Name)
.withoutElasticPool(elasticPool1Name)
.withoutElasticPool(elasticPool2Name)
.withoutElasticPool(elasticPool3Name)
.withoutDatabase(database1InEPName)
.withoutDatabase(SQL_DATABASE_NAME)
.withoutDatabase(database2InEPName)
.withoutFirewallRule(SQL_FIREWALLRULE_NAME)
.apply();
Assertions.assertEquals(sqlServer.elasticPools().list().size(), 0);
firewalls = sqlServer.firewallRules().list();
Assertions.assertEquals(firewalls.size(), 2);
for (SqlFirewallRule firewallRule : firewalls) {
firewallRule.delete();
}
}
Assertions.assertEquals(sqlServer.elasticPools().list().size(), 0);
Assertions.assertEquals(sqlServer.databases().list().size(), 1);
}
private void validateSqlFirewallRuleNotFound() {
validateResourceNotFound(
() ->
sqlServerManager
.sqlServers()
.getByResourceGroup(rgName, sqlServerName)
.firewallRules()
.get(SQL_FIREWALLRULE_NAME));
}
private void validateSqlElasticPoolNotFound(SqlServer sqlServer, String elasticPoolName) {
validateResourceNotFound(() -> sqlServer.elasticPools().get(elasticPoolName));
}
private void validateSqlDatabaseNotFound(String newDatabase) {
validateResourceNotFound(
() -> sqlServerManager.sqlServers().getByResourceGroup(rgName, sqlServerName).databases().get(newDatabase));
}
private void validateSqlServerNotFound(SqlServer sqlServer) {
validateResourceNotFound(() -> sqlServerManager.sqlServers().getById(sqlServer.id()));
}
private void validateResourceNotFound(Supplier<Object> fetchResource) {
try {
Object result = fetchResource.get();
Assertions.assertNull(result);
} catch (ManagementException e) {
Assertions.assertEquals(404, e.getResponse().getStatusCode());
}
}
private SqlServer createSqlServer() {
return createSqlServer(sqlServerName);
}
private SqlServer createSqlServer(String sqlServerName) {
return sqlServerManager
.sqlServers()
.define(sqlServerName)
.withRegion(Region.US_EAST)
.withNewResourceGroup(rgName)
.withAdministratorLogin("userName")
.withAdministratorPassword("P@ssword~1")
.create();
}
private static void validateListSqlFirewallRule(List<SqlFirewallRule> sqlFirewallRules) {
boolean found = false;
for (SqlFirewallRule firewallRule : sqlFirewallRules) {
if (firewallRule.name().equals(SQL_FIREWALLRULE_NAME)) {
found = true;
}
}
Assertions.assertTrue(found);
}
private void validateSqlFirewallRule(SqlFirewallRule sqlFirewallRule, String firewallName) {
Assertions.assertNotNull(sqlFirewallRule);
Assertions.assertEquals(firewallName, sqlFirewallRule.name());
Assertions.assertEquals(sqlServerName, sqlFirewallRule.sqlServerName());
Assertions.assertEquals(START_IPADDRESS, sqlFirewallRule.startIpAddress());
Assertions.assertEquals(END_IPADDRESS, sqlFirewallRule.endIpAddress());
Assertions.assertEquals(rgName, sqlFirewallRule.resourceGroupName());
Assertions.assertEquals(sqlServerName, sqlFirewallRule.sqlServerName());
Assertions.assertEquals(Region.US_EAST, sqlFirewallRule.region());
}
private static void validateListSqlElasticPool(List<SqlElasticPool> sqlElasticPools) {
boolean found = false;
for (SqlElasticPool elasticPool : sqlElasticPools) {
if (elasticPool.name().equals(SQL_ELASTIC_POOL_NAME)) {
found = true;
}
}
Assertions.assertTrue(found);
}
private void validateSqlElasticPool(SqlElasticPool sqlElasticPool) {
validateSqlElasticPool(sqlElasticPool, SQL_ELASTIC_POOL_NAME);
}
private void validateSqlElasticPool(SqlElasticPool sqlElasticPool, String elasticPoolName) {
Assertions.assertNotNull(sqlElasticPool);
Assertions.assertEquals(rgName, sqlElasticPool.resourceGroupName());
Assertions.assertEquals(elasticPoolName, sqlElasticPool.name());
Assertions.assertEquals(sqlServerName, sqlElasticPool.sqlServerName());
Assertions.assertEquals(ElasticPoolEdition.STANDARD, sqlElasticPool.edition());
Assertions.assertNotNull(sqlElasticPool.creationDate());
Assertions.assertNotEquals(0, sqlElasticPool.databaseDtuMax());
Assertions.assertNotEquals(0, sqlElasticPool.dtu());
}
private static void validateListSqlDatabase(List<SqlDatabase> sqlDatabases) {
boolean found = false;
for (SqlDatabase database : sqlDatabases) {
if (database.name().equals(SQL_DATABASE_NAME)) {
found = true;
}
}
Assertions.assertTrue(found);
}
private void validateSqlServer(SqlServer sqlServer) {
Assertions.assertNotNull(sqlServer);
Assertions.assertEquals(rgName, sqlServer.resourceGroupName());
Assertions.assertNotNull(sqlServer.fullyQualifiedDomainName());
Assertions.assertEquals("userName", sqlServer.administratorLogin());
}
private void validateSqlDatabase(SqlDatabase sqlDatabase, String databaseName) {
Assertions.assertNotNull(sqlDatabase);
Assertions.assertEquals(sqlDatabase.name(), databaseName);
Assertions.assertEquals(sqlServerName, sqlDatabase.sqlServerName());
Assertions.assertEquals(sqlDatabase.collation(), COLLATION);
Assertions.assertEquals(sqlDatabase.edition(), DatabaseEdition.STANDARD);
}
private void validateSqlDatabaseWithElasticPool(SqlDatabase sqlDatabase, String databaseName) {
validateSqlDatabase(sqlDatabase, databaseName);
Assertions.assertEquals(SQL_ELASTIC_POOL_NAME, sqlDatabase.elasticPoolName());
}
@Test
public void testRandomSku() {
List<DatabaseSku> databaseSkus = new LinkedList<>(Arrays.asList(DatabaseSku.getAll().toArray(new DatabaseSku[0])));
Collections.shuffle(databaseSkus);
List<ElasticPoolSku> elasticPoolSkus = new LinkedList<>(Arrays.asList(ElasticPoolSku.getAll().toArray(new ElasticPoolSku[0])));
Collections.shuffle(elasticPoolSkus);
sqlServerManager.sqlServers().getCapabilitiesByRegion(Region.US_EAST).supportedCapabilitiesByServerVersion()
.forEach((x, serverVersionCapability) -> {
serverVersionCapability.supportedEditions().forEach(edition -> {
edition.supportedServiceLevelObjectives().forEach(serviceObjective -> {
if (serviceObjective.status() != CapabilityStatus.AVAILABLE && serviceObjective.status() != CapabilityStatus.DEFAULT) {
databaseSkus.remove(DatabaseSku.fromSku(serviceObjective.sku()));
}
});
});
serverVersionCapability.supportedElasticPoolEditions().forEach(edition -> {
edition.supportedElasticPoolPerformanceLevels().forEach(performance -> {
if (performance.status() != CapabilityStatus.AVAILABLE && performance.status() != CapabilityStatus.DEFAULT) {
elasticPoolSkus.remove(ElasticPoolSku.fromSku(performance.sku()));
}
});
});
});
SqlServer sqlServer = sqlServerManager.sqlServers().define(sqlServerName)
.withRegion(Region.US_EAST)
.withNewResourceGroup(rgName)
.withAdministratorLogin("userName")
.withAdministratorPassword(password())
.create();
Flux.merge(
Flux.range(0, 5)
.flatMap(i -> sqlServer.databases().define("database" + i).withSku(databaseSkus.get(i)).createAsync().cast(Indexable.class)),
Flux.range(0, 5)
.flatMap(i -> sqlServer.elasticPools().define("elasticPool" + i).withSku(elasticPoolSkus.get(i)).createAsync().cast(Indexable.class))
)
.blockLast();
}
@Test
@Disabled("Only run for updating sku")
public void generateSku() throws IOException {
StringBuilder databaseSkuBuilder = new StringBuilder();
StringBuilder elasticPoolSkuBuilder = new StringBuilder();
sqlServerManager.sqlServers().getCapabilitiesByRegion(Region.US_EAST).supportedCapabilitiesByServerVersion()
.forEach((x, serverVersionCapability) -> {
serverVersionCapability.supportedEditions().forEach(edition -> {
if (edition.name().equalsIgnoreCase("System")) {
return;
}
edition.supportedServiceLevelObjectives().forEach(serviceObjective -> {
addStaticSkuDefinition(databaseSkuBuilder, edition.name(), serviceObjective.name(), serviceObjective.sku(), "DatabaseSku");
});
});
serverVersionCapability.supportedElasticPoolEditions().forEach(edition -> {
if (edition.name().equalsIgnoreCase("System")) {
return;
}
edition.supportedElasticPoolPerformanceLevels().forEach(performance -> {
String detailName = String.format("%s_%d", performance.sku().name(), performance.sku().capacity());
addStaticSkuDefinition(elasticPoolSkuBuilder, edition.name(), detailName, performance.sku(), "ElasticPoolSku");
});
});
});
String databaseSku = new String(readAllBytes(getClass().getResourceAsStream("/DatabaseSku.java")), StandardCharsets.UTF_8);
databaseSku = databaseSku.replace("<Generated>", databaseSkuBuilder.toString());
Files.write(new File("src/main/java/com/azure/resourcemanager/sql/models/DatabaseSku.java").toPath(), databaseSku.getBytes(StandardCharsets.UTF_8));
String elasticPoolSku = new String(readAllBytes(getClass().getResourceAsStream("/ElasticPoolSku.java")), StandardCharsets.UTF_8);
elasticPoolSku = elasticPoolSku.replace("<Generated>", elasticPoolSkuBuilder.toString());
Files.write(new File("src/main/java/com/azure/resourcemanager/sql/models/ElasticPoolSku.java").toPath(), elasticPoolSku.getBytes(StandardCharsets.UTF_8));
sqlServerManager.resourceManager().resourceGroups().define(rgName).withRegion(Region.US_EAST).create();
}
private byte[] readAllBytes(InputStream inputStream) throws IOException {
try (ByteArrayOutputStream outputStream = new ByteArrayOutputStream()) {
byte[] data = new byte[4096];
while (true) {
int size = inputStream.read(data);
if (size > 0) {
outputStream.write(data, 0, size);
} else {
return outputStream.toByteArray();
}
}
}
}
private void addStaticSkuDefinition(StringBuilder builder, String edition, String detailName, Sku sku, String className) {
builder
.append("\n ").append("/** ").append(edition).append(" Edition with ").append(detailName).append(" sku. */")
.append("\n ").append("public static final ").append(className).append(" ").append(String.format("%s_%s", edition.toUpperCase(Locale.ROOT), detailName.toUpperCase(Locale.ROOT))).append(" =")
.append("\n new ").append(className).append("(")
.append(sku.name() == null ? null : "\"" + sku.name() + "\"")
.append(", ")
.append(sku.tier() == null ? null : "\"" + sku.tier() + "\"")
.append(", ")
.append(sku.family() == null ? null : "\"" + sku.family() + "\"")
.append(", ")
.append(sku.capacity())
.append(", ")
.append(sku.size() == null ? null : "\"" + sku.size() + "\"")
.append(");");
}
} | |
why can't we use the API for this? `Epoll.isAvailable()` is that different? | private Bootstrap getBootStrap(EventLoopGroup eventLoopGroup, Config config) {
checkNotNull(eventLoopGroup, "expected non-null eventLoopGroup");
checkNotNull(config, "expected non-null config");
Bootstrap bootstrap = new Bootstrap()
.group(eventLoopGroup)
.option(ChannelOption.ALLOCATOR, config.allocator())
.option(ChannelOption.AUTO_READ, true)
.option(ChannelOption.CONNECT_TIMEOUT_MILLIS, config.connectTimeoutInMillis())
.option(ChannelOption.RCVBUF_ALLOCATOR, receiveBufferAllocator)
.option(ChannelOption.SO_KEEPALIVE, true)
.remoteAddress(this.serverKey.getHost(), this.serverKey.getPort());
if (eventLoopGroup instanceof EpollEventLoopGroup) {
bootstrap.channel(EpollSocketChannel.class)
.option(EpollChannelOption.TCP_KEEPINTVL, 1)
.option(EpollChannelOption.TCP_KEEPIDLE, 30);
} else {
bootstrap.channel(NioSocketChannel.class);
}
return bootstrap;
} | if (eventLoopGroup instanceof EpollEventLoopGroup) { | private Bootstrap getBootStrap(EventLoopGroup eventLoopGroup, Config config) {
checkNotNull(eventLoopGroup, "expected non-null eventLoopGroup");
checkNotNull(config, "expected non-null config");
RntbdLoop rntbdLoop = RntbdLoopNativeDetector.getRntbdLoop(config.preferTcpNative());
Bootstrap bootstrap = new Bootstrap()
.group(eventLoopGroup)
.channel(rntbdLoop.getChannelClass())
.option(ChannelOption.ALLOCATOR, config.allocator())
.option(ChannelOption.AUTO_READ, true)
.option(ChannelOption.CONNECT_TIMEOUT_MILLIS, config.connectTimeoutInMillis())
.option(ChannelOption.RCVBUF_ALLOCATOR, receiveBufferAllocator)
.option(ChannelOption.SO_KEEPALIVE, true)
.remoteAddress(this.serverKey.getHost(), this.serverKey.getPort());
if (rntbdLoop instanceof RntbdLoopEpoll) {
bootstrap
.option(EpollChannelOption.TCP_KEEPINTVL, config.tcpKeepIntvl())
.option(EpollChannelOption.TCP_KEEPIDLE, config.tcpKeepIdle());
}
return bootstrap;
} | class RntbdServiceEndpoint implements RntbdEndpoint {
private static final String TAG_NAME = RntbdServiceEndpoint.class.getSimpleName();
private static final long QUIET_PERIOD = 2_000_000_000L;
private static final AtomicLong instanceCount = new AtomicLong();
private static final Logger logger = LoggerFactory.getLogger(RntbdServiceEndpoint.class);
private static final AdaptiveRecvByteBufAllocator receiveBufferAllocator = new AdaptiveRecvByteBufAllocator();
private final RntbdClientChannelPool channelPool;
private final AtomicBoolean closed;
private final AtomicInteger concurrentRequests;
private final long id;
private final AtomicLong lastRequestNanoTime;
private final AtomicLong lastSuccessfulRequestNanoTime;
private final Instant createdTime;
private final RntbdMetrics metrics;
private final Provider provider;
private final URI serverKey;
private final SocketAddress remoteAddress;
private final RntbdRequestTimer requestTimer;
private final Tag tag;
private final int maxConcurrentRequests;
private final boolean channelAcquisitionContextEnabled;
private final RntbdConnectionStateListener connectionStateListener;
private RntbdServiceEndpoint(
final Provider provider,
final Config config,
final EventLoopGroup group,
final RntbdRequestTimer timer,
final URI physicalAddress) {
this.serverKey = RntbdUtils.getServerKey(physicalAddress);
final Bootstrap bootstrap = this.getBootStrap(group, config);
this.createdTime = Instant.now();
this.channelPool = new RntbdClientChannelPool(this, bootstrap, config);
this.remoteAddress = bootstrap.config().remoteAddress();
this.concurrentRequests = new AtomicInteger();
this.lastRequestNanoTime = new AtomicLong(System.nanoTime());
this.lastSuccessfulRequestNanoTime = new AtomicLong(System.nanoTime());
this.closed = new AtomicBoolean();
this.requestTimer = timer;
this.tag = Tag.of(TAG_NAME, RntbdMetrics.escape(this.remoteAddress.toString()));
this.id = instanceCount.incrementAndGet();
this.provider = provider;
this.metrics = new RntbdMetrics(provider.transportClient, this);
this.maxConcurrentRequests = config.maxConcurrentRequestsPerEndpoint();
this.connectionStateListener = this.provider.addressResolver != null && config.isConnectionEndpointRediscoveryEnabled()
? new RntbdConnectionStateListener(this.provider.addressResolver, this)
: null;
this.channelAcquisitionContextEnabled = config.isChannelAcquisitionContextEnabled();
}
/**
 * @return approximate number of acquired channels.
 */
@Override
public int channelsAcquiredMetric() {
return this.channelPool.channelsAcquiredMetrics();
}
/**
 * @return approximate number of available channels.
 */
@Override
public int channelsAvailableMetric() {
return this.channelPool.channelsAvailableMetrics();
}
/** @return number of requests currently in flight on this endpoint. */
@Override
public int concurrentRequests() {
return this.concurrentRequests.get();
}
/** @return approximate number of connections currently being established. */
@Override
public int gettingEstablishedConnectionsMetrics() {
return this.channelPool.attemptingToConnectMetrics();
}
/** @return unique, monotonically assigned identifier of this endpoint instance. */
@Override
public long id() {
return this.id;
}
/** @return {@code true} once {@link #close()} has been invoked. */
@Override
public boolean isClosed() {
return this.closed.get();
}
// NOTE(review): delegates to channelPool.channels(true), which is the identical call that
// channelsMetrics() makes -- confirm this is the intended "max channels" semantic.
@Override
public int maxChannels() {
return this.channelPool.channels(true);
}
/** @return nano-time at which the most recent request was issued through this endpoint. */
public long lastRequestNanoTime() {
return this.lastRequestNanoTime.get();
}
/** @return nano-time of the most recent successful (or 404/409 -- see onResponse) response. */
@Override
public long lastSuccessfulRequestNanoTime() {
return this.lastSuccessfulRequestNanoTime.get();
}
/** @return approximate channel count reported by the pool. */
@Override
public int channelsMetrics() {
return this.channelPool.channels(true);
}
/** @return size of the channel pool executor's task queue. */
@Override
public int executorTaskQueueMetrics() {
return this.channelPool.executorTaskQueueMetrics();
}
/** @return the instant at which this endpoint was constructed. */
public Instant getCreatedTime() {
return this.createdTime;
}
/** @return the resolved socket address of the remote replica. */
@Override
public SocketAddress remoteAddress() {
return this.remoteAddress;
}
/** @return the server key derived from the physical address served by this endpoint. */
@Override
public URI serverKey() { return this.serverKey; }
/** @return number of channel acquisitions currently queued in the pool. */
@Override
public int requestQueueLength() {
return this.channelPool.requestQueueLength();
}
/** @return metrics tag identifying this endpoint by its (escaped) remote address. */
@Override
public Tag tag() {
return this.tag;
}
/** @return direct memory, in bytes, used by this endpoint's channel pool. */
@Override
public long usedDirectMemory() {
return this.channelPool.usedDirectMemory();
}
/** @return heap memory, in bytes, used by this endpoint's channel pool. */
@Override
public long usedHeapMemory() {
return this.channelPool.usedHeapMemory();
}
/**
 * Closes this endpoint exactly once: evicts it from the owning provider and closes the
 * underlying channel pool. Subsequent calls are no-ops.
 */
@Override
public void close() {
if (this.closed.compareAndSet(false, true)) {
this.provider.evict(this);
this.channelPool.close();
}
}
/**
 * Issues an RNTBD request on this endpoint.
 * <p>
 * Fails fast when the in-flight request count already exceeds the configured per-endpoint
 * maximum; otherwise writes the request and registers completion bookkeeping (concurrency
 * counter, metrics, health tracking via {@code onResponse}).
 *
 * @param args the request arguments to transmit
 * @return a record tracking the pending (or fail-fast-completed) request
 * @throws TransportException if this endpoint is already closed
 */
public RntbdRequestRecord request(final RntbdRequestArgs args) {
this.throwIfClosed();
int concurrentRequestSnapshot = this.concurrentRequests.incrementAndGet();
RntbdEndpointStatistics stat = endpointMetricsSnapshot(concurrentRequestSnapshot);
if (concurrentRequestSnapshot > this.maxConcurrentRequests) {
try {
FailFastRntbdRequestRecord requestRecord = FailFastRntbdRequestRecord.createAndFailFast(
args,
concurrentRequestSnapshot,
metrics,
remoteAddress);
requestRecord.serviceEndpointStatistics(stat);
return requestRecord;
}
finally {
// Fail-fast records never reach the whenComplete callback below, so the
// concurrency counter must be decremented here instead.
concurrentRequests.decrementAndGet();
}
}
this.lastRequestNanoTime.set(args.nanoTimeCreated());
final RntbdRequestRecord record = this.write(args);
record.serviceEndpointStatistics(stat);
record.whenComplete((response, error) -> {
this.concurrentRequests.decrementAndGet();
this.metrics.markComplete(record);
onResponse(error, record);
});
return record;
}
/**
 * Updates endpoint-health bookkeeping once a request settles.
 * <p>
 * A {@code null} exception refreshes the last-successful-request timestamp. Failures are
 * forwarded to the connection state listener when one is configured; CONFLICT (409) and
 * NOTFOUND (404) results also refresh the timestamp, i.e. they are treated the same as a
 * success for the purpose of this bookkeeping.
 */
private void onResponse(Throwable exception, RntbdRequestRecord record) {
    if (exception == null) {
        this.lastSuccessfulRequestNanoTime.set(System.nanoTime());
        return;
    }

    if (this.connectionStateListener != null) {
        this.connectionStateListener.onException(record.args().serviceRequest(), exception);
    }

    if (!(exception instanceof CosmosException)) {
        return;
    }

    final int statusCode = ((CosmosException) exception).getStatusCode();
    if (statusCode == HttpConstants.StatusCodes.CONFLICT
        || statusCode == HttpConstants.StatusCodes.NOTFOUND) {
        // A well-formed 404/409 reply still counts toward lastSuccessfulRequestNanoTime.
        this.lastSuccessfulRequestNanoTime.set(System.nanoTime());
    }
}
/**
 * Captures a point-in-time snapshot of this endpoint's health and diagnostic counters.
 *
 * @param concurrentRequestSnapshot the in-flight request count observed by the caller
 * @return a populated {@link RntbdEndpointStatistics}
 */
private RntbdEndpointStatistics endpointMetricsSnapshot(int concurrentRequestSnapshot) {
RntbdEndpointStatistics stats = new RntbdEndpointStatistics()
.availableChannels(this.channelsAvailableMetric())
.acquiredChannels(this.channelsAcquiredMetric())
.executorTaskQueueSize(this.executorTaskQueueMetrics())
.lastSuccessfulRequestNanoTime(this.lastSuccessfulRequestNanoTime())
.createdTime(this.createdTime)
.lastRequestNanoTime(this.lastRequestNanoTime())
.closed(this.closed.get())
.inflightRequests(concurrentRequestSnapshot);
return stats;
}
@Override
public String toString() {
// JSON rendering via the custom serializer (see JsonSerializer nested class).
return RntbdObjectMapper.toString(this);
}
/** Logs the outcome of returning a channel to the pool (debug level only). */
private void ensureSuccessWhenReleasedToPool(Channel channel, Future<Void> released) {
if (released.isSuccess()) {
logger.debug("\n [{}]\n {}\n release succeeded", this, channel);
} else {
logger.debug("\n [{}]\n {}\n release failed due to {}", this, channel, released.cause());
}
}
/** Returns {@code channel} to the pool, logging the result when debug logging is enabled. */
private void releaseToPool(final Channel channel) {
logger.debug("\n [{}]\n {}\n RELEASE", this, channel);
final Future<Void> released = this.channelPool.release(channel);
if (logger.isDebugEnabled()) {
if (released.isDone()) {
ensureSuccessWhenReleasedToPool(channel, released);
} else {
released.addListener(ignored -> ensureSuccessWhenReleasedToPool(channel, released));
}
}
}
/** @throws TransportException if this endpoint has been closed. */
private void throwIfClosed() {
if (this.closed.get()) {
throw new TransportException(lenientFormat("%s is closed", this), new IllegalStateException());
}
}
/**
 * Acquires a channel from the pool and writes the request once acquisition settles.
 *
 * @param requestArgs the request to transmit
 * @return the (still pending) request record
 */
private RntbdRequestRecord write(final RntbdRequestArgs requestArgs) {
final RntbdRequestRecord requestRecord = new AsyncRntbdRequestRecord(requestArgs, this.requestTimer);
requestRecord.channelAcquisitionContextEnabled(this.channelAcquisitionContextEnabled);
requestRecord.stage(RntbdRequestRecord.Stage.CHANNEL_ACQUISITION_STARTED);
final Future<Channel> connectedChannel = this.channelPool.acquire(requestRecord.getChannelAcquisitionTimeline());
logger.debug("\n [{}]\n {}\n WRITE WHEN CONNECTED {}", this, requestArgs, connectedChannel);
// Write synchronously when the acquisition future is already done; otherwise resume from
// the future's listener.
if (connectedChannel.isDone()) {
return writeWhenConnected(requestRecord, connectedChannel);
} else {
connectedChannel.addListener(ignored -> writeWhenConnected(requestRecord, connectedChannel));
}
return requestRecord;
}
/**
 * Completes the write once channel acquisition settles.
 * <p>
 * On success the channel is released back to the pool and the request is pipelined onto it.
 * On cancellation the record is cancelled; on failure the record is completed exceptionally
 * with a {@link GoneException} wrapping the acquisition failure.
 */
private RntbdRequestRecord writeWhenConnected(
final RntbdRequestRecord requestRecord, final Future<? super Channel> connected) {
if (connected.isSuccess()) {
final Channel channel = (Channel) connected.getNow();
assert channel != null : "impossible";
// The channel is released back to the pool before the write; the write proceeds on the
// channel reference already in hand.
this.releaseToPool(channel);
requestRecord.channelTaskQueueLength(RntbdUtils.tryGetExecutorTaskQueueSize(channel.eventLoop()));
channel.write(requestRecord.stage(RntbdRequestRecord.Stage.PIPELINED));
return requestRecord;
}
final RntbdRequestArgs requestArgs = requestRecord.args();
final UUID activityId = requestArgs.activityId();
final Throwable cause = connected.cause();
if (connected.isCancelled()) {
logger.debug("\n [{}]\n {}\n write cancelled: {}", this, requestArgs, cause);
requestRecord.cancel(true);
} else {
logger.debug("\n [{}]\n {}\n write failed due to {} ", this, requestArgs, cause);
final String reason = cause.toString();
final GoneException goneException = new GoneException(
lenientFormat("failed to establish connection to %s due to %s", this.remoteAddress, reason),
cause instanceof Exception ? (Exception) cause : new IOException(reason, cause),
ImmutableMap.of(HttpHeaders.ACTIVITY_ID, activityId.toString()),
requestArgs.replicaPath()
);
BridgeInternal.setRequestHeaders(goneException, requestArgs.serviceRequest().getHeaders());
requestRecord.completeExceptionally(goneException);
}
return requestRecord;
}
/**
 * Jackson serializer that renders an endpoint -- plus a summary of its owning transport
 * client -- as a JSON object, used by {@code toString()} via RntbdObjectMapper.
 */
static final class JsonSerializer extends StdSerializer<RntbdServiceEndpoint> {
private static final long serialVersionUID = -5764954918168771152L;
public JsonSerializer() {
super(RntbdServiceEndpoint.class);
}
@Override
public void serialize(RntbdServiceEndpoint value, JsonGenerator generator, SerializerProvider provider)
throws IOException {
final RntbdTransportClient transportClient = value.provider.transportClient;
generator.writeStartObject();
generator.writeNumberField("id", value.id);
generator.writeBooleanField("closed", value.isClosed());
generator.writeNumberField("concurrentRequests", value.concurrentRequests());
generator.writeStringField("remoteAddress", value.remoteAddress.toString());
generator.writeObjectField("channelPool", value.channelPool);
// Nested summary of the owning transport client.
generator.writeObjectFieldStart("transportClient");
generator.writeNumberField("id", transportClient.id());
generator.writeBooleanField("closed", transportClient.isClosed());
generator.writeNumberField("endpointCount", transportClient.endpointCount());
generator.writeNumberField("endpointEvictionCount", transportClient.endpointEvictionCount());
generator.writeEndObject();
generator.writeEndObject();
}
}
/**
 * Creates and caches one {@link RntbdServiceEndpoint} per remote authority, sharing a single
 * event loop group, request timer and monitoring provider across all endpoints.
 */
public static final class Provider implements RntbdEndpoint.Provider {
private static final Logger logger = LoggerFactory.getLogger(Provider.class);
private final AtomicBoolean closed;
private final Config config;
// Keyed by physical address authority (host:port); see get(URI).
private final ConcurrentHashMap<String, RntbdEndpoint> endpoints;
private final EventLoopGroup eventLoopGroup;
private final AtomicInteger evictions;
private final RntbdEndpointMonitoringProvider monitoring;
private final RntbdRequestTimer requestTimer;
private final RntbdTransportClient transportClient;
private final IAddressResolver addressResolver;
/**
 * Builds a provider for the given transport client.
 *
 * @param transportClient the owning transport client
 * @param options transport options (thread count, timeouts, ...)
 * @param sslContext SSL context used by all endpoint channels
 * @param addressResolver resolver used for connection endpoint rediscovery; may be null
 */
public Provider(
final RntbdTransportClient transportClient,
final Options options,
final SslContext sslContext,
final IAddressResolver addressResolver) {
checkNotNull(transportClient, "expected non-null provider");
checkNotNull(options, "expected non-null options");
checkNotNull(sslContext, "expected non-null sslContext");
// Wire-level logging is only enabled when debug logging is on.
final LogLevel wireLogLevel;
if (logger.isDebugEnabled()) {
wireLogLevel = LogLevel.TRACE;
} else {
wireLogLevel = null;
}
this.addressResolver = addressResolver;
this.transportClient = transportClient;
this.config = new Config(options, sslContext, wireLogLevel);
this.requestTimer = new RntbdRequestTimer(
config.requestTimeoutInNanos(),
config.requestTimerResolutionInNanos());
this.eventLoopGroup = this.getEventLoopGroup(options);
this.endpoints = new ConcurrentHashMap<>();
this.evictions = new AtomicInteger();
this.closed = new AtomicBoolean();
this.monitoring = new RntbdEndpointMonitoringProvider(this);
this.monitoring.init();
}
/**
 * Builds the shared event loop group.
 * <p>
 * NOTE(review): the native epoll transport is opted into explicitly here when available;
 * NioEventLoopGroup is used only as the fallback. Confirm this explicit selection is still
 * wanted rather than relying on the default NIO transport.
 */
private EventLoopGroup getEventLoopGroup(Options options) {
checkNotNull(options, "expected non-null options")
final DefaultThreadFactory threadFactory =
new DefaultThreadFactory(
"cosmos-rntbd-nio",
true,
options.ioThreadPriority());
if (Epoll.isAvailable()) {
return new EpollEventLoopGroup(options.threadCount(), threadFactory);
}
return new NioEventLoopGroup(options.threadCount(), threadFactory);
}
/**
 * Closes all cached endpoints once, then shuts the event loop group down gracefully and
 * closes the request timer from the shutdown listener.
 */
@Override
public void close() {
if (this.closed.compareAndSet(false, true)) {
this.monitoring.close();
for (final RntbdEndpoint endpoint : this.endpoints.values()) {
endpoint.close();
}
this.eventLoopGroup.shutdownGracefully(QUIET_PERIOD, this.config.shutdownTimeoutInNanos(), NANOSECONDS)
.addListener(future -> {
this.requestTimer.close();
if (future.isSuccess()) {
logger.debug("\n [{}]\n closed endpoints", this);
return;
}
logger.error("\n [{}]\n failed to close endpoints due to ", this, future.cause());
});
return;
}
logger.debug("\n [{}]\n already closed", this);
}
/** @return the transport configuration shared by all endpoints. */
@Override
public Config config() {
return this.config;
}
/** @return number of cached endpoints. */
@Override
public int count() {
return this.endpoints.size();
}
/** @return number of endpoints evicted from the cache so far. */
@Override
public int evictions() {
return this.evictions.get();
}
/** Returns (creating if absent) the endpoint for {@code physicalAddress}'s authority. */
@Override
public RntbdEndpoint get(final URI physicalAddress) {
return endpoints.computeIfAbsent(physicalAddress.getAuthority(), authority -> new RntbdServiceEndpoint(
this,
this.config,
this.eventLoopGroup,
this.requestTimer,
physicalAddress));
}
/** @return the address resolver used for rediscovery; may be null. */
@Override
public IAddressResolver getAddressResolver() {
return this.addressResolver;
}
/** @return a stream over the currently cached endpoints. */
@Override
public Stream<RntbdEndpoint> list() {
return this.endpoints.values().stream();
}
/** Removes {@code endpoint} from the cache and bumps the eviction counter if present. */
private void evict(final RntbdEndpoint endpoint) {
if (this.endpoints.remove(endpoint.serverKey().getAuthority()) != null) {
this.evictions.incrementAndGet();
}
}
}
/**
 * Periodically logs per-endpoint pool statistics on a dedicated low-priority executor.
 * Warns when task queues, pending acquisitions, connecting counts or channel counts exceed
 * the configured thresholds; otherwise logs at debug level.
 */
public static class RntbdEndpointMonitoringProvider implements AutoCloseable {
private final Logger logger = LoggerFactory.getLogger(RntbdEndpointMonitoringProvider.class);
// Single shared, daemon, minimum-priority executor for all monitoring instances.
private static final EventExecutor monitoringRntbdChannelPool = new DefaultEventExecutor(new RntbdThreadFactory(
"monitoring-rntbd-endpoints",
true,
Thread.MIN_PRIORITY));
private static final Duration MONITORING_PERIOD = Duration.ofSeconds(60);
private final Provider provider;
// Threshold above which executor/request queue sizes trigger warn-level logging.
private final static int MAX_TASK_LIMIT = 5_000;
private ScheduledFuture<?> future;
RntbdEndpointMonitoringProvider(Provider provider) {
this.provider = provider;
}
/** Starts the fixed-rate monitoring task (first run immediately). */
synchronized void init() {
logger.info("Starting RntbdClientChannelPoolMonitoringProvider ...");
this.future = RntbdEndpointMonitoringProvider.monitoringRntbdChannelPool.scheduleAtFixedRate(() -> {
logAllPools();
}, 0, MONITORING_PERIOD.toMillis(), TimeUnit.MILLISECONDS);
}
/** Cancels the monitoring task without interrupting an in-progress run. */
@Override
public synchronized void close() {
logger.info("Shutting down RntbdClientChannelPoolMonitoringProvider ...");
this.future.cancel(false);
this.future = null;
}
/** Logs statistics for every cached endpoint; never lets an exception escape the timer. */
synchronized void logAllPools() {
try {
logger.debug("Total number of RntbdClientChannelPool [{}].", provider.endpoints.size());
for (RntbdEndpoint endpoint : provider.endpoints.values()) {
logEndpoint(endpoint);
}
} catch (Exception e) {
logger.error("monitoring unexpected failure", e);
}
}
/** Chooses warn vs debug level based on the endpoint's queue/connection thresholds. */
private void logEndpoint(RntbdEndpoint endpoint) {
if (this.logger.isWarnEnabled() &&
(endpoint.executorTaskQueueMetrics() > MAX_TASK_LIMIT ||
endpoint.requestQueueLength() > MAX_TASK_LIMIT ||
endpoint.gettingEstablishedConnectionsMetrics() > 0 ||
endpoint.channelsMetrics() > endpoint.maxChannels())) {
logger.warn("RntbdEndpoint Identifier {}, Stat {}", getPoolId(endpoint), getPoolStat(endpoint))
} else if (this.logger.isDebugEnabled()) {
logger.debug("RntbdEndpoint Identifier {}, Stat {}", getPoolId(endpoint), getPoolStat(endpoint));
}
}
/** Formats the endpoint's live statistics for logging. */
private String getPoolStat(RntbdEndpoint endpoint) {
return "[ "
+ "poolTaskExecutorSize " + endpoint.executorTaskQueueMetrics()
+ ", lastRequestNanoTime " + Instant.now().minusNanos(
System.nanoTime() - endpoint.lastRequestNanoTime())
+ ", connecting " + endpoint.gettingEstablishedConnectionsMetrics()
+ ", acquiredChannel " + endpoint.channelsAcquiredMetric()
+ ", availableChannel " + endpoint.channelsAvailableMetric()
+ ", pendingAcquisitionSize " + endpoint.requestQueueLength()
+ ", closed " + endpoint.isClosed()
+ " ]";
}
/** Formats a stable identifier (id, address, creation time, hash) for the endpoint. */
private String getPoolId(RntbdEndpoint endpoint) {
if (endpoint == null) {
return "null";
}
return "[RntbdEndpoint" +
", id " + endpoint.id() +
", remoteAddress " + endpoint.remoteAddress() +
", creationTime " + endpoint.getCreatedTime() +
", hashCode " + endpoint.hashCode() +
"]";
}
}
} | class RntbdServiceEndpoint implements RntbdEndpoint {
private static final String TAG_NAME = RntbdServiceEndpoint.class.getSimpleName();
// Grace period (nanoseconds) granted to the event loop group during graceful shutdown.
private static final long QUIET_PERIOD = 2_000_000_000L;
// Source of unique per-endpoint ids.
private static final AtomicLong instanceCount = new AtomicLong();
private static final Logger logger = LoggerFactory.getLogger(RntbdServiceEndpoint.class);
private static final AdaptiveRecvByteBufAllocator receiveBufferAllocator = new AdaptiveRecvByteBufAllocator();
private final RntbdClientChannelPool channelPool;
private final AtomicBoolean closed;
// Requests currently in flight; compared against maxConcurrentRequests for fail-fast.
private final AtomicInteger concurrentRequests;
private final long id;
// Nano-timestamps used for endpoint health/idleness tracking.
private final AtomicLong lastRequestNanoTime;
private final AtomicLong lastSuccessfulRequestNanoTime;
private final Instant createdTime;
private final RntbdMetrics metrics;
private final Provider provider;
private final URI serverKey;
private final SocketAddress remoteAddress;
private final RntbdRequestTimer requestTimer;
private final Tag tag;
private final int maxConcurrentRequests;
private final boolean channelAcquisitionContextEnabled;
// Null unless an address resolver exists and endpoint rediscovery is enabled.
private final RntbdConnectionStateListener connectionStateListener;
/**
 * Constructs a service endpoint for one physical replica address: channel pool, metrics,
 * request timer, and (when configured) a connection state listener.
 */
private RntbdServiceEndpoint(
final Provider provider,
final Config config,
final EventLoopGroup group,
final RntbdRequestTimer timer,
final URI physicalAddress) {
this.serverKey = RntbdUtils.getServerKey(physicalAddress);
final Bootstrap bootstrap = this.getBootStrap(group, config);
this.createdTime = Instant.now();
this.channelPool = new RntbdClientChannelPool(this, bootstrap, config);
this.remoteAddress = bootstrap.config().remoteAddress();
this.concurrentRequests = new AtomicInteger();
// Start both timestamps at construction so a new endpoint is not instantly "stale".
this.lastRequestNanoTime = new AtomicLong(System.nanoTime());
this.lastSuccessfulRequestNanoTime = new AtomicLong(System.nanoTime());
this.closed = new AtomicBoolean();
this.requestTimer = timer;
this.tag = Tag.of(TAG_NAME, RntbdMetrics.escape(this.remoteAddress.toString()));
this.id = instanceCount.incrementAndGet();
this.provider = provider;
this.metrics = new RntbdMetrics(provider.transportClient, this);
this.maxConcurrentRequests = config.maxConcurrentRequestsPerEndpoint();
// Listener only when a resolver is present and rediscovery is enabled.
this.connectionStateListener = this.provider.addressResolver != null && config.isConnectionEndpointRediscoveryEnabled()
? new RntbdConnectionStateListener(this.provider.addressResolver, this)
: null;
this.channelAcquisitionContextEnabled = config.isChannelAcquisitionContextEnabled();
}
/**
 * @return approximate number of acquired channels.
 */
@Override
public int channelsAcquiredMetric() {
return this.channelPool.channelsAcquiredMetrics();
}
/**
 * @return approximate number of available channels.
 */
@Override
public int channelsAvailableMetric() {
return this.channelPool.channelsAvailableMetrics();
}
/** @return number of requests currently in flight. */
@Override
public int concurrentRequests() {
return this.concurrentRequests.get();
}
/** @return approximate number of connections currently being established. */
@Override
public int gettingEstablishedConnectionsMetrics() {
return this.channelPool.attemptingToConnectMetrics();
}
/** @return unique identifier of this endpoint instance. */
@Override
public long id() {
return this.id;
}
/** @return {@code true} once {@link #close()} has been invoked. */
@Override
public boolean isClosed() {
return this.closed.get();
}
// NOTE(review): same underlying call as channelsMetrics(); confirm intended semantic.
@Override
public int maxChannels() {
return this.channelPool.channels(true);
}
/** @return nano-time of the most recent request issued through this endpoint. */
public long lastRequestNanoTime() {
return this.lastRequestNanoTime.get();
}
/** @return nano-time of the most recent successful (or 404/409) response. */
@Override
public long lastSuccessfulRequestNanoTime() {
return this.lastSuccessfulRequestNanoTime.get();
}
/** @return approximate channel count reported by the pool. */
@Override
public int channelsMetrics() {
return this.channelPool.channels(true);
}
/** @return size of the channel pool executor's task queue. */
@Override
public int executorTaskQueueMetrics() {
return this.channelPool.executorTaskQueueMetrics();
}
/** @return construction instant of this endpoint. */
public Instant getCreatedTime() {
return this.createdTime;
}
/** @return the resolved socket address of the remote replica. */
@Override
public SocketAddress remoteAddress() {
return this.remoteAddress;
}
/** @return the server key derived from this endpoint's physical address. */
@Override
public URI serverKey() { return this.serverKey; }
/** @return number of pending channel acquisitions in the pool. */
@Override
public int requestQueueLength() {
return this.channelPool.requestQueueLength();
}
/** @return metrics tag identifying this endpoint. */
@Override
public Tag tag() {
return this.tag;
}
/** @return direct memory (bytes) used by the channel pool. */
@Override
public long usedDirectMemory() {
return this.channelPool.usedDirectMemory();
}
/** @return heap memory (bytes) used by the channel pool. */
@Override
public long usedHeapMemory() {
return this.channelPool.usedHeapMemory();
}
/** Closes once: evicts from the provider and closes the channel pool. */
@Override
public void close() {
if (this.closed.compareAndSet(false, true)) {
this.provider.evict(this);
this.channelPool.close();
}
}
/**
 * Issues an RNTBD request: fails fast above the per-endpoint concurrency limit, otherwise
 * writes the request and registers completion bookkeeping.
 *
 * @throws TransportException if this endpoint is already closed
 */
public RntbdRequestRecord request(final RntbdRequestArgs args) {
this.throwIfClosed();
int concurrentRequestSnapshot = this.concurrentRequests.incrementAndGet();
RntbdEndpointStatistics stat = endpointMetricsSnapshot(concurrentRequestSnapshot);
if (concurrentRequestSnapshot > this.maxConcurrentRequests) {
try {
FailFastRntbdRequestRecord requestRecord = FailFastRntbdRequestRecord.createAndFailFast(
args,
concurrentRequestSnapshot,
metrics,
remoteAddress);
requestRecord.serviceEndpointStatistics(stat);
return requestRecord;
}
finally {
// Fail-fast records bypass the completion callback; undo the increment here.
concurrentRequests.decrementAndGet();
}
}
this.lastRequestNanoTime.set(args.nanoTimeCreated());
final RntbdRequestRecord record = this.write(args);
record.serviceEndpointStatistics(stat);
record.whenComplete((response, error) -> {
this.concurrentRequests.decrementAndGet();
this.metrics.markComplete(record);
onResponse(error, record);
});
return record;
}
/**
 * Updates endpoint-health bookkeeping when a request settles: success refreshes the
 * last-successful-request timestamp, failures notify the connection state listener, and
 * 404/409 CosmosExceptions are treated like successes for this bookkeeping.
 */
private void onResponse(Throwable exception, RntbdRequestRecord record) {
    if (exception == null) {
        this.lastSuccessfulRequestNanoTime.set(System.nanoTime());
        return;
    }

    if (this.connectionStateListener != null) {
        this.connectionStateListener.onException(record.args().serviceRequest(), exception);
    }

    if (!(exception instanceof CosmosException)) {
        return;
    }

    final int statusCode = ((CosmosException) exception).getStatusCode();
    if (statusCode == HttpConstants.StatusCodes.CONFLICT
        || statusCode == HttpConstants.StatusCodes.NOTFOUND) {
        this.lastSuccessfulRequestNanoTime.set(System.nanoTime());
    }
}
/** Snapshots this endpoint's health/diagnostic counters for attachment to a request. */
private RntbdEndpointStatistics endpointMetricsSnapshot(int concurrentRequestSnapshot) {
RntbdEndpointStatistics stats = new RntbdEndpointStatistics()
.availableChannels(this.channelsAvailableMetric())
.acquiredChannels(this.channelsAcquiredMetric())
.executorTaskQueueSize(this.executorTaskQueueMetrics())
.lastSuccessfulRequestNanoTime(this.lastSuccessfulRequestNanoTime())
.createdTime(this.createdTime)
.lastRequestNanoTime(this.lastRequestNanoTime())
.closed(this.closed.get())
.inflightRequests(concurrentRequestSnapshot);
return stats;
}
@Override
public String toString() {
// JSON rendering via the custom JsonSerializer.
return RntbdObjectMapper.toString(this);
}
/** Debug-logs the outcome of a channel release. */
private void ensureSuccessWhenReleasedToPool(Channel channel, Future<Void> released) {
if (released.isSuccess()) {
logger.debug("\n [{}]\n {}\n release succeeded", this, channel);
} else {
logger.debug("\n [{}]\n {}\n release failed due to {}", this, channel, released.cause());
}
}
/** Returns {@code channel} to the pool, logging the result when debug logging is enabled. */
private void releaseToPool(final Channel channel) {
logger.debug("\n [{}]\n {}\n RELEASE", this, channel);
final Future<Void> released = this.channelPool.release(channel);
if (logger.isDebugEnabled()) {
if (released.isDone()) {
ensureSuccessWhenReleasedToPool(channel, released);
} else {
released.addListener(ignored -> ensureSuccessWhenReleasedToPool(channel, released));
}
}
}
/** @throws TransportException if this endpoint has been closed. */
private void throwIfClosed() {
if (this.closed.get()) {
throw new TransportException(lenientFormat("%s is closed", this), new IllegalStateException());
}
}
/** Acquires a channel from the pool and writes the request once acquisition settles. */
private RntbdRequestRecord write(final RntbdRequestArgs requestArgs) {
final RntbdRequestRecord requestRecord = new AsyncRntbdRequestRecord(requestArgs, this.requestTimer);
requestRecord.channelAcquisitionContextEnabled(this.channelAcquisitionContextEnabled);
requestRecord.stage(RntbdRequestRecord.Stage.CHANNEL_ACQUISITION_STARTED);
final Future<Channel> connectedChannel = this.channelPool.acquire(requestRecord.getChannelAcquisitionTimeline());
logger.debug("\n [{}]\n {}\n WRITE WHEN CONNECTED {}", this, requestArgs, connectedChannel);
if (connectedChannel.isDone()) {
return writeWhenConnected(requestRecord, connectedChannel);
} else {
connectedChannel.addListener(ignored -> writeWhenConnected(requestRecord, connectedChannel));
}
return requestRecord;
}
/**
 * Completes the write after channel acquisition: on success, releases the channel and
 * pipelines the request; on cancel, cancels the record; on failure, completes the record
 * exceptionally with a {@link GoneException}.
 */
private RntbdRequestRecord writeWhenConnected(
final RntbdRequestRecord requestRecord, final Future<? super Channel> connected) {
if (connected.isSuccess()) {
final Channel channel = (Channel) connected.getNow();
assert channel != null : "impossible";
this.releaseToPool(channel);
requestRecord.channelTaskQueueLength(RntbdUtils.tryGetExecutorTaskQueueSize(channel.eventLoop()));
channel.write(requestRecord.stage(RntbdRequestRecord.Stage.PIPELINED));
return requestRecord;
}
final RntbdRequestArgs requestArgs = requestRecord.args();
final UUID activityId = requestArgs.activityId();
final Throwable cause = connected.cause();
if (connected.isCancelled()) {
logger.debug("\n [{}]\n {}\n write cancelled: {}", this, requestArgs, cause);
requestRecord.cancel(true);
} else {
logger.debug("\n [{}]\n {}\n write failed due to {} ", this, requestArgs, cause);
final String reason = cause.toString();
final GoneException goneException = new GoneException(
lenientFormat("failed to establish connection to %s due to %s", this.remoteAddress, reason),
cause instanceof Exception ? (Exception) cause : new IOException(reason, cause),
ImmutableMap.of(HttpHeaders.ACTIVITY_ID, activityId.toString()),
requestArgs.replicaPath()
);
BridgeInternal.setRequestHeaders(goneException, requestArgs.serviceRequest().getHeaders());
requestRecord.completeExceptionally(goneException);
}
return requestRecord;
}
/** Jackson serializer rendering an endpoint plus a transport-client summary as JSON. */
static final class JsonSerializer extends StdSerializer<RntbdServiceEndpoint> {
private static final long serialVersionUID = -5764954918168771152L;
public JsonSerializer() {
super(RntbdServiceEndpoint.class);
}
@Override
public void serialize(RntbdServiceEndpoint value, JsonGenerator generator, SerializerProvider provider)
throws IOException {
final RntbdTransportClient transportClient = value.provider.transportClient;
generator.writeStartObject();
generator.writeNumberField("id", value.id);
generator.writeBooleanField("closed", value.isClosed());
generator.writeNumberField("concurrentRequests", value.concurrentRequests());
generator.writeStringField("remoteAddress", value.remoteAddress.toString());
generator.writeObjectField("channelPool", value.channelPool);
generator.writeObjectFieldStart("transportClient");
generator.writeNumberField("id", transportClient.id());
generator.writeBooleanField("closed", transportClient.isClosed());
generator.writeNumberField("endpointCount", transportClient.endpointCount());
generator.writeNumberField("endpointEvictionCount", transportClient.endpointEvictionCount());
generator.writeEndObject();
generator.writeEndObject();
}
}
/**
 * Creates and caches one {@link RntbdServiceEndpoint} per remote authority, sharing the
 * event loop group, request timer and monitoring provider across all endpoints.
 */
public static final class Provider implements RntbdEndpoint.Provider {
private static final Logger logger = LoggerFactory.getLogger(Provider.class);
private final AtomicBoolean closed;
private final Config config;
// Keyed by physical address authority (host:port).
private final ConcurrentHashMap<String, RntbdEndpoint> endpoints;
private final EventLoopGroup eventLoopGroup;
private final AtomicInteger evictions;
private final RntbdEndpointMonitoringProvider monitoring;
private final RntbdRequestTimer requestTimer;
private final RntbdTransportClient transportClient;
private final IAddressResolver addressResolver;
/** Builds a provider for the given transport client; {@code addressResolver} may be null. */
public Provider(
final RntbdTransportClient transportClient,
final Options options,
final SslContext sslContext,
final IAddressResolver addressResolver) {
checkNotNull(transportClient, "expected non-null provider");
checkNotNull(options, "expected non-null options");
checkNotNull(sslContext, "expected non-null sslContext");
// Wire-level logging only when debug logging is enabled.
final LogLevel wireLogLevel;
if (logger.isDebugEnabled()) {
wireLogLevel = LogLevel.TRACE;
} else {
wireLogLevel = null;
}
this.addressResolver = addressResolver;
this.transportClient = transportClient;
this.config = new Config(options, sslContext, wireLogLevel);
this.requestTimer = new RntbdRequestTimer(
config.requestTimeoutInNanos(),
config.requestTimerResolutionInNanos());
this.eventLoopGroup = this.getEventLoopGroup(options);
this.endpoints = new ConcurrentHashMap<>();
this.evictions = new AtomicInteger();
this.closed = new AtomicBoolean();
this.monitoring = new RntbdEndpointMonitoringProvider(this);
this.monitoring.init();
}
/**
 * Builds the shared event loop group. The concrete loop implementation is selected by
 * RntbdLoopNativeDetector based on the {@code preferTcpNative} option, and the loop's name
 * is embedded in the I/O thread names.
 */
private EventLoopGroup getEventLoopGroup(Options options) {
checkNotNull(options, "expected non-null options");
RntbdLoop rntbdEventLoop = RntbdLoopNativeDetector.getRntbdLoop(options.preferTcpNative());
DefaultThreadFactory threadFactory =
new DefaultThreadFactory(
"cosmos-rntbd-" + rntbdEventLoop.getName(),
true,
options.ioThreadPriority());
return rntbdEventLoop.newEventLoopGroup(options.threadCount(), threadFactory);
}
/** Closes all endpoints once, then shuts the event loop group down gracefully. */
@Override
public void close() {
if (this.closed.compareAndSet(false, true)) {
this.monitoring.close();
for (final RntbdEndpoint endpoint : this.endpoints.values()) {
endpoint.close();
}
this.eventLoopGroup.shutdownGracefully(QUIET_PERIOD, this.config.shutdownTimeoutInNanos(), NANOSECONDS)
.addListener(future -> {
this.requestTimer.close();
if (future.isSuccess()) {
logger.debug("\n [{}]\n closed endpoints", this);
return;
}
logger.error("\n [{}]\n failed to close endpoints due to ", this, future.cause());
});
return;
}
logger.debug("\n [{}]\n already closed", this);
}
/** @return the shared transport configuration. */
@Override
public Config config() {
return this.config;
}
/** @return number of cached endpoints. */
@Override
public int count() {
return this.endpoints.size();
}
/** @return number of endpoints evicted so far. */
@Override
public int evictions() {
return this.evictions.get();
}
/** Returns (creating if absent) the endpoint for {@code physicalAddress}'s authority. */
@Override
public RntbdEndpoint get(final URI physicalAddress) {
return endpoints.computeIfAbsent(physicalAddress.getAuthority(), authority -> new RntbdServiceEndpoint(
this,
this.config,
this.eventLoopGroup,
this.requestTimer,
physicalAddress));
}
/** @return the address resolver; may be null. */
@Override
public IAddressResolver getAddressResolver() {
return this.addressResolver;
}
/** @return a stream over cached endpoints. */
@Override
public Stream<RntbdEndpoint> list() {
return this.endpoints.values().stream();
}
/** Removes {@code endpoint} from the cache, counting the eviction if it was present. */
private void evict(final RntbdEndpoint endpoint) {
if (this.endpoints.remove(endpoint.serverKey().getAuthority()) != null) {
this.evictions.incrementAndGet();
}
}
}
/**
 * Periodically logs per-endpoint pool statistics on a shared low-priority executor; warns
 * when queue/connection thresholds are exceeded, otherwise logs at debug level.
 */
public static class RntbdEndpointMonitoringProvider implements AutoCloseable {
private final Logger logger = LoggerFactory.getLogger(RntbdEndpointMonitoringProvider.class);
// Single shared daemon executor for all monitoring instances.
private static final EventExecutor monitoringRntbdChannelPool = new DefaultEventExecutor(new RntbdThreadFactory(
"monitoring-rntbd-endpoints",
true,
Thread.MIN_PRIORITY));
private static final Duration MONITORING_PERIOD = Duration.ofSeconds(60);
private final Provider provider;
// Queue-size threshold that promotes the log record to warn level.
private final static int MAX_TASK_LIMIT = 5_000;
private ScheduledFuture<?> future;
RntbdEndpointMonitoringProvider(Provider provider) {
this.provider = provider;
}
/** Starts the fixed-rate monitoring task (first run immediately). */
synchronized void init() {
logger.info("Starting RntbdClientChannelPoolMonitoringProvider ...");
this.future = RntbdEndpointMonitoringProvider.monitoringRntbdChannelPool.scheduleAtFixedRate(() -> {
logAllPools();
}, 0, MONITORING_PERIOD.toMillis(), TimeUnit.MILLISECONDS);
}
/** Cancels the monitoring task without interrupting an in-progress run. */
@Override
public synchronized void close() {
logger.info("Shutting down RntbdClientChannelPoolMonitoringProvider ...");
this.future.cancel(false);
this.future = null;
}
/** Logs statistics for every cached endpoint; never lets an exception escape the timer. */
synchronized void logAllPools() {
try {
logger.debug("Total number of RntbdClientChannelPool [{}].", provider.endpoints.size());
for (RntbdEndpoint endpoint : provider.endpoints.values()) {
logEndpoint(endpoint);
}
} catch (Exception e) {
logger.error("monitoring unexpected failure", e);
}
}
/** Chooses warn vs debug level based on the endpoint's thresholds. */
private void logEndpoint(RntbdEndpoint endpoint) {
if (this.logger.isWarnEnabled() &&
(endpoint.executorTaskQueueMetrics() > MAX_TASK_LIMIT ||
endpoint.requestQueueLength() > MAX_TASK_LIMIT ||
endpoint.gettingEstablishedConnectionsMetrics() > 0 ||
endpoint.channelsMetrics() > endpoint.maxChannels())) {
logger.warn("RntbdEndpoint Identifier {}, Stat {}", getPoolId(endpoint), getPoolStat(endpoint));
} else if (this.logger.isDebugEnabled()) {
logger.debug("RntbdEndpoint Identifier {}, Stat {}", getPoolId(endpoint), getPoolStat(endpoint));
}
}
/** Formats the endpoint's live statistics for logging. */
private String getPoolStat(RntbdEndpoint endpoint) {
return "[ "
+ "poolTaskExecutorSize " + endpoint.executorTaskQueueMetrics()
+ ", lastRequestNanoTime " + Instant.now().minusNanos(
System.nanoTime() - endpoint.lastRequestNanoTime())
+ ", connecting " + endpoint.gettingEstablishedConnectionsMetrics()
+ ", acquiredChannel " + endpoint.channelsAcquiredMetric()
+ ", availableChannel " + endpoint.channelsAvailableMetric()
+ ", pendingAcquisitionSize " + endpoint.requestQueueLength()
+ ", closed " + endpoint.isClosed()
+ " ]";
}
/** Formats a stable identifier (id, address, creation time, hash) for the endpoint. */
private String getPoolId(RntbdEndpoint endpoint) {
if (endpoint == null) {
return "null";
}
return "[RntbdEndpoint" +
", id " + endpoint.id() +
", remoteAddress " + endpoint.remoteAddress() +
", creationTime " + endpoint.getCreatedTime() +
", hashCode " + endpoint.hashCode() +
"]";
}
}
} |
do we really need this? I think netty should automatically uses Epoll if available? why do we need explicit code? | private EventLoopGroup getEventLoopGroup(Options options) {
checkNotNull(options, "expected non-null options");
final DefaultThreadFactory threadFactory =
new DefaultThreadFactory(
"cosmos-rntbd-nio",
true,
options.ioThreadPriority());
if (Epoll.isAvailable()) {
return new EpollEventLoopGroup(options.threadCount(), threadFactory);
}
return new NioEventLoopGroup(options.threadCount(), threadFactory);
} | } | private EventLoopGroup getEventLoopGroup(Options options) {
checkNotNull(options, "expected non-null options");
RntbdLoop rntbdEventLoop = RntbdLoopNativeDetector.getRntbdLoop(options.preferTcpNative());
DefaultThreadFactory threadFactory =
new DefaultThreadFactory(
"cosmos-rntbd-" + rntbdEventLoop.getName(),
true,
options.ioThreadPriority());
return rntbdEventLoop.newEventLoopGroup(options.threadCount(), threadFactory);
} | class Provider implements RntbdEndpoint.Provider {
private static final Logger logger = LoggerFactory.getLogger(Provider.class);
private final AtomicBoolean closed;
private final Config config;
// Endpoint cache keyed by physical address authority (host:port).
private final ConcurrentHashMap<String, RntbdEndpoint> endpoints;
private final EventLoopGroup eventLoopGroup;
private final AtomicInteger evictions;
private final RntbdEndpointMonitoringProvider monitoring;
private final RntbdRequestTimer requestTimer;
private final RntbdTransportClient transportClient;
private final IAddressResolver addressResolver;
/** Builds a provider for the given transport client; {@code addressResolver} may be null. */
public Provider(
final RntbdTransportClient transportClient,
final Options options,
final SslContext sslContext,
final IAddressResolver addressResolver) {
checkNotNull(transportClient, "expected non-null provider");
checkNotNull(options, "expected non-null options");
checkNotNull(sslContext, "expected non-null sslContext");
// Wire-level logging only when debug logging is enabled.
final LogLevel wireLogLevel;
if (logger.isDebugEnabled()) {
wireLogLevel = LogLevel.TRACE;
} else {
wireLogLevel = null;
}
this.addressResolver = addressResolver;
this.transportClient = transportClient;
this.config = new Config(options, sslContext, wireLogLevel);
this.requestTimer = new RntbdRequestTimer(
config.requestTimeoutInNanos(),
config.requestTimerResolutionInNanos());
this.eventLoopGroup = this.getEventLoopGroup(options);
this.endpoints = new ConcurrentHashMap<>();
this.evictions = new AtomicInteger();
this.closed = new AtomicBoolean();
this.monitoring = new RntbdEndpointMonitoringProvider(this);
this.monitoring.init();
}
/** Closes all cached endpoints once, then shuts the event loop group down gracefully. */
@Override
public void close() {
if (this.closed.compareAndSet(false, true)) {
this.monitoring.close();
for (final RntbdEndpoint endpoint : this.endpoints.values()) {
endpoint.close();
}
this.eventLoopGroup.shutdownGracefully(QUIET_PERIOD, this.config.shutdownTimeoutInNanos(), NANOSECONDS)
.addListener(future -> {
this.requestTimer.close();
if (future.isSuccess()) {
logger.debug("\n [{}]\n closed endpoints", this);
return;
}
logger.error("\n [{}]\n failed to close endpoints due to ", this, future.cause());
});
return;
}
logger.debug("\n [{}]\n already closed", this);
}
/** @return the shared transport configuration. */
@Override
public Config config() {
return this.config;
}
/** @return number of cached endpoints. */
@Override
public int count() {
return this.endpoints.size();
}
/** @return number of endpoints evicted so far. */
@Override
public int evictions() {
return this.evictions.get();
}
/** Returns (creating if absent) the endpoint for {@code physicalAddress}'s authority. */
@Override
public RntbdEndpoint get(final URI physicalAddress) {
return endpoints.computeIfAbsent(physicalAddress.getAuthority(), authority -> new RntbdServiceEndpoint(
this,
this.config,
this.eventLoopGroup,
this.requestTimer,
physicalAddress));
}
/** @return the address resolver used for rediscovery; may be null. */
@Override
public IAddressResolver getAddressResolver() {
return this.addressResolver;
}
/** @return a stream over the currently cached endpoints. */
@Override
public Stream<RntbdEndpoint> list() {
return this.endpoints.values().stream();
}
/** Removes {@code endpoint} from the cache, counting the eviction if it was present. */
private void evict(final RntbdEndpoint endpoint) {
if (this.endpoints.remove(endpoint.serverKey().getAuthority()) != null) {
this.evictions.incrementAndGet();
}
}
} | class Provider implements RntbdEndpoint.Provider {
private static final Logger logger = LoggerFactory.getLogger(Provider.class);
private final AtomicBoolean closed;
private final Config config;
private final ConcurrentHashMap<String, RntbdEndpoint> endpoints;
private final EventLoopGroup eventLoopGroup;
private final AtomicInteger evictions;
private final RntbdEndpointMonitoringProvider monitoring;
private final RntbdRequestTimer requestTimer;
private final RntbdTransportClient transportClient;
private final IAddressResolver addressResolver;
public Provider(
final RntbdTransportClient transportClient,
final Options options,
final SslContext sslContext,
final IAddressResolver addressResolver) {
checkNotNull(transportClient, "expected non-null provider");
checkNotNull(options, "expected non-null options");
checkNotNull(sslContext, "expected non-null sslContext");
final LogLevel wireLogLevel;
if (logger.isDebugEnabled()) {
wireLogLevel = LogLevel.TRACE;
} else {
wireLogLevel = null;
}
this.addressResolver = addressResolver;
this.transportClient = transportClient;
this.config = new Config(options, sslContext, wireLogLevel);
this.requestTimer = new RntbdRequestTimer(
config.requestTimeoutInNanos(),
config.requestTimerResolutionInNanos());
this.eventLoopGroup = this.getEventLoopGroup(options);
this.endpoints = new ConcurrentHashMap<>();
this.evictions = new AtomicInteger();
this.closed = new AtomicBoolean();
this.monitoring = new RntbdEndpointMonitoringProvider(this);
this.monitoring.init();
}
@Override
public void close() {
if (this.closed.compareAndSet(false, true)) {
this.monitoring.close();
for (final RntbdEndpoint endpoint : this.endpoints.values()) {
endpoint.close();
}
this.eventLoopGroup.shutdownGracefully(QUIET_PERIOD, this.config.shutdownTimeoutInNanos(), NANOSECONDS)
.addListener(future -> {
this.requestTimer.close();
if (future.isSuccess()) {
logger.debug("\n [{}]\n closed endpoints", this);
return;
}
logger.error("\n [{}]\n failed to close endpoints due to ", this, future.cause());
});
return;
}
logger.debug("\n [{}]\n already closed", this);
}
@Override
public Config config() {
return this.config;
}
@Override
public int count() {
return this.endpoints.size();
}
@Override
public int evictions() {
return this.evictions.get();
}
@Override
public RntbdEndpoint get(final URI physicalAddress) {
return endpoints.computeIfAbsent(physicalAddress.getAuthority(), authority -> new RntbdServiceEndpoint(
this,
this.config,
this.eventLoopGroup,
this.requestTimer,
physicalAddress));
}
@Override
public IAddressResolver getAddressResolver() {
return this.addressResolver;
}
@Override
public Stream<RntbdEndpoint> list() {
return this.endpoints.values().stream();
}
private void evict(final RntbdEndpoint endpoint) {
if (this.endpoints.remove(endpoint.serverKey().getAuthority()) != null) {
this.evictions.incrementAndGet();
}
}
} |
it does not seems so, since we hard coded the event loop type: NioEventLoopGroup and NioSocketChannel, it does not pick Epoll automatically. | private EventLoopGroup getEventLoopGroup(Options options) {
checkNotNull(options, "expected non-null options");
final DefaultThreadFactory threadFactory =
new DefaultThreadFactory(
"cosmos-rntbd-nio",
true,
options.ioThreadPriority());
if (Epoll.isAvailable()) {
return new EpollEventLoopGroup(options.threadCount(), threadFactory);
}
return new NioEventLoopGroup(options.threadCount(), threadFactory);
} | } | private EventLoopGroup getEventLoopGroup(Options options) {
checkNotNull(options, "expected non-null options");
RntbdLoop rntbdEventLoop = RntbdLoopNativeDetector.getRntbdLoop(options.preferTcpNative());
DefaultThreadFactory threadFactory =
new DefaultThreadFactory(
"cosmos-rntbd-" + rntbdEventLoop.getName(),
true,
options.ioThreadPriority());
return rntbdEventLoop.newEventLoopGroup(options.threadCount(), threadFactory);
} | class Provider implements RntbdEndpoint.Provider {
private static final Logger logger = LoggerFactory.getLogger(Provider.class);
private final AtomicBoolean closed;
private final Config config;
private final ConcurrentHashMap<String, RntbdEndpoint> endpoints;
private final EventLoopGroup eventLoopGroup;
private final AtomicInteger evictions;
private final RntbdEndpointMonitoringProvider monitoring;
private final RntbdRequestTimer requestTimer;
private final RntbdTransportClient transportClient;
private final IAddressResolver addressResolver;
public Provider(
final RntbdTransportClient transportClient,
final Options options,
final SslContext sslContext,
final IAddressResolver addressResolver) {
checkNotNull(transportClient, "expected non-null provider");
checkNotNull(options, "expected non-null options");
checkNotNull(sslContext, "expected non-null sslContext");
final LogLevel wireLogLevel;
if (logger.isDebugEnabled()) {
wireLogLevel = LogLevel.TRACE;
} else {
wireLogLevel = null;
}
this.addressResolver = addressResolver;
this.transportClient = transportClient;
this.config = new Config(options, sslContext, wireLogLevel);
this.requestTimer = new RntbdRequestTimer(
config.requestTimeoutInNanos(),
config.requestTimerResolutionInNanos());
this.eventLoopGroup = this.getEventLoopGroup(options);
this.endpoints = new ConcurrentHashMap<>();
this.evictions = new AtomicInteger();
this.closed = new AtomicBoolean();
this.monitoring = new RntbdEndpointMonitoringProvider(this);
this.monitoring.init();
}
@Override
public void close() {
if (this.closed.compareAndSet(false, true)) {
this.monitoring.close();
for (final RntbdEndpoint endpoint : this.endpoints.values()) {
endpoint.close();
}
this.eventLoopGroup.shutdownGracefully(QUIET_PERIOD, this.config.shutdownTimeoutInNanos(), NANOSECONDS)
.addListener(future -> {
this.requestTimer.close();
if (future.isSuccess()) {
logger.debug("\n [{}]\n closed endpoints", this);
return;
}
logger.error("\n [{}]\n failed to close endpoints due to ", this, future.cause());
});
return;
}
logger.debug("\n [{}]\n already closed", this);
}
@Override
public Config config() {
return this.config;
}
@Override
public int count() {
return this.endpoints.size();
}
@Override
public int evictions() {
return this.evictions.get();
}
@Override
public RntbdEndpoint get(final URI physicalAddress) {
return endpoints.computeIfAbsent(physicalAddress.getAuthority(), authority -> new RntbdServiceEndpoint(
this,
this.config,
this.eventLoopGroup,
this.requestTimer,
physicalAddress));
}
@Override
public IAddressResolver getAddressResolver() {
return this.addressResolver;
}
@Override
public Stream<RntbdEndpoint> list() {
return this.endpoints.values().stream();
}
private void evict(final RntbdEndpoint endpoint) {
if (this.endpoints.remove(endpoint.serverKey().getAuthority()) != null) {
this.evictions.incrementAndGet();
}
}
} | class Provider implements RntbdEndpoint.Provider {
private static final Logger logger = LoggerFactory.getLogger(Provider.class);
private final AtomicBoolean closed;
private final Config config;
private final ConcurrentHashMap<String, RntbdEndpoint> endpoints;
private final EventLoopGroup eventLoopGroup;
private final AtomicInteger evictions;
private final RntbdEndpointMonitoringProvider monitoring;
private final RntbdRequestTimer requestTimer;
private final RntbdTransportClient transportClient;
private final IAddressResolver addressResolver;
public Provider(
final RntbdTransportClient transportClient,
final Options options,
final SslContext sslContext,
final IAddressResolver addressResolver) {
checkNotNull(transportClient, "expected non-null provider");
checkNotNull(options, "expected non-null options");
checkNotNull(sslContext, "expected non-null sslContext");
final LogLevel wireLogLevel;
if (logger.isDebugEnabled()) {
wireLogLevel = LogLevel.TRACE;
} else {
wireLogLevel = null;
}
this.addressResolver = addressResolver;
this.transportClient = transportClient;
this.config = new Config(options, sslContext, wireLogLevel);
this.requestTimer = new RntbdRequestTimer(
config.requestTimeoutInNanos(),
config.requestTimerResolutionInNanos());
this.eventLoopGroup = this.getEventLoopGroup(options);
this.endpoints = new ConcurrentHashMap<>();
this.evictions = new AtomicInteger();
this.closed = new AtomicBoolean();
this.monitoring = new RntbdEndpointMonitoringProvider(this);
this.monitoring.init();
}
@Override
public void close() {
if (this.closed.compareAndSet(false, true)) {
this.monitoring.close();
for (final RntbdEndpoint endpoint : this.endpoints.values()) {
endpoint.close();
}
this.eventLoopGroup.shutdownGracefully(QUIET_PERIOD, this.config.shutdownTimeoutInNanos(), NANOSECONDS)
.addListener(future -> {
this.requestTimer.close();
if (future.isSuccess()) {
logger.debug("\n [{}]\n closed endpoints", this);
return;
}
logger.error("\n [{}]\n failed to close endpoints due to ", this, future.cause());
});
return;
}
logger.debug("\n [{}]\n already closed", this);
}
@Override
public Config config() {
return this.config;
}
@Override
public int count() {
return this.endpoints.size();
}
@Override
public int evictions() {
return this.evictions.get();
}
@Override
public RntbdEndpoint get(final URI physicalAddress) {
return endpoints.computeIfAbsent(physicalAddress.getAuthority(), authority -> new RntbdServiceEndpoint(
this,
this.config,
this.eventLoopGroup,
this.requestTimer,
physicalAddress));
}
@Override
public IAddressResolver getAddressResolver() {
return this.addressResolver;
}
@Override
public Stream<RntbdEndpoint> list() {
return this.endpoints.values().stream();
}
private void evict(final RntbdEndpoint endpoint) {
if (this.endpoints.remove(endpoint.serverKey().getAuthority()) != null) {
this.evictions.incrementAndGet();
}
}
} |
yes, we can, but want to be consistent with the eventLoopGroup type. It should be consistent when we use EventLoopGroup and SocketChannel: NioEventLoopGroup -> NioSocketChannel EpollEventLoopGroup - > EpollSocketChannel | private Bootstrap getBootStrap(EventLoopGroup eventLoopGroup, Config config) {
checkNotNull(eventLoopGroup, "expected non-null eventLoopGroup");
checkNotNull(config, "expected non-null config");
Bootstrap bootstrap = new Bootstrap()
.group(eventLoopGroup)
.option(ChannelOption.ALLOCATOR, config.allocator())
.option(ChannelOption.AUTO_READ, true)
.option(ChannelOption.CONNECT_TIMEOUT_MILLIS, config.connectTimeoutInMillis())
.option(ChannelOption.RCVBUF_ALLOCATOR, receiveBufferAllocator)
.option(ChannelOption.SO_KEEPALIVE, true)
.remoteAddress(this.serverKey.getHost(), this.serverKey.getPort());
if (eventLoopGroup instanceof EpollEventLoopGroup) {
bootstrap.channel(EpollSocketChannel.class)
.option(EpollChannelOption.TCP_KEEPINTVL, 1)
.option(EpollChannelOption.TCP_KEEPIDLE, 30);
} else {
bootstrap.channel(NioSocketChannel.class);
}
return bootstrap;
} | if (eventLoopGroup instanceof EpollEventLoopGroup) { | private Bootstrap getBootStrap(EventLoopGroup eventLoopGroup, Config config) {
checkNotNull(eventLoopGroup, "expected non-null eventLoopGroup");
checkNotNull(config, "expected non-null config");
RntbdLoop rntbdLoop = RntbdLoopNativeDetector.getRntbdLoop(config.preferTcpNative());
Bootstrap bootstrap = new Bootstrap()
.group(eventLoopGroup)
.channel(rntbdLoop.getChannelClass())
.option(ChannelOption.ALLOCATOR, config.allocator())
.option(ChannelOption.AUTO_READ, true)
.option(ChannelOption.CONNECT_TIMEOUT_MILLIS, config.connectTimeoutInMillis())
.option(ChannelOption.RCVBUF_ALLOCATOR, receiveBufferAllocator)
.option(ChannelOption.SO_KEEPALIVE, true)
.remoteAddress(this.serverKey.getHost(), this.serverKey.getPort());
if (rntbdLoop instanceof RntbdLoopEpoll) {
bootstrap
.option(EpollChannelOption.TCP_KEEPINTVL, config.tcpKeepIntvl())
.option(EpollChannelOption.TCP_KEEPIDLE, config.tcpKeepIdle());
}
return bootstrap;
} | class RntbdServiceEndpoint implements RntbdEndpoint {
private static final String TAG_NAME = RntbdServiceEndpoint.class.getSimpleName();
private static final long QUIET_PERIOD = 2_000_000_000L;
private static final AtomicLong instanceCount = new AtomicLong();
private static final Logger logger = LoggerFactory.getLogger(RntbdServiceEndpoint.class);
private static final AdaptiveRecvByteBufAllocator receiveBufferAllocator = new AdaptiveRecvByteBufAllocator();
private final RntbdClientChannelPool channelPool;
private final AtomicBoolean closed;
private final AtomicInteger concurrentRequests;
private final long id;
private final AtomicLong lastRequestNanoTime;
private final AtomicLong lastSuccessfulRequestNanoTime;
private final Instant createdTime;
private final RntbdMetrics metrics;
private final Provider provider;
private final URI serverKey;
private final SocketAddress remoteAddress;
private final RntbdRequestTimer requestTimer;
private final Tag tag;
private final int maxConcurrentRequests;
private final boolean channelAcquisitionContextEnabled;
private final RntbdConnectionStateListener connectionStateListener;
private RntbdServiceEndpoint(
final Provider provider,
final Config config,
final EventLoopGroup group,
final RntbdRequestTimer timer,
final URI physicalAddress) {
this.serverKey = RntbdUtils.getServerKey(physicalAddress);
final Bootstrap bootstrap = this.getBootStrap(group, config);
this.createdTime = Instant.now();
this.channelPool = new RntbdClientChannelPool(this, bootstrap, config);
this.remoteAddress = bootstrap.config().remoteAddress();
this.concurrentRequests = new AtomicInteger();
this.lastRequestNanoTime = new AtomicLong(System.nanoTime());
this.lastSuccessfulRequestNanoTime = new AtomicLong(System.nanoTime());
this.closed = new AtomicBoolean();
this.requestTimer = timer;
this.tag = Tag.of(TAG_NAME, RntbdMetrics.escape(this.remoteAddress.toString()));
this.id = instanceCount.incrementAndGet();
this.provider = provider;
this.metrics = new RntbdMetrics(provider.transportClient, this);
this.maxConcurrentRequests = config.maxConcurrentRequestsPerEndpoint();
this.connectionStateListener = this.provider.addressResolver != null && config.isConnectionEndpointRediscoveryEnabled()
? new RntbdConnectionStateListener(this.provider.addressResolver, this)
: null;
this.channelAcquisitionContextEnabled = config.isChannelAcquisitionContextEnabled();
}
/**
* @return approximate number of acquired channels.
*/
@Override
public int channelsAcquiredMetric() {
return this.channelPool.channelsAcquiredMetrics();
}
/**
* @return approximate number of available channels.
*/
@Override
public int channelsAvailableMetric() {
return this.channelPool.channelsAvailableMetrics();
}
@Override
public int concurrentRequests() {
return this.concurrentRequests.get();
}
@Override
public int gettingEstablishedConnectionsMetrics() {
return this.channelPool.attemptingToConnectMetrics();
}
@Override
public long id() {
return this.id;
}
@Override
public boolean isClosed() {
return this.closed.get();
}
@Override
public int maxChannels() {
return this.channelPool.channels(true);
}
public long lastRequestNanoTime() {
return this.lastRequestNanoTime.get();
}
@Override
public long lastSuccessfulRequestNanoTime() {
return this.lastSuccessfulRequestNanoTime.get();
}
@Override
public int channelsMetrics() {
return this.channelPool.channels(true);
}
@Override
public int executorTaskQueueMetrics() {
return this.channelPool.executorTaskQueueMetrics();
}
public Instant getCreatedTime() {
return this.createdTime;
}
@Override
public SocketAddress remoteAddress() {
return this.remoteAddress;
}
@Override
public URI serverKey() { return this.serverKey; }
@Override
public int requestQueueLength() {
return this.channelPool.requestQueueLength();
}
@Override
public Tag tag() {
return this.tag;
}
@Override
public long usedDirectMemory() {
return this.channelPool.usedDirectMemory();
}
@Override
public long usedHeapMemory() {
return this.channelPool.usedHeapMemory();
}
@Override
public void close() {
if (this.closed.compareAndSet(false, true)) {
this.provider.evict(this);
this.channelPool.close();
}
}
public RntbdRequestRecord request(final RntbdRequestArgs args) {
this.throwIfClosed();
int concurrentRequestSnapshot = this.concurrentRequests.incrementAndGet();
RntbdEndpointStatistics stat = endpointMetricsSnapshot(concurrentRequestSnapshot);
if (concurrentRequestSnapshot > this.maxConcurrentRequests) {
try {
FailFastRntbdRequestRecord requestRecord = FailFastRntbdRequestRecord.createAndFailFast(
args,
concurrentRequestSnapshot,
metrics,
remoteAddress);
requestRecord.serviceEndpointStatistics(stat);
return requestRecord;
}
finally {
concurrentRequests.decrementAndGet();
}
}
this.lastRequestNanoTime.set(args.nanoTimeCreated());
final RntbdRequestRecord record = this.write(args);
record.serviceEndpointStatistics(stat);
record.whenComplete((response, error) -> {
this.concurrentRequests.decrementAndGet();
this.metrics.markComplete(record);
onResponse(error, record);
});
return record;
}
private void onResponse(Throwable exception, RntbdRequestRecord record) {
if (exception == null) {
this.lastSuccessfulRequestNanoTime.set(System.nanoTime());
return;
}
if (this.connectionStateListener != null) {
this.connectionStateListener.onException(record.args().serviceRequest(), exception);
}
if (exception instanceof CosmosException) {
CosmosException cosmosException = (CosmosException) exception;
switch (cosmosException.getStatusCode()) {
case HttpConstants.StatusCodes.CONFLICT:
case HttpConstants.StatusCodes.NOTFOUND:
this.lastSuccessfulRequestNanoTime.set(System.nanoTime());
return;
default:
return;
}
}
}
private RntbdEndpointStatistics endpointMetricsSnapshot(int concurrentRequestSnapshot) {
RntbdEndpointStatistics stats = new RntbdEndpointStatistics()
.availableChannels(this.channelsAvailableMetric())
.acquiredChannels(this.channelsAcquiredMetric())
.executorTaskQueueSize(this.executorTaskQueueMetrics())
.lastSuccessfulRequestNanoTime(this.lastSuccessfulRequestNanoTime())
.createdTime(this.createdTime)
.lastRequestNanoTime(this.lastRequestNanoTime())
.closed(this.closed.get())
.inflightRequests(concurrentRequestSnapshot);
return stats;
}
@Override
public String toString() {
return RntbdObjectMapper.toString(this);
}
private void ensureSuccessWhenReleasedToPool(Channel channel, Future<Void> released) {
if (released.isSuccess()) {
logger.debug("\n [{}]\n {}\n release succeeded", this, channel);
} else {
logger.debug("\n [{}]\n {}\n release failed due to {}", this, channel, released.cause());
}
}
private void releaseToPool(final Channel channel) {
logger.debug("\n [{}]\n {}\n RELEASE", this, channel);
final Future<Void> released = this.channelPool.release(channel);
if (logger.isDebugEnabled()) {
if (released.isDone()) {
ensureSuccessWhenReleasedToPool(channel, released);
} else {
released.addListener(ignored -> ensureSuccessWhenReleasedToPool(channel, released));
}
}
}
private void throwIfClosed() {
if (this.closed.get()) {
throw new TransportException(lenientFormat("%s is closed", this), new IllegalStateException());
}
}
private RntbdRequestRecord write(final RntbdRequestArgs requestArgs) {
final RntbdRequestRecord requestRecord = new AsyncRntbdRequestRecord(requestArgs, this.requestTimer);
requestRecord.channelAcquisitionContextEnabled(this.channelAcquisitionContextEnabled);
requestRecord.stage(RntbdRequestRecord.Stage.CHANNEL_ACQUISITION_STARTED);
final Future<Channel> connectedChannel = this.channelPool.acquire(requestRecord.getChannelAcquisitionTimeline());
logger.debug("\n [{}]\n {}\n WRITE WHEN CONNECTED {}", this, requestArgs, connectedChannel);
if (connectedChannel.isDone()) {
return writeWhenConnected(requestRecord, connectedChannel);
} else {
connectedChannel.addListener(ignored -> writeWhenConnected(requestRecord, connectedChannel));
}
return requestRecord;
}
private RntbdRequestRecord writeWhenConnected(
final RntbdRequestRecord requestRecord, final Future<? super Channel> connected) {
if (connected.isSuccess()) {
final Channel channel = (Channel) connected.getNow();
assert channel != null : "impossible";
this.releaseToPool(channel);
requestRecord.channelTaskQueueLength(RntbdUtils.tryGetExecutorTaskQueueSize(channel.eventLoop()));
channel.write(requestRecord.stage(RntbdRequestRecord.Stage.PIPELINED));
return requestRecord;
}
final RntbdRequestArgs requestArgs = requestRecord.args();
final UUID activityId = requestArgs.activityId();
final Throwable cause = connected.cause();
if (connected.isCancelled()) {
logger.debug("\n [{}]\n {}\n write cancelled: {}", this, requestArgs, cause);
requestRecord.cancel(true);
} else {
logger.debug("\n [{}]\n {}\n write failed due to {} ", this, requestArgs, cause);
final String reason = cause.toString();
final GoneException goneException = new GoneException(
lenientFormat("failed to establish connection to %s due to %s", this.remoteAddress, reason),
cause instanceof Exception ? (Exception) cause : new IOException(reason, cause),
ImmutableMap.of(HttpHeaders.ACTIVITY_ID, activityId.toString()),
requestArgs.replicaPath()
);
BridgeInternal.setRequestHeaders(goneException, requestArgs.serviceRequest().getHeaders());
requestRecord.completeExceptionally(goneException);
}
return requestRecord;
}
static final class JsonSerializer extends StdSerializer<RntbdServiceEndpoint> {
private static final long serialVersionUID = -5764954918168771152L;
public JsonSerializer() {
super(RntbdServiceEndpoint.class);
}
@Override
public void serialize(RntbdServiceEndpoint value, JsonGenerator generator, SerializerProvider provider)
throws IOException {
final RntbdTransportClient transportClient = value.provider.transportClient;
generator.writeStartObject();
generator.writeNumberField("id", value.id);
generator.writeBooleanField("closed", value.isClosed());
generator.writeNumberField("concurrentRequests", value.concurrentRequests());
generator.writeStringField("remoteAddress", value.remoteAddress.toString());
generator.writeObjectField("channelPool", value.channelPool);
generator.writeObjectFieldStart("transportClient");
generator.writeNumberField("id", transportClient.id());
generator.writeBooleanField("closed", transportClient.isClosed());
generator.writeNumberField("endpointCount", transportClient.endpointCount());
generator.writeNumberField("endpointEvictionCount", transportClient.endpointEvictionCount());
generator.writeEndObject();
generator.writeEndObject();
}
}
public static final class Provider implements RntbdEndpoint.Provider {
private static final Logger logger = LoggerFactory.getLogger(Provider.class);
private final AtomicBoolean closed;
private final Config config;
private final ConcurrentHashMap<String, RntbdEndpoint> endpoints;
private final EventLoopGroup eventLoopGroup;
private final AtomicInteger evictions;
private final RntbdEndpointMonitoringProvider monitoring;
private final RntbdRequestTimer requestTimer;
private final RntbdTransportClient transportClient;
private final IAddressResolver addressResolver;
public Provider(
final RntbdTransportClient transportClient,
final Options options,
final SslContext sslContext,
final IAddressResolver addressResolver) {
checkNotNull(transportClient, "expected non-null provider");
checkNotNull(options, "expected non-null options");
checkNotNull(sslContext, "expected non-null sslContext");
final LogLevel wireLogLevel;
if (logger.isDebugEnabled()) {
wireLogLevel = LogLevel.TRACE;
} else {
wireLogLevel = null;
}
this.addressResolver = addressResolver;
this.transportClient = transportClient;
this.config = new Config(options, sslContext, wireLogLevel);
this.requestTimer = new RntbdRequestTimer(
config.requestTimeoutInNanos(),
config.requestTimerResolutionInNanos());
this.eventLoopGroup = this.getEventLoopGroup(options);
this.endpoints = new ConcurrentHashMap<>();
this.evictions = new AtomicInteger();
this.closed = new AtomicBoolean();
this.monitoring = new RntbdEndpointMonitoringProvider(this);
this.monitoring.init();
}
private EventLoopGroup getEventLoopGroup(Options options) {
checkNotNull(options, "expected non-null options");
final DefaultThreadFactory threadFactory =
new DefaultThreadFactory(
"cosmos-rntbd-nio",
true,
options.ioThreadPriority());
if (Epoll.isAvailable()) {
return new EpollEventLoopGroup(options.threadCount(), threadFactory);
}
return new NioEventLoopGroup(options.threadCount(), threadFactory);
}
@Override
public void close() {
if (this.closed.compareAndSet(false, true)) {
this.monitoring.close();
for (final RntbdEndpoint endpoint : this.endpoints.values()) {
endpoint.close();
}
this.eventLoopGroup.shutdownGracefully(QUIET_PERIOD, this.config.shutdownTimeoutInNanos(), NANOSECONDS)
.addListener(future -> {
this.requestTimer.close();
if (future.isSuccess()) {
logger.debug("\n [{}]\n closed endpoints", this);
return;
}
logger.error("\n [{}]\n failed to close endpoints due to ", this, future.cause());
});
return;
}
logger.debug("\n [{}]\n already closed", this);
}
@Override
public Config config() {
return this.config;
}
@Override
public int count() {
return this.endpoints.size();
}
@Override
public int evictions() {
return this.evictions.get();
}
@Override
public RntbdEndpoint get(final URI physicalAddress) {
return endpoints.computeIfAbsent(physicalAddress.getAuthority(), authority -> new RntbdServiceEndpoint(
this,
this.config,
this.eventLoopGroup,
this.requestTimer,
physicalAddress));
}
@Override
public IAddressResolver getAddressResolver() {
return this.addressResolver;
}
@Override
public Stream<RntbdEndpoint> list() {
return this.endpoints.values().stream();
}
private void evict(final RntbdEndpoint endpoint) {
if (this.endpoints.remove(endpoint.serverKey().getAuthority()) != null) {
this.evictions.incrementAndGet();
}
}
}
public static class RntbdEndpointMonitoringProvider implements AutoCloseable {
private final Logger logger = LoggerFactory.getLogger(RntbdEndpointMonitoringProvider.class);
private static final EventExecutor monitoringRntbdChannelPool = new DefaultEventExecutor(new RntbdThreadFactory(
"monitoring-rntbd-endpoints",
true,
Thread.MIN_PRIORITY));
private static final Duration MONITORING_PERIOD = Duration.ofSeconds(60);
private final Provider provider;
private final static int MAX_TASK_LIMIT = 5_000;
private ScheduledFuture<?> future;
RntbdEndpointMonitoringProvider(Provider provider) {
this.provider = provider;
}
synchronized void init() {
logger.info("Starting RntbdClientChannelPoolMonitoringProvider ...");
this.future = RntbdEndpointMonitoringProvider.monitoringRntbdChannelPool.scheduleAtFixedRate(() -> {
logAllPools();
}, 0, MONITORING_PERIOD.toMillis(), TimeUnit.MILLISECONDS);
}
@Override
public synchronized void close() {
logger.info("Shutting down RntbdClientChannelPoolMonitoringProvider ...");
this.future.cancel(false);
this.future = null;
}
synchronized void logAllPools() {
try {
logger.debug("Total number of RntbdClientChannelPool [{}].", provider.endpoints.size());
for (RntbdEndpoint endpoint : provider.endpoints.values()) {
logEndpoint(endpoint);
}
} catch (Exception e) {
logger.error("monitoring unexpected failure", e);
}
}
private void logEndpoint(RntbdEndpoint endpoint) {
if (this.logger.isWarnEnabled() &&
(endpoint.executorTaskQueueMetrics() > MAX_TASK_LIMIT ||
endpoint.requestQueueLength() > MAX_TASK_LIMIT ||
endpoint.gettingEstablishedConnectionsMetrics() > 0 ||
endpoint.channelsMetrics() > endpoint.maxChannels())) {
logger.warn("RntbdEndpoint Identifier {}, Stat {}", getPoolId(endpoint), getPoolStat(endpoint));
} else if (this.logger.isDebugEnabled()) {
logger.debug("RntbdEndpoint Identifier {}, Stat {}", getPoolId(endpoint), getPoolStat(endpoint));
}
}
private String getPoolStat(RntbdEndpoint endpoint) {
return "[ "
+ "poolTaskExecutorSize " + endpoint.executorTaskQueueMetrics()
+ ", lastRequestNanoTime " + Instant.now().minusNanos(
System.nanoTime() - endpoint.lastRequestNanoTime())
+ ", connecting " + endpoint.gettingEstablishedConnectionsMetrics()
+ ", acquiredChannel " + endpoint.channelsAcquiredMetric()
+ ", availableChannel " + endpoint.channelsAvailableMetric()
+ ", pendingAcquisitionSize " + endpoint.requestQueueLength()
+ ", closed " + endpoint.isClosed()
+ " ]";
}
private String getPoolId(RntbdEndpoint endpoint) {
if (endpoint == null) {
return "null";
}
return "[RntbdEndpoint" +
", id " + endpoint.id() +
", remoteAddress " + endpoint.remoteAddress() +
", creationTime " + endpoint.getCreatedTime() +
", hashCode " + endpoint.hashCode() +
"]";
}
}
} | class RntbdServiceEndpoint implements RntbdEndpoint {
private static final String TAG_NAME = RntbdServiceEndpoint.class.getSimpleName();
private static final long QUIET_PERIOD = 2_000_000_000L;
private static final AtomicLong instanceCount = new AtomicLong();
private static final Logger logger = LoggerFactory.getLogger(RntbdServiceEndpoint.class);
private static final AdaptiveRecvByteBufAllocator receiveBufferAllocator = new AdaptiveRecvByteBufAllocator();
private final RntbdClientChannelPool channelPool;
private final AtomicBoolean closed;
private final AtomicInteger concurrentRequests;
private final long id;
private final AtomicLong lastRequestNanoTime;
private final AtomicLong lastSuccessfulRequestNanoTime;
private final Instant createdTime;
private final RntbdMetrics metrics;
private final Provider provider;
private final URI serverKey;
private final SocketAddress remoteAddress;
private final RntbdRequestTimer requestTimer;
private final Tag tag;
private final int maxConcurrentRequests;
private final boolean channelAcquisitionContextEnabled;
private final RntbdConnectionStateListener connectionStateListener;
private RntbdServiceEndpoint(
final Provider provider,
final Config config,
final EventLoopGroup group,
final RntbdRequestTimer timer,
final URI physicalAddress) {
this.serverKey = RntbdUtils.getServerKey(physicalAddress);
final Bootstrap bootstrap = this.getBootStrap(group, config);
this.createdTime = Instant.now();
this.channelPool = new RntbdClientChannelPool(this, bootstrap, config);
this.remoteAddress = bootstrap.config().remoteAddress();
this.concurrentRequests = new AtomicInteger();
this.lastRequestNanoTime = new AtomicLong(System.nanoTime());
this.lastSuccessfulRequestNanoTime = new AtomicLong(System.nanoTime());
this.closed = new AtomicBoolean();
this.requestTimer = timer;
this.tag = Tag.of(TAG_NAME, RntbdMetrics.escape(this.remoteAddress.toString()));
this.id = instanceCount.incrementAndGet();
this.provider = provider;
this.metrics = new RntbdMetrics(provider.transportClient, this);
this.maxConcurrentRequests = config.maxConcurrentRequestsPerEndpoint();
this.connectionStateListener = this.provider.addressResolver != null && config.isConnectionEndpointRediscoveryEnabled()
? new RntbdConnectionStateListener(this.provider.addressResolver, this)
: null;
this.channelAcquisitionContextEnabled = config.isChannelAcquisitionContextEnabled();
}
/**
* @return approximate number of acquired channels.
*/
@Override
public int channelsAcquiredMetric() {
return this.channelPool.channelsAcquiredMetrics();
}
/**
* @return approximate number of available channels.
*/
@Override
public int channelsAvailableMetric() {
return this.channelPool.channelsAvailableMetrics();
}
/** @return number of requests currently in flight on this endpoint. */
@Override
public int concurrentRequests() {
return this.concurrentRequests.get();
}
/** @return approximate number of connections currently being established. */
@Override
public int gettingEstablishedConnectionsMetrics() {
return this.channelPool.attemptingToConnectMetrics();
}
@Override
public long id() {
return this.id;
}
@Override
public boolean isClosed() {
return this.closed.get();
}
@Override
public int maxChannels() {
return this.channelPool.channels(true);
}
/** @return nano-time of the most recent request admitted to this endpoint. */
public long lastRequestNanoTime() {
return this.lastRequestNanoTime.get();
}
@Override
public long lastSuccessfulRequestNanoTime() {
return this.lastSuccessfulRequestNanoTime.get();
}
@Override
public int channelsMetrics() {
return this.channelPool.channels(true);
}
@Override
public int executorTaskQueueMetrics() {
return this.channelPool.executorTaskQueueMetrics();
}
public Instant getCreatedTime() {
return this.createdTime;
}
@Override
public SocketAddress remoteAddress() {
return this.remoteAddress;
}
@Override
public URI serverKey() { return this.serverKey; }
@Override
public int requestQueueLength() {
return this.channelPool.requestQueueLength();
}
@Override
public Tag tag() {
return this.tag;
}
@Override
public long usedDirectMemory() {
return this.channelPool.usedDirectMemory();
}
@Override
public long usedHeapMemory() {
return this.channelPool.usedHeapMemory();
}
// Idempotent: evicts this endpoint from its provider and closes the channel pool once.
@Override
public void close() {
if (this.closed.compareAndSet(false, true)) {
this.provider.evict(this);
this.channelPool.close();
}
}
// Admits a request to this endpoint: fail-fast when the in-flight limit is exceeded,
// otherwise write it to a pooled channel and track its completion for metrics/health.
public RntbdRequestRecord request(final RntbdRequestArgs args) {
this.throwIfClosed();
int concurrentRequestSnapshot = this.concurrentRequests.incrementAndGet();
RntbdEndpointStatistics stat = endpointMetricsSnapshot(concurrentRequestSnapshot);
if (concurrentRequestSnapshot > this.maxConcurrentRequests) {
try {
// Over the per-endpoint limit: complete immediately with a throttle-style failure
// instead of queueing more work onto an already saturated endpoint.
FailFastRntbdRequestRecord requestRecord = FailFastRntbdRequestRecord.createAndFailFast(
args,
concurrentRequestSnapshot,
metrics,
remoteAddress);
requestRecord.serviceEndpointStatistics(stat);
return requestRecord;
}
finally {
// The fail-fast record never reaches the whenComplete callback below,
// so undo the increment here.
concurrentRequests.decrementAndGet();
}
}
this.lastRequestNanoTime.set(args.nanoTimeCreated());
final RntbdRequestRecord record = this.write(args);
record.serviceEndpointStatistics(stat);
record.whenComplete((response, error) -> {
this.concurrentRequests.decrementAndGet();
this.metrics.markComplete(record);
onResponse(error, record);
});
return record;
}
/**
 * Post-completion hook for every request: maintains the endpoint health timestamp and
 * forwards transport faults to the connection-state listener.
 *
 * @param exception the failure, or {@code null} when the request succeeded
 * @param record the completed request record
 */
private void onResponse(Throwable exception, RntbdRequestRecord record) {
    if (exception == null) {
        // Success: refresh the timestamp used for endpoint staleness decisions.
        this.lastSuccessfulRequestNanoTime.set(System.nanoTime());
        return;
    }
    if (this.connectionStateListener != null) {
        this.connectionStateListener.onException(record.args().serviceRequest(), exception);
    }
    if (exception instanceof CosmosException) {
        // 404 and 409 prove the replica answered, so they still count toward endpoint health.
        final int statusCode = ((CosmosException) exception).getStatusCode();
        if (statusCode == HttpConstants.StatusCodes.CONFLICT
            || statusCode == HttpConstants.StatusCodes.NOTFOUND) {
            this.lastSuccessfulRequestNanoTime.set(System.nanoTime());
        }
    }
}
// Captures a point-in-time statistics snapshot that is attached to each request record
// for inclusion in client-side diagnostics.
private RntbdEndpointStatistics endpointMetricsSnapshot(int concurrentRequestSnapshot) {
RntbdEndpointStatistics stats = new RntbdEndpointStatistics()
.availableChannels(this.channelsAvailableMetric())
.acquiredChannels(this.channelsAcquiredMetric())
.executorTaskQueueSize(this.executorTaskQueueMetrics())
.lastSuccessfulRequestNanoTime(this.lastSuccessfulRequestNanoTime())
.createdTime(this.createdTime)
.lastRequestNanoTime(this.lastRequestNanoTime())
.closed(this.closed.get())
.inflightRequests(concurrentRequestSnapshot);
return stats;
}
@Override
public String toString() {
return RntbdObjectMapper.toString(this);
}
// Debug-only helper: logs the outcome of returning a channel to the pool.
private void ensureSuccessWhenReleasedToPool(Channel channel, Future<Void> released) {
if (released.isSuccess()) {
logger.debug("\n [{}]\n {}\n release succeeded", this, channel);
} else {
logger.debug("\n [{}]\n {}\n release failed due to {}", this, channel, released.cause());
}
}
// Returns a channel to the pool; release outcome is only inspected when debug logging is on.
private void releaseToPool(final Channel channel) {
logger.debug("\n [{}]\n {}\n RELEASE", this, channel);
final Future<Void> released = this.channelPool.release(channel);
if (logger.isDebugEnabled()) {
if (released.isDone()) {
ensureSuccessWhenReleasedToPool(channel, released);
} else {
released.addListener(ignored -> ensureSuccessWhenReleasedToPool(channel, released));
}
}
}
// Rejects new work once close() has been called.
private void throwIfClosed() {
if (this.closed.get()) {
throw new TransportException(lenientFormat("%s is closed", this), new IllegalStateException());
}
}
// Acquires a channel from the pool and pipelines the request onto it, either inline
// (when acquisition completed synchronously) or from the acquisition listener.
private RntbdRequestRecord write(final RntbdRequestArgs requestArgs) {
final RntbdRequestRecord requestRecord = new AsyncRntbdRequestRecord(requestArgs, this.requestTimer);
requestRecord.channelAcquisitionContextEnabled(this.channelAcquisitionContextEnabled);
requestRecord.stage(RntbdRequestRecord.Stage.CHANNEL_ACQUISITION_STARTED);
final Future<Channel> connectedChannel = this.channelPool.acquire(requestRecord.getChannelAcquisitionTimeline());
logger.debug("\n [{}]\n {}\n WRITE WHEN CONNECTED {}", this, requestArgs, connectedChannel);
if (connectedChannel.isDone()) {
return writeWhenConnected(requestRecord, connectedChannel);
} else {
connectedChannel.addListener(ignored -> writeWhenConnected(requestRecord, connectedChannel));
}
return requestRecord;
}
// Completes channel acquisition: on success the channel is released back to the pool
// (writes are pipelined, the channel stays shared) and the request is written; on
// failure the record is cancelled or completed with a GoneException so the caller's
// retry policy can react.
private RntbdRequestRecord writeWhenConnected(
final RntbdRequestRecord requestRecord, final Future<? super Channel> connected) {
if (connected.isSuccess()) {
final Channel channel = (Channel) connected.getNow();
assert channel != null : "impossible";
this.releaseToPool(channel);
requestRecord.channelTaskQueueLength(RntbdUtils.tryGetExecutorTaskQueueSize(channel.eventLoop()));
channel.write(requestRecord.stage(RntbdRequestRecord.Stage.PIPELINED));
return requestRecord;
}
final RntbdRequestArgs requestArgs = requestRecord.args();
final UUID activityId = requestArgs.activityId();
final Throwable cause = connected.cause();
if (connected.isCancelled()) {
logger.debug("\n [{}]\n {}\n write cancelled: {}", this, requestArgs, cause);
requestRecord.cancel(true);
} else {
logger.debug("\n [{}]\n {}\n write failed due to {} ", this, requestArgs, cause);
final String reason = cause.toString();
// Connection-establishment failures surface as GoneException (410) so the
// higher-level retry logic treats them as retriable replica failures.
final GoneException goneException = new GoneException(
lenientFormat("failed to establish connection to %s due to %s", this.remoteAddress, reason),
cause instanceof Exception ? (Exception) cause : new IOException(reason, cause),
ImmutableMap.of(HttpHeaders.ACTIVITY_ID, activityId.toString()),
requestArgs.replicaPath()
);
BridgeInternal.setRequestHeaders(goneException, requestArgs.serviceRequest().getHeaders());
requestRecord.completeExceptionally(goneException);
}
return requestRecord;
}
// Jackson serializer used by RntbdObjectMapper to render an endpoint (and a summary of
// its owning transport client) into diagnostics JSON.
static final class JsonSerializer extends StdSerializer<RntbdServiceEndpoint> {
private static final long serialVersionUID = -5764954918168771152L;
public JsonSerializer() {
super(RntbdServiceEndpoint.class);
}
@Override
public void serialize(RntbdServiceEndpoint value, JsonGenerator generator, SerializerProvider provider)
throws IOException {
final RntbdTransportClient transportClient = value.provider.transportClient;
generator.writeStartObject();
generator.writeNumberField("id", value.id);
generator.writeBooleanField("closed", value.isClosed());
generator.writeNumberField("concurrentRequests", value.concurrentRequests());
generator.writeStringField("remoteAddress", value.remoteAddress.toString());
generator.writeObjectField("channelPool", value.channelPool);
// Nested summary of the owning transport client.
generator.writeObjectFieldStart("transportClient");
generator.writeNumberField("id", transportClient.id());
generator.writeBooleanField("closed", transportClient.isClosed());
generator.writeNumberField("endpointCount", transportClient.endpointCount());
generator.writeNumberField("endpointEvictionCount", transportClient.endpointEvictionCount());
generator.writeEndObject();
generator.writeEndObject();
}
}
// Owns the lifecycle of all RntbdServiceEndpoint instances: one shared event-loop group,
// one shared request timer, and a per-authority endpoint cache.
public static final class Provider implements RntbdEndpoint.Provider {
private static final Logger logger = LoggerFactory.getLogger(Provider.class);
private final AtomicBoolean closed;
private final Config config;
// Endpoint cache keyed by URI authority (host:port).
private final ConcurrentHashMap<String, RntbdEndpoint> endpoints;
private final EventLoopGroup eventLoopGroup;
private final AtomicInteger evictions;
private final RntbdEndpointMonitoringProvider monitoring;
private final RntbdRequestTimer requestTimer;
private final RntbdTransportClient transportClient;
private final IAddressResolver addressResolver;
public Provider(
final RntbdTransportClient transportClient,
final Options options,
final SslContext sslContext,
final IAddressResolver addressResolver) {
checkNotNull(transportClient, "expected non-null provider");
checkNotNull(options, "expected non-null options");
checkNotNull(sslContext, "expected non-null sslContext");
// Wire-level logging is only enabled when this class's logger is at DEBUG.
final LogLevel wireLogLevel;
if (logger.isDebugEnabled()) {
wireLogLevel = LogLevel.TRACE;
} else {
wireLogLevel = null;
}
this.addressResolver = addressResolver;
this.transportClient = transportClient;
this.config = new Config(options, sslContext, wireLogLevel);
this.requestTimer = new RntbdRequestTimer(
config.requestTimeoutInNanos(),
config.requestTimerResolutionInNanos());
this.eventLoopGroup = this.getEventLoopGroup(options);
this.endpoints = new ConcurrentHashMap<>();
this.evictions = new AtomicInteger();
this.closed = new AtomicBoolean();
this.monitoring = new RntbdEndpointMonitoringProvider(this);
this.monitoring.init();
}
// Builds the shared event-loop group, preferring a native transport (e.g. epoll)
// when available and enabled via options.preferTcpNative().
private EventLoopGroup getEventLoopGroup(Options options) {
checkNotNull(options, "expected non-null options");
RntbdLoop rntbdEventLoop = RntbdLoopNativeDetector.getRntbdLoop(options.preferTcpNative());
DefaultThreadFactory threadFactory =
new DefaultThreadFactory(
"cosmos-rntbd-" + rntbdEventLoop.getName(),
true,
options.ioThreadPriority());
return rntbdEventLoop.newEventLoopGroup(options.threadCount(), threadFactory);
}
// Idempotent: stops monitoring, closes every endpoint, then shuts the event-loop group
// down gracefully; the request timer is closed from the shutdown listener.
@Override
public void close() {
if (this.closed.compareAndSet(false, true)) {
this.monitoring.close();
for (final RntbdEndpoint endpoint : this.endpoints.values()) {
endpoint.close();
}
this.eventLoopGroup.shutdownGracefully(QUIET_PERIOD, this.config.shutdownTimeoutInNanos(), NANOSECONDS)
.addListener(future -> {
this.requestTimer.close();
if (future.isSuccess()) {
logger.debug("\n [{}]\n closed endpoints", this);
return;
}
logger.error("\n [{}]\n failed to close endpoints due to ", this, future.cause());
});
return;
}
logger.debug("\n [{}]\n already closed", this);
}
@Override
public Config config() {
return this.config;
}
@Override
public int count() {
return this.endpoints.size();
}
@Override
public int evictions() {
return this.evictions.get();
}
// Returns the cached endpoint for the address authority, creating it on first use.
@Override
public RntbdEndpoint get(final URI physicalAddress) {
return endpoints.computeIfAbsent(physicalAddress.getAuthority(), authority -> new RntbdServiceEndpoint(
this,
this.config,
this.eventLoopGroup,
this.requestTimer,
physicalAddress));
}
@Override
public IAddressResolver getAddressResolver() {
return this.addressResolver;
}
@Override
public Stream<RntbdEndpoint> list() {
return this.endpoints.values().stream();
}
// Called by RntbdServiceEndpoint.close() to remove itself from the cache.
private void evict(final RntbdEndpoint endpoint) {
if (this.endpoints.remove(endpoint.serverKey().getAuthority()) != null) {
this.evictions.incrementAndGet();
}
}
}
/**
 * Periodically logs pool statistics for every {@link RntbdEndpoint} owned by a {@link Provider}.
 *
 * Logging runs on a single, shared, low-priority daemon executor so monitoring never competes
 * with I/O threads. Lifecycle methods are synchronized; {@link #close()} is safe to call more
 * than once and before {@link #init()}.
 */
public static class RntbdEndpointMonitoringProvider implements AutoCloseable {
private final Logger logger = LoggerFactory.getLogger(RntbdEndpointMonitoringProvider.class);
// One shared monitoring thread for all instances; daemon + MIN_PRIORITY keeps it unobtrusive.
private static final EventExecutor monitoringRntbdChannelPool = new DefaultEventExecutor(new RntbdThreadFactory(
"monitoring-rntbd-endpoints",
true,
Thread.MIN_PRIORITY));
private static final Duration MONITORING_PERIOD = Duration.ofSeconds(60);
private final Provider provider;
// Queue-depth threshold above which an endpoint is logged at WARN instead of DEBUG.
private final static int MAX_TASK_LIMIT = 5_000;
// Handle to the periodic logging task; null until init() and after close().
private ScheduledFuture<?> future;
RntbdEndpointMonitoringProvider(Provider provider) {
this.provider = provider;
}
// Starts the periodic logging task. Expected to be called exactly once, right after construction.
synchronized void init() {
logger.info("Starting RntbdClientChannelPoolMonitoringProvider ...");
this.future = RntbdEndpointMonitoringProvider.monitoringRntbdChannelPool.scheduleAtFixedRate(() -> {
logAllPools();
}, 0, MONITORING_PERIOD.toMillis(), TimeUnit.MILLISECONDS);
}
@Override
public synchronized void close() {
logger.info("Shutting down RntbdClientChannelPoolMonitoringProvider ...");
// Guard against close() before init() and against a second close(): both previously
// dereferenced a null future and threw NullPointerException.
if (this.future != null) {
this.future.cancel(false);
this.future = null;
}
}
// Logs every endpoint owned by the provider; never propagates failures.
synchronized void logAllPools() {
try {
logger.debug("Total number of RntbdClientChannelPool [{}].", provider.endpoints.size());
for (RntbdEndpoint endpoint : provider.endpoints.values()) {
logEndpoint(endpoint);
}
} catch (Exception e) {
// Deliberately broad catch: a monitoring failure must never affect the transport.
logger.error("monitoring unexpected failure", e);
}
}
// Escalates to WARN when queues back up, connections are pending, or the channel count
// exceeds the endpoint's maximum; otherwise logs at DEBUG.
private void logEndpoint(RntbdEndpoint endpoint) {
if (this.logger.isWarnEnabled() &&
(endpoint.executorTaskQueueMetrics() > MAX_TASK_LIMIT ||
endpoint.requestQueueLength() > MAX_TASK_LIMIT ||
endpoint.gettingEstablishedConnectionsMetrics() > 0 ||
endpoint.channelsMetrics() > endpoint.maxChannels())) {
logger.warn("RntbdEndpoint Identifier {}, Stat {}", getPoolId(endpoint), getPoolStat(endpoint));
} else if (this.logger.isDebugEnabled()) {
logger.debug("RntbdEndpoint Identifier {}, Stat {}", getPoolId(endpoint), getPoolStat(endpoint));
}
}
// Renders a point-in-time statistics snapshot of the endpoint for logging.
private String getPoolStat(RntbdEndpoint endpoint) {
return "[ "
+ "poolTaskExecutorSize " + endpoint.executorTaskQueueMetrics()
+ ", lastRequestNanoTime " + Instant.now().minusNanos(
System.nanoTime() - endpoint.lastRequestNanoTime())
+ ", connecting " + endpoint.gettingEstablishedConnectionsMetrics()
+ ", acquiredChannel " + endpoint.channelsAcquiredMetric()
+ ", availableChannel " + endpoint.channelsAvailableMetric()
+ ", pendingAcquisitionSize " + endpoint.requestQueueLength()
+ ", closed " + endpoint.isClosed()
+ " ]";
}
// Renders a stable identifier for the endpoint being logged.
private String getPoolId(RntbdEndpoint endpoint) {
if (endpoint == null) {
return "null";
}
return "[RntbdEndpoint" +
", id " + endpoint.id() +
", remoteAddress " + endpoint.remoteAddress() +
", creationTime " + endpoint.getCreatedTime() +
", hashCode " + endpoint.hashCode() +
"]";
}
}
} |
Are you saying our SDK does not use epoll on Linux by default? If that's the case, I would expect to see some performance difference. Are you seeing any performance difference before and after this PR on Linux? | private EventLoopGroup getEventLoopGroup(Options options) {
checkNotNull(options, "expected non-null options");
// Daemon I/O threads so the JVM can exit without an explicit transport shutdown.
final DefaultThreadFactory threadFactory =
new DefaultThreadFactory(
"cosmos-rntbd-nio",
true,
options.ioThreadPriority());
// Prefer the native epoll transport when the platform supports it; otherwise fall back to NIO.
if (Epoll.isAvailable()) {
return new EpollEventLoopGroup(options.threadCount(), threadFactory);
}
return new NioEventLoopGroup(options.threadCount(), threadFactory);
} | } | private EventLoopGroup getEventLoopGroup(Options options) {
checkNotNull(options, "expected non-null options");
// Detect the best available transport (native, e.g. epoll, or NIO), honoring the
// preferTcpNative() option; the thread-factory name reflects the selected transport.
RntbdLoop rntbdEventLoop = RntbdLoopNativeDetector.getRntbdLoop(options.preferTcpNative());
DefaultThreadFactory threadFactory =
new DefaultThreadFactory(
"cosmos-rntbd-" + rntbdEventLoop.getName(),
true,
options.ioThreadPriority());
return rntbdEventLoop.newEventLoopGroup(options.threadCount(), threadFactory);
} | class Provider implements RntbdEndpoint.Provider {
private static final Logger logger = LoggerFactory.getLogger(Provider.class);
private final AtomicBoolean closed;
private final Config config;
// Endpoint cache keyed by URI authority (host:port).
private final ConcurrentHashMap<String, RntbdEndpoint> endpoints;
private final EventLoopGroup eventLoopGroup;
private final AtomicInteger evictions;
private final RntbdEndpointMonitoringProvider monitoring;
private final RntbdRequestTimer requestTimer;
private final RntbdTransportClient transportClient;
private final IAddressResolver addressResolver;
public Provider(
final RntbdTransportClient transportClient,
final Options options,
final SslContext sslContext,
final IAddressResolver addressResolver) {
checkNotNull(transportClient, "expected non-null provider");
checkNotNull(options, "expected non-null options");
checkNotNull(sslContext, "expected non-null sslContext");
// Wire-level logging is only enabled when this class's logger is at DEBUG.
final LogLevel wireLogLevel;
if (logger.isDebugEnabled()) {
wireLogLevel = LogLevel.TRACE;
} else {
wireLogLevel = null;
}
this.addressResolver = addressResolver;
this.transportClient = transportClient;
this.config = new Config(options, sslContext, wireLogLevel);
this.requestTimer = new RntbdRequestTimer(
config.requestTimeoutInNanos(),
config.requestTimerResolutionInNanos());
this.eventLoopGroup = this.getEventLoopGroup(options);
this.endpoints = new ConcurrentHashMap<>();
this.evictions = new AtomicInteger();
this.closed = new AtomicBoolean();
this.monitoring = new RntbdEndpointMonitoringProvider(this);
this.monitoring.init();
}
// Idempotent: stops monitoring, closes every endpoint, then shuts the event-loop group
// down gracefully; the request timer is closed from the shutdown listener.
@Override
public void close() {
if (this.closed.compareAndSet(false, true)) {
this.monitoring.close();
for (final RntbdEndpoint endpoint : this.endpoints.values()) {
endpoint.close();
}
this.eventLoopGroup.shutdownGracefully(QUIET_PERIOD, this.config.shutdownTimeoutInNanos(), NANOSECONDS)
.addListener(future -> {
this.requestTimer.close();
if (future.isSuccess()) {
logger.debug("\n [{}]\n closed endpoints", this);
return;
}
logger.error("\n [{}]\n failed to close endpoints due to ", this, future.cause());
});
return;
}
logger.debug("\n [{}]\n already closed", this);
}
@Override
public Config config() {
return this.config;
}
@Override
public int count() {
return this.endpoints.size();
}
@Override
public int evictions() {
return this.evictions.get();
}
// Returns the cached endpoint for the address authority, creating it on first use.
@Override
public RntbdEndpoint get(final URI physicalAddress) {
return endpoints.computeIfAbsent(physicalAddress.getAuthority(), authority -> new RntbdServiceEndpoint(
this,
this.config,
this.eventLoopGroup,
this.requestTimer,
physicalAddress));
}
@Override
public IAddressResolver getAddressResolver() {
return this.addressResolver;
}
@Override
public Stream<RntbdEndpoint> list() {
return this.endpoints.values().stream();
}
// Called by RntbdServiceEndpoint.close() to remove itself from the cache.
private void evict(final RntbdEndpoint endpoint) {
if (this.endpoints.remove(endpoint.serverKey().getAuthority()) != null) {
this.evictions.incrementAndGet();
}
}
} | class Provider implements RntbdEndpoint.Provider {
private static final Logger logger = LoggerFactory.getLogger(Provider.class);
private final AtomicBoolean closed;
private final Config config;
// Endpoint cache keyed by URI authority (host:port).
private final ConcurrentHashMap<String, RntbdEndpoint> endpoints;
private final EventLoopGroup eventLoopGroup;
private final AtomicInteger evictions;
private final RntbdEndpointMonitoringProvider monitoring;
private final RntbdRequestTimer requestTimer;
private final RntbdTransportClient transportClient;
private final IAddressResolver addressResolver;
public Provider(
final RntbdTransportClient transportClient,
final Options options,
final SslContext sslContext,
final IAddressResolver addressResolver) {
checkNotNull(transportClient, "expected non-null provider");
checkNotNull(options, "expected non-null options");
checkNotNull(sslContext, "expected non-null sslContext");
// Wire-level logging is only enabled when this class's logger is at DEBUG.
final LogLevel wireLogLevel;
if (logger.isDebugEnabled()) {
wireLogLevel = LogLevel.TRACE;
} else {
wireLogLevel = null;
}
this.addressResolver = addressResolver;
this.transportClient = transportClient;
this.config = new Config(options, sslContext, wireLogLevel);
this.requestTimer = new RntbdRequestTimer(
config.requestTimeoutInNanos(),
config.requestTimerResolutionInNanos());
this.eventLoopGroup = this.getEventLoopGroup(options);
this.endpoints = new ConcurrentHashMap<>();
this.evictions = new AtomicInteger();
this.closed = new AtomicBoolean();
this.monitoring = new RntbdEndpointMonitoringProvider(this);
this.monitoring.init();
}
// Idempotent shutdown: stop monitoring, close endpoints, then the event-loop group.
@Override
public void close() {
if (this.closed.compareAndSet(false, true)) {
this.monitoring.close();
for (final RntbdEndpoint endpoint : this.endpoints.values()) {
endpoint.close();
}
this.eventLoopGroup.shutdownGracefully(QUIET_PERIOD, this.config.shutdownTimeoutInNanos(), NANOSECONDS)
.addListener(future -> {
this.requestTimer.close();
if (future.isSuccess()) {
logger.debug("\n [{}]\n closed endpoints", this);
return;
}
logger.error("\n [{}]\n failed to close endpoints due to ", this, future.cause());
});
return;
}
logger.debug("\n [{}]\n already closed", this);
}
@Override
public Config config() {
return this.config;
}
@Override
public int count() {
return this.endpoints.size();
}
@Override
public int evictions() {
return this.evictions.get();
}
// Returns the cached endpoint for the address authority, creating it on first use.
@Override
public RntbdEndpoint get(final URI physicalAddress) {
return endpoints.computeIfAbsent(physicalAddress.getAuthority(), authority -> new RntbdServiceEndpoint(
this,
this.config,
this.eventLoopGroup,
this.requestTimer,
physicalAddress));
}
@Override
public IAddressResolver getAddressResolver() {
return this.addressResolver;
}
@Override
public Stream<RntbdEndpoint> list() {
return this.endpoints.values().stream();
}
// Called by RntbdServiceEndpoint.close() to remove itself from the cache.
private void evict(final RntbdEndpoint endpoint) {
if (this.endpoints.remove(endpoint.serverKey().getAuthority()) != null) {
this.evictions.incrementAndGet();
}
}
} |
Would having the ability to configure/tune these values without a code change help in any scenarios (e.g., CX scenarios)? | private Bootstrap getBootStrap(EventLoopGroup eventLoopGroup, Config config) {
checkNotNull(eventLoopGroup, "expected non-null eventLoopGroup");
checkNotNull(config, "expected non-null config");
Bootstrap bootstrap = new Bootstrap()
.group(eventLoopGroup)
.option(ChannelOption.ALLOCATOR, config.allocator())
.option(ChannelOption.AUTO_READ, true)
.option(ChannelOption.CONNECT_TIMEOUT_MILLIS, config.connectTimeoutInMillis())
.option(ChannelOption.RCVBUF_ALLOCATOR, receiveBufferAllocator)
.option(ChannelOption.SO_KEEPALIVE, true)
.remoteAddress(this.serverKey.getHost(), this.serverKey.getPort());
// Epoll transport additionally allows tuning TCP keepalive probing.
// NOTE(review): TCP_KEEPINTVL=1 and TCP_KEEPIDLE=30 are hard-coded here — presumably
// seconds, per the Linux socket options; consider surfacing them through Config so
// they can be tuned without a code change. TODO confirm.
if (eventLoopGroup instanceof EpollEventLoopGroup) {
bootstrap.channel(EpollSocketChannel.class)
.option(EpollChannelOption.TCP_KEEPINTVL, 1)
.option(EpollChannelOption.TCP_KEEPIDLE, 30);
} else {
bootstrap.channel(NioSocketChannel.class);
}
return bootstrap;
} | .option(EpollChannelOption.TCP_KEEPINTVL, 1) | private Bootstrap getBootStrap(EventLoopGroup eventLoopGroup, Config config) {
checkNotNull(eventLoopGroup, "expected non-null eventLoopGroup");
checkNotNull(config, "expected non-null config");
// Channel class must match the transport selected for the event-loop group.
RntbdLoop rntbdLoop = RntbdLoopNativeDetector.getRntbdLoop(config.preferTcpNative());
Bootstrap bootstrap = new Bootstrap()
.group(eventLoopGroup)
.channel(rntbdLoop.getChannelClass())
.option(ChannelOption.ALLOCATOR, config.allocator())
.option(ChannelOption.AUTO_READ, true)
.option(ChannelOption.CONNECT_TIMEOUT_MILLIS, config.connectTimeoutInMillis())
.option(ChannelOption.RCVBUF_ALLOCATOR, receiveBufferAllocator)
.option(ChannelOption.SO_KEEPALIVE, true)
.remoteAddress(this.serverKey.getHost(), this.serverKey.getPort());
// TCP keepalive probe tuning is epoll-specific; values now come from Config
// instead of being hard-coded.
if (rntbdLoop instanceof RntbdLoopEpoll) {
bootstrap
.option(EpollChannelOption.TCP_KEEPINTVL, config.tcpKeepIntvl())
.option(EpollChannelOption.TCP_KEEPIDLE, config.tcpKeepIdle());
}
return bootstrap;
} | class RntbdServiceEndpoint implements RntbdEndpoint {
private static final String TAG_NAME = RntbdServiceEndpoint.class.getSimpleName();
// Quiet period (ns) used when shutting the shared event-loop group down gracefully.
private static final long QUIET_PERIOD = 2_000_000_000L;
private static final AtomicLong instanceCount = new AtomicLong();
private static final Logger logger = LoggerFactory.getLogger(RntbdServiceEndpoint.class);
private static final AdaptiveRecvByteBufAllocator receiveBufferAllocator = new AdaptiveRecvByteBufAllocator();
// Pool of channels (pooled + in-use) to this single replica endpoint.
private final RntbdClientChannelPool channelPool;
// Flipped exactly once in close(); guards against double-close.
private final AtomicBoolean closed;
// Number of requests currently in flight on this endpoint.
private final AtomicInteger concurrentRequests;
private final long id;
// Nano-time of the most recent request admitted to this endpoint.
private final AtomicLong lastRequestNanoTime;
// Nano-time of the most recent successful (or success-equivalent) response.
private final AtomicLong lastSuccessfulRequestNanoTime;
private final Instant createdTime;
private final RntbdMetrics metrics;
private final Provider provider;
private final URI serverKey;
private final SocketAddress remoteAddress;
private final RntbdRequestTimer requestTimer;
private final Tag tag;
private final int maxConcurrentRequests;
private final boolean channelAcquisitionContextEnabled;
// Null unless connection-endpoint rediscovery is enabled AND an address resolver exists.
private final RntbdConnectionStateListener connectionStateListener;
// Creates an endpoint bound to one physical replica address; cached by Provider.get(URI).
private RntbdServiceEndpoint(
final Provider provider,
final Config config,
final EventLoopGroup group,
final RntbdRequestTimer timer,
final URI physicalAddress) {
this.serverKey = RntbdUtils.getServerKey(physicalAddress);
final Bootstrap bootstrap = this.getBootStrap(group, config);
this.createdTime = Instant.now();
this.channelPool = new RntbdClientChannelPool(this, bootstrap, config);
this.remoteAddress = bootstrap.config().remoteAddress();
this.concurrentRequests = new AtomicInteger();
// Seed both health timestamps with "now" so a fresh endpoint is never considered stale.
this.lastRequestNanoTime = new AtomicLong(System.nanoTime());
this.lastSuccessfulRequestNanoTime = new AtomicLong(System.nanoTime());
this.closed = new AtomicBoolean();
this.requestTimer = timer;
this.tag = Tag.of(TAG_NAME, RntbdMetrics.escape(this.remoteAddress.toString()));
this.id = instanceCount.incrementAndGet();
this.provider = provider;
this.metrics = new RntbdMetrics(provider.transportClient, this);
this.maxConcurrentRequests = config.maxConcurrentRequestsPerEndpoint();
this.connectionStateListener = this.provider.addressResolver != null && config.isConnectionEndpointRediscoveryEnabled()
? new RntbdConnectionStateListener(this.provider.addressResolver, this)
: null;
this.channelAcquisitionContextEnabled = config.isChannelAcquisitionContextEnabled();
}
/**
* @return approximate number of acquired channels.
*/
@Override
public int channelsAcquiredMetric() {
return this.channelPool.channelsAcquiredMetrics();
}
/**
* @return approximate number of available channels.
*/
@Override
public int channelsAvailableMetric() {
return this.channelPool.channelsAvailableMetrics();
}
/** @return number of requests currently in flight on this endpoint. */
@Override
public int concurrentRequests() {
return this.concurrentRequests.get();
}
/** @return approximate number of connections currently being established. */
@Override
public int gettingEstablishedConnectionsMetrics() {
return this.channelPool.attemptingToConnectMetrics();
}
@Override
public long id() {
return this.id;
}
@Override
public boolean isClosed() {
return this.closed.get();
}
@Override
public int maxChannels() {
return this.channelPool.channels(true);
}
/** @return nano-time of the most recent request admitted to this endpoint. */
public long lastRequestNanoTime() {
return this.lastRequestNanoTime.get();
}
@Override
public long lastSuccessfulRequestNanoTime() {
return this.lastSuccessfulRequestNanoTime.get();
}
@Override
public int channelsMetrics() {
return this.channelPool.channels(true);
}
@Override
public int executorTaskQueueMetrics() {
return this.channelPool.executorTaskQueueMetrics();
}
public Instant getCreatedTime() {
return this.createdTime;
}
@Override
public SocketAddress remoteAddress() {
return this.remoteAddress;
}
@Override
public URI serverKey() { return this.serverKey; }
@Override
public int requestQueueLength() {
return this.channelPool.requestQueueLength();
}
@Override
public Tag tag() {
return this.tag;
}
@Override
public long usedDirectMemory() {
return this.channelPool.usedDirectMemory();
}
@Override
public long usedHeapMemory() {
return this.channelPool.usedHeapMemory();
}
// Idempotent: evicts this endpoint from its provider and closes the channel pool once.
@Override
public void close() {
if (this.closed.compareAndSet(false, true)) {
this.provider.evict(this);
this.channelPool.close();
}
}
// Admits a request to this endpoint: fail-fast when the in-flight limit is exceeded,
// otherwise write it to a pooled channel and track its completion for metrics/health.
public RntbdRequestRecord request(final RntbdRequestArgs args) {
this.throwIfClosed();
int concurrentRequestSnapshot = this.concurrentRequests.incrementAndGet();
RntbdEndpointStatistics stat = endpointMetricsSnapshot(concurrentRequestSnapshot);
if (concurrentRequestSnapshot > this.maxConcurrentRequests) {
try {
// Over the per-endpoint limit: complete immediately with a throttle-style failure
// instead of queueing more work onto an already saturated endpoint.
FailFastRntbdRequestRecord requestRecord = FailFastRntbdRequestRecord.createAndFailFast(
args,
concurrentRequestSnapshot,
metrics,
remoteAddress);
requestRecord.serviceEndpointStatistics(stat);
return requestRecord;
}
finally {
// The fail-fast record never reaches the whenComplete callback below,
// so undo the increment here.
concurrentRequests.decrementAndGet();
}
}
this.lastRequestNanoTime.set(args.nanoTimeCreated());
final RntbdRequestRecord record = this.write(args);
record.serviceEndpointStatistics(stat);
record.whenComplete((response, error) -> {
this.concurrentRequests.decrementAndGet();
this.metrics.markComplete(record);
onResponse(error, record);
});
return record;
}
/**
 * Invoked after every request completes; keeps the endpoint-health timestamp current and
 * notifies the connection-state listener of transport faults.
 *
 * @param exception the failure, or {@code null} on success
 * @param record the completed request record
 */
private void onResponse(Throwable exception, RntbdRequestRecord record) {
    if (exception == null) {
        this.lastSuccessfulRequestNanoTime.set(System.nanoTime());
        return;
    }
    if (this.connectionStateListener != null) {
        this.connectionStateListener.onException(record.args().serviceRequest(), exception);
    }
    if (exception instanceof CosmosException) {
        // NOTFOUND/CONFLICT mean the replica responded, so treat them as health signals too.
        final int statusCode = ((CosmosException) exception).getStatusCode();
        if (statusCode == HttpConstants.StatusCodes.NOTFOUND
            || statusCode == HttpConstants.StatusCodes.CONFLICT) {
            this.lastSuccessfulRequestNanoTime.set(System.nanoTime());
        }
    }
}
// Captures a point-in-time statistics snapshot attached to each request record for diagnostics.
private RntbdEndpointStatistics endpointMetricsSnapshot(int concurrentRequestSnapshot) {
RntbdEndpointStatistics stats = new RntbdEndpointStatistics()
.availableChannels(this.channelsAvailableMetric())
.acquiredChannels(this.channelsAcquiredMetric())
.executorTaskQueueSize(this.executorTaskQueueMetrics())
.lastSuccessfulRequestNanoTime(this.lastSuccessfulRequestNanoTime())
.createdTime(this.createdTime)
.lastRequestNanoTime(this.lastRequestNanoTime())
.closed(this.closed.get())
.inflightRequests(concurrentRequestSnapshot);
return stats;
}
@Override
public String toString() {
return RntbdObjectMapper.toString(this);
}
// Debug-only helper: logs the outcome of returning a channel to the pool.
private void ensureSuccessWhenReleasedToPool(Channel channel, Future<Void> released) {
if (released.isSuccess()) {
logger.debug("\n [{}]\n {}\n release succeeded", this, channel);
} else {
logger.debug("\n [{}]\n {}\n release failed due to {}", this, channel, released.cause());
}
}
// Returns a channel to the pool; release outcome is only inspected when debug logging is on.
private void releaseToPool(final Channel channel) {
logger.debug("\n [{}]\n {}\n RELEASE", this, channel);
final Future<Void> released = this.channelPool.release(channel);
if (logger.isDebugEnabled()) {
if (released.isDone()) {
ensureSuccessWhenReleasedToPool(channel, released);
} else {
released.addListener(ignored -> ensureSuccessWhenReleasedToPool(channel, released));
}
}
}
// Rejects new work once close() has been called.
private void throwIfClosed() {
if (this.closed.get()) {
throw new TransportException(lenientFormat("%s is closed", this), new IllegalStateException());
}
}
// Acquires a channel from the pool and pipelines the request onto it, either inline
// (when acquisition completed synchronously) or from the acquisition listener.
private RntbdRequestRecord write(final RntbdRequestArgs requestArgs) {
final RntbdRequestRecord requestRecord = new AsyncRntbdRequestRecord(requestArgs, this.requestTimer);
requestRecord.channelAcquisitionContextEnabled(this.channelAcquisitionContextEnabled);
requestRecord.stage(RntbdRequestRecord.Stage.CHANNEL_ACQUISITION_STARTED);
final Future<Channel> connectedChannel = this.channelPool.acquire(requestRecord.getChannelAcquisitionTimeline());
logger.debug("\n [{}]\n {}\n WRITE WHEN CONNECTED {}", this, requestArgs, connectedChannel);
if (connectedChannel.isDone()) {
return writeWhenConnected(requestRecord, connectedChannel);
} else {
connectedChannel.addListener(ignored -> writeWhenConnected(requestRecord, connectedChannel));
}
return requestRecord;
}
// Completes channel acquisition: on success the channel is released back to the pool
// (writes are pipelined, the channel stays shared) and the request is written; on
// failure the record is cancelled or completed with a GoneException (410) so the
// caller's retry policy can react.
private RntbdRequestRecord writeWhenConnected(
final RntbdRequestRecord requestRecord, final Future<? super Channel> connected) {
if (connected.isSuccess()) {
final Channel channel = (Channel) connected.getNow();
assert channel != null : "impossible";
this.releaseToPool(channel);
requestRecord.channelTaskQueueLength(RntbdUtils.tryGetExecutorTaskQueueSize(channel.eventLoop()));
channel.write(requestRecord.stage(RntbdRequestRecord.Stage.PIPELINED));
return requestRecord;
}
final RntbdRequestArgs requestArgs = requestRecord.args();
final UUID activityId = requestArgs.activityId();
final Throwable cause = connected.cause();
if (connected.isCancelled()) {
logger.debug("\n [{}]\n {}\n write cancelled: {}", this, requestArgs, cause);
requestRecord.cancel(true);
} else {
logger.debug("\n [{}]\n {}\n write failed due to {} ", this, requestArgs, cause);
final String reason = cause.toString();
final GoneException goneException = new GoneException(
lenientFormat("failed to establish connection to %s due to %s", this.remoteAddress, reason),
cause instanceof Exception ? (Exception) cause : new IOException(reason, cause),
ImmutableMap.of(HttpHeaders.ACTIVITY_ID, activityId.toString()),
requestArgs.replicaPath()
);
BridgeInternal.setRequestHeaders(goneException, requestArgs.serviceRequest().getHeaders());
requestRecord.completeExceptionally(goneException);
}
return requestRecord;
}
// Jackson serializer used by RntbdObjectMapper to render an endpoint (plus a summary of
// its owning transport client) into diagnostics JSON.
static final class JsonSerializer extends StdSerializer<RntbdServiceEndpoint> {
private static final long serialVersionUID = -5764954918168771152L;
public JsonSerializer() {
super(RntbdServiceEndpoint.class);
}
@Override
public void serialize(RntbdServiceEndpoint value, JsonGenerator generator, SerializerProvider provider)
throws IOException {
final RntbdTransportClient transportClient = value.provider.transportClient;
generator.writeStartObject();
generator.writeNumberField("id", value.id);
generator.writeBooleanField("closed", value.isClosed());
generator.writeNumberField("concurrentRequests", value.concurrentRequests());
generator.writeStringField("remoteAddress", value.remoteAddress.toString());
generator.writeObjectField("channelPool", value.channelPool);
// Nested summary of the owning transport client.
generator.writeObjectFieldStart("transportClient");
generator.writeNumberField("id", transportClient.id());
generator.writeBooleanField("closed", transportClient.isClosed());
generator.writeNumberField("endpointCount", transportClient.endpointCount());
generator.writeNumberField("endpointEvictionCount", transportClient.endpointEvictionCount());
generator.writeEndObject();
generator.writeEndObject();
}
}
/**
 * Owns the set of {@link RntbdEndpoint}s (one per server authority), the shared netty
 * event-loop group, the request timer, and the endpoint monitoring task. Thread-safe:
 * the endpoint map is a ConcurrentHashMap and close() is guarded by an AtomicBoolean.
 */
public static final class Provider implements RntbdEndpoint.Provider {
private static final Logger logger = LoggerFactory.getLogger(Provider.class);
private final AtomicBoolean closed;
private final Config config;
// Keyed by server authority (host:port); see get(URI) and evict(RntbdEndpoint).
private final ConcurrentHashMap<String, RntbdEndpoint> endpoints;
private final EventLoopGroup eventLoopGroup;
private final AtomicInteger evictions;
private final RntbdEndpointMonitoringProvider monitoring;
private final RntbdRequestTimer requestTimer;
private final RntbdTransportClient transportClient;
private final IAddressResolver addressResolver;
/**
 * @param transportClient owning transport client (non-null)
 * @param options transport options (non-null)
 * @param sslContext TLS context for outbound channels (non-null)
 * @param addressResolver resolver used for endpoint rediscovery; may be null
 */
public Provider(
final RntbdTransportClient transportClient,
final Options options,
final SslContext sslContext,
final IAddressResolver addressResolver) {
checkNotNull(transportClient, "expected non-null provider");
checkNotNull(options, "expected non-null options");
checkNotNull(sslContext, "expected non-null sslContext");
// Wire-level logging is only enabled when this class's logger is at DEBUG.
final LogLevel wireLogLevel;
if (logger.isDebugEnabled()) {
wireLogLevel = LogLevel.TRACE;
} else {
wireLogLevel = null;
}
this.addressResolver = addressResolver;
this.transportClient = transportClient;
this.config = new Config(options, sslContext, wireLogLevel);
this.requestTimer = new RntbdRequestTimer(
config.requestTimeoutInNanos(),
config.requestTimerResolutionInNanos());
this.eventLoopGroup = this.getEventLoopGroup(options);
this.endpoints = new ConcurrentHashMap<>();
this.evictions = new AtomicInteger();
this.closed = new AtomicBoolean();
// Monitoring starts immediately and logs endpoint statistics on a fixed period.
this.monitoring = new RntbdEndpointMonitoringProvider(this);
this.monitoring.init();
}
// Prefers the native epoll transport when available, falling back to NIO.
private EventLoopGroup getEventLoopGroup(Options options) {
checkNotNull(options, "expected non-null options");
final DefaultThreadFactory threadFactory =
new DefaultThreadFactory(
"cosmos-rntbd-nio",
true,
options.ioThreadPriority());
if (Epoll.isAvailable()) {
return new EpollEventLoopGroup(options.threadCount(), threadFactory);
}
return new NioEventLoopGroup(options.threadCount(), threadFactory);
}
/** Closes all endpoints once, then shuts the event-loop group down gracefully. */
@Override
public void close() {
if (this.closed.compareAndSet(false, true)) {
this.monitoring.close();
for (final RntbdEndpoint endpoint : this.endpoints.values()) {
endpoint.close();
}
// QUIET_PERIOD (2s) lets in-flight tasks drain; the request timer is closed
// only after the event loop has finished shutting down.
this.eventLoopGroup.shutdownGracefully(QUIET_PERIOD, this.config.shutdownTimeoutInNanos(), NANOSECONDS)
.addListener(future -> {
this.requestTimer.close();
if (future.isSuccess()) {
logger.debug("\n [{}]\n closed endpoints", this);
return;
}
logger.error("\n [{}]\n failed to close endpoints due to ", this, future.cause());
});
return;
}
logger.debug("\n [{}]\n already closed", this);
}
@Override
public Config config() {
return this.config;
}
/** Number of currently tracked endpoints. */
@Override
public int count() {
return this.endpoints.size();
}
/** Total endpoints evicted over this provider's lifetime. */
@Override
public int evictions() {
return this.evictions.get();
}
/** Returns the endpoint for the address's authority, creating it on first use. */
@Override
public RntbdEndpoint get(final URI physicalAddress) {
return endpoints.computeIfAbsent(physicalAddress.getAuthority(), authority -> new RntbdServiceEndpoint(
this,
this.config,
this.eventLoopGroup,
this.requestTimer,
physicalAddress));
}
@Override
public IAddressResolver getAddressResolver() {
return this.addressResolver;
}
@Override
public Stream<RntbdEndpoint> list() {
return this.endpoints.values().stream();
}
// Called by RntbdServiceEndpoint.close(); counts only actual removals.
private void evict(final RntbdEndpoint endpoint) {
if (this.endpoints.remove(endpoint.serverKey().getAuthority()) != null) {
this.evictions.incrementAndGet();
}
}
}
/**
 * Periodically (every {@link #MONITORING_PERIOD}) logs statistics for every endpoint
 * owned by a {@link Provider}. Unhealthy-looking endpoints (deep task queues, pending
 * connections, channel count above the configured maximum) are logged at WARN;
 * otherwise stats are logged at DEBUG. {@code init()} and {@code close()} are
 * synchronized and idempotent.
 */
public static class RntbdEndpointMonitoringProvider implements AutoCloseable {
    private final Logger logger = LoggerFactory.getLogger(RntbdEndpointMonitoringProvider.class);
    // Single shared low-priority daemon executor for all monitoring instances.
    private static final EventExecutor monitoringRntbdChannelPool = new DefaultEventExecutor(new RntbdThreadFactory(
        "monitoring-rntbd-endpoints",
        true,
        Thread.MIN_PRIORITY));
    private static final Duration MONITORING_PERIOD = Duration.ofSeconds(60);
    private final Provider provider;
    // Queue-depth threshold above which an endpoint is logged at WARN.
    private final static int MAX_TASK_LIMIT = 5_000;
    private ScheduledFuture<?> future;

    RntbdEndpointMonitoringProvider(Provider provider) {
        this.provider = provider;
    }

    /** Starts the periodic logging task; a no-op if monitoring is already running. */
    synchronized void init() {
        logger.info("Starting RntbdClientChannelPoolMonitoringProvider ...");
        if (this.future != null) {
            // Already scheduled: scheduling again would leak a second periodic task.
            return;
        }
        this.future = RntbdEndpointMonitoringProvider.monitoringRntbdChannelPool.scheduleAtFixedRate(() -> {
            logAllPools();
        }, 0, MONITORING_PERIOD.toMillis(), TimeUnit.MILLISECONDS);
    }

    /** Stops the periodic task; safe to call repeatedly or before {@code init()}. */
    @Override
    public synchronized void close() {
        logger.info("Shutting down RntbdClientChannelPoolMonitoringProvider ...");
        // Guard: the previous implementation dereferenced 'future' unconditionally,
        // so close() before init() or a second close() threw NullPointerException.
        if (this.future != null) {
            this.future.cancel(false);
            this.future = null;
        }
    }

    /** Logs statistics for every endpoint; never lets an exception kill the timer task. */
    synchronized void logAllPools() {
        try {
            logger.debug("Total number of RntbdClientChannelPool [{}].", provider.endpoints.size());
            for (RntbdEndpoint endpoint : provider.endpoints.values()) {
                logEndpoint(endpoint);
            }
        } catch (Exception e) {
            logger.error("monitoring unexpected failure", e);
        }
    }

    // WARN for endpoints that look saturated or are still establishing connections;
    // DEBUG otherwise.
    private void logEndpoint(RntbdEndpoint endpoint) {
        if (this.logger.isWarnEnabled() &&
            (endpoint.executorTaskQueueMetrics() > MAX_TASK_LIMIT ||
                endpoint.requestQueueLength() > MAX_TASK_LIMIT ||
                endpoint.gettingEstablishedConnectionsMetrics() > 0 ||
                endpoint.channelsMetrics() > endpoint.maxChannels())) {
            logger.warn("RntbdEndpoint Identifier {}, Stat {}", getPoolId(endpoint), getPoolStat(endpoint));
        } else if (this.logger.isDebugEnabled()) {
            logger.debug("RntbdEndpoint Identifier {}, Stat {}", getPoolId(endpoint), getPoolStat(endpoint));
        }
    }

    // Human-readable snapshot of the endpoint's live counters.
    private String getPoolStat(RntbdEndpoint endpoint) {
        return "[ "
            + "poolTaskExecutorSize " + endpoint.executorTaskQueueMetrics()
            + ", lastRequestNanoTime " + Instant.now().minusNanos(
                System.nanoTime() - endpoint.lastRequestNanoTime())
            + ", connecting " + endpoint.gettingEstablishedConnectionsMetrics()
            + ", acquiredChannel " + endpoint.channelsAcquiredMetric()
            + ", availableChannel " + endpoint.channelsAvailableMetric()
            + ", pendingAcquisitionSize " + endpoint.requestQueueLength()
            + ", closed " + endpoint.isClosed()
            + " ]";
    }

    // Stable identity string for correlating log lines about one endpoint.
    private String getPoolId(RntbdEndpoint endpoint) {
        if (endpoint == null) {
            return "null";
        }
        return "[RntbdEndpoint" +
            ", id " + endpoint.id() +
            ", remoteAddress " + endpoint.remoteAddress() +
            ", creationTime " + endpoint.getCreatedTime() +
            ", hashCode " + endpoint.hashCode() +
            "]";
    }
}
} | class RntbdServiceEndpoint implements RntbdEndpoint {
private static final String TAG_NAME = RntbdServiceEndpoint.class.getSimpleName();
// 2-second quiet period (nanoseconds) passed to the event-loop group's graceful shutdown.
private static final long QUIET_PERIOD = 2_000_000_000L;
// Monotonic source of per-endpoint ids.
private static final AtomicLong instanceCount = new AtomicLong();
private static final Logger logger = LoggerFactory.getLogger(RntbdServiceEndpoint.class);
private static final AdaptiveRecvByteBufAllocator receiveBufferAllocator = new AdaptiveRecvByteBufAllocator();
private final RntbdClientChannelPool channelPool;
private final AtomicBoolean closed;
// Requests currently in flight on this endpoint; compared against maxConcurrentRequests.
private final AtomicInteger concurrentRequests;
private final long id;
// Nano timestamps used for endpoint health accounting and diagnostics.
private final AtomicLong lastRequestNanoTime;
private final AtomicLong lastSuccessfulRequestNanoTime;
private final Instant createdTime;
private final RntbdMetrics metrics;
private final Provider provider;
private final URI serverKey;
private final SocketAddress remoteAddress;
private final RntbdRequestTimer requestTimer;
private final Tag tag;
private final int maxConcurrentRequests;
private final boolean channelAcquisitionContextEnabled;
// Non-null only when an address resolver exists and endpoint rediscovery is enabled
// (see constructor); used to report request exceptions.
private final RntbdConnectionStateListener connectionStateListener;
/**
 * Creates an endpoint bound to one physical server address: builds the netty bootstrap
 * and channel pool, registers metrics, and optionally wires a connection-state listener.
 * Instances are created only by {@link Provider#get(URI)}.
 */
private RntbdServiceEndpoint(
final Provider provider,
final Config config,
final EventLoopGroup group,
final RntbdRequestTimer timer,
final URI physicalAddress) {
this.serverKey = RntbdUtils.getServerKey(physicalAddress);
final Bootstrap bootstrap = this.getBootStrap(group, config);
this.createdTime = Instant.now();
this.channelPool = new RntbdClientChannelPool(this, bootstrap, config);
this.remoteAddress = bootstrap.config().remoteAddress();
this.concurrentRequests = new AtomicInteger();
// Seed both timestamps with "now" so a fresh endpoint is not considered stale.
this.lastRequestNanoTime = new AtomicLong(System.nanoTime());
this.lastSuccessfulRequestNanoTime = new AtomicLong(System.nanoTime());
this.closed = new AtomicBoolean();
this.requestTimer = timer;
this.tag = Tag.of(TAG_NAME, RntbdMetrics.escape(this.remoteAddress.toString()));
this.id = instanceCount.incrementAndGet();
this.provider = provider;
this.metrics = new RntbdMetrics(provider.transportClient, this);
this.maxConcurrentRequests = config.maxConcurrentRequestsPerEndpoint();
// Rediscovery listener only when both an address resolver and the feature flag exist.
this.connectionStateListener = this.provider.addressResolver != null && config.isConnectionEndpointRediscoveryEnabled()
? new RntbdConnectionStateListener(this.provider.addressResolver, this)
: null;
this.channelAcquisitionContextEnabled = config.isChannelAcquisitionContextEnabled();
}
/**
 * @return approximate number of acquired channels.
 */
@Override
public int channelsAcquiredMetric() {
return this.channelPool.channelsAcquiredMetrics();
}
/**
 * @return approximate number of available channels.
 */
@Override
public int channelsAvailableMetric() {
return this.channelPool.channelsAvailableMetrics();
}
// Requests currently in flight on this endpoint.
@Override
public int concurrentRequests() {
return this.concurrentRequests.get();
}
// Approximate number of connections still being established.
@Override
public int gettingEstablishedConnectionsMetrics() {
return this.channelPool.attemptingToConnectMetrics();
}
@Override
public long id() {
return this.id;
}
@Override
public boolean isClosed() {
return this.closed.get();
}
@Override
public int maxChannels() {
return this.channelPool.channels(true);
}
// Nano timestamp of the most recent request submitted to this endpoint.
public long lastRequestNanoTime() {
return this.lastRequestNanoTime.get();
}
// Nano timestamp of the most recent successful (or 404/409) response.
@Override
public long lastSuccessfulRequestNanoTime() {
return this.lastSuccessfulRequestNanoTime.get();
}
@Override
public int channelsMetrics() {
return this.channelPool.channels(true);
}
@Override
public int executorTaskQueueMetrics() {
return this.channelPool.executorTaskQueueMetrics();
}
public Instant getCreatedTime() {
return this.createdTime;
}
@Override
public SocketAddress remoteAddress() {
return this.remoteAddress;
}
@Override
public URI serverKey() { return this.serverKey; }
// Number of acquisitions waiting for a channel.
@Override
public int requestQueueLength() {
return this.channelPool.requestQueueLength();
}
// Metrics tag identifying this endpoint's remote address.
@Override
public Tag tag() {
return this.tag;
}
@Override
public long usedDirectMemory() {
return this.channelPool.usedDirectMemory();
}
@Override
public long usedHeapMemory() {
return this.channelPool.usedHeapMemory();
}
// Idempotent: evicts this endpoint from its provider and closes the channel pool once.
@Override
public void close() {
if (this.closed.compareAndSet(false, true)) {
this.provider.evict(this);
this.channelPool.close();
}
}
/**
 * Submits a request to this endpoint. If the in-flight count already exceeds the
 * configured per-endpoint maximum, the request fails fast without touching the wire;
 * otherwise it is written through the channel pool. Completion (success or failure)
 * decrements the in-flight counter, records metrics, and updates health timestamps.
 *
 * @param args the request to send
 * @return a record tracking the asynchronous request
 * @throws TransportException if this endpoint is already closed
 */
public RntbdRequestRecord request(final RntbdRequestArgs args) {
this.throwIfClosed();
int concurrentRequestSnapshot = this.concurrentRequests.incrementAndGet();
// Snapshot endpoint statistics before the request runs, for diagnostics.
RntbdEndpointStatistics stat = endpointMetricsSnapshot(concurrentRequestSnapshot);
if (concurrentRequestSnapshot > this.maxConcurrentRequests) {
try {
FailFastRntbdRequestRecord requestRecord = FailFastRntbdRequestRecord.createAndFailFast(
args,
concurrentRequestSnapshot,
metrics,
remoteAddress);
requestRecord.serviceEndpointStatistics(stat);
return requestRecord;
}
finally {
// The fail-fast request never completes through the normal path, so the
// in-flight counter must be decremented here.
concurrentRequests.decrementAndGet();
}
}
this.lastRequestNanoTime.set(args.nanoTimeCreated());
final RntbdRequestRecord record = this.write(args);
record.serviceEndpointStatistics(stat);
record.whenComplete((response, error) -> {
this.concurrentRequests.decrementAndGet();
this.metrics.markComplete(record);
onResponse(error, record);
});
return record;
}
/**
 * Completion hook for a finished request: refreshes the endpoint's last-successful
 * timestamp and forwards failures to the connection-state listener when configured.
 * CONFLICT (409) and NOTFOUND (404) count as "successful" for health purposes —
 * they are application-level outcomes delivered over a working connection.
 */
private void onResponse(Throwable exception, RntbdRequestRecord record) {
    if (exception == null) {
        // Plain success: the endpoint answered, so refresh the health timestamp.
        this.lastSuccessfulRequestNanoTime.set(System.nanoTime());
        return;
    }
    if (this.connectionStateListener != null) {
        this.connectionStateListener.onException(record.args().serviceRequest(), exception);
    }
    if (!(exception instanceof CosmosException)) {
        return;
    }
    final int statusCode = ((CosmosException) exception).getStatusCode();
    if (statusCode == HttpConstants.StatusCodes.CONFLICT
        || statusCode == HttpConstants.StatusCodes.NOTFOUND) {
        this.lastSuccessfulRequestNanoTime.set(System.nanoTime());
    }
}
/**
 * Builds a point-in-time statistics snapshot of this endpoint, attached to request
 * records for diagnostics.
 *
 * @param concurrentRequestSnapshot the in-flight request count observed by the caller
 * @return a freshly populated {@link RntbdEndpointStatistics}
 */
private RntbdEndpointStatistics endpointMetricsSnapshot(int concurrentRequestSnapshot) {
    return new RntbdEndpointStatistics()
        .availableChannels(this.channelsAvailableMetric())
        .acquiredChannels(this.channelsAcquiredMetric())
        .executorTaskQueueSize(this.executorTaskQueueMetrics())
        .lastSuccessfulRequestNanoTime(this.lastSuccessfulRequestNanoTime())
        .createdTime(this.createdTime)
        .lastRequestNanoTime(this.lastRequestNanoTime())
        .closed(this.closed.get())
        .inflightRequests(concurrentRequestSnapshot);
}
// Serializes this endpoint through the custom JsonSerializer for diagnostics/logging.
@Override
public String toString() {
return RntbdObjectMapper.toString(this);
}
/**
 * Debug-logs the outcome of returning a channel to the pool; no recovery is attempted
 * here — this is observation only.
 */
private void ensureSuccessWhenReleasedToPool(Channel channel, Future<Void> released) {
    if (!released.isSuccess()) {
        logger.debug("\n [{}]\n {}\n release failed due to {}", this, channel, released.cause());
        return;
    }
    logger.debug("\n [{}]\n {}\n release succeeded", this, channel);
}
/**
 * Returns {@code channel} to the channel pool. When debug logging is enabled the
 * release outcome is logged — immediately if the release future is already done,
 * otherwise via a listener.
 */
private void releaseToPool(final Channel channel) {
    logger.debug("\n [{}]\n {}\n RELEASE", this, channel);
    final Future<Void> released = this.channelPool.release(channel);
    if (!logger.isDebugEnabled()) {
        return;
    }
    if (released.isDone()) {
        ensureSuccessWhenReleasedToPool(channel, released);
    } else {
        released.addListener(ignored -> ensureSuccessWhenReleasedToPool(channel, released));
    }
}
/**
 * Rejects further use of this endpoint after {@link #close()}.
 *
 * @throws TransportException if the endpoint has been closed
 */
private void throwIfClosed() {
    if (!this.closed.get()) {
        return;
    }
    throw new TransportException(lenientFormat("%s is closed", this), new IllegalStateException());
}
/**
 * Acquires a channel from the pool and writes the request once acquisition completes.
 * The record moves to stage CHANNEL_ACQUISITION_STARTED before the acquire; the actual
 * write happens in {@link #writeWhenConnected}, either inline (future already done)
 * or from the acquisition listener.
 *
 * @param requestArgs the request to send
 * @return the tracking record for the asynchronous write
 */
private RntbdRequestRecord write(final RntbdRequestArgs requestArgs) {
final RntbdRequestRecord requestRecord = new AsyncRntbdRequestRecord(requestArgs, this.requestTimer);
requestRecord.channelAcquisitionContextEnabled(this.channelAcquisitionContextEnabled);
requestRecord.stage(RntbdRequestRecord.Stage.CHANNEL_ACQUISITION_STARTED);
final Future<Channel> connectedChannel = this.channelPool.acquire(requestRecord.getChannelAcquisitionTimeline());
logger.debug("\n [{}]\n {}\n WRITE WHEN CONNECTED {}", this, requestArgs, connectedChannel);
if (connectedChannel.isDone()) {
return writeWhenConnected(requestRecord, connectedChannel);
} else {
connectedChannel.addListener(ignored -> writeWhenConnected(requestRecord, connectedChannel));
}
return requestRecord;
}
/**
 * Completes the write of a request once the channel-acquisition future resolves.
 *
 * On success the channel is released straight back to the pool (channels are shared)
 * and the request is written at stage PIPELINED. On failure the record is cancelled
 * (acquisition cancelled) or completed exceptionally with a GoneException that wraps
 * the connection failure and carries the activity id and replica path.
 *
 * @param requestRecord the pending request record to write
 * @param connected the channel-acquisition future from the channel pool
 * @return the same {@code requestRecord}, for chaining
 */
private RntbdRequestRecord writeWhenConnected(
final RntbdRequestRecord requestRecord, final Future<? super Channel> connected) {
if (connected.isSuccess()) {
final Channel channel = (Channel) connected.getNow();
assert channel != null : "impossible";
// Release before writing: the pool hands out shared channels.
this.releaseToPool(channel);
requestRecord.channelTaskQueueLength(RntbdUtils.tryGetExecutorTaskQueueSize(channel.eventLoop()));
channel.write(requestRecord.stage(RntbdRequestRecord.Stage.PIPELINED));
return requestRecord;
}
final RntbdRequestArgs requestArgs = requestRecord.args();
final UUID activityId = requestArgs.activityId();
final Throwable cause = connected.cause();
if (connected.isCancelled()) {
logger.debug("\n [{}]\n {}\n write cancelled: {}", this, requestArgs, cause);
requestRecord.cancel(true);
} else {
logger.debug("\n [{}]\n {}\n write failed due to {} ", this, requestArgs, cause);
final String reason = cause.toString();
final GoneException goneException = new GoneException(
lenientFormat("failed to establish connection to %s due to %s", this.remoteAddress, reason),
cause instanceof Exception ? (Exception) cause : new IOException(reason, cause),
ImmutableMap.of(HttpHeaders.ACTIVITY_ID, activityId.toString()),
requestArgs.replicaPath()
);
BridgeInternal.setRequestHeaders(goneException, requestArgs.serviceRequest().getHeaders());
requestRecord.completeExceptionally(goneException);
}
return requestRecord;
}
/**
 * Jackson serializer rendering an {@link RntbdServiceEndpoint} — including its channel
 * pool and a summary of the owning transport client — as JSON for diagnostics.
 */
static final class JsonSerializer extends StdSerializer<RntbdServiceEndpoint> {
private static final long serialVersionUID = -5764954918168771152L;
public JsonSerializer() {
super(RntbdServiceEndpoint.class);
}
@Override
public void serialize(RntbdServiceEndpoint value, JsonGenerator generator, SerializerProvider provider)
throws IOException {
final RntbdTransportClient transportClient = value.provider.transportClient;
generator.writeStartObject();
generator.writeNumberField("id", value.id);
generator.writeBooleanField("closed", value.isClosed());
generator.writeNumberField("concurrentRequests", value.concurrentRequests());
generator.writeStringField("remoteAddress", value.remoteAddress.toString());
// Nested object delegated to the channel pool's own serializer.
generator.writeObjectField("channelPool", value.channelPool);
generator.writeObjectFieldStart("transportClient");
generator.writeNumberField("id", transportClient.id());
generator.writeBooleanField("closed", transportClient.isClosed());
generator.writeNumberField("endpointCount", transportClient.endpointCount());
generator.writeNumberField("endpointEvictionCount", transportClient.endpointEvictionCount());
generator.writeEndObject();
generator.writeEndObject();
}
}
/**
 * Owns the set of {@link RntbdEndpoint}s (one per server authority), the shared netty
 * event-loop group (native transport when preferred/available), the request timer, and
 * the endpoint monitoring task. Thread-safe: the endpoint map is a ConcurrentHashMap
 * and close() is guarded by an AtomicBoolean.
 */
public static final class Provider implements RntbdEndpoint.Provider {
private static final Logger logger = LoggerFactory.getLogger(Provider.class);
private final AtomicBoolean closed;
private final Config config;
// Keyed by server authority (host:port); see get(URI) and evict(RntbdEndpoint).
private final ConcurrentHashMap<String, RntbdEndpoint> endpoints;
private final EventLoopGroup eventLoopGroup;
private final AtomicInteger evictions;
private final RntbdEndpointMonitoringProvider monitoring;
private final RntbdRequestTimer requestTimer;
private final RntbdTransportClient transportClient;
private final IAddressResolver addressResolver;
/**
 * @param transportClient owning transport client (non-null)
 * @param options transport options (non-null)
 * @param sslContext TLS context for outbound channels (non-null)
 * @param addressResolver resolver used for endpoint rediscovery; may be null
 */
public Provider(
final RntbdTransportClient transportClient,
final Options options,
final SslContext sslContext,
final IAddressResolver addressResolver) {
checkNotNull(transportClient, "expected non-null provider")<;
checkNotNull(options, "expected non-null options");
checkNotNull(sslContext, "expected non-null sslContext");
// Wire-level logging only when this class's logger is at DEBUG.
final LogLevel wireLogLevel;
if (logger.isDebugEnabled()) {
wireLogLevel = LogLevel.TRACE;
} else {
wireLogLevel = null;
}
this.addressResolver = addressResolver;
this.transportClient = transportClient;
this.config = new Config(options, sslContext, wireLogLevel);
this.requestTimer = new RntbdRequestTimer(
config.requestTimeoutInNanos(),
config.requestTimerResolutionInNanos());
this.eventLoopGroup = this.getEventLoopGroup(options);
this.endpoints = new ConcurrentHashMap<>();
this.evictions = new AtomicInteger();
this.closed = new AtomicBoolean();
// Monitoring starts immediately; it logs endpoint statistics on a fixed period.
this.monitoring = new RntbdEndpointMonitoringProvider(this);
this.monitoring.init();
}
// Chooses the event-loop implementation (epoll/kqueue/NIO) via RntbdLoopNativeDetector,
// honoring the preferTcpNative option.
private EventLoopGroup getEventLoopGroup(Options options) {
checkNotNull(options, "expected non-null options");
RntbdLoop rntbdEventLoop = RntbdLoopNativeDetector.getRntbdLoop(options.preferTcpNative());
DefaultThreadFactory threadFactory =
new DefaultThreadFactory(
"cosmos-rntbd-" + rntbdEventLoop.getName(),
true,
options.ioThreadPriority());
return rntbdEventLoop.newEventLoopGroup(options.threadCount(), threadFactory);
}
/** Closes all endpoints once, then shuts the event-loop group down gracefully. */
@Override
public void close() {
if (this.closed.compareAndSet(false, true)) {
this.monitoring.close();
for (final RntbdEndpoint endpoint : this.endpoints.values()) {
endpoint.close();
}
// QUIET_PERIOD (2s) lets in-flight tasks drain; the request timer is closed
// only after the event loop has finished shutting down.
this.eventLoopGroup.shutdownGracefully(QUIET_PERIOD, this.config.shutdownTimeoutInNanos(), NANOSECONDS)
.addListener(future -> {
this.requestTimer.close();
if (future.isSuccess()) {
logger.debug("\n [{}]\n closed endpoints", this);
return;
}
logger.error("\n [{}]\n failed to close endpoints due to ", this, future.cause());
});
return;
}
logger.debug("\n [{}]\n already closed", this);
}
@Override
public Config config() {
return this.config;
}
/** Number of currently tracked endpoints. */
@Override
public int count() {
return this.endpoints.size();
}
/** Total endpoints evicted over this provider's lifetime. */
@Override
public int evictions() {
return this.evictions.get();
}
/** Returns the endpoint for the address's authority, creating it on first use. */
@Override
public RntbdEndpoint get(final URI physicalAddress) {
return endpoints.computeIfAbsent(physicalAddress.getAuthority(), authority -> new RntbdServiceEndpoint(
this,
this.config,
this.eventLoopGroup,
this.requestTimer,
physicalAddress));
}
@Override
public IAddressResolver getAddressResolver() {
return this.addressResolver;
}
@Override
public Stream<RntbdEndpoint> list() {
return this.endpoints.values().stream();
}
// Called by RntbdServiceEndpoint.close(); counts only actual removals.
private void evict(final RntbdEndpoint endpoint) {
if (this.endpoints.remove(endpoint.serverKey().getAuthority()) != null) {
this.evictions.incrementAndGet();
}
}
}
/**
 * Periodically (every {@link #MONITORING_PERIOD}) logs statistics for every endpoint
 * owned by a {@link Provider}. Unhealthy-looking endpoints (deep task queues, pending
 * connections, channel count above the configured maximum) are logged at WARN;
 * otherwise stats are logged at DEBUG. {@code init()} and {@code close()} are
 * synchronized and idempotent.
 */
public static class RntbdEndpointMonitoringProvider implements AutoCloseable {
    private final Logger logger = LoggerFactory.getLogger(RntbdEndpointMonitoringProvider.class);
    // Single shared low-priority daemon executor for all monitoring instances.
    private static final EventExecutor monitoringRntbdChannelPool = new DefaultEventExecutor(new RntbdThreadFactory(
        "monitoring-rntbd-endpoints",
        true,
        Thread.MIN_PRIORITY));
    private static final Duration MONITORING_PERIOD = Duration.ofSeconds(60);
    private final Provider provider;
    // Queue-depth threshold above which an endpoint is logged at WARN.
    private final static int MAX_TASK_LIMIT = 5_000;
    private ScheduledFuture<?> future;

    RntbdEndpointMonitoringProvider(Provider provider) {
        this.provider = provider;
    }

    /** Starts the periodic logging task; a no-op if monitoring is already running. */
    synchronized void init() {
        logger.info("Starting RntbdClientChannelPoolMonitoringProvider ...");
        if (this.future != null) {
            // Already scheduled: scheduling again would leak a second periodic task.
            return;
        }
        this.future = RntbdEndpointMonitoringProvider.monitoringRntbdChannelPool.scheduleAtFixedRate(() -> {
            logAllPools();
        }, 0, MONITORING_PERIOD.toMillis(), TimeUnit.MILLISECONDS);
    }

    /** Stops the periodic task; safe to call repeatedly or before {@code init()}. */
    @Override
    public synchronized void close() {
        logger.info("Shutting down RntbdClientChannelPoolMonitoringProvider ...");
        // Guard: the previous implementation dereferenced 'future' unconditionally,
        // so close() before init() or a second close() threw NullPointerException.
        if (this.future != null) {
            this.future.cancel(false);
            this.future = null;
        }
    }

    /** Logs statistics for every endpoint; never lets an exception kill the timer task. */
    synchronized void logAllPools() {
        try {
            logger.debug("Total number of RntbdClientChannelPool [{}].", provider.endpoints.size());
            for (RntbdEndpoint endpoint : provider.endpoints.values()) {
                logEndpoint(endpoint);
            }
        } catch (Exception e) {
            logger.error("monitoring unexpected failure", e);
        }
    }

    // WARN for endpoints that look saturated or are still establishing connections;
    // DEBUG otherwise.
    private void logEndpoint(RntbdEndpoint endpoint) {
        if (this.logger.isWarnEnabled() &&
            (endpoint.executorTaskQueueMetrics() > MAX_TASK_LIMIT ||
                endpoint.requestQueueLength() > MAX_TASK_LIMIT ||
                endpoint.gettingEstablishedConnectionsMetrics() > 0 ||
                endpoint.channelsMetrics() > endpoint.maxChannels())) {
            logger.warn("RntbdEndpoint Identifier {}, Stat {}", getPoolId(endpoint), getPoolStat(endpoint));
        } else if (this.logger.isDebugEnabled()) {
            logger.debug("RntbdEndpoint Identifier {}, Stat {}", getPoolId(endpoint), getPoolStat(endpoint));
        }
    }

    // Human-readable snapshot of the endpoint's live counters.
    private String getPoolStat(RntbdEndpoint endpoint) {
        return "[ "
            + "poolTaskExecutorSize " + endpoint.executorTaskQueueMetrics()
            + ", lastRequestNanoTime " + Instant.now().minusNanos(
                System.nanoTime() - endpoint.lastRequestNanoTime())
            + ", connecting " + endpoint.gettingEstablishedConnectionsMetrics()
            + ", acquiredChannel " + endpoint.channelsAcquiredMetric()
            + ", availableChannel " + endpoint.channelsAvailableMetric()
            + ", pendingAcquisitionSize " + endpoint.requestQueueLength()
            + ", closed " + endpoint.isClosed()
            + " ]";
    }

    // Stable identity string for correlating log lines about one endpoint.
    private String getPoolId(RntbdEndpoint endpoint) {
        if (endpoint == null) {
            return "null";
        }
        return "[RntbdEndpoint" +
            ", id " + endpoint.id() +
            ", remoteAddress " + endpoint.remoteAddress() +
            ", creationTime " + endpoint.getCreatedTime() +
            ", hashCode " + endpoint.hashCode() +
            "]";
    }
}
} |
>TCP_KEEPINTVL [](http://example.com/codeflow?start=43&length=13) Are these consistent with .NET? | private Bootstrap getBootStrap(EventLoopGroup eventLoopGroup, Config config) {
checkNotNull(eventLoopGroup, "expected non-null eventLoopGroup");
checkNotNull(config, "expected non-null config");
Bootstrap bootstrap = new Bootstrap()
.group(eventLoopGroup)
.option(ChannelOption.ALLOCATOR, config.allocator())
.option(ChannelOption.AUTO_READ, true)
.option(ChannelOption.CONNECT_TIMEOUT_MILLIS, config.connectTimeoutInMillis())
.option(ChannelOption.RCVBUF_ALLOCATOR, receiveBufferAllocator)
.option(ChannelOption.SO_KEEPALIVE, true)
.remoteAddress(this.serverKey.getHost(), this.serverKey.getPort());
if (eventLoopGroup instanceof EpollEventLoopGroup) {
bootstrap.channel(EpollSocketChannel.class)
.option(EpollChannelOption.TCP_KEEPINTVL, 1)
.option(EpollChannelOption.TCP_KEEPIDLE, 30);
} else {
bootstrap.channel(NioSocketChannel.class);
}
return bootstrap;
} | .option(EpollChannelOption.TCP_KEEPINTVL, 1) | private Bootstrap getBootStrap(EventLoopGroup eventLoopGroup, Config config) {
checkNotNull(eventLoopGroup, "expected non-null eventLoopGroup");
checkNotNull(config, "expected non-null config");
RntbdLoop rntbdLoop = RntbdLoopNativeDetector.getRntbdLoop(config.preferTcpNative());
Bootstrap bootstrap = new Bootstrap()
.group(eventLoopGroup)
.channel(rntbdLoop.getChannelClass())
.option(ChannelOption.ALLOCATOR, config.allocator())
.option(ChannelOption.AUTO_READ, true)
.option(ChannelOption.CONNECT_TIMEOUT_MILLIS, config.connectTimeoutInMillis())
.option(ChannelOption.RCVBUF_ALLOCATOR, receiveBufferAllocator)
.option(ChannelOption.SO_KEEPALIVE, true)
.remoteAddress(this.serverKey.getHost(), this.serverKey.getPort());
if (rntbdLoop instanceof RntbdLoopEpoll) {
bootstrap
.option(EpollChannelOption.TCP_KEEPINTVL, config.tcpKeepIntvl())
.option(EpollChannelOption.TCP_KEEPIDLE, config.tcpKeepIdle());
}
return bootstrap;
} | class RntbdServiceEndpoint implements RntbdEndpoint {
private static final String TAG_NAME = RntbdServiceEndpoint.class.getSimpleName();
// 2-second quiet period (nanoseconds) passed to the event-loop group's graceful shutdown.
private static final long QUIET_PERIOD = 2_000_000_000L;
// Monotonic source of per-endpoint ids.
private static final AtomicLong instanceCount = new AtomicLong();
private static final Logger logger = LoggerFactory.getLogger(RntbdServiceEndpoint.class);
private static final AdaptiveRecvByteBufAllocator receiveBufferAllocator = new AdaptiveRecvByteBufAllocator();
private final RntbdClientChannelPool channelPool;
private final AtomicBoolean closed;
// Requests currently in flight on this endpoint; compared against maxConcurrentRequests.
private final AtomicInteger concurrentRequests;
private final long id;
// Nano timestamps used for endpoint health accounting and diagnostics.
private final AtomicLong lastRequestNanoTime;
private final AtomicLong lastSuccessfulRequestNanoTime;
private final Instant createdTime;
private final RntbdMetrics metrics;
private final Provider provider;
private final URI serverKey;
private final SocketAddress remoteAddress;
private final RntbdRequestTimer requestTimer;
private final Tag tag;
private final int maxConcurrentRequests;
private final boolean channelAcquisitionContextEnabled;
// Non-null only when an address resolver exists and endpoint rediscovery is enabled
// (see constructor); used to report request exceptions.
private final RntbdConnectionStateListener connectionStateListener;
/**
 * Creates an endpoint bound to one physical server address: builds the netty bootstrap
 * and channel pool, registers metrics, and optionally wires a connection-state listener.
 * Instances are created only by {@link Provider#get(URI)}.
 */
private RntbdServiceEndpoint(
final Provider provider,
final Config config,
final EventLoopGroup group,
final RntbdRequestTimer timer,
final URI physicalAddress) {
this.serverKey = RntbdUtils.getServerKey(physicalAddress);
final Bootstrap bootstrap = this.getBootStrap(group, config);
this.createdTime = Instant.now();
this.channelPool = new RntbdClientChannelPool(this, bootstrap, config);
this.remoteAddress = bootstrap.config().remoteAddress();
this.concurrentRequests = new AtomicInteger();
// Seed both timestamps with "now" so a fresh endpoint is not considered stale.
this.lastRequestNanoTime = new AtomicLong(System.nanoTime());
this.lastSuccessfulRequestNanoTime = new AtomicLong(System.nanoTime());
this.closed = new AtomicBoolean();
this.requestTimer = timer;
this.tag = Tag.of(TAG_NAME, RntbdMetrics.escape(this.remoteAddress.toString()));
this.id = instanceCount.incrementAndGet();
this.provider = provider;
this.metrics = new RntbdMetrics(provider.transportClient, this);
this.maxConcurrentRequests = config.maxConcurrentRequestsPerEndpoint();
// Rediscovery listener only when both an address resolver and the feature flag exist.
this.connectionStateListener = this.provider.addressResolver != null && config.isConnectionEndpointRediscoveryEnabled()
? new RntbdConnectionStateListener(this.provider.addressResolver, this)
: null;
this.channelAcquisitionContextEnabled = config.isChannelAcquisitionContextEnabled();
}
/**
 * @return approximate number of acquired channels.
 */
@Override
public int channelsAcquiredMetric() {
return this.channelPool.channelsAcquiredMetrics();
}
/**
 * @return approximate number of available channels.
 */
@Override
public int channelsAvailableMetric() {
return this.channelPool.channelsAvailableMetrics();
}
// Requests currently in flight on this endpoint.
@Override
public int concurrentRequests() {
return this.concurrentRequests.get();
}
// Approximate number of connections still being established.
@Override
public int gettingEstablishedConnectionsMetrics() {
return this.channelPool.attemptingToConnectMetrics();
}
@Override
public long id() {
return this.id;
}
@Override
public boolean isClosed() {
return this.closed.get();
}
@Override
public int maxChannels() {
return this.channelPool.channels(true);
}
// Nano timestamp of the most recent request submitted to this endpoint.
public long lastRequestNanoTime() {
return this.lastRequestNanoTime.get();
}
// Nano timestamp of the most recent successful (or 404/409) response.
@Override
public long lastSuccessfulRequestNanoTime() {
return this.lastSuccessfulRequestNanoTime.get();
}
@Override
public int channelsMetrics() {
return this.channelPool.channels(true);
}
@Override
public int executorTaskQueueMetrics() {
return this.channelPool.executorTaskQueueMetrics();
}
public Instant getCreatedTime() {
return this.createdTime;
}
@Override
public SocketAddress remoteAddress() {
return this.remoteAddress;
}
@Override
public URI serverKey() { return this.serverKey; }
// Number of acquisitions waiting for a channel.
@Override
public int requestQueueLength() {
return this.channelPool.requestQueueLength();
}
// Metrics tag identifying this endpoint's remote address.
@Override
public Tag tag() {
return this.tag;
}
@Override
public long usedDirectMemory() {
return this.channelPool.usedDirectMemory();
}
@Override
public long usedHeapMemory() {
return this.channelPool.usedHeapMemory();
}
// Idempotent: evicts this endpoint from its provider and closes the channel pool once.
@Override
public void close() {
if (this.closed.compareAndSet(false, true)) {
this.provider.evict(this);
this.channelPool.close();
}
}
/**
 * Submits a request to this endpoint. If the in-flight count already exceeds the
 * configured per-endpoint maximum, the request fails fast without touching the wire;
 * otherwise it is written through the channel pool. Completion (success or failure)
 * decrements the in-flight counter, records metrics, and updates health timestamps.
 *
 * @param args the request to send
 * @return a record tracking the asynchronous request
 * @throws TransportException if this endpoint is already closed
 */
public RntbdRequestRecord request(final RntbdRequestArgs args) {
this.throwIfClosed();
int concurrentRequestSnapshot = this.concurrentRequests.incrementAndGet();
// Snapshot endpoint statistics before the request runs, for diagnostics.
RntbdEndpointStatistics stat = endpointMetricsSnapshot(concurrentRequestSnapshot);
if (concurrentRequestSnapshot > this.maxConcurrentRequests) {
try {
FailFastRntbdRequestRecord requestRecord = FailFastRntbdRequestRecord.createAndFailFast(
args,
concurrentRequestSnapshot,
metrics,
remoteAddress);
requestRecord.serviceEndpointStatistics(stat);
return requestRecord;
}
finally {
// The fail-fast request never completes through the normal path, so the
// in-flight counter must be decremented here.
concurrentRequests.decrementAndGet();
}
}
this.lastRequestNanoTime.set(args.nanoTimeCreated());
final RntbdRequestRecord record = this.write(args);
record.serviceEndpointStatistics(stat);
record.whenComplete((response, error) -> {
this.concurrentRequests.decrementAndGet();
this.metrics.markComplete(record);
onResponse(error, record);
});
return record;
}
/**
 * Completion hook for a finished request: refreshes the last-successful timestamp and
 * forwards failures to the connection-state listener when one is configured.
 * CONFLICT (409) and NOTFOUND (404) also refresh the timestamp — they are
 * application-level outcomes delivered over a working connection.
 */
private void onResponse(Throwable exception, RntbdRequestRecord record) {
if (exception == null) {
this.lastSuccessfulRequestNanoTime.set(System.nanoTime());
return;
}
if (this.connectionStateListener != null) {
this.connectionStateListener.onException(record.args().serviceRequest(), exception);
}
if (exception instanceof CosmosException) {
CosmosException cosmosException = (CosmosException) exception;
switch (cosmosException.getStatusCode()) {
case HttpConstants.StatusCodes.CONFLICT:
case HttpConstants.StatusCodes.NOTFOUND:
// Endpoint reachable; only the operation itself failed.
this.lastSuccessfulRequestNanoTime.set(System.nanoTime());
return;
default:
return;
}
}
}
/**
 * Builds a point-in-time statistics snapshot of this endpoint, attached to request
 * records for diagnostics.
 *
 * @param concurrentRequestSnapshot the in-flight request count observed by the caller
 */
private RntbdEndpointStatistics endpointMetricsSnapshot(int concurrentRequestSnapshot) {
RntbdEndpointStatistics stats = new RntbdEndpointStatistics()
.availableChannels(this.channelsAvailableMetric())
.acquiredChannels(this.channelsAcquiredMetric())
.executorTaskQueueSize(this.executorTaskQueueMetrics())
.lastSuccessfulRequestNanoTime(this.lastSuccessfulRequestNanoTime())
.createdTime(this.createdTime)
.lastRequestNanoTime(this.lastRequestNanoTime())
.closed(this.closed.get())
.inflightRequests(concurrentRequestSnapshot);
return stats;
}
@Override
public String toString() {
return RntbdObjectMapper.toString(this);
}
private void ensureSuccessWhenReleasedToPool(Channel channel, Future<Void> released) {
if (released.isSuccess()) {
logger.debug("\n [{}]\n {}\n release succeeded", this, channel);
} else {
logger.debug("\n [{}]\n {}\n release failed due to {}", this, channel, released.cause());
}
}
/**
 * Returns {@code channel} to the channel pool, logging the outcome at DEBUG level.
 *
 * @param channel the channel to release.
 */
private void releaseToPool(final Channel channel) {
    logger.debug("\n [{}]\n {}\n RELEASE", this, channel);
    final Future<Void> released = this.channelPool.release(channel);
    // Outcome logging is skipped entirely unless DEBUG is enabled, to avoid registering
    // a listener per release on the hot path.
    if (logger.isDebugEnabled()) {
        if (released.isDone()) {
            ensureSuccessWhenReleasedToPool(channel, released);
        } else {
            released.addListener(ignored -> ensureSuccessWhenReleasedToPool(channel, released));
        }
    }
}
/**
 * Guards entry points against use after {@code close()}.
 *
 * @throws TransportException if this endpoint has been closed.
 */
private void throwIfClosed() {
    if (!this.closed.get()) {
        return;
    }
    throw new TransportException(lenientFormat("%s is closed", this), new IllegalStateException());
}
/**
 * Creates a request record and writes it on a pooled channel, acquiring the channel
 * asynchronously if one is not immediately available.
 *
 * @param requestArgs the request to send.
 * @return the (possibly still pending) request record.
 */
private RntbdRequestRecord write(final RntbdRequestArgs requestArgs) {
    final RntbdRequestRecord requestRecord = new AsyncRntbdRequestRecord(requestArgs, this.requestTimer);
    requestRecord.channelAcquisitionContextEnabled(this.channelAcquisitionContextEnabled);
    requestRecord.stage(RntbdRequestRecord.Stage.CHANNEL_ACQUISITION_STARTED);
    final Future<Channel> connectedChannel = this.channelPool.acquire(requestRecord.getChannelAcquisitionTimeline());
    logger.debug("\n [{}]\n {}\n WRITE WHEN CONNECTED {}", this, requestArgs, connectedChannel);
    // Fast path: the pool handed back a settled future, so write synchronously; otherwise
    // defer the write to the acquisition listener.
    if (connectedChannel.isDone()) {
        return writeWhenConnected(requestRecord, connectedChannel);
    } else {
        connectedChannel.addListener(ignored -> writeWhenConnected(requestRecord, connectedChannel));
    }
    return requestRecord;
}
/**
 * Completes the pipelined write once channel acquisition has settled.
 * <p>
 * On successful acquisition the request is written to the channel; on cancellation the
 * record is cancelled; on any other failure the record is failed with a
 * {@link GoneException} wrapping the connection error (a retriable signal upstream —
 * NOTE(review): retriability is inferred from the exception type, confirm with the
 * retry policy).
 *
 * @param requestRecord the record tracking this request.
 * @param connected the settled channel-acquisition future.
 * @return {@code requestRecord}, for chaining.
 */
private RntbdRequestRecord writeWhenConnected(
    final RntbdRequestRecord requestRecord, final Future<? super Channel> connected) {
    if (connected.isSuccess()) {
        final Channel channel = (Channel) connected.getNow();
        assert channel != null : "impossible";
        // The channel is released back to the pool before the write — NOTE(review):
        // presumably the pool supports pipelined writes on released channels; confirm.
        this.releaseToPool(channel);
        requestRecord.channelTaskQueueLength(RntbdUtils.tryGetExecutorTaskQueueSize(channel.eventLoop()));
        channel.write(requestRecord.stage(RntbdRequestRecord.Stage.PIPELINED));
        return requestRecord;
    }
    final RntbdRequestArgs requestArgs = requestRecord.args();
    final UUID activityId = requestArgs.activityId();
    final Throwable cause = connected.cause();
    if (connected.isCancelled()) {
        logger.debug("\n [{}]\n {}\n write cancelled: {}", this, requestArgs, cause);
        requestRecord.cancel(true);
    } else {
        logger.debug("\n [{}]\n {}\n write failed due to {} ", this, requestArgs, cause);
        final String reason = cause.toString();
        // Wrap the connection failure as a GoneException carrying the activity id and
        // replica path so diagnostics can correlate it with the failed request.
        final GoneException goneException = new GoneException(
            lenientFormat("failed to establish connection to %s due to %s", this.remoteAddress, reason),
            cause instanceof Exception ? (Exception) cause : new IOException(reason, cause),
            ImmutableMap.of(HttpHeaders.ACTIVITY_ID, activityId.toString()),
            requestArgs.replicaPath()
        );
        BridgeInternal.setRequestHeaders(goneException, requestArgs.serviceRequest().getHeaders());
        requestRecord.completeExceptionally(goneException);
    }
    return requestRecord;
}
/**
 * Jackson serializer that renders an {@link RntbdServiceEndpoint} — and a summary of its
 * owning transport client — as JSON for diagnostics ({@code toString} uses this).
 */
static final class JsonSerializer extends StdSerializer<RntbdServiceEndpoint> {
    private static final long serialVersionUID = -5764954918168771152L;

    public JsonSerializer() {
        super(RntbdServiceEndpoint.class);
    }

    @Override
    public void serialize(RntbdServiceEndpoint value, JsonGenerator generator, SerializerProvider provider)
        throws IOException {
        final RntbdTransportClient transportClient = value.provider.transportClient;
        generator.writeStartObject();
        generator.writeNumberField("id", value.id);
        generator.writeBooleanField("closed", value.isClosed());
        generator.writeNumberField("concurrentRequests", value.concurrentRequests());
        generator.writeStringField("remoteAddress", value.remoteAddress.toString());
        generator.writeObjectField("channelPool", value.channelPool);
        // Nested summary of the owning transport client.
        generator.writeObjectFieldStart("transportClient");
        generator.writeNumberField("id", transportClient.id());
        generator.writeBooleanField("closed", transportClient.isClosed());
        generator.writeNumberField("endpointCount", transportClient.endpointCount());
        generator.writeNumberField("endpointEvictionCount", transportClient.endpointEvictionCount());
        generator.writeEndObject();
        generator.writeEndObject();
    }
}
/**
 * Creates and caches one {@link RntbdServiceEndpoint} per remote authority, sharing a
 * single Netty event-loop group, request timer, and monitoring task across all of them.
 * Thread-safe: the endpoint map is a {@link ConcurrentHashMap} and lifecycle flags are
 * atomics.
 */
public static final class Provider implements RntbdEndpoint.Provider {

    private static final Logger logger = LoggerFactory.getLogger(Provider.class);

    private final AtomicBoolean closed;
    private final Config config;
    // Keyed by URI authority (host:port); see get(URI).
    private final ConcurrentHashMap<String, RntbdEndpoint> endpoints;
    private final EventLoopGroup eventLoopGroup;
    private final AtomicInteger evictions;
    private final RntbdEndpointMonitoringProvider monitoring;
    private final RntbdRequestTimer requestTimer;
    private final RntbdTransportClient transportClient;
    private final IAddressResolver addressResolver;

    public Provider(
        final RntbdTransportClient transportClient,
        final Options options,
        final SslContext sslContext,
        final IAddressResolver addressResolver) {

        checkNotNull(transportClient, "expected non-null provider");
        checkNotNull(options, "expected non-null options");
        checkNotNull(sslContext, "expected non-null sslContext");

        // Wire-level logging is only wanted when DEBUG is on; null disables it in Config.
        final LogLevel wireLogLevel;
        if (logger.isDebugEnabled()) {
            wireLogLevel = LogLevel.TRACE;
        } else {
            wireLogLevel = null;
        }

        this.addressResolver = addressResolver;
        this.transportClient = transportClient;
        this.config = new Config(options, sslContext, wireLogLevel);
        this.requestTimer = new RntbdRequestTimer(
            config.requestTimeoutInNanos(),
            config.requestTimerResolutionInNanos());
        this.eventLoopGroup = this.getEventLoopGroup(options);
        this.endpoints = new ConcurrentHashMap<>();
        this.evictions = new AtomicInteger();
        this.closed = new AtomicBoolean();
        // Monitoring starts immediately; it logs endpoint statistics on a fixed schedule.
        this.monitoring = new RntbdEndpointMonitoringProvider(this);
        this.monitoring.init();
    }

    /** Builds the shared I/O event-loop group, preferring native epoll when available. */
    private EventLoopGroup getEventLoopGroup(Options options) {
        checkNotNull(options, "expected non-null options");
        final DefaultThreadFactory threadFactory =
            new DefaultThreadFactory(
                "cosmos-rntbd-nio",
                true,
                options.ioThreadPriority());
        if (Epoll.isAvailable()) {
            return new EpollEventLoopGroup(options.threadCount(), threadFactory);
        }
        return new NioEventLoopGroup(options.threadCount(), threadFactory);
    }

    /**
     * Closes every endpoint and shuts the event-loop group down gracefully; the request
     * timer is closed only after shutdown completes. Idempotent via compareAndSet.
     */
    @Override
    public void close() {
        if (this.closed.compareAndSet(false, true)) {
            this.monitoring.close();
            for (final RntbdEndpoint endpoint : this.endpoints.values()) {
                endpoint.close();
            }
            this.eventLoopGroup.shutdownGracefully(QUIET_PERIOD, this.config.shutdownTimeoutInNanos(), NANOSECONDS)
                .addListener(future -> {
                    this.requestTimer.close();
                    if (future.isSuccess()) {
                        logger.debug("\n [{}]\n closed endpoints", this);
                        return;
                    }
                    logger.error("\n [{}]\n failed to close endpoints due to ", this, future.cause());
                });
            return;
        }
        logger.debug("\n [{}]\n already closed", this);
    }

    @Override
    public Config config() {
        return this.config;
    }

    @Override
    public int count() {
        return this.endpoints.size();
    }

    @Override
    public int evictions() {
        return this.evictions.get();
    }

    /** Returns the endpoint for {@code physicalAddress}, creating it on first use. */
    @Override
    public RntbdEndpoint get(final URI physicalAddress) {
        return endpoints.computeIfAbsent(physicalAddress.getAuthority(), authority -> new RntbdServiceEndpoint(
            this,
            this.config,
            this.eventLoopGroup,
            this.requestTimer,
            physicalAddress));
    }

    @Override
    public IAddressResolver getAddressResolver() {
        return this.addressResolver;
    }

    @Override
    public Stream<RntbdEndpoint> list() {
        return this.endpoints.values().stream();
    }

    /** Removes {@code endpoint} from the cache, counting the eviction if it was present. */
    private void evict(final RntbdEndpoint endpoint) {
        if (this.endpoints.remove(endpoint.serverKey().getAuthority()) != null) {
            this.evictions.incrementAndGet();
        }
    }
}
/**
 * Periodically logs a snapshot of every {@link RntbdEndpoint} owned by a {@link Provider}.
 * Endpoints whose queues or connection counts exceed {@code MAX_TASK_LIMIT} are logged at
 * WARN; otherwise at DEBUG. All instances share one low-priority daemon executor thread.
 */
public static class RntbdEndpointMonitoringProvider implements AutoCloseable {

    private final Logger logger = LoggerFactory.getLogger(RntbdEndpointMonitoringProvider.class);

    // Shared by all monitors: daemon and lowest priority so it never competes with I/O threads.
    private static final EventExecutor monitoringRntbdChannelPool = new DefaultEventExecutor(new RntbdThreadFactory(
        "monitoring-rntbd-endpoints",
        true,
        Thread.MIN_PRIORITY));
    private static final Duration MONITORING_PERIOD = Duration.ofSeconds(60);
    private final Provider provider;
    // Threshold above which an endpoint's stats are escalated from DEBUG to WARN.
    private final static int MAX_TASK_LIMIT = 5_000;
    // Guarded by synchronized init()/close(); null when monitoring is not running.
    private ScheduledFuture<?> future;

    RntbdEndpointMonitoringProvider(Provider provider) {
        this.provider = provider;
    }

    /** Starts the fixed-rate monitoring task. */
    synchronized void init() {
        logger.info("Starting RntbdClientChannelPoolMonitoringProvider ...");
        this.future = RntbdEndpointMonitoringProvider.monitoringRntbdChannelPool.scheduleAtFixedRate(() -> {
            logAllPools();
        }, 0, MONITORING_PERIOD.toMillis(), TimeUnit.MILLISECONDS);
    }

    /**
     * Stops the monitoring task. Now idempotent: the previous implementation dereferenced
     * {@code this.future} unconditionally, so a second close() — or a close() before
     * init() — threw {@link NullPointerException}. AutoCloseable.close() is expected to
     * tolerate repeated invocation.
     */
    @Override
    public synchronized void close() {
        logger.info("Shutting down RntbdClientChannelPoolMonitoringProvider ...");
        if (this.future != null) {
            this.future.cancel(false);
            this.future = null;
        }
    }

    /** Logs one snapshot per endpoint; any failure is caught so the schedule survives. */
    synchronized void logAllPools() {
        try {
            logger.debug("Total number of RntbdClientChannelPool [{}].", provider.endpoints.size());
            for (RntbdEndpoint endpoint : provider.endpoints.values()) {
                logEndpoint(endpoint);
            }
        } catch (Exception e) {
            logger.error("monitoring unexpected failure", e);
        }
    }

    // WARN when any queue/connection metric crosses the limit or channels exceed the max;
    // otherwise DEBUG.
    private void logEndpoint(RntbdEndpoint endpoint) {
        if (this.logger.isWarnEnabled() &&
            (endpoint.executorTaskQueueMetrics() > MAX_TASK_LIMIT ||
                endpoint.requestQueueLength() > MAX_TASK_LIMIT ||
                endpoint.gettingEstablishedConnectionsMetrics() > 0 ||
                endpoint.channelsMetrics() > endpoint.maxChannels())) {
            logger.warn("RntbdEndpoint Identifier {}, Stat {}", getPoolId(endpoint), getPoolStat(endpoint));
        } else if (this.logger.isDebugEnabled()) {
            logger.debug("RntbdEndpoint Identifier {}, Stat {}", getPoolId(endpoint), getPoolStat(endpoint));
        }
    }

    private String getPoolStat(RntbdEndpoint endpoint) {
        return "[ "
            + "poolTaskExecutorSize " + endpoint.executorTaskQueueMetrics()
            + ", lastRequestNanoTime " + Instant.now().minusNanos(
                System.nanoTime() - endpoint.lastRequestNanoTime())
            + ", connecting " + endpoint.gettingEstablishedConnectionsMetrics()
            + ", acquiredChannel " + endpoint.channelsAcquiredMetric()
            + ", availableChannel " + endpoint.channelsAvailableMetric()
            + ", pendingAcquisitionSize " + endpoint.requestQueueLength()
            + ", closed " + endpoint.isClosed()
            + " ]";
    }

    private String getPoolId(RntbdEndpoint endpoint) {
        if (endpoint == null) {
            return "null";
        }
        return "[RntbdEndpoint" +
            ", id " + endpoint.id() +
            ", remoteAddress " + endpoint.remoteAddress() +
            ", creationTime " + endpoint.getCreatedTime() +
            ", hashCode " + endpoint.hashCode() +
            "]";
    }
}
}
class RntbdServiceEndpoint implements RntbdEndpoint {
private static final String TAG_NAME = RntbdServiceEndpoint.class.getSimpleName();
// Grace period (ns) passed to Netty's graceful shutdown.
private static final long QUIET_PERIOD = 2_000_000_000L;
private static final AtomicLong instanceCount = new AtomicLong();
private static final Logger logger = LoggerFactory.getLogger(RntbdServiceEndpoint.class);
private static final AdaptiveRecvByteBufAllocator receiveBufferAllocator = new AdaptiveRecvByteBufAllocator();
private final RntbdClientChannelPool channelPool;
private final AtomicBoolean closed;
// In-flight request count for this endpoint; compared against maxConcurrentRequests.
private final AtomicInteger concurrentRequests;
private final long id;
private final AtomicLong lastRequestNanoTime;
private final AtomicLong lastSuccessfulRequestNanoTime;
private final Instant createdTime;
private final RntbdMetrics metrics;
private final Provider provider;
private final URI serverKey;
private final SocketAddress remoteAddress;
private final RntbdRequestTimer requestTimer;
private final Tag tag;
private final int maxConcurrentRequests;
private final boolean channelAcquisitionContextEnabled;
// Null unless an address resolver is present and endpoint rediscovery is enabled.
private final RntbdConnectionStateListener connectionStateListener;

/**
 * Creates an endpoint for {@code physicalAddress}, building its channel pool on the
 * shared event-loop group. Instantiated only via {@code Provider.get(URI)}.
 */
private RntbdServiceEndpoint(
    final Provider provider,
    final Config config,
    final EventLoopGroup group,
    final RntbdRequestTimer timer,
    final URI physicalAddress) {

    this.serverKey = RntbdUtils.getServerKey(physicalAddress);
    final Bootstrap bootstrap = this.getBootStrap(group, config);
    this.createdTime = Instant.now();
    this.channelPool = new RntbdClientChannelPool(this, bootstrap, config);
    this.remoteAddress = bootstrap.config().remoteAddress();
    this.concurrentRequests = new AtomicInteger();
    // Both "last request" clocks start at construction time.
    this.lastRequestNanoTime = new AtomicLong(System.nanoTime());
    this.lastSuccessfulRequestNanoTime = new AtomicLong(System.nanoTime());
    this.closed = new AtomicBoolean();
    this.requestTimer = timer;
    this.tag = Tag.of(TAG_NAME, RntbdMetrics.escape(this.remoteAddress.toString()));
    this.id = instanceCount.incrementAndGet();
    this.provider = provider;
    this.metrics = new RntbdMetrics(provider.transportClient, this);
    this.maxConcurrentRequests = config.maxConcurrentRequestsPerEndpoint();
    this.connectionStateListener = this.provider.addressResolver != null && config.isConnectionEndpointRediscoveryEnabled()
        ? new RntbdConnectionStateListener(this.provider.addressResolver, this)
        : null;
    this.channelAcquisitionContextEnabled = config.isChannelAcquisitionContextEnabled();
}
/**
* @return approximate number of acquired channels.
*/
@Override
public int channelsAcquiredMetric() {
return this.channelPool.channelsAcquiredMetrics();
}
/**
* @return approximate number of available channels.
*/
@Override
public int channelsAvailableMetric() {
return this.channelPool.channelsAvailableMetrics();
}
@Override
public int concurrentRequests() {
return this.concurrentRequests.get();
}
@Override
public int gettingEstablishedConnectionsMetrics() {
return this.channelPool.attemptingToConnectMetrics();
}
@Override
public long id() {
return this.id;
}
@Override
public boolean isClosed() {
return this.closed.get();
}
@Override
public int maxChannels() {
return this.channelPool.channels(true);
}
public long lastRequestNanoTime() {
return this.lastRequestNanoTime.get();
}
@Override
public long lastSuccessfulRequestNanoTime() {
return this.lastSuccessfulRequestNanoTime.get();
}
@Override
public int channelsMetrics() {
return this.channelPool.channels(true);
}
@Override
public int executorTaskQueueMetrics() {
return this.channelPool.executorTaskQueueMetrics();
}
public Instant getCreatedTime() {
return this.createdTime;
}
@Override
public SocketAddress remoteAddress() {
return this.remoteAddress;
}
@Override
public URI serverKey() { return this.serverKey; }
@Override
public int requestQueueLength() {
return this.channelPool.requestQueueLength();
}
@Override
public Tag tag() {
return this.tag;
}
@Override
public long usedDirectMemory() {
return this.channelPool.usedDirectMemory();
}
@Override
public long usedHeapMemory() {
return this.channelPool.usedHeapMemory();
}
@Override
public void close() {
if (this.closed.compareAndSet(false, true)) {
this.provider.evict(this);
this.channelPool.close();
}
}
/**
 * Submits {@code args} to this endpoint.
 * <p>
 * Fails fast with a pre-completed record when the in-flight count already exceeds
 * {@code maxConcurrentRequests}; otherwise writes the request and registers completion
 * bookkeeping (metrics, concurrency counter, {@code onResponse}).
 *
 * @param args the request to send.
 * @return the request record tracking this request's outcome.
 */
public RntbdRequestRecord request(final RntbdRequestArgs args) {
    this.throwIfClosed();
    int concurrentRequestSnapshot = this.concurrentRequests.incrementAndGet();
    RntbdEndpointStatistics stat = endpointMetricsSnapshot(concurrentRequestSnapshot);
    if (concurrentRequestSnapshot > this.maxConcurrentRequests) {
        try {
            FailFastRntbdRequestRecord requestRecord = FailFastRntbdRequestRecord.createAndFailFast(
                args,
                concurrentRequestSnapshot,
                metrics,
                remoteAddress);
            requestRecord.serviceEndpointStatistics(stat);
            return requestRecord;
        }
        finally {
            // The increment above still counted this rejected request; undo it.
            concurrentRequests.decrementAndGet();
        }
    }
    this.lastRequestNanoTime.set(args.nanoTimeCreated());
    final RntbdRequestRecord record = this.write(args);
    record.serviceEndpointStatistics(stat);
    record.whenComplete((response, error) -> {
        this.concurrentRequests.decrementAndGet();
        this.metrics.markComplete(record);
        onResponse(error, record);
    });
    return record;
}
/**
 * Bookkeeping hook invoked when a request record completes.
 * <p>
 * Refreshes {@code lastSuccessfulRequestNanoTime} on success, notifies the optional
 * {@link RntbdConnectionStateListener} on failure, and also treats CONFLICT (409) and
 * NOTFOUND (404) {@link CosmosException}s as proof of a responsive endpoint.
 *
 * @param exception the completion error, or {@code null} if the request succeeded.
 * @param record the completed request record.
 */
private void onResponse(Throwable exception, RntbdRequestRecord record) {
    if (exception == null) {
        // Plain success: the endpoint is demonstrably healthy.
        this.lastSuccessfulRequestNanoTime.set(System.nanoTime());
        return;
    }
    if (this.connectionStateListener != null) {
        this.connectionStateListener.onException(record.args().serviceRequest(), exception);
    }
    if (!(exception instanceof CosmosException)) {
        return;
    }
    final int statusCode = ((CosmosException) exception).getStatusCode();
    // 409/404 also refresh the success timestamp — NOTE(review): presumably because a
    // service-generated status proves the endpoint round-tripped the request; confirm.
    if (statusCode == HttpConstants.StatusCodes.CONFLICT
        || statusCode == HttpConstants.StatusCodes.NOTFOUND) {
        this.lastSuccessfulRequestNanoTime.set(System.nanoTime());
    }
}
private RntbdEndpointStatistics endpointMetricsSnapshot(int concurrentRequestSnapshot) {
RntbdEndpointStatistics stats = new RntbdEndpointStatistics()
.availableChannels(this.channelsAvailableMetric())
.acquiredChannels(this.channelsAcquiredMetric())
.executorTaskQueueSize(this.executorTaskQueueMetrics())
.lastSuccessfulRequestNanoTime(this.lastSuccessfulRequestNanoTime())
.createdTime(this.createdTime)
.lastRequestNanoTime(this.lastRequestNanoTime())
.closed(this.closed.get())
.inflightRequests(concurrentRequestSnapshot);
return stats;
}
@Override
public String toString() {
return RntbdObjectMapper.toString(this);
}
private void ensureSuccessWhenReleasedToPool(Channel channel, Future<Void> released) {
if (released.isSuccess()) {
logger.debug("\n [{}]\n {}\n release succeeded", this, channel);
} else {
logger.debug("\n [{}]\n {}\n release failed due to {}", this, channel, released.cause());
}
}
private void releaseToPool(final Channel channel) {
logger.debug("\n [{}]\n {}\n RELEASE", this, channel);
final Future<Void> released = this.channelPool.release(channel);
if (logger.isDebugEnabled()) {
if (released.isDone()) {
ensureSuccessWhenReleasedToPool(channel, released);
} else {
released.addListener(ignored -> ensureSuccessWhenReleasedToPool(channel, released));
}
}
}
private void throwIfClosed() {
if (this.closed.get()) {
throw new TransportException(lenientFormat("%s is closed", this), new IllegalStateException());
}
}
private RntbdRequestRecord write(final RntbdRequestArgs requestArgs) {
final RntbdRequestRecord requestRecord = new AsyncRntbdRequestRecord(requestArgs, this.requestTimer);
requestRecord.channelAcquisitionContextEnabled(this.channelAcquisitionContextEnabled);
requestRecord.stage(RntbdRequestRecord.Stage.CHANNEL_ACQUISITION_STARTED);
final Future<Channel> connectedChannel = this.channelPool.acquire(requestRecord.getChannelAcquisitionTimeline());
logger.debug("\n [{}]\n {}\n WRITE WHEN CONNECTED {}", this, requestArgs, connectedChannel);
if (connectedChannel.isDone()) {
return writeWhenConnected(requestRecord, connectedChannel);
} else {
connectedChannel.addListener(ignored -> writeWhenConnected(requestRecord, connectedChannel));
}
return requestRecord;
}
private RntbdRequestRecord writeWhenConnected(
final RntbdRequestRecord requestRecord, final Future<? super Channel> connected) {
if (connected.isSuccess()) {
final Channel channel = (Channel) connected.getNow();
assert channel != null : "impossible";
this.releaseToPool(channel);
requestRecord.channelTaskQueueLength(RntbdUtils.tryGetExecutorTaskQueueSize(channel.eventLoop()));
channel.write(requestRecord.stage(RntbdRequestRecord.Stage.PIPELINED));
return requestRecord;
}
final RntbdRequestArgs requestArgs = requestRecord.args();
final UUID activityId = requestArgs.activityId();
final Throwable cause = connected.cause();
if (connected.isCancelled()) {
logger.debug("\n [{}]\n {}\n write cancelled: {}", this, requestArgs, cause);
requestRecord.cancel(true);
} else {
logger.debug("\n [{}]\n {}\n write failed due to {} ", this, requestArgs, cause);
final String reason = cause.toString();
final GoneException goneException = new GoneException(
lenientFormat("failed to establish connection to %s due to %s", this.remoteAddress, reason),
cause instanceof Exception ? (Exception) cause : new IOException(reason, cause),
ImmutableMap.of(HttpHeaders.ACTIVITY_ID, activityId.toString()),
requestArgs.replicaPath()
);
BridgeInternal.setRequestHeaders(goneException, requestArgs.serviceRequest().getHeaders());
requestRecord.completeExceptionally(goneException);
}
return requestRecord;
}
static final class JsonSerializer extends StdSerializer<RntbdServiceEndpoint> {
private static final long serialVersionUID = -5764954918168771152L;
public JsonSerializer() {
super(RntbdServiceEndpoint.class);
}
@Override
public void serialize(RntbdServiceEndpoint value, JsonGenerator generator, SerializerProvider provider)
throws IOException {
final RntbdTransportClient transportClient = value.provider.transportClient;
generator.writeStartObject();
generator.writeNumberField("id", value.id);
generator.writeBooleanField("closed", value.isClosed());
generator.writeNumberField("concurrentRequests", value.concurrentRequests());
generator.writeStringField("remoteAddress", value.remoteAddress.toString());
generator.writeObjectField("channelPool", value.channelPool);
generator.writeObjectFieldStart("transportClient");
generator.writeNumberField("id", transportClient.id());
generator.writeBooleanField("closed", transportClient.isClosed());
generator.writeNumberField("endpointCount", transportClient.endpointCount());
generator.writeNumberField("endpointEvictionCount", transportClient.endpointEvictionCount());
generator.writeEndObject();
generator.writeEndObject();
}
}
public static final class Provider implements RntbdEndpoint.Provider {
private static final Logger logger = LoggerFactory.getLogger(Provider.class);
private final AtomicBoolean closed;
private final Config config;
private final ConcurrentHashMap<String, RntbdEndpoint> endpoints;
private final EventLoopGroup eventLoopGroup;
private final AtomicInteger evictions;
private final RntbdEndpointMonitoringProvider monitoring;
private final RntbdRequestTimer requestTimer;
private final RntbdTransportClient transportClient;
private final IAddressResolver addressResolver;
public Provider(
final RntbdTransportClient transportClient,
final Options options,
final SslContext sslContext,
final IAddressResolver addressResolver) {
checkNotNull(transportClient, "expected non-null provider");
checkNotNull(options, "expected non-null options");
checkNotNull(sslContext, "expected non-null sslContext");
final LogLevel wireLogLevel;
if (logger.isDebugEnabled()) {
wireLogLevel = LogLevel.TRACE;
} else {
wireLogLevel = null;
}
this.addressResolver = addressResolver;
this.transportClient = transportClient;
this.config = new Config(options, sslContext, wireLogLevel);
this.requestTimer = new RntbdRequestTimer(
config.requestTimeoutInNanos(),
config.requestTimerResolutionInNanos());
this.eventLoopGroup = this.getEventLoopGroup(options);
this.endpoints = new ConcurrentHashMap<>();
this.evictions = new AtomicInteger();
this.closed = new AtomicBoolean();
this.monitoring = new RntbdEndpointMonitoringProvider(this);
this.monitoring.init();
}
private EventLoopGroup getEventLoopGroup(Options options) {
checkNotNull(options, "expected non-null options");
RntbdLoop rntbdEventLoop = RntbdLoopNativeDetector.getRntbdLoop(options.preferTcpNative());
DefaultThreadFactory threadFactory =
new DefaultThreadFactory(
"cosmos-rntbd-" + rntbdEventLoop.getName(),
true,
options.ioThreadPriority());
return rntbdEventLoop.newEventLoopGroup(options.threadCount(), threadFactory);
}
@Override
public void close() {
if (this.closed.compareAndSet(false, true)) {
this.monitoring.close();
for (final RntbdEndpoint endpoint : this.endpoints.values()) {
endpoint.close();
}
this.eventLoopGroup.shutdownGracefully(QUIET_PERIOD, this.config.shutdownTimeoutInNanos(), NANOSECONDS)
.addListener(future -> {
this.requestTimer.close();
if (future.isSuccess()) {
logger.debug("\n [{}]\n closed endpoints", this);
return;
}
logger.error("\n [{}]\n failed to close endpoints due to ", this, future.cause());
});
return;
}
logger.debug("\n [{}]\n already closed", this);
}
@Override
public Config config() {
return this.config;
}
@Override
public int count() {
return this.endpoints.size();
}
@Override
public int evictions() {
return this.evictions.get();
}
@Override
public RntbdEndpoint get(final URI physicalAddress) {
return endpoints.computeIfAbsent(physicalAddress.getAuthority(), authority -> new RntbdServiceEndpoint(
this,
this.config,
this.eventLoopGroup,
this.requestTimer,
physicalAddress));
}
@Override
public IAddressResolver getAddressResolver() {
return this.addressResolver;
}
@Override
public Stream<RntbdEndpoint> list() {
return this.endpoints.values().stream();
}
private void evict(final RntbdEndpoint endpoint) {
if (this.endpoints.remove(endpoint.serverKey().getAuthority()) != null) {
this.evictions.incrementAndGet();
}
}
}
public static class RntbdEndpointMonitoringProvider implements AutoCloseable {
private final Logger logger = LoggerFactory.getLogger(RntbdEndpointMonitoringProvider.class);
private static final EventExecutor monitoringRntbdChannelPool = new DefaultEventExecutor(new RntbdThreadFactory(
"monitoring-rntbd-endpoints",
true,
Thread.MIN_PRIORITY));
private static final Duration MONITORING_PERIOD = Duration.ofSeconds(60);
private final Provider provider;
private final static int MAX_TASK_LIMIT = 5_000;
private ScheduledFuture<?> future;
RntbdEndpointMonitoringProvider(Provider provider) {
this.provider = provider;
}
synchronized void init() {
logger.info("Starting RntbdClientChannelPoolMonitoringProvider ...");
this.future = RntbdEndpointMonitoringProvider.monitoringRntbdChannelPool.scheduleAtFixedRate(() -> {
logAllPools();
}, 0, MONITORING_PERIOD.toMillis(), TimeUnit.MILLISECONDS);
}
@Override
public synchronized void close() {
logger.info("Shutting down RntbdClientChannelPoolMonitoringProvider ...");
this.future.cancel(false);
this.future = null;
}
synchronized void logAllPools() {
try {
logger.debug("Total number of RntbdClientChannelPool [{}].", provider.endpoints.size());
for (RntbdEndpoint endpoint : provider.endpoints.values()) {
logEndpoint(endpoint);
}
} catch (Exception e) {
logger.error("monitoring unexpected failure", e);
}
}
private void logEndpoint(RntbdEndpoint endpoint) {
if (this.logger.isWarnEnabled() &&
(endpoint.executorTaskQueueMetrics() > MAX_TASK_LIMIT ||
endpoint.requestQueueLength() > MAX_TASK_LIMIT ||
endpoint.gettingEstablishedConnectionsMetrics() > 0 ||
endpoint.channelsMetrics() > endpoint.maxChannels())) {
logger.warn("RntbdEndpoint Identifier {}, Stat {}", getPoolId(endpoint), getPoolStat(endpoint));
} else if (this.logger.isDebugEnabled()) {
logger.debug("RntbdEndpoint Identifier {}, Stat {}", getPoolId(endpoint), getPoolStat(endpoint));
}
}
private String getPoolStat(RntbdEndpoint endpoint) {
return "[ "
+ "poolTaskExecutorSize " + endpoint.executorTaskQueueMetrics()
+ ", lastRequestNanoTime " + Instant.now().minusNanos(
System.nanoTime() - endpoint.lastRequestNanoTime())
+ ", connecting " + endpoint.gettingEstablishedConnectionsMetrics()
+ ", acquiredChannel " + endpoint.channelsAcquiredMetric()
+ ", availableChannel " + endpoint.channelsAvailableMetric()
+ ", pendingAcquisitionSize " + endpoint.requestQueueLength()
+ ", closed " + endpoint.isClosed()
+ " ]";
}
private String getPoolId(RntbdEndpoint endpoint) {
if (endpoint == null) {
return "null";
}
return "[RntbdEndpoint" +
", id " + endpoint.id() +
", remoteAddress " + endpoint.remoteAddress() +
", creationTime " + endpoint.getCreatedTime() +
", hashCode " + endpoint.hashCode() +
"]";
}
}
}
// Reviewer note: What's the test coverage story?
private Bootstrap getBootStrap(EventLoopGroup eventLoopGroup, Config config) {
checkNotNull(eventLoopGroup, "expected non-null eventLoopGroup");
checkNotNull(config, "expected non-null config");
Bootstrap bootstrap = new Bootstrap()
.group(eventLoopGroup)
.option(ChannelOption.ALLOCATOR, config.allocator())
.option(ChannelOption.AUTO_READ, true)
.option(ChannelOption.CONNECT_TIMEOUT_MILLIS, config.connectTimeoutInMillis())
.option(ChannelOption.RCVBUF_ALLOCATOR, receiveBufferAllocator)
.option(ChannelOption.SO_KEEPALIVE, true)
.remoteAddress(this.serverKey.getHost(), this.serverKey.getPort());
if (eventLoopGroup instanceof EpollEventLoopGroup) {
bootstrap.channel(EpollSocketChannel.class)
.option(EpollChannelOption.TCP_KEEPINTVL, 1)
.option(EpollChannelOption.TCP_KEEPIDLE, 30);
} else {
bootstrap.channel(NioSocketChannel.class);
}
return bootstrap;
}
private Bootstrap getBootStrap(EventLoopGroup eventLoopGroup, Config config) {
checkNotNull(eventLoopGroup, "expected non-null eventLoopGroup");
checkNotNull(config, "expected non-null config");
RntbdLoop rntbdLoop = RntbdLoopNativeDetector.getRntbdLoop(config.preferTcpNative());
Bootstrap bootstrap = new Bootstrap()
.group(eventLoopGroup)
.channel(rntbdLoop.getChannelClass())
.option(ChannelOption.ALLOCATOR, config.allocator())
.option(ChannelOption.AUTO_READ, true)
.option(ChannelOption.CONNECT_TIMEOUT_MILLIS, config.connectTimeoutInMillis())
.option(ChannelOption.RCVBUF_ALLOCATOR, receiveBufferAllocator)
.option(ChannelOption.SO_KEEPALIVE, true)
.remoteAddress(this.serverKey.getHost(), this.serverKey.getPort());
if (rntbdLoop instanceof RntbdLoopEpoll) {
bootstrap
.option(EpollChannelOption.TCP_KEEPINTVL, config.tcpKeepIntvl())
.option(EpollChannelOption.TCP_KEEPIDLE, config.tcpKeepIdle());
}
return bootstrap;
}
class RntbdServiceEndpoint implements RntbdEndpoint {
private static final String TAG_NAME = RntbdServiceEndpoint.class.getSimpleName();
private static final long QUIET_PERIOD = 2_000_000_000L;
private static final AtomicLong instanceCount = new AtomicLong();
private static final Logger logger = LoggerFactory.getLogger(RntbdServiceEndpoint.class);
private static final AdaptiveRecvByteBufAllocator receiveBufferAllocator = new AdaptiveRecvByteBufAllocator();
private final RntbdClientChannelPool channelPool;
private final AtomicBoolean closed;
private final AtomicInteger concurrentRequests;
private final long id;
private final AtomicLong lastRequestNanoTime;
private final AtomicLong lastSuccessfulRequestNanoTime;
private final Instant createdTime;
private final RntbdMetrics metrics;
private final Provider provider;
private final URI serverKey;
private final SocketAddress remoteAddress;
private final RntbdRequestTimer requestTimer;
private final Tag tag;
private final int maxConcurrentRequests;
private final boolean channelAcquisitionContextEnabled;
private final RntbdConnectionStateListener connectionStateListener;
private RntbdServiceEndpoint(
final Provider provider,
final Config config,
final EventLoopGroup group,
final RntbdRequestTimer timer,
final URI physicalAddress) {
this.serverKey = RntbdUtils.getServerKey(physicalAddress);
final Bootstrap bootstrap = this.getBootStrap(group, config);
this.createdTime = Instant.now();
this.channelPool = new RntbdClientChannelPool(this, bootstrap, config);
this.remoteAddress = bootstrap.config().remoteAddress();
this.concurrentRequests = new AtomicInteger();
this.lastRequestNanoTime = new AtomicLong(System.nanoTime());
this.lastSuccessfulRequestNanoTime = new AtomicLong(System.nanoTime());
this.closed = new AtomicBoolean();
this.requestTimer = timer;
this.tag = Tag.of(TAG_NAME, RntbdMetrics.escape(this.remoteAddress.toString()));
this.id = instanceCount.incrementAndGet();
this.provider = provider;
this.metrics = new RntbdMetrics(provider.transportClient, this);
this.maxConcurrentRequests = config.maxConcurrentRequestsPerEndpoint();
this.connectionStateListener = this.provider.addressResolver != null && config.isConnectionEndpointRediscoveryEnabled()
? new RntbdConnectionStateListener(this.provider.addressResolver, this)
: null;
this.channelAcquisitionContextEnabled = config.isChannelAcquisitionContextEnabled();
}
/**
 * @return approximate number of acquired channels.
 */
@Override
public int channelsAcquiredMetric() {
    return this.channelPool.channelsAcquiredMetrics();
}

/**
 * @return approximate number of available channels.
 */
@Override
public int channelsAvailableMetric() {
    return this.channelPool.channelsAvailableMetrics();
}

/** @return number of requests currently in flight on this endpoint. */
@Override
public int concurrentRequests() {
    return this.concurrentRequests.get();
}

/** @return approximate number of connections currently being established. */
@Override
public int gettingEstablishedConnectionsMetrics() {
    return this.channelPool.attemptingToConnectMetrics();
}

/** @return unique id of this endpoint instance (monotonically assigned). */
@Override
public long id() {
    return this.id;
}

/** @return whether {@link #close()} has been invoked. */
@Override
public boolean isClosed() {
    return this.closed.get();
}

// NOTE(review): returns the same value as channelsMetrics() below — confirm intended.
@Override
public int maxChannels() {
    return this.channelPool.channels(true);
}

/** @return nanoTime at which the most recent request was routed to this endpoint. */
public long lastRequestNanoTime() {
    return this.lastRequestNanoTime.get();
}

/** @return nanoTime of the most recent response treated as "service reached" (see onResponse). */
@Override
public long lastSuccessfulRequestNanoTime() {
    return this.lastSuccessfulRequestNanoTime.get();
}

/** @return total number of channels tracked by the pool. */
@Override
public int channelsMetrics() {
    return this.channelPool.channels(true);
}

/** @return approximate size of the channel pool executor's task queue. */
@Override
public int executorTaskQueueMetrics() {
    return this.channelPool.executorTaskQueueMetrics();
}

/** @return instant at which this endpoint was created. */
public Instant getCreatedTime() {
    return this.createdTime;
}

/** @return remote socket address of the backend replica served by this endpoint. */
@Override
public SocketAddress remoteAddress() {
    return this.remoteAddress;
}

/** @return server key URI used to index this endpoint in the provider map. */
@Override
public URI serverKey() { return this.serverKey; }

/** @return number of pending channel acquisitions queued in the pool. */
@Override
public int requestQueueLength() {
    return this.channelPool.requestQueueLength();
}

/** @return metrics tag identifying this endpoint by its escaped remote address. */
@Override
public Tag tag() {
    return this.tag;
}

/** @return direct memory usage reported by the channel pool. */
@Override
public long usedDirectMemory() {
    return this.channelPool.usedDirectMemory();
}

/** @return heap memory usage reported by the channel pool. */
@Override
public long usedHeapMemory() {
    return this.channelPool.usedHeapMemory();
}

/**
 * Closes this endpoint exactly once: evicts it from the provider's endpoint map,
 * then closes the underlying channel pool. Subsequent calls are no-ops.
 */
@Override
public void close() {
    if (this.closed.compareAndSet(false, true)) {
        this.provider.evict(this);
        this.channelPool.close();
    }
}
/**
 * Issues an RNTBD request on this endpoint, enforcing the per-endpoint concurrency limit.
 * Returns immediately; the returned record completes asynchronously.
 *
 * @param args the request arguments.
 * @return a record tracking the in-flight request (may already be failed-fast).
 */
public RntbdRequestRecord request(final RntbdRequestArgs args) {

    this.throwIfClosed();

    // Admission control: count this request before deciding whether to accept it.
    int concurrentRequestSnapshot = this.concurrentRequests.incrementAndGet();
    RntbdEndpointStatistics stat = endpointMetricsSnapshot(concurrentRequestSnapshot);

    if (concurrentRequestSnapshot > this.maxConcurrentRequests) {
        try {
            // Over the concurrency limit: fail fast without acquiring a channel.
            FailFastRntbdRequestRecord requestRecord = FailFastRntbdRequestRecord.createAndFailFast(
                args,
                concurrentRequestSnapshot,
                metrics,
                remoteAddress);
            requestRecord.serviceEndpointStatistics(stat);
            return requestRecord;
        }
        finally {
            // The fail-fast record never reaches the whenComplete below, so undo the count here.
            concurrentRequests.decrementAndGet();
        }
    }

    this.lastRequestNanoTime.set(args.nanoTimeCreated());

    final RntbdRequestRecord record = this.write(args);
    record.serviceEndpointStatistics(stat);

    record.whenComplete((response, error) -> {
        // Completion (success or failure) releases the concurrency slot and records metrics.
        this.concurrentRequests.decrementAndGet();
        this.metrics.markComplete(record);
        onResponse(error, record);
    });

    return record;
}
/**
 * Post-completion bookkeeping for a request: refreshes the endpoint health timestamp
 * and notifies the connection-state listener on failure.
 *
 * A CONFLICT or NOTFOUND error still proves the service was reached, so those also
 * refresh {@code lastSuccessfulRequestNanoTime}.
 */
private void onResponse(Throwable exception, RntbdRequestRecord record) {

    if (exception == null) {
        this.lastSuccessfulRequestNanoTime.set(System.nanoTime());
        return;
    }

    RntbdConnectionStateListener stateListener = this.connectionStateListener;
    if (stateListener != null) {
        stateListener.onException(record.args().serviceRequest(), exception);
    }

    if (!(exception instanceof CosmosException)) {
        return;
    }

    int statusCode = ((CosmosException) exception).getStatusCode();
    boolean serviceWasReached =
        statusCode == HttpConstants.StatusCodes.CONFLICT
            || statusCode == HttpConstants.StatusCodes.NOTFOUND;

    if (serviceWasReached) {
        this.lastSuccessfulRequestNanoTime.set(System.nanoTime());
    }
}
/**
 * Captures a point-in-time snapshot of this endpoint's health counters; the snapshot is
 * attached to each request record for diagnostics.
 *
 * @param concurrentRequestSnapshot number of in-flight requests including the current one.
 * @return a freshly-populated statistics object.
 */
private RntbdEndpointStatistics endpointMetricsSnapshot(int concurrentRequestSnapshot) {
    RntbdEndpointStatistics stats = new RntbdEndpointStatistics()
        .availableChannels(this.channelsAvailableMetric())
        .acquiredChannels(this.channelsAcquiredMetric())
        .executorTaskQueueSize(this.executorTaskQueueMetrics())
        .lastSuccessfulRequestNanoTime(this.lastSuccessfulRequestNanoTime())
        .createdTime(this.createdTime)
        .lastRequestNanoTime(this.lastRequestNanoTime())
        .closed(this.closed.get())
        .inflightRequests(concurrentRequestSnapshot);
    return stats;
}
@Override
public String toString() {
    // Serialized via the JsonSerializer nested class (RntbdObjectMapper dispatches on type).
    return RntbdObjectMapper.toString(this);
}

/** Logs the outcome of returning a channel to the pool; debug diagnostics only. */
private void ensureSuccessWhenReleasedToPool(Channel channel, Future<Void> released) {
    if (released.isSuccess()) {
        logger.debug("\n [{}]\n {}\n release succeeded", this, channel);
    } else {
        logger.debug("\n [{}]\n {}\n release failed due to {}", this, channel, released.cause());
    }
}

/** Returns {@code channel} to the pool and, when debug logging is on, logs the release result. */
private void releaseToPool(final Channel channel) {

    logger.debug("\n [{}]\n {}\n RELEASE", this, channel);
    final Future<Void> released = this.channelPool.release(channel);

    if (logger.isDebugEnabled()) {
        if (released.isDone()) {
            ensureSuccessWhenReleasedToPool(channel, released);
        } else {
            // Release still pending: log asynchronously when it resolves.
            released.addListener(ignored -> ensureSuccessWhenReleasedToPool(channel, released));
        }
    }
}
/**
 * Guards request admission: rejects new work once this endpoint has been closed.
 *
 * @throws TransportException when {@link #close()} has already been invoked.
 */
private void throwIfClosed() {
    if (!this.closed.get()) {
        return;
    }
    throw new TransportException(lenientFormat("%s is closed", this), new IllegalStateException());
}
/**
 * Acquires a channel from the pool and writes the request once acquisition resolves.
 * Returns immediately; the record completes asynchronously via writeWhenConnected.
 */
private RntbdRequestRecord write(final RntbdRequestArgs requestArgs) {

    final RntbdRequestRecord requestRecord = new AsyncRntbdRequestRecord(requestArgs, this.requestTimer);
    requestRecord.channelAcquisitionContextEnabled(this.channelAcquisitionContextEnabled);
    requestRecord.stage(RntbdRequestRecord.Stage.CHANNEL_ACQUISITION_STARTED);

    final Future<Channel> connectedChannel = this.channelPool.acquire(requestRecord.getChannelAcquisitionTimeline());

    logger.debug("\n [{}]\n {}\n WRITE WHEN CONNECTED {}", this, requestArgs, connectedChannel);

    if (connectedChannel.isDone()) {
        // Fast path: channel already available; write synchronously on this thread.
        return writeWhenConnected(requestRecord, connectedChannel);
    } else {
        connectedChannel.addListener(ignored -> writeWhenConnected(requestRecord, connectedChannel));
    }

    return requestRecord;
}
/**
 * Completes the pending write once channel acquisition resolves.
 *
 * On success the channel is immediately released back to the pool (the pipeline tracks the
 * request from here on) and the request is written. On cancellation the record is cancelled.
 * On failure the record is completed with a GoneException wrapping the connect failure —
 * presumably so higher layers re-resolve/retry the replica; confirm against the retry policy.
 */
private RntbdRequestRecord writeWhenConnected(
    final RntbdRequestRecord requestRecord, final Future<? super Channel> connected) {

    if (connected.isSuccess()) {
        final Channel channel = (Channel) connected.getNow();
        assert channel != null : "impossible";
        this.releaseToPool(channel);
        requestRecord.channelTaskQueueLength(RntbdUtils.tryGetExecutorTaskQueueSize(channel.eventLoop()));
        channel.write(requestRecord.stage(RntbdRequestRecord.Stage.PIPELINED));
        return requestRecord;
    }

    final RntbdRequestArgs requestArgs = requestRecord.args();
    final UUID activityId = requestArgs.activityId();
    final Throwable cause = connected.cause();

    if (connected.isCancelled()) {
        logger.debug("\n [{}]\n {}\n write cancelled: {}", this, requestArgs, cause);
        requestRecord.cancel(true);
    } else {
        logger.debug("\n [{}]\n {}\n write failed due to {} ", this, requestArgs, cause);
        final String reason = cause.toString();
        // Connect failure surfaces as GoneException carrying the activity id and replica path.
        final GoneException goneException = new GoneException(
            lenientFormat("failed to establish connection to %s due to %s", this.remoteAddress, reason),
            cause instanceof Exception ? (Exception) cause : new IOException(reason, cause),
            ImmutableMap.of(HttpHeaders.ACTIVITY_ID, activityId.toString()),
            requestArgs.replicaPath()
        );
        BridgeInternal.setRequestHeaders(goneException, requestArgs.serviceRequest().getHeaders());
        requestRecord.completeExceptionally(goneException);
    }

    return requestRecord;
}
/** Jackson serializer producing the diagnostic JSON emitted by {@code toString()}. */
static final class JsonSerializer extends StdSerializer<RntbdServiceEndpoint> {

    private static final long serialVersionUID = -5764954918168771152L;

    public JsonSerializer() {
        super(RntbdServiceEndpoint.class);
    }

    @Override
    public void serialize(RntbdServiceEndpoint value, JsonGenerator generator, SerializerProvider provider)
        throws IOException {
        final RntbdTransportClient transportClient = value.provider.transportClient;
        generator.writeStartObject();
        generator.writeNumberField("id", value.id);
        generator.writeBooleanField("closed", value.isClosed());
        generator.writeNumberField("concurrentRequests", value.concurrentRequests());
        generator.writeStringField("remoteAddress", value.remoteAddress.toString());
        generator.writeObjectField("channelPool", value.channelPool);
        // Nested summary of the owning transport client.
        generator.writeObjectFieldStart("transportClient");
        generator.writeNumberField("id", transportClient.id());
        generator.writeBooleanField("closed", transportClient.isClosed());
        generator.writeNumberField("endpointCount", transportClient.endpointCount());
        generator.writeNumberField("endpointEvictionCount", transportClient.endpointEvictionCount());
        generator.writeEndObject();
        generator.writeEndObject();
    }
}
/**
 * Creates and caches one {@link RntbdServiceEndpoint} per backend authority, sharing a
 * single event-loop group, config, and request timer across all of them.
 */
public static final class Provider implements RntbdEndpoint.Provider {

    private static final Logger logger = LoggerFactory.getLogger(Provider.class);

    private final AtomicBoolean closed;
    private final Config config;
    // Keyed by the physical address authority (host:port).
    private final ConcurrentHashMap<String, RntbdEndpoint> endpoints;
    private final EventLoopGroup eventLoopGroup;
    private final AtomicInteger evictions;
    private final RntbdEndpointMonitoringProvider monitoring;
    private final RntbdRequestTimer requestTimer;
    private final RntbdTransportClient transportClient;
    private final IAddressResolver addressResolver;

    /**
     * @param transportClient owning transport client (used for diagnostics serialization).
     * @param options transport options (thread count, timeouts, priorities).
     * @param sslContext TLS context shared by all endpoints.
     * @param addressResolver resolver used for connection-state rediscovery; may be null.
     */
    public Provider(
        final RntbdTransportClient transportClient,
        final Options options,
        final SslContext sslContext,
        final IAddressResolver addressResolver) {

        checkNotNull(transportClient, "expected non-null provider");
        checkNotNull(options, "expected non-null options");
        checkNotNull(sslContext, "expected non-null sslContext");

        final LogLevel wireLogLevel;

        // Wire-level (TRACE) logging only when debug logging is enabled for this class.
        if (logger.isDebugEnabled()) {
            wireLogLevel = LogLevel.TRACE;
        } else {
            wireLogLevel = null;
        }

        this.addressResolver = addressResolver;
        this.transportClient = transportClient;
        this.config = new Config(options, sslContext, wireLogLevel);
        this.requestTimer = new RntbdRequestTimer(
            config.requestTimeoutInNanos(),
            config.requestTimerResolutionInNanos());
        this.eventLoopGroup = this.getEventLoopGroup(options);
        this.endpoints = new ConcurrentHashMap<>();
        this.evictions = new AtomicInteger();
        this.closed = new AtomicBoolean();
        this.monitoring = new RntbdEndpointMonitoringProvider(this);
        this.monitoring.init();
    }

    // Uses the epoll native transport when available (Linux); otherwise falls back to NIO.
    // NOTE(review): no kqueue support for macOS here — macOS always takes the NIO path.
    private EventLoopGroup getEventLoopGroup(Options options) {
        checkNotNull(options, "expected non-null options");
        final DefaultThreadFactory threadFactory =
            new DefaultThreadFactory(
                "cosmos-rntbd-nio",
                true,
                options.ioThreadPriority());
        if (Epoll.isAvailable()) {
            return new EpollEventLoopGroup(options.threadCount(), threadFactory);
        }
        return new NioEventLoopGroup(options.threadCount(), threadFactory);
    }

    /**
     * Closes the provider once: stops monitoring, closes every endpoint, then shuts the
     * event-loop group down gracefully and finally releases the shared request timer.
     */
    @Override
    public void close() {

        if (this.closed.compareAndSet(false, true)) {

            this.monitoring.close();

            for (final RntbdEndpoint endpoint : this.endpoints.values()) {
                endpoint.close();
            }

            this.eventLoopGroup.shutdownGracefully(QUIET_PERIOD, this.config.shutdownTimeoutInNanos(), NANOSECONDS)
                .addListener(future -> {
                    // Timer is closed only after the loop group is fully shut down.
                    this.requestTimer.close();
                    if (future.isSuccess()) {
                        logger.debug("\n [{}]\n closed endpoints", this);
                        return;
                    }
                    logger.error("\n [{}]\n failed to close endpoints due to ", this, future.cause());
                });

            return;
        }

        logger.debug("\n [{}]\n already closed", this);
    }

    @Override
    public Config config() {
        return this.config;
    }

    /** @return number of live endpoints. */
    @Override
    public int count() {
        return this.endpoints.size();
    }

    /** @return number of endpoints evicted since creation. */
    @Override
    public int evictions() {
        return this.evictions.get();
    }

    /** Returns the endpoint for {@code physicalAddress}, creating it on first use. */
    @Override
    public RntbdEndpoint get(final URI physicalAddress) {
        return endpoints.computeIfAbsent(physicalAddress.getAuthority(), authority -> new RntbdServiceEndpoint(
            this,
            this.config,
            this.eventLoopGroup,
            this.requestTimer,
            physicalAddress));
    }

    @Override
    public IAddressResolver getAddressResolver() {
        return this.addressResolver;
    }

    @Override
    public Stream<RntbdEndpoint> list() {
        return this.endpoints.values().stream();
    }

    // Called from RntbdServiceEndpoint.close(); removes the endpoint and counts the eviction.
    private void evict(final RntbdEndpoint endpoint) {
        if (this.endpoints.remove(endpoint.serverKey().getAuthority()) != null) {
            this.evictions.incrementAndGet();
        }
    }
}
/**
 * Periodically logs pool/endpoint health for every endpoint owned by a {@link Provider}.
 * All instances share a single low-priority daemon executor so monitoring never competes
 * with I/O threads. {@code init()} and {@code close()} are now idempotent.
 */
public static class RntbdEndpointMonitoringProvider implements AutoCloseable {

    private final Logger logger = LoggerFactory.getLogger(RntbdEndpointMonitoringProvider.class);

    private static final EventExecutor monitoringRntbdChannelPool = new DefaultEventExecutor(new RntbdThreadFactory(
        "monitoring-rntbd-endpoints",
        true,
        Thread.MIN_PRIORITY));
    private static final Duration MONITORING_PERIOD = Duration.ofSeconds(60);
    private final Provider provider;
    private final static int MAX_TASK_LIMIT = 5_000;

    // Guarded by 'this'; non-null only while the monitoring task is scheduled.
    private ScheduledFuture<?> future;

    RntbdEndpointMonitoringProvider(Provider provider) {
        this.provider = provider;
    }

    /** Starts the periodic monitoring task. A second call is a no-op. */
    synchronized void init() {
        if (this.future != null) {
            // FIX: previously a repeated init() silently leaked the prior schedule.
            return;
        }
        logger.info("Starting RntbdClientChannelPoolMonitoringProvider ...");
        this.future = RntbdEndpointMonitoringProvider.monitoringRntbdChannelPool.scheduleAtFixedRate(() -> {
            logAllPools();
        }, 0, MONITORING_PERIOD.toMillis(), TimeUnit.MILLISECONDS);
    }

    /** Stops monitoring. Closing twice (or before init) no longer throws NPE. */
    @Override
    public synchronized void close() {
        logger.info("Shutting down RntbdClientChannelPoolMonitoringProvider ...");
        if (this.future != null) {
            // FIX: original dereferenced 'future' unconditionally -> NPE on double close.
            this.future.cancel(false);
            this.future = null;
        }
    }

    /** Logs a health line for every endpoint currently registered with the provider. */
    synchronized void logAllPools() {
        try {
            logger.debug("Total number of RntbdClientChannelPool [{}].", provider.endpoints.size());
            for (RntbdEndpoint endpoint : provider.endpoints.values()) {
                logEndpoint(endpoint);
            }
        } catch (Exception e) {
            // Monitoring must never kill its executor thread; log and continue.
            logger.error("monitoring unexpected failure", e);
        }
    }

    // Warn when an endpoint looks unhealthy (large queues, pending connects, or more
    // channels than the configured maximum); otherwise log at debug level.
    private void logEndpoint(RntbdEndpoint endpoint) {
        if (this.logger.isWarnEnabled() &&
            (endpoint.executorTaskQueueMetrics() > MAX_TASK_LIMIT ||
                endpoint.requestQueueLength() > MAX_TASK_LIMIT ||
                endpoint.gettingEstablishedConnectionsMetrics() > 0 ||
                endpoint.channelsMetrics() > endpoint.maxChannels())) {
            logger.warn("RntbdEndpoint Identifier {}, Stat {}", getPoolId(endpoint), getPoolStat(endpoint));
        } else if (this.logger.isDebugEnabled()) {
            logger.debug("RntbdEndpoint Identifier {}, Stat {}", getPoolId(endpoint), getPoolStat(endpoint));
        }
    }

    // Human-readable health snapshot; lastRequestNanoTime is converted to a wall-clock instant.
    private String getPoolStat(RntbdEndpoint endpoint) {
        return "[ "
            + "poolTaskExecutorSize " + endpoint.executorTaskQueueMetrics()
            + ", lastRequestNanoTime " + Instant.now().minusNanos(
                System.nanoTime() - endpoint.lastRequestNanoTime())
            + ", connecting " + endpoint.gettingEstablishedConnectionsMetrics()
            + ", acquiredChannel " + endpoint.channelsAcquiredMetric()
            + ", availableChannel " + endpoint.channelsAvailableMetric()
            + ", pendingAcquisitionSize " + endpoint.requestQueueLength()
            + ", closed " + endpoint.isClosed()
            + " ]";
    }

    // Stable identity string for correlating log lines about one endpoint.
    private String getPoolId(RntbdEndpoint endpoint) {
        if (endpoint == null) {
            return "null";
        }
        return "[RntbdEndpoint" +
            ", id " + endpoint.id() +
            ", remoteAddress " + endpoint.remoteAddress() +
            ", creationTime " + endpoint.getCreatedTime() +
            ", hashCode " + endpoint.hashCode() +
            "]";
    }
}
} | class RntbdServiceEndpoint implements RntbdEndpoint {
private static final String TAG_NAME = RntbdServiceEndpoint.class.getSimpleName();
private static final long QUIET_PERIOD = 2_000_000_000L; // 2 s, in nanos (used with NANOSECONDS below)
private static final AtomicLong instanceCount = new AtomicLong();
private static final Logger logger = LoggerFactory.getLogger(RntbdServiceEndpoint.class);
// NOTE(review): appears unreferenced within this class — confirm before removing.
private static final AdaptiveRecvByteBufAllocator receiveBufferAllocator = new AdaptiveRecvByteBufAllocator();

private final RntbdClientChannelPool channelPool;  // channel pool for this replica address
private final AtomicBoolean closed;
private final AtomicInteger concurrentRequests;    // in-flight request count (admission control)
private final long id;                             // unique instance id
private final AtomicLong lastRequestNanoTime;
private final AtomicLong lastSuccessfulRequestNanoTime;
private final Instant createdTime;
private final RntbdMetrics metrics;
private final Provider provider;
private final URI serverKey;
private final SocketAddress remoteAddress;
private final RntbdRequestTimer requestTimer;
private final Tag tag;
private final int maxConcurrentRequests;
private final boolean channelAcquisitionContextEnabled;
// null unless endpoint rediscovery is enabled and an address resolver exists
private final RntbdConnectionStateListener connectionStateListener;

/**
 * Creates an endpoint bound to a single backend physical address.
 * Instantiated only by {@link Provider#get} (at most one per authority).
 */
private RntbdServiceEndpoint(
    final Provider provider,
    final Config config,
    final EventLoopGroup group,
    final RntbdRequestTimer timer,
    final URI physicalAddress) {

    this.serverKey = RntbdUtils.getServerKey(physicalAddress);

    final Bootstrap bootstrap = this.getBootStrap(group, config);

    this.createdTime = Instant.now();
    this.channelPool = new RntbdClientChannelPool(this, bootstrap, config);
    this.remoteAddress = bootstrap.config().remoteAddress();
    this.concurrentRequests = new AtomicInteger();
    // Seed the "last request" clocks with now so a fresh endpoint is not immediately stale.
    this.lastRequestNanoTime = new AtomicLong(System.nanoTime());
    this.lastSuccessfulRequestNanoTime = new AtomicLong(System.nanoTime());
    this.closed = new AtomicBoolean();
    this.requestTimer = timer;
    this.tag = Tag.of(TAG_NAME, RntbdMetrics.escape(this.remoteAddress.toString()));
    this.id = instanceCount.incrementAndGet();
    this.provider = provider;
    this.metrics = new RntbdMetrics(provider.transportClient, this);
    this.maxConcurrentRequests = config.maxConcurrentRequestsPerEndpoint();
    this.connectionStateListener = this.provider.addressResolver != null && config.isConnectionEndpointRediscoveryEnabled()
        ? new RntbdConnectionStateListener(this.provider.addressResolver, this)
        : null;
    this.channelAcquisitionContextEnabled = config.isChannelAcquisitionContextEnabled();
}
/**
 * @return approximate number of acquired channels.
 */
@Override
public int channelsAcquiredMetric() {
    return this.channelPool.channelsAcquiredMetrics();
}

/**
 * @return approximate number of available channels.
 */
@Override
public int channelsAvailableMetric() {
    return this.channelPool.channelsAvailableMetrics();
}

/** @return number of requests currently in flight on this endpoint. */
@Override
public int concurrentRequests() {
    return this.concurrentRequests.get();
}

/** @return approximate number of connections currently being established. */
@Override
public int gettingEstablishedConnectionsMetrics() {
    return this.channelPool.attemptingToConnectMetrics();
}

/** @return unique id of this endpoint instance (monotonically assigned). */
@Override
public long id() {
    return this.id;
}

/** @return whether {@link #close()} has been invoked. */
@Override
public boolean isClosed() {
    return this.closed.get();
}

// NOTE(review): returns the same value as channelsMetrics() below — confirm intended.
@Override
public int maxChannels() {
    return this.channelPool.channels(true);
}

/** @return nanoTime at which the most recent request was routed to this endpoint. */
public long lastRequestNanoTime() {
    return this.lastRequestNanoTime.get();
}

/** @return nanoTime of the most recent response treated as "service reached" (see onResponse). */
@Override
public long lastSuccessfulRequestNanoTime() {
    return this.lastSuccessfulRequestNanoTime.get();
}

/** @return total number of channels tracked by the pool. */
@Override
public int channelsMetrics() {
    return this.channelPool.channels(true);
}

/** @return approximate size of the channel pool executor's task queue. */
@Override
public int executorTaskQueueMetrics() {
    return this.channelPool.executorTaskQueueMetrics();
}

/** @return instant at which this endpoint was created. */
public Instant getCreatedTime() {
    return this.createdTime;
}

/** @return remote socket address of the backend replica served by this endpoint. */
@Override
public SocketAddress remoteAddress() {
    return this.remoteAddress;
}

/** @return server key URI used to index this endpoint in the provider map. */
@Override
public URI serverKey() { return this.serverKey; }

/** @return number of pending channel acquisitions queued in the pool. */
@Override
public int requestQueueLength() {
    return this.channelPool.requestQueueLength();
}

/** @return metrics tag identifying this endpoint by its escaped remote address. */
@Override
public Tag tag() {
    return this.tag;
}

/** @return direct memory usage reported by the channel pool. */
@Override
public long usedDirectMemory() {
    return this.channelPool.usedDirectMemory();
}

/** @return heap memory usage reported by the channel pool. */
@Override
public long usedHeapMemory() {
    return this.channelPool.usedHeapMemory();
}

/**
 * Closes this endpoint exactly once: evicts it from the provider's endpoint map,
 * then closes the underlying channel pool. Subsequent calls are no-ops.
 */
@Override
public void close() {
    if (this.closed.compareAndSet(false, true)) {
        this.provider.evict(this);
        this.channelPool.close();
    }
}
/**
 * Issues an RNTBD request on this endpoint, enforcing the per-endpoint concurrency limit.
 * Returns immediately; the returned record completes asynchronously.
 *
 * @param args the request arguments.
 * @return a record tracking the in-flight request (may already be failed-fast).
 */
public RntbdRequestRecord request(final RntbdRequestArgs args) {

    this.throwIfClosed();

    // Admission control: count this request before deciding whether to accept it.
    int concurrentRequestSnapshot = this.concurrentRequests.incrementAndGet();
    RntbdEndpointStatistics stat = endpointMetricsSnapshot(concurrentRequestSnapshot);

    if (concurrentRequestSnapshot > this.maxConcurrentRequests) {
        try {
            // Over the concurrency limit: fail fast without acquiring a channel.
            FailFastRntbdRequestRecord requestRecord = FailFastRntbdRequestRecord.createAndFailFast(
                args,
                concurrentRequestSnapshot,
                metrics,
                remoteAddress);
            requestRecord.serviceEndpointStatistics(stat);
            return requestRecord;
        }
        finally {
            // The fail-fast record never reaches the whenComplete below, so undo the count here.
            concurrentRequests.decrementAndGet();
        }
    }

    this.lastRequestNanoTime.set(args.nanoTimeCreated());

    final RntbdRequestRecord record = this.write(args);
    record.serviceEndpointStatistics(stat);

    record.whenComplete((response, error) -> {
        // Completion (success or failure) releases the concurrency slot and records metrics.
        this.concurrentRequests.decrementAndGet();
        this.metrics.markComplete(record);
        onResponse(error, record);
    });

    return record;
}
/**
 * Post-completion bookkeeping for a request: refreshes the endpoint health timestamp
 * and notifies the connection-state listener on failure.
 *
 * A CONFLICT or NOTFOUND error still proves the service was reached, so those also
 * refresh {@code lastSuccessfulRequestNanoTime}.
 */
private void onResponse(Throwable exception, RntbdRequestRecord record) {

    if (exception == null) {
        this.lastSuccessfulRequestNanoTime.set(System.nanoTime());
        return;
    }

    RntbdConnectionStateListener stateListener = this.connectionStateListener;
    if (stateListener != null) {
        stateListener.onException(record.args().serviceRequest(), exception);
    }

    if (!(exception instanceof CosmosException)) {
        return;
    }

    int statusCode = ((CosmosException) exception).getStatusCode();
    boolean serviceWasReached =
        statusCode == HttpConstants.StatusCodes.CONFLICT
            || statusCode == HttpConstants.StatusCodes.NOTFOUND;

    if (serviceWasReached) {
        this.lastSuccessfulRequestNanoTime.set(System.nanoTime());
    }
}
/**
 * Captures a point-in-time snapshot of this endpoint's health counters; the snapshot is
 * attached to each request record for diagnostics.
 *
 * @param concurrentRequestSnapshot number of in-flight requests including the current one.
 * @return a freshly-populated statistics object.
 */
private RntbdEndpointStatistics endpointMetricsSnapshot(int concurrentRequestSnapshot) {
    RntbdEndpointStatistics stats = new RntbdEndpointStatistics()
        .availableChannels(this.channelsAvailableMetric())
        .acquiredChannels(this.channelsAcquiredMetric())
        .executorTaskQueueSize(this.executorTaskQueueMetrics())
        .lastSuccessfulRequestNanoTime(this.lastSuccessfulRequestNanoTime())
        .createdTime(this.createdTime)
        .lastRequestNanoTime(this.lastRequestNanoTime())
        .closed(this.closed.get())
        .inflightRequests(concurrentRequestSnapshot);
    return stats;
}

@Override
public String toString() {
    // Serialized via the JsonSerializer nested class (RntbdObjectMapper dispatches on type).
    return RntbdObjectMapper.toString(this);
}

/** Logs the outcome of returning a channel to the pool; debug diagnostics only. */
private void ensureSuccessWhenReleasedToPool(Channel channel, Future<Void> released) {
    if (released.isSuccess()) {
        logger.debug("\n [{}]\n {}\n release succeeded", this, channel);
    } else {
        logger.debug("\n [{}]\n {}\n release failed due to {}", this, channel, released.cause());
    }
}

/** Returns {@code channel} to the pool and, when debug logging is on, logs the release result. */
private void releaseToPool(final Channel channel) {

    logger.debug("\n [{}]\n {}\n RELEASE", this, channel);
    final Future<Void> released = this.channelPool.release(channel);

    if (logger.isDebugEnabled()) {
        if (released.isDone()) {
            ensureSuccessWhenReleasedToPool(channel, released);
        } else {
            // Release still pending: log asynchronously when it resolves.
            released.addListener(ignored -> ensureSuccessWhenReleasedToPool(channel, released));
        }
    }
}

/**
 * Guards request admission: rejects new work once this endpoint has been closed.
 *
 * @throws TransportException when {@link #close()} has already been invoked.
 */
private void throwIfClosed() {
    if (this.closed.get()) {
        throw new TransportException(lenientFormat("%s is closed", this), new IllegalStateException());
    }
}
/**
 * Acquires a channel from the pool and writes the request once acquisition resolves.
 * Returns immediately; the record completes asynchronously via writeWhenConnected.
 */
private RntbdRequestRecord write(final RntbdRequestArgs requestArgs) {

    final RntbdRequestRecord requestRecord = new AsyncRntbdRequestRecord(requestArgs, this.requestTimer);
    requestRecord.channelAcquisitionContextEnabled(this.channelAcquisitionContextEnabled);
    requestRecord.stage(RntbdRequestRecord.Stage.CHANNEL_ACQUISITION_STARTED);

    final Future<Channel> connectedChannel = this.channelPool.acquire(requestRecord.getChannelAcquisitionTimeline());

    logger.debug("\n [{}]\n {}\n WRITE WHEN CONNECTED {}", this, requestArgs, connectedChannel);

    if (connectedChannel.isDone()) {
        // Fast path: channel already available; write synchronously on this thread.
        return writeWhenConnected(requestRecord, connectedChannel);
    } else {
        connectedChannel.addListener(ignored -> writeWhenConnected(requestRecord, connectedChannel));
    }

    return requestRecord;
}

/**
 * Completes the pending write once channel acquisition resolves.
 *
 * On success the channel is immediately released back to the pool (the pipeline tracks the
 * request from here on) and the request is written. On cancellation the record is cancelled.
 * On failure the record is completed with a GoneException wrapping the connect failure —
 * presumably so higher layers re-resolve/retry the replica; confirm against the retry policy.
 */
private RntbdRequestRecord writeWhenConnected(
    final RntbdRequestRecord requestRecord, final Future<? super Channel> connected) {

    if (connected.isSuccess()) {
        final Channel channel = (Channel) connected.getNow();
        assert channel != null : "impossible";
        this.releaseToPool(channel);
        requestRecord.channelTaskQueueLength(RntbdUtils.tryGetExecutorTaskQueueSize(channel.eventLoop()));
        channel.write(requestRecord.stage(RntbdRequestRecord.Stage.PIPELINED));
        return requestRecord;
    }

    final RntbdRequestArgs requestArgs = requestRecord.args();
    final UUID activityId = requestArgs.activityId();
    final Throwable cause = connected.cause();

    if (connected.isCancelled()) {
        logger.debug("\n [{}]\n {}\n write cancelled: {}", this, requestArgs, cause);
        requestRecord.cancel(true);
    } else {
        logger.debug("\n [{}]\n {}\n write failed due to {} ", this, requestArgs, cause);
        final String reason = cause.toString();
        // Connect failure surfaces as GoneException carrying the activity id and replica path.
        final GoneException goneException = new GoneException(
            lenientFormat("failed to establish connection to %s due to %s", this.remoteAddress, reason),
            cause instanceof Exception ? (Exception) cause : new IOException(reason, cause),
            ImmutableMap.of(HttpHeaders.ACTIVITY_ID, activityId.toString()),
            requestArgs.replicaPath()
        );
        BridgeInternal.setRequestHeaders(goneException, requestArgs.serviceRequest().getHeaders());
        requestRecord.completeExceptionally(goneException);
    }

    return requestRecord;
}
/** Jackson serializer producing the diagnostic JSON emitted by {@code toString()}. */
static final class JsonSerializer extends StdSerializer<RntbdServiceEndpoint> {

    private static final long serialVersionUID = -5764954918168771152L;

    public JsonSerializer() {
        super(RntbdServiceEndpoint.class);
    }

    @Override
    public void serialize(RntbdServiceEndpoint value, JsonGenerator generator, SerializerProvider provider)
        throws IOException {
        final RntbdTransportClient transportClient = value.provider.transportClient;
        generator.writeStartObject();
        generator.writeNumberField("id", value.id);
        generator.writeBooleanField("closed", value.isClosed());
        generator.writeNumberField("concurrentRequests", value.concurrentRequests());
        generator.writeStringField("remoteAddress", value.remoteAddress.toString());
        generator.writeObjectField("channelPool", value.channelPool);
        // Nested summary of the owning transport client.
        generator.writeObjectFieldStart("transportClient");
        generator.writeNumberField("id", transportClient.id());
        generator.writeBooleanField("closed", transportClient.isClosed());
        generator.writeNumberField("endpointCount", transportClient.endpointCount());
        generator.writeNumberField("endpointEvictionCount", transportClient.endpointEvictionCount());
        generator.writeEndObject();
        generator.writeEndObject();
    }
}
/**
 * Creates and caches one {@link RntbdServiceEndpoint} per backend authority, sharing a
 * single event-loop group, config, and request timer across all of them.
 */
public static final class Provider implements RntbdEndpoint.Provider {

    private static final Logger logger = LoggerFactory.getLogger(Provider.class);

    private final AtomicBoolean closed;
    private final Config config;
    // Keyed by the physical address authority (host:port).
    private final ConcurrentHashMap<String, RntbdEndpoint> endpoints;
    private final EventLoopGroup eventLoopGroup;
    private final AtomicInteger evictions;
    private final RntbdEndpointMonitoringProvider monitoring;
    private final RntbdRequestTimer requestTimer;
    private final RntbdTransportClient transportClient;
    private final IAddressResolver addressResolver;

    /**
     * @param transportClient owning transport client (used for diagnostics serialization).
     * @param options transport options (thread count, timeouts, priorities).
     * @param sslContext TLS context shared by all endpoints.
     * @param addressResolver resolver used for connection-state rediscovery; may be null.
     */
    public Provider(
        final RntbdTransportClient transportClient,
        final Options options,
        final SslContext sslContext,
        final IAddressResolver addressResolver) {

        checkNotNull(transportClient, "expected non-null provider");
        checkNotNull(options, "expected non-null options");
        checkNotNull(sslContext, "expected non-null sslContext");

        final LogLevel wireLogLevel;

        // Wire-level (TRACE) logging only when debug logging is enabled for this class.
        if (logger.isDebugEnabled()) {
            wireLogLevel = LogLevel.TRACE;
        } else {
            wireLogLevel = null;
        }

        this.addressResolver = addressResolver;
        this.transportClient = transportClient;
        this.config = new Config(options, sslContext, wireLogLevel);
        this.requestTimer = new RntbdRequestTimer(
            config.requestTimeoutInNanos(),
            config.requestTimerResolutionInNanos());
        this.eventLoopGroup = this.getEventLoopGroup(options);
        this.endpoints = new ConcurrentHashMap<>();
        this.evictions = new AtomicInteger();
        this.closed = new AtomicBoolean();
        this.monitoring = new RntbdEndpointMonitoringProvider(this);
        this.monitoring.init();
    }

    // Delegates platform-specific event-loop selection (native transport vs. NIO) to
    // RntbdLoopNativeDetector; the loop name is embedded in the I/O thread names.
    private EventLoopGroup getEventLoopGroup(Options options) {
        checkNotNull(options, "expected non-null options");
        RntbdLoop rntbdEventLoop = RntbdLoopNativeDetector.getRntbdLoop(options.preferTcpNative());
        DefaultThreadFactory threadFactory =
            new DefaultThreadFactory(
                "cosmos-rntbd-" + rntbdEventLoop.getName(),
                true,
                options.ioThreadPriority());
        return rntbdEventLoop.newEventLoopGroup(options.threadCount(), threadFactory);
    }

    /**
     * Closes the provider once: stops monitoring, closes every endpoint, then shuts the
     * event-loop group down gracefully and finally releases the shared request timer.
     */
    @Override
    public void close() {

        if (this.closed.compareAndSet(false, true)) {

            this.monitoring.close();

            for (final RntbdEndpoint endpoint : this.endpoints.values()) {
                endpoint.close();
            }

            this.eventLoopGroup.shutdownGracefully(QUIET_PERIOD, this.config.shutdownTimeoutInNanos(), NANOSECONDS)
                .addListener(future -> {
                    // Timer is closed only after the loop group is fully shut down.
                    this.requestTimer.close();
                    if (future.isSuccess()) {
                        logger.debug("\n [{}]\n closed endpoints", this);
                        return;
                    }
                    logger.error("\n [{}]\n failed to close endpoints due to ", this, future.cause());
                });

            return;
        }

        logger.debug("\n [{}]\n already closed", this);
    }

    @Override
    public Config config() {
        return this.config;
    }

    /** @return number of live endpoints. */
    @Override
    public int count() {
        return this.endpoints.size();
    }

    /** @return number of endpoints evicted since creation. */
    @Override
    public int evictions() {
        return this.evictions.get();
    }

    /** Returns the endpoint for {@code physicalAddress}, creating it on first use. */
    @Override
    public RntbdEndpoint get(final URI physicalAddress) {
        return endpoints.computeIfAbsent(physicalAddress.getAuthority(), authority -> new RntbdServiceEndpoint(
            this,
            this.config,
            this.eventLoopGroup,
            this.requestTimer,
            physicalAddress));
    }

    @Override
    public IAddressResolver getAddressResolver() {
        return this.addressResolver;
    }

    @Override
    public Stream<RntbdEndpoint> list() {
        return this.endpoints.values().stream();
    }

    // Called from RntbdServiceEndpoint.close(); removes the endpoint and counts the eviction.
    private void evict(final RntbdEndpoint endpoint) {
        if (this.endpoints.remove(endpoint.serverKey().getAuthority()) != null) {
            this.evictions.incrementAndGet();
        }
    }
}
/**
 * Periodically logs pool/endpoint health for every endpoint owned by a {@link Provider}.
 * All instances share a single low-priority daemon executor so monitoring never competes
 * with I/O threads. {@code init()} and {@code close()} are now idempotent.
 */
public static class RntbdEndpointMonitoringProvider implements AutoCloseable {

    private final Logger logger = LoggerFactory.getLogger(RntbdEndpointMonitoringProvider.class);

    private static final EventExecutor monitoringRntbdChannelPool = new DefaultEventExecutor(new RntbdThreadFactory(
        "monitoring-rntbd-endpoints",
        true,
        Thread.MIN_PRIORITY));
    private static final Duration MONITORING_PERIOD = Duration.ofSeconds(60);
    private final Provider provider;
    private final static int MAX_TASK_LIMIT = 5_000;

    // Guarded by 'this'; non-null only while the monitoring task is scheduled.
    private ScheduledFuture<?> future;

    RntbdEndpointMonitoringProvider(Provider provider) {
        this.provider = provider;
    }

    /** Starts the periodic monitoring task. A second call is a no-op. */
    synchronized void init() {
        if (this.future != null) {
            // FIX: previously a repeated init() silently leaked the prior schedule.
            return;
        }
        logger.info("Starting RntbdClientChannelPoolMonitoringProvider ...");
        this.future = RntbdEndpointMonitoringProvider.monitoringRntbdChannelPool.scheduleAtFixedRate(() -> {
            logAllPools();
        }, 0, MONITORING_PERIOD.toMillis(), TimeUnit.MILLISECONDS);
    }

    /** Stops monitoring. Closing twice (or before init) no longer throws NPE. */
    @Override
    public synchronized void close() {
        logger.info("Shutting down RntbdClientChannelPoolMonitoringProvider ...");
        if (this.future != null) {
            // FIX: original dereferenced 'future' unconditionally -> NPE on double close.
            this.future.cancel(false);
            this.future = null;
        }
    }

    /** Logs a health line for every endpoint currently registered with the provider. */
    synchronized void logAllPools() {
        try {
            logger.debug("Total number of RntbdClientChannelPool [{}].", provider.endpoints.size());
            for (RntbdEndpoint endpoint : provider.endpoints.values()) {
                logEndpoint(endpoint);
            }
        } catch (Exception e) {
            // Monitoring must never kill its executor thread; log and continue.
            logger.error("monitoring unexpected failure", e);
        }
    }

    // Warn when an endpoint looks unhealthy (large queues, pending connects, or more
    // channels than the configured maximum); otherwise log at debug level.
    private void logEndpoint(RntbdEndpoint endpoint) {
        if (this.logger.isWarnEnabled() &&
            (endpoint.executorTaskQueueMetrics() > MAX_TASK_LIMIT ||
                endpoint.requestQueueLength() > MAX_TASK_LIMIT ||
                endpoint.gettingEstablishedConnectionsMetrics() > 0 ||
                endpoint.channelsMetrics() > endpoint.maxChannels())) {
            logger.warn("RntbdEndpoint Identifier {}, Stat {}", getPoolId(endpoint), getPoolStat(endpoint));
        } else if (this.logger.isDebugEnabled()) {
            logger.debug("RntbdEndpoint Identifier {}, Stat {}", getPoolId(endpoint), getPoolStat(endpoint));
        }
    }

    // Human-readable health snapshot; lastRequestNanoTime is converted to a wall-clock instant.
    private String getPoolStat(RntbdEndpoint endpoint) {
        return "[ "
            + "poolTaskExecutorSize " + endpoint.executorTaskQueueMetrics()
            + ", lastRequestNanoTime " + Instant.now().minusNanos(
                System.nanoTime() - endpoint.lastRequestNanoTime())
            + ", connecting " + endpoint.gettingEstablishedConnectionsMetrics()
            + ", acquiredChannel " + endpoint.channelsAcquiredMetric()
            + ", availableChannel " + endpoint.channelsAvailableMetric()
            + ", pendingAcquisitionSize " + endpoint.requestQueueLength()
            + ", closed " + endpoint.isClosed()
            + " ]";
    }

    // Stable identity string for correlating log lines about one endpoint.
    private String getPoolId(RntbdEndpoint endpoint) {
        if (endpoint == null) {
            return "null";
        }
        return "[RntbdEndpoint" +
            ", id " + endpoint.id() +
            ", remoteAddress " + endpoint.remoteAddress() +
            ", creationTime " + endpoint.getCreatedTime() +
            ", hashCode " + endpoint.hashCode() +
            "]";
    }
}
} |
Currently in Rntbd it appears we use neither epoll on Linux nor kqueue on macOS. Based on the perf tests, there is not much improvement either. | private EventLoopGroup getEventLoopGroup(Options options) {
checkNotNull(options, "expected non-null options");
final DefaultThreadFactory threadFactory =
new DefaultThreadFactory(
"cosmos-rntbd-nio",
true,
options.ioThreadPriority());
if (Epoll.isAvailable()) {
return new EpollEventLoopGroup(options.threadCount(), threadFactory);
}
return new NioEventLoopGroup(options.threadCount(), threadFactory);
} | } | private EventLoopGroup getEventLoopGroup(Options options) {
checkNotNull(options, "expected non-null options");
RntbdLoop rntbdEventLoop = RntbdLoopNativeDetector.getRntbdLoop(options.preferTcpNative());
DefaultThreadFactory threadFactory =
new DefaultThreadFactory(
"cosmos-rntbd-" + rntbdEventLoop.getName(),
true,
options.ioThreadPriority());
return rntbdEventLoop.newEventLoopGroup(options.threadCount(), threadFactory);
} | class Provider implements RntbdEndpoint.Provider {
private static final Logger logger = LoggerFactory.getLogger(Provider.class);
private final AtomicBoolean closed;
private final Config config;
private final ConcurrentHashMap<String, RntbdEndpoint> endpoints;
private final EventLoopGroup eventLoopGroup;
private final AtomicInteger evictions;
private final RntbdEndpointMonitoringProvider monitoring;
private final RntbdRequestTimer requestTimer;
private final RntbdTransportClient transportClient;
private final IAddressResolver addressResolver;
public Provider(
final RntbdTransportClient transportClient,
final Options options,
final SslContext sslContext,
final IAddressResolver addressResolver) {
checkNotNull(transportClient, "expected non-null provider");
checkNotNull(options, "expected non-null options");
checkNotNull(sslContext, "expected non-null sslContext");
final LogLevel wireLogLevel;
if (logger.isDebugEnabled()) {
wireLogLevel = LogLevel.TRACE;
} else {
wireLogLevel = null;
}
this.addressResolver = addressResolver;
this.transportClient = transportClient;
this.config = new Config(options, sslContext, wireLogLevel);
this.requestTimer = new RntbdRequestTimer(
config.requestTimeoutInNanos(),
config.requestTimerResolutionInNanos());
this.eventLoopGroup = this.getEventLoopGroup(options);
this.endpoints = new ConcurrentHashMap<>();
this.evictions = new AtomicInteger();
this.closed = new AtomicBoolean();
this.monitoring = new RntbdEndpointMonitoringProvider(this);
this.monitoring.init();
}
@Override
public void close() {
if (this.closed.compareAndSet(false, true)) {
this.monitoring.close();
for (final RntbdEndpoint endpoint : this.endpoints.values()) {
endpoint.close();
}
this.eventLoopGroup.shutdownGracefully(QUIET_PERIOD, this.config.shutdownTimeoutInNanos(), NANOSECONDS)
.addListener(future -> {
this.requestTimer.close();
if (future.isSuccess()) {
logger.debug("\n [{}]\n closed endpoints", this);
return;
}
logger.error("\n [{}]\n failed to close endpoints due to ", this, future.cause());
});
return;
}
logger.debug("\n [{}]\n already closed", this);
}
@Override
public Config config() {
    // Shared immutable transport configuration.
    return this.config;
}
@Override
public int count() {
    // Number of endpoints currently cached.
    return this.endpoints.size();
}
@Override
public int evictions() {
    // Number of endpoints evicted since this provider was created.
    return this.evictions.get();
}
/**
 * Returns the cached endpoint for the address's authority (host:port), creating it on first use.
 */
@Override
public RntbdEndpoint get(final URI physicalAddress) {
    return endpoints.computeIfAbsent(physicalAddress.getAuthority(), authority -> new RntbdServiceEndpoint(
        this,
        this.config,
        this.eventLoopGroup,
        this.requestTimer,
        physicalAddress));
}
@Override
public IAddressResolver getAddressResolver() {
    // May be null; callers (e.g. the endpoint constructor) null-check it.
    return this.addressResolver;
}
@Override
public Stream<RntbdEndpoint> list() {
    // Weakly consistent view over the concurrent endpoint cache.
    return this.endpoints.values().stream();
}
/** Removes the endpoint from the cache and bumps the eviction counter if it was present. */
private void evict(final RntbdEndpoint endpoint) {
    final String authority = endpoint.serverKey().getAuthority();
    final RntbdEndpoint removed = this.endpoints.remove(authority);
    if (removed != null) {
        this.evictions.incrementAndGet();
    }
}
} | class Provider implements RntbdEndpoint.Provider {
private static final Logger logger = LoggerFactory.getLogger(Provider.class);
private final AtomicBoolean closed;
private final Config config;
private final ConcurrentHashMap<String, RntbdEndpoint> endpoints;
private final EventLoopGroup eventLoopGroup;
private final AtomicInteger evictions;
private final RntbdEndpointMonitoringProvider monitoring;
private final RntbdRequestTimer requestTimer;
private final RntbdTransportClient transportClient;
private final IAddressResolver addressResolver;
public Provider(
final RntbdTransportClient transportClient,
final Options options,
final SslContext sslContext,
final IAddressResolver addressResolver) {
checkNotNull(transportClient, "expected non-null provider");
checkNotNull(options, "expected non-null options");
checkNotNull(sslContext, "expected non-null sslContext");
final LogLevel wireLogLevel;
if (logger.isDebugEnabled()) {
wireLogLevel = LogLevel.TRACE;
} else {
wireLogLevel = null;
}
this.addressResolver = addressResolver;
this.transportClient = transportClient;
this.config = new Config(options, sslContext, wireLogLevel);
this.requestTimer = new RntbdRequestTimer(
config.requestTimeoutInNanos(),
config.requestTimerResolutionInNanos());
this.eventLoopGroup = this.getEventLoopGroup(options);
this.endpoints = new ConcurrentHashMap<>();
this.evictions = new AtomicInteger();
this.closed = new AtomicBoolean();
this.monitoring = new RntbdEndpointMonitoringProvider(this);
this.monitoring.init();
}
@Override
public void close() {
if (this.closed.compareAndSet(false, true)) {
this.monitoring.close();
for (final RntbdEndpoint endpoint : this.endpoints.values()) {
endpoint.close();
}
this.eventLoopGroup.shutdownGracefully(QUIET_PERIOD, this.config.shutdownTimeoutInNanos(), NANOSECONDS)
.addListener(future -> {
this.requestTimer.close();
if (future.isSuccess()) {
logger.debug("\n [{}]\n closed endpoints", this);
return;
}
logger.error("\n [{}]\n failed to close endpoints due to ", this, future.cause());
});
return;
}
logger.debug("\n [{}]\n already closed", this);
}
@Override
public Config config() {
return this.config;
}
@Override
public int count() {
return this.endpoints.size();
}
@Override
public int evictions() {
return this.evictions.get();
}
@Override
public RntbdEndpoint get(final URI physicalAddress) {
return endpoints.computeIfAbsent(physicalAddress.getAuthority(), authority -> new RntbdServiceEndpoint(
this,
this.config,
this.eventLoopGroup,
this.requestTimer,
physicalAddress));
}
@Override
public IAddressResolver getAddressResolver() {
return this.addressResolver;
}
@Override
public Stream<RntbdEndpoint> list() {
return this.endpoints.values().stream();
}
private void evict(final RntbdEndpoint endpoint) {
if (this.endpoints.remove(endpoint.serverKey().getAuthority()) != null) {
this.evictions.incrementAndGet();
}
}
} |
E2E test by capturing TCP traces | private Bootstrap getBootStrap(EventLoopGroup eventLoopGroup, Config config) {
    checkNotNull(eventLoopGroup, "expected non-null eventLoopGroup");
    checkNotNull(config, "expected non-null config");
    // Common bootstrap settings: configured allocator, adaptive receive buffers,
    // TCP keep-alive, and the endpoint's host:port as the remote address.
    Bootstrap bootstrap = new Bootstrap()
        .group(eventLoopGroup)
        .option(ChannelOption.ALLOCATOR, config.allocator())
        .option(ChannelOption.AUTO_READ, true)
        .option(ChannelOption.CONNECT_TIMEOUT_MILLIS, config.connectTimeoutInMillis())
        .option(ChannelOption.RCVBUF_ALLOCATOR, receiveBufferAllocator)
        .option(ChannelOption.SO_KEEPALIVE, true)
        .remoteAddress(this.serverKey.getHost(), this.serverKey.getPort());
    if (eventLoopGroup instanceof EpollEventLoopGroup) {
        // Native epoll transport: tune keep-alive probing (1s probe interval, 30s idle).
        // NOTE(review): these values are hard-coded here; consider surfacing them through Config.
        bootstrap.channel(EpollSocketChannel.class)
            .option(EpollChannelOption.TCP_KEEPINTVL, 1)
            .option(EpollChannelOption.TCP_KEEPIDLE, 30);
    } else {
        // Portable NIO fallback.
        bootstrap.channel(NioSocketChannel.class);
    }
    return bootstrap;
} | bootstrap.channel(EpollSocketChannel.class) | private Bootstrap getBootStrap(EventLoopGroup eventLoopGroup, Config config) {
    checkNotNull(eventLoopGroup, "expected non-null eventLoopGroup");
    checkNotNull(config, "expected non-null config");
    // Select the transport implementation (native vs. NIO) consistent with the event loop group.
    RntbdLoop rntbdLoop = RntbdLoopNativeDetector.getRntbdLoop(config.preferTcpNative());
    Bootstrap bootstrap = new Bootstrap()
        .group(eventLoopGroup)
        .channel(rntbdLoop.getChannelClass())
        .option(ChannelOption.ALLOCATOR, config.allocator())
        .option(ChannelOption.AUTO_READ, true)
        .option(ChannelOption.CONNECT_TIMEOUT_MILLIS, config.connectTimeoutInMillis())
        .option(ChannelOption.RCVBUF_ALLOCATOR, receiveBufferAllocator)
        .option(ChannelOption.SO_KEEPALIVE, true)
        .remoteAddress(this.serverKey.getHost(), this.serverKey.getPort());
    if (rntbdLoop instanceof RntbdLoopEpoll) {
        // Keep-alive probe tuning is epoll-specific; values come from Config rather than constants.
        bootstrap
            .option(EpollChannelOption.TCP_KEEPINTVL, config.tcpKeepIntvl())
            .option(EpollChannelOption.TCP_KEEPIDLE, config.tcpKeepIdle());
    }
    return bootstrap;
} | class RntbdServiceEndpoint implements RntbdEndpoint {
private static final String TAG_NAME = RntbdServiceEndpoint.class.getSimpleName();
// Quiet period (ns) used by Provider.close() when shutting down the event loop group.
private static final long QUIET_PERIOD = 2_000_000_000L;
// Monotonic source of per-instance endpoint ids.
private static final AtomicLong instanceCount = new AtomicLong();
private static final Logger logger = LoggerFactory.getLogger(RntbdServiceEndpoint.class);
// Shared adaptive receive-buffer allocator applied to every bootstrapped channel.
private static final AdaptiveRecvByteBufAllocator receiveBufferAllocator = new AdaptiveRecvByteBufAllocator();
private final RntbdClientChannelPool channelPool;
private final AtomicBoolean closed;
// Requests currently in flight on this endpoint; capped by maxConcurrentRequests in request(...).
private final AtomicInteger concurrentRequests;
private final long id;
// Health tracking: nano timestamps of the last issued and last successful request.
private final AtomicLong lastRequestNanoTime;
private final AtomicLong lastSuccessfulRequestNanoTime;
private final Instant createdTime;
private final RntbdMetrics metrics;
private final Provider provider;
private final URI serverKey;
private final SocketAddress remoteAddress;
private final RntbdRequestTimer requestTimer;
// Metrics tag: "RntbdServiceEndpoint" -> escaped remote address.
private final Tag tag;
private final int maxConcurrentRequests;
private final boolean channelAcquisitionContextEnabled;
// Non-null only when an address resolver exists and rediscovery is enabled; see the constructor.
private final RntbdConnectionStateListener connectionStateListener;
/**
 * Creates an endpoint bound to a single backend authority, with its own channel pool,
 * metrics, and (optionally) a connection-state listener for address rediscovery.
 *
 * @param provider        the owning provider (supplies the transport client and resolver).
 * @param config          shared transport configuration.
 * @param group           shared Netty event loop group.
 * @param timer           shared request-expiration timer.
 * @param physicalAddress the replica address; reduced to its server key for pooling.
 */
private RntbdServiceEndpoint(
    final Provider provider,
    final Config config,
    final EventLoopGroup group,
    final RntbdRequestTimer timer,
    final URI physicalAddress) {
    // Normalize the full replica address down to its server key (authority).
    this.serverKey = RntbdUtils.getServerKey(physicalAddress);
    final Bootstrap bootstrap = this.getBootStrap(group, config);
    this.createdTime = Instant.now();
    this.channelPool = new RntbdClientChannelPool(this, bootstrap, config);
    this.remoteAddress = bootstrap.config().remoteAddress();
    this.concurrentRequests = new AtomicInteger();
    // Both health timestamps are initialized to "now" for a fresh endpoint.
    this.lastRequestNanoTime = new AtomicLong(System.nanoTime());
    this.lastSuccessfulRequestNanoTime = new AtomicLong(System.nanoTime());
    this.closed = new AtomicBoolean();
    this.requestTimer = timer;
    this.tag = Tag.of(TAG_NAME, RntbdMetrics.escape(this.remoteAddress.toString()));
    this.id = instanceCount.incrementAndGet();
    this.provider = provider;
    this.metrics = new RntbdMetrics(provider.transportClient, this);
    this.maxConcurrentRequests = config.maxConcurrentRequestsPerEndpoint();
    // The rediscovery listener exists only when a resolver is available AND the feature is enabled.
    this.connectionStateListener = this.provider.addressResolver != null && config.isConnectionEndpointRediscoveryEnabled()
        ? new RntbdConnectionStateListener(this.provider.addressResolver, this)
        : null;
    this.channelAcquisitionContextEnabled = config.isChannelAcquisitionContextEnabled();
}
/**
 * @return approximate number of acquired channels.
 */
@Override
public int channelsAcquiredMetric() {
    return this.channelPool.channelsAcquiredMetrics();
}
/**
 * @return approximate number of available channels.
 */
@Override
public int channelsAvailableMetric() {
    return this.channelPool.channelsAvailableMetrics();
}
/** @return number of requests currently in flight on this endpoint. */
@Override
public int concurrentRequests() {
    return this.concurrentRequests.get();
}
/** @return approximate number of connections currently being established. */
@Override
public int gettingEstablishedConnectionsMetrics() {
    return this.channelPool.attemptingToConnectMetrics();
}
/** @return the unique id assigned to this endpoint instance. */
@Override
public long id() {
    return this.id;
}
@Override
public boolean isClosed() {
    return this.closed.get();
}
// NOTE(review): reads the same pool counter as channelsMetrics() below — confirm this is intended.
@Override
public int maxChannels() {
    return this.channelPool.channels(true);
}
/** @return nano timestamp of the most recently issued request. */
public long lastRequestNanoTime() {
    return this.lastRequestNanoTime.get();
}
/** @return nano timestamp of the most recent request treated as successful (see onResponse). */
@Override
public long lastSuccessfulRequestNanoTime() {
    return this.lastSuccessfulRequestNanoTime.get();
}
@Override
public int channelsMetrics() {
    return this.channelPool.channels(true);
}
@Override
public int executorTaskQueueMetrics() {
    return this.channelPool.executorTaskQueueMetrics();
}
public Instant getCreatedTime() {
    return this.createdTime;
}
@Override
public SocketAddress remoteAddress() {
    return this.remoteAddress;
}
@Override
public URI serverKey() { return this.serverKey; }
/** @return number of acquisitions waiting in the channel pool's queue. */
@Override
public int requestQueueLength() {
    return this.channelPool.requestQueueLength();
}
@Override
public Tag tag() {
    return this.tag;
}
@Override
public long usedDirectMemory() {
    return this.channelPool.usedDirectMemory();
}
@Override
public long usedHeapMemory() {
    return this.channelPool.usedHeapMemory();
}
/**
 * Closes this endpoint exactly once: evicts it from the owning provider and shuts down
 * its channel pool. Subsequent calls are no-ops.
 */
@Override
public void close() {
    if (!this.closed.compareAndSet(false, true)) {
        return; // already closed
    }
    this.provider.evict(this);
    this.channelPool.close();
}
/**
 * Issues a request on this endpoint, enforcing the per-endpoint concurrency cap.
 * Over the cap, a fail-fast record is returned without touching the channel pool.
 *
 * @param args the request to send.
 * @return a record that completes with the response or exceptionally.
 * @throws TransportException (via throwIfClosed) if the endpoint is closed.
 */
public RntbdRequestRecord request(final RntbdRequestArgs args) {
    this.throwIfClosed();
    // Reserve a concurrency slot, then snapshot endpoint stats for diagnostics.
    int concurrentRequestSnapshot = this.concurrentRequests.incrementAndGet();
    RntbdEndpointStatistics stat = endpointMetricsSnapshot(concurrentRequestSnapshot);
    if (concurrentRequestSnapshot > this.maxConcurrentRequests) {
        try {
            FailFastRntbdRequestRecord requestRecord = FailFastRntbdRequestRecord.createAndFailFast(
                args,
                concurrentRequestSnapshot,
                metrics,
                remoteAddress);
            requestRecord.serviceEndpointStatistics(stat);
            return requestRecord;
        }
        finally {
            // Release the slot reserved above; fail-fast records never reach whenComplete below.
            concurrentRequests.decrementAndGet();
        }
    }
    this.lastRequestNanoTime.set(args.nanoTimeCreated());
    final RntbdRequestRecord record = this.write(args);
    record.serviceEndpointStatistics(stat);
    record.whenComplete((response, error) -> {
        // Release the concurrency slot, record metrics, and update health once the request settles.
        this.concurrentRequests.decrementAndGet();
        this.metrics.markComplete(record);
        onResponse(error, record);
    });
    return record;
}
/**
 * Updates endpoint health after a request settles. A clean completion refreshes the
 * last-successful timestamp; failures are reported to the connection-state listener.
 * CONFLICT and NOTFOUND are service answers delivered over a healthy connection, so
 * they also count as "successful" for health-tracking purposes.
 */
private void onResponse(Throwable exception, RntbdRequestRecord record) {
    if (exception == null) {
        this.lastSuccessfulRequestNanoTime.set(System.nanoTime());
        return;
    }
    if (this.connectionStateListener != null) {
        this.connectionStateListener.onException(record.args().serviceRequest(), exception);
    }
    if (!(exception instanceof CosmosException)) {
        return;
    }
    final int statusCode = ((CosmosException) exception).getStatusCode();
    if (statusCode == HttpConstants.StatusCodes.CONFLICT
        || statusCode == HttpConstants.StatusCodes.NOTFOUND) {
        this.lastSuccessfulRequestNanoTime.set(System.nanoTime());
    }
}
/**
 * Captures a point-in-time snapshot of this endpoint's statistics for request diagnostics.
 *
 * @param concurrentRequestSnapshot the in-flight count observed by the caller.
 */
private RntbdEndpointStatistics endpointMetricsSnapshot(int concurrentRequestSnapshot) {
    return new RntbdEndpointStatistics()
        .availableChannels(this.channelsAvailableMetric())
        .acquiredChannels(this.channelsAcquiredMetric())
        .executorTaskQueueSize(this.executorTaskQueueMetrics())
        .lastSuccessfulRequestNanoTime(this.lastSuccessfulRequestNanoTime())
        .createdTime(this.createdTime)
        .lastRequestNanoTime(this.lastRequestNanoTime())
        .closed(this.closed.get())
        .inflightRequests(concurrentRequestSnapshot);
}
@Override
public String toString() {
    // JSON rendering via the JsonSerializer nested class below.
    return RntbdObjectMapper.toString(this);
}
// Logs the outcome of returning a channel to the pool (debug-level only).
private void ensureSuccessWhenReleasedToPool(Channel channel, Future<Void> released) {
    if (released.isSuccess()) {
        logger.debug("\n [{}]\n {}\n release succeeded", this, channel);
    } else {
        logger.debug("\n [{}]\n {}\n release failed due to {}", this, channel, released.cause());
    }
}
// Returns the channel to the pool; the release outcome is only inspected when debug logging is on.
private void releaseToPool(final Channel channel) {
    logger.debug("\n [{}]\n {}\n RELEASE", this, channel);
    final Future<Void> released = this.channelPool.release(channel);
    if (logger.isDebugEnabled()) {
        if (released.isDone()) {
            ensureSuccessWhenReleasedToPool(channel, released);
        } else {
            released.addListener(ignored -> ensureSuccessWhenReleasedToPool(channel, released));
        }
    }
}
/** Rejects new work once close() has run. @throws TransportException if this endpoint is closed. */
private void throwIfClosed() {
    if (!this.closed.get()) {
        return;
    }
    throw new TransportException(lenientFormat("%s is closed", this), new IllegalStateException());
}
// Acquires a channel from the pool and writes the request on it — synchronously when the
// acquisition is already complete, otherwise from the acquisition future's listener.
private RntbdRequestRecord write(final RntbdRequestArgs requestArgs) {
    final RntbdRequestRecord requestRecord = new AsyncRntbdRequestRecord(requestArgs, this.requestTimer);
    requestRecord.channelAcquisitionContextEnabled(this.channelAcquisitionContextEnabled);
    requestRecord.stage(RntbdRequestRecord.Stage.CHANNEL_ACQUISITION_STARTED);
    final Future<Channel> connectedChannel = this.channelPool.acquire(requestRecord.getChannelAcquisitionTimeline());
    logger.debug("\n [{}]\n {}\n WRITE WHEN CONNECTED {}", this, requestArgs, connectedChannel);
    if (connectedChannel.isDone()) {
        return writeWhenConnected(requestRecord, connectedChannel);
    } else {
        connectedChannel.addListener(ignored -> writeWhenConnected(requestRecord, connectedChannel));
    }
    return requestRecord;
}
// Completes channel acquisition: on success the request is written (pipelined); on
// cancellation the record is cancelled; on failure the record is completed with a
// GoneException built from the cause, carrying the activity id and replica path.
private RntbdRequestRecord writeWhenConnected(
    final RntbdRequestRecord requestRecord, final Future<? super Channel> connected) {
    if (connected.isSuccess()) {
        final Channel channel = (Channel) connected.getNow();
        assert channel != null : "impossible";
        // The channel is handed back to the pool before the write is issued;
        // NOTE(review): assumes RntbdClientChannelPool permits writes on released channels — confirm.
        this.releaseToPool(channel);
        requestRecord.channelTaskQueueLength(RntbdUtils.tryGetExecutorTaskQueueSize(channel.eventLoop()));
        channel.write(requestRecord.stage(RntbdRequestRecord.Stage.PIPELINED));
        return requestRecord;
    }
    final RntbdRequestArgs requestArgs = requestRecord.args();
    final UUID activityId = requestArgs.activityId();
    final Throwable cause = connected.cause();
    if (connected.isCancelled()) {
        logger.debug("\n [{}]\n {}\n write cancelled: {}", this, requestArgs, cause);
        requestRecord.cancel(true);
    } else {
        logger.debug("\n [{}]\n {}\n write failed due to {} ", this, requestArgs, cause);
        final String reason = cause.toString();
        // Non-Exception causes are wrapped in an IOException so the GoneException always has an Exception cause.
        final GoneException goneException = new GoneException(
            lenientFormat("failed to establish connection to %s due to %s", this.remoteAddress, reason),
            cause instanceof Exception ? (Exception) cause : new IOException(reason, cause),
            ImmutableMap.of(HttpHeaders.ACTIVITY_ID, activityId.toString()),
            requestArgs.replicaPath()
        );
        BridgeInternal.setRequestHeaders(goneException, requestArgs.serviceRequest().getHeaders());
        requestRecord.completeExceptionally(goneException);
    }
    return requestRecord;
}
// Jackson serializer that produces the diagnostic JSON emitted by RntbdServiceEndpoint.toString().
static final class JsonSerializer extends StdSerializer<RntbdServiceEndpoint> {
    private static final long serialVersionUID = -5764954918168771152L;
    public JsonSerializer() {
        super(RntbdServiceEndpoint.class);
    }
    @Override
    public void serialize(RntbdServiceEndpoint value, JsonGenerator generator, SerializerProvider provider)
        throws IOException {
        final RntbdTransportClient transportClient = value.provider.transportClient;
        generator.writeStartObject();
        generator.writeNumberField("id", value.id);
        generator.writeBooleanField("closed", value.isClosed());
        generator.writeNumberField("concurrentRequests", value.concurrentRequests());
        generator.writeStringField("remoteAddress", value.remoteAddress.toString());
        generator.writeObjectField("channelPool", value.channelPool);
        // Nested summary of the owning transport client.
        generator.writeObjectFieldStart("transportClient");
        generator.writeNumberField("id", transportClient.id());
        generator.writeBooleanField("closed", transportClient.isClosed());
        generator.writeNumberField("endpointCount", transportClient.endpointCount());
        generator.writeNumberField("endpointEvictionCount", transportClient.endpointEvictionCount());
        generator.writeEndObject();
        generator.writeEndObject();
    }
}
public static final class Provider implements RntbdEndpoint.Provider {
private static final Logger logger = LoggerFactory.getLogger(Provider.class);
private final AtomicBoolean closed;
private final Config config;
private final ConcurrentHashMap<String, RntbdEndpoint> endpoints;
private final EventLoopGroup eventLoopGroup;
private final AtomicInteger evictions;
private final RntbdEndpointMonitoringProvider monitoring;
private final RntbdRequestTimer requestTimer;
private final RntbdTransportClient transportClient;
private final IAddressResolver addressResolver;
public Provider(
final RntbdTransportClient transportClient,
final Options options,
final SslContext sslContext,
final IAddressResolver addressResolver) {
checkNotNull(transportClient, "expected non-null provider");
checkNotNull(options, "expected non-null options");
checkNotNull(sslContext, "expected non-null sslContext");
final LogLevel wireLogLevel;
if (logger.isDebugEnabled()) {
wireLogLevel = LogLevel.TRACE;
} else {
wireLogLevel = null;
}
this.addressResolver = addressResolver;
this.transportClient = transportClient;
this.config = new Config(options, sslContext, wireLogLevel);
this.requestTimer = new RntbdRequestTimer(
config.requestTimeoutInNanos(),
config.requestTimerResolutionInNanos());
this.eventLoopGroup = this.getEventLoopGroup(options);
this.endpoints = new ConcurrentHashMap<>();
this.evictions = new AtomicInteger();
this.closed = new AtomicBoolean();
this.monitoring = new RntbdEndpointMonitoringProvider(this);
this.monitoring.init();
}
// Builds the shared event loop group: native epoll when available, otherwise NIO.
// Threads are daemons so they never block JVM shutdown.
private EventLoopGroup getEventLoopGroup(Options options) {
    checkNotNull(options, "expected non-null options");
    final DefaultThreadFactory threadFactory =
        new DefaultThreadFactory(
            "cosmos-rntbd-nio",
            true,
            options.ioThreadPriority());
    if (Epoll.isAvailable()) {
        // NOTE(review): the thread-name prefix still says "nio" even on the epoll path.
        return new EpollEventLoopGroup(options.threadCount(), threadFactory);
    }
    return new NioEventLoopGroup(options.threadCount(), threadFactory);
}
@Override
public void close() {
if (this.closed.compareAndSet(false, true)) {
this.monitoring.close();
for (final RntbdEndpoint endpoint : this.endpoints.values()) {
endpoint.close();
}
this.eventLoopGroup.shutdownGracefully(QUIET_PERIOD, this.config.shutdownTimeoutInNanos(), NANOSECONDS)
.addListener(future -> {
this.requestTimer.close();
if (future.isSuccess()) {
logger.debug("\n [{}]\n closed endpoints", this);
return;
}
logger.error("\n [{}]\n failed to close endpoints due to ", this, future.cause());
});
return;
}
logger.debug("\n [{}]\n already closed", this);
}
@Override
public Config config() {
return this.config;
}
@Override
public int count() {
return this.endpoints.size();
}
@Override
public int evictions() {
return this.evictions.get();
}
@Override
public RntbdEndpoint get(final URI physicalAddress) {
return endpoints.computeIfAbsent(physicalAddress.getAuthority(), authority -> new RntbdServiceEndpoint(
this,
this.config,
this.eventLoopGroup,
this.requestTimer,
physicalAddress));
}
@Override
public IAddressResolver getAddressResolver() {
return this.addressResolver;
}
@Override
public Stream<RntbdEndpoint> list() {
return this.endpoints.values().stream();
}
private void evict(final RntbdEndpoint endpoint) {
if (this.endpoints.remove(endpoint.serverKey().getAuthority()) != null) {
this.evictions.incrementAndGet();
}
}
}
/**
 * Periodically logs channel-pool health statistics for every {@link RntbdEndpoint} owned by a
 * {@link Provider}. A single shared daemon executor drives all instances; each instance
 * schedules one fixed-rate task in {@link #init()} and cancels it in {@link #close()}.
 */
public static class RntbdEndpointMonitoringProvider implements AutoCloseable {
    private final Logger logger = LoggerFactory.getLogger(RntbdEndpointMonitoringProvider.class);
    // Shared single-threaded executor; daemon + MIN_PRIORITY so monitoring never competes with I/O work.
    private static final EventExecutor monitoringRntbdChannelPool = new DefaultEventExecutor(new RntbdThreadFactory(
        "monitoring-rntbd-endpoints",
        true,
        Thread.MIN_PRIORITY));
    private static final Duration MONITORING_PERIOD = Duration.ofSeconds(60);
    private final Provider provider;
    // Queue-size threshold above which an endpoint is logged at WARN instead of DEBUG.
    private final static int MAX_TASK_LIMIT = 5_000;
    // Handle to the scheduled task; null until init() runs and again after close().
    private ScheduledFuture<?> future;
    RntbdEndpointMonitoringProvider(Provider provider) {
        this.provider = provider;
    }
    /** Starts the fixed-rate monitoring task; the first run fires immediately. */
    synchronized void init() {
        logger.info("Starting RntbdClientChannelPoolMonitoringProvider ...");
        this.future = RntbdEndpointMonitoringProvider.monitoringRntbdChannelPool.scheduleAtFixedRate(
            this::logAllPools, 0, MONITORING_PERIOD.toMillis(), TimeUnit.MILLISECONDS);
    }
    /**
     * Cancels the monitoring task. Fix: guarded against a null {@code future} so that calling
     * close() before init(), or closing twice, no longer throws a NullPointerException.
     */
    @Override
    public synchronized void close() {
        logger.info("Shutting down RntbdClientChannelPoolMonitoringProvider ...");
        if (this.future != null) {
            this.future.cancel(false);
            this.future = null;
        }
    }
    /** Logs statistics for every endpoint; exceptions are contained so the schedule survives. */
    synchronized void logAllPools() {
        try {
            logger.debug("Total number of RntbdClientChannelPool [{}].", provider.endpoints.size());
            for (RntbdEndpoint endpoint : provider.endpoints.values()) {
                logEndpoint(endpoint);
            }
        } catch (Exception e) {
            logger.error("monitoring unexpected failure", e);
        }
    }
    // WARN when any queue/connection metric looks unhealthy, otherwise DEBUG.
    // NOTE(review): channelsMetrics() and maxChannels() read the same pool counter for
    // RntbdServiceEndpoint, so the last clause can never fire — confirm intent.
    private void logEndpoint(RntbdEndpoint endpoint) {
        if (this.logger.isWarnEnabled() &&
            (endpoint.executorTaskQueueMetrics() > MAX_TASK_LIMIT ||
                endpoint.requestQueueLength() > MAX_TASK_LIMIT ||
                endpoint.gettingEstablishedConnectionsMetrics() > 0 ||
                endpoint.channelsMetrics() > endpoint.maxChannels())) {
            logger.warn("RntbdEndpoint Identifier {}, Stat {}", getPoolId(endpoint), getPoolStat(endpoint));
        } else if (this.logger.isDebugEnabled()) {
            logger.debug("RntbdEndpoint Identifier {}, Stat {}", getPoolId(endpoint), getPoolStat(endpoint));
        }
    }
    // Human-readable snapshot of the endpoint's pool statistics.
    private String getPoolStat(RntbdEndpoint endpoint) {
        return "[ "
            + "poolTaskExecutorSize " + endpoint.executorTaskQueueMetrics()
            + ", lastRequestNanoTime " + Instant.now().minusNanos(
                System.nanoTime() - endpoint.lastRequestNanoTime())
            + ", connecting " + endpoint.gettingEstablishedConnectionsMetrics()
            + ", acquiredChannel " + endpoint.channelsAcquiredMetric()
            + ", availableChannel " + endpoint.channelsAvailableMetric()
            + ", pendingAcquisitionSize " + endpoint.requestQueueLength()
            + ", closed " + endpoint.isClosed()
            + " ]";
    }
    // Stable identifier used to correlate log lines for one endpoint.
    private String getPoolId(RntbdEndpoint endpoint) {
        if (endpoint == null) {
            return "null";
        }
        return "[RntbdEndpoint" +
            ", id " + endpoint.id() +
            ", remoteAddress " + endpoint.remoteAddress() +
            ", creationTime " + endpoint.getCreatedTime() +
            ", hashCode " + endpoint.hashCode() +
            "]";
    }
}
} | class RntbdServiceEndpoint implements RntbdEndpoint {
private static final String TAG_NAME = RntbdServiceEndpoint.class.getSimpleName();
private static final long QUIET_PERIOD = 2_000_000_000L;
private static final AtomicLong instanceCount = new AtomicLong();
private static final Logger logger = LoggerFactory.getLogger(RntbdServiceEndpoint.class);
private static final AdaptiveRecvByteBufAllocator receiveBufferAllocator = new AdaptiveRecvByteBufAllocator();
private final RntbdClientChannelPool channelPool;
private final AtomicBoolean closed;
private final AtomicInteger concurrentRequests;
private final long id;
private final AtomicLong lastRequestNanoTime;
private final AtomicLong lastSuccessfulRequestNanoTime;
private final Instant createdTime;
private final RntbdMetrics metrics;
private final Provider provider;
private final URI serverKey;
private final SocketAddress remoteAddress;
private final RntbdRequestTimer requestTimer;
private final Tag tag;
private final int maxConcurrentRequests;
private final boolean channelAcquisitionContextEnabled;
private final RntbdConnectionStateListener connectionStateListener;
private RntbdServiceEndpoint(
final Provider provider,
final Config config,
final EventLoopGroup group,
final RntbdRequestTimer timer,
final URI physicalAddress) {
this.serverKey = RntbdUtils.getServerKey(physicalAddress);
final Bootstrap bootstrap = this.getBootStrap(group, config);
this.createdTime = Instant.now();
this.channelPool = new RntbdClientChannelPool(this, bootstrap, config);
this.remoteAddress = bootstrap.config().remoteAddress();
this.concurrentRequests = new AtomicInteger();
this.lastRequestNanoTime = new AtomicLong(System.nanoTime());
this.lastSuccessfulRequestNanoTime = new AtomicLong(System.nanoTime());
this.closed = new AtomicBoolean();
this.requestTimer = timer;
this.tag = Tag.of(TAG_NAME, RntbdMetrics.escape(this.remoteAddress.toString()));
this.id = instanceCount.incrementAndGet();
this.provider = provider;
this.metrics = new RntbdMetrics(provider.transportClient, this);
this.maxConcurrentRequests = config.maxConcurrentRequestsPerEndpoint();
this.connectionStateListener = this.provider.addressResolver != null && config.isConnectionEndpointRediscoveryEnabled()
? new RntbdConnectionStateListener(this.provider.addressResolver, this)
: null;
this.channelAcquisitionContextEnabled = config.isChannelAcquisitionContextEnabled();
}
/**
* @return approximate number of acquired channels.
*/
@Override
public int channelsAcquiredMetric() {
return this.channelPool.channelsAcquiredMetrics();
}
/**
* @return approximate number of available channels.
*/
@Override
public int channelsAvailableMetric() {
return this.channelPool.channelsAvailableMetrics();
}
@Override
public int concurrentRequests() {
return this.concurrentRequests.get();
}
@Override
public int gettingEstablishedConnectionsMetrics() {
return this.channelPool.attemptingToConnectMetrics();
}
@Override
public long id() {
return this.id;
}
@Override
public boolean isClosed() {
return this.closed.get();
}
@Override
public int maxChannels() {
return this.channelPool.channels(true);
}
public long lastRequestNanoTime() {
return this.lastRequestNanoTime.get();
}
@Override
public long lastSuccessfulRequestNanoTime() {
return this.lastSuccessfulRequestNanoTime.get();
}
@Override
public int channelsMetrics() {
return this.channelPool.channels(true);
}
@Override
public int executorTaskQueueMetrics() {
return this.channelPool.executorTaskQueueMetrics();
}
public Instant getCreatedTime() {
return this.createdTime;
}
@Override
public SocketAddress remoteAddress() {
return this.remoteAddress;
}
@Override
public URI serverKey() { return this.serverKey; }
@Override
public int requestQueueLength() {
return this.channelPool.requestQueueLength();
}
@Override
public Tag tag() {
return this.tag;
}
@Override
public long usedDirectMemory() {
return this.channelPool.usedDirectMemory();
}
@Override
public long usedHeapMemory() {
return this.channelPool.usedHeapMemory();
}
@Override
public void close() {
if (this.closed.compareAndSet(false, true)) {
this.provider.evict(this);
this.channelPool.close();
}
}
public RntbdRequestRecord request(final RntbdRequestArgs args) {
this.throwIfClosed();
int concurrentRequestSnapshot = this.concurrentRequests.incrementAndGet();
RntbdEndpointStatistics stat = endpointMetricsSnapshot(concurrentRequestSnapshot);
if (concurrentRequestSnapshot > this.maxConcurrentRequests) {
try {
FailFastRntbdRequestRecord requestRecord = FailFastRntbdRequestRecord.createAndFailFast(
args,
concurrentRequestSnapshot,
metrics,
remoteAddress);
requestRecord.serviceEndpointStatistics(stat);
return requestRecord;
}
finally {
concurrentRequests.decrementAndGet();
}
}
this.lastRequestNanoTime.set(args.nanoTimeCreated());
final RntbdRequestRecord record = this.write(args);
record.serviceEndpointStatistics(stat);
record.whenComplete((response, error) -> {
this.concurrentRequests.decrementAndGet();
this.metrics.markComplete(record);
onResponse(error, record);
});
return record;
}
private void onResponse(Throwable exception, RntbdRequestRecord record) {
if (exception == null) {
this.lastSuccessfulRequestNanoTime.set(System.nanoTime());
return;
}
if (this.connectionStateListener != null) {
this.connectionStateListener.onException(record.args().serviceRequest(), exception);
}
if (exception instanceof CosmosException) {
CosmosException cosmosException = (CosmosException) exception;
switch (cosmosException.getStatusCode()) {
case HttpConstants.StatusCodes.CONFLICT:
case HttpConstants.StatusCodes.NOTFOUND:
this.lastSuccessfulRequestNanoTime.set(System.nanoTime());
return;
default:
return;
}
}
}
private RntbdEndpointStatistics endpointMetricsSnapshot(int concurrentRequestSnapshot) {
RntbdEndpointStatistics stats = new RntbdEndpointStatistics()
.availableChannels(this.channelsAvailableMetric())
.acquiredChannels(this.channelsAcquiredMetric())
.executorTaskQueueSize(this.executorTaskQueueMetrics())
.lastSuccessfulRequestNanoTime(this.lastSuccessfulRequestNanoTime())
.createdTime(this.createdTime)
.lastRequestNanoTime(this.lastRequestNanoTime())
.closed(this.closed.get())
.inflightRequests(concurrentRequestSnapshot);
return stats;
}
@Override
public String toString() {
return RntbdObjectMapper.toString(this);
}
private void ensureSuccessWhenReleasedToPool(Channel channel, Future<Void> released) {
if (released.isSuccess()) {
logger.debug("\n [{}]\n {}\n release succeeded", this, channel);
} else {
logger.debug("\n [{}]\n {}\n release failed due to {}", this, channel, released.cause());
}
}
private void releaseToPool(final Channel channel) {
logger.debug("\n [{}]\n {}\n RELEASE", this, channel);
final Future<Void> released = this.channelPool.release(channel);
if (logger.isDebugEnabled()) {
if (released.isDone()) {
ensureSuccessWhenReleasedToPool(channel, released);
} else {
released.addListener(ignored -> ensureSuccessWhenReleasedToPool(channel, released));
}
}
}
private void throwIfClosed() {
if (this.closed.get()) {
throw new TransportException(lenientFormat("%s is closed", this), new IllegalStateException());
}
}
private RntbdRequestRecord write(final RntbdRequestArgs requestArgs) {
final RntbdRequestRecord requestRecord = new AsyncRntbdRequestRecord(requestArgs, this.requestTimer);
requestRecord.channelAcquisitionContextEnabled(this.channelAcquisitionContextEnabled);
requestRecord.stage(RntbdRequestRecord.Stage.CHANNEL_ACQUISITION_STARTED);
final Future<Channel> connectedChannel = this.channelPool.acquire(requestRecord.getChannelAcquisitionTimeline());
logger.debug("\n [{}]\n {}\n WRITE WHEN CONNECTED {}", this, requestArgs, connectedChannel);
if (connectedChannel.isDone()) {
return writeWhenConnected(requestRecord, connectedChannel);
} else {
connectedChannel.addListener(ignored -> writeWhenConnected(requestRecord, connectedChannel));
}
return requestRecord;
}
private RntbdRequestRecord writeWhenConnected(
final RntbdRequestRecord requestRecord, final Future<? super Channel> connected) {
if (connected.isSuccess()) {
final Channel channel = (Channel) connected.getNow();
assert channel != null : "impossible";
this.releaseToPool(channel);
requestRecord.channelTaskQueueLength(RntbdUtils.tryGetExecutorTaskQueueSize(channel.eventLoop()));
channel.write(requestRecord.stage(RntbdRequestRecord.Stage.PIPELINED));
return requestRecord;
}
final RntbdRequestArgs requestArgs = requestRecord.args();
final UUID activityId = requestArgs.activityId();
final Throwable cause = connected.cause();
if (connected.isCancelled()) {
logger.debug("\n [{}]\n {}\n write cancelled: {}", this, requestArgs, cause);
requestRecord.cancel(true);
} else {
logger.debug("\n [{}]\n {}\n write failed due to {} ", this, requestArgs, cause);
final String reason = cause.toString();
final GoneException goneException = new GoneException(
lenientFormat("failed to establish connection to %s due to %s", this.remoteAddress, reason),
cause instanceof Exception ? (Exception) cause : new IOException(reason, cause),
ImmutableMap.of(HttpHeaders.ACTIVITY_ID, activityId.toString()),
requestArgs.replicaPath()
);
BridgeInternal.setRequestHeaders(goneException, requestArgs.serviceRequest().getHeaders());
requestRecord.completeExceptionally(goneException);
}
return requestRecord;
}
static final class JsonSerializer extends StdSerializer<RntbdServiceEndpoint> {
private static final long serialVersionUID = -5764954918168771152L;
public JsonSerializer() {
super(RntbdServiceEndpoint.class);
}
@Override
public void serialize(RntbdServiceEndpoint value, JsonGenerator generator, SerializerProvider provider)
throws IOException {
final RntbdTransportClient transportClient = value.provider.transportClient;
generator.writeStartObject();
generator.writeNumberField("id", value.id);
generator.writeBooleanField("closed", value.isClosed());
generator.writeNumberField("concurrentRequests", value.concurrentRequests());
generator.writeStringField("remoteAddress", value.remoteAddress.toString());
generator.writeObjectField("channelPool", value.channelPool);
generator.writeObjectFieldStart("transportClient");
generator.writeNumberField("id", transportClient.id());
generator.writeBooleanField("closed", transportClient.isClosed());
generator.writeNumberField("endpointCount", transportClient.endpointCount());
generator.writeNumberField("endpointEvictionCount", transportClient.endpointEvictionCount());
generator.writeEndObject();
generator.writeEndObject();
}
}
public static final class Provider implements RntbdEndpoint.Provider {
private static final Logger logger = LoggerFactory.getLogger(Provider.class);
private final AtomicBoolean closed;
private final Config config;
private final ConcurrentHashMap<String, RntbdEndpoint> endpoints;
private final EventLoopGroup eventLoopGroup;
private final AtomicInteger evictions;
private final RntbdEndpointMonitoringProvider monitoring;
private final RntbdRequestTimer requestTimer;
private final RntbdTransportClient transportClient;
private final IAddressResolver addressResolver;
public Provider(
final RntbdTransportClient transportClient,
final Options options,
final SslContext sslContext,
final IAddressResolver addressResolver) {
checkNotNull(transportClient, "expected non-null provider");
checkNotNull(options, "expected non-null options");
checkNotNull(sslContext, "expected non-null sslContext");
final LogLevel wireLogLevel;
if (logger.isDebugEnabled()) {
wireLogLevel = LogLevel.TRACE;
} else {
wireLogLevel = null;
}
this.addressResolver = addressResolver;
this.transportClient = transportClient;
this.config = new Config(options, sslContext, wireLogLevel);
this.requestTimer = new RntbdRequestTimer(
config.requestTimeoutInNanos(),
config.requestTimerResolutionInNanos());
this.eventLoopGroup = this.getEventLoopGroup(options);
this.endpoints = new ConcurrentHashMap<>();
this.evictions = new AtomicInteger();
this.closed = new AtomicBoolean();
this.monitoring = new RntbdEndpointMonitoringProvider(this);
this.monitoring.init();
}
private EventLoopGroup getEventLoopGroup(Options options) {
checkNotNull(options, "expected non-null options");
RntbdLoop rntbdEventLoop = RntbdLoopNativeDetector.getRntbdLoop(options.preferTcpNative());
DefaultThreadFactory threadFactory =
new DefaultThreadFactory(
"cosmos-rntbd-" + rntbdEventLoop.getName(),
true,
options.ioThreadPriority());
return rntbdEventLoop.newEventLoopGroup(options.threadCount(), threadFactory);
}
@Override
public void close() {
if (this.closed.compareAndSet(false, true)) {
this.monitoring.close();
for (final RntbdEndpoint endpoint : this.endpoints.values()) {
endpoint.close();
}
this.eventLoopGroup.shutdownGracefully(QUIET_PERIOD, this.config.shutdownTimeoutInNanos(), NANOSECONDS)
.addListener(future -> {
this.requestTimer.close();
if (future.isSuccess()) {
logger.debug("\n [{}]\n closed endpoints", this);
return;
}
logger.error("\n [{}]\n failed to close endpoints due to ", this, future.cause());
});
return;
}
logger.debug("\n [{}]\n already closed", this);
}
@Override
public Config config() {
return this.config;
}
@Override
public int count() {
return this.endpoints.size();
}
@Override
public int evictions() {
return this.evictions.get();
}
@Override
public RntbdEndpoint get(final URI physicalAddress) {
return endpoints.computeIfAbsent(physicalAddress.getAuthority(), authority -> new RntbdServiceEndpoint(
this,
this.config,
this.eventLoopGroup,
this.requestTimer,
physicalAddress));
}
@Override
public IAddressResolver getAddressResolver() {
return this.addressResolver;
}
@Override
public Stream<RntbdEndpoint> list() {
return this.endpoints.values().stream();
}
private void evict(final RntbdEndpoint endpoint) {
if (this.endpoints.remove(endpoint.serverKey().getAuthority()) != null) {
this.evictions.incrementAndGet();
}
}
}
public static class RntbdEndpointMonitoringProvider implements AutoCloseable {
private final Logger logger = LoggerFactory.getLogger(RntbdEndpointMonitoringProvider.class);
private static final EventExecutor monitoringRntbdChannelPool = new DefaultEventExecutor(new RntbdThreadFactory(
"monitoring-rntbd-endpoints",
true,
Thread.MIN_PRIORITY));
private static final Duration MONITORING_PERIOD = Duration.ofSeconds(60);
private final Provider provider;
private final static int MAX_TASK_LIMIT = 5_000;
private ScheduledFuture<?> future;
RntbdEndpointMonitoringProvider(Provider provider) {
this.provider = provider;
}
synchronized void init() {
logger.info("Starting RntbdClientChannelPoolMonitoringProvider ...");
this.future = RntbdEndpointMonitoringProvider.monitoringRntbdChannelPool.scheduleAtFixedRate(() -> {
logAllPools();
}, 0, MONITORING_PERIOD.toMillis(), TimeUnit.MILLISECONDS);
}
@Override
public synchronized void close() {
logger.info("Shutting down RntbdClientChannelPoolMonitoringProvider ...");
this.future.cancel(false);
this.future = null;
}
synchronized void logAllPools() {
try {
logger.debug("Total number of RntbdClientChannelPool [{}].", provider.endpoints.size());
for (RntbdEndpoint endpoint : provider.endpoints.values()) {
logEndpoint(endpoint);
}
} catch (Exception e) {
logger.error("monitoring unexpected failure", e);
}
}
private void logEndpoint(RntbdEndpoint endpoint) {
if (this.logger.isWarnEnabled() &&
(endpoint.executorTaskQueueMetrics() > MAX_TASK_LIMIT ||
endpoint.requestQueueLength() > MAX_TASK_LIMIT ||
endpoint.gettingEstablishedConnectionsMetrics() > 0 ||
endpoint.channelsMetrics() > endpoint.maxChannels())) {
logger.warn("RntbdEndpoint Identifier {}, Stat {}", getPoolId(endpoint), getPoolStat(endpoint));
} else if (this.logger.isDebugEnabled()) {
logger.debug("RntbdEndpoint Identifier {}, Stat {}", getPoolId(endpoint), getPoolStat(endpoint));
}
}
private String getPoolStat(RntbdEndpoint endpoint) {
return "[ "
+ "poolTaskExecutorSize " + endpoint.executorTaskQueueMetrics()
+ ", lastRequestNanoTime " + Instant.now().minusNanos(
System.nanoTime() - endpoint.lastRequestNanoTime())
+ ", connecting " + endpoint.gettingEstablishedConnectionsMetrics()
+ ", acquiredChannel " + endpoint.channelsAcquiredMetric()
+ ", availableChannel " + endpoint.channelsAvailableMetric()
+ ", pendingAcquisitionSize " + endpoint.requestQueueLength()
+ ", closed " + endpoint.isClosed()
+ " ]";
}
private String getPoolId(RntbdEndpoint endpoint) {
if (endpoint == null) {
return "null";
}
return "[RntbdEndpoint" +
", id " + endpoint.id() +
", remoteAddress " + endpoint.remoteAddress() +
", creationTime " + endpoint.getCreatedTime() +
", hashCode " + endpoint.hashCode() +
"]";
}
}
} |
Add config for enableTcpNative, tcp_keepIntvl, tcp_keepIdle | private Bootstrap getBootStrap(EventLoopGroup eventLoopGroup, Config config) {
checkNotNull(eventLoopGroup, "expected non-null eventLoopGroup");
checkNotNull(config, "expected non-null config");
Bootstrap bootstrap = new Bootstrap()
.group(eventLoopGroup)
.option(ChannelOption.ALLOCATOR, config.allocator())
.option(ChannelOption.AUTO_READ, true)
.option(ChannelOption.CONNECT_TIMEOUT_MILLIS, config.connectTimeoutInMillis())
.option(ChannelOption.RCVBUF_ALLOCATOR, receiveBufferAllocator)
.option(ChannelOption.SO_KEEPALIVE, true)
.remoteAddress(this.serverKey.getHost(), this.serverKey.getPort());
if (eventLoopGroup instanceof EpollEventLoopGroup) {
bootstrap.channel(EpollSocketChannel.class)
.option(EpollChannelOption.TCP_KEEPINTVL, 1)
.option(EpollChannelOption.TCP_KEEPIDLE, 30);
} else {
bootstrap.channel(NioSocketChannel.class);
}
return bootstrap;
} | .option(EpollChannelOption.TCP_KEEPINTVL, 1) | private Bootstrap getBootStrap(EventLoopGroup eventLoopGroup, Config config) {
checkNotNull(eventLoopGroup, "expected non-null eventLoopGroup");
checkNotNull(config, "expected non-null config");
RntbdLoop rntbdLoop = RntbdLoopNativeDetector.getRntbdLoop(config.preferTcpNative());
Bootstrap bootstrap = new Bootstrap()
.group(eventLoopGroup)
.channel(rntbdLoop.getChannelClass())
.option(ChannelOption.ALLOCATOR, config.allocator())
.option(ChannelOption.AUTO_READ, true)
.option(ChannelOption.CONNECT_TIMEOUT_MILLIS, config.connectTimeoutInMillis())
.option(ChannelOption.RCVBUF_ALLOCATOR, receiveBufferAllocator)
.option(ChannelOption.SO_KEEPALIVE, true)
.remoteAddress(this.serverKey.getHost(), this.serverKey.getPort());
if (rntbdLoop instanceof RntbdLoopEpoll) {
bootstrap
.option(EpollChannelOption.TCP_KEEPINTVL, config.tcpKeepIntvl())
.option(EpollChannelOption.TCP_KEEPIDLE, config.tcpKeepIdle());
}
return bootstrap;
} | class RntbdServiceEndpoint implements RntbdEndpoint {
private static final String TAG_NAME = RntbdServiceEndpoint.class.getSimpleName();
private static final long QUIET_PERIOD = 2_000_000_000L;
private static final AtomicLong instanceCount = new AtomicLong();
private static final Logger logger = LoggerFactory.getLogger(RntbdServiceEndpoint.class);
private static final AdaptiveRecvByteBufAllocator receiveBufferAllocator = new AdaptiveRecvByteBufAllocator();
private final RntbdClientChannelPool channelPool;
private final AtomicBoolean closed;
private final AtomicInteger concurrentRequests;
private final long id;
private final AtomicLong lastRequestNanoTime;
private final AtomicLong lastSuccessfulRequestNanoTime;
private final Instant createdTime;
private final RntbdMetrics metrics;
private final Provider provider;
private final URI serverKey;
private final SocketAddress remoteAddress;
private final RntbdRequestTimer requestTimer;
private final Tag tag;
private final int maxConcurrentRequests;
private final boolean channelAcquisitionContextEnabled;
private final RntbdConnectionStateListener connectionStateListener;
private RntbdServiceEndpoint(
final Provider provider,
final Config config,
final EventLoopGroup group,
final RntbdRequestTimer timer,
final URI physicalAddress) {
this.serverKey = RntbdUtils.getServerKey(physicalAddress);
final Bootstrap bootstrap = this.getBootStrap(group, config);
this.createdTime = Instant.now();
this.channelPool = new RntbdClientChannelPool(this, bootstrap, config);
this.remoteAddress = bootstrap.config().remoteAddress();
this.concurrentRequests = new AtomicInteger();
this.lastRequestNanoTime = new AtomicLong(System.nanoTime());
this.lastSuccessfulRequestNanoTime = new AtomicLong(System.nanoTime());
this.closed = new AtomicBoolean();
this.requestTimer = timer;
this.tag = Tag.of(TAG_NAME, RntbdMetrics.escape(this.remoteAddress.toString()));
this.id = instanceCount.incrementAndGet();
this.provider = provider;
this.metrics = new RntbdMetrics(provider.transportClient, this);
this.maxConcurrentRequests = config.maxConcurrentRequestsPerEndpoint();
this.connectionStateListener = this.provider.addressResolver != null && config.isConnectionEndpointRediscoveryEnabled()
? new RntbdConnectionStateListener(this.provider.addressResolver, this)
: null;
this.channelAcquisitionContextEnabled = config.isChannelAcquisitionContextEnabled();
}
/**
* @return approximate number of acquired channels.
*/
@Override
public int channelsAcquiredMetric() {
return this.channelPool.channelsAcquiredMetrics();
}
/**
* @return approximate number of available channels.
*/
@Override
public int channelsAvailableMetric() {
return this.channelPool.channelsAvailableMetrics();
}
@Override
public int concurrentRequests() {
return this.concurrentRequests.get();
}
@Override
public int gettingEstablishedConnectionsMetrics() {
return this.channelPool.attemptingToConnectMetrics();
}
@Override
public long id() {
return this.id;
}
@Override
public boolean isClosed() {
return this.closed.get();
}
@Override
public int maxChannels() {
return this.channelPool.channels(true);
}
public long lastRequestNanoTime() {
return this.lastRequestNanoTime.get();
}
@Override
public long lastSuccessfulRequestNanoTime() {
return this.lastSuccessfulRequestNanoTime.get();
}
@Override
public int channelsMetrics() {
return this.channelPool.channels(true);
}
@Override
public int executorTaskQueueMetrics() {
return this.channelPool.executorTaskQueueMetrics();
}
public Instant getCreatedTime() {
return this.createdTime;
}
@Override
public SocketAddress remoteAddress() {
return this.remoteAddress;
}
@Override
public URI serverKey() { return this.serverKey; }
@Override
public int requestQueueLength() {
return this.channelPool.requestQueueLength();
}
@Override
public Tag tag() {
return this.tag;
}
@Override
public long usedDirectMemory() {
return this.channelPool.usedDirectMemory();
}
@Override
public long usedHeapMemory() {
return this.channelPool.usedHeapMemory();
}
@Override
public void close() {
if (this.closed.compareAndSet(false, true)) {
this.provider.evict(this);
this.channelPool.close();
}
}
public RntbdRequestRecord request(final RntbdRequestArgs args) {
this.throwIfClosed();
int concurrentRequestSnapshot = this.concurrentRequests.incrementAndGet();
RntbdEndpointStatistics stat = endpointMetricsSnapshot(concurrentRequestSnapshot);
if (concurrentRequestSnapshot > this.maxConcurrentRequests) {
try {
FailFastRntbdRequestRecord requestRecord = FailFastRntbdRequestRecord.createAndFailFast(
args,
concurrentRequestSnapshot,
metrics,
remoteAddress);
requestRecord.serviceEndpointStatistics(stat);
return requestRecord;
}
finally {
concurrentRequests.decrementAndGet();
}
}
this.lastRequestNanoTime.set(args.nanoTimeCreated());
final RntbdRequestRecord record = this.write(args);
record.serviceEndpointStatistics(stat);
record.whenComplete((response, error) -> {
this.concurrentRequests.decrementAndGet();
this.metrics.markComplete(record);
onResponse(error, record);
});
return record;
}
private void onResponse(Throwable exception, RntbdRequestRecord record) {
if (exception == null) {
this.lastSuccessfulRequestNanoTime.set(System.nanoTime());
return;
}
if (this.connectionStateListener != null) {
this.connectionStateListener.onException(record.args().serviceRequest(), exception);
}
if (exception instanceof CosmosException) {
CosmosException cosmosException = (CosmosException) exception;
switch (cosmosException.getStatusCode()) {
case HttpConstants.StatusCodes.CONFLICT:
case HttpConstants.StatusCodes.NOTFOUND:
this.lastSuccessfulRequestNanoTime.set(System.nanoTime());
return;
default:
return;
}
}
}
private RntbdEndpointStatistics endpointMetricsSnapshot(int concurrentRequestSnapshot) {
RntbdEndpointStatistics stats = new RntbdEndpointStatistics()
.availableChannels(this.channelsAvailableMetric())
.acquiredChannels(this.channelsAcquiredMetric())
.executorTaskQueueSize(this.executorTaskQueueMetrics())
.lastSuccessfulRequestNanoTime(this.lastSuccessfulRequestNanoTime())
.createdTime(this.createdTime)
.lastRequestNanoTime(this.lastRequestNanoTime())
.closed(this.closed.get())
.inflightRequests(concurrentRequestSnapshot);
return stats;
}
@Override
public String toString() {
return RntbdObjectMapper.toString(this);
}
private void ensureSuccessWhenReleasedToPool(Channel channel, Future<Void> released) {
if (released.isSuccess()) {
logger.debug("\n [{}]\n {}\n release succeeded", this, channel);
} else {
logger.debug("\n [{}]\n {}\n release failed due to {}", this, channel, released.cause());
}
}
private void releaseToPool(final Channel channel) {
logger.debug("\n [{}]\n {}\n RELEASE", this, channel);
final Future<Void> released = this.channelPool.release(channel);
if (logger.isDebugEnabled()) {
if (released.isDone()) {
ensureSuccessWhenReleasedToPool(channel, released);
} else {
released.addListener(ignored -> ensureSuccessWhenReleasedToPool(channel, released));
}
}
}
private void throwIfClosed() {
if (this.closed.get()) {
throw new TransportException(lenientFormat("%s is closed", this), new IllegalStateException());
}
}
private RntbdRequestRecord write(final RntbdRequestArgs requestArgs) {
final RntbdRequestRecord requestRecord = new AsyncRntbdRequestRecord(requestArgs, this.requestTimer);
requestRecord.channelAcquisitionContextEnabled(this.channelAcquisitionContextEnabled);
requestRecord.stage(RntbdRequestRecord.Stage.CHANNEL_ACQUISITION_STARTED);
final Future<Channel> connectedChannel = this.channelPool.acquire(requestRecord.getChannelAcquisitionTimeline());
logger.debug("\n [{}]\n {}\n WRITE WHEN CONNECTED {}", this, requestArgs, connectedChannel);
if (connectedChannel.isDone()) {
return writeWhenConnected(requestRecord, connectedChannel);
} else {
connectedChannel.addListener(ignored -> writeWhenConnected(requestRecord, connectedChannel));
}
return requestRecord;
}
private RntbdRequestRecord writeWhenConnected(
final RntbdRequestRecord requestRecord, final Future<? super Channel> connected) {
if (connected.isSuccess()) {
final Channel channel = (Channel) connected.getNow();
assert channel != null : "impossible";
this.releaseToPool(channel);
requestRecord.channelTaskQueueLength(RntbdUtils.tryGetExecutorTaskQueueSize(channel.eventLoop()));
channel.write(requestRecord.stage(RntbdRequestRecord.Stage.PIPELINED));
return requestRecord;
}
final RntbdRequestArgs requestArgs = requestRecord.args();
final UUID activityId = requestArgs.activityId();
final Throwable cause = connected.cause();
if (connected.isCancelled()) {
logger.debug("\n [{}]\n {}\n write cancelled: {}", this, requestArgs, cause);
requestRecord.cancel(true);
} else {
logger.debug("\n [{}]\n {}\n write failed due to {} ", this, requestArgs, cause);
final String reason = cause.toString();
final GoneException goneException = new GoneException(
lenientFormat("failed to establish connection to %s due to %s", this.remoteAddress, reason),
cause instanceof Exception ? (Exception) cause : new IOException(reason, cause),
ImmutableMap.of(HttpHeaders.ACTIVITY_ID, activityId.toString()),
requestArgs.replicaPath()
);
BridgeInternal.setRequestHeaders(goneException, requestArgs.serviceRequest().getHeaders());
requestRecord.completeExceptionally(goneException);
}
return requestRecord;
}
static final class JsonSerializer extends StdSerializer<RntbdServiceEndpoint> {
private static final long serialVersionUID = -5764954918168771152L;
public JsonSerializer() {
super(RntbdServiceEndpoint.class);
}
@Override
public void serialize(RntbdServiceEndpoint value, JsonGenerator generator, SerializerProvider provider)
throws IOException {
final RntbdTransportClient transportClient = value.provider.transportClient;
generator.writeStartObject();
generator.writeNumberField("id", value.id);
generator.writeBooleanField("closed", value.isClosed());
generator.writeNumberField("concurrentRequests", value.concurrentRequests());
generator.writeStringField("remoteAddress", value.remoteAddress.toString());
generator.writeObjectField("channelPool", value.channelPool);
generator.writeObjectFieldStart("transportClient");
generator.writeNumberField("id", transportClient.id());
generator.writeBooleanField("closed", transportClient.isClosed());
generator.writeNumberField("endpointCount", transportClient.endpointCount());
generator.writeNumberField("endpointEvictionCount", transportClient.endpointEvictionCount());
generator.writeEndObject();
generator.writeEndObject();
}
}
public static final class Provider implements RntbdEndpoint.Provider {
private static final Logger logger = LoggerFactory.getLogger(Provider.class);
private final AtomicBoolean closed;
private final Config config;
private final ConcurrentHashMap<String, RntbdEndpoint> endpoints;
private final EventLoopGroup eventLoopGroup;
private final AtomicInteger evictions;
private final RntbdEndpointMonitoringProvider monitoring;
private final RntbdRequestTimer requestTimer;
private final RntbdTransportClient transportClient;
private final IAddressResolver addressResolver;
public Provider(
final RntbdTransportClient transportClient,
final Options options,
final SslContext sslContext,
final IAddressResolver addressResolver) {
checkNotNull(transportClient, "expected non-null provider");
checkNotNull(options, "expected non-null options");
checkNotNull(sslContext, "expected non-null sslContext");
final LogLevel wireLogLevel;
if (logger.isDebugEnabled()) {
wireLogLevel = LogLevel.TRACE;
} else {
wireLogLevel = null;
}
this.addressResolver = addressResolver;
this.transportClient = transportClient;
this.config = new Config(options, sslContext, wireLogLevel);
this.requestTimer = new RntbdRequestTimer(
config.requestTimeoutInNanos(),
config.requestTimerResolutionInNanos());
this.eventLoopGroup = this.getEventLoopGroup(options);
this.endpoints = new ConcurrentHashMap<>();
this.evictions = new AtomicInteger();
this.closed = new AtomicBoolean();
this.monitoring = new RntbdEndpointMonitoringProvider(this);
this.monitoring.init();
}
private EventLoopGroup getEventLoopGroup(Options options) {
checkNotNull(options, "expected non-null options");
final DefaultThreadFactory threadFactory =
new DefaultThreadFactory(
"cosmos-rntbd-nio",
true,
options.ioThreadPriority());
if (Epoll.isAvailable()) {
return new EpollEventLoopGroup(options.threadCount(), threadFactory);
}
return new NioEventLoopGroup(options.threadCount(), threadFactory);
}
@Override
public void close() {
if (this.closed.compareAndSet(false, true)) {
this.monitoring.close();
for (final RntbdEndpoint endpoint : this.endpoints.values()) {
endpoint.close();
}
this.eventLoopGroup.shutdownGracefully(QUIET_PERIOD, this.config.shutdownTimeoutInNanos(), NANOSECONDS)
.addListener(future -> {
this.requestTimer.close();
if (future.isSuccess()) {
logger.debug("\n [{}]\n closed endpoints", this);
return;
}
logger.error("\n [{}]\n failed to close endpoints due to ", this, future.cause());
});
return;
}
logger.debug("\n [{}]\n already closed", this);
}
@Override
public Config config() {
return this.config;
}
@Override
public int count() {
return this.endpoints.size();
}
@Override
public int evictions() {
return this.evictions.get();
}
@Override
public RntbdEndpoint get(final URI physicalAddress) {
return endpoints.computeIfAbsent(physicalAddress.getAuthority(), authority -> new RntbdServiceEndpoint(
this,
this.config,
this.eventLoopGroup,
this.requestTimer,
physicalAddress));
}
@Override
public IAddressResolver getAddressResolver() {
return this.addressResolver;
}
@Override
public Stream<RntbdEndpoint> list() {
return this.endpoints.values().stream();
}
private void evict(final RntbdEndpoint endpoint) {
if (this.endpoints.remove(endpoint.serverKey().getAuthority()) != null) {
this.evictions.incrementAndGet();
}
}
}
public static class RntbdEndpointMonitoringProvider implements AutoCloseable {
private final Logger logger = LoggerFactory.getLogger(RntbdEndpointMonitoringProvider.class);
private static final EventExecutor monitoringRntbdChannelPool = new DefaultEventExecutor(new RntbdThreadFactory(
"monitoring-rntbd-endpoints",
true,
Thread.MIN_PRIORITY));
private static final Duration MONITORING_PERIOD = Duration.ofSeconds(60);
private final Provider provider;
private final static int MAX_TASK_LIMIT = 5_000;
private ScheduledFuture<?> future;
RntbdEndpointMonitoringProvider(Provider provider) {
this.provider = provider;
}
synchronized void init() {
logger.info("Starting RntbdClientChannelPoolMonitoringProvider ...");
this.future = RntbdEndpointMonitoringProvider.monitoringRntbdChannelPool.scheduleAtFixedRate(() -> {
logAllPools();
}, 0, MONITORING_PERIOD.toMillis(), TimeUnit.MILLISECONDS);
}
@Override
public synchronized void close() {
logger.info("Shutting down RntbdClientChannelPoolMonitoringProvider ...");
this.future.cancel(false);
this.future = null;
}
synchronized void logAllPools() {
try {
logger.debug("Total number of RntbdClientChannelPool [{}].", provider.endpoints.size());
for (RntbdEndpoint endpoint : provider.endpoints.values()) {
logEndpoint(endpoint);
}
} catch (Exception e) {
logger.error("monitoring unexpected failure", e);
}
}
private void logEndpoint(RntbdEndpoint endpoint) {
if (this.logger.isWarnEnabled() &&
(endpoint.executorTaskQueueMetrics() > MAX_TASK_LIMIT ||
endpoint.requestQueueLength() > MAX_TASK_LIMIT ||
endpoint.gettingEstablishedConnectionsMetrics() > 0 ||
endpoint.channelsMetrics() > endpoint.maxChannels())) {
logger.warn("RntbdEndpoint Identifier {}, Stat {}", getPoolId(endpoint), getPoolStat(endpoint));
} else if (this.logger.isDebugEnabled()) {
logger.debug("RntbdEndpoint Identifier {}, Stat {}", getPoolId(endpoint), getPoolStat(endpoint));
}
}
private String getPoolStat(RntbdEndpoint endpoint) {
return "[ "
+ "poolTaskExecutorSize " + endpoint.executorTaskQueueMetrics()
+ ", lastRequestNanoTime " + Instant.now().minusNanos(
System.nanoTime() - endpoint.lastRequestNanoTime())
+ ", connecting " + endpoint.gettingEstablishedConnectionsMetrics()
+ ", acquiredChannel " + endpoint.channelsAcquiredMetric()
+ ", availableChannel " + endpoint.channelsAvailableMetric()
+ ", pendingAcquisitionSize " + endpoint.requestQueueLength()
+ ", closed " + endpoint.isClosed()
+ " ]";
}
private String getPoolId(RntbdEndpoint endpoint) {
if (endpoint == null) {
return "null";
}
return "[RntbdEndpoint" +
", id " + endpoint.id() +
", remoteAddress " + endpoint.remoteAddress() +
", creationTime " + endpoint.getCreatedTime() +
", hashCode " + endpoint.hashCode() +
"]";
}
}
} | class RntbdServiceEndpoint implements RntbdEndpoint {
private static final String TAG_NAME = RntbdServiceEndpoint.class.getSimpleName();
private static final long QUIET_PERIOD = 2_000_000_000L;
private static final AtomicLong instanceCount = new AtomicLong();
private static final Logger logger = LoggerFactory.getLogger(RntbdServiceEndpoint.class);
private static final AdaptiveRecvByteBufAllocator receiveBufferAllocator = new AdaptiveRecvByteBufAllocator();
private final RntbdClientChannelPool channelPool;
private final AtomicBoolean closed;
private final AtomicInteger concurrentRequests;
private final long id;
private final AtomicLong lastRequestNanoTime;
private final AtomicLong lastSuccessfulRequestNanoTime;
private final Instant createdTime;
private final RntbdMetrics metrics;
private final Provider provider;
private final URI serverKey;
private final SocketAddress remoteAddress;
private final RntbdRequestTimer requestTimer;
private final Tag tag;
private final int maxConcurrentRequests;
private final boolean channelAcquisitionContextEnabled;
private final RntbdConnectionStateListener connectionStateListener;
private RntbdServiceEndpoint(
final Provider provider,
final Config config,
final EventLoopGroup group,
final RntbdRequestTimer timer,
final URI physicalAddress) {
this.serverKey = RntbdUtils.getServerKey(physicalAddress);
final Bootstrap bootstrap = this.getBootStrap(group, config);
this.createdTime = Instant.now();
this.channelPool = new RntbdClientChannelPool(this, bootstrap, config);
this.remoteAddress = bootstrap.config().remoteAddress();
this.concurrentRequests = new AtomicInteger();
this.lastRequestNanoTime = new AtomicLong(System.nanoTime());
this.lastSuccessfulRequestNanoTime = new AtomicLong(System.nanoTime());
this.closed = new AtomicBoolean();
this.requestTimer = timer;
this.tag = Tag.of(TAG_NAME, RntbdMetrics.escape(this.remoteAddress.toString()));
this.id = instanceCount.incrementAndGet();
this.provider = provider;
this.metrics = new RntbdMetrics(provider.transportClient, this);
this.maxConcurrentRequests = config.maxConcurrentRequestsPerEndpoint();
this.connectionStateListener = this.provider.addressResolver != null && config.isConnectionEndpointRediscoveryEnabled()
? new RntbdConnectionStateListener(this.provider.addressResolver, this)
: null;
this.channelAcquisitionContextEnabled = config.isChannelAcquisitionContextEnabled();
}
/**
* @return approximate number of acquired channels.
*/
@Override
public int channelsAcquiredMetric() {
return this.channelPool.channelsAcquiredMetrics();
}
/**
* @return approximate number of available channels.
*/
@Override
public int channelsAvailableMetric() {
return this.channelPool.channelsAvailableMetrics();
}
@Override
public int concurrentRequests() {
return this.concurrentRequests.get();
}
@Override
public int gettingEstablishedConnectionsMetrics() {
return this.channelPool.attemptingToConnectMetrics();
}
@Override
public long id() {
return this.id;
}
@Override
public boolean isClosed() {
return this.closed.get();
}
@Override
public int maxChannels() {
return this.channelPool.channels(true);
}
public long lastRequestNanoTime() {
return this.lastRequestNanoTime.get();
}
@Override
public long lastSuccessfulRequestNanoTime() {
return this.lastSuccessfulRequestNanoTime.get();
}
@Override
public int channelsMetrics() {
return this.channelPool.channels(true);
}
@Override
public int executorTaskQueueMetrics() {
return this.channelPool.executorTaskQueueMetrics();
}
public Instant getCreatedTime() {
return this.createdTime;
}
@Override
public SocketAddress remoteAddress() {
return this.remoteAddress;
}
@Override
public URI serverKey() { return this.serverKey; }
@Override
public int requestQueueLength() {
return this.channelPool.requestQueueLength();
}
@Override
public Tag tag() {
return this.tag;
}
@Override
public long usedDirectMemory() {
return this.channelPool.usedDirectMemory();
}
@Override
public long usedHeapMemory() {
return this.channelPool.usedHeapMemory();
}
@Override
public void close() {
if (this.closed.compareAndSet(false, true)) {
this.provider.evict(this);
this.channelPool.close();
}
}
public RntbdRequestRecord request(final RntbdRequestArgs args) {
this.throwIfClosed();
int concurrentRequestSnapshot = this.concurrentRequests.incrementAndGet();
RntbdEndpointStatistics stat = endpointMetricsSnapshot(concurrentRequestSnapshot);
if (concurrentRequestSnapshot > this.maxConcurrentRequests) {
try {
FailFastRntbdRequestRecord requestRecord = FailFastRntbdRequestRecord.createAndFailFast(
args,
concurrentRequestSnapshot,
metrics,
remoteAddress);
requestRecord.serviceEndpointStatistics(stat);
return requestRecord;
}
finally {
concurrentRequests.decrementAndGet();
}
}
this.lastRequestNanoTime.set(args.nanoTimeCreated());
final RntbdRequestRecord record = this.write(args);
record.serviceEndpointStatistics(stat);
record.whenComplete((response, error) -> {
this.concurrentRequests.decrementAndGet();
this.metrics.markComplete(record);
onResponse(error, record);
});
return record;
}
private void onResponse(Throwable exception, RntbdRequestRecord record) {
if (exception == null) {
this.lastSuccessfulRequestNanoTime.set(System.nanoTime());
return;
}
if (this.connectionStateListener != null) {
this.connectionStateListener.onException(record.args().serviceRequest(), exception);
}
if (exception instanceof CosmosException) {
CosmosException cosmosException = (CosmosException) exception;
switch (cosmosException.getStatusCode()) {
case HttpConstants.StatusCodes.CONFLICT:
case HttpConstants.StatusCodes.NOTFOUND:
this.lastSuccessfulRequestNanoTime.set(System.nanoTime());
return;
default:
return;
}
}
}
private RntbdEndpointStatistics endpointMetricsSnapshot(int concurrentRequestSnapshot) {
RntbdEndpointStatistics stats = new RntbdEndpointStatistics()
.availableChannels(this.channelsAvailableMetric())
.acquiredChannels(this.channelsAcquiredMetric())
.executorTaskQueueSize(this.executorTaskQueueMetrics())
.lastSuccessfulRequestNanoTime(this.lastSuccessfulRequestNanoTime())
.createdTime(this.createdTime)
.lastRequestNanoTime(this.lastRequestNanoTime())
.closed(this.closed.get())
.inflightRequests(concurrentRequestSnapshot);
return stats;
}
@Override
public String toString() {
return RntbdObjectMapper.toString(this);
}
private void ensureSuccessWhenReleasedToPool(Channel channel, Future<Void> released) {
if (released.isSuccess()) {
logger.debug("\n [{}]\n {}\n release succeeded", this, channel);
} else {
logger.debug("\n [{}]\n {}\n release failed due to {}", this, channel, released.cause());
}
}
private void releaseToPool(final Channel channel) {
logger.debug("\n [{}]\n {}\n RELEASE", this, channel);
final Future<Void> released = this.channelPool.release(channel);
if (logger.isDebugEnabled()) {
if (released.isDone()) {
ensureSuccessWhenReleasedToPool(channel, released);
} else {
released.addListener(ignored -> ensureSuccessWhenReleasedToPool(channel, released));
}
}
}
private void throwIfClosed() {
if (this.closed.get()) {
throw new TransportException(lenientFormat("%s is closed", this), new IllegalStateException());
}
}
/**
 * Creates a request record for {@code requestArgs}, acquires a channel from the
 * pool, and writes the request on it once the acquisition completes.
 *
 * @param requestArgs the request to transmit
 * @return the (possibly still pending) request record
 */
private RntbdRequestRecord write(final RntbdRequestArgs requestArgs) {
    final RntbdRequestRecord requestRecord = new AsyncRntbdRequestRecord(requestArgs, this.requestTimer);
    requestRecord.channelAcquisitionContextEnabled(this.channelAcquisitionContextEnabled);
    requestRecord.stage(RntbdRequestRecord.Stage.CHANNEL_ACQUISITION_STARTED);
    final Future<Channel> connectedChannel = this.channelPool.acquire(requestRecord.getChannelAcquisitionTimeline());
    logger.debug("\n [{}]\n {}\n WRITE WHEN CONNECTED {}", this, requestArgs, connectedChannel);
    // Write immediately when the pool satisfied the acquisition synchronously;
    // otherwise defer the write until the channel future completes.
    if (connectedChannel.isDone()) {
        return writeWhenConnected(requestRecord, connectedChannel);
    } else {
        connectedChannel.addListener(ignored -> writeWhenConnected(requestRecord, connectedChannel));
    }
    return requestRecord;
}
/**
 * Completes a pending write once channel acquisition has finished.
 *
 * On success the channel is released back to the pool and the request is
 * written on it. On cancellation the record is cancelled; on any other failure
 * the record is completed exceptionally with a {@link GoneException} wrapping
 * the acquisition failure.
 *
 * @param requestRecord the record for the request being written
 * @param connected the completed channel-acquisition future
 * @return {@code requestRecord}
 */
private RntbdRequestRecord writeWhenConnected(
    final RntbdRequestRecord requestRecord, final Future<? super Channel> connected) {
    if (connected.isSuccess()) {
        final Channel channel = (Channel) connected.getNow();
        assert channel != null : "impossible";
        // NOTE: the channel is released back to the pool before the write is issued.
        this.releaseToPool(channel);
        requestRecord.channelTaskQueueLength(RntbdUtils.tryGetExecutorTaskQueueSize(channel.eventLoop()));
        channel.write(requestRecord.stage(RntbdRequestRecord.Stage.PIPELINED));
        return requestRecord;
    }
    final RntbdRequestArgs requestArgs = requestRecord.args();
    final UUID activityId = requestArgs.activityId();
    final Throwable cause = connected.cause();
    if (connected.isCancelled()) {
        logger.debug("\n [{}]\n {}\n write cancelled: {}", this, requestArgs, cause);
        requestRecord.cancel(true);
    } else {
        logger.debug("\n [{}]\n {}\n write failed due to {} ", this, requestArgs, cause);
        final String reason = cause.toString();
        // Connection-establishment failures are surfaced as GoneException,
        // carrying the activity id and replica path for upstream handling.
        final GoneException goneException = new GoneException(
            lenientFormat("failed to establish connection to %s due to %s", this.remoteAddress, reason),
            cause instanceof Exception ? (Exception) cause : new IOException(reason, cause),
            ImmutableMap.of(HttpHeaders.ACTIVITY_ID, activityId.toString()),
            requestArgs.replicaPath()
        );
        BridgeInternal.setRequestHeaders(goneException, requestArgs.serviceRequest().getHeaders());
        requestRecord.completeExceptionally(goneException);
    }
    return requestRecord;
}
/**
 * Jackson serializer that renders an {@link RntbdServiceEndpoint} — plus a
 * summary of its owning transport client — as JSON for diagnostics output.
 */
static final class JsonSerializer extends StdSerializer<RntbdServiceEndpoint> {
    private static final long serialVersionUID = -5764954918168771152L;
    public JsonSerializer() {
        super(RntbdServiceEndpoint.class);
    }
    @Override
    public void serialize(RntbdServiceEndpoint value, JsonGenerator generator, SerializerProvider provider)
        throws IOException {
        final RntbdTransportClient transportClient = value.provider.transportClient;
        generator.writeStartObject();
        generator.writeNumberField("id", value.id);
        generator.writeBooleanField("closed", value.isClosed());
        generator.writeNumberField("concurrentRequests", value.concurrentRequests());
        generator.writeStringField("remoteAddress", value.remoteAddress.toString());
        generator.writeObjectField("channelPool", value.channelPool);
        // Nested summary of the transport client that owns this endpoint.
        generator.writeObjectFieldStart("transportClient");
        generator.writeNumberField("id", transportClient.id());
        generator.writeBooleanField("closed", transportClient.isClosed());
        generator.writeNumberField("endpointCount", transportClient.endpointCount());
        generator.writeNumberField("endpointEvictionCount", transportClient.endpointEvictionCount());
        generator.writeEndObject();
        generator.writeEndObject();
    }
}
/**
 * Creates and caches {@link RntbdEndpoint} instances — one per physical-address
 * authority — sharing a single event-loop group, request timer, and monitoring
 * provider across all endpoints.
 */
public static final class Provider implements RntbdEndpoint.Provider {
    private static final Logger logger = LoggerFactory.getLogger(Provider.class);
    private final AtomicBoolean closed;  // guards close() against re-entry
    private final Config config;
    private final ConcurrentHashMap<String, RntbdEndpoint> endpoints;  // keyed by URI authority
    private final EventLoopGroup eventLoopGroup;
    private final AtomicInteger evictions;
    private final RntbdEndpointMonitoringProvider monitoring;
    private final RntbdRequestTimer requestTimer;
    private final RntbdTransportClient transportClient;
    private final IAddressResolver addressResolver;
    /**
     * Builds the shared infrastructure (event loops, request timer, monitoring)
     * used by every endpoint this provider hands out.
     */
    public Provider(
        final RntbdTransportClient transportClient,
        final Options options,
        final SslContext sslContext,
        final IAddressResolver addressResolver) {
        checkNotNull(transportClient, "expected non-null provider");
        checkNotNull(options, "expected non-null options");
        checkNotNull(sslContext, "expected non-null sslContext");
        // Wire-level logging only when debug is enabled; null disables it.
        final LogLevel wireLogLevel;
        if (logger.isDebugEnabled()) {
            wireLogLevel = LogLevel.TRACE;
        } else {
            wireLogLevel = null;
        }
        this.addressResolver = addressResolver;
        this.transportClient = transportClient;
        this.config = new Config(options, sslContext, wireLogLevel);
        this.requestTimer = new RntbdRequestTimer(
            config.requestTimeoutInNanos(),
            config.requestTimerResolutionInNanos());
        this.eventLoopGroup = this.getEventLoopGroup(options);
        this.endpoints = new ConcurrentHashMap<>();
        this.evictions = new AtomicInteger();
        this.closed = new AtomicBoolean();
        this.monitoring = new RntbdEndpointMonitoringProvider(this);
        this.monitoring.init();
    }
    // Picks the native transport (epoll/kqueue) when preferred and available,
    // falling back per RntbdLoopNativeDetector; threads are daemons.
    private EventLoopGroup getEventLoopGroup(Options options) {
        checkNotNull(options, "expected non-null options");
        RntbdLoop rntbdEventLoop = RntbdLoopNativeDetector.getRntbdLoop(options.preferTcpNative());
        DefaultThreadFactory threadFactory =
            new DefaultThreadFactory(
                "cosmos-rntbd-" + rntbdEventLoop.getName(),
                true,
                options.ioThreadPriority());
        return rntbdEventLoop.newEventLoopGroup(options.threadCount(), threadFactory);
    }
    /**
     * Idempotently closes all endpoints, then shuts the event-loop group down
     * gracefully; the request timer is closed once shutdown completes.
     */
    @Override
    public void close() {
        if (this.closed.compareAndSet(false, true)) {
            this.monitoring.close();
            for (final RntbdEndpoint endpoint : this.endpoints.values()) {
                endpoint.close();
            }
            this.eventLoopGroup.shutdownGracefully(QUIET_PERIOD, this.config.shutdownTimeoutInNanos(), NANOSECONDS)
                .addListener(future -> {
                    this.requestTimer.close();
                    if (future.isSuccess()) {
                        logger.debug("\n [{}]\n closed endpoints", this);
                        return;
                    }
                    logger.error("\n [{}]\n failed to close endpoints due to ", this, future.cause());
                });
            return;
        }
        logger.debug("\n [{}]\n already closed", this);
    }
    @Override
    public Config config() {
        return this.config;
    }
    @Override
    public int count() {
        return this.endpoints.size();
    }
    @Override
    public int evictions() {
        return this.evictions.get();
    }
    // Returns the cached endpoint for the address's authority, creating it on demand.
    @Override
    public RntbdEndpoint get(final URI physicalAddress) {
        return endpoints.computeIfAbsent(physicalAddress.getAuthority(), authority -> new RntbdServiceEndpoint(
            this,
            this.config,
            this.eventLoopGroup,
            this.requestTimer,
            physicalAddress));
    }
    @Override
    public IAddressResolver getAddressResolver() {
        return this.addressResolver;
    }
    @Override
    public Stream<RntbdEndpoint> list() {
        return this.endpoints.values().stream();
    }
    // Removes the endpoint from the cache and counts the eviction (no-op if absent).
    private void evict(final RntbdEndpoint endpoint) {
        if (this.endpoints.remove(endpoint.serverKey().getAuthority()) != null) {
            this.evictions.incrementAndGet();
        }
    }
}
/**
 * Periodically logs health statistics for every endpoint owned by a
 * {@link Provider}. {@link #init()} schedules the task on a dedicated
 * single-threaded executor; {@link #close()} cancels it.
 */
public static class RntbdEndpointMonitoringProvider implements AutoCloseable {
    private final Logger logger = LoggerFactory.getLogger(RntbdEndpointMonitoringProvider.class);
    private static final EventExecutor monitoringRntbdChannelPool = new DefaultEventExecutor(new RntbdThreadFactory(
        "monitoring-rntbd-endpoints",
        true,
        Thread.MIN_PRIORITY));
    private static final Duration MONITORING_PERIOD = Duration.ofSeconds(60);
    private final Provider provider;
    private final static int MAX_TASK_LIMIT = 5_000;
    // Non-null only between init() and close().
    private ScheduledFuture<?> future;
    RntbdEndpointMonitoringProvider(Provider provider) {
        this.provider = provider;
    }
    /** Starts the periodic logging task (fixed rate, {@link #MONITORING_PERIOD}). */
    synchronized void init() {
        logger.info("Starting RntbdClientChannelPoolMonitoringProvider ...");
        this.future = RntbdEndpointMonitoringProvider.monitoringRntbdChannelPool.scheduleAtFixedRate(
            this::logAllPools, 0, MONITORING_PERIOD.toMillis(), TimeUnit.MILLISECONDS);
    }
    /** Stops the periodic logging task; safe to call more than once. */
    @Override
    public synchronized void close() {
        logger.info("Shutting down RntbdClientChannelPoolMonitoringProvider ...");
        // BUG FIX: previously this.future was dereferenced unconditionally,
        // throwing NullPointerException when close() was invoked before init()
        // or called a second time.
        if (this.future != null) {
            this.future.cancel(false);
            this.future = null;
        }
    }
    // Logs a snapshot of every endpoint; failures are contained so the
    // scheduled task keeps running.
    synchronized void logAllPools() {
        try {
            logger.debug("Total number of RntbdClientChannelPool [{}].", provider.endpoints.size());
            for (RntbdEndpoint endpoint : provider.endpoints.values()) {
                logEndpoint(endpoint);
            }
        } catch (Exception e) {
            logger.error("monitoring unexpected failure", e);
        }
    }
    // Warn when an endpoint looks unhealthy (deep queues, pending connections,
    // or more channels than allowed); otherwise log at debug.
    private void logEndpoint(RntbdEndpoint endpoint) {
        if (this.logger.isWarnEnabled() &&
            (endpoint.executorTaskQueueMetrics() > MAX_TASK_LIMIT ||
                endpoint.requestQueueLength() > MAX_TASK_LIMIT ||
                endpoint.gettingEstablishedConnectionsMetrics() > 0 ||
                endpoint.channelsMetrics() > endpoint.maxChannels())) {
            logger.warn("RntbdEndpoint Identifier {}, Stat {}", getPoolId(endpoint), getPoolStat(endpoint));
        } else if (this.logger.isDebugEnabled()) {
            logger.debug("RntbdEndpoint Identifier {}, Stat {}", getPoolId(endpoint), getPoolStat(endpoint));
        }
    }
    // Human-readable counters for one endpoint.
    private String getPoolStat(RntbdEndpoint endpoint) {
        return "[ "
            + "poolTaskExecutorSize " + endpoint.executorTaskQueueMetrics()
            + ", lastRequestNanoTime " + Instant.now().minusNanos(
                System.nanoTime() - endpoint.lastRequestNanoTime())
            + ", connecting " + endpoint.gettingEstablishedConnectionsMetrics()
            + ", acquiredChannel " + endpoint.channelsAcquiredMetric()
            + ", availableChannel " + endpoint.channelsAvailableMetric()
            + ", pendingAcquisitionSize " + endpoint.requestQueueLength()
            + ", closed " + endpoint.isClosed()
            + " ]";
    }
    // Stable identifier for one endpoint (null-safe).
    private String getPoolId(RntbdEndpoint endpoint) {
        if (endpoint == null) {
            return "null";
        }
        return "[RntbdEndpoint" +
            ", id " + endpoint.id() +
            ", remoteAddress " + endpoint.remoteAddress() +
            ", creationTime " + endpoint.getCreatedTime() +
            ", hashCode " + endpoint.hashCode() +
            "]";
    }
}
} |
Can `getRecognizePiiEntitiesActions` return null? | private List<PiiTask> toPiiTask(TextAnalyticsActions actions) {
AtomicInteger taskNumber = new AtomicInteger();
return StreamSupport.stream(actions.getRecognizePiiEntitiesActions().spliterator(), false).map(
action -> {
if (action == null) {
return null;
}
final PiiTask piiTask = new PiiTask();
piiTask
.setParameters(
new PiiTaskParameters()
.setModelVersion(action.getModelVersion())
.setLoggingOptOut(action.isServiceLogsDisabled())
.setDomain(PiiTaskParametersDomain.fromString(
action.getDomainFilter() == null ? null
: action.getDomainFilter().toString()))
.setStringIndexType(StringIndexType.UTF16CODE_UNIT)
.setPiiCategories(toCategoriesFilter(action.getCategoriesFilter())))
.setTaskName(String.valueOf(taskNumber.getAndIncrement()));
return piiTask;
}).collect(Collectors.toList());
} | return StreamSupport.stream(actions.getRecognizePiiEntitiesActions().spliterator(), false).map( | private List<PiiTask> toPiiTask(TextAnalyticsActions actions) {
final List<PiiTask> piiTasks = new ArrayList<>();
for (RecognizePiiEntitiesAction action : actions.getRecognizePiiEntitiesActions()) {
if (action == null) {
piiTasks.add(null);
} else {
piiTasks.add(
new PiiTask()
.setTaskName(action.getActionName())
.setParameters(
new PiiTaskParameters()
.setModelVersion(action.getModelVersion())
.setLoggingOptOut(action.isServiceLogsDisabled())
.setDomain(PiiTaskParametersDomain.fromString(
action.getDomainFilter() == null ? null
: action.getDomainFilter().toString()))
.setStringIndexType(StringIndexType.UTF16CODE_UNIT)
.setPiiCategories(toCategoriesFilter(action.getCategoriesFilter()))));
}
}
return piiTasks;
} | class AnalyzeActionsAsyncClient {
private static final String ENTITY_RECOGNITION_TASKS = "entityRecognitionTasks";
private static final String ENTITY_RECOGNITION_PII_TASKS = "entityRecognitionPiiTasks";
private static final String KEY_PHRASE_EXTRACTION_TASKS = "keyPhraseExtractionTasks";
private static final String ENTITY_LINKING_TASKS = "entityLinkingTasks";
private static final String SENTIMENT_ANALYSIS_TASKS = "sentimentAnalysisTasks";
private static final String EXTRACTIVE_SUMMARIZATION_TASKS = "extractiveSummarizationTasks";
private static final String REGEX_ACTION_ERROR_TARGET =
String.format("
ENTITY_RECOGNITION_TASKS, ENTITY_LINKING_TASKS, SENTIMENT_ANALYSIS_TASKS, EXTRACTIVE_SUMMARIZATION_TASKS);
private final ClientLogger logger = new ClientLogger(AnalyzeActionsAsyncClient.class);
private final TextAnalyticsClientImpl service;
private static final Pattern PATTERN;
static {
PATTERN = Pattern.compile(REGEX_ACTION_ERROR_TARGET, Pattern.MULTILINE);
}
AnalyzeActionsAsyncClient(TextAnalyticsClientImpl service) {
this.service = service;
}
/**
 * Starts the analyze-actions long-running operation and returns a poller whose
 * final result is a paged flux of {@link AnalyzeActionsResult}.
 *
 * @param documents the input documents (validated non-empty)
 * @param actions the actions to run; translated into the service task manifest
 * @param options optional request options; defaulted when null
 * @param context the call context; tracing namespace is attached
 */
PollerFlux<AnalyzeActionsOperationDetail, AnalyzeActionsResultPagedFlux> beginAnalyzeActions(
    Iterable<TextDocumentInput> documents, TextAnalyticsActions actions, AnalyzeActionsOptions options,
    Context context) {
    try {
        inputDocumentsValidation(documents);
        options = getNotNullAnalyzeActionsOptions(options);
        final Context finalContext = getNotNullContext(context)
            .addData(AZ_TRACING_NAMESPACE_KEY, COGNITIVE_TRACING_NAMESPACE_VALUE);
        final AnalyzeBatchInput analyzeBatchInput =
            new AnalyzeBatchInput()
                .setAnalysisInput(new MultiLanguageBatchInput().setDocuments(toMultiLanguageInput(documents)))
                .setTasks(getJobManifestTasks(actions));
        analyzeBatchInput.setDisplayName(actions.getDisplayName());
        final boolean finalIncludeStatistics = options.isIncludeStatistics();
        return new PollerFlux<>(
            DEFAULT_POLL_INTERVAL,
            // Activation: submit the job and capture the operation id from the
            // Operation-Location response header.
            activationOperation(
                service.analyzeWithResponseAsync(analyzeBatchInput, finalContext)
                    .map(analyzeResponse -> {
                        final AnalyzeActionsOperationDetail textAnalyticsOperationResult =
                            new AnalyzeActionsOperationDetail();
                        AnalyzeActionsOperationDetailPropertiesHelper
                            .setOperationId(textAnalyticsOperationResult,
                                parseOperationId(analyzeResponse.getDeserializedHeaders().getOperationLocation()));
                        return textAnalyticsOperationResult;
                    })),
            pollingOperation(operationId -> service.analyzeStatusWithResponseAsync(operationId,
                finalIncludeStatistics, null, null, finalContext)),
            // Cancellation is not supported by this operation.
            (activationResponse, pollingContext) ->
                Mono.error(new RuntimeException("Cancellation is not supported.")),
            fetchingOperation(operationId -> Mono.just(getAnalyzeOperationFluxPage(
                operationId, null, null, finalIncludeStatistics, finalContext)))
        );
    } catch (RuntimeException ex) {
        return PollerFlux.error(ex);
    }
}
/**
 * Synchronous-iteration variant of {@link #beginAnalyzeActions}: identical
 * activation/polling, but the final result is wrapped in an
 * {@link AnalyzeActionsResultPagedIterable}.
 */
PollerFlux<AnalyzeActionsOperationDetail, AnalyzeActionsResultPagedIterable> beginAnalyzeActionsIterable(
    Iterable<TextDocumentInput> documents, TextAnalyticsActions actions, AnalyzeActionsOptions options,
    Context context) {
    try {
        inputDocumentsValidation(documents);
        options = getNotNullAnalyzeActionsOptions(options);
        final Context finalContext = getNotNullContext(context)
            .addData(AZ_TRACING_NAMESPACE_KEY, COGNITIVE_TRACING_NAMESPACE_VALUE);
        final AnalyzeBatchInput analyzeBatchInput =
            new AnalyzeBatchInput()
                .setAnalysisInput(new MultiLanguageBatchInput().setDocuments(toMultiLanguageInput(documents)))
                .setTasks(getJobManifestTasks(actions));
        analyzeBatchInput.setDisplayName(actions.getDisplayName());
        final boolean finalIncludeStatistics = options.isIncludeStatistics();
        return new PollerFlux<>(
            DEFAULT_POLL_INTERVAL,
            // Activation: submit the job and capture the operation id from the
            // Operation-Location response header.
            activationOperation(
                service.analyzeWithResponseAsync(analyzeBatchInput, finalContext)
                    .map(analyzeResponse -> {
                        final AnalyzeActionsOperationDetail operationDetail =
                            new AnalyzeActionsOperationDetail();
                        AnalyzeActionsOperationDetailPropertiesHelper.setOperationId(operationDetail,
                            parseOperationId(analyzeResponse.getDeserializedHeaders().getOperationLocation()));
                        return operationDetail;
                    })),
            pollingOperation(operationId -> service.analyzeStatusWithResponseAsync(operationId,
                finalIncludeStatistics, null, null, finalContext)),
            // Cancellation is not supported by this operation.
            (activationResponse, pollingContext) ->
                Mono.error(new RuntimeException("Cancellation is not supported.")),
            fetchingOperationIterable(
                operationId -> Mono.just(new AnalyzeActionsResultPagedIterable(getAnalyzeOperationFluxPage(
                    operationId, null, null, finalIncludeStatistics, finalContext))))
        );
    } catch (RuntimeException ex) {
        return PollerFlux.error(ex);
    }
}
/**
 * Builds the service task manifest from whichever action groups the caller
 * supplied; absent groups are left unset on the manifest.
 *
 * @param actions the action container; a null container yields a null manifest
 */
private JobManifestTasks getJobManifestTasks(TextAnalyticsActions actions) {
    if (actions == null) {
        return null;
    }
    final JobManifestTasks manifest = new JobManifestTasks();
    if (actions.getRecognizeEntitiesActions() != null) {
        manifest.setEntityRecognitionTasks(toEntitiesTask(actions));
    }
    if (actions.getRecognizePiiEntitiesActions() != null) {
        manifest.setEntityRecognitionPiiTasks(toPiiTask(actions));
    }
    if (actions.getExtractKeyPhrasesActions() != null) {
        manifest.setKeyPhraseExtractionTasks(toKeyPhrasesTask(actions));
    }
    if (actions.getRecognizeLinkedEntitiesActions() != null) {
        manifest.setEntityLinkingTasks(toEntityLinkingTask(actions));
    }
    if (actions.getAnalyzeSentimentActions() != null) {
        manifest.setSentimentAnalysisTasks(toSentimentAnalysisTask(actions));
    }
    if (actions.getExtractSummaryActions() != null) {
        manifest.setExtractiveSummarizationTasks(toExtractiveSummarizationTask(actions));
    }
    return manifest;
}
/**
 * Maps each entity-recognition action to a service {@link EntitiesTask}.
 * Null actions pass through as null entries; task names are the running index
 * of the non-null actions.
 */
private List<EntitiesTask> toEntitiesTask(TextAnalyticsActions actions) {
    final AtomicInteger index = new AtomicInteger();
    return StreamSupport.stream(actions.getRecognizeEntitiesActions().spliterator(), false)
        .map(action -> {
            if (action == null) {
                return null;
            }
            final EntitiesTaskParameters parameters = new EntitiesTaskParameters();
            parameters.setModelVersion(action.getModelVersion());
            parameters.setLoggingOptOut(action.isServiceLogsDisabled());
            parameters.setStringIndexType(StringIndexType.UTF16CODE_UNIT);
            final EntitiesTask task = new EntitiesTask();
            task.setParameters(parameters);
            task.setTaskName(String.valueOf(index.getAndIncrement()));
            return task;
        })
        .collect(Collectors.toList());
}
/**
 * Maps each key-phrase extraction action to a service {@link KeyPhrasesTask}.
 * Null actions pass through as null entries; task names are the running index
 * of the non-null actions.
 */
private List<KeyPhrasesTask> toKeyPhrasesTask(TextAnalyticsActions actions) {
    final AtomicInteger index = new AtomicInteger();
    return StreamSupport.stream(actions.getExtractKeyPhrasesActions().spliterator(), false)
        .map(action -> {
            if (action == null) {
                return null;
            }
            final KeyPhrasesTaskParameters parameters = new KeyPhrasesTaskParameters();
            parameters.setModelVersion(action.getModelVersion());
            parameters.setLoggingOptOut(action.isServiceLogsDisabled());
            final KeyPhrasesTask task = new KeyPhrasesTask();
            task.setParameters(parameters);
            task.setTaskName(String.valueOf(index.getAndIncrement()));
            return task;
        })
        .collect(Collectors.toList());
}
/**
 * Maps each linked-entity recognition action to a service
 * {@link EntityLinkingTask}. Null actions pass through as null entries; task
 * names are the running index of the non-null actions.
 */
private List<EntityLinkingTask> toEntityLinkingTask(TextAnalyticsActions actions) {
    final AtomicInteger index = new AtomicInteger();
    return StreamSupport.stream(actions.getRecognizeLinkedEntitiesActions().spliterator(), false)
        .map(action -> {
            if (action == null) {
                return null;
            }
            final EntityLinkingTaskParameters parameters = new EntityLinkingTaskParameters();
            parameters.setModelVersion(action.getModelVersion());
            parameters.setLoggingOptOut(action.isServiceLogsDisabled());
            parameters.setStringIndexType(StringIndexType.UTF16CODE_UNIT);
            final EntityLinkingTask task = new EntityLinkingTask();
            task.setParameters(parameters);
            task.setTaskName(String.valueOf(index.getAndIncrement()));
            return task;
        })
        .collect(Collectors.toList());
}
/**
 * Maps each sentiment-analysis action to a service
 * {@link SentimentAnalysisTask}. Null actions pass through as null entries;
 * task names are the running index of the non-null actions.
 */
private List<SentimentAnalysisTask> toSentimentAnalysisTask(TextAnalyticsActions actions) {
    final AtomicInteger index = new AtomicInteger();
    return StreamSupport.stream(actions.getAnalyzeSentimentActions().spliterator(), false)
        .map(action -> {
            if (action == null) {
                return null;
            }
            final SentimentAnalysisTaskParameters parameters = new SentimentAnalysisTaskParameters();
            parameters.setModelVersion(action.getModelVersion());
            parameters.setLoggingOptOut(action.isServiceLogsDisabled());
            parameters.setStringIndexType(StringIndexType.UTF16CODE_UNIT);
            final SentimentAnalysisTask task = new SentimentAnalysisTask();
            task.setParameters(parameters);
            task.setTaskName(String.valueOf(index.getAndIncrement()));
            return task;
        })
        .collect(Collectors.toList());
}
/**
 * Maps each extract-summary action to a service
 * {@link ExtractiveSummarizationTask}. Null actions pass through as null
 * entries; task names are the running index of the non-null actions.
 */
private List<ExtractiveSummarizationTask> toExtractiveSummarizationTask(TextAnalyticsActions actions) {
    final AtomicInteger index = new AtomicInteger();
    return StreamSupport.stream(actions.getExtractSummaryActions().spliterator(), false)
        .map(action -> {
            if (action == null) {
                return null;
            }
            final ExtractiveSummarizationTaskParameters parameters =
                new ExtractiveSummarizationTaskParameters();
            parameters.setModelVersion(action.getModelVersion());
            parameters.setStringIndexType(StringIndexType.UTF16CODE_UNIT);
            parameters.setLoggingOptOut(action.isServiceLogsDisabled());
            parameters.setSentenceCount(action.getMaxSentenceCount());
            // Sort order is optional; a null orderBy maps to a null sortBy.
            parameters.setSortBy(action.getOrderBy() == null ? null
                : ExtractiveSummarizationTaskParametersSortBy.fromString(
                    action.getOrderBy().toString()));
            final ExtractiveSummarizationTask task = new ExtractiveSummarizationTask();
            task.setParameters(parameters);
            task.setTaskName(String.valueOf(index.getAndIncrement()));
            return task;
        })
        .collect(Collectors.toList());
}
/**
 * Wraps the activation call so synchronous failures become error Monos and
 * service errors are mapped to HttpResponseException.
 */
private Function<PollingContext<AnalyzeActionsOperationDetail>, Mono<AnalyzeActionsOperationDetail>>
    activationOperation(Mono<AnalyzeActionsOperationDetail> operationResult) {
    return ignoredContext -> {
        try {
            return operationResult.onErrorMap(Utility::mapToHttpResponseExceptionIfExists);
        } catch (RuntimeException ex) {
            return monoError(logger, ex);
        }
    };
}
/**
 * Polls the job-status endpoint with the operation id captured at activation
 * and folds the service response into a long-running-operation poll response.
 */
private Function<PollingContext<AnalyzeActionsOperationDetail>, Mono<PollResponse<AnalyzeActionsOperationDetail>>>
    pollingOperation(Function<String, Mono<Response<AnalyzeJobState>>> pollingFunction) {
    return pollingContext -> {
        try {
            final PollResponse<AnalyzeActionsOperationDetail> lastResponse =
                pollingContext.getLatestResponse();
            final String operationId = lastResponse.getValue().getOperationId();
            return pollingFunction.apply(operationId)
                .flatMap(jobStateResponse -> processAnalyzedModelResponse(jobStateResponse, lastResponse))
                .onErrorMap(Utility::mapToHttpResponseExceptionIfExists);
        } catch (RuntimeException ex) {
            return monoError(logger, ex);
        }
    };
}
/**
 * Produces the final paged-flux result from the operation id recorded in the
 * latest poll response.
 */
private Function<PollingContext<AnalyzeActionsOperationDetail>, Mono<AnalyzeActionsResultPagedFlux>>
    fetchingOperation(Function<String, Mono<AnalyzeActionsResultPagedFlux>> fetchingFunction) {
    return pollingContext -> {
        try {
            return fetchingFunction.apply(
                pollingContext.getLatestResponse().getValue().getOperationId());
        } catch (RuntimeException ex) {
            return monoError(logger, ex);
        }
    };
}
/**
 * Produces the final paged-iterable result from the operation id recorded in
 * the latest poll response.
 */
private Function<PollingContext<AnalyzeActionsOperationDetail>, Mono<AnalyzeActionsResultPagedIterable>>
    fetchingOperationIterable(Function<String, Mono<AnalyzeActionsResultPagedIterable>> fetchingFunction) {
    return pollingContext -> {
        try {
            return fetchingFunction.apply(
                pollingContext.getLatestResponse().getValue().getOperationId());
        } catch (RuntimeException ex) {
            return monoError(logger, ex);
        }
    };
}
// Builds a paged flux whose pages are fetched lazily from the analyze-status
// endpoint; the first page is requested with a null continuation token.
AnalyzeActionsResultPagedFlux getAnalyzeOperationFluxPage(String operationId, Integer top, Integer skip,
    boolean showStats, Context context) {
    return new AnalyzeActionsResultPagedFlux(
        () -> (continuationToken, pageSize) ->
            getPage(continuationToken, operationId, top, skip, showStats, context).flux());
}
/**
 * Fetches one page of analyze-actions results. When a continuation token is
 * present, paging parameters are parsed out of it; otherwise the caller's
 * {@code top}/{@code skip}/{@code showStats} are used directly.
 *
 * @param continuationToken the next-link token, or null for the first page
 */
Mono<PagedResponse<AnalyzeActionsResult>> getPage(String continuationToken, String operationId, Integer top,
    Integer skip, boolean showStats, Context context) {
    if (continuationToken != null) {
        final Map<String, Object> continuationTokenMap = parseNextLink(continuationToken);
        final Integer topValue = (Integer) continuationTokenMap.getOrDefault("$top", null);
        final Integer skipValue = (Integer) continuationTokenMap.getOrDefault("$skip", null);
        // BUG FIX: the boolean variable 'showStats' was previously passed as the
        // map key (auto-boxed to Boolean), which can never equal a String key,
        // so the lookup always fell through to the default. Use the parameter
        // name as the key instead. (Confirm "showStats" matches the key emitted
        // by parseNextLink for this query parameter.)
        final Boolean showStatsValue = (Boolean) continuationTokenMap.getOrDefault("showStats", false);
        return service.analyzeStatusWithResponseAsync(operationId, showStatsValue, topValue, skipValue, context)
            .map(this::toAnalyzeActionsResultPagedResponse)
            .onErrorMap(Utility::mapToHttpResponseExceptionIfExists);
    }
    return service.analyzeStatusWithResponseAsync(operationId, showStats, top, skip, context)
        .map(this::toAnalyzeActionsResultPagedResponse)
        .onErrorMap(Utility::mapToHttpResponseExceptionIfExists);
}
/**
 * Wraps one service job-state response as a single-element page; the
 * continuation token is the job state's nextLink.
 */
private PagedResponse<AnalyzeActionsResult> toAnalyzeActionsResultPagedResponse(Response<AnalyzeJobState> response) {
    final AnalyzeJobState jobState = response.getValue();
    final List<AnalyzeActionsResult> pageItems = Arrays.asList(toAnalyzeActionsResult(jobState));
    return new PagedResponseBase<Void, AnalyzeActionsResult>(
        response.getRequest(),
        response.getStatusCode(),
        response.getHeaders(),
        pageItems,
        jobState.getNextLink(),
        null);
}
/**
 * Converts the raw job state into the public {@link AnalyzeActionsResult}:
 * each service task group is mapped to its action-result list (preserving
 * order, since job-level errors reference tasks by index), then job-level
 * errors are attached to the action results their "target" points at.
 */
private AnalyzeActionsResult toAnalyzeActionsResult(AnalyzeJobState analyzeJobState) {
    TasksStateTasks tasksStateTasks = analyzeJobState.getTasks();
    final List<TasksStateTasksEntityRecognitionPiiTasksItem> piiTasksItems =
        tasksStateTasks.getEntityRecognitionPiiTasks();
    final List<TasksStateTasksEntityRecognitionTasksItem> entityRecognitionTasksItems =
        tasksStateTasks.getEntityRecognitionTasks();
    final List<TasksStateTasksKeyPhraseExtractionTasksItem> keyPhraseExtractionTasks =
        tasksStateTasks.getKeyPhraseExtractionTasks();
    final List<TasksStateTasksEntityLinkingTasksItem> linkedEntityRecognitionTasksItems =
        tasksStateTasks.getEntityLinkingTasks();
    final List<TasksStateTasksSentimentAnalysisTasksItem> sentimentAnalysisTasksItems =
        tasksStateTasks.getSentimentAnalysisTasks();
    final List<TasksStateTasksExtractiveSummarizationTasksItem> extractiveSummarizationTasksItems =
        tasksStateTasks.getExtractiveSummarizationTasks();
    List<RecognizeEntitiesActionResult> recognizeEntitiesActionResults = new ArrayList<>();
    List<RecognizePiiEntitiesActionResult> recognizePiiEntitiesActionResults = new ArrayList<>();
    List<ExtractKeyPhrasesActionResult> extractKeyPhrasesActionResults = new ArrayList<>();
    List<RecognizeLinkedEntitiesActionResult> recognizeLinkedEntitiesActionResults = new ArrayList<>();
    List<AnalyzeSentimentActionResult> analyzeSentimentActionResults = new ArrayList<>();
    List<ExtractSummaryActionResult> extractSummaryActionResults = new ArrayList<>();
    // Entity recognition results; a task without results still yields an
    // action result carrying its completion time.
    if (!CoreUtils.isNullOrEmpty(entityRecognitionTasksItems)) {
        for (int i = 0; i < entityRecognitionTasksItems.size(); i++) {
            final TasksStateTasksEntityRecognitionTasksItem taskItem = entityRecognitionTasksItems.get(i);
            final RecognizeEntitiesActionResult actionResult = new RecognizeEntitiesActionResult();
            final EntitiesResult results = taskItem.getResults();
            if (results != null) {
                RecognizeEntitiesActionResultPropertiesHelper.setDocumentsResults(actionResult,
                    toRecognizeEntitiesResultCollectionResponse(results));
            }
            TextAnalyticsActionResultPropertiesHelper.setCompletedAt(actionResult,
                taskItem.getLastUpdateDateTime());
            recognizeEntitiesActionResults.add(actionResult);
        }
    }
    // PII entity recognition results.
    if (!CoreUtils.isNullOrEmpty(piiTasksItems)) {
        for (int i = 0; i < piiTasksItems.size(); i++) {
            final TasksStateTasksEntityRecognitionPiiTasksItem taskItem = piiTasksItems.get(i);
            final RecognizePiiEntitiesActionResult actionResult = new RecognizePiiEntitiesActionResult();
            final PiiResult results = taskItem.getResults();
            if (results != null) {
                RecognizePiiEntitiesActionResultPropertiesHelper.setDocumentsResults(actionResult,
                    toRecognizePiiEntitiesResultCollection(results));
            }
            TextAnalyticsActionResultPropertiesHelper.setCompletedAt(actionResult,
                taskItem.getLastUpdateDateTime());
            recognizePiiEntitiesActionResults.add(actionResult);
        }
    }
    // Key-phrase extraction results.
    if (!CoreUtils.isNullOrEmpty(keyPhraseExtractionTasks)) {
        for (int i = 0; i < keyPhraseExtractionTasks.size(); i++) {
            final TasksStateTasksKeyPhraseExtractionTasksItem taskItem = keyPhraseExtractionTasks.get(i);
            final ExtractKeyPhrasesActionResult actionResult = new ExtractKeyPhrasesActionResult();
            final KeyPhraseResult results = taskItem.getResults();
            if (results != null) {
                ExtractKeyPhrasesActionResultPropertiesHelper.setDocumentsResults(actionResult,
                    toExtractKeyPhrasesResultCollection(results));
            }
            TextAnalyticsActionResultPropertiesHelper.setCompletedAt(actionResult,
                taskItem.getLastUpdateDateTime());
            extractKeyPhrasesActionResults.add(actionResult);
        }
    }
    // Linked-entity recognition results.
    if (!CoreUtils.isNullOrEmpty(linkedEntityRecognitionTasksItems)) {
        for (int i = 0; i < linkedEntityRecognitionTasksItems.size(); i++) {
            final TasksStateTasksEntityLinkingTasksItem taskItem = linkedEntityRecognitionTasksItems.get(i);
            final RecognizeLinkedEntitiesActionResult actionResult = new RecognizeLinkedEntitiesActionResult();
            final EntityLinkingResult results = taskItem.getResults();
            if (results != null) {
                RecognizeLinkedEntitiesActionResultPropertiesHelper.setDocumentsResults(actionResult,
                    toRecognizeLinkedEntitiesResultCollection(results));
            }
            TextAnalyticsActionResultPropertiesHelper.setCompletedAt(actionResult,
                taskItem.getLastUpdateDateTime());
            recognizeLinkedEntitiesActionResults.add(actionResult);
        }
    }
    // Sentiment analysis results.
    if (!CoreUtils.isNullOrEmpty(sentimentAnalysisTasksItems)) {
        for (int i = 0; i < sentimentAnalysisTasksItems.size(); i++) {
            final TasksStateTasksSentimentAnalysisTasksItem taskItem = sentimentAnalysisTasksItems.get(i);
            final AnalyzeSentimentActionResult actionResult = new AnalyzeSentimentActionResult();
            final SentimentResponse results = taskItem.getResults();
            if (results != null) {
                AnalyzeSentimentActionResultPropertiesHelper.setDocumentsResults(actionResult,
                    toAnalyzeSentimentResultCollection(results));
            }
            TextAnalyticsActionResultPropertiesHelper.setCompletedAt(actionResult,
                taskItem.getLastUpdateDateTime());
            analyzeSentimentActionResults.add(actionResult);
        }
    }
    // Extractive summarization results.
    if (!CoreUtils.isNullOrEmpty(extractiveSummarizationTasksItems)) {
        for (int i = 0; i < extractiveSummarizationTasksItems.size(); i++) {
            final TasksStateTasksExtractiveSummarizationTasksItem taskItem =
                extractiveSummarizationTasksItems.get(i);
            final ExtractSummaryActionResult actionResult = new ExtractSummaryActionResult();
            final ExtractiveSummarizationResult results = taskItem.getResults();
            if (results != null) {
                ExtractSummaryActionResultPropertiesHelper.setDocumentsResults(actionResult,
                    toExtractSummaryResultCollection(results));
            }
            TextAnalyticsActionResultPropertiesHelper.setCompletedAt(actionResult,
                taskItem.getLastUpdateDateTime());
            extractSummaryActionResults.add(actionResult);
        }
    }
    // Attach job-level errors: each error's target names a task group and an
    // index into the corresponding (order-preserving) result list above.
    final List<TextAnalyticsError> errors = analyzeJobState.getErrors();
    if (!CoreUtils.isNullOrEmpty(errors)) {
        for (TextAnalyticsError error : errors) {
            final String[] targetPair = parseActionErrorTarget(error.getTarget());
            final String taskName = targetPair[0];
            final Integer taskIndex = Integer.valueOf(targetPair[1]);
            final TextAnalyticsActionResult actionResult;
            if (ENTITY_RECOGNITION_TASKS.equals(taskName)) {
                actionResult = recognizeEntitiesActionResults.get(taskIndex);
            } else if (ENTITY_RECOGNITION_PII_TASKS.equals(taskName)) {
                actionResult = recognizePiiEntitiesActionResults.get(taskIndex);
            } else if (KEY_PHRASE_EXTRACTION_TASKS.equals(taskName)) {
                actionResult = extractKeyPhrasesActionResults.get(taskIndex);
            } else if (ENTITY_LINKING_TASKS.equals(taskName)) {
                actionResult = recognizeLinkedEntitiesActionResults.get(taskIndex);
            } else if (SENTIMENT_ANALYSIS_TASKS.equals(taskName)) {
                actionResult = analyzeSentimentActionResults.get(taskIndex);
            } else if (EXTRACTIVE_SUMMARIZATION_TASKS.equals(taskName)) {
                actionResult = extractSummaryActionResults.get(taskIndex);
            } else {
                // Unknown task group in the target: fail loudly rather than drop the error.
                throw logger.logExceptionAsError(new RuntimeException(
                    "Invalid task name in target reference, " + taskName));
            }
            TextAnalyticsActionResultPropertiesHelper.setIsError(actionResult, true);
            TextAnalyticsActionResultPropertiesHelper.setError(actionResult,
                new com.azure.ai.textanalytics.models.TextAnalyticsError(
                    TextAnalyticsErrorCode.fromString(
                        error.getCode() == null ? null : error.getCode().toString()),
                    error.getMessage(), null));
        }
    }
    // Assemble the public result from the per-action lists.
    final AnalyzeActionsResult analyzeActionsResult = new AnalyzeActionsResult();
    AnalyzeActionsResultPropertiesHelper.setRecognizeEntitiesResults(analyzeActionsResult,
        IterableStream.of(recognizeEntitiesActionResults));
    AnalyzeActionsResultPropertiesHelper.setRecognizePiiEntitiesResults(analyzeActionsResult,
        IterableStream.of(recognizePiiEntitiesActionResults));
    AnalyzeActionsResultPropertiesHelper.setExtractKeyPhrasesResults(analyzeActionsResult,
        IterableStream.of(extractKeyPhrasesActionResults));
    AnalyzeActionsResultPropertiesHelper.setRecognizeLinkedEntitiesResults(analyzeActionsResult,
        IterableStream.of(recognizeLinkedEntitiesActionResults));
    AnalyzeActionsResultPropertiesHelper.setAnalyzeSentimentResults(analyzeActionsResult,
        IterableStream.of(analyzeSentimentActionResults));
    AnalyzeActionsResultPropertiesHelper.setExtractSummaryResults(analyzeActionsResult,
        IterableStream.of(extractSummaryActionResults));
    return analyzeActionsResult;
}
/**
 * Maps a raw {@link AnalyzeJobState} polling response onto the poller's state:
 * translates the service job status into a {@link LongRunningOperationStatus} and
 * copies operation metadata (display name, timestamps, per-action counters) onto
 * the in-flight {@link AnalyzeActionsOperationDetail}.
 *
 * @param analyzeJobStateResponse the raw status response from the service.
 * @param operationResultPollResponse the previous poll response whose value is updated in place.
 * @return a mono emitting the next poll response.
 */
private Mono<PollResponse<AnalyzeActionsOperationDetail>> processAnalyzedModelResponse(
    Response<AnalyzeJobState> analyzeJobStateResponse,
    PollResponse<AnalyzeActionsOperationDetail> operationResultPollResponse) {
    // Default to completed; overwritten below whenever the service reports a status.
    LongRunningOperationStatus status = LongRunningOperationStatus.SUCCESSFULLY_COMPLETED;
    if (analyzeJobStateResponse.getValue() != null && analyzeJobStateResponse.getValue().getStatus() != null) {
        switch (analyzeJobStateResponse.getValue().getStatus()) {
            case NOT_STARTED:
            case RUNNING:
                status = LongRunningOperationStatus.IN_PROGRESS;
                break;
            case SUCCEEDED:
                status = LongRunningOperationStatus.SUCCESSFULLY_COMPLETED;
                break;
            case CANCELLED:
                status = LongRunningOperationStatus.USER_CANCELLED;
                break;
            default:
                // Unknown service states are surfaced verbatim as non-terminal statuses.
                status = LongRunningOperationStatus.fromString(
                    analyzeJobStateResponse.getValue().getStatus().toString(), true);
                break;
        }
    }
    // NOTE(review): getValue() is dereferenced unconditionally below even though the
    // guard above tolerates a null body — confirm the service always returns a body here.
    AnalyzeActionsOperationDetailPropertiesHelper.setDisplayName(operationResultPollResponse.getValue(),
        analyzeJobStateResponse.getValue().getDisplayName());
    AnalyzeActionsOperationDetailPropertiesHelper.setCreatedAt(operationResultPollResponse.getValue(),
        analyzeJobStateResponse.getValue().getCreatedDateTime());
    AnalyzeActionsOperationDetailPropertiesHelper.setExpiresAt(operationResultPollResponse.getValue(),
        analyzeJobStateResponse.getValue().getExpirationDateTime());
    AnalyzeActionsOperationDetailPropertiesHelper.setLastModifiedAt(operationResultPollResponse.getValue(),
        analyzeJobStateResponse.getValue().getLastUpdateDateTime());
    // Copy per-action progress counters onto the operation detail.
    final TasksStateTasks tasksResult = analyzeJobStateResponse.getValue().getTasks();
    AnalyzeActionsOperationDetailPropertiesHelper.setActionsFailed(operationResultPollResponse.getValue(),
        tasksResult.getFailed());
    AnalyzeActionsOperationDetailPropertiesHelper.setActionsInProgress(operationResultPollResponse.getValue(),
        tasksResult.getInProgress());
    AnalyzeActionsOperationDetailPropertiesHelper.setActionsSucceeded(
        operationResultPollResponse.getValue(), tasksResult.getCompleted());
    AnalyzeActionsOperationDetailPropertiesHelper.setActionsInTotal(operationResultPollResponse.getValue(),
        tasksResult.getTotal());
    return Mono.just(new PollResponse<>(status, operationResultPollResponse.getValue()));
}
/** Normalizes a possibly-null caller-supplied {@link Context} to {@link Context#NONE}. */
private Context getNotNullContext(Context context) {
    if (context != null) {
        return context;
    }
    return Context.NONE;
}
/** Normalizes null options to a default {@link AnalyzeActionsOptions} instance. */
private AnalyzeActionsOptions getNotNullAnalyzeActionsOptions(AnalyzeActionsOptions options) {
    if (options != null) {
        return options;
    }
    return new AnalyzeActionsOptions();
}
/**
 * Extracts the {taskName, taskIndex} pair from an error's target reference
 * using the class-level {@code PATTERN} (group 1 = task name, group 2 = index).
 *
 * @param targetReference the "target" field of a service error.
 * @return a two-element array: [0] task collection name, [1] task index as string.
 * @throws RuntimeException if the reference is absent or does not match the pattern.
 */
private String[] parseActionErrorTarget(String targetReference) {
    if (CoreUtils.isNullOrEmpty(targetReference)) {
        throw logger.logExceptionAsError(new RuntimeException(
            "Expected an error with a target field referencing an action but did not get one"));
    }
    final Matcher matcher = PATTERN.matcher(targetReference);
    String[] taskNameIdPair = new String[2];
    while (matcher.find()) {
        taskNameIdPair[0] = matcher.group(1);
        taskNameIdPair[1] = matcher.group(2);
    }
    // Robustness fix: an unmatched reference previously returned {null, null}, which
    // later surfaced as an opaque NullPointerException in the caller
    // (Integer.valueOf(null)). Fail fast with a descriptive message instead.
    if (taskNameIdPair[0] == null || taskNameIdPair[1] == null) {
        throw logger.logExceptionAsError(new RuntimeException(
            "Failed to parse task name and index from error target reference: " + targetReference));
    }
    return taskNameIdPair;
}
}
class AnalyzeActionsAsyncClient {
private static final String ENTITY_RECOGNITION_TASKS = "entityRecognitionTasks";
private static final String ENTITY_RECOGNITION_PII_TASKS = "entityRecognitionPiiTasks";
private static final String KEY_PHRASE_EXTRACTION_TASKS = "keyPhraseExtractionTasks";
private static final String ENTITY_LINKING_TASKS = "entityLinkingTasks";
private static final String SENTIMENT_ANALYSIS_TASKS = "sentimentAnalysisTasks";
private static final String EXTRACTIVE_SUMMARIZATION_TASKS = "extractiveSummarizationTasks";
    private static final String REGEX_ACTION_ERROR_TARGET =
        // Reconstructed: the original format string was lost in extraction.
        // parseActionErrorTarget reads group(1) = task name and group(2) = task index,
        // so the pattern must alternate over all six task-collection names.
        // TODO confirm the exact literal against the original source.
        String.format("#/tasks/(%s|%s|%s|%s|%s|%s)/(\\d+)",
            ENTITY_RECOGNITION_TASKS, ENTITY_RECOGNITION_PII_TASKS, KEY_PHRASE_EXTRACTION_TASKS,
            ENTITY_LINKING_TASKS, SENTIMENT_ANALYSIS_TASKS, EXTRACTIVE_SUMMARIZATION_TASKS);
private final ClientLogger logger = new ClientLogger(AnalyzeActionsAsyncClient.class);
private final TextAnalyticsClientImpl service;
private static final Pattern PATTERN;
static {
PATTERN = Pattern.compile(REGEX_ACTION_ERROR_TARGET, Pattern.MULTILINE);
}
/**
 * Package-private constructor; wraps the auto-generated service implementation
 * that performs the actual HTTP calls.
 *
 * @param service the generated Text Analytics service client.
 */
AnalyzeActionsAsyncClient(TextAnalyticsClientImpl service) {
    this.service = service;
}
/**
 * Submits a batch of documents and the configured actions as one long-running
 * "analyze" job; the returned poller's final result is a paged flux of
 * {@link AnalyzeActionsResult}.
 *
 * @param documents input documents; validated non-null/non-empty up front.
 * @param actions the actions to run, translated into service task manifests.
 * @param options optional flags (statistics); defaulted when null.
 * @param context pipeline context; defaulted to Context.NONE when null.
 */
PollerFlux<AnalyzeActionsOperationDetail, AnalyzeActionsResultPagedFlux> beginAnalyzeActions(
    Iterable<TextDocumentInput> documents, TextAnalyticsActions actions, AnalyzeActionsOptions options,
    Context context) {
    try {
        inputDocumentsValidation(documents);
        options = getNotNullAnalyzeActionsOptions(options);
        final Context finalContext = getNotNullContext(context)
            .addData(AZ_TRACING_NAMESPACE_KEY, COGNITIVE_TRACING_NAMESPACE_VALUE);
        // Build the wire-format request: documents plus one task list per action type.
        final AnalyzeBatchInput analyzeBatchInput =
            new AnalyzeBatchInput()
                .setAnalysisInput(new MultiLanguageBatchInput().setDocuments(toMultiLanguageInput(documents)))
                .setTasks(getJobManifestTasks(actions));
        analyzeBatchInput.setDisplayName(actions.getDisplayName());
        final boolean finalIncludeStatistics = options.isIncludeStatistics();
        return new PollerFlux<>(
            DEFAULT_POLL_INTERVAL,
            // Activation: submit the job and capture the operation id parsed from
            // the Operation-Location response header.
            activationOperation(
                service.analyzeWithResponseAsync(analyzeBatchInput, finalContext)
                    .map(analyzeResponse -> {
                        final AnalyzeActionsOperationDetail textAnalyticsOperationResult =
                            new AnalyzeActionsOperationDetail();
                        AnalyzeActionsOperationDetailPropertiesHelper
                            .setOperationId(textAnalyticsOperationResult,
                                parseOperationId(analyzeResponse.getDeserializedHeaders().getOperationLocation()));
                        return textAnalyticsOperationResult;
                    })),
            // Polling: query job status until a terminal state is reached.
            pollingOperation(operationId -> service.analyzeStatusWithResponseAsync(operationId,
                finalIncludeStatistics, null, null, finalContext)),
            // This long-running operation does not support cancellation.
            (activationResponse, pollingContext) ->
                Mono.error(new RuntimeException("Cancellation is not supported.")),
            // Fetching: expose the results as a paged flux keyed by operation id.
            fetchingOperation(operationId -> Mono.just(getAnalyzeOperationFluxPage(
                operationId, null, null, finalIncludeStatistics, finalContext)))
        );
    } catch (RuntimeException ex) {
        // Validation/setup failures become an error poller rather than a thrown exception.
        return PollerFlux.error(ex);
    }
}
/**
 * Same workflow as {@link #beginAnalyzeActions} but the poller's final result is a
 * synchronous {@link AnalyzeActionsResultPagedIterable} wrapper, for use by the
 * synchronous client surface.
 *
 * @param documents input documents; validated non-null/non-empty up front.
 * @param actions the actions to run, translated into service task manifests.
 * @param options optional flags (statistics); defaulted when null.
 * @param context pipeline context; defaulted to Context.NONE when null.
 */
PollerFlux<AnalyzeActionsOperationDetail, AnalyzeActionsResultPagedIterable> beginAnalyzeActionsIterable(
    Iterable<TextDocumentInput> documents, TextAnalyticsActions actions, AnalyzeActionsOptions options,
    Context context) {
    try {
        inputDocumentsValidation(documents);
        options = getNotNullAnalyzeActionsOptions(options);
        final Context finalContext = getNotNullContext(context)
            .addData(AZ_TRACING_NAMESPACE_KEY, COGNITIVE_TRACING_NAMESPACE_VALUE);
        // Build the wire-format request: documents plus one task list per action type.
        final AnalyzeBatchInput analyzeBatchInput =
            new AnalyzeBatchInput()
                .setAnalysisInput(new MultiLanguageBatchInput().setDocuments(toMultiLanguageInput(documents)))
                .setTasks(getJobManifestTasks(actions));
        analyzeBatchInput.setDisplayName(actions.getDisplayName());
        final boolean finalIncludeStatistics = options.isIncludeStatistics();
        return new PollerFlux<>(
            DEFAULT_POLL_INTERVAL,
            // Activation: submit the job and capture the operation id from the
            // Operation-Location header.
            activationOperation(
                service.analyzeWithResponseAsync(analyzeBatchInput, finalContext)
                    .map(analyzeResponse -> {
                        final AnalyzeActionsOperationDetail operationDetail =
                            new AnalyzeActionsOperationDetail();
                        AnalyzeActionsOperationDetailPropertiesHelper.setOperationId(operationDetail,
                            parseOperationId(analyzeResponse.getDeserializedHeaders().getOperationLocation()));
                        return operationDetail;
                    })),
            // Polling: query job status until a terminal state is reached.
            pollingOperation(operationId -> service.analyzeStatusWithResponseAsync(operationId,
                finalIncludeStatistics, null, null, finalContext)),
            // This long-running operation does not support cancellation.
            (activationResponse, pollingContext) ->
                Mono.error(new RuntimeException("Cancellation is not supported.")),
            // Fetching: wrap the paged flux in a blocking iterable for sync callers.
            fetchingOperationIterable(
                operationId -> Mono.just(new AnalyzeActionsResultPagedIterable(getAnalyzeOperationFluxPage(
                    operationId, null, null, finalIncludeStatistics, finalContext))))
        );
    } catch (RuntimeException ex) {
        // Validation/setup failures become an error poller rather than a thrown exception.
        return PollerFlux.error(ex);
    }
}
/**
 * Translates the user-facing {@link TextAnalyticsActions} into the generated
 * {@link JobManifestTasks} wire model. Only action categories the caller actually
 * configured are populated; a null actions bag yields a null manifest.
 */
private JobManifestTasks getJobManifestTasks(TextAnalyticsActions actions) {
    if (actions == null) {
        return null;
    }
    final JobManifestTasks manifest = new JobManifestTasks();
    if (actions.getRecognizeEntitiesActions() != null) {
        manifest.setEntityRecognitionTasks(toEntitiesTask(actions));
    }
    if (actions.getRecognizePiiEntitiesActions() != null) {
        manifest.setEntityRecognitionPiiTasks(toPiiTask(actions));
    }
    if (actions.getExtractKeyPhrasesActions() != null) {
        manifest.setKeyPhraseExtractionTasks(toKeyPhrasesTask(actions));
    }
    if (actions.getRecognizeLinkedEntitiesActions() != null) {
        manifest.setEntityLinkingTasks(toEntityLinkingTask(actions));
    }
    if (actions.getAnalyzeSentimentActions() != null) {
        manifest.setSentimentAnalysisTasks(toSentimentAnalysisTask(actions));
    }
    if (actions.getExtractSummaryActions() != null) {
        manifest.setExtractiveSummarizationTasks(toExtractiveSummarizationTask(actions));
    }
    return manifest;
}
/**
 * Maps each configured entity-recognition action onto the generated wire task model.
 * A null action is carried through as a null task, preserving input order.
 */
private List<EntitiesTask> toEntitiesTask(TextAnalyticsActions actions) {
    final List<EntitiesTask> tasks = new ArrayList<>();
    actions.getRecognizeEntitiesActions().forEach(action -> {
        if (action == null) {
            tasks.add(null);
            return;
        }
        final EntitiesTaskParameters parameters = new EntitiesTaskParameters()
            .setModelVersion(action.getModelVersion())
            .setLoggingOptOut(action.isServiceLogsDisabled())
            .setStringIndexType(StringIndexType.UTF16CODE_UNIT);
        tasks.add(new EntitiesTask().setTaskName(action.getActionName()).setParameters(parameters));
    });
    return tasks;
}
/**
 * Maps each configured key-phrase action onto the generated wire task model.
 * A null action is carried through as a null task, preserving input order.
 */
private List<KeyPhrasesTask> toKeyPhrasesTask(TextAnalyticsActions actions) {
    final List<KeyPhrasesTask> tasks = new ArrayList<>();
    actions.getExtractKeyPhrasesActions().forEach(action -> {
        if (action == null) {
            tasks.add(null);
            return;
        }
        final KeyPhrasesTaskParameters parameters = new KeyPhrasesTaskParameters()
            .setModelVersion(action.getModelVersion())
            .setLoggingOptOut(action.isServiceLogsDisabled());
        tasks.add(new KeyPhrasesTask().setTaskName(action.getActionName()).setParameters(parameters));
    });
    return tasks;
}
/**
 * Maps each configured entity-linking action onto the generated wire task model.
 * A null action is carried through as a null task, preserving input order.
 */
private List<EntityLinkingTask> toEntityLinkingTask(TextAnalyticsActions actions) {
    final List<EntityLinkingTask> entityLinkingTasks = new ArrayList<>();
    for (RecognizeLinkedEntitiesAction action : actions.getRecognizeLinkedEntitiesActions()) {
        if (action == null) {
            entityLinkingTasks.add(null);
        } else {
            // BUG FIX: the constructed task was never added to the list, so every
            // configured entity-linking action was silently dropped from the request
            // payload (compare the sibling toEntitiesTask/toKeyPhrasesTask methods,
            // which all call tasks.add(...)).
            entityLinkingTasks.add(
                new EntityLinkingTask()
                    .setTaskName(action.getActionName())
                    .setParameters(
                        new EntityLinkingTaskParameters()
                            .setModelVersion(action.getModelVersion())
                            .setLoggingOptOut(action.isServiceLogsDisabled())
                            .setStringIndexType(StringIndexType.UTF16CODE_UNIT)));
        }
    }
    return entityLinkingTasks;
}
/**
 * Maps each configured sentiment-analysis action onto the generated wire task model.
 * A null action is carried through as a null task, preserving input order.
 */
private List<SentimentAnalysisTask> toSentimentAnalysisTask(TextAnalyticsActions actions) {
    final List<SentimentAnalysisTask> tasks = new ArrayList<>();
    actions.getAnalyzeSentimentActions().forEach(action -> {
        if (action == null) {
            tasks.add(null);
            return;
        }
        final SentimentAnalysisTaskParameters parameters = new SentimentAnalysisTaskParameters()
            .setModelVersion(action.getModelVersion())
            .setLoggingOptOut(action.isServiceLogsDisabled())
            .setStringIndexType(StringIndexType.UTF16CODE_UNIT);
        tasks.add(new SentimentAnalysisTask().setTaskName(action.getActionName()).setParameters(parameters));
    });
    return tasks;
}
/**
 * Maps each configured extractive-summarization action onto the generated wire task
 * model. A null action is carried through as a null task, preserving input order.
 */
private List<ExtractiveSummarizationTask> toExtractiveSummarizationTask(TextAnalyticsActions actions) {
    final List<ExtractiveSummarizationTask> tasks = new ArrayList<>();
    actions.getExtractSummaryActions().forEach(action -> {
        if (action == null) {
            tasks.add(null);
            return;
        }
        // Translate the user-facing sort order (if any) into the wire enum.
        final ExtractiveSummarizationTaskParametersSortBy sortBy = action.getOrderBy() == null
            ? null
            : ExtractiveSummarizationTaskParametersSortBy.fromString(action.getOrderBy().toString());
        final ExtractiveSummarizationTaskParameters parameters = new ExtractiveSummarizationTaskParameters()
            .setModelVersion(action.getModelVersion())
            .setStringIndexType(StringIndexType.UTF16CODE_UNIT)
            .setLoggingOptOut(action.isServiceLogsDisabled())
            .setSentenceCount(action.getMaxSentenceCount())
            .setSortBy(sortBy);
        tasks.add(new ExtractiveSummarizationTask().setTaskName(action.getActionName()).setParameters(parameters));
    });
    return tasks;
}
/**
 * Wraps the activation step of the long-running operation. The supplied mono performs
 * the initial job submission; transport errors are normalized via
 * {@code Utility#mapToHttpResponseExceptionIfExists}, and synchronous failures are
 * returned as an error mono rather than thrown.
 */
private Function<PollingContext<AnalyzeActionsOperationDetail>, Mono<AnalyzeActionsOperationDetail>>
    activationOperation(Mono<AnalyzeActionsOperationDetail> operationResult) {
    return pollingContext -> {
        try {
            return operationResult.onErrorMap(Utility::mapToHttpResponseExceptionIfExists);
        } catch (RuntimeException ex) {
            return monoError(logger, ex);
        }
    };
}
/**
 * Builds the poller's polling step: reads the operation id captured during activation,
 * invokes the supplied status call, and converts the service response into a
 * {@link PollResponse} via {@code processAnalyzedModelResponse}.
 *
 * @param pollingFunction service call that fetches job status for an operation id.
 */
private Function<PollingContext<AnalyzeActionsOperationDetail>, Mono<PollResponse<AnalyzeActionsOperationDetail>>>
    pollingOperation(Function<String, Mono<Response<AnalyzeJobState>>> pollingFunction) {
    return pollingContext -> {
        try {
            final PollResponse<AnalyzeActionsOperationDetail> operationResultPollResponse =
                pollingContext.getLatestResponse();
            final String operationId = operationResultPollResponse.getValue().getOperationId();
            return pollingFunction.apply(operationId)
                .flatMap(modelResponse -> processAnalyzedModelResponse(modelResponse, operationResultPollResponse))
                // Normalize transport errors into HttpResponseException where possible.
                .onErrorMap(Utility::mapToHttpResponseExceptionIfExists);
        } catch (RuntimeException ex) {
            // Synchronous failures are funneled into the mono rather than thrown.
            return monoError(logger, ex);
        }
    };
}
/**
 * Final-result step of the poller: resolves the operation id recorded during polling
 * and delegates to the supplied fetching function to build the result paged flux.
 */
private Function<PollingContext<AnalyzeActionsOperationDetail>, Mono<AnalyzeActionsResultPagedFlux>>
    fetchingOperation(Function<String, Mono<AnalyzeActionsResultPagedFlux>> fetchingFunction) {
    return pollingContext -> {
        try {
            return fetchingFunction.apply(
                pollingContext.getLatestResponse().getValue().getOperationId());
        } catch (RuntimeException ex) {
            return monoError(logger, ex);
        }
    };
}
/**
 * Final-result step of the poller for the synchronous surface: resolves the operation
 * id recorded during polling and delegates to the supplied fetching function to build
 * the result paged iterable.
 */
private Function<PollingContext<AnalyzeActionsOperationDetail>, Mono<AnalyzeActionsResultPagedIterable>>
    fetchingOperationIterable(Function<String, Mono<AnalyzeActionsResultPagedIterable>> fetchingFunction) {
    return pollingContext -> {
        try {
            return fetchingFunction.apply(
                pollingContext.getLatestResponse().getValue().getOperationId());
        } catch (RuntimeException ex) {
            return monoError(logger, ex);
        }
    };
}
/**
 * Creates a paged flux over the operation's results; each page request calls
 * {@code getPage} with the continuation token produced by the previous page.
 * Note: the page retriever's {@code pageSize} argument is intentionally unused —
 * paging is driven by the continuation token plus the caller-supplied top/skip.
 */
AnalyzeActionsResultPagedFlux getAnalyzeOperationFluxPage(String operationId, Integer top, Integer skip,
    boolean showStats, Context context) {
    return new AnalyzeActionsResultPagedFlux(
        () -> (continuationToken, pageSize) ->
            getPage(continuationToken, operationId, top, skip, showStats, context).flux());
}
/**
 * Fetches one page of analyze-job results. When a continuation token is present, the
 * paging parameters ($top, $skip, showStats) are parsed out of it and override the
 * caller-supplied values; otherwise the caller's values are used directly.
 *
 * @param continuationToken the nextLink from the previous page, or null for the first page.
 * @param operationId the job's operation id.
 * @param top maximum items per page (first page only).
 * @param skip items to skip (first page only).
 * @param showStats whether to include statistics (first page only).
 * @param context pipeline context.
 */
Mono<PagedResponse<AnalyzeActionsResult>> getPage(String continuationToken, String operationId, Integer top,
    Integer skip, boolean showStats, Context context) {
    if (continuationToken != null) {
        final Map<String, Object> continuationTokenMap = parseNextLink(continuationToken);
        final Integer topValue = (Integer) continuationTokenMap.getOrDefault("$top", null);
        final Integer skipValue = (Integer) continuationTokenMap.getOrDefault("$skip", null);
        // BUG FIX: the lookup previously used the boolean variable 'showStats' as the
        // map key (autoboxed to Boolean), which can never equal a String key produced
        // by parseNextLink — so the flag parsed from the continuation token was always
        // defaulted to false. NOTE(review): assumes parseNextLink emits the key
        // "showStats"; confirm against the service's nextLink query parameter name.
        final Boolean showStatsValue = (Boolean) continuationTokenMap.getOrDefault("showStats", false);
        return service.analyzeStatusWithResponseAsync(operationId, showStatsValue, topValue, skipValue, context)
            .map(this::toAnalyzeActionsResultPagedResponse)
            .onErrorMap(Utility::mapToHttpResponseExceptionIfExists);
    } else {
        return service.analyzeStatusWithResponseAsync(operationId, showStats, top, skip, context)
            .map(this::toAnalyzeActionsResultPagedResponse)
            .onErrorMap(Utility::mapToHttpResponseExceptionIfExists);
    }
}
/**
 * Adapts a raw job-state response into a single-element page of
 * {@link AnalyzeActionsResult}, carrying the service's nextLink forward as the
 * continuation token.
 */
private PagedResponse<AnalyzeActionsResult> toAnalyzeActionsResultPagedResponse(Response<AnalyzeJobState> response) {
    final AnalyzeJobState jobState = response.getValue();
    final List<AnalyzeActionsResult> pageItems = Arrays.asList(toAnalyzeActionsResult(jobState));
    return new PagedResponseBase<Void, AnalyzeActionsResult>(
        response.getRequest(),
        response.getStatusCode(),
        response.getHeaders(),
        pageItems,
        jobState.getNextLink(),
        null);
}
/**
 * Converts a raw {@link AnalyzeJobState} into the user-facing
 * {@link AnalyzeActionsResult}: one action-result list per action category, with
 * job-level errors attached to the specific action result that each error's target
 * reference points at.
 */
private AnalyzeActionsResult toAnalyzeActionsResult(AnalyzeJobState analyzeJobState) {
    // NOTE(review): getTasks() is assumed non-null here — confirm the service always
    // returns a tasks section in the job state.
    TasksStateTasks tasksStateTasks = analyzeJobState.getTasks();
    final List<TasksStateTasksEntityRecognitionPiiTasksItem> piiTasksItems =
        tasksStateTasks.getEntityRecognitionPiiTasks();
    final List<TasksStateTasksEntityRecognitionTasksItem> entityRecognitionTasksItems =
        tasksStateTasks.getEntityRecognitionTasks();
    final List<TasksStateTasksKeyPhraseExtractionTasksItem> keyPhraseExtractionTasks =
        tasksStateTasks.getKeyPhraseExtractionTasks();
    final List<TasksStateTasksEntityLinkingTasksItem> linkedEntityRecognitionTasksItems =
        tasksStateTasks.getEntityLinkingTasks();
    final List<TasksStateTasksSentimentAnalysisTasksItem> sentimentAnalysisTasksItems =
        tasksStateTasks.getSentimentAnalysisTasks();
    final List<TasksStateTasksExtractiveSummarizationTasksItem> extractiveSummarizationTasksItems =
        tasksStateTasks.getExtractiveSummarizationTasks();
    // One result list per action category, built in the order the service reports tasks.
    List<RecognizeEntitiesActionResult> recognizeEntitiesActionResults = new ArrayList<>();
    List<RecognizePiiEntitiesActionResult> recognizePiiEntitiesActionResults = new ArrayList<>();
    List<ExtractKeyPhrasesActionResult> extractKeyPhrasesActionResults = new ArrayList<>();
    List<RecognizeLinkedEntitiesActionResult> recognizeLinkedEntitiesActionResults = new ArrayList<>();
    List<AnalyzeSentimentActionResult> analyzeSentimentActionResults = new ArrayList<>();
    List<ExtractSummaryActionResult> extractSummaryActionResults = new ArrayList<>();
    // Entity recognition results: results may be absent for failed/in-flight tasks.
    if (!CoreUtils.isNullOrEmpty(entityRecognitionTasksItems)) {
        for (int i = 0; i < entityRecognitionTasksItems.size(); i++) {
            final TasksStateTasksEntityRecognitionTasksItem taskItem = entityRecognitionTasksItems.get(i);
            final RecognizeEntitiesActionResult actionResult = new RecognizeEntitiesActionResult();
            final EntitiesResult results = taskItem.getResults();
            if (results != null) {
                RecognizeEntitiesActionResultPropertiesHelper.setDocumentsResults(actionResult,
                    toRecognizeEntitiesResultCollectionResponse(results));
            }
            TextAnalyticsActionResultPropertiesHelper.setActionName(actionResult, taskItem.getTaskName());
            TextAnalyticsActionResultPropertiesHelper.setCompletedAt(actionResult,
                taskItem.getLastUpdateDateTime());
            recognizeEntitiesActionResults.add(actionResult);
        }
    }
    // PII entity recognition results.
    if (!CoreUtils.isNullOrEmpty(piiTasksItems)) {
        for (int i = 0; i < piiTasksItems.size(); i++) {
            final TasksStateTasksEntityRecognitionPiiTasksItem taskItem = piiTasksItems.get(i);
            final RecognizePiiEntitiesActionResult actionResult = new RecognizePiiEntitiesActionResult();
            final PiiResult results = taskItem.getResults();
            if (results != null) {
                RecognizePiiEntitiesActionResultPropertiesHelper.setDocumentsResults(actionResult,
                    toRecognizePiiEntitiesResultCollection(results));
            }
            TextAnalyticsActionResultPropertiesHelper.setActionName(actionResult, taskItem.getTaskName());
            TextAnalyticsActionResultPropertiesHelper.setCompletedAt(actionResult,
                taskItem.getLastUpdateDateTime());
            recognizePiiEntitiesActionResults.add(actionResult);
        }
    }
    // Key phrase extraction results.
    if (!CoreUtils.isNullOrEmpty(keyPhraseExtractionTasks)) {
        for (int i = 0; i < keyPhraseExtractionTasks.size(); i++) {
            final TasksStateTasksKeyPhraseExtractionTasksItem taskItem = keyPhraseExtractionTasks.get(i);
            final ExtractKeyPhrasesActionResult actionResult = new ExtractKeyPhrasesActionResult();
            final KeyPhraseResult results = taskItem.getResults();
            if (results != null) {
                ExtractKeyPhrasesActionResultPropertiesHelper.setDocumentsResults(actionResult,
                    toExtractKeyPhrasesResultCollection(results));
            }
            TextAnalyticsActionResultPropertiesHelper.setActionName(actionResult, taskItem.getTaskName());
            TextAnalyticsActionResultPropertiesHelper.setCompletedAt(actionResult,
                taskItem.getLastUpdateDateTime());
            extractKeyPhrasesActionResults.add(actionResult);
        }
    }
    // Linked entity recognition results.
    if (!CoreUtils.isNullOrEmpty(linkedEntityRecognitionTasksItems)) {
        for (int i = 0; i < linkedEntityRecognitionTasksItems.size(); i++) {
            final TasksStateTasksEntityLinkingTasksItem taskItem = linkedEntityRecognitionTasksItems.get(i);
            final RecognizeLinkedEntitiesActionResult actionResult = new RecognizeLinkedEntitiesActionResult();
            final EntityLinkingResult results = taskItem.getResults();
            if (results != null) {
                RecognizeLinkedEntitiesActionResultPropertiesHelper.setDocumentsResults(actionResult,
                    toRecognizeLinkedEntitiesResultCollection(results));
            }
            TextAnalyticsActionResultPropertiesHelper.setActionName(actionResult, taskItem.getTaskName());
            TextAnalyticsActionResultPropertiesHelper.setCompletedAt(actionResult,
                taskItem.getLastUpdateDateTime());
            recognizeLinkedEntitiesActionResults.add(actionResult);
        }
    }
    // Sentiment analysis results.
    if (!CoreUtils.isNullOrEmpty(sentimentAnalysisTasksItems)) {
        for (int i = 0; i < sentimentAnalysisTasksItems.size(); i++) {
            final TasksStateTasksSentimentAnalysisTasksItem taskItem = sentimentAnalysisTasksItems.get(i);
            final AnalyzeSentimentActionResult actionResult = new AnalyzeSentimentActionResult();
            final SentimentResponse results = taskItem.getResults();
            if (results != null) {
                AnalyzeSentimentActionResultPropertiesHelper.setDocumentsResults(actionResult,
                    toAnalyzeSentimentResultCollection(results));
            }
            TextAnalyticsActionResultPropertiesHelper.setActionName(actionResult, taskItem.getTaskName());
            TextAnalyticsActionResultPropertiesHelper.setCompletedAt(actionResult,
                taskItem.getLastUpdateDateTime());
            analyzeSentimentActionResults.add(actionResult);
        }
    }
    // Extractive summarization results.
    if (!CoreUtils.isNullOrEmpty(extractiveSummarizationTasksItems)) {
        for (int i = 0; i < extractiveSummarizationTasksItems.size(); i++) {
            final TasksStateTasksExtractiveSummarizationTasksItem taskItem =
                extractiveSummarizationTasksItems.get(i);
            final ExtractSummaryActionResult actionResult = new ExtractSummaryActionResult();
            final ExtractiveSummarizationResult results = taskItem.getResults();
            if (results != null) {
                ExtractSummaryActionResultPropertiesHelper.setDocumentsResults(actionResult,
                    toExtractSummaryResultCollection(results));
            }
            TextAnalyticsActionResultPropertiesHelper.setActionName(actionResult, taskItem.getTaskName());
            TextAnalyticsActionResultPropertiesHelper.setCompletedAt(actionResult,
                taskItem.getLastUpdateDateTime());
            extractSummaryActionResults.add(actionResult);
        }
    }
    // Attach job-level errors to the specific action result each error targets.
    // NOTE(review): assumes the index embedded in the error target corresponds to the
    // action result's position in its category list (i.e. the service preserves task
    // submission order) — an out-of-range index would throw IndexOutOfBoundsException.
    final List<TextAnalyticsError> errors = analyzeJobState.getErrors();
    if (!CoreUtils.isNullOrEmpty(errors)) {
        for (TextAnalyticsError error : errors) {
            final String[] targetPair = parseActionErrorTarget(error.getTarget());
            final String taskName = targetPair[0];
            final Integer taskIndex = Integer.valueOf(targetPair[1]);
            final TextAnalyticsActionResult actionResult;
            if (ENTITY_RECOGNITION_TASKS.equals(taskName)) {
                actionResult = recognizeEntitiesActionResults.get(taskIndex);
            } else if (ENTITY_RECOGNITION_PII_TASKS.equals(taskName)) {
                actionResult = recognizePiiEntitiesActionResults.get(taskIndex);
            } else if (KEY_PHRASE_EXTRACTION_TASKS.equals(taskName)) {
                actionResult = extractKeyPhrasesActionResults.get(taskIndex);
            } else if (ENTITY_LINKING_TASKS.equals(taskName)) {
                actionResult = recognizeLinkedEntitiesActionResults.get(taskIndex);
            } else if (SENTIMENT_ANALYSIS_TASKS.equals(taskName)) {
                actionResult = analyzeSentimentActionResults.get(taskIndex);
            } else if (EXTRACTIVE_SUMMARIZATION_TASKS.equals(taskName)) {
                actionResult = extractSummaryActionResults.get(taskIndex);
            } else {
                throw logger.logExceptionAsError(new RuntimeException(
                    "Invalid task name in target reference, " + taskName));
            }
            TextAnalyticsActionResultPropertiesHelper.setIsError(actionResult, true);
            TextAnalyticsActionResultPropertiesHelper.setError(actionResult,
                new com.azure.ai.textanalytics.models.TextAnalyticsError(
                    TextAnalyticsErrorCode.fromString(
                        error.getCode() == null ? null : error.getCode().toString()),
                    error.getMessage(), null));
        }
    }
    // Assemble the final result from the per-category lists.
    final AnalyzeActionsResult analyzeActionsResult = new AnalyzeActionsResult();
    AnalyzeActionsResultPropertiesHelper.setRecognizeEntitiesResults(analyzeActionsResult,
        IterableStream.of(recognizeEntitiesActionResults));
    AnalyzeActionsResultPropertiesHelper.setRecognizePiiEntitiesResults(analyzeActionsResult,
        IterableStream.of(recognizePiiEntitiesActionResults));
    AnalyzeActionsResultPropertiesHelper.setExtractKeyPhrasesResults(analyzeActionsResult,
        IterableStream.of(extractKeyPhrasesActionResults));
    AnalyzeActionsResultPropertiesHelper.setRecognizeLinkedEntitiesResults(analyzeActionsResult,
        IterableStream.of(recognizeLinkedEntitiesActionResults));
    AnalyzeActionsResultPropertiesHelper.setAnalyzeSentimentResults(analyzeActionsResult,
        IterableStream.of(analyzeSentimentActionResults));
    AnalyzeActionsResultPropertiesHelper.setExtractSummaryResults(analyzeActionsResult,
        IterableStream.of(extractSummaryActionResults));
    return analyzeActionsResult;
}
/**
 * Maps a raw {@link AnalyzeJobState} polling response onto the poller's state:
 * translates the service job status into a {@link LongRunningOperationStatus} and
 * copies operation metadata (display name, timestamps, per-action counters) onto
 * the in-flight {@link AnalyzeActionsOperationDetail}.
 *
 * @param analyzeJobStateResponse the raw status response from the service.
 * @param operationResultPollResponse the previous poll response whose value is updated in place.
 * @return a mono emitting the next poll response.
 */
private Mono<PollResponse<AnalyzeActionsOperationDetail>> processAnalyzedModelResponse(
    Response<AnalyzeJobState> analyzeJobStateResponse,
    PollResponse<AnalyzeActionsOperationDetail> operationResultPollResponse) {
    // Default to completed; overwritten below whenever the service reports a status.
    LongRunningOperationStatus status = LongRunningOperationStatus.SUCCESSFULLY_COMPLETED;
    if (analyzeJobStateResponse.getValue() != null && analyzeJobStateResponse.getValue().getStatus() != null) {
        switch (analyzeJobStateResponse.getValue().getStatus()) {
            case NOT_STARTED:
            case RUNNING:
                status = LongRunningOperationStatus.IN_PROGRESS;
                break;
            case SUCCEEDED:
                status = LongRunningOperationStatus.SUCCESSFULLY_COMPLETED;
                break;
            case CANCELLED:
                status = LongRunningOperationStatus.USER_CANCELLED;
                break;
            default:
                // Unknown service states are surfaced verbatim as non-terminal statuses.
                status = LongRunningOperationStatus.fromString(
                    analyzeJobStateResponse.getValue().getStatus().toString(), true);
                break;
        }
    }
    // NOTE(review): getValue() is dereferenced unconditionally below even though the
    // guard above tolerates a null body — confirm the service always returns a body here.
    AnalyzeActionsOperationDetailPropertiesHelper.setDisplayName(operationResultPollResponse.getValue(),
        analyzeJobStateResponse.getValue().getDisplayName());
    AnalyzeActionsOperationDetailPropertiesHelper.setCreatedAt(operationResultPollResponse.getValue(),
        analyzeJobStateResponse.getValue().getCreatedDateTime());
    AnalyzeActionsOperationDetailPropertiesHelper.setExpiresAt(operationResultPollResponse.getValue(),
        analyzeJobStateResponse.getValue().getExpirationDateTime());
    AnalyzeActionsOperationDetailPropertiesHelper.setLastModifiedAt(operationResultPollResponse.getValue(),
        analyzeJobStateResponse.getValue().getLastUpdateDateTime());
    // Copy per-action progress counters onto the operation detail.
    final TasksStateTasks tasksResult = analyzeJobStateResponse.getValue().getTasks();
    AnalyzeActionsOperationDetailPropertiesHelper.setActionsFailed(operationResultPollResponse.getValue(),
        tasksResult.getFailed());
    AnalyzeActionsOperationDetailPropertiesHelper.setActionsInProgress(operationResultPollResponse.getValue(),
        tasksResult.getInProgress());
    AnalyzeActionsOperationDetailPropertiesHelper.setActionsSucceeded(
        operationResultPollResponse.getValue(), tasksResult.getCompleted());
    AnalyzeActionsOperationDetailPropertiesHelper.setActionsInTotal(operationResultPollResponse.getValue(),
        tasksResult.getTotal());
    return Mono.just(new PollResponse<>(status, operationResultPollResponse.getValue()));
}
/** Normalizes a possibly-null caller-supplied {@link Context} to {@link Context#NONE}. */
private Context getNotNullContext(Context context) {
    if (context != null) {
        return context;
    }
    return Context.NONE;
}
/** Normalizes null options to a default {@link AnalyzeActionsOptions} instance. */
private AnalyzeActionsOptions getNotNullAnalyzeActionsOptions(AnalyzeActionsOptions options) {
    if (options != null) {
        return options;
    }
    return new AnalyzeActionsOptions();
}
/**
 * Extracts the {taskName, taskIndex} pair from an error's target reference
 * using the class-level {@code PATTERN} (group 1 = task name, group 2 = index).
 *
 * @param targetReference the "target" field of a service error.
 * @return a two-element array: [0] task collection name, [1] task index as string.
 * @throws RuntimeException if the reference is absent or does not match the pattern.
 */
private String[] parseActionErrorTarget(String targetReference) {
    if (CoreUtils.isNullOrEmpty(targetReference)) {
        throw logger.logExceptionAsError(new RuntimeException(
            "Expected an error with a target field referencing an action but did not get one"));
    }
    final Matcher matcher = PATTERN.matcher(targetReference);
    String[] taskNameIdPair = new String[2];
    while (matcher.find()) {
        taskNameIdPair[0] = matcher.group(1);
        taskNameIdPair[1] = matcher.group(2);
    }
    // Robustness fix: an unmatched reference previously returned {null, null}, which
    // later surfaced as an opaque NullPointerException in the caller
    // (Integer.valueOf(null)). Fail fast with a descriptive message instead.
    if (taskNameIdPair[0] == null || taskNameIdPair[1] == null) {
        throw logger.logExceptionAsError(new RuntimeException(
            "Failed to parse task name and index from error target reference: " + targetReference));
    }
    return taskNameIdPair;
}
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.