comment stringlengths 1 45k | method_body stringlengths 23 281k | target_code stringlengths 0 5.16k | method_body_after stringlengths 12 281k | context_before stringlengths 8 543k | context_after stringlengths 8 543k |
|---|---|---|---|---|---|
could you please add an `assert(databaseAccount != null)` here. for sanity check and readibility. | private void initializeGatewayConfigurationReader() {
this.gatewayConfigurationReader = new GatewayServiceConfigurationReader(this.serviceEndpoint, this.globalEndpointManager);
DatabaseAccount databaseAccount = this.globalEndpointManager.getLatestDatabaseAccount();
this.useMultipleWriteLocations = this.connectionPolicy.getUsingMultipleWriteLocations() && BridgeInternal.isEnableMultipleWriteLocations(databaseAccount);
} | DatabaseAccount databaseAccount = this.globalEndpointManager.getLatestDatabaseAccount(); | private void initializeGatewayConfigurationReader() {
this.gatewayConfigurationReader = new GatewayServiceConfigurationReader(this.globalEndpointManager);
DatabaseAccount databaseAccount = this.globalEndpointManager.getLatestDatabaseAccount();
assert(databaseAccount != null);
this.useMultipleWriteLocations = this.connectionPolicy.getUsingMultipleWriteLocations() && BridgeInternal.isEnableMultipleWriteLocations(databaseAccount);
} | class RxDocumentClientImpl implements AsyncDocumentClient, IAuthorizationTokenProvider {
// Shared Jackson mapper for (de)serializing request/response payloads.
private final static ObjectMapper mapper = Utils.getSimpleObjectMapper();
private final Logger logger = LoggerFactory.getLogger(RxDocumentClientImpl.class);
// --- Credentials / endpoint configuration (immutable after construction) ---
private final String masterKeyOrResourceToken;
private final URI serviceEndpoint;
private final ConnectionPolicy connectionPolicy;
private final ConsistencyLevel consistencyLevel;
private final BaseAuthorizationTokenProvider authorizationTokenProvider;
private final UserAgentContainer userAgentContainer;
// True when masterKeyOrResourceToken is a resource token rather than a master key.
private final boolean hasAuthKeyResourceToken;
private final Configs configs;
private final boolean enableTransportClientSharing;
private CosmosKeyCredential cosmosKeyCredential;
private TokenResolver tokenResolver;
private SessionContainer sessionContainer;
private String firstResourceTokenFromPermissionFeed = StringUtils.EMPTY;
// --- Caches and transport plumbing, populated by init() ---
private RxClientCollectionCache collectionCache;
private RxStoreModel gatewayProxy;
private RxStoreModel storeModel;
private GlobalAddressResolver addressResolver;
private RxPartitionKeyRangeCache partitionKeyRangeCache;
// Resource-id/full-name -> (partition key, resource token) pairs, built from a permission feed.
private Map<String, List<PartitionKeyAndResourceTokenPair>> resourceTokensMap;
private IRetryPolicyFactory resetSessionTokenRetryPolicy;
/**
 * Compatibility mode: Allows to specify compatibility mode used by client when
 * making query requests. Should be removed when application/sql is no longer
 * supported.
 */
private final QueryCompatibilityMode queryCompatibilityMode = QueryCompatibilityMode.Default;
private final HttpClient reactorHttpClient;
private final GlobalEndpointManager globalEndpointManager;
private final RetryPolicy retryPolicy;
// volatile: read on request paths, written by initializeGatewayConfigurationReader().
private volatile boolean useMultipleWriteLocations;
private StoreClientFactory storeClientFactory;
private GatewayServiceConfigurationReader gatewayConfigurationReader;
/**
 * Public constructor: delegates to the permission-feed constructor, then installs
 * the caller-supplied {@link TokenResolver} used for per-request authorization.
 */
public RxDocumentClientImpl(URI serviceEndpoint,
String masterKeyOrResourceToken,
List<Permission> permissionFeed,
ConnectionPolicy connectionPolicy,
ConsistencyLevel consistencyLevel,
Configs configs,
TokenResolver tokenResolver,
CosmosKeyCredential cosmosKeyCredential,
boolean sessionCapturingOverride,
boolean enableTransportClientSharing) {
this(serviceEndpoint, masterKeyOrResourceToken, permissionFeed, connectionPolicy, consistencyLevel, configs, cosmosKeyCredential, sessionCapturingOverride, enableTransportClientSharing);
this.tokenResolver = tokenResolver;
}
/**
 * Constructor that additionally builds {@link #resourceTokensMap} from a permission feed:
 * each permission's resource link is parsed and its (partition key, token) pair is indexed
 * by the resource id / full name so per-resource tokens can be looked up at request time.
 *
 * @throws IllegalArgumentException when a permission's resource link cannot be parsed
 *         or the resulting token map is empty.
 */
private RxDocumentClientImpl(URI serviceEndpoint,
String masterKeyOrResourceToken,
List<Permission> permissionFeed,
ConnectionPolicy connectionPolicy,
ConsistencyLevel consistencyLevel,
Configs configs,
CosmosKeyCredential cosmosKeyCredential,
boolean sessionCapturingOverrideEnabled,
boolean enableTransportClientSharing) {
this(serviceEndpoint, masterKeyOrResourceToken, connectionPolicy, consistencyLevel, configs, cosmosKeyCredential, sessionCapturingOverrideEnabled, enableTransportClientSharing);
if (permissionFeed != null && permissionFeed.size() > 0) {
this.resourceTokensMap = new HashMap<>();
for (Permission permission : permissionFeed) {
// Sanity check: the resource link must contain at least one path segment.
String[] segments = StringUtils.split(permission.getResourceLink(),
Constants.Properties.PATH_SEPARATOR.charAt(0));
if (segments.length <= 0) {
throw new IllegalArgumentException("resourceLink");
}
List<PartitionKeyAndResourceTokenPair> partitionKeyAndResourceTokenPairs = null;
PathInfo pathInfo = new PathInfo(false, StringUtils.EMPTY, StringUtils.EMPTY, false);
if (!PathsHelper.tryParsePathSegments(permission.getResourceLink(), pathInfo, null)) {
throw new IllegalArgumentException(permission.getResourceLink());
}
// Group all tokens for the same resource id under one list.
partitionKeyAndResourceTokenPairs = resourceTokensMap.get(pathInfo.resourceIdOrFullName);
if (partitionKeyAndResourceTokenPairs == null) {
partitionKeyAndResourceTokenPairs = new ArrayList<>();
this.resourceTokensMap.put(pathInfo.resourceIdOrFullName, partitionKeyAndResourceTokenPairs);
}
PartitionKey partitionKey = permission.getResourcePartitionKey();
// A permission without a partition key is indexed under the empty partition key.
partitionKeyAndResourceTokenPairs.add(new PartitionKeyAndResourceTokenPair(
partitionKey != null ? BridgeInternal.getPartitionKeyInternal(partitionKey) : PartitionKeyInternal.Empty,
permission.getToken()));
logger.debug("Initializing resource token map , with map key [{}] , partition key [{}] and resource token",
pathInfo.resourceIdOrFullName, partitionKey != null ? partitionKey.toString() : null, permission.getToken());
}
if(this.resourceTokensMap.isEmpty()) {
throw new IllegalArgumentException("permissionFeed");
}
// Remember the first resource token as a fallback for requests with no matching entry.
String firstToken = permissionFeed.get(0).getToken();
if(ResourceTokenAuthorizationHelper.isResourceToken(firstToken)) {
this.firstResourceTokenFromPermissionFeed = firstToken;
}
}
}
/**
 * Base constructor: resolves the authorization strategy (key credential, resource
 * token, or master key), defaults the connection policy, configures session capturing,
 * and wires the HTTP client, global endpoint manager and retry policy.
 * Network-dependent initialization is deferred to {@link #init()}.
 */
RxDocumentClientImpl(URI serviceEndpoint,
String masterKeyOrResourceToken,
ConnectionPolicy connectionPolicy,
ConsistencyLevel consistencyLevel,
Configs configs,
CosmosKeyCredential cosmosKeyCredential,
boolean sessionCapturingOverrideEnabled,
boolean enableTransportClientSharing) {
logger.info(
"Initializing DocumentClient with"
+ " serviceEndpoint [{}], connectionPolicy [{}], consistencyLevel [{}], directModeProtocol [{}]",
serviceEndpoint, connectionPolicy, consistencyLevel, configs.getProtocol());
this.enableTransportClientSharing = enableTransportClientSharing;
this.configs = configs;
this.masterKeyOrResourceToken = masterKeyOrResourceToken;
this.serviceEndpoint = serviceEndpoint;
this.cosmosKeyCredential = cosmosKeyCredential;
// Authorization precedence: explicit credential > resource token > master key > none.
if (this.cosmosKeyCredential != null) {
hasAuthKeyResourceToken = false;
this.authorizationTokenProvider = new BaseAuthorizationTokenProvider(this.cosmosKeyCredential);
} else if (masterKeyOrResourceToken != null && ResourceTokenAuthorizationHelper.isResourceToken(masterKeyOrResourceToken)) {
// Resource tokens are sent as-is; no token provider needed.
this.authorizationTokenProvider = null;
hasAuthKeyResourceToken = true;
} else if(masterKeyOrResourceToken != null && !ResourceTokenAuthorizationHelper.isResourceToken(masterKeyOrResourceToken)){
// Wrap a raw master key in a credential so key rotation flows through one path.
this.cosmosKeyCredential = new CosmosKeyCredential(this.masterKeyOrResourceToken);
hasAuthKeyResourceToken = false;
this.authorizationTokenProvider = new BaseAuthorizationTokenProvider(this.cosmosKeyCredential);
} else {
hasAuthKeyResourceToken = false;
this.authorizationTokenProvider = null;
}
if (connectionPolicy != null) {
this.connectionPolicy = connectionPolicy;
} else {
this.connectionPolicy = new ConnectionPolicy();
}
// Session capturing is only needed for SESSION consistency unless explicitly forced on.
boolean disableSessionCapturing = (ConsistencyLevel.SESSION != consistencyLevel && !sessionCapturingOverrideEnabled);
this.sessionContainer = new SessionContainer(this.serviceEndpoint.getHost(), disableSessionCapturing);
this.consistencyLevel = consistencyLevel;
this.userAgentContainer = new UserAgentContainer();
String userAgentSuffix = this.connectionPolicy.getUserAgentSuffix();
if (userAgentSuffix != null && userAgentSuffix.length() > 0) {
userAgentContainer.setSuffix(userAgentSuffix);
}
this.reactorHttpClient = httpClient();
this.globalEndpointManager = new GlobalEndpointManager(asDatabaseAccountManagerInternal(), this.connectionPolicy, /**/configs);
this.retryPolicy = new RetryPolicy(this.globalEndpointManager, this.connectionPolicy);
this.resetSessionTokenRetryPolicy = retryPolicy;
}
/**
 * Completes client initialization: creates the gateway proxy, initializes the
 * endpoint manager and gateway configuration, builds the collection/partition-key
 * caches, and selects gateway or direct connectivity per the connection policy.
 * Statement order matters: the caches depend on the gateway proxy, and the
 * session-token retry policy depends on the collection cache.
 */
public void init() {
this.gatewayProxy = createRxGatewayProxy(this.sessionContainer,
this.consistencyLevel,
this.queryCompatibilityMode,
this.userAgentContainer,
this.globalEndpointManager,
this.reactorHttpClient);
this.globalEndpointManager.init();
this.initializeGatewayConfigurationReader();
this.collectionCache = new RxClientCollectionCache(this.sessionContainer, this.gatewayProxy, this, this.retryPolicy);
this.resetSessionTokenRetryPolicy = new ResetSessionTokenRetryPolicyFactory(this.sessionContainer, this.collectionCache, this.retryPolicy);
this.partitionKeyRangeCache = new RxPartitionKeyRangeCache(RxDocumentClientImpl.this,
collectionCache);
if (this.connectionPolicy.getConnectionMode() == ConnectionMode.GATEWAY) {
this.storeModel = this.gatewayProxy;
} else {
this.initializeDirectConnectivity();
}
}
/**
 * Sets up direct (non-gateway) connectivity: a store client factory, a global
 * address resolver, and finally the server store model via createStoreModel.
 */
private void initializeDirectConnectivity() {
this.storeClientFactory = new StoreClientFactory(
this.configs,
// Request timeout is configured in millis but the factory takes seconds.
this.connectionPolicy.getRequestTimeoutInMillis() / 1000,
0,
this.userAgentContainer,
this.enableTransportClientSharing
);
this.addressResolver = new GlobalAddressResolver(
this.reactorHttpClient,
this.globalEndpointManager,
this.configs.getProtocol(),
this,
this.collectionCache,
this.partitionKeyRangeCache,
userAgentContainer,
null,
this.connectionPolicy);
this.createStoreModel(true);
}
/**
 * Adapts this client to the {@link DatabaseAccountManagerInternal} interface
 * required by the {@link GlobalEndpointManager}; all calls delegate back to
 * the enclosing client instance.
 */
DatabaseAccountManagerInternal asDatabaseAccountManagerInternal() {
return new DatabaseAccountManagerInternal() {
@Override
public URI getServiceEndpoint() {
return RxDocumentClientImpl.this.getServiceEndpoint();
}
@Override
public Flux<DatabaseAccount> getDatabaseAccountFromEndpoint(URI endpoint) {
logger.info("Getting database account endpoint from {}", endpoint);
return RxDocumentClientImpl.this.getDatabaseAccountFromEndpoint(endpoint);
}
@Override
public ConnectionPolicy getConnectionPolicy() {
return RxDocumentClientImpl.this.getConnectionPolicy();
}
};
}
/**
 * Factory for the gateway store model; package-private so tests can override
 * it to inject a stub proxy.
 */
RxGatewayStoreModel createRxGatewayProxy(ISessionContainer sessionContainer,
ConsistencyLevel consistencyLevel,
QueryCompatibilityMode queryCompatibilityMode,
UserAgentContainer userAgentContainer,
GlobalEndpointManager globalEndpointManager,
HttpClient httpClient) {
return new RxGatewayStoreModel(sessionContainer,
consistencyLevel,
queryCompatibilityMode,
userAgentContainer,
globalEndpointManager,
httpClient);
}
/**
 * Builds the shared HTTP client from the connection policy's idle-connection
 * timeout, pool size, proxy and request timeout settings.
 */
private HttpClient httpClient() {
    HttpClientConfig clientConfig = new HttpClientConfig(this.configs);
    clientConfig = clientConfig.withMaxIdleConnectionTimeoutInMillis(this.connectionPolicy.getIdleConnectionTimeoutInMillis());
    clientConfig = clientConfig.withPoolSize(this.connectionPolicy.getMaxPoolSize());
    clientConfig = clientConfig.withHttpProxy(this.connectionPolicy.getProxy());
    clientConfig = clientConfig.withRequestTimeoutInMillis(this.connectionPolicy.getRequestTimeoutInMillis());
    return HttpClient.createFixed(clientConfig);
}
/**
 * Creates the direct-mode server store model from the store client factory.
 * NOTE(review): the subscribeRntbdStatus parameter is currently unused in this body.
 */
private void createStoreModel(boolean subscribeRntbdStatus) {
StoreClient storeClient = this.storeClientFactory.createStoreClient(
this.addressResolver,
this.sessionContainer,
this.gatewayConfigurationReader,
this,
false
);
this.storeModel = new ServerStoreModel(storeClient);
}
/** Returns the service endpoint this client was constructed with. */
@Override
public URI getServiceEndpoint() {
return this.serviceEndpoint;
}
/**
 * Returns the first available write endpoint, or null when the endpoint
 * manager currently reports none.
 */
@Override
public URI getWriteEndpoint() {
    for (URI candidate : globalEndpointManager.getWriteEndpoints()) {
        return candidate;
    }
    return null;
}
/**
 * Returns the first available read endpoint, or null when the endpoint
 * manager currently reports none.
 */
@Override
public URI getReadEndpoint() {
    for (URI candidate : globalEndpointManager.getReadEndpoints()) {
        return candidate;
    }
    return null;
}
/** Returns the effective (possibly defaulted) connection policy. */
@Override
public ConnectionPolicy getConnectionPolicy() {
return this.connectionPolicy;
}
/** Creates a database, wrapping the call in the session-token retry policy. */
@Override
public Mono<ResourceResponse<Database>> createDatabase(Database database, RequestOptions options) {
DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy();
return ObservableHelper.inlineIfPossibleAsObs(() -> createDatabaseInternal(database, options, retryPolicyInstance), retryPolicyInstance);
}
/**
 * Builds and issues the Create-Database request.
 * Argument/serialization failures are surfaced as an error Mono rather than thrown.
 *
 * @throws nothing directly; errors are propagated via {@code Mono.error}.
 */
private Mono<ResourceResponse<Database>> createDatabaseInternal(Database database, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (database == null) {
throw new IllegalArgumentException("Database");
}
logger.debug("Creating a Database. id: [{}]", database.getId());
validateResource(database);
Map<String, String> requestHeaders = this.getRequestHeaders(options);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Create,
ResourceType.Database, Paths.DATABASES_ROOT, database, requestHeaders, options);
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.create(request, retryPolicyInstance).map(response -> toResourceResponse(response, Database.class));
} catch (Exception e) {
logger.debug("Failure in creating a database. due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
/** Deletes a database, wrapping the call in the session-token retry policy. */
@Override
public Mono<ResourceResponse<Database>> deleteDatabase(String databaseLink, RequestOptions options) {
DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy();
return ObservableHelper.inlineIfPossibleAsObs(() -> deleteDatabaseInternal(databaseLink, options, retryPolicyInstance), retryPolicyInstance);
}
/**
 * Builds and issues the Delete-Database request; argument failures are
 * propagated via {@code Mono.error}.
 */
private Mono<ResourceResponse<Database>> deleteDatabaseInternal(String databaseLink, RequestOptions options,
DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (StringUtils.isEmpty(databaseLink)) {
throw new IllegalArgumentException("databaseLink");
}
logger.debug("Deleting a Database. databaseLink: [{}]", databaseLink);
String path = Utils.joinPath(databaseLink, null);
Map<String, String> requestHeaders = this.getRequestHeaders(options);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Delete,
ResourceType.Database, path, requestHeaders, options);
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.delete(request, retryPolicyInstance).map(response -> toResourceResponse(response, Database.class));
} catch (Exception e) {
logger.debug("Failure in deleting a database. due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
/** Reads a database, wrapping the call in the session-token retry policy. */
@Override
public Mono<ResourceResponse<Database>> readDatabase(String databaseLink, RequestOptions options) {
DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy();
return ObservableHelper.inlineIfPossibleAsObs(() -> readDatabaseInternal(databaseLink, options, retryPolicyInstance), retryPolicyInstance);
}
/**
 * Builds and issues the Read-Database request; argument failures are
 * propagated via {@code Mono.error}.
 */
private Mono<ResourceResponse<Database>> readDatabaseInternal(String databaseLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (StringUtils.isEmpty(databaseLink)) {
throw new IllegalArgumentException("databaseLink");
}
logger.debug("Reading a Database. databaseLink: [{}]", databaseLink);
String path = Utils.joinPath(databaseLink, null);
Map<String, String> requestHeaders = this.getRequestHeaders(options);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Read,
ResourceType.Database, path, requestHeaders, options);
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, Database.class));
} catch (Exception e) {
logger.debug("Failure in reading a database. due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
/** Reads all databases as a paged feed. */
@Override
public Flux<FeedResponse<Database>> readDatabases(FeedOptions options) {
return readFeed(options, ResourceType.Database, Database.class, Paths.DATABASES_ROOT);
}
/**
 * Maps a parent resource link plus a target resource type to the feed link
 * used for queries. Root-scoped types (Database, Offer) ignore the parent
 * link; all others append their type-specific path segment.
 *
 * @throws IllegalArgumentException for resource types without a query feed.
 */
private String parentResourceLinkToQueryLink(String parentResouceLink, ResourceType resourceTypeEnum) {
    final String pathSegment;
    switch (resourceTypeEnum) {
        case Database:
            return Paths.DATABASES_ROOT;
        case Offer:
            return Paths.OFFERS_ROOT;
        case DocumentCollection:
            pathSegment = Paths.COLLECTIONS_PATH_SEGMENT;
            break;
        case Document:
            pathSegment = Paths.DOCUMENTS_PATH_SEGMENT;
            break;
        case User:
            pathSegment = Paths.USERS_PATH_SEGMENT;
            break;
        case Permission:
            pathSegment = Paths.PERMISSIONS_PATH_SEGMENT;
            break;
        case Attachment:
            pathSegment = Paths.ATTACHMENTS_PATH_SEGMENT;
            break;
        case StoredProcedure:
            pathSegment = Paths.STORED_PROCEDURES_PATH_SEGMENT;
            break;
        case Trigger:
            pathSegment = Paths.TRIGGERS_PATH_SEGMENT;
            break;
        case UserDefinedFunction:
            pathSegment = Paths.USER_DEFINED_FUNCTIONS_PATH_SEGMENT;
            break;
        default:
            throw new IllegalArgumentException("resource type not supported");
    }
    return Utils.joinPath(parentResouceLink, pathSegment);
}
/**
 * Common query path: resolves the feed link for the resource type, creates a
 * query execution context (with a fresh activity id) and executes it.
 */
private <T extends Resource> Flux<FeedResponse<T>> createQuery(
String parentResourceLink,
SqlQuerySpec sqlQuery,
FeedOptions options,
Class<T> klass,
ResourceType resourceTypeEnum) {
String queryResourceLink = parentResourceLinkToQueryLink(parentResourceLink, resourceTypeEnum);
UUID activityId = Utils.randomUUID();
IDocumentQueryClient queryClient = DocumentQueryClientImpl(RxDocumentClientImpl.this);
Flux<? extends IDocumentQueryExecutionContext<T>> executionContext =
DocumentQueryExecutionContextFactory.createDocumentQueryExecutionContextAsync(queryClient, resourceTypeEnum, klass, sqlQuery , options, queryResourceLink, false, activityId);
return executionContext.flatMap(IDocumentQueryExecutionContext::executeAsync);
}
/** Convenience overload: wraps the raw query text in a {@link SqlQuerySpec}. */
@Override
public Flux<FeedResponse<Database>> queryDatabases(String query, FeedOptions options) {
return queryDatabases(new SqlQuerySpec(query), options);
}
/** Queries databases at the databases root feed. */
@Override
public Flux<FeedResponse<Database>> queryDatabases(SqlQuerySpec querySpec, FeedOptions options) {
return createQuery(Paths.DATABASES_ROOT, querySpec, options, Database.class, ResourceType.Database);
}
/** Creates a collection, wrapping the call in the session-token retry policy. */
@Override
public Mono<ResourceResponse<DocumentCollection>> createCollection(String databaseLink,
DocumentCollection collection, RequestOptions options) {
DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy();
return ObservableHelper.inlineIfPossibleAsObs(() -> this.createCollectionInternal(databaseLink, collection, options, retryPolicyInstance), retryPolicyInstance);
}
/**
 * Builds and issues the Create-Collection request and records the returned
 * session token. Argument failures are propagated via {@code Mono.error}.
 */
private Mono<ResourceResponse<DocumentCollection>> createCollectionInternal(String databaseLink,
    DocumentCollection collection, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(databaseLink)) {
            throw new IllegalArgumentException("databaseLink");
        }
        if (collection == null) {
            throw new IllegalArgumentException("collection");
        }
        logger.debug("Creating a Collection. databaseLink: [{}], Collection id: [{}]", databaseLink,
            collection.getId());
        validateResource(collection);
        String path = Utils.joinPath(databaseLink, Paths.COLLECTIONS_PATH_SEGMENT);
        Map<String, String> requestHeaders = this.getRequestHeaders(options);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Create,
            ResourceType.DocumentCollection, path, collection, requestHeaders, options);
        if (retryPolicyInstance != null){
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        return this.create(request, retryPolicyInstance).map(response -> toResourceResponse(response, DocumentCollection.class))
            .doOnNext(resourceResponse -> {
                // Guard against a null resource before touching the session container,
                // consistent with replaceCollectionInternal (fixes a potential NPE).
                if (resourceResponse.getResource() != null) {
                    this.sessionContainer.setSessionToken(resourceResponse.getResource().getResourceId(),
                        getAltLink(resourceResponse.getResource()),
                        resourceResponse.getResponseHeaders());
                }
            });
    } catch (Exception e) {
        logger.debug("Failure in creating a collection. due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/** Replaces a collection, wrapping the call in the session-token retry policy. */
@Override
public Mono<ResourceResponse<DocumentCollection>> replaceCollection(DocumentCollection collection,
RequestOptions options) {
DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy();
return ObservableHelper.inlineIfPossibleAsObs(() -> replaceCollectionInternal(collection, options, retryPolicyInstance), retryPolicyInstance);
}
/**
 * Builds and issues the Replace-Collection request and, when a resource is
 * returned, records its session token. Argument failures are propagated via
 * {@code Mono.error}.
 */
private Mono<ResourceResponse<DocumentCollection>> replaceCollectionInternal(DocumentCollection collection,
RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (collection == null) {
throw new IllegalArgumentException("collection");
}
logger.debug("Replacing a Collection. id: [{}]", collection.getId());
validateResource(collection);
String path = Utils.joinPath(collection.getSelfLink(), null);
Map<String, String> requestHeaders = this.getRequestHeaders(options);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Replace,
ResourceType.DocumentCollection, path, collection, requestHeaders, options);
if (retryPolicyInstance != null){
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.replace(request, retryPolicyInstance).map(response -> toResourceResponse(response, DocumentCollection.class))
.doOnNext(resourceResponse -> {
// Only capture the session token when the service returned a resource body.
if (resourceResponse.getResource() != null) {
this.sessionContainer.setSessionToken(resourceResponse.getResource().getResourceId(),
getAltLink(resourceResponse.getResource()),
resourceResponse.getResponseHeaders());
}
});
} catch (Exception e) {
logger.debug("Failure in replacing a collection. due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
/** Deletes a collection, wrapping the call in the session-token retry policy. */
@Override
public Mono<ResourceResponse<DocumentCollection>> deleteCollection(String collectionLink,
RequestOptions options) {
DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy();
return ObservableHelper.inlineIfPossibleAsObs(() -> deleteCollectionInternal(collectionLink, options, retryPolicyInstance), retryPolicyInstance);
}
/**
 * Builds and issues the Delete-Collection request; argument failures are
 * propagated via {@code Mono.error}.
 */
private Mono<ResourceResponse<DocumentCollection>> deleteCollectionInternal(String collectionLink,
RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (StringUtils.isEmpty(collectionLink)) {
throw new IllegalArgumentException("collectionLink");
}
logger.debug("Deleting a Collection. collectionLink: [{}]", collectionLink);
String path = Utils.joinPath(collectionLink, null);
Map<String, String> requestHeaders = this.getRequestHeaders(options);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Delete,
ResourceType.DocumentCollection, path, requestHeaders, options);
if (retryPolicyInstance != null){
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.delete(request, retryPolicyInstance).map(response -> toResourceResponse(response, DocumentCollection.class));
} catch (Exception e) {
logger.debug("Failure in deleting a collection, due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
/**
 * Sends a DELETE through the appropriate store proxy, recording retry
 * context on the request when this is a retry attempt.
 */
private Mono<RxDocumentServiceResponse> delete(RxDocumentServiceRequest request, DocumentClientRetryPolicy documentClientRetryPolicy) {
populateHeaders(request, RequestVerb.DELETE);
if(request.requestContext != null && documentClientRetryPolicy.getRetryCount() > 0) {
documentClientRetryPolicy.updateEndTime();
request.requestContext.updateRetryContext(documentClientRetryPolicy, true);
}
return getStoreProxy(request).processMessage(request);
}
/**
 * Sends a GET through the appropriate store proxy, recording retry
 * context on the request when this is a retry attempt.
 */
private Mono<RxDocumentServiceResponse> read(RxDocumentServiceRequest request, DocumentClientRetryPolicy documentClientRetryPolicy) {
populateHeaders(request, RequestVerb.GET);
if(request.requestContext != null && documentClientRetryPolicy.getRetryCount() > 0) {
documentClientRetryPolicy.updateEndTime();
request.requestContext.updateRetryContext(documentClientRetryPolicy, true);
}
return getStoreProxy(request).processMessage(request);
}
/** Sends a feed-read GET; feed reads always go through the gateway proxy. */
Mono<RxDocumentServiceResponse> readFeed(RxDocumentServiceRequest request) {
populateHeaders(request, RequestVerb.GET);
return gatewayProxy.processMessage(request);
}
/**
 * Sends a query POST through the appropriate store proxy and captures the
 * response's session token before handing the response to the caller.
 */
private Mono<RxDocumentServiceResponse> query(RxDocumentServiceRequest request) {
populateHeaders(request, RequestVerb.POST);
return this.getStoreProxy(request).processMessage(request)
.map(response -> {
this.captureSessionToken(request, response);
return response;
}
);
}
/** Reads a collection, wrapping the call in the session-token retry policy. */
@Override
public Mono<ResourceResponse<DocumentCollection>> readCollection(String collectionLink,
RequestOptions options) {
DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy();
return ObservableHelper.inlineIfPossibleAsObs(() -> readCollectionInternal(collectionLink, options, retryPolicyInstance), retryPolicyInstance);
}
/**
 * Builds and issues the Read-Collection request; argument failures are
 * propagated via {@code Mono.error}.
 */
private Mono<ResourceResponse<DocumentCollection>> readCollectionInternal(String collectionLink,
RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (StringUtils.isEmpty(collectionLink)) {
throw new IllegalArgumentException("collectionLink");
}
logger.debug("Reading a Collection. collectionLink: [{}]", collectionLink);
String path = Utils.joinPath(collectionLink, null);
Map<String, String> requestHeaders = this.getRequestHeaders(options);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Read,
ResourceType.DocumentCollection, path, requestHeaders, options);
if (retryPolicyInstance != null){
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, DocumentCollection.class));
} catch (Exception e) {
logger.debug("Failure in reading a collection, due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
/**
 * Reads all collections of a database as a paged feed.
 *
 * @throws IllegalArgumentException when databaseLink is empty.
 */
@Override
public Flux<FeedResponse<DocumentCollection>> readCollections(String databaseLink, FeedOptions options) {
if (StringUtils.isEmpty(databaseLink)) {
throw new IllegalArgumentException("databaseLink");
}
return readFeed(options, ResourceType.DocumentCollection, DocumentCollection.class,
Utils.joinPath(databaseLink, Paths.COLLECTIONS_PATH_SEGMENT));
}
/** Convenience overload: wraps the raw query text in a {@link SqlQuerySpec}. */
@Override
public Flux<FeedResponse<DocumentCollection>> queryCollections(String databaseLink, String query,
FeedOptions options) {
return createQuery(databaseLink, new SqlQuerySpec(query), options, DocumentCollection.class, ResourceType.DocumentCollection);
}
/** Queries collections under the given database link. */
@Override
public Flux<FeedResponse<DocumentCollection>> queryCollections(String databaseLink,
SqlQuerySpec querySpec, FeedOptions options) {
return createQuery(databaseLink, querySpec, options, DocumentCollection.class, ResourceType.DocumentCollection);
}
/**
 * Serializes stored-procedure arguments into a JSON array literal.
 * {@code JsonSerializable} values use their own toJson(); everything else
 * goes through the shared ObjectMapper.
 *
 * @throws IllegalArgumentException when Jackson cannot serialize a value.
 */
private static String serializeProcedureParams(Object[] objectArray) {
    StringBuilder json = new StringBuilder("[");
    for (int index = 0; index < objectArray.length; ++index) {
        if (index > 0) {
            json.append(',');
        }
        Object param = objectArray[index];
        if (param instanceof JsonSerializable) {
            json.append(((JsonSerializable) param).toJson());
        } else {
            try {
                json.append(mapper.writeValueAsString(param));
            } catch (IOException e) {
                throw new IllegalArgumentException("Can't serialize the object into the json string", e);
            }
        }
    }
    return json.append(']').toString();
}
private static void validateResource(Resource resource) {
if (!StringUtils.isEmpty(resource.getId())) {
if (resource.getId().indexOf('/') != -1 || resource.getId().indexOf('\\') != -1 ||
resource.getId().indexOf('?') != -1 || resource.getId().indexOf('
throw new IllegalArgumentException("Id contains illegal chars.");
}
if (resource.getId().endsWith(" ")) {
throw new IllegalArgumentException("Id ends with a space.");
}
}
}
/**
 * Builds the per-request header map from client-level state and the supplied
 * {@link RequestOptions}. Option-level values (e.g. consistency level) are put
 * after client-level defaults and therefore override them.
 */
private Map<String, String> getRequestHeaders(RequestOptions options) {
Map<String, String> headers = new HashMap<>();
if (this.useMultipleWriteLocations) {
headers.put(HttpConstants.HttpHeaders.ALLOW_TENTATIVE_WRITES, Boolean.TRUE.toString());
}
if (consistencyLevel != null) {
headers.put(HttpConstants.HttpHeaders.CONSISTENCY_LEVEL, consistencyLevel.toString());
}
if (options == null) {
return headers;
}
// Caller-supplied custom headers go in first so the typed options below win on conflict.
Map<String, String> customOptions = options.getHeaders();
if (customOptions != null) {
headers.putAll(customOptions);
}
if (options.getAccessCondition() != null) {
if (options.getAccessCondition().getType() == AccessConditionType.IF_MATCH) {
headers.put(HttpConstants.HttpHeaders.IF_MATCH, options.getAccessCondition().getCondition());
} else {
headers.put(HttpConstants.HttpHeaders.IF_NONE_MATCH, options.getAccessCondition().getCondition());
}
}
if (options.getConsistencyLevel() != null) {
headers.put(HttpConstants.HttpHeaders.CONSISTENCY_LEVEL, options.getConsistencyLevel().toString());
}
if (options.getIndexingDirective() != null) {
headers.put(HttpConstants.HttpHeaders.INDEXING_DIRECTIVE, options.getIndexingDirective().toString());
}
if (options.getPostTriggerInclude() != null && options.getPostTriggerInclude().size() > 0) {
String postTriggerInclude = StringUtils.join(options.getPostTriggerInclude(), ",");
headers.put(HttpConstants.HttpHeaders.POST_TRIGGER_INCLUDE, postTriggerInclude);
}
if (options.getPreTriggerInclude() != null && options.getPreTriggerInclude().size() > 0) {
String preTriggerInclude = StringUtils.join(options.getPreTriggerInclude(), ",");
headers.put(HttpConstants.HttpHeaders.PRE_TRIGGER_INCLUDE, preTriggerInclude);
}
if (!Strings.isNullOrEmpty(options.getSessionToken())) {
headers.put(HttpConstants.HttpHeaders.SESSION_TOKEN, options.getSessionToken());
}
if (options.getResourceTokenExpirySeconds() != null) {
headers.put(HttpConstants.HttpHeaders.RESOURCE_TOKEN_EXPIRY,
String.valueOf(options.getResourceTokenExpirySeconds()));
}
// Throughput (numeric) takes precedence over a named offer type.
if (options.getOfferThroughput() != null && options.getOfferThroughput() >= 0) {
headers.put(HttpConstants.HttpHeaders.OFFER_THROUGHPUT, options.getOfferThroughput().toString());
} else if (options.getOfferType() != null) {
headers.put(HttpConstants.HttpHeaders.OFFER_TYPE, options.getOfferType());
}
if (options.isPopulateQuotaInfo()) {
headers.put(HttpConstants.HttpHeaders.POPULATE_QUOTA_INFO, String.valueOf(true));
}
if (options.isScriptLoggingEnabled()) {
headers.put(HttpConstants.HttpHeaders.SCRIPT_ENABLE_LOGGING, String.valueOf(true));
}
return headers;
}
/**
 * Resolves the target collection via the collection cache, then delegates to
 * the synchronous overload to attach partition-key information to the request.
 */
private Mono<RxDocumentServiceRequest> addPartitionKeyInformation(RxDocumentServiceRequest request,
String contentAsString,
Document document,
RequestOptions options) {
Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = this.collectionCache.resolveCollectionAsync(request);
return collectionObs
.map(collectionValueHolder -> {
addPartitionKeyInformation(request, contentAsString, document, options, collectionValueHolder.v);
return request;
});
}
/**
 * Variant taking an already-resolved collection observable; delegates to the
 * synchronous overload once the collection is available.
 */
private Mono<RxDocumentServiceRequest> addPartitionKeyInformation(RxDocumentServiceRequest request,
String contentAsString,
Object document,
RequestOptions options,
Mono<Utils.ValueHolder<DocumentCollection>> collectionObs) {
return collectionObs.map(collectionValueHolder -> {
addPartitionKeyInformation(request, contentAsString, document, options, collectionValueHolder.v);
return request;
});
}
/**
 * Determines the effective partition key for a request and stamps it both on the
 * request object and in the x-ms-documentdb-partitionkey header. Precedence:
 * explicit PartitionKey.NONE option > explicit option value > collection without a
 * partition-key definition (empty key) > value extracted from the document body.
 *
 * @throws UnsupportedOperationException when no partition key can be determined.
 */
private void addPartitionKeyInformation(RxDocumentServiceRequest request,
String contentAsString,
Object objectDoc, RequestOptions options,
DocumentCollection collection) {
PartitionKeyDefinition partitionKeyDefinition = collection.getPartitionKey();
PartitionKeyInternal partitionKeyInternal = null;
if (options != null && options.getPartitionKey() != null && options.getPartitionKey().equals(PartitionKey.NONE)){
partitionKeyInternal = BridgeInternal.getNonePartitionKey(partitionKeyDefinition);
} else if (options != null && options.getPartitionKey() != null) {
partitionKeyInternal = BridgeInternal.getPartitionKeyInternal(options.getPartitionKey());
} else if (partitionKeyDefinition == null || partitionKeyDefinition.getPaths().size() == 0) {
// Collections without a partition key definition use the empty key.
partitionKeyInternal = PartitionKeyInternal.getEmpty();
} else if (contentAsString != null) {
// Fall back to extracting the key from the document body itself.
CosmosItemProperties cosmosItemProperties;
if (objectDoc instanceof CosmosItemProperties) {
cosmosItemProperties = (CosmosItemProperties) objectDoc;
} else {
cosmosItemProperties = new CosmosItemProperties(contentAsString);
}
partitionKeyInternal = extractPartitionKeyValueFromDocument(cosmosItemProperties, partitionKeyDefinition);
} else {
throw new UnsupportedOperationException("PartitionKey value must be supplied for this operation.");
}
request.setPartitionKeyInternal(partitionKeyInternal);
request.getHeaders().put(HttpConstants.HttpHeaders.PARTITION_KEY, Utils.escapeNonAscii(partitionKeyInternal.toJson()));
}
/**
 * Extracts the partition key value from the document body using the first (top-level)
 * partition key path of the collection definition.
 *
 * Returns {@code null} when no definition/path is available. A missing value or an
 * ObjectNode at the path is treated as the "none" partition key.
 */
private static PartitionKeyInternal extractPartitionKeyValueFromDocument(
    CosmosItemProperties document,
    PartitionKeyDefinition partitionKeyDefinition) {
    if (partitionKeyDefinition != null) {
        // Only the first path is considered here.
        String path = partitionKeyDefinition.getPaths().iterator().next();
        List<String> parts = PathParser.getPathParts(path);
        if (parts.size() >= 1) {
            Object value = document.getObjectByPath(parts);
            if (value == null || value.getClass() == ObjectNode.class) {
                value = BridgeInternal.getNonePartitionKey(partitionKeyDefinition);
            }
            if (value instanceof PartitionKeyInternal) {
                return (PartitionKeyInternal) value;
            } else {
                return PartitionKeyInternal.fromObjectArray(Collections.singletonList(value), false);
            }
        }
    }
    return null;
}
/**
 * Builds a document create/upsert request for the given collection, serializing the
 * document and attaching partition-key information once the collection is resolved.
 */
private Mono<RxDocumentServiceRequest> getCreateDocumentRequest(String documentCollectionLink, Object document,
    RequestOptions options, boolean disableAutomaticIdGeneration, OperationType operationType) {
    if (StringUtils.isEmpty(documentCollectionLink)) {
        throw new IllegalArgumentException("documentCollectionLink");
    }
    if (document == null) {
        throw new IllegalArgumentException("document");
    }

    final String serializedDocument = toJsonString(document, mapper);
    final String requestPath = Utils.joinPath(documentCollectionLink, Paths.DOCUMENTS_PATH_SEGMENT);
    final Map<String, String> headers = this.getRequestHeaders(options);
    final RxDocumentServiceRequest serviceRequest = RxDocumentServiceRequest.create(
        operationType, ResourceType.Document, requestPath, headers, options, serializedDocument);

    Mono<Utils.ValueHolder<DocumentCollection>> resolvedCollection =
        this.collectionCache.resolveCollectionAsync(serviceRequest);
    return addPartitionKeyInformation(serviceRequest, serializedDocument, document, options, resolvedCollection);
}
/**
 * Populates the common HTTP headers on an outgoing request: date, authorization
 * (when any credential source is configured), and content-type/accept defaults.
 */
private void populateHeaders(RxDocumentServiceRequest request, RequestVerb httpMethod) {
    // The x-ms-date header participates in the authorization signature, so it is set first.
    request.getHeaders().put(HttpConstants.HttpHeaders.X_DATE, Utils.nowAsRFC1123());
    if (this.masterKeyOrResourceToken != null || this.resourceTokensMap != null
        || this.tokenResolver != null || this.cosmosKeyCredential != null) {
        String resourceName = request.getResourceAddress();
        String authorization = this.getUserAuthorizationToken(
            resourceName, request.getResourceType(), httpMethod, request.getHeaders(),
            AuthorizationTokenType.PrimaryMasterKey, request.properties);
        try {
            // Token must be URL-encoded before it is placed on the wire.
            authorization = URLEncoder.encode(authorization, "UTF-8");
        } catch (UnsupportedEncodingException e) {
            throw new IllegalStateException("Failed to encode authtoken.", e);
        }
        request.getHeaders().put(HttpConstants.HttpHeaders.AUTHORIZATION, authorization);
    }
    // Only bodied verbs get a default content type; existing values are never overwritten.
    if ((RequestVerb.POST.equals(httpMethod) || RequestVerb.PUT.equals(httpMethod))
        && !request.getHeaders().containsKey(HttpConstants.HttpHeaders.CONTENT_TYPE)) {
        request.getHeaders().put(HttpConstants.HttpHeaders.CONTENT_TYPE, RuntimeConstants.MediaTypes.JSON);
    }
    if (!request.getHeaders().containsKey(HttpConstants.HttpHeaders.ACCEPT)) {
        request.getHeaders().put(HttpConstants.HttpHeaders.ACCEPT, RuntimeConstants.MediaTypes.JSON);
    }
}
/**
 * Resolves the authorization token for a request.
 *
 * Credential precedence (order of the branches is significant):
 *  1. a user-supplied token resolver,
 *  2. a CosmosKeyCredential (key-based signature),
 *  3. a plain resource token passed as the master key,
 *  4. the per-resource token map (falling back to the first permission-feed token
 *     for database-account reads).
 */
@Override
public String getUserAuthorizationToken(String resourceName,
                                        ResourceType resourceType,
                                        RequestVerb requestVerb,
                                        Map<String, String> headers,
                                        AuthorizationTokenType tokenType,
                                        Map<String, Object> properties) {
    if (this.tokenResolver != null) {
        return this.tokenResolver.getAuthorizationToken(requestVerb, resourceName, this.resolveCosmosResourceType(resourceType),
            properties != null ? Collections.unmodifiableMap(properties) : null);
    } else if (cosmosKeyCredential != null) {
        return this.authorizationTokenProvider.generateKeyAuthorizationSignature(requestVerb, resourceName,
            resourceType, headers);
    } else if (masterKeyOrResourceToken != null && hasAuthKeyResourceToken && resourceTokensMap == null) {
        // The "master key" is actually a resource token: use it verbatim.
        return masterKeyOrResourceToken;
    } else {
        assert resourceTokensMap != null;
        if(resourceType.equals(ResourceType.DatabaseAccount)) {
            return this.firstResourceTokenFromPermissionFeed;
        }
        return ResourceTokenAuthorizationHelper.getAuthorizationTokenUsingResourceTokens(resourceTokensMap, requestVerb, resourceName, headers);
    }
}
/**
 * Maps an internal ResourceType to the public CosmosResourceType by name,
 * falling back to {@code System} for types without a public counterpart.
 */
private CosmosResourceType resolveCosmosResourceType(ResourceType resourceType) {
    String typeName = resourceType.toString();
    try {
        return CosmosResourceType.valueOf(typeName);
    } catch (IllegalArgumentException ignored) {
        // No matching public enum constant: treat it as a system resource.
        return CosmosResourceType.System;
    }
}
// Records the session token from the response headers into the session container,
// keyed by the request, so session consistency can be honored on subsequent calls.
void captureSessionToken(RxDocumentServiceRequest request, RxDocumentServiceResponse response) {
    this.sessionContainer.setSessionToken(request, response.getResponseHeaders());
}
/**
 * Sends a POST request through the appropriate store proxy, updating the retry
 * context when this is a retry attempt.
 */
private Mono<RxDocumentServiceResponse> create(RxDocumentServiceRequest request, DocumentClientRetryPolicy retryPolicy) {
    populateHeaders(request, RequestVerb.POST);
    RxStoreModel storeProxy = this.getStoreProxy(request);
    // On retries (count > 0), refresh the retry bookkeeping carried by the request.
    if(request.requestContext != null && retryPolicy.getRetryCount() > 0) {
        retryPolicy.updateEndTime();
        request.requestContext.updateRetryContext(retryPolicy, true);
    }
    return storeProxy.processMessage(request);
}
/**
 * Sends a POST request flagged as an upsert. On success the session token from the
 * response is captured for session consistency.
 */
private Mono<RxDocumentServiceResponse> upsert(RxDocumentServiceRequest request, DocumentClientRetryPolicy documentClientRetryPolicy) {
    populateHeaders(request, RequestVerb.POST);
    Map<String, String> headers = request.getHeaders();
    // populateHeaders always initializes headers, so this cannot be null.
    assert (headers != null);
    headers.put(HttpConstants.HttpHeaders.IS_UPSERT, "true");
    // On retries (count > 0), refresh the retry bookkeeping carried by the request.
    if(request.requestContext != null && documentClientRetryPolicy.getRetryCount() > 0) {
        documentClientRetryPolicy.updateEndTime();
        request.requestContext.updateRetryContext(documentClientRetryPolicy, true);
    }
    return getStoreProxy(request).processMessage(request)
        .map(response -> {
            this.captureSessionToken(request, response);
            return response;
        }
    );
}
/**
 * Sends a PUT request through the appropriate store proxy, updating the retry
 * context when this is a retry attempt.
 */
private Mono<RxDocumentServiceResponse> replace(RxDocumentServiceRequest request, DocumentClientRetryPolicy documentClientRetryPolicy) {
    populateHeaders(request, RequestVerb.PUT);
    // On retries (count > 0), refresh the retry bookkeeping carried by the request.
    if(request.requestContext != null && documentClientRetryPolicy.getRetryCount() > 0) {
        documentClientRetryPolicy.updateEndTime();
        request.requestContext.updateRetryContext(documentClientRetryPolicy, true);
    }
    return getStoreProxy(request).processMessage(request);
}
/**
 * Creates a document in the given collection. When no partition key was supplied,
 * the retry policy is wrapped so that stale partition-key definitions are retried.
 */
@Override
public Mono<ResourceResponse<Document>> createDocument(String collectionLink, Object document,
    RequestOptions options, boolean disableAutomaticIdGeneration) {
    DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    boolean partitionKeyMissing = options == null || options.getPartitionKey() == null;
    if (partitionKeyMissing) {
        retryPolicy = new PartitionKeyMismatchRetryPolicy(collectionCache, retryPolicy, collectionLink, options);
    }
    final DocumentClientRetryPolicy effectivePolicy = retryPolicy;
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> createDocumentInternal(collectionLink, document, options, disableAutomaticIdGeneration, effectivePolicy),
        effectivePolicy);
}
/**
 * Builds and dispatches the create-document request, converting the raw service
 * response into a typed ResourceResponse. Synchronous failures are surfaced as
 * an error Mono rather than thrown.
 */
private Mono<ResourceResponse<Document>> createDocumentInternal(String collectionLink, Object document,
    RequestOptions options, boolean disableAutomaticIdGeneration, DocumentClientRetryPolicy requestRetryPolicy) {
    try {
        logger.debug("Creating a Document. collectionLink: [{}]", collectionLink);
        Mono<RxDocumentServiceRequest> requestObs = getCreateDocumentRequest(collectionLink, document,
            options, disableAutomaticIdGeneration, OperationType.Create);
        Mono<RxDocumentServiceResponse> responseObservable = requestObs.flatMap(request -> {
            // Give the retry policy a chance to inspect/route the request before sending.
            if (requestRetryPolicy != null) {
                requestRetryPolicy.onBeforeSendRequest(request);
            }
            return create(request, requestRetryPolicy);
        });
        return responseObservable
            .map(serviceResponse -> toResourceResponse(serviceResponse, Document.class));
    } catch (Exception e) {
        logger.debug("Failure in creating a document due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Upserts a document in the given collection. When no partition key was supplied,
 * the retry policy is wrapped so that stale partition-key definitions are retried.
 */
@Override
public Mono<ResourceResponse<Document>> upsertDocument(String collectionLink, Object document,
    RequestOptions options, boolean disableAutomaticIdGeneration) {
    DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    boolean partitionKeyMissing = options == null || options.getPartitionKey() == null;
    if (partitionKeyMissing) {
        retryPolicy = new PartitionKeyMismatchRetryPolicy(collectionCache, retryPolicy, collectionLink, options);
    }
    final DocumentClientRetryPolicy effectivePolicy = retryPolicy;
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> upsertDocumentInternal(collectionLink, document, options, disableAutomaticIdGeneration, effectivePolicy),
        effectivePolicy);
}
/**
 * Builds and dispatches the upsert-document request, converting the raw service
 * response into a typed ResourceResponse. Synchronous failures are surfaced as
 * an error Mono rather than thrown.
 */
private Mono<ResourceResponse<Document>> upsertDocumentInternal(String collectionLink, Object document,
    RequestOptions options, boolean disableAutomaticIdGeneration, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        logger.debug("Upserting a Document. collectionLink: [{}]", collectionLink);
        Mono<RxDocumentServiceRequest> reqObs = getCreateDocumentRequest(collectionLink, document,
            options, disableAutomaticIdGeneration, OperationType.Upsert);
        Mono<RxDocumentServiceResponse> responseObservable = reqObs.flatMap(request -> {
            // Give the retry policy a chance to inspect/route the request before sending.
            if (retryPolicyInstance != null) {
                retryPolicyInstance.onBeforeSendRequest(request);
            }
            return upsert(request, retryPolicyInstance);
        });
        return responseObservable
            .map(serviceResponse -> toResourceResponse(serviceResponse, Document.class));
    } catch (Exception e) {
        logger.debug("Failure in upserting a document due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Replaces a document identified by its link. When no partition key was supplied,
 * the retry policy is wrapped so that stale partition-key definitions are retried.
 */
@Override
public Mono<ResourceResponse<Document>> replaceDocument(String documentLink, Object document,
    RequestOptions options) {
    DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    boolean partitionKeyMissing = options == null || options.getPartitionKey() == null;
    if (partitionKeyMissing) {
        String collectionLink = Utils.getCollectionName(documentLink);
        retryPolicy = new PartitionKeyMismatchRetryPolicy(collectionCache, retryPolicy, collectionLink, options);
    }
    final DocumentClientRetryPolicy effectivePolicy = retryPolicy;
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> replaceDocumentInternal(documentLink, document, options, effectivePolicy),
        effectivePolicy);
}
/**
 * Validates the arguments, converts the raw object into a typed Document, and
 * delegates to the Document-based replace overload. Synchronous failures are
 * surfaced as an error Mono rather than thrown.
 */
private Mono<ResourceResponse<Document>> replaceDocumentInternal(String documentLink, Object document,
    RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(documentLink)) {
            throw new IllegalArgumentException("documentLink");
        }
        if (document == null) {
            throw new IllegalArgumentException("document");
        }
        Document typedDocument = documentFromObject(document, mapper);
        return this.replaceDocumentInternal(documentLink, typedDocument, options, retryPolicyInstance);
    } catch (Exception e) {
        // Pass the exception as the last argument so the stack trace is logged,
        // consistent with createDocumentInternal/upsertDocumentInternal.
        logger.debug("Failure in replacing a document due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Replaces a document using its own self link. When no partition key was supplied,
 * the retry policy is wrapped so that stale partition-key definitions are retried.
 */
@Override
public Mono<ResourceResponse<Document>> replaceDocument(Document document, RequestOptions options) {
    DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    if (options == null || options.getPartitionKey() == null) {
        // NOTE(review): getSelfLink() is the DOCUMENT's self link, yet it is passed where
        // the String-link overload passes Utils.getCollectionName(documentLink). Verify
        // PartitionKeyMismatchRetryPolicy tolerates a document link here.
        String collectionLink = document.getSelfLink();
        requestRetryPolicy = new PartitionKeyMismatchRetryPolicy(collectionCache, requestRetryPolicy, collectionLink, options);
    }
    DocumentClientRetryPolicy finalRequestRetryPolicy = requestRetryPolicy;
    return ObservableHelper.inlineIfPossibleAsObs(() -> replaceDocumentInternal(document, options, finalRequestRetryPolicy), requestRetryPolicy);
}
/**
 * Validates the document and delegates to the link-based replace overload using the
 * document's own self link. Synchronous failures are surfaced as an error Mono.
 */
private Mono<ResourceResponse<Document>> replaceDocumentInternal(Document document, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (document == null) {
            throw new IllegalArgumentException("document");
        }
        return this.replaceDocumentInternal(document.getSelfLink(), document, options, retryPolicyInstance);
    } catch (Exception e) {
        // Fixed: the message previously said "replacing a database" for a document
        // operation; also pass the exception so the stack trace is logged.
        logger.debug("Failure in replacing a document due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Builds the PUT request for a document replace, resolves the collection to stamp
 * partition-key information, and dispatches through the retry-aware replace path.
 */
private Mono<ResourceResponse<Document>> replaceDocumentInternal(String documentLink,
                                                                 Document document,
                                                                 RequestOptions options,
                                                                 DocumentClientRetryPolicy retryPolicyInstance) {
    if (document == null) {
        throw new IllegalArgumentException("document");
    }
    logger.debug("Replacing a Document. documentLink: [{}]", documentLink);
    final String path = Utils.joinPath(documentLink, null);
    final Map<String, String> requestHeaders = getRequestHeaders(options);
    String content = toJsonString(document, mapper);
    final RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Replace,
        ResourceType.Document, path, requestHeaders, options, content);
    Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = collectionCache.resolveCollectionAsync(request);
    Mono<RxDocumentServiceRequest> requestObs = addPartitionKeyInformation(request, content, document, options, collectionObs);
    return requestObs.flatMap(req -> {
        // Use the emitted request ("req") consistently — it is the same instance as
        // "request", but matching deleteDocumentInternal avoids ambiguity about which
        // reference is live inside the lambda.
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(req);
        }
        return replace(req, retryPolicyInstance)
            .map(resp -> toResourceResponse(resp, Document.class));
    });
}
/**
 * Deletes a document identified by its link, running under the standard
 * session-token-reset retry policy.
 */
@Override
public Mono<ResourceResponse<Document>> deleteDocument(String documentLink, RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> deleteDocumentInternal(documentLink, options, retryPolicy),
        retryPolicy);
}
/**
 * Builds the DELETE request for a document, resolves the collection to stamp
 * partition-key information, and dispatches through the retry-aware delete path.
 * Synchronous failures are surfaced as an error Mono rather than thrown.
 */
private Mono<ResourceResponse<Document>> deleteDocumentInternal(String documentLink, RequestOptions options,
    DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(documentLink)) {
            throw new IllegalArgumentException("documentLink");
        }
        logger.debug("Deleting a Document. documentLink: [{}]", documentLink);
        String path = Utils.joinPath(documentLink, null);
        Map<String, String> requestHeaders = this.getRequestHeaders(options);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Delete,
            ResourceType.Document, path, requestHeaders, options);
        Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = collectionCache.resolveCollectionAsync(request);
        // No body for deletes: partition key comes from options or the collection definition.
        Mono<RxDocumentServiceRequest> requestObs = addPartitionKeyInformation(request, null, null, options, collectionObs);
        return requestObs.flatMap(req -> {
            if (retryPolicyInstance != null) {
                retryPolicyInstance.onBeforeSendRequest(req);
            }
            return this.delete(req, retryPolicyInstance)
                .map(serviceResponse -> toResourceResponse(serviceResponse, Document.class));});
    } catch (Exception e) {
        logger.debug("Failure in deleting a document due to [{}]", e.getMessage());
        return Mono.error(e);
    }
}
/**
 * Reads a document identified by its link, running under the standard
 * session-token-reset retry policy.
 */
@Override
public Mono<ResourceResponse<Document>> readDocument(String documentLink, RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> readDocumentInternal(documentLink, options, retryPolicy),
        retryPolicy);
}
/**
 * Builds the READ request for a document, resolves the collection to stamp
 * partition-key information, and dispatches through the retry-aware read path.
 * Synchronous failures are surfaced as an error Mono rather than thrown.
 */
private Mono<ResourceResponse<Document>> readDocumentInternal(String documentLink, RequestOptions options,
    DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(documentLink)) {
            throw new IllegalArgumentException("documentLink");
        }
        logger.debug("Reading a Document. documentLink: [{}]", documentLink);
        String path = Utils.joinPath(documentLink, null);
        Map<String, String> requestHeaders = this.getRequestHeaders(options);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Read,
            ResourceType.Document, path, requestHeaders, options);
        Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = this.collectionCache.resolveCollectionAsync(request);
        // No body for reads: partition key comes from options or the collection definition.
        Mono<RxDocumentServiceRequest> requestObs = addPartitionKeyInformation(request, null, null, options, collectionObs);
        return requestObs.flatMap(req -> {
            // Use the emitted request ("req") consistently, matching deleteDocumentInternal;
            // it is the same instance as "request" but avoids referencing the outer variable.
            if (retryPolicyInstance != null) {
                retryPolicyInstance.onBeforeSendRequest(req);
            }
            return this.read(req, retryPolicyInstance)
                .map(serviceResponse -> toResourceResponse(serviceResponse, Document.class));
        });
    } catch (Exception e) {
        logger.debug("Failure in reading a document due to [{}]", e.getMessage());
        return Mono.error(e);
    }
}
/**
 * Reads all documents in a collection by issuing an unfiltered SELECT query,
 * so feed options (paging, partition key, etc.) are honored by the query pipeline.
 */
@Override
public Flux<FeedResponse<Document>> readDocuments(String collectionLink, FeedOptions options) {
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }
    return queryDocuments(collectionLink, "SELECT * FROM r", options);
}
/**
 * Point-read optimization for a batch of (id, partition key) pairs: resolves the
 * collection and its routing map, groups the pairs by the partition key range that
 * owns them, issues one targeted query per range, then merges the pages into a
 * single FeedResponse (summing the request charge).
 */
@Override
public <T> Mono<FeedResponse<T>> readMany(
    List<Pair<String, PartitionKey>> itemKeyList,
    String collectionLink,
    FeedOptions options,
    Class<T> klass) {
    RxDocumentServiceRequest request = RxDocumentServiceRequest.create(
        OperationType.Query,
        ResourceType.Document,
        collectionLink, null
    );
    Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = collectionCache.resolveCollectionAsync(request);
    return collectionObs
        .flatMap(documentCollectionResourceResponse -> {
            final DocumentCollection collection = documentCollectionResourceResponse.v;
            if (collection == null) {
                throw new IllegalStateException("Collection cannot be null");
            }
            Mono<Utils.ValueHolder<CollectionRoutingMap>> valueHolderMono = partitionKeyRangeCache
                .tryLookupAsync(collection.getResourceId(), null, null);
            return valueHolderMono.flatMap(collectionRoutingMapValueHolder -> {
                CollectionRoutingMap routingMap = collectionRoutingMapValueHolder.v;
                if (routingMap == null) {
                    throw new IllegalStateException("Failed to get routing map.");
                }
                // Bucket each item under the partition key range that owns its
                // effective partition key. (computeIfAbsent replaces the original
                // get/put dance; the unused "ranges" copy of the key set is removed.)
                Map<PartitionKeyRange, List<Pair<String, PartitionKey>>> partitionRangeItemKeyMap =
                    new HashMap<>();
                itemKeyList.forEach(stringPartitionKeyPair -> {
                    String effectivePartitionKeyString = PartitionKeyInternalHelper
                        .getEffectivePartitionKeyString(
                            BridgeInternal.getPartitionKeyInternal(stringPartitionKeyPair.getRight()),
                            collection.getPartitionKey());
                    PartitionKeyRange range =
                        routingMap.getRangeByEffectivePartitionKey(effectivePartitionKeyString);
                    partitionRangeItemKeyMap
                        .computeIfAbsent(range, ignored -> new ArrayList<>())
                        .add(stringPartitionKeyPair);
                });
                Map<PartitionKeyRange, SqlQuerySpec> rangeQueryMap =
                    getRangeQueryMap(partitionRangeItemKeyMap, collection.getPartitionKey());
                // The query text is never sent to the service; the per-range specs in
                // rangeQueryMap drive the actual execution.
                String sqlQuery = "this is dummy and only used in creating " +
                    "ParallelDocumentQueryExecutioncontext, but not used";
                return createReadManyQuery(collectionLink,
                    new SqlQuerySpec(sqlQuery),
                    options,
                    Document.class,
                    ResourceType.Document,
                    collection,
                    Collections.unmodifiableMap(rangeQueryMap))
                    .collectList()
                    .map(feedList -> {
                        // Merge all per-range pages into one response.
                        List<T> finalList = new ArrayList<T>();
                        HashMap<String, String> headers = new HashMap<>();
                        double requestCharge = 0;
                        for (FeedResponse<Document> page : feedList) {
                            requestCharge += page.getRequestCharge();
                            finalList.addAll(page.getResults().stream()
                                .map(document -> document.toObject(klass))
                                .collect(Collectors.toList()));
                        }
                        headers.put(HttpConstants.HttpHeaders.REQUEST_CHARGE,
                            Double.toString(requestCharge));
                        return BridgeInternal.<T>createFeedResponse(finalList, headers);
                    });
            });
        }
    );
}
/**
 * Builds one SQL query spec per partition key range. A collection partitioned
 * directly on /id gets the simpler IN-clause form; all others get the
 * (id AND partition-key) disjunction form.
 */
private Map<PartitionKeyRange, SqlQuerySpec> getRangeQueryMap(
    Map<PartitionKeyRange, List<Pair<String, PartitionKey>>> partitionRangeItemKeyMap,
    PartitionKeyDefinition partitionKeyDefinition) {
    final String pkSelector = createPkSelector(partitionKeyDefinition);
    final boolean partitionedOnId = pkSelector.equals("[\"id\"]");
    Map<PartitionKeyRange, SqlQuerySpec> rangeQueryMap = new HashMap<>();
    partitionRangeItemKeyMap.forEach((range, pairs) -> {
        SqlQuerySpec spec = partitionedOnId
            ? createReadManyQuerySpecPartitionKeyIdSame(pairs, pkSelector)
            : createReadManyQuerySpec(pairs, pkSelector);
        rangeQueryMap.put(range, spec);
    });
    return rangeQueryMap;
}
/**
 * Builds a "c.id IN (...)" query for a collection partitioned on /id. Pairs whose
 * id and partition key value disagree cannot match any document and are skipped.
 *
 * Fix: separators are now emitted between included parameters only. The original
 * appended ", " based on the index position, so a skipped pair following an emitted
 * one left a dangling comma ("IN ( @param0, )"), producing malformed SQL.
 */
private SqlQuerySpec createReadManyQuerySpecPartitionKeyIdSame(List<Pair<String, PartitionKey>> idPartitionKeyPairList, String partitionKeySelector) {
    StringBuilder queryStringBuilder = new StringBuilder();
    SqlParameterList parameters = new SqlParameterList();
    queryStringBuilder.append("SELECT * FROM c WHERE c.id IN ( ");
    boolean first = true;
    for (int i = 0; i < idPartitionKeyPairList.size(); i++) {
        Pair<String, PartitionKey> pair = idPartitionKeyPairList.get(i);
        String idValue = pair.getLeft();
        PartitionKey pkValueAsPartitionKey = pair.getRight();
        Object pkValue = BridgeInternal.getPartitionKeyObject(pkValueAsPartitionKey);
        if (!Objects.equals(idValue, pkValue)) {
            // id and partition key disagree: no document can match, skip the pair.
            continue;
        }
        String idParamName = "@param" + i;
        parameters.add(new SqlParameter(idParamName, idValue));
        if (!first) {
            queryStringBuilder.append(", ");
        }
        queryStringBuilder.append(idParamName);
        first = false;
    }
    queryStringBuilder.append(" )");
    return new SqlQuerySpec(queryStringBuilder.toString(), parameters);
}
/**
 * Builds a parameterized disjunction query matching each (id, partition key) pair:
 * SELECT * FROM c WHERE ( (c.id = @p AND c[pk] = @q) OR ... ).
 * Parameter names are paired: even indices hold partition key values, odd hold ids.
 */
private SqlQuerySpec createReadManyQuerySpec(List<Pair<String, PartitionKey>> idPartitionKeyPairList, String partitionKeySelector) {
    StringBuilder queryStringBuilder = new StringBuilder();
    SqlParameterList parameters = new SqlParameterList();
    queryStringBuilder.append("SELECT * FROM c WHERE ( ");
    for (int i = 0; i < idPartitionKeyPairList.size(); i++) {
        Pair<String, PartitionKey> pair = idPartitionKeyPairList.get(i);
        PartitionKey pkValueAsPartitionKey = pair.getRight();
        Object pkValue = BridgeInternal.getPartitionKeyObject(pkValueAsPartitionKey);
        String pkParamName = "@param" + (2 * i);
        parameters.add(new SqlParameter(pkParamName, pkValue));
        String idValue = pair.getLeft();
        String idParamName = "@param" + (2 * i + 1);
        parameters.add(new SqlParameter(idParamName, idValue));
        queryStringBuilder.append("(");
        queryStringBuilder.append("c.id = ");
        queryStringBuilder.append(idParamName);
        queryStringBuilder.append(" AND ");
        queryStringBuilder.append(" c");
        // partitionKeySelector is a bracketed path like ["pk"] produced by createPkSelector.
        queryStringBuilder.append(partitionKeySelector);
        queryStringBuilder.append((" = "));
        queryStringBuilder.append(pkParamName);
        queryStringBuilder.append(" )");
        if (i < idPartitionKeyPairList.size() - 1) {
            queryStringBuilder.append(" OR ");
        }
    }
    queryStringBuilder.append(" )");
    return new SqlQuerySpec(queryStringBuilder.toString(), parameters);
}
/**
 * Converts the partition key definition paths (e.g. /pk) into a bracketed selector
 * string (e.g. ["pk"]) usable in SQL as c["pk"]; multiple paths are concatenated.
 */
private String createPkSelector(PartitionKeyDefinition partitionKeyDefinition) {
    return partitionKeyDefinition.getPaths()
        .stream()
        // Drop the leading '/' of each path segment.
        .map(pathPart -> StringUtils.substring(pathPart, 1))
        // NOTE(review): this replaces '"' with '\' rather than the escaped sequence '\"';
        // looks suspicious for path segments containing quotes — confirm intended escaping.
        .map(pathPart -> StringUtils.replace(pathPart, "\"", "\\"))
        .map(part -> "[\"" + part + "\"]")
        .collect(Collectors.joining());
}
// Builds the SQL parameter name for the given ordinal ("@param0", "@param1", ...).
// NOTE(review): name has a typo ("Curent") and the method appears unused in this view;
// kept as-is since it may be referenced elsewhere in the file.
private String getCurentParamName(int paramCnt){
    return "@param" + paramCnt;
}
/**
 * Creates and runs the read-many execution context: one targeted query per partition
 * key range (driven by rangeQueryMap), flattening each context's pages into the
 * returned Flux. The sqlQuery argument is only a placeholder for context creation.
 */
private <T extends Resource> Flux<FeedResponse<T>> createReadManyQuery(
    String parentResourceLink,
    SqlQuerySpec sqlQuery,
    FeedOptions options,
    Class<T> klass,
    ResourceType resourceTypeEnum,
    DocumentCollection collection,
    Map<PartitionKeyRange, SqlQuerySpec> rangeQueryMap) {
    // Correlates all per-range requests under a single activity id.
    UUID activityId = Utils.randomUUID();
    IDocumentQueryClient queryClient = DocumentQueryClientImpl(RxDocumentClientImpl.this);
    Flux<? extends IDocumentQueryExecutionContext<T>> executionContext =
        DocumentQueryExecutionContextFactory.createReadManyQueryAsync(queryClient, collection.getResourceId(),
            sqlQuery,
            rangeQueryMap,
            options,
            collection.getResourceId(),
            parentResourceLink,
            activityId,
            klass,
            resourceTypeEnum);
    return executionContext.flatMap(IDocumentQueryExecutionContext::executeAsync);
}
/**
 * Queries documents using raw SQL text; wraps the text into a SqlQuerySpec and
 * delegates to the spec-based overload.
 */
@Override
public Flux<FeedResponse<Document>> queryDocuments(String collectionLink, String query,
    FeedOptions options) {
    SqlQuerySpec querySpec = new SqlQuerySpec(query);
    return queryDocuments(collectionLink, querySpec, options);
}
/**
 * Adapts this client into the IDocumentQueryClient interface consumed by the query
 * execution pipeline. All members delegate to the enclosing RxDocumentClientImpl.
 */
private IDocumentQueryClient DocumentQueryClientImpl(RxDocumentClientImpl rxDocumentClientImpl) {
    return new IDocumentQueryClient () {
        @Override
        public RxCollectionCache getCollectionCache() {
            return RxDocumentClientImpl.this.collectionCache;
        }
        @Override
        public RxPartitionKeyRangeCache getPartitionKeyRangeCache() {
            return RxDocumentClientImpl.this.partitionKeyRangeCache;
        }
        @Override
        public IRetryPolicyFactory getResetSessionTokenRetryPolicy() {
            return RxDocumentClientImpl.this.resetSessionTokenRetryPolicy;
        }
        @Override
        public ConsistencyLevel getDefaultConsistencyLevelAsync() {
            // Account-level default consistency, read from the gateway configuration.
            return RxDocumentClientImpl.this.gatewayConfigurationReader.getDefaultConsistencyLevel();
        }
        @Override
        public ConsistencyLevel getDesiredConsistencyLevelAsync() {
            // Client-requested consistency level (may be null if not overridden).
            return RxDocumentClientImpl.this.consistencyLevel;
        }
        @Override
        public Mono<RxDocumentServiceResponse> executeQueryAsync(RxDocumentServiceRequest request) {
            return RxDocumentClientImpl.this.query(request).single();
        }
        @Override
        public QueryCompatibilityMode getQueryCompatibilityMode() {
            return QueryCompatibilityMode.Default;
        }
        @Override
        public Mono<RxDocumentServiceResponse> readFeedAsync(RxDocumentServiceRequest request) {
            // NOTE(review): returns null — read-feed appears unimplemented on this adapter;
            // confirm callers never invoke it through this path.
            return null;
        }
    };
}
/**
 * Queries documents in a collection using a parameterized SqlQuerySpec.
 */
@Override
public Flux<FeedResponse<Document>> queryDocuments(String collectionLink, SqlQuerySpec querySpec,
    FeedOptions options) {
    return createQuery(collectionLink, querySpec, options, Document.class, ResourceType.Document);
}
/**
 * Queries the change feed of a collection; each emitted page carries the documents
 * changed since the continuation/start point configured in the options.
 */
@Override
public Flux<FeedResponse<Document>> queryDocumentChangeFeed(final String collectionLink,
    final ChangeFeedOptions changeFeedOptions) {
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }
    return new ChangeFeedQueryImpl<Document>(this, ResourceType.Document,
        Document.class, collectionLink, changeFeedOptions).executeAsync();
}
/**
 * Reads the partition key ranges of a collection as a feed.
 */
@Override
public Flux<FeedResponse<PartitionKeyRange>> readPartitionKeyRanges(final String collectionLink,
    FeedOptions options) {
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }
    String pkRangesPath = Utils.joinPath(collectionLink, Paths.PARTITION_KEY_RANGES_PATH_SEGMENT);
    return readFeed(options, ResourceType.PartitionKeyRange, PartitionKeyRange.class, pkRangesPath);
}
/**
 * Validates the arguments and builds a service request targeting the stored
 * procedures feed of the given collection.
 */
private RxDocumentServiceRequest getStoredProcedureRequest(String collectionLink, StoredProcedure storedProcedure,
    RequestOptions options, OperationType operationType) {
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }
    if (storedProcedure == null) {
        throw new IllegalArgumentException("storedProcedure");
    }
    validateResource(storedProcedure);

    final String sprocsPath = Utils.joinPath(collectionLink, Paths.STORED_PROCEDURES_PATH_SEGMENT);
    final Map<String, String> headers = this.getRequestHeaders(options);
    return RxDocumentServiceRequest.create(operationType, ResourceType.StoredProcedure,
        sprocsPath, storedProcedure, headers, options);
}
/**
 * Validates the arguments and builds a service request targeting the user-defined
 * functions feed of the given collection.
 */
private RxDocumentServiceRequest getUserDefinedFunctionRequest(String collectionLink, UserDefinedFunction udf,
    RequestOptions options, OperationType operationType) {
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }
    if (udf == null) {
        throw new IllegalArgumentException("udf");
    }
    validateResource(udf);

    final String udfsPath = Utils.joinPath(collectionLink, Paths.USER_DEFINED_FUNCTIONS_PATH_SEGMENT);
    final Map<String, String> headers = this.getRequestHeaders(options);
    return RxDocumentServiceRequest.create(operationType, ResourceType.UserDefinedFunction,
        udfsPath, udf, headers, options);
}
/**
 * Creates a stored procedure in the given collection, running under the standard
 * session-token-reset retry policy.
 */
@Override
public Mono<ResourceResponse<StoredProcedure>> createStoredProcedure(String collectionLink,
    StoredProcedure storedProcedure, RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> createStoredProcedureInternal(collectionLink, storedProcedure, options, retryPolicy),
        retryPolicy);
}
/**
 * Builds and dispatches the create-stored-procedure request. Synchronous failures
 * are surfaced as an error Mono rather than thrown.
 */
private Mono<ResourceResponse<StoredProcedure>> createStoredProcedureInternal(String collectionLink,
    StoredProcedure storedProcedure, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        logger.debug("Creating a StoredProcedure. collectionLink: [{}], storedProcedure id [{}]",
            collectionLink, storedProcedure.getId());
        RxDocumentServiceRequest request = getStoredProcedureRequest(collectionLink, storedProcedure, options,
            OperationType.Create);
        // Give the retry policy a chance to inspect/route the request before sending.
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        return this.create(request, retryPolicyInstance).map(response -> toResourceResponse(response, StoredProcedure.class));
    } catch (Exception e) {
        logger.debug("Failure in creating a StoredProcedure due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Upserts a stored procedure in the given collection, running under the standard
 * session-token-reset retry policy.
 */
@Override
public Mono<ResourceResponse<StoredProcedure>> upsertStoredProcedure(String collectionLink,
    StoredProcedure storedProcedure, RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> upsertStoredProcedureInternal(collectionLink, storedProcedure, options, retryPolicy),
        retryPolicy);
}
/**
 * Builds and dispatches the upsert-stored-procedure request. Synchronous failures
 * are surfaced as an error Mono rather than thrown.
 */
private Mono<ResourceResponse<StoredProcedure>> upsertStoredProcedureInternal(String collectionLink,
    StoredProcedure storedProcedure, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        logger.debug("Upserting a StoredProcedure. collectionLink: [{}], storedProcedure id [{}]",
            collectionLink, storedProcedure.getId());
        RxDocumentServiceRequest request = getStoredProcedureRequest(collectionLink, storedProcedure, options,
            OperationType.Upsert);
        // Give the retry policy a chance to inspect/route the request before sending.
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        return this.upsert(request, retryPolicyInstance).map(response -> toResourceResponse(response, StoredProcedure.class));
    } catch (Exception e) {
        logger.debug("Failure in upserting a StoredProcedure due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Replaces an existing stored procedure, running under the standard
 * session-token-reset retry policy.
 */
@Override
public Mono<ResourceResponse<StoredProcedure>> replaceStoredProcedure(StoredProcedure storedProcedure,
    RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> replaceStoredProcedureInternal(storedProcedure, options, retryPolicy),
        retryPolicy);
}
/**
 * Validates, builds, and dispatches the replace-stored-procedure request using the
 * procedure's own self link. Synchronous failures are surfaced as an error Mono.
 */
private Mono<ResourceResponse<StoredProcedure>> replaceStoredProcedureInternal(StoredProcedure storedProcedure,
    RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (storedProcedure == null) {
            throw new IllegalArgumentException("storedProcedure");
        }
        logger.debug("Replacing a StoredProcedure. storedProcedure id [{}]", storedProcedure.getId());
        RxDocumentClientImpl.validateResource(storedProcedure);
        String path = Utils.joinPath(storedProcedure.getSelfLink(), null);
        Map<String, String> requestHeaders = getRequestHeaders(options);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Replace,
            ResourceType.StoredProcedure, path, storedProcedure, requestHeaders, options);
        // Give the retry policy a chance to inspect/route the request before sending.
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        return this.replace(request, retryPolicyInstance).map(response -> toResourceResponse(response, StoredProcedure.class));
    } catch (Exception e) {
        logger.debug("Failure in replacing a StoredProcedure due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Deletes a stored procedure identified by its link, running under the standard
 * session-token-reset retry policy.
 */
@Override
public Mono<ResourceResponse<StoredProcedure>> deleteStoredProcedure(String storedProcedureLink,
    RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> deleteStoredProcedureInternal(storedProcedureLink, options, retryPolicy),
        retryPolicy);
}
private Mono<ResourceResponse<StoredProcedure>> deleteStoredProcedureInternal(String storedProcedureLink,
RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (StringUtils.isEmpty(storedProcedureLink)) {
throw new IllegalArgumentException("storedProcedureLink");
}
logger.debug("Deleting a StoredProcedure. storedProcedureLink [{}]", storedProcedureLink);
String path = Utils.joinPath(storedProcedureLink, null);
Map<String, String> requestHeaders = this.getRequestHeaders(options);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Delete,
ResourceType.StoredProcedure, path, requestHeaders, options);
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.delete(request, retryPolicyInstance).map(response -> toResourceResponse(response, StoredProcedure.class));
} catch (Exception e) {
logger.debug("Failure in deleting a StoredProcedure due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
/** Reads the stored procedure identified by {@code storedProcedureLink}. */
@Override
public Mono<ResourceResponse<StoredProcedure>> readStoredProcedure(String storedProcedureLink,
                                                                   RequestOptions options) {
    DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(() -> readStoredProcedureInternal(storedProcedureLink, options, retryPolicyInstance), retryPolicyInstance);
}

/** Builds and issues the Read request for a stored procedure; failures become a failed Mono. */
private Mono<ResourceResponse<StoredProcedure>> readStoredProcedureInternal(String storedProcedureLink,
                                                                            RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(storedProcedureLink)) {
            throw new IllegalArgumentException("storedProcedureLink");
        }
        logger.debug("Reading a StoredProcedure. storedProcedureLink [{}]", storedProcedureLink);
        String path = Utils.joinPath(storedProcedureLink, null);
        Map<String, String> requestHeaders = this.getRequestHeaders(options);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Read,
            ResourceType.StoredProcedure, path, requestHeaders, options);
        if (retryPolicyInstance != null){
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, StoredProcedure.class));
    } catch (Exception e) {
        logger.debug("Failure in reading a StoredProcedure due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/** Reads the stored-procedure feed of a collection. */
@Override
public Flux<FeedResponse<StoredProcedure>> readStoredProcedures(String collectionLink,
                                                                FeedOptions options) {
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }
    return readFeed(options, ResourceType.StoredProcedure, StoredProcedure.class,
        Utils.joinPath(collectionLink, Paths.STORED_PROCEDURES_PATH_SEGMENT));
}

/** Convenience overload: wraps the raw query string in a {@link SqlQuerySpec}. */
@Override
public Flux<FeedResponse<StoredProcedure>> queryStoredProcedures(String collectionLink, String query,
                                                                 FeedOptions options) {
    return queryStoredProcedures(collectionLink, new SqlQuerySpec(query), options);
}

/** Queries stored procedures of a collection with a parameterized query spec. */
@Override
public Flux<FeedResponse<StoredProcedure>> queryStoredProcedures(String collectionLink,
                                                                 SqlQuerySpec querySpec, FeedOptions options) {
    return createQuery(collectionLink, querySpec, options, StoredProcedure.class, ResourceType.StoredProcedure);
}
/** Executes a stored procedure with no request options. */
@Override
public Mono<StoredProcedureResponse> executeStoredProcedure(String storedProcedureLink,
                                                            Object[] procedureParams) {
    return this.executeStoredProcedure(storedProcedureLink, null, procedureParams);
}

/** Executes a stored procedure with the given options and parameters. */
@Override
public Mono<StoredProcedureResponse> executeStoredProcedure(String storedProcedureLink,
                                                            RequestOptions options, Object[] procedureParams) {
    DocumentClientRetryPolicy documentClientRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(() -> executeStoredProcedureInternal(storedProcedureLink, options, procedureParams, documentClientRetryPolicy), documentClientRetryPolicy);
}

/**
 * Builds and issues the ExecuteJavaScript request; serialized procedure params form the body
 * (empty string when params are null). Captures the session token from the response.
 */
private Mono<StoredProcedureResponse> executeStoredProcedureInternal(String storedProcedureLink,
                                                                     RequestOptions options, Object[] procedureParams, DocumentClientRetryPolicy retryPolicy) {
    try {
        logger.debug("Executing a StoredProcedure. storedProcedureLink [{}]", storedProcedureLink);
        String path = Utils.joinPath(storedProcedureLink, null);
        Map<String, String> requestHeaders = getRequestHeaders(options);
        requestHeaders.put(HttpConstants.HttpHeaders.ACCEPT, RuntimeConstants.MediaTypes.JSON);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.ExecuteJavaScript,
            ResourceType.StoredProcedure, path,
            procedureParams != null ? RxDocumentClientImpl.serializeProcedureParams(procedureParams) : "",
            requestHeaders, options);
        Mono<RxDocumentServiceRequest> reqObs = addPartitionKeyInformation(request, null, null, options);
        // NOTE(review): the lambda uses the outer `request`, not `req`; this looks like
        // addPartitionKeyInformation mutates and returns the same instance — confirm.
        return reqObs.flatMap(req -> create(request, retryPolicy)
            .map(response -> {
                this.captureSessionToken(request, response);
                return toStoredProcedureResponse(response);
            }));
    } catch (Exception e) {
        logger.debug("Failure in executing a StoredProcedure due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/** Creates a trigger under the given collection. */
@Override
public Mono<ResourceResponse<Trigger>> createTrigger(String collectionLink, Trigger trigger,
                                                     RequestOptions options) {
    DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(() -> createTriggerInternal(collectionLink, trigger, options, retryPolicyInstance), retryPolicyInstance);
}

/** Builds and issues the Create request for a trigger; failures become a failed Mono. */
private Mono<ResourceResponse<Trigger>> createTriggerInternal(String collectionLink, Trigger trigger,
                                                              RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        logger.debug("Creating a Trigger. collectionLink [{}], trigger id [{}]", collectionLink,
            trigger.getId());
        RxDocumentServiceRequest request = getTriggerRequest(collectionLink, trigger, options,
            OperationType.Create);
        if (retryPolicyInstance != null){
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        return this.create(request, retryPolicyInstance).map(response -> toResourceResponse(response, Trigger.class));
    } catch (Exception e) {
        logger.debug("Failure in creating a Trigger due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}

/** Upserts (create-or-replace) a trigger under the given collection. */
@Override
public Mono<ResourceResponse<Trigger>> upsertTrigger(String collectionLink, Trigger trigger,
                                                     RequestOptions options) {
    DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(() -> upsertTriggerInternal(collectionLink, trigger, options, retryPolicyInstance), retryPolicyInstance);
}

/** Builds and issues the Upsert request for a trigger; failures become a failed Mono. */
private Mono<ResourceResponse<Trigger>> upsertTriggerInternal(String collectionLink, Trigger trigger,
                                                              RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        logger.debug("Upserting a Trigger. collectionLink [{}], trigger id [{}]", collectionLink,
            trigger.getId());
        RxDocumentServiceRequest request = getTriggerRequest(collectionLink, trigger, options,
            OperationType.Upsert);
        if (retryPolicyInstance != null){
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        return this.upsert(request, retryPolicyInstance).map(response -> toResourceResponse(response, Trigger.class));
    } catch (Exception e) {
        logger.debug("Failure in upserting a Trigger due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Builds a trigger service request for the given operation (Create/Upsert).
 *
 * @throws IllegalArgumentException when the collection link is empty or the trigger is null
 */
private RxDocumentServiceRequest getTriggerRequest(String collectionLink, Trigger trigger, RequestOptions options,
                                                   OperationType operationType) {
    // Guard clauses: both the target collection and the trigger payload are mandatory.
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }
    if (trigger == null) {
        throw new IllegalArgumentException("trigger");
    }
    RxDocumentClientImpl.validateResource(trigger);

    String resourcePath = Utils.joinPath(collectionLink, Paths.TRIGGERS_PATH_SEGMENT);
    Map<String, String> headers = getRequestHeaders(options);
    return RxDocumentServiceRequest.create(operationType, ResourceType.Trigger, resourcePath,
        trigger, headers, options);
}
/** Replaces an existing trigger (addressed by its self link). */
@Override
public Mono<ResourceResponse<Trigger>> replaceTrigger(Trigger trigger, RequestOptions options) {
    DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(() -> replaceTriggerInternal(trigger, options, retryPolicyInstance), retryPolicyInstance);
}

/** Builds and issues the Replace request for a trigger; failures become a failed Mono. */
private Mono<ResourceResponse<Trigger>> replaceTriggerInternal(Trigger trigger, RequestOptions options,
                                                               DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (trigger == null) {
            throw new IllegalArgumentException("trigger");
        }
        logger.debug("Replacing a Trigger. trigger id [{}]", trigger.getId());
        RxDocumentClientImpl.validateResource(trigger);
        String path = Utils.joinPath(trigger.getSelfLink(), null);
        Map<String, String> requestHeaders = getRequestHeaders(options);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Replace,
            ResourceType.Trigger, path, trigger, requestHeaders, options);
        if (retryPolicyInstance != null){
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        return this.replace(request, retryPolicyInstance).map(response -> toResourceResponse(response, Trigger.class));
    } catch (Exception e) {
        logger.debug("Failure in replacing a Trigger due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}

/** Deletes the trigger identified by {@code triggerLink}. */
@Override
public Mono<ResourceResponse<Trigger>> deleteTrigger(String triggerLink, RequestOptions options) {
    DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(() -> deleteTriggerInternal(triggerLink, options, retryPolicyInstance), retryPolicyInstance);
}

/** Builds and issues the Delete request for a trigger; failures become a failed Mono. */
private Mono<ResourceResponse<Trigger>> deleteTriggerInternal(String triggerLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(triggerLink)) {
            throw new IllegalArgumentException("triggerLink");
        }
        logger.debug("Deleting a Trigger. triggerLink [{}]", triggerLink);
        String path = Utils.joinPath(triggerLink, null);
        Map<String, String> requestHeaders = getRequestHeaders(options);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Delete,
            ResourceType.Trigger, path, requestHeaders, options);
        if (retryPolicyInstance != null){
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        return this.delete(request, retryPolicyInstance).map(response -> toResourceResponse(response, Trigger.class));
    } catch (Exception e) {
        logger.debug("Failure in deleting a Trigger due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/** Reads the trigger identified by {@code triggerLink}. */
@Override
public Mono<ResourceResponse<Trigger>> readTrigger(String triggerLink, RequestOptions options) {
    DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(() -> readTriggerInternal(triggerLink, options, retryPolicyInstance), retryPolicyInstance);
}

/** Builds and issues the Read request for a trigger; failures become a failed Mono. */
private Mono<ResourceResponse<Trigger>> readTriggerInternal(String triggerLink, RequestOptions options,
                                                            DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(triggerLink)) {
            throw new IllegalArgumentException("triggerLink");
        }
        logger.debug("Reading a Trigger. triggerLink [{}]", triggerLink);
        String path = Utils.joinPath(triggerLink, null);
        Map<String, String> requestHeaders = getRequestHeaders(options);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Read,
            ResourceType.Trigger, path, requestHeaders, options);
        if (retryPolicyInstance != null){
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, Trigger.class));
    } catch (Exception e) {
        logger.debug("Failure in reading a Trigger due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}

/** Reads the trigger feed of a collection. */
@Override
public Flux<FeedResponse<Trigger>> readTriggers(String collectionLink, FeedOptions options) {
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }
    return readFeed(options, ResourceType.Trigger, Trigger.class,
        Utils.joinPath(collectionLink, Paths.TRIGGERS_PATH_SEGMENT));
}

/** Convenience overload: wraps the raw query string in a {@link SqlQuerySpec}. */
@Override
public Flux<FeedResponse<Trigger>> queryTriggers(String collectionLink, String query,
                                                 FeedOptions options) {
    return queryTriggers(collectionLink, new SqlQuerySpec(query), options);
}

/** Queries triggers of a collection with a parameterized query spec. */
@Override
public Flux<FeedResponse<Trigger>> queryTriggers(String collectionLink, SqlQuerySpec querySpec,
                                                 FeedOptions options) {
    return createQuery(collectionLink, querySpec, options, Trigger.class, ResourceType.Trigger);
}
/** Creates a user-defined function under the given collection. */
@Override
public Mono<ResourceResponse<UserDefinedFunction>> createUserDefinedFunction(String collectionLink,
                                                                             UserDefinedFunction udf, RequestOptions options) {
    DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(() -> createUserDefinedFunctionInternal(collectionLink, udf, options, retryPolicyInstance), retryPolicyInstance);
}

/** Builds and issues the Create request for a UDF; failures become a failed Mono. */
private Mono<ResourceResponse<UserDefinedFunction>> createUserDefinedFunctionInternal(String collectionLink,
                                                                                      UserDefinedFunction udf, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        logger.debug("Creating a UserDefinedFunction. collectionLink [{}], udf id [{}]", collectionLink,
            udf.getId());
        RxDocumentServiceRequest request = getUserDefinedFunctionRequest(collectionLink, udf, options,
            OperationType.Create);
        if (retryPolicyInstance != null){
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        return this.create(request, retryPolicyInstance).map(response -> toResourceResponse(response, UserDefinedFunction.class));
    } catch (Exception e) {
        logger.debug("Failure in creating a UserDefinedFunction due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}

/** Upserts (create-or-replace) a user-defined function under the given collection. */
@Override
public Mono<ResourceResponse<UserDefinedFunction>> upsertUserDefinedFunction(String collectionLink,
                                                                             UserDefinedFunction udf, RequestOptions options) {
    DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(() -> upsertUserDefinedFunctionInternal(collectionLink, udf, options, retryPolicyInstance), retryPolicyInstance);
}

/** Builds and issues the Upsert request for a UDF; failures become a failed Mono. */
private Mono<ResourceResponse<UserDefinedFunction>> upsertUserDefinedFunctionInternal(String collectionLink,
                                                                                      UserDefinedFunction udf, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        logger.debug("Upserting a UserDefinedFunction. collectionLink [{}], udf id [{}]", collectionLink,
            udf.getId());
        RxDocumentServiceRequest request = getUserDefinedFunctionRequest(collectionLink, udf, options,
            OperationType.Upsert);
        if (retryPolicyInstance != null){
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        return this.upsert(request, retryPolicyInstance).map(response -> toResourceResponse(response, UserDefinedFunction.class));
    } catch (Exception e) {
        logger.debug("Failure in upserting a UserDefinedFunction due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/** Replaces an existing user-defined function (addressed by its self link). */
@Override
public Mono<ResourceResponse<UserDefinedFunction>> replaceUserDefinedFunction(UserDefinedFunction udf,
                                                                              RequestOptions options) {
    DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(() -> replaceUserDefinedFunctionInternal(udf, options, retryPolicyInstance), retryPolicyInstance);
}

/** Builds and issues the Replace request for a UDF; failures become a failed Mono. */
private Mono<ResourceResponse<UserDefinedFunction>> replaceUserDefinedFunctionInternal(UserDefinedFunction udf,
                                                                                       RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (udf == null) {
            throw new IllegalArgumentException("udf");
        }
        logger.debug("Replacing a UserDefinedFunction. udf id [{}]", udf.getId());
        validateResource(udf);
        String path = Utils.joinPath(udf.getSelfLink(), null);
        Map<String, String> requestHeaders = this.getRequestHeaders(options);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Replace,
            ResourceType.UserDefinedFunction, path, udf, requestHeaders, options);
        if (retryPolicyInstance != null){
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        return this.replace(request, retryPolicyInstance).map(response -> toResourceResponse(response, UserDefinedFunction.class));
    } catch (Exception e) {
        logger.debug("Failure in replacing a UserDefinedFunction due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}

/** Deletes the user-defined function identified by {@code udfLink}. */
@Override
public Mono<ResourceResponse<UserDefinedFunction>> deleteUserDefinedFunction(String udfLink,
                                                                             RequestOptions options) {
    DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(() -> deleteUserDefinedFunctionInternal(udfLink, options, retryPolicyInstance), retryPolicyInstance);
}

/** Builds and issues the Delete request for a UDF; failures become a failed Mono. */
private Mono<ResourceResponse<UserDefinedFunction>> deleteUserDefinedFunctionInternal(String udfLink,
                                                                                      RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(udfLink)) {
            throw new IllegalArgumentException("udfLink");
        }
        logger.debug("Deleting a UserDefinedFunction. udfLink [{}]", udfLink);
        String path = Utils.joinPath(udfLink, null);
        Map<String, String> requestHeaders = this.getRequestHeaders(options);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Delete,
            ResourceType.UserDefinedFunction, path, requestHeaders, options);
        if (retryPolicyInstance != null){
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        return this.delete(request, retryPolicyInstance).map(response -> toResourceResponse(response, UserDefinedFunction.class));
    } catch (Exception e) {
        logger.debug("Failure in deleting a UserDefinedFunction due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/** Reads the user-defined function identified by {@code udfLink}. */
@Override
public Mono<ResourceResponse<UserDefinedFunction>> readUserDefinedFunction(String udfLink,
                                                                           RequestOptions options) {
    DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(() -> readUserDefinedFunctionInternal(udfLink, options, retryPolicyInstance), retryPolicyInstance);
}

/** Builds and issues the Read request for a UDF; failures become a failed Mono. */
private Mono<ResourceResponse<UserDefinedFunction>> readUserDefinedFunctionInternal(String udfLink,
                                                                                    RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(udfLink)) {
            throw new IllegalArgumentException("udfLink");
        }
        logger.debug("Reading a UserDefinedFunction. udfLink [{}]", udfLink);
        String path = Utils.joinPath(udfLink, null);
        Map<String, String> requestHeaders = this.getRequestHeaders(options);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Read,
            ResourceType.UserDefinedFunction, path, requestHeaders, options);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, UserDefinedFunction.class));
    } catch (Exception e) {
        logger.debug("Failure in reading a UserDefinedFunction due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}

/** Reads the user-defined-function feed of a collection. */
@Override
public Flux<FeedResponse<UserDefinedFunction>> readUserDefinedFunctions(String collectionLink,
                                                                        FeedOptions options) {
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }
    return readFeed(options, ResourceType.UserDefinedFunction, UserDefinedFunction.class,
        Utils.joinPath(collectionLink, Paths.USER_DEFINED_FUNCTIONS_PATH_SEGMENT));
}

/** Convenience overload: wraps the raw query string in a {@link SqlQuerySpec}. */
@Override
public Flux<FeedResponse<UserDefinedFunction>> queryUserDefinedFunctions(String collectionLink,
                                                                         String query, FeedOptions options) {
    return queryUserDefinedFunctions(collectionLink, new SqlQuerySpec(query), options);
}

/** Queries user-defined functions of a collection with a parameterized query spec. */
@Override
public Flux<FeedResponse<UserDefinedFunction>> queryUserDefinedFunctions(String collectionLink,
                                                                         SqlQuerySpec querySpec, FeedOptions options) {
    return createQuery(collectionLink, querySpec, options, UserDefinedFunction.class, ResourceType.UserDefinedFunction);
}
/** Reads the conflict identified by {@code conflictLink}. */
@Override
public Mono<ResourceResponse<Conflict>> readConflict(String conflictLink, RequestOptions options) {
    DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(() -> readConflictInternal(conflictLink, options, retryPolicyInstance), retryPolicyInstance);
}

/**
 * Builds and issues the Read request for a conflict. Conflicts are partitioned, so
 * partition-key information is resolved asynchronously before the request is sent.
 */
private Mono<ResourceResponse<Conflict>> readConflictInternal(String conflictLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(conflictLink)) {
            throw new IllegalArgumentException("conflictLink");
        }
        logger.debug("Reading a Conflict. conflictLink [{}]", conflictLink);
        String path = Utils.joinPath(conflictLink, null);
        Map<String, String> requestHeaders = getRequestHeaders(options);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Read,
            ResourceType.Conflict, path, requestHeaders, options);
        Mono<RxDocumentServiceRequest> reqObs = addPartitionKeyInformation(request, null, null, options);
        // NOTE(review): the lambda uses the outer `request`, not `req`; this looks like
        // addPartitionKeyInformation mutates and returns the same instance — confirm.
        return reqObs.flatMap(req -> {
            if (retryPolicyInstance != null) {
                retryPolicyInstance.onBeforeSendRequest(request);
            }
            return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, Conflict.class));
        });
    } catch (Exception e) {
        logger.debug("Failure in reading a Conflict due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}

/** Reads the conflict feed of a collection. */
@Override
public Flux<FeedResponse<Conflict>> readConflicts(String collectionLink, FeedOptions options) {
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }
    return readFeed(options, ResourceType.Conflict, Conflict.class,
        Utils.joinPath(collectionLink, Paths.CONFLICTS_PATH_SEGMENT));
}

/** Convenience overload: wraps the raw query string in a {@link SqlQuerySpec}. */
@Override
public Flux<FeedResponse<Conflict>> queryConflicts(String collectionLink, String query,
                                                   FeedOptions options) {
    return queryConflicts(collectionLink, new SqlQuerySpec(query), options);
}

/** Queries conflicts of a collection with a parameterized query spec. */
@Override
public Flux<FeedResponse<Conflict>> queryConflicts(String collectionLink, SqlQuerySpec querySpec,
                                                   FeedOptions options) {
    return createQuery(collectionLink, querySpec, options, Conflict.class, ResourceType.Conflict);
}
/** Deletes the conflict identified by {@code conflictLink}. */
@Override
public Mono<ResourceResponse<Conflict>> deleteConflict(String conflictLink, RequestOptions options) {
    DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(() -> deleteConflictInternal(conflictLink, options, retryPolicyInstance), retryPolicyInstance);
}

/**
 * Builds and issues the Delete request for a conflict; partition-key information is
 * resolved asynchronously before the request is sent.
 */
private Mono<ResourceResponse<Conflict>> deleteConflictInternal(String conflictLink, RequestOptions options,
                                                                DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(conflictLink)) {
            throw new IllegalArgumentException("conflictLink");
        }
        logger.debug("Deleting a Conflict. conflictLink [{}]", conflictLink);
        String path = Utils.joinPath(conflictLink, null);
        Map<String, String> requestHeaders = getRequestHeaders(options);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Delete,
            ResourceType.Conflict, path, requestHeaders, options);
        Mono<RxDocumentServiceRequest> reqObs = addPartitionKeyInformation(request, null, null, options);
        return reqObs.flatMap(req -> {
            if (retryPolicyInstance != null) {
                retryPolicyInstance.onBeforeSendRequest(request);
            }
            return this.delete(request, retryPolicyInstance).map(response -> toResourceResponse(response, Conflict.class));
        });
    } catch (Exception e) {
        logger.debug("Failure in deleting a Conflict due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/** Creates a user under the given database. */
@Override
public Mono<ResourceResponse<User>> createUser(String databaseLink, User user, RequestOptions options) {
    DocumentClientRetryPolicy documentClientRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(() -> createUserInternal(databaseLink, user, options, documentClientRetryPolicy), documentClientRetryPolicy);
}

/** Builds and issues the Create request for a user; failures become a failed Mono. */
private Mono<ResourceResponse<User>> createUserInternal(String databaseLink, User user, RequestOptions options, DocumentClientRetryPolicy documentClientRetryPolicy) {
    try {
        logger.debug("Creating a User. databaseLink [{}], user id [{}]", databaseLink, user.getId());
        RxDocumentServiceRequest request = getUserRequest(databaseLink, user, options, OperationType.Create);
        return this.create(request, documentClientRetryPolicy).map(response -> toResourceResponse(response, User.class));
    } catch (Exception e) {
        logger.debug("Failure in creating a User due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}

/** Upserts (create-or-replace) a user under the given database. */
@Override
public Mono<ResourceResponse<User>> upsertUser(String databaseLink, User user, RequestOptions options) {
    DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(() -> upsertUserInternal(databaseLink, user, options, retryPolicyInstance), retryPolicyInstance);
}

/** Builds and issues the Upsert request for a user; failures become a failed Mono. */
private Mono<ResourceResponse<User>> upsertUserInternal(String databaseLink, User user, RequestOptions options,
                                                        DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        logger.debug("Upserting a User. databaseLink [{}], user id [{}]", databaseLink, user.getId());
        RxDocumentServiceRequest request = getUserRequest(databaseLink, user, options, OperationType.Upsert);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        return this.upsert(request, retryPolicyInstance).map(response -> toResourceResponse(response, User.class));
    } catch (Exception e) {
        logger.debug("Failure in upserting a User due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Builds a user service request for the given operation (Create/Upsert).
 *
 * @throws IllegalArgumentException when the database link is empty or the user is null
 */
private RxDocumentServiceRequest getUserRequest(String databaseLink, User user, RequestOptions options,
                                                OperationType operationType) {
    // Guard clauses: both the target database and the user payload are mandatory.
    if (StringUtils.isEmpty(databaseLink)) {
        throw new IllegalArgumentException("databaseLink");
    }
    if (user == null) {
        throw new IllegalArgumentException("user");
    }
    RxDocumentClientImpl.validateResource(user);

    String resourcePath = Utils.joinPath(databaseLink, Paths.USERS_PATH_SEGMENT);
    Map<String, String> headers = getRequestHeaders(options);
    return RxDocumentServiceRequest.create(operationType, ResourceType.User, resourcePath, user,
        headers, options);
}
/** Replaces an existing user (addressed by its self link). */
@Override
public Mono<ResourceResponse<User>> replaceUser(User user, RequestOptions options) {
    DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(() -> replaceUserInternal(user, options, retryPolicyInstance), retryPolicyInstance);
}

/** Builds and issues the Replace request for a user; failures become a failed Mono. */
private Mono<ResourceResponse<User>> replaceUserInternal(User user, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (user == null) {
            throw new IllegalArgumentException("user");
        }
        logger.debug("Replacing a User. user id [{}]", user.getId());
        RxDocumentClientImpl.validateResource(user);
        String path = Utils.joinPath(user.getSelfLink(), null);
        Map<String, String> requestHeaders = getRequestHeaders(options);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Replace,
            ResourceType.User, path, user, requestHeaders, options);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        return this.replace(request, retryPolicyInstance).map(response -> toResourceResponse(response, User.class));
    } catch (Exception e) {
        logger.debug("Failure in replacing a User due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Deletes the user identified by {@code userLink}.
 * Added {@code @Override} for consistency with every other facade method in this class
 * (assumes AsyncDocumentClient declares deleteUser, as it does readUser/replaceUser — confirm).
 */
@Override
public Mono<ResourceResponse<User>> deleteUser(String userLink, RequestOptions options) {
    DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(() -> deleteUserInternal(userLink, options, retryPolicyInstance), retryPolicyInstance);
}

/** Builds and issues the Delete request for a user; failures become a failed Mono. */
private Mono<ResourceResponse<User>> deleteUserInternal(String userLink, RequestOptions options,
                                                        DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(userLink)) {
            throw new IllegalArgumentException("userLink");
        }
        logger.debug("Deleting a User. userLink [{}]", userLink);
        String path = Utils.joinPath(userLink, null);
        Map<String, String> requestHeaders = getRequestHeaders(options);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Delete,
            ResourceType.User, path, requestHeaders, options);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        return this.delete(request, retryPolicyInstance).map(response -> toResourceResponse(response, User.class));
    } catch (Exception e) {
        logger.debug("Failure in deleting a User due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/** Reads the user identified by {@code userLink}. */
@Override
public Mono<ResourceResponse<User>> readUser(String userLink, RequestOptions options) {
    DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(() -> readUserInternal(userLink, options, retryPolicyInstance), retryPolicyInstance);
}

/** Builds and issues the Read request for a user; failures become a failed Mono. */
private Mono<ResourceResponse<User>> readUserInternal(String userLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(userLink)) {
            throw new IllegalArgumentException("userLink");
        }
        logger.debug("Reading a User. userLink [{}]", userLink);
        String path = Utils.joinPath(userLink, null);
        Map<String, String> requestHeaders = getRequestHeaders(options);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Read,
            ResourceType.User, path, requestHeaders, options);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, User.class));
    } catch (Exception e) {
        logger.debug("Failure in reading a User due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}

/** Reads the user feed of a database. */
@Override
public Flux<FeedResponse<User>> readUsers(String databaseLink, FeedOptions options) {
    if (StringUtils.isEmpty(databaseLink)) {
        throw new IllegalArgumentException("databaseLink");
    }
    return readFeed(options, ResourceType.User, User.class,
        Utils.joinPath(databaseLink, Paths.USERS_PATH_SEGMENT));
}

/** Convenience overload: wraps the raw query string in a {@link SqlQuerySpec}. */
@Override
public Flux<FeedResponse<User>> queryUsers(String databaseLink, String query, FeedOptions options) {
    return queryUsers(databaseLink, new SqlQuerySpec(query), options);
}

/** Queries users of a database with a parameterized query spec. */
@Override
public Flux<FeedResponse<User>> queryUsers(String databaseLink, SqlQuerySpec querySpec,
                                           FeedOptions options) {
    return createQuery(databaseLink, querySpec, options, User.class, ResourceType.User);
}
/**
 * Creates a permission under the given user.
 * Fix: the retry driver previously received a SECOND, freshly created retry policy
 * ({@code this.resetSessionTokenRetryPolicy.getRequestPolicy()}) while the internal call
 * captured the first instance, so retry callbacks and the in-flight request operated on
 * different policy objects. The same instance is now shared, matching every other facade
 * method in this class.
 */
@Override
public Mono<ResourceResponse<Permission>> createPermission(String userLink, Permission permission,
                                                           RequestOptions options) {
    DocumentClientRetryPolicy documentClientRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(() -> createPermissionInternal(userLink, permission, options, documentClientRetryPolicy), documentClientRetryPolicy);
}

/** Builds and issues the Create request for a permission; failures become a failed Mono. */
private Mono<ResourceResponse<Permission>> createPermissionInternal(String userLink, Permission permission,
                                                                    RequestOptions options, DocumentClientRetryPolicy documentClientRetryPolicy) {
    try {
        logger.debug("Creating a Permission. userLink [{}], permission id [{}]", userLink, permission.getId());
        RxDocumentServiceRequest request = getPermissionRequest(userLink, permission, options,
            OperationType.Create);
        return this.create(request, documentClientRetryPolicy).map(response -> toResourceResponse(response, Permission.class));
    } catch (Exception e) {
        logger.debug("Failure in creating a Permission due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/** Upserts (create-or-replace) a permission under the given user. */
@Override
public Mono<ResourceResponse<Permission>> upsertPermission(String userLink, Permission permission,
                                                           RequestOptions options) {
    DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(() -> upsertPermissionInternal(userLink, permission, options, retryPolicyInstance), retryPolicyInstance);
}

/** Builds and issues the Upsert request for a permission; failures become a failed Mono. */
private Mono<ResourceResponse<Permission>> upsertPermissionInternal(String userLink, Permission permission,
                                                                    RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        logger.debug("Upserting a Permission. userLink [{}], permission id [{}]", userLink, permission.getId());
        RxDocumentServiceRequest request = getPermissionRequest(userLink, permission, options,
            OperationType.Upsert);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        return this.upsert(request, retryPolicyInstance).map(response -> toResourceResponse(response, Permission.class));
    } catch (Exception e) {
        logger.debug("Failure in upserting a Permission due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
private RxDocumentServiceRequest getPermissionRequest(String userLink, Permission permission,
RequestOptions options, OperationType operationType) {
if (StringUtils.isEmpty(userLink)) {
throw new IllegalArgumentException("userLink");
}
if (permission == null) {
throw new IllegalArgumentException("permission");
}
RxDocumentClientImpl.validateResource(permission);
String path = Utils.joinPath(userLink, Paths.PERMISSIONS_PATH_SEGMENT);
Map<String, String> requestHeaders = getRequestHeaders(options);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(operationType, ResourceType.Permission, path,
permission, requestHeaders, options);
return request;
}
/**
 * Replaces an existing permission. The permission's self link identifies the
 * target resource.
 */
@Override
public Mono<ResourceResponse<Permission>> replacePermission(Permission permission, RequestOptions options) {
DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy();
return ObservableHelper.inlineIfPossibleAsObs(() -> replacePermissionInternal(permission, options, retryPolicyInstance), retryPolicyInstance);
}
// Builds and issues the Replace request addressed by the permission's self link.
private Mono<ResourceResponse<Permission>> replacePermissionInternal(Permission permission, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (permission == null) {
throw new IllegalArgumentException("permission");
}
logger.debug("Replacing a Permission. permission id [{}]", permission.getId());
RxDocumentClientImpl.validateResource(permission);
String path = Utils.joinPath(permission.getSelfLink(), null);
Map<String, String> requestHeaders = getRequestHeaders(options);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Replace,
ResourceType.Permission, path, permission, requestHeaders, options);
// Let the retry policy observe/route the request before it is sent.
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.replace(request, retryPolicyInstance).map(response -> toResourceResponse(response, Permission.class));
} catch (Exception e) {
logger.debug("Failure in replacing a Permission due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
/**
 * Deletes the permission addressed by {@code permissionLink}.
 */
@Override
public Mono<ResourceResponse<Permission>> deletePermission(String permissionLink, RequestOptions options) {
DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy();
return ObservableHelper.inlineIfPossibleAsObs(() -> deletePermissionInternal(permissionLink, options, retryPolicyInstance), retryPolicyInstance);
}
// Builds and issues the Delete request for a permission link.
private Mono<ResourceResponse<Permission>> deletePermissionInternal(String permissionLink, RequestOptions options,
DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (StringUtils.isEmpty(permissionLink)) {
throw new IllegalArgumentException("permissionLink");
}
logger.debug("Deleting a Permission. permissionLink [{}]", permissionLink);
String path = Utils.joinPath(permissionLink, null);
Map<String, String> requestHeaders = getRequestHeaders(options);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Delete,
ResourceType.Permission, path, requestHeaders, options);
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.delete(request, retryPolicyInstance).map(response -> toResourceResponse(response, Permission.class));
} catch (Exception e) {
logger.debug("Failure in deleting a Permission due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
/**
 * Reads the permission addressed by {@code permissionLink}.
 */
@Override
public Mono<ResourceResponse<Permission>> readPermission(String permissionLink, RequestOptions options) {
DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy();
return ObservableHelper.inlineIfPossibleAsObs(() -> readPermissionInternal(permissionLink, options, retryPolicyInstance), retryPolicyInstance);
}
// Builds and issues the Read request for a permission link.
private Mono<ResourceResponse<Permission>> readPermissionInternal(String permissionLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance ) {
try {
if (StringUtils.isEmpty(permissionLink)) {
throw new IllegalArgumentException("permissionLink");
}
logger.debug("Reading a Permission. permissionLink [{}]", permissionLink);
String path = Utils.joinPath(permissionLink, null);
Map<String, String> requestHeaders = getRequestHeaders(options);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Read,
ResourceType.Permission, path, requestHeaders, options);
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, Permission.class));
} catch (Exception e) {
logger.debug("Failure in reading a Permission due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
/**
 * Reads the feed of permissions under the given user.
 *
 * @throws IllegalArgumentException if {@code userLink} is empty
 */
@Override
public Flux<FeedResponse<Permission>> readPermissions(String userLink, FeedOptions options) {
if (StringUtils.isEmpty(userLink)) {
throw new IllegalArgumentException("userLink");
}
return readFeed(options, ResourceType.Permission, Permission.class,
Utils.joinPath(userLink, Paths.PERMISSIONS_PATH_SEGMENT));
}
/** Queries permissions with a raw SQL string; delegates to the spec overload. */
@Override
public Flux<FeedResponse<Permission>> queryPermissions(String userLink, String query,
FeedOptions options) {
return queryPermissions(userLink, new SqlQuerySpec(query), options);
}
/** Queries permissions under the given user with a parameterized query spec. */
@Override
public Flux<FeedResponse<Permission>> queryPermissions(String userLink, SqlQuerySpec querySpec,
FeedOptions options) {
return createQuery(userLink, querySpec, options, Permission.class, ResourceType.Permission);
}
/**
 * Replaces an offer (throughput descriptor). Offers carry no per-request
 * options or headers here — the request is created with null headers/options.
 */
@Override
public Mono<ResourceResponse<Offer>> replaceOffer(Offer offer) {
DocumentClientRetryPolicy documentClientRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
return ObservableHelper.inlineIfPossibleAsObs(() -> replaceOfferInternal(offer, documentClientRetryPolicy), documentClientRetryPolicy);
}
// Builds and issues the Replace request addressed by the offer's self link.
private Mono<ResourceResponse<Offer>> replaceOfferInternal(Offer offer, DocumentClientRetryPolicy documentClientRetryPolicy) {
try {
if (offer == null) {
throw new IllegalArgumentException("offer");
}
logger.debug("Replacing an Offer. offer id [{}]", offer.getId());
RxDocumentClientImpl.validateResource(offer);
String path = Utils.joinPath(offer.getSelfLink(), null);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Replace,
ResourceType.Offer, path, offer, null, null);
return this.replace(request, documentClientRetryPolicy).map(response -> toResourceResponse(response, Offer.class));
} catch (Exception e) {
logger.debug("Failure in replacing an Offer due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
/**
 * Reads the offer addressed by {@code offerLink}.
 */
@Override
public Mono<ResourceResponse<Offer>> readOffer(String offerLink) {
DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy();
return ObservableHelper.inlineIfPossibleAsObs(() -> readOfferInternal(offerLink, retryPolicyInstance), retryPolicyInstance);
}
// Builds and issues the Read request for an offer link.
private Mono<ResourceResponse<Offer>> readOfferInternal(String offerLink, DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (StringUtils.isEmpty(offerLink)) {
throw new IllegalArgumentException("offerLink");
}
logger.debug("Reading an Offer. offerLink [{}]", offerLink);
String path = Utils.joinPath(offerLink, null);
// The (HashMap) cast disambiguates between overloads of create().
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Read,
ResourceType.Offer, path, (HashMap<String, String>)null, null);
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, Offer.class));
} catch (Exception e) {
logger.debug("Failure in reading an Offer due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
/** Reads the feed of all offers for this account. */
@Override
public Flux<FeedResponse<Offer>> readOffers(FeedOptions options) {
return readFeed(options, ResourceType.Offer, Offer.class,
Utils.joinPath(Paths.OFFERS_PATH_SEGMENT, null));
}
/**
 * Reads a paged feed of resources that live UNDER a collection (e.g. documents).
 * Unlike {@link #readFeed}, each page request first resolves the owning
 * collection and enriches the request with partition-key information before
 * execution.
 *
 * @param options      feed options; a default instance is used when null
 * @param resourceType the type of child resource being read
 * @param klass        the POJO type each feed item is deserialized into
 * @param resourceLink link of the feed to read
 */
private <T extends Resource> Flux<FeedResponse<T>> readFeedCollectionChild(FeedOptions options, ResourceType resourceType,
Class<T> klass, String resourceLink) {
if (options == null) {
options = new FeedOptions();
}
// -1 signals "server default page size".
int maxPageSize = options.maxItemCount() != null ? options.maxItemCount() : -1;
final FeedOptions finalFeedOptions = options;
RequestOptions requestOptions = new RequestOptions();
requestOptions.setPartitionKey(options.partitionKey());
// Creates one ReadFeed request per page; the continuation token threads the
// server-side cursor between pages.
BiFunction<String, Integer, RxDocumentServiceRequest> createRequestFunc = (continuationToken, pageSize) -> {
Map<String, String> requestHeaders = new HashMap<>();
if (continuationToken != null) {
requestHeaders.put(HttpConstants.HttpHeaders.CONTINUATION, continuationToken);
}
requestHeaders.put(HttpConstants.HttpHeaders.PAGE_SIZE, Integer.toString(pageSize));
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.ReadFeed,
resourceType, resourceLink, requestHeaders, finalFeedOptions);
return request;
};
// Executes a page request: resolve the collection, add partition-key info,
// then read the feed and convert the raw response into a typed page.
Function<RxDocumentServiceRequest, Mono<FeedResponse<T>>> executeFunc = request -> {
return ObservableHelper.inlineIfPossibleAsObs(() -> {
Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = this.collectionCache.resolveCollectionAsync(request);
Mono<RxDocumentServiceRequest> requestObs = this.addPartitionKeyInformation(request, null, null, requestOptions, collectionObs);
return requestObs.flatMap(req -> this.readFeed(req)
.map(response -> toFeedResponsePage(response, klass)));
}, this.resetSessionTokenRetryPolicy.getRequestPolicy());
};
return Paginator.getPaginatedQueryResultAsObservable(options, createRequestFunc, executeFunc, klass, maxPageSize);
}
/**
 * Reads a paged feed of top-level resources (databases, collections, offers,
 * users, ...). Same pagination scheme as {@link #readFeedCollectionChild} but
 * without collection resolution / partition-key enrichment.
 */
private <T extends Resource> Flux<FeedResponse<T>> readFeed(FeedOptions options, ResourceType resourceType, Class<T> klass, String resourceLink) {
if (options == null) {
options = new FeedOptions();
}
// -1 signals "server default page size".
int maxPageSize = options.maxItemCount() != null ? options.maxItemCount() : -1;
final FeedOptions finalFeedOptions = options;
BiFunction<String, Integer, RxDocumentServiceRequest> createRequestFunc = (continuationToken, pageSize) -> {
Map<String, String> requestHeaders = new HashMap<>();
if (continuationToken != null) {
requestHeaders.put(HttpConstants.HttpHeaders.CONTINUATION, continuationToken);
}
requestHeaders.put(HttpConstants.HttpHeaders.PAGE_SIZE, Integer.toString(pageSize));
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.ReadFeed,
resourceType, resourceLink, requestHeaders, finalFeedOptions);
return request;
};
Function<RxDocumentServiceRequest, Mono<FeedResponse<T>>> executeFunc = request -> {
return ObservableHelper.inlineIfPossibleAsObs(() -> readFeed(request).map(response -> toFeedResponsePage(response, klass)),
this.resetSessionTokenRetryPolicy.getRequestPolicy());
};
return Paginator.getPaginatedQueryResultAsObservable(options, createRequestFunc, executeFunc, klass, maxPageSize);
}
/**
 * Queries offers with a raw SQL string.
 * Delegates to the {@link SqlQuerySpec} overload so both entry points share
 * one implementation.
 */
@Override
public Flux<FeedResponse<Offer>> queryOffers(String query, FeedOptions options) {
    SqlQuerySpec spec = new SqlQuerySpec(query);
    return queryOffers(spec, options);
}

/**
 * Queries offers with a parameterized query spec. Offers are account-scoped,
 * so no parent resource link is supplied.
 */
@Override
public Flux<FeedResponse<Offer>> queryOffers(SqlQuerySpec querySpec, FeedOptions options) {
    return createQuery(null, querySpec, options, Offer.class, ResourceType.Offer);
}
/**
 * Reads the account-level {@link DatabaseAccount} resource.
 */
@Override
public Mono<DatabaseAccount> getDatabaseAccount() {
DocumentClientRetryPolicy documentClientRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
return ObservableHelper.inlineIfPossibleAsObs(() -> getDatabaseAccountInternal(documentClientRetryPolicy), documentClientRetryPolicy);
}
// Builds and issues the account Read request (empty path, no headers/options).
private Mono<DatabaseAccount> getDatabaseAccountInternal(DocumentClientRetryPolicy documentClientRetryPolicy) {
try {
logger.debug("Getting Database Account");
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Read,
ResourceType.DatabaseAccount, "",
(HashMap<String, String>) null,
null);
return this.read(request, documentClientRetryPolicy).map(response -> toDatabaseAccount(response));
} catch (Exception e) {
logger.debug("Failure in getting Database Account due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
// Exposes the session container as an opaque Object (see setSession).
public Object getSession() {
return this.sessionContainer;
}
// NOTE(review): unchecked downcast — callers must pass a SessionContainer.
public void setSession(Object sessionContainer) {
this.sessionContainer = (SessionContainer) sessionContainer;
}
public RxPartitionKeyRangeCache getPartitionKeyRangeCache() {
return partitionKeyRangeCache;
}
/**
 * Fetches the DatabaseAccount directly from a specific endpoint (bypassing
 * endpoint selection) via the gateway proxy. As a side effect, refreshes
 * {@code useMultipleWriteLocations} from the returned account and the
 * connection policy.
 */
public Flux<DatabaseAccount> getDatabaseAccountFromEndpoint(URI endpoint) {
return Flux.defer(() -> {
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Read,
ResourceType.DatabaseAccount, "", null, (Object) null);
this.populateHeaders(request, RequestVerb.GET);
// Pin the request to the requested endpoint instead of the resolved one.
request.setEndpointOverride(endpoint);
return this.gatewayProxy.processMessage(request).doOnError(e -> {
String message = String.format("Failed to retrieve database account information. %s",
e.getCause() != null
? e.getCause().toString()
: e.toString());
logger.warn(message);
}).map(rsp -> rsp.getResource(DatabaseAccount.class))
.doOnNext(databaseAccount -> {
this.useMultipleWriteLocations = this.connectionPolicy.getUsingMultipleWriteLocations()
&& BridgeInternal.isEnableMultipleWriteLocations(databaseAccount);
});
});
}
/**
 * Certain requests must be routed through gateway even when the client connectivity mode is direct.
 *
 * @param request the request being routed
 * @return the store model to execute the request against: the gateway proxy
 *         for metadata-style operations, otherwise the direct store model
 */
private RxStoreModel getStoreProxy(RxDocumentServiceRequest request) {
// Explicit per-request override always wins.
if (request.UseGatewayMode) {
return this.gatewayProxy;
}
ResourceType resourceType = request.getResourceType();
OperationType operationType = request.getOperationType();
// Offers, partition-key ranges, and script CRUD (everything except script
// execution) are gateway-only.
if (resourceType == ResourceType.Offer ||
resourceType.isScript() && operationType != OperationType.ExecuteJavaScript ||
resourceType == ResourceType.PartitionKeyRange) {
return this.gatewayProxy;
}
// Create/Upsert of "metadata" resources goes through gateway; data-plane
// resources (documents, attachments, ...) go direct.
if (operationType == OperationType.Create
|| operationType == OperationType.Upsert) {
if (resourceType == ResourceType.Database ||
resourceType == ResourceType.User ||
resourceType == ResourceType.DocumentCollection ||
resourceType == ResourceType.Permission) {
return this.gatewayProxy;
} else {
return this.storeModel;
}
} else if (operationType == OperationType.Delete) {
if (resourceType == ResourceType.Database ||
resourceType == ResourceType.User ||
resourceType == ResourceType.DocumentCollection) {
return this.gatewayProxy;
} else {
return this.storeModel;
}
} else if (operationType == OperationType.Replace) {
if (resourceType == ResourceType.DocumentCollection) {
return this.gatewayProxy;
} else {
return this.storeModel;
}
} else if (operationType == OperationType.Read) {
if (resourceType == ResourceType.DocumentCollection) {
return this.gatewayProxy;
} else {
return this.storeModel;
}
} else {
// Cross-partition queries without a resolved partition-key range must go
// through gateway so the range can be determined.
if ((request.getOperationType() == OperationType.Query || request.getOperationType() == OperationType.SqlQuery) &&
Utils.isCollectionChild(request.getResourceType())) {
if (request.getPartitionKeyRangeIdentity() == null) {
return this.gatewayProxy;
}
}
return this.storeModel;
}
}
/**
 * Releases client resources: the global endpoint manager, the store client
 * factory, and the HTTP client. Shutdown failures of the HTTP client are
 * logged but not rethrown so the remaining teardown always completes.
 */
@Override
public void close() {
logger.info("Shutting down ...");
logger.info("Closing Global Endpoint Manager ...");
LifeCycleUtils.closeQuietly(this.globalEndpointManager);
logger.info("Closing StoreClientFactory ...");
LifeCycleUtils.closeQuietly(this.storeClientFactory);
logger.info("Shutting down reactorHttpClient ...");
try {
this.reactorHttpClient.shutdown();
} catch (Exception e) {
logger.warn("shutting down reactorHttpClient failed", e);
}
logger.info("Shutting down completed.");
}
}
class RxDocumentClientImpl implements AsyncDocumentClient, IAuthorizationTokenProvider {
// Shared JSON mapper for (de)serialization.
private final static ObjectMapper mapper = Utils.getSimpleObjectMapper();
private final Logger logger = LoggerFactory.getLogger(RxDocumentClientImpl.class);
// Credential material: exactly one of masterKeyOrResourceToken / cosmosKeyCredential /
// resourceTokensMap / tokenResolver drives authorization (see constructor).
private final String masterKeyOrResourceToken;
private final URI serviceEndpoint;
private final ConnectionPolicy connectionPolicy;
private final ConsistencyLevel consistencyLevel;
private final BaseAuthorizationTokenProvider authorizationTokenProvider;
private final UserAgentContainer userAgentContainer;
// True when masterKeyOrResourceToken holds a resource token rather than a master key.
private final boolean hasAuthKeyResourceToken;
private final Configs configs;
// When true, the gateway HTTP client is shared across client instances.
private final boolean connectionSharingAcrossClientsEnabled;
private CosmosKeyCredential cosmosKeyCredential;
private TokenResolver tokenResolver;
private SessionContainer sessionContainer;
private String firstResourceTokenFromPermissionFeed = StringUtils.EMPTY;
// Caches populated in init().
private RxClientCollectionCache collectionCache;
private RxStoreModel gatewayProxy;
// Either the gateway proxy (GATEWAY mode) or the direct server store model.
private RxStoreModel storeModel;
private GlobalAddressResolver addressResolver;
private RxPartitionKeyRangeCache partitionKeyRangeCache;
// Resource-token auth: resourceId/fullName -> (partition key, token) pairs.
private Map<String, List<PartitionKeyAndResourceTokenPair>> resourceTokensMap;
private IRetryPolicyFactory resetSessionTokenRetryPolicy;
/**
 * Compatibility mode: Allows to specify compatibility mode used by client when
 * making query requests. Should be removed when application/sql is no longer
 * supported.
 */
private final QueryCompatibilityMode queryCompatibilityMode = QueryCompatibilityMode.Default;
private final HttpClient reactorHttpClient;
private final GlobalEndpointManager globalEndpointManager;
private final RetryPolicy retryPolicy;
// Volatile: refreshed from database-account reads on a different thread.
private volatile boolean useMultipleWriteLocations;
private StoreClientFactory storeClientFactory;
private GatewayServiceConfigurationReader gatewayConfigurationReader;
/**
 * Full constructor: delegates to the permission-feed constructor and then
 * records the custom token resolver (highest-priority auth mechanism).
 */
public RxDocumentClientImpl(URI serviceEndpoint,
String masterKeyOrResourceToken,
List<Permission> permissionFeed,
ConnectionPolicy connectionPolicy,
ConsistencyLevel consistencyLevel,
Configs configs,
TokenResolver tokenResolver,
CosmosKeyCredential cosmosKeyCredential,
boolean sessionCapturingOverride,
boolean connectionSharingAcrossClientsEnabled) {
this(serviceEndpoint, masterKeyOrResourceToken, permissionFeed, connectionPolicy, consistencyLevel, configs, cosmosKeyCredential, sessionCapturingOverride, connectionSharingAcrossClientsEnabled);
this.tokenResolver = tokenResolver;
}
/**
 * Permission-feed constructor: builds the resource-token map used for
 * resource-token authorization. Each permission's resource link is parsed and
 * its (partition key, token) pair is indexed by resource id / full name.
 *
 * @throws IllegalArgumentException if a resource link cannot be parsed or the
 *                                  resulting token map is empty
 */
private RxDocumentClientImpl(URI serviceEndpoint,
String masterKeyOrResourceToken,
List<Permission> permissionFeed,
ConnectionPolicy connectionPolicy,
ConsistencyLevel consistencyLevel,
Configs configs,
CosmosKeyCredential cosmosKeyCredential,
boolean sessionCapturingOverrideEnabled,
boolean connectionSharingAcrossClientsEnabled) {
this(serviceEndpoint, masterKeyOrResourceToken, connectionPolicy, consistencyLevel, configs, cosmosKeyCredential, sessionCapturingOverrideEnabled, connectionSharingAcrossClientsEnabled);
if (permissionFeed != null && permissionFeed.size() > 0) {
this.resourceTokensMap = new HashMap<>();
for (Permission permission : permissionFeed) {
String[] segments = StringUtils.split(permission.getResourceLink(),
Constants.Properties.PATH_SEPARATOR.charAt(0));
if (segments.length <= 0) {
throw new IllegalArgumentException("resourceLink");
}
List<PartitionKeyAndResourceTokenPair> partitionKeyAndResourceTokenPairs = null;
PathInfo pathInfo = new PathInfo(false, StringUtils.EMPTY, StringUtils.EMPTY, false);
if (!PathsHelper.tryParsePathSegments(permission.getResourceLink(), pathInfo, null)) {
throw new IllegalArgumentException(permission.getResourceLink());
}
// Group tokens by resource; one resource may have tokens for several partition keys.
partitionKeyAndResourceTokenPairs = resourceTokensMap.get(pathInfo.resourceIdOrFullName);
if (partitionKeyAndResourceTokenPairs == null) {
partitionKeyAndResourceTokenPairs = new ArrayList<>();
this.resourceTokensMap.put(pathInfo.resourceIdOrFullName, partitionKeyAndResourceTokenPairs);
}
PartitionKey partitionKey = permission.getResourcePartitionKey();
partitionKeyAndResourceTokenPairs.add(new PartitionKeyAndResourceTokenPair(
partitionKey != null ? BridgeInternal.getPartitionKeyInternal(partitionKey) : PartitionKeyInternal.Empty,
permission.getToken()));
logger.debug("Initializing resource token map , with map key [{}] , partition key [{}] and resource token",
pathInfo.resourceIdOrFullName, partitionKey != null ? partitionKey.toString() : null, permission.getToken());
}
if(this.resourceTokensMap.isEmpty()) {
throw new IllegalArgumentException("permissionFeed");
}
// Keep the first resource token as a fallback for requests that cannot be
// matched against the map.
String firstToken = permissionFeed.get(0).getToken();
if(ResourceTokenAuthorizationHelper.isResourceToken(firstToken)) {
this.firstResourceTokenFromPermissionFeed = firstToken;
}
}
}
/**
 * Base constructor: wires credentials, connection policy, session container,
 * user agent, HTTP client, endpoint manager and retry policy. Heavy
 * initialization is deferred to {@link #init()}.
 */
RxDocumentClientImpl(URI serviceEndpoint,
String masterKeyOrResourceToken,
ConnectionPolicy connectionPolicy,
ConsistencyLevel consistencyLevel,
Configs configs,
CosmosKeyCredential cosmosKeyCredential,
boolean sessionCapturingOverrideEnabled,
boolean connectionSharingAcrossClientsEnabled) {
logger.info(
"Initializing DocumentClient with"
+ " serviceEndpoint [{}], connectionPolicy [{}], consistencyLevel [{}], directModeProtocol [{}]",
serviceEndpoint, connectionPolicy, consistencyLevel, configs.getProtocol());
this.connectionSharingAcrossClientsEnabled = connectionSharingAcrossClientsEnabled;
this.configs = configs;
this.masterKeyOrResourceToken = masterKeyOrResourceToken;
this.serviceEndpoint = serviceEndpoint;
this.cosmosKeyCredential = cosmosKeyCredential;
// Credential precedence: explicit key credential > resource token > master key > none.
if (this.cosmosKeyCredential != null) {
hasAuthKeyResourceToken = false;
this.authorizationTokenProvider = new BaseAuthorizationTokenProvider(this.cosmosKeyCredential);
} else if (masterKeyOrResourceToken != null && ResourceTokenAuthorizationHelper.isResourceToken(masterKeyOrResourceToken)) {
this.authorizationTokenProvider = null;
hasAuthKeyResourceToken = true;
} else if(masterKeyOrResourceToken != null && !ResourceTokenAuthorizationHelper.isResourceToken(masterKeyOrResourceToken)){
// Master key: wrap it in a key credential so both paths share one provider.
this.cosmosKeyCredential = new CosmosKeyCredential(this.masterKeyOrResourceToken);
hasAuthKeyResourceToken = false;
this.authorizationTokenProvider = new BaseAuthorizationTokenProvider(this.cosmosKeyCredential);
} else {
hasAuthKeyResourceToken = false;
this.authorizationTokenProvider = null;
}
if (connectionPolicy != null) {
this.connectionPolicy = connectionPolicy;
} else {
this.connectionPolicy = new ConnectionPolicy();
}
// Session capturing is only needed for SESSION consistency unless explicitly forced on.
boolean disableSessionCapturing = (ConsistencyLevel.SESSION != consistencyLevel && !sessionCapturingOverrideEnabled);
this.sessionContainer = new SessionContainer(this.serviceEndpoint.getHost(), disableSessionCapturing);
this.consistencyLevel = consistencyLevel;
this.userAgentContainer = new UserAgentContainer();
String userAgentSuffix = this.connectionPolicy.getUserAgentSuffix();
if (userAgentSuffix != null && userAgentSuffix.length() > 0) {
userAgentContainer.setSuffix(userAgentSuffix);
}
this.reactorHttpClient = httpClient();
this.globalEndpointManager = new GlobalEndpointManager(asDatabaseAccountManagerInternal(), this.connectionPolicy, /**/configs);
this.retryPolicy = new RetryPolicy(this.globalEndpointManager, this.connectionPolicy);
// Replaced in init() with a session-token-resetting factory once caches exist.
this.resetSessionTokenRetryPolicy = retryPolicy;
}
/**
 * Second-phase initialization. Order matters: the gateway proxy must exist
 * before the endpoint manager is initialized, and the gateway configuration
 * reader runs after the endpoint manager (presumably it reads account state
 * populated by it — TODO confirm, its body is outside this view). Finally the
 * caches are built and the store model is selected by connection mode.
 */
public void init() {
this.gatewayProxy = createRxGatewayProxy(this.sessionContainer,
this.consistencyLevel,
this.queryCompatibilityMode,
this.userAgentContainer,
this.globalEndpointManager,
this.reactorHttpClient);
this.globalEndpointManager.init();
this.initializeGatewayConfigurationReader();
this.collectionCache = new RxClientCollectionCache(this.sessionContainer, this.gatewayProxy, this, this.retryPolicy);
// Upgrade the plain retry policy to one that can reset session tokens using the caches.
this.resetSessionTokenRetryPolicy = new ResetSessionTokenRetryPolicyFactory(this.sessionContainer, this.collectionCache, this.retryPolicy);
this.partitionKeyRangeCache = new RxPartitionKeyRangeCache(RxDocumentClientImpl.this,
collectionCache);
if (this.connectionPolicy.getConnectionMode() == ConnectionMode.GATEWAY) {
this.storeModel = this.gatewayProxy;
} else {
this.initializeDirectConnectivity();
}
}
/**
 * Builds the direct-connectivity stack: a store client factory, a global
 * address resolver, and the server store model.
 */
private void initializeDirectConnectivity() {
this.storeClientFactory = new StoreClientFactory(
this.configs,
this.connectionPolicy.getRequestTimeoutInMillis() / 1000,
0,
this.userAgentContainer,
this.connectionSharingAcrossClientsEnabled
);
this.addressResolver = new GlobalAddressResolver(
this.reactorHttpClient,
this.globalEndpointManager,
this.configs.getProtocol(),
this,
this.collectionCache,
this.partitionKeyRangeCache,
userAgentContainer,
null,
this.connectionPolicy);
this.createStoreModel(true);
}
/**
 * Adapter exposing this client's endpoint/account/policy accessors to the
 * GlobalEndpointManager without leaking the full client interface.
 */
DatabaseAccountManagerInternal asDatabaseAccountManagerInternal() {
return new DatabaseAccountManagerInternal() {
@Override
public URI getServiceEndpoint() {
return RxDocumentClientImpl.this.getServiceEndpoint();
}
@Override
public Flux<DatabaseAccount> getDatabaseAccountFromEndpoint(URI endpoint) {
logger.info("Getting database account endpoint from {}", endpoint);
return RxDocumentClientImpl.this.getDatabaseAccountFromEndpoint(endpoint);
}
@Override
public ConnectionPolicy getConnectionPolicy() {
return RxDocumentClientImpl.this.getConnectionPolicy();
}
};
}
// Factory method (overridable in tests) for the gateway store model.
RxGatewayStoreModel createRxGatewayProxy(ISessionContainer sessionContainer,
ConsistencyLevel consistencyLevel,
QueryCompatibilityMode queryCompatibilityMode,
UserAgentContainer userAgentContainer,
GlobalEndpointManager globalEndpointManager,
HttpClient httpClient) {
return new RxGatewayStoreModel(sessionContainer,
consistencyLevel,
queryCompatibilityMode,
userAgentContainer,
globalEndpointManager,
httpClient);
}
/**
 * Builds the gateway HTTP client from the connection policy. When sharing is
 * enabled, a process-wide instance is reused instead of a per-client one.
 */
private HttpClient httpClient() {
HttpClientConfig httpClientConfig = new HttpClientConfig(this.configs)
.withMaxIdleConnectionTimeoutInMillis(this.connectionPolicy.getIdleConnectionTimeoutInMillis())
.withPoolSize(this.connectionPolicy.getMaxPoolSize())
.withHttpProxy(this.connectionPolicy.getProxy())
.withRequestTimeoutInMillis(this.connectionPolicy.getRequestTimeoutInMillis());
if (connectionSharingAcrossClientsEnabled) {
return SharedGatewayHttpClient.getOrCreateInstance(httpClientConfig);
} else {
return HttpClient.createFixed(httpClientConfig);
}
}
// NOTE(review): the subscribeRntbdStatus parameter is unused in this body —
// confirm whether it should be forwarded to the store client or removed.
private void createStoreModel(boolean subscribeRntbdStatus) {
StoreClient storeClient = this.storeClientFactory.createStoreClient(
this.addressResolver,
this.sessionContainer,
this.gatewayConfigurationReader,
this,
false
);
this.storeModel = new ServerStoreModel(storeClient);
}
/** Returns the account endpoint this client was created with. */
@Override
public URI getServiceEndpoint() {
    return this.serviceEndpoint;
}

/** Returns the preferred (first) write endpoint, or null if none is known yet. */
@Override
public URI getWriteEndpoint() {
    for (URI endpoint : globalEndpointManager.getWriteEndpoints()) {
        return endpoint;
    }
    return null;
}

/** Returns the preferred (first) read endpoint, or null if none is known yet. */
@Override
public URI getReadEndpoint() {
    for (URI endpoint : globalEndpointManager.getReadEndpoints()) {
        return endpoint;
    }
    return null;
}

/** Returns the effective connection policy. */
@Override
public ConnectionPolicy getConnectionPolicy() {
    return this.connectionPolicy;
}
/**
 * Creates a database.
 */
@Override
public Mono<ResourceResponse<Database>> createDatabase(Database database, RequestOptions options) {
DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy();
return ObservableHelper.inlineIfPossibleAsObs(() -> createDatabaseInternal(database, options, retryPolicyInstance), retryPolicyInstance);
}
// Builds and issues the Create request for a database.
private Mono<ResourceResponse<Database>> createDatabaseInternal(Database database, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (database == null) {
throw new IllegalArgumentException("Database");
}
logger.debug("Creating a Database. id: [{}]", database.getId());
validateResource(database);
Map<String, String> requestHeaders = this.getRequestHeaders(options);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Create,
ResourceType.Database, Paths.DATABASES_ROOT, database, requestHeaders, options);
// Let the retry policy observe/route the request before it is sent.
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.create(request, retryPolicyInstance).map(response -> toResourceResponse(response, Database.class));
} catch (Exception e) {
logger.debug("Failure in creating a database. due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
/**
 * Deletes the database addressed by {@code databaseLink}.
 */
@Override
public Mono<ResourceResponse<Database>> deleteDatabase(String databaseLink, RequestOptions options) {
DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy();
return ObservableHelper.inlineIfPossibleAsObs(() -> deleteDatabaseInternal(databaseLink, options, retryPolicyInstance), retryPolicyInstance);
}
// Builds and issues the Delete request for a database link.
private Mono<ResourceResponse<Database>> deleteDatabaseInternal(String databaseLink, RequestOptions options,
DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (StringUtils.isEmpty(databaseLink)) {
throw new IllegalArgumentException("databaseLink");
}
logger.debug("Deleting a Database. databaseLink: [{}]", databaseLink);
String path = Utils.joinPath(databaseLink, null);
Map<String, String> requestHeaders = this.getRequestHeaders(options);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Delete,
ResourceType.Database, path, requestHeaders, options);
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.delete(request, retryPolicyInstance).map(response -> toResourceResponse(response, Database.class));
} catch (Exception e) {
logger.debug("Failure in deleting a database. due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
/**
 * Reads the database addressed by {@code databaseLink}.
 */
@Override
public Mono<ResourceResponse<Database>> readDatabase(String databaseLink, RequestOptions options) {
DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy();
return ObservableHelper.inlineIfPossibleAsObs(() -> readDatabaseInternal(databaseLink, options, retryPolicyInstance), retryPolicyInstance);
}
// Builds and issues the Read request for a database link.
private Mono<ResourceResponse<Database>> readDatabaseInternal(String databaseLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (StringUtils.isEmpty(databaseLink)) {
throw new IllegalArgumentException("databaseLink");
}
logger.debug("Reading a Database. databaseLink: [{}]", databaseLink);
String path = Utils.joinPath(databaseLink, null);
Map<String, String> requestHeaders = this.getRequestHeaders(options);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Read,
ResourceType.Database, path, requestHeaders, options);
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, Database.class));
} catch (Exception e) {
logger.debug("Failure in reading a database. due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
/** Reads the feed of all databases in the account. */
@Override
public Flux<FeedResponse<Database>> readDatabases(FeedOptions options) {
return readFeed(options, ResourceType.Database, Database.class, Paths.DATABASES_ROOT);
}
/**
 * Maps a parent resource link plus a resource type to the feed link that a
 * query for that type must target. Account-scoped types (databases, offers)
 * ignore the parent link and use their fixed root path.
 *
 * @throws IllegalArgumentException for resource types that cannot be queried
 */
private String parentResourceLinkToQueryLink(String parentResourceLink, ResourceType resourceTypeEnum) {
    switch (resourceTypeEnum) {
        case Database:
            return Paths.DATABASES_ROOT;
        case Offer:
            return Paths.OFFERS_ROOT;
        case DocumentCollection:
            return Utils.joinPath(parentResourceLink, Paths.COLLECTIONS_PATH_SEGMENT);
        case Document:
            return Utils.joinPath(parentResourceLink, Paths.DOCUMENTS_PATH_SEGMENT);
        case User:
            return Utils.joinPath(parentResourceLink, Paths.USERS_PATH_SEGMENT);
        case Permission:
            return Utils.joinPath(parentResourceLink, Paths.PERMISSIONS_PATH_SEGMENT);
        case Attachment:
            return Utils.joinPath(parentResourceLink, Paths.ATTACHMENTS_PATH_SEGMENT);
        case StoredProcedure:
            return Utils.joinPath(parentResourceLink, Paths.STORED_PROCEDURES_PATH_SEGMENT);
        case Trigger:
            return Utils.joinPath(parentResourceLink, Paths.TRIGGERS_PATH_SEGMENT);
        case UserDefinedFunction:
            return Utils.joinPath(parentResourceLink, Paths.USER_DEFINED_FUNCTIONS_PATH_SEGMENT);
        default:
            throw new IllegalArgumentException("resource type not supported");
    }
}
/**
 * Core query entry point: resolves the feed link for the resource type, then
 * builds and executes a document query execution context whose pages are
 * flattened into a single Flux.
 */
private <T extends Resource> Flux<FeedResponse<T>> createQuery(
String parentResourceLink,
SqlQuerySpec sqlQuery,
FeedOptions options,
Class<T> klass,
ResourceType resourceTypeEnum) {
String queryResourceLink = parentResourceLinkToQueryLink(parentResourceLink, resourceTypeEnum);
// One activity id per logical query, shared by all of its page requests.
UUID activityId = Utils.randomUUID();
// NOTE(review): capitalized call — presumably a static factory method named
// DocumentQueryClientImpl; confirm, it is declared outside this view.
IDocumentQueryClient queryClient = DocumentQueryClientImpl(RxDocumentClientImpl.this);
Flux<? extends IDocumentQueryExecutionContext<T>> executionContext =
DocumentQueryExecutionContextFactory.createDocumentQueryExecutionContextAsync(queryClient, resourceTypeEnum, klass, sqlQuery , options, queryResourceLink, false, activityId);
return executionContext.flatMap(IDocumentQueryExecutionContext::executeAsync);
}
/** Queries databases with a raw SQL string; delegates to the spec overload. */
@Override
public Flux<FeedResponse<Database>> queryDatabases(String query, FeedOptions options) {
return queryDatabases(new SqlQuerySpec(query), options);
}
/** Queries databases with a parameterized query spec. */
@Override
public Flux<FeedResponse<Database>> queryDatabases(SqlQuerySpec querySpec, FeedOptions options) {
return createQuery(Paths.DATABASES_ROOT, querySpec, options, Database.class, ResourceType.Database);
}
@Override
public Mono<ResourceResponse<DocumentCollection>> createCollection(String databaseLink,
DocumentCollection collection, RequestOptions options) {
DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy();
return ObservableHelper.inlineIfPossibleAsObs(() -> this.createCollectionInternal(databaseLink, collection, options, retryPolicyInstance), retryPolicyInstance);
}
    /**
     * Issues the service request that creates a collection in the given database.
     * On success the response's session token is captured in the session container
     * so that subsequent session-consistent reads observe the write.
     *
     * @param databaseLink        link of the owning database; must be non-empty.
     * @param collection          the collection definition; must be non-null.
     * @param options             request options, may be null.
     * @param retryPolicyInstance retry policy driving this attempt, may be null.
     * @return a {@link Mono} emitting the resource response, or an error signal on failure.
     */
    private Mono<ResourceResponse<DocumentCollection>> createCollectionInternal(String databaseLink,
        DocumentCollection collection, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
        try {
            if (StringUtils.isEmpty(databaseLink)) {
                throw new IllegalArgumentException("databaseLink");
            }
            if (collection == null) {
                throw new IllegalArgumentException("collection");
            }
            logger.debug("Creating a Collection. databaseLink: [{}], Collection id: [{}]", databaseLink,
                collection.getId());
            // Reject ids with illegal characters before hitting the wire.
            validateResource(collection);
            String path = Utils.joinPath(databaseLink, Paths.COLLECTIONS_PATH_SEGMENT);
            Map<String, String> requestHeaders = this.getRequestHeaders(options);
            RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Create,
                ResourceType.DocumentCollection, path, collection, requestHeaders, options);
            if (retryPolicyInstance != null){
                retryPolicyInstance.onBeforeSendRequest(request);
            }
            // Capture the returned session token so session consistency holds for follow-up reads.
            return this.create(request, retryPolicyInstance).map(response -> toResourceResponse(response, DocumentCollection.class))
                .doOnNext(resourceResponse -> {
                    this.sessionContainer.setSessionToken(resourceResponse.getResource().getResourceId(),
                        getAltLink(resourceResponse.getResource()),
                        resourceResponse.getResponseHeaders());
                });
        } catch (Exception e) {
            // Surface synchronous argument/serialization failures as an error signal.
            logger.debug("Failure in creating a collection. due to [{}]", e.getMessage(), e);
            return Mono.error(e);
        }
    }
@Override
public Mono<ResourceResponse<DocumentCollection>> replaceCollection(DocumentCollection collection,
RequestOptions options) {
DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy();
return ObservableHelper.inlineIfPossibleAsObs(() -> replaceCollectionInternal(collection, options, retryPolicyInstance), retryPolicyInstance);
}
    /**
     * Issues the service request that replaces a collection, addressed via the
     * collection's self link. On success the session token is captured (guarded
     * against a null resource in the response).
     *
     * @param collection          the collection definition; must be non-null.
     * @param options             request options, may be null.
     * @param retryPolicyInstance retry policy driving this attempt, may be null.
     * @return a {@link Mono} emitting the resource response, or an error signal on failure.
     */
    private Mono<ResourceResponse<DocumentCollection>> replaceCollectionInternal(DocumentCollection collection,
        RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
        try {
            if (collection == null) {
                throw new IllegalArgumentException("collection");
            }
            logger.debug("Replacing a Collection. id: [{}]", collection.getId());
            validateResource(collection);
            String path = Utils.joinPath(collection.getSelfLink(), null);
            Map<String, String> requestHeaders = this.getRequestHeaders(options);
            RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Replace,
                ResourceType.DocumentCollection, path, collection, requestHeaders, options);
            if (retryPolicyInstance != null){
                retryPolicyInstance.onBeforeSendRequest(request);
            }
            // Unlike create, the resource may be absent from a replace response; guard before
            // recording the session token.
            return this.replace(request, retryPolicyInstance).map(response -> toResourceResponse(response, DocumentCollection.class))
                .doOnNext(resourceResponse -> {
                    if (resourceResponse.getResource() != null) {
                        this.sessionContainer.setSessionToken(resourceResponse.getResource().getResourceId(),
                            getAltLink(resourceResponse.getResource()),
                            resourceResponse.getResponseHeaders());
                    }
                });
        } catch (Exception e) {
            logger.debug("Failure in replacing a collection. due to [{}]", e.getMessage(), e);
            return Mono.error(e);
        }
    }
@Override
public Mono<ResourceResponse<DocumentCollection>> deleteCollection(String collectionLink,
RequestOptions options) {
DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy();
return ObservableHelper.inlineIfPossibleAsObs(() -> deleteCollectionInternal(collectionLink, options, retryPolicyInstance), retryPolicyInstance);
}
    /**
     * Issues the service request that deletes a collection.
     *
     * @param collectionLink      link of the collection to delete; must be non-empty.
     * @param options             request options, may be null.
     * @param retryPolicyInstance retry policy driving this attempt, may be null.
     * @return a {@link Mono} emitting the resource response, or an error signal on failure.
     */
    private Mono<ResourceResponse<DocumentCollection>> deleteCollectionInternal(String collectionLink,
        RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
        try {
            if (StringUtils.isEmpty(collectionLink)) {
                throw new IllegalArgumentException("collectionLink");
            }
            logger.debug("Deleting a Collection. collectionLink: [{}]", collectionLink);
            String path = Utils.joinPath(collectionLink, null);
            Map<String, String> requestHeaders = this.getRequestHeaders(options);
            RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Delete,
                ResourceType.DocumentCollection, path, requestHeaders, options);
            if (retryPolicyInstance != null){
                retryPolicyInstance.onBeforeSendRequest(request);
            }
            return this.delete(request, retryPolicyInstance).map(response -> toResourceResponse(response, DocumentCollection.class));
        } catch (Exception e) {
            logger.debug("Failure in deleting a collection, due to [{}]", e.getMessage(), e);
            return Mono.error(e);
        }
    }
    /**
     * Sends a DELETE request through the store proxy after stamping date/auth headers.
     * When this is a retry attempt, the retry context on the request is refreshed so
     * diagnostics reflect the current attempt.
     */
    private Mono<RxDocumentServiceResponse> delete(RxDocumentServiceRequest request, DocumentClientRetryPolicy documentClientRetryPolicy) {
        populateHeaders(request, RequestVerb.DELETE);
        if(request.requestContext != null && documentClientRetryPolicy.getRetryCount() > 0) {
            documentClientRetryPolicy.updateEndTime();
            request.requestContext.updateRetryContext(documentClientRetryPolicy, true);
        }
        return getStoreProxy(request).processMessage(request);
    }
    /**
     * Sends a GET (point read) request through the store proxy after stamping
     * date/auth headers, refreshing the retry context on retry attempts.
     */
    private Mono<RxDocumentServiceResponse> read(RxDocumentServiceRequest request, DocumentClientRetryPolicy documentClientRetryPolicy) {
        populateHeaders(request, RequestVerb.GET);
        if(request.requestContext != null && documentClientRetryPolicy.getRetryCount() > 0) {
            documentClientRetryPolicy.updateEndTime();
            request.requestContext.updateRetryContext(documentClientRetryPolicy, true);
        }
        return getStoreProxy(request).processMessage(request);
    }
    /**
     * Sends a feed-read (GET) request.
     * NOTE(review): unlike the other verb helpers this always routes through the
     * gateway proxy and does not refresh the retry context — confirm this is intentional.
     */
    Mono<RxDocumentServiceResponse> readFeed(RxDocumentServiceRequest request) {
        populateHeaders(request, RequestVerb.GET);
        return gatewayProxy.processMessage(request);
    }
    /**
     * Sends a query (POST) request and captures the response session token so that
     * session-consistent follow-up reads observe this query's session.
     */
    private Mono<RxDocumentServiceResponse> query(RxDocumentServiceRequest request) {
        populateHeaders(request, RequestVerb.POST);
        return this.getStoreProxy(request).processMessage(request)
            .map(response -> {
                this.captureSessionToken(request, response);
                return response;
            }
        );
    }
@Override
public Mono<ResourceResponse<DocumentCollection>> readCollection(String collectionLink,
RequestOptions options) {
DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy();
return ObservableHelper.inlineIfPossibleAsObs(() -> readCollectionInternal(collectionLink, options, retryPolicyInstance), retryPolicyInstance);
}
    /**
     * Issues the service request that reads a collection.
     *
     * @param collectionLink      link of the collection to read; must be non-empty.
     * @param options             request options, may be null.
     * @param retryPolicyInstance retry policy driving this attempt, may be null.
     * @return a {@link Mono} emitting the resource response, or an error signal on failure.
     */
    private Mono<ResourceResponse<DocumentCollection>> readCollectionInternal(String collectionLink,
        RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
        try {
            if (StringUtils.isEmpty(collectionLink)) {
                throw new IllegalArgumentException("collectionLink");
            }
            logger.debug("Reading a Collection. collectionLink: [{}]", collectionLink);
            String path = Utils.joinPath(collectionLink, null);
            Map<String, String> requestHeaders = this.getRequestHeaders(options);
            RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Read,
                ResourceType.DocumentCollection, path, requestHeaders, options);
            if (retryPolicyInstance != null){
                retryPolicyInstance.onBeforeSendRequest(request);
            }
            return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, DocumentCollection.class));
        } catch (Exception e) {
            logger.debug("Failure in reading a collection, due to [{}]", e.getMessage(), e);
            return Mono.error(e);
        }
    }
@Override
public Flux<FeedResponse<DocumentCollection>> readCollections(String databaseLink, FeedOptions options) {
if (StringUtils.isEmpty(databaseLink)) {
throw new IllegalArgumentException("databaseLink");
}
return readFeed(options, ResourceType.DocumentCollection, DocumentCollection.class,
Utils.joinPath(databaseLink, Paths.COLLECTIONS_PATH_SEGMENT));
}
@Override
public Flux<FeedResponse<DocumentCollection>> queryCollections(String databaseLink, String query,
FeedOptions options) {
return createQuery(databaseLink, new SqlQuerySpec(query), options, DocumentCollection.class, ResourceType.DocumentCollection);
}
@Override
public Flux<FeedResponse<DocumentCollection>> queryCollections(String databaseLink,
SqlQuerySpec querySpec, FeedOptions options) {
return createQuery(databaseLink, querySpec, options, DocumentCollection.class, ResourceType.DocumentCollection);
}
private static String serializeProcedureParams(Object[] objectArray) {
String[] stringArray = new String[objectArray.length];
for (int i = 0; i < objectArray.length; ++i) {
Object object = objectArray[i];
if (object instanceof JsonSerializable) {
stringArray[i] = ((JsonSerializable) object).toJson();
} else {
try {
stringArray[i] = mapper.writeValueAsString(object);
} catch (IOException e) {
throw new IllegalArgumentException("Can't serialize the object into the json string", e);
}
}
}
return String.format("[%s]", StringUtils.join(stringArray, ","));
}
private static void validateResource(Resource resource) {
if (!StringUtils.isEmpty(resource.getId())) {
if (resource.getId().indexOf('/') != -1 || resource.getId().indexOf('\\') != -1 ||
resource.getId().indexOf('?') != -1 || resource.getId().indexOf('
throw new IllegalArgumentException("Id contains illegal chars.");
}
if (resource.getId().endsWith(" ")) {
throw new IllegalArgumentException("Id ends with a space.");
}
}
}
    /**
     * Builds the HTTP headers for a request from client-level settings and the
     * per-request {@link RequestOptions}.
     * <p>
     * Client-level defaults (tentative writes, consistency level) are applied first;
     * per-request options are layered on top and may override them (e.g. the
     * request's consistency level replaces the client's).
     *
     * @param options the per-request options; may be null, in which case only
     *                client-level headers are returned.
     * @return a mutable map of header name to value.
     */
    private Map<String, String> getRequestHeaders(RequestOptions options) {
        Map<String, String> headers = new HashMap<>();
        if (this.useMultipleWriteLocations) {
            headers.put(HttpConstants.HttpHeaders.ALLOW_TENTATIVE_WRITES, Boolean.TRUE.toString());
        }
        if (consistencyLevel != null) {
            headers.put(HttpConstants.HttpHeaders.CONSISTENCY_LEVEL, consistencyLevel.toString());
        }
        if (options == null) {
            return headers;
        }
        // Caller-supplied custom headers go in first so the well-known options below win on conflict.
        Map<String, String> customOptions = options.getHeaders();
        if (customOptions != null) {
            headers.putAll(customOptions);
        }
        if (options.getAccessCondition() != null) {
            if (options.getAccessCondition().getType() == AccessConditionType.IF_MATCH) {
                headers.put(HttpConstants.HttpHeaders.IF_MATCH, options.getAccessCondition().getCondition());
            } else {
                headers.put(HttpConstants.HttpHeaders.IF_NONE_MATCH, options.getAccessCondition().getCondition());
            }
        }
        // Per-request consistency overrides the client-level value set above.
        if (options.getConsistencyLevel() != null) {
            headers.put(HttpConstants.HttpHeaders.CONSISTENCY_LEVEL, options.getConsistencyLevel().toString());
        }
        if (options.getIndexingDirective() != null) {
            headers.put(HttpConstants.HttpHeaders.INDEXING_DIRECTIVE, options.getIndexingDirective().toString());
        }
        if (options.getPostTriggerInclude() != null && options.getPostTriggerInclude().size() > 0) {
            String postTriggerInclude = StringUtils.join(options.getPostTriggerInclude(), ",");
            headers.put(HttpConstants.HttpHeaders.POST_TRIGGER_INCLUDE, postTriggerInclude);
        }
        if (options.getPreTriggerInclude() != null && options.getPreTriggerInclude().size() > 0) {
            String preTriggerInclude = StringUtils.join(options.getPreTriggerInclude(), ",");
            headers.put(HttpConstants.HttpHeaders.PRE_TRIGGER_INCLUDE, preTriggerInclude);
        }
        if (!Strings.isNullOrEmpty(options.getSessionToken())) {
            headers.put(HttpConstants.HttpHeaders.SESSION_TOKEN, options.getSessionToken());
        }
        if (options.getResourceTokenExpirySeconds() != null) {
            headers.put(HttpConstants.HttpHeaders.RESOURCE_TOKEN_EXPIRY,
                String.valueOf(options.getResourceTokenExpirySeconds()));
        }
        // Explicit throughput wins over a named offer type.
        if (options.getOfferThroughput() != null && options.getOfferThroughput() >= 0) {
            headers.put(HttpConstants.HttpHeaders.OFFER_THROUGHPUT, options.getOfferThroughput().toString());
        } else if (options.getOfferType() != null) {
            headers.put(HttpConstants.HttpHeaders.OFFER_TYPE, options.getOfferType());
        }
        if (options.isPopulateQuotaInfo()) {
            headers.put(HttpConstants.HttpHeaders.POPULATE_QUOTA_INFO, String.valueOf(true));
        }
        if (options.isScriptLoggingEnabled()) {
            headers.put(HttpConstants.HttpHeaders.SCRIPT_ENABLE_LOGGING, String.valueOf(true));
        }
        return headers;
    }
private Mono<RxDocumentServiceRequest> addPartitionKeyInformation(RxDocumentServiceRequest request,
ByteBuffer contentAsByteBuffer,
Document document,
RequestOptions options) {
Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = this.collectionCache.resolveCollectionAsync(request);
return collectionObs
.map(collectionValueHolder -> {
addPartitionKeyInformation(request, contentAsByteBuffer, document, options, collectionValueHolder.v);
return request;
});
}
private Mono<RxDocumentServiceRequest> addPartitionKeyInformation(RxDocumentServiceRequest request,
ByteBuffer contentAsByteBuffer,
Object document,
RequestOptions options,
Mono<Utils.ValueHolder<DocumentCollection>> collectionObs) {
return collectionObs.map(collectionValueHolder -> {
addPartitionKeyInformation(request, contentAsByteBuffer, document, options, collectionValueHolder.v);
return request;
});
}
    /**
     * Computes the effective partition key for a request and writes it both onto the
     * request object and into the x-ms-documentdb-partitionkey header.
     * <p>
     * Precedence (order matters): explicit NONE key in options, then any explicit key
     * in options, then the empty key for non-partitioned collections, then extraction
     * from the document body; otherwise the operation is rejected.
     *
     * @throws UnsupportedOperationException if no partition key can be determined.
     */
    private void addPartitionKeyInformation(RxDocumentServiceRequest request,
                                            ByteBuffer contentAsByteBuffer,
                                            Object objectDoc, RequestOptions options,
                                            DocumentCollection collection) {
        PartitionKeyDefinition partitionKeyDefinition = collection.getPartitionKey();
        PartitionKeyInternal partitionKeyInternal = null;
        if (options != null && options.getPartitionKey() != null && options.getPartitionKey().equals(PartitionKey.NONE)){
            // Explicit NONE: use the collection's designated "none" key value.
            partitionKeyInternal = BridgeInternal.getNonePartitionKey(partitionKeyDefinition);
        } else if (options != null && options.getPartitionKey() != null) {
            partitionKeyInternal = BridgeInternal.getPartitionKeyInternal(options.getPartitionKey());
        } else if (partitionKeyDefinition == null || partitionKeyDefinition.getPaths().size() == 0) {
            // Non-partitioned collection: the empty key addresses the single partition.
            partitionKeyInternal = PartitionKeyInternal.getEmpty();
        } else if (contentAsByteBuffer != null) {
            CosmosItemProperties cosmosItemProperties;
            if (objectDoc instanceof CosmosItemProperties) {
                cosmosItemProperties = (CosmosItemProperties) objectDoc;
            } else {
                // Rewind before re-reading: the buffer position may have advanced during serialization.
                contentAsByteBuffer.rewind();
                cosmosItemProperties = new CosmosItemProperties(contentAsByteBuffer);
            }
            partitionKeyInternal = extractPartitionKeyValueFromDocument(cosmosItemProperties, partitionKeyDefinition);
        } else {
            throw new UnsupportedOperationException("PartitionKey value must be supplied for this operation.");
        }
        request.setPartitionKeyInternal(partitionKeyInternal);
        request.getHeaders().put(HttpConstants.HttpHeaders.PARTITION_KEY, Utils.escapeNonAscii(partitionKeyInternal.toJson()));
    }
    /**
     * Extracts the partition key value from a document by walking the first path of
     * the collection's partition key definition.
     * A missing value (or an object node, which cannot be a key) maps to the "none"
     * partition key.
     *
     * @param document               the document to read the key from.
     * @param partitionKeyDefinition the collection's partition key definition.
     * @return the extracted partition key, or null if the definition is null.
     */
    private static PartitionKeyInternal extractPartitionKeyValueFromDocument(
        CosmosItemProperties document,
        PartitionKeyDefinition partitionKeyDefinition) {
        if (partitionKeyDefinition != null) {
            // Only the first path is considered; hierarchical keys are not handled here.
            String path = partitionKeyDefinition.getPaths().iterator().next();
            List<String> parts = PathParser.getPathParts(path);
            if (parts.size() >= 1) {
                Object value = document.getObjectByPath(parts);
                if (value == null || value.getClass() == ObjectNode.class) {
                    value = BridgeInternal.getNonePartitionKey(partitionKeyDefinition);
                }
                if (value instanceof PartitionKeyInternal) {
                    return (PartitionKeyInternal) value;
                } else {
                    return PartitionKeyInternal.fromObjectArray(Collections.singletonList(value), false);
                }
            }
        }
        return null;
    }
    /**
     * Builds a Create/Upsert document request: serializes the document body, creates
     * the service request, and asynchronously stamps partition-key information after
     * resolving the target collection.
     *
     * @param documentCollectionLink       link of the target collection; must be non-empty.
     * @param document                     the document payload; must be non-null.
     * @param options                      request options, may be null.
     * @param disableAutomaticIdGeneration NOTE(review): not referenced in this body —
     *                                     id generation appears to be handled elsewhere; confirm.
     * @param operationType                Create or Upsert.
     * @return a {@link Mono} emitting the fully-decorated request.
     * @throws IllegalArgumentException if the link is empty or the document is null.
     */
    private Mono<RxDocumentServiceRequest> getCreateDocumentRequest(String documentCollectionLink, Object document,
        RequestOptions options, boolean disableAutomaticIdGeneration, OperationType operationType) {
        if (StringUtils.isEmpty(documentCollectionLink)) {
            throw new IllegalArgumentException("documentCollectionLink");
        }
        if (document == null) {
            throw new IllegalArgumentException("document");
        }
        ByteBuffer content = serializeJsonToByteBuffer(document, mapper);
        String path = Utils.joinPath(documentCollectionLink, Paths.DOCUMENTS_PATH_SEGMENT);
        Map<String, String> requestHeaders = this.getRequestHeaders(options);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(operationType, ResourceType.Document, path,
            requestHeaders, options, content);
        Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = this.collectionCache.resolveCollectionAsync(request);
        return addPartitionKeyInformation(request, content, document, options, collectionObs);
    }
    /**
     * Stamps the mandatory wire headers onto a request: the x-ms-date header, the
     * authorization token (when any credential source is configured), and default
     * content-type/accept headers when the caller has not set them.
     *
     * @param request    the request to decorate.
     * @param httpMethod the HTTP verb used to compute the auth signature.
     */
    private void populateHeaders(RxDocumentServiceRequest request, RequestVerb httpMethod) {
        request.getHeaders().put(HttpConstants.HttpHeaders.X_DATE, Utils.nowAsRFC1123());
        if (this.masterKeyOrResourceToken != null || this.resourceTokensMap != null
            || this.tokenResolver != null || this.cosmosKeyCredential != null) {
            String resourceName = request.getResourceAddress();
            String authorization = this.getUserAuthorizationToken(
                resourceName, request.getResourceType(), httpMethod, request.getHeaders(),
                AuthorizationTokenType.PrimaryMasterKey, request.properties);
            try {
                // The token is URL-encoded because it is transmitted in a header.
                authorization = URLEncoder.encode(authorization, "UTF-8");
            } catch (UnsupportedEncodingException e) {
                // UTF-8 is always available; reaching here indicates a broken runtime.
                throw new IllegalStateException("Failed to encode authtoken.", e);
            }
            request.getHeaders().put(HttpConstants.HttpHeaders.AUTHORIZATION, authorization);
        }
        // Requests with a body default to a JSON content type.
        if ((RequestVerb.POST.equals(httpMethod) || RequestVerb.PUT.equals(httpMethod))
            && !request.getHeaders().containsKey(HttpConstants.HttpHeaders.CONTENT_TYPE)) {
            request.getHeaders().put(HttpConstants.HttpHeaders.CONTENT_TYPE, RuntimeConstants.MediaTypes.JSON);
        }
        if (!request.getHeaders().containsKey(HttpConstants.HttpHeaders.ACCEPT)) {
            request.getHeaders().put(HttpConstants.HttpHeaders.ACCEPT, RuntimeConstants.MediaTypes.JSON);
        }
    }
    /**
     * Produces the authorization token for a request, honoring the credential
     * precedence: custom token resolver, then key credential, then a plain resource
     * token, and finally the per-resource token map.
     *
     * @param resourceName the address of the resource being accessed.
     * @param resourceType the type of the resource being accessed.
     * @param requestVerb  the HTTP verb of the request.
     * @param headers      the request headers (used for signature computation).
     * @param tokenType    the requested token type.
     * @param properties   opaque caller properties forwarded to a token resolver.
     * @return the authorization token (not yet URL-encoded).
     */
    @Override
    public String getUserAuthorizationToken(String resourceName,
                                            ResourceType resourceType,
                                            RequestVerb requestVerb,
                                            Map<String, String> headers,
                                            AuthorizationTokenType tokenType,
                                            Map<String, Object> properties) {
        if (this.tokenResolver != null) {
            // Properties are exposed read-only to the resolver.
            return this.tokenResolver.getAuthorizationToken(requestVerb, resourceName, this.resolveCosmosResourceType(resourceType),
                properties != null ? Collections.unmodifiableMap(properties) : null);
        } else if (cosmosKeyCredential != null) {
            return this.authorizationTokenProvider.generateKeyAuthorizationSignature(requestVerb, resourceName,
                resourceType, headers);
        } else if (masterKeyOrResourceToken != null && hasAuthKeyResourceToken && resourceTokensMap == null) {
            // A single resource token is used verbatim.
            return masterKeyOrResourceToken;
        } else {
            assert resourceTokensMap != null;
            if(resourceType.equals(ResourceType.DatabaseAccount)) {
                // Account reads accept any valid resource token; use the first one from the permission feed.
                return this.firstResourceTokenFromPermissionFeed;
            }
            return ResourceTokenAuthorizationHelper.getAuthorizationTokenUsingResourceTokens(resourceTokensMap, requestVerb, resourceName, headers);
        }
    }
private CosmosResourceType resolveCosmosResourceType(ResourceType resourceType) {
try {
return CosmosResourceType.valueOf(resourceType.toString());
} catch (IllegalArgumentException e) {
return CosmosResourceType.System;
}
}
    /**
     * Records the session token from a response into the session container so
     * session-consistent reads on this client observe the write.
     */
    void captureSessionToken(RxDocumentServiceRequest request, RxDocumentServiceResponse response) {
        this.sessionContainer.setSessionToken(request, response.getResponseHeaders());
    }
    /**
     * Sends a create (POST) request through the store proxy after stamping
     * date/auth headers, refreshing the retry context on retry attempts.
     */
    private Mono<RxDocumentServiceResponse> create(RxDocumentServiceRequest request, DocumentClientRetryPolicy retryPolicy) {
        populateHeaders(request, RequestVerb.POST);
        RxStoreModel storeProxy = this.getStoreProxy(request);
        if(request.requestContext != null && retryPolicy.getRetryCount() > 0) {
            retryPolicy.updateEndTime();
            request.requestContext.updateRetryContext(retryPolicy, true);
        }
        return storeProxy.processMessage(request);
    }
    /**
     * Sends an upsert request: a POST with the x-ms-documentdb-is-upsert header set.
     * The response session token is captured so follow-up session-consistent reads
     * observe the write.
     */
    private Mono<RxDocumentServiceResponse> upsert(RxDocumentServiceRequest request, DocumentClientRetryPolicy documentClientRetryPolicy) {
        populateHeaders(request, RequestVerb.POST);
        Map<String, String> headers = request.getHeaders();
        // Headers are always initialized by populateHeaders above.
        assert (headers != null);
        headers.put(HttpConstants.HttpHeaders.IS_UPSERT, "true");
        if(request.requestContext != null && documentClientRetryPolicy.getRetryCount() > 0) {
            documentClientRetryPolicy.updateEndTime();
            request.requestContext.updateRetryContext(documentClientRetryPolicy, true);
        }
        return getStoreProxy(request).processMessage(request)
            .map(response -> {
                this.captureSessionToken(request, response);
                return response;
            }
        );
    }
    /**
     * Sends a replace (PUT) request through the store proxy after stamping
     * date/auth headers, refreshing the retry context on retry attempts.
     */
    private Mono<RxDocumentServiceResponse> replace(RxDocumentServiceRequest request, DocumentClientRetryPolicy documentClientRetryPolicy) {
        populateHeaders(request, RequestVerb.PUT);
        if(request.requestContext != null && documentClientRetryPolicy.getRetryCount() > 0) {
            documentClientRetryPolicy.updateEndTime();
            request.requestContext.updateRetryContext(documentClientRetryPolicy, true);
        }
        return getStoreProxy(request).processMessage(request);
    }
@Override
public Mono<ResourceResponse<Document>> createDocument(String collectionLink, Object document,
RequestOptions options, boolean disableAutomaticIdGeneration) {
DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
if (options == null || options.getPartitionKey() == null) {
requestRetryPolicy = new PartitionKeyMismatchRetryPolicy(collectionCache, requestRetryPolicy, collectionLink, options);
}
DocumentClientRetryPolicy finalRetryPolicyInstance = requestRetryPolicy;
return ObservableHelper.inlineIfPossibleAsObs(() -> createDocumentInternal(collectionLink, document, options, disableAutomaticIdGeneration, finalRetryPolicyInstance), requestRetryPolicy);
}
    /**
     * Issues the service request that creates a document, building the request
     * asynchronously so partition-key resolution can complete first.
     *
     * @param collectionLink               link of the target collection.
     * @param document                     the document payload.
     * @param options                      request options, may be null.
     * @param disableAutomaticIdGeneration whether automatic id generation is disabled.
     * @param requestRetryPolicy           retry policy driving this attempt, may be null.
     * @return a {@link Mono} emitting the resource response, or an error signal on failure.
     */
    private Mono<ResourceResponse<Document>> createDocumentInternal(String collectionLink, Object document,
        RequestOptions options, boolean disableAutomaticIdGeneration, DocumentClientRetryPolicy requestRetryPolicy) {
        try {
            logger.debug("Creating a Document. collectionLink: [{}]", collectionLink);
            Mono<RxDocumentServiceRequest> requestObs = getCreateDocumentRequest(collectionLink, document,
                options, disableAutomaticIdGeneration, OperationType.Create);
            Mono<RxDocumentServiceResponse> responseObservable = requestObs.flatMap(request -> {
                if (requestRetryPolicy != null) {
                    requestRetryPolicy.onBeforeSendRequest(request);
                }
                return create(request, requestRetryPolicy);
            });
            return responseObservable
                .map(serviceResponse -> toResourceResponse(serviceResponse, Document.class));
        } catch (Exception e) {
            logger.debug("Failure in creating a document due to [{}]", e.getMessage(), e);
            return Mono.error(e);
        }
    }
@Override
public Mono<ResourceResponse<Document>> upsertDocument(String collectionLink, Object document,
RequestOptions options, boolean disableAutomaticIdGeneration) {
DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
if (options == null || options.getPartitionKey() == null) {
requestRetryPolicy = new PartitionKeyMismatchRetryPolicy(collectionCache, requestRetryPolicy, collectionLink, options);
}
DocumentClientRetryPolicy finalRetryPolicyInstance = requestRetryPolicy;
return ObservableHelper.inlineIfPossibleAsObs(() -> upsertDocumentInternal(collectionLink, document, options, disableAutomaticIdGeneration, finalRetryPolicyInstance), finalRetryPolicyInstance);
}
    /**
     * Issues the service request that upserts a document, building the request
     * asynchronously so partition-key resolution can complete first.
     *
     * @param collectionLink               link of the target collection.
     * @param document                     the document payload.
     * @param options                      request options, may be null.
     * @param disableAutomaticIdGeneration whether automatic id generation is disabled.
     * @param retryPolicyInstance          retry policy driving this attempt, may be null.
     * @return a {@link Mono} emitting the resource response, or an error signal on failure.
     */
    private Mono<ResourceResponse<Document>> upsertDocumentInternal(String collectionLink, Object document,
        RequestOptions options, boolean disableAutomaticIdGeneration, DocumentClientRetryPolicy retryPolicyInstance) {
        try {
            logger.debug("Upserting a Document. collectionLink: [{}]", collectionLink);
            Mono<RxDocumentServiceRequest> reqObs = getCreateDocumentRequest(collectionLink, document,
                options, disableAutomaticIdGeneration, OperationType.Upsert);
            Mono<RxDocumentServiceResponse> responseObservable = reqObs.flatMap(request -> {
                if (retryPolicyInstance != null) {
                    retryPolicyInstance.onBeforeSendRequest(request);
                }
                return upsert(request, retryPolicyInstance);
            });
            return responseObservable
                .map(serviceResponse -> toResourceResponse(serviceResponse, Document.class));
        } catch (Exception e) {
            logger.debug("Failure in upserting a document due to [{}]", e.getMessage(), e);
            return Mono.error(e);
        }
    }
@Override
public Mono<ResourceResponse<Document>> replaceDocument(String documentLink, Object document,
RequestOptions options) {
DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
if (options == null || options.getPartitionKey() == null) {
String collectionLink = Utils.getCollectionName(documentLink);
requestRetryPolicy = new PartitionKeyMismatchRetryPolicy(collectionCache, requestRetryPolicy, collectionLink, options);
}
DocumentClientRetryPolicy finalRequestRetryPolicy = requestRetryPolicy;
return ObservableHelper.inlineIfPossibleAsObs(() -> replaceDocumentInternal(documentLink, document, options, finalRequestRetryPolicy), requestRetryPolicy);
}
private Mono<ResourceResponse<Document>> replaceDocumentInternal(String documentLink, Object document,
RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (StringUtils.isEmpty(documentLink)) {
throw new IllegalArgumentException("documentLink");
}
if (document == null) {
throw new IllegalArgumentException("document");
}
Document typedDocument = documentFromObject(document, mapper);
return this.replaceDocumentInternal(documentLink, typedDocument, options, retryPolicyInstance);
} catch (Exception e) {
logger.debug("Failure in replacing a document due to [{}]", e.getMessage());
return Mono.error(e);
}
}
@Override
public Mono<ResourceResponse<Document>> replaceDocument(Document document, RequestOptions options) {
DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
if (options == null || options.getPartitionKey() == null) {
String collectionLink = document.getSelfLink();
requestRetryPolicy = new PartitionKeyMismatchRetryPolicy(collectionCache, requestRetryPolicy, collectionLink, options);
}
DocumentClientRetryPolicy finalRequestRetryPolicy = requestRetryPolicy;
return ObservableHelper.inlineIfPossibleAsObs(() -> replaceDocumentInternal(document, options, finalRequestRetryPolicy), requestRetryPolicy);
}
private Mono<ResourceResponse<Document>> replaceDocumentInternal(Document document, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (document == null) {
throw new IllegalArgumentException("document");
}
return this.replaceDocumentInternal(document.getSelfLink(), document, options, retryPolicyInstance);
} catch (Exception e) {
logger.debug("Failure in replacing a database due to [{}]", e.getMessage());
return Mono.error(e);
}
}
private Mono<ResourceResponse<Document>> replaceDocumentInternal(String documentLink,
Document document,
RequestOptions options,
DocumentClientRetryPolicy retryPolicyInstance) {
if (document == null) {
throw new IllegalArgumentException("document");
}
logger.debug("Replacing a Document. documentLink: [{}]", documentLink);
final String path = Utils.joinPath(documentLink, null);
final Map<String, String> requestHeaders = getRequestHeaders(options);
ByteBuffer content = serializeJsonToByteBuffer(document, mapper);
final RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Replace,
ResourceType.Document, path, requestHeaders, options, content);
Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = collectionCache.resolveCollectionAsync(request);
Mono<RxDocumentServiceRequest> requestObs = addPartitionKeyInformation(request, content, document, options, collectionObs);
return requestObs.flatMap(req -> {
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
return replace(request, retryPolicyInstance)
.map(resp -> toResourceResponse(resp, Document.class));} );
}
@Override
public Mono<ResourceResponse<Document>> deleteDocument(String documentLink, RequestOptions options) {
DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
return ObservableHelper.inlineIfPossibleAsObs(() -> deleteDocumentInternal(documentLink, options, requestRetryPolicy), requestRetryPolicy);
}
private Mono<ResourceResponse<Document>> deleteDocumentInternal(String documentLink, RequestOptions options,
DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (StringUtils.isEmpty(documentLink)) {
throw new IllegalArgumentException("documentLink");
}
logger.debug("Deleting a Document. documentLink: [{}]", documentLink);
String path = Utils.joinPath(documentLink, null);
Map<String, String> requestHeaders = this.getRequestHeaders(options);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Delete,
ResourceType.Document, path, requestHeaders, options);
Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = collectionCache.resolveCollectionAsync(request);
Mono<RxDocumentServiceRequest> requestObs = addPartitionKeyInformation(request, null, null, options, collectionObs);
return requestObs.flatMap(req -> {
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(req);
}
return this.delete(req, retryPolicyInstance)
.map(serviceResponse -> toResourceResponse(serviceResponse, Document.class));});
} catch (Exception e) {
logger.debug("Failure in deleting a document due to [{}]", e.getMessage());
return Mono.error(e);
}
}
@Override
public Mono<ResourceResponse<Document>> readDocument(String documentLink, RequestOptions options) {
DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy();
return ObservableHelper.inlineIfPossibleAsObs(() -> readDocumentInternal(documentLink, options, retryPolicyInstance), retryPolicyInstance);
}
private Mono<ResourceResponse<Document>> readDocumentInternal(String documentLink, RequestOptions options,
DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (StringUtils.isEmpty(documentLink)) {
throw new IllegalArgumentException("documentLink");
}
logger.debug("Reading a Document. documentLink: [{}]", documentLink);
String path = Utils.joinPath(documentLink, null);
Map<String, String> requestHeaders = this.getRequestHeaders(options);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Read,
ResourceType.Document, path, requestHeaders, options);
Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = this.collectionCache.resolveCollectionAsync(request);
Mono<RxDocumentServiceRequest> requestObs = addPartitionKeyInformation(request, null, null, options, collectionObs);
return requestObs.flatMap(req -> {
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.read(request, retryPolicyInstance).map(serviceResponse -> toResourceResponse(serviceResponse, Document.class));
});
} catch (Exception e) {
logger.debug("Failure in reading a document due to [{}]", e.getMessage());
return Mono.error(e);
}
}
@Override
public Flux<FeedResponse<Document>> readDocuments(String collectionLink, FeedOptions options) {
if (StringUtils.isEmpty(collectionLink)) {
throw new IllegalArgumentException("collectionLink");
}
return queryDocuments(collectionLink, "SELECT * FROM r", options);
}
/**
 * Point-reads many documents given (id, partition key) pairs by grouping the keys per
 * partition key range, issuing one query per range, and stitching all result pages into
 * a single FeedResponse. Request charges from all pages are summed into the headers.
 */
@Override
public <T> Mono<FeedResponse<T>> readMany(
    List<Pair<String, PartitionKey>> itemKeyList,
    String collectionLink,
    FeedOptions options,
    Class<T> klass) {
    RxDocumentServiceRequest request = RxDocumentServiceRequest.create(
        OperationType.Query,
        ResourceType.Document,
        collectionLink, null
    );
    // Resolve the collection first: its partition key definition and resource id drive routing.
    Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = collectionCache.resolveCollectionAsync(request);
    return collectionObs
        .flatMap(documentCollectionResourceResponse -> {
            final DocumentCollection collection = documentCollectionResourceResponse.v;
            if (collection == null) {
                throw new IllegalStateException("Collection cannot be null");
            }
            Mono<Utils.ValueHolder<CollectionRoutingMap>> valueHolderMono = partitionKeyRangeCache
                .tryLookupAsync(collection
                    .getResourceId(),
                    null,
                    null);
            return valueHolderMono.flatMap(collectionRoutingMapValueHolder -> {
                // Bucket each (id, pk) pair under the partition key range that owns its
                // effective partition key.
                Map<PartitionKeyRange, List<Pair<String, PartitionKey>>> partitionRangeItemKeyMap =
                    new HashMap<>();
                CollectionRoutingMap routingMap = collectionRoutingMapValueHolder.v;
                if (routingMap == null) {
                    throw new IllegalStateException("Failed to get routing map.");
                }
                itemKeyList
                    .forEach(stringPartitionKeyPair -> {
                        String effectivePartitionKeyString = PartitionKeyInternalHelper
                            .getEffectivePartitionKeyString(BridgeInternal
                                .getPartitionKeyInternal(stringPartitionKeyPair
                                    .getRight()),
                                collection
                                    .getPartitionKey());
                        PartitionKeyRange range =
                            routingMap.getRangeByEffectivePartitionKey(effectivePartitionKeyString);
                        if (partitionRangeItemKeyMap.get(range) == null) {
                            List<Pair<String, PartitionKey>> list = new ArrayList<>();
                            list.add(stringPartitionKeyPair);
                            partitionRangeItemKeyMap.put(range, list);
                        } else {
                            List<Pair<String, PartitionKey>> pairs =
                                partitionRangeItemKeyMap.get(range);
                            pairs.add(stringPartitionKeyPair);
                            partitionRangeItemKeyMap.put(range, pairs);
                        }
                    });
                Set<PartitionKeyRange> partitionKeyRanges = partitionRangeItemKeyMap.keySet();
                List<PartitionKeyRange> ranges = new ArrayList<>();
                ranges.addAll(partitionKeyRanges);
                // One SQL spec per range, targeting exactly that range's (id, pk) pairs.
                Map<PartitionKeyRange, SqlQuerySpec> rangeQueryMap;
                rangeQueryMap = getRangeQueryMap(partitionRangeItemKeyMap,
                    collection.getPartitionKey());
                // The spec text below is a placeholder required by the execution-context
                // factory; the real per-range queries come from rangeQueryMap.
                String sqlQuery = "this is dummy and only used in creating " +
                    "ParallelDocumentQueryExecutioncontext, but not used";
                return createReadManyQuery(collectionLink,
                    new SqlQuerySpec(sqlQuery),
                    options,
                    Document.class,
                    ResourceType.Document,
                    collection,
                    Collections.unmodifiableMap(rangeQueryMap))
                    .collectList()
                    .map(feedList -> {
                        // Merge all pages: concatenate results and sum request charges.
                        List<T> finalList = new ArrayList<T>();
                        HashMap<String, String> headers = new HashMap<>();
                        double requestCharge = 0;
                        for (FeedResponse<Document> page : feedList) {
                            requestCharge += page.getRequestCharge();
                            finalList.addAll(page.getResults().stream().map(document -> document.toObject(klass)).collect(Collectors.toList()));
                        }
                        headers.put(HttpConstants.HttpHeaders.REQUEST_CHARGE, Double
                            .toString(requestCharge));
                        FeedResponse<T> frp = BridgeInternal
                            .createFeedResponse(finalList, headers);
                        return frp;
                    });
            });
        }
    );
}
/**
 * Builds, for each partition key range, the SQL spec that fetches that range's (id, pk) pairs.
 * When the partition key path is "id" itself, the compact IN-clause form is used.
 */
private Map<PartitionKeyRange, SqlQuerySpec> getRangeQueryMap(
    Map<PartitionKeyRange, List<Pair<String, PartitionKey>>> partitionRangeItemKeyMap,
    PartitionKeyDefinition partitionKeyDefinition) {
    String pkSelector = createPkSelector(partitionKeyDefinition);
    boolean pkIsId = "[\"id\"]".equals(pkSelector);
    Map<PartitionKeyRange, SqlQuerySpec> result = new HashMap<>();
    partitionRangeItemKeyMap.forEach((range, pairs) ->
        result.put(range, pkIsId
            ? createReadManyQuerySpecPartitionKeyIdSame(pairs, pkSelector)
            : createReadManyQuerySpec(pairs, pkSelector)));
    return result;
}
/**
 * Builds a "SELECT * FROM c WHERE c.id IN ( ... )" spec for the special case where the
 * partition key path is the document id. Pairs whose partition-key value does not equal
 * the id are skipped, since such a document cannot exist in this collection.
 *
 * Fix: the original emitted the "," separator based on the pair's position in the input
 * list, so a skipped pair at the end produced a dangling comma and invalid SQL, e.g.
 * "IN ( @param0, )". Separators are now emitted only between parameters actually added.
 *
 * @param idPartitionKeyPairList (id, partition key) pairs to look up
 * @param partitionKeySelector unused here; kept for signature parity with createReadManyQuerySpec
 * @return the parameterized query spec
 */
private SqlQuerySpec createReadManyQuerySpecPartitionKeyIdSame(List<Pair<String, PartitionKey>> idPartitionKeyPairList, String partitionKeySelector) {
    StringBuilder queryStringBuilder = new StringBuilder();
    SqlParameterList parameters = new SqlParameterList();
    queryStringBuilder.append("SELECT * FROM c WHERE c.id IN ( ");
    int addedCount = 0; // number of IN-list entries emitted so far
    for (int i = 0; i < idPartitionKeyPairList.size(); i++) {
        Pair<String, PartitionKey> pair = idPartitionKeyPairList.get(i);
        String idValue = pair.getLeft();
        String idParamName = "@param" + i; // index-based naming kept for parameter stability
        PartitionKey pkValueAsPartitionKey = pair.getRight();
        Object pkValue = BridgeInternal.getPartitionKeyObject(pkValueAsPartitionKey);
        if (!Objects.equals(idValue, pkValue)) {
            // id and partition key disagree; skip — it can never match in this collection.
            continue;
        }
        if (addedCount > 0) {
            queryStringBuilder.append(", ");
        }
        parameters.add(new SqlParameter(idParamName, idValue));
        queryStringBuilder.append(idParamName);
        addedCount++;
    }
    queryStringBuilder.append(" )");
    return new SqlQuerySpec(queryStringBuilder.toString(), parameters);
}
/**
 * Builds the general read-many spec: a disjunction of (c.id = @x AND c[pk] = @y) clauses,
 * one per (id, partition key) pair. Parameter names are index-based; for pair i the
 * partition-key parameter is @param(2i) and the id parameter is @param(2i+1).
 */
private SqlQuerySpec createReadManyQuerySpec(List<Pair<String, PartitionKey>> idPartitionKeyPairList, String partitionKeySelector) {
    SqlParameterList parameterList = new SqlParameterList();
    StringBuilder query = new StringBuilder("SELECT * FROM c WHERE ( ");
    int pairCount = idPartitionKeyPairList.size();
    for (int index = 0; index < pairCount; index++) {
        if (index > 0) {
            query.append(" OR ");
        }
        Pair<String, PartitionKey> entry = idPartitionKeyPairList.get(index);
        String pkParam = "@param" + (2 * index);
        String idParam = "@param" + (2 * index + 1);
        parameterList.add(new SqlParameter(pkParam, BridgeInternal.getPartitionKeyObject(entry.getRight())));
        parameterList.add(new SqlParameter(idParam, entry.getLeft()));
        query.append("(c.id = ")
            .append(idParam)
            .append(" AND  c") // double space preserved from the original formatting
            .append(partitionKeySelector)
            .append(" = ")
            .append(pkParam)
            .append(" )");
    }
    query.append(" )");
    return new SqlQuerySpec(query.toString(), parameterList);
}
/**
 * Converts the partition key definition's paths (e.g. "/pk") into the bracketed selector
 * appended to the document alias in generated queries (e.g. ["pk"] -> c["pk"]).
 * NOTE(review): StringUtils.replace(pathPart, "\"", "\\") substitutes an embedded quote
 * with a lone backslash rather than an escaped quote (\") — looks suspicious for paths
 * containing quotes; confirm the intended escaping before changing.
 */
private String createPkSelector(PartitionKeyDefinition partitionKeyDefinition) {
    return partitionKeyDefinition.getPaths()
        .stream()
        .map(pathPart -> StringUtils.substring(pathPart, 1)) // drop the leading '/'
        .map(pathPart -> StringUtils.replace(pathPart, "\"", "\\")) // see NOTE above
        .map(part -> "[\"" + part + "\"]")
        .collect(Collectors.joining());
}
// Returns the positional query parameter name for the given counter (e.g. "@param3").
// NOTE(review): name contains a typo ("Curent") and the method appears unused in the
// visible portion of this file; kept as-is in case other code references it.
private String getCurentParamName(int paramCnt){
    return "@param" + paramCnt;
}
/**
 * Runs the per-partition read-many plan. The sqlQuery passed here is a placeholder
 * required by the execution-context factory; the real per-range queries come from
 * rangeQueryMap. Note the collection resource id is deliberately passed twice below
 * (factory takes both a collection rid and a resource link parameter).
 */
private <T extends Resource> Flux<FeedResponse<T>> createReadManyQuery(
    String parentResourceLink,
    SqlQuerySpec sqlQuery,
    FeedOptions options,
    Class<T> klass,
    ResourceType resourceTypeEnum,
    DocumentCollection collection,
    Map<PartitionKeyRange, SqlQuerySpec> rangeQueryMap) {
    UUID activityId = Utils.randomUUID();
    IDocumentQueryClient queryClient = DocumentQueryClientImpl(RxDocumentClientImpl.this);
    Flux<? extends IDocumentQueryExecutionContext<T>> executionContext =
        DocumentQueryExecutionContextFactory.createReadManyQueryAsync(queryClient, collection.getResourceId(),
            sqlQuery,
            rangeQueryMap,
            options,
            collection.getResourceId(),
            parentResourceLink,
            activityId,
            klass,
            resourceTypeEnum);
    return executionContext.flatMap(IDocumentQueryExecutionContext::executeAsync);
}
/**
 * Queries documents with a raw SQL string (wrapped into a parameterless spec).
 */
@Override
public Flux<FeedResponse<Document>> queryDocuments(String collectionLink, String query,
    FeedOptions options) {
    SqlQuerySpec querySpec = new SqlQuerySpec(query);
    return queryDocuments(collectionLink, querySpec, options);
}
/**
 * Adapter exposing this client's caches, retry policy, and query execution to the query
 * pipeline through the IDocumentQueryClient interface.
 * NOTE(review): the rxDocumentClientImpl parameter is unused — the anonymous class
 * captures RxDocumentClientImpl.this directly; confirm whether it can be dropped.
 */
private IDocumentQueryClient DocumentQueryClientImpl(RxDocumentClientImpl rxDocumentClientImpl) {
    return new IDocumentQueryClient () {
        @Override
        public RxCollectionCache getCollectionCache() {
            return RxDocumentClientImpl.this.collectionCache;
        }
        @Override
        public RxPartitionKeyRangeCache getPartitionKeyRangeCache() {
            return RxDocumentClientImpl.this.partitionKeyRangeCache;
        }
        @Override
        public IRetryPolicyFactory getResetSessionTokenRetryPolicy() {
            return RxDocumentClientImpl.this.resetSessionTokenRetryPolicy;
        }
        @Override
        public ConsistencyLevel getDefaultConsistencyLevelAsync() {
            // Account-level default consistency, as reported by the gateway configuration.
            return RxDocumentClientImpl.this.gatewayConfigurationReader.getDefaultConsistencyLevel();
        }
        @Override
        public ConsistencyLevel getDesiredConsistencyLevelAsync() {
            return RxDocumentClientImpl.this.consistencyLevel;
        }
        @Override
        public Mono<RxDocumentServiceResponse> executeQueryAsync(RxDocumentServiceRequest request) {
            return RxDocumentClientImpl.this.query(request).single();
        }
        @Override
        public QueryCompatibilityMode getQueryCompatibilityMode() {
            return QueryCompatibilityMode.Default;
        }
        @Override
        public Mono<RxDocumentServiceResponse> readFeedAsync(RxDocumentServiceRequest request) {
            // Not supported by this adapter; callers must not rely on read-feed execution here.
            return null;
        }
    };
}
/**
 * Queries documents in a collection with a parameterized SQL spec; delegates to the
 * shared query pipeline.
 */
@Override
public Flux<FeedResponse<Document>> queryDocuments(String collectionLink, SqlQuerySpec querySpec,
    FeedOptions options) {
    return createQuery(collectionLink, querySpec, options, Document.class, ResourceType.Document);
}
/**
 * Issues a change feed query over the collection's documents.
 */
@Override
public Flux<FeedResponse<Document>> queryDocumentChangeFeed(final String collectionLink,
    final ChangeFeedOptions changeFeedOptions) {
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }
    ChangeFeedQueryImpl<Document> changeFeedQuery = new ChangeFeedQueryImpl<>(this, ResourceType.Document,
        Document.class, collectionLink, changeFeedOptions);
    return changeFeedQuery.executeAsync();
}
/**
 * Reads the partition key range feed of a collection.
 */
@Override
public Flux<FeedResponse<PartitionKeyRange>> readPartitionKeyRanges(final String collectionLink,
    FeedOptions options) {
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }
    String feedPath = Utils.joinPath(collectionLink, Paths.PARTITION_KEY_RANGES_PATH_SEGMENT);
    return readFeed(options, ResourceType.PartitionKeyRange, PartitionKeyRange.class, feedPath);
}
/**
 * Builds the service request for a stored-procedure write under the given collection.
 *
 * @throws IllegalArgumentException when collectionLink is empty or storedProcedure is null
 */
private RxDocumentServiceRequest getStoredProcedureRequest(String collectionLink, StoredProcedure storedProcedure,
    RequestOptions options, OperationType operationType) {
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }
    if (storedProcedure == null) {
        throw new IllegalArgumentException("storedProcedure");
    }
    validateResource(storedProcedure);
    String resourcePath = Utils.joinPath(collectionLink, Paths.STORED_PROCEDURES_PATH_SEGMENT);
    Map<String, String> headers = this.getRequestHeaders(options);
    return RxDocumentServiceRequest.create(operationType, ResourceType.StoredProcedure,
        resourcePath, storedProcedure, headers, options);
}
/**
 * Builds the service request for a user-defined-function write under the given collection.
 *
 * @throws IllegalArgumentException when collectionLink is empty or udf is null
 */
private RxDocumentServiceRequest getUserDefinedFunctionRequest(String collectionLink, UserDefinedFunction udf,
    RequestOptions options, OperationType operationType) {
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }
    if (udf == null) {
        throw new IllegalArgumentException("udf");
    }
    validateResource(udf);
    String resourcePath = Utils.joinPath(collectionLink, Paths.USER_DEFINED_FUNCTIONS_PATH_SEGMENT);
    Map<String, String> headers = this.getRequestHeaders(options);
    return RxDocumentServiceRequest.create(operationType,
        ResourceType.UserDefinedFunction, resourcePath, udf, headers, options);
}
/**
 * Creates a stored procedure in a collection, retrying per the session-token-reset policy.
 */
@Override
public Mono<ResourceResponse<StoredProcedure>> createStoredProcedure(String collectionLink,
    StoredProcedure storedProcedure, RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> createStoredProcedureInternal(collectionLink, storedProcedure, options, retryPolicy),
        retryPolicy);
}
/**
 * Creates a stored procedure; synchronous failures are logged and surfaced via Mono.error.
 */
private Mono<ResourceResponse<StoredProcedure>> createStoredProcedureInternal(String collectionLink,
    StoredProcedure storedProcedure, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        logger.debug("Creating a StoredProcedure. collectionLink: [{}], storedProcedure id [{}]",
            collectionLink, storedProcedure.getId());
        RxDocumentServiceRequest request = getStoredProcedureRequest(collectionLink, storedProcedure, options,
            OperationType.Create);
        // Let the retry policy observe/stamp the request before it is sent.
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        return this.create(request, retryPolicyInstance).map(response -> toResourceResponse(response, StoredProcedure.class));
    } catch (Exception e) {
        logger.debug("Failure in creating a StoredProcedure due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Upserts a stored procedure in a collection, retrying per the session-token-reset policy.
 */
@Override
public Mono<ResourceResponse<StoredProcedure>> upsertStoredProcedure(String collectionLink,
    StoredProcedure storedProcedure, RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> upsertStoredProcedureInternal(collectionLink, storedProcedure, options, retryPolicy),
        retryPolicy);
}
/**
 * Upserts a stored procedure; synchronous failures are logged and surfaced via Mono.error.
 */
private Mono<ResourceResponse<StoredProcedure>> upsertStoredProcedureInternal(String collectionLink,
    StoredProcedure storedProcedure, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        logger.debug("Upserting a StoredProcedure. collectionLink: [{}], storedProcedure id [{}]",
            collectionLink, storedProcedure.getId());
        RxDocumentServiceRequest request = getStoredProcedureRequest(collectionLink, storedProcedure, options,
            OperationType.Upsert);
        // Let the retry policy observe/stamp the request before it is sent.
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        return this.upsert(request, retryPolicyInstance).map(response -> toResourceResponse(response, StoredProcedure.class));
    } catch (Exception e) {
        logger.debug("Failure in upserting a StoredProcedure due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Replaces a stored procedure, retrying per the session-token-reset policy.
 */
@Override
public Mono<ResourceResponse<StoredProcedure>> replaceStoredProcedure(StoredProcedure storedProcedure,
    RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> replaceStoredProcedureInternal(storedProcedure, options, retryPolicy),
        retryPolicy);
}
/**
 * Replaces a stored procedure at its self link; synchronous failures are logged and
 * surfaced via Mono.error.
 */
private Mono<ResourceResponse<StoredProcedure>> replaceStoredProcedureInternal(StoredProcedure storedProcedure,
    RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (storedProcedure == null) {
            throw new IllegalArgumentException("storedProcedure");
        }
        logger.debug("Replacing a StoredProcedure. storedProcedure id [{}]", storedProcedure.getId());
        RxDocumentClientImpl.validateResource(storedProcedure);
        String path = Utils.joinPath(storedProcedure.getSelfLink(), null);
        Map<String, String> requestHeaders = getRequestHeaders(options);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Replace,
            ResourceType.StoredProcedure, path, storedProcedure, requestHeaders, options);
        // Let the retry policy observe/stamp the request before it is sent.
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        return this.replace(request, retryPolicyInstance).map(response -> toResourceResponse(response, StoredProcedure.class));
    } catch (Exception e) {
        logger.debug("Failure in replacing a StoredProcedure due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Deletes a stored procedure by link, retrying per the session-token-reset policy.
 */
@Override
public Mono<ResourceResponse<StoredProcedure>> deleteStoredProcedure(String storedProcedureLink,
    RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> deleteStoredProcedureInternal(storedProcedureLink, options, retryPolicy),
        retryPolicy);
}
/**
 * Deletes a stored procedure by link; synchronous failures are logged and surfaced via
 * Mono.error.
 */
private Mono<ResourceResponse<StoredProcedure>> deleteStoredProcedureInternal(String storedProcedureLink,
    RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(storedProcedureLink)) {
            throw new IllegalArgumentException("storedProcedureLink");
        }
        logger.debug("Deleting a StoredProcedure. storedProcedureLink [{}]", storedProcedureLink);
        String path = Utils.joinPath(storedProcedureLink, null);
        Map<String, String> requestHeaders = this.getRequestHeaders(options);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Delete,
            ResourceType.StoredProcedure, path, requestHeaders, options);
        // Let the retry policy observe/stamp the request before it is sent.
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        return this.delete(request, retryPolicyInstance).map(response -> toResourceResponse(response, StoredProcedure.class));
    } catch (Exception e) {
        logger.debug("Failure in deleting a StoredProcedure due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Reads a stored procedure by link, retrying per the session-token-reset policy.
 */
@Override
public Mono<ResourceResponse<StoredProcedure>> readStoredProcedure(String storedProcedureLink,
    RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> readStoredProcedureInternal(storedProcedureLink, options, retryPolicy),
        retryPolicy);
}
/**
 * Reads a stored procedure by link; synchronous failures are logged and surfaced via
 * Mono.error.
 */
private Mono<ResourceResponse<StoredProcedure>> readStoredProcedureInternal(String storedProcedureLink,
    RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(storedProcedureLink)) {
            throw new IllegalArgumentException("storedProcedureLink");
        }
        logger.debug("Reading a StoredProcedure. storedProcedureLink [{}]", storedProcedureLink);
        String path = Utils.joinPath(storedProcedureLink, null);
        Map<String, String> requestHeaders = this.getRequestHeaders(options);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Read,
            ResourceType.StoredProcedure, path, requestHeaders, options);
        // Let the retry policy observe/stamp the request before it is sent.
        if (retryPolicyInstance != null){
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, StoredProcedure.class));
    } catch (Exception e) {
        logger.debug("Failure in reading a StoredProcedure due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Reads the stored-procedure feed of a collection.
 */
@Override
public Flux<FeedResponse<StoredProcedure>> readStoredProcedures(String collectionLink,
    FeedOptions options) {
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }
    String feedPath = Utils.joinPath(collectionLink, Paths.STORED_PROCEDURES_PATH_SEGMENT);
    return readFeed(options, ResourceType.StoredProcedure, StoredProcedure.class, feedPath);
}
/**
 * Queries stored procedures with a raw SQL string (wrapped into a parameterless spec).
 */
@Override
public Flux<FeedResponse<StoredProcedure>> queryStoredProcedures(String collectionLink, String query,
    FeedOptions options) {
    SqlQuerySpec querySpec = new SqlQuerySpec(query);
    return queryStoredProcedures(collectionLink, querySpec, options);
}
/**
 * Queries stored procedures with a parameterized SQL spec; delegates to the shared
 * query pipeline.
 */
@Override
public Flux<FeedResponse<StoredProcedure>> queryStoredProcedures(String collectionLink,
    SqlQuerySpec querySpec, FeedOptions options) {
    return createQuery(collectionLink, querySpec, options, StoredProcedure.class, ResourceType.StoredProcedure);
}
/**
 * Executes a stored procedure with no request options; delegates to the full overload.
 */
@Override
public Mono<StoredProcedureResponse> executeStoredProcedure(String storedProcedureLink,
    Object[] procedureParams) {
    return this.executeStoredProcedure(storedProcedureLink, null, procedureParams);
}
/**
 * Executes a stored procedure, retrying per the session-token-reset policy.
 */
@Override
public Mono<StoredProcedureResponse> executeStoredProcedure(String storedProcedureLink,
    RequestOptions options, Object[] procedureParams) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> executeStoredProcedureInternal(storedProcedureLink, options, procedureParams, retryPolicy),
        retryPolicy);
}
/**
 * Executes a stored procedure via an ExecuteJavaScript request. Procedure parameters are
 * serialized into the request body; the session token of the response is captured.
 * NOTE(review): the flatMap lambda ignores 'req' and reuses the outer 'request';
 * addPartitionKeyInformation appears to enrich and return the same instance — confirm.
 */
private Mono<StoredProcedureResponse> executeStoredProcedureInternal(String storedProcedureLink,
    RequestOptions options, Object[] procedureParams, DocumentClientRetryPolicy retryPolicy) {
    try {
        logger.debug("Executing a StoredProcedure. storedProcedureLink [{}]", storedProcedureLink);
        String path = Utils.joinPath(storedProcedureLink, null);
        Map<String, String> requestHeaders = getRequestHeaders(options);
        requestHeaders.put(HttpConstants.HttpHeaders.ACCEPT, RuntimeConstants.MediaTypes.JSON);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.ExecuteJavaScript,
            ResourceType.StoredProcedure, path,
            procedureParams != null ? RxDocumentClientImpl.serializeProcedureParams(procedureParams) : "",
            requestHeaders, options);
        Mono<RxDocumentServiceRequest> reqObs = addPartitionKeyInformation(request, null, null, options);
        return reqObs.flatMap(req -> create(request, retryPolicy)
            .map(response -> {
                this.captureSessionToken(request, response);
                return toStoredProcedureResponse(response);
            }));
    } catch (Exception e) {
        logger.debug("Failure in executing a StoredProcedure due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Creates a trigger in a collection, retrying per the session-token-reset policy.
 */
@Override
public Mono<ResourceResponse<Trigger>> createTrigger(String collectionLink, Trigger trigger,
    RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> createTriggerInternal(collectionLink, trigger, options, retryPolicy),
        retryPolicy);
}
/**
 * Creates a trigger; synchronous failures are logged and surfaced via Mono.error.
 */
private Mono<ResourceResponse<Trigger>> createTriggerInternal(String collectionLink, Trigger trigger,
    RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        logger.debug("Creating a Trigger. collectionLink [{}], trigger id [{}]", collectionLink,
            trigger.getId());
        RxDocumentServiceRequest request = getTriggerRequest(collectionLink, trigger, options,
            OperationType.Create);
        // Let the retry policy observe/stamp the request before it is sent.
        if (retryPolicyInstance != null){
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        return this.create(request, retryPolicyInstance).map(response -> toResourceResponse(response, Trigger.class));
    } catch (Exception e) {
        logger.debug("Failure in creating a Trigger due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Upserts a trigger in a collection, retrying per the session-token-reset policy.
 */
@Override
public Mono<ResourceResponse<Trigger>> upsertTrigger(String collectionLink, Trigger trigger,
    RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> upsertTriggerInternal(collectionLink, trigger, options, retryPolicy),
        retryPolicy);
}
/**
 * Upserts a trigger; synchronous failures are logged and surfaced via Mono.error.
 */
private Mono<ResourceResponse<Trigger>> upsertTriggerInternal(String collectionLink, Trigger trigger,
    RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        logger.debug("Upserting a Trigger. collectionLink [{}], trigger id [{}]", collectionLink,
            trigger.getId());
        RxDocumentServiceRequest request = getTriggerRequest(collectionLink, trigger, options,
            OperationType.Upsert);
        // Let the retry policy observe/stamp the request before it is sent.
        if (retryPolicyInstance != null){
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        return this.upsert(request, retryPolicyInstance).map(response -> toResourceResponse(response, Trigger.class));
    } catch (Exception e) {
        logger.debug("Failure in upserting a Trigger due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Builds the service request for a trigger write under the given collection.
 *
 * @throws IllegalArgumentException when collectionLink is empty or trigger is null
 */
private RxDocumentServiceRequest getTriggerRequest(String collectionLink, Trigger trigger, RequestOptions options,
    OperationType operationType) {
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }
    if (trigger == null) {
        throw new IllegalArgumentException("trigger");
    }
    RxDocumentClientImpl.validateResource(trigger);
    String resourcePath = Utils.joinPath(collectionLink, Paths.TRIGGERS_PATH_SEGMENT);
    Map<String, String> headers = getRequestHeaders(options);
    return RxDocumentServiceRequest.create(operationType, ResourceType.Trigger, resourcePath,
        trigger, headers, options);
}
/**
 * Replaces a trigger, retrying per the session-token-reset policy.
 */
@Override
public Mono<ResourceResponse<Trigger>> replaceTrigger(Trigger trigger, RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> replaceTriggerInternal(trigger, options, retryPolicy),
        retryPolicy);
}
/**
 * Replaces a trigger at its self link; synchronous failures are logged and surfaced via
 * Mono.error.
 */
private Mono<ResourceResponse<Trigger>> replaceTriggerInternal(Trigger trigger, RequestOptions options,
    DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (trigger == null) {
            throw new IllegalArgumentException("trigger");
        }
        logger.debug("Replacing a Trigger. trigger id [{}]", trigger.getId());
        RxDocumentClientImpl.validateResource(trigger);
        String path = Utils.joinPath(trigger.getSelfLink(), null);
        Map<String, String> requestHeaders = getRequestHeaders(options);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Replace,
            ResourceType.Trigger, path, trigger, requestHeaders, options);
        // Let the retry policy observe/stamp the request before it is sent.
        if (retryPolicyInstance != null){
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        return this.replace(request, retryPolicyInstance).map(response -> toResourceResponse(response, Trigger.class));
    } catch (Exception e) {
        logger.debug("Failure in replacing a Trigger due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Deletes a trigger by link, retrying per the session-token-reset policy.
 */
@Override
public Mono<ResourceResponse<Trigger>> deleteTrigger(String triggerLink, RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> deleteTriggerInternal(triggerLink, options, retryPolicy),
        retryPolicy);
}
/**
 * Deletes a trigger by link; synchronous failures are logged and surfaced via Mono.error.
 */
private Mono<ResourceResponse<Trigger>> deleteTriggerInternal(String triggerLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(triggerLink)) {
            throw new IllegalArgumentException("triggerLink");
        }
        logger.debug("Deleting a Trigger. triggerLink [{}]", triggerLink);
        String path = Utils.joinPath(triggerLink, null);
        Map<String, String> requestHeaders = getRequestHeaders(options);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Delete,
            ResourceType.Trigger, path, requestHeaders, options);
        // Let the retry policy observe/stamp the request before it is sent.
        if (retryPolicyInstance != null){
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        return this.delete(request, retryPolicyInstance).map(response -> toResourceResponse(response, Trigger.class));
    } catch (Exception e) {
        logger.debug("Failure in deleting a Trigger due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Reads a trigger by link, retrying per the session-token-reset policy.
 */
@Override
public Mono<ResourceResponse<Trigger>> readTrigger(String triggerLink, RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> readTriggerInternal(triggerLink, options, retryPolicy),
        retryPolicy);
}
/**
 * Reads a trigger by link; synchronous failures are logged and surfaced via Mono.error.
 */
private Mono<ResourceResponse<Trigger>> readTriggerInternal(String triggerLink, RequestOptions options,
    DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(triggerLink)) {
            throw new IllegalArgumentException("triggerLink");
        }
        logger.debug("Reading a Trigger. triggerLink [{}]", triggerLink);
        String path = Utils.joinPath(triggerLink, null);
        Map<String, String> requestHeaders = getRequestHeaders(options);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Read,
            ResourceType.Trigger, path, requestHeaders, options);
        // Let the retry policy observe/stamp the request before it is sent.
        if (retryPolicyInstance != null){
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, Trigger.class));
    } catch (Exception e) {
        logger.debug("Failure in reading a Trigger due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Reads the trigger feed of a collection.
 */
@Override
public Flux<FeedResponse<Trigger>> readTriggers(String collectionLink, FeedOptions options) {
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }
    String feedPath = Utils.joinPath(collectionLink, Paths.TRIGGERS_PATH_SEGMENT);
    return readFeed(options, ResourceType.Trigger, Trigger.class, feedPath);
}
/**
 * Queries triggers with a raw SQL string (wrapped into a parameterless spec).
 */
@Override
public Flux<FeedResponse<Trigger>> queryTriggers(String collectionLink, String query,
    FeedOptions options) {
    SqlQuerySpec querySpec = new SqlQuerySpec(query);
    return queryTriggers(collectionLink, querySpec, options);
}
/**
 * Queries triggers with a parameterized SQL spec; delegates to the shared query pipeline.
 */
@Override
public Flux<FeedResponse<Trigger>> queryTriggers(String collectionLink, SqlQuerySpec querySpec,
    FeedOptions options) {
    return createQuery(collectionLink, querySpec, options, Trigger.class, ResourceType.Trigger);
}
/**
 * Creates a user-defined function in a collection, retrying per the session-token-reset policy.
 */
@Override
public Mono<ResourceResponse<UserDefinedFunction>> createUserDefinedFunction(String collectionLink,
    UserDefinedFunction udf, RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> createUserDefinedFunctionInternal(collectionLink, udf, options, retryPolicy),
        retryPolicy);
}
/**
 * Creates a user-defined function; synchronous failures are logged and surfaced via
 * Mono.error.
 */
private Mono<ResourceResponse<UserDefinedFunction>> createUserDefinedFunctionInternal(String collectionLink,
    UserDefinedFunction udf, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        logger.debug("Creating a UserDefinedFunction. collectionLink [{}], udf id [{}]", collectionLink,
            udf.getId());
        RxDocumentServiceRequest request = getUserDefinedFunctionRequest(collectionLink, udf, options,
            OperationType.Create);
        // Let the retry policy observe/stamp the request before it is sent.
        if (retryPolicyInstance != null){
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        return this.create(request, retryPolicyInstance).map(response -> toResourceResponse(response, UserDefinedFunction.class));
    } catch (Exception e) {
        logger.debug("Failure in creating a UserDefinedFunction due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Upserts a user-defined function in a collection, retrying per the session-token-reset policy.
 */
@Override
public Mono<ResourceResponse<UserDefinedFunction>> upsertUserDefinedFunction(String collectionLink,
    UserDefinedFunction udf, RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> upsertUserDefinedFunctionInternal(collectionLink, udf, options, retryPolicy),
        retryPolicy);
}
/**
 * Upserts a user-defined function; synchronous failures are logged and surfaced via
 * Mono.error.
 */
private Mono<ResourceResponse<UserDefinedFunction>> upsertUserDefinedFunctionInternal(String collectionLink,
    UserDefinedFunction udf, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        logger.debug("Upserting a UserDefinedFunction. collectionLink [{}], udf id [{}]", collectionLink,
            udf.getId());
        RxDocumentServiceRequest request = getUserDefinedFunctionRequest(collectionLink, udf, options,
            OperationType.Upsert);
        // Let the retry policy observe/stamp the request before it is sent.
        if (retryPolicyInstance != null){
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        return this.upsert(request, retryPolicyInstance).map(response -> toResourceResponse(response, UserDefinedFunction.class));
    } catch (Exception e) {
        logger.debug("Failure in upserting a UserDefinedFunction due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Replaces a user-defined function, retrying per the session-token-reset policy.
 */
@Override
public Mono<ResourceResponse<UserDefinedFunction>> replaceUserDefinedFunction(UserDefinedFunction udf,
    RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> replaceUserDefinedFunctionInternal(udf, options, retryPolicy),
        retryPolicy);
}
/**
 * Replaces a user-defined function at its self link; synchronous failures are logged and
 * surfaced via Mono.error.
 */
private Mono<ResourceResponse<UserDefinedFunction>> replaceUserDefinedFunctionInternal(UserDefinedFunction udf,
    RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (udf == null) {
            throw new IllegalArgumentException("udf");
        }
        logger.debug("Replacing a UserDefinedFunction. udf id [{}]", udf.getId());
        validateResource(udf);
        String path = Utils.joinPath(udf.getSelfLink(), null);
        Map<String, String> requestHeaders = this.getRequestHeaders(options);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Replace,
            ResourceType.UserDefinedFunction, path, udf, requestHeaders, options);
        // Let the retry policy observe/stamp the request before it is sent.
        if (retryPolicyInstance != null){
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        return this.replace(request, retryPolicyInstance).map(response -> toResourceResponse(response, UserDefinedFunction.class));
    } catch (Exception e) {
        logger.debug("Failure in replacing a UserDefinedFunction due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Deletes a user-defined function by link, retrying per the session-token-reset policy.
 */
@Override
public Mono<ResourceResponse<UserDefinedFunction>> deleteUserDefinedFunction(String udfLink,
    RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> deleteUserDefinedFunctionInternal(udfLink, options, retryPolicy),
        retryPolicy);
}
/** Deletes a user defined function; synchronous failures are routed into the returned Mono. */
private Mono<ResourceResponse<UserDefinedFunction>> deleteUserDefinedFunctionInternal(String udfLink,
RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (StringUtils.isEmpty(udfLink)) {
throw new IllegalArgumentException("udfLink");
}
logger.debug("Deleting a UserDefinedFunction. udfLink [{}]", udfLink);
String path = Utils.joinPath(udfLink, null);
Map<String, String> requestHeaders = this.getRequestHeaders(options);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Delete,
ResourceType.UserDefinedFunction, path, requestHeaders, options);
if (retryPolicyInstance != null){
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.delete(request, retryPolicyInstance).map(response -> toResourceResponse(response, UserDefinedFunction.class));
} catch (Exception e) {
logger.debug("Failure in deleting a UserDefinedFunction due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
/** Reads the user defined function addressed by {@code udfLink}. */
@Override
public Mono<ResourceResponse<UserDefinedFunction>> readUserDefinedFunction(String udfLink,
    RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> readUserDefinedFunctionInternal(udfLink, options, retryPolicy),
        retryPolicy);
}
/** Reads a user defined function; synchronous failures are routed into the returned Mono. */
private Mono<ResourceResponse<UserDefinedFunction>> readUserDefinedFunctionInternal(String udfLink,
RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (StringUtils.isEmpty(udfLink)) {
throw new IllegalArgumentException("udfLink");
}
logger.debug("Reading a UserDefinedFunction. udfLink [{}]", udfLink);
String path = Utils.joinPath(udfLink, null);
Map<String, String> requestHeaders = this.getRequestHeaders(options);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Read,
ResourceType.UserDefinedFunction, path, requestHeaders, options);
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, UserDefinedFunction.class));
} catch (Exception e) {
logger.debug("Failure in reading a UserDefinedFunction due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
/** Reads all user defined functions of a collection as a paginated feed. */
@Override
public Flux<FeedResponse<UserDefinedFunction>> readUserDefinedFunctions(String collectionLink,
FeedOptions options) {
if (StringUtils.isEmpty(collectionLink)) {
throw new IllegalArgumentException("collectionLink");
}
return readFeed(options, ResourceType.UserDefinedFunction, UserDefinedFunction.class,
Utils.joinPath(collectionLink, Paths.USER_DEFINED_FUNCTIONS_PATH_SEGMENT));
}
/** Convenience overload: wraps the raw query string into a {@link SqlQuerySpec}. */
@Override
public Flux<FeedResponse<UserDefinedFunction>> queryUserDefinedFunctions(String collectionLink,
String query, FeedOptions options) {
return queryUserDefinedFunctions(collectionLink, new SqlQuerySpec(query), options);
}
/** Queries user defined functions of a collection with a parameterized query spec. */
@Override
public Flux<FeedResponse<UserDefinedFunction>> queryUserDefinedFunctions(String collectionLink,
SqlQuerySpec querySpec, FeedOptions options) {
return createQuery(collectionLink, querySpec, options, UserDefinedFunction.class, ResourceType.UserDefinedFunction);
}
/** Reads the conflict resource addressed by {@code conflictLink}. */
@Override
public Mono<ResourceResponse<Conflict>> readConflict(String conflictLink, RequestOptions options) {
DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy();
return ObservableHelper.inlineIfPossibleAsObs(() -> readConflictInternal(conflictLink, options, retryPolicyInstance), retryPolicyInstance);
}
/**
 * Reads a conflict resource. Partition key information is resolved asynchronously and
 * attached to the request before it is dispatched.
 */
private Mono<ResourceResponse<Conflict>> readConflictInternal(String conflictLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(conflictLink)) {
            throw new IllegalArgumentException("conflictLink");
        }
        logger.debug("Reading a Conflict. conflictLink [{}]", conflictLink);
        String path = Utils.joinPath(conflictLink, null);
        Map<String, String> requestHeaders = getRequestHeaders(options);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Read,
            ResourceType.Conflict, path, requestHeaders, options);
        Mono<RxDocumentServiceRequest> reqObs = addPartitionKeyInformation(request, null, null, options);
        return reqObs.flatMap(req -> {
            // Dispatch the request emitted by addPartitionKeyInformation ('req'), not the
            // captured 'request' variable, so the partition-key-enriched instance is used.
            if (retryPolicyInstance != null) {
                retryPolicyInstance.onBeforeSendRequest(req);
            }
            return this.read(req, retryPolicyInstance).map(response -> toResourceResponse(response, Conflict.class));
        });
    } catch (Exception e) {
        logger.debug("Failure in reading a Conflict due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/** Reads all conflicts of a collection as a paginated feed. */
@Override
public Flux<FeedResponse<Conflict>> readConflicts(String collectionLink, FeedOptions options) {
if (StringUtils.isEmpty(collectionLink)) {
throw new IllegalArgumentException("collectionLink");
}
return readFeed(options, ResourceType.Conflict, Conflict.class,
Utils.joinPath(collectionLink, Paths.CONFLICTS_PATH_SEGMENT));
}
/** Convenience overload: wraps the raw query string into a {@link SqlQuerySpec}. */
@Override
public Flux<FeedResponse<Conflict>> queryConflicts(String collectionLink, String query,
FeedOptions options) {
return queryConflicts(collectionLink, new SqlQuerySpec(query), options);
}
/** Queries conflicts of a collection with a parameterized query spec. */
@Override
public Flux<FeedResponse<Conflict>> queryConflicts(String collectionLink, SqlQuerySpec querySpec,
FeedOptions options) {
return createQuery(collectionLink, querySpec, options, Conflict.class, ResourceType.Conflict);
}
/** Deletes the conflict resource addressed by {@code conflictLink}. */
@Override
public Mono<ResourceResponse<Conflict>> deleteConflict(String conflictLink, RequestOptions options) {
DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy();
return ObservableHelper.inlineIfPossibleAsObs(() -> deleteConflictInternal(conflictLink, options, retryPolicyInstance), retryPolicyInstance);
}
/**
 * Deletes a conflict resource. Partition key information is resolved asynchronously and
 * attached to the request before it is dispatched.
 */
private Mono<ResourceResponse<Conflict>> deleteConflictInternal(String conflictLink, RequestOptions options,
    DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(conflictLink)) {
            throw new IllegalArgumentException("conflictLink");
        }
        logger.debug("Deleting a Conflict. conflictLink [{}]", conflictLink);
        String path = Utils.joinPath(conflictLink, null);
        Map<String, String> requestHeaders = getRequestHeaders(options);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Delete,
            ResourceType.Conflict, path, requestHeaders, options);
        Mono<RxDocumentServiceRequest> reqObs = addPartitionKeyInformation(request, null, null, options);
        return reqObs.flatMap(req -> {
            // Dispatch the partition-key-enriched request ('req'), not the captured 'request'.
            if (retryPolicyInstance != null) {
                retryPolicyInstance.onBeforeSendRequest(req);
            }
            return this.delete(req, retryPolicyInstance).map(response -> toResourceResponse(response, Conflict.class));
        });
    } catch (Exception e) {
        logger.debug("Failure in deleting a Conflict due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/** Creates a user resource under the given database. */
@Override
public Mono<ResourceResponse<User>> createUser(String databaseLink, User user, RequestOptions options) {
DocumentClientRetryPolicy documentClientRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
return ObservableHelper.inlineIfPossibleAsObs(() -> createUserInternal(databaseLink, user, options, documentClientRetryPolicy), documentClientRetryPolicy);
}
/**
 * Creates a user under the given database. The request is built first because
 * getUserRequest performs the null/argument validation; this way a null {@code user}
 * surfaces as IllegalArgumentException instead of an NPE from the log statement.
 */
private Mono<ResourceResponse<User>> createUserInternal(String databaseLink, User user, RequestOptions options, DocumentClientRetryPolicy documentClientRetryPolicy) {
    try {
        RxDocumentServiceRequest request = getUserRequest(databaseLink, user, options, OperationType.Create);
        logger.debug("Creating a User. databaseLink [{}], user id [{}]", databaseLink, user.getId());
        return this.create(request, documentClientRetryPolicy).map(response -> toResourceResponse(response, User.class));
    } catch (Exception e) {
        logger.debug("Failure in creating a User due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/** Upserts (creates or replaces) a user resource under the given database. */
@Override
public Mono<ResourceResponse<User>> upsertUser(String databaseLink, User user, RequestOptions options) {
DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy();
return ObservableHelper.inlineIfPossibleAsObs(() -> upsertUserInternal(databaseLink, user, options, retryPolicyInstance), retryPolicyInstance);
}
/**
 * Upserts a user under the given database. The request is built first because
 * getUserRequest performs the null/argument validation; this way a null {@code user}
 * surfaces as IllegalArgumentException instead of an NPE from the log statement.
 */
private Mono<ResourceResponse<User>> upsertUserInternal(String databaseLink, User user, RequestOptions options,
    DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        RxDocumentServiceRequest request = getUserRequest(databaseLink, user, options, OperationType.Upsert);
        logger.debug("Upserting a User. databaseLink [{}], user id [{}]", databaseLink, user.getId());
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        return this.upsert(request, retryPolicyInstance).map(response -> toResourceResponse(response, User.class));
    } catch (Exception e) {
        logger.debug("Failure in upserting a User due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Builds a service request targeting the users collection of the given database.
 * Validates both arguments and the user resource before constructing the request.
 */
private RxDocumentServiceRequest getUserRequest(String databaseLink, User user, RequestOptions options,
    OperationType operationType) {
    if (StringUtils.isEmpty(databaseLink)) {
        throw new IllegalArgumentException("databaseLink");
    }
    if (user == null) {
        throw new IllegalArgumentException("user");
    }
    RxDocumentClientImpl.validateResource(user);

    final String usersPath = Utils.joinPath(databaseLink, Paths.USERS_PATH_SEGMENT);
    final Map<String, String> headers = getRequestHeaders(options);
    return RxDocumentServiceRequest.create(operationType, ResourceType.User, usersPath, user, headers, options);
}
/** Replaces an existing user resource in place. */
@Override
public Mono<ResourceResponse<User>> replaceUser(User user, RequestOptions options) {
DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy();
return ObservableHelper.inlineIfPossibleAsObs(() -> replaceUserInternal(user, options, retryPolicyInstance), retryPolicyInstance);
}
/** Replaces a user resource, addressed by the user's self link. */
private Mono<ResourceResponse<User>> replaceUserInternal(User user, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (user == null) {
throw new IllegalArgumentException("user");
}
logger.debug("Replacing a User. user id [{}]", user.getId());
RxDocumentClientImpl.validateResource(user);
String path = Utils.joinPath(user.getSelfLink(), null);
Map<String, String> requestHeaders = getRequestHeaders(options);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Replace,
ResourceType.User, path, user, requestHeaders, options);
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.replace(request, retryPolicyInstance).map(response -> toResourceResponse(response, User.class));
} catch (Exception e) {
logger.debug("Failure in replacing a User due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
/** Deletes the user resource addressed by {@code userLink}. */
// Added @Override for consistency: every sibling AsyncDocumentClient method carries it.
@Override
public Mono<ResourceResponse<User>> deleteUser(String userLink, RequestOptions options) {
    DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(() -> deleteUserInternal(userLink, options, retryPolicyInstance), retryPolicyInstance);
}
/** Deletes a user resource; synchronous failures are routed into the returned Mono. */
private Mono<ResourceResponse<User>> deleteUserInternal(String userLink, RequestOptions options,
DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (StringUtils.isEmpty(userLink)) {
throw new IllegalArgumentException("userLink");
}
logger.debug("Deleting a User. userLink [{}]", userLink);
String path = Utils.joinPath(userLink, null);
Map<String, String> requestHeaders = getRequestHeaders(options);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Delete,
ResourceType.User, path, requestHeaders, options);
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.delete(request, retryPolicyInstance).map(response -> toResourceResponse(response, User.class));
} catch (Exception e) {
logger.debug("Failure in deleting a User due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
/** Reads the user resource addressed by {@code userLink}. */
@Override
public Mono<ResourceResponse<User>> readUser(String userLink, RequestOptions options) {
DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy();
return ObservableHelper.inlineIfPossibleAsObs(() -> readUserInternal(userLink, options, retryPolicyInstance), retryPolicyInstance);
}
/** Reads a user resource; synchronous failures are routed into the returned Mono. */
private Mono<ResourceResponse<User>> readUserInternal(String userLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (StringUtils.isEmpty(userLink)) {
throw new IllegalArgumentException("userLink");
}
logger.debug("Reading a User. userLink [{}]", userLink);
String path = Utils.joinPath(userLink, null);
Map<String, String> requestHeaders = getRequestHeaders(options);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Read,
ResourceType.User, path, requestHeaders, options);
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, User.class));
} catch (Exception e) {
logger.debug("Failure in reading a User due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
/** Reads all users of a database as a paginated feed. */
@Override
public Flux<FeedResponse<User>> readUsers(String databaseLink, FeedOptions options) {
if (StringUtils.isEmpty(databaseLink)) {
throw new IllegalArgumentException("databaseLink");
}
return readFeed(options, ResourceType.User, User.class,
Utils.joinPath(databaseLink, Paths.USERS_PATH_SEGMENT));
}
/** Convenience overload: wraps the raw query string into a {@link SqlQuerySpec}. */
@Override
public Flux<FeedResponse<User>> queryUsers(String databaseLink, String query, FeedOptions options) {
return queryUsers(databaseLink, new SqlQuerySpec(query), options);
}
/** Queries users of a database with a parameterized query spec. */
@Override
public Flux<FeedResponse<User>> queryUsers(String databaseLink, SqlQuerySpec querySpec,
FeedOptions options) {
return createQuery(databaseLink, querySpec, options, User.class, ResourceType.User);
}
/** Creates a permission resource under the given user. */
@Override
public Mono<ResourceResponse<Permission>> createPermission(String userLink, Permission permission,
    RequestOptions options) {
    DocumentClientRetryPolicy documentClientRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    // Pass the SAME retry-policy instance to both the internal call and the retry wrapper.
    // The original created a second, distinct policy for the wrapper, so the instance that
    // received onBeforeSendRequest was not the one driving the retries — unlike every
    // sibling method in this class.
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> createPermissionInternal(userLink, permission, options, documentClientRetryPolicy),
        documentClientRetryPolicy);
}
/**
 * Creates a permission under the given user. The request is built first because
 * getPermissionRequest performs the null/argument validation; this way a null
 * {@code permission} surfaces as IllegalArgumentException instead of an NPE from the log.
 */
private Mono<ResourceResponse<Permission>> createPermissionInternal(String userLink, Permission permission,
    RequestOptions options, DocumentClientRetryPolicy documentClientRetryPolicy) {
    try {
        RxDocumentServiceRequest request = getPermissionRequest(userLink, permission, options,
            OperationType.Create);
        logger.debug("Creating a Permission. userLink [{}], permission id [{}]", userLink, permission.getId());
        return this.create(request, documentClientRetryPolicy).map(response -> toResourceResponse(response, Permission.class));
    } catch (Exception e) {
        logger.debug("Failure in creating a Permission due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/** Upserts (creates or replaces) a permission resource under the given user. */
@Override
public Mono<ResourceResponse<Permission>> upsertPermission(String userLink, Permission permission,
RequestOptions options) {
DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy();
return ObservableHelper.inlineIfPossibleAsObs(() -> upsertPermissionInternal(userLink, permission, options, retryPolicyInstance), retryPolicyInstance);
}
/**
 * Upserts a permission under the given user. The request is built first because
 * getPermissionRequest performs the null/argument validation; this way a null
 * {@code permission} surfaces as IllegalArgumentException instead of an NPE from the log.
 */
private Mono<ResourceResponse<Permission>> upsertPermissionInternal(String userLink, Permission permission,
    RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        RxDocumentServiceRequest request = getPermissionRequest(userLink, permission, options,
            OperationType.Upsert);
        logger.debug("Upserting a Permission. userLink [{}], permission id [{}]", userLink, permission.getId());
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        return this.upsert(request, retryPolicyInstance).map(response -> toResourceResponse(response, Permission.class));
    } catch (Exception e) {
        logger.debug("Failure in upserting a Permission due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Builds a service request targeting the permissions collection of the given user.
 * Validates both arguments and the permission resource before constructing the request.
 */
private RxDocumentServiceRequest getPermissionRequest(String userLink, Permission permission,
    RequestOptions options, OperationType operationType) {
    if (StringUtils.isEmpty(userLink)) {
        throw new IllegalArgumentException("userLink");
    }
    if (permission == null) {
        throw new IllegalArgumentException("permission");
    }
    RxDocumentClientImpl.validateResource(permission);

    final String permissionsPath = Utils.joinPath(userLink, Paths.PERMISSIONS_PATH_SEGMENT);
    final Map<String, String> headers = getRequestHeaders(options);
    return RxDocumentServiceRequest.create(operationType, ResourceType.Permission, permissionsPath,
        permission, headers, options);
}
/** Replaces an existing permission resource in place. */
@Override
public Mono<ResourceResponse<Permission>> replacePermission(Permission permission, RequestOptions options) {
DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy();
return ObservableHelper.inlineIfPossibleAsObs(() -> replacePermissionInternal(permission, options, retryPolicyInstance), retryPolicyInstance);
}
/** Replaces a permission resource, addressed by the permission's self link. */
private Mono<ResourceResponse<Permission>> replacePermissionInternal(Permission permission, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (permission == null) {
throw new IllegalArgumentException("permission");
}
logger.debug("Replacing a Permission. permission id [{}]", permission.getId());
RxDocumentClientImpl.validateResource(permission);
String path = Utils.joinPath(permission.getSelfLink(), null);
Map<String, String> requestHeaders = getRequestHeaders(options);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Replace,
ResourceType.Permission, path, permission, requestHeaders, options);
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.replace(request, retryPolicyInstance).map(response -> toResourceResponse(response, Permission.class));
} catch (Exception e) {
logger.debug("Failure in replacing a Permission due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
/** Deletes the permission resource addressed by {@code permissionLink}. */
@Override
public Mono<ResourceResponse<Permission>> deletePermission(String permissionLink, RequestOptions options) {
DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy();
return ObservableHelper.inlineIfPossibleAsObs(() -> deletePermissionInternal(permissionLink, options, retryPolicyInstance), retryPolicyInstance);
}
/** Deletes a permission resource; synchronous failures are routed into the returned Mono. */
private Mono<ResourceResponse<Permission>> deletePermissionInternal(String permissionLink, RequestOptions options,
DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (StringUtils.isEmpty(permissionLink)) {
throw new IllegalArgumentException("permissionLink");
}
logger.debug("Deleting a Permission. permissionLink [{}]", permissionLink);
String path = Utils.joinPath(permissionLink, null);
Map<String, String> requestHeaders = getRequestHeaders(options);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Delete,
ResourceType.Permission, path, requestHeaders, options);
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.delete(request, retryPolicyInstance).map(response -> toResourceResponse(response, Permission.class));
} catch (Exception e) {
logger.debug("Failure in deleting a Permission due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
/** Reads the permission resource addressed by {@code permissionLink}. */
@Override
public Mono<ResourceResponse<Permission>> readPermission(String permissionLink, RequestOptions options) {
DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy();
return ObservableHelper.inlineIfPossibleAsObs(() -> readPermissionInternal(permissionLink, options, retryPolicyInstance), retryPolicyInstance);
}
/** Reads a permission resource; synchronous failures are routed into the returned Mono. */
private Mono<ResourceResponse<Permission>> readPermissionInternal(String permissionLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance ) {
try {
if (StringUtils.isEmpty(permissionLink)) {
throw new IllegalArgumentException("permissionLink");
}
logger.debug("Reading a Permission. permissionLink [{}]", permissionLink);
String path = Utils.joinPath(permissionLink, null);
Map<String, String> requestHeaders = getRequestHeaders(options);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Read,
ResourceType.Permission, path, requestHeaders, options);
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, Permission.class));
} catch (Exception e) {
logger.debug("Failure in reading a Permission due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
/** Reads all permissions of a user as a paginated feed. */
@Override
public Flux<FeedResponse<Permission>> readPermissions(String userLink, FeedOptions options) {
if (StringUtils.isEmpty(userLink)) {
throw new IllegalArgumentException("userLink");
}
return readFeed(options, ResourceType.Permission, Permission.class,
Utils.joinPath(userLink, Paths.PERMISSIONS_PATH_SEGMENT));
}
/** Convenience overload: wraps the raw query string into a {@link SqlQuerySpec}. */
@Override
public Flux<FeedResponse<Permission>> queryPermissions(String userLink, String query,
FeedOptions options) {
return queryPermissions(userLink, new SqlQuerySpec(query), options);
}
/** Queries permissions of a user with a parameterized query spec. */
@Override
public Flux<FeedResponse<Permission>> queryPermissions(String userLink, SqlQuerySpec querySpec,
FeedOptions options) {
return createQuery(userLink, querySpec, options, Permission.class, ResourceType.Permission);
}
/** Replaces an offer (throughput) resource in place. */
@Override
public Mono<ResourceResponse<Offer>> replaceOffer(Offer offer) {
DocumentClientRetryPolicy documentClientRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
return ObservableHelper.inlineIfPossibleAsObs(() -> replaceOfferInternal(offer, documentClientRetryPolicy), documentClientRetryPolicy);
}
/** Replaces an offer resource, addressed by the offer's self link. No custom headers apply. */
private Mono<ResourceResponse<Offer>> replaceOfferInternal(Offer offer, DocumentClientRetryPolicy documentClientRetryPolicy) {
try {
if (offer == null) {
throw new IllegalArgumentException("offer");
}
logger.debug("Replacing an Offer. offer id [{}]", offer.getId());
RxDocumentClientImpl.validateResource(offer);
String path = Utils.joinPath(offer.getSelfLink(), null);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Replace,
ResourceType.Offer, path, offer, null, null);
return this.replace(request, documentClientRetryPolicy).map(response -> toResourceResponse(response, Offer.class));
} catch (Exception e) {
logger.debug("Failure in replacing an Offer due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
/** Reads the offer resource addressed by {@code offerLink}. */
@Override
public Mono<ResourceResponse<Offer>> readOffer(String offerLink) {
DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy();
return ObservableHelper.inlineIfPossibleAsObs(() -> readOfferInternal(offerLink, retryPolicyInstance), retryPolicyInstance);
}
/** Reads an offer resource; synchronous failures are routed into the returned Mono. */
private Mono<ResourceResponse<Offer>> readOfferInternal(String offerLink, DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (StringUtils.isEmpty(offerLink)) {
throw new IllegalArgumentException("offerLink");
}
logger.debug("Reading an Offer. offerLink [{}]", offerLink);
String path = Utils.joinPath(offerLink, null);
// The HashMap cast disambiguates the overload that takes a header map.
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Read,
ResourceType.Offer, path, (HashMap<String, String>)null, null);
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, Offer.class));
} catch (Exception e) {
logger.debug("Failure in reading an Offer due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
/** Reads all offers of the account as a paginated feed. */
@Override
public Flux<FeedResponse<Offer>> readOffers(FeedOptions options) {
return readFeed(options, ResourceType.Offer, Offer.class,
Utils.joinPath(Paths.OFFERS_PATH_SEGMENT, null));
}
/**
 * Generic paginated read-feed for resources that live under a collection. Unlike
 * {@link #readFeed}, each page request first resolves the owning collection and attaches
 * partition key information before dispatch.
 *
 * @param options feed options; a default instance is used when null
 * @param resourceType the service resource type being read
 * @param klass the client-side type each item is deserialized into
 * @param resourceLink the feed's resource link
 */
private <T extends Resource> Flux<FeedResponse<T>> readFeedCollectionChild(FeedOptions options, ResourceType resourceType,
Class<T> klass, String resourceLink) {
if (options == null) {
options = new FeedOptions();
}
// -1 means "let the service pick the page size".
int maxPageSize = options.maxItemCount() != null ? options.maxItemCount() : -1;
final FeedOptions finalFeedOptions = options;
RequestOptions requestOptions = new RequestOptions();
requestOptions.setPartitionKey(options.partitionKey());
// Builds one page request; the continuation token threads pagination state through.
BiFunction<String, Integer, RxDocumentServiceRequest> createRequestFunc = (continuationToken, pageSize) -> {
Map<String, String> requestHeaders = new HashMap<>();
if (continuationToken != null) {
requestHeaders.put(HttpConstants.HttpHeaders.CONTINUATION, continuationToken);
}
requestHeaders.put(HttpConstants.HttpHeaders.PAGE_SIZE, Integer.toString(pageSize));
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.ReadFeed,
resourceType, resourceLink, requestHeaders, finalFeedOptions);
return request;
};
// Executes one page request: resolve collection, attach partition key, then read.
Function<RxDocumentServiceRequest, Mono<FeedResponse<T>>> executeFunc = request -> {
return ObservableHelper.inlineIfPossibleAsObs(() -> {
Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = this.collectionCache.resolveCollectionAsync(request);
Mono<RxDocumentServiceRequest> requestObs = this.addPartitionKeyInformation(request, null, null, requestOptions, collectionObs);
return requestObs.flatMap(req -> this.readFeed(req)
.map(response -> toFeedResponsePage(response, klass)));
}, this.resetSessionTokenRetryPolicy.getRequestPolicy());
};
return Paginator.getPaginatedQueryResultAsObservable(options, createRequestFunc, executeFunc, klass, maxPageSize);
}
/**
 * Generic paginated read-feed for top-level resources (no collection resolution or
 * partition key handling — see {@link #readFeedCollectionChild} for collection children).
 */
private <T extends Resource> Flux<FeedResponse<T>> readFeed(FeedOptions options, ResourceType resourceType, Class<T> klass, String resourceLink) {
if (options == null) {
options = new FeedOptions();
}
// -1 means "let the service pick the page size".
int maxPageSize = options.maxItemCount() != null ? options.maxItemCount() : -1;
final FeedOptions finalFeedOptions = options;
BiFunction<String, Integer, RxDocumentServiceRequest> createRequestFunc = (continuationToken, pageSize) -> {
Map<String, String> requestHeaders = new HashMap<>();
if (continuationToken != null) {
requestHeaders.put(HttpConstants.HttpHeaders.CONTINUATION, continuationToken);
}
requestHeaders.put(HttpConstants.HttpHeaders.PAGE_SIZE, Integer.toString(pageSize));
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.ReadFeed,
resourceType, resourceLink, requestHeaders, finalFeedOptions);
return request;
};
Function<RxDocumentServiceRequest, Mono<FeedResponse<T>>> executeFunc = request -> {
return ObservableHelper.inlineIfPossibleAsObs(() -> readFeed(request).map(response -> toFeedResponsePage(response, klass)),
this.resetSessionTokenRetryPolicy.getRequestPolicy());
};
return Paginator.getPaginatedQueryResultAsObservable(options, createRequestFunc, executeFunc, klass, maxPageSize);
}
/** Convenience overload: wraps the raw query string into a {@link SqlQuerySpec}. */
@Override
public Flux<FeedResponse<Offer>> queryOffers(String query, FeedOptions options) {
return queryOffers(new SqlQuerySpec(query), options);
}
/** Queries account offers with a parameterized query spec (no parent resource link). */
@Override
public Flux<FeedResponse<Offer>> queryOffers(SqlQuerySpec querySpec, FeedOptions options) {
return createQuery(null, querySpec, options, Offer.class, ResourceType.Offer);
}
/** Reads the database account metadata from the current endpoint. */
@Override
public Mono<DatabaseAccount> getDatabaseAccount() {
DocumentClientRetryPolicy documentClientRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
return ObservableHelper.inlineIfPossibleAsObs(() -> getDatabaseAccountInternal(documentClientRetryPolicy), documentClientRetryPolicy);
}
/** Issues the database-account read; an empty path targets the account root. */
private Mono<DatabaseAccount> getDatabaseAccountInternal(DocumentClientRetryPolicy documentClientRetryPolicy) {
try {
logger.debug("Getting Database Account");
// The HashMap cast disambiguates the overload that takes a header map.
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Read,
ResourceType.DatabaseAccount, "",
(HashMap<String, String>) null,
null);
return this.read(request, documentClientRetryPolicy).map(response -> toDatabaseAccount(response));
} catch (Exception e) {
logger.debug("Failure in getting Database Account due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
/** Returns the session container that tracks session tokens for this client. */
public Object getSession() {
return this.sessionContainer;
}
/**
 * Replaces the session container.
 * NOTE: the argument is unconditionally cast — callers must pass a {@link SessionContainer}.
 */
public void setSession(Object sessionContainer) {
this.sessionContainer = (SessionContainer) sessionContainer;
}
/** Returns the partition key range cache used for routing. */
public RxPartitionKeyRangeCache getPartitionKeyRangeCache() {
return partitionKeyRangeCache;
}
/**
 * Retrieves the {@link DatabaseAccount} from a specific endpoint (bypassing global endpoint
 * resolution) and refreshes the multi-write flag from the returned account.
 *
 * @param endpoint the endpoint to query directly via {@code setEndpointOverride}
 */
public Flux<DatabaseAccount> getDatabaseAccountFromEndpoint(URI endpoint) {
    return Flux.defer(() -> {
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Read,
            ResourceType.DatabaseAccount, "", null, (Object) null);
        this.populateHeaders(request, RequestVerb.GET);
        request.setEndpointOverride(endpoint);
        return this.gatewayProxy.processMessage(request).doOnError(e -> {
            // Parameterized SLF4J logging instead of eager String.format; message text unchanged.
            logger.warn("Failed to retrieve database account information. {}",
                e.getCause() != null
                    ? e.getCause().toString()
                    : e.toString());
        }).map(rsp -> rsp.getResource(DatabaseAccount.class))
          .doOnNext(databaseAccount -> {
              // Multi-write is honored only when both the client policy and the account enable it.
              this.useMultipleWriteLocations = this.connectionPolicy.getUsingMultipleWriteLocations()
                  && BridgeInternal.isEnableMultipleWriteLocations(databaseAccount);
          });
    });
}
/**
 * Selects the transport for a request. Certain requests must be routed through the gateway
 * even when the client connectivity mode is direct: offers, partition key ranges,
 * non-execute script operations, master-resource create/upsert/delete, collection
 * replace/read, and cross-partition queries without a resolved partition key range.
 *
 * @param request the service request to route
 * @return the gateway proxy or the direct store model
 */
private RxStoreModel getStoreProxy(RxDocumentServiceRequest request) {
// Explicit per-request override always wins.
if (request.UseGatewayMode) {
return this.gatewayProxy;
}
ResourceType resourceType = request.getResourceType();
OperationType operationType = request.getOperationType();
if (resourceType == ResourceType.Offer ||
resourceType.isScript() && operationType != OperationType.ExecuteJavaScript ||
resourceType == ResourceType.PartitionKeyRange) {
return this.gatewayProxy;
}
if (operationType == OperationType.Create
|| operationType == OperationType.Upsert) {
if (resourceType == ResourceType.Database ||
resourceType == ResourceType.User ||
resourceType == ResourceType.DocumentCollection ||
resourceType == ResourceType.Permission) {
return this.gatewayProxy;
} else {
return this.storeModel;
}
} else if (operationType == OperationType.Delete) {
if (resourceType == ResourceType.Database ||
resourceType == ResourceType.User ||
resourceType == ResourceType.DocumentCollection) {
return this.gatewayProxy;
} else {
return this.storeModel;
}
} else if (operationType == OperationType.Replace) {
if (resourceType == ResourceType.DocumentCollection) {
return this.gatewayProxy;
} else {
return this.storeModel;
}
} else if (operationType == OperationType.Read) {
if (resourceType == ResourceType.DocumentCollection) {
return this.gatewayProxy;
} else {
return this.storeModel;
}
} else {
// Queries over collection children with no resolved partition key range go to the
// gateway, which can fan them out across partitions.
if ((request.getOperationType() == OperationType.Query || request.getOperationType() == OperationType.SqlQuery) &&
Utils.isCollectionChild(request.getResourceType())) {
if (request.getPartitionKeyRangeIdentity() == null) {
return this.gatewayProxy;
}
}
return this.storeModel;
}
}
/**
 * Shuts the client down: closes the global endpoint manager and store client factory
 * quietly, then shuts down the HTTP client (logging, not propagating, any failure).
 */
@Override
public void close() {
logger.info("Shutting down ...");
logger.info("Closing Global Endpoint Manager ...");
LifeCycleUtils.closeQuietly(this.globalEndpointManager);
logger.info("Closing StoreClientFactory ...");
LifeCycleUtils.closeQuietly(this.storeClientFactory);
logger.info("Shutting down reactorHttpClient ...");
try {
this.reactorHttpClient.shutdown();
} catch (Exception e) {
// Best-effort shutdown: failure here should not prevent the rest of close() from completing.
logger.warn("shutting down reactorHttpClient failed", e);
}
logger.info("Shutting down completed.");
}
}
does this mean there is a window of time where there is an invalid result in the cache? | private Mono<DatabaseAccount> getDatabaseAccountAsync(URI serviceEndpoint) {
final GlobalEndpointManager that = this;
Callable<Mono<DatabaseAccount>> fetchDatabaseAccount = () -> {
return that.owner.getDatabaseAccountFromEndpoint(serviceEndpoint).doOnNext(databaseAccount -> {
if(databaseAccount != null) {
this.latestDatabaseAccount = databaseAccount;
}
logger.debug("account retrieved: {}", databaseAccount);
}).single();
};
Mono<DatabaseAccount> obsoleteValueMono = databaseAccountAsyncCache.getAsync(StringUtils.EMPTY, null, fetchDatabaseAccount);
return obsoleteValueMono.flatMap(obsoleteValue -> {
if (firstTimeDatabaseAccountInitialization.compareAndSet(true, false)) {
return Mono.just(obsoleteValue);
}
return databaseAccountAsyncCache.getAsync(StringUtils.EMPTY, obsoleteValue, fetchDatabaseAccount).doOnError(t -> {
databaseAccountAsyncCache.set(StringUtils.EMPTY, obsoleteValue);
});
});
} | databaseAccountAsyncCache.set(StringUtils.EMPTY, obsoleteValue); | private Mono<DatabaseAccount> getDatabaseAccountAsync(URI serviceEndpoint) {
/**
 * Fetches the {@link DatabaseAccount} from the given service endpoint, records the most
 * recent non-null result in {@code latestDatabaseAccount}, and errors if the upstream is empty.
 */
private Mono<DatabaseAccount> getDatabaseAccountAsync(URI serviceEndpoint) {
return this.owner.getDatabaseAccountFromEndpoint(serviceEndpoint)
.doOnNext(databaseAccount -> {
if(databaseAccount != null) {
// Cache the snapshot for synchronous readers (getLatestDatabaseAccount).
this.latestDatabaseAccount = databaseAccount;
}
logger.debug("account retrieved: {}", databaseAccount);
}).single();
} | class GlobalEndpointManager implements AutoCloseable {
private static final Logger logger = LoggerFactory.getLogger(GlobalEndpointManager.class);
private final int backgroundRefreshLocationTimeIntervalInMS;
private final LocationCache locationCache;
private final URI defaultEndpoint;
private final ConnectionPolicy connectionPolicy;
private final DatabaseAccountManagerInternal owner;
private final AtomicBoolean isRefreshing;
private final AtomicBoolean refreshInBackground;
private final ExecutorService executor = Executors.newSingleThreadExecutor();
private final Scheduler scheduler = Schedulers.fromExecutor(executor);
private volatile boolean isClosed;
private final AsyncCache<String, DatabaseAccount> databaseAccountAsyncCache;
private AtomicBoolean firstTimeDatabaseAccountInitialization = new AtomicBoolean(true);
private volatile DatabaseAccount latestDatabaseAccount;
/**
 * Creates the endpoint manager with a location cache seeded from the
 * connection policy's preferred locations (empty list when none are set).
 *
 * @param owner            provider of the service endpoint and database account lookups
 * @param connectionPolicy source of preferred locations, endpoint discovery and
 *                         multi-write settings
 * @param configs          supplies the unavailable-location expiration used as the
 *                         background refresh interval
 * @throws IllegalArgumentException wrapping any failure during initialization
 *         (NOTE(review): the broad catch also converts programming errors into
 *         IllegalArgumentException — confirm this is intentional)
 */
public GlobalEndpointManager(DatabaseAccountManagerInternal owner, ConnectionPolicy connectionPolicy, Configs configs) {
    // Refresh interval is the unavailable-location expiration, converted to ms.
    this.backgroundRefreshLocationTimeIntervalInMS = configs.getUnavailableLocationsExpirationTimeInSeconds() * 1000;
    this.databaseAccountAsyncCache = new AsyncCache<>();
    try {
        this.locationCache = new LocationCache(
            new ArrayList<>(connectionPolicy.getPreferredLocations() != null ?
                connectionPolicy.getPreferredLocations():
                Collections.emptyList()
            ),
            owner.getServiceEndpoint(),
            connectionPolicy.getEnableEndpointDiscovery(),
            BridgeInternal.getUseMultipleWriteLocations(connectionPolicy),
            configs);
        this.owner = owner;
        this.defaultEndpoint = owner.getServiceEndpoint();
        this.connectionPolicy = connectionPolicy;
        this.isRefreshing = new AtomicBoolean(false);
        this.refreshInBackground = new AtomicBoolean(false);
        this.isClosed = false;
    } catch (Exception e) {
        throw new IllegalArgumentException(e);
    }
}
/**
 * Performs the initial, blocking location refresh (delay of 0) so the
 * location cache is populated before the client serves requests.
 */
public void init() {
    startRefreshLocationTimerAsync(true).block();
}
/** @return the current ordered, unmodifiable list of read endpoints from the location cache. */
public UnmodifiableList<URI> getReadEndpoints() {
    return this.locationCache.getReadEndpoints();
}
/** @return the current ordered, unmodifiable list of write endpoints from the location cache. */
public UnmodifiableList<URI> getWriteEndpoints() {
    return this.locationCache.getWriteEndpoints();
}
/**
 * Fetches the database account, trying the global (default) endpoint first
 * and, on failure, each regional endpoint derived from the preferred
 * locations in order, returning the first success.
 *
 * @param defaultEndpoint      the global endpoint tried first
 * @param locations            preferred location names used to build regional fallback endpoints
 * @param getDatabaseAccountFn per-endpoint fetch function
 * @return the first successfully retrieved account; errors if every endpoint fails
 */
public static Mono<DatabaseAccount> getDatabaseAccountFromAnyLocationsAsync(
    URI defaultEndpoint, List<String> locations, Function<URI, Mono<DatabaseAccount>> getDatabaseAccountFn) {
    return getDatabaseAccountFn.apply(defaultEndpoint).onErrorResume(
        e -> {
            logger.error("Fail to reach global gateway [{}], [{}]", defaultEndpoint, e.getMessage());
            if (locations.isEmpty()) {
                return Mono.error(e);
            }
            // Try each regional endpoint lazily; concatDelayError keeps going past
            // failures, take(1).single() stops at the first account retrieved.
            Flux<Flux<DatabaseAccount>> obs = Flux.range(0, locations.size())
                .map(index -> getDatabaseAccountFn.apply(LocationHelper.getLocationEndpoint(defaultEndpoint, locations.get(index))).flux());
            Mono<DatabaseAccount> res = Flux.concatDelayError(obs).take(1).single();
            return res.doOnError(
                innerE -> logger.error("Fail to reach location any of locations {} {}", String.join(",", locations), innerE.getMessage()));
        });
}
/** Resolves the endpoint the given request should be routed to, per the location cache. */
public URI resolveServiceEndpoint(RxDocumentServiceRequest request) {
    return this.locationCache.resolveServiceEndpoint(request);
}
/**
 * Marks the given endpoint unavailable for reads by delegating to the
 * location cache, so subsequent reads are routed to other endpoints.
 *
 * @param endpoint the read endpoint to mark unavailable
 */
public void markEndpointUnavailableForRead(URI endpoint) {
    logger.debug("Marking endpoint {} unavailable for read",endpoint);
    // Fixed: removed stray empty statement (double semicolon).
    this.locationCache.markEndpointUnavailableForRead(endpoint);
}
/**
 * Marks the given endpoint unavailable for writes by delegating to the
 * location cache, so subsequent writes are routed to other endpoints.
 *
 * @param endpoint the write endpoint to mark unavailable
 */
public void markEndpointUnavailableForWrite(URI endpoint) {
    logger.debug("Marking endpoint {} unavailable for Write",endpoint);
    this.locationCache.markEndpointUnavailableForWrite(endpoint);
}
/**
 * Whether the given request may be served by any of multiple write locations.
 * NOTE(review): name violates lowerCamelCase convention, but it is public API —
 * renaming would break callers; consider deprecating in favor of
 * canUseMultipleWriteLocations in a future release.
 */
public boolean CanUseMultipleWriteLocations(RxDocumentServiceRequest request) {
    return this.locationCache.canUseMultipleWriteLocations(request);
}
/**
 * Marks the manager closed and shuts down its background refresh executor.
 * In-flight refreshes observe isClosed and become no-ops.
 */
public void close() {
    this.isClosed = true;
    this.executor.shutdown();
    logger.debug("GlobalEndpointManager closed.");
}
/**
 * Refreshes the location cache from the database account.
 *
 * With forceRefresh, the account is re-fetched from any location and applied
 * unconditionally. Otherwise a compare-and-set on isRefreshing ensures only
 * one refresh runs at a time; a concurrent caller gets an empty Mono.
 *
 * @param databaseAccount account to apply, or null to let the private refresh fetch one
 * @param forceRefresh    true to bypass the single-refresh guard and re-fetch
 */
public Mono<Void> refreshLocationAsync(DatabaseAccount databaseAccount, boolean forceRefresh) {
    return Mono.defer(() -> {
        logger.debug("refreshLocationAsync() invoked");
        if (forceRefresh) {
            Mono<DatabaseAccount> databaseAccountObs = getDatabaseAccountFromAnyLocationsAsync(
                this.defaultEndpoint,
                new ArrayList<>(this.connectionPolicy.getPreferredLocations()),
                this::getDatabaseAccountAsync);
            return databaseAccountObs.map(dbAccount -> {
                this.locationCache.onDatabaseAccountRead(dbAccount);
                return dbAccount;
            }).flatMap(dbAccount -> {
                return Mono.empty();
            });
        }
        // Single-flight guard: only the winner of the CAS performs the refresh.
        if (!isRefreshing.compareAndSet(false, true)) {
            logger.debug("in the middle of another refresh. Not invoking a new refresh.");
            return Mono.empty();
        }
        logger.debug("will refresh");
        // On failure, release the guard so a later refresh can run.
        return this.refreshLocationPrivateAsync(databaseAccount).doOnError(e -> this.isRefreshing.set(false));
    });
}
/**
 * Returns the database account from the async cache, fetching it from the
 * given endpoint on a miss. A successful fetch also records it as the latest
 * account and triggers a (non-forced) location refresh before completing.
 *
 * @param defaultEndpoint endpoint queried on a cache miss
 */
public Mono<DatabaseAccount> getDatabaseAccountFromCache(URI defaultEndpoint) {
    return this.databaseAccountAsyncCache.getAsync(StringUtils.EMPTY, null, () -> this.owner.getDatabaseAccountFromEndpoint(defaultEndpoint).flatMap(databaseAccount -> {
        if (databaseAccount != null) {
            this.latestDatabaseAccount = databaseAccount;
        }
        Mono<Void> refreshLocationCompletable = this.refreshLocationAsync(databaseAccount, false);
        return refreshLocationCompletable.then(Mono.just(databaseAccount));
    }).single());
}
/**
 * Returns the most recently retrieved database account. May be null before
 * the first successful fetch; a failed/null update leaves the previous
 * (valid) value in place.
 *
 * @return the latest known DatabaseAccount, or null if none was ever fetched
 */
public DatabaseAccount getLatestDatabaseAccount() {
    return this.latestDatabaseAccount;
}
/**
 * Applies the given account (if any) to the location cache, then, when the
 * cache says endpoints need refreshing, either fetches the account inline
 * (when no account is available and a background refresh is not possible)
 * or schedules a background refresh timer. Always releases the isRefreshing
 * guard on every completion path.
 *
 * @param databaseAccount account to apply, or null to force a fetch when needed
 */
private Mono<Void> refreshLocationPrivateAsync(DatabaseAccount databaseAccount) {
    return Mono.defer(() -> {
        logger.debug("refreshLocationPrivateAsync() refreshing locations");
        if (databaseAccount != null) {
            this.locationCache.onDatabaseAccountRead(databaseAccount);
        }
        Utils.ValueHolder<Boolean> canRefreshInBackground = new Utils.ValueHolder<>();
        if (this.locationCache.shouldRefreshEndpoints(canRefreshInBackground)) {
            logger.debug("shouldRefreshEndpoints: true");
            if (databaseAccount == null && !canRefreshInBackground.v) {
                // No account in hand and background refresh not allowed:
                // fetch synchronously within this reactive chain.
                logger.debug("shouldRefreshEndpoints: can't be done in background");
                Mono<DatabaseAccount> databaseAccountObs = getDatabaseAccountFromAnyLocationsAsync(
                    this.defaultEndpoint,
                    new ArrayList<>(this.connectionPolicy.getPreferredLocations()),
                    this::getDatabaseAccountAsync);
                return databaseAccountObs.map(dbAccount -> {
                    this.locationCache.onDatabaseAccountRead(dbAccount);
                    this.isRefreshing.set(false);
                    return dbAccount;
                }).flatMap(dbAccount -> {
                    // Kick off the timer only if one is not already pending.
                    if (!this.refreshInBackground.get()) {
                        this.startRefreshLocationTimerAsync();
                    }
                    return Mono.empty();
                });
            }
            if (!this.refreshInBackground.get()) {
                this.startRefreshLocationTimerAsync();
            }
            this.isRefreshing.set(false);
            return Mono.empty();
        } else {
            logger.debug("shouldRefreshEndpoints: false, nothing to do.");
            this.isRefreshing.set(false);
            return Mono.empty();
        }
    });
}
/** Fire-and-forget variant: schedules a delayed background refresh (not an initialization run). */
private void startRefreshLocationTimerAsync() {
    startRefreshLocationTimerAsync(false).subscribe();
}
/**
 * Schedules a location refresh on the single-threaded scheduler: immediately
 * for initialization, otherwise after the background refresh interval. On any
 * error the timer re-arms itself and the error is swallowed. No-ops once the
 * manager is closed (checked both before scheduling and when the timer fires).
 *
 * @param initialization true for the startup refresh (zero delay)
 */
private Mono<Void> startRefreshLocationTimerAsync(boolean initialization) {
    if (this.isClosed) {
        logger.debug("startRefreshLocationTimerAsync: nothing to do, it is closed");
        return Mono.empty();
    }
    logger.debug("registering a refresh in [{}] ms", this.backgroundRefreshLocationTimeIntervalInMS);
    LocalDateTime now = LocalDateTime.now();
    int delayInMillis = initialization ? 0: this.backgroundRefreshLocationTimeIntervalInMS;
    this.refreshInBackground.set(true);
    return Mono.delay(Duration.ofMillis(delayInMillis))
        .flatMap(
            t -> {
                if (this.isClosed) {
                    logger.warn("client already closed");
                    return Mono.empty();
                }
                logger.debug("startRefreshLocationTimerAsync() - Invoking refresh, I was registered on [{}]", now);
                Mono<DatabaseAccount> databaseAccountObs = GlobalEndpointManager.getDatabaseAccountFromAnyLocationsAsync(this.defaultEndpoint, new ArrayList<>(this.connectionPolicy.getPreferredLocations()),
                    this::getDatabaseAccountAsync);
                return databaseAccountObs.flatMap(dbAccount -> {
                    logger.debug("db account retrieved");
                    this.refreshInBackground.set(false);
                    return this.refreshLocationPrivateAsync(dbAccount);
                });
            }).onErrorResume(ex -> {
                // Self-healing: on failure, log and re-arm the timer.
                logger.error("startRefreshLocationTimerAsync() - Unable to refresh database account from any location. Exception: {}", ex.toString(), ex);
                this.startRefreshLocationTimerAsync();
                return Mono.empty();
            }).subscribeOn(scheduler);
}
/** @return true once close() has been invoked. */
public boolean isClosed() {
    return this.isClosed;
}
} | class GlobalEndpointManager implements AutoCloseable {
private static final Logger logger = LoggerFactory.getLogger(GlobalEndpointManager.class);
private final int backgroundRefreshLocationTimeIntervalInMS;
private final LocationCache locationCache;
private final URI defaultEndpoint;
private final ConnectionPolicy connectionPolicy;
private final DatabaseAccountManagerInternal owner;
private final AtomicBoolean isRefreshing;
private final AtomicBoolean refreshInBackground;
private final ExecutorService executor = Executors.newSingleThreadExecutor();
private final Scheduler scheduler = Schedulers.fromExecutor(executor);
private volatile boolean isClosed;
private AtomicBoolean firstTimeDatabaseAccountInitialization = new AtomicBoolean(true);
private volatile DatabaseAccount latestDatabaseAccount;
public GlobalEndpointManager(DatabaseAccountManagerInternal owner, ConnectionPolicy connectionPolicy, Configs configs) {
this.backgroundRefreshLocationTimeIntervalInMS = configs.getUnavailableLocationsExpirationTimeInSeconds() * 1000;
try {
this.locationCache = new LocationCache(
new ArrayList<>(connectionPolicy.getPreferredLocations() != null ?
connectionPolicy.getPreferredLocations():
Collections.emptyList()
),
owner.getServiceEndpoint(),
connectionPolicy.getEnableEndpointDiscovery(),
BridgeInternal.getUseMultipleWriteLocations(connectionPolicy),
configs);
this.owner = owner;
this.defaultEndpoint = owner.getServiceEndpoint();
this.connectionPolicy = connectionPolicy;
this.isRefreshing = new AtomicBoolean(false);
this.refreshInBackground = new AtomicBoolean(false);
this.isClosed = false;
} catch (Exception e) {
throw new IllegalArgumentException(e);
}
}
public void init() {
startRefreshLocationTimerAsync(true).block();
}
public UnmodifiableList<URI> getReadEndpoints() {
return this.locationCache.getReadEndpoints();
}
public UnmodifiableList<URI> getWriteEndpoints() {
return this.locationCache.getWriteEndpoints();
}
public static Mono<DatabaseAccount> getDatabaseAccountFromAnyLocationsAsync(
URI defaultEndpoint, List<String> locations, Function<URI, Mono<DatabaseAccount>> getDatabaseAccountFn) {
return getDatabaseAccountFn.apply(defaultEndpoint).onErrorResume(
e -> {
logger.error("Fail to reach global gateway [{}], [{}]", defaultEndpoint, e.getMessage());
if (locations.isEmpty()) {
return Mono.error(e);
}
Flux<Flux<DatabaseAccount>> obs = Flux.range(0, locations.size())
.map(index -> getDatabaseAccountFn.apply(LocationHelper.getLocationEndpoint(defaultEndpoint, locations.get(index))).flux());
Mono<DatabaseAccount> res = Flux.concatDelayError(obs).take(1).single();
return res.doOnError(
innerE -> logger.error("Fail to reach location any of locations {} {}", String.join(",", locations), innerE.getMessage()));
});
}
public URI resolveServiceEndpoint(RxDocumentServiceRequest request) {
return this.locationCache.resolveServiceEndpoint(request);
}
/**
 * Marks the given endpoint unavailable for reads by delegating to the
 * location cache, so subsequent reads are routed to other endpoints.
 *
 * @param endpoint the read endpoint to mark unavailable
 */
public void markEndpointUnavailableForRead(URI endpoint) {
    logger.debug("Marking endpoint {} unavailable for read",endpoint);
    // Fixed: removed stray empty statement (double semicolon).
    this.locationCache.markEndpointUnavailableForRead(endpoint);
}
public void markEndpointUnavailableForWrite(URI endpoint) {
logger.debug("Marking endpoint {} unavailable for Write",endpoint);
this.locationCache.markEndpointUnavailableForWrite(endpoint);
}
public boolean CanUseMultipleWriteLocations(RxDocumentServiceRequest request) {
return this.locationCache.canUseMultipleWriteLocations(request);
}
public void close() {
this.isClosed = true;
this.executor.shutdown();
logger.debug("GlobalEndpointManager closed.");
}
public Mono<Void> refreshLocationAsync(DatabaseAccount databaseAccount, boolean forceRefresh) {
return Mono.defer(() -> {
logger.debug("refreshLocationAsync() invoked");
if (forceRefresh) {
Mono<DatabaseAccount> databaseAccountObs = getDatabaseAccountFromAnyLocationsAsync(
this.defaultEndpoint,
new ArrayList<>(this.connectionPolicy.getPreferredLocations()),
this::getDatabaseAccountAsync);
return databaseAccountObs.map(dbAccount -> {
this.locationCache.onDatabaseAccountRead(dbAccount);
return dbAccount;
}).flatMap(dbAccount -> {
return Mono.empty();
});
}
if (!isRefreshing.compareAndSet(false, true)) {
logger.debug("in the middle of another refresh. Not invoking a new refresh.");
return Mono.empty();
}
logger.debug("will refresh");
return this.refreshLocationPrivateAsync(databaseAccount).doOnError(e -> this.isRefreshing.set(false));
});
}
/**
* This will provide the latest databaseAccount.
* If due to some reason last databaseAccount update was null,
* this method will return previous valid value
* @return DatabaseAccount
*/
public DatabaseAccount getLatestDatabaseAccount() {
return this.latestDatabaseAccount;
}
private Mono<Void> refreshLocationPrivateAsync(DatabaseAccount databaseAccount) {
return Mono.defer(() -> {
logger.debug("refreshLocationPrivateAsync() refreshing locations");
if (databaseAccount != null) {
this.locationCache.onDatabaseAccountRead(databaseAccount);
}
Utils.ValueHolder<Boolean> canRefreshInBackground = new Utils.ValueHolder<>();
if (this.locationCache.shouldRefreshEndpoints(canRefreshInBackground)) {
logger.debug("shouldRefreshEndpoints: true");
if (databaseAccount == null && !canRefreshInBackground.v) {
logger.debug("shouldRefreshEndpoints: can't be done in background");
Mono<DatabaseAccount> databaseAccountObs = getDatabaseAccountFromAnyLocationsAsync(
this.defaultEndpoint,
new ArrayList<>(this.connectionPolicy.getPreferredLocations()),
this::getDatabaseAccountAsync);
return databaseAccountObs.map(dbAccount -> {
this.locationCache.onDatabaseAccountRead(dbAccount);
this.isRefreshing.set(false);
return dbAccount;
}).flatMap(dbAccount -> {
if (!this.refreshInBackground.get()) {
this.startRefreshLocationTimerAsync();
}
return Mono.empty();
});
}
if (!this.refreshInBackground.get()) {
this.startRefreshLocationTimerAsync();
}
this.isRefreshing.set(false);
return Mono.empty();
} else {
logger.debug("shouldRefreshEndpoints: false, nothing to do.");
this.isRefreshing.set(false);
return Mono.empty();
}
});
}
private void startRefreshLocationTimerAsync() {
startRefreshLocationTimerAsync(false).subscribe();
}
private Mono<Void> startRefreshLocationTimerAsync(boolean initialization) {
if (this.isClosed) {
logger.debug("startRefreshLocationTimerAsync: nothing to do, it is closed");
return Mono.empty();
}
logger.debug("registering a refresh in [{}] ms", this.backgroundRefreshLocationTimeIntervalInMS);
LocalDateTime now = LocalDateTime.now();
int delayInMillis = initialization ? 0: this.backgroundRefreshLocationTimeIntervalInMS;
this.refreshInBackground.set(true);
return Mono.delay(Duration.ofMillis(delayInMillis))
.flatMap(
t -> {
if (this.isClosed) {
logger.warn("client already closed");
return Mono.empty();
}
logger.debug("startRefreshLocationTimerAsync() - Invoking refresh, I was registered on [{}]", now);
Mono<DatabaseAccount> databaseAccountObs = GlobalEndpointManager.getDatabaseAccountFromAnyLocationsAsync(this.defaultEndpoint, new ArrayList<>(this.connectionPolicy.getPreferredLocations()),
this::getDatabaseAccountAsync);
return databaseAccountObs.flatMap(dbAccount -> {
logger.debug("db account retrieved");
this.refreshInBackground.set(false);
return this.refreshLocationPrivateAsync(dbAccount);
});
}).onErrorResume(ex -> {
logger.error("startRefreshLocationTimerAsync() - Unable to refresh database account from any location. Exception: {}", ex.toString(), ex);
this.startRefreshLocationTimerAsync();
return Mono.empty();
}).subscribeOn(scheduler);
}
public boolean isClosed() {
return this.isClosed;
}
} |
Yes | private Mono<DatabaseAccount> getDatabaseAccountAsync(URI serviceEndpoint) {
final GlobalEndpointManager that = this;
Callable<Mono<DatabaseAccount>> fetchDatabaseAccount = () -> {
return that.owner.getDatabaseAccountFromEndpoint(serviceEndpoint).doOnNext(databaseAccount -> {
if(databaseAccount != null) {
this.latestDatabaseAccount = databaseAccount;
}
logger.debug("account retrieved: {}", databaseAccount);
}).single();
};
Mono<DatabaseAccount> obsoleteValueMono = databaseAccountAsyncCache.getAsync(StringUtils.EMPTY, null, fetchDatabaseAccount);
return obsoleteValueMono.flatMap(obsoleteValue -> {
if (firstTimeDatabaseAccountInitialization.compareAndSet(true, false)) {
return Mono.just(obsoleteValue);
}
return databaseAccountAsyncCache.getAsync(StringUtils.EMPTY, obsoleteValue, fetchDatabaseAccount).doOnError(t -> {
databaseAccountAsyncCache.set(StringUtils.EMPTY, obsoleteValue);
});
});
} | databaseAccountAsyncCache.set(StringUtils.EMPTY, obsoleteValue); | private Mono<DatabaseAccount> getDatabaseAccountAsync(URI serviceEndpoint) {
return this.owner.getDatabaseAccountFromEndpoint(serviceEndpoint)
.doOnNext(databaseAccount -> {
if(databaseAccount != null) {
this.latestDatabaseAccount = databaseAccount;
}
logger.debug("account retrieved: {}", databaseAccount);
}).single();
} | class GlobalEndpointManager implements AutoCloseable {
private static final Logger logger = LoggerFactory.getLogger(GlobalEndpointManager.class);
private final int backgroundRefreshLocationTimeIntervalInMS;
private final LocationCache locationCache;
private final URI defaultEndpoint;
private final ConnectionPolicy connectionPolicy;
private final DatabaseAccountManagerInternal owner;
private final AtomicBoolean isRefreshing;
private final AtomicBoolean refreshInBackground;
private final ExecutorService executor = Executors.newSingleThreadExecutor();
private final Scheduler scheduler = Schedulers.fromExecutor(executor);
private volatile boolean isClosed;
private final AsyncCache<String, DatabaseAccount> databaseAccountAsyncCache;
private AtomicBoolean firstTimeDatabaseAccountInitialization = new AtomicBoolean(true);
private volatile DatabaseAccount latestDatabaseAccount;
public GlobalEndpointManager(DatabaseAccountManagerInternal owner, ConnectionPolicy connectionPolicy, Configs configs) {
this.backgroundRefreshLocationTimeIntervalInMS = configs.getUnavailableLocationsExpirationTimeInSeconds() * 1000;
this.databaseAccountAsyncCache = new AsyncCache<>();
try {
this.locationCache = new LocationCache(
new ArrayList<>(connectionPolicy.getPreferredLocations() != null ?
connectionPolicy.getPreferredLocations():
Collections.emptyList()
),
owner.getServiceEndpoint(),
connectionPolicy.getEnableEndpointDiscovery(),
BridgeInternal.getUseMultipleWriteLocations(connectionPolicy),
configs);
this.owner = owner;
this.defaultEndpoint = owner.getServiceEndpoint();
this.connectionPolicy = connectionPolicy;
this.isRefreshing = new AtomicBoolean(false);
this.refreshInBackground = new AtomicBoolean(false);
this.isClosed = false;
} catch (Exception e) {
throw new IllegalArgumentException(e);
}
}
public void init() {
startRefreshLocationTimerAsync(true).block();
}
public UnmodifiableList<URI> getReadEndpoints() {
return this.locationCache.getReadEndpoints();
}
public UnmodifiableList<URI> getWriteEndpoints() {
return this.locationCache.getWriteEndpoints();
}
public static Mono<DatabaseAccount> getDatabaseAccountFromAnyLocationsAsync(
URI defaultEndpoint, List<String> locations, Function<URI, Mono<DatabaseAccount>> getDatabaseAccountFn) {
return getDatabaseAccountFn.apply(defaultEndpoint).onErrorResume(
e -> {
logger.error("Fail to reach global gateway [{}], [{}]", defaultEndpoint, e.getMessage());
if (locations.isEmpty()) {
return Mono.error(e);
}
Flux<Flux<DatabaseAccount>> obs = Flux.range(0, locations.size())
.map(index -> getDatabaseAccountFn.apply(LocationHelper.getLocationEndpoint(defaultEndpoint, locations.get(index))).flux());
Mono<DatabaseAccount> res = Flux.concatDelayError(obs).take(1).single();
return res.doOnError(
innerE -> logger.error("Fail to reach location any of locations {} {}", String.join(",", locations), innerE.getMessage()));
});
}
public URI resolveServiceEndpoint(RxDocumentServiceRequest request) {
return this.locationCache.resolveServiceEndpoint(request);
}
/**
 * Marks the given endpoint unavailable for reads by delegating to the
 * location cache, so subsequent reads are routed to other endpoints.
 *
 * @param endpoint the read endpoint to mark unavailable
 */
public void markEndpointUnavailableForRead(URI endpoint) {
    logger.debug("Marking endpoint {} unavailable for read",endpoint);
    // Fixed: removed stray empty statement (double semicolon).
    this.locationCache.markEndpointUnavailableForRead(endpoint);
}
public void markEndpointUnavailableForWrite(URI endpoint) {
logger.debug("Marking endpoint {} unavailable for Write",endpoint);
this.locationCache.markEndpointUnavailableForWrite(endpoint);
}
public boolean CanUseMultipleWriteLocations(RxDocumentServiceRequest request) {
return this.locationCache.canUseMultipleWriteLocations(request);
}
public void close() {
this.isClosed = true;
this.executor.shutdown();
logger.debug("GlobalEndpointManager closed.");
}
public Mono<Void> refreshLocationAsync(DatabaseAccount databaseAccount, boolean forceRefresh) {
return Mono.defer(() -> {
logger.debug("refreshLocationAsync() invoked");
if (forceRefresh) {
Mono<DatabaseAccount> databaseAccountObs = getDatabaseAccountFromAnyLocationsAsync(
this.defaultEndpoint,
new ArrayList<>(this.connectionPolicy.getPreferredLocations()),
this::getDatabaseAccountAsync);
return databaseAccountObs.map(dbAccount -> {
this.locationCache.onDatabaseAccountRead(dbAccount);
return dbAccount;
}).flatMap(dbAccount -> {
return Mono.empty();
});
}
if (!isRefreshing.compareAndSet(false, true)) {
logger.debug("in the middle of another refresh. Not invoking a new refresh.");
return Mono.empty();
}
logger.debug("will refresh");
return this.refreshLocationPrivateAsync(databaseAccount).doOnError(e -> this.isRefreshing.set(false));
});
}
public Mono<DatabaseAccount> getDatabaseAccountFromCache(URI defaultEndpoint) {
return this.databaseAccountAsyncCache.getAsync(StringUtils.EMPTY, null, () -> this.owner.getDatabaseAccountFromEndpoint(defaultEndpoint).flatMap(databaseAccount -> {
if (databaseAccount != null) {
this.latestDatabaseAccount = databaseAccount;
}
Mono<Void> refreshLocationCompletable = this.refreshLocationAsync(databaseAccount, false);
return refreshLocationCompletable.then(Mono.just(databaseAccount));
}).single());
}
public DatabaseAccount getLatestDatabaseAccount() {
return this.latestDatabaseAccount;
}
private Mono<Void> refreshLocationPrivateAsync(DatabaseAccount databaseAccount) {
return Mono.defer(() -> {
logger.debug("refreshLocationPrivateAsync() refreshing locations");
if (databaseAccount != null) {
this.locationCache.onDatabaseAccountRead(databaseAccount);
}
Utils.ValueHolder<Boolean> canRefreshInBackground = new Utils.ValueHolder<>();
if (this.locationCache.shouldRefreshEndpoints(canRefreshInBackground)) {
logger.debug("shouldRefreshEndpoints: true");
if (databaseAccount == null && !canRefreshInBackground.v) {
logger.debug("shouldRefreshEndpoints: can't be done in background");
Mono<DatabaseAccount> databaseAccountObs = getDatabaseAccountFromAnyLocationsAsync(
this.defaultEndpoint,
new ArrayList<>(this.connectionPolicy.getPreferredLocations()),
this::getDatabaseAccountAsync);
return databaseAccountObs.map(dbAccount -> {
this.locationCache.onDatabaseAccountRead(dbAccount);
this.isRefreshing.set(false);
return dbAccount;
}).flatMap(dbAccount -> {
if (!this.refreshInBackground.get()) {
this.startRefreshLocationTimerAsync();
}
return Mono.empty();
});
}
if (!this.refreshInBackground.get()) {
this.startRefreshLocationTimerAsync();
}
this.isRefreshing.set(false);
return Mono.empty();
} else {
logger.debug("shouldRefreshEndpoints: false, nothing to do.");
this.isRefreshing.set(false);
return Mono.empty();
}
});
}
private void startRefreshLocationTimerAsync() {
startRefreshLocationTimerAsync(false).subscribe();
}
private Mono<Void> startRefreshLocationTimerAsync(boolean initialization) {
if (this.isClosed) {
logger.debug("startRefreshLocationTimerAsync: nothing to do, it is closed");
return Mono.empty();
}
logger.debug("registering a refresh in [{}] ms", this.backgroundRefreshLocationTimeIntervalInMS);
LocalDateTime now = LocalDateTime.now();
int delayInMillis = initialization ? 0: this.backgroundRefreshLocationTimeIntervalInMS;
this.refreshInBackground.set(true);
return Mono.delay(Duration.ofMillis(delayInMillis))
.flatMap(
t -> {
if (this.isClosed) {
logger.warn("client already closed");
return Mono.empty();
}
logger.debug("startRefreshLocationTimerAsync() - Invoking refresh, I was registered on [{}]", now);
Mono<DatabaseAccount> databaseAccountObs = GlobalEndpointManager.getDatabaseAccountFromAnyLocationsAsync(this.defaultEndpoint, new ArrayList<>(this.connectionPolicy.getPreferredLocations()),
this::getDatabaseAccountAsync);
return databaseAccountObs.flatMap(dbAccount -> {
logger.debug("db account retrieved");
this.refreshInBackground.set(false);
return this.refreshLocationPrivateAsync(dbAccount);
});
}).onErrorResume(ex -> {
logger.error("startRefreshLocationTimerAsync() - Unable to refresh database account from any location. Exception: {}", ex.toString(), ex);
this.startRefreshLocationTimerAsync();
return Mono.empty();
}).subscribeOn(scheduler);
}
public boolean isClosed() {
return this.isClosed;
}
} | class GlobalEndpointManager implements AutoCloseable {
private static final Logger logger = LoggerFactory.getLogger(GlobalEndpointManager.class);
private final int backgroundRefreshLocationTimeIntervalInMS;
private final LocationCache locationCache;
private final URI defaultEndpoint;
private final ConnectionPolicy connectionPolicy;
private final DatabaseAccountManagerInternal owner;
private final AtomicBoolean isRefreshing;
private final AtomicBoolean refreshInBackground;
private final ExecutorService executor = Executors.newSingleThreadExecutor();
private final Scheduler scheduler = Schedulers.fromExecutor(executor);
private volatile boolean isClosed;
private AtomicBoolean firstTimeDatabaseAccountInitialization = new AtomicBoolean(true);
private volatile DatabaseAccount latestDatabaseAccount;
public GlobalEndpointManager(DatabaseAccountManagerInternal owner, ConnectionPolicy connectionPolicy, Configs configs) {
this.backgroundRefreshLocationTimeIntervalInMS = configs.getUnavailableLocationsExpirationTimeInSeconds() * 1000;
try {
this.locationCache = new LocationCache(
new ArrayList<>(connectionPolicy.getPreferredLocations() != null ?
connectionPolicy.getPreferredLocations():
Collections.emptyList()
),
owner.getServiceEndpoint(),
connectionPolicy.getEnableEndpointDiscovery(),
BridgeInternal.getUseMultipleWriteLocations(connectionPolicy),
configs);
this.owner = owner;
this.defaultEndpoint = owner.getServiceEndpoint();
this.connectionPolicy = connectionPolicy;
this.isRefreshing = new AtomicBoolean(false);
this.refreshInBackground = new AtomicBoolean(false);
this.isClosed = false;
} catch (Exception e) {
throw new IllegalArgumentException(e);
}
}
public void init() {
startRefreshLocationTimerAsync(true).block();
}
public UnmodifiableList<URI> getReadEndpoints() {
return this.locationCache.getReadEndpoints();
}
public UnmodifiableList<URI> getWriteEndpoints() {
return this.locationCache.getWriteEndpoints();
}
public static Mono<DatabaseAccount> getDatabaseAccountFromAnyLocationsAsync(
URI defaultEndpoint, List<String> locations, Function<URI, Mono<DatabaseAccount>> getDatabaseAccountFn) {
return getDatabaseAccountFn.apply(defaultEndpoint).onErrorResume(
e -> {
logger.error("Fail to reach global gateway [{}], [{}]", defaultEndpoint, e.getMessage());
if (locations.isEmpty()) {
return Mono.error(e);
}
Flux<Flux<DatabaseAccount>> obs = Flux.range(0, locations.size())
.map(index -> getDatabaseAccountFn.apply(LocationHelper.getLocationEndpoint(defaultEndpoint, locations.get(index))).flux());
Mono<DatabaseAccount> res = Flux.concatDelayError(obs).take(1).single();
return res.doOnError(
innerE -> logger.error("Fail to reach location any of locations {} {}", String.join(",", locations), innerE.getMessage()));
});
}
public URI resolveServiceEndpoint(RxDocumentServiceRequest request) {
return this.locationCache.resolveServiceEndpoint(request);
}
/**
 * Marks the given endpoint unavailable for reads by delegating to the
 * location cache, so subsequent reads are routed to other endpoints.
 *
 * @param endpoint the read endpoint to mark unavailable
 */
public void markEndpointUnavailableForRead(URI endpoint) {
    logger.debug("Marking endpoint {} unavailable for read",endpoint);
    // Fixed: removed stray empty statement (double semicolon).
    this.locationCache.markEndpointUnavailableForRead(endpoint);
}
public void markEndpointUnavailableForWrite(URI endpoint) {
logger.debug("Marking endpoint {} unavailable for Write",endpoint);
this.locationCache.markEndpointUnavailableForWrite(endpoint);
}
public boolean CanUseMultipleWriteLocations(RxDocumentServiceRequest request) {
return this.locationCache.canUseMultipleWriteLocations(request);
}
public void close() {
this.isClosed = true;
this.executor.shutdown();
logger.debug("GlobalEndpointManager closed.");
}
public Mono<Void> refreshLocationAsync(DatabaseAccount databaseAccount, boolean forceRefresh) {
return Mono.defer(() -> {
logger.debug("refreshLocationAsync() invoked");
if (forceRefresh) {
Mono<DatabaseAccount> databaseAccountObs = getDatabaseAccountFromAnyLocationsAsync(
this.defaultEndpoint,
new ArrayList<>(this.connectionPolicy.getPreferredLocations()),
this::getDatabaseAccountAsync);
return databaseAccountObs.map(dbAccount -> {
this.locationCache.onDatabaseAccountRead(dbAccount);
return dbAccount;
}).flatMap(dbAccount -> {
return Mono.empty();
});
}
if (!isRefreshing.compareAndSet(false, true)) {
logger.debug("in the middle of another refresh. Not invoking a new refresh.");
return Mono.empty();
}
logger.debug("will refresh");
return this.refreshLocationPrivateAsync(databaseAccount).doOnError(e -> this.isRefreshing.set(false));
});
}
/**
* This will provide the latest databaseAccount.
* If due to some reason last databaseAccount update was null,
* this method will return previous valid value
* @return DatabaseAccount
*/
public DatabaseAccount getLatestDatabaseAccount() {
return this.latestDatabaseAccount;
}
private Mono<Void> refreshLocationPrivateAsync(DatabaseAccount databaseAccount) {
return Mono.defer(() -> {
logger.debug("refreshLocationPrivateAsync() refreshing locations");
if (databaseAccount != null) {
this.locationCache.onDatabaseAccountRead(databaseAccount);
}
Utils.ValueHolder<Boolean> canRefreshInBackground = new Utils.ValueHolder<>();
if (this.locationCache.shouldRefreshEndpoints(canRefreshInBackground)) {
logger.debug("shouldRefreshEndpoints: true");
if (databaseAccount == null && !canRefreshInBackground.v) {
logger.debug("shouldRefreshEndpoints: can't be done in background");
Mono<DatabaseAccount> databaseAccountObs = getDatabaseAccountFromAnyLocationsAsync(
this.defaultEndpoint,
new ArrayList<>(this.connectionPolicy.getPreferredLocations()),
this::getDatabaseAccountAsync);
return databaseAccountObs.map(dbAccount -> {
this.locationCache.onDatabaseAccountRead(dbAccount);
this.isRefreshing.set(false);
return dbAccount;
}).flatMap(dbAccount -> {
if (!this.refreshInBackground.get()) {
this.startRefreshLocationTimerAsync();
}
return Mono.empty();
});
}
if (!this.refreshInBackground.get()) {
this.startRefreshLocationTimerAsync();
}
this.isRefreshing.set(false);
return Mono.empty();
} else {
logger.debug("shouldRefreshEndpoints: false, nothing to do.");
this.isRefreshing.set(false);
return Mono.empty();
}
});
}
private void startRefreshLocationTimerAsync() {
startRefreshLocationTimerAsync(false).subscribe();
}
private Mono<Void> startRefreshLocationTimerAsync(boolean initialization) {
if (this.isClosed) {
logger.debug("startRefreshLocationTimerAsync: nothing to do, it is closed");
return Mono.empty();
}
logger.debug("registering a refresh in [{}] ms", this.backgroundRefreshLocationTimeIntervalInMS);
LocalDateTime now = LocalDateTime.now();
int delayInMillis = initialization ? 0: this.backgroundRefreshLocationTimeIntervalInMS;
this.refreshInBackground.set(true);
return Mono.delay(Duration.ofMillis(delayInMillis))
.flatMap(
t -> {
if (this.isClosed) {
logger.warn("client already closed");
return Mono.empty();
}
logger.debug("startRefreshLocationTimerAsync() - Invoking refresh, I was registered on [{}]", now);
Mono<DatabaseAccount> databaseAccountObs = GlobalEndpointManager.getDatabaseAccountFromAnyLocationsAsync(this.defaultEndpoint, new ArrayList<>(this.connectionPolicy.getPreferredLocations()),
this::getDatabaseAccountAsync);
return databaseAccountObs.flatMap(dbAccount -> {
logger.debug("db account retrieved");
this.refreshInBackground.set(false);
return this.refreshLocationPrivateAsync(dbAccount);
});
}).onErrorResume(ex -> {
logger.error("startRefreshLocationTimerAsync() - Unable to refresh database account from any location. Exception: {}", ex.toString(), ex);
this.startRefreshLocationTimerAsync();
return Mono.empty();
}).subscribeOn(scheduler);
}
public boolean isClosed() {
return this.isClosed;
}
} |
done | public DatabaseAccount getLatestDatabaseAccount() {
return this.latestDatabaseAccount;
} | return this.latestDatabaseAccount; | public DatabaseAccount getLatestDatabaseAccount() {
return this.latestDatabaseAccount;
} | class GlobalEndpointManager implements AutoCloseable {
private static final Logger logger = LoggerFactory.getLogger(GlobalEndpointManager.class);
// Interval between background endpoint refreshes, derived from the unavailable-location
// expiration setting in the constructor.
private final int backgroundRefreshLocationTimeIntervalInMS;
// Tracks per-region read/write endpoints and their availability.
private final LocationCache locationCache;
// The account's global (hub) endpoint, used as the first fetch target.
private final URI defaultEndpoint;
private final ConnectionPolicy connectionPolicy;
// Callback surface into the owning client for fetching the database account.
private final DatabaseAccountManagerInternal owner;
// Guards against overlapping refresh passes (compareAndSet in refreshLocationAsync).
private final AtomicBoolean isRefreshing;
// True while a background refresh timer is pending.
private final AtomicBoolean refreshInBackground;
// Single-threaded executor backing the refresh scheduler; shut down in close().
private final ExecutorService executor = Executors.newSingleThreadExecutor();
private final Scheduler scheduler = Schedulers.fromExecutor(executor);
// Set by close(); checked by the timer to stop rescheduling.
private volatile boolean isClosed;
// Caches the most recent DatabaseAccount fetch keyed by a single (empty-string) key.
private final AsyncCache<String, DatabaseAccount> databaseAccountAsyncCache;
// Lets the very first getDatabaseAccountAsync call return the freshly cached value
// without a second cache round-trip.
private AtomicBoolean firstTimeDatabaseAccountInitialization = new AtomicBoolean(true);
// Last non-null DatabaseAccount observed from the service.
private volatile DatabaseAccount latestDatabaseAccount;
/**
 * Builds the endpoint manager from the owning client's connection settings.
 * Construction only wires state; call {@code init()} to perform the first
 * database-account fetch and populate the location cache.
 *
 * @param owner            provides the service endpoint and account fetches
 * @param connectionPolicy preferred locations, endpoint discovery, multi-write flags
 * @param configs          source of the refresh-interval setting
 * @throws IllegalArgumentException if the location cache cannot be constructed
 */
public GlobalEndpointManager(DatabaseAccountManagerInternal owner, ConnectionPolicy connectionPolicy, Configs configs) {
this.backgroundRefreshLocationTimeIntervalInMS = configs.getUnavailableLocationsExpirationTimeInSeconds() * 1000;
this.databaseAccountAsyncCache = new AsyncCache<>();
try {
this.locationCache = new LocationCache(
new ArrayList<>(connectionPolicy.getPreferredLocations() != null ?
connectionPolicy.getPreferredLocations():
Collections.emptyList()
),
owner.getServiceEndpoint(),
connectionPolicy.getEnableEndpointDiscovery(),
BridgeInternal.getUseMultipleWriteLocations(connectionPolicy),
configs);
this.owner = owner;
this.defaultEndpoint = owner.getServiceEndpoint();
this.connectionPolicy = connectionPolicy;
this.isRefreshing = new AtomicBoolean(false);
this.refreshInBackground = new AtomicBoolean(false);
this.isClosed = false;
// Any failure above is surfaced as an argument problem to the caller.
} catch (Exception e) {
throw new IllegalArgumentException(e);
}
}
/**
 * Blocks until the first database-account fetch and location refresh complete
 * (delay of 0 via the {@code initialization} flag), so endpoint resolution is
 * usable immediately after construction.
 */
public void init() {
startRefreshLocationTimerAsync(true).block();
}
/** Read endpoints in the location cache's current preference order. */
public UnmodifiableList<URI> getReadEndpoints() {
final UnmodifiableList<URI> readEndpoints = locationCache.getReadEndpoints();
return readEndpoints;
}
/** Write endpoints in the location cache's current preference order. */
public UnmodifiableList<URI> getWriteEndpoints() {
return this.locationCache.getWriteEndpoints();
}
/**
 * Fetches the database account from the global endpoint first; on failure falls
 * back to each preferred location in order, emitting the first success.
 *
 * @param defaultEndpoint      the global (hub) endpoint tried first
 * @param locations            preferred region names used to derive fallback endpoints
 * @param getDatabaseAccountFn performs the actual fetch for a given endpoint
 * @return the first successfully retrieved account; errors only if every endpoint fails
 */
public static Mono<DatabaseAccount> getDatabaseAccountFromAnyLocationsAsync(
URI defaultEndpoint, List<String> locations, Function<URI, Mono<DatabaseAccount>> getDatabaseAccountFn) {
return getDatabaseAccountFn.apply(defaultEndpoint).onErrorResume(
e -> {
logger.error("Fail to reach global gateway [{}], [{}]", defaultEndpoint, e.getMessage());
// No fallbacks configured: propagate the original failure.
if (locations.isEmpty()) {
return Mono.error(e);
}
// Try each regional endpoint lazily, in preference order; concatDelayError
// keeps going past failures, take(1) stops at the first success.
Flux<Flux<DatabaseAccount>> obs = Flux.range(0, locations.size())
.map(index -> getDatabaseAccountFn.apply(LocationHelper.getLocationEndpoint(defaultEndpoint, locations.get(index))).flux());
Mono<DatabaseAccount> res = Flux.concatDelayError(obs).take(1).single();
return res.doOnError(
innerE -> logger.error("Fail to reach location any of locations {} {}", String.join(",", locations), innerE.getMessage()));
});
}
/** Picks the service endpoint the given request should be routed to. */
public URI resolveServiceEndpoint(RxDocumentServiceRequest request) {
final URI resolved = locationCache.resolveServiceEndpoint(request);
return resolved;
}
/**
 * Records that the given endpoint failed a read so the location cache steers
 * subsequent reads away from it until its unavailability window expires.
 *
 * @param endpoint the read endpoint to mark unavailable
 */
public void markEndpointUnavailableForRead(URI endpoint) {
logger.debug("Marking endpoint {} unavailable for read",endpoint);
// Fixed: removed stray empty statement (double semicolon) after the call.
this.locationCache.markEndpointUnavailableForRead(endpoint);
}
/**
 * Records that the given endpoint failed a write so the location cache steers
 * subsequent writes away from it until its unavailability window expires.
 *
 * @param endpoint the write endpoint to mark unavailable
 */
public void markEndpointUnavailableForWrite(URI endpoint) {
logger.debug("Marking endpoint {} unavailable for Write", endpoint);
locationCache.markEndpointUnavailableForWrite(endpoint);
}
/** Whether the given request is allowed to use multi-write routing, per the location cache. */
// NOTE(review): method name should be lowerCamelCase (canUseMultipleWriteLocations);
// left as-is because renaming would break existing callers.
public boolean CanUseMultipleWriteLocations(RxDocumentServiceRequest request) {
return this.locationCache.canUseMultipleWriteLocations(request);
}
/**
 * Marks the manager closed and shuts down the refresh executor. Pending timer
 * callbacks observe {@code isClosed} and become no-ops.
 */
public void close() {
this.isClosed = true;
this.executor.shutdown();
logger.debug("GlobalEndpointManager closed.");
}
/**
 * Refreshes the location cache. With {@code forceRefresh} the account is re-fetched
 * unconditionally (bypassing the in-progress guard); otherwise a single refresh pass
 * is started unless one is already running.
 *
 * @param databaseAccount account snapshot to seed the refresh with; may be null
 * @param forceRefresh    true to re-fetch the account regardless of refresh state
 * @return a Mono completing when the refresh pass has been started/performed
 */
public Mono<Void> refreshLocationAsync(DatabaseAccount databaseAccount, boolean forceRefresh) {
return Mono.defer(() -> {
logger.debug("refreshLocationAsync() invoked");
if (forceRefresh) {
Mono<DatabaseAccount> databaseAccountObs = getDatabaseAccountFromAnyLocationsAsync(
this.defaultEndpoint,
new ArrayList<>(this.connectionPolicy.getPreferredLocations()),
this::getDatabaseAccountAsync);
return databaseAccountObs.map(dbAccount -> {
this.locationCache.onDatabaseAccountRead(dbAccount);
return dbAccount;
}).flatMap(dbAccount -> {
return Mono.empty();
});
}
// Only one non-forced refresh pass may run at a time.
if (!isRefreshing.compareAndSet(false, true)) {
logger.debug("in the middle of another refresh. Not invoking a new refresh.");
return Mono.empty();
}
logger.debug("will refresh");
// refreshLocationPrivateAsync resets isRefreshing on its normal paths;
// the doOnError clears it when the refresh fails.
return this.refreshLocationPrivateAsync(databaseAccount).doOnError(e -> this.isRefreshing.set(false));
});
}
/**
 * Returns the database account for the given endpoint from the async cache,
 * fetching it from the service on a miss and recording it in latestDatabaseAccount.
 */
public Mono<DatabaseAccount> getDatabaseAccountFromCache(URI defaultEndpoint) {
return this.databaseAccountAsyncCache.getAsync(StringUtils.EMPTY, null, () -> this.owner.getDatabaseAccountFromEndpoint(defaultEndpoint).single().doOnSuccess(databaseAccount -> {
if(databaseAccount != null) {
this.latestDatabaseAccount = databaseAccount;
}
// NOTE(review): refreshLocationAsync returns a cold Mono that is never
// subscribed here, so this line appears to have no runtime effect —
// confirm whether a subscribe() was intended.
this.refreshLocationAsync(databaseAccount, false);
}));
}
/**
 * Core refresh pass: feeds the supplied account (if any) into the location cache,
 * then — when the cache says endpoints are stale — fetches the account inline or
 * arms the background timer, depending on whether a background refresh is allowed.
 * Clears the {@code isRefreshing} flag on every completion path.
 */
private Mono<Void> refreshLocationPrivateAsync(DatabaseAccount databaseAccount) {
return Mono.defer(() -> {
logger.debug("refreshLocationPrivateAsync() refreshing locations");
if (databaseAccount != null) {
this.locationCache.onDatabaseAccountRead(databaseAccount);
}
Utils.ValueHolder<Boolean> canRefreshInBackground = new Utils.ValueHolder<>();
if (this.locationCache.shouldRefreshEndpoints(canRefreshInBackground)) {
logger.debug("shouldRefreshEndpoints: true")
;
if (databaseAccount == null && !canRefreshInBackground.v) {
// No account in hand and background refresh disallowed: fetch inline now.
logger.debug("shouldRefreshEndpoints: can't be done in background");
Mono<DatabaseAccount> databaseAccountObs = getDatabaseAccountFromAnyLocationsAsync(
this.defaultEndpoint,
new ArrayList<>(this.connectionPolicy.getPreferredLocations()),
this::getDatabaseAccountAsync);
return databaseAccountObs.map(dbAccount -> {
this.locationCache.onDatabaseAccountRead(dbAccount);
this.isRefreshing.set(false);
return dbAccount;
}).flatMap(dbAccount -> {
// Arm the background timer unless one is already pending.
if (!this.refreshInBackground.get()) {
this.startRefreshLocationTimerAsync();
}
return Mono.empty();
});
}
// Background refresh allowed (or account already supplied): just arm the timer.
if (!this.refreshInBackground.get()) {
this.startRefreshLocationTimerAsync();
}
this.isRefreshing.set(false);
return Mono.empty();
} else {
logger.debug("shouldRefreshEndpoints: false, nothing to do.");
this.isRefreshing.set(false);
return Mono.empty();
}
});
}
// Fire-and-forget variant used to arm the periodic background refresh timer.
private void startRefreshLocationTimerAsync() {
startRefreshLocationTimerAsync(false).subscribe();
}
/**
 * Schedules a refresh after the configured interval (immediately during
 * initialization), fetches the account from any location, and runs the core
 * refresh pass. On failure it logs and re-arms the timer.
 *
 * @param initialization true for the first, zero-delay refresh from init()
 * @return a Mono completing when this refresh cycle finishes
 */
private Mono<Void> startRefreshLocationTimerAsync(boolean initialization) {
if (this.isClosed) {
logger.debug("startRefreshLocationTimerAsync: nothing to do, it is closed");
return Mono.empty();
}
logger.debug("registering a refresh in [{}] ms", this.backgroundRefreshLocationTimeIntervalInMS);
LocalDateTime now = LocalDateTime.now();
// First refresh (from init()) runs immediately; later ones wait the full interval.
int delayInMillis = initialization ? 0: this.backgroundRefreshLocationTimeIntervalInMS;
this.refreshInBackground.set(true);
return Mono.delay(Duration.ofMillis(delayInMillis))
.flatMap(
t -> {
// The manager may have been closed while the delay was pending.
if (this.isClosed) {
logger.warn("client already closed");
return Mono.empty();
}
logger.debug("startRefreshLocationTimerAsync() - Invoking refresh, I was registered on [{}]", now);
Mono<DatabaseAccount> databaseAccountObs = GlobalEndpointManager.getDatabaseAccountFromAnyLocationsAsync(this.defaultEndpoint, new ArrayList<>(this.connectionPolicy.getPreferredLocations()),
this::getDatabaseAccountAsync);
return databaseAccountObs.flatMap(dbAccount -> {
logger.debug("db account retrieved");
this.refreshInBackground.set(false);
return this.refreshLocationPrivateAsync(dbAccount);
});
}).onErrorResume(ex -> {
logger.error("startRefreshLocationTimerAsync() - Unable to refresh database account from any location. Exception: {}", ex.toString(), ex);
// Retry on the next interval rather than failing permanently.
this.startRefreshLocationTimerAsync();
return Mono.empty();
}).subscribeOn(scheduler);
}
/**
 * Resolves the database account via the async cache. The very first call returns
 * the freshly fetched value directly; later calls pass the previous value as the
 * obsolete marker so the cache re-fetches, and on error the cache is re-seeded
 * with the last known value so callers keep a usable (stale) account.
 */
private Mono<DatabaseAccount> getDatabaseAccountAsync(URI serviceEndpoint) {
final GlobalEndpointManager that = this;
Callable<Mono<DatabaseAccount>> fetchDatabaseAccount = () -> {
return that.owner.getDatabaseAccountFromEndpoint(serviceEndpoint).doOnNext(databaseAccount -> {
if(databaseAccount != null) {
this.latestDatabaseAccount = databaseAccount;
}
logger.debug("account retrieved: {}", databaseAccount);
}).single();
};
Mono<DatabaseAccount> obsoleteValueMono = databaseAccountAsyncCache.getAsync(StringUtils.EMPTY, null, fetchDatabaseAccount);
return obsoleteValueMono.flatMap(obsoleteValue -> {
// First-ever fetch: hand back the cached value without a second round-trip.
if (firstTimeDatabaseAccountInitialization.compareAndSet(true, false)) {
return Mono.just(obsoleteValue);
}
return databaseAccountAsyncCache.getAsync(StringUtils.EMPTY, obsoleteValue, fetchDatabaseAccount).doOnError(t -> {
// Refresh failed: restore the stale value so the cache is not left empty.
databaseAccountAsyncCache.set(StringUtils.EMPTY, obsoleteValue);
});
});
}
/** @return true once {@link #close()} has been invoked. */
public boolean isClosed() {
return this.isClosed;
}
} | class GlobalEndpointManager implements AutoCloseable {
private static final Logger logger = LoggerFactory.getLogger(GlobalEndpointManager.class);
private final int backgroundRefreshLocationTimeIntervalInMS;
private final LocationCache locationCache;
private final URI defaultEndpoint;
private final ConnectionPolicy connectionPolicy;
private final DatabaseAccountManagerInternal owner;
private final AtomicBoolean isRefreshing;
private final AtomicBoolean refreshInBackground;
private final ExecutorService executor = Executors.newSingleThreadExecutor();
private final Scheduler scheduler = Schedulers.fromExecutor(executor);
private volatile boolean isClosed;
private AtomicBoolean firstTimeDatabaseAccountInitialization = new AtomicBoolean(true);
private volatile DatabaseAccount latestDatabaseAccount;
public GlobalEndpointManager(DatabaseAccountManagerInternal owner, ConnectionPolicy connectionPolicy, Configs configs) {
this.backgroundRefreshLocationTimeIntervalInMS = configs.getUnavailableLocationsExpirationTimeInSeconds() * 1000;
try {
this.locationCache = new LocationCache(
new ArrayList<>(connectionPolicy.getPreferredLocations() != null ?
connectionPolicy.getPreferredLocations():
Collections.emptyList()
),
owner.getServiceEndpoint(),
connectionPolicy.getEnableEndpointDiscovery(),
BridgeInternal.getUseMultipleWriteLocations(connectionPolicy),
configs);
this.owner = owner;
this.defaultEndpoint = owner.getServiceEndpoint();
this.connectionPolicy = connectionPolicy;
this.isRefreshing = new AtomicBoolean(false);
this.refreshInBackground = new AtomicBoolean(false);
this.isClosed = false;
} catch (Exception e) {
throw new IllegalArgumentException(e);
}
}
public void init() {
startRefreshLocationTimerAsync(true).block();
}
public UnmodifiableList<URI> getReadEndpoints() {
return this.locationCache.getReadEndpoints();
}
public UnmodifiableList<URI> getWriteEndpoints() {
return this.locationCache.getWriteEndpoints();
}
public static Mono<DatabaseAccount> getDatabaseAccountFromAnyLocationsAsync(
URI defaultEndpoint, List<String> locations, Function<URI, Mono<DatabaseAccount>> getDatabaseAccountFn) {
return getDatabaseAccountFn.apply(defaultEndpoint).onErrorResume(
e -> {
logger.error("Fail to reach global gateway [{}], [{}]", defaultEndpoint, e.getMessage());
if (locations.isEmpty()) {
return Mono.error(e);
}
Flux<Flux<DatabaseAccount>> obs = Flux.range(0, locations.size())
.map(index -> getDatabaseAccountFn.apply(LocationHelper.getLocationEndpoint(defaultEndpoint, locations.get(index))).flux());
Mono<DatabaseAccount> res = Flux.concatDelayError(obs).take(1).single();
return res.doOnError(
innerE -> logger.error("Fail to reach location any of locations {} {}", String.join(",", locations), innerE.getMessage()));
});
}
public URI resolveServiceEndpoint(RxDocumentServiceRequest request) {
return this.locationCache.resolveServiceEndpoint(request);
}
/**
 * Records that the given endpoint failed a read so the location cache steers
 * subsequent reads away from it until its unavailability window expires.
 *
 * @param endpoint the read endpoint to mark unavailable
 */
public void markEndpointUnavailableForRead(URI endpoint) {
logger.debug("Marking endpoint {} unavailable for read",endpoint);
// Fixed: removed stray empty statement (double semicolon) after the call.
this.locationCache.markEndpointUnavailableForRead(endpoint);
}
public void markEndpointUnavailableForWrite(URI endpoint) {
logger.debug("Marking endpoint {} unavailable for Write",endpoint);
this.locationCache.markEndpointUnavailableForWrite(endpoint);
}
public boolean CanUseMultipleWriteLocations(RxDocumentServiceRequest request) {
return this.locationCache.canUseMultipleWriteLocations(request);
}
public void close() {
this.isClosed = true;
this.executor.shutdown();
logger.debug("GlobalEndpointManager closed.");
}
public Mono<Void> refreshLocationAsync(DatabaseAccount databaseAccount, boolean forceRefresh) {
return Mono.defer(() -> {
logger.debug("refreshLocationAsync() invoked");
if (forceRefresh) {
Mono<DatabaseAccount> databaseAccountObs = getDatabaseAccountFromAnyLocationsAsync(
this.defaultEndpoint,
new ArrayList<>(this.connectionPolicy.getPreferredLocations()),
this::getDatabaseAccountAsync);
return databaseAccountObs.map(dbAccount -> {
this.locationCache.onDatabaseAccountRead(dbAccount);
return dbAccount;
}).flatMap(dbAccount -> {
return Mono.empty();
});
}
if (!isRefreshing.compareAndSet(false, true)) {
logger.debug("in the middle of another refresh. Not invoking a new refresh.");
return Mono.empty();
}
logger.debug("will refresh");
return this.refreshLocationPrivateAsync(databaseAccount).doOnError(e -> this.isRefreshing.set(false));
});
}
/**
 * Applies the supplied database account (if any) to the location cache and,
 * when the cache reports stale endpoints, either fetches the account inline
 * or schedules a background refresh.
 *
 * @return a Mono that completes once the refresh bookkeeping has finished
 */
private Mono<Void> refreshLocationPrivateAsync(DatabaseAccount databaseAccount) {
return Mono.defer(() -> {
logger.debug("refreshLocationPrivateAsync() refreshing locations");
if (databaseAccount != null) {
this.locationCache.onDatabaseAccountRead(databaseAccount);
}
Utils.ValueHolder<Boolean> canRefreshInBackground = new Utils.ValueHolder<>();
if (this.locationCache.shouldRefreshEndpoints(canRefreshInBackground)) {
logger.debug("shouldRefreshEndpoints: true");
if (databaseAccount == null && !canRefreshInBackground.v) {
logger.debug("shouldRefreshEndpoints: can't be done in background");
Mono<DatabaseAccount> databaseAccountObs = getDatabaseAccountFromAnyLocationsAsync(
this.defaultEndpoint,
new ArrayList<>(this.connectionPolicy.getPreferredLocations()),
this::getDatabaseAccountAsync);
return databaseAccountObs.map(dbAccount -> {
this.locationCache.onDatabaseAccountRead(dbAccount);
this.isRefreshing.set(false);
return dbAccount;
}).flatMap(dbAccount -> {
if (!this.refreshInBackground.get()) {
this.startRefreshLocationTimerAsync();
}
return Mono.empty();
});
}
if (!this.refreshInBackground.get()) {
this.startRefreshLocationTimerAsync();
}
this.isRefreshing.set(false);
return Mono.empty();
} else {
logger.debug("shouldRefreshEndpoints: false, nothing to do.");
this.isRefreshing.set(false);
return Mono.empty();
}
});
}
private void startRefreshLocationTimerAsync() {
startRefreshLocationTimerAsync(false).subscribe();
}
private Mono<Void> startRefreshLocationTimerAsync(boolean initialization) {
if (this.isClosed) {
logger.debug("startRefreshLocationTimerAsync: nothing to do, it is closed");
return Mono.empty();
}
logger.debug("registering a refresh in [{}] ms", this.backgroundRefreshLocationTimeIntervalInMS);
LocalDateTime now = LocalDateTime.now();
int delayInMillis = initialization ? 0: this.backgroundRefreshLocationTimeIntervalInMS;
this.refreshInBackground.set(true);
return Mono.delay(Duration.ofMillis(delayInMillis))
.flatMap(
t -> {
if (this.isClosed) {
logger.warn("client already closed");
return Mono.empty();
}
logger.debug("startRefreshLocationTimerAsync() - Invoking refresh, I was registered on [{}]", now);
Mono<DatabaseAccount> databaseAccountObs = GlobalEndpointManager.getDatabaseAccountFromAnyLocationsAsync(this.defaultEndpoint, new ArrayList<>(this.connectionPolicy.getPreferredLocations()),
this::getDatabaseAccountAsync);
return databaseAccountObs.flatMap(dbAccount -> {
logger.debug("db account retrieved");
this.refreshInBackground.set(false);
return this.refreshLocationPrivateAsync(dbAccount);
});
}).onErrorResume(ex -> {
logger.error("startRefreshLocationTimerAsync() - Unable to refresh database account from any location. Exception: {}", ex.toString(), ex);
this.startRefreshLocationTimerAsync();
return Mono.empty();
}).subscribeOn(scheduler);
}
/**
 * Fetches the database account directly from the given endpoint, recording any
 * non-null result in {@code latestDatabaseAccount}.
 */
private Mono<DatabaseAccount> getDatabaseAccountAsync(URI serviceEndpoint) {
return this.owner.getDatabaseAccountFromEndpoint(serviceEndpoint)
.doOnNext(databaseAccount -> {
if(databaseAccount != null) {
this.latestDatabaseAccount = databaseAccount;
}
logger.debug("account retrieved: {}", databaseAccount);
}).single();
}
public boolean isClosed() {
return this.isClosed;
}
} |
done | private void initializeGatewayConfigurationReader() {
this.gatewayConfigurationReader = new GatewayServiceConfigurationReader(this.serviceEndpoint, this.globalEndpointManager);
DatabaseAccount databaseAccount = this.globalEndpointManager.getLatestDatabaseAccount();
this.useMultipleWriteLocations = this.connectionPolicy.getUsingMultipleWriteLocations() && BridgeInternal.isEnableMultipleWriteLocations(databaseAccount);
} | DatabaseAccount databaseAccount = this.globalEndpointManager.getLatestDatabaseAccount(); | private void initializeGatewayConfigurationReader() {
this.gatewayConfigurationReader = new GatewayServiceConfigurationReader(this.globalEndpointManager);
DatabaseAccount databaseAccount = this.globalEndpointManager.getLatestDatabaseAccount();
assert(databaseAccount != null);
this.useMultipleWriteLocations = this.connectionPolicy.getUsingMultipleWriteLocations() && BridgeInternal.isEnableMultipleWriteLocations(databaseAccount);
} | class RxDocumentClientImpl implements AsyncDocumentClient, IAuthorizationTokenProvider {
private final static ObjectMapper mapper = Utils.getSimpleObjectMapper();
private final Logger logger = LoggerFactory.getLogger(RxDocumentClientImpl.class);
private final String masterKeyOrResourceToken;
private final URI serviceEndpoint;
private final ConnectionPolicy connectionPolicy;
private final ConsistencyLevel consistencyLevel;
private final BaseAuthorizationTokenProvider authorizationTokenProvider;
private final UserAgentContainer userAgentContainer;
private final boolean hasAuthKeyResourceToken;
private final Configs configs;
private final boolean enableTransportClientSharing;
private CosmosKeyCredential cosmosKeyCredential;
private TokenResolver tokenResolver;
private SessionContainer sessionContainer;
private String firstResourceTokenFromPermissionFeed = StringUtils.EMPTY;
private RxClientCollectionCache collectionCache;
private RxStoreModel gatewayProxy;
private RxStoreModel storeModel;
private GlobalAddressResolver addressResolver;
private RxPartitionKeyRangeCache partitionKeyRangeCache;
private Map<String, List<PartitionKeyAndResourceTokenPair>> resourceTokensMap;
private IRetryPolicyFactory resetSessionTokenRetryPolicy;
/**
* Compatibility mode: Allows to specify compatibility mode used by client when
* making query requests. Should be removed when application/sql is no longer
* supported.
*/
private final QueryCompatibilityMode queryCompatibilityMode = QueryCompatibilityMode.Default;
private final HttpClient reactorHttpClient;
private final GlobalEndpointManager globalEndpointManager;
private final RetryPolicy retryPolicy;
private volatile boolean useMultipleWriteLocations;
private StoreClientFactory storeClientFactory;
private GatewayServiceConfigurationReader gatewayConfigurationReader;
public RxDocumentClientImpl(URI serviceEndpoint,
String masterKeyOrResourceToken,
List<Permission> permissionFeed,
ConnectionPolicy connectionPolicy,
ConsistencyLevel consistencyLevel,
Configs configs,
TokenResolver tokenResolver,
CosmosKeyCredential cosmosKeyCredential,
boolean sessionCapturingOverride,
boolean enableTransportClientSharing) {
this(serviceEndpoint, masterKeyOrResourceToken, permissionFeed, connectionPolicy, consistencyLevel, configs, cosmosKeyCredential, sessionCapturingOverride, enableTransportClientSharing);
this.tokenResolver = tokenResolver;
}
private RxDocumentClientImpl(URI serviceEndpoint,
String masterKeyOrResourceToken,
List<Permission> permissionFeed,
ConnectionPolicy connectionPolicy,
ConsistencyLevel consistencyLevel,
Configs configs,
CosmosKeyCredential cosmosKeyCredential,
boolean sessionCapturingOverrideEnabled,
boolean enableTransportClientSharing) {
this(serviceEndpoint, masterKeyOrResourceToken, connectionPolicy, consistencyLevel, configs, cosmosKeyCredential, sessionCapturingOverrideEnabled, enableTransportClientSharing);
if (permissionFeed != null && permissionFeed.size() > 0) {
this.resourceTokensMap = new HashMap<>();
for (Permission permission : permissionFeed) {
String[] segments = StringUtils.split(permission.getResourceLink(),
Constants.Properties.PATH_SEPARATOR.charAt(0));
if (segments.length <= 0) {
throw new IllegalArgumentException("resourceLink");
}
List<PartitionKeyAndResourceTokenPair> partitionKeyAndResourceTokenPairs = null;
PathInfo pathInfo = new PathInfo(false, StringUtils.EMPTY, StringUtils.EMPTY, false);
if (!PathsHelper.tryParsePathSegments(permission.getResourceLink(), pathInfo, null)) {
throw new IllegalArgumentException(permission.getResourceLink());
}
partitionKeyAndResourceTokenPairs = resourceTokensMap.get(pathInfo.resourceIdOrFullName);
if (partitionKeyAndResourceTokenPairs == null) {
partitionKeyAndResourceTokenPairs = new ArrayList<>();
this.resourceTokensMap.put(pathInfo.resourceIdOrFullName, partitionKeyAndResourceTokenPairs);
}
PartitionKey partitionKey = permission.getResourcePartitionKey();
partitionKeyAndResourceTokenPairs.add(new PartitionKeyAndResourceTokenPair(
partitionKey != null ? BridgeInternal.getPartitionKeyInternal(partitionKey) : PartitionKeyInternal.Empty,
permission.getToken()));
logger.debug("Initializing resource token map , with map key [{}] , partition key [{}] and resource token",
pathInfo.resourceIdOrFullName, partitionKey != null ? partitionKey.toString() : null, permission.getToken());
}
if(this.resourceTokensMap.isEmpty()) {
throw new IllegalArgumentException("permissionFeed");
}
String firstToken = permissionFeed.get(0).getToken();
if(ResourceTokenAuthorizationHelper.isResourceToken(firstToken)) {
this.firstResourceTokenFromPermissionFeed = firstToken;
}
}
}
RxDocumentClientImpl(URI serviceEndpoint,
String masterKeyOrResourceToken,
ConnectionPolicy connectionPolicy,
ConsistencyLevel consistencyLevel,
Configs configs,
CosmosKeyCredential cosmosKeyCredential,
boolean sessionCapturingOverrideEnabled,
boolean enableTransportClientSharing) {
logger.info(
"Initializing DocumentClient with"
+ " serviceEndpoint [{}], connectionPolicy [{}], consistencyLevel [{}], directModeProtocol [{}]",
serviceEndpoint, connectionPolicy, consistencyLevel, configs.getProtocol());
this.enableTransportClientSharing = enableTransportClientSharing;
this.configs = configs;
this.masterKeyOrResourceToken = masterKeyOrResourceToken;
this.serviceEndpoint = serviceEndpoint;
this.cosmosKeyCredential = cosmosKeyCredential;
if (this.cosmosKeyCredential != null) {
hasAuthKeyResourceToken = false;
this.authorizationTokenProvider = new BaseAuthorizationTokenProvider(this.cosmosKeyCredential);
} else if (masterKeyOrResourceToken != null && ResourceTokenAuthorizationHelper.isResourceToken(masterKeyOrResourceToken)) {
this.authorizationTokenProvider = null;
hasAuthKeyResourceToken = true;
} else if(masterKeyOrResourceToken != null && !ResourceTokenAuthorizationHelper.isResourceToken(masterKeyOrResourceToken)){
this.cosmosKeyCredential = new CosmosKeyCredential(this.masterKeyOrResourceToken);
hasAuthKeyResourceToken = false;
this.authorizationTokenProvider = new BaseAuthorizationTokenProvider(this.cosmosKeyCredential);
} else {
hasAuthKeyResourceToken = false;
this.authorizationTokenProvider = null;
}
if (connectionPolicy != null) {
this.connectionPolicy = connectionPolicy;
} else {
this.connectionPolicy = new ConnectionPolicy();
}
boolean disableSessionCapturing = (ConsistencyLevel.SESSION != consistencyLevel && !sessionCapturingOverrideEnabled);
this.sessionContainer = new SessionContainer(this.serviceEndpoint.getHost(), disableSessionCapturing);
this.consistencyLevel = consistencyLevel;
this.userAgentContainer = new UserAgentContainer();
String userAgentSuffix = this.connectionPolicy.getUserAgentSuffix();
if (userAgentSuffix != null && userAgentSuffix.length() > 0) {
userAgentContainer.setSuffix(userAgentSuffix);
}
this.reactorHttpClient = httpClient();
this.globalEndpointManager = new GlobalEndpointManager(asDatabaseAccountManagerInternal(), this.connectionPolicy, /**/configs);
this.retryPolicy = new RetryPolicy(this.globalEndpointManager, this.connectionPolicy);
this.resetSessionTokenRetryPolicy = retryPolicy;
}
public void init() {
this.gatewayProxy = createRxGatewayProxy(this.sessionContainer,
this.consistencyLevel,
this.queryCompatibilityMode,
this.userAgentContainer,
this.globalEndpointManager,
this.reactorHttpClient);
this.globalEndpointManager.init();
this.initializeGatewayConfigurationReader();
this.collectionCache = new RxClientCollectionCache(this.sessionContainer, this.gatewayProxy, this, this.retryPolicy);
this.resetSessionTokenRetryPolicy = new ResetSessionTokenRetryPolicyFactory(this.sessionContainer, this.collectionCache, this.retryPolicy);
this.partitionKeyRangeCache = new RxPartitionKeyRangeCache(RxDocumentClientImpl.this,
collectionCache);
if (this.connectionPolicy.getConnectionMode() == ConnectionMode.GATEWAY) {
this.storeModel = this.gatewayProxy;
} else {
this.initializeDirectConnectivity();
}
}
private void initializeDirectConnectivity() {
this.storeClientFactory = new StoreClientFactory(
this.configs,
this.connectionPolicy.getRequestTimeoutInMillis() / 1000,
0,
this.userAgentContainer,
this.enableTransportClientSharing
);
this.addressResolver = new GlobalAddressResolver(
this.reactorHttpClient,
this.globalEndpointManager,
this.configs.getProtocol(),
this,
this.collectionCache,
this.partitionKeyRangeCache,
userAgentContainer,
null,
this.connectionPolicy);
this.createStoreModel(true);
}
DatabaseAccountManagerInternal asDatabaseAccountManagerInternal() {
return new DatabaseAccountManagerInternal() {
@Override
public URI getServiceEndpoint() {
return RxDocumentClientImpl.this.getServiceEndpoint();
}
@Override
public Flux<DatabaseAccount> getDatabaseAccountFromEndpoint(URI endpoint) {
logger.info("Getting database account endpoint from {}", endpoint);
return RxDocumentClientImpl.this.getDatabaseAccountFromEndpoint(endpoint);
}
@Override
public ConnectionPolicy getConnectionPolicy() {
return RxDocumentClientImpl.this.getConnectionPolicy();
}
};
}
RxGatewayStoreModel createRxGatewayProxy(ISessionContainer sessionContainer,
ConsistencyLevel consistencyLevel,
QueryCompatibilityMode queryCompatibilityMode,
UserAgentContainer userAgentContainer,
GlobalEndpointManager globalEndpointManager,
HttpClient httpClient) {
return new RxGatewayStoreModel(sessionContainer,
consistencyLevel,
queryCompatibilityMode,
userAgentContainer,
globalEndpointManager,
httpClient);
}
/** Builds the gateway HTTP client from the connection policy's timeout, pool, and proxy settings. */
private HttpClient httpClient() {
HttpClientConfig httpClientConfig = new HttpClientConfig(this.configs)
.withMaxIdleConnectionTimeoutInMillis(this.connectionPolicy.getIdleConnectionTimeoutInMillis())
.withPoolSize(this.connectionPolicy.getMaxPoolSize())
.withHttpProxy(this.connectionPolicy.getProxy())
.withRequestTimeoutInMillis(this.connectionPolicy.getRequestTimeoutInMillis());
return HttpClient.createFixed(httpClientConfig);
}
/**
 * Creates the direct-connectivity store client and installs it as the store model.
 */
// NOTE(review): the subscribeRntbdStatus parameter is unused in this body —
// confirm whether the status subscription was dropped intentionally.
private void createStoreModel(boolean subscribeRntbdStatus) {
StoreClient storeClient = this.storeClientFactory.createStoreClient(
this.addressResolver,
this.sessionContainer,
this.gatewayConfigurationReader,
this,
false
);
this.storeModel = new ServerStoreModel(storeClient);
}
@Override
public URI getServiceEndpoint() {
return this.serviceEndpoint;
}
@Override
public URI getWriteEndpoint() {
// First write endpoint in preference order, or null before endpoints are known.
return globalEndpointManager.getWriteEndpoints().stream().findFirst().orElse(null);
}
@Override
public URI getReadEndpoint() {
// First read endpoint in preference order, or null before endpoints are known.
return globalEndpointManager.getReadEndpoints().stream().findFirst().orElse(null);
}
@Override
public ConnectionPolicy getConnectionPolicy() {
return this.connectionPolicy;
}
@Override
public Mono<ResourceResponse<Database>> createDatabase(Database database, RequestOptions options) {
// Wrap the internal call with a fresh retry-policy instance so transient
// failures (throttling, endpoint failover) are retried per request.
DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy();
return ObservableHelper.inlineIfPossibleAsObs(() -> createDatabaseInternal(database, options, retryPolicyInstance), retryPolicyInstance);
}
private Mono<ResourceResponse<Database>> createDatabaseInternal(Database database, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (database == null) {
throw new IllegalArgumentException("Database");
}
logger.debug("Creating a Database. id: [{}]", database.getId());
validateResource(database);
Map<String, String> requestHeaders = this.getRequestHeaders(options);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Create,
ResourceType.Database, Paths.DATABASES_ROOT, database, requestHeaders, options);
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.create(request, retryPolicyInstance).map(response -> toResourceResponse(response, Database.class));
} catch (Exception e) {
logger.debug("Failure in creating a database. due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
@Override
public Mono<ResourceResponse<Database>> deleteDatabase(String databaseLink, RequestOptions options) {
DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy();
return ObservableHelper.inlineIfPossibleAsObs(() -> deleteDatabaseInternal(databaseLink, options, retryPolicyInstance), retryPolicyInstance);
}
private Mono<ResourceResponse<Database>> deleteDatabaseInternal(String databaseLink, RequestOptions options,
DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (StringUtils.isEmpty(databaseLink)) {
throw new IllegalArgumentException("databaseLink");
}
logger.debug("Deleting a Database. databaseLink: [{}]", databaseLink);
String path = Utils.joinPath(databaseLink, null);
Map<String, String> requestHeaders = this.getRequestHeaders(options);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Delete,
ResourceType.Database, path, requestHeaders, options);
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.delete(request, retryPolicyInstance).map(response -> toResourceResponse(response, Database.class));
} catch (Exception e) {
logger.debug("Failure in deleting a database. due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
@Override
public Mono<ResourceResponse<Database>> readDatabase(String databaseLink, RequestOptions options) {
DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy();
return ObservableHelper.inlineIfPossibleAsObs(() -> readDatabaseInternal(databaseLink, options, retryPolicyInstance), retryPolicyInstance);
}
private Mono<ResourceResponse<Database>> readDatabaseInternal(String databaseLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (StringUtils.isEmpty(databaseLink)) {
throw new IllegalArgumentException("databaseLink");
}
logger.debug("Reading a Database. databaseLink: [{}]", databaseLink);
String path = Utils.joinPath(databaseLink, null);
Map<String, String> requestHeaders = this.getRequestHeaders(options);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Read,
ResourceType.Database, path, requestHeaders, options);
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, Database.class));
} catch (Exception e) {
logger.debug("Failure in reading a database. due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
@Override
public Flux<FeedResponse<Database>> readDatabases(FeedOptions options) {
    // Databases are a root-level feed; no parent link is needed.
    return readFeed(options, ResourceType.Database, Database.class, Paths.DATABASES_ROOT);
}
/**
 * Maps a parent resource link and a child resource type to the feed link used for queries.
 * Root-scoped types (Database, Offer) ignore the parent link.
 *
 * @throws IllegalArgumentException for resource types that cannot be queried as a feed.
 */
private String parentResourceLinkToQueryLink(String parentResourceLink, ResourceType resourceTypeEnum) {
    switch (resourceTypeEnum) {
    case Database:
        return Paths.DATABASES_ROOT;
    case Offer:
        return Paths.OFFERS_ROOT;
    case DocumentCollection:
        return Utils.joinPath(parentResourceLink, Paths.COLLECTIONS_PATH_SEGMENT);
    case Document:
        return Utils.joinPath(parentResourceLink, Paths.DOCUMENTS_PATH_SEGMENT);
    case User:
        return Utils.joinPath(parentResourceLink, Paths.USERS_PATH_SEGMENT);
    case Permission:
        return Utils.joinPath(parentResourceLink, Paths.PERMISSIONS_PATH_SEGMENT);
    case Attachment:
        return Utils.joinPath(parentResourceLink, Paths.ATTACHMENTS_PATH_SEGMENT);
    case StoredProcedure:
        return Utils.joinPath(parentResourceLink, Paths.STORED_PROCEDURES_PATH_SEGMENT);
    case Trigger:
        return Utils.joinPath(parentResourceLink, Paths.TRIGGERS_PATH_SEGMENT);
    case UserDefinedFunction:
        return Utils.joinPath(parentResourceLink, Paths.USER_DEFINED_FUNCTIONS_PATH_SEGMENT);
    default:
        throw new IllegalArgumentException("resource type not supported");
    }
}
/**
 * Builds and runs a query pipeline for any feed-queryable resource type.
 * The parent link is first converted into the type-specific feed link, then an
 * execution context (single- or cross-partition) is created and drained.
 *
 * @param parentResourceLink link of the parent resource (ignored for root-scoped types).
 * @param sqlQuery           the parameterized query to execute.
 * @param options            feed options; may carry continuation/partition settings.
 * @param klass              resource class used to deserialize each feed page.
 * @param resourceTypeEnum   the resource type being queried.
 */
private <T extends Resource> Flux<FeedResponse<T>> createQuery(
    String parentResourceLink,
    SqlQuerySpec sqlQuery,
    FeedOptions options,
    Class<T> klass,
    ResourceType resourceTypeEnum) {
    String queryResourceLink = parentResourceLinkToQueryLink(parentResourceLink, resourceTypeEnum);
    // Activity id correlates all service calls belonging to this one logical query.
    UUID activityId = Utils.randomUUID();
    // NOTE(review): reads as a call to a method named DocumentQueryClientImpl (no `new`);
    // presumably a factory defined elsewhere in this file — confirm it resolves.
    IDocumentQueryClient queryClient = DocumentQueryClientImpl(RxDocumentClientImpl.this);
    Flux<? extends IDocumentQueryExecutionContext<T>> executionContext =
        DocumentQueryExecutionContextFactory.createDocumentQueryExecutionContextAsync(queryClient, resourceTypeEnum, klass, sqlQuery , options, queryResourceLink, false, activityId);
    return executionContext.flatMap(IDocumentQueryExecutionContext::executeAsync);
}
@Override
public Flux<FeedResponse<Database>> queryDatabases(String query, FeedOptions options) {
    // Delegate to the SqlQuerySpec overload.
    SqlQuerySpec spec = new SqlQuerySpec(query);
    return queryDatabases(spec, options);
}
@Override
public Flux<FeedResponse<Database>> queryDatabases(SqlQuerySpec querySpec, FeedOptions options) {
    return createQuery(
        Paths.DATABASES_ROOT, querySpec, options, Database.class, ResourceType.Database);
}
@Override
public Mono<ResourceResponse<DocumentCollection>> createCollection(String databaseLink,
    DocumentCollection collection, RequestOptions options) {
    DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> this.createCollectionInternal(databaseLink, collection, options, retryPolicy),
        retryPolicy);
}
/** Creates the collection and records the returned session token for session consistency. */
private Mono<ResourceResponse<DocumentCollection>> createCollectionInternal(String databaseLink,
    DocumentCollection collection, RequestOptions options, DocumentClientRetryPolicy retryPolicy) {
    try {
        if (StringUtils.isEmpty(databaseLink)) {
            throw new IllegalArgumentException("databaseLink");
        }
        if (collection == null) {
            throw new IllegalArgumentException("collection");
        }
        logger.debug("Creating a Collection. databaseLink: [{}], Collection id: [{}]", databaseLink,
            collection.getId());
        validateResource(collection);
        Map<String, String> headers = this.getRequestHeaders(options);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Create,
            ResourceType.DocumentCollection,
            Utils.joinPath(databaseLink, Paths.COLLECTIONS_PATH_SEGMENT),
            collection, headers, options);
        if (retryPolicy != null) {
            retryPolicy.onBeforeSendRequest(request);
        }
        return this.create(request, retryPolicy)
            .map(response -> toResourceResponse(response, DocumentCollection.class))
            .doOnNext(resourceResponse -> this.sessionContainer.setSessionToken(
                resourceResponse.getResource().getResourceId(),
                getAltLink(resourceResponse.getResource()),
                resourceResponse.getResponseHeaders()));
    } catch (Exception e) {
        logger.debug("Failure in creating a collection. due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
@Override
public Mono<ResourceResponse<DocumentCollection>> replaceCollection(DocumentCollection collection,
    RequestOptions options) {
    DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> replaceCollectionInternal(collection, options, retryPolicy),
        retryPolicy);
}
/** Replaces the collection; on success captures the session token when a resource body is returned. */
private Mono<ResourceResponse<DocumentCollection>> replaceCollectionInternal(DocumentCollection collection,
    RequestOptions options, DocumentClientRetryPolicy retryPolicy) {
    try {
        if (collection == null) {
            throw new IllegalArgumentException("collection");
        }
        logger.debug("Replacing a Collection. id: [{}]", collection.getId());
        validateResource(collection);
        Map<String, String> headers = this.getRequestHeaders(options);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Replace,
            ResourceType.DocumentCollection,
            Utils.joinPath(collection.getSelfLink(), null),
            collection, headers, options);
        if (retryPolicy != null) {
            retryPolicy.onBeforeSendRequest(request);
        }
        return this.replace(request, retryPolicy)
            .map(response -> toResourceResponse(response, DocumentCollection.class))
            .doOnNext(resourceResponse -> {
                // Replace may return an empty body; only record the session token when present.
                if (resourceResponse.getResource() != null) {
                    this.sessionContainer.setSessionToken(
                        resourceResponse.getResource().getResourceId(),
                        getAltLink(resourceResponse.getResource()),
                        resourceResponse.getResponseHeaders());
                }
            });
    } catch (Exception e) {
        logger.debug("Failure in replacing a collection. due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
@Override
public Mono<ResourceResponse<DocumentCollection>> deleteCollection(String collectionLink,
    RequestOptions options) {
    DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> deleteCollectionInternal(collectionLink, options, retryPolicy),
        retryPolicy);
}
/** Deletes a collection addressed by its link. */
private Mono<ResourceResponse<DocumentCollection>> deleteCollectionInternal(String collectionLink,
    RequestOptions options, DocumentClientRetryPolicy retryPolicy) {
    try {
        if (StringUtils.isEmpty(collectionLink)) {
            throw new IllegalArgumentException("collectionLink");
        }
        logger.debug("Deleting a Collection. collectionLink: [{}]", collectionLink);
        Map<String, String> headers = this.getRequestHeaders(options);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Delete,
            ResourceType.DocumentCollection, Utils.joinPath(collectionLink, null), headers, options);
        if (retryPolicy != null) {
            retryPolicy.onBeforeSendRequest(request);
        }
        return this.delete(request, retryPolicy)
                   .map(response -> toResourceResponse(response, DocumentCollection.class));
    } catch (Exception e) {
        logger.debug("Failure in deleting a collection, due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/** Sends a DELETE through the store proxy, refreshing the retry context on retries. */
private Mono<RxDocumentServiceResponse> delete(RxDocumentServiceRequest request, DocumentClientRetryPolicy retryPolicy) {
    populateHeaders(request, RequestVerb.DELETE);
    boolean isRetry = request.requestContext != null && retryPolicy.getRetryCount() > 0;
    if (isRetry) {
        retryPolicy.updateEndTime();
        request.requestContext.updateRetryContext(retryPolicy, true);
    }
    return getStoreProxy(request).processMessage(request);
}
/** Sends a GET through the store proxy, refreshing the retry context on retries. */
private Mono<RxDocumentServiceResponse> read(RxDocumentServiceRequest request, DocumentClientRetryPolicy retryPolicy) {
    populateHeaders(request, RequestVerb.GET);
    boolean isRetry = request.requestContext != null && retryPolicy.getRetryCount() > 0;
    if (isRetry) {
        retryPolicy.updateEndTime();
        request.requestContext.updateRetryContext(retryPolicy, true);
    }
    return getStoreProxy(request).processMessage(request);
}
/** Sends a feed-read (GET) request; feed reads always go through the gateway proxy. */
Mono<RxDocumentServiceResponse> readFeed(RxDocumentServiceRequest request) {
    populateHeaders(request, RequestVerb.GET);
    return gatewayProxy.processMessage(request);
}
/** Sends a query (POST) and captures the response session token before emitting it. */
private Mono<RxDocumentServiceResponse> query(RxDocumentServiceRequest request) {
    populateHeaders(request, RequestVerb.POST);
    return this.getStoreProxy(request)
        .processMessage(request)
        .map(resp -> {
            this.captureSessionToken(request, resp);
            return resp;
        });
}
@Override
public Mono<ResourceResponse<DocumentCollection>> readCollection(String collectionLink,
    RequestOptions options) {
    DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> readCollectionInternal(collectionLink, options, retryPolicy),
        retryPolicy);
}
/** Reads a collection addressed by its link. */
private Mono<ResourceResponse<DocumentCollection>> readCollectionInternal(String collectionLink,
    RequestOptions options, DocumentClientRetryPolicy retryPolicy) {
    try {
        if (StringUtils.isEmpty(collectionLink)) {
            throw new IllegalArgumentException("collectionLink");
        }
        logger.debug("Reading a Collection. collectionLink: [{}]", collectionLink);
        Map<String, String> headers = this.getRequestHeaders(options);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Read,
            ResourceType.DocumentCollection, Utils.joinPath(collectionLink, null), headers, options);
        if (retryPolicy != null) {
            retryPolicy.onBeforeSendRequest(request);
        }
        return this.read(request, retryPolicy)
                   .map(response -> toResourceResponse(response, DocumentCollection.class));
    } catch (Exception e) {
        logger.debug("Failure in reading a collection, due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
@Override
public Flux<FeedResponse<DocumentCollection>> readCollections(String databaseLink, FeedOptions options) {
    if (StringUtils.isEmpty(databaseLink)) {
        throw new IllegalArgumentException("databaseLink");
    }
    String feedLink = Utils.joinPath(databaseLink, Paths.COLLECTIONS_PATH_SEGMENT);
    return readFeed(options, ResourceType.DocumentCollection, DocumentCollection.class, feedLink);
}
@Override
public Flux<FeedResponse<DocumentCollection>> queryCollections(String databaseLink, String query,
    FeedOptions options) {
    // Delegate to the SqlQuerySpec overload.
    return queryCollections(databaseLink, new SqlQuerySpec(query), options);
}
@Override
public Flux<FeedResponse<DocumentCollection>> queryCollections(String databaseLink,
    SqlQuerySpec querySpec, FeedOptions options) {
    return createQuery(
        databaseLink, querySpec, options, DocumentCollection.class, ResourceType.DocumentCollection);
}
/**
 * Serializes stored-procedure parameters into a JSON array literal.
 * JsonSerializable values use their own serializer; everything else goes through Jackson.
 *
 * @throws IllegalArgumentException when a value cannot be serialized to JSON.
 */
private static String serializeProcedureParams(Object[] objectArray) {
    String[] serialized = new String[objectArray.length];
    int i = 0;
    for (Object object : objectArray) {
        if (object instanceof JsonSerializable) {
            serialized[i] = ((JsonSerializable) object).toJson();
        } else {
            try {
                serialized[i] = mapper.writeValueAsString(object);
            } catch (IOException e) {
                throw new IllegalArgumentException("Can't serialize the object into the json string", e);
            }
        }
        ++i;
    }
    return String.format("[%s]", StringUtils.join(serialized, ","));
}
private static void validateResource(Resource resource) {
if (!StringUtils.isEmpty(resource.getId())) {
if (resource.getId().indexOf('/') != -1 || resource.getId().indexOf('\\') != -1 ||
resource.getId().indexOf('?') != -1 || resource.getId().indexOf('
throw new IllegalArgumentException("Id contains illegal chars.");
}
if (resource.getId().endsWith(" ")) {
throw new IllegalArgumentException("Id ends with a space.");
}
}
}
/**
 * Builds the per-request header map from client-level defaults plus the caller's options.
 * Ordering matters: client defaults (tentative writes, client consistency) are written first,
 * then custom option headers, then option-level values — so later puts override earlier ones.
 *
 * @param options optional request options; null yields only the client defaults.
 * @return a mutable header map, never null.
 */
private Map<String, String> getRequestHeaders(RequestOptions options) {
    Map<String, String> headers = new HashMap<>();
    // Multi-master accounts allow tentative writes on every request.
    if (this.useMultipleWriteLocations) {
        headers.put(HttpConstants.HttpHeaders.ALLOW_TENTATIVE_WRITES, Boolean.TRUE.toString());
    }
    if (consistencyLevel != null) {
        headers.put(HttpConstants.HttpHeaders.CONSISTENCY_LEVEL, consistencyLevel.toString());
    }
    if (options == null) {
        return headers;
    }
    // Caller-supplied raw headers go in before typed options, so typed options win on conflict.
    Map<String, String> customOptions = options.getHeaders();
    if (customOptions != null) {
        headers.putAll(customOptions);
    }
    if (options.getAccessCondition() != null) {
        if (options.getAccessCondition().getType() == AccessConditionType.IF_MATCH) {
            headers.put(HttpConstants.HttpHeaders.IF_MATCH, options.getAccessCondition().getCondition());
        } else {
            headers.put(HttpConstants.HttpHeaders.IF_NONE_MATCH, options.getAccessCondition().getCondition());
        }
    }
    // Request-level consistency overrides the client-level value written above.
    if (options.getConsistencyLevel() != null) {
        headers.put(HttpConstants.HttpHeaders.CONSISTENCY_LEVEL, options.getConsistencyLevel().toString());
    }
    if (options.getIndexingDirective() != null) {
        headers.put(HttpConstants.HttpHeaders.INDEXING_DIRECTIVE, options.getIndexingDirective().toString());
    }
    if (options.getPostTriggerInclude() != null && options.getPostTriggerInclude().size() > 0) {
        String postTriggerInclude = StringUtils.join(options.getPostTriggerInclude(), ",");
        headers.put(HttpConstants.HttpHeaders.POST_TRIGGER_INCLUDE, postTriggerInclude);
    }
    if (options.getPreTriggerInclude() != null && options.getPreTriggerInclude().size() > 0) {
        String preTriggerInclude = StringUtils.join(options.getPreTriggerInclude(), ",");
        headers.put(HttpConstants.HttpHeaders.PRE_TRIGGER_INCLUDE, preTriggerInclude);
    }
    if (!Strings.isNullOrEmpty(options.getSessionToken())) {
        headers.put(HttpConstants.HttpHeaders.SESSION_TOKEN, options.getSessionToken());
    }
    if (options.getResourceTokenExpirySeconds() != null) {
        headers.put(HttpConstants.HttpHeaders.RESOURCE_TOKEN_EXPIRY,
            String.valueOf(options.getResourceTokenExpirySeconds()));
    }
    // Explicit throughput wins over a named offer type.
    if (options.getOfferThroughput() != null && options.getOfferThroughput() >= 0) {
        headers.put(HttpConstants.HttpHeaders.OFFER_THROUGHPUT, options.getOfferThroughput().toString());
    } else if (options.getOfferType() != null) {
        headers.put(HttpConstants.HttpHeaders.OFFER_TYPE, options.getOfferType());
    }
    if (options.isPopulateQuotaInfo()) {
        headers.put(HttpConstants.HttpHeaders.POPULATE_QUOTA_INFO, String.valueOf(true));
    }
    if (options.isScriptLoggingEnabled()) {
        headers.put(HttpConstants.HttpHeaders.SCRIPT_ENABLE_LOGGING, String.valueOf(true));
    }
    return headers;
}
/** Resolves the target collection, then stamps the partition key onto the request. */
private Mono<RxDocumentServiceRequest> addPartitionKeyInformation(RxDocumentServiceRequest request,
                                                                  String contentAsString,
                                                                  Document document,
                                                                  RequestOptions options) {
    return this.collectionCache
        .resolveCollectionAsync(request)
        .map(holder -> {
            addPartitionKeyInformation(request, contentAsString, document, options, holder.v);
            return request;
        });
}
/** Same as the Document overload, but with a pre-resolved collection observable. */
private Mono<RxDocumentServiceRequest> addPartitionKeyInformation(RxDocumentServiceRequest request,
                                                                  String contentAsString,
                                                                  Object document,
                                                                  RequestOptions options,
                                                                  Mono<Utils.ValueHolder<DocumentCollection>> collectionObs) {
    return collectionObs.map(holder -> {
        addPartitionKeyInformation(request, contentAsString, document, options, holder.v);
        return request;
    });
}
/**
 * Computes the effective partition key for the request and writes it both onto the
 * request object and into the x-ms-documentdb-partitionkey header.
 * Precedence (order of the branches is the contract):
 *   1. explicit PartitionKey.NONE in options,
 *   2. explicit partition key in options,
 *   3. empty key when the collection has no partition key definition,
 *   4. value extracted from the document body,
 *   5. otherwise the operation cannot proceed.
 *
 * @throws UnsupportedOperationException when no partition key can be determined.
 */
private void addPartitionKeyInformation(RxDocumentServiceRequest request,
                                        String contentAsString,
                                        Object objectDoc, RequestOptions options,
                                        DocumentCollection collection) {
    PartitionKeyDefinition partitionKeyDefinition = collection.getPartitionKey();
    PartitionKeyInternal partitionKeyInternal = null;
    if (options != null && options.getPartitionKey() != null && options.getPartitionKey().equals(PartitionKey.NONE)){
        partitionKeyInternal = BridgeInternal.getNonePartitionKey(partitionKeyDefinition);
    } else if (options != null && options.getPartitionKey() != null) {
        partitionKeyInternal = BridgeInternal.getPartitionKeyInternal(options.getPartitionKey());
    } else if (partitionKeyDefinition == null || partitionKeyDefinition.getPaths().size() == 0) {
        // Non-partitioned collection: use the empty key.
        partitionKeyInternal = PartitionKeyInternal.getEmpty();
    } else if (contentAsString != null) {
        // Avoid a re-parse when the caller already has typed item properties.
        CosmosItemProperties cosmosItemProperties;
        if (objectDoc instanceof CosmosItemProperties) {
            cosmosItemProperties = (CosmosItemProperties) objectDoc;
        } else {
            cosmosItemProperties = new CosmosItemProperties(contentAsString);
        }
        partitionKeyInternal = extractPartitionKeyValueFromDocument(cosmosItemProperties, partitionKeyDefinition);
    } else {
        throw new UnsupportedOperationException("PartitionKey value must be supplied for this operation.");
    }
    request.setPartitionKeyInternal(partitionKeyInternal);
    // Header value must be ASCII-safe for the wire.
    request.getHeaders().put(HttpConstants.HttpHeaders.PARTITION_KEY, Utils.escapeNonAscii(partitionKeyInternal.toJson()));
}
/**
 * Pulls the partition key value out of a document body using the first (and only
 * supported) partition key path. Missing values map to the "none" partition key.
 *
 * @return the internal partition key, or null when no definition is supplied.
 */
private static PartitionKeyInternal extractPartitionKeyValueFromDocument(
    CosmosItemProperties document,
    PartitionKeyDefinition partitionKeyDefinition) {
    if (partitionKeyDefinition == null) {
        return null;
    }
    String path = partitionKeyDefinition.getPaths().iterator().next();
    List<String> parts = PathParser.getPathParts(path);
    if (parts.size() < 1) {
        return null;
    }
    Object value = document.getObjectByPath(parts);
    // Absent values and object nodes are treated as the "none" partition key.
    if (value == null || value.getClass() == ObjectNode.class) {
        value = BridgeInternal.getNonePartitionKey(partitionKeyDefinition);
    }
    if (value instanceof PartitionKeyInternal) {
        return (PartitionKeyInternal) value;
    }
    return PartitionKeyInternal.fromObjectArray(Collections.singletonList(value), false);
}
/**
 * Builds the service request for a document create/upsert: serializes the body,
 * constructs the request, then resolves the collection to stamp the partition key.
 *
 * NOTE(review): the disableAutomaticIdGeneration parameter is not used in this body —
 * presumably id generation happens elsewhere (or was dropped); confirm against callers.
 *
 * @throws IllegalArgumentException when the collection link or document is missing.
 */
private Mono<RxDocumentServiceRequest> getCreateDocumentRequest(String documentCollectionLink, Object document,
    RequestOptions options, boolean disableAutomaticIdGeneration, OperationType operationType) {
    if (StringUtils.isEmpty(documentCollectionLink)) {
        throw new IllegalArgumentException("documentCollectionLink");
    }
    if (document == null) {
        throw new IllegalArgumentException("document");
    }
    String content = toJsonString(document, mapper);
    String path = Utils.joinPath(documentCollectionLink, Paths.DOCUMENTS_PATH_SEGMENT);
    Map<String, String> requestHeaders = this.getRequestHeaders(options);
    RxDocumentServiceRequest request = RxDocumentServiceRequest.create(operationType, ResourceType.Document, path,
        requestHeaders, options, content);
    // Partition key must be derived from the resolved collection's key definition.
    Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = this.collectionCache.resolveCollectionAsync(request);
    return addPartitionKeyInformation(request, content, document, options, collectionObs);
}
/**
 * Stamps the common wire headers onto a request: date, authorization (when any
 * credential source is configured), and content-type/accept defaults.
 * The x-ms-date header must be set before computing the auth signature, since the
 * signature covers it.
 *
 * @throws IllegalStateException when the auth token cannot be URL-encoded.
 */
private void populateHeaders(RxDocumentServiceRequest request, RequestVerb httpMethod) {
    request.getHeaders().put(HttpConstants.HttpHeaders.X_DATE, Utils.nowAsRFC1123());
    // Any of the four credential sources enables authorization header generation.
    if (this.masterKeyOrResourceToken != null || this.resourceTokensMap != null
        || this.tokenResolver != null || this.cosmosKeyCredential != null) {
        String resourceName = request.getResourceAddress();
        String authorization = this.getUserAuthorizationToken(
            resourceName, request.getResourceType(), httpMethod, request.getHeaders(),
            AuthorizationTokenType.PrimaryMasterKey, request.properties);
        try {
            // The service expects the token URL-encoded.
            authorization = URLEncoder.encode(authorization, "UTF-8");
        } catch (UnsupportedEncodingException e) {
            throw new IllegalStateException("Failed to encode authtoken.", e);
        }
        request.getHeaders().put(HttpConstants.HttpHeaders.AUTHORIZATION, authorization);
    }
    // Default content negotiation headers, only when the caller has not set them.
    if ((RequestVerb.POST.equals(httpMethod) || RequestVerb.PUT.equals(httpMethod))
        && !request.getHeaders().containsKey(HttpConstants.HttpHeaders.CONTENT_TYPE)) {
        request.getHeaders().put(HttpConstants.HttpHeaders.CONTENT_TYPE, RuntimeConstants.MediaTypes.JSON);
    }
    if (!request.getHeaders().containsKey(HttpConstants.HttpHeaders.ACCEPT)) {
        request.getHeaders().put(HttpConstants.HttpHeaders.ACCEPT, RuntimeConstants.MediaTypes.JSON);
    }
}
/**
 * Produces the authorization token for a request. Credential sources are consulted in
 * strict precedence order (the branch order is the contract):
 *   1. a user-supplied token resolver,
 *   2. a key credential (signature computed over the request),
 *   3. a single resource token supplied as the master-key argument,
 *   4. the per-resource token map (with a special case for DatabaseAccount reads).
 */
@Override
public String getUserAuthorizationToken(String resourceName,
                                        ResourceType resourceType,
                                        RequestVerb requestVerb,
                                        Map<String, String> headers,
                                        AuthorizationTokenType tokenType,
                                        Map<String, Object> properties) {
    if (this.tokenResolver != null) {
        // Properties are exposed read-only to the user-supplied resolver.
        return this.tokenResolver.getAuthorizationToken(requestVerb, resourceName, this.resolveCosmosResourceType(resourceType),
            properties != null ? Collections.unmodifiableMap(properties) : null);
    } else if (cosmosKeyCredential != null) {
        return this.authorizationTokenProvider.generateKeyAuthorizationSignature(requestVerb, resourceName,
            resourceType, headers);
    } else if (masterKeyOrResourceToken != null && hasAuthKeyResourceToken && resourceTokensMap == null) {
        return masterKeyOrResourceToken;
    } else {
        assert resourceTokensMap != null;
        // Database-account reads cannot be scoped to a resource; use the first token seen.
        if(resourceType.equals(ResourceType.DatabaseAccount)) {
            return this.firstResourceTokenFromPermissionFeed;
        }
        return ResourceTokenAuthorizationHelper.getAuthorizationTokenUsingResourceTokens(resourceTokensMap, requestVerb, resourceName, headers);
    }
}
/** Maps an internal resource type to the public enum; unknown types fall back to System. */
private CosmosResourceType resolveCosmosResourceType(ResourceType resourceType) {
    try {
        return CosmosResourceType.valueOf(resourceType.toString());
    } catch (IllegalArgumentException ignored) {
        return CosmosResourceType.System;
    }
}
/** Records the response's session token so later requests can honor session consistency. */
void captureSessionToken(RxDocumentServiceRequest request, RxDocumentServiceResponse response) {
    this.sessionContainer.setSessionToken(request, response.getResponseHeaders());
}
/** Sends a create (POST) through the store proxy, refreshing the retry context on retries. */
private Mono<RxDocumentServiceResponse> create(RxDocumentServiceRequest request, DocumentClientRetryPolicy retryPolicy) {
    populateHeaders(request, RequestVerb.POST);
    RxStoreModel storeProxy = this.getStoreProxy(request);
    boolean isRetry = request.requestContext != null && retryPolicy.getRetryCount() > 0;
    if (isRetry) {
        retryPolicy.updateEndTime();
        request.requestContext.updateRetryContext(retryPolicy, true);
    }
    return storeProxy.processMessage(request);
}
/** Sends an upsert (POST with the is-upsert header) and captures the session token. */
private Mono<RxDocumentServiceResponse> upsert(RxDocumentServiceRequest request, DocumentClientRetryPolicy retryPolicy) {
    populateHeaders(request, RequestVerb.POST);
    Map<String, String> headers = request.getHeaders();
    // populateHeaders always writes headers, so this map cannot be null.
    assert (headers != null);
    headers.put(HttpConstants.HttpHeaders.IS_UPSERT, "true");
    boolean isRetry = request.requestContext != null && retryPolicy.getRetryCount() > 0;
    if (isRetry) {
        retryPolicy.updateEndTime();
        request.requestContext.updateRetryContext(retryPolicy, true);
    }
    return getStoreProxy(request)
        .processMessage(request)
        .map(resp -> {
            this.captureSessionToken(request, resp);
            return resp;
        });
}
/** Sends a replace (PUT) through the store proxy, refreshing the retry context on retries. */
private Mono<RxDocumentServiceResponse> replace(RxDocumentServiceRequest request, DocumentClientRetryPolicy retryPolicy) {
    populateHeaders(request, RequestVerb.PUT);
    boolean isRetry = request.requestContext != null && retryPolicy.getRetryCount() > 0;
    if (isRetry) {
        retryPolicy.updateEndTime();
        request.requestContext.updateRetryContext(retryPolicy, true);
    }
    return getStoreProxy(request).processMessage(request);
}
@Override
public Mono<ResourceResponse<Document>> createDocument(String collectionLink, Object document,
    RequestOptions options, boolean disableAutomaticIdGeneration) {
    DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    // Without an explicit partition key, a stale collection cache can cause a PK mismatch;
    // wrap the policy so such failures refresh the cache and retry.
    if (options == null || options.getPartitionKey() == null) {
        retryPolicy = new PartitionKeyMismatchRetryPolicy(collectionCache, retryPolicy, collectionLink, options);
    }
    final DocumentClientRetryPolicy effectivePolicy = retryPolicy;
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> createDocumentInternal(collectionLink, document, options, disableAutomaticIdGeneration, effectivePolicy),
        effectivePolicy);
}
/** Builds the create request (including partition key resolution) and sends it. */
private Mono<ResourceResponse<Document>> createDocumentInternal(String collectionLink, Object document,
    RequestOptions options, boolean disableAutomaticIdGeneration, DocumentClientRetryPolicy retryPolicy) {
    try {
        logger.debug("Creating a Document. collectionLink: [{}]", collectionLink);
        return getCreateDocumentRequest(collectionLink, document, options, disableAutomaticIdGeneration, OperationType.Create)
            .flatMap(request -> {
                if (retryPolicy != null) {
                    retryPolicy.onBeforeSendRequest(request);
                }
                return create(request, retryPolicy);
            })
            .map(serviceResponse -> toResourceResponse(serviceResponse, Document.class));
    } catch (Exception e) {
        logger.debug("Failure in creating a document due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
@Override
public Mono<ResourceResponse<Document>> upsertDocument(String collectionLink, Object document,
    RequestOptions options, boolean disableAutomaticIdGeneration) {
    DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    // No explicit partition key: guard against stale collection cache (see createDocument).
    if (options == null || options.getPartitionKey() == null) {
        retryPolicy = new PartitionKeyMismatchRetryPolicy(collectionCache, retryPolicy, collectionLink, options);
    }
    final DocumentClientRetryPolicy effectivePolicy = retryPolicy;
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> upsertDocumentInternal(collectionLink, document, options, disableAutomaticIdGeneration, effectivePolicy),
        effectivePolicy);
}
/** Builds the upsert request (including partition key resolution) and sends it. */
private Mono<ResourceResponse<Document>> upsertDocumentInternal(String collectionLink, Object document,
    RequestOptions options, boolean disableAutomaticIdGeneration, DocumentClientRetryPolicy retryPolicy) {
    try {
        logger.debug("Upserting a Document. collectionLink: [{}]", collectionLink);
        return getCreateDocumentRequest(collectionLink, document, options, disableAutomaticIdGeneration, OperationType.Upsert)
            .flatMap(request -> {
                if (retryPolicy != null) {
                    retryPolicy.onBeforeSendRequest(request);
                }
                return upsert(request, retryPolicy);
            })
            .map(serviceResponse -> toResourceResponse(serviceResponse, Document.class));
    } catch (Exception e) {
        logger.debug("Failure in upserting a document due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
@Override
public Mono<ResourceResponse<Document>> replaceDocument(String documentLink, Object document,
    RequestOptions options) {
    DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    // No explicit partition key: derive the collection link and guard against stale cache.
    if (options == null || options.getPartitionKey() == null) {
        String collectionLink = Utils.getCollectionName(documentLink);
        retryPolicy = new PartitionKeyMismatchRetryPolicy(collectionCache, retryPolicy, collectionLink, options);
    }
    final DocumentClientRetryPolicy effectivePolicy = retryPolicy;
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> replaceDocumentInternal(documentLink, document, options, effectivePolicy),
        effectivePolicy);
}
/**
 * Validates inputs, converts the raw object to a typed Document, and delegates to the
 * link-based replace path.
 */
private Mono<ResourceResponse<Document>> replaceDocumentInternal(String documentLink, Object document,
    RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(documentLink)) {
            throw new IllegalArgumentException("documentLink");
        }
        if (document == null) {
            throw new IllegalArgumentException("document");
        }
        Document typedDocument = documentFromObject(document, mapper);
        return this.replaceDocumentInternal(documentLink, typedDocument, options, retryPolicyInstance);
    } catch (Exception e) {
        // Pass the throwable so the stack trace is logged (consistent with sibling methods).
        logger.debug("Failure in replacing a document due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
@Override
public Mono<ResourceResponse<Document>> replaceDocument(Document document, RequestOptions options) {
    DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    // No explicit partition key: guard against a stale collection cache.
    if (options == null || options.getPartitionKey() == null) {
        String collectionLink = document.getSelfLink();
        retryPolicy = new PartitionKeyMismatchRetryPolicy(collectionCache, retryPolicy, collectionLink, options);
    }
    final DocumentClientRetryPolicy effectivePolicy = retryPolicy;
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> replaceDocumentInternal(document, options, effectivePolicy),
        effectivePolicy);
}
/** Replaces a document addressed by its own self link. */
private Mono<ResourceResponse<Document>> replaceDocumentInternal(Document document, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (document == null) {
            throw new IllegalArgumentException("document");
        }
        return this.replaceDocumentInternal(document.getSelfLink(), document, options, retryPolicyInstance);
    } catch (Exception e) {
        // Fixed copy-paste log message ("database" -> "document") and attached the throwable.
        logger.debug("Failure in replacing a document due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Core replace path: serializes the document, resolves the collection to stamp the
 * partition key, then issues the PUT.
 */
private Mono<ResourceResponse<Document>> replaceDocumentInternal(String documentLink,
                                                                 Document document,
                                                                 RequestOptions options,
                                                                 DocumentClientRetryPolicy retryPolicyInstance) {
    if (document == null) {
        throw new IllegalArgumentException("document");
    }
    logger.debug("Replacing a Document. documentLink: [{}]", documentLink);
    final String path = Utils.joinPath(documentLink, null);
    final Map<String, String> requestHeaders = getRequestHeaders(options);
    String content = toJsonString(document, mapper);
    final RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Replace,
        ResourceType.Document, path, requestHeaders, options, content);
    Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = collectionCache.resolveCollectionAsync(request);
    Mono<RxDocumentServiceRequest> requestObs = addPartitionKeyInformation(request, content, document, options, collectionObs);
    return requestObs.flatMap(req -> {
        // Fixed: use the emitted request (req) rather than closing over the outer
        // `request` variable — keeps the pipeline correct if the emitted instance differs.
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(req);
        }
        return replace(req, retryPolicyInstance)
            .map(resp -> toResourceResponse(resp, Document.class));
    });
}
@Override
public Mono<ResourceResponse<Document>> deleteDocument(String documentLink, RequestOptions options) {
    DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> deleteDocumentInternal(documentLink, options, retryPolicy),
        retryPolicy);
}
/**
 * Deletes a document: resolves the collection to stamp the partition key
 * (from options, since there is no body), then issues the DELETE.
 */
private Mono<ResourceResponse<Document>> deleteDocumentInternal(String documentLink, RequestOptions options,
                                                                DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(documentLink)) {
            throw new IllegalArgumentException("documentLink");
        }
        logger.debug("Deleting a Document. documentLink: [{}]", documentLink);
        String path = Utils.joinPath(documentLink, null);
        Map<String, String> requestHeaders = this.getRequestHeaders(options);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Delete,
            ResourceType.Document, path, requestHeaders, options);
        Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = collectionCache.resolveCollectionAsync(request);
        // No body on delete: partition key must come from options.
        Mono<RxDocumentServiceRequest> requestObs = addPartitionKeyInformation(request, null, null, options, collectionObs);
        return requestObs.flatMap(req -> {
            if (retryPolicyInstance != null) {
                retryPolicyInstance.onBeforeSendRequest(req);
            }
            return this.delete(req, retryPolicyInstance)
                .map(serviceResponse -> toResourceResponse(serviceResponse, Document.class));
        });
    } catch (Exception e) {
        // Pass the throwable so the stack trace is logged (consistent with sibling methods).
        logger.debug("Failure in deleting a document due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
@Override
public Mono<ResourceResponse<Document>> readDocument(String documentLink, RequestOptions options) {
    DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> readDocumentInternal(documentLink, options, retryPolicy),
        retryPolicy);
}
/**
 * Reads a document: resolves the collection to stamp the partition key
 * (from options, since there is no body), then issues the GET.
 */
private Mono<ResourceResponse<Document>> readDocumentInternal(String documentLink, RequestOptions options,
                                                              DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(documentLink)) {
            throw new IllegalArgumentException("documentLink");
        }
        logger.debug("Reading a Document. documentLink: [{}]", documentLink);
        String path = Utils.joinPath(documentLink, null);
        Map<String, String> requestHeaders = this.getRequestHeaders(options);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Read,
            ResourceType.Document, path, requestHeaders, options);
        Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = this.collectionCache.resolveCollectionAsync(request);
        Mono<RxDocumentServiceRequest> requestObs = addPartitionKeyInformation(request, null, null, options, collectionObs);
        return requestObs.flatMap(req -> {
            // Fixed: use the emitted request (req) rather than the outer `request` variable,
            // matching deleteDocumentInternal and avoiding a latent wrong-instance bug.
            if (retryPolicyInstance != null) {
                retryPolicyInstance.onBeforeSendRequest(req);
            }
            return this.read(req, retryPolicyInstance)
                .map(serviceResponse -> toResourceResponse(serviceResponse, Document.class));
        });
    } catch (Exception e) {
        // Pass the throwable so the stack trace is logged (consistent with sibling methods).
        logger.debug("Failure in reading a document due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
@Override
public Flux<FeedResponse<Document>> readDocuments(String collectionLink, FeedOptions options) {
if (StringUtils.isEmpty(collectionLink)) {
throw new IllegalArgumentException("collectionLink");
}
return queryDocuments(collectionLink, "SELECT * FROM r", options);
}
    /**
     * Point-reads a batch of (id, partition key) pairs in a single logical operation.
     * The keys are grouped by the physical partition (PartitionKeyRange) that owns them,
     * one SQL query is built per partition, and all per-partition results are merged into
     * a single FeedResponse whose request charge is the sum across partitions.
     *
     * @param itemKeyList    the (document id, partition key) pairs to read.
     * @param collectionLink link of the target collection.
     * @param options        feed options applied to the per-partition queries; may be null.
     * @param klass          the POJO type each result document is deserialized into.
     * @return a Mono emitting one merged feed response with all found items.
     */
    @Override
    public <T> Mono<FeedResponse<T>> readMany(
        List<Pair<String, PartitionKey>> itemKeyList,
        String collectionLink,
        FeedOptions options,
        Class<T> klass) {
        // Placeholder query request; used only to resolve the collection from the cache.
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(
            OperationType.Query,
            ResourceType.Document,
            collectionLink, null
        );
        Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = collectionCache.resolveCollectionAsync(request);
        return collectionObs
            .flatMap(documentCollectionResourceResponse -> {
                final DocumentCollection collection = documentCollectionResourceResponse.v;
                if (collection == null) {
                    throw new IllegalStateException("Collection cannot be null");
                }
                // Routing map lookup: needed to map each effective partition key string
                // to the PartitionKeyRange that owns it.
                Mono<Utils.ValueHolder<CollectionRoutingMap>> valueHolderMono = partitionKeyRangeCache
                    .tryLookupAsync(collection
                        .getResourceId(),
                        null,
                        null);
                return valueHolderMono.flatMap(collectionRoutingMapValueHolder -> {
                    Map<PartitionKeyRange, List<Pair<String, PartitionKey>>> partitionRangeItemKeyMap =
                        new HashMap<>();
                    CollectionRoutingMap routingMap = collectionRoutingMapValueHolder.v;
                    if (routingMap == null) {
                        throw new IllegalStateException("Failed to get routing map.");
                    }
                    // Bucket each requested key under its owning partition range.
                    itemKeyList
                        .forEach(stringPartitionKeyPair -> {
                            String effectivePartitionKeyString = PartitionKeyInternalHelper
                                .getEffectivePartitionKeyString(BridgeInternal
                                    .getPartitionKeyInternal(stringPartitionKeyPair
                                        .getRight()),
                                    collection
                                        .getPartitionKey());
                            PartitionKeyRange range =
                                routingMap.getRangeByEffectivePartitionKey(effectivePartitionKeyString);
                            if (partitionRangeItemKeyMap.get(range) == null) {
                                List<Pair<String, PartitionKey>> list = new ArrayList<>();
                                list.add(stringPartitionKeyPair);
                                partitionRangeItemKeyMap.put(range, list);
                            } else {
                                List<Pair<String, PartitionKey>> pairs =
                                    partitionRangeItemKeyMap.get(range);
                                pairs.add(stringPartitionKeyPair);
                                partitionRangeItemKeyMap.put(range, pairs);
                            }
                        });
                    Set<PartitionKeyRange> partitionKeyRanges = partitionRangeItemKeyMap.keySet();
                    List<PartitionKeyRange> ranges = new ArrayList<>();
                    ranges.addAll(partitionKeyRanges);
                    // One SQL spec per partition range, covering exactly that range's keys.
                    Map<PartitionKeyRange, SqlQuerySpec> rangeQueryMap;
                    rangeQueryMap = getRangeQueryMap(partitionRangeItemKeyMap,
                        collection.getPartitionKey());
                    // The outer query text below is never executed against the service;
                    // only the per-range specs in rangeQueryMap are.
                    String sqlQuery = "this is dummy and only used in creating " +
                        "ParallelDocumentQueryExecutioncontext, but not used";
                    return createReadManyQuery(collectionLink,
                        new SqlQuerySpec(sqlQuery),
                        options,
                        Document.class,
                        ResourceType.Document,
                        collection,
                        Collections.unmodifiableMap(rangeQueryMap))
                        .collectList()
                        .map(feedList -> {
                            // Merge all per-partition pages into one response; request
                            // charge is summed across pages.
                            List<T> finalList = new ArrayList<T>();
                            HashMap<String, String> headers = new HashMap<>();
                            double requestCharge = 0;
                            for (FeedResponse<Document> page : feedList) {
                                requestCharge += page.getRequestCharge();
                                finalList.addAll(page.getResults().stream().map(document -> document.toObject(klass)).collect(Collectors.toList()));
                            }
                            headers.put(HttpConstants.HttpHeaders.REQUEST_CHARGE, Double
                                .toString(requestCharge));
                            FeedResponse<T> frp = BridgeInternal
                                .createFeedResponse(finalList, headers);
                            return frp;
                        });
                });
            }
        );
    }
private Map<PartitionKeyRange, SqlQuerySpec> getRangeQueryMap(
Map<PartitionKeyRange, List<Pair<String, PartitionKey>>> partitionRangeItemKeyMap,
PartitionKeyDefinition partitionKeyDefinition) {
Map<PartitionKeyRange, SqlQuerySpec> rangeQueryMap = new HashMap<>();
String partitionKeySelector = createPkSelector(partitionKeyDefinition);
for(Map.Entry<PartitionKeyRange, List<Pair<String, PartitionKey>>> entry: partitionRangeItemKeyMap.entrySet()) {
SqlQuerySpec sqlQuerySpec;
if (partitionKeySelector.equals("[\"id\"]")) {
sqlQuerySpec = createReadManyQuerySpecPartitionKeyIdSame(entry.getValue(), partitionKeySelector);
} else {
sqlQuerySpec = createReadManyQuerySpec(entry.getValue(), partitionKeySelector);
}
rangeQueryMap.put(entry.getKey(), sqlQuerySpec);
}
return rangeQueryMap;
}
private SqlQuerySpec createReadManyQuerySpecPartitionKeyIdSame(List<Pair<String, PartitionKey>> idPartitionKeyPairList, String partitionKeySelector) {
StringBuilder queryStringBuilder = new StringBuilder();
SqlParameterList parameters = new SqlParameterList();
queryStringBuilder.append("SELECT * FROM c WHERE c.id IN ( ");
for (int i = 0; i < idPartitionKeyPairList.size(); i++) {
Pair<String, PartitionKey> pair = idPartitionKeyPairList.get(i);
String idValue = pair.getLeft();
String idParamName = "@param" + i;
PartitionKey pkValueAsPartitionKey = pair.getRight();
Object pkValue = BridgeInternal.getPartitionKeyObject(pkValueAsPartitionKey);
if (!Objects.equals(idValue, pkValue)) {
continue;
}
parameters.add(new SqlParameter(idParamName, idValue));
queryStringBuilder.append(idParamName);
if (i < idPartitionKeyPairList.size() - 1) {
queryStringBuilder.append(", ");
}
}
queryStringBuilder.append(" )");
return new SqlQuerySpec(queryStringBuilder.toString(), parameters);
}
private SqlQuerySpec createReadManyQuerySpec(List<Pair<String, PartitionKey>> idPartitionKeyPairList, String partitionKeySelector) {
StringBuilder queryStringBuilder = new StringBuilder();
SqlParameterList parameters = new SqlParameterList();
queryStringBuilder.append("SELECT * FROM c WHERE ( ");
for (int i = 0; i < idPartitionKeyPairList.size(); i++) {
Pair<String, PartitionKey> pair = idPartitionKeyPairList.get(i);
PartitionKey pkValueAsPartitionKey = pair.getRight();
Object pkValue = BridgeInternal.getPartitionKeyObject(pkValueAsPartitionKey);
String pkParamName = "@param" + (2 * i);
parameters.add(new SqlParameter(pkParamName, pkValue));
String idValue = pair.getLeft();
String idParamName = "@param" + (2 * i + 1);
parameters.add(new SqlParameter(idParamName, idValue));
queryStringBuilder.append("(");
queryStringBuilder.append("c.id = ");
queryStringBuilder.append(idParamName);
queryStringBuilder.append(" AND ");
queryStringBuilder.append(" c");
queryStringBuilder.append(partitionKeySelector);
queryStringBuilder.append((" = "));
queryStringBuilder.append(pkParamName);
queryStringBuilder.append(" )");
if (i < idPartitionKeyPairList.size() - 1) {
queryStringBuilder.append(" OR ");
}
}
queryStringBuilder.append(" )");
return new SqlQuerySpec(queryStringBuilder.toString(), parameters);
}
private String createPkSelector(PartitionKeyDefinition partitionKeyDefinition) {
return partitionKeyDefinition.getPaths()
.stream()
.map(pathPart -> StringUtils.substring(pathPart, 1))
.map(pathPart -> StringUtils.replace(pathPart, "\"", "\\"))
.map(part -> "[\"" + part + "\"]")
.collect(Collectors.joining());
}
private String getCurentParamName(int paramCnt){
return "@param" + paramCnt;
}
    /**
     * Creates and runs the read-many execution context: one query per partition range
     * (taken from {@code rangeQueryMap}) executed in parallel, results flattened into
     * a single Flux of feed pages.
     *
     * @param parentResourceLink link of the parent collection.
     * @param sqlQuery           outer query spec; only used to construct the execution
     *                           context, never sent to the service (per-range specs are).
     * @param options            feed options for the per-partition queries.
     * @param klass              resource type results are deserialized into.
     * @param resourceTypeEnum   resource type of the query target.
     * @param collection         the resolved target collection.
     * @param rangeQueryMap      per-partition-range query specs.
     * @return a Flux of feed pages from all partition ranges.
     */
    private <T extends Resource> Flux<FeedResponse<T>> createReadManyQuery(
        String parentResourceLink,
        SqlQuerySpec sqlQuery,
        FeedOptions options,
        Class<T> klass,
        ResourceType resourceTypeEnum,
        DocumentCollection collection,
        Map<PartitionKeyRange, SqlQuerySpec> rangeQueryMap) {
        UUID activityId = Utils.randomUUID();
        // Adapter exposing this client's caches/consistency settings to the query engine.
        IDocumentQueryClient queryClient = DocumentQueryClientImpl(RxDocumentClientImpl.this);
        Flux<? extends IDocumentQueryExecutionContext<T>> executionContext =
            DocumentQueryExecutionContextFactory.createReadManyQueryAsync(queryClient, collection.getResourceId(),
                sqlQuery,
                rangeQueryMap,
                options,
                collection.getResourceId(),
                parentResourceLink,
                activityId,
                klass,
                resourceTypeEnum);
        return executionContext.flatMap(IDocumentQueryExecutionContext::executeAsync);
    }
@Override
public Flux<FeedResponse<Document>> queryDocuments(String collectionLink, String query,
FeedOptions options) {
return queryDocuments(collectionLink, new SqlQuerySpec(query), options);
}
    /**
     * Builds an {@link IDocumentQueryClient} adapter that exposes this client's caches,
     * retry policy, and consistency configuration to the query execution pipeline.
     * All members delegate straight back to the enclosing RxDocumentClientImpl.
     */
    private IDocumentQueryClient DocumentQueryClientImpl(RxDocumentClientImpl rxDocumentClientImpl) {
        return new IDocumentQueryClient () {

            @Override
            public RxCollectionCache getCollectionCache() {
                return RxDocumentClientImpl.this.collectionCache;
            }

            @Override
            public RxPartitionKeyRangeCache getPartitionKeyRangeCache() {
                return RxDocumentClientImpl.this.partitionKeyRangeCache;
            }

            @Override
            public IRetryPolicyFactory getResetSessionTokenRetryPolicy() {
                return RxDocumentClientImpl.this.resetSessionTokenRetryPolicy;
            }

            @Override
            public ConsistencyLevel getDefaultConsistencyLevelAsync() {
                // Account-level default consistency, as reported by the gateway.
                return RxDocumentClientImpl.this.gatewayConfigurationReader.getDefaultConsistencyLevel();
            }

            @Override
            public ConsistencyLevel getDesiredConsistencyLevelAsync() {
                // Client-configured consistency override; may be null.
                return RxDocumentClientImpl.this.consistencyLevel;
            }

            @Override
            public Mono<RxDocumentServiceResponse> executeQueryAsync(RxDocumentServiceRequest request) {
                return RxDocumentClientImpl.this.query(request).single();
            }

            @Override
            public QueryCompatibilityMode getQueryCompatibilityMode() {
                return QueryCompatibilityMode.Default;
            }

            @Override
            public Mono<RxDocumentServiceResponse> readFeedAsync(RxDocumentServiceRequest request) {
                // Not supported by this adapter; the query pipeline does not use it here.
                return null;
            }
        };
    }
@Override
public Flux<FeedResponse<Document>> queryDocuments(String collectionLink, SqlQuerySpec querySpec,
FeedOptions options) {
return createQuery(collectionLink, querySpec, options, Document.class, ResourceType.Document);
}
@Override
public Flux<FeedResponse<Document>> queryDocumentChangeFeed(final String collectionLink,
final ChangeFeedOptions changeFeedOptions) {
if (StringUtils.isEmpty(collectionLink)) {
throw new IllegalArgumentException("collectionLink");
}
ChangeFeedQueryImpl<Document> changeFeedQueryImpl = new ChangeFeedQueryImpl<Document>(this, ResourceType.Document,
Document.class, collectionLink, changeFeedOptions);
return changeFeedQueryImpl.executeAsync();
}
@Override
public Flux<FeedResponse<PartitionKeyRange>> readPartitionKeyRanges(final String collectionLink,
FeedOptions options) {
if (StringUtils.isEmpty(collectionLink)) {
throw new IllegalArgumentException("collectionLink");
}
return readFeed(options, ResourceType.PartitionKeyRange, PartitionKeyRange.class,
Utils.joinPath(collectionLink, Paths.PARTITION_KEY_RANGES_PATH_SEGMENT));
}
private RxDocumentServiceRequest getStoredProcedureRequest(String collectionLink, StoredProcedure storedProcedure,
RequestOptions options, OperationType operationType) {
if (StringUtils.isEmpty(collectionLink)) {
throw new IllegalArgumentException("collectionLink");
}
if (storedProcedure == null) {
throw new IllegalArgumentException("storedProcedure");
}
validateResource(storedProcedure);
String path = Utils.joinPath(collectionLink, Paths.STORED_PROCEDURES_PATH_SEGMENT);
Map<String, String> requestHeaders = this.getRequestHeaders(options);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(operationType, ResourceType.StoredProcedure,
path, storedProcedure, requestHeaders, options);
return request;
}
private RxDocumentServiceRequest getUserDefinedFunctionRequest(String collectionLink, UserDefinedFunction udf,
RequestOptions options, OperationType operationType) {
if (StringUtils.isEmpty(collectionLink)) {
throw new IllegalArgumentException("collectionLink");
}
if (udf == null) {
throw new IllegalArgumentException("udf");
}
validateResource(udf);
String path = Utils.joinPath(collectionLink, Paths.USER_DEFINED_FUNCTIONS_PATH_SEGMENT);
Map<String, String> requestHeaders = this.getRequestHeaders(options);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(operationType,
ResourceType.UserDefinedFunction, path, udf, requestHeaders, options);
return request;
}
@Override
public Mono<ResourceResponse<StoredProcedure>> createStoredProcedure(String collectionLink,
StoredProcedure storedProcedure, RequestOptions options) {
DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
return ObservableHelper.inlineIfPossibleAsObs(() -> createStoredProcedureInternal(collectionLink, storedProcedure, options, requestRetryPolicy), requestRetryPolicy);
}
private Mono<ResourceResponse<StoredProcedure>> createStoredProcedureInternal(String collectionLink,
StoredProcedure storedProcedure, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
logger.debug("Creating a StoredProcedure. collectionLink: [{}], storedProcedure id [{}]",
collectionLink, storedProcedure.getId());
RxDocumentServiceRequest request = getStoredProcedureRequest(collectionLink, storedProcedure, options,
OperationType.Create);
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.create(request, retryPolicyInstance).map(response -> toResourceResponse(response, StoredProcedure.class));
} catch (Exception e) {
logger.debug("Failure in creating a StoredProcedure due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
@Override
public Mono<ResourceResponse<StoredProcedure>> upsertStoredProcedure(String collectionLink,
StoredProcedure storedProcedure, RequestOptions options) {
DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
return ObservableHelper.inlineIfPossibleAsObs(() -> upsertStoredProcedureInternal(collectionLink, storedProcedure, options, requestRetryPolicy), requestRetryPolicy);
}
private Mono<ResourceResponse<StoredProcedure>> upsertStoredProcedureInternal(String collectionLink,
StoredProcedure storedProcedure, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
logger.debug("Upserting a StoredProcedure. collectionLink: [{}], storedProcedure id [{}]",
collectionLink, storedProcedure.getId());
RxDocumentServiceRequest request = getStoredProcedureRequest(collectionLink, storedProcedure, options,
OperationType.Upsert);
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.upsert(request, retryPolicyInstance).map(response -> toResourceResponse(response, StoredProcedure.class));
} catch (Exception e) {
logger.debug("Failure in upserting a StoredProcedure due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
@Override
public Mono<ResourceResponse<StoredProcedure>> replaceStoredProcedure(StoredProcedure storedProcedure,
RequestOptions options) {
DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
return ObservableHelper.inlineIfPossibleAsObs(() -> replaceStoredProcedureInternal(storedProcedure, options, requestRetryPolicy), requestRetryPolicy);
}
private Mono<ResourceResponse<StoredProcedure>> replaceStoredProcedureInternal(StoredProcedure storedProcedure,
RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (storedProcedure == null) {
throw new IllegalArgumentException("storedProcedure");
}
logger.debug("Replacing a StoredProcedure. storedProcedure id [{}]", storedProcedure.getId());
RxDocumentClientImpl.validateResource(storedProcedure);
String path = Utils.joinPath(storedProcedure.getSelfLink(), null);
Map<String, String> requestHeaders = getRequestHeaders(options);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Replace,
ResourceType.StoredProcedure, path, storedProcedure, requestHeaders, options);
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.replace(request, retryPolicyInstance).map(response -> toResourceResponse(response, StoredProcedure.class));
} catch (Exception e) {
logger.debug("Failure in replacing a StoredProcedure due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
@Override
public Mono<ResourceResponse<StoredProcedure>> deleteStoredProcedure(String storedProcedureLink,
RequestOptions options) {
DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
return ObservableHelper.inlineIfPossibleAsObs(() -> deleteStoredProcedureInternal(storedProcedureLink, options, requestRetryPolicy), requestRetryPolicy);
}
private Mono<ResourceResponse<StoredProcedure>> deleteStoredProcedureInternal(String storedProcedureLink,
RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (StringUtils.isEmpty(storedProcedureLink)) {
throw new IllegalArgumentException("storedProcedureLink");
}
logger.debug("Deleting a StoredProcedure. storedProcedureLink [{}]", storedProcedureLink);
String path = Utils.joinPath(storedProcedureLink, null);
Map<String, String> requestHeaders = this.getRequestHeaders(options);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Delete,
ResourceType.StoredProcedure, path, requestHeaders, options);
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.delete(request, retryPolicyInstance).map(response -> toResourceResponse(response, StoredProcedure.class));
} catch (Exception e) {
logger.debug("Failure in deleting a StoredProcedure due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
@Override
public Mono<ResourceResponse<StoredProcedure>> readStoredProcedure(String storedProcedureLink,
RequestOptions options) {
DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy();
return ObservableHelper.inlineIfPossibleAsObs(() -> readStoredProcedureInternal(storedProcedureLink, options, retryPolicyInstance), retryPolicyInstance);
}
private Mono<ResourceResponse<StoredProcedure>> readStoredProcedureInternal(String storedProcedureLink,
RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (StringUtils.isEmpty(storedProcedureLink)) {
throw new IllegalArgumentException("storedProcedureLink");
}
logger.debug("Reading a StoredProcedure. storedProcedureLink [{}]", storedProcedureLink);
String path = Utils.joinPath(storedProcedureLink, null);
Map<String, String> requestHeaders = this.getRequestHeaders(options);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Read,
ResourceType.StoredProcedure, path, requestHeaders, options);
if (retryPolicyInstance != null){
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, StoredProcedure.class));
} catch (Exception e) {
logger.debug("Failure in reading a StoredProcedure due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
@Override
public Flux<FeedResponse<StoredProcedure>> readStoredProcedures(String collectionLink,
FeedOptions options) {
if (StringUtils.isEmpty(collectionLink)) {
throw new IllegalArgumentException("collectionLink");
}
return readFeed(options, ResourceType.StoredProcedure, StoredProcedure.class,
Utils.joinPath(collectionLink, Paths.STORED_PROCEDURES_PATH_SEGMENT));
}
@Override
public Flux<FeedResponse<StoredProcedure>> queryStoredProcedures(String collectionLink, String query,
FeedOptions options) {
return queryStoredProcedures(collectionLink, new SqlQuerySpec(query), options);
}
@Override
public Flux<FeedResponse<StoredProcedure>> queryStoredProcedures(String collectionLink,
SqlQuerySpec querySpec, FeedOptions options) {
return createQuery(collectionLink, querySpec, options, StoredProcedure.class, ResourceType.StoredProcedure);
}
@Override
public Mono<StoredProcedureResponse> executeStoredProcedure(String storedProcedureLink,
Object[] procedureParams) {
return this.executeStoredProcedure(storedProcedureLink, null, procedureParams);
}
@Override
public Mono<StoredProcedureResponse> executeStoredProcedure(String storedProcedureLink,
RequestOptions options, Object[] procedureParams) {
DocumentClientRetryPolicy documentClientRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
return ObservableHelper.inlineIfPossibleAsObs(() -> executeStoredProcedureInternal(storedProcedureLink, options, procedureParams, documentClientRetryPolicy), documentClientRetryPolicy);
}
private Mono<StoredProcedureResponse> executeStoredProcedureInternal(String storedProcedureLink,
RequestOptions options, Object[] procedureParams, DocumentClientRetryPolicy retryPolicy) {
try {
logger.debug("Executing a StoredProcedure. storedProcedureLink [{}]", storedProcedureLink);
String path = Utils.joinPath(storedProcedureLink, null);
Map<String, String> requestHeaders = getRequestHeaders(options);
requestHeaders.put(HttpConstants.HttpHeaders.ACCEPT, RuntimeConstants.MediaTypes.JSON);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.ExecuteJavaScript,
ResourceType.StoredProcedure, path,
procedureParams != null ? RxDocumentClientImpl.serializeProcedureParams(procedureParams) : "",
requestHeaders, options);
Mono<RxDocumentServiceRequest> reqObs = addPartitionKeyInformation(request, null, null, options);
return reqObs.flatMap(req -> create(request, retryPolicy)
.map(response -> {
this.captureSessionToken(request, response);
return toStoredProcedureResponse(response);
}));
} catch (Exception e) {
logger.debug("Failure in executing a StoredProcedure due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
@Override
public Mono<ResourceResponse<Trigger>> createTrigger(String collectionLink, Trigger trigger,
RequestOptions options) {
DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy();
return ObservableHelper.inlineIfPossibleAsObs(() -> createTriggerInternal(collectionLink, trigger, options, retryPolicyInstance), retryPolicyInstance);
}
private Mono<ResourceResponse<Trigger>> createTriggerInternal(String collectionLink, Trigger trigger,
RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
logger.debug("Creating a Trigger. collectionLink [{}], trigger id [{}]", collectionLink,
trigger.getId());
RxDocumentServiceRequest request = getTriggerRequest(collectionLink, trigger, options,
OperationType.Create);
if (retryPolicyInstance != null){
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.create(request, retryPolicyInstance).map(response -> toResourceResponse(response, Trigger.class));
} catch (Exception e) {
logger.debug("Failure in creating a Trigger due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
@Override
public Mono<ResourceResponse<Trigger>> upsertTrigger(String collectionLink, Trigger trigger,
RequestOptions options) {
DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy();
return ObservableHelper.inlineIfPossibleAsObs(() -> upsertTriggerInternal(collectionLink, trigger, options, retryPolicyInstance), retryPolicyInstance);
}
private Mono<ResourceResponse<Trigger>> upsertTriggerInternal(String collectionLink, Trigger trigger,
RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
logger.debug("Upserting a Trigger. collectionLink [{}], trigger id [{}]", collectionLink,
trigger.getId());
RxDocumentServiceRequest request = getTriggerRequest(collectionLink, trigger, options,
OperationType.Upsert);
if (retryPolicyInstance != null){
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.upsert(request, retryPolicyInstance).map(response -> toResourceResponse(response, Trigger.class));
} catch (Exception e) {
logger.debug("Failure in upserting a Trigger due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
private RxDocumentServiceRequest getTriggerRequest(String collectionLink, Trigger trigger, RequestOptions options,
OperationType operationType) {
if (StringUtils.isEmpty(collectionLink)) {
throw new IllegalArgumentException("collectionLink");
}
if (trigger == null) {
throw new IllegalArgumentException("trigger");
}
RxDocumentClientImpl.validateResource(trigger);
String path = Utils.joinPath(collectionLink, Paths.TRIGGERS_PATH_SEGMENT);
Map<String, String> requestHeaders = getRequestHeaders(options);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(operationType, ResourceType.Trigger, path,
trigger, requestHeaders, options);
return request;
}
@Override
public Mono<ResourceResponse<Trigger>> replaceTrigger(Trigger trigger, RequestOptions options) {
DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy();
return ObservableHelper.inlineIfPossibleAsObs(() -> replaceTriggerInternal(trigger, options, retryPolicyInstance), retryPolicyInstance);
}
private Mono<ResourceResponse<Trigger>> replaceTriggerInternal(Trigger trigger, RequestOptions options,
DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (trigger == null) {
throw new IllegalArgumentException("trigger");
}
logger.debug("Replacing a Trigger. trigger id [{}]", trigger.getId());
RxDocumentClientImpl.validateResource(trigger);
String path = Utils.joinPath(trigger.getSelfLink(), null);
Map<String, String> requestHeaders = getRequestHeaders(options);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Replace,
ResourceType.Trigger, path, trigger, requestHeaders, options);
if (retryPolicyInstance != null){
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.replace(request, retryPolicyInstance).map(response -> toResourceResponse(response, Trigger.class));
} catch (Exception e) {
logger.debug("Failure in replacing a Trigger due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
@Override
public Mono<ResourceResponse<Trigger>> deleteTrigger(String triggerLink, RequestOptions options) {
DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy();
return ObservableHelper.inlineIfPossibleAsObs(() -> deleteTriggerInternal(triggerLink, options, retryPolicyInstance), retryPolicyInstance);
}
private Mono<ResourceResponse<Trigger>> deleteTriggerInternal(String triggerLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (StringUtils.isEmpty(triggerLink)) {
throw new IllegalArgumentException("triggerLink");
}
logger.debug("Deleting a Trigger. triggerLink [{}]", triggerLink);
String path = Utils.joinPath(triggerLink, null);
Map<String, String> requestHeaders = getRequestHeaders(options);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Delete,
ResourceType.Trigger, path, requestHeaders, options);
if (retryPolicyInstance != null){
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.delete(request, retryPolicyInstance).map(response -> toResourceResponse(response, Trigger.class));
} catch (Exception e) {
logger.debug("Failure in deleting a Trigger due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
@Override
public Mono<ResourceResponse<Trigger>> readTrigger(String triggerLink, RequestOptions options) {
DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy();
return ObservableHelper.inlineIfPossibleAsObs(() -> readTriggerInternal(triggerLink, options, retryPolicyInstance), retryPolicyInstance);
}
private Mono<ResourceResponse<Trigger>> readTriggerInternal(String triggerLink, RequestOptions options,
DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (StringUtils.isEmpty(triggerLink)) {
throw new IllegalArgumentException("triggerLink");
}
logger.debug("Reading a Trigger. triggerLink [{}]", triggerLink);
String path = Utils.joinPath(triggerLink, null);
Map<String, String> requestHeaders = getRequestHeaders(options);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Read,
ResourceType.Trigger, path, requestHeaders, options);
if (retryPolicyInstance != null){
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, Trigger.class));
} catch (Exception e) {
logger.debug("Failure in reading a Trigger due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
@Override
public Flux<FeedResponse<Trigger>> readTriggers(String collectionLink, FeedOptions options) {
if (StringUtils.isEmpty(collectionLink)) {
throw new IllegalArgumentException("collectionLink");
}
return readFeed(options, ResourceType.Trigger, Trigger.class,
Utils.joinPath(collectionLink, Paths.TRIGGERS_PATH_SEGMENT));
}
@Override
public Flux<FeedResponse<Trigger>> queryTriggers(String collectionLink, String query,
FeedOptions options) {
return queryTriggers(collectionLink, new SqlQuerySpec(query), options);
}
@Override
public Flux<FeedResponse<Trigger>> queryTriggers(String collectionLink, SqlQuerySpec querySpec,
FeedOptions options) {
return createQuery(collectionLink, querySpec, options, Trigger.class, ResourceType.Trigger);
}
@Override
public Mono<ResourceResponse<UserDefinedFunction>> createUserDefinedFunction(String collectionLink,
UserDefinedFunction udf, RequestOptions options) {
DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy();
return ObservableHelper.inlineIfPossibleAsObs(() -> createUserDefinedFunctionInternal(collectionLink, udf, options, retryPolicyInstance), retryPolicyInstance);
}
private Mono<ResourceResponse<UserDefinedFunction>> createUserDefinedFunctionInternal(String collectionLink,
UserDefinedFunction udf, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
logger.debug("Creating a UserDefinedFunction. collectionLink [{}], udf id [{}]", collectionLink,
udf.getId());
RxDocumentServiceRequest request = getUserDefinedFunctionRequest(collectionLink, udf, options,
OperationType.Create);
if (retryPolicyInstance != null){
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.create(request, retryPolicyInstance).map(response -> toResourceResponse(response, UserDefinedFunction.class));
} catch (Exception e) {
logger.debug("Failure in creating a UserDefinedFunction due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
@Override
public Mono<ResourceResponse<UserDefinedFunction>> upsertUserDefinedFunction(String collectionLink,
UserDefinedFunction udf, RequestOptions options) {
DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy();
return ObservableHelper.inlineIfPossibleAsObs(() -> upsertUserDefinedFunctionInternal(collectionLink, udf, options, retryPolicyInstance), retryPolicyInstance);
}
private Mono<ResourceResponse<UserDefinedFunction>> upsertUserDefinedFunctionInternal(String collectionLink,
UserDefinedFunction udf, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
logger.debug("Upserting a UserDefinedFunction. collectionLink [{}], udf id [{}]", collectionLink,
udf.getId());
RxDocumentServiceRequest request = getUserDefinedFunctionRequest(collectionLink, udf, options,
OperationType.Upsert);
if (retryPolicyInstance != null){
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.upsert(request, retryPolicyInstance).map(response -> toResourceResponse(response, UserDefinedFunction.class));
} catch (Exception e) {
logger.debug("Failure in upserting a UserDefinedFunction due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
@Override
public Mono<ResourceResponse<UserDefinedFunction>> replaceUserDefinedFunction(UserDefinedFunction udf,
RequestOptions options) {
DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy();
return ObservableHelper.inlineIfPossibleAsObs(() -> replaceUserDefinedFunctionInternal(udf, options, retryPolicyInstance), retryPolicyInstance);
}
private Mono<ResourceResponse<UserDefinedFunction>> replaceUserDefinedFunctionInternal(UserDefinedFunction udf,
RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (udf == null) {
throw new IllegalArgumentException("udf");
}
logger.debug("Replacing a UserDefinedFunction. udf id [{}]", udf.getId());
validateResource(udf);
String path = Utils.joinPath(udf.getSelfLink(), null);
Map<String, String> requestHeaders = this.getRequestHeaders(options);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Replace,
ResourceType.UserDefinedFunction, path, udf, requestHeaders, options);
if (retryPolicyInstance != null){
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.replace(request, retryPolicyInstance).map(response -> toResourceResponse(response, UserDefinedFunction.class));
} catch (Exception e) {
logger.debug("Failure in replacing a UserDefinedFunction due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
@Override
public Mono<ResourceResponse<UserDefinedFunction>> deleteUserDefinedFunction(String udfLink,
                                                                             RequestOptions options) {
    // Delegate to the internal delete under a session-token-reset retry policy.
    final DocumentClientRetryPolicy clientRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> deleteUserDefinedFunctionInternal(udfLink, options, clientRetryPolicy),
        clientRetryPolicy);
}
private Mono<ResourceResponse<UserDefinedFunction>> deleteUserDefinedFunctionInternal(String udfLink,
    RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(udfLink)) {
            throw new IllegalArgumentException("udfLink");
        }
        logger.debug("Deleting a UserDefinedFunction. udfLink [{}]", udfLink);
        final String resourcePath = Utils.joinPath(udfLink, null);
        final Map<String, String> headers = this.getRequestHeaders(options);
        final RxDocumentServiceRequest serviceRequest = RxDocumentServiceRequest.create(OperationType.Delete,
            ResourceType.UserDefinedFunction, resourcePath, headers, options);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(serviceRequest);
        }
        return this.delete(serviceRequest, retryPolicyInstance)
                   .map(response -> toResourceResponse(response, UserDefinedFunction.class));
    } catch (Exception e) {
        logger.debug("Failure in deleting a UserDefinedFunction due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
@Override
public Mono<ResourceResponse<UserDefinedFunction>> readUserDefinedFunction(String udfLink,
                                                                           RequestOptions options) {
    // Read a single udf, with session-token-reset retries around the whole call.
    final DocumentClientRetryPolicy clientRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> readUserDefinedFunctionInternal(udfLink, options, clientRetryPolicy),
        clientRetryPolicy);
}
private Mono<ResourceResponse<UserDefinedFunction>> readUserDefinedFunctionInternal(String udfLink,
    RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(udfLink)) {
            throw new IllegalArgumentException("udfLink");
        }
        logger.debug("Reading a UserDefinedFunction. udfLink [{}]", udfLink);
        final String resourcePath = Utils.joinPath(udfLink, null);
        final Map<String, String> headers = this.getRequestHeaders(options);
        final RxDocumentServiceRequest serviceRequest = RxDocumentServiceRequest.create(OperationType.Read,
            ResourceType.UserDefinedFunction, resourcePath, headers, options);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(serviceRequest);
        }
        return this.read(serviceRequest, retryPolicyInstance)
                   .map(response -> toResourceResponse(response, UserDefinedFunction.class));
    } catch (Exception e) {
        logger.debug("Failure in reading a UserDefinedFunction due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
@Override
public Flux<FeedResponse<UserDefinedFunction>> readUserDefinedFunctions(String collectionLink,
                                                                        FeedOptions options) {
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }
    // UDFs are enumerated from the collection's udfs feed.
    final String feedLink = Utils.joinPath(collectionLink, Paths.USER_DEFINED_FUNCTIONS_PATH_SEGMENT);
    return readFeed(options, ResourceType.UserDefinedFunction, UserDefinedFunction.class, feedLink);
}
@Override
public Flux<FeedResponse<UserDefinedFunction>> queryUserDefinedFunctions(String collectionLink,
                                                                         String query, FeedOptions options) {
    // Delegate to the SqlQuerySpec overload.
    final SqlQuerySpec spec = new SqlQuerySpec(query);
    return queryUserDefinedFunctions(collectionLink, spec, options);
}
@Override
public Flux<FeedResponse<UserDefinedFunction>> queryUserDefinedFunctions(String collectionLink,
                                                                         SqlQuerySpec querySpec, FeedOptions options) {
    // Run the query through the shared query pipeline for UDF resources.
    return createQuery(collectionLink, querySpec, options,
        UserDefinedFunction.class, ResourceType.UserDefinedFunction);
}
@Override
public Mono<ResourceResponse<Conflict>> readConflict(String conflictLink, RequestOptions options) {
    // Read a conflict resource with session-token-reset retries.
    final DocumentClientRetryPolicy clientRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> readConflictInternal(conflictLink, options, clientRetryPolicy),
        clientRetryPolicy);
}
/**
 * Reads a Conflict resource.
 * <p>
 * FIX: the flatMap continuation now uses the request emitted by
 * {@code addPartitionKeyInformation} ({@code req}) instead of silently ignoring the
 * lambda parameter and reusing the captured outer {@code request}. Today the helper
 * appears to mutate and re-emit the same instance, but relying on that made the code
 * fragile: if the helper ever emits a different/decorated request, the partition key
 * headers would have been dropped.
 *
 * @param conflictLink        self link or id-based link of the conflict to read; must be non-empty
 * @param options             per-request options (may be null)
 * @param retryPolicyInstance retry policy notified before each send (may be null)
 * @return a Mono emitting the read Conflict, or an error (argument errors surface as Mono.error)
 */
private Mono<ResourceResponse<Conflict>> readConflictInternal(String conflictLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(conflictLink)) {
            throw new IllegalArgumentException("conflictLink");
        }
        logger.debug("Reading a Conflict. conflictLink [{}]", conflictLink);
        String path = Utils.joinPath(conflictLink, null);
        Map<String, String> requestHeaders = getRequestHeaders(options);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Read,
            ResourceType.Conflict, path, requestHeaders, options);
        // Conflicts are collection children: partition key routing info must be resolved first.
        Mono<RxDocumentServiceRequest> reqObs = addPartitionKeyInformation(request, null, null, options);
        return reqObs.flatMap(req -> {
            if (retryPolicyInstance != null) {
                retryPolicyInstance.onBeforeSendRequest(req);
            }
            return this.read(req, retryPolicyInstance).map(response -> toResourceResponse(response, Conflict.class));
        });
    } catch (Exception e) {
        logger.debug("Failure in reading a Conflict due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
@Override
public Flux<FeedResponse<Conflict>> readConflicts(String collectionLink, FeedOptions options) {
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }
    // Conflicts are enumerated from the collection's conflicts feed.
    final String feedLink = Utils.joinPath(collectionLink, Paths.CONFLICTS_PATH_SEGMENT);
    return readFeed(options, ResourceType.Conflict, Conflict.class, feedLink);
}
@Override
public Flux<FeedResponse<Conflict>> queryConflicts(String collectionLink, String query,
                                                   FeedOptions options) {
    // Delegate to the SqlQuerySpec overload.
    final SqlQuerySpec spec = new SqlQuerySpec(query);
    return queryConflicts(collectionLink, spec, options);
}
@Override
public Flux<FeedResponse<Conflict>> queryConflicts(String collectionLink, SqlQuerySpec querySpec,
                                                   FeedOptions options) {
    // Run the query through the shared query pipeline for Conflict resources.
    return createQuery(collectionLink, querySpec, options, Conflict.class, ResourceType.Conflict);
}
@Override
public Mono<ResourceResponse<Conflict>> deleteConflict(String conflictLink, RequestOptions options) {
    // Delete a conflict resource with session-token-reset retries.
    final DocumentClientRetryPolicy clientRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> deleteConflictInternal(conflictLink, options, clientRetryPolicy),
        clientRetryPolicy);
}
/**
 * Deletes a Conflict resource.
 * <p>
 * FIX: the flatMap continuation now uses the request emitted by
 * {@code addPartitionKeyInformation} ({@code req}) rather than ignoring the lambda
 * parameter and reusing the captured outer {@code request} — same latent defect as in
 * the original readConflictInternal: correct only as long as the helper re-emits the
 * identical instance.
 *
 * @param conflictLink        self link or id-based link of the conflict to delete; must be non-empty
 * @param options             per-request options (may be null)
 * @param retryPolicyInstance retry policy notified before each send (may be null)
 * @return a Mono emitting the delete response, or an error (argument errors surface as Mono.error)
 */
private Mono<ResourceResponse<Conflict>> deleteConflictInternal(String conflictLink, RequestOptions options,
                                                                DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(conflictLink)) {
            throw new IllegalArgumentException("conflictLink");
        }
        logger.debug("Deleting a Conflict. conflictLink [{}]", conflictLink);
        String path = Utils.joinPath(conflictLink, null);
        Map<String, String> requestHeaders = getRequestHeaders(options);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Delete,
            ResourceType.Conflict, path, requestHeaders, options);
        // Conflicts are collection children: partition key routing info must be resolved first.
        Mono<RxDocumentServiceRequest> reqObs = addPartitionKeyInformation(request, null, null, options);
        return reqObs.flatMap(req -> {
            if (retryPolicyInstance != null) {
                retryPolicyInstance.onBeforeSendRequest(req);
            }
            return this.delete(req, retryPolicyInstance).map(response -> toResourceResponse(response, Conflict.class));
        });
    } catch (Exception e) {
        logger.debug("Failure in deleting a Conflict due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
@Override
public Mono<ResourceResponse<User>> createUser(String databaseLink, User user, RequestOptions options) {
    // Create a user under the database, with session-token-reset retries.
    final DocumentClientRetryPolicy clientRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> createUserInternal(databaseLink, user, options, clientRetryPolicy),
        clientRetryPolicy);
}
private Mono<ResourceResponse<User>> createUserInternal(String databaseLink, User user, RequestOptions options, DocumentClientRetryPolicy documentClientRetryPolicy) {
    try {
        logger.debug("Creating a User. databaseLink [{}], user id [{}]", databaseLink, user.getId());
        // getUserRequest validates the arguments and builds the Create request.
        final RxDocumentServiceRequest serviceRequest =
            getUserRequest(databaseLink, user, options, OperationType.Create);
        return this.create(serviceRequest, documentClientRetryPolicy)
                   .map(response -> toResourceResponse(response, User.class));
    } catch (Exception e) {
        logger.debug("Failure in creating a User due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
@Override
public Mono<ResourceResponse<User>> upsertUser(String databaseLink, User user, RequestOptions options) {
    // Upsert a user under the database, with session-token-reset retries.
    final DocumentClientRetryPolicy clientRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> upsertUserInternal(databaseLink, user, options, clientRetryPolicy),
        clientRetryPolicy);
}
private Mono<ResourceResponse<User>> upsertUserInternal(String databaseLink, User user, RequestOptions options,
                                                        DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        logger.debug("Upserting a User. databaseLink [{}], user id [{}]", databaseLink, user.getId());
        // getUserRequest validates the arguments and builds the Upsert request.
        final RxDocumentServiceRequest serviceRequest =
            getUserRequest(databaseLink, user, options, OperationType.Upsert);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(serviceRequest);
        }
        return this.upsert(serviceRequest, retryPolicyInstance)
                   .map(response -> toResourceResponse(response, User.class));
    } catch (Exception e) {
        logger.debug("Failure in upserting a User due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
private RxDocumentServiceRequest getUserRequest(String databaseLink, User user, RequestOptions options,
                                                OperationType operationType) {
    // Shared request builder for user Create/Upsert; validates args and resource.
    if (StringUtils.isEmpty(databaseLink)) {
        throw new IllegalArgumentException("databaseLink");
    }
    if (user == null) {
        throw new IllegalArgumentException("user");
    }
    RxDocumentClientImpl.validateResource(user);
    // Users live under the database's users feed.
    final String resourcePath = Utils.joinPath(databaseLink, Paths.USERS_PATH_SEGMENT);
    final Map<String, String> headers = getRequestHeaders(options);
    return RxDocumentServiceRequest.create(operationType, ResourceType.User, resourcePath, user,
        headers, options);
}
@Override
public Mono<ResourceResponse<User>> replaceUser(User user, RequestOptions options) {
    // Replace a user resource, with session-token-reset retries.
    final DocumentClientRetryPolicy clientRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> replaceUserInternal(user, options, clientRetryPolicy),
        clientRetryPolicy);
}
private Mono<ResourceResponse<User>> replaceUserInternal(User user, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (user == null) {
            throw new IllegalArgumentException("user");
        }
        logger.debug("Replacing a User. user id [{}]", user.getId());
        RxDocumentClientImpl.validateResource(user);
        // Replace is addressed at the user's own self link.
        final String resourcePath = Utils.joinPath(user.getSelfLink(), null);
        final Map<String, String> headers = getRequestHeaders(options);
        final RxDocumentServiceRequest serviceRequest = RxDocumentServiceRequest.create(OperationType.Replace,
            ResourceType.User, resourcePath, user, headers, options);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(serviceRequest);
        }
        return this.replace(serviceRequest, retryPolicyInstance)
                   .map(response -> toResourceResponse(response, User.class));
    } catch (Exception e) {
        logger.debug("Failure in replacing a User due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
// NOTE(review): unlike the sibling wrappers this method lacks @Override — confirm whether
// AsyncDocumentClient declares deleteUser and add the annotation if so.
public Mono<ResourceResponse<User>> deleteUser(String userLink, RequestOptions options) {
    // Delete a user resource, with session-token-reset retries.
    final DocumentClientRetryPolicy clientRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> deleteUserInternal(userLink, options, clientRetryPolicy),
        clientRetryPolicy);
}
private Mono<ResourceResponse<User>> deleteUserInternal(String userLink, RequestOptions options,
                                                        DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(userLink)) {
            throw new IllegalArgumentException("userLink");
        }
        logger.debug("Deleting a User. userLink [{}]", userLink);
        final String resourcePath = Utils.joinPath(userLink, null);
        final Map<String, String> headers = getRequestHeaders(options);
        final RxDocumentServiceRequest serviceRequest = RxDocumentServiceRequest.create(OperationType.Delete,
            ResourceType.User, resourcePath, headers, options);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(serviceRequest);
        }
        return this.delete(serviceRequest, retryPolicyInstance)
                   .map(response -> toResourceResponse(response, User.class));
    } catch (Exception e) {
        logger.debug("Failure in deleting a User due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
@Override
public Mono<ResourceResponse<User>> readUser(String userLink, RequestOptions options) {
    // Read a user resource, with session-token-reset retries.
    final DocumentClientRetryPolicy clientRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> readUserInternal(userLink, options, clientRetryPolicy),
        clientRetryPolicy);
}
private Mono<ResourceResponse<User>> readUserInternal(String userLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(userLink)) {
            throw new IllegalArgumentException("userLink");
        }
        logger.debug("Reading a User. userLink [{}]", userLink);
        final String resourcePath = Utils.joinPath(userLink, null);
        final Map<String, String> headers = getRequestHeaders(options);
        final RxDocumentServiceRequest serviceRequest = RxDocumentServiceRequest.create(OperationType.Read,
            ResourceType.User, resourcePath, headers, options);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(serviceRequest);
        }
        return this.read(serviceRequest, retryPolicyInstance)
                   .map(response -> toResourceResponse(response, User.class));
    } catch (Exception e) {
        logger.debug("Failure in reading a User due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
@Override
public Flux<FeedResponse<User>> readUsers(String databaseLink, FeedOptions options) {
    if (StringUtils.isEmpty(databaseLink)) {
        throw new IllegalArgumentException("databaseLink");
    }
    // Users are enumerated from the database's users feed.
    final String feedLink = Utils.joinPath(databaseLink, Paths.USERS_PATH_SEGMENT);
    return readFeed(options, ResourceType.User, User.class, feedLink);
}
@Override
public Flux<FeedResponse<User>> queryUsers(String databaseLink, String query, FeedOptions options) {
    // Delegate to the SqlQuerySpec overload.
    final SqlQuerySpec spec = new SqlQuerySpec(query);
    return queryUsers(databaseLink, spec, options);
}
@Override
public Flux<FeedResponse<User>> queryUsers(String databaseLink, SqlQuerySpec querySpec,
                                           FeedOptions options) {
    // Run the query through the shared query pipeline for User resources.
    return createQuery(databaseLink, querySpec, options, User.class, ResourceType.User);
}
/**
 * Creates a permission under the given user.
 * <p>
 * FIX: the retry wrapper now reuses the SAME retry policy instance that is passed to
 * {@code createPermissionInternal}. The original called
 * {@code this.resetSessionTokenRetryPolicy.getRequestPolicy()} a second time for the
 * wrapper argument, so the policy notified via onBeforeSendRequest and the policy
 * driving shouldRetry were two unrelated instances — inconsistent with every sibling
 * wrapper in this class and liable to break retry bookkeeping.
 *
 * @param userLink   link of the owning user
 * @param permission permission to create
 * @param options    per-request options (may be null)
 * @return a Mono emitting the created Permission
 */
@Override
public Mono<ResourceResponse<Permission>> createPermission(String userLink, Permission permission,
                                                           RequestOptions options) {
    DocumentClientRetryPolicy documentClientRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> createPermissionInternal(userLink, permission, options, documentClientRetryPolicy),
        documentClientRetryPolicy);
}
private Mono<ResourceResponse<Permission>> createPermissionInternal(String userLink, Permission permission,
                                                                    RequestOptions options, DocumentClientRetryPolicy documentClientRetryPolicy) {
    try {
        logger.debug("Creating a Permission. userLink [{}], permission id [{}]", userLink, permission.getId());
        // getPermissionRequest validates the arguments and builds the Create request.
        final RxDocumentServiceRequest serviceRequest =
            getPermissionRequest(userLink, permission, options, OperationType.Create);
        return this.create(serviceRequest, documentClientRetryPolicy)
                   .map(response -> toResourceResponse(response, Permission.class));
    } catch (Exception e) {
        logger.debug("Failure in creating a Permission due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
@Override
public Mono<ResourceResponse<Permission>> upsertPermission(String userLink, Permission permission,
                                                           RequestOptions options) {
    // Upsert a permission under the user, with session-token-reset retries.
    final DocumentClientRetryPolicy clientRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> upsertPermissionInternal(userLink, permission, options, clientRetryPolicy),
        clientRetryPolicy);
}
private Mono<ResourceResponse<Permission>> upsertPermissionInternal(String userLink, Permission permission,
                                                                    RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        logger.debug("Upserting a Permission. userLink [{}], permission id [{}]", userLink, permission.getId());
        // getPermissionRequest validates the arguments and builds the Upsert request.
        final RxDocumentServiceRequest serviceRequest =
            getPermissionRequest(userLink, permission, options, OperationType.Upsert);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(serviceRequest);
        }
        return this.upsert(serviceRequest, retryPolicyInstance)
                   .map(response -> toResourceResponse(response, Permission.class));
    } catch (Exception e) {
        logger.debug("Failure in upserting a Permission due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
private RxDocumentServiceRequest getPermissionRequest(String userLink, Permission permission,
                                                      RequestOptions options, OperationType operationType) {
    // Shared request builder for permission Create/Upsert; validates args and resource.
    if (StringUtils.isEmpty(userLink)) {
        throw new IllegalArgumentException("userLink");
    }
    if (permission == null) {
        throw new IllegalArgumentException("permission");
    }
    RxDocumentClientImpl.validateResource(permission);
    // Permissions live under the user's permissions feed.
    final String resourcePath = Utils.joinPath(userLink, Paths.PERMISSIONS_PATH_SEGMENT);
    final Map<String, String> headers = getRequestHeaders(options);
    return RxDocumentServiceRequest.create(operationType, ResourceType.Permission, resourcePath,
        permission, headers, options);
}
@Override
public Mono<ResourceResponse<Permission>> replacePermission(Permission permission, RequestOptions options) {
    // Replace a permission resource, with session-token-reset retries.
    final DocumentClientRetryPolicy clientRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> replacePermissionInternal(permission, options, clientRetryPolicy),
        clientRetryPolicy);
}
private Mono<ResourceResponse<Permission>> replacePermissionInternal(Permission permission, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (permission == null) {
            throw new IllegalArgumentException("permission");
        }
        logger.debug("Replacing a Permission. permission id [{}]", permission.getId());
        RxDocumentClientImpl.validateResource(permission);
        // Replace is addressed at the permission's own self link.
        final String resourcePath = Utils.joinPath(permission.getSelfLink(), null);
        final Map<String, String> headers = getRequestHeaders(options);
        final RxDocumentServiceRequest serviceRequest = RxDocumentServiceRequest.create(OperationType.Replace,
            ResourceType.Permission, resourcePath, permission, headers, options);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(serviceRequest);
        }
        return this.replace(serviceRequest, retryPolicyInstance)
                   .map(response -> toResourceResponse(response, Permission.class));
    } catch (Exception e) {
        logger.debug("Failure in replacing a Permission due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
@Override
public Mono<ResourceResponse<Permission>> deletePermission(String permissionLink, RequestOptions options) {
    // Delete a permission resource, with session-token-reset retries.
    final DocumentClientRetryPolicy clientRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> deletePermissionInternal(permissionLink, options, clientRetryPolicy),
        clientRetryPolicy);
}
private Mono<ResourceResponse<Permission>> deletePermissionInternal(String permissionLink, RequestOptions options,
                                                                    DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(permissionLink)) {
            throw new IllegalArgumentException("permissionLink");
        }
        logger.debug("Deleting a Permission. permissionLink [{}]", permissionLink);
        final String resourcePath = Utils.joinPath(permissionLink, null);
        final Map<String, String> headers = getRequestHeaders(options);
        final RxDocumentServiceRequest serviceRequest = RxDocumentServiceRequest.create(OperationType.Delete,
            ResourceType.Permission, resourcePath, headers, options);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(serviceRequest);
        }
        return this.delete(serviceRequest, retryPolicyInstance)
                   .map(response -> toResourceResponse(response, Permission.class));
    } catch (Exception e) {
        logger.debug("Failure in deleting a Permission due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
@Override
public Mono<ResourceResponse<Permission>> readPermission(String permissionLink, RequestOptions options) {
    // Read a permission resource, with session-token-reset retries.
    final DocumentClientRetryPolicy clientRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> readPermissionInternal(permissionLink, options, clientRetryPolicy),
        clientRetryPolicy);
}
private Mono<ResourceResponse<Permission>> readPermissionInternal(String permissionLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance ) {
    try {
        if (StringUtils.isEmpty(permissionLink)) {
            throw new IllegalArgumentException("permissionLink");
        }
        logger.debug("Reading a Permission. permissionLink [{}]", permissionLink);
        final String resourcePath = Utils.joinPath(permissionLink, null);
        final Map<String, String> headers = getRequestHeaders(options);
        final RxDocumentServiceRequest serviceRequest = RxDocumentServiceRequest.create(OperationType.Read,
            ResourceType.Permission, resourcePath, headers, options);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(serviceRequest);
        }
        return this.read(serviceRequest, retryPolicyInstance)
                   .map(response -> toResourceResponse(response, Permission.class));
    } catch (Exception e) {
        logger.debug("Failure in reading a Permission due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
@Override
public Flux<FeedResponse<Permission>> readPermissions(String userLink, FeedOptions options) {
    if (StringUtils.isEmpty(userLink)) {
        throw new IllegalArgumentException("userLink");
    }
    // Permissions are enumerated from the user's permissions feed.
    final String feedLink = Utils.joinPath(userLink, Paths.PERMISSIONS_PATH_SEGMENT);
    return readFeed(options, ResourceType.Permission, Permission.class, feedLink);
}
@Override
public Flux<FeedResponse<Permission>> queryPermissions(String userLink, String query,
                                                       FeedOptions options) {
    // Delegate to the SqlQuerySpec overload.
    final SqlQuerySpec spec = new SqlQuerySpec(query);
    return queryPermissions(userLink, spec, options);
}
@Override
public Flux<FeedResponse<Permission>> queryPermissions(String userLink, SqlQuerySpec querySpec,
                                                       FeedOptions options) {
    // Run the query through the shared query pipeline for Permission resources.
    return createQuery(userLink, querySpec, options, Permission.class, ResourceType.Permission);
}
@Override
public Mono<ResourceResponse<Offer>> replaceOffer(Offer offer) {
    // Replace an offer (throughput) resource, with session-token-reset retries.
    final DocumentClientRetryPolicy clientRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> replaceOfferInternal(offer, clientRetryPolicy),
        clientRetryPolicy);
}
private Mono<ResourceResponse<Offer>> replaceOfferInternal(Offer offer, DocumentClientRetryPolicy documentClientRetryPolicy) {
    try {
        if (offer == null) {
            throw new IllegalArgumentException("offer");
        }
        logger.debug("Replacing an Offer. offer id [{}]", offer.getId());
        RxDocumentClientImpl.validateResource(offer);
        // Offers carry no per-request headers/options: both are passed as null.
        final String resourcePath = Utils.joinPath(offer.getSelfLink(), null);
        final RxDocumentServiceRequest serviceRequest = RxDocumentServiceRequest.create(OperationType.Replace,
            ResourceType.Offer, resourcePath, offer, null, null);
        return this.replace(serviceRequest, documentClientRetryPolicy)
                   .map(response -> toResourceResponse(response, Offer.class));
    } catch (Exception e) {
        logger.debug("Failure in replacing an Offer due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
@Override
public Mono<ResourceResponse<Offer>> readOffer(String offerLink) {
    // Read an offer (throughput) resource, with session-token-reset retries.
    final DocumentClientRetryPolicy clientRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> readOfferInternal(offerLink, clientRetryPolicy),
        clientRetryPolicy);
}
private Mono<ResourceResponse<Offer>> readOfferInternal(String offerLink, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(offerLink)) {
            throw new IllegalArgumentException("offerLink");
        }
        logger.debug("Reading an Offer. offerLink [{}]", offerLink);
        // Offers take no request headers/options; the cast disambiguates the overload.
        final String resourcePath = Utils.joinPath(offerLink, null);
        final RxDocumentServiceRequest serviceRequest = RxDocumentServiceRequest.create(OperationType.Read,
            ResourceType.Offer, resourcePath, (HashMap<String, String>)null, null);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(serviceRequest);
        }
        return this.read(serviceRequest, retryPolicyInstance)
                   .map(response -> toResourceResponse(response, Offer.class));
    } catch (Exception e) {
        logger.debug("Failure in reading an Offer due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
@Override
public Flux<FeedResponse<Offer>> readOffers(FeedOptions options) {
    // Offers are enumerated from the account-level offers feed.
    final String feedLink = Utils.joinPath(Paths.OFFERS_PATH_SEGMENT, null);
    return readFeed(options, ResourceType.Offer, Offer.class, feedLink);
}
// Paginated ReadFeed for resources that are children of a collection (e.g. conflicts):
// unlike readFeed(), each page request first resolves the collection and attaches
// partition key routing information before being sent.
private <T extends Resource> Flux<FeedResponse<T>> readFeedCollectionChild(FeedOptions options, ResourceType resourceType,
Class<T> klass, String resourceLink) {
if (options == null) {
options = new FeedOptions();
}
// -1 means "let the service choose the page size".
int maxPageSize = options.maxItemCount() != null ? options.maxItemCount() : -1;
// Effectively-final copy so the lambdas below can capture the (possibly defaulted) options.
final FeedOptions finalFeedOptions = options;
RequestOptions requestOptions = new RequestOptions();
requestOptions.setPartitionKey(options.partitionKey());
// Builds one page request: carries the continuation token (if any) and the page size header.
BiFunction<String, Integer, RxDocumentServiceRequest> createRequestFunc = (continuationToken, pageSize) -> {
Map<String, String> requestHeaders = new HashMap<>();
if (continuationToken != null) {
requestHeaders.put(HttpConstants.HttpHeaders.CONTINUATION, continuationToken);
}
requestHeaders.put(HttpConstants.HttpHeaders.PAGE_SIZE, Integer.toString(pageSize));
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.ReadFeed,
resourceType, resourceLink, requestHeaders, finalFeedOptions);
return request;
};
// Executes one page request: resolve the collection, add PK info, then read the feed —
// the whole page attempt is wrapped in a fresh session-token-reset retry policy.
Function<RxDocumentServiceRequest, Mono<FeedResponse<T>>> executeFunc = request -> {
return ObservableHelper.inlineIfPossibleAsObs(() -> {
Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = this.collectionCache.resolveCollectionAsync(request);
Mono<RxDocumentServiceRequest> requestObs = this.addPartitionKeyInformation(request, null, null, requestOptions, collectionObs);
return requestObs.flatMap(req -> this.readFeed(req)
.map(response -> toFeedResponsePage(response, klass)));
}, this.resetSessionTokenRetryPolicy.getRequestPolicy());
};
// Paginator drives createRequestFunc/executeFunc until the continuation is exhausted.
return Paginator.getPaginatedQueryResultAsObservable(options, createRequestFunc, executeFunc, klass, maxPageSize);
}
// Generic paginated ReadFeed over any resource feed link. Each page request carries the
// continuation token and page size headers; pagination is driven by Paginator.
private <T extends Resource> Flux<FeedResponse<T>> readFeed(FeedOptions options, ResourceType resourceType, Class<T> klass, String resourceLink) {
if (options == null) {
options = new FeedOptions();
}
// -1 means "let the service choose the page size".
int maxPageSize = options.maxItemCount() != null ? options.maxItemCount() : -1;
// Effectively-final copy so the lambdas below can capture the (possibly defaulted) options.
final FeedOptions finalFeedOptions = options;
// Builds one page request from the current continuation token and page size.
BiFunction<String, Integer, RxDocumentServiceRequest> createRequestFunc = (continuationToken, pageSize) -> {
Map<String, String> requestHeaders = new HashMap<>();
if (continuationToken != null) {
requestHeaders.put(HttpConstants.HttpHeaders.CONTINUATION, continuationToken);
}
requestHeaders.put(HttpConstants.HttpHeaders.PAGE_SIZE, Integer.toString(pageSize));
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.ReadFeed,
resourceType, resourceLink, requestHeaders, finalFeedOptions);
return request;
};
// Executes one page request under a fresh session-token-reset retry policy.
Function<RxDocumentServiceRequest, Mono<FeedResponse<T>>> executeFunc = request -> {
return ObservableHelper.inlineIfPossibleAsObs(() -> readFeed(request).map(response -> toFeedResponsePage(response, klass)),
this.resetSessionTokenRetryPolicy.getRequestPolicy());
};
return Paginator.getPaginatedQueryResultAsObservable(options, createRequestFunc, executeFunc, klass, maxPageSize);
}
@Override
public Flux<FeedResponse<Offer>> queryOffers(String query, FeedOptions options) {
    // Delegate to the SqlQuerySpec overload.
    final SqlQuerySpec spec = new SqlQuerySpec(query);
    return queryOffers(spec, options);
}
@Override
public Flux<FeedResponse<Offer>> queryOffers(SqlQuerySpec querySpec, FeedOptions options) {
    // Offers are account-level, so no parent resource link is supplied.
    return createQuery(null, querySpec, options, Offer.class, ResourceType.Offer);
}
@Override
public Mono<DatabaseAccount> getDatabaseAccount() {
    // Read the account metadata, with session-token-reset retries.
    final DocumentClientRetryPolicy clientRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> getDatabaseAccountInternal(clientRetryPolicy),
        clientRetryPolicy);
}
private Mono<DatabaseAccount> getDatabaseAccountInternal(DocumentClientRetryPolicy documentClientRetryPolicy) {
    try {
        logger.debug("Getting Database Account");
        // Account reads are addressed at the service root (empty resource address);
        // the cast disambiguates the create() overload for the null headers.
        final RxDocumentServiceRequest serviceRequest = RxDocumentServiceRequest.create(OperationType.Read,
            ResourceType.DatabaseAccount, "",
            (HashMap<String, String>) null,
            null);
        return this.read(serviceRequest, documentClientRetryPolicy)
                   .map(response -> toDatabaseAccount(response));
    } catch (Exception e) {
        logger.debug("Failure in getting Database Account due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Returns the session container that tracks session tokens for session consistency.
 * NOTE(review): exposed as {@code Object} — presumably to satisfy an untyped contract;
 * callers are expected to cast back to {@code SessionContainer}. Confirm.
 */
public Object getSession() {
return this.sessionContainer;
}
/**
 * Replaces the session container. The argument must actually be a
 * {@code SessionContainer}; the unchecked cast below throws ClassCastException otherwise.
 */
public void setSession(Object sessionContainer) {
this.sessionContainer = (SessionContainer) sessionContainer;
}
/** Returns the client's partition key range cache. */
public RxPartitionKeyRangeCache getPartitionKeyRangeCache() {
return partitionKeyRangeCache;
}
// Reads the database account directly from a specific regional endpoint (bypassing the
// global endpoint manager's routing). Request creation is deferred so headers are
// populated per subscription. Side effect: refreshes this.useMultipleWriteLocations
// from the returned account's multi-write capability.
public Flux<DatabaseAccount> getDatabaseAccountFromEndpoint(URI endpoint) {
return Flux.defer(() -> {
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Read,
ResourceType.DatabaseAccount, "", null, (Object) null);
this.populateHeaders(request, RequestVerb.GET);
// Force the request to the caller-supplied endpoint instead of the resolved one.
request.setEndpointOverride(endpoint);
return this.gatewayProxy.processMessage(request).doOnError(e -> {
// Log-and-propagate: the error still flows to the subscriber.
String message = String.format("Failed to retrieve database account information. %s",
e.getCause() != null
? e.getCause().toString()
: e.toString());
logger.warn(message);
}).map(rsp -> rsp.getResource(DatabaseAccount.class))
.doOnNext(databaseAccount -> {
// Multi-write is enabled only if both the client policy and the account allow it.
this.useMultipleWriteLocations = this.connectionPolicy.getUsingMultipleWriteLocations()
&& BridgeInternal.isEnableMultipleWriteLocations(databaseAccount);
});
});
}
/**
 * Picks the store model for a request. Certain requests must be routed through the
 * gateway even when the client connectivity mode is direct.
 *
 * @param request the service request being dispatched
 * @return the gateway proxy or the direct store model
 */
private RxStoreModel getStoreProxy(RxDocumentServiceRequest request) {
    // An explicit gateway-mode flag on the request always wins.
    if (request.UseGatewayMode) {
        return this.gatewayProxy;
    }
    final ResourceType resourceType = request.getResourceType();
    final OperationType operationType = request.getOperationType();
    // Offers, server-side scripts (except ExecuteJavaScript) and partition key ranges
    // are always served by the gateway.
    if (resourceType == ResourceType.Offer
            || (resourceType.isScript() && operationType != OperationType.ExecuteJavaScript)
            || resourceType == ResourceType.PartitionKeyRange) {
        return this.gatewayProxy;
    }
    switch (operationType) {
        case Create:
        case Upsert:
            // Metadata writes go through the gateway; data writes go direct.
            return (resourceType == ResourceType.Database
                    || resourceType == ResourceType.User
                    || resourceType == ResourceType.DocumentCollection
                    || resourceType == ResourceType.Permission)
                ? this.gatewayProxy : this.storeModel;
        case Delete:
            return (resourceType == ResourceType.Database
                    || resourceType == ResourceType.User
                    || resourceType == ResourceType.DocumentCollection)
                ? this.gatewayProxy : this.storeModel;
        case Replace:
        case Read:
            return resourceType == ResourceType.DocumentCollection
                ? this.gatewayProxy : this.storeModel;
        default:
            // Queries over collection children without a resolved partition key range
            // must be fanned out by the gateway.
            if ((operationType == OperationType.Query || operationType == OperationType.SqlQuery)
                    && Utils.isCollectionChild(resourceType)
                    && request.getPartitionKeyRangeIdentity() == null) {
                return this.gatewayProxy;
            }
            return this.storeModel;
    }
}
// Best-effort, ordered shutdown: each subsystem is closed "quietly"/independently so a
// failure in one does not prevent the others from releasing their resources.
@Override
public void close() {
logger.info("Shutting down ...");
logger.info("Closing Global Endpoint Manager ...");
LifeCycleUtils.closeQuietly(this.globalEndpointManager);
logger.info("Closing StoreClientFactory ...");
LifeCycleUtils.closeQuietly(this.storeClientFactory);
logger.info("Shutting down reactorHttpClient ...");
try {
this.reactorHttpClient.shutdown();
} catch (Exception e) {
// Swallow intentionally: close() must not throw; the failure is logged for diagnosis.
logger.warn("shutting down reactorHttpClient failed", e);
}
logger.info("Shutting down completed.");
}
} | class RxDocumentClientImpl implements AsyncDocumentClient, IAuthorizationTokenProvider {
// Shared JSON mapper for request/response serialization.
private final static ObjectMapper mapper = Utils.getSimpleObjectMapper();
private final Logger logger = LoggerFactory.getLogger(RxDocumentClientImpl.class);
// Authentication configuration: exactly one of master key, resource token(s),
// key credential, or token resolver is expected to be in effect.
private final String masterKeyOrResourceToken;
private final URI serviceEndpoint;
private final ConnectionPolicy connectionPolicy;
private final ConsistencyLevel consistencyLevel;
private final BaseAuthorizationTokenProvider authorizationTokenProvider;
private final UserAgentContainer userAgentContainer;
// True when masterKeyOrResourceToken holds a resource token rather than a master key.
private final boolean hasAuthKeyResourceToken;
private final Configs configs;
// When true, gateway HTTP clients (and transport clients) are shared across client instances.
private final boolean connectionSharingAcrossClientsEnabled;
private CosmosKeyCredential cosmosKeyCredential;
private TokenResolver tokenResolver;
private SessionContainer sessionContainer;
private String firstResourceTokenFromPermissionFeed = StringUtils.EMPTY;
// Caches and store models initialized in init() / initializeDirectConnectivity().
private RxClientCollectionCache collectionCache;
private RxStoreModel gatewayProxy;
private RxStoreModel storeModel;
private GlobalAddressResolver addressResolver;
private RxPartitionKeyRangeCache partitionKeyRangeCache;
// Maps resource id / full name -> (partition key, resource token) pairs built from a permission feed.
private Map<String, List<PartitionKeyAndResourceTokenPair>> resourceTokensMap;
private IRetryPolicyFactory resetSessionTokenRetryPolicy;
/**
 * Compatibility mode: Allows to specify compatibility mode used by client when
 * making query requests. Should be removed when application/sql is no longer
 * supported.
 */
private final QueryCompatibilityMode queryCompatibilityMode = QueryCompatibilityMode.Default;
private final HttpClient reactorHttpClient;
private final GlobalEndpointManager globalEndpointManager;
private final RetryPolicy retryPolicy;
// Set from the database account in initializeGatewayConfigurationReader().
private volatile boolean useMultipleWriteLocations;
private StoreClientFactory storeClientFactory;
private GatewayServiceConfigurationReader gatewayConfigurationReader;
/**
 * Creates a client whose per-request authorization is resolved through the supplied
 * {@link TokenResolver}, in addition to the master-key / permission-feed options
 * handled by the delegated constructor.
 */
public RxDocumentClientImpl(URI serviceEndpoint,
                            String masterKeyOrResourceToken,
                            List<Permission> permissionFeed,
                            ConnectionPolicy connectionPolicy,
                            ConsistencyLevel consistencyLevel,
                            Configs configs,
                            TokenResolver tokenResolver,
                            CosmosKeyCredential cosmosKeyCredential,
                            boolean sessionCapturingOverride,
                            boolean connectionSharingAcrossClientsEnabled) {
    this(serviceEndpoint, masterKeyOrResourceToken, permissionFeed, connectionPolicy, consistencyLevel, configs, cosmosKeyCredential, sessionCapturingOverride, connectionSharingAcrossClientsEnabled);
    this.tokenResolver = tokenResolver;
}
/**
 * Creates a client authorized via a feed of {@link Permission} resources. Each
 * permission's resource token is indexed by the resource id (or full name) parsed
 * from its resource link so per-request tokens can be looked up later.
 *
 * @throws IllegalArgumentException if a permission has an unparsable resource link
 *         or the feed yields no usable tokens.
 */
private RxDocumentClientImpl(URI serviceEndpoint,
                             String masterKeyOrResourceToken,
                             List<Permission> permissionFeed,
                             ConnectionPolicy connectionPolicy,
                             ConsistencyLevel consistencyLevel,
                             Configs configs,
                             CosmosKeyCredential cosmosKeyCredential,
                             boolean sessionCapturingOverrideEnabled,
                             boolean connectionSharingAcrossClientsEnabled) {
    this(serviceEndpoint, masterKeyOrResourceToken, connectionPolicy, consistencyLevel, configs, cosmosKeyCredential, sessionCapturingOverrideEnabled, connectionSharingAcrossClientsEnabled);
    if (permissionFeed != null && permissionFeed.size() > 0) {
        this.resourceTokensMap = new HashMap<>();
        for (Permission permission : permissionFeed) {
            String[] segments = StringUtils.split(permission.getResourceLink(),
                    Constants.Properties.PATH_SEPARATOR.charAt(0));
            if (segments.length == 0) {
                throw new IllegalArgumentException("resourceLink");
            }
            PathInfo pathInfo = new PathInfo(false, StringUtils.EMPTY, StringUtils.EMPTY, false);
            if (!PathsHelper.tryParsePathSegments(permission.getResourceLink(), pathInfo, null)) {
                throw new IllegalArgumentException(permission.getResourceLink());
            }
            // computeIfAbsent replaces the original get/null-check/put sequence.
            List<PartitionKeyAndResourceTokenPair> partitionKeyAndResourceTokenPairs =
                    this.resourceTokensMap.computeIfAbsent(pathInfo.resourceIdOrFullName, k -> new ArrayList<>());
            PartitionKey partitionKey = permission.getResourcePartitionKey();
            partitionKeyAndResourceTokenPairs.add(new PartitionKeyAndResourceTokenPair(
                    partitionKey != null ? BridgeInternal.getPartitionKeyInternal(partitionKey) : PartitionKeyInternal.Empty,
                    permission.getToken()));
            // NOTE: the token value is deliberately not logged. The original call passed
            // permission.getToken() as a third argument with no matching {} placeholder,
            // so SLF4J silently discarded it; the dead (and secret-bearing) argument is removed.
            logger.debug("Initializing resource token map , with map key [{}] , partition key [{}] and resource token",
                    pathInfo.resourceIdOrFullName, partitionKey != null ? partitionKey.toString() : null);
        }
        if (this.resourceTokensMap.isEmpty()) {
            throw new IllegalArgumentException("permissionFeed");
        }
        // Remember the first genuine resource token as the fallback credential.
        String firstToken = permissionFeed.get(0).getToken();
        if (ResourceTokenAuthorizationHelper.isResourceToken(firstToken)) {
            this.firstResourceTokenFromPermissionFeed = firstToken;
        }
    }
}
/**
 * Core constructor: wires authorization, connection policy, session container,
 * HTTP client, global endpoint manager and retry policy. Field initialization
 * order matters (e.g. the endpoint manager needs the HTTP client via
 * asDatabaseAccountManagerInternal()); network-dependent setup is deferred to init().
 */
RxDocumentClientImpl(URI serviceEndpoint,
                     String masterKeyOrResourceToken,
                     ConnectionPolicy connectionPolicy,
                     ConsistencyLevel consistencyLevel,
                     Configs configs,
                     CosmosKeyCredential cosmosKeyCredential,
                     boolean sessionCapturingOverrideEnabled,
                     boolean connectionSharingAcrossClientsEnabled) {
    logger.info(
            "Initializing DocumentClient with"
                    + " serviceEndpoint [{}], connectionPolicy [{}], consistencyLevel [{}], directModeProtocol [{}]",
            serviceEndpoint, connectionPolicy, consistencyLevel, configs.getProtocol());
    this.connectionSharingAcrossClientsEnabled = connectionSharingAcrossClientsEnabled;
    this.configs = configs;
    this.masterKeyOrResourceToken = masterKeyOrResourceToken;
    this.serviceEndpoint = serviceEndpoint;
    this.cosmosKeyCredential = cosmosKeyCredential;
    // Select the authorization strategy: explicit key credential, resource token,
    // master key (wrapped into a credential), or none (tokenResolver set separately).
    if (this.cosmosKeyCredential != null) {
        hasAuthKeyResourceToken = false;
        this.authorizationTokenProvider = new BaseAuthorizationTokenProvider(this.cosmosKeyCredential);
    } else if (masterKeyOrResourceToken != null && ResourceTokenAuthorizationHelper.isResourceToken(masterKeyOrResourceToken)) {
        this.authorizationTokenProvider = null;
        hasAuthKeyResourceToken = true;
    } else if(masterKeyOrResourceToken != null && !ResourceTokenAuthorizationHelper.isResourceToken(masterKeyOrResourceToken)){
        this.cosmosKeyCredential = new CosmosKeyCredential(this.masterKeyOrResourceToken);
        hasAuthKeyResourceToken = false;
        this.authorizationTokenProvider = new BaseAuthorizationTokenProvider(this.cosmosKeyCredential);
    } else {
        hasAuthKeyResourceToken = false;
        this.authorizationTokenProvider = null;
    }
    if (connectionPolicy != null) {
        this.connectionPolicy = connectionPolicy;
    } else {
        this.connectionPolicy = new ConnectionPolicy();
    }
    // Session capturing is only needed for SESSION consistency unless explicitly forced on.
    boolean disableSessionCapturing = (ConsistencyLevel.SESSION != consistencyLevel && !sessionCapturingOverrideEnabled);
    this.sessionContainer = new SessionContainer(this.serviceEndpoint.getHost(), disableSessionCapturing);
    this.consistencyLevel = consistencyLevel;
    this.userAgentContainer = new UserAgentContainer();
    String userAgentSuffix = this.connectionPolicy.getUserAgentSuffix();
    if (userAgentSuffix != null && userAgentSuffix.length() > 0) {
        userAgentContainer.setSuffix(userAgentSuffix);
    }
    this.reactorHttpClient = httpClient();
    this.globalEndpointManager = new GlobalEndpointManager(asDatabaseAccountManagerInternal(), this.connectionPolicy, /**/configs);
    this.retryPolicy = new RetryPolicy(this.globalEndpointManager, this.connectionPolicy);
    this.resetSessionTokenRetryPolicy = retryPolicy;
}
/**
 * Completes network-dependent initialization: builds the gateway proxy, starts
 * the global endpoint manager, reads gateway configuration, and wires the
 * collection / partition-key-range caches. Must run before the client serves requests.
 */
public void init() {
    // CAUTION: gateway proxy must exist before the caches that depend on it.
    this.gatewayProxy = createRxGatewayProxy(this.sessionContainer,
            this.consistencyLevel,
            this.queryCompatibilityMode,
            this.userAgentContainer,
            this.globalEndpointManager,
            this.reactorHttpClient);
    this.globalEndpointManager.init();
    this.initializeGatewayConfigurationReader();
    this.collectionCache = new RxClientCollectionCache(this.sessionContainer, this.gatewayProxy, this, this.retryPolicy);
    this.resetSessionTokenRetryPolicy = new ResetSessionTokenRetryPolicyFactory(this.sessionContainer, this.collectionCache, this.retryPolicy);
    this.partitionKeyRangeCache = new RxPartitionKeyRangeCache(RxDocumentClientImpl.this,
            collectionCache);
    // GATEWAY mode routes everything through the gateway proxy; otherwise set up direct TCP.
    if (this.connectionPolicy.getConnectionMode() == ConnectionMode.GATEWAY) {
        this.storeModel = this.gatewayProxy;
    } else {
        this.initializeDirectConnectivity();
    }
}
/**
 * Sets up direct-mode (TCP) connectivity: the store client factory, the global
 * address resolver, and the server store model. Called from init() only when the
 * connection mode is not GATEWAY.
 */
private void initializeDirectConnectivity() {
    this.storeClientFactory = new StoreClientFactory(
            this.configs,
            // request timeout is configured in millis but the factory expects seconds
            this.connectionPolicy.getRequestTimeoutInMillis() / 1000,
            0,
            this.userAgentContainer,
            this.connectionSharingAcrossClientsEnabled
    );
    this.addressResolver = new GlobalAddressResolver(
            this.reactorHttpClient,
            this.globalEndpointManager,
            this.configs.getProtocol(),
            this,
            this.collectionCache,
            this.partitionKeyRangeCache,
            userAgentContainer,
            null,
            this.connectionPolicy);
    this.createStoreModel(true);
}
/**
 * Exposes this client as a {@link DatabaseAccountManagerInternal} so the
 * GlobalEndpointManager can fetch database-account metadata through it without
 * holding a direct reference to the full client.
 */
DatabaseAccountManagerInternal asDatabaseAccountManagerInternal() {
    return new DatabaseAccountManagerInternal() {
        @Override
        public URI getServiceEndpoint() {
            return RxDocumentClientImpl.this.getServiceEndpoint();
        }
        @Override
        public Flux<DatabaseAccount> getDatabaseAccountFromEndpoint(URI endpoint) {
            logger.info("Getting database account endpoint from {}", endpoint);
            return RxDocumentClientImpl.this.getDatabaseAccountFromEndpoint(endpoint);
        }
        @Override
        public ConnectionPolicy getConnectionPolicy() {
            return RxDocumentClientImpl.this.getConnectionPolicy();
        }
    };
}
/**
 * Factory for the gateway store model; package-private so tests can override it
 * to inject a fake gateway.
 */
RxGatewayStoreModel createRxGatewayProxy(ISessionContainer sessionContainer,
                                         ConsistencyLevel consistencyLevel,
                                         QueryCompatibilityMode queryCompatibilityMode,
                                         UserAgentContainer userAgentContainer,
                                         GlobalEndpointManager globalEndpointManager,
                                         HttpClient httpClient) {
    return new RxGatewayStoreModel(sessionContainer,
            consistencyLevel,
            queryCompatibilityMode,
            userAgentContainer,
            globalEndpointManager,
            httpClient);
}
/**
 * Builds the HTTP client used for gateway traffic from the connection policy.
 * When connection sharing across clients is enabled, a reference-counted shared
 * instance is returned instead of a dedicated one.
 */
private HttpClient httpClient() {
    final HttpClientConfig httpClientConfig = new HttpClientConfig(this.configs)
            .withMaxIdleConnectionTimeoutInMillis(this.connectionPolicy.getIdleConnectionTimeoutInMillis())
            .withPoolSize(this.connectionPolicy.getMaxPoolSize())
            .withHttpProxy(this.connectionPolicy.getProxy())
            .withRequestTimeoutInMillis(this.connectionPolicy.getRequestTimeoutInMillis());
    return connectionSharingAcrossClientsEnabled
            ? SharedGatewayHttpClient.getOrCreateInstance(httpClientConfig)
            : HttpClient.createFixed(httpClientConfig);
}
/**
 * Creates the direct-mode server store model on top of a new store client.
 *
 * @param subscribeRntbdStatus currently unused in this implementation —
 *        NOTE(review): consider removing or wiring it up; confirm against callers.
 */
private void createStoreModel(boolean subscribeRntbdStatus) {
    StoreClient storeClient = this.storeClientFactory.createStoreClient(
            this.addressResolver,
            this.sessionContainer,
            this.gatewayConfigurationReader,
            this,
            false
    );
    this.storeModel = new ServerStoreModel(storeClient);
}
@Override
public URI getServiceEndpoint() {
    // The account endpoint this client was constructed with.
    return this.serviceEndpoint;
}
@Override
public URI getWriteEndpoint() {
    // First preferred write endpoint, or null when none has been resolved yet.
    for (URI endpoint : globalEndpointManager.getWriteEndpoints()) {
        return endpoint;
    }
    return null;
}
@Override
public URI getReadEndpoint() {
    // First preferred read endpoint, or null when none has been resolved yet.
    for (URI endpoint : globalEndpointManager.getReadEndpoints()) {
        return endpoint;
    }
    return null;
}
@Override
public ConnectionPolicy getConnectionPolicy() {
    // Never null: the core constructor substitutes a default policy when given null.
    return this.connectionPolicy;
}
@Override
public Mono<ResourceResponse<Database>> createDatabase(Database database, RequestOptions options) {
    // Wrap the internal call in a fresh per-request retry policy.
    DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(() -> createDatabaseInternal(database, options, retryPolicyInstance), retryPolicyInstance);
}
/**
 * Builds and sends the Create-Database request; argument/validation failures are
 * surfaced as an error Mono rather than thrown synchronously.
 */
private Mono<ResourceResponse<Database>> createDatabaseInternal(Database database, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (database == null) {
            throw new IllegalArgumentException("Database");
        }
        logger.debug("Creating a Database. id: [{}]", database.getId());
        validateResource(database);
        Map<String, String> requestHeaders = this.getRequestHeaders(options);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Create,
                ResourceType.Database, Paths.DATABASES_ROOT, database, requestHeaders, options);
        if (retryPolicyInstance != null) {
            // Let the retry policy stamp per-attempt state (e.g. location) on the request.
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        return this.create(request, retryPolicyInstance).map(response -> toResourceResponse(response, Database.class));
    } catch (Exception e) {
        logger.debug("Failure in creating a database. due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
@Override
public Mono<ResourceResponse<Database>> deleteDatabase(String databaseLink, RequestOptions options) {
    // Wrap the internal call in a fresh per-request retry policy.
    DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(() -> deleteDatabaseInternal(databaseLink, options, retryPolicyInstance), retryPolicyInstance);
}
/**
 * Builds and sends the Delete-Database request; validation failures become an error Mono.
 */
private Mono<ResourceResponse<Database>> deleteDatabaseInternal(String databaseLink, RequestOptions options,
                                                                DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(databaseLink)) {
            throw new IllegalArgumentException("databaseLink");
        }
        logger.debug("Deleting a Database. databaseLink: [{}]", databaseLink);
        String path = Utils.joinPath(databaseLink, null);
        Map<String, String> requestHeaders = this.getRequestHeaders(options);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Delete,
                ResourceType.Database, path, requestHeaders, options);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        return this.delete(request, retryPolicyInstance).map(response -> toResourceResponse(response, Database.class));
    } catch (Exception e) {
        logger.debug("Failure in deleting a database. due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
@Override
public Mono<ResourceResponse<Database>> readDatabase(String databaseLink, RequestOptions options) {
    // Wrap the internal call in a fresh per-request retry policy.
    DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(() -> readDatabaseInternal(databaseLink, options, retryPolicyInstance), retryPolicyInstance);
}
/**
 * Builds and sends the Read-Database request; validation failures become an error Mono.
 */
private Mono<ResourceResponse<Database>> readDatabaseInternal(String databaseLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(databaseLink)) {
            throw new IllegalArgumentException("databaseLink");
        }
        logger.debug("Reading a Database. databaseLink: [{}]", databaseLink);
        String path = Utils.joinPath(databaseLink, null);
        Map<String, String> requestHeaders = this.getRequestHeaders(options);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Read,
                ResourceType.Database, path, requestHeaders, options);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, Database.class));
    } catch (Exception e) {
        logger.debug("Failure in reading a database. due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
@Override
public Flux<FeedResponse<Database>> readDatabases(FeedOptions options) {
    // Feed-read of all databases at the account root.
    return readFeed(options, ResourceType.Database, Database.class, Paths.DATABASES_ROOT);
}
/**
 * Maps a parent resource link plus a child resource type to the feed link that
 * queries of that type must target (e.g. a collection link + Document ->
 * ".../docs"). Database and Offer are account-rooted and ignore the parent link.
 *
 * @throws IllegalArgumentException for resource types that are not queryable this way
 */
private String parentResourceLinkToQueryLink(String parentResouceLink, ResourceType resourceTypeEnum) {
    switch (resourceTypeEnum) {
        case Database:
            return Paths.DATABASES_ROOT;
        case DocumentCollection:
            return Utils.joinPath(parentResouceLink, Paths.COLLECTIONS_PATH_SEGMENT);
        case Document:
            return Utils.joinPath(parentResouceLink, Paths.DOCUMENTS_PATH_SEGMENT);
        case Offer:
            return Paths.OFFERS_ROOT;
        case User:
            return Utils.joinPath(parentResouceLink, Paths.USERS_PATH_SEGMENT);
        case Permission:
            return Utils.joinPath(parentResouceLink, Paths.PERMISSIONS_PATH_SEGMENT);
        case Attachment:
            return Utils.joinPath(parentResouceLink, Paths.ATTACHMENTS_PATH_SEGMENT);
        case StoredProcedure:
            return Utils.joinPath(parentResouceLink, Paths.STORED_PROCEDURES_PATH_SEGMENT);
        case Trigger:
            return Utils.joinPath(parentResouceLink, Paths.TRIGGERS_PATH_SEGMENT);
        case UserDefinedFunction:
            return Utils.joinPath(parentResouceLink, Paths.USER_DEFINED_FUNCTIONS_PATH_SEGMENT);
        default:
            throw new IllegalArgumentException("resource type not supported");
    }
}
/**
 * Common entry point for SQL queries: resolves the feed link for the target
 * resource type, creates a query execution context (single- or cross-partition,
 * decided by the factory), and streams its pages.
 */
private <T extends Resource> Flux<FeedResponse<T>> createQuery(
        String parentResourceLink,
        SqlQuerySpec sqlQuery,
        FeedOptions options,
        Class<T> klass,
        ResourceType resourceTypeEnum) {
    String queryResourceLink = parentResourceLinkToQueryLink(parentResourceLink, resourceTypeEnum);
    // activityId correlates every page of this query in diagnostics.
    UUID activityId = Utils.randomUUID();
    IDocumentQueryClient queryClient = DocumentQueryClientImpl(RxDocumentClientImpl.this);
    Flux<? extends IDocumentQueryExecutionContext<T>> executionContext =
            DocumentQueryExecutionContextFactory.createDocumentQueryExecutionContextAsync(queryClient, resourceTypeEnum, klass, sqlQuery , options, queryResourceLink, false, activityId);
    return executionContext.flatMap(IDocumentQueryExecutionContext::executeAsync);
}
@Override
public Flux<FeedResponse<Database>> queryDatabases(String query, FeedOptions options) {
    // Convenience overload: wrap the raw query text in a SqlQuerySpec.
    return queryDatabases(new SqlQuerySpec(query), options);
}
@Override
public Flux<FeedResponse<Database>> queryDatabases(SqlQuerySpec querySpec, FeedOptions options) {
    // Databases are queried at the account root.
    return createQuery(Paths.DATABASES_ROOT, querySpec, options, Database.class, ResourceType.Database);
}
@Override
public Mono<ResourceResponse<DocumentCollection>> createCollection(String databaseLink,
                                                                   DocumentCollection collection, RequestOptions options) {
    // Wrap the internal call in a fresh per-request retry policy.
    DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(() -> this.createCollectionInternal(databaseLink, collection, options, retryPolicyInstance), retryPolicyInstance);
}
/**
 * Builds and sends the Create-Collection request. On success the returned
 * session token is recorded so subsequent session-consistent reads of the new
 * collection observe the write.
 */
private Mono<ResourceResponse<DocumentCollection>> createCollectionInternal(String databaseLink,
                                                                            DocumentCollection collection, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(databaseLink)) {
            throw new IllegalArgumentException("databaseLink");
        }
        if (collection == null) {
            throw new IllegalArgumentException("collection");
        }
        logger.debug("Creating a Collection. databaseLink: [{}], Collection id: [{}]", databaseLink,
                collection.getId());
        validateResource(collection);
        String path = Utils.joinPath(databaseLink, Paths.COLLECTIONS_PATH_SEGMENT);
        Map<String, String> requestHeaders = this.getRequestHeaders(options);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Create,
                ResourceType.DocumentCollection, path, collection, requestHeaders, options);
        if (retryPolicyInstance != null){
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        return this.create(request, retryPolicyInstance).map(response -> toResourceResponse(response, DocumentCollection.class))
                .doOnNext(resourceResponse -> {
                    // Capture the session token under both rid and alt-link addressing.
                    this.sessionContainer.setSessionToken(resourceResponse.getResource().getResourceId(),
                            getAltLink(resourceResponse.getResource()),
                            resourceResponse.getResponseHeaders());
                });
    } catch (Exception e) {
        logger.debug("Failure in creating a collection. due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
@Override
public Mono<ResourceResponse<DocumentCollection>> replaceCollection(DocumentCollection collection,
                                                                    RequestOptions options) {
    // Wrap the internal call in a fresh per-request retry policy.
    DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(() -> replaceCollectionInternal(collection, options, retryPolicyInstance), retryPolicyInstance);
}
/**
 * Builds and sends the Replace-Collection request, recording the resulting
 * session token when a resource body is returned.
 */
private Mono<ResourceResponse<DocumentCollection>> replaceCollectionInternal(DocumentCollection collection,
                                                                             RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (collection == null) {
            throw new IllegalArgumentException("collection");
        }
        logger.debug("Replacing a Collection. id: [{}]", collection.getId());
        validateResource(collection);
        String path = Utils.joinPath(collection.getSelfLink(), null);
        Map<String, String> requestHeaders = this.getRequestHeaders(options);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Replace,
                ResourceType.DocumentCollection, path, collection, requestHeaders, options);
        if (retryPolicyInstance != null){
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        return this.replace(request, retryPolicyInstance).map(response -> toResourceResponse(response, DocumentCollection.class))
                .doOnNext(resourceResponse -> {
                    // Unlike create, replace may return no body; only then skip the session update.
                    if (resourceResponse.getResource() != null) {
                        this.sessionContainer.setSessionToken(resourceResponse.getResource().getResourceId(),
                                getAltLink(resourceResponse.getResource()),
                                resourceResponse.getResponseHeaders());
                    }
                });
    } catch (Exception e) {
        logger.debug("Failure in replacing a collection. due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
@Override
public Mono<ResourceResponse<DocumentCollection>> deleteCollection(String collectionLink,
                                                                   RequestOptions options) {
    // Wrap the internal call in a fresh per-request retry policy.
    DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(() -> deleteCollectionInternal(collectionLink, options, retryPolicyInstance), retryPolicyInstance);
}
/**
 * Builds and sends the Delete-Collection request; validation failures become an error Mono.
 */
private Mono<ResourceResponse<DocumentCollection>> deleteCollectionInternal(String collectionLink,
                                                                            RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(collectionLink)) {
            throw new IllegalArgumentException("collectionLink");
        }
        logger.debug("Deleting a Collection. collectionLink: [{}]", collectionLink);
        String path = Utils.joinPath(collectionLink, null);
        Map<String, String> requestHeaders = this.getRequestHeaders(options);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Delete,
                ResourceType.DocumentCollection, path, requestHeaders, options);
        if (retryPolicyInstance != null){
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        return this.delete(request, retryPolicyInstance).map(response -> toResourceResponse(response, DocumentCollection.class));
    } catch (Exception e) {
        logger.debug("Failure in deleting a collection, due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Sends a DELETE through the store proxy selected for this request, after
 * populating auth/date headers and (on retries) updating the retry context.
 */
private Mono<RxDocumentServiceResponse> delete(RxDocumentServiceRequest request, DocumentClientRetryPolicy documentClientRetryPolicy) {
    populateHeaders(request, RequestVerb.DELETE);
    if(request.requestContext != null && documentClientRetryPolicy.getRetryCount() > 0) {
        // Only retried attempts carry retry diagnostics.
        documentClientRetryPolicy.updateEndTime();
        request.requestContext.updateRetryContext(documentClientRetryPolicy, true);
    }
    return getStoreProxy(request).processMessage(request);
}
/**
 * Sends a GET through the store proxy selected for this request, after
 * populating auth/date headers and (on retries) updating the retry context.
 */
private Mono<RxDocumentServiceResponse> read(RxDocumentServiceRequest request, DocumentClientRetryPolicy documentClientRetryPolicy) {
    populateHeaders(request, RequestVerb.GET);
    if(request.requestContext != null && documentClientRetryPolicy.getRetryCount() > 0) {
        // Only retried attempts carry retry diagnostics.
        documentClientRetryPolicy.updateEndTime();
        request.requestContext.updateRetryContext(documentClientRetryPolicy, true);
    }
    return getStoreProxy(request).processMessage(request);
}
/**
 * Sends a feed-read (GET) request. Feed reads always go through the gateway
 * proxy, bypassing the per-request store-proxy selection.
 */
Mono<RxDocumentServiceResponse> readFeed(RxDocumentServiceRequest request) {
    populateHeaders(request, RequestVerb.GET);
    return gatewayProxy.processMessage(request);
}
/**
 * Sends a query (POST) through the store proxy selected for this request and
 * records the response's session token before handing the response on.
 */
private Mono<RxDocumentServiceResponse> query(RxDocumentServiceRequest request) {
    populateHeaders(request, RequestVerb.POST);
    // doOnNext is the side-effect-only equivalent of the original map-and-return lambda.
    return this.getStoreProxy(request)
            .processMessage(request)
            .doOnNext(response -> this.captureSessionToken(request, response));
}
@Override
public Mono<ResourceResponse<DocumentCollection>> readCollection(String collectionLink,
                                                                 RequestOptions options) {
    // Wrap the internal call in a fresh per-request retry policy.
    DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(() -> readCollectionInternal(collectionLink, options, retryPolicyInstance), retryPolicyInstance);
}
/**
 * Builds and sends the Read-Collection request; validation failures become an error Mono.
 */
private Mono<ResourceResponse<DocumentCollection>> readCollectionInternal(String collectionLink,
                                                                          RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(collectionLink)) {
            throw new IllegalArgumentException("collectionLink");
        }
        logger.debug("Reading a Collection. collectionLink: [{}]", collectionLink);
        String path = Utils.joinPath(collectionLink, null);
        Map<String, String> requestHeaders = this.getRequestHeaders(options);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Read,
                ResourceType.DocumentCollection, path, requestHeaders, options);
        if (retryPolicyInstance != null){
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, DocumentCollection.class));
    } catch (Exception e) {
        logger.debug("Failure in reading a collection, due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
@Override
public Flux<FeedResponse<DocumentCollection>> readCollections(String databaseLink, FeedOptions options) {
    if (StringUtils.isEmpty(databaseLink)) {
        throw new IllegalArgumentException("databaseLink");
    }
    // Feed-read of the database's collections child feed.
    return readFeed(options, ResourceType.DocumentCollection, DocumentCollection.class,
            Utils.joinPath(databaseLink, Paths.COLLECTIONS_PATH_SEGMENT));
}
@Override
public Flux<FeedResponse<DocumentCollection>> queryCollections(String databaseLink, String query,
                                                               FeedOptions options) {
    // Convenience overload: wrap the raw query text in a SqlQuerySpec.
    return createQuery(databaseLink, new SqlQuerySpec(query), options, DocumentCollection.class, ResourceType.DocumentCollection);
}
@Override
public Flux<FeedResponse<DocumentCollection>> queryCollections(String databaseLink,
                                                               SqlQuerySpec querySpec, FeedOptions options) {
    // Query collections under the given database.
    return createQuery(databaseLink, querySpec, options, DocumentCollection.class, ResourceType.DocumentCollection);
}
/**
 * Serializes stored-procedure arguments into a JSON array literal, e.g.
 * {@code [1,"a",{"k":"v"}]}. JsonSerializable arguments render themselves;
 * everything else goes through the shared object mapper.
 *
 * @throws IllegalArgumentException if an argument cannot be serialized to JSON
 */
private static String serializeProcedureParams(Object[] objectArray) {
    List<String> serializedParams = new ArrayList<>(objectArray.length);
    for (Object param : objectArray) {
        if (param instanceof JsonSerializable) {
            serializedParams.add(((JsonSerializable) param).toJson());
        } else {
            try {
                serializedParams.add(mapper.writeValueAsString(param));
            } catch (IOException e) {
                throw new IllegalArgumentException("Can't serialize the object into the json string", e);
            }
        }
    }
    return String.format("[%s]", StringUtils.join(serializedParams, ","));
}
private static void validateResource(Resource resource) {
if (!StringUtils.isEmpty(resource.getId())) {
if (resource.getId().indexOf('/') != -1 || resource.getId().indexOf('\\') != -1 ||
resource.getId().indexOf('?') != -1 || resource.getId().indexOf('
throw new IllegalArgumentException("Id contains illegal chars.");
}
if (resource.getId().endsWith(" ")) {
throw new IllegalArgumentException("Id ends with a space.");
}
}
}
/**
 * Builds the request-header map from client defaults plus the per-request
 * {@link RequestOptions}. Option-level values are applied after client-level
 * ones, so e.g. an options consistency level overrides the client default.
 */
private Map<String, String> getRequestHeaders(RequestOptions options) {
    Map<String, String> headers = new HashMap<>();
    if (this.useMultipleWriteLocations) {
        headers.put(HttpConstants.HttpHeaders.ALLOW_TENTATIVE_WRITES, Boolean.TRUE.toString());
    }
    if (consistencyLevel != null) {
        headers.put(HttpConstants.HttpHeaders.CONSISTENCY_LEVEL, consistencyLevel.toString());
    }
    if (options == null) {
        return headers;
    }
    // Raw custom headers first; the typed options below can still override them.
    Map<String, String> customOptions = options.getHeaders();
    if (customOptions != null) {
        headers.putAll(customOptions);
    }
    if (options.getAccessCondition() != null) {
        if (options.getAccessCondition().getType() == AccessConditionType.IF_MATCH) {
            headers.put(HttpConstants.HttpHeaders.IF_MATCH, options.getAccessCondition().getCondition());
        } else {
            headers.put(HttpConstants.HttpHeaders.IF_NONE_MATCH, options.getAccessCondition().getCondition());
        }
    }
    if (options.getConsistencyLevel() != null) {
        headers.put(HttpConstants.HttpHeaders.CONSISTENCY_LEVEL, options.getConsistencyLevel().toString());
    }
    if (options.getIndexingDirective() != null) {
        headers.put(HttpConstants.HttpHeaders.INDEXING_DIRECTIVE, options.getIndexingDirective().toString());
    }
    if (options.getPostTriggerInclude() != null && options.getPostTriggerInclude().size() > 0) {
        String postTriggerInclude = StringUtils.join(options.getPostTriggerInclude(), ",");
        headers.put(HttpConstants.HttpHeaders.POST_TRIGGER_INCLUDE, postTriggerInclude);
    }
    if (options.getPreTriggerInclude() != null && options.getPreTriggerInclude().size() > 0) {
        String preTriggerInclude = StringUtils.join(options.getPreTriggerInclude(), ",");
        headers.put(HttpConstants.HttpHeaders.PRE_TRIGGER_INCLUDE, preTriggerInclude);
    }
    if (!Strings.isNullOrEmpty(options.getSessionToken())) {
        headers.put(HttpConstants.HttpHeaders.SESSION_TOKEN, options.getSessionToken());
    }
    if (options.getResourceTokenExpirySeconds() != null) {
        headers.put(HttpConstants.HttpHeaders.RESOURCE_TOKEN_EXPIRY,
                String.valueOf(options.getResourceTokenExpirySeconds()));
    }
    // Explicit throughput takes precedence over a named offer type.
    if (options.getOfferThroughput() != null && options.getOfferThroughput() >= 0) {
        headers.put(HttpConstants.HttpHeaders.OFFER_THROUGHPUT, options.getOfferThroughput().toString());
    } else if (options.getOfferType() != null) {
        headers.put(HttpConstants.HttpHeaders.OFFER_TYPE, options.getOfferType());
    }
    if (options.isPopulateQuotaInfo()) {
        headers.put(HttpConstants.HttpHeaders.POPULATE_QUOTA_INFO, String.valueOf(true));
    }
    if (options.isScriptLoggingEnabled()) {
        headers.put(HttpConstants.HttpHeaders.SCRIPT_ENABLE_LOGGING, String.valueOf(true));
    }
    return headers;
}
/**
 * Async overload: resolves the target collection for the request, then delegates
 * to the synchronous overload to stamp partition-key information on it.
 */
private Mono<RxDocumentServiceRequest> addPartitionKeyInformation(RxDocumentServiceRequest request,
                                                                  ByteBuffer contentAsByteBuffer,
                                                                  Document document,
                                                                  RequestOptions options) {
    Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = this.collectionCache.resolveCollectionAsync(request);
    return collectionObs
            .map(collectionValueHolder -> {
                addPartitionKeyInformation(request, contentAsByteBuffer, document, options, collectionValueHolder.v);
                return request;
            });
}
/**
 * Async overload taking an already-started collection resolution; stamps
 * partition-key information once the collection is available.
 */
private Mono<RxDocumentServiceRequest> addPartitionKeyInformation(RxDocumentServiceRequest request,
                                                                  ByteBuffer contentAsByteBuffer,
                                                                  Object document,
                                                                  RequestOptions options,
                                                                  Mono<Utils.ValueHolder<DocumentCollection>> collectionObs) {
    return collectionObs.map(collectionValueHolder -> {
        addPartitionKeyInformation(request, contentAsByteBuffer, document, options, collectionValueHolder.v);
        return request;
    });
}
/**
 * Determines the effective partition key for the request and sets it both on
 * the request object and as the x-ms-documentdb-partitionkey header.
 * Precedence: explicit PartitionKey.NONE > explicit options key > empty key for
 * non-partitioned collections > value extracted from the document body.
 *
 * @throws UnsupportedOperationException if no partition key can be determined
 */
private void addPartitionKeyInformation(RxDocumentServiceRequest request,
                                        ByteBuffer contentAsByteBuffer,
                                        Object objectDoc, RequestOptions options,
                                        DocumentCollection collection) {
    PartitionKeyDefinition partitionKeyDefinition = collection.getPartitionKey();
    PartitionKeyInternal partitionKeyInternal = null;
    if (options != null && options.getPartitionKey() != null && options.getPartitionKey().equals(PartitionKey.NONE)){
        partitionKeyInternal = BridgeInternal.getNonePartitionKey(partitionKeyDefinition);
    } else if (options != null && options.getPartitionKey() != null) {
        partitionKeyInternal = BridgeInternal.getPartitionKeyInternal(options.getPartitionKey());
    } else if (partitionKeyDefinition == null || partitionKeyDefinition.getPaths().size() == 0) {
        // Non-partitioned (legacy) collection.
        partitionKeyInternal = PartitionKeyInternal.getEmpty();
    } else if (contentAsByteBuffer != null) {
        CosmosItemProperties cosmosItemProperties;
        if (objectDoc instanceof CosmosItemProperties) {
            cosmosItemProperties = (CosmosItemProperties) objectDoc;
        } else {
            // rewind: the buffer may already have been consumed by serialization
            contentAsByteBuffer.rewind();
            cosmosItemProperties = new CosmosItemProperties(contentAsByteBuffer);
        }
        partitionKeyInternal = extractPartitionKeyValueFromDocument(cosmosItemProperties, partitionKeyDefinition);
    } else {
        throw new UnsupportedOperationException("PartitionKey value must be supplied for this operation.");
    }
    request.setPartitionKeyInternal(partitionKeyInternal);
    request.getHeaders().put(HttpConstants.HttpHeaders.PARTITION_KEY, Utils.escapeNonAscii(partitionKeyInternal.toJson()));
}
/**
 * Extracts the partition-key value from a document body by walking the first
 * partition-key path. A missing value (or an object node) maps to the "none"
 * partition key. Returns null if no definition or no path parts are available.
 */
private static PartitionKeyInternal extractPartitionKeyValueFromDocument(
        CosmosItemProperties document,
        PartitionKeyDefinition partitionKeyDefinition) {
    // Guard-clause form of the original nested-if implementation; behavior unchanged.
    if (partitionKeyDefinition == null) {
        return null;
    }
    String path = partitionKeyDefinition.getPaths().iterator().next();
    List<String> parts = PathParser.getPathParts(path);
    if (parts.size() < 1) {
        return null;
    }
    Object value = document.getObjectByPath(parts);
    if (value == null || value.getClass() == ObjectNode.class) {
        value = BridgeInternal.getNonePartitionKey(partitionKeyDefinition);
    }
    return value instanceof PartitionKeyInternal
            ? (PartitionKeyInternal) value
            : PartitionKeyInternal.fromObjectArray(Collections.singletonList(value), false);
}
/**
 * Builds a document create/upsert request: serializes the document, constructs
 * the service request, and asynchronously stamps partition-key information once
 * the target collection is resolved.
 *
 * NOTE(review): disableAutomaticIdGeneration is not referenced in this body —
 * presumably handled elsewhere or dead; confirm against callers.
 */
private Mono<RxDocumentServiceRequest> getCreateDocumentRequest(String documentCollectionLink, Object document,
                                                                RequestOptions options, boolean disableAutomaticIdGeneration, OperationType operationType) {
    if (StringUtils.isEmpty(documentCollectionLink)) {
        throw new IllegalArgumentException("documentCollectionLink");
    }
    if (document == null) {
        throw new IllegalArgumentException("document");
    }
    ByteBuffer content = serializeJsonToByteBuffer(document, mapper);
    String path = Utils.joinPath(documentCollectionLink, Paths.DOCUMENTS_PATH_SEGMENT);
    Map<String, String> requestHeaders = this.getRequestHeaders(options);
    RxDocumentServiceRequest request = RxDocumentServiceRequest.create(operationType, ResourceType.Document, path,
            requestHeaders, options, content);
    Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = this.collectionCache.resolveCollectionAsync(request);
    return addPartitionKeyInformation(request, content, document, options, collectionObs);
}
private void populateHeaders(RxDocumentServiceRequest request, RequestVerb httpMethod) {
request.getHeaders().put(HttpConstants.HttpHeaders.X_DATE, Utils.nowAsRFC1123());
if (this.masterKeyOrResourceToken != null || this.resourceTokensMap != null
|| this.tokenResolver != null || this.cosmosKeyCredential != null) {
String resourceName = request.getResourceAddress();
String authorization = this.getUserAuthorizationToken(
resourceName, request.getResourceType(), httpMethod, request.getHeaders(),
AuthorizationTokenType.PrimaryMasterKey, request.properties);
try {
authorization = URLEncoder.encode(authorization, "UTF-8");
} catch (UnsupportedEncodingException e) {
throw new IllegalStateException("Failed to encode authtoken.", e);
}
request.getHeaders().put(HttpConstants.HttpHeaders.AUTHORIZATION, authorization);
}
if ((RequestVerb.POST.equals(httpMethod) || RequestVerb.PUT.equals(httpMethod))
&& !request.getHeaders().containsKey(HttpConstants.HttpHeaders.CONTENT_TYPE)) {
request.getHeaders().put(HttpConstants.HttpHeaders.CONTENT_TYPE, RuntimeConstants.MediaTypes.JSON);
}
if (!request.getHeaders().containsKey(HttpConstants.HttpHeaders.ACCEPT)) {
request.getHeaders().put(HttpConstants.HttpHeaders.ACCEPT, RuntimeConstants.MediaTypes.JSON);
}
}
    /**
     * Resolves the authorization token for a request, trying credential sources in a
     * fixed precedence order: custom token resolver, key credential, raw resource
     * token, and finally the per-resource token map.
     * NOTE(review): the {@code tokenType} parameter is not consulted by this
     * implementation — confirm whether callers rely on it.
     */
    @Override
    public String getUserAuthorizationToken(String resourceName,
                                            ResourceType resourceType,
                                            RequestVerb requestVerb,
                                            Map<String, String> headers,
                                            AuthorizationTokenType tokenType,
                                            Map<String, Object> properties) {

        if (this.tokenResolver != null) {
            // A custom resolver takes precedence over every other credential source.
            return this.tokenResolver.getAuthorizationToken(requestVerb, resourceName, this.resolveCosmosResourceType(resourceType),
                properties != null ? Collections.unmodifiableMap(properties) : null);
        } else if (cosmosKeyCredential != null) {
            // Sign the request with the configured key credential.
            return this.authorizationTokenProvider.generateKeyAuthorizationSignature(requestVerb, resourceName,
                resourceType, headers);
        } else if (masterKeyOrResourceToken != null && hasAuthKeyResourceToken && resourceTokensMap == null) {
            // The caller supplied a raw resource token; use it verbatim.
            return masterKeyOrResourceToken;
        } else {
            assert resourceTokensMap != null;
            if(resourceType.equals(ResourceType.DatabaseAccount)) {
                // Account-level operations fall back to the first token observed in
                // the permission feed.
                return this.firstResourceTokenFromPermissionFeed;
            }

            return ResourceTokenAuthorizationHelper.getAuthorizationTokenUsingResourceTokens(resourceTokensMap, requestVerb, resourceName, headers);
        }
    }
private CosmosResourceType resolveCosmosResourceType(ResourceType resourceType) {
try {
return CosmosResourceType.valueOf(resourceType.toString());
} catch (IllegalArgumentException e) {
return CosmosResourceType.System;
}
}
void captureSessionToken(RxDocumentServiceRequest request, RxDocumentServiceResponse response) {
this.sessionContainer.setSessionToken(request, response.getResponseHeaders());
}
private Mono<RxDocumentServiceResponse> create(RxDocumentServiceRequest request, DocumentClientRetryPolicy retryPolicy) {
populateHeaders(request, RequestVerb.POST);
RxStoreModel storeProxy = this.getStoreProxy(request);
if(request.requestContext != null && retryPolicy.getRetryCount() > 0) {
retryPolicy.updateEndTime();
request.requestContext.updateRetryContext(retryPolicy, true);
}
return storeProxy.processMessage(request);
}
private Mono<RxDocumentServiceResponse> upsert(RxDocumentServiceRequest request, DocumentClientRetryPolicy documentClientRetryPolicy) {
populateHeaders(request, RequestVerb.POST);
Map<String, String> headers = request.getHeaders();
assert (headers != null);
headers.put(HttpConstants.HttpHeaders.IS_UPSERT, "true");
if(request.requestContext != null && documentClientRetryPolicy.getRetryCount() > 0) {
documentClientRetryPolicy.updateEndTime();
request.requestContext.updateRetryContext(documentClientRetryPolicy, true);
}
return getStoreProxy(request).processMessage(request)
.map(response -> {
this.captureSessionToken(request, response);
return response;
}
);
}
private Mono<RxDocumentServiceResponse> replace(RxDocumentServiceRequest request, DocumentClientRetryPolicy documentClientRetryPolicy) {
populateHeaders(request, RequestVerb.PUT);
if(request.requestContext != null && documentClientRetryPolicy.getRetryCount() > 0) {
documentClientRetryPolicy.updateEndTime();
request.requestContext.updateRetryContext(documentClientRetryPolicy, true);
}
return getStoreProxy(request).processMessage(request);
}
@Override
public Mono<ResourceResponse<Document>> createDocument(String collectionLink, Object document,
RequestOptions options, boolean disableAutomaticIdGeneration) {
DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
if (options == null || options.getPartitionKey() == null) {
requestRetryPolicy = new PartitionKeyMismatchRetryPolicy(collectionCache, requestRetryPolicy, collectionLink, options);
}
DocumentClientRetryPolicy finalRetryPolicyInstance = requestRetryPolicy;
return ObservableHelper.inlineIfPossibleAsObs(() -> createDocumentInternal(collectionLink, document, options, disableAutomaticIdGeneration, finalRetryPolicyInstance), requestRetryPolicy);
}
private Mono<ResourceResponse<Document>> createDocumentInternal(String collectionLink, Object document,
RequestOptions options, boolean disableAutomaticIdGeneration, DocumentClientRetryPolicy requestRetryPolicy) {
try {
logger.debug("Creating a Document. collectionLink: [{}]", collectionLink);
Mono<RxDocumentServiceRequest> requestObs = getCreateDocumentRequest(collectionLink, document,
options, disableAutomaticIdGeneration, OperationType.Create);
Mono<RxDocumentServiceResponse> responseObservable = requestObs.flatMap(request -> {
if (requestRetryPolicy != null) {
requestRetryPolicy.onBeforeSendRequest(request);
}
return create(request, requestRetryPolicy);
});
return responseObservable
.map(serviceResponse -> toResourceResponse(serviceResponse, Document.class));
} catch (Exception e) {
logger.debug("Failure in creating a document due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
@Override
public Mono<ResourceResponse<Document>> upsertDocument(String collectionLink, Object document,
RequestOptions options, boolean disableAutomaticIdGeneration) {
DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
if (options == null || options.getPartitionKey() == null) {
requestRetryPolicy = new PartitionKeyMismatchRetryPolicy(collectionCache, requestRetryPolicy, collectionLink, options);
}
DocumentClientRetryPolicy finalRetryPolicyInstance = requestRetryPolicy;
return ObservableHelper.inlineIfPossibleAsObs(() -> upsertDocumentInternal(collectionLink, document, options, disableAutomaticIdGeneration, finalRetryPolicyInstance), finalRetryPolicyInstance);
}
private Mono<ResourceResponse<Document>> upsertDocumentInternal(String collectionLink, Object document,
RequestOptions options, boolean disableAutomaticIdGeneration, DocumentClientRetryPolicy retryPolicyInstance) {
try {
logger.debug("Upserting a Document. collectionLink: [{}]", collectionLink);
Mono<RxDocumentServiceRequest> reqObs = getCreateDocumentRequest(collectionLink, document,
options, disableAutomaticIdGeneration, OperationType.Upsert);
Mono<RxDocumentServiceResponse> responseObservable = reqObs.flatMap(request -> {
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
return upsert(request, retryPolicyInstance);
});
return responseObservable
.map(serviceResponse -> toResourceResponse(serviceResponse, Document.class));
} catch (Exception e) {
logger.debug("Failure in upserting a document due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
@Override
public Mono<ResourceResponse<Document>> replaceDocument(String documentLink, Object document,
RequestOptions options) {
DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
if (options == null || options.getPartitionKey() == null) {
String collectionLink = Utils.getCollectionName(documentLink);
requestRetryPolicy = new PartitionKeyMismatchRetryPolicy(collectionCache, requestRetryPolicy, collectionLink, options);
}
DocumentClientRetryPolicy finalRequestRetryPolicy = requestRetryPolicy;
return ObservableHelper.inlineIfPossibleAsObs(() -> replaceDocumentInternal(documentLink, document, options, finalRequestRetryPolicy), requestRetryPolicy);
}
private Mono<ResourceResponse<Document>> replaceDocumentInternal(String documentLink, Object document,
RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (StringUtils.isEmpty(documentLink)) {
throw new IllegalArgumentException("documentLink");
}
if (document == null) {
throw new IllegalArgumentException("document");
}
Document typedDocument = documentFromObject(document, mapper);
return this.replaceDocumentInternal(documentLink, typedDocument, options, retryPolicyInstance);
} catch (Exception e) {
logger.debug("Failure in replacing a document due to [{}]", e.getMessage());
return Mono.error(e);
}
}
@Override
public Mono<ResourceResponse<Document>> replaceDocument(Document document, RequestOptions options) {
DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
if (options == null || options.getPartitionKey() == null) {
String collectionLink = document.getSelfLink();
requestRetryPolicy = new PartitionKeyMismatchRetryPolicy(collectionCache, requestRetryPolicy, collectionLink, options);
}
DocumentClientRetryPolicy finalRequestRetryPolicy = requestRetryPolicy;
return ObservableHelper.inlineIfPossibleAsObs(() -> replaceDocumentInternal(document, options, finalRequestRetryPolicy), requestRetryPolicy);
}
private Mono<ResourceResponse<Document>> replaceDocumentInternal(Document document, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (document == null) {
throw new IllegalArgumentException("document");
}
return this.replaceDocumentInternal(document.getSelfLink(), document, options, retryPolicyInstance);
} catch (Exception e) {
logger.debug("Failure in replacing a database due to [{}]", e.getMessage());
return Mono.error(e);
}
}
private Mono<ResourceResponse<Document>> replaceDocumentInternal(String documentLink,
Document document,
RequestOptions options,
DocumentClientRetryPolicy retryPolicyInstance) {
if (document == null) {
throw new IllegalArgumentException("document");
}
logger.debug("Replacing a Document. documentLink: [{}]", documentLink);
final String path = Utils.joinPath(documentLink, null);
final Map<String, String> requestHeaders = getRequestHeaders(options);
ByteBuffer content = serializeJsonToByteBuffer(document, mapper);
final RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Replace,
ResourceType.Document, path, requestHeaders, options, content);
Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = collectionCache.resolveCollectionAsync(request);
Mono<RxDocumentServiceRequest> requestObs = addPartitionKeyInformation(request, content, document, options, collectionObs);
return requestObs.flatMap(req -> {
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
return replace(request, retryPolicyInstance)
.map(resp -> toResourceResponse(resp, Document.class));} );
}
@Override
public Mono<ResourceResponse<Document>> deleteDocument(String documentLink, RequestOptions options) {
DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
return ObservableHelper.inlineIfPossibleAsObs(() -> deleteDocumentInternal(documentLink, options, requestRetryPolicy), requestRetryPolicy);
}
private Mono<ResourceResponse<Document>> deleteDocumentInternal(String documentLink, RequestOptions options,
DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (StringUtils.isEmpty(documentLink)) {
throw new IllegalArgumentException("documentLink");
}
logger.debug("Deleting a Document. documentLink: [{}]", documentLink);
String path = Utils.joinPath(documentLink, null);
Map<String, String> requestHeaders = this.getRequestHeaders(options);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Delete,
ResourceType.Document, path, requestHeaders, options);
Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = collectionCache.resolveCollectionAsync(request);
Mono<RxDocumentServiceRequest> requestObs = addPartitionKeyInformation(request, null, null, options, collectionObs);
return requestObs.flatMap(req -> {
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(req);
}
return this.delete(req, retryPolicyInstance)
.map(serviceResponse -> toResourceResponse(serviceResponse, Document.class));});
} catch (Exception e) {
logger.debug("Failure in deleting a document due to [{}]", e.getMessage());
return Mono.error(e);
}
}
@Override
public Mono<ResourceResponse<Document>> readDocument(String documentLink, RequestOptions options) {
DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy();
return ObservableHelper.inlineIfPossibleAsObs(() -> readDocumentInternal(documentLink, options, retryPolicyInstance), retryPolicyInstance);
}
private Mono<ResourceResponse<Document>> readDocumentInternal(String documentLink, RequestOptions options,
DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (StringUtils.isEmpty(documentLink)) {
throw new IllegalArgumentException("documentLink");
}
logger.debug("Reading a Document. documentLink: [{}]", documentLink);
String path = Utils.joinPath(documentLink, null);
Map<String, String> requestHeaders = this.getRequestHeaders(options);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Read,
ResourceType.Document, path, requestHeaders, options);
Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = this.collectionCache.resolveCollectionAsync(request);
Mono<RxDocumentServiceRequest> requestObs = addPartitionKeyInformation(request, null, null, options, collectionObs);
return requestObs.flatMap(req -> {
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.read(request, retryPolicyInstance).map(serviceResponse -> toResourceResponse(serviceResponse, Document.class));
});
} catch (Exception e) {
logger.debug("Failure in reading a document due to [{}]", e.getMessage());
return Mono.error(e);
}
}
@Override
public Flux<FeedResponse<Document>> readDocuments(String collectionLink, FeedOptions options) {
if (StringUtils.isEmpty(collectionLink)) {
throw new IllegalArgumentException("collectionLink");
}
return queryDocuments(collectionLink, "SELECT * FROM r", options);
}
    /**
     * Point-reads a batch of (id, partition key) pairs in a single operation:
     * resolves the collection and its routing map, buckets the keys by the
     * partition key range that owns them, builds one SQL query per range, runs
     * them via a read-many query context, and aggregates all pages into a single
     * feed response (summing the request charge across pages).
     */
    @Override
    public <T> Mono<FeedResponse<T>> readMany(
        List<Pair<String, PartitionKey>> itemKeyList,
        String collectionLink,
        FeedOptions options,
        Class<T> klass) {
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(
            OperationType.Query,
            ResourceType.Document,
            collectionLink, null
        );
        // Resolve the collection first; its resource id and partition key definition
        // drive both routing and query construction below.
        Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = collectionCache.resolveCollectionAsync(request);
        return collectionObs
            .flatMap(documentCollectionResourceResponse -> {
                final DocumentCollection collection = documentCollectionResourceResponse.v;
                if (collection == null) {
                    throw new IllegalStateException("Collection cannot be null");
                }

                Mono<Utils.ValueHolder<CollectionRoutingMap>> valueHolderMono = partitionKeyRangeCache
                    .tryLookupAsync(collection
                            .getResourceId(),
                        null,
                        null);

                return valueHolderMono.flatMap(collectionRoutingMapValueHolder -> {
                    // Buckets the requested keys by owning partition key range.
                    Map<PartitionKeyRange, List<Pair<String, PartitionKey>>> partitionRangeItemKeyMap =
                        new HashMap<>();
                    CollectionRoutingMap routingMap = collectionRoutingMapValueHolder.v;
                    if (routingMap == null) {
                        throw new IllegalStateException("Failed to get routing map.");
                    }
                    itemKeyList
                        .forEach(stringPartitionKeyPair -> {
                            // Map each partition key to its effective (hashed) form, then
                            // look up the range that owns it.
                            String effectivePartitionKeyString = PartitionKeyInternalHelper
                                .getEffectivePartitionKeyString(BridgeInternal
                                        .getPartitionKeyInternal(stringPartitionKeyPair
                                            .getRight()),
                                    collection
                                        .getPartitionKey());

                            PartitionKeyRange range =
                                routingMap.getRangeByEffectivePartitionKey(effectivePartitionKeyString);

                            if (partitionRangeItemKeyMap.get(range) == null) {
                                List<Pair<String, PartitionKey>> list = new ArrayList<>();
                                list.add(stringPartitionKeyPair);
                                partitionRangeItemKeyMap.put(range, list);
                            } else {
                                List<Pair<String, PartitionKey>> pairs =
                                    partitionRangeItemKeyMap.get(range);
                                pairs.add(stringPartitionKeyPair);
                                partitionRangeItemKeyMap.put(range, pairs);
                            }

                        });

                    Set<PartitionKeyRange> partitionKeyRanges = partitionRangeItemKeyMap.keySet();
                    List<PartitionKeyRange> ranges = new ArrayList<>();
                    ranges.addAll(partitionKeyRanges);

                    // One SQL query per range, targeting exactly the keys owned by it.
                    Map<PartitionKeyRange, SqlQuerySpec> rangeQueryMap;
                    rangeQueryMap = getRangeQueryMap(partitionRangeItemKeyMap,
                        collection.getPartitionKey());

                    // The outer query text is never sent; the per-range specs above are
                    // what actually executes.
                    String sqlQuery = "this is dummy and only used in creating " +
                        "ParallelDocumentQueryExecutioncontext, but not used";

                    return createReadManyQuery(collectionLink,
                        new SqlQuerySpec(sqlQuery),
                        options,
                        Document.class,
                        ResourceType.Document,
                        collection,
                        Collections.unmodifiableMap(rangeQueryMap))
                        .collectList()
                        .map(feedList -> {
                            // Aggregate all pages into one response; the request charge
                            // is the sum across pages.
                            List<T> finalList = new ArrayList<T>();
                            HashMap<String, String> headers = new HashMap<>();
                            double requestCharge = 0;
                            for (FeedResponse<Document> page : feedList) {
                                requestCharge += page.getRequestCharge();
                                finalList.addAll(page.getResults().stream().map(document -> document.toObject(klass)).collect(Collectors.toList()));
                            }
                            headers.put(HttpConstants.HttpHeaders.REQUEST_CHARGE, Double
                                .toString(requestCharge));
                            FeedResponse<T> frp = BridgeInternal
                                .createFeedResponse(finalList, headers);
                            return frp;
                        });
                });
                }
            );
    }
private Map<PartitionKeyRange, SqlQuerySpec> getRangeQueryMap(
Map<PartitionKeyRange, List<Pair<String, PartitionKey>>> partitionRangeItemKeyMap,
PartitionKeyDefinition partitionKeyDefinition) {
Map<PartitionKeyRange, SqlQuerySpec> rangeQueryMap = new HashMap<>();
String partitionKeySelector = createPkSelector(partitionKeyDefinition);
for(Map.Entry<PartitionKeyRange, List<Pair<String, PartitionKey>>> entry: partitionRangeItemKeyMap.entrySet()) {
SqlQuerySpec sqlQuerySpec;
if (partitionKeySelector.equals("[\"id\"]")) {
sqlQuerySpec = createReadManyQuerySpecPartitionKeyIdSame(entry.getValue(), partitionKeySelector);
} else {
sqlQuerySpec = createReadManyQuerySpec(entry.getValue(), partitionKeySelector);
}
rangeQueryMap.put(entry.getKey(), sqlQuerySpec);
}
return rangeQueryMap;
}
private SqlQuerySpec createReadManyQuerySpecPartitionKeyIdSame(List<Pair<String, PartitionKey>> idPartitionKeyPairList, String partitionKeySelector) {
StringBuilder queryStringBuilder = new StringBuilder();
SqlParameterList parameters = new SqlParameterList();
queryStringBuilder.append("SELECT * FROM c WHERE c.id IN ( ");
for (int i = 0; i < idPartitionKeyPairList.size(); i++) {
Pair<String, PartitionKey> pair = idPartitionKeyPairList.get(i);
String idValue = pair.getLeft();
String idParamName = "@param" + i;
PartitionKey pkValueAsPartitionKey = pair.getRight();
Object pkValue = BridgeInternal.getPartitionKeyObject(pkValueAsPartitionKey);
if (!Objects.equals(idValue, pkValue)) {
continue;
}
parameters.add(new SqlParameter(idParamName, idValue));
queryStringBuilder.append(idParamName);
if (i < idPartitionKeyPairList.size() - 1) {
queryStringBuilder.append(", ");
}
}
queryStringBuilder.append(" )");
return new SqlQuerySpec(queryStringBuilder.toString(), parameters);
}
private SqlQuerySpec createReadManyQuerySpec(List<Pair<String, PartitionKey>> idPartitionKeyPairList, String partitionKeySelector) {
StringBuilder queryStringBuilder = new StringBuilder();
SqlParameterList parameters = new SqlParameterList();
queryStringBuilder.append("SELECT * FROM c WHERE ( ");
for (int i = 0; i < idPartitionKeyPairList.size(); i++) {
Pair<String, PartitionKey> pair = idPartitionKeyPairList.get(i);
PartitionKey pkValueAsPartitionKey = pair.getRight();
Object pkValue = BridgeInternal.getPartitionKeyObject(pkValueAsPartitionKey);
String pkParamName = "@param" + (2 * i);
parameters.add(new SqlParameter(pkParamName, pkValue));
String idValue = pair.getLeft();
String idParamName = "@param" + (2 * i + 1);
parameters.add(new SqlParameter(idParamName, idValue));
queryStringBuilder.append("(");
queryStringBuilder.append("c.id = ");
queryStringBuilder.append(idParamName);
queryStringBuilder.append(" AND ");
queryStringBuilder.append(" c");
queryStringBuilder.append(partitionKeySelector);
queryStringBuilder.append((" = "));
queryStringBuilder.append(pkParamName);
queryStringBuilder.append(" )");
if (i < idPartitionKeyPairList.size() - 1) {
queryStringBuilder.append(" OR ");
}
}
queryStringBuilder.append(" )");
return new SqlQuerySpec(queryStringBuilder.toString(), parameters);
}
    /**
     * Builds an index-style selector (e.g. {@code ["a"]["b"]}) from the partition
     * key definition's paths, used when composing read-many SQL text.
     */
    private String createPkSelector(PartitionKeyDefinition partitionKeyDefinition) {
        return partitionKeyDefinition.getPaths()
            .stream()
            // Drop the leading '/' of each path segment.
            .map(pathPart -> StringUtils.substring(pathPart, 1))
            // NOTE(review): this replaces '"' with '\' (not '\"'), which looks like an
            // incomplete escape — confirm against the service-side quoting rules
            // before changing.
            .map(pathPart -> StringUtils.replace(pathPart, "\"", "\\"))
            .map(part -> "[\"" + part + "\"]")
            .collect(Collectors.joining());
    }
private String getCurentParamName(int paramCnt){
return "@param" + paramCnt;
}
private <T extends Resource> Flux<FeedResponse<T>> createReadManyQuery(
String parentResourceLink,
SqlQuerySpec sqlQuery,
FeedOptions options,
Class<T> klass,
ResourceType resourceTypeEnum,
DocumentCollection collection,
Map<PartitionKeyRange, SqlQuerySpec> rangeQueryMap) {
UUID activityId = Utils.randomUUID();
IDocumentQueryClient queryClient = DocumentQueryClientImpl(RxDocumentClientImpl.this);
Flux<? extends IDocumentQueryExecutionContext<T>> executionContext =
DocumentQueryExecutionContextFactory.createReadManyQueryAsync(queryClient, collection.getResourceId(),
sqlQuery,
rangeQueryMap,
options,
collection.getResourceId(),
parentResourceLink,
activityId,
klass,
resourceTypeEnum);
return executionContext.flatMap(IDocumentQueryExecutionContext::executeAsync);
}
@Override
public Flux<FeedResponse<Document>> queryDocuments(String collectionLink, String query,
FeedOptions options) {
return queryDocuments(collectionLink, new SqlQuerySpec(query), options);
}
    /**
     * Adapts this client into an {@link IDocumentQueryClient} for the query
     * execution pipeline; all lookups delegate to the enclosing client's state.
     */
    private IDocumentQueryClient DocumentQueryClientImpl(RxDocumentClientImpl rxDocumentClientImpl) {
        return new IDocumentQueryClient () {

            @Override
            public RxCollectionCache getCollectionCache() {
                return RxDocumentClientImpl.this.collectionCache;
            }

            @Override
            public RxPartitionKeyRangeCache getPartitionKeyRangeCache() {
                return RxDocumentClientImpl.this.partitionKeyRangeCache;
            }

            @Override
            public IRetryPolicyFactory getResetSessionTokenRetryPolicy() {
                return RxDocumentClientImpl.this.resetSessionTokenRetryPolicy;
            }

            @Override
            public ConsistencyLevel getDefaultConsistencyLevelAsync() {
                // Account-level default, as reported by the gateway configuration.
                return RxDocumentClientImpl.this.gatewayConfigurationReader.getDefaultConsistencyLevel();
            }

            @Override
            public ConsistencyLevel getDesiredConsistencyLevelAsync() {
                // The level this client was configured with (may differ from the default).
                return RxDocumentClientImpl.this.consistencyLevel;
            }

            @Override
            public Mono<RxDocumentServiceResponse> executeQueryAsync(RxDocumentServiceRequest request) {
                return RxDocumentClientImpl.this.query(request).single();
            }

            @Override
            public QueryCompatibilityMode getQueryCompatibilityMode() {
                return QueryCompatibilityMode.Default;
            }

            @Override
            public Mono<RxDocumentServiceResponse> readFeedAsync(RxDocumentServiceRequest request) {
                // NOTE(review): read-feed is not implemented by this adapter — confirm
                // callers never invoke it on this path.
                return null;
            }
        };
    }
@Override
public Flux<FeedResponse<Document>> queryDocuments(String collectionLink, SqlQuerySpec querySpec,
FeedOptions options) {
return createQuery(collectionLink, querySpec, options, Document.class, ResourceType.Document);
}
@Override
public Flux<FeedResponse<Document>> queryDocumentChangeFeed(final String collectionLink,
final ChangeFeedOptions changeFeedOptions) {
if (StringUtils.isEmpty(collectionLink)) {
throw new IllegalArgumentException("collectionLink");
}
ChangeFeedQueryImpl<Document> changeFeedQueryImpl = new ChangeFeedQueryImpl<Document>(this, ResourceType.Document,
Document.class, collectionLink, changeFeedOptions);
return changeFeedQueryImpl.executeAsync();
}
@Override
public Flux<FeedResponse<PartitionKeyRange>> readPartitionKeyRanges(final String collectionLink,
FeedOptions options) {
if (StringUtils.isEmpty(collectionLink)) {
throw new IllegalArgumentException("collectionLink");
}
return readFeed(options, ResourceType.PartitionKeyRange, PartitionKeyRange.class,
Utils.joinPath(collectionLink, Paths.PARTITION_KEY_RANGES_PATH_SEGMENT));
}
private RxDocumentServiceRequest getStoredProcedureRequest(String collectionLink, StoredProcedure storedProcedure,
RequestOptions options, OperationType operationType) {
if (StringUtils.isEmpty(collectionLink)) {
throw new IllegalArgumentException("collectionLink");
}
if (storedProcedure == null) {
throw new IllegalArgumentException("storedProcedure");
}
validateResource(storedProcedure);
String path = Utils.joinPath(collectionLink, Paths.STORED_PROCEDURES_PATH_SEGMENT);
Map<String, String> requestHeaders = this.getRequestHeaders(options);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(operationType, ResourceType.StoredProcedure,
path, storedProcedure, requestHeaders, options);
return request;
}
private RxDocumentServiceRequest getUserDefinedFunctionRequest(String collectionLink, UserDefinedFunction udf,
RequestOptions options, OperationType operationType) {
if (StringUtils.isEmpty(collectionLink)) {
throw new IllegalArgumentException("collectionLink");
}
if (udf == null) {
throw new IllegalArgumentException("udf");
}
validateResource(udf);
String path = Utils.joinPath(collectionLink, Paths.USER_DEFINED_FUNCTIONS_PATH_SEGMENT);
Map<String, String> requestHeaders = this.getRequestHeaders(options);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(operationType,
ResourceType.UserDefinedFunction, path, udf, requestHeaders, options);
return request;
}
@Override
public Mono<ResourceResponse<StoredProcedure>> createStoredProcedure(String collectionLink,
StoredProcedure storedProcedure, RequestOptions options) {
DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
return ObservableHelper.inlineIfPossibleAsObs(() -> createStoredProcedureInternal(collectionLink, storedProcedure, options, requestRetryPolicy), requestRetryPolicy);
}
private Mono<ResourceResponse<StoredProcedure>> createStoredProcedureInternal(String collectionLink,
StoredProcedure storedProcedure, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
logger.debug("Creating a StoredProcedure. collectionLink: [{}], storedProcedure id [{}]",
collectionLink, storedProcedure.getId());
RxDocumentServiceRequest request = getStoredProcedureRequest(collectionLink, storedProcedure, options,
OperationType.Create);
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.create(request, retryPolicyInstance).map(response -> toResourceResponse(response, StoredProcedure.class));
} catch (Exception e) {
logger.debug("Failure in creating a StoredProcedure due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
@Override
public Mono<ResourceResponse<StoredProcedure>> upsertStoredProcedure(String collectionLink,
StoredProcedure storedProcedure, RequestOptions options) {
DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
return ObservableHelper.inlineIfPossibleAsObs(() -> upsertStoredProcedureInternal(collectionLink, storedProcedure, options, requestRetryPolicy), requestRetryPolicy);
}
private Mono<ResourceResponse<StoredProcedure>> upsertStoredProcedureInternal(String collectionLink,
StoredProcedure storedProcedure, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
logger.debug("Upserting a StoredProcedure. collectionLink: [{}], storedProcedure id [{}]",
collectionLink, storedProcedure.getId());
RxDocumentServiceRequest request = getStoredProcedureRequest(collectionLink, storedProcedure, options,
OperationType.Upsert);
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.upsert(request, retryPolicyInstance).map(response -> toResourceResponse(response, StoredProcedure.class));
} catch (Exception e) {
logger.debug("Failure in upserting a StoredProcedure due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
@Override
public Mono<ResourceResponse<StoredProcedure>> replaceStoredProcedure(StoredProcedure storedProcedure,
RequestOptions options) {
DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
return ObservableHelper.inlineIfPossibleAsObs(() -> replaceStoredProcedureInternal(storedProcedure, options, requestRetryPolicy), requestRetryPolicy);
}
private Mono<ResourceResponse<StoredProcedure>> replaceStoredProcedureInternal(StoredProcedure storedProcedure,
RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (storedProcedure == null) {
throw new IllegalArgumentException("storedProcedure");
}
logger.debug("Replacing a StoredProcedure. storedProcedure id [{}]", storedProcedure.getId());
RxDocumentClientImpl.validateResource(storedProcedure);
String path = Utils.joinPath(storedProcedure.getSelfLink(), null);
Map<String, String> requestHeaders = getRequestHeaders(options);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Replace,
ResourceType.StoredProcedure, path, storedProcedure, requestHeaders, options);
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.replace(request, retryPolicyInstance).map(response -> toResourceResponse(response, StoredProcedure.class));
} catch (Exception e) {
logger.debug("Failure in replacing a StoredProcedure due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
@Override
public Mono<ResourceResponse<StoredProcedure>> deleteStoredProcedure(String storedProcedureLink,
RequestOptions options) {
DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
return ObservableHelper.inlineIfPossibleAsObs(() -> deleteStoredProcedureInternal(storedProcedureLink, options, requestRetryPolicy), requestRetryPolicy);
}
private Mono<ResourceResponse<StoredProcedure>> deleteStoredProcedureInternal(String storedProcedureLink,
RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (StringUtils.isEmpty(storedProcedureLink)) {
throw new IllegalArgumentException("storedProcedureLink");
}
logger.debug("Deleting a StoredProcedure. storedProcedureLink [{}]", storedProcedureLink);
String path = Utils.joinPath(storedProcedureLink, null);
Map<String, String> requestHeaders = this.getRequestHeaders(options);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Delete,
ResourceType.StoredProcedure, path, requestHeaders, options);
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.delete(request, retryPolicyInstance).map(response -> toResourceResponse(response, StoredProcedure.class));
} catch (Exception e) {
logger.debug("Failure in deleting a StoredProcedure due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
@Override
public Mono<ResourceResponse<StoredProcedure>> readStoredProcedure(String storedProcedureLink,
RequestOptions options) {
DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy();
return ObservableHelper.inlineIfPossibleAsObs(() -> readStoredProcedureInternal(storedProcedureLink, options, retryPolicyInstance), retryPolicyInstance);
}
private Mono<ResourceResponse<StoredProcedure>> readStoredProcedureInternal(String storedProcedureLink,
RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (StringUtils.isEmpty(storedProcedureLink)) {
throw new IllegalArgumentException("storedProcedureLink");
}
logger.debug("Reading a StoredProcedure. storedProcedureLink [{}]", storedProcedureLink);
String path = Utils.joinPath(storedProcedureLink, null);
Map<String, String> requestHeaders = this.getRequestHeaders(options);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Read,
ResourceType.StoredProcedure, path, requestHeaders, options);
if (retryPolicyInstance != null){
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, StoredProcedure.class));
} catch (Exception e) {
logger.debug("Failure in reading a StoredProcedure due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
@Override
public Flux<FeedResponse<StoredProcedure>> readStoredProcedures(String collectionLink,
                                                                FeedOptions options) {
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }
    // Feed-read over the collection's stored-procedures sub-resource path.
    final String feedPath = Utils.joinPath(collectionLink, Paths.STORED_PROCEDURES_PATH_SEGMENT);
    return readFeed(options, ResourceType.StoredProcedure, StoredProcedure.class, feedPath);
}
@Override
public Flux<FeedResponse<StoredProcedure>> queryStoredProcedures(String collectionLink, String query,
                                                                 FeedOptions options) {
    // Raw-string overload simply wraps the query and delegates.
    final SqlQuerySpec spec = new SqlQuerySpec(query);
    return queryStoredProcedures(collectionLink, spec, options);
}
@Override
public Flux<FeedResponse<StoredProcedure>> queryStoredProcedures(String collectionLink,
                                                                 SqlQuerySpec querySpec, FeedOptions options) {
    // All query overloads funnel into the shared query pipeline.
    return createQuery(collectionLink, querySpec, options, StoredProcedure.class, ResourceType.StoredProcedure);
}
@Override
public Mono<StoredProcedureResponse> executeStoredProcedure(String storedProcedureLink,
                                                            Object[] procedureParams) {
    // Convenience overload: execute without per-request options.
    return this.executeStoredProcedure(storedProcedureLink, null, procedureParams);
}
@Override
public Mono<StoredProcedureResponse> executeStoredProcedure(String storedProcedureLink,
                                                            RequestOptions options, Object[] procedureParams) {
    // The same retry policy instance wraps and is visible to the deferred execution.
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> executeStoredProcedureInternal(storedProcedureLink, options, procedureParams, retryPolicy),
        retryPolicy);
}
// Executes a stored procedure: serializes the parameters into the request body,
// forces a JSON Accept header, resolves partition-key information, issues the
// call, and captures the session token from the response.
private Mono<StoredProcedureResponse> executeStoredProcedureInternal(String storedProcedureLink,
RequestOptions options, Object[] procedureParams, DocumentClientRetryPolicy retryPolicy) {
try {
logger.debug("Executing a StoredProcedure. storedProcedureLink [{}]", storedProcedureLink);
String path = Utils.joinPath(storedProcedureLink, null);
Map<String, String> requestHeaders = getRequestHeaders(options);
requestHeaders.put(HttpConstants.HttpHeaders.ACCEPT, RuntimeConstants.MediaTypes.JSON);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.ExecuteJavaScript,
ResourceType.StoredProcedure, path,
procedureParams != null ? RxDocumentClientImpl.serializeProcedureParams(procedureParams) : "",
requestHeaders, options);
// NOTE(review): the lambda below uses the outer 'request' rather than 'req';
// this presumably relies on addPartitionKeyInformation mutating and returning
// the same instance — confirm (the same pattern appears elsewhere in this file).
Mono<RxDocumentServiceRequest> reqObs = addPartitionKeyInformation(request, null, null, options);
return reqObs.flatMap(req -> create(request, retryPolicy)
.map(response -> {
// Record the response's session token before converting the result.
this.captureSessionToken(request, response);
return toStoredProcedureResponse(response);
}));
} catch (Exception e) {
logger.debug("Failure in executing a StoredProcedure due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
@Override
public Mono<ResourceResponse<Trigger>> createTrigger(String collectionLink, Trigger trigger,
                                                     RequestOptions options) {
    // Single retry policy instance shared by the deferred work and the wrapper.
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> createTriggerInternal(collectionLink, trigger, options, retryPolicy),
        retryPolicy);
}
// Builds and issues the Create request for a trigger; the retry policy is given
// a chance to observe the request before it is sent.
private Mono<ResourceResponse<Trigger>> createTriggerInternal(String collectionLink, Trigger trigger,
RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
logger.debug("Creating a Trigger. collectionLink [{}], trigger id [{}]", collectionLink,
trigger.getId());
RxDocumentServiceRequest request = getTriggerRequest(collectionLink, trigger, options,
OperationType.Create);
if (retryPolicyInstance != null){
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.create(request, retryPolicyInstance).map(response -> toResourceResponse(response, Trigger.class));
} catch (Exception e) {
// Synchronous failures (validation, serialization) surface via Mono.error.
logger.debug("Failure in creating a Trigger due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
@Override
public Mono<ResourceResponse<Trigger>> upsertTrigger(String collectionLink, Trigger trigger,
                                                     RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    // Defer the upsert so the retry wrapper can re-invoke it on retryable failures.
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> upsertTriggerInternal(collectionLink, trigger, options, retryPolicy),
        retryPolicy);
}
// Builds and issues the Upsert request for a trigger; retry policy observes the
// request before send.
private Mono<ResourceResponse<Trigger>> upsertTriggerInternal(String collectionLink, Trigger trigger,
RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
logger.debug("Upserting a Trigger. collectionLink [{}], trigger id [{}]", collectionLink,
trigger.getId());
RxDocumentServiceRequest request = getTriggerRequest(collectionLink, trigger, options,
OperationType.Upsert);
if (retryPolicyInstance != null){
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.upsert(request, retryPolicyInstance).map(response -> toResourceResponse(response, Trigger.class));
} catch (Exception e) {
logger.debug("Failure in upserting a Trigger due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
// Validates inputs and assembles a service request targeting the collection's
// /triggers sub-resource for the given operation type.
private RxDocumentServiceRequest getTriggerRequest(String collectionLink, Trigger trigger, RequestOptions options,
                                                   OperationType operationType) {
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }
    if (trigger == null) {
        throw new IllegalArgumentException("trigger");
    }
    RxDocumentClientImpl.validateResource(trigger);

    final String triggersPath = Utils.joinPath(collectionLink, Paths.TRIGGERS_PATH_SEGMENT);
    final Map<String, String> headers = getRequestHeaders(options);
    return RxDocumentServiceRequest.create(operationType, ResourceType.Trigger, triggersPath,
        trigger, headers, options);
}
@Override
public Mono<ResourceResponse<Trigger>> replaceTrigger(Trigger trigger, RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    // Same policy instance drives both the deferred replace and its retries.
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> replaceTriggerInternal(trigger, options, retryPolicy),
        retryPolicy);
}
// Replaces an existing trigger addressed by its self-link; validates the
// resource, builds the Replace request, and lets the retry policy observe it.
private Mono<ResourceResponse<Trigger>> replaceTriggerInternal(Trigger trigger, RequestOptions options,
DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (trigger == null) {
throw new IllegalArgumentException("trigger");
}
logger.debug("Replacing a Trigger. trigger id [{}]", trigger.getId());
RxDocumentClientImpl.validateResource(trigger);
String path = Utils.joinPath(trigger.getSelfLink(), null);
Map<String, String> requestHeaders = getRequestHeaders(options);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Replace,
ResourceType.Trigger, path, trigger, requestHeaders, options);
if (retryPolicyInstance != null){
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.replace(request, retryPolicyInstance).map(response -> toResourceResponse(response, Trigger.class));
} catch (Exception e) {
logger.debug("Failure in replacing a Trigger due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
@Override
public Mono<ResourceResponse<Trigger>> deleteTrigger(String triggerLink, RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> deleteTriggerInternal(triggerLink, options, retryPolicy),
        retryPolicy);
}
// Deletes the trigger addressed by triggerLink; retry policy observes the
// request before send, synchronous failures surface via Mono.error.
private Mono<ResourceResponse<Trigger>> deleteTriggerInternal(String triggerLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (StringUtils.isEmpty(triggerLink)) {
throw new IllegalArgumentException("triggerLink");
}
logger.debug("Deleting a Trigger. triggerLink [{}]", triggerLink);
String path = Utils.joinPath(triggerLink, null);
Map<String, String> requestHeaders = getRequestHeaders(options);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Delete,
ResourceType.Trigger, path, requestHeaders, options);
if (retryPolicyInstance != null){
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.delete(request, retryPolicyInstance).map(response -> toResourceResponse(response, Trigger.class));
} catch (Exception e) {
logger.debug("Failure in deleting a Trigger due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
@Override
public Mono<ResourceResponse<Trigger>> readTrigger(String triggerLink, RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> readTriggerInternal(triggerLink, options, retryPolicy),
        retryPolicy);
}
// Reads a single trigger by link; the retry policy observes the request before
// it is issued.
private Mono<ResourceResponse<Trigger>> readTriggerInternal(String triggerLink, RequestOptions options,
DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (StringUtils.isEmpty(triggerLink)) {
throw new IllegalArgumentException("triggerLink");
}
logger.debug("Reading a Trigger. triggerLink [{}]", triggerLink);
String path = Utils.joinPath(triggerLink, null);
Map<String, String> requestHeaders = getRequestHeaders(options);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Read,
ResourceType.Trigger, path, requestHeaders, options);
if (retryPolicyInstance != null){
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, Trigger.class));
} catch (Exception e) {
logger.debug("Failure in reading a Trigger due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
@Override
public Flux<FeedResponse<Trigger>> readTriggers(String collectionLink, FeedOptions options) {
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }
    // Feed-read over the collection's /triggers sub-resource.
    final String feedPath = Utils.joinPath(collectionLink, Paths.TRIGGERS_PATH_SEGMENT);
    return readFeed(options, ResourceType.Trigger, Trigger.class, feedPath);
}
@Override
public Flux<FeedResponse<Trigger>> queryTriggers(String collectionLink, String query,
                                                 FeedOptions options) {
    // Raw-string overload wraps the query and delegates.
    final SqlQuerySpec spec = new SqlQuerySpec(query);
    return queryTriggers(collectionLink, spec, options);
}
@Override
public Flux<FeedResponse<Trigger>> queryTriggers(String collectionLink, SqlQuerySpec querySpec,
                                                 FeedOptions options) {
    // Funnel into the shared query pipeline.
    return createQuery(collectionLink, querySpec, options, Trigger.class, ResourceType.Trigger);
}
@Override
public Mono<ResourceResponse<UserDefinedFunction>> createUserDefinedFunction(String collectionLink,
                                                                             UserDefinedFunction udf, RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    // The wrapper retries the deferred creation with the same policy instance.
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> createUserDefinedFunctionInternal(collectionLink, udf, options, retryPolicy),
        retryPolicy);
}
// Builds and issues the Create request for a UDF; retry policy observes the
// request before send.
private Mono<ResourceResponse<UserDefinedFunction>> createUserDefinedFunctionInternal(String collectionLink,
UserDefinedFunction udf, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
logger.debug("Creating a UserDefinedFunction. collectionLink [{}], udf id [{}]", collectionLink,
udf.getId());
RxDocumentServiceRequest request = getUserDefinedFunctionRequest(collectionLink, udf, options,
OperationType.Create);
if (retryPolicyInstance != null){
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.create(request, retryPolicyInstance).map(response -> toResourceResponse(response, UserDefinedFunction.class));
} catch (Exception e) {
logger.debug("Failure in creating a UserDefinedFunction due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
@Override
public Mono<ResourceResponse<UserDefinedFunction>> upsertUserDefinedFunction(String collectionLink,
                                                                             UserDefinedFunction udf, RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> upsertUserDefinedFunctionInternal(collectionLink, udf, options, retryPolicy),
        retryPolicy);
}
// Builds and issues the Upsert request for a UDF; retry policy observes the
// request before send.
private Mono<ResourceResponse<UserDefinedFunction>> upsertUserDefinedFunctionInternal(String collectionLink,
UserDefinedFunction udf, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
logger.debug("Upserting a UserDefinedFunction. collectionLink [{}], udf id [{}]", collectionLink,
udf.getId());
RxDocumentServiceRequest request = getUserDefinedFunctionRequest(collectionLink, udf, options,
OperationType.Upsert);
if (retryPolicyInstance != null){
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.upsert(request, retryPolicyInstance).map(response -> toResourceResponse(response, UserDefinedFunction.class));
} catch (Exception e) {
logger.debug("Failure in upserting a UserDefinedFunction due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
@Override
public Mono<ResourceResponse<UserDefinedFunction>> replaceUserDefinedFunction(UserDefinedFunction udf,
                                                                              RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> replaceUserDefinedFunctionInternal(udf, options, retryPolicy),
        retryPolicy);
}
// Replaces an existing UDF addressed by its self-link; validates the resource,
// builds the Replace request, and lets the retry policy observe it.
private Mono<ResourceResponse<UserDefinedFunction>> replaceUserDefinedFunctionInternal(UserDefinedFunction udf,
RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (udf == null) {
throw new IllegalArgumentException("udf");
}
logger.debug("Replacing a UserDefinedFunction. udf id [{}]", udf.getId());
validateResource(udf);
String path = Utils.joinPath(udf.getSelfLink(), null);
Map<String, String> requestHeaders = this.getRequestHeaders(options);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Replace,
ResourceType.UserDefinedFunction, path, udf, requestHeaders, options);
if (retryPolicyInstance != null){
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.replace(request, retryPolicyInstance).map(response -> toResourceResponse(response, UserDefinedFunction.class));
} catch (Exception e) {
logger.debug("Failure in replacing a UserDefinedFunction due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
@Override
public Mono<ResourceResponse<UserDefinedFunction>> deleteUserDefinedFunction(String udfLink,
                                                                             RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> deleteUserDefinedFunctionInternal(udfLink, options, retryPolicy),
        retryPolicy);
}
// Deletes the UDF addressed by udfLink; retry policy observes the request
// before send, synchronous failures surface via Mono.error.
private Mono<ResourceResponse<UserDefinedFunction>> deleteUserDefinedFunctionInternal(String udfLink,
RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (StringUtils.isEmpty(udfLink)) {
throw new IllegalArgumentException("udfLink");
}
logger.debug("Deleting a UserDefinedFunction. udfLink [{}]", udfLink);
String path = Utils.joinPath(udfLink, null);
Map<String, String> requestHeaders = this.getRequestHeaders(options);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Delete,
ResourceType.UserDefinedFunction, path, requestHeaders, options);
if (retryPolicyInstance != null){
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.delete(request, retryPolicyInstance).map(response -> toResourceResponse(response, UserDefinedFunction.class));
} catch (Exception e) {
logger.debug("Failure in deleting a UserDefinedFunction due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
@Override
public Mono<ResourceResponse<UserDefinedFunction>> readUserDefinedFunction(String udfLink,
                                                                           RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> readUserDefinedFunctionInternal(udfLink, options, retryPolicy),
        retryPolicy);
}
// Reads a single UDF by link; retry policy observes the request before send.
private Mono<ResourceResponse<UserDefinedFunction>> readUserDefinedFunctionInternal(String udfLink,
RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (StringUtils.isEmpty(udfLink)) {
throw new IllegalArgumentException("udfLink");
}
logger.debug("Reading a UserDefinedFunction. udfLink [{}]", udfLink);
String path = Utils.joinPath(udfLink, null);
Map<String, String> requestHeaders = this.getRequestHeaders(options);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Read,
ResourceType.UserDefinedFunction, path, requestHeaders, options);
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, UserDefinedFunction.class));
} catch (Exception e) {
logger.debug("Failure in reading a UserDefinedFunction due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
@Override
public Flux<FeedResponse<UserDefinedFunction>> readUserDefinedFunctions(String collectionLink,
                                                                        FeedOptions options) {
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }
    // Feed-read over the collection's /udfs sub-resource.
    final String feedPath = Utils.joinPath(collectionLink, Paths.USER_DEFINED_FUNCTIONS_PATH_SEGMENT);
    return readFeed(options, ResourceType.UserDefinedFunction, UserDefinedFunction.class, feedPath);
}
@Override
public Flux<FeedResponse<UserDefinedFunction>> queryUserDefinedFunctions(String collectionLink,
                                                                         String query, FeedOptions options) {
    // Raw-string overload wraps the query and delegates.
    final SqlQuerySpec spec = new SqlQuerySpec(query);
    return queryUserDefinedFunctions(collectionLink, spec, options);
}
@Override
public Flux<FeedResponse<UserDefinedFunction>> queryUserDefinedFunctions(String collectionLink,
                                                                         SqlQuerySpec querySpec, FeedOptions options) {
    // Funnel into the shared query pipeline.
    return createQuery(collectionLink, querySpec, options, UserDefinedFunction.class, ResourceType.UserDefinedFunction);
}
@Override
public Mono<ResourceResponse<Conflict>> readConflict(String conflictLink, RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> readConflictInternal(conflictLink, options, retryPolicy),
        retryPolicy);
}
// Reads a conflict resource; partition-key information is resolved first, then
// the retry policy observes the request before the read is issued.
private Mono<ResourceResponse<Conflict>> readConflictInternal(String conflictLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (StringUtils.isEmpty(conflictLink)) {
throw new IllegalArgumentException("conflictLink");
}
logger.debug("Reading a Conflict. conflictLink [{}]", conflictLink);
String path = Utils.joinPath(conflictLink, null);
Map<String, String> requestHeaders = getRequestHeaders(options);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Read,
ResourceType.Conflict, path, requestHeaders, options);
// NOTE(review): the lambda uses the outer 'request' rather than 'req' —
// presumably addPartitionKeyInformation mutates and returns the same
// instance; confirm (same pattern is used in deleteConflictInternal).
Mono<RxDocumentServiceRequest> reqObs = addPartitionKeyInformation(request, null, null, options);
return reqObs.flatMap(req -> {
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, Conflict.class));
});
} catch (Exception e) {
logger.debug("Failure in reading a Conflict due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
@Override
public Flux<FeedResponse<Conflict>> readConflicts(String collectionLink, FeedOptions options) {
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }
    // Feed-read over the collection's /conflicts sub-resource.
    final String feedPath = Utils.joinPath(collectionLink, Paths.CONFLICTS_PATH_SEGMENT);
    return readFeed(options, ResourceType.Conflict, Conflict.class, feedPath);
}
@Override
public Flux<FeedResponse<Conflict>> queryConflicts(String collectionLink, String query,
                                                   FeedOptions options) {
    // Raw-string overload wraps the query and delegates.
    final SqlQuerySpec spec = new SqlQuerySpec(query);
    return queryConflicts(collectionLink, spec, options);
}
@Override
public Flux<FeedResponse<Conflict>> queryConflicts(String collectionLink, SqlQuerySpec querySpec,
                                                   FeedOptions options) {
    // Funnel into the shared query pipeline.
    return createQuery(collectionLink, querySpec, options, Conflict.class, ResourceType.Conflict);
}
@Override
public Mono<ResourceResponse<Conflict>> deleteConflict(String conflictLink, RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> deleteConflictInternal(conflictLink, options, retryPolicy),
        retryPolicy);
}
// Deletes a conflict resource; partition-key information is resolved first,
// then the retry policy observes the request before the delete is issued.
private Mono<ResourceResponse<Conflict>> deleteConflictInternal(String conflictLink, RequestOptions options,
DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (StringUtils.isEmpty(conflictLink)) {
throw new IllegalArgumentException("conflictLink");
}
logger.debug("Deleting a Conflict. conflictLink [{}]", conflictLink);
String path = Utils.joinPath(conflictLink, null);
Map<String, String> requestHeaders = getRequestHeaders(options);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Delete,
ResourceType.Conflict, path, requestHeaders, options);
// NOTE(review): as in readConflictInternal, the lambda uses the outer
// 'request' rather than 'req' — confirm addPartitionKeyInformation returns
// the same mutated instance.
Mono<RxDocumentServiceRequest> reqObs = addPartitionKeyInformation(request, null, null, options);
return reqObs.flatMap(req -> {
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.delete(request, retryPolicyInstance).map(response -> toResourceResponse(response, Conflict.class));
});
} catch (Exception e) {
logger.debug("Failure in deleting a Conflict due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
@Override
public Mono<ResourceResponse<User>> createUser(String databaseLink, User user, RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> createUserInternal(databaseLink, user, options, retryPolicy),
        retryPolicy);
}
// Builds and issues the Create request for a user under the given database.
// Unlike most siblings, no onBeforeSendRequest call is made here — the retry
// policy is only used to drive retries of the create.
private Mono<ResourceResponse<User>> createUserInternal(String databaseLink, User user, RequestOptions options, DocumentClientRetryPolicy documentClientRetryPolicy) {
try {
logger.debug("Creating a User. databaseLink [{}], user id [{}]", databaseLink, user.getId());
RxDocumentServiceRequest request = getUserRequest(databaseLink, user, options, OperationType.Create);
return this.create(request, documentClientRetryPolicy).map(response -> toResourceResponse(response, User.class));
} catch (Exception e) {
logger.debug("Failure in creating a User due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
@Override
public Mono<ResourceResponse<User>> upsertUser(String databaseLink, User user, RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> upsertUserInternal(databaseLink, user, options, retryPolicy),
        retryPolicy);
}
// Builds and issues the Upsert request for a user; retry policy observes the
// request before send.
private Mono<ResourceResponse<User>> upsertUserInternal(String databaseLink, User user, RequestOptions options,
DocumentClientRetryPolicy retryPolicyInstance) {
try {
logger.debug("Upserting a User. databaseLink [{}], user id [{}]", databaseLink, user.getId());
RxDocumentServiceRequest request = getUserRequest(databaseLink, user, options, OperationType.Upsert);
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.upsert(request, retryPolicyInstance).map(response -> toResourceResponse(response, User.class));
} catch (Exception e) {
logger.debug("Failure in upserting a User due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
// Validates inputs and assembles a service request targeting the database's
// /users sub-resource for the given operation type.
private RxDocumentServiceRequest getUserRequest(String databaseLink, User user, RequestOptions options,
                                                OperationType operationType) {
    if (StringUtils.isEmpty(databaseLink)) {
        throw new IllegalArgumentException("databaseLink");
    }
    if (user == null) {
        throw new IllegalArgumentException("user");
    }
    RxDocumentClientImpl.validateResource(user);

    final String usersPath = Utils.joinPath(databaseLink, Paths.USERS_PATH_SEGMENT);
    final Map<String, String> headers = getRequestHeaders(options);
    return RxDocumentServiceRequest.create(operationType, ResourceType.User, usersPath, user,
        headers, options);
}
@Override
public Mono<ResourceResponse<User>> replaceUser(User user, RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> replaceUserInternal(user, options, retryPolicy),
        retryPolicy);
}
// Replaces an existing user addressed by its self-link; validates the resource,
// builds the Replace request, and lets the retry policy observe it.
private Mono<ResourceResponse<User>> replaceUserInternal(User user, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (user == null) {
throw new IllegalArgumentException("user");
}
logger.debug("Replacing a User. user id [{}]", user.getId());
RxDocumentClientImpl.validateResource(user);
String path = Utils.joinPath(user.getSelfLink(), null);
Map<String, String> requestHeaders = getRequestHeaders(options);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Replace,
ResourceType.User, path, user, requestHeaders, options);
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.replace(request, retryPolicyInstance).map(response -> toResourceResponse(response, User.class));
} catch (Exception e) {
logger.debug("Failure in replacing a User due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
/**
 * Deletes the user addressed by {@code userLink}, retrying via the
 * session-token-reset retry policy like the sibling operations.
 *
 * @param userLink self-link of the user to delete; must be non-empty
 * @param options  optional per-request options (may be null)
 * @return a Mono emitting the delete response, or an error
 */
// FIX: added the missing @Override — every other interface operation in this
// class carries it; this one implements AsyncDocumentClient#deleteUser too.
@Override
public Mono<ResourceResponse<User>> deleteUser(String userLink, RequestOptions options) {
    DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> deleteUserInternal(userLink, options, retryPolicyInstance),
        retryPolicyInstance);
}
// Deletes the user addressed by userLink; retry policy observes the request
// before send, synchronous failures surface via Mono.error.
private Mono<ResourceResponse<User>> deleteUserInternal(String userLink, RequestOptions options,
DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (StringUtils.isEmpty(userLink)) {
throw new IllegalArgumentException("userLink");
}
logger.debug("Deleting a User. userLink [{}]", userLink);
String path = Utils.joinPath(userLink, null);
Map<String, String> requestHeaders = getRequestHeaders(options);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Delete,
ResourceType.User, path, requestHeaders, options);
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.delete(request, retryPolicyInstance).map(response -> toResourceResponse(response, User.class));
} catch (Exception e) {
logger.debug("Failure in deleting a User due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
@Override
public Mono<ResourceResponse<User>> readUser(String userLink, RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> readUserInternal(userLink, options, retryPolicy),
        retryPolicy);
}
// Reads a single user by link; retry policy observes the request before send.
private Mono<ResourceResponse<User>> readUserInternal(String userLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (StringUtils.isEmpty(userLink)) {
throw new IllegalArgumentException("userLink");
}
logger.debug("Reading a User. userLink [{}]", userLink);
String path = Utils.joinPath(userLink, null);
Map<String, String> requestHeaders = getRequestHeaders(options);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Read,
ResourceType.User, path, requestHeaders, options);
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, User.class));
} catch (Exception e) {
logger.debug("Failure in reading a User due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
@Override
public Flux<FeedResponse<User>> readUsers(String databaseLink, FeedOptions options) {
    if (StringUtils.isEmpty(databaseLink)) {
        throw new IllegalArgumentException("databaseLink");
    }
    // Feed-read over the database's /users sub-resource.
    final String feedPath = Utils.joinPath(databaseLink, Paths.USERS_PATH_SEGMENT);
    return readFeed(options, ResourceType.User, User.class, feedPath);
}
@Override
public Flux<FeedResponse<User>> queryUsers(String databaseLink, String query, FeedOptions options) {
    // Raw-string overload wraps the query and delegates.
    final SqlQuerySpec spec = new SqlQuerySpec(query);
    return queryUsers(databaseLink, spec, options);
}
@Override
public Flux<FeedResponse<User>> queryUsers(String databaseLink, SqlQuerySpec querySpec,
                                           FeedOptions options) {
    // Funnel into the shared query pipeline.
    return createQuery(databaseLink, querySpec, options, User.class, ResourceType.User);
}
/**
 * Creates a permission under the given user, retrying via the
 * session-token-reset retry policy.
 *
 * @param userLink   self-link of the owning user
 * @param permission the permission to create
 * @param options    optional per-request options (may be null)
 * @return a Mono emitting the create response, or an error
 */
@Override
public Mono<ResourceResponse<Permission>> createPermission(String userLink, Permission permission,
    RequestOptions options) {
    DocumentClientRetryPolicy documentClientRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    // FIX: previously a SECOND, unrelated retry-policy instance was created and
    // passed to the retry wrapper while the deferred work captured the first
    // one; a stateful policy must wrap the same execution it observes. Share a
    // single instance, consistent with every other operation in this class.
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> createPermissionInternal(userLink, permission, options, documentClientRetryPolicy),
        documentClientRetryPolicy);
}
// Builds and issues the Create request for a permission under the given user.
// Like createUserInternal, no onBeforeSendRequest call is made here.
private Mono<ResourceResponse<Permission>> createPermissionInternal(String userLink, Permission permission,
RequestOptions options, DocumentClientRetryPolicy documentClientRetryPolicy) {
try {
logger.debug("Creating a Permission. userLink [{}], permission id [{}]", userLink, permission.getId());
RxDocumentServiceRequest request = getPermissionRequest(userLink, permission, options,
OperationType.Create);
return this.create(request, documentClientRetryPolicy).map(response -> toResourceResponse(response, Permission.class));
} catch (Exception e) {
logger.debug("Failure in creating a Permission due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
@Override
public Mono<ResourceResponse<Permission>> upsertPermission(String userLink, Permission permission,
                                                           RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> upsertPermissionInternal(userLink, permission, options, retryPolicy),
        retryPolicy);
}
// Builds and issues the Upsert request for a permission; retry policy observes
// the request before send.
private Mono<ResourceResponse<Permission>> upsertPermissionInternal(String userLink, Permission permission,
RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
logger.debug("Upserting a Permission. userLink [{}], permission id [{}]", userLink, permission.getId());
RxDocumentServiceRequest request = getPermissionRequest(userLink, permission, options,
OperationType.Upsert);
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.upsert(request, retryPolicyInstance).map(response -> toResourceResponse(response, Permission.class));
} catch (Exception e) {
logger.debug("Failure in upserting a Permission due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
// Validates inputs and assembles a service request targeting the user's
// /permissions sub-resource for the given operation type.
private RxDocumentServiceRequest getPermissionRequest(String userLink, Permission permission,
                                                      RequestOptions options, OperationType operationType) {
    if (StringUtils.isEmpty(userLink)) {
        throw new IllegalArgumentException("userLink");
    }
    if (permission == null) {
        throw new IllegalArgumentException("permission");
    }
    RxDocumentClientImpl.validateResource(permission);

    final String permissionsPath = Utils.joinPath(userLink, Paths.PERMISSIONS_PATH_SEGMENT);
    final Map<String, String> headers = getRequestHeaders(options);
    return RxDocumentServiceRequest.create(operationType, ResourceType.Permission, permissionsPath,
        permission, headers, options);
}
@Override
public Mono<ResourceResponse<Permission>> replacePermission(Permission permission, RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> replacePermissionInternal(permission, options, retryPolicy),
        retryPolicy);
}
// Replaces an existing permission addressed by its self-link; validates the
// resource, builds the Replace request, and lets the retry policy observe it.
private Mono<ResourceResponse<Permission>> replacePermissionInternal(Permission permission, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (permission == null) {
throw new IllegalArgumentException("permission");
}
logger.debug("Replacing a Permission. permission id [{}]", permission.getId());
RxDocumentClientImpl.validateResource(permission);
String path = Utils.joinPath(permission.getSelfLink(), null);
Map<String, String> requestHeaders = getRequestHeaders(options);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Replace,
ResourceType.Permission, path, permission, requestHeaders, options);
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.replace(request, retryPolicyInstance).map(response -> toResourceResponse(response, Permission.class));
} catch (Exception e) {
logger.debug("Failure in replacing a Permission due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
@Override
public Mono<ResourceResponse<Permission>> deletePermission(String permissionLink, RequestOptions options) {
    // Defer to the internal delete under a fresh retry policy.
    DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> deletePermissionInternal(permissionLink, options, retryPolicy),
        retryPolicy);
}
// Performs the actual permission delete; any synchronous failure is surfaced as Mono.error.
private Mono<ResourceResponse<Permission>> deletePermissionInternal(String permissionLink, RequestOptions options,
                                                                    DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(permissionLink)) {
            throw new IllegalArgumentException("permissionLink");
        }
        logger.debug("Deleting a Permission. permissionLink [{}]", permissionLink);

        String permissionPath = Utils.joinPath(permissionLink, null);
        Map<String, String> headers = getRequestHeaders(options);
        RxDocumentServiceRequest deleteRequest = RxDocumentServiceRequest.create(
            OperationType.Delete, ResourceType.Permission, permissionPath, headers, options);

        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(deleteRequest);
        }
        return this.delete(deleteRequest, retryPolicyInstance)
            .map(response -> toResourceResponse(response, Permission.class));
    } catch (Exception e) {
        logger.debug("Failure in deleting a Permission due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
@Override
public Mono<ResourceResponse<Permission>> readPermission(String permissionLink, RequestOptions options) {
    // Defer to the internal read under a fresh retry policy.
    DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> readPermissionInternal(permissionLink, options, retryPolicy),
        retryPolicy);
}
// Performs the actual permission read; any synchronous failure is surfaced as Mono.error.
private Mono<ResourceResponse<Permission>> readPermissionInternal(String permissionLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance ) {
    try {
        if (StringUtils.isEmpty(permissionLink)) {
            throw new IllegalArgumentException("permissionLink");
        }
        logger.debug("Reading a Permission. permissionLink [{}]", permissionLink);

        String permissionPath = Utils.joinPath(permissionLink, null);
        Map<String, String> headers = getRequestHeaders(options);
        RxDocumentServiceRequest readRequest = RxDocumentServiceRequest.create(
            OperationType.Read, ResourceType.Permission, permissionPath, headers, options);

        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(readRequest);
        }
        return this.read(readRequest, retryPolicyInstance)
            .map(response -> toResourceResponse(response, Permission.class));
    } catch (Exception e) {
        logger.debug("Failure in reading a Permission due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
@Override
public Flux<FeedResponse<Permission>> readPermissions(String userLink, FeedOptions options) {
    if (StringUtils.isEmpty(userLink)) {
        throw new IllegalArgumentException("userLink");
    }
    // Permissions are a feed under the user's /permissions path segment.
    String feedLink = Utils.joinPath(userLink, Paths.PERMISSIONS_PATH_SEGMENT);
    return readFeed(options, ResourceType.Permission, Permission.class, feedLink);
}
@Override
public Flux<FeedResponse<Permission>> queryPermissions(String userLink, String query,
                                                       FeedOptions options) {
    // Wrap the raw query text and delegate to the SqlQuerySpec overload.
    SqlQuerySpec querySpec = new SqlQuerySpec(query);
    return queryPermissions(userLink, querySpec, options);
}
@Override
public Flux<FeedResponse<Permission>> queryPermissions(String userLink, SqlQuerySpec querySpec,
                                                       FeedOptions options) {
    // Permission queries are scoped to the owning user link.
    return createQuery(userLink, querySpec, options, Permission.class, ResourceType.Permission);
}
@Override
public Mono<ResourceResponse<Offer>> replaceOffer(Offer offer) {
    // Defer to the internal replace under a fresh retry policy.
    DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> replaceOfferInternal(offer, retryPolicy),
        retryPolicy);
}
// Performs the actual offer replace; any synchronous failure is surfaced as Mono.error.
private Mono<ResourceResponse<Offer>> replaceOfferInternal(Offer offer, DocumentClientRetryPolicy documentClientRetryPolicy) {
    try {
        if (offer == null) {
            throw new IllegalArgumentException("offer");
        }
        logger.debug("Replacing an Offer. offer id [{}]", offer.getId());
        RxDocumentClientImpl.validateResource(offer);

        // Replace is addressed via the offer's own self link; no extra headers/options.
        String selfLinkPath = Utils.joinPath(offer.getSelfLink(), null);
        RxDocumentServiceRequest replaceRequest = RxDocumentServiceRequest.create(
            OperationType.Replace, ResourceType.Offer, selfLinkPath, offer, null, null);
        return this.replace(replaceRequest, documentClientRetryPolicy)
            .map(response -> toResourceResponse(response, Offer.class));
    } catch (Exception e) {
        logger.debug("Failure in replacing an Offer due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
@Override
public Mono<ResourceResponse<Offer>> readOffer(String offerLink) {
    // Defer to the internal read under a fresh retry policy.
    DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> readOfferInternal(offerLink, retryPolicy),
        retryPolicy);
}
// Performs the actual offer read; any synchronous failure is surfaced as Mono.error.
private Mono<ResourceResponse<Offer>> readOfferInternal(String offerLink, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(offerLink)) {
            throw new IllegalArgumentException("offerLink");
        }
        logger.debug("Reading an Offer. offerLink [{}]", offerLink);

        String offerPath = Utils.joinPath(offerLink, null);
        // The cast on the null header map selects the header-map overload of create().
        RxDocumentServiceRequest readRequest = RxDocumentServiceRequest.create(
            OperationType.Read, ResourceType.Offer, offerPath, (HashMap<String, String>) null, null);

        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(readRequest);
        }
        return this.read(readRequest, retryPolicyInstance)
            .map(response -> toResourceResponse(response, Offer.class));
    } catch (Exception e) {
        logger.debug("Failure in reading an Offer due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
@Override
public Flux<FeedResponse<Offer>> readOffers(FeedOptions options) {
    // Offers are read as a feed from the top-level /offers path segment.
    String offersPath = Utils.joinPath(Paths.OFFERS_PATH_SEGMENT, null);
    return readFeed(options, ResourceType.Offer, Offer.class, offersPath);
}
// Reads a feed of collection-child resources page by page. Unlike readFeed below, each
// request is first resolved against the collection cache so partition-key information can
// be attached before execution. Page size defaults to -1 (service default) when unset.
private <T extends Resource> Flux<FeedResponse<T>> readFeedCollectionChild(FeedOptions options, ResourceType resourceType,
Class<T> klass, String resourceLink) {
if (options == null) {
options = new FeedOptions();
}
int maxPageSize = options.maxItemCount() != null ? options.maxItemCount() : -1;
// Effectively-final copy so the lambdas below can capture the (possibly replaced) options.
final FeedOptions finalFeedOptions = options;
RequestOptions requestOptions = new RequestOptions();
requestOptions.setPartitionKey(options.partitionKey());
// Builds one page request: carries the continuation token (if any) and the page size header.
BiFunction<String, Integer, RxDocumentServiceRequest> createRequestFunc = (continuationToken, pageSize) -> {
Map<String, String> requestHeaders = new HashMap<>();
if (continuationToken != null) {
requestHeaders.put(HttpConstants.HttpHeaders.CONTINUATION, continuationToken);
}
requestHeaders.put(HttpConstants.HttpHeaders.PAGE_SIZE, Integer.toString(pageSize));
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.ReadFeed,
resourceType, resourceLink, requestHeaders, finalFeedOptions);
return request;
};
// Executes one page request: resolve the owning collection, attach partition-key
// information, then issue the read-feed call under a fresh retry policy.
Function<RxDocumentServiceRequest, Mono<FeedResponse<T>>> executeFunc = request -> {
return ObservableHelper.inlineIfPossibleAsObs(() -> {
Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = this.collectionCache.resolveCollectionAsync(request);
Mono<RxDocumentServiceRequest> requestObs = this.addPartitionKeyInformation(request, null, null, requestOptions, collectionObs);
return requestObs.flatMap(req -> this.readFeed(req)
.map(response -> toFeedResponsePage(response, klass)));
}, this.resetSessionTokenRetryPolicy.getRequestPolicy());
};
return Paginator.getPaginatedQueryResultAsObservable(options, createRequestFunc, executeFunc, klass, maxPageSize);
}
// Generic paginated read-feed over any resource link. No collection/partition-key
// resolution happens here (contrast with readFeedCollectionChild above).
private <T extends Resource> Flux<FeedResponse<T>> readFeed(FeedOptions options, ResourceType resourceType, Class<T> klass, String resourceLink) {
if (options == null) {
options = new FeedOptions();
}
// -1 signals "no explicit page size" to the paginator.
int maxPageSize = options.maxItemCount() != null ? options.maxItemCount() : -1;
final FeedOptions finalFeedOptions = options;
// Builds one page request with continuation token and page size headers.
BiFunction<String, Integer, RxDocumentServiceRequest> createRequestFunc = (continuationToken, pageSize) -> {
Map<String, String> requestHeaders = new HashMap<>();
if (continuationToken != null) {
requestHeaders.put(HttpConstants.HttpHeaders.CONTINUATION, continuationToken);
}
requestHeaders.put(HttpConstants.HttpHeaders.PAGE_SIZE, Integer.toString(pageSize));
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.ReadFeed,
resourceType, resourceLink, requestHeaders, finalFeedOptions);
return request;
};
// Executes one page request under a fresh session-token-reset retry policy.
Function<RxDocumentServiceRequest, Mono<FeedResponse<T>>> executeFunc = request -> {
return ObservableHelper.inlineIfPossibleAsObs(() -> readFeed(request).map(response -> toFeedResponsePage(response, klass)),
this.resetSessionTokenRetryPolicy.getRequestPolicy());
};
return Paginator.getPaginatedQueryResultAsObservable(options, createRequestFunc, executeFunc, klass, maxPageSize);
}
@Override
public Flux<FeedResponse<Offer>> queryOffers(String query, FeedOptions options) {
    // Wrap the raw query text and delegate to the SqlQuerySpec overload.
    SqlQuerySpec querySpec = new SqlQuerySpec(query);
    return queryOffers(querySpec, options);
}
@Override
public Flux<FeedResponse<Offer>> queryOffers(SqlQuerySpec querySpec, FeedOptions options) {
    // Offers are not scoped to a parent resource, hence the null parent link.
    return createQuery(null, querySpec, options, Offer.class, ResourceType.Offer);
}
@Override
public Mono<DatabaseAccount> getDatabaseAccount() {
    // Defer to the internal read under a fresh retry policy.
    DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> getDatabaseAccountInternal(retryPolicy),
        retryPolicy);
}
// Reads the account-level DatabaseAccount resource; synchronous failures become Mono.error.
private Mono<DatabaseAccount> getDatabaseAccountInternal(DocumentClientRetryPolicy documentClientRetryPolicy) {
    try {
        logger.debug("Getting Database Account");
        // An empty resource link addresses the account itself; the cast on the null
        // header map selects the header-map overload of create().
        RxDocumentServiceRequest readRequest = RxDocumentServiceRequest.create(
            OperationType.Read,
            ResourceType.DatabaseAccount,
            "",
            (HashMap<String, String>) null,
            null);
        return this.read(readRequest, documentClientRetryPolicy)
            .map(response -> toDatabaseAccount(response));
    } catch (Exception e) {
        logger.debug("Failure in getting Database Account due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
// Exposes the session container as an opaque Object (paired with setSession below).
public Object getSession() {
return this.sessionContainer;
}
// Replaces the session container; the argument must be a SessionContainer
// (the cast will throw ClassCastException otherwise).
public void setSession(Object sessionContainer) {
this.sessionContainer = (SessionContainer) sessionContainer;
}
// Accessor for the client's partition key range cache.
public RxPartitionKeyRangeCache getPartitionKeyRangeCache() {
return partitionKeyRangeCache;
}
// Reads the DatabaseAccount directly from the supplied endpoint (via an endpoint
// override) and, as a side effect, refreshes the useMultipleWriteLocations flag.
// Failures are logged at warn level before propagating.
public Flux<DatabaseAccount> getDatabaseAccountFromEndpoint(URI endpoint) {
return Flux.defer(() -> {
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Read,
ResourceType.DatabaseAccount, "", null, (Object) null);
this.populateHeaders(request, RequestVerb.GET);
// Force the request to the given endpoint instead of the normally-resolved one.
request.setEndpointOverride(endpoint);
return this.gatewayProxy.processMessage(request).doOnError(e -> {
String message = String.format("Failed to retrieve database account information. %s",
e.getCause() != null
? e.getCause().toString()
: e.toString());
logger.warn(message);
}).map(rsp -> rsp.getResource(DatabaseAccount.class))
.doOnNext(databaseAccount -> {
// Multi-write is enabled only when both the client policy and the account allow it.
this.useMultipleWriteLocations = this.connectionPolicy.getUsingMultipleWriteLocations()
&& BridgeInternal.isEnableMultipleWriteLocations(databaseAccount);
});
});
}
/**
 * Certain requests must be routed through gateway even when the client connectivity mode is direct.
 *
 * @param request the service request being dispatched
 * @return the gateway proxy or the direct store model that should execute the request
 */
private RxStoreModel getStoreProxy(RxDocumentServiceRequest request) {
// An explicit gateway-mode flag on the request always wins.
if (request.UseGatewayMode) {
return this.gatewayProxy;
}
ResourceType resourceType = request.getResourceType();
OperationType operationType = request.getOperationType();
// Offers, script operations other than execute, and partition key ranges are gateway-only.
if (resourceType == ResourceType.Offer ||
resourceType.isScript() && operationType != OperationType.ExecuteJavaScript ||
resourceType == ResourceType.PartitionKeyRange) {
return this.gatewayProxy;
}
// Create/Upsert of databases, users, collections and permissions goes through the gateway;
// everything else goes direct to the store.
if (operationType == OperationType.Create
|| operationType == OperationType.Upsert) {
if (resourceType == ResourceType.Database ||
resourceType == ResourceType.User ||
resourceType == ResourceType.DocumentCollection ||
resourceType == ResourceType.Permission) {
return this.gatewayProxy;
} else {
return this.storeModel;
}
} else if (operationType == OperationType.Delete) {
if (resourceType == ResourceType.Database ||
resourceType == ResourceType.User ||
resourceType == ResourceType.DocumentCollection) {
return this.gatewayProxy;
} else {
return this.storeModel;
}
} else if (operationType == OperationType.Replace) {
if (resourceType == ResourceType.DocumentCollection) {
return this.gatewayProxy;
} else {
return this.storeModel;
}
} else if (operationType == OperationType.Read) {
if (resourceType == ResourceType.DocumentCollection) {
return this.gatewayProxy;
} else {
return this.storeModel;
}
} else {
// Queries over collection children without an explicit partition key range identity
// are routed through the gateway; all remaining operations go direct.
if ((request.getOperationType() == OperationType.Query || request.getOperationType() == OperationType.SqlQuery) &&
Utils.isCollectionChild(request.getResourceType())) {
if (request.getPartitionKeyRangeIdentity() == null) {
return this.gatewayProxy;
}
}
return this.storeModel;
}
}
// Releases client-owned resources in order: global endpoint manager, store client
// factory, then the HTTP client. Each step is best-effort so later cleanup still runs.
@Override
public void close() {
logger.info("Shutting down ...");
logger.info("Closing Global Endpoint Manager ...");
LifeCycleUtils.closeQuietly(this.globalEndpointManager);
logger.info("Closing StoreClientFactory ...");
LifeCycleUtils.closeQuietly(this.storeClientFactory);
logger.info("Shutting down reactorHttpClient ...");
try {
this.reactorHttpClient.shutdown();
} catch (Exception e) {
// Log and continue: a failed HTTP-client shutdown should not abort close().
logger.warn("shutting down reactorHttpClient failed", e);
}
logger.info("Shutting down completed.");
}
} |
We may need to create a resource group name; the logic will need to be handled slightly differently here. When tests run in an Azure Pipeline, we should trust that the resource group name refers to an existing resource group. If there is no resource group environment variable, we need to create the resource group ourselves — this is needed when running tests locally. Additionally, running tests locally could also support using the environment variable for the resource group, but in that case it shouldn't create the group; if the group doesn't exist, that is the responsibility of the person running the tests, and we need to document this well. | public void createResourceGroup() {
if (resourceGroup == null) {
String resourceGroupName = Configuration.getGlobalConfiguration().get(AZURE_RESOURCEGROUP_NAME);
System.out.println("Creating Resource Group: " + resourceGroupName);
resourceGroup = azure.resourceGroups()
.define(resourceGroupName)
.withRegion(location)
.create();
}
} | public void createResourceGroup() {
String resourceGroupName = Configuration.getGlobalConfiguration().get(AZURE_RESOURCEGROUP_NAME);
if (azure.resourceGroups().checkExistence(resourceGroupName)) {
System.out.println("Fetching Resource Group: " + resourceGroupName);
resourceGroup = azure.resourceGroups()
.getByName(resourceGroupName);
} else {
System.out.println("Creating Resource Group: " + resourceGroupName);
resourceGroup = azure.resourceGroups()
.define(resourceGroupName)
.withRegion(location)
.create();
}
} | class variables
* to be retrieved later.
*/
public void initialize() {
validate();
if (azure == null) {
azure = Azure.configure()
.authenticate(azureTokenCredentials)
.withSubscription(subscriptionId);
}
} | class variables
* to be retrieved later.
*/
public void initialize() {
validate();
if (azure == null) {
azure = Azure.configure()
.authenticate(azureTokenCredentials)
.withSubscription(subscriptionId);
}
} | |
Any particular reason we are adding a call to resource group creation but removing the one for group deletion? | public static void afterAll() {
} | public static void afterAll() {
} | class SearchServiceTestBase extends TestBase {
private static final String DEFAULT_DNS_SUFFIX = "search.windows.net";
private static final String DOGFOOD_DNS_SUFFIX = "search-dogfood.windows-int.net";
private static final String FAKE_DESCRIPTION = "Some data source";
private static final String AZURE_TEST_MODE = "AZURE_TEST_MODE";
private static final String AZURE_SQL_CONN_STRING_READONLY_PLAYGROUND =
"Server=tcp:azs-playground.database.windows.net,1433;Database=usgs;User ID=reader;Password=EdrERBt3j6mZDP;Trusted_Connection=False;Encrypt=True;Connection Timeout=30;";
private static final ObjectMapper OBJECT_MAPPER;
static {
OBJECT_MAPPER = new ObjectMapper();
SimpleDateFormat df = new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ss'Z'");
df.setTimeZone(TimeZone.getDefault());
OBJECT_MAPPER.setDateFormat(df);
OBJECT_MAPPER.registerModule(new JavaTimeModule());
OBJECT_MAPPER.configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false);
}
static final String HOTEL_INDEX_NAME = "hotels";
static final String BLOB_DATASOURCE_NAME = "azs-java-live-blob";
static final String BLOB_DATASOURCE_TEST_NAME = "azs-java-test-blob";
static final String SQL_DATASOURCE_NAME = "azs-java-test-sql";
private String searchServiceName;
private String searchDnsSuffix;
protected String endpoint;
SearchApiKeyCredential searchApiKeyCredential;
private static String testEnvironment;
private static AzureSearchResources azureSearchResources;
@Rule
public TestName testName = new TestName();
@BeforeAll
public static void beforeAll() {
initializeAzureResources();
azureSearchResources.createResourceGroup();
}
@AfterAll
@Override
protected void beforeTest() {
searchDnsSuffix = testEnvironment.equals("DOGFOOD") ? DOGFOOD_DNS_SUFFIX : DEFAULT_DNS_SUFFIX;
if (!interceptorManager.isPlaybackMode()) {
azureSearchResources.initialize();
azureSearchResources.createService(testResourceNamer);
searchApiKeyCredential = new SearchApiKeyCredential(azureSearchResources.getSearchAdminKey());
}
searchServiceName = azureSearchResources.getSearchServiceName();
endpoint = String.format("https:
}
@Override
protected void afterTest() {
super.afterTest();
azureSearchResources.deleteService();
}
protected SearchServiceClientBuilder getSearchServiceClientBuilder() {
return getSearchServiceClientBuilderWithHttpPipelinePolicies(null);
}
/**
* Provides a way to inject custom HTTP pipeline policies before the client is instantiated
*
* @param policies the additional HTTP pipeline policies
* @return {@link SearchServiceClientBuilder}
*/
SearchServiceClientBuilder getSearchServiceClientBuilderWithHttpPipelinePolicies(
List<HttpPipelinePolicy> policies) {
SearchServiceClientBuilder builder = new SearchServiceClientBuilder()
.endpoint(endpoint);
if (interceptorManager.isPlaybackMode()) {
builder.httpClient(interceptorManager.getPlaybackClient());
addPolicies(builder, policies);
return builder;
}
addPolicies(builder, policies);
builder.httpClient(new NettyAsyncHttpClientBuilder().wiretap(true).build())
.credential(searchApiKeyCredential);
if (!liveMode()) {
builder.addPolicy(interceptorManager.getRecordPolicy());
}
return builder;
}
private void addPolicies(SearchServiceClientBuilder builder, List<HttpPipelinePolicy> policies) {
if (policies != null && policies.size() > 0) {
for (HttpPipelinePolicy policy : policies) {
builder.addPolicy(policy);
}
}
}
Index createTestIndex() {
Map<String, Double> weights = new HashMap<>();
weights.put("Description", 1.5);
weights.put("Category", 2.0);
return new Index()
.setName(HOTEL_INDEX_NAME)
.setFields(Arrays.asList(
new Field()
.setName("HotelId")
.setType(DataType.EDM_STRING)
.setKey(Boolean.TRUE)
.setFilterable(Boolean.TRUE)
.setSortable(Boolean.TRUE)
.setFacetable(Boolean.TRUE)
.setRetrievable(Boolean.TRUE),
new Field()
.setName("HotelName")
.setType(DataType.EDM_STRING)
.setSearchable(Boolean.TRUE)
.setFilterable(Boolean.TRUE)
.setSortable(Boolean.TRUE)
.setRetrievable(Boolean.TRUE),
new Field()
.setName("Description")
.setType(DataType.EDM_STRING)
.setSearchable(Boolean.TRUE)
.setAnalyzer(AnalyzerName.EN_LUCENE.toString())
.setRetrievable(Boolean.TRUE),
new Field()
.setName("DescriptionFr")
.setType(DataType.EDM_STRING)
.setSearchable(Boolean.TRUE)
.setAnalyzer(AnalyzerName.FR_LUCENE.toString())
.setRetrievable(Boolean.TRUE),
new Field()
.setName("Description_Custom")
.setType(DataType.EDM_STRING)
.setSearchable(Boolean.TRUE)
.setSearchAnalyzer(AnalyzerName.STOP.toString())
.setIndexAnalyzer(AnalyzerName.STOP.toString())
.setRetrievable(Boolean.TRUE),
new Field()
.setName("Category")
.setType(DataType.EDM_STRING)
.setSearchable(Boolean.TRUE)
.setFilterable(Boolean.TRUE)
.setSortable(Boolean.TRUE)
.setFacetable(Boolean.TRUE)
.setRetrievable(Boolean.TRUE),
new Field()
.setName("Tags")
.setType(DataType.Collection(DataType.EDM_STRING))
.setSearchable(Boolean.TRUE)
.setFilterable(Boolean.TRUE)
.setFacetable(Boolean.TRUE)
.setRetrievable(Boolean.TRUE),
new Field()
.setName("ParkingIncluded")
.setType(DataType.EDM_BOOLEAN)
.setFilterable(Boolean.TRUE)
.setSortable(Boolean.TRUE)
.setFacetable(Boolean.TRUE)
.setRetrievable(Boolean.TRUE),
new Field()
.setName("SmokingAllowed")
.setType(DataType.EDM_BOOLEAN)
.setFilterable(Boolean.TRUE)
.setSortable(Boolean.TRUE)
.setFacetable(Boolean.TRUE)
.setRetrievable(Boolean.TRUE),
new Field()
.setName("LastRenovationDate")
.setType(DataType.EDM_DATE_TIME_OFFSET)
.setFilterable(Boolean.TRUE)
.setSortable(Boolean.TRUE)
.setFacetable(Boolean.TRUE)
.setRetrievable(Boolean.TRUE),
new Field()
.setName("Rating")
.setType(DataType.EDM_INT32)
.setFilterable(Boolean.TRUE)
.setSortable(Boolean.TRUE)
.setFacetable(Boolean.TRUE)
.setRetrievable(Boolean.TRUE),
new Field()
.setName("Address")
.setType(DataType.EDM_COMPLEX_TYPE)
.setFields(Arrays.asList(
new Field()
.setName("StreetAddress")
.setType(DataType.EDM_STRING)
.setSearchable(Boolean.TRUE)
.setRetrievable(Boolean.TRUE),
new Field()
.setName("City")
.setType(DataType.EDM_STRING)
.setSearchable(Boolean.TRUE)
.setFilterable(Boolean.TRUE)
.setSortable(Boolean.TRUE)
.setFacetable(Boolean.TRUE)
.setRetrievable(Boolean.TRUE),
new Field()
.setName("StateProvince")
.setType(DataType.EDM_STRING)
.setSearchable(Boolean.TRUE)
.setFilterable(Boolean.TRUE)
.setSortable(Boolean.TRUE)
.setFacetable(Boolean.TRUE)
.setRetrievable(Boolean.TRUE),
new Field()
.setName("Country")
.setType(DataType.EDM_STRING)
.setSearchable(Boolean.TRUE)
.setFilterable(Boolean.TRUE)
.setSortable(Boolean.TRUE)
.setFacetable(Boolean.TRUE)
.setRetrievable(Boolean.TRUE),
new Field()
.setName("PostalCode")
.setType(DataType.EDM_STRING)
.setSearchable(Boolean.TRUE)
.setFilterable(Boolean.TRUE)
.setSortable(Boolean.TRUE)
.setFacetable(Boolean.TRUE)
.setRetrievable(Boolean.TRUE)
)
),
new Field()
.setName("Location")
.setType(DataType.EDM_GEOGRAPHY_POINT)
.setFilterable(Boolean.TRUE)
.setSortable(Boolean.TRUE)
.setRetrievable(Boolean.TRUE)
.setRetrievable(Boolean.TRUE),
new Field()
.setName("Rooms")
.setType(DataType.Collection(DataType.EDM_COMPLEX_TYPE))
.setFields(Arrays.asList(
new Field()
.setName("Description")
.setType(DataType.EDM_STRING)
.setSearchable(Boolean.TRUE)
.setRetrievable(Boolean.TRUE)
.setAnalyzer(AnalyzerName.EN_LUCENE.toString()),
new Field()
.setName("DescriptionFr")
.setType(DataType.EDM_STRING)
.setSearchable(Boolean.TRUE)
.setRetrievable(Boolean.TRUE)
.setAnalyzer(AnalyzerName.FR_LUCENE.toString()),
new Field()
.setName("Type")
.setType(DataType.EDM_STRING)
.setSearchable(Boolean.TRUE)
.setFilterable(Boolean.TRUE)
.setFacetable(Boolean.TRUE)
.setRetrievable(Boolean.TRUE),
new Field()
.setName("BaseRate")
.setType(DataType.EDM_DOUBLE)
.setKey(Boolean.FALSE)
.setFilterable(Boolean.TRUE)
.setFacetable(Boolean.TRUE)
.setRetrievable(Boolean.TRUE),
new Field()
.setName("BedOptions")
.setType(DataType.EDM_STRING)
.setSearchable(Boolean.TRUE)
.setFilterable(Boolean.TRUE)
.setFacetable(Boolean.TRUE)
.setRetrievable(Boolean.TRUE),
new Field()
.setName("SleepsCount")
.setType(DataType.EDM_INT32)
.setFilterable(Boolean.TRUE)
.setFacetable(Boolean.TRUE)
.setRetrievable(Boolean.TRUE),
new Field()
.setName("SmokingAllowed")
.setType(DataType.EDM_BOOLEAN)
.setFilterable(Boolean.TRUE)
.setFacetable(Boolean.TRUE)
.setRetrievable(Boolean.TRUE),
new Field()
.setName("Tags")
.setType(DataType.Collection(DataType.EDM_STRING))
.setSearchable(Boolean.TRUE)
.setFilterable(Boolean.TRUE)
.setFacetable(Boolean.TRUE)
.setRetrievable(Boolean.TRUE)
)
),
new Field()
.setName("TotalGuests")
.setType(DataType.EDM_INT64)
.setFilterable(Boolean.TRUE)
.setSortable(Boolean.TRUE)
.setFacetable(Boolean.TRUE),
new Field()
.setName("ProfitMargin")
.setType(DataType.EDM_DOUBLE)
)
)
.setScoringProfiles(Arrays.asList(
new ScoringProfile()
.setName("MyProfile")
.setFunctionAggregation(ScoringFunctionAggregation.AVERAGE)
.setFunctions(Arrays.asList(
new MagnitudeScoringFunction()
.setParameters(new MagnitudeScoringParameters()
.setBoostingRangeStart(1)
.setBoostingRangeEnd(4)
.setShouldBoostBeyondRangeByConstant(true))
.setFieldName("Rating")
.setBoost(2.0)
.setInterpolation(ScoringFunctionInterpolation.CONSTANT),
new DistanceScoringFunction()
.setParameters(new DistanceScoringParameters()
.setBoostingDistance(5)
.setReferencePointParameter("Loc"))
.setFieldName("Location")
.setBoost(1.5)
.setInterpolation(ScoringFunctionInterpolation.LINEAR),
new FreshnessScoringFunction()
.setParameters(new FreshnessScoringParameters()
.setBoostingDuration(Duration.ofDays(365)))
.setFieldName("LastRenovationDate")
.setBoost(1.1)
.setInterpolation(ScoringFunctionInterpolation.LOGARITHMIC)
))
.setTextWeights(new TextWeights()
.setWeights(weights)),
new ScoringProfile()
.setName("ProfileTwo")
.setFunctionAggregation(ScoringFunctionAggregation.MAXIMUM)
.setFunctions(Collections.singletonList(
new TagScoringFunction()
.setParameters(new TagScoringParameters().setTagsParameter("MyTags"))
.setFieldName("Tags")
.setBoost(1.5)
.setInterpolation(ScoringFunctionInterpolation.LINEAR)
)),
new ScoringProfile()
.setName("ProfileThree")
.setFunctionAggregation(ScoringFunctionAggregation.MINIMUM)
.setFunctions(Collections.singletonList(
new MagnitudeScoringFunction()
.setParameters(new MagnitudeScoringParameters()
.setBoostingRangeStart(0)
.setBoostingRangeEnd(10)
.setShouldBoostBeyondRangeByConstant(false))
.setFieldName("Rating")
.setBoost(3.0)
.setInterpolation(ScoringFunctionInterpolation.QUADRATIC)
)),
new ScoringProfile()
.setName("ProfileFour")
.setFunctionAggregation(ScoringFunctionAggregation.FIRST_MATCHING)
.setFunctions(Collections.singletonList(
new MagnitudeScoringFunction()
.setParameters(new MagnitudeScoringParameters()
.setBoostingRangeStart(1)
.setBoostingRangeEnd(5)
.setShouldBoostBeyondRangeByConstant(false))
.setFieldName("Rating")
.setBoost(3.14)
.setInterpolation(ScoringFunctionInterpolation.CONSTANT)
))
))
.setDefaultScoringProfile("MyProfile")
.setCorsOptions(new CorsOptions()
.setAllowedOrigins("http:
.setMaxAgeInSeconds(60L))
.setSuggesters(Collections.singletonList(new Suggester()
.setName("FancySuggester")
.setSourceFields(Collections.singletonList("HotelName"))));
}
DataSource createTestSqlDataSourceObject(DataDeletionDetectionPolicy deletionDetectionPolicy,
DataChangeDetectionPolicy changeDetectionPolicy) {
return DataSources.azureSql(
SearchServiceTestBase.SQL_DATASOURCE_NAME,
AZURE_SQL_CONN_STRING_READONLY_PLAYGROUND,
"GeoNamesRI",
FAKE_DESCRIPTION,
changeDetectionPolicy,
deletionDetectionPolicy
);
}
DataSource createTestSqlDataSourceObject() {
return createTestSqlDataSourceObject(null, null);
}
/**
* create a new blob data source object
* @return the created data source
*/
DataSource createBlobDataSource() {
String storageConnString = "connectionString";
String blobContainerDatasourceName = "container";
if (!interceptorManager.isPlaybackMode()) {
storageConnString = azureSearchResources.createStorageAccount(testResourceNamer);
blobContainerDatasourceName =
azureSearchResources.createBlobContainer(storageConnString, testResourceNamer);
}
return DataSources.azureBlobStorage(
BLOB_DATASOURCE_NAME,
storageConnString,
blobContainerDatasourceName,
"/",
"real live blob",
new SoftDeleteColumnDeletionDetectionPolicy()
.setSoftDeleteColumnName("fieldName")
.setSoftDeleteMarkerValue("someValue")
);
}
private static void initializeAzureResources() {
String appId = Configuration.getGlobalConfiguration().get(Configuration.PROPERTY_AZURE_CLIENT_ID);
String azureDomainId = Configuration.getGlobalConfiguration().get(Configuration.PROPERTY_AZURE_TENANT_ID);
String secret = Configuration.getGlobalConfiguration().get(Configuration.PROPERTY_AZURE_CLIENT_SECRET);
String subscriptionId = Configuration.getGlobalConfiguration().get(Configuration.PROPERTY_AZURE_SUBSCRIPTION_ID);
testEnvironment = Configuration.getGlobalConfiguration().get("AZURE_TEST_ENVIRONMENT");
testEnvironment = (testEnvironment == null) ? "AZURE" : testEnvironment.toUpperCase(Locale.US);
AzureEnvironment environment = testEnvironment.equals("DOGFOOD") ? getDogfoodEnvironment() : AzureEnvironment.AZURE;
ApplicationTokenCredentials applicationTokenCredentials =
new ApplicationTokenCredentials(appId, azureDomainId, secret, environment);
azureSearchResources = new AzureSearchResources(applicationTokenCredentials, subscriptionId, Region.US_WEST2);
}
private static AzureEnvironment getDogfoodEnvironment() {
HashMap<String, String> configuration = new HashMap<>();
configuration.put("portalUrl", "http:
configuration.put("managementEndpointUrl", "https:
configuration.put("resourceManagerEndpointUrl", "https:
configuration.put("activeDirectoryEndpointUrl", "https:
configuration.put("activeDirectoryResourceId", "https:
configuration.put("activeDirectoryGraphResourceId", "https:
configuration.put("activeDirectoryGraphApiVersion", "2013-04-05");
return new AzureEnvironment(configuration);
}
protected SearchIndexClientBuilder getSearchIndexClientBuilder(String indexName) {
SearchIndexClientBuilder builder = new SearchIndexClientBuilder()
.endpoint(String.format("https:
.indexName(indexName);
if (interceptorManager.isPlaybackMode()) {
return builder.httpClient(interceptorManager.getPlaybackClient());
}
builder.httpClient(new NettyAsyncHttpClientBuilder().wiretap(true).build())
.credential(searchApiKeyCredential);
if (!liveMode()) {
builder.addPolicy(interceptorManager.getRecordPolicy());
}
return builder;
}
protected void waitForIndexing() {
sleepIfRunningAgainstService(2000);
}
/**
* If the document schema is known, user can convert the properties to a specific object type
*
* @param cls Class type of the document object to convert to
* @param <T> type
* @return an object of the request type
*/
static <T> T convertToType(Object document, Class<T> cls) {
return OBJECT_MAPPER.convertValue(document, cls);
}
void addFieldToIndex(Index index, Field field) {
List<Field> fields = new ArrayList<>(index.getFields());
fields.add(field);
index.setFields(fields);
}
/**
* Constructs a request options object with client request Id.
* @return a RequestOptions object with ClientRequestId.
*/
protected RequestOptions generateRequestOptions() {
return new RequestOptions().setClientRequestId(UUID.randomUUID());
}
void assertHttpResponseException(Runnable exceptionThrower, HttpResponseStatus expectedResponseStatus,
String expectedMessage) {
try {
exceptionThrower.run();
fail();
} catch (Throwable ex) {
verifyHttpResponseError(ex, expectedResponseStatus, expectedMessage);
}
}
void assertHttpResponseExceptionAsync(Publisher<?> exceptionThrower) {
StepVerifier.create(exceptionThrower)
.verifyErrorSatisfies(error -> verifyHttpResponseError(error, HttpResponseStatus.BAD_REQUEST,
"Invalid expression: Could not find a property named 'ThisFieldDoesNotExist' on type 'search.document'."));
}
private void verifyHttpResponseError(
Throwable ex, HttpResponseStatus expectedResponseStatus, String expectedMessage) {
assertEquals(HttpResponseException.class, ex.getClass());
if (expectedResponseStatus != null) {
assertEquals(
expectedResponseStatus.code(),
((HttpResponseException) ex).getResponse().getStatusCode());
}
if (expectedMessage != null) {
assertTrue(ex.getMessage().contains(expectedMessage));
}
}
ServiceStatistics getExpectedServiceStatistics() {
ServiceCounters serviceCounters = new ServiceCounters()
.setDocumentCounter(new ResourceCounter().setUsage(0).setQuota(null))
.setIndexCounter(new ResourceCounter().setUsage(0).setQuota(3L))
.setIndexerCounter(new ResourceCounter().setUsage(0).setQuota(3L))
.setDataSourceCounter(new ResourceCounter().setUsage(0).setQuota(3L))
.setStorageSizeCounter(new ResourceCounter().setUsage(0).setQuota(52428800L))
.setSynonymMapCounter(new ResourceCounter().setUsage(0).setQuota(3L));
ServiceLimits serviceLimits = new ServiceLimits()
.setMaxFieldsPerIndex(1000)
.setMaxFieldNestingDepthPerIndex(10)
.setMaxComplexCollectionFieldsPerIndex(40)
.setMaxComplexObjectsInCollectionsPerDocument(3000);
return new ServiceStatistics()
.setCounters(serviceCounters)
.setLimits(serviceLimits);
}
static boolean liveMode() {
return setupTestMode() == TestMode.LIVE;
}
static TestMode setupTestMode() {
String testMode = Configuration.getGlobalConfiguration().get(AZURE_TEST_MODE);
if (testMode != null) {
try {
return TestMode.valueOf(testMode.toUpperCase(Locale.US));
} catch (IllegalArgumentException ignore) {
return TestMode.PLAYBACK;
}
}
return TestMode.PLAYBACK;
}
} | class SearchServiceTestBase extends TestBase {
private static final String DEFAULT_DNS_SUFFIX = "search.windows.net";
private static final String DOGFOOD_DNS_SUFFIX = "search-dogfood.windows-int.net";
private static final String FAKE_DESCRIPTION = "Some data source";
private static final String AZURE_TEST_MODE = "AZURE_TEST_MODE";
private static final String AZURE_SQL_CONN_STRING_READONLY_PLAYGROUND =
"Server=tcp:azs-playground.database.windows.net,1433;Database=usgs;User ID=reader;Password=EdrERBt3j6mZDP;Trusted_Connection=False;Encrypt=True;Connection Timeout=30;";
private static final ObjectMapper OBJECT_MAPPER;
static {
OBJECT_MAPPER = new ObjectMapper();
SimpleDateFormat df = new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ss'Z'");
df.setTimeZone(TimeZone.getDefault());
OBJECT_MAPPER.setDateFormat(df);
OBJECT_MAPPER.registerModule(new JavaTimeModule());
OBJECT_MAPPER.configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false);
}
static final String HOTEL_INDEX_NAME = "hotels";
static final String BLOB_DATASOURCE_NAME = "azs-java-live-blob";
static final String BLOB_DATASOURCE_TEST_NAME = "azs-java-test-blob";
static final String SQL_DATASOURCE_NAME = "azs-java-test-sql";
private String searchServiceName;
private String searchDnsSuffix;
protected String endpoint;
SearchApiKeyCredential searchApiKeyCredential;
private static String testEnvironment;
private static AzureSearchResources azureSearchResources;
@Rule
public TestName testName = new TestName();
@BeforeAll
public static void beforeAll() {
initializeAzureResources();
if (!playbackMode()) {
azureSearchResources.initialize();
azureSearchResources.createResourceGroup();
}
}
@AfterAll
@Override
protected void beforeTest() {
searchDnsSuffix = testEnvironment.equals("DOGFOOD") ? DOGFOOD_DNS_SUFFIX : DEFAULT_DNS_SUFFIX;
if (!interceptorManager.isPlaybackMode()) {
azureSearchResources.createService(testResourceNamer);
searchApiKeyCredential = new SearchApiKeyCredential(azureSearchResources.getSearchAdminKey());
}
searchServiceName = azureSearchResources.getSearchServiceName();
endpoint = String.format("https:
}
@Override
protected void afterTest() {
super.afterTest();
azureSearchResources.deleteService();
}
protected SearchServiceClientBuilder getSearchServiceClientBuilder() {
return getSearchServiceClientBuilderWithHttpPipelinePolicies(null);
}
/**
* Provides a way to inject custom HTTP pipeline policies before the client is instantiated
*
* @param policies the additional HTTP pipeline policies
* @return {@link SearchServiceClientBuilder}
*/
SearchServiceClientBuilder getSearchServiceClientBuilderWithHttpPipelinePolicies(
List<HttpPipelinePolicy> policies) {
SearchServiceClientBuilder builder = new SearchServiceClientBuilder()
.endpoint(endpoint);
if (interceptorManager.isPlaybackMode()) {
builder.httpClient(interceptorManager.getPlaybackClient());
addPolicies(builder, policies);
return builder;
}
addPolicies(builder, policies);
builder.httpClient(new NettyAsyncHttpClientBuilder().wiretap(true).build())
.credential(searchApiKeyCredential);
if (!liveMode()) {
builder.addPolicy(interceptorManager.getRecordPolicy());
}
return builder;
}
private void addPolicies(SearchServiceClientBuilder builder, List<HttpPipelinePolicy> policies) {
if (policies != null && policies.size() > 0) {
for (HttpPipelinePolicy policy : policies) {
builder.addPolicy(policy);
}
}
}
Index createTestIndex() {
Map<String, Double> weights = new HashMap<>();
weights.put("Description", 1.5);
weights.put("Category", 2.0);
return new Index()
.setName(HOTEL_INDEX_NAME)
.setFields(Arrays.asList(
new Field()
.setName("HotelId")
.setType(DataType.EDM_STRING)
.setKey(Boolean.TRUE)
.setFilterable(Boolean.TRUE)
.setSortable(Boolean.TRUE)
.setFacetable(Boolean.TRUE)
.setRetrievable(Boolean.TRUE),
new Field()
.setName("HotelName")
.setType(DataType.EDM_STRING)
.setSearchable(Boolean.TRUE)
.setFilterable(Boolean.TRUE)
.setSortable(Boolean.TRUE)
.setRetrievable(Boolean.TRUE),
new Field()
.setName("Description")
.setType(DataType.EDM_STRING)
.setSearchable(Boolean.TRUE)
.setAnalyzer(AnalyzerName.EN_LUCENE.toString())
.setRetrievable(Boolean.TRUE),
new Field()
.setName("DescriptionFr")
.setType(DataType.EDM_STRING)
.setSearchable(Boolean.TRUE)
.setAnalyzer(AnalyzerName.FR_LUCENE.toString())
.setRetrievable(Boolean.TRUE),
new Field()
.setName("Description_Custom")
.setType(DataType.EDM_STRING)
.setSearchable(Boolean.TRUE)
.setSearchAnalyzer(AnalyzerName.STOP.toString())
.setIndexAnalyzer(AnalyzerName.STOP.toString())
.setRetrievable(Boolean.TRUE),
new Field()
.setName("Category")
.setType(DataType.EDM_STRING)
.setSearchable(Boolean.TRUE)
.setFilterable(Boolean.TRUE)
.setSortable(Boolean.TRUE)
.setFacetable(Boolean.TRUE)
.setRetrievable(Boolean.TRUE),
new Field()
.setName("Tags")
.setType(DataType.Collection(DataType.EDM_STRING))
.setSearchable(Boolean.TRUE)
.setFilterable(Boolean.TRUE)
.setFacetable(Boolean.TRUE)
.setRetrievable(Boolean.TRUE),
new Field()
.setName("ParkingIncluded")
.setType(DataType.EDM_BOOLEAN)
.setFilterable(Boolean.TRUE)
.setSortable(Boolean.TRUE)
.setFacetable(Boolean.TRUE)
.setRetrievable(Boolean.TRUE),
new Field()
.setName("SmokingAllowed")
.setType(DataType.EDM_BOOLEAN)
.setFilterable(Boolean.TRUE)
.setSortable(Boolean.TRUE)
.setFacetable(Boolean.TRUE)
.setRetrievable(Boolean.TRUE),
new Field()
.setName("LastRenovationDate")
.setType(DataType.EDM_DATE_TIME_OFFSET)
.setFilterable(Boolean.TRUE)
.setSortable(Boolean.TRUE)
.setFacetable(Boolean.TRUE)
.setRetrievable(Boolean.TRUE),
new Field()
.setName("Rating")
.setType(DataType.EDM_INT32)
.setFilterable(Boolean.TRUE)
.setSortable(Boolean.TRUE)
.setFacetable(Boolean.TRUE)
.setRetrievable(Boolean.TRUE),
new Field()
.setName("Address")
.setType(DataType.EDM_COMPLEX_TYPE)
.setFields(Arrays.asList(
new Field()
.setName("StreetAddress")
.setType(DataType.EDM_STRING)
.setSearchable(Boolean.TRUE)
.setRetrievable(Boolean.TRUE),
new Field()
.setName("City")
.setType(DataType.EDM_STRING)
.setSearchable(Boolean.TRUE)
.setFilterable(Boolean.TRUE)
.setSortable(Boolean.TRUE)
.setFacetable(Boolean.TRUE)
.setRetrievable(Boolean.TRUE),
new Field()
.setName("StateProvince")
.setType(DataType.EDM_STRING)
.setSearchable(Boolean.TRUE)
.setFilterable(Boolean.TRUE)
.setSortable(Boolean.TRUE)
.setFacetable(Boolean.TRUE)
.setRetrievable(Boolean.TRUE),
new Field()
.setName("Country")
.setType(DataType.EDM_STRING)
.setSearchable(Boolean.TRUE)
.setFilterable(Boolean.TRUE)
.setSortable(Boolean.TRUE)
.setFacetable(Boolean.TRUE)
.setRetrievable(Boolean.TRUE),
new Field()
.setName("PostalCode")
.setType(DataType.EDM_STRING)
.setSearchable(Boolean.TRUE)
.setFilterable(Boolean.TRUE)
.setSortable(Boolean.TRUE)
.setFacetable(Boolean.TRUE)
.setRetrievable(Boolean.TRUE)
)
),
new Field()
.setName("Location")
.setType(DataType.EDM_GEOGRAPHY_POINT)
.setFilterable(Boolean.TRUE)
.setSortable(Boolean.TRUE)
.setRetrievable(Boolean.TRUE)
.setRetrievable(Boolean.TRUE),
new Field()
.setName("Rooms")
.setType(DataType.Collection(DataType.EDM_COMPLEX_TYPE))
.setFields(Arrays.asList(
new Field()
.setName("Description")
.setType(DataType.EDM_STRING)
.setSearchable(Boolean.TRUE)
.setRetrievable(Boolean.TRUE)
.setAnalyzer(AnalyzerName.EN_LUCENE.toString()),
new Field()
.setName("DescriptionFr")
.setType(DataType.EDM_STRING)
.setSearchable(Boolean.TRUE)
.setRetrievable(Boolean.TRUE)
.setAnalyzer(AnalyzerName.FR_LUCENE.toString()),
new Field()
.setName("Type")
.setType(DataType.EDM_STRING)
.setSearchable(Boolean.TRUE)
.setFilterable(Boolean.TRUE)
.setFacetable(Boolean.TRUE)
.setRetrievable(Boolean.TRUE),
new Field()
.setName("BaseRate")
.setType(DataType.EDM_DOUBLE)
.setKey(Boolean.FALSE)
.setFilterable(Boolean.TRUE)
.setFacetable(Boolean.TRUE)
.setRetrievable(Boolean.TRUE),
new Field()
.setName("BedOptions")
.setType(DataType.EDM_STRING)
.setSearchable(Boolean.TRUE)
.setFilterable(Boolean.TRUE)
.setFacetable(Boolean.TRUE)
.setRetrievable(Boolean.TRUE),
new Field()
.setName("SleepsCount")
.setType(DataType.EDM_INT32)
.setFilterable(Boolean.TRUE)
.setFacetable(Boolean.TRUE)
.setRetrievable(Boolean.TRUE),
new Field()
.setName("SmokingAllowed")
.setType(DataType.EDM_BOOLEAN)
.setFilterable(Boolean.TRUE)
.setFacetable(Boolean.TRUE)
.setRetrievable(Boolean.TRUE),
new Field()
.setName("Tags")
.setType(DataType.Collection(DataType.EDM_STRING))
.setSearchable(Boolean.TRUE)
.setFilterable(Boolean.TRUE)
.setFacetable(Boolean.TRUE)
.setRetrievable(Boolean.TRUE)
)
),
new Field()
.setName("TotalGuests")
.setType(DataType.EDM_INT64)
.setFilterable(Boolean.TRUE)
.setSortable(Boolean.TRUE)
.setFacetable(Boolean.TRUE),
new Field()
.setName("ProfitMargin")
.setType(DataType.EDM_DOUBLE)
)
)
.setScoringProfiles(Arrays.asList(
new ScoringProfile()
.setName("MyProfile")
.setFunctionAggregation(ScoringFunctionAggregation.AVERAGE)
.setFunctions(Arrays.asList(
new MagnitudeScoringFunction()
.setParameters(new MagnitudeScoringParameters()
.setBoostingRangeStart(1)
.setBoostingRangeEnd(4)
.setShouldBoostBeyondRangeByConstant(true))
.setFieldName("Rating")
.setBoost(2.0)
.setInterpolation(ScoringFunctionInterpolation.CONSTANT),
new DistanceScoringFunction()
.setParameters(new DistanceScoringParameters()
.setBoostingDistance(5)
.setReferencePointParameter("Loc"))
.setFieldName("Location")
.setBoost(1.5)
.setInterpolation(ScoringFunctionInterpolation.LINEAR),
new FreshnessScoringFunction()
.setParameters(new FreshnessScoringParameters()
.setBoostingDuration(Duration.ofDays(365)))
.setFieldName("LastRenovationDate")
.setBoost(1.1)
.setInterpolation(ScoringFunctionInterpolation.LOGARITHMIC)
))
.setTextWeights(new TextWeights()
.setWeights(weights)),
new ScoringProfile()
.setName("ProfileTwo")
.setFunctionAggregation(ScoringFunctionAggregation.MAXIMUM)
.setFunctions(Collections.singletonList(
new TagScoringFunction()
.setParameters(new TagScoringParameters().setTagsParameter("MyTags"))
.setFieldName("Tags")
.setBoost(1.5)
.setInterpolation(ScoringFunctionInterpolation.LINEAR)
)),
new ScoringProfile()
.setName("ProfileThree")
.setFunctionAggregation(ScoringFunctionAggregation.MINIMUM)
.setFunctions(Collections.singletonList(
new MagnitudeScoringFunction()
.setParameters(new MagnitudeScoringParameters()
.setBoostingRangeStart(0)
.setBoostingRangeEnd(10)
.setShouldBoostBeyondRangeByConstant(false))
.setFieldName("Rating")
.setBoost(3.0)
.setInterpolation(ScoringFunctionInterpolation.QUADRATIC)
)),
new ScoringProfile()
.setName("ProfileFour")
.setFunctionAggregation(ScoringFunctionAggregation.FIRST_MATCHING)
.setFunctions(Collections.singletonList(
new MagnitudeScoringFunction()
.setParameters(new MagnitudeScoringParameters()
.setBoostingRangeStart(1)
.setBoostingRangeEnd(5)
.setShouldBoostBeyondRangeByConstant(false))
.setFieldName("Rating")
.setBoost(3.14)
.setInterpolation(ScoringFunctionInterpolation.CONSTANT)
))
))
.setDefaultScoringProfile("MyProfile")
.setCorsOptions(new CorsOptions()
.setAllowedOrigins("http:
.setMaxAgeInSeconds(60L))
.setSuggesters(Collections.singletonList(new Suggester()
.setName("FancySuggester")
.setSourceFields(Collections.singletonList("HotelName"))));
}
DataSource createTestSqlDataSourceObject(DataDeletionDetectionPolicy deletionDetectionPolicy,
DataChangeDetectionPolicy changeDetectionPolicy) {
return DataSources.azureSql(
SearchServiceTestBase.SQL_DATASOURCE_NAME,
AZURE_SQL_CONN_STRING_READONLY_PLAYGROUND,
"GeoNamesRI",
FAKE_DESCRIPTION,
changeDetectionPolicy,
deletionDetectionPolicy
);
}
DataSource createTestSqlDataSourceObject() {
return createTestSqlDataSourceObject(null, null);
}
/**
* create a new blob data source object
* @return the created data source
*/
DataSource createBlobDataSource() {
String storageConnString = "connectionString";
String blobContainerDatasourceName = "container";
if (!interceptorManager.isPlaybackMode()) {
storageConnString = azureSearchResources.createStorageAccount(testResourceNamer);
blobContainerDatasourceName =
azureSearchResources.createBlobContainer(storageConnString, testResourceNamer);
}
return DataSources.azureBlobStorage(
BLOB_DATASOURCE_NAME,
storageConnString,
blobContainerDatasourceName,
"/",
"real live blob",
new SoftDeleteColumnDeletionDetectionPolicy()
.setSoftDeleteColumnName("fieldName")
.setSoftDeleteMarkerValue("someValue")
);
}
private static void initializeAzureResources() {
String appId = Configuration.getGlobalConfiguration().get(Configuration.PROPERTY_AZURE_CLIENT_ID);
String azureDomainId = Configuration.getGlobalConfiguration().get(Configuration.PROPERTY_AZURE_TENANT_ID);
String secret = Configuration.getGlobalConfiguration().get(Configuration.PROPERTY_AZURE_CLIENT_SECRET);
String subscriptionId = Configuration.getGlobalConfiguration().get(Configuration.PROPERTY_AZURE_SUBSCRIPTION_ID);
testEnvironment = Configuration.getGlobalConfiguration().get("AZURE_TEST_ENVIRONMENT");
testEnvironment = (testEnvironment == null) ? "AZURE" : testEnvironment.toUpperCase(Locale.US);
AzureEnvironment environment = testEnvironment.equals("DOGFOOD") ? getDogfoodEnvironment() : AzureEnvironment.AZURE;
ApplicationTokenCredentials applicationTokenCredentials =
new ApplicationTokenCredentials(appId, azureDomainId, secret, environment);
azureSearchResources = new AzureSearchResources(applicationTokenCredentials, subscriptionId, Region.US_WEST2);
}
private static AzureEnvironment getDogfoodEnvironment() {
HashMap<String, String> configuration = new HashMap<>();
configuration.put("portalUrl", "http:
configuration.put("managementEndpointUrl", "https:
configuration.put("resourceManagerEndpointUrl", "https:
configuration.put("activeDirectoryEndpointUrl", "https:
configuration.put("activeDirectoryResourceId", "https:
configuration.put("activeDirectoryGraphResourceId", "https:
configuration.put("activeDirectoryGraphApiVersion", "2013-04-05");
return new AzureEnvironment(configuration);
}
protected SearchIndexClientBuilder getSearchIndexClientBuilder(String indexName) {
SearchIndexClientBuilder builder = new SearchIndexClientBuilder()
.endpoint(String.format("https:
.indexName(indexName);
if (interceptorManager.isPlaybackMode()) {
return builder.httpClient(interceptorManager.getPlaybackClient());
}
builder.httpClient(new NettyAsyncHttpClientBuilder().wiretap(true).build())
.credential(searchApiKeyCredential);
if (!liveMode()) {
builder.addPolicy(interceptorManager.getRecordPolicy());
}
return builder;
}
protected void waitForIndexing() {
sleepIfRunningAgainstService(2000);
}
/**
* If the document schema is known, user can convert the properties to a specific object type
*
* @param cls Class type of the document object to convert to
* @param <T> type
* @return an object of the request type
*/
static <T> T convertToType(Object document, Class<T> cls) {
return OBJECT_MAPPER.convertValue(document, cls);
}
void addFieldToIndex(Index index, Field field) {
List<Field> fields = new ArrayList<>(index.getFields());
fields.add(field);
index.setFields(fields);
}
/**
* Constructs a request options object with client request Id.
* @return a RequestOptions object with ClientRequestId.
*/
protected RequestOptions generateRequestOptions() {
return new RequestOptions().setClientRequestId(UUID.randomUUID());
}
void assertHttpResponseException(Runnable exceptionThrower, HttpResponseStatus expectedResponseStatus,
String expectedMessage) {
try {
exceptionThrower.run();
fail();
} catch (Throwable ex) {
verifyHttpResponseError(ex, expectedResponseStatus, expectedMessage);
}
}
void assertHttpResponseExceptionAsync(Publisher<?> exceptionThrower) {
StepVerifier.create(exceptionThrower)
.verifyErrorSatisfies(error -> verifyHttpResponseError(error, HttpResponseStatus.BAD_REQUEST,
"Invalid expression: Could not find a property named 'ThisFieldDoesNotExist' on type 'search.document'."));
}
private void verifyHttpResponseError(
Throwable ex, HttpResponseStatus expectedResponseStatus, String expectedMessage) {
assertEquals(HttpResponseException.class, ex.getClass());
if (expectedResponseStatus != null) {
assertEquals(
expectedResponseStatus.code(),
((HttpResponseException) ex).getResponse().getStatusCode());
}
if (expectedMessage != null) {
assertTrue(ex.getMessage().contains(expectedMessage));
}
}
ServiceStatistics getExpectedServiceStatistics() {
ServiceCounters serviceCounters = new ServiceCounters()
.setDocumentCounter(new ResourceCounter().setUsage(0).setQuota(null))
.setIndexCounter(new ResourceCounter().setUsage(0).setQuota(3L))
.setIndexerCounter(new ResourceCounter().setUsage(0).setQuota(3L))
.setDataSourceCounter(new ResourceCounter().setUsage(0).setQuota(3L))
.setStorageSizeCounter(new ResourceCounter().setUsage(0).setQuota(52428800L))
.setSynonymMapCounter(new ResourceCounter().setUsage(0).setQuota(3L));
ServiceLimits serviceLimits = new ServiceLimits()
.setMaxFieldsPerIndex(1000)
.setMaxFieldNestingDepthPerIndex(10)
.setMaxComplexCollectionFieldsPerIndex(40)
.setMaxComplexObjectsInCollectionsPerDocument(3000);
return new ServiceStatistics()
.setCounters(serviceCounters)
.setLimits(serviceLimits);
}
static boolean liveMode() {
return setupTestMode() == TestMode.LIVE;
}
static boolean playbackMode() {
return setupTestMode() == TestMode.PLAYBACK;
}
static TestMode setupTestMode() {
String testMode = Configuration.getGlobalConfiguration().get(AZURE_TEST_MODE);
if (testMode != null) {
try {
return TestMode.valueOf(testMode.toUpperCase(Locale.US));
} catch (IllegalArgumentException ignore) {
return TestMode.PLAYBACK;
}
}
return TestMode.PLAYBACK;
}
} | |
Same comment about non-needed try/catch | public Mono<Boolean> exists() {
try {
return existsWithResponse().flatMap(FluxUtil::toMono);
} catch (RuntimeException ex) {
return monoError(logger, ex);
}
} | return monoError(logger, ex); | public Mono<Boolean> exists() {
return existsWithResponse().flatMap(FluxUtil::toMono);
} | class ShareDirectoryAsyncClient {
private final ClientLogger logger = new ClientLogger(ShareDirectoryAsyncClient.class);
private final AzureFileStorageImpl azureFileStorageClient;
private final String shareName;
private final String directoryPath;
private final String snapshot;
private final String accountName;
private final ShareServiceVersion serviceVersion;
/**
* Creates a ShareDirectoryAsyncClient that sends requests to the storage directory at {@link
* AzureFileStorageImpl
* {@code client}.
*
* @param azureFileStorageClient Client that interacts with the service interfaces
* @param shareName Name of the share
* @param directoryPath Name of the directory
* @param snapshot The snapshot of the share
*/
ShareDirectoryAsyncClient(AzureFileStorageImpl azureFileStorageClient, String shareName, String directoryPath,
String snapshot, String accountName, ShareServiceVersion serviceVersion) {
Objects.requireNonNull(shareName, "'shareName' cannot be null.");
Objects.requireNonNull(directoryPath);
this.shareName = shareName;
this.directoryPath = directoryPath;
this.snapshot = snapshot;
this.azureFileStorageClient = azureFileStorageClient;
this.accountName = accountName;
this.serviceVersion = serviceVersion;
}
/**
* Get the url of the storage directory client.
*
* @return the URL of the storage directory client
*/
public String getDirectoryUrl() {
StringBuilder directoryUrlString = new StringBuilder(azureFileStorageClient.getUrl()).append("/")
.append(shareName).append("/").append(directoryPath);
if (snapshot != null) {
directoryUrlString.append("?snapshot=").append(snapshot);
}
return directoryUrlString.toString();
}
/**
* Gets the service version the client is using.
*
* @return the service version the client is using.
*/
public ShareServiceVersion getServiceVersion() {
return serviceVersion;
}
/**
* Constructs a ShareFileAsyncClient that interacts with the specified file.
*
* <p>If the file doesn't exist in this directory {@link ShareFileAsyncClient | class ShareDirectoryAsyncClient {
private final ClientLogger logger = new ClientLogger(ShareDirectoryAsyncClient.class);
private final AzureFileStorageImpl azureFileStorageClient;
private final String shareName;
private final String directoryPath;
private final String snapshot;
private final String accountName;
private final ShareServiceVersion serviceVersion;
/**
* Creates a ShareDirectoryAsyncClient that sends requests to the storage directory at {@link
* AzureFileStorageImpl
* {@code client}.
*
* @param azureFileStorageClient Client that interacts with the service interfaces
* @param shareName Name of the share
* @param directoryPath Name of the directory
* @param snapshot The snapshot of the share
*/
ShareDirectoryAsyncClient(AzureFileStorageImpl azureFileStorageClient, String shareName, String directoryPath,
String snapshot, String accountName, ShareServiceVersion serviceVersion) {
Objects.requireNonNull(shareName, "'shareName' cannot be null.");
Objects.requireNonNull(directoryPath);
this.shareName = shareName;
this.directoryPath = directoryPath;
this.snapshot = snapshot;
this.azureFileStorageClient = azureFileStorageClient;
this.accountName = accountName;
this.serviceVersion = serviceVersion;
}
/**
* Get the url of the storage directory client.
*
* @return the URL of the storage directory client
*/
public String getDirectoryUrl() {
StringBuilder directoryUrlString = new StringBuilder(azureFileStorageClient.getUrl()).append("/")
.append(shareName).append("/").append(directoryPath);
if (snapshot != null) {
directoryUrlString.append("?snapshot=").append(snapshot);
}
return directoryUrlString.toString();
}
/**
* Gets the service version the client is using.
*
* @return the service version the client is using.
*/
public ShareServiceVersion getServiceVersion() {
return serviceVersion;
}
/**
* Constructs a ShareFileAsyncClient that interacts with the specified file.
*
* <p>If the file doesn't exist in this directory {@link ShareFileAsyncClient |
Change the logic here to if resourceGroup exists, get the resource group. If resourceGroup not exist, create an resource group. By doing this, the entire test run only needs one resource group, so it does not need to delete every test class, which answered your question below. | public void createResourceGroup() {
if (resourceGroup == null) {
String resourceGroupName = Configuration.getGlobalConfiguration().get(AZURE_RESOURCEGROUP_NAME);
System.out.println("Creating Resource Group: " + resourceGroupName);
resourceGroup = azure.resourceGroups()
.define(resourceGroupName)
.withRegion(location)
.create();
}
} | public void createResourceGroup() {
String resourceGroupName = Configuration.getGlobalConfiguration().get(AZURE_RESOURCEGROUP_NAME);
if (azure.resourceGroups().checkExistence(resourceGroupName)) {
System.out.println("Fetching Resource Group: " + resourceGroupName);
resourceGroup = azure.resourceGroups()
.getByName(resourceGroupName);
} else {
System.out.println("Creating Resource Group: " + resourceGroupName);
resourceGroup = azure.resourceGroups()
.define(resourceGroupName)
.withRegion(location)
.create();
}
} | class variables
* to be retrieved later.
*/
public void initialize() {
validate();
if (azure == null) {
azure = Azure.configure()
.authenticate(azureTokenCredentials)
.withSubscription(subscriptionId);
}
} | class variables
* to be retrieved later.
*/
public void initialize() {
validate();
if (azure == null) {
azure = Azure.configure()
.authenticate(azureTokenCredentials)
.withSubscription(subscriptionId);
}
} | |
One resource group for the entire tests, so there is no need to delete for every test class | public static void afterAll() {
} | public static void afterAll() {
} | class SearchServiceTestBase extends TestBase {
private static final String DEFAULT_DNS_SUFFIX = "search.windows.net";
private static final String DOGFOOD_DNS_SUFFIX = "search-dogfood.windows-int.net";
private static final String FAKE_DESCRIPTION = "Some data source";
private static final String AZURE_TEST_MODE = "AZURE_TEST_MODE";
private static final String AZURE_SQL_CONN_STRING_READONLY_PLAYGROUND =
"Server=tcp:azs-playground.database.windows.net,1433;Database=usgs;User ID=reader;Password=EdrERBt3j6mZDP;Trusted_Connection=False;Encrypt=True;Connection Timeout=30;";
private static final ObjectMapper OBJECT_MAPPER;
static {
OBJECT_MAPPER = new ObjectMapper();
SimpleDateFormat df = new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ss'Z'");
df.setTimeZone(TimeZone.getDefault());
OBJECT_MAPPER.setDateFormat(df);
OBJECT_MAPPER.registerModule(new JavaTimeModule());
OBJECT_MAPPER.configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false);
}
static final String HOTEL_INDEX_NAME = "hotels";
static final String BLOB_DATASOURCE_NAME = "azs-java-live-blob";
static final String BLOB_DATASOURCE_TEST_NAME = "azs-java-test-blob";
static final String SQL_DATASOURCE_NAME = "azs-java-test-sql";
private String searchServiceName;
private String searchDnsSuffix;
protected String endpoint;
SearchApiKeyCredential searchApiKeyCredential;
private static String testEnvironment;
private static AzureSearchResources azureSearchResources;
@Rule
public TestName testName = new TestName();
@BeforeAll
public static void beforeAll() {
initializeAzureResources();
azureSearchResources.createResourceGroup();
}
@AfterAll
@Override
protected void beforeTest() {
searchDnsSuffix = testEnvironment.equals("DOGFOOD") ? DOGFOOD_DNS_SUFFIX : DEFAULT_DNS_SUFFIX;
if (!interceptorManager.isPlaybackMode()) {
azureSearchResources.initialize();
azureSearchResources.createService(testResourceNamer);
searchApiKeyCredential = new SearchApiKeyCredential(azureSearchResources.getSearchAdminKey());
}
searchServiceName = azureSearchResources.getSearchServiceName();
endpoint = String.format("https:
}
@Override
protected void afterTest() {
super.afterTest();
azureSearchResources.deleteService();
}
protected SearchServiceClientBuilder getSearchServiceClientBuilder() {
return getSearchServiceClientBuilderWithHttpPipelinePolicies(null);
}
/**
* Provides a way to inject custom HTTP pipeline policies before the client is instantiated
*
* @param policies the additional HTTP pipeline policies
* @return {@link SearchServiceClientBuilder}
*/
SearchServiceClientBuilder getSearchServiceClientBuilderWithHttpPipelinePolicies(
List<HttpPipelinePolicy> policies) {
SearchServiceClientBuilder builder = new SearchServiceClientBuilder()
.endpoint(endpoint);
if (interceptorManager.isPlaybackMode()) {
builder.httpClient(interceptorManager.getPlaybackClient());
addPolicies(builder, policies);
return builder;
}
addPolicies(builder, policies);
builder.httpClient(new NettyAsyncHttpClientBuilder().wiretap(true).build())
.credential(searchApiKeyCredential);
if (!liveMode()) {
builder.addPolicy(interceptorManager.getRecordPolicy());
}
return builder;
}
private void addPolicies(SearchServiceClientBuilder builder, List<HttpPipelinePolicy> policies) {
if (policies != null && policies.size() > 0) {
for (HttpPipelinePolicy policy : policies) {
builder.addPolicy(policy);
}
}
}
Index createTestIndex() {
Map<String, Double> weights = new HashMap<>();
weights.put("Description", 1.5);
weights.put("Category", 2.0);
return new Index()
.setName(HOTEL_INDEX_NAME)
.setFields(Arrays.asList(
new Field()
.setName("HotelId")
.setType(DataType.EDM_STRING)
.setKey(Boolean.TRUE)
.setFilterable(Boolean.TRUE)
.setSortable(Boolean.TRUE)
.setFacetable(Boolean.TRUE)
.setRetrievable(Boolean.TRUE),
new Field()
.setName("HotelName")
.setType(DataType.EDM_STRING)
.setSearchable(Boolean.TRUE)
.setFilterable(Boolean.TRUE)
.setSortable(Boolean.TRUE)
.setRetrievable(Boolean.TRUE),
new Field()
.setName("Description")
.setType(DataType.EDM_STRING)
.setSearchable(Boolean.TRUE)
.setAnalyzer(AnalyzerName.EN_LUCENE.toString())
.setRetrievable(Boolean.TRUE),
new Field()
.setName("DescriptionFr")
.setType(DataType.EDM_STRING)
.setSearchable(Boolean.TRUE)
.setAnalyzer(AnalyzerName.FR_LUCENE.toString())
.setRetrievable(Boolean.TRUE),
new Field()
.setName("Description_Custom")
.setType(DataType.EDM_STRING)
.setSearchable(Boolean.TRUE)
.setSearchAnalyzer(AnalyzerName.STOP.toString())
.setIndexAnalyzer(AnalyzerName.STOP.toString())
.setRetrievable(Boolean.TRUE),
new Field()
.setName("Category")
.setType(DataType.EDM_STRING)
.setSearchable(Boolean.TRUE)
.setFilterable(Boolean.TRUE)
.setSortable(Boolean.TRUE)
.setFacetable(Boolean.TRUE)
.setRetrievable(Boolean.TRUE),
new Field()
.setName("Tags")
.setType(DataType.Collection(DataType.EDM_STRING))
.setSearchable(Boolean.TRUE)
.setFilterable(Boolean.TRUE)
.setFacetable(Boolean.TRUE)
.setRetrievable(Boolean.TRUE),
new Field()
.setName("ParkingIncluded")
.setType(DataType.EDM_BOOLEAN)
.setFilterable(Boolean.TRUE)
.setSortable(Boolean.TRUE)
.setFacetable(Boolean.TRUE)
.setRetrievable(Boolean.TRUE),
new Field()
.setName("SmokingAllowed")
.setType(DataType.EDM_BOOLEAN)
.setFilterable(Boolean.TRUE)
.setSortable(Boolean.TRUE)
.setFacetable(Boolean.TRUE)
.setRetrievable(Boolean.TRUE),
new Field()
.setName("LastRenovationDate")
.setType(DataType.EDM_DATE_TIME_OFFSET)
.setFilterable(Boolean.TRUE)
.setSortable(Boolean.TRUE)
.setFacetable(Boolean.TRUE)
.setRetrievable(Boolean.TRUE),
new Field()
.setName("Rating")
.setType(DataType.EDM_INT32)
.setFilterable(Boolean.TRUE)
.setSortable(Boolean.TRUE)
.setFacetable(Boolean.TRUE)
.setRetrievable(Boolean.TRUE),
new Field()
.setName("Address")
.setType(DataType.EDM_COMPLEX_TYPE)
.setFields(Arrays.asList(
new Field()
.setName("StreetAddress")
.setType(DataType.EDM_STRING)
.setSearchable(Boolean.TRUE)
.setRetrievable(Boolean.TRUE),
new Field()
.setName("City")
.setType(DataType.EDM_STRING)
.setSearchable(Boolean.TRUE)
.setFilterable(Boolean.TRUE)
.setSortable(Boolean.TRUE)
.setFacetable(Boolean.TRUE)
.setRetrievable(Boolean.TRUE),
new Field()
.setName("StateProvince")
.setType(DataType.EDM_STRING)
.setSearchable(Boolean.TRUE)
.setFilterable(Boolean.TRUE)
.setSortable(Boolean.TRUE)
.setFacetable(Boolean.TRUE)
.setRetrievable(Boolean.TRUE),
new Field()
.setName("Country")
.setType(DataType.EDM_STRING)
.setSearchable(Boolean.TRUE)
.setFilterable(Boolean.TRUE)
.setSortable(Boolean.TRUE)
.setFacetable(Boolean.TRUE)
.setRetrievable(Boolean.TRUE),
new Field()
.setName("PostalCode")
.setType(DataType.EDM_STRING)
.setSearchable(Boolean.TRUE)
.setFilterable(Boolean.TRUE)
.setSortable(Boolean.TRUE)
.setFacetable(Boolean.TRUE)
.setRetrievable(Boolean.TRUE)
)
),
new Field()
.setName("Location")
.setType(DataType.EDM_GEOGRAPHY_POINT)
.setFilterable(Boolean.TRUE)
.setSortable(Boolean.TRUE)
.setRetrievable(Boolean.TRUE)
.setRetrievable(Boolean.TRUE),
new Field()
.setName("Rooms")
.setType(DataType.Collection(DataType.EDM_COMPLEX_TYPE))
.setFields(Arrays.asList(
new Field()
.setName("Description")
.setType(DataType.EDM_STRING)
.setSearchable(Boolean.TRUE)
.setRetrievable(Boolean.TRUE)
.setAnalyzer(AnalyzerName.EN_LUCENE.toString()),
new Field()
.setName("DescriptionFr")
.setType(DataType.EDM_STRING)
.setSearchable(Boolean.TRUE)
.setRetrievable(Boolean.TRUE)
.setAnalyzer(AnalyzerName.FR_LUCENE.toString()),
new Field()
.setName("Type")
.setType(DataType.EDM_STRING)
.setSearchable(Boolean.TRUE)
.setFilterable(Boolean.TRUE)
.setFacetable(Boolean.TRUE)
.setRetrievable(Boolean.TRUE),
new Field()
.setName("BaseRate")
.setType(DataType.EDM_DOUBLE)
.setKey(Boolean.FALSE)
.setFilterable(Boolean.TRUE)
.setFacetable(Boolean.TRUE)
.setRetrievable(Boolean.TRUE),
new Field()
.setName("BedOptions")
.setType(DataType.EDM_STRING)
.setSearchable(Boolean.TRUE)
.setFilterable(Boolean.TRUE)
.setFacetable(Boolean.TRUE)
.setRetrievable(Boolean.TRUE),
new Field()
.setName("SleepsCount")
.setType(DataType.EDM_INT32)
.setFilterable(Boolean.TRUE)
.setFacetable(Boolean.TRUE)
.setRetrievable(Boolean.TRUE),
new Field()
.setName("SmokingAllowed")
.setType(DataType.EDM_BOOLEAN)
.setFilterable(Boolean.TRUE)
.setFacetable(Boolean.TRUE)
.setRetrievable(Boolean.TRUE),
new Field()
.setName("Tags")
.setType(DataType.Collection(DataType.EDM_STRING))
.setSearchable(Boolean.TRUE)
.setFilterable(Boolean.TRUE)
.setFacetable(Boolean.TRUE)
.setRetrievable(Boolean.TRUE)
)
),
new Field()
.setName("TotalGuests")
.setType(DataType.EDM_INT64)
.setFilterable(Boolean.TRUE)
.setSortable(Boolean.TRUE)
.setFacetable(Boolean.TRUE),
new Field()
.setName("ProfitMargin")
.setType(DataType.EDM_DOUBLE)
)
)
.setScoringProfiles(Arrays.asList(
new ScoringProfile()
.setName("MyProfile")
.setFunctionAggregation(ScoringFunctionAggregation.AVERAGE)
.setFunctions(Arrays.asList(
new MagnitudeScoringFunction()
.setParameters(new MagnitudeScoringParameters()
.setBoostingRangeStart(1)
.setBoostingRangeEnd(4)
.setShouldBoostBeyondRangeByConstant(true))
.setFieldName("Rating")
.setBoost(2.0)
.setInterpolation(ScoringFunctionInterpolation.CONSTANT),
new DistanceScoringFunction()
.setParameters(new DistanceScoringParameters()
.setBoostingDistance(5)
.setReferencePointParameter("Loc"))
.setFieldName("Location")
.setBoost(1.5)
.setInterpolation(ScoringFunctionInterpolation.LINEAR),
new FreshnessScoringFunction()
.setParameters(new FreshnessScoringParameters()
.setBoostingDuration(Duration.ofDays(365)))
.setFieldName("LastRenovationDate")
.setBoost(1.1)
.setInterpolation(ScoringFunctionInterpolation.LOGARITHMIC)
))
.setTextWeights(new TextWeights()
.setWeights(weights)),
new ScoringProfile()
.setName("ProfileTwo")
.setFunctionAggregation(ScoringFunctionAggregation.MAXIMUM)
.setFunctions(Collections.singletonList(
new TagScoringFunction()
.setParameters(new TagScoringParameters().setTagsParameter("MyTags"))
.setFieldName("Tags")
.setBoost(1.5)
.setInterpolation(ScoringFunctionInterpolation.LINEAR)
)),
new ScoringProfile()
.setName("ProfileThree")
.setFunctionAggregation(ScoringFunctionAggregation.MINIMUM)
.setFunctions(Collections.singletonList(
new MagnitudeScoringFunction()
.setParameters(new MagnitudeScoringParameters()
.setBoostingRangeStart(0)
.setBoostingRangeEnd(10)
.setShouldBoostBeyondRangeByConstant(false))
.setFieldName("Rating")
.setBoost(3.0)
.setInterpolation(ScoringFunctionInterpolation.QUADRATIC)
)),
new ScoringProfile()
.setName("ProfileFour")
.setFunctionAggregation(ScoringFunctionAggregation.FIRST_MATCHING)
.setFunctions(Collections.singletonList(
new MagnitudeScoringFunction()
.setParameters(new MagnitudeScoringParameters()
.setBoostingRangeStart(1)
.setBoostingRangeEnd(5)
.setShouldBoostBeyondRangeByConstant(false))
.setFieldName("Rating")
.setBoost(3.14)
.setInterpolation(ScoringFunctionInterpolation.CONSTANT)
))
))
.setDefaultScoringProfile("MyProfile")
.setCorsOptions(new CorsOptions()
.setAllowedOrigins("http:
.setMaxAgeInSeconds(60L))
.setSuggesters(Collections.singletonList(new Suggester()
.setName("FancySuggester")
.setSourceFields(Collections.singletonList("HotelName"))));
}
DataSource createTestSqlDataSourceObject(DataDeletionDetectionPolicy deletionDetectionPolicy,
DataChangeDetectionPolicy changeDetectionPolicy) {
return DataSources.azureSql(
SearchServiceTestBase.SQL_DATASOURCE_NAME,
AZURE_SQL_CONN_STRING_READONLY_PLAYGROUND,
"GeoNamesRI",
FAKE_DESCRIPTION,
changeDetectionPolicy,
deletionDetectionPolicy
);
}
DataSource createTestSqlDataSourceObject() {
return createTestSqlDataSourceObject(null, null);
}
/**
* create a new blob data source object
* @return the created data source
*/
DataSource createBlobDataSource() {
String storageConnString = "connectionString";
String blobContainerDatasourceName = "container";
if (!interceptorManager.isPlaybackMode()) {
storageConnString = azureSearchResources.createStorageAccount(testResourceNamer);
blobContainerDatasourceName =
azureSearchResources.createBlobContainer(storageConnString, testResourceNamer);
}
return DataSources.azureBlobStorage(
BLOB_DATASOURCE_NAME,
storageConnString,
blobContainerDatasourceName,
"/",
"real live blob",
new SoftDeleteColumnDeletionDetectionPolicy()
.setSoftDeleteColumnName("fieldName")
.setSoftDeleteMarkerValue("someValue")
);
}
private static void initializeAzureResources() {
String appId = Configuration.getGlobalConfiguration().get(Configuration.PROPERTY_AZURE_CLIENT_ID);
String azureDomainId = Configuration.getGlobalConfiguration().get(Configuration.PROPERTY_AZURE_TENANT_ID);
String secret = Configuration.getGlobalConfiguration().get(Configuration.PROPERTY_AZURE_CLIENT_SECRET);
String subscriptionId = Configuration.getGlobalConfiguration().get(Configuration.PROPERTY_AZURE_SUBSCRIPTION_ID);
testEnvironment = Configuration.getGlobalConfiguration().get("AZURE_TEST_ENVIRONMENT");
testEnvironment = (testEnvironment == null) ? "AZURE" : testEnvironment.toUpperCase(Locale.US);
AzureEnvironment environment = testEnvironment.equals("DOGFOOD") ? getDogfoodEnvironment() : AzureEnvironment.AZURE;
ApplicationTokenCredentials applicationTokenCredentials =
new ApplicationTokenCredentials(appId, azureDomainId, secret, environment);
azureSearchResources = new AzureSearchResources(applicationTokenCredentials, subscriptionId, Region.US_WEST2);
}
private static AzureEnvironment getDogfoodEnvironment() {
HashMap<String, String> configuration = new HashMap<>();
configuration.put("portalUrl", "http:
configuration.put("managementEndpointUrl", "https:
configuration.put("resourceManagerEndpointUrl", "https:
configuration.put("activeDirectoryEndpointUrl", "https:
configuration.put("activeDirectoryResourceId", "https:
configuration.put("activeDirectoryGraphResourceId", "https:
configuration.put("activeDirectoryGraphApiVersion", "2013-04-05");
return new AzureEnvironment(configuration);
}
protected SearchIndexClientBuilder getSearchIndexClientBuilder(String indexName) {
SearchIndexClientBuilder builder = new SearchIndexClientBuilder()
.endpoint(String.format("https:
.indexName(indexName);
if (interceptorManager.isPlaybackMode()) {
return builder.httpClient(interceptorManager.getPlaybackClient());
}
builder.httpClient(new NettyAsyncHttpClientBuilder().wiretap(true).build())
.credential(searchApiKeyCredential);
if (!liveMode()) {
builder.addPolicy(interceptorManager.getRecordPolicy());
}
return builder;
}
protected void waitForIndexing() {
sleepIfRunningAgainstService(2000);
}
/**
* If the document schema is known, user can convert the properties to a specific object type
*
* @param cls Class type of the document object to convert to
* @param <T> type
* @return an object of the request type
*/
static <T> T convertToType(Object document, Class<T> cls) {
return OBJECT_MAPPER.convertValue(document, cls);
}
void addFieldToIndex(Index index, Field field) {
List<Field> fields = new ArrayList<>(index.getFields());
fields.add(field);
index.setFields(fields);
}
/**
* Constructs a request options object with client request Id.
* @return a RequestOptions object with ClientRequestId.
*/
protected RequestOptions generateRequestOptions() {
return new RequestOptions().setClientRequestId(UUID.randomUUID());
}
void assertHttpResponseException(Runnable exceptionThrower, HttpResponseStatus expectedResponseStatus,
String expectedMessage) {
try {
exceptionThrower.run();
fail();
} catch (Throwable ex) {
verifyHttpResponseError(ex, expectedResponseStatus, expectedMessage);
}
}
void assertHttpResponseExceptionAsync(Publisher<?> exceptionThrower) {
StepVerifier.create(exceptionThrower)
.verifyErrorSatisfies(error -> verifyHttpResponseError(error, HttpResponseStatus.BAD_REQUEST,
"Invalid expression: Could not find a property named 'ThisFieldDoesNotExist' on type 'search.document'."));
}
private void verifyHttpResponseError(
Throwable ex, HttpResponseStatus expectedResponseStatus, String expectedMessage) {
assertEquals(HttpResponseException.class, ex.getClass());
if (expectedResponseStatus != null) {
assertEquals(
expectedResponseStatus.code(),
((HttpResponseException) ex).getResponse().getStatusCode());
}
if (expectedMessage != null) {
assertTrue(ex.getMessage().contains(expectedMessage));
}
}
ServiceStatistics getExpectedServiceStatistics() {
ServiceCounters serviceCounters = new ServiceCounters()
.setDocumentCounter(new ResourceCounter().setUsage(0).setQuota(null))
.setIndexCounter(new ResourceCounter().setUsage(0).setQuota(3L))
.setIndexerCounter(new ResourceCounter().setUsage(0).setQuota(3L))
.setDataSourceCounter(new ResourceCounter().setUsage(0).setQuota(3L))
.setStorageSizeCounter(new ResourceCounter().setUsage(0).setQuota(52428800L))
.setSynonymMapCounter(new ResourceCounter().setUsage(0).setQuota(3L));
ServiceLimits serviceLimits = new ServiceLimits()
.setMaxFieldsPerIndex(1000)
.setMaxFieldNestingDepthPerIndex(10)
.setMaxComplexCollectionFieldsPerIndex(40)
.setMaxComplexObjectsInCollectionsPerDocument(3000);
return new ServiceStatistics()
.setCounters(serviceCounters)
.setLimits(serviceLimits);
}
static boolean liveMode() {
return setupTestMode() == TestMode.LIVE;
}
static TestMode setupTestMode() {
String testMode = Configuration.getGlobalConfiguration().get(AZURE_TEST_MODE);
if (testMode != null) {
try {
return TestMode.valueOf(testMode.toUpperCase(Locale.US));
} catch (IllegalArgumentException ignore) {
return TestMode.PLAYBACK;
}
}
return TestMode.PLAYBACK;
}
} | class SearchServiceTestBase extends TestBase {
private static final String DEFAULT_DNS_SUFFIX = "search.windows.net";
private static final String DOGFOOD_DNS_SUFFIX = "search-dogfood.windows-int.net";
private static final String FAKE_DESCRIPTION = "Some data source";
private static final String AZURE_TEST_MODE = "AZURE_TEST_MODE";
private static final String AZURE_SQL_CONN_STRING_READONLY_PLAYGROUND =
"Server=tcp:azs-playground.database.windows.net,1433;Database=usgs;User ID=reader;Password=EdrERBt3j6mZDP;Trusted_Connection=False;Encrypt=True;Connection Timeout=30;";
private static final ObjectMapper OBJECT_MAPPER;
static {
OBJECT_MAPPER = new ObjectMapper();
SimpleDateFormat df = new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ss'Z'");
df.setTimeZone(TimeZone.getDefault());
OBJECT_MAPPER.setDateFormat(df);
OBJECT_MAPPER.registerModule(new JavaTimeModule());
OBJECT_MAPPER.configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false);
}
static final String HOTEL_INDEX_NAME = "hotels";
static final String BLOB_DATASOURCE_NAME = "azs-java-live-blob";
static final String BLOB_DATASOURCE_TEST_NAME = "azs-java-test-blob";
static final String SQL_DATASOURCE_NAME = "azs-java-test-sql";
private String searchServiceName;
private String searchDnsSuffix;
protected String endpoint;
SearchApiKeyCredential searchApiKeyCredential;
private static String testEnvironment;
private static AzureSearchResources azureSearchResources;
@Rule
public TestName testName = new TestName();
@BeforeAll
public static void beforeAll() {
initializeAzureResources();
if (!playbackMode()) {
azureSearchResources.initialize();
azureSearchResources.createResourceGroup();
}
}
@AfterAll
@Override
protected void beforeTest() {
searchDnsSuffix = testEnvironment.equals("DOGFOOD") ? DOGFOOD_DNS_SUFFIX : DEFAULT_DNS_SUFFIX;
if (!interceptorManager.isPlaybackMode()) {
azureSearchResources.createService(testResourceNamer);
searchApiKeyCredential = new SearchApiKeyCredential(azureSearchResources.getSearchAdminKey());
}
searchServiceName = azureSearchResources.getSearchServiceName();
endpoint = String.format("https:
}
@Override
protected void afterTest() {
super.afterTest();
azureSearchResources.deleteService();
}
protected SearchServiceClientBuilder getSearchServiceClientBuilder() {
return getSearchServiceClientBuilderWithHttpPipelinePolicies(null);
}
/**
* Provides a way to inject custom HTTP pipeline policies before the client is instantiated
*
* @param policies the additional HTTP pipeline policies
* @return {@link SearchServiceClientBuilder}
*/
SearchServiceClientBuilder getSearchServiceClientBuilderWithHttpPipelinePolicies(
List<HttpPipelinePolicy> policies) {
SearchServiceClientBuilder builder = new SearchServiceClientBuilder()
.endpoint(endpoint);
if (interceptorManager.isPlaybackMode()) {
builder.httpClient(interceptorManager.getPlaybackClient());
addPolicies(builder, policies);
return builder;
}
addPolicies(builder, policies);
builder.httpClient(new NettyAsyncHttpClientBuilder().wiretap(true).build())
.credential(searchApiKeyCredential);
if (!liveMode()) {
builder.addPolicy(interceptorManager.getRecordPolicy());
}
return builder;
}
private void addPolicies(SearchServiceClientBuilder builder, List<HttpPipelinePolicy> policies) {
if (policies != null && policies.size() > 0) {
for (HttpPipelinePolicy policy : policies) {
builder.addPolicy(policy);
}
}
}
Index createTestIndex() {
Map<String, Double> weights = new HashMap<>();
weights.put("Description", 1.5);
weights.put("Category", 2.0);
return new Index()
.setName(HOTEL_INDEX_NAME)
.setFields(Arrays.asList(
new Field()
.setName("HotelId")
.setType(DataType.EDM_STRING)
.setKey(Boolean.TRUE)
.setFilterable(Boolean.TRUE)
.setSortable(Boolean.TRUE)
.setFacetable(Boolean.TRUE)
.setRetrievable(Boolean.TRUE),
new Field()
.setName("HotelName")
.setType(DataType.EDM_STRING)
.setSearchable(Boolean.TRUE)
.setFilterable(Boolean.TRUE)
.setSortable(Boolean.TRUE)
.setRetrievable(Boolean.TRUE),
new Field()
.setName("Description")
.setType(DataType.EDM_STRING)
.setSearchable(Boolean.TRUE)
.setAnalyzer(AnalyzerName.EN_LUCENE.toString())
.setRetrievable(Boolean.TRUE),
new Field()
.setName("DescriptionFr")
.setType(DataType.EDM_STRING)
.setSearchable(Boolean.TRUE)
.setAnalyzer(AnalyzerName.FR_LUCENE.toString())
.setRetrievable(Boolean.TRUE),
new Field()
.setName("Description_Custom")
.setType(DataType.EDM_STRING)
.setSearchable(Boolean.TRUE)
.setSearchAnalyzer(AnalyzerName.STOP.toString())
.setIndexAnalyzer(AnalyzerName.STOP.toString())
.setRetrievable(Boolean.TRUE),
new Field()
.setName("Category")
.setType(DataType.EDM_STRING)
.setSearchable(Boolean.TRUE)
.setFilterable(Boolean.TRUE)
.setSortable(Boolean.TRUE)
.setFacetable(Boolean.TRUE)
.setRetrievable(Boolean.TRUE),
new Field()
.setName("Tags")
.setType(DataType.Collection(DataType.EDM_STRING))
.setSearchable(Boolean.TRUE)
.setFilterable(Boolean.TRUE)
.setFacetable(Boolean.TRUE)
.setRetrievable(Boolean.TRUE),
new Field()
.setName("ParkingIncluded")
.setType(DataType.EDM_BOOLEAN)
.setFilterable(Boolean.TRUE)
.setSortable(Boolean.TRUE)
.setFacetable(Boolean.TRUE)
.setRetrievable(Boolean.TRUE),
new Field()
.setName("SmokingAllowed")
.setType(DataType.EDM_BOOLEAN)
.setFilterable(Boolean.TRUE)
.setSortable(Boolean.TRUE)
.setFacetable(Boolean.TRUE)
.setRetrievable(Boolean.TRUE),
new Field()
.setName("LastRenovationDate")
.setType(DataType.EDM_DATE_TIME_OFFSET)
.setFilterable(Boolean.TRUE)
.setSortable(Boolean.TRUE)
.setFacetable(Boolean.TRUE)
.setRetrievable(Boolean.TRUE),
new Field()
.setName("Rating")
.setType(DataType.EDM_INT32)
.setFilterable(Boolean.TRUE)
.setSortable(Boolean.TRUE)
.setFacetable(Boolean.TRUE)
.setRetrievable(Boolean.TRUE),
new Field()
.setName("Address")
.setType(DataType.EDM_COMPLEX_TYPE)
.setFields(Arrays.asList(
new Field()
.setName("StreetAddress")
.setType(DataType.EDM_STRING)
.setSearchable(Boolean.TRUE)
.setRetrievable(Boolean.TRUE),
new Field()
.setName("City")
.setType(DataType.EDM_STRING)
.setSearchable(Boolean.TRUE)
.setFilterable(Boolean.TRUE)
.setSortable(Boolean.TRUE)
.setFacetable(Boolean.TRUE)
.setRetrievable(Boolean.TRUE),
new Field()
.setName("StateProvince")
.setType(DataType.EDM_STRING)
.setSearchable(Boolean.TRUE)
.setFilterable(Boolean.TRUE)
.setSortable(Boolean.TRUE)
.setFacetable(Boolean.TRUE)
.setRetrievable(Boolean.TRUE),
new Field()
.setName("Country")
.setType(DataType.EDM_STRING)
.setSearchable(Boolean.TRUE)
.setFilterable(Boolean.TRUE)
.setSortable(Boolean.TRUE)
.setFacetable(Boolean.TRUE)
.setRetrievable(Boolean.TRUE),
new Field()
.setName("PostalCode")
.setType(DataType.EDM_STRING)
.setSearchable(Boolean.TRUE)
.setFilterable(Boolean.TRUE)
.setSortable(Boolean.TRUE)
.setFacetable(Boolean.TRUE)
.setRetrievable(Boolean.TRUE)
)
),
new Field()
.setName("Location")
.setType(DataType.EDM_GEOGRAPHY_POINT)
.setFilterable(Boolean.TRUE)
.setSortable(Boolean.TRUE)
.setRetrievable(Boolean.TRUE)
.setRetrievable(Boolean.TRUE),
new Field()
.setName("Rooms")
.setType(DataType.Collection(DataType.EDM_COMPLEX_TYPE))
.setFields(Arrays.asList(
new Field()
.setName("Description")
.setType(DataType.EDM_STRING)
.setSearchable(Boolean.TRUE)
.setRetrievable(Boolean.TRUE)
.setAnalyzer(AnalyzerName.EN_LUCENE.toString()),
new Field()
.setName("DescriptionFr")
.setType(DataType.EDM_STRING)
.setSearchable(Boolean.TRUE)
.setRetrievable(Boolean.TRUE)
.setAnalyzer(AnalyzerName.FR_LUCENE.toString()),
new Field()
.setName("Type")
.setType(DataType.EDM_STRING)
.setSearchable(Boolean.TRUE)
.setFilterable(Boolean.TRUE)
.setFacetable(Boolean.TRUE)
.setRetrievable(Boolean.TRUE),
new Field()
.setName("BaseRate")
.setType(DataType.EDM_DOUBLE)
.setKey(Boolean.FALSE)
.setFilterable(Boolean.TRUE)
.setFacetable(Boolean.TRUE)
.setRetrievable(Boolean.TRUE),
new Field()
.setName("BedOptions")
.setType(DataType.EDM_STRING)
.setSearchable(Boolean.TRUE)
.setFilterable(Boolean.TRUE)
.setFacetable(Boolean.TRUE)
.setRetrievable(Boolean.TRUE),
new Field()
.setName("SleepsCount")
.setType(DataType.EDM_INT32)
.setFilterable(Boolean.TRUE)
.setFacetable(Boolean.TRUE)
.setRetrievable(Boolean.TRUE),
new Field()
.setName("SmokingAllowed")
.setType(DataType.EDM_BOOLEAN)
.setFilterable(Boolean.TRUE)
.setFacetable(Boolean.TRUE)
.setRetrievable(Boolean.TRUE),
new Field()
.setName("Tags")
.setType(DataType.Collection(DataType.EDM_STRING))
.setSearchable(Boolean.TRUE)
.setFilterable(Boolean.TRUE)
.setFacetable(Boolean.TRUE)
.setRetrievable(Boolean.TRUE)
)
),
new Field()
.setName("TotalGuests")
.setType(DataType.EDM_INT64)
.setFilterable(Boolean.TRUE)
.setSortable(Boolean.TRUE)
.setFacetable(Boolean.TRUE),
new Field()
.setName("ProfitMargin")
.setType(DataType.EDM_DOUBLE)
)
)
.setScoringProfiles(Arrays.asList(
new ScoringProfile()
.setName("MyProfile")
.setFunctionAggregation(ScoringFunctionAggregation.AVERAGE)
.setFunctions(Arrays.asList(
new MagnitudeScoringFunction()
.setParameters(new MagnitudeScoringParameters()
.setBoostingRangeStart(1)
.setBoostingRangeEnd(4)
.setShouldBoostBeyondRangeByConstant(true))
.setFieldName("Rating")
.setBoost(2.0)
.setInterpolation(ScoringFunctionInterpolation.CONSTANT),
new DistanceScoringFunction()
.setParameters(new DistanceScoringParameters()
.setBoostingDistance(5)
.setReferencePointParameter("Loc"))
.setFieldName("Location")
.setBoost(1.5)
.setInterpolation(ScoringFunctionInterpolation.LINEAR),
new FreshnessScoringFunction()
.setParameters(new FreshnessScoringParameters()
.setBoostingDuration(Duration.ofDays(365)))
.setFieldName("LastRenovationDate")
.setBoost(1.1)
.setInterpolation(ScoringFunctionInterpolation.LOGARITHMIC)
))
.setTextWeights(new TextWeights()
.setWeights(weights)),
new ScoringProfile()
.setName("ProfileTwo")
.setFunctionAggregation(ScoringFunctionAggregation.MAXIMUM)
.setFunctions(Collections.singletonList(
new TagScoringFunction()
.setParameters(new TagScoringParameters().setTagsParameter("MyTags"))
.setFieldName("Tags")
.setBoost(1.5)
.setInterpolation(ScoringFunctionInterpolation.LINEAR)
)),
new ScoringProfile()
.setName("ProfileThree")
.setFunctionAggregation(ScoringFunctionAggregation.MINIMUM)
.setFunctions(Collections.singletonList(
new MagnitudeScoringFunction()
.setParameters(new MagnitudeScoringParameters()
.setBoostingRangeStart(0)
.setBoostingRangeEnd(10)
.setShouldBoostBeyondRangeByConstant(false))
.setFieldName("Rating")
.setBoost(3.0)
.setInterpolation(ScoringFunctionInterpolation.QUADRATIC)
)),
new ScoringProfile()
.setName("ProfileFour")
.setFunctionAggregation(ScoringFunctionAggregation.FIRST_MATCHING)
.setFunctions(Collections.singletonList(
new MagnitudeScoringFunction()
.setParameters(new MagnitudeScoringParameters()
.setBoostingRangeStart(1)
.setBoostingRangeEnd(5)
.setShouldBoostBeyondRangeByConstant(false))
.setFieldName("Rating")
.setBoost(3.14)
.setInterpolation(ScoringFunctionInterpolation.CONSTANT)
))
))
.setDefaultScoringProfile("MyProfile")
.setCorsOptions(new CorsOptions()
.setAllowedOrigins("http:
.setMaxAgeInSeconds(60L))
.setSuggesters(Collections.singletonList(new Suggester()
.setName("FancySuggester")
.setSourceFields(Collections.singletonList("HotelName"))));
}
DataSource createTestSqlDataSourceObject(DataDeletionDetectionPolicy deletionDetectionPolicy,
DataChangeDetectionPolicy changeDetectionPolicy) {
return DataSources.azureSql(
SearchServiceTestBase.SQL_DATASOURCE_NAME,
AZURE_SQL_CONN_STRING_READONLY_PLAYGROUND,
"GeoNamesRI",
FAKE_DESCRIPTION,
changeDetectionPolicy,
deletionDetectionPolicy
);
}
DataSource createTestSqlDataSourceObject() {
return createTestSqlDataSourceObject(null, null);
}
/**
* create a new blob data source object
* @return the created data source
*/
DataSource createBlobDataSource() {
String storageConnString = "connectionString";
String blobContainerDatasourceName = "container";
if (!interceptorManager.isPlaybackMode()) {
storageConnString = azureSearchResources.createStorageAccount(testResourceNamer);
blobContainerDatasourceName =
azureSearchResources.createBlobContainer(storageConnString, testResourceNamer);
}
return DataSources.azureBlobStorage(
BLOB_DATASOURCE_NAME,
storageConnString,
blobContainerDatasourceName,
"/",
"real live blob",
new SoftDeleteColumnDeletionDetectionPolicy()
.setSoftDeleteColumnName("fieldName")
.setSoftDeleteMarkerValue("someValue")
);
}
private static void initializeAzureResources() {
String appId = Configuration.getGlobalConfiguration().get(Configuration.PROPERTY_AZURE_CLIENT_ID);
String azureDomainId = Configuration.getGlobalConfiguration().get(Configuration.PROPERTY_AZURE_TENANT_ID);
String secret = Configuration.getGlobalConfiguration().get(Configuration.PROPERTY_AZURE_CLIENT_SECRET);
String subscriptionId = Configuration.getGlobalConfiguration().get(Configuration.PROPERTY_AZURE_SUBSCRIPTION_ID);
testEnvironment = Configuration.getGlobalConfiguration().get("AZURE_TEST_ENVIRONMENT");
testEnvironment = (testEnvironment == null) ? "AZURE" : testEnvironment.toUpperCase(Locale.US);
AzureEnvironment environment = testEnvironment.equals("DOGFOOD") ? getDogfoodEnvironment() : AzureEnvironment.AZURE;
ApplicationTokenCredentials applicationTokenCredentials =
new ApplicationTokenCredentials(appId, azureDomainId, secret, environment);
azureSearchResources = new AzureSearchResources(applicationTokenCredentials, subscriptionId, Region.US_WEST2);
}
private static AzureEnvironment getDogfoodEnvironment() {
HashMap<String, String> configuration = new HashMap<>();
configuration.put("portalUrl", "http:
configuration.put("managementEndpointUrl", "https:
configuration.put("resourceManagerEndpointUrl", "https:
configuration.put("activeDirectoryEndpointUrl", "https:
configuration.put("activeDirectoryResourceId", "https:
configuration.put("activeDirectoryGraphResourceId", "https:
configuration.put("activeDirectoryGraphApiVersion", "2013-04-05");
return new AzureEnvironment(configuration);
}
protected SearchIndexClientBuilder getSearchIndexClientBuilder(String indexName) {
SearchIndexClientBuilder builder = new SearchIndexClientBuilder()
.endpoint(String.format("https:
.indexName(indexName);
if (interceptorManager.isPlaybackMode()) {
return builder.httpClient(interceptorManager.getPlaybackClient());
}
builder.httpClient(new NettyAsyncHttpClientBuilder().wiretap(true).build())
.credential(searchApiKeyCredential);
if (!liveMode()) {
builder.addPolicy(interceptorManager.getRecordPolicy());
}
return builder;
}
protected void waitForIndexing() {
sleepIfRunningAgainstService(2000);
}
/**
* If the document schema is known, user can convert the properties to a specific object type
*
* @param cls Class type of the document object to convert to
* @param <T> type
* @return an object of the request type
*/
static <T> T convertToType(Object document, Class<T> cls) {
return OBJECT_MAPPER.convertValue(document, cls);
}
void addFieldToIndex(Index index, Field field) {
List<Field> fields = new ArrayList<>(index.getFields());
fields.add(field);
index.setFields(fields);
}
/**
* Constructs a request options object with client request Id.
* @return a RequestOptions object with ClientRequestId.
*/
protected RequestOptions generateRequestOptions() {
return new RequestOptions().setClientRequestId(UUID.randomUUID());
}
void assertHttpResponseException(Runnable exceptionThrower, HttpResponseStatus expectedResponseStatus,
String expectedMessage) {
try {
exceptionThrower.run();
fail();
} catch (Throwable ex) {
verifyHttpResponseError(ex, expectedResponseStatus, expectedMessage);
}
}
void assertHttpResponseExceptionAsync(Publisher<?> exceptionThrower) {
StepVerifier.create(exceptionThrower)
.verifyErrorSatisfies(error -> verifyHttpResponseError(error, HttpResponseStatus.BAD_REQUEST,
"Invalid expression: Could not find a property named 'ThisFieldDoesNotExist' on type 'search.document'."));
}
private void verifyHttpResponseError(
Throwable ex, HttpResponseStatus expectedResponseStatus, String expectedMessage) {
assertEquals(HttpResponseException.class, ex.getClass());
if (expectedResponseStatus != null) {
assertEquals(
expectedResponseStatus.code(),
((HttpResponseException) ex).getResponse().getStatusCode());
}
if (expectedMessage != null) {
assertTrue(ex.getMessage().contains(expectedMessage));
}
}
ServiceStatistics getExpectedServiceStatistics() {
ServiceCounters serviceCounters = new ServiceCounters()
.setDocumentCounter(new ResourceCounter().setUsage(0).setQuota(null))
.setIndexCounter(new ResourceCounter().setUsage(0).setQuota(3L))
.setIndexerCounter(new ResourceCounter().setUsage(0).setQuota(3L))
.setDataSourceCounter(new ResourceCounter().setUsage(0).setQuota(3L))
.setStorageSizeCounter(new ResourceCounter().setUsage(0).setQuota(52428800L))
.setSynonymMapCounter(new ResourceCounter().setUsage(0).setQuota(3L));
ServiceLimits serviceLimits = new ServiceLimits()
.setMaxFieldsPerIndex(1000)
.setMaxFieldNestingDepthPerIndex(10)
.setMaxComplexCollectionFieldsPerIndex(40)
.setMaxComplexObjectsInCollectionsPerDocument(3000);
return new ServiceStatistics()
.setCounters(serviceCounters)
.setLimits(serviceLimits);
}
static boolean liveMode() {
return setupTestMode() == TestMode.LIVE;
}
static boolean playbackMode() {
return setupTestMode() == TestMode.PLAYBACK;
}
static TestMode setupTestMode() {
String testMode = Configuration.getGlobalConfiguration().get(AZURE_TEST_MODE);
if (testMode != null) {
try {
return TestMode.valueOf(testMode.toUpperCase(Locale.US));
} catch (IllegalArgumentException ignore) {
return TestMode.PLAYBACK;
}
}
return TestMode.PLAYBACK;
}
} | |
Shouldn't need to try catch here given the overload being called already does this. | public Mono<Boolean> exists() {
try {
return existsWithResponse().flatMap(FluxUtil::toMono);
} catch (RuntimeException ex) {
return monoError(logger, ex);
}
} | } | public Mono<Boolean> exists() {
return existsWithResponse().flatMap(FluxUtil::toMono);
} | class ShareAsyncClient {
private final ClientLogger logger = new ClientLogger(ShareAsyncClient.class);
private final AzureFileStorageImpl azureFileStorageClient;
private final String shareName;
private final String snapshot;
private final String accountName;
private final ShareServiceVersion serviceVersion;
/**
* Creates a ShareAsyncClient that sends requests to the storage share at {@link AzureFileStorageImpl
* endpoint}. Each service call goes through the {@link HttpPipeline pipeline} in the
* {@code azureFileStorageClient}.
*
* @param client Client that interacts with the service interfaces
* @param shareName Name of the share
*/
ShareAsyncClient(AzureFileStorageImpl client, String shareName, String snapshot, String accountName,
ShareServiceVersion serviceVersion) {
Objects.requireNonNull(shareName, "'shareName' cannot be null.");
this.shareName = shareName;
this.snapshot = snapshot;
this.accountName = accountName;
this.azureFileStorageClient = client;
this.serviceVersion = serviceVersion;
}
/**
* Get the url of the storage share client.
*
* @return the url of the Storage Share.
*/
public String getShareUrl() {
StringBuilder shareUrlString = new StringBuilder(azureFileStorageClient.getUrl()).append("/").append(shareName);
if (snapshot != null) {
shareUrlString.append("?snapshot=").append(snapshot);
}
return shareUrlString.toString();
}
/**
* Gets the service version the client is using.
*
* @return the service version the client is using.
*/
public ShareServiceVersion getServiceVersion() {
return serviceVersion;
}
/**
* Constructs a {@link ShareDirectoryAsyncClient} that interacts with the root directory in the share.
*
* <p>If the directory doesn't exist in the share {@link ShareDirectoryAsyncClient
* azureFileStorageClient will need to be called before interaction with the directory can happen.</p>
*
* @return a {@link ShareDirectoryAsyncClient} that interacts with the root directory in the share
*/
public ShareDirectoryAsyncClient getRootDirectoryClient() {
return getDirectoryClient("");
}
/**
* Constructs a {@link ShareDirectoryAsyncClient} that interacts with the specified directory.
*
* <p>If the directory doesn't exist in the share {@link ShareDirectoryAsyncClient
* azureFileStorageClient will need to be called before interaction with the directory can happen.</p>
*
* @param directoryName Name of the directory
* @return a {@link ShareDirectoryAsyncClient} that interacts with the directory in the share
*/
public ShareDirectoryAsyncClient getDirectoryClient(String directoryName) {
return new ShareDirectoryAsyncClient(azureFileStorageClient, shareName, directoryName, snapshot, accountName,
serviceVersion);
}
/**
* Constructs a {@link ShareFileAsyncClient} that interacts with the specified file.
*
* <p>If the file doesn't exist in the share {@link ShareFileAsyncClient | class ShareAsyncClient {
private final ClientLogger logger = new ClientLogger(ShareAsyncClient.class);
private final AzureFileStorageImpl azureFileStorageClient;
private final String shareName;
private final String snapshot;
private final String accountName;
private final ShareServiceVersion serviceVersion;
/**
* Creates a ShareAsyncClient that sends requests to the storage share at {@link AzureFileStorageImpl
* endpoint}. Each service call goes through the {@link HttpPipeline pipeline} in the
* {@code azureFileStorageClient}.
*
* @param client Client that interacts with the service interfaces
* @param shareName Name of the share
*/
ShareAsyncClient(AzureFileStorageImpl client, String shareName, String snapshot, String accountName,
ShareServiceVersion serviceVersion) {
Objects.requireNonNull(shareName, "'shareName' cannot be null.");
this.shareName = shareName;
this.snapshot = snapshot;
this.accountName = accountName;
this.azureFileStorageClient = client;
this.serviceVersion = serviceVersion;
}
/**
* Get the url of the storage share client.
*
* @return the url of the Storage Share.
*/
public String getShareUrl() {
StringBuilder shareUrlString = new StringBuilder(azureFileStorageClient.getUrl()).append("/").append(shareName);
if (snapshot != null) {
shareUrlString.append("?snapshot=").append(snapshot);
}
return shareUrlString.toString();
}
/**
* Gets the service version the client is using.
*
* @return the service version the client is using.
*/
public ShareServiceVersion getServiceVersion() {
return serviceVersion;
}
/**
* Constructs a {@link ShareDirectoryAsyncClient} that interacts with the root directory in the share.
*
* <p>If the directory doesn't exist in the share {@link ShareDirectoryAsyncClient
* azureFileStorageClient will need to be called before interaction with the directory can happen.</p>
*
* @return a {@link ShareDirectoryAsyncClient} that interacts with the root directory in the share
*/
public ShareDirectoryAsyncClient getRootDirectoryClient() {
return getDirectoryClient("");
}
/**
* Constructs a {@link ShareDirectoryAsyncClient} that interacts with the specified directory.
*
* <p>If the directory doesn't exist in the share {@link ShareDirectoryAsyncClient
* azureFileStorageClient will need to be called before interaction with the directory can happen.</p>
*
* @param directoryName Name of the directory
* @return a {@link ShareDirectoryAsyncClient} that interacts with the directory in the share
*/
public ShareDirectoryAsyncClient getDirectoryClient(String directoryName) {
    // The directory client shares this client's pipeline and share identity; only the
    // directory path is new.
    return new ShareDirectoryAsyncClient(azureFileStorageClient, shareName, directoryName, snapshot,
        accountName, serviceVersion);
}
/**
* Constructs a {@link ShareFileAsyncClient} that interacts with the specified file.
*
* <p>If the file doesn't exist in the share {@link ShareFileAsyncClient |
might at least be helpful to make it a constant so we dont have to dig around for them later. ya know? | public void copy(Path source, Path destination, CopyOption... copyOptions) throws IOException {
    /*
     * Copies source to destination. Argument/state validation is NOT atomic with the data copy;
     * only the copy itself, once started, is atomic. COPY_ATTRIBUTES is mandatory (the service
     * always copies attributes) and REPLACE_EXISTING is the only other supported option.
     */
    // Named so the timeout is easy to find and tune. TODO(review): promote to a class-level constant.
    final Duration copyTimeout = Duration.ofSeconds(30);
    if (!(source instanceof AzurePath && destination instanceof AzurePath)) {
        throw Utility.logError(logger, new IllegalArgumentException("This provider cannot operate on subtypes of "
            + "Path other than AzurePath"));
    }
    if (source.equals(destination)) {
        // Copying a path onto itself is a no-op.
        return;
    }
    boolean replaceExisting = false;
    List<CopyOption> optionsList = new ArrayList<>(Arrays.asList(copyOptions));
    if (!optionsList.contains(StandardCopyOption.COPY_ATTRIBUTES)) {
        throw Utility.logError(logger, new UnsupportedOperationException("StandardCopyOption.COPY_ATTRIBUTES "
            + "must be specified as the service will always copy file attributes."));
    }
    optionsList.remove(StandardCopyOption.COPY_ATTRIBUTES);
    if (optionsList.contains(StandardCopyOption.REPLACE_EXISTING)) {
        replaceExisting = true;
        optionsList.remove(StandardCopyOption.REPLACE_EXISTING);
    }
    if (!optionsList.isEmpty()) {
        // Message typo fixed: "StandareCopyOption" -> "StandardCopyOption".
        throw Utility.logError(logger, new UnsupportedOperationException("Unsupported copy option found. Only "
            + "StandardCopyOption.COPY_ATTRIBUTES and StandardCopyOption.REPLACE_EXISTING are supported."));
    }
    if (source.equals(source.getRoot()) || destination.equals(destination.getRoot())) {
        throw Utility.logError(logger, new IllegalArgumentException(String.format("Neither source nor destination "
            + "can be just a root directory. Source: %s. Destination: %s.", source.toString(),
            destination.toString())));
    }
    BlobClient sourceBlob = ((AzurePath) source).toBlobClient();
    BlobClient destinationBlob = ((AzurePath) destination).toBlobClient();
    DirectoryStatus destinationStatus = checkDirStatus(destinationBlob);
    if (destinationStatus.equals(DirectoryStatus.NOT_EMPTY)) {
        throw Utility.logError(logger, new DirectoryNotEmptyException(destination.toString()));
    }
    /*
    Set request conditions if we should not overwrite. We can error out here if we know something already exists,
    but we will also create request conditions as a safeguard against overwriting something that was created
    between our check and put.
    */
    BlobRequestConditions requestConditions = null;
    if (!replaceExisting) {
        if (!destinationStatus.equals(DirectoryStatus.DOES_NOT_EXIST)) {
            throw Utility.logError(logger, new FileAlreadyExistsException(destination.toString()));
        }
        requestConditions = new BlobRequestConditions().setIfNoneMatch("*");
    }
    /*
    More path validation
    Check that the parent for the destination exists. We only need to perform this check if there is nothing
    currently at the destination, for if the destination exists, its parent at least weakly exists and we
    can skip a service call.
    */
    if (destinationStatus.equals(DirectoryStatus.DOES_NOT_EXIST) && !checkParentDirectoryExists(destination)) {
        throw Utility.logError(logger, new IOException("Parent directory of destination location does not exist."
            + "The destination path is therefore invalid. Destination: " + destination.toString()));
    }
    /*
    Try to copy the resource at the source path.
    There is an optimization here where we try to do the copy first and only check for a virtual directory if
    there's a 404. In the cases of files and concrete directories, this only requires one request. For virtual
    directories, however, this requires three requests: failed copy, check status, create directory. Depending on
    customer scenarios and how many virtual directories they copy, it could be better to check the directory status
    first and then do a copy or createDir, which would always be two requests for all resource types.
    */
    try {
        SyncPoller<BlobCopyInfo, Void> pollResponse =
            destinationBlob.beginCopy(sourceBlob.getBlobUrl(), null, null, null, null, requestConditions, null);
        // Bounded wait so a stalled service-side copy cannot hang the caller indefinitely.
        pollResponse.waitForCompletion(copyTimeout);
    } catch (BlobStorageException e) {
        if (e.getErrorCode().equals(BlobErrorCode.BLOB_NOT_FOUND)
            && !checkDirStatus(sourceBlob).equals(DirectoryStatus.DOES_NOT_EXIST)) {
            /*
            We already checked that the parent exists and validated the paths above, so we can put the blob
            directly.
            */
            putDirectoryBlob(destinationBlob, null, null, requestConditions);
        } else {
            throw Utility.logError(logger, new IOException(e));
        }
    } catch (RuntimeException e) {
        throw Utility.logError(logger, new IOException(e));
    }
} | pollResponse.waitForCompletion(Duration.ofSeconds(30)); | public void copy(Path source, Path destination, CopyOption... copyOptions) throws IOException {
// Validation below is not atomic with the copy itself; see the method javadoc for the contract.
if (!(source instanceof AzurePath && destination instanceof AzurePath)) {
throw Utility.logError(logger, new IllegalArgumentException("This provider cannot operate on subtypes of "
+ "Path other than AzurePath"));
}
if (source.equals(destination)) {
// Copying a path onto itself is a no-op.
return;
}
boolean replaceExisting = false;
List<CopyOption> optionsList = new ArrayList<>(Arrays.asList(copyOptions));
if (!optionsList.contains(StandardCopyOption.COPY_ATTRIBUTES)) {
throw Utility.logError(logger, new UnsupportedOperationException("StandardCopyOption.COPY_ATTRIBUTES "
+ "must be specified as the service will always copy file attributes."));
}
optionsList.remove(StandardCopyOption.COPY_ATTRIBUTES);
if (optionsList.contains(StandardCopyOption.REPLACE_EXISTING)) {
replaceExisting = true;
optionsList.remove(StandardCopyOption.REPLACE_EXISTING);
}
if (!optionsList.isEmpty()) {
// NOTE(review): "StandareCopyOption" in the message below is a typo for "StandardCopyOption".
throw Utility.logError(logger, new UnsupportedOperationException("Unsupported copy option found. Only "
+ "StandardCopyOption.COPY_ATTRIBUTES and StandareCopyOption.REPLACE_EXISTING are supported."));
}
if (source.equals(source.getRoot()) || destination.equals(destination.getRoot())) {
throw Utility.logError(logger, new IllegalArgumentException(String.format("Neither source nor destination "
+ "can be just a root directory. Source: %s. Destination: %s.", source.toString(),
destination.toString())));
}
BlobClient sourceBlob = ((AzurePath) source).toBlobClient();
BlobClient destinationBlob = ((AzurePath) destination).toBlobClient();
DirectoryStatus destinationStatus = checkDirStatus(destinationBlob);
if (destinationStatus.equals(DirectoryStatus.NOT_EMPTY)) {
throw Utility.logError(logger, new DirectoryNotEmptyException(destination.toString()));
}
/*
Set request conditions if we should not overwrite. We can error out here if we know something already exists,
but we will also create request conditions as a safeguard against overwriting something that was created
between our check and put.
*/
BlobRequestConditions requestConditions = null;
if (!replaceExisting) {
if (!destinationStatus.equals(DirectoryStatus.DOES_NOT_EXIST)) {
throw Utility.logError(logger, new FileAlreadyExistsException(destination.toString()));
}
requestConditions = new BlobRequestConditions().setIfNoneMatch("*");
}
/*
More path validation
Check that the parent for the destination exists. We only need to perform this check if there is nothing
currently at the destination, for if the destination exists, its parent at least weakly exists and we
can skip a service call.
*/
if (destinationStatus.equals(DirectoryStatus.DOES_NOT_EXIST) && !checkParentDirectoryExists(destination)) {
throw Utility.logError(logger, new IOException("Parent directory of destination location does not exist."
+ "The destination path is therefore invalid. Destination: " + destination.toString()));
}
/*
Try to copy the resource at the source path.
There is an optimization here where we try to do the copy first and only check for a virtual directory if
there's a 404. In the cases of files and concrete directories, this only requires one request. For virtual
directories, however, this requires three requests: failed copy, check status, create directory. Depending on
customer scenarios and how many virtual directories they copy, it could be better to check the directory status
first and then do a copy or createDir, which would always be two requests for all resource types.
*/
try {
SyncPoller<BlobCopyInfo, Void> pollResponse =
destinationBlob.beginCopy(sourceBlob.getBlobUrl(), null, null, null, null, requestConditions, null);
// Bounded wait (COPY_TIMEOUT_SECONDS) so a stalled service-side copy cannot hang the caller.
pollResponse.waitForCompletion(Duration.ofSeconds(COPY_TIMEOUT_SECONDS));
} catch (BlobStorageException e) {
if (e.getErrorCode().equals(BlobErrorCode.BLOB_NOT_FOUND)
&& !checkDirStatus(sourceBlob).equals(DirectoryStatus.DOES_NOT_EXIST)) {
/*
We already checked that the parent exists and validated the paths above, so we can put the blob
directly.
*/
putDirectoryBlob(destinationBlob, null, null, requestConditions);
} else {
throw Utility.logError(logger, new IOException(e));
}
} catch (RuntimeException e) {
throw Utility.logError(logger, new IOException(e));
}
} | class AzureFileSystemProvider extends FileSystemProvider {
private final ClientLogger logger = new ClientLogger(AzureFileSystemProvider.class);
// HTTP content-header names that may be supplied as file attributes (see createDirectory javadoc).
public static final String CONTENT_TYPE = "Content-Type";
public static final String CONTENT_DISPOSITION = "Content-Disposition";
public static final String CONTENT_LANGUAGE = "Content-Language";
public static final String CONTENT_ENCODING = "Content-Encoding";
public static final String CONTENT_MD5 = "Content-MD5";
public static final String CACHE_CONTROL = "Cache-Control";
// Query-string key that carries the storage account name in an azb: URI.
private static final String ACCOUNT_QUERY_KEY = "account";
// Metadata key whose presence marks a zero-length blob as a concrete directory.
static final String DIR_METADATA_MARKER = "is_hdi_folder";
// Open file systems keyed by account name; concurrent because provider methods may race.
private final ConcurrentMap<String, FileSystem> openFileSystems;
/**
* Creates an AzureFileSystemProvider.
*/
public AzureFileSystemProvider() {
// Start with no open file systems; entries are registered by newFileSystem.
this.openFileSystems = new ConcurrentHashMap<>();
}
/**
* Returns {@code "azb".}
*/
@Override
public String getScheme() {
// The URI scheme this provider handles.
return "azb";
}
/**
* The format of a {@code URI} identifying a file system is {@code "azb:
* <p>
* Once closed, a file system with the same identifier may be reopened.
* {@inheritDoc}
*/
@Override
public FileSystem newFileSystem(URI uri, Map<String, ?> config) throws IOException {
    String accountName = extractAccountName(uri);
    // Fast-fail on an obvious duplicate before constructing the file system.
    if (this.openFileSystems.containsKey(accountName)) {
        throw Utility.logError(this.logger, new FileSystemAlreadyExistsException("Name: " + accountName));
    }
    AzureFileSystem afs = new AzureFileSystem(this, accountName, config);
    // putIfAbsent closes the check-then-act race on the concurrent map: if another thread
    // registered the same account between the check above and here, reject this call too.
    if (this.openFileSystems.putIfAbsent(accountName, afs) != null) {
        throw Utility.logError(this.logger, new FileSystemAlreadyExistsException("Name: " + accountName));
    }
    return afs;
}
/**
* The format of a {@code URI} identifying an file system is {@code "azb:
* <p>
* Trying to retrieve a closed file system will throw a {@link FileSystemNotFoundException}. Once closed, a
* file system with the same identifier may be reopened.
* {@inheritDoc}
*/
@Override
public FileSystem getFileSystem(URI uri) {
    String accountName = extractAccountName(uri);
    // Single lookup instead of containsKey+get: avoids a second hash probe and the race
    // where the entry is removed between the two calls.
    FileSystem fileSystem = this.openFileSystems.get(accountName);
    if (fileSystem == null) {
        throw Utility.logError(this.logger, new FileSystemNotFoundException("Name: " + accountName));
    }
    return fileSystem;
}
/**
* {@inheritDoc}
*/
@Override
public Path getPath(URI uri) {
    // Resolve the file system for this URI's account, then interpret the URI path within it.
    FileSystem fileSystem = getFileSystem(uri);
    return fileSystem.getPath(uri.getPath());
}
/**
* {@inheritDoc}
*/
@Override
public SeekableByteChannel newByteChannel(Path path, Set<? extends OpenOption> set,
FileAttribute<?>... fileAttributes) throws IOException {
// TODO(review): stub — returns null instead of a channel; confirm callers tolerate null.
return null;
}
/**
* {@inheritDoc}
*/
@Override
public DirectoryStream<Path> newDirectoryStream(Path path, DirectoryStream.Filter<? super Path> filter)
throws IOException {
// TODO(review): stub — returns null instead of a directory stream.
return null;
}
/**
* The existence of a directory in the {@code AzureFileSystem} is defined on two levels. <i>Weak existence</i> is
* defined by the presence of a non-zero number of blobs prefixed with the directory's path. This concept is also
* known as a <i>virtual directory</i> and enables the file system to work with containers that were pre-loaded
* with data by another source but need to be accessed by this file system. <i>Strong existence</i> is defined as
* the presence of an actual storage resource at the given path, which in the case of directories, is a zero-length
* blob whose name is the directory path with a particular metadata field indicating the blob's status as a
* directory. This is also known as a <i>concrete directory</i>. Directories created by this file system will
* strongly exist. Operations targeting directories themselves as the object (e.g. setting properties) will target
* marker blobs underlying concrete directories. Other operations (e.g. listing) will operate on the blob-name
* prefix.
* <p>
* This method fulfills the nio contract of: "The check for the existence of the file and the creation of the
* directory if it does not exist are a single operation that is atomic with respect to all other filesystem
* activities that might affect the directory." More specifically, this method will atomically check for <i>strong
* existence</i> of another file or directory at the given path and fail if one is present. On the other hand, we
* only check for <i>weak existence</i> of the parent to determine if the given path is valid. Additionally, the
* action of checking whether the parent exists, is <i>not</i> atomic with the creation of the directory. Note that
* while it is possible that the parent may be deleted between when the parent is determined to exist and the
* creation of the child, the creation of the child will always ensure the existence of a virtual parent, so the
* child will never be left floating and unreachable. The different checks on parent and child is due to limitations
* in the Storage service API.
* <p>
* There may be some unintuitive behavior when working with directories in this file system, particularly virtual
* directories(usually those not created by this file system). A virtual directory will disappear as soon as all its
* children have been deleted. Furthermore, if a directory with the given path weakly exists at the time of calling
* this method, this method will still return success and create a concrete directory at the target location.
* In other words, it is possible to "double create" a directory if it first weakly exists and then is strongly
* created. This is both because it is impossible to atomically check if a virtual directory exists while creating a
* concrete directory and because such behavior will have minimal side effects--no files will be overwritten and the
* directory will still be available for writing as intended, though it may not be empty.
* <p>
* This method will attempt to extract standard HTTP content headers from the list of file attributes to set them
* as blob headers. All other attributes will be set as blob metadata. The value of every attribute will be
* converted to a {@code String} with the exception of the Content-MD5 attribute which expects a {@code byte[]}.
* When extracting the content headers, the following strings will be used for comparison (constants for these
* values can be found on this type):
* <ul>
* <li>{@code Content-Type}</li>
* <li>{@code Content-Disposition}</li>
* <li>{@code Content-Language}</li>
* <li>{@code Content-Encoding}</li>
* <li>{@code Content-MD5}</li>
* <li>{@code Cache-Control}</li>
* </ul>
* Note that these properties also have a particular semantic in that if one is specified, all are updated. In other
* words, if any of the above is set, all those that are not set will be cleared. See the
* <a href="https:
* information.
*
* {@inheritDoc}
*/
@Override
public void createDirectory(Path path, FileAttribute<?>... fileAttributes) throws IOException {
    if (!(path instanceof AzurePath)) {
        throw Utility.logError(logger, new IllegalArgumentException("This provider cannot operate on subtypes of "
            + "Path other than AzurePath"));
    }
    fileAttributes = fileAttributes == null ? new FileAttribute<?>[0] : fileAttributes;
    BlobClient client = ((AzurePath) path).toBlobClient();
    // A root corresponds to a container; containers cannot be created through this API.
    Path root = path.getRoot();
    if (root != null && root.equals(path)) {
        throw Utility.logError(logger, new IOException("Creating a root directory is not supported."));
    }
    // Guard clause: the parent need only weakly exist (virtual directory) for the path to be valid.
    if (!checkParentDirectoryExists(path)) {
        throw Utility.logError(logger, new IOException("Parent directory does not exist for path: "
            + path.toString()));
    }
    try {
        List<FileAttribute<?>> attributeList = new ArrayList<>(Arrays.asList(fileAttributes));
        BlobHttpHeaders headers = Utility.extractHttpHeaders(attributeList, logger);
        Map<String, String> metadata = Utility.convertAttributesToMetadata(attributeList);
        // If-None-Match: * makes the write conditional on nothing strongly existing at the target.
        putDirectoryBlob(client, headers, metadata, new BlobRequestConditions().setIfNoneMatch("*"));
    } catch (BlobStorageException e) {
        // Only a 409 that is specifically BLOB_ALREADY_EXISTS means something is already at the
        // path; other conflicts (e.g. lease issues) are genuine errors, not "file exists".
        if (e.getStatusCode() == HttpURLConnection.HTTP_CONFLICT
            && e.getErrorCode().equals(BlobErrorCode.BLOB_ALREADY_EXISTS)) {
            throw Utility.logError(logger, new FileAlreadyExistsException(path.toString()));
        } else {
            // Message typo fixed: "occured" -> "occurred".
            throw Utility.logError(logger, new IOException("An error occurred when creating the directory", e));
        }
    }
}
/**
* Creates the actual directory marker. This method should only be used when any necessary checks for proper
* conditions of directory creation (e.g. parent existence) have already been performed.
*
* @param destinationClient A blobClient pointing to the location where the directory should be put.
* @param headers Any headers that should be associated with the directory.
* @param metadata Any metadata that should be associated with the directory. This method will add the necessary
* metadata to distinguish this blob as a directory.
* @param requestConditions Any necessary request conditions to pass when creating the directory blob.
*/
private void putDirectoryBlob(BlobClient destinationClient, BlobHttpHeaders headers, Map<String, String> metadata,
    BlobRequestConditions requestConditions) {
    // Stamp the directory marker into the metadata, then commit an empty block list so the
    // resulting blob is zero-length and serves purely as the concrete-directory marker.
    Map<String, String> directoryMetadata = prepareMetadataForDirectory(metadata);
    destinationClient.getBlockBlobClient().commitBlockListWithResponse(Collections.emptyList(), headers,
        directoryMetadata, null, requestConditions, null, null);
}
/**
* Checks for the existence of the parent of the given path. We do not check for the actual marker blob as parents
* need only weakly exist.
*
* If the parent is a root (container), it will be assumed to exist, so it must be validated elsewhere that the
* container is a legitimate root within this file system.
*/
boolean checkParentDirectoryExists(Path path) throws IOException {
    /*
    If the parent is just the root (or null, which means the parent is implicitly the default directory which is a
    root), that means we are checking a container, which is always considered to exist. Otherwise, perform normal
    existence check.
    */
    // Reuse the already-computed parent instead of calling path.getParent() a second time.
    Path parent = path.getParent();
    return parent == null || parent.equals(path.getRoot())
        || checkDirectoryExists(((AzurePath) parent).toBlobClient());
}
/**
* Checks whether a directory exists by either being empty or having children.
*/
/**
 * Returns true when the directory weakly or strongly exists (empty concrete directory, or a
 * prefix with children).
 */
boolean checkDirectoryExists(BlobClient dirBlobClient) {
    // Message fixed: this method takes exactly one parameter, so "one or both" was misleading.
    if (dirBlobClient == null) {
        throw Utility.logError(logger, new IllegalArgumentException("The directory blob client was null."));
    }
    DirectoryStatus dirStatus = checkDirStatus(dirBlobClient);
    return dirStatus.equals(DirectoryStatus.EMPTY) || dirStatus.equals(DirectoryStatus.NOT_EMPTY);
}
/**
 * Ensures the directory marker key is present in the metadata. Allocates a new map when the
 * caller passes null; otherwise the caller's map is updated in place and returned.
 */
Map<String, String> prepareMetadataForDirectory(Map<String, String> metadata) {
    Map<String, String> result = metadata == null ? new HashMap<>() : metadata;
    result.put(DIR_METADATA_MARKER, "true");
    return result;
}
BlobContainerClient getContainerClient(BlobClient client) {
    // Reuse the blob client's pipeline (auth, retries, policies) so the container client
    // behaves identically to the client it was derived from.
    BlobContainerClientBuilder builder = new BlobContainerClientBuilder()
        .endpoint(client.getBlobUrl())
        .pipeline(client.getHttpPipeline());
    return builder.buildClient();
}
/**
* {@inheritDoc}
*/
@Override
public void delete(Path path) throws IOException {
// TODO(review): not yet implemented — delete is currently a silent no-op; confirm intended.
}
/**
* As stated in the nio docs, this method is not atomic. More specifically, the checks necessary to validate the
* inputs and state of the file system are not atomic with the actual copying of data. If the copy is triggered,
* the copy itself is atomic and only a complete copy will ever be left at the destination.
*
* In addition to those in the nio javadocs, this method has the following requirements for successful completion.
* {@link StandardCopyOption
* if this option is not passed, an {@link UnsupportedOperationException} will be thrown. Neither the source nor the
* destination can be a root directory; if either is a root directory, an {@link IllegalArgumentException} will be
* thrown. The parent directory of the destination must at least weakly exist; if it does not, an
* {@link IOException} will be thrown. The only supported option other than
* {@link StandardCopyOption
* other option will result in an {@link UnsupportedOperationException}.
*
* This method supports both virtual and concrete directories as both the source and destination. Unlike when
* creating a directory, the existence of a virtual directory at the destination will cause this operation to fail.
* This is in order to prevent the possibility of overwriting a non-empty virtual directory with a file. Still, as
* mentioned above, this check is not atomic with the creation of the resultant directory.
*
* {@inheritDoc}
* @see
*/
@Override
/**
* This method will check if a directory is extant and/or empty and accommodates virtual directories. This method
* will not check the status of root directories.
*/
DirectoryStatus checkDirStatus(BlobClient dirBlobClient) {
    if (dirBlobClient == null) {
        throw Utility.logError(logger, new IllegalArgumentException("The blob client was null."));
    }
    BlobContainerClient containerClient = getContainerClient(dirBlobClient);
    // Two results are enough to distinguish every case below.
    ListBlobsOptions listOptions = new ListBlobsOptions().setMaxResultsPerPage(2)
        .setPrefix(dirBlobClient.getBlobName())
        .setDetails(new BlobListDetails().setRetrieveMetadata(true));
    /*
    List on the prefix (names equal to the prefix are included in listings):
      - no items: neither a virtual nor a concrete directory exists.
      - two or more items: the directory exists and is not empty.
      - one item whose name differs from the prefix: a virtual directory with a single child.
      - one item matching the prefix that carries the directory marker: an empty concrete directory.
      - one item matching the prefix without the marker: a regular blob, not a directory.
    */
    Iterator<BlobItem> blobIterator = containerClient.listBlobsByHierarchy(AzureFileSystem.PATH_SEPARATOR,
        listOptions, null).iterator();
    if (!blobIterator.hasNext()) {
        return DirectoryStatus.DOES_NOT_EXIST;
    }
    BlobItem item = blobIterator.next();
    if (blobIterator.hasNext()) {
        return DirectoryStatus.NOT_EMPTY;
    }
    if (!item.getName().equals(dirBlobClient.getBlobName())) {
        // Single child under the prefix, e.g. "foo/bar" implies virtual directory "foo".
        return DirectoryStatus.NOT_EMPTY;
    }
    if (item.getMetadata() != null && item.getMetadata().containsKey(DIR_METADATA_MARKER)) {
        return DirectoryStatus.EMPTY;
    }
    return DirectoryStatus.NOT_A_DIRECTORY;
}
/*int checkRootDirStatus(BlobContainerClient rootClient) {
}*/
/**
* {@inheritDoc}
*/
@Override
public void move(Path path, Path path1, CopyOption... copyOptions) throws IOException {
// TODO(review): not yet implemented — move is currently a silent no-op; confirm intended.
}
/**
* {@inheritDoc}
*/
@Override
public boolean isSameFile(Path path, Path path1) throws IOException {
// TODO(review): stub — always reports the paths as different; confirm intended.
return false;
}
/**
* {@inheritDoc}
*/
@Override
public boolean isHidden(Path path) throws IOException {
// TODO(review): stub — nothing is ever reported hidden; confirm intended.
return false;
}
/**
* {@inheritDoc}
*/
@Override
public FileStore getFileStore(Path path) throws IOException {
// TODO(review): stub — returns null instead of a FileStore; callers must tolerate null.
return null;
}
/**
* {@inheritDoc}
*/
@Override
public void checkAccess(Path path, AccessMode... accessModes) throws IOException {
// TODO(review): stub — access is never denied because no checks are performed yet.
}
/**
* {@inheritDoc}
*/
@Override
public <V extends FileAttributeView> V getFileAttributeView(Path path, Class<V> aClass, LinkOption... linkOptions) {
// TODO(review): stub — returns null instead of an attribute view.
return null;
}
/**
* {@inheritDoc}
*/
@Override
public <A extends BasicFileAttributes> A readAttributes(Path path, Class<A> aClass, LinkOption... linkOptions)
throws IOException {
// TODO(review): stub — returns null instead of attributes.
return null;
}
/**
* {@inheritDoc}
*/
@Override
public Map<String, Object> readAttributes(Path path, String s, LinkOption... linkOptions) throws IOException {
// TODO(review): stub — returns null instead of an attribute map.
return null;
}
/**
* {@inheritDoc}
*/
@Override
public void setAttribute(Path path, String s, Object o, LinkOption... linkOptions) throws IOException {
// TODO(review): stub — attribute writes are currently ignored.
}
// Deregisters a closed file system so the same account name can be reopened via newFileSystem.
void closeFileSystem(String fileSystemName) {
this.openFileSystems.remove(fileSystemName);
}
// Parses the storage account name out of the "account=" query parameter of an azb: URI.
// NOTE(review): this uses a blocking Flux pipeline for simple query-string parsing; the LAST
// "account=" parameter wins (blockLast). A plain loop would avoid the reactive overhead — confirm.
private String extractAccountName(URI uri) {
if (!uri.getScheme().equals(this.getScheme())) {
throw Utility.logError(this.logger, new IllegalArgumentException(
"URI scheme does not match this provider"));
}
if (CoreUtils.isNullOrEmpty(uri.getQuery())) {
throw Utility.logError(this.logger, new IllegalArgumentException("URI does not contain a query "
+ "component. FileSystems require a URI of the format \"azb:
}
String accountName = Flux.fromArray(uri.getQuery().split("&"))
.filter(s -> s.startsWith(ACCOUNT_QUERY_KEY + "="))
.switchIfEmpty(Mono.error(Utility.logError(this.logger, new IllegalArgumentException(
"URI does not contain an \"" + ACCOUNT_QUERY_KEY + "=\" parameter. FileSystems require a URI "
+ "of the format \"azb:
.map(s -> s.substring(ACCOUNT_QUERY_KEY.length() + 1))
.blockLast();
if (CoreUtils.isNullOrEmpty(accountName)) {
throw Utility.logError(logger, new IllegalArgumentException("No account name provided in URI query."));
}
return accountName;
}
} | class AzureFileSystemProvider extends FileSystemProvider {
private final ClientLogger logger = new ClientLogger(AzureFileSystemProvider.class);
// HTTP content-header names that may be supplied as file attributes (see createDirectory javadoc).
public static final String CONTENT_TYPE = "Content-Type";
public static final String CONTENT_DISPOSITION = "Content-Disposition";
public static final String CONTENT_LANGUAGE = "Content-Language";
public static final String CONTENT_ENCODING = "Content-Encoding";
public static final String CONTENT_MD5 = "Content-MD5";
public static final String CACHE_CONTROL = "Cache-Control";
// Query-string key that carries the storage account name in an azb: URI.
private static final String ACCOUNT_QUERY_KEY = "account";
// Maximum time to wait for a service-side blob copy to complete (see copy()).
private static final int COPY_TIMEOUT_SECONDS = 30;
// Metadata key whose presence marks a zero-length blob as a concrete directory.
static final String DIR_METADATA_MARKER = "is_hdi_folder";
// Open file systems keyed by account name; concurrent because provider methods may race.
private final ConcurrentMap<String, FileSystem> openFileSystems;
/**
* Creates an AzureFileSystemProvider.
*/
public AzureFileSystemProvider() {
// Start with no open file systems; entries are registered by newFileSystem.
this.openFileSystems = new ConcurrentHashMap<>();
}
/**
* Returns {@code "azb".}
*/
@Override
public String getScheme() {
// The URI scheme this provider handles.
return "azb";
}
/**
* The format of a {@code URI} identifying a file system is {@code "azb:
* <p>
* Once closed, a file system with the same identifier may be reopened.
* {@inheritDoc}
*/
@Override
public FileSystem newFileSystem(URI uri, Map<String, ?> config) throws IOException {
    String accountName = extractAccountName(uri);
    // Fast-fail on an obvious duplicate before constructing the file system.
    if (this.openFileSystems.containsKey(accountName)) {
        throw Utility.logError(this.logger, new FileSystemAlreadyExistsException("Name: " + accountName));
    }
    AzureFileSystem afs = new AzureFileSystem(this, accountName, config);
    // putIfAbsent closes the check-then-act race on the concurrent map: if another thread
    // registered the same account between the check above and here, reject this call too.
    if (this.openFileSystems.putIfAbsent(accountName, afs) != null) {
        throw Utility.logError(this.logger, new FileSystemAlreadyExistsException("Name: " + accountName));
    }
    return afs;
}
/**
* The format of a {@code URI} identifying an file system is {@code "azb:
* <p>
* Trying to retrieve a closed file system will throw a {@link FileSystemNotFoundException}. Once closed, a
* file system with the same identifier may be reopened.
* {@inheritDoc}
*/
@Override
public FileSystem getFileSystem(URI uri) {
    String accountName = extractAccountName(uri);
    // Single lookup instead of containsKey+get: avoids a second hash probe and the race
    // where the entry is removed between the two calls.
    FileSystem fileSystem = this.openFileSystems.get(accountName);
    if (fileSystem == null) {
        throw Utility.logError(this.logger, new FileSystemNotFoundException("Name: " + accountName));
    }
    return fileSystem;
}
/**
* {@inheritDoc}
*/
@Override
public Path getPath(URI uri) {
    // Resolve the file system for this URI's account, then interpret the URI path within it.
    FileSystem fileSystem = getFileSystem(uri);
    return fileSystem.getPath(uri.getPath());
}
/**
* {@inheritDoc}
*/
@Override
public SeekableByteChannel newByteChannel(Path path, Set<? extends OpenOption> set,
FileAttribute<?>... fileAttributes) throws IOException {
// TODO(review): stub — returns null instead of a channel; confirm callers tolerate null.
return null;
}
/**
* {@inheritDoc}
*/
@Override
public DirectoryStream<Path> newDirectoryStream(Path path, DirectoryStream.Filter<? super Path> filter)
throws IOException {
// TODO(review): stub — returns null instead of a directory stream.
return null;
}
/**
* The existence of a directory in the {@code AzureFileSystem} is defined on two levels. <i>Weak existence</i> is
* defined by the presence of a non-zero number of blobs prefixed with the directory's path. This concept is also
* known as a <i>virtual directory</i> and enables the file system to work with containers that were pre-loaded
* with data by another source but need to be accessed by this file system. <i>Strong existence</i> is defined as
* the presence of an actual storage resource at the given path, which in the case of directories, is a zero-length
* blob whose name is the directory path with a particular metadata field indicating the blob's status as a
* directory. This is also known as a <i>concrete directory</i>. Directories created by this file system will
* strongly exist. Operations targeting directories themselves as the object (e.g. setting properties) will target
* marker blobs underlying concrete directories. Other operations (e.g. listing) will operate on the blob-name
* prefix.
* <p>
* This method fulfills the nio contract of: "The check for the existence of the file and the creation of the
* directory if it does not exist are a single operation that is atomic with respect to all other filesystem
* activities that might affect the directory." More specifically, this method will atomically check for <i>strong
* existence</i> of another file or directory at the given path and fail if one is present. On the other hand, we
* only check for <i>weak existence</i> of the parent to determine if the given path is valid. Additionally, the
* action of checking whether the parent exists, is <i>not</i> atomic with the creation of the directory. Note that
* while it is possible that the parent may be deleted between when the parent is determined to exist and the
* creation of the child, the creation of the child will always ensure the existence of a virtual parent, so the
* child will never be left floating and unreachable. The different checks on parent and child is due to limitations
* in the Storage service API.
* <p>
* There may be some unintuitive behavior when working with directories in this file system, particularly virtual
* directories(usually those not created by this file system). A virtual directory will disappear as soon as all its
* children have been deleted. Furthermore, if a directory with the given path weakly exists at the time of calling
* this method, this method will still return success and create a concrete directory at the target location.
* In other words, it is possible to "double create" a directory if it first weakly exists and then is strongly
* created. This is both because it is impossible to atomically check if a virtual directory exists while creating a
* concrete directory and because such behavior will have minimal side effects--no files will be overwritten and the
* directory will still be available for writing as intended, though it may not be empty.
* <p>
* This method will attempt to extract standard HTTP content headers from the list of file attributes to set them
* as blob headers. All other attributes will be set as blob metadata. The value of every attribute will be
* converted to a {@code String} with the exception of the Content-MD5 attribute which expects a {@code byte[]}.
* When extracting the content headers, the following strings will be used for comparison (constants for these
* values can be found on this type):
* <ul>
* <li>{@code Content-Type}</li>
* <li>{@code Content-Disposition}</li>
* <li>{@code Content-Language}</li>
* <li>{@code Content-Encoding}</li>
* <li>{@code Content-MD5}</li>
* <li>{@code Cache-Control}</li>
* </ul>
* Note that these properties also have a particular semantic in that if one is specified, all are updated. In other
* words, if any of the above is set, all those that are not set will be cleared. See the
* <a href="https:
* information.
*
* {@inheritDoc}
*/
@Override
    public void createDirectory(Path path, FileAttribute<?>... fileAttributes) throws IOException {
        // Only AzurePath instances can be mapped onto blobs by this provider.
        if (!(path instanceof AzurePath)) {
            throw Utility.logError(logger, new IllegalArgumentException("This provider cannot operate on subtypes of "
                + "Path other than AzurePath"));
        }
        // Normalize a null varargs array so the attribute extraction below can iterate safely.
        fileAttributes = fileAttributes == null ? new FileAttribute<?>[0] : fileAttributes;
        BlobClient client = ((AzurePath) path).toBlobClient();
        // Roots correspond to containers; container creation is explicitly out of scope here.
        Path root = path.getRoot();
        if (root != null && root.equals(path)) {
            throw Utility.logError(logger, new IOException("Creating a root directory is not supported."));
        }
        // The parent need only weakly exist (virtual directory) -- see checkParentDirectoryExists.
        if (checkParentDirectoryExists(path)) {
            try {
                // Split the supplied attributes into standard HTTP content headers and plain blob metadata.
                List<FileAttribute<?>> attributeList = new ArrayList<>(Arrays.asList(fileAttributes));
                BlobHttpHeaders headers = Utility.extractHttpHeaders(attributeList, logger);
                Map<String, String> metadata = Utility.convertAttributesToMetadata(attributeList);
                // If-None-Match: * makes the put fail atomically when a blob already exists at this path.
                putDirectoryBlob(client, headers, metadata, new BlobRequestConditions().setIfNoneMatch("*"));
            } catch (BlobStorageException e) {
                // Map only the specific "blob already exists" conflict to the nio exception callers expect;
                // any other service failure is surfaced as a generic IOException with the cause preserved.
                if (e.getStatusCode() == HttpURLConnection.HTTP_CONFLICT
                    && e.getErrorCode().equals(BlobErrorCode.BLOB_ALREADY_EXISTS)) {
                    throw Utility.logError(logger, new FileAlreadyExistsException(path.toString()));
                } else {
                    // NOTE(review): "occured" is misspelled in this user-facing message.
                    throw Utility.logError(logger, new IOException("An error occured when creating the directory", e));
                }
            }
        } else {
            throw Utility.logError(logger, new IOException("Parent directory does not exist for path: "
                + path.toString()));
        }
    }
/**
* Creates the actual directory marker. This method should only be used when any necessary checks for proper
* conditions of directory creation (e.g. parent existence) have already been performed.
*
* @param destinationClient A blobClient pointing to the location where the directory should be put.
* @param headers Any headers that should be associated with the directory.
* @param metadata Any metadata that should be associated with the directory. This method will add the necessary
* metadata to distinguish this blob as a directory.
* @param requestConditions Any necessary request conditions to pass when creating the directory blob.
*/
private void putDirectoryBlob(BlobClient destinationClient, BlobHttpHeaders headers, Map<String, String> metadata,
BlobRequestConditions requestConditions) {
metadata = prepareMetadataForDirectory(metadata);
destinationClient.getBlockBlobClient().commitBlockListWithResponse(Collections.emptyList(), headers,
metadata, null, requestConditions, null, null);
}
/**
* Checks for the existence of the parent of the given path. We do not check for the actual marker blob as parents
* need only weakly exist.
*
* If the parent is a root (container), it will be assumed to exist, so it must be validated elsewhere that the
* container is a legitimate root within this file system.
*/
boolean checkParentDirectoryExists(Path path) throws IOException {
/*
If the parent is just the root (or null, which means the parent is implicitly the default directory which is a
root), that means we are checking a container, which is always considered to exist. Otherwise, perform normal
existence check.
*/
Path parent = path.getParent();
return (parent == null || parent.equals(path.getRoot()))
|| checkDirectoryExists(((AzurePath) path.getParent()).toBlobClient());
}
/**
* Checks whether a directory exists by either being empty or having children.
*/
boolean checkDirectoryExists(BlobClient dirBlobClient) {
if (dirBlobClient == null) {
throw Utility.logError(logger, new IllegalArgumentException("One or both of the parameters was null."));
}
DirectoryStatus dirStatus = checkDirStatus(dirBlobClient);
return dirStatus.equals(DirectoryStatus.EMPTY) || dirStatus.equals(DirectoryStatus.NOT_EMPTY);
}
Map<String, String> prepareMetadataForDirectory(Map<String, String> metadata) {
if (metadata == null) {
metadata = new HashMap<>();
}
metadata.put(DIR_METADATA_MARKER, "true");
return metadata;
}
BlobContainerClient getContainerClient(BlobClient client) {
return new BlobContainerClientBuilder().endpoint(client.getBlobUrl())
.pipeline(client.getHttpPipeline())
.buildClient();
}
/**
* {@inheritDoc}
*/
@Override
    public void delete(Path path) throws IOException {
        // TODO: not yet implemented -- currently a silent no-op; nothing is deleted.
    }
/**
* As stated in the nio docs, this method is not atomic. More specifically, the checks necessary to validate the
* inputs and state of the file system are not atomic with the actual copying of data. If the copy is triggered,
* the copy itself is atomic and only a complete copy will ever be left at the destination.
*
* In addition to those in the nio javadocs, this method has the following requirements for successful completion.
* {@link StandardCopyOption
* if this option is not passed, an {@link UnsupportedOperationException} will be thrown. Neither the source nor the
* destination can be a root directory; if either is a root directory, an {@link IllegalArgumentException} will be
* thrown. The parent directory of the destination must at least weakly exist; if it does not, an
* {@link IOException} will be thrown. The only supported option other than
* {@link StandardCopyOption
* other option will result in an {@link UnsupportedOperationException}.
*
* This method supports both virtual and concrete directories as both the source and destination. Unlike when
* creating a directory, the existence of a virtual directory at the destination will cause this operation to fail.
* This is in order to prevent the possibility of overwriting a non-empty virtual directory with a file. Still, as
* mentioned above, this check is not atomic with the creation of the resultant directory.
*
* {@inheritDoc}
* @see
*/
@Override
/**
* This method will check if a directory is extant and/or empty and accommodates virtual directories. This method
* will not check the status of root directories.
*/
    DirectoryStatus checkDirStatus(BlobClient dirBlobClient) {
        if (dirBlobClient == null) {
            throw Utility.logError(logger, new IllegalArgumentException("The blob client was null."));
        }
        BlobContainerClient containerClient = getContainerClient(dirBlobClient);
        // Two results per page suffice to distinguish "empty" from "not empty"; metadata retrieval is
        // required so the directory marker key can be inspected below.
        ListBlobsOptions listOptions = new ListBlobsOptions().setMaxResultsPerPage(2)
            .setPrefix(dirBlobClient.getBlobName())
            .setDetails(new BlobListDetails().setRetrieveMetadata(true));
        /*
        Do a list on prefix.
        Zero elements means no virtual dir. Does not exist.
        One element that matches this dir means empty.
        One element that doesn't match this dir or more than one element. Not empty.
        One element that matches the name but does not have a directory marker means the resource is not a directory.
        Note that blob names that match the prefix exactly are returned in listing operations.
        */
        Iterator<BlobItem> blobIterator = containerClient.listBlobsByHierarchy(AzureFileSystem.PATH_SEPARATOR,
            listOptions, null).iterator();
        if (!blobIterator.hasNext()) {
            return DirectoryStatus.DOES_NOT_EXIST;
        } else {
            BlobItem item = blobIterator.next();
            if (blobIterator.hasNext()) {
                // At least two entries under the prefix: the directory definitely has children.
                return DirectoryStatus.NOT_EMPTY;
            }
            if (!item.getName().equals(dirBlobClient.getBlobName())) {
                /*
                Names do not match. Must be a virtual dir with one item. e.g. blob with name "foo/bar" means dir "foo"
                exists.
                */
                return DirectoryStatus.NOT_EMPTY;
            }
            if (item.getMetadata() != null && item.getMetadata().containsKey(DIR_METADATA_MARKER)) {
                // The single match is a concrete directory marker blob with no children.
                return DirectoryStatus.EMPTY;
            }
            // The single match is an ordinary blob: the path names a file, not a directory.
            return DirectoryStatus.NOT_A_DIRECTORY;
        }
    }
    /*int checkRootDirStatus(BlobContainerClient rootClient) {
    }*/
    /**
     * {@inheritDoc}
     */
    @Override
    public void move(Path path, Path path1, CopyOption... copyOptions) throws IOException {
        // TODO: not yet implemented -- silent no-op.
    }
    /**
     * {@inheritDoc}
     */
    @Override
    public boolean isSameFile(Path path, Path path1) throws IOException {
        // TODO: not yet implemented -- always reports false.
        return false;
    }
    /**
     * {@inheritDoc}
     */
    @Override
    public boolean isHidden(Path path) throws IOException {
        // TODO: not yet implemented -- always reports false.
        return false;
    }
    /**
     * {@inheritDoc}
     */
    @Override
    public FileStore getFileStore(Path path) throws IOException {
        // TODO: not yet implemented. NOTE(review): returning null may NPE in callers expecting a store.
        return null;
    }
    /**
     * {@inheritDoc}
     */
    @Override
    public void checkAccess(Path path, AccessMode... accessModes) throws IOException {
        // TODO: not yet implemented -- never denies access.
    }
    /**
     * {@inheritDoc}
     */
    @Override
    public <V extends FileAttributeView> V getFileAttributeView(Path path, Class<V> aClass, LinkOption... linkOptions) {
        // TODO: not yet implemented.
        return null;
    }
    /**
     * {@inheritDoc}
     */
    @Override
    public <A extends BasicFileAttributes> A readAttributes(Path path, Class<A> aClass, LinkOption... linkOptions)
        throws IOException {
        // TODO: not yet implemented.
        return null;
    }
    /**
     * {@inheritDoc}
     */
    @Override
    public Map<String, Object> readAttributes(Path path, String s, LinkOption... linkOptions) throws IOException {
        // TODO: not yet implemented.
        return null;
    }
    /**
     * {@inheritDoc}
     */
    @Override
    public void setAttribute(Path path, String s, Object o, LinkOption... linkOptions) throws IOException {
        // TODO: not yet implemented -- attribute changes are silently ignored.
    }
    // Called when a file system closes: drops the mapping so the same account name can be reopened
    // via newFileSystem. Removing an absent name is a harmless no-op.
    void closeFileSystem(String fileSystemName) {
        this.openFileSystems.remove(fileSystemName);
    }
    // Extracts the storage account name from the "account=" query parameter of an azb: URI.
    // NOTE(review): the message literals below appear truncated in this copy of the file -- verify against VCS.
    private String extractAccountName(URI uri) {
        // Reject URIs addressed to other providers up front.
        if (!uri.getScheme().equals(this.getScheme())) {
            throw Utility.logError(this.logger, new IllegalArgumentException(
                "URI scheme does not match this provider"));
        }
        if (CoreUtils.isNullOrEmpty(uri.getQuery())) {
            throw Utility.logError(this.logger, new IllegalArgumentException("URI does not contain a query "
                + "component. FileSystems require a URI of the format \"azb:
        }
        // Scan the query parameters for "account="; blockLast() keeps the LAST occurrence when repeated.
        // NOTE(review): a reactive Flux plus blockLast() for simple string parsing is heavyweight -- a plain
        // loop over the split array would be equivalent; confirm before simplifying.
        String accountName = Flux.fromArray(uri.getQuery().split("&"))
            .filter(s -> s.startsWith(ACCOUNT_QUERY_KEY + "="))
            .switchIfEmpty(Mono.error(Utility.logError(this.logger, new IllegalArgumentException(
                "URI does not contain an \"" + ACCOUNT_QUERY_KEY + "=\" parameter. FileSystems require a URI "
                + "of the format \"azb:
            .map(s -> s.substring(ACCOUNT_QUERY_KEY.length() + 1))
            .blockLast();
        // An "account=" parameter with an empty value is as invalid as a missing parameter.
        if (CoreUtils.isNullOrEmpty(accountName)) {
            throw Utility.logError(logger, new IllegalArgumentException("No account name provided in URI query."));
        }
        return accountName;
    }
} |
I know this isn't part of this particular PR, but do you think it might be useful to catch the specific error code here as well? | public void createDirectory(Path path, FileAttribute<?>... fileAttributes) throws IOException {
if (!(path instanceof AzurePath)) {
throw Utility.logError(logger, new IllegalArgumentException("This provider cannot operate on subtypes of "
+ "Path other than AzurePath"));
}
fileAttributes = fileAttributes == null ? new FileAttribute<?>[0] : fileAttributes;
BlobClient client = ((AzurePath) path).toBlobClient();
Path root = path.getRoot();
if (root != null && root.equals(path)) {
throw Utility.logError(logger, new IOException("Creating a root directory is not supported."));
}
if (checkParentDirectoryExists(path)) {
try {
List<FileAttribute<?>> attributeList = new ArrayList<>(Arrays.asList(fileAttributes));
BlobHttpHeaders headers = Utility.extractHttpHeaders(attributeList, logger);
Map<String, String> metadata = Utility.convertAttributesToMetadata(attributeList);
putDirectoryBlob(client, headers, metadata, new BlobRequestConditions().setIfNoneMatch("*"));
} catch (BlobStorageException e) {
if (e.getStatusCode() == HttpURLConnection.HTTP_CONFLICT) {
throw Utility.logError(logger, new FileAlreadyExistsException(path.toString()));
} else {
throw Utility.logError(logger, new IOException("An error occured when creating the directory", e));
}
}
} else {
throw Utility.logError(logger, new IOException("Parent directory does not exist for path: "
+ path.toString()));
}
} | if (e.getStatusCode() == HttpURLConnection.HTTP_CONFLICT) { | public void createDirectory(Path path, FileAttribute<?>... fileAttributes) throws IOException {
if (!(path instanceof AzurePath)) {
throw Utility.logError(logger, new IllegalArgumentException("This provider cannot operate on subtypes of "
+ "Path other than AzurePath"));
}
fileAttributes = fileAttributes == null ? new FileAttribute<?>[0] : fileAttributes;
BlobClient client = ((AzurePath) path).toBlobClient();
Path root = path.getRoot();
if (root != null && root.equals(path)) {
throw Utility.logError(logger, new IOException("Creating a root directory is not supported."));
}
if (checkParentDirectoryExists(path)) {
try {
List<FileAttribute<?>> attributeList = new ArrayList<>(Arrays.asList(fileAttributes));
BlobHttpHeaders headers = Utility.extractHttpHeaders(attributeList, logger);
Map<String, String> metadata = Utility.convertAttributesToMetadata(attributeList);
putDirectoryBlob(client, headers, metadata, new BlobRequestConditions().setIfNoneMatch("*"));
} catch (BlobStorageException e) {
if (e.getStatusCode() == HttpURLConnection.HTTP_CONFLICT
&& e.getErrorCode().equals(BlobErrorCode.BLOB_ALREADY_EXISTS)) {
throw Utility.logError(logger, new FileAlreadyExistsException(path.toString()));
} else {
throw Utility.logError(logger, new IOException("An error occured when creating the directory", e));
}
}
} else {
throw Utility.logError(logger, new IOException("Parent directory does not exist for path: "
+ path.toString()));
}
} | class AzureFileSystemProvider extends FileSystemProvider {
    // Logger passed to Utility.logError throughout this provider.
    private final ClientLogger logger = new ClientLogger(AzureFileSystemProvider.class);
    // Standard HTTP content-header attribute names; presumably consumed by Utility.extractHttpHeaders
    // when mapping file attributes to blob headers -- confirm at that call site.
    public static final String CONTENT_TYPE = "Content-Type";
    public static final String CONTENT_DISPOSITION = "Content-Disposition";
    public static final String CONTENT_LANGUAGE = "Content-Language";
    public static final String CONTENT_ENCODING = "Content-Encoding";
    public static final String CONTENT_MD5 = "Content-MD5";
    public static final String CACHE_CONTROL = "Cache-Control";
    // Query parameter of the azb: URI that carries the storage account name (see extractAccountName).
    private static final String ACCOUNT_QUERY_KEY = "account";
    // Metadata key whose presence marks a zero-length blob as a concrete directory marker
    // (written with value "true" by prepareMetadataForDirectory; checked by checkDirStatus).
    static final String DIR_METADATA_MARKER = "is_hdi_folder";
    // Open file systems keyed by account name; mutated by newFileSystem and closeFileSystem.
    private final ConcurrentMap<String, FileSystem> openFileSystems;
    /**
     * Creates an AzureFileSystemProvider.
     */
    public AzureFileSystemProvider() {
        this.openFileSystems = new ConcurrentHashMap<>();
    }
    /**
     * Returns {@code "azb".}
     */
    @Override
    public String getScheme() {
        return "azb";
    }
/**
* The format of a {@code URI} identifying a file system is {@code "azb:
* <p>
* Once closed, a file system with the same identifier may be reopened.
* {@inheritDoc}
*/
@Override
    public FileSystem newFileSystem(URI uri, Map<String, ?> config) throws IOException {
        String accountName = extractAccountName(uri);
        // NOTE(review): containsKey() followed by put() is a check-then-act sequence on a ConcurrentMap;
        // two concurrent newFileSystem calls for the same account could both succeed, the second silently
        // replacing the first. Consider putIfAbsent() if atomicity is required -- confirm intended semantics.
        if (this.openFileSystems.containsKey(accountName)) {
            throw Utility.logError(this.logger, new FileSystemAlreadyExistsException("Name: " + accountName));
        }
        AzureFileSystem afs = new AzureFileSystem(this, accountName, config);
        this.openFileSystems.put(accountName, afs);
        return afs;
    }
/**
* The format of a {@code URI} identifying an file system is {@code "azb:
* <p>
* Trying to retrieve a closed file system will throw a {@link FileSystemNotFoundException}. Once closed, a
* file system with the same identifier may be reopened.
* {@inheritDoc}
*/
@Override
public FileSystem getFileSystem(URI uri) {
String accountName = extractAccountName(uri);
if (!this.openFileSystems.containsKey(accountName)) {
throw Utility.logError(this.logger, new FileSystemNotFoundException("Name: " + accountName));
}
return this.openFileSystems.get(accountName);
}
/**
* {@inheritDoc}
*/
@Override
public Path getPath(URI uri) {
return getFileSystem(uri).getPath(uri.getPath());
}
/**
* {@inheritDoc}
*/
@Override
    public SeekableByteChannel newByteChannel(Path path, Set<? extends OpenOption> set,
        FileAttribute<?>... fileAttributes) throws IOException {
        // TODO: not yet implemented.
        return null;
    }
/**
* {@inheritDoc}
*/
@Override
    public DirectoryStream<Path> newDirectoryStream(Path path, DirectoryStream.Filter<? super Path> filter)
        throws IOException {
        // TODO: not yet implemented.
        return null;
    }
/**
* The existence of a directory in the {@code AzureFileSystem} is defined on two levels. <i>Weak existence</i> is
* defined by the presence of a non-zero number of blobs prefixed with the directory's path. This concept is also
* known as a <i>virtual directory</i> and enables the file system to work with containers that were pre-loaded
* with data by another source but need to be accessed by this file system. <i>Strong existence</i> is defined as
* the presence of an actual storage resource at the given path, which in the case of directories, is a zero-length
* blob whose name is the directory path with a particular metadata field indicating the blob's status as a
* directory. This is also known as a <i>concrete directory</i>. Directories created by this file system will
* strongly exist. Operations targeting directories themselves as the object (e.g. setting properties) will target
* marker blobs underlying concrete directories. Other operations (e.g. listing) will operate on the blob-name
* prefix.
* <p>
* This method fulfills the nio contract of: "The check for the existence of the file and the creation of the
* directory if it does not exist are a single operation that is atomic with respect to all other filesystem
* activities that might affect the directory." More specifically, this method will atomically check for <i>strong
* existence</i> of another file or directory at the given path and fail if one is present. On the other hand, we
* only check for <i>weak existence</i> of the parent to determine if the given path is valid. Additionally, the
* action of checking whether the parent exists, is <i>not</i> atomic with the creation of the directory. Note that
* while it is possible that the parent may be deleted between when the parent is determined to exist and the
* creation of the child, the creation of the child will always ensure the existence of a virtual parent, so the
* child will never be left floating and unreachable. The different checks on parent and child is due to limitations
* in the Storage service API.
* <p>
* There may be some unintuitive behavior when working with directories in this file system, particularly virtual
* directories(usually those not created by this file system). A virtual directory will disappear as soon as all its
* children have been deleted. Furthermore, if a directory with the given path weakly exists at the time of calling
* this method, this method will still return success and create a concrete directory at the target location.
* In other words, it is possible to "double create" a directory if it first weakly exists and then is strongly
* created. This is both because it is impossible to atomically check if a virtual directory exists while creating a
* concrete directory and because such behavior will have minimal side effects--no files will be overwritten and the
* directory will still be available for writing as intended, though it may not be empty.
* <p>
* This method will attempt to extract standard HTTP content headers from the list of file attributes to set them
* as blob headers. All other attributes will be set as blob metadata. The value of every attribute will be
* converted to a {@code String} with the exception of the Content-MD5 attribute which expects a {@code byte[]}.
* When extracting the content headers, the following strings will be used for comparison (constants for these
* values can be found on this type):
* <ul>
* <li>{@code Content-Type}</li>
* <li>{@code Content-Disposition}</li>
* <li>{@code Content-Language}</li>
* <li>{@code Content-Encoding}</li>
* <li>{@code Content-MD5}</li>
* <li>{@code Cache-Control}</li>
* </ul>
* Note that these properties also have a particular semantic in that if one is specified, all are updated. In other
* words, if any of the above is set, all those that are not set will be cleared. See the
* <a href="https:
* information.
*
* {@inheritDoc}
*/
@Override
/**
* Creates the actual directory marker. This method should only be used when any necessary checks for proper
* conditions of directory creation (e.g. parent existence) have already been performed.
*
* @param destinationClient A blobClient pointing to the location where the directory should be put.
* @param headers Any headers that should be associated with the directory.
* @param metadata Any metadata that should be associated with the directory. This method will add the necessary
* metadata to distinguish this blob as a directory.
* @param requestConditions Any necessary request conditions to pass when creating the directory blob.
*/
private void putDirectoryBlob(BlobClient destinationClient, BlobHttpHeaders headers, Map<String, String> metadata,
BlobRequestConditions requestConditions) {
metadata = prepareMetadataForDirectory(metadata);
destinationClient.getBlockBlobClient().commitBlockListWithResponse(Collections.emptyList(), headers,
metadata, null, requestConditions, null, null);
}
/**
* Checks for the existence of the parent of the given path. We do not check for the actual marker blob as parents
* need only weakly exist.
*
* If the parent is a root (container), it will be assumed to exist, so it must be validated elsewhere that the
* container is a legitimate root within this file system.
*/
boolean checkParentDirectoryExists(Path path) throws IOException {
/*
If the parent is just the root (or null, which means the parent is implicitly the default directory which is a
root), that means we are checking a container, which is always considered to exist. Otherwise, perform normal
existence check.
*/
Path parent = path.getParent();
return (parent == null || parent.equals(path.getRoot()))
|| checkDirectoryExists(((AzurePath) path.getParent()).toBlobClient());
}
/**
* Checks whether a directory exists by either being empty or having children.
*/
boolean checkDirectoryExists(BlobClient dirBlobClient) {
if (dirBlobClient == null) {
throw Utility.logError(logger, new IllegalArgumentException("One or both of the parameters was null."));
}
DirectoryStatus dirStatus = checkDirStatus(dirBlobClient);
return dirStatus.equals(DirectoryStatus.EMPTY) || dirStatus.equals(DirectoryStatus.NOT_EMPTY);
}
Map<String, String> prepareMetadataForDirectory(Map<String, String> metadata) {
if (metadata == null) {
metadata = new HashMap<>();
}
metadata.put(DIR_METADATA_MARKER, "true");
return metadata;
}
BlobContainerClient getContainerClient(BlobClient client) {
return new BlobContainerClientBuilder().endpoint(client.getBlobUrl())
.pipeline(client.getHttpPipeline())
.buildClient();
}
/**
* {@inheritDoc}
*/
@Override
    public void delete(Path path) throws IOException {
        // TODO: not yet implemented -- currently a silent no-op; nothing is deleted.
    }
/**
* As stated in the nio docs, this method is not atomic. More specifically, the checks necessary to validate the
* inputs and state of the file system are not atomic with the actual copying of data. If the copy is triggered,
* the copy itself is atomic and only a complete copy will ever be left at the destination.
*
* In addition to those in the nio javadocs, this method has the following requirements for successful completion.
* {@link StandardCopyOption
* if this option is not passed, an {@link UnsupportedOperationException} will be thrown. Neither the source nor the
* destination can be a root directory; if either is a root directory, an {@link IllegalArgumentException} will be
* thrown. The parent directory of the destination must at least weakly exist; if it does not, an
* {@link IOException} will be thrown. The only supported option other than
* {@link StandardCopyOption
* other option will result in an {@link UnsupportedOperationException}.
*
* This method supports both virtual and concrete directories as both the source and destination. Unlike when
* creating a directory, the existence of a virtual directory at the destination will cause this operation to fail.
* This is in order to prevent the possibility of overwriting a non-empty virtual directory with a file. Still, as
* mentioned above, this check is not atomic with the creation of the resultant directory.
*
* {@inheritDoc}
* @see
*/
@Override
    public void copy(Path source, Path destination, CopyOption... copyOptions) throws IOException {
        if (!(source instanceof AzurePath && destination instanceof AzurePath)) {
            throw Utility.logError(logger, new IllegalArgumentException("This provider cannot operate on subtypes of "
                + "Path other than AzurePath"));
        }
        // Copying a path onto itself is a no-op per the nio contract.
        if (source.equals(destination)) {
            return;
        }
        // Validate the option set: COPY_ATTRIBUTES is mandatory, REPLACE_EXISTING optional, anything else rejected.
        boolean replaceExisting = false;
        List<CopyOption> optionsList = new ArrayList<>(Arrays.asList(copyOptions));
        if (!optionsList.contains(StandardCopyOption.COPY_ATTRIBUTES)) {
            throw Utility.logError(logger, new UnsupportedOperationException("StandardCopyOption.COPY_ATTRIBUTES "
                + "must be specified as the service will always copy file attributes."));
        }
        optionsList.remove(StandardCopyOption.COPY_ATTRIBUTES);
        if (optionsList.contains(StandardCopyOption.REPLACE_EXISTING)) {
            replaceExisting = true;
            optionsList.remove(StandardCopyOption.REPLACE_EXISTING);
        }
        if (!optionsList.isEmpty()) {
            // NOTE(review): "StandareCopyOption" is misspelled in this user-facing message.
            throw Utility.logError(logger, new UnsupportedOperationException("Unsupported copy option found. Only "
                + "StandardCopyOption.COPY_ATTRIBUTES and StandareCopyOption.REPLACE_EXISTING are supported."));
        }
        // Roots are containers and cannot take part in a blob-level copy.
        if (source.equals(source.getRoot()) || destination.equals(destination.getRoot())) {
            throw Utility.logError(logger, new IllegalArgumentException(String.format("Neither source nor destination "
                + "can be just a root directory. Source: %s. Destination: %s.", source.toString(),
                destination.toString())));
        }
        BlobClient sourceBlob = ((AzurePath) source).toBlobClient();
        BlobClient destinationBlob = ((AzurePath) destination).toBlobClient();
        // Refuse to clobber a non-empty (possibly virtual) directory at the destination.
        DirectoryStatus destinationStatus = checkDirStatus(destinationBlob);
        if (destinationStatus.equals(DirectoryStatus.NOT_EMPTY)) {
            throw Utility.logError(logger, new DirectoryNotEmptyException(destination.toString()));
        }
        /*
        Set request conditions if we should not overwrite. We can error out here if we know something already exists,
        but we will also create request conditions as a safeguard against overwriting something that was created
        between our check and put.
        */
        BlobRequestConditions requestConditions = null;
        if (!replaceExisting) {
            if (!destinationStatus.equals(DirectoryStatus.DOES_NOT_EXIST)) {
                throw Utility.logError(logger, new FileAlreadyExistsException(destination.toString()));
            }
            requestConditions = new BlobRequestConditions().setIfNoneMatch("*");
        }
        /*
        More path validation
        Check that the parent for the destination exists. We only need to perform this check if there is nothing
        currently at the destination, for if the destination exists, its parent at least weakly exists and we
        can skip a service call.
        */
        if (destinationStatus.equals(DirectoryStatus.DOES_NOT_EXIST) && !checkParentDirectoryExists(destination)) {
            throw Utility.logError(logger, new IOException("Parent directory of destination location does not exist."
                + "The destination path is therefore invalid. Destination: " + destination.toString()));
        }
        /*
        Try to copy the resource at the source path.
        There is an optimization here where we try to do the copy first and only check for a virtual directory if
        there's a 404. In the cases of files and concrete directories, this only requires one request. For virtual
        directories, however, this requires three requests: failed copy, check status, create directory. Depending on
        customer scenarios and how many virtual directories they copy, it could be better to check the directory status
        first and then do a copy or createDir, which would always be two requests for all resource types.
        */
        try {
            // NOTE(review): 30s is a magic number; consider hoisting it into a named constant.
            SyncPoller<BlobCopyInfo, Void> pollResponse =
                destinationBlob.beginCopy(sourceBlob.getBlobUrl(), null, null, null, null, requestConditions, null);
            pollResponse.waitForCompletion(Duration.ofSeconds(30));
        } catch (BlobStorageException e) {
            // A 404 on the source may still mean a virtual directory: recreate it as a concrete marker blob.
            if (e.getErrorCode().equals(BlobErrorCode.BLOB_NOT_FOUND)
                && !checkDirStatus(sourceBlob).equals(DirectoryStatus.DOES_NOT_EXIST)) {
                /*
                We already checked that the parent exists and validated the paths above, so we can put the blob
                directly.
                */
                putDirectoryBlob(destinationBlob, null, null, requestConditions);
            } else {
                throw Utility.logError(logger, new IOException(e));
            }
        } catch (RuntimeException e) {
            // Poller failures (e.g. the wait timing out) surface as RuntimeException; wrap per the nio contract.
            throw Utility.logError(logger, new IOException(e));
        }
    }
/**
* This method will check if a directory is extant and/or empty and accommodates virtual directories. This method
* will not check the status of root directories.
*/
    DirectoryStatus checkDirStatus(BlobClient dirBlobClient) {
        if (dirBlobClient == null) {
            throw Utility.logError(logger, new IllegalArgumentException("The blob client was null."));
        }
        BlobContainerClient containerClient = getContainerClient(dirBlobClient);
        // Two results per page suffice to distinguish "empty" from "not empty"; metadata retrieval is
        // required so the directory marker key can be inspected below.
        ListBlobsOptions listOptions = new ListBlobsOptions().setMaxResultsPerPage(2)
            .setPrefix(dirBlobClient.getBlobName())
            .setDetails(new BlobListDetails().setRetrieveMetadata(true));
        /*
        Do a list on prefix.
        Zero elements means no virtual dir. Does not exist.
        One element that matches this dir means empty.
        One element that doesn't match this dir or more than one element. Not empty.
        One element that matches the name but does not have a directory marker means the resource is not a directory.
        Note that blob names that match the prefix exactly are returned in listing operations.
        */
        Iterator<BlobItem> blobIterator = containerClient.listBlobsByHierarchy(AzureFileSystem.PATH_SEPARATOR,
            listOptions, null).iterator();
        if (!blobIterator.hasNext()) {
            return DirectoryStatus.DOES_NOT_EXIST;
        } else {
            BlobItem item = blobIterator.next();
            if (blobIterator.hasNext()) {
                // At least two entries under the prefix: the directory definitely has children.
                return DirectoryStatus.NOT_EMPTY;
            }
            if (!item.getName().equals(dirBlobClient.getBlobName())) {
                /*
                Names do not match. Must be a virtual dir with one item. e.g. blob with name "foo/bar" means dir "foo"
                exists.
                */
                return DirectoryStatus.NOT_EMPTY;
            }
            if (item.getMetadata() != null && item.getMetadata().containsKey(DIR_METADATA_MARKER)) {
                // The single match is a concrete directory marker blob with no children.
                return DirectoryStatus.EMPTY;
            }
            // The single match is an ordinary blob: the path names a file, not a directory.
            return DirectoryStatus.NOT_A_DIRECTORY;
        }
    }
    /*int checkRootDirStatus(BlobContainerClient rootClient) {
    }*/
    /**
     * {@inheritDoc}
     */
    @Override
    public void move(Path path, Path path1, CopyOption... copyOptions) throws IOException {
        // TODO: not yet implemented -- silent no-op.
    }
    /**
     * {@inheritDoc}
     */
    @Override
    public boolean isSameFile(Path path, Path path1) throws IOException {
        // TODO: not yet implemented -- always reports false.
        return false;
    }
    /**
     * {@inheritDoc}
     */
    @Override
    public boolean isHidden(Path path) throws IOException {
        // TODO: not yet implemented -- always reports false.
        return false;
    }
    /**
     * {@inheritDoc}
     */
    @Override
    public FileStore getFileStore(Path path) throws IOException {
        // TODO: not yet implemented. NOTE(review): returning null may NPE in callers expecting a store.
        return null;
    }
    /**
     * {@inheritDoc}
     */
    @Override
    public void checkAccess(Path path, AccessMode... accessModes) throws IOException {
        // TODO: not yet implemented -- never denies access.
    }
    /**
     * {@inheritDoc}
     */
    @Override
    public <V extends FileAttributeView> V getFileAttributeView(Path path, Class<V> aClass, LinkOption... linkOptions) {
        // TODO: not yet implemented.
        return null;
    }
    /**
     * {@inheritDoc}
     */
    @Override
    public <A extends BasicFileAttributes> A readAttributes(Path path, Class<A> aClass, LinkOption... linkOptions)
        throws IOException {
        // TODO: not yet implemented.
        return null;
    }
    /**
     * {@inheritDoc}
     */
    @Override
    public Map<String, Object> readAttributes(Path path, String s, LinkOption... linkOptions) throws IOException {
        // TODO: not yet implemented.
        return null;
    }
    /**
     * {@inheritDoc}
     */
    @Override
    public void setAttribute(Path path, String s, Object o, LinkOption... linkOptions) throws IOException {
        // TODO: not yet implemented -- attribute changes are silently ignored.
    }
    // Called when a file system closes: drops the mapping so the same account name can be reopened
    // via newFileSystem. Removing an absent name is a harmless no-op.
    void closeFileSystem(String fileSystemName) {
        this.openFileSystems.remove(fileSystemName);
    }
    // Extracts the storage account name from the "account=" query parameter of an azb: URI.
    // NOTE(review): the message literals below appear truncated in this copy of the file -- verify against VCS.
    private String extractAccountName(URI uri) {
        // Reject URIs addressed to other providers up front.
        if (!uri.getScheme().equals(this.getScheme())) {
            throw Utility.logError(this.logger, new IllegalArgumentException(
                "URI scheme does not match this provider"));
        }
        if (CoreUtils.isNullOrEmpty(uri.getQuery())) {
            throw Utility.logError(this.logger, new IllegalArgumentException("URI does not contain a query "
                + "component. FileSystems require a URI of the format \"azb:
        }
        // Scan the query parameters for "account="; blockLast() keeps the LAST occurrence when repeated.
        // NOTE(review): a reactive Flux plus blockLast() for simple string parsing is heavyweight -- a plain
        // loop over the split array would be equivalent; confirm before simplifying.
        String accountName = Flux.fromArray(uri.getQuery().split("&"))
            .filter(s -> s.startsWith(ACCOUNT_QUERY_KEY + "="))
            .switchIfEmpty(Mono.error(Utility.logError(this.logger, new IllegalArgumentException(
                "URI does not contain an \"" + ACCOUNT_QUERY_KEY + "=\" parameter. FileSystems require a URI "
                + "of the format \"azb:
            .map(s -> s.substring(ACCOUNT_QUERY_KEY.length() + 1))
            .blockLast();
        // An "account=" parameter with an empty value is as invalid as a missing parameter.
        if (CoreUtils.isNullOrEmpty(accountName)) {
            throw Utility.logError(logger, new IllegalArgumentException("No account name provided in URI query."));
        }
        return accountName;
    }
} | class AzureFileSystemProvider extends FileSystemProvider {
    // Logger passed to Utility.logError throughout this provider.
    private final ClientLogger logger = new ClientLogger(AzureFileSystemProvider.class);
    // Standard HTTP content-header attribute names; presumably consumed by Utility.extractHttpHeaders
    // when mapping file attributes to blob headers -- confirm at that call site.
    public static final String CONTENT_TYPE = "Content-Type";
    public static final String CONTENT_DISPOSITION = "Content-Disposition";
    public static final String CONTENT_LANGUAGE = "Content-Language";
    public static final String CONTENT_ENCODING = "Content-Encoding";
    public static final String CONTENT_MD5 = "Content-MD5";
    public static final String CACHE_CONTROL = "Cache-Control";
    // Query parameter of the azb: URI that carries the storage account name (see extractAccountName).
    private static final String ACCOUNT_QUERY_KEY = "account";
    // Presumably bounds the copy poller's waitForCompletion -- confirm at the use site (not visible here).
    private static final int COPY_TIMEOUT_SECONDS = 30;
    // Metadata key whose presence marks a zero-length blob as a concrete directory marker
    // (written with value "true" by prepareMetadataForDirectory; checked by checkDirStatus).
    static final String DIR_METADATA_MARKER = "is_hdi_folder";
    // Open file systems keyed by account name; mutated by newFileSystem and closeFileSystem.
    private final ConcurrentMap<String, FileSystem> openFileSystems;
    /**
     * Creates an AzureFileSystemProvider.
     */
    public AzureFileSystemProvider() {
        this.openFileSystems = new ConcurrentHashMap<>();
    }
    /**
     * Returns {@code "azb".}
     */
    @Override
    public String getScheme() {
        return "azb";
    }
/**
* The format of a {@code URI} identifying a file system is {@code "azb:
* <p>
* Once closed, a file system with the same identifier may be reopened.
* {@inheritDoc}
*/
@Override
public FileSystem newFileSystem(URI uri, Map<String, ?> config) throws IOException {
String accountName = extractAccountName(uri);
if (this.openFileSystems.containsKey(accountName)) {
throw Utility.logError(this.logger, new FileSystemAlreadyExistsException("Name: " + accountName));
}
AzureFileSystem afs = new AzureFileSystem(this, accountName, config);
this.openFileSystems.put(accountName, afs);
return afs;
}
/**
* The format of a {@code URI} identifying an file system is {@code "azb:
* <p>
* Trying to retrieve a closed file system will throw a {@link FileSystemNotFoundException}. Once closed, a
* file system with the same identifier may be reopened.
* {@inheritDoc}
*/
@Override
public FileSystem getFileSystem(URI uri) {
String accountName = extractAccountName(uri);
if (!this.openFileSystems.containsKey(accountName)) {
throw Utility.logError(this.logger, new FileSystemNotFoundException("Name: " + accountName));
}
return this.openFileSystems.get(accountName);
}
/**
* {@inheritDoc}
*/
@Override
public Path getPath(URI uri) {
return getFileSystem(uri).getPath(uri.getPath());
}
/**
* {@inheritDoc}
*/
@Override
public SeekableByteChannel newByteChannel(Path path, Set<? extends OpenOption> set,
FileAttribute<?>... fileAttributes) throws IOException {
return null;
}
/**
* {@inheritDoc}
*/
@Override
public DirectoryStream<Path> newDirectoryStream(Path path, DirectoryStream.Filter<? super Path> filter)
throws IOException {
return null;
}
/**
* The existence of a directory in the {@code AzureFileSystem} is defined on two levels. <i>Weak existence</i> is
* defined by the presence of a non-zero number of blobs prefixed with the directory's path. This concept is also
* known as a <i>virtual directory</i> and enables the file system to work with containers that were pre-loaded
* with data by another source but need to be accessed by this file system. <i>Strong existence</i> is defined as
* the presence of an actual storage resource at the given path, which in the case of directories, is a zero-length
* blob whose name is the directory path with a particular metadata field indicating the blob's status as a
* directory. This is also known as a <i>concrete directory</i>. Directories created by this file system will
* strongly exist. Operations targeting directories themselves as the object (e.g. setting properties) will target
* marker blobs underlying concrete directories. Other operations (e.g. listing) will operate on the blob-name
* prefix.
* <p>
* This method fulfills the nio contract of: "The check for the existence of the file and the creation of the
* directory if it does not exist are a single operation that is atomic with respect to all other filesystem
* activities that might affect the directory." More specifically, this method will atomically check for <i>strong
* existence</i> of another file or directory at the given path and fail if one is present. On the other hand, we
* only check for <i>weak existence</i> of the parent to determine if the given path is valid. Additionally, the
* action of checking whether the parent exists, is <i>not</i> atomic with the creation of the directory. Note that
* while it is possible that the parent may be deleted between when the parent is determined to exist and the
* creation of the child, the creation of the child will always ensure the existence of a virtual parent, so the
* child will never be left floating and unreachable. The different checks on parent and child is due to limitations
* in the Storage service API.
* <p>
* There may be some unintuitive behavior when working with directories in this file system, particularly virtual
* directories(usually those not created by this file system). A virtual directory will disappear as soon as all its
* children have been deleted. Furthermore, if a directory with the given path weakly exists at the time of calling
* this method, this method will still return success and create a concrete directory at the target location.
* In other words, it is possible to "double create" a directory if it first weakly exists and then is strongly
* created. This is both because it is impossible to atomically check if a virtual directory exists while creating a
* concrete directory and because such behavior will have minimal side effects--no files will be overwritten and the
* directory will still be available for writing as intended, though it may not be empty.
* <p>
* This method will attempt to extract standard HTTP content headers from the list of file attributes to set them
* as blob headers. All other attributes will be set as blob metadata. The value of every attribute will be
* converted to a {@code String} with the exception of the Content-MD5 attribute which expects a {@code byte[]}.
* When extracting the content headers, the following strings will be used for comparison (constants for these
* values can be found on this type):
* <ul>
* <li>{@code Content-Type}</li>
* <li>{@code Content-Disposition}</li>
* <li>{@code Content-Language}</li>
* <li>{@code Content-Encoding}</li>
* <li>{@code Content-MD5}</li>
* <li>{@code Cache-Control}</li>
* </ul>
* Note that these properties also have a particular semantic in that if one is specified, all are updated. In other
* words, if any of the above is set, all those that are not set will be cleared. See the
* <a href="https:
* information.
*
* {@inheritDoc}
*/
@Override
/**
* Creates the actual directory marker. This method should only be used when any necessary checks for proper
* conditions of directory creation (e.g. parent existence) have already been performed.
*
* @param destinationClient A blobClient pointing to the location where the directory should be put.
* @param headers Any headers that should be associated with the directory.
* @param metadata Any metadata that should be associated with the directory. This method will add the necessary
* metadata to distinguish this blob as a directory.
* @param requestConditions Any necessary request conditions to pass when creating the directory blob.
*/
private void putDirectoryBlob(BlobClient destinationClient, BlobHttpHeaders headers, Map<String, String> metadata,
BlobRequestConditions requestConditions) {
metadata = prepareMetadataForDirectory(metadata);
destinationClient.getBlockBlobClient().commitBlockListWithResponse(Collections.emptyList(), headers,
metadata, null, requestConditions, null, null);
}
/**
* Checks for the existence of the parent of the given path. We do not check for the actual marker blob as parents
* need only weakly exist.
*
* If the parent is a root (container), it will be assumed to exist, so it must be validated elsewhere that the
* container is a legitimate root within this file system.
*/
boolean checkParentDirectoryExists(Path path) throws IOException {
/*
If the parent is just the root (or null, which means the parent is implicitly the default directory which is a
root), that means we are checking a container, which is always considered to exist. Otherwise, perform normal
existence check.
*/
Path parent = path.getParent();
return (parent == null || parent.equals(path.getRoot()))
|| checkDirectoryExists(((AzurePath) path.getParent()).toBlobClient());
}
/**
* Checks whether a directory exists by either being empty or having children.
*/
boolean checkDirectoryExists(BlobClient dirBlobClient) {
if (dirBlobClient == null) {
throw Utility.logError(logger, new IllegalArgumentException("One or both of the parameters was null."));
}
DirectoryStatus dirStatus = checkDirStatus(dirBlobClient);
return dirStatus.equals(DirectoryStatus.EMPTY) || dirStatus.equals(DirectoryStatus.NOT_EMPTY);
}
Map<String, String> prepareMetadataForDirectory(Map<String, String> metadata) {
if (metadata == null) {
metadata = new HashMap<>();
}
metadata.put(DIR_METADATA_MARKER, "true");
return metadata;
}
BlobContainerClient getContainerClient(BlobClient client) {
return new BlobContainerClientBuilder().endpoint(client.getBlobUrl())
.pipeline(client.getHttpPipeline())
.buildClient();
}
/**
* {@inheritDoc}
*/
@Override
public void delete(Path path) throws IOException {
}
/**
* As stated in the nio docs, this method is not atomic. More specifically, the checks necessary to validate the
* inputs and state of the file system are not atomic with the actual copying of data. If the copy is triggered,
* the copy itself is atomic and only a complete copy will ever be left at the destination.
*
* In addition to those in the nio javadocs, this method has the following requirements for successful completion.
* {@link StandardCopyOption
* if this option is not passed, an {@link UnsupportedOperationException} will be thrown. Neither the source nor the
* destination can be a root directory; if either is a root directory, an {@link IllegalArgumentException} will be
* thrown. The parent directory of the destination must at least weakly exist; if it does not, an
* {@link IOException} will be thrown. The only supported option other than
* {@link StandardCopyOption
* other option will result in an {@link UnsupportedOperationException}.
*
* This method supports both virtual and concrete directories as both the source and destination. Unlike when
* creating a directory, the existence of a virtual directory at the destination will cause this operation to fail.
* This is in order to prevent the possibility of overwriting a non-empty virtual directory with a file. Still, as
* mentioned above, this check is not atomic with the creation of the resultant directory.
*
* {@inheritDoc}
* @see
*/
@Override
public void copy(Path source, Path destination, CopyOption... copyOptions) throws IOException {
if (!(source instanceof AzurePath && destination instanceof AzurePath)) {
throw Utility.logError(logger, new IllegalArgumentException("This provider cannot operate on subtypes of "
+ "Path other than AzurePath"));
}
if (source.equals(destination)) {
return;
}
boolean replaceExisting = false;
List<CopyOption> optionsList = new ArrayList<>(Arrays.asList(copyOptions));
if (!optionsList.contains(StandardCopyOption.COPY_ATTRIBUTES)) {
throw Utility.logError(logger, new UnsupportedOperationException("StandardCopyOption.COPY_ATTRIBUTES "
+ "must be specified as the service will always copy file attributes."));
}
optionsList.remove(StandardCopyOption.COPY_ATTRIBUTES);
if (optionsList.contains(StandardCopyOption.REPLACE_EXISTING)) {
replaceExisting = true;
optionsList.remove(StandardCopyOption.REPLACE_EXISTING);
}
if (!optionsList.isEmpty()) {
throw Utility.logError(logger, new UnsupportedOperationException("Unsupported copy option found. Only "
+ "StandardCopyOption.COPY_ATTRIBUTES and StandareCopyOption.REPLACE_EXISTING are supported."));
}
if (source.equals(source.getRoot()) || destination.equals(destination.getRoot())) {
throw Utility.logError(logger, new IllegalArgumentException(String.format("Neither source nor destination "
+ "can be just a root directory. Source: %s. Destination: %s.", source.toString(),
destination.toString())));
}
BlobClient sourceBlob = ((AzurePath) source).toBlobClient();
BlobClient destinationBlob = ((AzurePath) destination).toBlobClient();
DirectoryStatus destinationStatus = checkDirStatus(destinationBlob);
if (destinationStatus.equals(DirectoryStatus.NOT_EMPTY)) {
throw Utility.logError(logger, new DirectoryNotEmptyException(destination.toString()));
}
/*
Set request conditions if we should not overwrite. We can error out here if we know something already exists,
but we will also create request conditions as a safeguard against overwriting something that was created
between our check and put.
*/
BlobRequestConditions requestConditions = null;
if (!replaceExisting) {
if (!destinationStatus.equals(DirectoryStatus.DOES_NOT_EXIST)) {
throw Utility.logError(logger, new FileAlreadyExistsException(destination.toString()));
}
requestConditions = new BlobRequestConditions().setIfNoneMatch("*");
}
/*
More path validation
Check that the parent for the destination exists. We only need to perform this check if there is nothing
currently at the destination, for if the destination exists, its parent at least weakly exists and we
can skip a service call.
*/
if (destinationStatus.equals(DirectoryStatus.DOES_NOT_EXIST) && !checkParentDirectoryExists(destination)) {
throw Utility.logError(logger, new IOException("Parent directory of destination location does not exist."
+ "The destination path is therefore invalid. Destination: " + destination.toString()));
}
/*
Try to copy the resource at the source path.
There is an optimization here where we try to do the copy first and only check for a virtual directory if
there's a 404. In the cases of files and concrete directories, this only requires one request. For virtual
directories, however, this requires three requests: failed copy, check status, create directory. Depending on
customer scenarios and how many virtual directories they copy, it could be better to check the directory status
first and then do a copy or createDir, which would always be two requests for all resource types.
*/
try {
SyncPoller<BlobCopyInfo, Void> pollResponse =
destinationBlob.beginCopy(sourceBlob.getBlobUrl(), null, null, null, null, requestConditions, null);
pollResponse.waitForCompletion(Duration.ofSeconds(COPY_TIMEOUT_SECONDS));
} catch (BlobStorageException e) {
if (e.getErrorCode().equals(BlobErrorCode.BLOB_NOT_FOUND)
&& !checkDirStatus(sourceBlob).equals(DirectoryStatus.DOES_NOT_EXIST)) {
/*
We already checked that the parent exists and validated the paths above, so we can put the blob
directly.
*/
putDirectoryBlob(destinationBlob, null, null, requestConditions);
} else {
throw Utility.logError(logger, new IOException(e));
}
} catch (RuntimeException e) {
throw Utility.logError(logger, new IOException(e));
}
}
/**
* This method will check if a directory is extant and/or empty and accommodates virtual directories. This method
* will not check the status of root directories.
*/
DirectoryStatus checkDirStatus(BlobClient dirBlobClient) {
if (dirBlobClient == null) {
throw Utility.logError(logger, new IllegalArgumentException("The blob client was null."));
}
BlobContainerClient containerClient = getContainerClient(dirBlobClient);
ListBlobsOptions listOptions = new ListBlobsOptions().setMaxResultsPerPage(2)
.setPrefix(dirBlobClient.getBlobName())
.setDetails(new BlobListDetails().setRetrieveMetadata(true));
/*
Do a list on prefix.
Zero elements means no virtual dir. Does not exist.
One element that matches this dir means empty.
One element that doesn't match this dir or more than one element. Not empty.
One element that matches the name but does not have a directory marker means the resource is not a directory.
Note that blob names that match the prefix exactly are returned in listing operations.
*/
Iterator<BlobItem> blobIterator = containerClient.listBlobsByHierarchy(AzureFileSystem.PATH_SEPARATOR,
listOptions, null).iterator();
if (!blobIterator.hasNext()) {
return DirectoryStatus.DOES_NOT_EXIST;
} else {
BlobItem item = blobIterator.next();
if (blobIterator.hasNext()) {
return DirectoryStatus.NOT_EMPTY;
}
if (!item.getName().equals(dirBlobClient.getBlobName())) {
/*
Names do not match. Must be a virtual dir with one item. e.g. blob with name "foo/bar" means dir "foo"
exists.
*/
return DirectoryStatus.NOT_EMPTY;
}
if (item.getMetadata() != null && item.getMetadata().containsKey(DIR_METADATA_MARKER)) {
return DirectoryStatus.EMPTY;
}
return DirectoryStatus.NOT_A_DIRECTORY;
}
}
/*int checkRootDirStatus(BlobContainerClient rootClient) {
}*/
/**
* {@inheritDoc}
*/
@Override
public void move(Path path, Path path1, CopyOption... copyOptions) throws IOException {
}
/**
* {@inheritDoc}
*/
@Override
public boolean isSameFile(Path path, Path path1) throws IOException {
return false;
}
/**
* {@inheritDoc}
*/
@Override
public boolean isHidden(Path path) throws IOException {
return false;
}
/**
* {@inheritDoc}
*/
@Override
public FileStore getFileStore(Path path) throws IOException {
return null;
}
/**
* {@inheritDoc}
*/
@Override
public void checkAccess(Path path, AccessMode... accessModes) throws IOException {
}
/**
* {@inheritDoc}
*/
@Override
public <V extends FileAttributeView> V getFileAttributeView(Path path, Class<V> aClass, LinkOption... linkOptions) {
return null;
}
/**
* {@inheritDoc}
*/
@Override
public <A extends BasicFileAttributes> A readAttributes(Path path, Class<A> aClass, LinkOption... linkOptions)
throws IOException {
return null;
}
/**
* {@inheritDoc}
*/
@Override
public Map<String, Object> readAttributes(Path path, String s, LinkOption... linkOptions) throws IOException {
return null;
}
/**
* {@inheritDoc}
*/
@Override
public void setAttribute(Path path, String s, Object o, LinkOption... linkOptions) throws IOException {
}
void closeFileSystem(String fileSystemName) {
this.openFileSystems.remove(fileSystemName);
}
private String extractAccountName(URI uri) {
if (!uri.getScheme().equals(this.getScheme())) {
throw Utility.logError(this.logger, new IllegalArgumentException(
"URI scheme does not match this provider"));
}
if (CoreUtils.isNullOrEmpty(uri.getQuery())) {
throw Utility.logError(this.logger, new IllegalArgumentException("URI does not contain a query "
+ "component. FileSystems require a URI of the format \"azb:
}
String accountName = Flux.fromArray(uri.getQuery().split("&"))
.filter(s -> s.startsWith(ACCOUNT_QUERY_KEY + "="))
.switchIfEmpty(Mono.error(Utility.logError(this.logger, new IllegalArgumentException(
"URI does not contain an \"" + ACCOUNT_QUERY_KEY + "=\" parameter. FileSystems require a URI "
+ "of the format \"azb:
.map(s -> s.substring(ACCOUNT_QUERY_KEY.length() + 1))
.blockLast();
if (CoreUtils.isNullOrEmpty(accountName)) {
throw Utility.logError(logger, new IllegalArgumentException("No account name provided in URI query."));
}
return accountName;
}
} |
Would it be helpful to a user to separate these checks so they know which is illegal? | public void copy(Path source, Path destination, CopyOption... copyOptions) throws IOException {
if (!(source instanceof AzurePath && destination instanceof AzurePath)) {
throw Utility.logError(logger, new IllegalArgumentException("This provider cannot operate on subtypes of "
+ "Path other than AzurePath"));
}
if (source.equals(destination)) {
return;
}
boolean replaceExisting = false;
List<CopyOption> optionsList = new ArrayList<>(Arrays.asList(copyOptions));
if (!optionsList.contains(StandardCopyOption.COPY_ATTRIBUTES)) {
throw Utility.logError(logger, new UnsupportedOperationException("StandardCopyOption.COPY_ATTRIBUTES "
+ "must be specified as the service will always copy file attributes."));
}
optionsList.remove(StandardCopyOption.COPY_ATTRIBUTES);
if (optionsList.contains(StandardCopyOption.REPLACE_EXISTING)) {
replaceExisting = true;
optionsList.remove(StandardCopyOption.REPLACE_EXISTING);
}
if (!optionsList.isEmpty()) {
throw Utility.logError(logger, new UnsupportedOperationException("Unsupported copy option found. Only "
+ "StandardCopyOption.COPY_ATTRIBUTES and StandareCopyOption.REPLACE_EXISTING are supported."));
}
if (source.equals(source.getRoot()) || destination.equals(destination.getRoot())) {
throw Utility.logError(logger, new IllegalArgumentException(String.format("Neither source nor destination "
+ "can be just a root directory. Source: %s. Destination: %s.", source.toString(),
destination.toString())));
}
BlobClient sourceBlob = ((AzurePath) source).toBlobClient();
BlobClient destinationBlob = ((AzurePath) destination).toBlobClient();
DirectoryStatus destinationStatus = checkDirStatus(destinationBlob);
if (destinationStatus.equals(DirectoryStatus.NOT_EMPTY)) {
throw Utility.logError(logger, new DirectoryNotEmptyException(destination.toString()));
}
/*
Set request conditions if we should not overwrite. We can error out here if we know something already exists,
but we will also create request conditions as a safeguard against overwriting something that was created
between our check and put.
*/
BlobRequestConditions requestConditions = null;
if (!replaceExisting) {
if (!destinationStatus.equals(DirectoryStatus.DOES_NOT_EXIST)) {
throw Utility.logError(logger, new FileAlreadyExistsException(destination.toString()));
}
requestConditions = new BlobRequestConditions().setIfNoneMatch("*");
}
/*
More path validation
Check that the parent for the destination exists. We only need to perform this check if there is nothing
currently at the destination, for if the destination exists, its parent at least weakly exists and we
can skip a service call.
*/
if (destinationStatus.equals(DirectoryStatus.DOES_NOT_EXIST) && !checkParentDirectoryExists(destination)) {
throw Utility.logError(logger, new IOException("Parent directory of destination location does not exist."
+ "The destination path is therefore invalid. Destination: " + destination.toString()));
}
/*
Try to copy the resource at the source path.
There is an optimization here where we try to do the copy first and only check for a virtual directory if
there's a 404. In the cases of files and concrete directories, this only requires one request. For virtual
directories, however, this requires three requests: failed copy, check status, create directory. Depending on
customer scenarios and how many virtual directories they copy, it could be better to check the directory status
first and then do a copy or createDir, which would always be two requests for all resource types.
*/
try {
SyncPoller<BlobCopyInfo, Void> pollResponse =
destinationBlob.beginCopy(sourceBlob.getBlobUrl(), null, null, null, null, requestConditions, null);
pollResponse.waitForCompletion(Duration.ofSeconds(30));
} catch (BlobStorageException e) {
if (e.getErrorCode().equals(BlobErrorCode.BLOB_NOT_FOUND)
&& !checkDirStatus(sourceBlob).equals(DirectoryStatus.DOES_NOT_EXIST)) {
/*
We already checked that the parent exists and validated the paths above, so we can put the blob
directly.
*/
putDirectoryBlob(destinationBlob, null, null, requestConditions);
} else {
throw Utility.logError(logger, new IOException(e));
}
} catch (RuntimeException e) {
throw Utility.logError(logger, new IOException(e));
}
} | throw Utility.logError(logger, new IllegalArgumentException(String.format("Neither source nor destination " | public void copy(Path source, Path destination, CopyOption... copyOptions) throws IOException {
if (!(source instanceof AzurePath && destination instanceof AzurePath)) {
throw Utility.logError(logger, new IllegalArgumentException("This provider cannot operate on subtypes of "
+ "Path other than AzurePath"));
}
if (source.equals(destination)) {
return;
}
boolean replaceExisting = false;
List<CopyOption> optionsList = new ArrayList<>(Arrays.asList(copyOptions));
if (!optionsList.contains(StandardCopyOption.COPY_ATTRIBUTES)) {
throw Utility.logError(logger, new UnsupportedOperationException("StandardCopyOption.COPY_ATTRIBUTES "
+ "must be specified as the service will always copy file attributes."));
}
optionsList.remove(StandardCopyOption.COPY_ATTRIBUTES);
if (optionsList.contains(StandardCopyOption.REPLACE_EXISTING)) {
replaceExisting = true;
optionsList.remove(StandardCopyOption.REPLACE_EXISTING);
}
if (!optionsList.isEmpty()) {
throw Utility.logError(logger, new UnsupportedOperationException("Unsupported copy option found. Only "
+ "StandardCopyOption.COPY_ATTRIBUTES and StandareCopyOption.REPLACE_EXISTING are supported."));
}
if (source.equals(source.getRoot()) || destination.equals(destination.getRoot())) {
throw Utility.logError(logger, new IllegalArgumentException(String.format("Neither source nor destination "
+ "can be just a root directory. Source: %s. Destination: %s.", source.toString(),
destination.toString())));
}
BlobClient sourceBlob = ((AzurePath) source).toBlobClient();
BlobClient destinationBlob = ((AzurePath) destination).toBlobClient();
DirectoryStatus destinationStatus = checkDirStatus(destinationBlob);
if (destinationStatus.equals(DirectoryStatus.NOT_EMPTY)) {
throw Utility.logError(logger, new DirectoryNotEmptyException(destination.toString()));
}
/*
Set request conditions if we should not overwrite. We can error out here if we know something already exists,
but we will also create request conditions as a safeguard against overwriting something that was created
between our check and put.
*/
BlobRequestConditions requestConditions = null;
if (!replaceExisting) {
if (!destinationStatus.equals(DirectoryStatus.DOES_NOT_EXIST)) {
throw Utility.logError(logger, new FileAlreadyExistsException(destination.toString()));
}
requestConditions = new BlobRequestConditions().setIfNoneMatch("*");
}
/*
More path validation
Check that the parent for the destination exists. We only need to perform this check if there is nothing
currently at the destination, for if the destination exists, its parent at least weakly exists and we
can skip a service call.
*/
if (destinationStatus.equals(DirectoryStatus.DOES_NOT_EXIST) && !checkParentDirectoryExists(destination)) {
throw Utility.logError(logger, new IOException("Parent directory of destination location does not exist."
+ "The destination path is therefore invalid. Destination: " + destination.toString()));
}
/*
Try to copy the resource at the source path.
There is an optimization here where we try to do the copy first and only check for a virtual directory if
there's a 404. In the cases of files and concrete directories, this only requires one request. For virtual
directories, however, this requires three requests: failed copy, check status, create directory. Depending on
customer scenarios and how many virtual directories they copy, it could be better to check the directory status
first and then do a copy or createDir, which would always be two requests for all resource types.
*/
try {
SyncPoller<BlobCopyInfo, Void> pollResponse =
destinationBlob.beginCopy(sourceBlob.getBlobUrl(), null, null, null, null, requestConditions, null);
pollResponse.waitForCompletion(Duration.ofSeconds(COPY_TIMEOUT_SECONDS));
} catch (BlobStorageException e) {
if (e.getErrorCode().equals(BlobErrorCode.BLOB_NOT_FOUND)
&& !checkDirStatus(sourceBlob).equals(DirectoryStatus.DOES_NOT_EXIST)) {
/*
We already checked that the parent exists and validated the paths above, so we can put the blob
directly.
*/
putDirectoryBlob(destinationBlob, null, null, requestConditions);
} else {
throw Utility.logError(logger, new IOException(e));
}
} catch (RuntimeException e) {
throw Utility.logError(logger, new IOException(e));
}
} | class AzureFileSystemProvider extends FileSystemProvider {
private final ClientLogger logger = new ClientLogger(AzureFileSystemProvider.class);
public static final String CONTENT_TYPE = "Content-Type";
public static final String CONTENT_DISPOSITION = "Content-Disposition";
public static final String CONTENT_LANGUAGE = "Content-Language";
public static final String CONTENT_ENCODING = "Content-Encoding";
public static final String CONTENT_MD5 = "Content-MD5";
public static final String CACHE_CONTROL = "Cache-Control";
private static final String ACCOUNT_QUERY_KEY = "account";
static final String DIR_METADATA_MARKER = "is_hdi_folder";
private final ConcurrentMap<String, FileSystem> openFileSystems;
/**
* Creates an AzureFileSystemProvider.
*/
public AzureFileSystemProvider() {
this.openFileSystems = new ConcurrentHashMap<>();
}
/**
* Returns {@code "azb".}
*/
@Override
public String getScheme() {
return "azb";
}
/**
* The format of a {@code URI} identifying a file system is {@code "azb:
* <p>
* Once closed, a file system with the same identifier may be reopened.
* {@inheritDoc}
*/
@Override
public FileSystem newFileSystem(URI uri, Map<String, ?> config) throws IOException {
String accountName = extractAccountName(uri);
if (this.openFileSystems.containsKey(accountName)) {
throw Utility.logError(this.logger, new FileSystemAlreadyExistsException("Name: " + accountName));
}
AzureFileSystem afs = new AzureFileSystem(this, accountName, config);
this.openFileSystems.put(accountName, afs);
return afs;
}
/**
* The format of a {@code URI} identifying an file system is {@code "azb:
* <p>
* Trying to retrieve a closed file system will throw a {@link FileSystemNotFoundException}. Once closed, a
* file system with the same identifier may be reopened.
* {@inheritDoc}
*/
@Override
public FileSystem getFileSystem(URI uri) {
String accountName = extractAccountName(uri);
if (!this.openFileSystems.containsKey(accountName)) {
throw Utility.logError(this.logger, new FileSystemNotFoundException("Name: " + accountName));
}
return this.openFileSystems.get(accountName);
}
/**
* {@inheritDoc}
*/
@Override
public Path getPath(URI uri) {
return getFileSystem(uri).getPath(uri.getPath());
}
/**
* {@inheritDoc}
*/
@Override
public SeekableByteChannel newByteChannel(Path path, Set<? extends OpenOption> set,
FileAttribute<?>... fileAttributes) throws IOException {
return null;
}
/**
* {@inheritDoc}
*/
@Override
public DirectoryStream<Path> newDirectoryStream(Path path, DirectoryStream.Filter<? super Path> filter)
throws IOException {
return null;
}
/**
* The existence of a directory in the {@code AzureFileSystem} is defined on two levels. <i>Weak existence</i> is
* defined by the presence of a non-zero number of blobs prefixed with the directory's path. This concept is also
* known as a <i>virtual directory</i> and enables the file system to work with containers that were pre-loaded
* with data by another source but need to be accessed by this file system. <i>Strong existence</i> is defined as
* the presence of an actual storage resource at the given path, which in the case of directories, is a zero-length
* blob whose name is the directory path with a particular metadata field indicating the blob's status as a
* directory. This is also known as a <i>concrete directory</i>. Directories created by this file system will
* strongly exist. Operations targeting directories themselves as the object (e.g. setting properties) will target
* marker blobs underlying concrete directories. Other operations (e.g. listing) will operate on the blob-name
* prefix.
* <p>
* This method fulfills the nio contract of: "The check for the existence of the file and the creation of the
* directory if it does not exist are a single operation that is atomic with respect to all other filesystem
* activities that might affect the directory." More specifically, this method will atomically check for <i>strong
* existence</i> of another file or directory at the given path and fail if one is present. On the other hand, we
* only check for <i>weak existence</i> of the parent to determine if the given path is valid. Additionally, the
* action of checking whether the parent exists, is <i>not</i> atomic with the creation of the directory. Note that
* while it is possible that the parent may be deleted between when the parent is determined to exist and the
* creation of the child, the creation of the child will always ensure the existence of a virtual parent, so the
* child will never be left floating and unreachable. The different checks on parent and child is due to limitations
* in the Storage service API.
* <p>
* There may be some unintuitive behavior when working with directories in this file system, particularly virtual
* directories(usually those not created by this file system). A virtual directory will disappear as soon as all its
* children have been deleted. Furthermore, if a directory with the given path weakly exists at the time of calling
* this method, this method will still return success and create a concrete directory at the target location.
* In other words, it is possible to "double create" a directory if it first weakly exists and then is strongly
* created. This is both because it is impossible to atomically check if a virtual directory exists while creating a
* concrete directory and because such behavior will have minimal side effects--no files will be overwritten and the
* directory will still be available for writing as intended, though it may not be empty.
* <p>
* This method will attempt to extract standard HTTP content headers from the list of file attributes to set them
* as blob headers. All other attributes will be set as blob metadata. The value of every attribute will be
* converted to a {@code String} with the exception of the Content-MD5 attribute which expects a {@code byte[]}.
* When extracting the content headers, the following strings will be used for comparison (constants for these
* values can be found on this type):
* <ul>
* <li>{@code Content-Type}</li>
* <li>{@code Content-Disposition}</li>
* <li>{@code Content-Language}</li>
* <li>{@code Content-Encoding}</li>
* <li>{@code Content-MD5}</li>
* <li>{@code Cache-Control}</li>
* </ul>
* Note that these properties also have a particular semantic in that if one is specified, all are updated. In other
* words, if any of the above is set, all those that are not set will be cleared. See the
* <a href="https:
* information.
*
* {@inheritDoc}
*/
@Override
public void createDirectory(Path path, FileAttribute<?>... fileAttributes) throws IOException {
    if (!(path instanceof AzurePath)) {
        throw Utility.logError(logger, new IllegalArgumentException("This provider cannot operate on subtypes of "
            + "Path other than AzurePath"));
    }
    // Normalize a null varargs array so downstream code can iterate unconditionally.
    fileAttributes = fileAttributes == null ? new FileAttribute<?>[0] : fileAttributes;
    BlobClient client = ((AzurePath) path).toBlobClient();
    // Containers (roots) cannot be created through this API.
    Path root = path.getRoot();
    if (root != null && root.equals(path)) {
        throw Utility.logError(logger, new IOException("Creating a root directory is not supported."));
    }
    if (checkParentDirectoryExists(path)) {
        try {
            List<FileAttribute<?>> attributeList = new ArrayList<>(Arrays.asList(fileAttributes));
            // Content-* attributes become blob HTTP headers; everything else becomes metadata.
            BlobHttpHeaders headers = Utility.extractHttpHeaders(attributeList, logger);
            Map<String, String> metadata = Utility.convertAttributesToMetadata(attributeList);
            // If-None-Match: * makes the existence check atomic with the creation.
            putDirectoryBlob(client, headers, metadata, new BlobRequestConditions().setIfNoneMatch("*"));
        } catch (BlobStorageException e) {
            // A 409 only means "already exists" when the service error code says so; other
            // conflict causes must not be misreported as FileAlreadyExistsException.
            // (Matches the sibling implementation of this method elsewhere in the file.)
            if (e.getStatusCode() == HttpURLConnection.HTTP_CONFLICT
                && e.getErrorCode().equals(BlobErrorCode.BLOB_ALREADY_EXISTS)) {
                throw Utility.logError(logger, new FileAlreadyExistsException(path.toString()));
            } else {
                throw Utility.logError(logger, new IOException("An error occurred when creating the directory", e));
            }
        }
    } else {
        throw Utility.logError(logger, new IOException("Parent directory does not exist for path: "
            + path.toString()));
    }
}
/**
 * Writes the zero-length marker blob that backs a concrete directory. Callers are responsible
 * for all precondition checks (e.g. parent existence) before invoking this method.
 *
 * @param destinationClient A blobClient pointing to the location where the directory should be put.
 * @param headers Any headers that should be associated with the directory.
 * @param metadata Any metadata that should be associated with the directory; the directory-marker
 * entry is added automatically.
 * @param requestConditions Any necessary request conditions to pass when creating the directory blob.
 */
private void putDirectoryBlob(BlobClient destinationClient, BlobHttpHeaders headers, Map<String, String> metadata,
    BlobRequestConditions requestConditions) {
    Map<String, String> dirMetadata = prepareMetadataForDirectory(metadata);
    destinationClient.getBlockBlobClient()
        .commitBlockListWithResponse(Collections.emptyList(), headers, dirMetadata, null,
            requestConditions, null, null);
}
/**
 * Checks for the existence of the parent of the given path. We do not check for the actual marker blob as parents
 * need only weakly exist.
 *
 * If the parent is a root (container), it will be assumed to exist, so it must be validated elsewhere that the
 * container is a legitimate root within this file system.
 *
 * @param path the path whose parent is checked
 * @return true if the parent is a root (always assumed present) or weakly exists
 * @throws IOException declared for interface symmetry with callers
 */
boolean checkParentDirectoryExists(Path path) throws IOException {
    /*
    If the parent is just the root (or null, which means the parent is implicitly the default directory which is a
    root), that means we are checking a container, which is always considered to exist. Otherwise, perform normal
    existence check.
    */
    Path parent = path.getParent();
    // Reuse the already-computed parent rather than calling path.getParent() a second time.
    return (parent == null || parent.equals(path.getRoot()))
        || checkDirectoryExists(((AzurePath) parent).toBlobClient());
}
/**
 * Checks whether a directory exists by either being empty or having children.
 *
 * @param dirBlobClient client pointing at the directory location; must not be null
 * @return true if the directory strongly or weakly exists
 */
boolean checkDirectoryExists(BlobClient dirBlobClient) {
    if (dirBlobClient == null) {
        // Message fixed: this method has exactly one parameter, so "one or both" was misleading.
        throw Utility.logError(logger, new IllegalArgumentException("The blob client was null."));
    }
    DirectoryStatus dirStatus = checkDirStatus(dirBlobClient);
    // Enum constants are singletons, so identity comparison is safe and null-proof.
    return dirStatus == DirectoryStatus.EMPTY || dirStatus == DirectoryStatus.NOT_EMPTY;
}
/**
 * Ensures the given metadata map carries the directory-marker entry, creating a fresh map
 * when none was supplied. A non-null argument is mutated in place and returned, matching the
 * caller's expectation.
 */
Map<String, String> prepareMetadataForDirectory(Map<String, String> metadata) {
    Map<String, String> result = (metadata == null) ? new HashMap<>() : metadata;
    result.put(DIR_METADATA_MARKER, "true");
    return result;
}
// Derives a container client from a blob client, reusing the blob client's HTTP pipeline so
// credentials and policies are shared.
// NOTE(review): the endpoint passed is the full blob URL; presumably the builder derives the
// container endpoint from it — verify against the BlobContainerClientBuilder contract.
BlobContainerClient getContainerClient(BlobClient client) {
return new BlobContainerClientBuilder().endpoint(client.getBlobUrl())
.pipeline(client.getHttpPipeline())
.buildClient();
}
/**
* {@inheritDoc}
*/
@Override
public void delete(Path path) throws IOException {
// TODO(review): unimplemented stub — silently succeeds without deleting anything.
}
/**
* As stated in the nio docs, this method is not atomic. More specifically, the checks necessary to validate the
* inputs and state of the file system are not atomic with the actual copying of data. If the copy is triggered,
* the copy itself is atomic and only a complete copy will ever be left at the destination.
*
* In addition to those in the nio javadocs, this method has the following requirements for successful completion.
* {@link StandardCopyOption
* if this option is not passed, an {@link UnsupportedOperationException} will be thrown. Neither the source nor the
* destination can be a root directory; if either is a root directory, an {@link IllegalArgumentException} will be
* thrown. The parent directory of the destination must at least weakly exist; if it does not, an
* {@link IOException} will be thrown. The only supported option other than
* {@link StandardCopyOption
* other option will result in an {@link UnsupportedOperationException}.
*
* This method supports both virtual and concrete directories as both the source and destination. Unlike when
* creating a directory, the existence of a virtual directory at the destination will cause this operation to fail.
* This is in order to prevent the possibility of overwriting a non-empty virtual directory with a file. Still, as
* mentioned above, this check is not atomic with the creation of the resultant directory.
*
* {@inheritDoc}
* @see
*/
// NOTE(review): a stray @Override annotation preceded this method (orphaned from a removed
// public method). checkDirStatus is package-private and overrides nothing from
// FileSystemProvider, so the annotation was a compile error and has been removed.
/**
 * This method will check if a directory is extant and/or empty and accommodates virtual directories. This method
 * will not check the status of root directories.
 *
 * @param dirBlobClient client pointing at the directory location; must not be null
 * @return the {@link DirectoryStatus} of the location
 */
DirectoryStatus checkDirStatus(BlobClient dirBlobClient) {
    if (dirBlobClient == null) {
        throw Utility.logError(logger, new IllegalArgumentException("The blob client was null."));
    }
    BlobContainerClient containerClient = getContainerClient(dirBlobClient);
    // Two results are enough to distinguish empty from non-empty; metadata is needed to spot the marker.
    ListBlobsOptions listOptions = new ListBlobsOptions().setMaxResultsPerPage(2)
        .setPrefix(dirBlobClient.getBlobName())
        .setDetails(new BlobListDetails().setRetrieveMetadata(true));
    /*
    Do a list on prefix.
    Zero elements means no virtual dir. Does not exist.
    One element that matches this dir means empty.
    One element that doesn't match this dir or more than one element. Not empty.
    One element that matches the name but does not have a directory marker means the resource is not a directory.
    Note that blob names that match the prefix exactly are returned in listing operations.
    */
    Iterator<BlobItem> blobIterator = containerClient.listBlobsByHierarchy(AzureFileSystem.PATH_SEPARATOR,
        listOptions, null).iterator();
    if (!blobIterator.hasNext()) {
        return DirectoryStatus.DOES_NOT_EXIST;
    } else {
        BlobItem item = blobIterator.next();
        if (blobIterator.hasNext()) {
            return DirectoryStatus.NOT_EMPTY;
        }
        if (!item.getName().equals(dirBlobClient.getBlobName())) {
            /*
            Names do not match. Must be a virtual dir with one item. e.g. blob with name "foo/bar" means dir "foo"
            exists.
            */
            return DirectoryStatus.NOT_EMPTY;
        }
        if (item.getMetadata() != null && item.getMetadata().containsKey(DIR_METADATA_MARKER)) {
            return DirectoryStatus.EMPTY;
        }
        return DirectoryStatus.NOT_A_DIRECTORY;
    }
}
/*int checkRootDirStatus(BlobContainerClient rootClient) {
}*/
/**
* {@inheritDoc}
*/
@Override
public void move(Path path, Path path1, CopyOption... copyOptions) throws IOException {
// TODO(review): unimplemented stub — silently succeeds without moving anything.
}
/**
* {@inheritDoc}
*/
@Override
public boolean isSameFile(Path path, Path path1) throws IOException {
// TODO(review): unimplemented stub — always reports "not the same file".
return false;
}
/**
* {@inheritDoc}
*/
@Override
public boolean isHidden(Path path) throws IOException {
// TODO(review): unimplemented stub — nothing is reported hidden.
return false;
}
/**
* {@inheritDoc}
*/
@Override
public FileStore getFileStore(Path path) throws IOException {
// TODO(review): unimplemented stub — returns null, which violates the nio contract once live.
return null;
}
/**
* {@inheritDoc}
*/
@Override
public void checkAccess(Path path, AccessMode... accessModes) throws IOException {
// TODO(review): unimplemented stub — all access checks pass vacuously.
}
/**
* {@inheritDoc}
*/
@Override
public <V extends FileAttributeView> V getFileAttributeView(Path path, Class<V> aClass, LinkOption... linkOptions) {
// TODO(review): unimplemented stub.
return null;
}
/**
* {@inheritDoc}
*/
@Override
public <A extends BasicFileAttributes> A readAttributes(Path path, Class<A> aClass, LinkOption... linkOptions)
throws IOException {
// TODO(review): unimplemented stub.
return null;
}
/**
* {@inheritDoc}
*/
@Override
public Map<String, Object> readAttributes(Path path, String s, LinkOption... linkOptions) throws IOException {
// TODO(review): unimplemented stub.
return null;
}
/**
* {@inheritDoc}
*/
@Override
public void setAttribute(Path path, String s, Object o, LinkOption... linkOptions) throws IOException {
// TODO(review): unimplemented stub — attribute writes are dropped.
}
// Invoked by AzureFileSystem on close so the account name can be reopened later.
void closeFileSystem(String fileSystemName) {
this.openFileSystems.remove(fileSystemName);
}
/**
* Extracts the storage account name from the query component of an azb URI.
* Validates the scheme, requires a non-empty query, and requires an "account=" parameter
* with a non-empty value; each violation is logged and raised as IllegalArgumentException.
* NOTE(review): several string literals below appear truncated in this copy of the file
* (they end at "azb:") — verify the full messages against the upstream source.
* NOTE(review): using Flux/Mono to scan a split query string is heavyweight for a
* synchronous parse; a plain loop would suffice — consider simplifying upstream.
*/
private String extractAccountName(URI uri) {
if (!uri.getScheme().equals(this.getScheme())) {
throw Utility.logError(this.logger, new IllegalArgumentException(
"URI scheme does not match this provider"));
}
if (CoreUtils.isNullOrEmpty(uri.getQuery())) {
throw Utility.logError(this.logger, new IllegalArgumentException("URI does not contain a query "
+ "component. FileSystems require a URI of the format \"azb:
}
// blockLast() means the LAST "account=" parameter wins when duplicates are present.
String accountName = Flux.fromArray(uri.getQuery().split("&"))
.filter(s -> s.startsWith(ACCOUNT_QUERY_KEY + "="))
.switchIfEmpty(Mono.error(Utility.logError(this.logger, new IllegalArgumentException(
"URI does not contain an \"" + ACCOUNT_QUERY_KEY + "=\" parameter. FileSystems require a URI "
+ "of the format \"azb:
.map(s -> s.substring(ACCOUNT_QUERY_KEY.length() + 1))
.blockLast();
if (CoreUtils.isNullOrEmpty(accountName)) {
throw Utility.logError(logger, new IllegalArgumentException("No account name provided in URI query."));
}
return accountName;
}
} | class AzureFileSystemProvider extends FileSystemProvider {
// Logger used by every method in this provider via Utility.logError.
private final ClientLogger logger = new ClientLogger(AzureFileSystemProvider.class);
// Attribute names recognized when mapping file attributes to blob HTTP headers (see createDirectory).
public static final String CONTENT_TYPE = "Content-Type";
public static final String CONTENT_DISPOSITION = "Content-Disposition";
public static final String CONTENT_LANGUAGE = "Content-Language";
public static final String CONTENT_ENCODING = "Content-Encoding";
public static final String CONTENT_MD5 = "Content-MD5";
public static final String CACHE_CONTROL = "Cache-Control";
// Query-string key carrying the storage account name in an azb URI (see extractAccountName).
private static final String ACCOUNT_QUERY_KEY = "account";
// Maximum time copy() waits for a server-side blob copy to complete.
private static final int COPY_TIMEOUT_SECONDS = 30;
// Metadata key marking a zero-length blob as a concrete directory
// (presumably the HDInsight folder convention — verify).
static final String DIR_METADATA_MARKER = "is_hdi_folder";
// Open file systems keyed by account name; concurrent map because providers are shared.
private final ConcurrentMap<String, FileSystem> openFileSystems;
/**
* Creates an AzureFileSystemProvider.
*/
public AzureFileSystemProvider() {
this.openFileSystems = new ConcurrentHashMap<>();
}
/**
* Returns {@code "azb".}
*/
@Override
public String getScheme() {
return "azb";
}
/**
* The format of a {@code URI} identifying a file system is {@code "azb:
* <p>
* Once closed, a file system with the same identifier may be reopened.
* {@inheritDoc}
*/
@Override
public FileSystem newFileSystem(URI uri, Map<String, ?> config) throws IOException {
    String accountName = extractAccountName(uri);
    // Fast-path rejection before paying for AzureFileSystem construction.
    if (this.openFileSystems.containsKey(accountName)) {
        throw Utility.logError(this.logger, new FileSystemAlreadyExistsException("Name: " + accountName));
    }
    AzureFileSystem afs = new AzureFileSystem(this, accountName, config);
    // putIfAbsent closes the check-then-act race: two threads passing the containsKey check
    // concurrently must not both succeed in registering a file system for the same account.
    if (this.openFileSystems.putIfAbsent(accountName, afs) != null) {
        throw Utility.logError(this.logger, new FileSystemAlreadyExistsException("Name: " + accountName));
    }
    return afs;
}
/**
* The format of a {@code URI} identifying an file system is {@code "azb:
* <p>
* Trying to retrieve a closed file system will throw a {@link FileSystemNotFoundException}. Once closed, a
* file system with the same identifier may be reopened.
* {@inheritDoc}
*/
@Override
public FileSystem getFileSystem(URI uri) {
    String accountName = extractAccountName(uri);
    // Single lookup instead of containsKey+get: a concurrent closeFileSystem between the two
    // calls could otherwise make get() return null after containsKey() returned true.
    FileSystem fs = this.openFileSystems.get(accountName);
    if (fs == null) {
        throw Utility.logError(this.logger, new FileSystemNotFoundException("Name: " + accountName));
    }
    return fs;
}
/**
* {@inheritDoc}
*/
@Override
public Path getPath(URI uri) {
// Delegates to the (already-open) file system for this URI's account; throws if not open.
return getFileSystem(uri).getPath(uri.getPath());
}
/**
* {@inheritDoc}
*/
@Override
public SeekableByteChannel newByteChannel(Path path, Set<? extends OpenOption> set,
FileAttribute<?>... fileAttributes) throws IOException {
// TODO(review): unimplemented stub — returns null.
return null;
}
/**
* {@inheritDoc}
*/
@Override
public DirectoryStream<Path> newDirectoryStream(Path path, DirectoryStream.Filter<? super Path> filter)
throws IOException {
// TODO(review): unimplemented stub — returns null.
return null;
}
/**
* The existence of a directory in the {@code AzureFileSystem} is defined on two levels. <i>Weak existence</i> is
* defined by the presence of a non-zero number of blobs prefixed with the directory's path. This concept is also
* known as a <i>virtual directory</i> and enables the file system to work with containers that were pre-loaded
* with data by another source but need to be accessed by this file system. <i>Strong existence</i> is defined as
* the presence of an actual storage resource at the given path, which in the case of directories, is a zero-length
* blob whose name is the directory path with a particular metadata field indicating the blob's status as a
* directory. This is also known as a <i>concrete directory</i>. Directories created by this file system will
* strongly exist. Operations targeting directories themselves as the object (e.g. setting properties) will target
* marker blobs underlying concrete directories. Other operations (e.g. listing) will operate on the blob-name
* prefix.
* <p>
* This method fulfills the nio contract of: "The check for the existence of the file and the creation of the
* directory if it does not exist are a single operation that is atomic with respect to all other filesystem
* activities that might affect the directory." More specifically, this method will atomically check for <i>strong
* existence</i> of another file or directory at the given path and fail if one is present. On the other hand, we
* only check for <i>weak existence</i> of the parent to determine if the given path is valid. Additionally, the
* action of checking whether the parent exists, is <i>not</i> atomic with the creation of the directory. Note that
* while it is possible that the parent may be deleted between when the parent is determined to exist and the
* creation of the child, the creation of the child will always ensure the existence of a virtual parent, so the
* child will never be left floating and unreachable. The different checks on parent and child is due to limitations
* in the Storage service API.
* <p>
* There may be some unintuitive behavior when working with directories in this file system, particularly virtual
* directories(usually those not created by this file system). A virtual directory will disappear as soon as all its
* children have been deleted. Furthermore, if a directory with the given path weakly exists at the time of calling
* this method, this method will still return success and create a concrete directory at the target location.
* In other words, it is possible to "double create" a directory if it first weakly exists and then is strongly
* created. This is both because it is impossible to atomically check if a virtual directory exists while creating a
* concrete directory and because such behavior will have minimal side effects--no files will be overwritten and the
* directory will still be available for writing as intended, though it may not be empty.
* <p>
* This method will attempt to extract standard HTTP content headers from the list of file attributes to set them
* as blob headers. All other attributes will be set as blob metadata. The value of every attribute will be
* converted to a {@code String} with the exception of the Content-MD5 attribute which expects a {@code byte[]}.
* When extracting the content headers, the following strings will be used for comparison (constants for these
* values can be found on this type):
* <ul>
* <li>{@code Content-Type}</li>
* <li>{@code Content-Disposition}</li>
* <li>{@code Content-Language}</li>
* <li>{@code Content-Encoding}</li>
* <li>{@code Content-MD5}</li>
* <li>{@code Cache-Control}</li>
* </ul>
* Note that these properties also have a particular semantic in that if one is specified, all are updated. In other
* words, if any of the above is set, all those that are not set will be cleared. See the
* <a href="https:
* information.
*
* {@inheritDoc}
*/
@Override
public void createDirectory(Path path, FileAttribute<?>... fileAttributes) throws IOException {
    if (!(path instanceof AzurePath)) {
        throw Utility.logError(logger, new IllegalArgumentException("This provider cannot operate on subtypes of "
            + "Path other than AzurePath"));
    }
    // Normalize a null varargs array so downstream code can iterate unconditionally.
    fileAttributes = fileAttributes == null ? new FileAttribute<?>[0] : fileAttributes;
    BlobClient client = ((AzurePath) path).toBlobClient();
    // Containers (roots) cannot be created through this API.
    Path root = path.getRoot();
    if (root != null && root.equals(path)) {
        throw Utility.logError(logger, new IOException("Creating a root directory is not supported."));
    }
    if (checkParentDirectoryExists(path)) {
        try {
            List<FileAttribute<?>> attributeList = new ArrayList<>(Arrays.asList(fileAttributes));
            // Content-* attributes become blob HTTP headers; everything else becomes metadata.
            BlobHttpHeaders headers = Utility.extractHttpHeaders(attributeList, logger);
            Map<String, String> metadata = Utility.convertAttributesToMetadata(attributeList);
            // If-None-Match: * makes the existence check atomic with the creation.
            putDirectoryBlob(client, headers, metadata, new BlobRequestConditions().setIfNoneMatch("*"));
        } catch (BlobStorageException e) {
            // Only a 409 with the BLOB_ALREADY_EXISTS error code maps to FileAlreadyExistsException.
            if (e.getStatusCode() == HttpURLConnection.HTTP_CONFLICT
                && e.getErrorCode().equals(BlobErrorCode.BLOB_ALREADY_EXISTS)) {
                throw Utility.logError(logger, new FileAlreadyExistsException(path.toString()));
            } else {
                // Typo fix in the message: "occured" -> "occurred".
                throw Utility.logError(logger, new IOException("An error occurred when creating the directory", e));
            }
        }
    } else {
        throw Utility.logError(logger, new IOException("Parent directory does not exist for path: "
            + path.toString()));
    }
}
/**
 * Creates the zero-length marker blob that represents a concrete directory. All precondition
 * checks (e.g. parent existence) must have been performed by the caller.
 *
 * @param destinationClient A blobClient pointing to the location where the directory should be put.
 * @param headers Any headers that should be associated with the directory.
 * @param metadata Any metadata that should be associated with the directory; the directory-marker
 * entry is appended automatically.
 * @param requestConditions Any necessary request conditions to pass when creating the directory blob.
 */
private void putDirectoryBlob(BlobClient destinationClient, BlobHttpHeaders headers, Map<String, String> metadata,
    BlobRequestConditions requestConditions) {
    // Committing an empty block list produces a zero-length blob carrying the directory marker.
    destinationClient.getBlockBlobClient().commitBlockListWithResponse(
        Collections.emptyList(), headers, prepareMetadataForDirectory(metadata), null,
        requestConditions, null, null);
}
/**
 * Checks for the existence of the parent of the given path. We do not check for the actual marker blob as parents
 * need only weakly exist.
 *
 * If the parent is a root (container), it will be assumed to exist, so it must be validated elsewhere that the
 * container is a legitimate root within this file system.
 *
 * @param path the path whose parent is checked
 * @return true if the parent is a root (always assumed present) or weakly exists
 * @throws IOException declared for interface symmetry with callers
 */
boolean checkParentDirectoryExists(Path path) throws IOException {
    /*
    If the parent is just the root (or null, which means the parent is implicitly the default directory which is a
    root), that means we are checking a container, which is always considered to exist. Otherwise, perform normal
    existence check.
    */
    Path parent = path.getParent();
    // Reuse the already-computed parent rather than calling path.getParent() a second time.
    return (parent == null || parent.equals(path.getRoot()))
        || checkDirectoryExists(((AzurePath) parent).toBlobClient());
}
/**
 * Checks whether a directory exists by either being empty or having children.
 *
 * @param dirBlobClient client pointing at the directory location; must not be null
 * @return true if the directory strongly or weakly exists
 */
boolean checkDirectoryExists(BlobClient dirBlobClient) {
    if (dirBlobClient == null) {
        // Message fixed: this method has exactly one parameter, so "one or both" was misleading.
        throw Utility.logError(logger, new IllegalArgumentException("The blob client was null."));
    }
    DirectoryStatus dirStatus = checkDirStatus(dirBlobClient);
    // Enum constants are singletons, so identity comparison is safe and null-proof.
    return dirStatus == DirectoryStatus.EMPTY || dirStatus == DirectoryStatus.NOT_EMPTY;
}
/**
 * Adds the directory-marker entry to the supplied metadata, mutating a non-null argument in
 * place (as callers expect) and allocating a fresh map when none was given.
 */
Map<String, String> prepareMetadataForDirectory(Map<String, String> metadata) {
    if (metadata != null) {
        metadata.put(DIR_METADATA_MARKER, "true");
        return metadata;
    }
    Map<String, String> fresh = new HashMap<>();
    fresh.put(DIR_METADATA_MARKER, "true");
    return fresh;
}
// Derives a container client from a blob client, reusing the blob client's HTTP pipeline so
// credentials and policies are shared.
// NOTE(review): the endpoint passed is the full blob URL; presumably the builder derives the
// container endpoint from it — verify against the BlobContainerClientBuilder contract.
BlobContainerClient getContainerClient(BlobClient client) {
return new BlobContainerClientBuilder().endpoint(client.getBlobUrl())
.pipeline(client.getHttpPipeline())
.buildClient();
}
/**
* {@inheritDoc}
*/
@Override
public void delete(Path path) throws IOException {
// TODO(review): unimplemented stub — silently succeeds without deleting anything.
}
/**
* As stated in the nio docs, this method is not atomic. More specifically, the checks necessary to validate the
* inputs and state of the file system are not atomic with the actual copying of data. If the copy is triggered,
* the copy itself is atomic and only a complete copy will ever be left at the destination.
*
* In addition to those in the nio javadocs, this method has the following requirements for successful completion.
* {@link StandardCopyOption
* if this option is not passed, an {@link UnsupportedOperationException} will be thrown. Neither the source nor the
* destination can be a root directory; if either is a root directory, an {@link IllegalArgumentException} will be
* thrown. The parent directory of the destination must at least weakly exist; if it does not, an
* {@link IOException} will be thrown. The only supported option other than
* {@link StandardCopyOption
* other option will result in an {@link UnsupportedOperationException}.
*
* This method supports both virtual and concrete directories as both the source and destination. Unlike when
* creating a directory, the existence of a virtual directory at the destination will cause this operation to fail.
* This is in order to prevent the possibility of overwriting a non-empty virtual directory with a file. Still, as
* mentioned above, this check is not atomic with the creation of the resultant directory.
*
* {@inheritDoc}
* @see
*/
// NOTE(review): a stray @Override annotation preceded this method (orphaned from a removed
// public method). checkDirStatus is package-private and overrides nothing from
// FileSystemProvider, so the annotation was a compile error and has been removed.
/**
 * This method will check if a directory is extant and/or empty and accommodates virtual directories. This method
 * will not check the status of root directories.
 *
 * @param dirBlobClient client pointing at the directory location; must not be null
 * @return the {@link DirectoryStatus} of the location
 */
DirectoryStatus checkDirStatus(BlobClient dirBlobClient) {
    if (dirBlobClient == null) {
        throw Utility.logError(logger, new IllegalArgumentException("The blob client was null."));
    }
    BlobContainerClient containerClient = getContainerClient(dirBlobClient);
    // Two results are enough to distinguish empty from non-empty; metadata is needed to spot the marker.
    ListBlobsOptions listOptions = new ListBlobsOptions().setMaxResultsPerPage(2)
        .setPrefix(dirBlobClient.getBlobName())
        .setDetails(new BlobListDetails().setRetrieveMetadata(true));
    /*
    Do a list on prefix.
    Zero elements means no virtual dir. Does not exist.
    One element that matches this dir means empty.
    One element that doesn't match this dir or more than one element. Not empty.
    One element that matches the name but does not have a directory marker means the resource is not a directory.
    Note that blob names that match the prefix exactly are returned in listing operations.
    */
    Iterator<BlobItem> blobIterator = containerClient.listBlobsByHierarchy(AzureFileSystem.PATH_SEPARATOR,
        listOptions, null).iterator();
    if (!blobIterator.hasNext()) {
        return DirectoryStatus.DOES_NOT_EXIST;
    } else {
        BlobItem item = blobIterator.next();
        if (blobIterator.hasNext()) {
            return DirectoryStatus.NOT_EMPTY;
        }
        if (!item.getName().equals(dirBlobClient.getBlobName())) {
            /*
            Names do not match. Must be a virtual dir with one item. e.g. blob with name "foo/bar" means dir "foo"
            exists.
            */
            return DirectoryStatus.NOT_EMPTY;
        }
        if (item.getMetadata() != null && item.getMetadata().containsKey(DIR_METADATA_MARKER)) {
            return DirectoryStatus.EMPTY;
        }
        return DirectoryStatus.NOT_A_DIRECTORY;
    }
}
/*int checkRootDirStatus(BlobContainerClient rootClient) {
}*/
/**
* {@inheritDoc}
*/
@Override
public void move(Path path, Path path1, CopyOption... copyOptions) throws IOException {
// TODO(review): unimplemented stub — silently succeeds without moving anything.
}
/**
* {@inheritDoc}
*/
@Override
public boolean isSameFile(Path path, Path path1) throws IOException {
// TODO(review): unimplemented stub — always reports "not the same file".
return false;
}
/**
* {@inheritDoc}
*/
@Override
public boolean isHidden(Path path) throws IOException {
// TODO(review): unimplemented stub — nothing is reported hidden.
return false;
}
/**
* {@inheritDoc}
*/
@Override
public FileStore getFileStore(Path path) throws IOException {
// TODO(review): unimplemented stub — returns null, which violates the nio contract once live.
return null;
}
/**
* {@inheritDoc}
*/
@Override
public void checkAccess(Path path, AccessMode... accessModes) throws IOException {
// TODO(review): unimplemented stub — all access checks pass vacuously.
}
/**
* {@inheritDoc}
*/
@Override
public <V extends FileAttributeView> V getFileAttributeView(Path path, Class<V> aClass, LinkOption... linkOptions) {
// TODO(review): unimplemented stub.
return null;
}
/**
* {@inheritDoc}
*/
@Override
public <A extends BasicFileAttributes> A readAttributes(Path path, Class<A> aClass, LinkOption... linkOptions)
throws IOException {
// TODO(review): unimplemented stub.
return null;
}
/**
* {@inheritDoc}
*/
@Override
public Map<String, Object> readAttributes(Path path, String s, LinkOption... linkOptions) throws IOException {
// TODO(review): unimplemented stub.
return null;
}
/**
* {@inheritDoc}
*/
@Override
public void setAttribute(Path path, String s, Object o, LinkOption... linkOptions) throws IOException {
// TODO(review): unimplemented stub — attribute writes are dropped.
}
// Invoked by AzureFileSystem on close so the account name can be reopened later.
void closeFileSystem(String fileSystemName) {
this.openFileSystems.remove(fileSystemName);
}
/**
* Extracts the storage account name from the query component of an azb URI.
* Validates the scheme, requires a non-empty query, and requires an "account=" parameter
* with a non-empty value; each violation is logged and raised as IllegalArgumentException.
* NOTE(review): several string literals below appear truncated in this copy of the file
* (they end at "azb:") — verify the full messages against the upstream source.
* NOTE(review): using Flux/Mono to scan a split query string is heavyweight for a
* synchronous parse; a plain loop would suffice — consider simplifying upstream.
*/
private String extractAccountName(URI uri) {
if (!uri.getScheme().equals(this.getScheme())) {
throw Utility.logError(this.logger, new IllegalArgumentException(
"URI scheme does not match this provider"));
}
if (CoreUtils.isNullOrEmpty(uri.getQuery())) {
throw Utility.logError(this.logger, new IllegalArgumentException("URI does not contain a query "
+ "component. FileSystems require a URI of the format \"azb:
}
// blockLast() means the LAST "account=" parameter wins when duplicates are present.
String accountName = Flux.fromArray(uri.getQuery().split("&"))
.filter(s -> s.startsWith(ACCOUNT_QUERY_KEY + "="))
.switchIfEmpty(Mono.error(Utility.logError(this.logger, new IllegalArgumentException(
"URI does not contain an \"" + ACCOUNT_QUERY_KEY + "=\" parameter. FileSystems require a URI "
+ "of the format \"azb:
.map(s -> s.substring(ACCOUNT_QUERY_KEY.length() + 1))
.blockLast();
if (CoreUtils.isNullOrEmpty(accountName)) {
throw Utility.logError(logger, new IllegalArgumentException("No account name provided in URI query."));
}
return accountName;
}
} |
Is there a reason for the duration being 30 here? | public void copy(Path source, Path destination, CopyOption... copyOptions) throws IOException {
if (!(source instanceof AzurePath && destination instanceof AzurePath)) {
throw Utility.logError(logger, new IllegalArgumentException("This provider cannot operate on subtypes of "
+ "Path other than AzurePath"));
}
if (source.equals(destination)) {
return;
}
boolean replaceExisting = false;
List<CopyOption> optionsList = new ArrayList<>(Arrays.asList(copyOptions));
if (!optionsList.contains(StandardCopyOption.COPY_ATTRIBUTES)) {
throw Utility.logError(logger, new UnsupportedOperationException("StandardCopyOption.COPY_ATTRIBUTES "
+ "must be specified as the service will always copy file attributes."));
}
optionsList.remove(StandardCopyOption.COPY_ATTRIBUTES);
if (optionsList.contains(StandardCopyOption.REPLACE_EXISTING)) {
replaceExisting = true;
optionsList.remove(StandardCopyOption.REPLACE_EXISTING);
}
if (!optionsList.isEmpty()) {
throw Utility.logError(logger, new UnsupportedOperationException("Unsupported copy option found. Only "
+ "StandardCopyOption.COPY_ATTRIBUTES and StandareCopyOption.REPLACE_EXISTING are supported."));
}
if (source.equals(source.getRoot()) || destination.equals(destination.getRoot())) {
throw Utility.logError(logger, new IllegalArgumentException(String.format("Neither source nor destination "
+ "can be just a root directory. Source: %s. Destination: %s.", source.toString(),
destination.toString())));
}
BlobClient sourceBlob = ((AzurePath) source).toBlobClient();
BlobClient destinationBlob = ((AzurePath) destination).toBlobClient();
DirectoryStatus destinationStatus = checkDirStatus(destinationBlob);
if (destinationStatus.equals(DirectoryStatus.NOT_EMPTY)) {
throw Utility.logError(logger, new DirectoryNotEmptyException(destination.toString()));
}
/*
Set request conditions if we should not overwrite. We can error out here if we know something already exists,
but we will also create request conditions as a safeguard against overwriting something that was created
between our check and put.
*/
BlobRequestConditions requestConditions = null;
if (!replaceExisting) {
if (!destinationStatus.equals(DirectoryStatus.DOES_NOT_EXIST)) {
throw Utility.logError(logger, new FileAlreadyExistsException(destination.toString()));
}
requestConditions = new BlobRequestConditions().setIfNoneMatch("*");
}
/*
More path validation
Check that the parent for the destination exists. We only need to perform this check if there is nothing
currently at the destination, for if the destination exists, its parent at least weakly exists and we
can skip a service call.
*/
if (destinationStatus.equals(DirectoryStatus.DOES_NOT_EXIST) && !checkParentDirectoryExists(destination)) {
throw Utility.logError(logger, new IOException("Parent directory of destination location does not exist."
+ "The destination path is therefore invalid. Destination: " + destination.toString()));
}
/*
Try to copy the resource at the source path.
There is an optimization here where we try to do the copy first and only check for a virtual directory if
there's a 404. In the cases of files and concrete directories, this only requires one request. For virtual
directories, however, this requires three requests: failed copy, check status, create directory. Depending on
customer scenarios and how many virtual directories they copy, it could be better to check the directory status
first and then do a copy or createDir, which would always be two requests for all resource types.
*/
try {
SyncPoller<BlobCopyInfo, Void> pollResponse =
destinationBlob.beginCopy(sourceBlob.getBlobUrl(), null, null, null, null, requestConditions, null);
pollResponse.waitForCompletion(Duration.ofSeconds(30));
} catch (BlobStorageException e) {
if (e.getErrorCode().equals(BlobErrorCode.BLOB_NOT_FOUND)
&& !checkDirStatus(sourceBlob).equals(DirectoryStatus.DOES_NOT_EXIST)) {
/*
We already checked that the parent exists and validated the paths above, so we can put the blob
directly.
*/
putDirectoryBlob(destinationBlob, null, null, requestConditions);
} else {
throw Utility.logError(logger, new IOException(e));
}
} catch (RuntimeException e) {
throw Utility.logError(logger, new IOException(e));
}
} | pollResponse.waitForCompletion(Duration.ofSeconds(30)); | public void copy(Path source, Path destination, CopyOption... copyOptions) throws IOException {
if (!(source instanceof AzurePath && destination instanceof AzurePath)) {
throw Utility.logError(logger, new IllegalArgumentException("This provider cannot operate on subtypes of "
+ "Path other than AzurePath"));
}
if (source.equals(destination)) {
return;
}
boolean replaceExisting = false;
List<CopyOption> optionsList = new ArrayList<>(Arrays.asList(copyOptions));
if (!optionsList.contains(StandardCopyOption.COPY_ATTRIBUTES)) {
throw Utility.logError(logger, new UnsupportedOperationException("StandardCopyOption.COPY_ATTRIBUTES "
+ "must be specified as the service will always copy file attributes."));
}
optionsList.remove(StandardCopyOption.COPY_ATTRIBUTES);
if (optionsList.contains(StandardCopyOption.REPLACE_EXISTING)) {
replaceExisting = true;
optionsList.remove(StandardCopyOption.REPLACE_EXISTING);
}
if (!optionsList.isEmpty()) {
throw Utility.logError(logger, new UnsupportedOperationException("Unsupported copy option found. Only "
+ "StandardCopyOption.COPY_ATTRIBUTES and StandareCopyOption.REPLACE_EXISTING are supported."));
}
if (source.equals(source.getRoot()) || destination.equals(destination.getRoot())) {
throw Utility.logError(logger, new IllegalArgumentException(String.format("Neither source nor destination "
+ "can be just a root directory. Source: %s. Destination: %s.", source.toString(),
destination.toString())));
}
BlobClient sourceBlob = ((AzurePath) source).toBlobClient();
BlobClient destinationBlob = ((AzurePath) destination).toBlobClient();
DirectoryStatus destinationStatus = checkDirStatus(destinationBlob);
if (destinationStatus.equals(DirectoryStatus.NOT_EMPTY)) {
throw Utility.logError(logger, new DirectoryNotEmptyException(destination.toString()));
}
/*
Set request conditions if we should not overwrite. We can error out here if we know something already exists,
but we will also create request conditions as a safeguard against overwriting something that was created
between our check and put.
*/
BlobRequestConditions requestConditions = null;
if (!replaceExisting) {
if (!destinationStatus.equals(DirectoryStatus.DOES_NOT_EXIST)) {
throw Utility.logError(logger, new FileAlreadyExistsException(destination.toString()));
}
requestConditions = new BlobRequestConditions().setIfNoneMatch("*");
}
/*
More path validation
Check that the parent for the destination exists. We only need to perform this check if there is nothing
currently at the destination, for if the destination exists, its parent at least weakly exists and we
can skip a service call.
*/
if (destinationStatus.equals(DirectoryStatus.DOES_NOT_EXIST) && !checkParentDirectoryExists(destination)) {
throw Utility.logError(logger, new IOException("Parent directory of destination location does not exist."
+ "The destination path is therefore invalid. Destination: " + destination.toString()));
}
/*
Try to copy the resource at the source path.
There is an optimization here where we try to do the copy first and only check for a virtual directory if
there's a 404. In the cases of files and concrete directories, this only requires one request. For virtual
directories, however, this requires three requests: failed copy, check status, create directory. Depending on
customer scenarios and how many virtual directories they copy, it could be better to check the directory status
first and then do a copy or createDir, which would always be two requests for all resource types.
*/
try {
SyncPoller<BlobCopyInfo, Void> pollResponse =
destinationBlob.beginCopy(sourceBlob.getBlobUrl(), null, null, null, null, requestConditions, null);
pollResponse.waitForCompletion(Duration.ofSeconds(COPY_TIMEOUT_SECONDS));
} catch (BlobStorageException e) {
if (e.getErrorCode().equals(BlobErrorCode.BLOB_NOT_FOUND)
&& !checkDirStatus(sourceBlob).equals(DirectoryStatus.DOES_NOT_EXIST)) {
/*
We already checked that the parent exists and validated the paths above, so we can put the blob
directly.
*/
putDirectoryBlob(destinationBlob, null, null, requestConditions);
} else {
throw Utility.logError(logger, new IOException(e));
}
} catch (RuntimeException e) {
throw Utility.logError(logger, new IOException(e));
}
} | class AzureFileSystemProvider extends FileSystemProvider {
private final ClientLogger logger = new ClientLogger(AzureFileSystemProvider.class);
public static final String CONTENT_TYPE = "Content-Type";
public static final String CONTENT_DISPOSITION = "Content-Disposition";
public static final String CONTENT_LANGUAGE = "Content-Language";
public static final String CONTENT_ENCODING = "Content-Encoding";
public static final String CONTENT_MD5 = "Content-MD5";
public static final String CACHE_CONTROL = "Cache-Control";
private static final String ACCOUNT_QUERY_KEY = "account";
static final String DIR_METADATA_MARKER = "is_hdi_folder";
private final ConcurrentMap<String, FileSystem> openFileSystems;
/**
* Creates an AzureFileSystemProvider.
*/
public AzureFileSystemProvider() {
this.openFileSystems = new ConcurrentHashMap<>();
}
/**
* Returns {@code "azb".}
*/
@Override
public String getScheme() {
return "azb";
}
/**
* The format of a {@code URI} identifying a file system is {@code "azb:
* <p>
* Once closed, a file system with the same identifier may be reopened.
* {@inheritDoc}
*/
@Override
public FileSystem newFileSystem(URI uri, Map<String, ?> config) throws IOException {
    String accountName = extractAccountName(uri);
    // Fast-path rejection; kept for the common case so we do not construct a file system
    // (and its clients) only to discard it.
    if (this.openFileSystems.containsKey(accountName)) {
        throw Utility.logError(this.logger, new FileSystemAlreadyExistsException("Name: " + accountName));
    }
    AzureFileSystem afs = new AzureFileSystem(this, accountName, config);
    // putIfAbsent is atomic on the ConcurrentMap; the previous containsKey-then-put sequence
    // could race with a concurrent caller and silently overwrite an already-open file system.
    if (this.openFileSystems.putIfAbsent(accountName, afs) != null) {
        throw Utility.logError(this.logger, new FileSystemAlreadyExistsException("Name: " + accountName));
    }
    return afs;
}
/**
* The format of a {@code URI} identifying an file system is {@code "azb:
* <p>
* Trying to retrieve a closed file system will throw a {@link FileSystemNotFoundException}. Once closed, a
* file system with the same identifier may be reopened.
* {@inheritDoc}
*/
@Override
public FileSystem getFileSystem(URI uri) {
    String accountName = extractAccountName(uri);
    // Single map lookup replaces the containsKey-then-get pair, which both did double work
    // and could race with a concurrent closeFileSystem between the two calls.
    FileSystem fs = this.openFileSystems.get(accountName);
    if (fs == null) {
        throw Utility.logError(this.logger, new FileSystemNotFoundException("Name: " + accountName));
    }
    return fs;
}
/**
* {@inheritDoc}
*/
@Override
public Path getPath(URI uri) {
return getFileSystem(uri).getPath(uri.getPath());
}
/**
* {@inheritDoc}
*/
@Override
public SeekableByteChannel newByteChannel(Path path, Set<? extends OpenOption> set,
FileAttribute<?>... fileAttributes) throws IOException {
return null;
}
/**
* {@inheritDoc}
*/
@Override
public DirectoryStream<Path> newDirectoryStream(Path path, DirectoryStream.Filter<? super Path> filter)
throws IOException {
return null;
}
/**
* The existence of a directory in the {@code AzureFileSystem} is defined on two levels. <i>Weak existence</i> is
* defined by the presence of a non-zero number of blobs prefixed with the directory's path. This concept is also
* known as a <i>virtual directory</i> and enables the file system to work with containers that were pre-loaded
* with data by another source but need to be accessed by this file system. <i>Strong existence</i> is defined as
* the presence of an actual storage resource at the given path, which in the case of directories, is a zero-length
* blob whose name is the directory path with a particular metadata field indicating the blob's status as a
* directory. This is also known as a <i>concrete directory</i>. Directories created by this file system will
* strongly exist. Operations targeting directories themselves as the object (e.g. setting properties) will target
* marker blobs underlying concrete directories. Other operations (e.g. listing) will operate on the blob-name
* prefix.
* <p>
* This method fulfills the nio contract of: "The check for the existence of the file and the creation of the
* directory if it does not exist are a single operation that is atomic with respect to all other filesystem
* activities that might affect the directory." More specifically, this method will atomically check for <i>strong
* existence</i> of another file or directory at the given path and fail if one is present. On the other hand, we
* only check for <i>weak existence</i> of the parent to determine if the given path is valid. Additionally, the
* action of checking whether the parent exists, is <i>not</i> atomic with the creation of the directory. Note that
* while it is possible that the parent may be deleted between when the parent is determined to exist and the
* creation of the child, the creation of the child will always ensure the existence of a virtual parent, so the
* child will never be left floating and unreachable. The different checks on parent and child is due to limitations
* in the Storage service API.
* <p>
* There may be some unintuitive behavior when working with directories in this file system, particularly virtual
* directories(usually those not created by this file system). A virtual directory will disappear as soon as all its
* children have been deleted. Furthermore, if a directory with the given path weakly exists at the time of calling
* this method, this method will still return success and create a concrete directory at the target location.
* In other words, it is possible to "double create" a directory if it first weakly exists and then is strongly
* created. This is both because it is impossible to atomically check if a virtual directory exists while creating a
* concrete directory and because such behavior will have minimal side effects--no files will be overwritten and the
* directory will still be available for writing as intended, though it may not be empty.
* <p>
* This method will attempt to extract standard HTTP content headers from the list of file attributes to set them
* as blob headers. All other attributes will be set as blob metadata. The value of every attribute will be
* converted to a {@code String} with the exception of the Content-MD5 attribute which expects a {@code byte[]}.
* When extracting the content headers, the following strings will be used for comparison (constants for these
* values can be found on this type):
* <ul>
* <li>{@code Content-Type}</li>
* <li>{@code Content-Disposition}</li>
* <li>{@code Content-Language}</li>
* <li>{@code Content-Encoding}</li>
* <li>{@code Content-MD5}</li>
* <li>{@code Cache-Control}</li>
* </ul>
* Note that these properties also have a particular semantic in that if one is specified, all are updated. In other
* words, if any of the above is set, all those that are not set will be cleared. See the
* <a href="https:
* information.
*
* {@inheritDoc}
*/
@Override
public void createDirectory(Path path, FileAttribute<?>... fileAttributes) throws IOException {
    if (!(path instanceof AzurePath)) {
        throw Utility.logError(logger, new IllegalArgumentException("This provider cannot operate on subtypes of "
            + "Path other than AzurePath"));
    }
    fileAttributes = fileAttributes == null ? new FileAttribute<?>[0] : fileAttributes;
    BlobClient client = ((AzurePath) path).toBlobClient();
    // Roots are containers and cannot be created through this API.
    Path root = path.getRoot();
    if (root != null && root.equals(path)) {
        throw Utility.logError(logger, new IOException("Creating a root directory is not supported."));
    }
    if (checkParentDirectoryExists(path)) {
        try {
            List<FileAttribute<?>> attributeList = new ArrayList<>(Arrays.asList(fileAttributes));
            BlobHttpHeaders headers = Utility.extractHttpHeaders(attributeList, logger);
            Map<String, String> metadata = Utility.convertAttributesToMetadata(attributeList);
            // ifNoneMatch("*") makes the creation fail atomically if anything already exists here.
            putDirectoryBlob(client, headers, metadata, new BlobRequestConditions().setIfNoneMatch("*"));
        } catch (BlobStorageException e) {
            // A 409 alone is ambiguous (failed preconditions also surface as 409); only report
            // FileAlreadyExists when the service explicitly says the blob already exists.
            if (e.getStatusCode() == HttpURLConnection.HTTP_CONFLICT
                && e.getErrorCode().equals(BlobErrorCode.BLOB_ALREADY_EXISTS)) {
                throw Utility.logError(logger, new FileAlreadyExistsException(path.toString()));
            } else {
                throw Utility.logError(logger, new IOException("An error occured when creating the directory", e));
            }
        }
    } else {
        throw Utility.logError(logger, new IOException("Parent directory does not exist for path: "
            + path.toString()));
    }
}
/**
* Creates the actual directory marker. This method should only be used when any necessary checks for proper
* conditions of directory creation (e.g. parent existence) have already been performed.
*
* @param destinationClient A blobClient pointing to the location where the directory should be put.
* @param headers Any headers that should be associated with the directory.
* @param metadata Any metadata that should be associated with the directory. This method will add the necessary
* metadata to distinguish this blob as a directory.
* @param requestConditions Any necessary request conditions to pass when creating the directory blob.
*/
private void putDirectoryBlob(BlobClient destinationClient, BlobHttpHeaders headers, Map<String, String> metadata,
BlobRequestConditions requestConditions) {
metadata = prepareMetadataForDirectory(metadata);
destinationClient.getBlockBlobClient().commitBlockListWithResponse(Collections.emptyList(), headers,
metadata, null, requestConditions, null, null);
}
/**
* Checks for the existence of the parent of the given path. We do not check for the actual marker blob as parents
* need only weakly exist.
*
* If the parent is a root (container), it will be assumed to exist, so it must be validated elsewhere that the
* container is a legitimate root within this file system.
*/
boolean checkParentDirectoryExists(Path path) throws IOException {
/*
If the parent is just the root (or null, which means the parent is implicitly the default directory which is a
root), that means we are checking a container, which is always considered to exist. Otherwise, perform normal
existence check.
*/
Path parent = path.getParent();
return (parent == null || parent.equals(path.getRoot()))
|| checkDirectoryExists(((AzurePath) path.getParent()).toBlobClient());
}
/**
* Checks whether a directory exists by either being empty or having children.
*/
boolean checkDirectoryExists(BlobClient dirBlobClient) {
if (dirBlobClient == null) {
throw Utility.logError(logger, new IllegalArgumentException("One or both of the parameters was null."));
}
DirectoryStatus dirStatus = checkDirStatus(dirBlobClient);
return dirStatus.equals(DirectoryStatus.EMPTY) || dirStatus.equals(DirectoryStatus.NOT_EMPTY);
}
Map<String, String> prepareMetadataForDirectory(Map<String, String> metadata) {
if (metadata == null) {
metadata = new HashMap<>();
}
metadata.put(DIR_METADATA_MARKER, "true");
return metadata;
}
BlobContainerClient getContainerClient(BlobClient client) {
return new BlobContainerClientBuilder().endpoint(client.getBlobUrl())
.pipeline(client.getHttpPipeline())
.buildClient();
}
/**
* {@inheritDoc}
*/
@Override
public void delete(Path path) throws IOException {
}
/**
* As stated in the nio docs, this method is not atomic. More specifically, the checks necessary to validate the
* inputs and state of the file system are not atomic with the actual copying of data. If the copy is triggered,
* the copy itself is atomic and only a complete copy will ever be left at the destination.
*
* In addition to those in the nio javadocs, this method has the following requirements for successful completion.
* {@link StandardCopyOption
* if this option is not passed, an {@link UnsupportedOperationException} will be thrown. Neither the source nor the
* destination can be a root directory; if either is a root directory, an {@link IllegalArgumentException} will be
* thrown. The parent directory of the destination must at least weakly exist; if it does not, an
* {@link IOException} will be thrown. The only supported option other than
* {@link StandardCopyOption
* other option will result in an {@link UnsupportedOperationException}.
*
* This method supports both virtual and concrete directories as both the source and destination. Unlike when
* creating a directory, the existence of a virtual directory at the destination will cause this operation to fail.
* This is in order to prevent the possibility of overwriting a non-empty virtual directory with a file. Still, as
* mentioned above, this check is not atomic with the creation of the resultant directory.
*
* {@inheritDoc}
* @see
*/
@Override
/**
* This method will check if a directory is extant and/or empty and accommodates virtual directories. This method
* will not check the status of root directories.
*/
DirectoryStatus checkDirStatus(BlobClient dirBlobClient) {
if (dirBlobClient == null) {
throw Utility.logError(logger, new IllegalArgumentException("The blob client was null."));
}
BlobContainerClient containerClient = getContainerClient(dirBlobClient);
ListBlobsOptions listOptions = new ListBlobsOptions().setMaxResultsPerPage(2)
.setPrefix(dirBlobClient.getBlobName())
.setDetails(new BlobListDetails().setRetrieveMetadata(true));
/*
Do a list on prefix.
Zero elements means no virtual dir. Does not exist.
One element that matches this dir means empty.
One element that doesn't match this dir or more than one element. Not empty.
One element that matches the name but does not have a directory marker means the resource is not a directory.
Note that blob names that match the prefix exactly are returned in listing operations.
*/
Iterator<BlobItem> blobIterator = containerClient.listBlobsByHierarchy(AzureFileSystem.PATH_SEPARATOR,
listOptions, null).iterator();
if (!blobIterator.hasNext()) {
return DirectoryStatus.DOES_NOT_EXIST;
} else {
BlobItem item = blobIterator.next();
if (blobIterator.hasNext()) {
return DirectoryStatus.NOT_EMPTY;
}
if (!item.getName().equals(dirBlobClient.getBlobName())) {
/*
Names do not match. Must be a virtual dir with one item. e.g. blob with name "foo/bar" means dir "foo"
exists.
*/
return DirectoryStatus.NOT_EMPTY;
}
if (item.getMetadata() != null && item.getMetadata().containsKey(DIR_METADATA_MARKER)) {
return DirectoryStatus.EMPTY;
}
return DirectoryStatus.NOT_A_DIRECTORY;
}
}
/*int checkRootDirStatus(BlobContainerClient rootClient) {
}*/
/**
* {@inheritDoc}
*/
@Override
public void move(Path path, Path path1, CopyOption... copyOptions) throws IOException {
}
/**
* {@inheritDoc}
*/
@Override
public boolean isSameFile(Path path, Path path1) throws IOException {
return false;
}
/**
* {@inheritDoc}
*/
@Override
public boolean isHidden(Path path) throws IOException {
return false;
}
/**
* {@inheritDoc}
*/
@Override
public FileStore getFileStore(Path path) throws IOException {
return null;
}
/**
* {@inheritDoc}
*/
@Override
public void checkAccess(Path path, AccessMode... accessModes) throws IOException {
}
/**
* {@inheritDoc}
*/
@Override
public <V extends FileAttributeView> V getFileAttributeView(Path path, Class<V> aClass, LinkOption... linkOptions) {
return null;
}
/**
* {@inheritDoc}
*/
@Override
public <A extends BasicFileAttributes> A readAttributes(Path path, Class<A> aClass, LinkOption... linkOptions)
throws IOException {
return null;
}
/**
* {@inheritDoc}
*/
@Override
public Map<String, Object> readAttributes(Path path, String s, LinkOption... linkOptions) throws IOException {
return null;
}
/**
* {@inheritDoc}
*/
@Override
public void setAttribute(Path path, String s, Object o, LinkOption... linkOptions) throws IOException {
}
void closeFileSystem(String fileSystemName) {
this.openFileSystems.remove(fileSystemName);
}
private String extractAccountName(URI uri) {
if (!uri.getScheme().equals(this.getScheme())) {
throw Utility.logError(this.logger, new IllegalArgumentException(
"URI scheme does not match this provider"));
}
if (CoreUtils.isNullOrEmpty(uri.getQuery())) {
throw Utility.logError(this.logger, new IllegalArgumentException("URI does not contain a query "
+ "component. FileSystems require a URI of the format \"azb:
}
String accountName = Flux.fromArray(uri.getQuery().split("&"))
.filter(s -> s.startsWith(ACCOUNT_QUERY_KEY + "="))
.switchIfEmpty(Mono.error(Utility.logError(this.logger, new IllegalArgumentException(
"URI does not contain an \"" + ACCOUNT_QUERY_KEY + "=\" parameter. FileSystems require a URI "
+ "of the format \"azb:
.map(s -> s.substring(ACCOUNT_QUERY_KEY.length() + 1))
.blockLast();
if (CoreUtils.isNullOrEmpty(accountName)) {
throw Utility.logError(logger, new IllegalArgumentException("No account name provided in URI query."));
}
return accountName;
}
} | class AzureFileSystemProvider extends FileSystemProvider {
private final ClientLogger logger = new ClientLogger(AzureFileSystemProvider.class);
public static final String CONTENT_TYPE = "Content-Type";
public static final String CONTENT_DISPOSITION = "Content-Disposition";
public static final String CONTENT_LANGUAGE = "Content-Language";
public static final String CONTENT_ENCODING = "Content-Encoding";
public static final String CONTENT_MD5 = "Content-MD5";
public static final String CACHE_CONTROL = "Cache-Control";
private static final String ACCOUNT_QUERY_KEY = "account";
private static final int COPY_TIMEOUT_SECONDS = 30;
static final String DIR_METADATA_MARKER = "is_hdi_folder";
private final ConcurrentMap<String, FileSystem> openFileSystems;
/**
 * Creates an AzureFileSystemProvider with an empty registry of open file systems.
 */
public AzureFileSystemProvider() {
    openFileSystems = new ConcurrentHashMap<>();
}
/**
 * Returns the URI scheme, {@code "azb"}, that identifies this provider.
 */
@Override
public String getScheme() {
    return "azb";
}
/**
 * Opens a new {@code AzureFileSystem} for the account named in the URI's query component.
 * NOTE(review): the original javadoc's URI-format example was truncated at {@code "azb:} —
 * presumably {@code "azb://?account=<account_name>"}; confirm against the class-level docs.
 * <p>
 * Once closed, a file system with the same identifier may be reopened.
 * {@inheritDoc}
 */
@Override
public FileSystem newFileSystem(URI uri, Map<String, ?> config) throws IOException {
    String accountName = extractAccountName(uri);
    // Fast-path rejection; kept for the common case so we do not construct a file system
    // (and its clients) only to discard it.
    if (this.openFileSystems.containsKey(accountName)) {
        throw Utility.logError(this.logger, new FileSystemAlreadyExistsException("Name: " + accountName));
    }
    AzureFileSystem afs = new AzureFileSystem(this, accountName, config);
    // putIfAbsent is atomic on the ConcurrentMap; the previous containsKey-then-put sequence
    // could race with a concurrent caller and silently overwrite an already-open file system.
    if (this.openFileSystems.putIfAbsent(accountName, afs) != null) {
        throw Utility.logError(this.logger, new FileSystemAlreadyExistsException("Name: " + accountName));
    }
    return afs;
}
/**
 * Retrieves an already-open file system for the account named in the URI's query component.
 * <p>
 * Trying to retrieve a closed file system will throw a {@link FileSystemNotFoundException}.
 * Once closed, a file system with the same identifier may be reopened.
 * {@inheritDoc}
 */
@Override
public FileSystem getFileSystem(URI uri) {
    String accountName = extractAccountName(uri);
    // Single map lookup replaces the containsKey-then-get pair, which both did double work
    // and could race with a concurrent closeFileSystem between the two calls.
    FileSystem fs = this.openFileSystems.get(accountName);
    if (fs == null) {
        throw Utility.logError(this.logger, new FileSystemNotFoundException("Name: " + accountName));
    }
    return fs;
}
/**
 * Resolves the URI's path component against the file system the URI identifies.
 * {@inheritDoc}
 */
@Override
public Path getPath(URI uri) {
    FileSystem fs = getFileSystem(uri);
    return fs.getPath(uri.getPath());
}
/**
 * {@inheritDoc}
 */
@Override
public SeekableByteChannel newByteChannel(Path path, Set<? extends OpenOption> set,
FileAttribute<?>... fileAttributes) throws IOException {
    // TODO: not yet implemented — returning null violates the FileSystemProvider contract.
    return null;
}
/**
* {@inheritDoc}
*/
@Override
public DirectoryStream<Path> newDirectoryStream(Path path, DirectoryStream.Filter<? super Path> filter)
throws IOException {
return null;
}
/**
* The existence of a directory in the {@code AzureFileSystem} is defined on two levels. <i>Weak existence</i> is
* defined by the presence of a non-zero number of blobs prefixed with the directory's path. This concept is also
* known as a <i>virtual directory</i> and enables the file system to work with containers that were pre-loaded
* with data by another source but need to be accessed by this file system. <i>Strong existence</i> is defined as
* the presence of an actual storage resource at the given path, which in the case of directories, is a zero-length
* blob whose name is the directory path with a particular metadata field indicating the blob's status as a
* directory. This is also known as a <i>concrete directory</i>. Directories created by this file system will
* strongly exist. Operations targeting directories themselves as the object (e.g. setting properties) will target
* marker blobs underlying concrete directories. Other operations (e.g. listing) will operate on the blob-name
* prefix.
* <p>
* This method fulfills the nio contract of: "The check for the existence of the file and the creation of the
* directory if it does not exist are a single operation that is atomic with respect to all other filesystem
* activities that might affect the directory." More specifically, this method will atomically check for <i>strong
* existence</i> of another file or directory at the given path and fail if one is present. On the other hand, we
* only check for <i>weak existence</i> of the parent to determine if the given path is valid. Additionally, the
* action of checking whether the parent exists, is <i>not</i> atomic with the creation of the directory. Note that
* while it is possible that the parent may be deleted between when the parent is determined to exist and the
* creation of the child, the creation of the child will always ensure the existence of a virtual parent, so the
* child will never be left floating and unreachable. The different checks on parent and child is due to limitations
* in the Storage service API.
* <p>
* There may be some unintuitive behavior when working with directories in this file system, particularly virtual
* directories(usually those not created by this file system). A virtual directory will disappear as soon as all its
* children have been deleted. Furthermore, if a directory with the given path weakly exists at the time of calling
* this method, this method will still return success and create a concrete directory at the target location.
* In other words, it is possible to "double create" a directory if it first weakly exists and then is strongly
* created. This is both because it is impossible to atomically check if a virtual directory exists while creating a
* concrete directory and because such behavior will have minimal side effects--no files will be overwritten and the
* directory will still be available for writing as intended, though it may not be empty.
* <p>
* This method will attempt to extract standard HTTP content headers from the list of file attributes to set them
* as blob headers. All other attributes will be set as blob metadata. The value of every attribute will be
* converted to a {@code String} with the exception of the Content-MD5 attribute which expects a {@code byte[]}.
* When extracting the content headers, the following strings will be used for comparison (constants for these
* values can be found on this type):
* <ul>
* <li>{@code Content-Type}</li>
* <li>{@code Content-Disposition}</li>
* <li>{@code Content-Language}</li>
* <li>{@code Content-Encoding}</li>
* <li>{@code Content-MD5}</li>
* <li>{@code Cache-Control}</li>
* </ul>
* Note that these properties also have a particular semantic in that if one is specified, all are updated. In other
* words, if any of the above is set, all those that are not set will be cleared. See the
* <a href="https:
* information.
*
* {@inheritDoc}
*/
@Override
public void createDirectory(Path path, FileAttribute<?>... fileAttributes) throws IOException {
    // Guard: only AzurePath instances are supported by this provider.
    if (!(path instanceof AzurePath)) {
        throw Utility.logError(logger, new IllegalArgumentException("This provider cannot operate on subtypes of "
            + "Path other than AzurePath"));
    }
    // Guard: a root (container) cannot be created through this API.
    Path root = path.getRoot();
    if (root != null && root.equals(path)) {
        throw Utility.logError(logger, new IOException("Creating a root directory is not supported."));
    }
    // Guard: the parent must at least weakly exist for the target path to be valid.
    if (!checkParentDirectoryExists(path)) {
        throw Utility.logError(logger, new IOException("Parent directory does not exist for path: "
            + path.toString()));
    }
    FileAttribute<?>[] attrs = fileAttributes == null ? new FileAttribute<?>[0] : fileAttributes;
    List<FileAttribute<?>> attributeList = new ArrayList<>(Arrays.asList(attrs));
    BlobHttpHeaders headers = Utility.extractHttpHeaders(attributeList, logger);
    Map<String, String> metadata = Utility.convertAttributesToMetadata(attributeList);
    BlobClient client = ((AzurePath) path).toBlobClient();
    try {
        // ifNoneMatch("*") makes the marker-blob creation fail atomically if anything exists here.
        putDirectoryBlob(client, headers, metadata, new BlobRequestConditions().setIfNoneMatch("*"));
    } catch (BlobStorageException e) {
        // Only an explicit BLOB_ALREADY_EXISTS 409 maps to FileAlreadyExists; any other failure
        // is surfaced as a generic IOException with the cause preserved.
        if (e.getStatusCode() == HttpURLConnection.HTTP_CONFLICT
            && e.getErrorCode().equals(BlobErrorCode.BLOB_ALREADY_EXISTS)) {
            throw Utility.logError(logger, new FileAlreadyExistsException(path.toString()));
        }
        throw Utility.logError(logger, new IOException("An error occured when creating the directory", e));
    }
}
/**
 * Writes the zero-length marker blob that represents a concrete directory. Callers are
 * responsible for any precondition checks (e.g. parent existence) before invoking this.
 *
 * @param destinationClient client pointing at the directory's marker-blob location
 * @param headers HTTP headers to associate with the marker blob; may be null
 * @param metadata metadata for the marker blob; the directory-marker entry is added to it
 * @param requestConditions access conditions applied to the commit; may be null
 */
private void putDirectoryBlob(BlobClient destinationClient, BlobHttpHeaders headers, Map<String, String> metadata,
    BlobRequestConditions requestConditions) {
    Map<String, String> dirMetadata = prepareMetadataForDirectory(metadata);
    // An empty block list commits a zero-length block blob carrying the directory marker.
    destinationClient.getBlockBlobClient()
        .commitBlockListWithResponse(Collections.emptyList(), headers, dirMetadata, null, requestConditions,
            null, null);
}
/**
 * Reports whether the parent of {@code path} at least weakly exists (i.e. as a virtual
 * directory); the actual marker blob is not required. A null parent, or a parent equal to the
 * root, refers to a container, which is assumed to exist — callers must validate elsewhere that
 * the container is a legitimate root within this file system.
 */
boolean checkParentDirectoryExists(Path path) throws IOException {
    Path parent = path.getParent();
    // A null parent means the implicit default directory, which is a root. Roots (containers)
    // are always treated as existing; only non-root parents need a real existence check.
    if (parent == null || parent.equals(path.getRoot())) {
        return true;
    }
    return checkDirectoryExists(((AzurePath) parent).toBlobClient());
}
/**
 * Reports whether a directory exists, i.e. its status is either empty or non-empty
 * (virtual directories count).
 *
 * @param dirBlobClient client pointing at the directory to check; must not be null
 * @return true if the directory weakly or strongly exists
 * @throws IllegalArgumentException if {@code dirBlobClient} is null
 */
boolean checkDirectoryExists(BlobClient dirBlobClient) {
    if (dirBlobClient == null) {
        // Message fixed: this method takes exactly one parameter; the old text referred to two.
        throw Utility.logError(logger, new IllegalArgumentException("The blob client was null."));
    }
    DirectoryStatus dirStatus = checkDirStatus(dirBlobClient);
    return dirStatus.equals(DirectoryStatus.EMPTY) || dirStatus.equals(DirectoryStatus.NOT_EMPTY);
}
/**
 * Ensures the given metadata map carries the directory marker, creating a fresh map when the
 * caller passed null. The caller's map is mutated in place and returned for convenience.
 */
Map<String, String> prepareMetadataForDirectory(Map<String, String> metadata) {
    Map<String, String> result = (metadata == null) ? new HashMap<>() : metadata;
    result.put(DIR_METADATA_MARKER, "true");
    return result;
}
/**
 * Builds a container client that shares the given blob client's endpoint and HTTP pipeline.
 */
BlobContainerClient getContainerClient(BlobClient client) {
    BlobContainerClientBuilder builder = new BlobContainerClientBuilder()
        .endpoint(client.getBlobUrl())
        .pipeline(client.getHttpPipeline());
    return builder.buildClient();
}
/**
 * {@inheritDoc}
 */
@Override
public void delete(Path path) throws IOException {
    // TODO: not yet implemented — currently a silent no-op stub.
}
/**
* As stated in the nio docs, this method is not atomic. More specifically, the checks necessary to validate the
* inputs and state of the file system are not atomic with the actual copying of data. If the copy is triggered,
* the copy itself is atomic and only a complete copy will ever be left at the destination.
*
* In addition to those in the nio javadocs, this method has the following requirements for successful completion.
* {@link StandardCopyOption
* if this option is not passed, an {@link UnsupportedOperationException} will be thrown. Neither the source nor the
* destination can be a root directory; if either is a root directory, an {@link IllegalArgumentException} will be
* thrown. The parent directory of the destination must at least weakly exist; if it does not, an
* {@link IOException} will be thrown. The only supported option other than
* {@link StandardCopyOption
* other option will result in an {@link UnsupportedOperationException}.
*
* This method supports both virtual and concrete directories as both the source and destination. Unlike when
* creating a directory, the existence of a virtual directory at the destination will cause this operation to fail.
* This is in order to prevent the possibility of overwriting a non-empty virtual directory with a file. Still, as
* mentioned above, this check is not atomic with the creation of the resultant directory.
*
* {@inheritDoc}
* @see
*/
/**
 * Determines whether a directory exists and whether it is empty, accommodating virtual
 * directories. This method does not check the status of root directories.
 * Fix: removed a stray {@code @Override} annotation that dangled onto this method —
 * {@code checkDirStatus} overrides nothing, so the annotation was a compile error.
 *
 * @param dirBlobClient client pointing at the directory to inspect; must not be null
 * @return the resolved {@link DirectoryStatus}
 * @throws IllegalArgumentException if {@code dirBlobClient} is null
 */
DirectoryStatus checkDirStatus(BlobClient dirBlobClient) {
    if (dirBlobClient == null) {
        throw Utility.logError(logger, new IllegalArgumentException("The blob client was null."));
    }
    BlobContainerClient containerClient = getContainerClient(dirBlobClient);
    // Two results are enough to distinguish every case below; metadata is needed to spot the
    // directory marker on an exact-name match.
    ListBlobsOptions listOptions = new ListBlobsOptions().setMaxResultsPerPage(2)
        .setPrefix(dirBlobClient.getBlobName())
        .setDetails(new BlobListDetails().setRetrieveMetadata(true));
    /*
    List on the prefix:
    - zero results: no virtual dir -> does not exist;
    - two or more results: something lives under the prefix -> not empty;
    - one result whose name differs from the prefix: a virtual dir with one child -> not empty;
    - one exact-name result carrying the directory marker: concrete, empty directory;
    - one exact-name result without the marker: a plain blob, not a directory.
    Blob names matching the prefix exactly are returned by listing operations.
    */
    Iterator<BlobItem> blobIterator = containerClient.listBlobsByHierarchy(AzureFileSystem.PATH_SEPARATOR,
        listOptions, null).iterator();
    if (!blobIterator.hasNext()) {
        return DirectoryStatus.DOES_NOT_EXIST;
    }
    BlobItem item = blobIterator.next();
    if (blobIterator.hasNext()) {
        return DirectoryStatus.NOT_EMPTY;
    }
    if (!item.getName().equals(dirBlobClient.getBlobName())) {
        // Name mismatch: must be a virtual dir with one child, e.g. blob "foo/bar" implies "foo".
        return DirectoryStatus.NOT_EMPTY;
    }
    if (item.getMetadata() != null && item.getMetadata().containsKey(DIR_METADATA_MARKER)) {
        return DirectoryStatus.EMPTY;
    }
    return DirectoryStatus.NOT_A_DIRECTORY;
}
/*int checkRootDirStatus(BlobContainerClient rootClient) {
}*/
/**
* {@inheritDoc}
*/
@Override
public void move(Path path, Path path1, CopyOption... copyOptions) throws IOException {
}
/**
* {@inheritDoc}
*/
@Override
public boolean isSameFile(Path path, Path path1) throws IOException {
return false;
}
/**
* {@inheritDoc}
*/
@Override
public boolean isHidden(Path path) throws IOException {
return false;
}
/**
* {@inheritDoc}
*/
@Override
public FileStore getFileStore(Path path) throws IOException {
return null;
}
/**
* {@inheritDoc}
*/
@Override
public void checkAccess(Path path, AccessMode... accessModes) throws IOException {
}
/**
* {@inheritDoc}
*/
@Override
public <V extends FileAttributeView> V getFileAttributeView(Path path, Class<V> aClass, LinkOption... linkOptions) {
return null;
}
/**
* {@inheritDoc}
*/
@Override
public <A extends BasicFileAttributes> A readAttributes(Path path, Class<A> aClass, LinkOption... linkOptions)
throws IOException {
return null;
}
/**
* {@inheritDoc}
*/
@Override
public Map<String, Object> readAttributes(Path path, String s, LinkOption... linkOptions) throws IOException {
return null;
}
/**
* {@inheritDoc}
*/
@Override
public void setAttribute(Path path, String s, Object o, LinkOption... linkOptions) throws IOException {
}
/**
 * Removes the named file system from the open-file-system registry so it may be reopened later.
 */
void closeFileSystem(String fileSystemName) {
    openFileSystems.remove(fileSystemName);
}
private String extractAccountName(URI uri) {
if (!uri.getScheme().equals(this.getScheme())) {
throw Utility.logError(this.logger, new IllegalArgumentException(
"URI scheme does not match this provider"));
}
if (CoreUtils.isNullOrEmpty(uri.getQuery())) {
throw Utility.logError(this.logger, new IllegalArgumentException("URI does not contain a query "
+ "component. FileSystems require a URI of the format \"azb:
}
String accountName = Flux.fromArray(uri.getQuery().split("&"))
.filter(s -> s.startsWith(ACCOUNT_QUERY_KEY + "="))
.switchIfEmpty(Mono.error(Utility.logError(this.logger, new IllegalArgumentException(
"URI does not contain an \"" + ACCOUNT_QUERY_KEY + "=\" parameter. FileSystems require a URI "
+ "of the format \"azb:
.map(s -> s.substring(ACCOUNT_QUERY_KEY.length() + 1))
.blockLast();
if (CoreUtils.isNullOrEmpty(accountName)) {
throw Utility.logError(logger, new IllegalArgumentException("No account name provided in URI query."));
}
return accountName;
}
}
I actually just refactored this to do exactly that :). It'll be in my next PR. | public void copy(Path source, Path destination, CopyOption... copyOptions) throws IOException {
if (!(source instanceof AzurePath && destination instanceof AzurePath)) {
throw Utility.logError(logger, new IllegalArgumentException("This provider cannot operate on subtypes of "
+ "Path other than AzurePath"));
}
if (source.equals(destination)) {
return;
}
boolean replaceExisting = false;
List<CopyOption> optionsList = new ArrayList<>(Arrays.asList(copyOptions));
if (!optionsList.contains(StandardCopyOption.COPY_ATTRIBUTES)) {
throw Utility.logError(logger, new UnsupportedOperationException("StandardCopyOption.COPY_ATTRIBUTES "
+ "must be specified as the service will always copy file attributes."));
}
optionsList.remove(StandardCopyOption.COPY_ATTRIBUTES);
if (optionsList.contains(StandardCopyOption.REPLACE_EXISTING)) {
replaceExisting = true;
optionsList.remove(StandardCopyOption.REPLACE_EXISTING);
}
if (!optionsList.isEmpty()) {
throw Utility.logError(logger, new UnsupportedOperationException("Unsupported copy option found. Only "
+ "StandardCopyOption.COPY_ATTRIBUTES and StandareCopyOption.REPLACE_EXISTING are supported."));
}
if (source.equals(source.getRoot()) || destination.equals(destination.getRoot())) {
throw Utility.logError(logger, new IllegalArgumentException(String.format("Neither source nor destination "
+ "can be just a root directory. Source: %s. Destination: %s.", source.toString(),
destination.toString())));
}
BlobClient sourceBlob = ((AzurePath) source).toBlobClient();
BlobClient destinationBlob = ((AzurePath) destination).toBlobClient();
DirectoryStatus destinationStatus = checkDirStatus(destinationBlob);
if (destinationStatus.equals(DirectoryStatus.NOT_EMPTY)) {
throw Utility.logError(logger, new DirectoryNotEmptyException(destination.toString()));
}
/*
Set request conditions if we should not overwrite. We can error out here if we know something already exists,
but we will also create request conditions as a safeguard against overwriting something that was created
between our check and put.
*/
BlobRequestConditions requestConditions = null;
if (!replaceExisting) {
if (!destinationStatus.equals(DirectoryStatus.DOES_NOT_EXIST)) {
throw Utility.logError(logger, new FileAlreadyExistsException(destination.toString()));
}
requestConditions = new BlobRequestConditions().setIfNoneMatch("*");
}
/*
More path validation
Check that the parent for the destination exists. We only need to perform this check if there is nothing
currently at the destination, for if the destination exists, its parent at least weakly exists and we
can skip a service call.
*/
if (destinationStatus.equals(DirectoryStatus.DOES_NOT_EXIST) && !checkParentDirectoryExists(destination)) {
throw Utility.logError(logger, new IOException("Parent directory of destination location does not exist."
+ "The destination path is therefore invalid. Destination: " + destination.toString()));
}
/*
Try to copy the resource at the source path.
There is an optimization here where we try to do the copy first and only check for a virtual directory if
there's a 404. In the cases of files and concrete directories, this only requires one request. For virtual
directories, however, this requires three requests: failed copy, check status, create directory. Depending on
customer scenarios and how many virtual directories they copy, it could be better to check the directory status
first and then do a copy or createDir, which would always be two requests for all resource types.
*/
try {
SyncPoller<BlobCopyInfo, Void> pollResponse =
destinationBlob.beginCopy(sourceBlob.getBlobUrl(), null, null, null, null, requestConditions, null);
pollResponse.waitForCompletion(Duration.ofSeconds(30));
} catch (BlobStorageException e) {
if (e.getErrorCode().equals(BlobErrorCode.BLOB_NOT_FOUND)
&& !checkDirStatus(sourceBlob).equals(DirectoryStatus.DOES_NOT_EXIST)) {
/*
We already checked that the parent exists and validated the paths above, so we can put the blob
directly.
*/
putDirectoryBlob(destinationBlob, null, null, requestConditions);
} else {
throw Utility.logError(logger, new IOException(e));
}
} catch (RuntimeException e) {
throw Utility.logError(logger, new IOException(e));
}
} | throw Utility.logError(logger, new IllegalArgumentException(String.format("Neither source nor destination " | public void copy(Path source, Path destination, CopyOption... copyOptions) throws IOException {
if (!(source instanceof AzurePath && destination instanceof AzurePath)) {
throw Utility.logError(logger, new IllegalArgumentException("This provider cannot operate on subtypes of "
+ "Path other than AzurePath"));
}
if (source.equals(destination)) {
return;
}
boolean replaceExisting = false;
List<CopyOption> optionsList = new ArrayList<>(Arrays.asList(copyOptions));
if (!optionsList.contains(StandardCopyOption.COPY_ATTRIBUTES)) {
throw Utility.logError(logger, new UnsupportedOperationException("StandardCopyOption.COPY_ATTRIBUTES "
+ "must be specified as the service will always copy file attributes."));
}
optionsList.remove(StandardCopyOption.COPY_ATTRIBUTES);
if (optionsList.contains(StandardCopyOption.REPLACE_EXISTING)) {
replaceExisting = true;
optionsList.remove(StandardCopyOption.REPLACE_EXISTING);
}
if (!optionsList.isEmpty()) {
throw Utility.logError(logger, new UnsupportedOperationException("Unsupported copy option found. Only "
+ "StandardCopyOption.COPY_ATTRIBUTES and StandareCopyOption.REPLACE_EXISTING are supported."));
}
if (source.equals(source.getRoot()) || destination.equals(destination.getRoot())) {
throw Utility.logError(logger, new IllegalArgumentException(String.format("Neither source nor destination "
+ "can be just a root directory. Source: %s. Destination: %s.", source.toString(),
destination.toString())));
}
BlobClient sourceBlob = ((AzurePath) source).toBlobClient();
BlobClient destinationBlob = ((AzurePath) destination).toBlobClient();
DirectoryStatus destinationStatus = checkDirStatus(destinationBlob);
if (destinationStatus.equals(DirectoryStatus.NOT_EMPTY)) {
throw Utility.logError(logger, new DirectoryNotEmptyException(destination.toString()));
}
/*
Set request conditions if we should not overwrite. We can error out here if we know something already exists,
but we will also create request conditions as a safeguard against overwriting something that was created
between our check and put.
*/
BlobRequestConditions requestConditions = null;
if (!replaceExisting) {
if (!destinationStatus.equals(DirectoryStatus.DOES_NOT_EXIST)) {
throw Utility.logError(logger, new FileAlreadyExistsException(destination.toString()));
}
requestConditions = new BlobRequestConditions().setIfNoneMatch("*");
}
/*
More path validation
Check that the parent for the destination exists. We only need to perform this check if there is nothing
currently at the destination, for if the destination exists, its parent at least weakly exists and we
can skip a service call.
*/
if (destinationStatus.equals(DirectoryStatus.DOES_NOT_EXIST) && !checkParentDirectoryExists(destination)) {
throw Utility.logError(logger, new IOException("Parent directory of destination location does not exist."
+ "The destination path is therefore invalid. Destination: " + destination.toString()));
}
/*
Try to copy the resource at the source path.
There is an optimization here where we try to do the copy first and only check for a virtual directory if
there's a 404. In the cases of files and concrete directories, this only requires one request. For virtual
directories, however, this requires three requests: failed copy, check status, create directory. Depending on
customer scenarios and how many virtual directories they copy, it could be better to check the directory status
first and then do a copy or createDir, which would always be two requests for all resource types.
*/
try {
SyncPoller<BlobCopyInfo, Void> pollResponse =
destinationBlob.beginCopy(sourceBlob.getBlobUrl(), null, null, null, null, requestConditions, null);
pollResponse.waitForCompletion(Duration.ofSeconds(COPY_TIMEOUT_SECONDS));
} catch (BlobStorageException e) {
if (e.getErrorCode().equals(BlobErrorCode.BLOB_NOT_FOUND)
&& !checkDirStatus(sourceBlob).equals(DirectoryStatus.DOES_NOT_EXIST)) {
/*
We already checked that the parent exists and validated the paths above, so we can put the blob
directly.
*/
putDirectoryBlob(destinationBlob, null, null, requestConditions);
} else {
throw Utility.logError(logger, new IOException(e));
}
} catch (RuntimeException e) {
throw Utility.logError(logger, new IOException(e));
}
} | class AzureFileSystemProvider extends FileSystemProvider {
// Logger used to record and surface the argument/state exceptions thrown by this provider.
private final ClientLogger logger = new ClientLogger(AzureFileSystemProvider.class);
// File-attribute names recognized as standard HTTP content headers when creating resources.
public static final String CONTENT_TYPE = "Content-Type";
public static final String CONTENT_DISPOSITION = "Content-Disposition";
public static final String CONTENT_LANGUAGE = "Content-Language";
public static final String CONTENT_ENCODING = "Content-Encoding";
public static final String CONTENT_MD5 = "Content-MD5";
public static final String CACHE_CONTROL = "Cache-Control";
// Query-string key that carries the storage account name in an azb:// URI.
private static final String ACCOUNT_QUERY_KEY = "account";
// Metadata key marking a zero-length blob as a concrete directory.
// NOTE(review): "is_hdi_folder" suggests HDInsight-compatible markers - confirm.
static final String DIR_METADATA_MARKER = "is_hdi_folder";
// Open file systems keyed by account name; concurrent because provider methods may race.
private final ConcurrentMap<String, FileSystem> openFileSystems;
/**
* Creates an AzureFileSystemProvider.
*/
public AzureFileSystemProvider() {
this.openFileSystems = new ConcurrentHashMap<>();
}
/**
* Returns {@code "azb".}
*/
@Override
public String getScheme() {
return "azb";
}
/**
* The format of a {@code URI} identifying a file system is {@code "azb://?account=<account_name>"}.
* <p>
* Once closed, a file system with the same identifier may be reopened.
* {@inheritDoc}
*/
@Override
public FileSystem newFileSystem(URI uri, Map<String, ?> config) throws IOException {
    String accountName = extractAccountName(uri);
    // Fast-fail before paying for construction of the file system.
    if (this.openFileSystems.containsKey(accountName)) {
        throw Utility.logError(this.logger, new FileSystemAlreadyExistsException("Name: " + accountName));
    }
    AzureFileSystem afs = new AzureFileSystem(this, accountName, config);
    // putIfAbsent closes the check-then-act race the containsKey/put pair left open: two
    // concurrent creators can both pass the check, but only one registration may win.
    if (this.openFileSystems.putIfAbsent(accountName, afs) != null) {
        throw Utility.logError(this.logger, new FileSystemAlreadyExistsException("Name: " + accountName));
    }
    return afs;
}
/**
* The format of a {@code URI} identifying a file system is {@code "azb://?account=<account_name>"}.
* <p>
* Trying to retrieve a closed file system will throw a {@link FileSystemNotFoundException}. Once closed, a
* file system with the same identifier may be reopened.
* {@inheritDoc}
*/
@Override
public FileSystem getFileSystem(URI uri) {
    String accountName = extractAccountName(uri);
    // A single get() avoids the containsKey/get race on the concurrent map: a file system
    // closed between the two calls would have made the original return null.
    FileSystem fileSystem = this.openFileSystems.get(accountName);
    if (fileSystem == null) {
        throw Utility.logError(this.logger, new FileSystemNotFoundException("Name: " + accountName));
    }
    return fileSystem;
}
/**
* {@inheritDoc}
*/
@Override
public Path getPath(URI uri) {
    // Resolve the already-open file system for the URI's account, then parse its path component.
    FileSystem fileSystem = getFileSystem(uri);
    return fileSystem.getPath(uri.getPath());
}
/**
* {@inheritDoc}
*/
@Override
public SeekableByteChannel newByteChannel(Path path, Set<? extends OpenOption> set,
FileAttribute<?>... fileAttributes) throws IOException {
// TODO(review): unimplemented stub - returns null instead of a channel.
return null;
}
/**
* {@inheritDoc}
*/
@Override
public DirectoryStream<Path> newDirectoryStream(Path path, DirectoryStream.Filter<? super Path> filter)
throws IOException {
// TODO(review): unimplemented stub - returns null instead of a directory stream.
return null;
}
/**
* The existence of a directory in the {@code AzureFileSystem} is defined on two levels. <i>Weak existence</i> is
* defined by the presence of a non-zero number of blobs prefixed with the directory's path. This concept is also
* known as a <i>virtual directory</i> and enables the file system to work with containers that were pre-loaded
* with data by another source but need to be accessed by this file system. <i>Strong existence</i> is defined as
* the presence of an actual storage resource at the given path, which in the case of directories, is a zero-length
* blob whose name is the directory path with a particular metadata field indicating the blob's status as a
* directory. This is also known as a <i>concrete directory</i>. Directories created by this file system will
* strongly exist. Operations targeting directories themselves as the object (e.g. setting properties) will target
* marker blobs underlying concrete directories. Other operations (e.g. listing) will operate on the blob-name
* prefix.
* <p>
* This method fulfills the nio contract of: "The check for the existence of the file and the creation of the
* directory if it does not exist are a single operation that is atomic with respect to all other filesystem
* activities that might affect the directory." More specifically, this method will atomically check for <i>strong
* existence</i> of another file or directory at the given path and fail if one is present. On the other hand, we
* only check for <i>weak existence</i> of the parent to determine if the given path is valid. Additionally, the
* action of checking whether the parent exists, is <i>not</i> atomic with the creation of the directory. Note that
* while it is possible that the parent may be deleted between when the parent is determined to exist and the
* creation of the child, the creation of the child will always ensure the existence of a virtual parent, so the
* child will never be left floating and unreachable. The different checks on parent and child is due to limitations
* in the Storage service API.
* <p>
* There may be some unintuitive behavior when working with directories in this file system, particularly virtual
* directories(usually those not created by this file system). A virtual directory will disappear as soon as all its
* children have been deleted. Furthermore, if a directory with the given path weakly exists at the time of calling
* this method, this method will still return success and create a concrete directory at the target location.
* In other words, it is possible to "double create" a directory if it first weakly exists and then is strongly
* created. This is both because it is impossible to atomically check if a virtual directory exists while creating a
* concrete directory and because such behavior will have minimal side effects--no files will be overwritten and the
* directory will still be available for writing as intended, though it may not be empty.
* <p>
* This method will attempt to extract standard HTTP content headers from the list of file attributes to set them
* as blob headers. All other attributes will be set as blob metadata. The value of every attribute will be
* converted to a {@code String} with the exception of the Content-MD5 attribute which expects a {@code byte[]}.
* When extracting the content headers, the following strings will be used for comparison (constants for these
* values can be found on this type):
* <ul>
* <li>{@code Content-Type}</li>
* <li>{@code Content-Disposition}</li>
* <li>{@code Content-Language}</li>
* <li>{@code Content-Encoding}</li>
* <li>{@code Content-MD5}</li>
* <li>{@code Cache-Control}</li>
* </ul>
* Note that these properties also have a particular semantic in that if one is specified, all are updated. In other
* words, if any of the above is set, all those that are not set will be cleared. See the
* <a href="https:
* information.
*
* {@inheritDoc}
*/
@Override
public void createDirectory(Path path, FileAttribute<?>... fileAttributes) throws IOException {
    if (!(path instanceof AzurePath)) {
        throw Utility.logError(logger, new IllegalArgumentException("This provider cannot operate on subtypes of "
            + "Path other than AzurePath"));
    }
    // Normalize a null varargs array so the attribute extraction below never NPEs.
    fileAttributes = fileAttributes == null ? new FileAttribute<?>[0] : fileAttributes;
    BlobClient client = ((AzurePath) path).toBlobClient();
    Path root = path.getRoot();
    if (root != null && root.equals(path)) {
        throw Utility.logError(logger, new IOException("Creating a root directory is not supported."));
    }
    if (checkParentDirectoryExists(path)) {
        try {
            List<FileAttribute<?>> attributeList = new ArrayList<>(Arrays.asList(fileAttributes));
            // Content-* attributes become blob HTTP headers; everything left over becomes metadata.
            BlobHttpHeaders headers = Utility.extractHttpHeaders(attributeList, logger);
            Map<String, String> metadata = Utility.convertAttributesToMetadata(attributeList);
            // If-None-Match: * makes the existence check and creation a single atomic operation.
            putDirectoryBlob(client, headers, metadata, new BlobRequestConditions().setIfNoneMatch("*"));
        } catch (BlobStorageException e) {
            /*
            Only a conflict specifically caused by an already-existing blob maps to
            FileAlreadyExistsException; other 409s (e.g. lease conflicts) are genuine I/O failures
            and were previously misreported as "file already exists".
            */
            if (e.getStatusCode() == HttpURLConnection.HTTP_CONFLICT
                && e.getErrorCode().equals(BlobErrorCode.BLOB_ALREADY_EXISTS)) {
                throw Utility.logError(logger, new FileAlreadyExistsException(path.toString()));
            } else {
                throw Utility.logError(logger, new IOException("An error occured when creating the directory", e));
            }
        }
    } else {
        throw Utility.logError(logger, new IOException("Parent directory does not exist for path: "
            + path.toString()));
    }
}
/**
* Creates the actual directory marker. This method should only be used when any necessary checks for proper
* conditions of directory creation (e.g. parent existence) have already been performed.
*
* @param destinationClient A blobClient pointing to the location where the directory should be put.
* @param headers Any headers that should be associated with the directory.
* @param metadata Any metadata that should be associated with the directory. This method will add the necessary
* metadata to distinguish this blob as a directory.
* @param requestConditions Any necessary request conditions to pass when creating the directory blob.
*/
private void putDirectoryBlob(BlobClient destinationClient, BlobHttpHeaders headers, Map<String, String> metadata,
    BlobRequestConditions requestConditions) {
    // Stamp the metadata with the directory marker, then commit an empty block list, which
    // creates the zero-length directory blob in a single request.
    Map<String, String> directoryMetadata = prepareMetadataForDirectory(metadata);
    destinationClient.getBlockBlobClient()
        .commitBlockListWithResponse(Collections.emptyList(), headers, directoryMetadata, null,
            requestConditions, null, null);
}
/**
* Checks for the existence of the parent of the given path. We do not check for the actual marker blob as parents
* need only weakly exist.
*
* If the parent is a root (container), it will be assumed to exist, so it must be validated elsewhere that the
* container is a legitimate root within this file system.
*/
boolean checkParentDirectoryExists(Path path) throws IOException {
    /*
    A null parent (implicitly the default directory, which is a root) or a parent equal to the
    root denotes a container, and containers are always considered extant. Only a true,
    non-root parent needs a service-side existence check.
    */
    Path parent = path.getParent();
    if (parent == null || parent.equals(path.getRoot())) {
        return true;
    }
    return checkDirectoryExists(((AzurePath) parent).toBlobClient());
}
/**
* Checks whether a directory exists by either being empty or having children.
*/
boolean checkDirectoryExists(BlobClient dirBlobClient) {
    if (dirBlobClient == null) {
        throw Utility.logError(logger, new IllegalArgumentException("One or both of the parameters was null."));
    }
    // A directory "exists" whether it is empty or holds children; any other status means it is
    // absent or not a directory at all.
    switch (checkDirStatus(dirBlobClient)) {
        case EMPTY:
        case NOT_EMPTY:
            return true;
        default:
            return false;
    }
}
Map<String, String> prepareMetadataForDirectory(Map<String, String> metadata) {
    // Reuse the caller's map when one is supplied (the caller's reference is updated in place,
    // as before); otherwise start fresh. Either way the returned map carries the marker that
    // identifies the blob as a directory.
    Map<String, String> prepared = (metadata == null) ? new HashMap<>() : metadata;
    prepared.put(DIR_METADATA_MARKER, "true");
    return prepared;
}
BlobContainerClient getContainerClient(BlobClient client) {
    // Reuses the blob client's pipeline so the container client carries identical
    // credentials and policies.
    BlobContainerClientBuilder builder = new BlobContainerClientBuilder()
        .endpoint(client.getBlobUrl())
        .pipeline(client.getHttpPipeline());
    return builder.buildClient();
}
/**
* {@inheritDoc}
*/
@Override
public void delete(Path path) throws IOException {
// TODO(review): unimplemented stub - delete requests are silently ignored.
}
/**
* As stated in the nio docs, this method is not atomic. More specifically, the checks necessary to validate the
* inputs and state of the file system are not atomic with the actual copying of data. If the copy is triggered,
* the copy itself is atomic and only a complete copy will ever be left at the destination.
*
* In addition to those in the nio javadocs, this method has the following requirements for successful completion.
* {@link StandardCopyOption
* if this option is not passed, an {@link UnsupportedOperationException} will be thrown. Neither the source nor the
* destination can be a root directory; if either is a root directory, an {@link IllegalArgumentException} will be
* thrown. The parent directory of the destination must at least weakly exist; if it does not, an
* {@link IOException} will be thrown. The only supported option other than
* {@link StandardCopyOption
* other option will result in an {@link UnsupportedOperationException}.
*
* This method supports both virtual and concrete directories as both the source and destination. Unlike when
* creating a directory, the existence of a virtual directory at the destination will cause this operation to fail.
* This is in order to prevent the possibility of overwriting a non-empty virtual directory with a file. Still, as
* mentioned above, this check is not atomic with the creation of the resultant directory.
*
* {@inheritDoc}
* @see
*/
/**
 * This method will check if a directory is extant and/or empty and accommodates virtual directories. This method
 * will not check the status of root directories.
 *
 * Note: the dangling {@code @Override} annotation that preceded this method's javadoc has been
 * removed - this package-private helper overrides nothing, so the annotation was a compile error
 * (the annotation belonged to a method that is no longer adjacent in this file).
 *
 * @param dirBlobClient client addressing the prospective directory blob; must not be null.
 * @return the {@link DirectoryStatus} describing the target's existence and emptiness.
 */
DirectoryStatus checkDirStatus(BlobClient dirBlobClient) {
    if (dirBlobClient == null) {
        throw Utility.logError(logger, new IllegalArgumentException("The blob client was null."));
    }
    BlobContainerClient containerClient = getContainerClient(dirBlobClient);
    // Two results suffice to distinguish empty from non-empty; metadata is fetched to spot the
    // directory marker on the blob matching the prefix exactly.
    ListBlobsOptions listOptions = new ListBlobsOptions().setMaxResultsPerPage(2)
        .setPrefix(dirBlobClient.getBlobName())
        .setDetails(new BlobListDetails().setRetrieveMetadata(true));
    /*
    Do a list on prefix.
    Zero elements means no virtual dir. Does not exist.
    One element that matches this dir means empty.
    One element that doesn't match this dir or more than one element. Not empty.
    One element that matches the name but does not have a directory marker means the resource is not a directory.
    Note that blob names that match the prefix exactly are returned in listing operations.
    */
    Iterator<BlobItem> blobIterator = containerClient.listBlobsByHierarchy(AzureFileSystem.PATH_SEPARATOR,
        listOptions, null).iterator();
    if (!blobIterator.hasNext()) {
        return DirectoryStatus.DOES_NOT_EXIST;
    } else {
        BlobItem item = blobIterator.next();
        if (blobIterator.hasNext()) {
            return DirectoryStatus.NOT_EMPTY;
        }
        if (!item.getName().equals(dirBlobClient.getBlobName())) {
            /*
            Names do not match. Must be a virtual dir with one item. e.g. blob with name "foo/bar" means dir "foo"
            exists.
            */
            return DirectoryStatus.NOT_EMPTY;
        }
        if (item.getMetadata() != null && item.getMetadata().containsKey(DIR_METADATA_MARKER)) {
            return DirectoryStatus.EMPTY;
        }
        return DirectoryStatus.NOT_A_DIRECTORY;
    }
}
/*int checkRootDirStatus(BlobContainerClient rootClient) {
}*/
/**
* {@inheritDoc}
*/
@Override
public void move(Path path, Path path1, CopyOption... copyOptions) throws IOException {
// TODO(review): unimplemented stub - move requests are silently ignored.
}
/**
* {@inheritDoc}
*/
@Override
public boolean isSameFile(Path path, Path path1) throws IOException {
// TODO(review): unimplemented stub - always reports the two paths as distinct.
return false;
}
/**
* {@inheritDoc}
*/
@Override
public boolean isHidden(Path path) throws IOException {
// TODO(review): unimplemented stub - nothing is ever reported as hidden.
return false;
}
/**
* {@inheritDoc}
*/
@Override
public FileStore getFileStore(Path path) throws IOException {
// TODO(review): unimplemented stub - returns null instead of a FileStore.
return null;
}
/**
* {@inheritDoc}
*/
@Override
public void checkAccess(Path path, AccessMode... accessModes) throws IOException {
// TODO(review): unimplemented stub - every access check passes by doing nothing.
}
/**
* {@inheritDoc}
*/
@Override
public <V extends FileAttributeView> V getFileAttributeView(Path path, Class<V> aClass, LinkOption... linkOptions) {
// TODO(review): unimplemented stub - returns null instead of an attribute view.
return null;
}
/**
* {@inheritDoc}
*/
@Override
public <A extends BasicFileAttributes> A readAttributes(Path path, Class<A> aClass, LinkOption... linkOptions)
throws IOException {
// TODO(review): unimplemented stub - returns null instead of attributes.
return null;
}
/**
* {@inheritDoc}
*/
@Override
public Map<String, Object> readAttributes(Path path, String s, LinkOption... linkOptions) throws IOException {
// TODO(review): unimplemented stub - returns null instead of an attribute map.
return null;
}
/**
* {@inheritDoc}
*/
@Override
public void setAttribute(Path path, String s, Object o, LinkOption... linkOptions) throws IOException {
// TODO(review): unimplemented stub - attribute writes are silently dropped.
}
/**
 * Drops the given file system from the registry of open file systems so that a file system with
 * the same account name may later be reopened.
 *
 * @param fileSystemName the account name identifying the file system to forget.
 */
void closeFileSystem(String fileSystemName) {
    openFileSystems.remove(fileSystemName);
}
private String extractAccountName(URI uri) {
if (!uri.getScheme().equals(this.getScheme())) {
throw Utility.logError(this.logger, new IllegalArgumentException(
"URI scheme does not match this provider"));
}
if (CoreUtils.isNullOrEmpty(uri.getQuery())) {
throw Utility.logError(this.logger, new IllegalArgumentException("URI does not contain a query "
+ "component. FileSystems require a URI of the format \"azb:
}
String accountName = Flux.fromArray(uri.getQuery().split("&"))
.filter(s -> s.startsWith(ACCOUNT_QUERY_KEY + "="))
.switchIfEmpty(Mono.error(Utility.logError(this.logger, new IllegalArgumentException(
"URI does not contain an \"" + ACCOUNT_QUERY_KEY + "=\" parameter. FileSystems require a URI "
+ "of the format \"azb:
.map(s -> s.substring(ACCOUNT_QUERY_KEY.length() + 1))
.blockLast();
if (CoreUtils.isNullOrEmpty(accountName)) {
throw Utility.logError(logger, new IllegalArgumentException("No account name provided in URI query."));
}
return accountName;
}
} | class AzureFileSystemProvider extends FileSystemProvider {
private final ClientLogger logger = new ClientLogger(AzureFileSystemProvider.class);
public static final String CONTENT_TYPE = "Content-Type";
public static final String CONTENT_DISPOSITION = "Content-Disposition";
public static final String CONTENT_LANGUAGE = "Content-Language";
public static final String CONTENT_ENCODING = "Content-Encoding";
public static final String CONTENT_MD5 = "Content-MD5";
public static final String CACHE_CONTROL = "Cache-Control";
private static final String ACCOUNT_QUERY_KEY = "account";
private static final int COPY_TIMEOUT_SECONDS = 30;
static final String DIR_METADATA_MARKER = "is_hdi_folder";
private final ConcurrentMap<String, FileSystem> openFileSystems;
/**
* Creates an AzureFileSystemProvider.
*/
public AzureFileSystemProvider() {
this.openFileSystems = new ConcurrentHashMap<>();
}
/**
* Returns {@code "azb".}
*/
@Override
public String getScheme() {
return "azb";
}
/**
* The format of a {@code URI} identifying a file system is {@code "azb:
* <p>
* Once closed, a file system with the same identifier may be reopened.
* {@inheritDoc}
*/
@Override
public FileSystem newFileSystem(URI uri, Map<String, ?> config) throws IOException {
String accountName = extractAccountName(uri);
if (this.openFileSystems.containsKey(accountName)) {
throw Utility.logError(this.logger, new FileSystemAlreadyExistsException("Name: " + accountName));
}
AzureFileSystem afs = new AzureFileSystem(this, accountName, config);
this.openFileSystems.put(accountName, afs);
return afs;
}
/**
* The format of a {@code URI} identifying an file system is {@code "azb:
* <p>
* Trying to retrieve a closed file system will throw a {@link FileSystemNotFoundException}. Once closed, a
* file system with the same identifier may be reopened.
* {@inheritDoc}
*/
@Override
public FileSystem getFileSystem(URI uri) {
String accountName = extractAccountName(uri);
if (!this.openFileSystems.containsKey(accountName)) {
throw Utility.logError(this.logger, new FileSystemNotFoundException("Name: " + accountName));
}
return this.openFileSystems.get(accountName);
}
/**
* {@inheritDoc}
*/
@Override
public Path getPath(URI uri) {
return getFileSystem(uri).getPath(uri.getPath());
}
/**
* {@inheritDoc}
*/
@Override
public SeekableByteChannel newByteChannel(Path path, Set<? extends OpenOption> set,
FileAttribute<?>... fileAttributes) throws IOException {
return null;
}
/**
* {@inheritDoc}
*/
@Override
public DirectoryStream<Path> newDirectoryStream(Path path, DirectoryStream.Filter<? super Path> filter)
throws IOException {
return null;
}
/**
* The existence of a directory in the {@code AzureFileSystem} is defined on two levels. <i>Weak existence</i> is
* defined by the presence of a non-zero number of blobs prefixed with the directory's path. This concept is also
* known as a <i>virtual directory</i> and enables the file system to work with containers that were pre-loaded
* with data by another source but need to be accessed by this file system. <i>Strong existence</i> is defined as
* the presence of an actual storage resource at the given path, which in the case of directories, is a zero-length
* blob whose name is the directory path with a particular metadata field indicating the blob's status as a
* directory. This is also known as a <i>concrete directory</i>. Directories created by this file system will
* strongly exist. Operations targeting directories themselves as the object (e.g. setting properties) will target
* marker blobs underlying concrete directories. Other operations (e.g. listing) will operate on the blob-name
* prefix.
* <p>
* This method fulfills the nio contract of: "The check for the existence of the file and the creation of the
* directory if it does not exist are a single operation that is atomic with respect to all other filesystem
* activities that might affect the directory." More specifically, this method will atomically check for <i>strong
* existence</i> of another file or directory at the given path and fail if one is present. On the other hand, we
* only check for <i>weak existence</i> of the parent to determine if the given path is valid. Additionally, the
* action of checking whether the parent exists, is <i>not</i> atomic with the creation of the directory. Note that
* while it is possible that the parent may be deleted between when the parent is determined to exist and the
* creation of the child, the creation of the child will always ensure the existence of a virtual parent, so the
* child will never be left floating and unreachable. The different checks on parent and child is due to limitations
* in the Storage service API.
* <p>
* There may be some unintuitive behavior when working with directories in this file system, particularly virtual
* directories(usually those not created by this file system). A virtual directory will disappear as soon as all its
* children have been deleted. Furthermore, if a directory with the given path weakly exists at the time of calling
* this method, this method will still return success and create a concrete directory at the target location.
* In other words, it is possible to "double create" a directory if it first weakly exists and then is strongly
* created. This is both because it is impossible to atomically check if a virtual directory exists while creating a
* concrete directory and because such behavior will have minimal side effects--no files will be overwritten and the
* directory will still be available for writing as intended, though it may not be empty.
* <p>
* This method will attempt to extract standard HTTP content headers from the list of file attributes to set them
* as blob headers. All other attributes will be set as blob metadata. The value of every attribute will be
* converted to a {@code String} with the exception of the Content-MD5 attribute which expects a {@code byte[]}.
* When extracting the content headers, the following strings will be used for comparison (constants for these
* values can be found on this type):
* <ul>
* <li>{@code Content-Type}</li>
* <li>{@code Content-Disposition}</li>
* <li>{@code Content-Language}</li>
* <li>{@code Content-Encoding}</li>
* <li>{@code Content-MD5}</li>
* <li>{@code Cache-Control}</li>
* </ul>
* Note that these properties also have a particular semantic in that if one is specified, all are updated. In other
* words, if any of the above is set, all those that are not set will be cleared. See the
* <a href="https:
* information.
*
* {@inheritDoc}
*/
@Override
public void createDirectory(Path path, FileAttribute<?>... fileAttributes) throws IOException {
if (!(path instanceof AzurePath)) {
throw Utility.logError(logger, new IllegalArgumentException("This provider cannot operate on subtypes of "
+ "Path other than AzurePath"));
}
fileAttributes = fileAttributes == null ? new FileAttribute<?>[0] : fileAttributes;
BlobClient client = ((AzurePath) path).toBlobClient();
Path root = path.getRoot();
if (root != null && root.equals(path)) {
throw Utility.logError(logger, new IOException("Creating a root directory is not supported."));
}
if (checkParentDirectoryExists(path)) {
try {
List<FileAttribute<?>> attributeList = new ArrayList<>(Arrays.asList(fileAttributes));
BlobHttpHeaders headers = Utility.extractHttpHeaders(attributeList, logger);
Map<String, String> metadata = Utility.convertAttributesToMetadata(attributeList);
putDirectoryBlob(client, headers, metadata, new BlobRequestConditions().setIfNoneMatch("*"));
} catch (BlobStorageException e) {
if (e.getStatusCode() == HttpURLConnection.HTTP_CONFLICT
&& e.getErrorCode().equals(BlobErrorCode.BLOB_ALREADY_EXISTS)) {
throw Utility.logError(logger, new FileAlreadyExistsException(path.toString()));
} else {
throw Utility.logError(logger, new IOException("An error occured when creating the directory", e));
}
}
} else {
throw Utility.logError(logger, new IOException("Parent directory does not exist for path: "
+ path.toString()));
}
}
/**
 * Creates the actual directory marker blob. Call this only after all preconditions for directory
 * creation (e.g. parent existence, path validation) have been verified.
 *
 * @param destinationClient A blobClient pointing to the location where the directory should be put.
 * @param headers Any headers that should be associated with the directory.
 * @param metadata Any metadata that should be associated with the directory; the directory-marker
 * entry is added to it before upload.
 * @param requestConditions Any necessary request conditions to pass when creating the directory blob.
 */
private void putDirectoryBlob(BlobClient destinationClient, BlobHttpHeaders headers, Map<String, String> metadata,
    BlobRequestConditions requestConditions) {
    // Stamp the directory marker onto the metadata, then commit an empty block list to create a
    // zero-length blob that serves as the concrete directory.
    Map<String, String> markedMetadata = prepareMetadataForDirectory(metadata);
    destinationClient.getBlockBlobClient()
        .commitBlockListWithResponse(Collections.emptyList(), headers, markedMetadata, null, requestConditions,
            null, null);
}
/**
 * Checks for the existence of the parent of the given path. We do not check for the actual marker blob as parents
 * need only weakly exist.
 *
 * If the parent is a root (container), it will be assumed to exist, so it must be validated elsewhere that the
 * container is a legitimate root within this file system.
 *
 * @param path the path whose parent should be checked; must be an AzurePath.
 * @return true if the parent weakly exists (or is a root / implicit default directory), false otherwise.
 * @throws IOException if the existence check against the service fails.
 */
boolean checkParentDirectoryExists(Path path) throws IOException {
    /*
    If the parent is just the root (or null, which means the parent is implicitly the default directory which is a
    root), that means we are checking a container, which is always considered to exist. Otherwise, perform normal
    existence check.
    */
    Path parent = path.getParent();
    // Reuse the parent computed above instead of calling path.getParent() a second time.
    return (parent == null || parent.equals(path.getRoot()))
        || checkDirectoryExists(((AzurePath) parent).toBlobClient());
}
/**
 * Checks whether a directory exists by either being empty or having children.
 *
 * @param dirBlobClient client pointing at the directory (or virtual-directory prefix) to check; must not be null.
 * @return true if the directory exists, whether empty or not; false otherwise.
 */
boolean checkDirectoryExists(BlobClient dirBlobClient) {
    if (dirBlobClient == null) {
        // Message corrected: this method takes exactly one parameter (was "One or both of the
        // parameters was null."), now consistent with checkDirStatus.
        throw Utility.logError(logger, new IllegalArgumentException("The blob client was null."));
    }
    DirectoryStatus dirStatus = checkDirStatus(dirBlobClient);
    return dirStatus.equals(DirectoryStatus.EMPTY) || dirStatus.equals(DirectoryStatus.NOT_EMPTY);
}
/**
 * Adds the directory-marker entry to the given metadata map, creating a fresh map when null is passed.
 * Note the caller's map is mutated in place when one is supplied, matching the original behavior.
 *
 * @param metadata existing metadata, or null.
 * @return the map (caller's or newly created) containing the directory marker.
 */
Map<String, String> prepareMetadataForDirectory(Map<String, String> metadata) {
    Map<String, String> directoryMetadata = (metadata == null) ? new HashMap<>() : metadata;
    directoryMetadata.put(DIR_METADATA_MARKER, "true");
    return directoryMetadata;
}
/**
 * Builds a container client that reuses the given blob client's pipeline (and therefore its
 * credentials and policies) and endpoint.
 */
BlobContainerClient getContainerClient(BlobClient client) {
    BlobContainerClientBuilder builder = new BlobContainerClientBuilder()
        .endpoint(client.getBlobUrl())
        .pipeline(client.getHttpPipeline());
    return builder.buildClient();
}
/**
 * {@inheritDoc}
 */
@Override
public void delete(Path path) throws IOException {
    // TODO (review): not yet implemented — currently a silent no-op.
}
/**
* As stated in the nio docs, this method is not atomic. More specifically, the checks necessary to validate the
* inputs and state of the file system are not atomic with the actual copying of data. If the copy is triggered,
* the copy itself is atomic and only a complete copy will ever be left at the destination.
*
* In addition to those in the nio javadocs, this method has the following requirements for successful completion.
* {@link StandardCopyOption
* if this option is not passed, an {@link UnsupportedOperationException} will be thrown. Neither the source nor the
* destination can be a root directory; if either is a root directory, an {@link IllegalArgumentException} will be
* thrown. The parent directory of the destination must at least weakly exist; if it does not, an
* {@link IOException} will be thrown. The only supported option other than
* {@link StandardCopyOption
* other option will result in an {@link UnsupportedOperationException}.
*
* This method supports both virtual and concrete directories as both the source and destination. Unlike when
* creating a directory, the existence of a virtual directory at the destination will cause this operation to fail.
* This is in order to prevent the possibility of overwriting a non-empty virtual directory with a file. Still, as
* mentioned above, this check is not atomic with the creation of the resultant directory.
*
* {@inheritDoc}
* @see
*/
@Override
/**
 * This method will check if a directory is extant and/or empty and accommodates virtual directories. This method
 * will not check the status of root directories.
 *
 * @param dirBlobClient client pointing at the blob (or virtual-directory prefix) to inspect; must not be null.
 * @return one of DOES_NOT_EXIST, EMPTY, NOT_EMPTY, or NOT_A_DIRECTORY.
 */
DirectoryStatus checkDirStatus(BlobClient dirBlobClient) {
    if (dirBlobClient == null) {
        throw Utility.logError(logger, new IllegalArgumentException("The blob client was null."));
    }
    BlobContainerClient containerClient = getContainerClient(dirBlobClient);
    // Two results are enough to distinguish "empty" from "not empty"; metadata must be retrieved
    // so the directory-marker key can be inspected below.
    ListBlobsOptions listOptions = new ListBlobsOptions().setMaxResultsPerPage(2)
        .setPrefix(dirBlobClient.getBlobName())
        .setDetails(new BlobListDetails().setRetrieveMetadata(true));
    /*
    Do a list on prefix.
    Zero elements means no virtual dir. Does not exist.
    One element that matches this dir means empty.
    One element that doesn't match this dir or more than one element. Not empty.
    One element that matches the name but does not have a directory marker means the resource is not a directory.
    Note that blob names that match the prefix exactly are returned in listing operations.
    */
    Iterator<BlobItem> blobIterator = containerClient.listBlobsByHierarchy(AzureFileSystem.PATH_SEPARATOR,
        listOptions, null).iterator();
    if (!blobIterator.hasNext()) {
        return DirectoryStatus.DOES_NOT_EXIST;
    } else {
        BlobItem item = blobIterator.next();
        if (blobIterator.hasNext()) {
            // More than one listing result under the prefix: definitely has children.
            return DirectoryStatus.NOT_EMPTY;
        }
        if (!item.getName().equals(dirBlobClient.getBlobName())) {
            /*
            Names do not match. Must be a virtual dir with one item. e.g. blob with name "foo/bar" means dir "foo"
            exists.
            */
            return DirectoryStatus.NOT_EMPTY;
        }
        if (item.getMetadata() != null && item.getMetadata().containsKey(DIR_METADATA_MARKER)) {
            return DirectoryStatus.EMPTY;
        }
        // Exact name match but no marker metadata: a regular blob, not a directory.
        return DirectoryStatus.NOT_A_DIRECTORY;
    }
}
/*int checkRootDirStatus(BlobContainerClient rootClient) {
}*/
/**
 * {@inheritDoc}
 */
@Override
public void move(Path path, Path path1, CopyOption... copyOptions) throws IOException {
    // TODO (review): not yet implemented — currently a silent no-op.
}
/**
 * {@inheritDoc}
 */
@Override
public boolean isSameFile(Path path, Path path1) throws IOException {
    // TODO (review): not yet implemented — always reports false.
    return false;
}
/**
 * {@inheritDoc}
 */
@Override
public boolean isHidden(Path path) throws IOException {
    // TODO (review): not yet implemented — always reports false.
    return false;
}
/**
 * {@inheritDoc}
 */
@Override
public FileStore getFileStore(Path path) throws IOException {
    // NOTE(review): returning null violates the FileSystemProvider contract; implement or throw
    // UnsupportedOperationException. TODO.
    return null;
}
/**
 * {@inheritDoc}
 */
@Override
public void checkAccess(Path path, AccessMode... accessModes) throws IOException {
    // TODO (review): not yet implemented — grants all access unconditionally.
}
/**
 * {@inheritDoc}
 */
@Override
public <V extends FileAttributeView> V getFileAttributeView(Path path, Class<V> aClass, LinkOption... linkOptions) {
    // NOTE(review): null return — callers must be prepared for an unsupported view. TODO implement.
    return null;
}
/**
 * {@inheritDoc}
 */
@Override
public <A extends BasicFileAttributes> A readAttributes(Path path, Class<A> aClass, LinkOption... linkOptions)
    throws IOException {
    // NOTE(review): returning null violates the contract (should throw on unsupported type). TODO.
    return null;
}
/**
 * {@inheritDoc}
 */
@Override
public Map<String, Object> readAttributes(Path path, String s, LinkOption... linkOptions) throws IOException {
    // NOTE(review): returning null violates the contract. TODO implement.
    return null;
}
/**
 * {@inheritDoc}
 */
@Override
public void setAttribute(Path path, String s, Object o, LinkOption... linkOptions) throws IOException {
    // TODO (review): not yet implemented — currently a silent no-op.
}
/**
 * Removes a closed file system from the set of open file systems so its name may be reused.
 */
void closeFileSystem(String fileSystemName) {
    this.openFileSystems.remove(fileSystemName);
}
private String extractAccountName(URI uri) {
if (!uri.getScheme().equals(this.getScheme())) {
throw Utility.logError(this.logger, new IllegalArgumentException(
"URI scheme does not match this provider"));
}
if (CoreUtils.isNullOrEmpty(uri.getQuery())) {
throw Utility.logError(this.logger, new IllegalArgumentException("URI does not contain a query "
+ "component. FileSystems require a URI of the format \"azb:
}
String accountName = Flux.fromArray(uri.getQuery().split("&"))
.filter(s -> s.startsWith(ACCOUNT_QUERY_KEY + "="))
.switchIfEmpty(Mono.error(Utility.logError(this.logger, new IllegalArgumentException(
"URI does not contain an \"" + ACCOUNT_QUERY_KEY + "=\" parameter. FileSystems require a URI "
+ "of the format \"azb:
.map(s -> s.substring(ACCOUNT_QUERY_KEY.length() + 1))
.blockLast();
if (CoreUtils.isNullOrEmpty(accountName)) {
throw Utility.logError(logger, new IllegalArgumentException("No account name provided in URI query."));
}
return accountName;
}
} |
Nope. Just a magic number plus being a little lazy about adding a constant/option. I figure we can add an option for it if/when people are dissatisfied with this. Or I can add an option now. :P | public void copy(Path source, Path destination, CopyOption... copyOptions) throws IOException {
if (!(source instanceof AzurePath && destination instanceof AzurePath)) {
throw Utility.logError(logger, new IllegalArgumentException("This provider cannot operate on subtypes of "
+ "Path other than AzurePath"));
}
if (source.equals(destination)) {
return;
}
boolean replaceExisting = false;
List<CopyOption> optionsList = new ArrayList<>(Arrays.asList(copyOptions));
if (!optionsList.contains(StandardCopyOption.COPY_ATTRIBUTES)) {
throw Utility.logError(logger, new UnsupportedOperationException("StandardCopyOption.COPY_ATTRIBUTES "
+ "must be specified as the service will always copy file attributes."));
}
optionsList.remove(StandardCopyOption.COPY_ATTRIBUTES);
if (optionsList.contains(StandardCopyOption.REPLACE_EXISTING)) {
replaceExisting = true;
optionsList.remove(StandardCopyOption.REPLACE_EXISTING);
}
if (!optionsList.isEmpty()) {
throw Utility.logError(logger, new UnsupportedOperationException("Unsupported copy option found. Only "
+ "StandardCopyOption.COPY_ATTRIBUTES and StandareCopyOption.REPLACE_EXISTING are supported."));
}
if (source.equals(source.getRoot()) || destination.equals(destination.getRoot())) {
throw Utility.logError(logger, new IllegalArgumentException(String.format("Neither source nor destination "
+ "can be just a root directory. Source: %s. Destination: %s.", source.toString(),
destination.toString())));
}
BlobClient sourceBlob = ((AzurePath) source).toBlobClient();
BlobClient destinationBlob = ((AzurePath) destination).toBlobClient();
DirectoryStatus destinationStatus = checkDirStatus(destinationBlob);
if (destinationStatus.equals(DirectoryStatus.NOT_EMPTY)) {
throw Utility.logError(logger, new DirectoryNotEmptyException(destination.toString()));
}
/*
Set request conditions if we should not overwrite. We can error out here if we know something already exists,
but we will also create request conditions as a safeguard against overwriting something that was created
between our check and put.
*/
BlobRequestConditions requestConditions = null;
if (!replaceExisting) {
if (!destinationStatus.equals(DirectoryStatus.DOES_NOT_EXIST)) {
throw Utility.logError(logger, new FileAlreadyExistsException(destination.toString()));
}
requestConditions = new BlobRequestConditions().setIfNoneMatch("*");
}
/*
More path validation
Check that the parent for the destination exists. We only need to perform this check if there is nothing
currently at the destination, for if the destination exists, its parent at least weakly exists and we
can skip a service call.
*/
if (destinationStatus.equals(DirectoryStatus.DOES_NOT_EXIST) && !checkParentDirectoryExists(destination)) {
throw Utility.logError(logger, new IOException("Parent directory of destination location does not exist."
+ "The destination path is therefore invalid. Destination: " + destination.toString()));
}
/*
Try to copy the resource at the source path.
There is an optimization here where we try to do the copy first and only check for a virtual directory if
there's a 404. In the cases of files and concrete directories, this only requires one request. For virtual
directories, however, this requires three requests: failed copy, check status, create directory. Depending on
customer scenarios and how many virtual directories they copy, it could be better to check the directory status
first and then do a copy or createDir, which would always be two requests for all resource types.
*/
try {
    SyncPoller<BlobCopyInfo, Void> pollResponse =
        destinationBlob.beginCopy(sourceBlob.getBlobUrl(), null, null, null, null, requestConditions, null);
    // NOTE(review): 30 is a magic number. Extract a named constant (e.g. COPY_TIMEOUT_SECONDS) and
    // consider exposing a copy-timeout option if callers need to tune it.
    pollResponse.waitForCompletion(Duration.ofSeconds(30));
} catch (BlobStorageException e) {
    // A 404 on the source may simply mean it is a virtual directory (no marker blob to copy).
    if (e.getErrorCode().equals(BlobErrorCode.BLOB_NOT_FOUND)
        && !checkDirStatus(sourceBlob).equals(DirectoryStatus.DOES_NOT_EXIST)) {
        /*
        We already checked that the parent exists and validated the paths above, so we can put the blob
        directly.
        */
        putDirectoryBlob(destinationBlob, null, null, requestConditions);
    } else {
        throw Utility.logError(logger, new IOException(e));
    }
} catch (RuntimeException e) {
    throw Utility.logError(logger, new IOException(e));
}
} | pollResponse.waitForCompletion(Duration.ofSeconds(30)); | public void copy(Path source, Path destination, CopyOption... copyOptions) throws IOException {
if (!(source instanceof AzurePath && destination instanceof AzurePath)) {
throw Utility.logError(logger, new IllegalArgumentException("This provider cannot operate on subtypes of "
+ "Path other than AzurePath"));
}
if (source.equals(destination)) {
return;
}
boolean replaceExisting = false;
List<CopyOption> optionsList = new ArrayList<>(Arrays.asList(copyOptions));
if (!optionsList.contains(StandardCopyOption.COPY_ATTRIBUTES)) {
throw Utility.logError(logger, new UnsupportedOperationException("StandardCopyOption.COPY_ATTRIBUTES "
+ "must be specified as the service will always copy file attributes."));
}
optionsList.remove(StandardCopyOption.COPY_ATTRIBUTES);
if (optionsList.contains(StandardCopyOption.REPLACE_EXISTING)) {
replaceExisting = true;
optionsList.remove(StandardCopyOption.REPLACE_EXISTING);
}
if (!optionsList.isEmpty()) {
throw Utility.logError(logger, new UnsupportedOperationException("Unsupported copy option found. Only "
+ "StandardCopyOption.COPY_ATTRIBUTES and StandareCopyOption.REPLACE_EXISTING are supported."));
}
if (source.equals(source.getRoot()) || destination.equals(destination.getRoot())) {
throw Utility.logError(logger, new IllegalArgumentException(String.format("Neither source nor destination "
+ "can be just a root directory. Source: %s. Destination: %s.", source.toString(),
destination.toString())));
}
BlobClient sourceBlob = ((AzurePath) source).toBlobClient();
BlobClient destinationBlob = ((AzurePath) destination).toBlobClient();
DirectoryStatus destinationStatus = checkDirStatus(destinationBlob);
if (destinationStatus.equals(DirectoryStatus.NOT_EMPTY)) {
throw Utility.logError(logger, new DirectoryNotEmptyException(destination.toString()));
}
/*
Set request conditions if we should not overwrite. We can error out here if we know something already exists,
but we will also create request conditions as a safeguard against overwriting something that was created
between our check and put.
*/
BlobRequestConditions requestConditions = null;
if (!replaceExisting) {
if (!destinationStatus.equals(DirectoryStatus.DOES_NOT_EXIST)) {
throw Utility.logError(logger, new FileAlreadyExistsException(destination.toString()));
}
requestConditions = new BlobRequestConditions().setIfNoneMatch("*");
}
/*
More path validation
Check that the parent for the destination exists. We only need to perform this check if there is nothing
currently at the destination, for if the destination exists, its parent at least weakly exists and we
can skip a service call.
*/
if (destinationStatus.equals(DirectoryStatus.DOES_NOT_EXIST) && !checkParentDirectoryExists(destination)) {
throw Utility.logError(logger, new IOException("Parent directory of destination location does not exist."
+ "The destination path is therefore invalid. Destination: " + destination.toString()));
}
/*
Try to copy the resource at the source path.
There is an optimization here where we try to do the copy first and only check for a virtual directory if
there's a 404. In the cases of files and concrete directories, this only requires one request. For virtual
directories, however, this requires three requests: failed copy, check status, create directory. Depending on
customer scenarios and how many virtual directories they copy, it could be better to check the directory status
first and then do a copy or createDir, which would always be two requests for all resource types.
*/
try {
SyncPoller<BlobCopyInfo, Void> pollResponse =
destinationBlob.beginCopy(sourceBlob.getBlobUrl(), null, null, null, null, requestConditions, null);
pollResponse.waitForCompletion(Duration.ofSeconds(COPY_TIMEOUT_SECONDS));
} catch (BlobStorageException e) {
if (e.getErrorCode().equals(BlobErrorCode.BLOB_NOT_FOUND)
&& !checkDirStatus(sourceBlob).equals(DirectoryStatus.DOES_NOT_EXIST)) {
/*
We already checked that the parent exists and validated the paths above, so we can put the blob
directly.
*/
putDirectoryBlob(destinationBlob, null, null, requestConditions);
} else {
throw Utility.logError(logger, new IOException(e));
}
} catch (RuntimeException e) {
throw Utility.logError(logger, new IOException(e));
}
} | class AzureFileSystemProvider extends FileSystemProvider {
private final ClientLogger logger = new ClientLogger(AzureFileSystemProvider.class);
public static final String CONTENT_TYPE = "Content-Type";
public static final String CONTENT_DISPOSITION = "Content-Disposition";
public static final String CONTENT_LANGUAGE = "Content-Language";
public static final String CONTENT_ENCODING = "Content-Encoding";
public static final String CONTENT_MD5 = "Content-MD5";
public static final String CACHE_CONTROL = "Cache-Control";
private static final String ACCOUNT_QUERY_KEY = "account";
static final String DIR_METADATA_MARKER = "is_hdi_folder";
private final ConcurrentMap<String, FileSystem> openFileSystems;
/**
 * Creates an AzureFileSystemProvider.
 */
public AzureFileSystemProvider() {
    // Tracks open file systems keyed by account name; concurrent because newFileSystem,
    // getFileSystem, and closeFileSystem may be called from multiple threads.
    this.openFileSystems = new ConcurrentHashMap<>();
}
/**
 * Returns {@code "azb".}
 */
@Override
public String getScheme() {
    // The URI scheme under which this provider is registered.
    return "azb";
}
/**
* The format of a {@code URI} identifying a file system is {@code "azb:
* <p>
* Once closed, a file system with the same identifier may be reopened.
* {@inheritDoc}
*/
@Override
public FileSystem newFileSystem(URI uri, Map<String, ?> config) throws IOException {
    String accountName = extractAccountName(uri);
    // Fast-fail before paying the cost of constructing a file system.
    if (this.openFileSystems.containsKey(accountName)) {
        throw Utility.logError(this.logger, new FileSystemAlreadyExistsException("Name: " + accountName));
    }
    AzureFileSystem afs = new AzureFileSystem(this, accountName, config);
    // containsKey + put is not atomic even on a ConcurrentHashMap; publish with putIfAbsent so two
    // concurrent calls for the same account cannot both appear to succeed.
    if (this.openFileSystems.putIfAbsent(accountName, afs) != null) {
        throw Utility.logError(this.logger, new FileSystemAlreadyExistsException("Name: " + accountName));
    }
    return afs;
}
/**
* The format of a {@code URI} identifying an file system is {@code "azb:
* <p>
* Trying to retrieve a closed file system will throw a {@link FileSystemNotFoundException}. Once closed, a
* file system with the same identifier may be reopened.
* {@inheritDoc}
*/
@Override
public FileSystem getFileSystem(URI uri) {
    String accountName = extractAccountName(uri);
    // Single lookup: containsKey followed by get would race with a concurrent closeFileSystem,
    // potentially returning null after the containsKey check passed.
    FileSystem fileSystem = this.openFileSystems.get(accountName);
    if (fileSystem == null) {
        throw Utility.logError(this.logger, new FileSystemNotFoundException("Name: " + accountName));
    }
    return fileSystem;
}
/**
 * {@inheritDoc}
 */
@Override
public Path getPath(URI uri) {
    // Resolve the owning file system first, then delegate path parsing to it.
    FileSystem fileSystem = getFileSystem(uri);
    return fileSystem.getPath(uri.getPath());
}
/**
 * {@inheritDoc}
 */
@Override
public SeekableByteChannel newByteChannel(Path path, Set<? extends OpenOption> set,
    FileAttribute<?>... fileAttributes) throws IOException {
    // NOTE(review): returning null violates the FileSystemProvider contract; implement or throw
    // UnsupportedOperationException. TODO.
    return null;
}
/**
 * {@inheritDoc}
 */
@Override
public DirectoryStream<Path> newDirectoryStream(Path path, DirectoryStream.Filter<? super Path> filter)
    throws IOException {
    // NOTE(review): returning null violates the contract. TODO implement.
    return null;
}
/**
* The existence of a directory in the {@code AzureFileSystem} is defined on two levels. <i>Weak existence</i> is
* defined by the presence of a non-zero number of blobs prefixed with the directory's path. This concept is also
* known as a <i>virtual directory</i> and enables the file system to work with containers that were pre-loaded
* with data by another source but need to be accessed by this file system. <i>Strong existence</i> is defined as
* the presence of an actual storage resource at the given path, which in the case of directories, is a zero-length
* blob whose name is the directory path with a particular metadata field indicating the blob's status as a
* directory. This is also known as a <i>concrete directory</i>. Directories created by this file system will
* strongly exist. Operations targeting directories themselves as the object (e.g. setting properties) will target
* marker blobs underlying concrete directories. Other operations (e.g. listing) will operate on the blob-name
* prefix.
* <p>
* This method fulfills the nio contract of: "The check for the existence of the file and the creation of the
* directory if it does not exist are a single operation that is atomic with respect to all other filesystem
* activities that might affect the directory." More specifically, this method will atomically check for <i>strong
* existence</i> of another file or directory at the given path and fail if one is present. On the other hand, we
* only check for <i>weak existence</i> of the parent to determine if the given path is valid. Additionally, the
* action of checking whether the parent exists, is <i>not</i> atomic with the creation of the directory. Note that
* while it is possible that the parent may be deleted between when the parent is determined to exist and the
* creation of the child, the creation of the child will always ensure the existence of a virtual parent, so the
* child will never be left floating and unreachable. The different checks on parent and child is due to limitations
* in the Storage service API.
* <p>
* There may be some unintuitive behavior when working with directories in this file system, particularly virtual
* directories(usually those not created by this file system). A virtual directory will disappear as soon as all its
* children have been deleted. Furthermore, if a directory with the given path weakly exists at the time of calling
* this method, this method will still return success and create a concrete directory at the target location.
* In other words, it is possible to "double create" a directory if it first weakly exists and then is strongly
* created. This is both because it is impossible to atomically check if a virtual directory exists while creating a
* concrete directory and because such behavior will have minimal side effects--no files will be overwritten and the
* directory will still be available for writing as intended, though it may not be empty.
* <p>
* This method will attempt to extract standard HTTP content headers from the list of file attributes to set them
* as blob headers. All other attributes will be set as blob metadata. The value of every attribute will be
* converted to a {@code String} with the exception of the Content-MD5 attribute which expects a {@code byte[]}.
* When extracting the content headers, the following strings will be used for comparison (constants for these
* values can be found on this type):
* <ul>
* <li>{@code Content-Type}</li>
* <li>{@code Content-Disposition}</li>
* <li>{@code Content-Language}</li>
* <li>{@code Content-Encoding}</li>
* <li>{@code Content-MD5}</li>
* <li>{@code Cache-Control}</li>
* </ul>
* Note that these properties also have a particular semantic in that if one is specified, all are updated. In other
* words, if any of the above is set, all those that are not set will be cleared. See the
 * <a href="https://docs.microsoft.com/rest/api/storageservices/set-blob-properties">Azure Docs</a> for more
 * information.
*
* {@inheritDoc}
*/
@Override
public void createDirectory(Path path, FileAttribute<?>... fileAttributes) throws IOException {
    if (!(path instanceof AzurePath)) {
        throw Utility.logError(logger, new IllegalArgumentException("This provider cannot operate on subtypes of "
            + "Path other than AzurePath"));
    }
    // Normalize a null varargs array so the attribute handling below can iterate safely.
    fileAttributes = fileAttributes == null ? new FileAttribute<?>[0] : fileAttributes;
    BlobClient client = ((AzurePath) path).toBlobClient();
    // Roots map to containers; a directory-marker blob cannot represent a container.
    Path root = path.getRoot();
    if (root != null && root.equals(path)) {
        throw Utility.logError(logger, new IOException("Creating a root directory is not supported."));
    }
    if (checkParentDirectoryExists(path)) {
        try {
            // Split the supplied attributes into standard HTTP content headers and plain blob metadata.
            List<FileAttribute<?>> attributeList = new ArrayList<>(Arrays.asList(fileAttributes));
            BlobHttpHeaders headers = Utility.extractHttpHeaders(attributeList, logger);
            Map<String, String> metadata = Utility.convertAttributesToMetadata(attributeList);
            // If-None-Match:* makes the put fail with 409 when a blob already exists at this path.
            putDirectoryBlob(client, headers, metadata, new BlobRequestConditions().setIfNoneMatch("*"));
        } catch (BlobStorageException e) {
            // Fix: a 409 alone is not sufficient evidence of an existing blob — other conflict
            // error codes also surface as 409. Require BLOB_ALREADY_EXISTS before mapping to
            // FileAlreadyExistsException; everything else is a generic IOException.
            if (e.getStatusCode() == HttpURLConnection.HTTP_CONFLICT
                && e.getErrorCode().equals(BlobErrorCode.BLOB_ALREADY_EXISTS)) {
                throw Utility.logError(logger, new FileAlreadyExistsException(path.toString()));
            } else {
                throw Utility.logError(logger, new IOException("An error occured when creating the directory", e));
            }
        }
    } else {
        throw Utility.logError(logger, new IOException("Parent directory does not exist for path: "
            + path.toString()));
    }
}
/**
 * Creates the actual directory marker blob. Use only after all directory-creation preconditions
 * (e.g. parent existence) have been verified.
 *
 * @param destinationClient A blobClient pointing to the location where the directory should be put.
 * @param headers Any headers that should be associated with the directory.
 * @param metadata Any metadata that should be associated with the directory; the directory-marker
 * entry is added before upload.
 * @param requestConditions Any necessary request conditions to pass when creating the directory blob.
 */
private void putDirectoryBlob(BlobClient destinationClient, BlobHttpHeaders headers, Map<String, String> metadata,
    BlobRequestConditions requestConditions) {
    // An empty block-list commit yields a zero-length blob; the directory marker lives in its metadata.
    destinationClient.getBlockBlobClient().commitBlockListWithResponse(
        Collections.emptyList(),
        headers,
        prepareMetadataForDirectory(metadata),
        null,
        requestConditions,
        null,
        null);
}
/**
 * Checks for the existence of the parent of the given path. We do not check for the actual marker blob as parents
 * need only weakly exist.
 *
 * If the parent is a root (container), it will be assumed to exist, so it must be validated elsewhere that the
 * container is a legitimate root within this file system.
 *
 * @param path the path whose parent should be checked; must be an AzurePath.
 * @return true if the parent weakly exists (or is a root / implicit default directory), false otherwise.
 * @throws IOException if the existence check against the service fails.
 */
boolean checkParentDirectoryExists(Path path) throws IOException {
    /*
    If the parent is just the root (or null, which means the parent is implicitly the default directory which is a
    root), that means we are checking a container, which is always considered to exist. Otherwise, perform normal
    existence check.
    */
    Path parent = path.getParent();
    // Reuse the parent computed above instead of calling path.getParent() a second time.
    return (parent == null || parent.equals(path.getRoot()))
        || checkDirectoryExists(((AzurePath) parent).toBlobClient());
}
/**
 * Checks whether a directory exists by either being empty or having children.
 *
 * @param dirBlobClient client pointing at the directory (or virtual-directory prefix) to check; must not be null.
 * @return true if the directory exists, whether empty or not; false otherwise.
 */
boolean checkDirectoryExists(BlobClient dirBlobClient) {
    if (dirBlobClient == null) {
        // Message corrected: this method takes exactly one parameter (was "One or both of the
        // parameters was null."), now consistent with checkDirStatus.
        throw Utility.logError(logger, new IllegalArgumentException("The blob client was null."));
    }
    DirectoryStatus dirStatus = checkDirStatus(dirBlobClient);
    return dirStatus.equals(DirectoryStatus.EMPTY) || dirStatus.equals(DirectoryStatus.NOT_EMPTY);
}
/**
 * Adds the directory-marker entry to the supplied metadata, creating a new map when given null.
 * The supplied map, when non-null, is mutated in place (original behavior preserved).
 *
 * @param metadata existing metadata, or null.
 * @return the map containing the directory-marker entry.
 */
Map<String, String> prepareMetadataForDirectory(Map<String, String> metadata) {
    Map<String, String> prepared = metadata != null ? metadata : new HashMap<>();
    prepared.put(DIR_METADATA_MARKER, "true");
    return prepared;
}
/**
 * Builds a container client sharing the given blob client's pipeline and endpoint, so the same
 * credentials and policies apply.
 */
BlobContainerClient getContainerClient(BlobClient client) {
    return new BlobContainerClientBuilder()
        .pipeline(client.getHttpPipeline())
        .endpoint(client.getBlobUrl())
        .buildClient();
}
/**
* {@inheritDoc}
*/
@Override
public void delete(Path path) throws IOException {
}
/**
* As stated in the nio docs, this method is not atomic. More specifically, the checks necessary to validate the
* inputs and state of the file system are not atomic with the actual copying of data. If the copy is triggered,
* the copy itself is atomic and only a complete copy will ever be left at the destination.
*
* In addition to those in the nio javadocs, this method has the following requirements for successful completion.
* {@link StandardCopyOption
* if this option is not passed, an {@link UnsupportedOperationException} will be thrown. Neither the source nor the
* destination can be a root directory; if either is a root directory, an {@link IllegalArgumentException} will be
* thrown. The parent directory of the destination must at least weakly exist; if it does not, an
* {@link IOException} will be thrown. The only supported option other than
* {@link StandardCopyOption
* other option will result in an {@link UnsupportedOperationException}.
*
* This method supports both virtual and concrete directories as both the source and destination. Unlike when
* creating a directory, the existence of a virtual directory at the destination will cause this operation to fail.
* This is in order to prevent the possibility of overwriting a non-empty virtual directory with a file. Still, as
* mentioned above, this check is not atomic with the creation of the resultant directory.
*
* {@inheritDoc}
* @see
*/
@Override
/**
* This method will check if a directory is extant and/or empty and accommodates virtual directories. This method
* will not check the status of root directories.
*/
DirectoryStatus checkDirStatus(BlobClient dirBlobClient) {
if (dirBlobClient == null) {
throw Utility.logError(logger, new IllegalArgumentException("The blob client was null."));
}
BlobContainerClient containerClient = getContainerClient(dirBlobClient);
ListBlobsOptions listOptions = new ListBlobsOptions().setMaxResultsPerPage(2)
.setPrefix(dirBlobClient.getBlobName())
.setDetails(new BlobListDetails().setRetrieveMetadata(true));
/*
Do a list on prefix.
Zero elements means no virtual dir. Does not exist.
One element that matches this dir means empty.
One element that doesn't match this dir or more than one element. Not empty.
One element that matches the name but does not have a directory marker means the resource is not a directory.
Note that blob names that match the prefix exactly are returned in listing operations.
*/
Iterator<BlobItem> blobIterator = containerClient.listBlobsByHierarchy(AzureFileSystem.PATH_SEPARATOR,
listOptions, null).iterator();
if (!blobIterator.hasNext()) {
return DirectoryStatus.DOES_NOT_EXIST;
} else {
BlobItem item = blobIterator.next();
if (blobIterator.hasNext()) {
return DirectoryStatus.NOT_EMPTY;
}
if (!item.getName().equals(dirBlobClient.getBlobName())) {
/*
Names do not match. Must be a virtual dir with one item. e.g. blob with name "foo/bar" means dir "foo"
exists.
*/
return DirectoryStatus.NOT_EMPTY;
}
if (item.getMetadata() != null && item.getMetadata().containsKey(DIR_METADATA_MARKER)) {
return DirectoryStatus.EMPTY;
}
return DirectoryStatus.NOT_A_DIRECTORY;
}
}
/*int checkRootDirStatus(BlobContainerClient rootClient) {
}*/
/**
* {@inheritDoc}
*/
@Override
public void move(Path path, Path path1, CopyOption... copyOptions) throws IOException {
}
/**
* {@inheritDoc}
*/
@Override
public boolean isSameFile(Path path, Path path1) throws IOException {
return false;
}
/**
* {@inheritDoc}
*/
@Override
public boolean isHidden(Path path) throws IOException {
return false;
}
/**
* {@inheritDoc}
*/
@Override
public FileStore getFileStore(Path path) throws IOException {
return null;
}
/**
* {@inheritDoc}
*/
@Override
public void checkAccess(Path path, AccessMode... accessModes) throws IOException {
}
/**
* {@inheritDoc}
*/
@Override
public <V extends FileAttributeView> V getFileAttributeView(Path path, Class<V> aClass, LinkOption... linkOptions) {
return null;
}
/**
* {@inheritDoc}
*/
@Override
public <A extends BasicFileAttributes> A readAttributes(Path path, Class<A> aClass, LinkOption... linkOptions)
throws IOException {
return null;
}
/**
* {@inheritDoc}
*/
@Override
public Map<String, Object> readAttributes(Path path, String s, LinkOption... linkOptions) throws IOException {
return null;
}
/**
* {@inheritDoc}
*/
@Override
public void setAttribute(Path path, String s, Object o, LinkOption... linkOptions) throws IOException {
}
void closeFileSystem(String fileSystemName) {
this.openFileSystems.remove(fileSystemName);
}
private String extractAccountName(URI uri) {
if (!uri.getScheme().equals(this.getScheme())) {
throw Utility.logError(this.logger, new IllegalArgumentException(
"URI scheme does not match this provider"));
}
if (CoreUtils.isNullOrEmpty(uri.getQuery())) {
throw Utility.logError(this.logger, new IllegalArgumentException("URI does not contain a query "
+ "component. FileSystems require a URI of the format \"azb:
}
String accountName = Flux.fromArray(uri.getQuery().split("&"))
.filter(s -> s.startsWith(ACCOUNT_QUERY_KEY + "="))
.switchIfEmpty(Mono.error(Utility.logError(this.logger, new IllegalArgumentException(
"URI does not contain an \"" + ACCOUNT_QUERY_KEY + "=\" parameter. FileSystems require a URI "
+ "of the format \"azb:
.map(s -> s.substring(ACCOUNT_QUERY_KEY.length() + 1))
.blockLast();
if (CoreUtils.isNullOrEmpty(accountName)) {
throw Utility.logError(logger, new IllegalArgumentException("No account name provided in URI query."));
}
return accountName;
}
} | class AzureFileSystemProvider extends FileSystemProvider {
private final ClientLogger logger = new ClientLogger(AzureFileSystemProvider.class);
public static final String CONTENT_TYPE = "Content-Type";
public static final String CONTENT_DISPOSITION = "Content-Disposition";
public static final String CONTENT_LANGUAGE = "Content-Language";
public static final String CONTENT_ENCODING = "Content-Encoding";
public static final String CONTENT_MD5 = "Content-MD5";
public static final String CACHE_CONTROL = "Cache-Control";
private static final String ACCOUNT_QUERY_KEY = "account";
private static final int COPY_TIMEOUT_SECONDS = 30;
static final String DIR_METADATA_MARKER = "is_hdi_folder";
private final ConcurrentMap<String, FileSystem> openFileSystems;
/**
* Creates an AzureFileSystemProvider.
*/
public AzureFileSystemProvider() {
this.openFileSystems = new ConcurrentHashMap<>();
}
/**
* Returns {@code "azb".}
*/
@Override
public String getScheme() {
return "azb";
}
/**
* The format of a {@code URI} identifying a file system is {@code "azb:
* <p>
* Once closed, a file system with the same identifier may be reopened.
* {@inheritDoc}
*/
@Override
public FileSystem newFileSystem(URI uri, Map<String, ?> config) throws IOException {
String accountName = extractAccountName(uri);
if (this.openFileSystems.containsKey(accountName)) {
throw Utility.logError(this.logger, new FileSystemAlreadyExistsException("Name: " + accountName));
}
AzureFileSystem afs = new AzureFileSystem(this, accountName, config);
this.openFileSystems.put(accountName, afs);
return afs;
}
/**
* The format of a {@code URI} identifying an file system is {@code "azb:
* <p>
* Trying to retrieve a closed file system will throw a {@link FileSystemNotFoundException}. Once closed, a
* file system with the same identifier may be reopened.
* {@inheritDoc}
*/
@Override
public FileSystem getFileSystem(URI uri) {
String accountName = extractAccountName(uri);
if (!this.openFileSystems.containsKey(accountName)) {
throw Utility.logError(this.logger, new FileSystemNotFoundException("Name: " + accountName));
}
return this.openFileSystems.get(accountName);
}
/**
* {@inheritDoc}
*/
@Override
public Path getPath(URI uri) {
return getFileSystem(uri).getPath(uri.getPath());
}
/**
* {@inheritDoc}
*/
@Override
public SeekableByteChannel newByteChannel(Path path, Set<? extends OpenOption> set,
FileAttribute<?>... fileAttributes) throws IOException {
return null;
}
/**
* {@inheritDoc}
*/
@Override
public DirectoryStream<Path> newDirectoryStream(Path path, DirectoryStream.Filter<? super Path> filter)
throws IOException {
return null;
}
/**
* The existence of a directory in the {@code AzureFileSystem} is defined on two levels. <i>Weak existence</i> is
* defined by the presence of a non-zero number of blobs prefixed with the directory's path. This concept is also
* known as a <i>virtual directory</i> and enables the file system to work with containers that were pre-loaded
* with data by another source but need to be accessed by this file system. <i>Strong existence</i> is defined as
* the presence of an actual storage resource at the given path, which in the case of directories, is a zero-length
* blob whose name is the directory path with a particular metadata field indicating the blob's status as a
* directory. This is also known as a <i>concrete directory</i>. Directories created by this file system will
* strongly exist. Operations targeting directories themselves as the object (e.g. setting properties) will target
* marker blobs underlying concrete directories. Other operations (e.g. listing) will operate on the blob-name
* prefix.
* <p>
* This method fulfills the nio contract of: "The check for the existence of the file and the creation of the
* directory if it does not exist are a single operation that is atomic with respect to all other filesystem
* activities that might affect the directory." More specifically, this method will atomically check for <i>strong
* existence</i> of another file or directory at the given path and fail if one is present. On the other hand, we
* only check for <i>weak existence</i> of the parent to determine if the given path is valid. Additionally, the
* action of checking whether the parent exists, is <i>not</i> atomic with the creation of the directory. Note that
* while it is possible that the parent may be deleted between when the parent is determined to exist and the
* creation of the child, the creation of the child will always ensure the existence of a virtual parent, so the
* child will never be left floating and unreachable. The different checks on parent and child is due to limitations
* in the Storage service API.
* <p>
* There may be some unintuitive behavior when working with directories in this file system, particularly virtual
* directories(usually those not created by this file system). A virtual directory will disappear as soon as all its
* children have been deleted. Furthermore, if a directory with the given path weakly exists at the time of calling
* this method, this method will still return success and create a concrete directory at the target location.
* In other words, it is possible to "double create" a directory if it first weakly exists and then is strongly
* created. This is both because it is impossible to atomically check if a virtual directory exists while creating a
* concrete directory and because such behavior will have minimal side effects--no files will be overwritten and the
* directory will still be available for writing as intended, though it may not be empty.
* <p>
* This method will attempt to extract standard HTTP content headers from the list of file attributes to set them
* as blob headers. All other attributes will be set as blob metadata. The value of every attribute will be
* converted to a {@code String} with the exception of the Content-MD5 attribute which expects a {@code byte[]}.
* When extracting the content headers, the following strings will be used for comparison (constants for these
* values can be found on this type):
* <ul>
* <li>{@code Content-Type}</li>
* <li>{@code Content-Disposition}</li>
* <li>{@code Content-Language}</li>
* <li>{@code Content-Encoding}</li>
* <li>{@code Content-MD5}</li>
* <li>{@code Cache-Control}</li>
* </ul>
* Note that these properties also have a particular semantic in that if one is specified, all are updated. In other
* words, if any of the above is set, all those that are not set will be cleared. See the
* <a href="https:
* information.
*
* {@inheritDoc}
*/
@Override
public void createDirectory(Path path, FileAttribute<?>... fileAttributes) throws IOException {
if (!(path instanceof AzurePath)) {
throw Utility.logError(logger, new IllegalArgumentException("This provider cannot operate on subtypes of "
+ "Path other than AzurePath"));
}
fileAttributes = fileAttributes == null ? new FileAttribute<?>[0] : fileAttributes;
BlobClient client = ((AzurePath) path).toBlobClient();
Path root = path.getRoot();
if (root != null && root.equals(path)) {
throw Utility.logError(logger, new IOException("Creating a root directory is not supported."));
}
if (checkParentDirectoryExists(path)) {
try {
List<FileAttribute<?>> attributeList = new ArrayList<>(Arrays.asList(fileAttributes));
BlobHttpHeaders headers = Utility.extractHttpHeaders(attributeList, logger);
Map<String, String> metadata = Utility.convertAttributesToMetadata(attributeList);
putDirectoryBlob(client, headers, metadata, new BlobRequestConditions().setIfNoneMatch("*"));
} catch (BlobStorageException e) {
if (e.getStatusCode() == HttpURLConnection.HTTP_CONFLICT
&& e.getErrorCode().equals(BlobErrorCode.BLOB_ALREADY_EXISTS)) {
throw Utility.logError(logger, new FileAlreadyExistsException(path.toString()));
} else {
throw Utility.logError(logger, new IOException("An error occured when creating the directory", e));
}
}
} else {
throw Utility.logError(logger, new IOException("Parent directory does not exist for path: "
+ path.toString()));
}
}
/**
* Creates the actual directory marker. This method should only be used when any necessary checks for proper
* conditions of directory creation (e.g. parent existence) have already been performed.
*
* @param destinationClient A blobClient pointing to the location where the directory should be put.
* @param headers Any headers that should be associated with the directory.
* @param metadata Any metadata that should be associated with the directory. This method will add the necessary
* metadata to distinguish this blob as a directory.
* @param requestConditions Any necessary request conditions to pass when creating the directory blob.
*/
private void putDirectoryBlob(BlobClient destinationClient, BlobHttpHeaders headers, Map<String, String> metadata,
BlobRequestConditions requestConditions) {
metadata = prepareMetadataForDirectory(metadata);
destinationClient.getBlockBlobClient().commitBlockListWithResponse(Collections.emptyList(), headers,
metadata, null, requestConditions, null, null);
}
/**
* Checks for the existence of the parent of the given path. We do not check for the actual marker blob as parents
* need only weakly exist.
*
* If the parent is a root (container), it will be assumed to exist, so it must be validated elsewhere that the
* container is a legitimate root within this file system.
*/
boolean checkParentDirectoryExists(Path path) throws IOException {
/*
If the parent is just the root (or null, which means the parent is implicitly the default directory which is a
root), that means we are checking a container, which is always considered to exist. Otherwise, perform normal
existence check.
*/
Path parent = path.getParent();
return (parent == null || parent.equals(path.getRoot()))
|| checkDirectoryExists(((AzurePath) path.getParent()).toBlobClient());
}
/**
* Checks whether a directory exists by either being empty or having children.
*/
boolean checkDirectoryExists(BlobClient dirBlobClient) {
if (dirBlobClient == null) {
throw Utility.logError(logger, new IllegalArgumentException("One or both of the parameters was null."));
}
DirectoryStatus dirStatus = checkDirStatus(dirBlobClient);
return dirStatus.equals(DirectoryStatus.EMPTY) || dirStatus.equals(DirectoryStatus.NOT_EMPTY);
}
Map<String, String> prepareMetadataForDirectory(Map<String, String> metadata) {
if (metadata == null) {
metadata = new HashMap<>();
}
metadata.put(DIR_METADATA_MARKER, "true");
return metadata;
}
BlobContainerClient getContainerClient(BlobClient client) {
return new BlobContainerClientBuilder().endpoint(client.getBlobUrl())
.pipeline(client.getHttpPipeline())
.buildClient();
}
/**
* {@inheritDoc}
*/
@Override
public void delete(Path path) throws IOException {
}
/**
* As stated in the nio docs, this method is not atomic. More specifically, the checks necessary to validate the
* inputs and state of the file system are not atomic with the actual copying of data. If the copy is triggered,
* the copy itself is atomic and only a complete copy will ever be left at the destination.
*
* In addition to those in the nio javadocs, this method has the following requirements for successful completion.
* {@link StandardCopyOption
* if this option is not passed, an {@link UnsupportedOperationException} will be thrown. Neither the source nor the
* destination can be a root directory; if either is a root directory, an {@link IllegalArgumentException} will be
* thrown. The parent directory of the destination must at least weakly exist; if it does not, an
* {@link IOException} will be thrown. The only supported option other than
* {@link StandardCopyOption
* other option will result in an {@link UnsupportedOperationException}.
*
* This method supports both virtual and concrete directories as both the source and destination. Unlike when
* creating a directory, the existence of a virtual directory at the destination will cause this operation to fail.
* This is in order to prevent the possibility of overwriting a non-empty virtual directory with a file. Still, as
* mentioned above, this check is not atomic with the creation of the resultant directory.
*
* {@inheritDoc}
* @see
*/
@Override
/**
* This method will check if a directory is extant and/or empty and accommodates virtual directories. This method
* will not check the status of root directories.
*/
DirectoryStatus checkDirStatus(BlobClient dirBlobClient) {
if (dirBlobClient == null) {
throw Utility.logError(logger, new IllegalArgumentException("The blob client was null."));
}
BlobContainerClient containerClient = getContainerClient(dirBlobClient);
ListBlobsOptions listOptions = new ListBlobsOptions().setMaxResultsPerPage(2)
.setPrefix(dirBlobClient.getBlobName())
.setDetails(new BlobListDetails().setRetrieveMetadata(true));
/*
Do a list on prefix.
Zero elements means no virtual dir. Does not exist.
One element that matches this dir means empty.
One element that doesn't match this dir or more than one element. Not empty.
One element that matches the name but does not have a directory marker means the resource is not a directory.
Note that blob names that match the prefix exactly are returned in listing operations.
*/
Iterator<BlobItem> blobIterator = containerClient.listBlobsByHierarchy(AzureFileSystem.PATH_SEPARATOR,
listOptions, null).iterator();
if (!blobIterator.hasNext()) {
return DirectoryStatus.DOES_NOT_EXIST;
} else {
BlobItem item = blobIterator.next();
if (blobIterator.hasNext()) {
return DirectoryStatus.NOT_EMPTY;
}
if (!item.getName().equals(dirBlobClient.getBlobName())) {
/*
Names do not match. Must be a virtual dir with one item. e.g. blob with name "foo/bar" means dir "foo"
exists.
*/
return DirectoryStatus.NOT_EMPTY;
}
if (item.getMetadata() != null && item.getMetadata().containsKey(DIR_METADATA_MARKER)) {
return DirectoryStatus.EMPTY;
}
return DirectoryStatus.NOT_A_DIRECTORY;
}
}
/*int checkRootDirStatus(BlobContainerClient rootClient) {
}*/
/**
* {@inheritDoc}
*/
@Override
public void move(Path path, Path path1, CopyOption... copyOptions) throws IOException {
}
/**
* {@inheritDoc}
*/
@Override
public boolean isSameFile(Path path, Path path1) throws IOException {
return false;
}
/**
* {@inheritDoc}
*/
@Override
public boolean isHidden(Path path) throws IOException {
return false;
}
/**
* {@inheritDoc}
*/
@Override
public FileStore getFileStore(Path path) throws IOException {
return null;
}
/**
* {@inheritDoc}
*/
@Override
public void checkAccess(Path path, AccessMode... accessModes) throws IOException {
}
/**
* {@inheritDoc}
*/
@Override
public <V extends FileAttributeView> V getFileAttributeView(Path path, Class<V> aClass, LinkOption... linkOptions) {
return null;
}
/**
* {@inheritDoc}
*/
@Override
public <A extends BasicFileAttributes> A readAttributes(Path path, Class<A> aClass, LinkOption... linkOptions)
throws IOException {
return null;
}
/**
* {@inheritDoc}
*/
@Override
public Map<String, Object> readAttributes(Path path, String s, LinkOption... linkOptions) throws IOException {
return null;
}
/**
* {@inheritDoc}
*/
@Override
public void setAttribute(Path path, String s, Object o, LinkOption... linkOptions) throws IOException {
}
void closeFileSystem(String fileSystemName) {
this.openFileSystems.remove(fileSystemName);
}
private String extractAccountName(URI uri) {
if (!uri.getScheme().equals(this.getScheme())) {
throw Utility.logError(this.logger, new IllegalArgumentException(
"URI scheme does not match this provider"));
}
if (CoreUtils.isNullOrEmpty(uri.getQuery())) {
throw Utility.logError(this.logger, new IllegalArgumentException("URI does not contain a query "
+ "component. FileSystems require a URI of the format \"azb:
}
String accountName = Flux.fromArray(uri.getQuery().split("&"))
.filter(s -> s.startsWith(ACCOUNT_QUERY_KEY + "="))
.switchIfEmpty(Mono.error(Utility.logError(this.logger, new IllegalArgumentException(
"URI does not contain an \"" + ACCOUNT_QUERY_KEY + "=\" parameter. FileSystems require a URI "
+ "of the format \"azb:
.map(s -> s.substring(ACCOUNT_QUERY_KEY.length() + 1))
.blockLast();
if (CoreUtils.isNullOrEmpty(accountName)) {
throw Utility.logError(logger, new IllegalArgumentException("No account name provided in URI query."));
}
return accountName;
}
} |
Indenttaion | public AsyncDocumentClient build() {
ifThrowIllegalArgException(this.serviceEndpoint == null, "cannot buildAsyncClient client without service endpoint");
ifThrowIllegalArgException(
this.masterKeyOrResourceToken == null && (permissionFeed == null || permissionFeed.isEmpty())
&& this.cosmosAuthorizationTokenResolver == null && this.cosmosKeyCredential == null,
"cannot buildAsyncClient client without any one of masterKey, " +
"resource token, permissionFeed, tokenResolver and cosmos key credential");
ifThrowIllegalArgException(cosmosKeyCredential != null && StringUtils.isEmpty(cosmosKeyCredential.getKey()),
"cannot buildAsyncClient client without key credential");
RxDocumentClientImpl client = new RxDocumentClientImpl(serviceEndpoint,
masterKeyOrResourceToken,
permissionFeed,
connectionPolicy,
desiredConsistencyLevel,
configs,
cosmosAuthorizationTokenResolver,
cosmosKeyCredential,
sessionCapturingOverride,
transportClientSharing);
client.init();
return client;
} | cosmosAuthorizationTokenResolver, | public AsyncDocumentClient build() {
ifThrowIllegalArgException(this.serviceEndpoint == null, "cannot buildAsyncClient client without service endpoint");
ifThrowIllegalArgException(
this.masterKeyOrResourceToken == null && (permissionFeed == null || permissionFeed.isEmpty())
&& this.cosmosAuthorizationTokenResolver == null && this.cosmosKeyCredential == null,
"cannot buildAsyncClient client without any one of masterKey, " +
"resource token, permissionFeed, tokenResolver and cosmos key credential");
ifThrowIllegalArgException(cosmosKeyCredential != null && StringUtils.isEmpty(cosmosKeyCredential.getKey()),
"cannot buildAsyncClient client without key credential");
RxDocumentClientImpl client = new RxDocumentClientImpl(serviceEndpoint,
masterKeyOrResourceToken,
permissionFeed,
connectionPolicy,
desiredConsistencyLevel,
configs,
cosmosAuthorizationTokenResolver,
cosmosKeyCredential,
sessionCapturingOverride,
transportClientSharing);
client.init();
return client;
} | class Builder {
Configs configs = new Configs();
ConnectionPolicy connectionPolicy;
ConsistencyLevel desiredConsistencyLevel;
List<Permission> permissionFeed;
String masterKeyOrResourceToken;
URI serviceEndpoint;
CosmosAuthorizationTokenResolver cosmosAuthorizationTokenResolver;
CosmosKeyCredential cosmosKeyCredential;
boolean sessionCapturingOverride;
boolean transportClientSharing;
public Builder withServiceEndpoint(String serviceEndpoint) {
try {
this.serviceEndpoint = new URI(serviceEndpoint);
} catch (URISyntaxException e) {
throw new IllegalArgumentException(e.getMessage());
}
return this;
}
/**
* New method withMasterKeyOrResourceToken will take either master key or resource token
* and perform authentication for accessing resource.
*
* @param masterKeyOrResourceToken MasterKey or resourceToken for authentication.
* @return current Builder.
* @deprecated use {@link
*/
@Deprecated
public Builder withMasterKey(String masterKeyOrResourceToken) {
this.masterKeyOrResourceToken = masterKeyOrResourceToken;
return this;
}
/**
* This method will accept the master key , additionally it can also consume
* resource token too for authentication.
*
* @param masterKeyOrResourceToken MasterKey or resourceToken for authentication.
* @return current Builder.
*/
public Builder withMasterKeyOrResourceToken(String masterKeyOrResourceToken) {
this.masterKeyOrResourceToken = masterKeyOrResourceToken;
return this;
}
/**
* This method will accept the permission list , which contains the
* resource tokens needed to access resources.
*
* @param permissionFeed Permission list for authentication.
* @return current Builder.
*/
public Builder withPermissionFeed(List<Permission> permissionFeed) {
this.permissionFeed = permissionFeed;
return this;
}
public Builder withConsistencyLevel(ConsistencyLevel desiredConsistencyLevel) {
this.desiredConsistencyLevel = desiredConsistencyLevel;
return this;
}
public Builder withConfigs(Configs configs) {
this.configs = configs;
return this;
}
public Builder withSessionCapturingOverride(boolean sessionCapturingOverride) {
this.sessionCapturingOverride = sessionCapturingOverride;
return this;
}
public Builder withConnectionPolicy(ConnectionPolicy connectionPolicy) {
this.connectionPolicy = connectionPolicy;
return this;
}
public Builder withTransportClientSharing(boolean transportClientSharing) {
this.transportClientSharing = transportClientSharing;
return this;
}
public Builder withCosmosKeyCredential(CosmosKeyCredential cosmosKeyCredential) {
if (cosmosKeyCredential != null && StringUtils.isEmpty(cosmosKeyCredential.getKey())) {
throw new IllegalArgumentException("Cannot buildAsyncClient client with empty key credential");
}
this.cosmosKeyCredential = cosmosKeyCredential;
return this;
}
/**
* This method will accept functional interface TokenResolver which helps in generation authorization
* token per request. AsyncDocumentClient can be successfully initialized with this API without passing any MasterKey, ResourceToken or PermissionFeed.
* @param cosmosAuthorizationTokenResolver The tokenResolver
* @return current Builder.
*/
public Builder withTokenResolver(CosmosAuthorizationTokenResolver cosmosAuthorizationTokenResolver) {
this.cosmosAuthorizationTokenResolver = cosmosAuthorizationTokenResolver;
return this;
}
private void ifThrowIllegalArgException(boolean value, String error) {
if (value) {
throw new IllegalArgumentException(error);
}
}
public Configs getConfigs() {
return configs;
}
public void setConfigs(Configs configs) {
this.configs = configs;
}
public ConnectionPolicy getConnectionPolicy() {
return connectionPolicy;
}
public void setConnectionPolicy(ConnectionPolicy connectionPolicy) {
this.connectionPolicy = connectionPolicy;
}
public ConsistencyLevel getDesiredConsistencyLevel() {
return desiredConsistencyLevel;
}
public void setDesiredConsistencyLevel(ConsistencyLevel desiredConsistencyLevel) {
this.desiredConsistencyLevel = desiredConsistencyLevel;
}
public List<Permission> getPermissionFeed() {
return permissionFeed;
}
public void setPermissionFeed(List<Permission> permissionFeed) {
this.permissionFeed = permissionFeed;
}
public String getMasterKeyOrResourceToken() {
return masterKeyOrResourceToken;
}
public void setMasterKeyOrResourceToken(String masterKeyOrResourceToken) {
this.masterKeyOrResourceToken = masterKeyOrResourceToken;
}
public URI getServiceEndpoint() {
return serviceEndpoint;
}
public void setServiceEndpoint(URI serviceEndpoint) {
this.serviceEndpoint = serviceEndpoint;
}
public CosmosAuthorizationTokenResolver getCosmosAuthorizationTokenResolver() {
return cosmosAuthorizationTokenResolver;
}
public void setCosmosAuthorizationTokenResolver(CosmosAuthorizationTokenResolver cosmosAuthorizationTokenResolver) {
this.cosmosAuthorizationTokenResolver = cosmosAuthorizationTokenResolver;
}
public CosmosKeyCredential getCosmosKeyCredential() {
return cosmosKeyCredential;
}
} | class Builder {
Configs configs = new Configs();
ConnectionPolicy connectionPolicy;
ConsistencyLevel desiredConsistencyLevel;
List<Permission> permissionFeed;
String masterKeyOrResourceToken;
URI serviceEndpoint;
CosmosAuthorizationTokenResolver cosmosAuthorizationTokenResolver;
CosmosKeyCredential cosmosKeyCredential;
boolean sessionCapturingOverride;
boolean transportClientSharing;
public Builder withServiceEndpoint(String serviceEndpoint) {
try {
this.serviceEndpoint = new URI(serviceEndpoint);
} catch (URISyntaxException e) {
throw new IllegalArgumentException(e.getMessage());
}
return this;
}
/**
* New method withMasterKeyOrResourceToken will take either master key or resource token
* and perform authentication for accessing resource.
*
* @param masterKeyOrResourceToken MasterKey or resourceToken for authentication.
* @return current Builder.
* @deprecated use {@link
*/
@Deprecated
public Builder withMasterKey(String masterKeyOrResourceToken) {
this.masterKeyOrResourceToken = masterKeyOrResourceToken;
return this;
}
/**
* This method will accept the master key , additionally it can also consume
* resource token too for authentication.
*
* @param masterKeyOrResourceToken MasterKey or resourceToken for authentication.
* @return current Builder.
*/
public Builder withMasterKeyOrResourceToken(String masterKeyOrResourceToken) {
this.masterKeyOrResourceToken = masterKeyOrResourceToken;
return this;
}
/**
* This method will accept the permission list , which contains the
* resource tokens needed to access resources.
*
* @param permissionFeed Permission list for authentication.
* @return current Builder.
*/
public Builder withPermissionFeed(List<Permission> permissionFeed) {
this.permissionFeed = permissionFeed;
return this;
}
public Builder withConsistencyLevel(ConsistencyLevel desiredConsistencyLevel) {
this.desiredConsistencyLevel = desiredConsistencyLevel;
return this;
}
public Builder withConfigs(Configs configs) {
this.configs = configs;
return this;
}
public Builder withSessionCapturingOverride(boolean sessionCapturingOverride) {
this.sessionCapturingOverride = sessionCapturingOverride;
return this;
}
public Builder withConnectionPolicy(ConnectionPolicy connectionPolicy) {
this.connectionPolicy = connectionPolicy;
return this;
}
public Builder withTransportClientSharing(boolean transportClientSharing) {
this.transportClientSharing = transportClientSharing;
return this;
}
public Builder withCosmosKeyCredential(CosmosKeyCredential cosmosKeyCredential) {
if (cosmosKeyCredential != null && StringUtils.isEmpty(cosmosKeyCredential.getKey())) {
throw new IllegalArgumentException("Cannot buildAsyncClient client with empty key credential");
}
this.cosmosKeyCredential = cosmosKeyCredential;
return this;
}
/**
* This method will accept functional interface TokenResolver which helps in generation authorization
* token per request. AsyncDocumentClient can be successfully initialized with this API without passing any MasterKey, ResourceToken or PermissionFeed.
* @param cosmosAuthorizationTokenResolver The tokenResolver
* @return current Builder.
*/
public Builder withTokenResolver(CosmosAuthorizationTokenResolver cosmosAuthorizationTokenResolver) {
this.cosmosAuthorizationTokenResolver = cosmosAuthorizationTokenResolver;
return this;
}
private void ifThrowIllegalArgException(boolean value, String error) {
if (value) {
throw new IllegalArgumentException(error);
}
}
public Configs getConfigs() {
return configs;
}
public void setConfigs(Configs configs) {
this.configs = configs;
}
public ConnectionPolicy getConnectionPolicy() {
return connectionPolicy;
}
public void setConnectionPolicy(ConnectionPolicy connectionPolicy) {
this.connectionPolicy = connectionPolicy;
}
public ConsistencyLevel getDesiredConsistencyLevel() {
return desiredConsistencyLevel;
}
public void setDesiredConsistencyLevel(ConsistencyLevel desiredConsistencyLevel) {
this.desiredConsistencyLevel = desiredConsistencyLevel;
}
public List<Permission> getPermissionFeed() {
return permissionFeed;
}
public void setPermissionFeed(List<Permission> permissionFeed) {
this.permissionFeed = permissionFeed;
}
public String getMasterKeyOrResourceToken() {
return masterKeyOrResourceToken;
}
public void setMasterKeyOrResourceToken(String masterKeyOrResourceToken) {
this.masterKeyOrResourceToken = masterKeyOrResourceToken;
}
public URI getServiceEndpoint() {
return serviceEndpoint;
}
public void setServiceEndpoint(URI serviceEndpoint) {
this.serviceEndpoint = serviceEndpoint;
}
public CosmosAuthorizationTokenResolver getCosmosAuthorizationTokenResolver() {
return cosmosAuthorizationTokenResolver;
}
public void setCosmosAuthorizationTokenResolver(CosmosAuthorizationTokenResolver cosmosAuthorizationTokenResolver) {
this.cosmosAuthorizationTokenResolver = cosmosAuthorizationTokenResolver;
}
public CosmosKeyCredential getCosmosKeyCredential() {
return cosmosKeyCredential;
}
} |
will fix. thanks. result of intellji auto rename. | public AsyncDocumentClient build() {
ifThrowIllegalArgException(this.serviceEndpoint == null, "cannot buildAsyncClient client without service endpoint");
ifThrowIllegalArgException(
this.masterKeyOrResourceToken == null && (permissionFeed == null || permissionFeed.isEmpty())
&& this.cosmosAuthorizationTokenResolver == null && this.cosmosKeyCredential == null,
"cannot buildAsyncClient client without any one of masterKey, " +
"resource token, permissionFeed, tokenResolver and cosmos key credential");
ifThrowIllegalArgException(cosmosKeyCredential != null && StringUtils.isEmpty(cosmosKeyCredential.getKey()),
"cannot buildAsyncClient client without key credential");
RxDocumentClientImpl client = new RxDocumentClientImpl(serviceEndpoint,
masterKeyOrResourceToken,
permissionFeed,
connectionPolicy,
desiredConsistencyLevel,
configs,
cosmosAuthorizationTokenResolver,
cosmosKeyCredential,
sessionCapturingOverride,
transportClientSharing);
client.init();
return client;
} | cosmosAuthorizationTokenResolver, | public AsyncDocumentClient build() {
ifThrowIllegalArgException(this.serviceEndpoint == null, "cannot buildAsyncClient client without service endpoint");
ifThrowIllegalArgException(
this.masterKeyOrResourceToken == null && (permissionFeed == null || permissionFeed.isEmpty())
&& this.cosmosAuthorizationTokenResolver == null && this.cosmosKeyCredential == null,
"cannot buildAsyncClient client without any one of masterKey, " +
"resource token, permissionFeed, tokenResolver and cosmos key credential");
ifThrowIllegalArgException(cosmosKeyCredential != null && StringUtils.isEmpty(cosmosKeyCredential.getKey()),
"cannot buildAsyncClient client without key credential");
RxDocumentClientImpl client = new RxDocumentClientImpl(serviceEndpoint,
masterKeyOrResourceToken,
permissionFeed,
connectionPolicy,
desiredConsistencyLevel,
configs,
cosmosAuthorizationTokenResolver,
cosmosKeyCredential,
sessionCapturingOverride,
transportClientSharing);
client.init();
return client;
} | class Builder {
Configs configs = new Configs();
ConnectionPolicy connectionPolicy;
ConsistencyLevel desiredConsistencyLevel;
List<Permission> permissionFeed;
String masterKeyOrResourceToken;
URI serviceEndpoint;
CosmosAuthorizationTokenResolver cosmosAuthorizationTokenResolver;
CosmosKeyCredential cosmosKeyCredential;
boolean sessionCapturingOverride;
boolean transportClientSharing;
public Builder withServiceEndpoint(String serviceEndpoint) {
try {
this.serviceEndpoint = new URI(serviceEndpoint);
} catch (URISyntaxException e) {
throw new IllegalArgumentException(e.getMessage());
}
return this;
}
/**
* New method withMasterKeyOrResourceToken will take either master key or resource token
* and perform authentication for accessing resource.
*
* @param masterKeyOrResourceToken MasterKey or resourceToken for authentication.
* @return current Builder.
* @deprecated use {@link
*/
@Deprecated
public Builder withMasterKey(String masterKeyOrResourceToken) {
this.masterKeyOrResourceToken = masterKeyOrResourceToken;
return this;
}
/**
* This method will accept the master key , additionally it can also consume
* resource token too for authentication.
*
* @param masterKeyOrResourceToken MasterKey or resourceToken for authentication.
* @return current Builder.
*/
public Builder withMasterKeyOrResourceToken(String masterKeyOrResourceToken) {
this.masterKeyOrResourceToken = masterKeyOrResourceToken;
return this;
}
/**
* This method will accept the permission list , which contains the
* resource tokens needed to access resources.
*
* @param permissionFeed Permission list for authentication.
* @return current Builder.
*/
public Builder withPermissionFeed(List<Permission> permissionFeed) {
this.permissionFeed = permissionFeed;
return this;
}
public Builder withConsistencyLevel(ConsistencyLevel desiredConsistencyLevel) {
this.desiredConsistencyLevel = desiredConsistencyLevel;
return this;
}
public Builder withConfigs(Configs configs) {
this.configs = configs;
return this;
}
public Builder withSessionCapturingOverride(boolean sessionCapturingOverride) {
this.sessionCapturingOverride = sessionCapturingOverride;
return this;
}
public Builder withConnectionPolicy(ConnectionPolicy connectionPolicy) {
this.connectionPolicy = connectionPolicy;
return this;
}
public Builder withTransportClientSharing(boolean transportClientSharing) {
this.transportClientSharing = transportClientSharing;
return this;
}
public Builder withCosmosKeyCredential(CosmosKeyCredential cosmosKeyCredential) {
if (cosmosKeyCredential != null && StringUtils.isEmpty(cosmosKeyCredential.getKey())) {
throw new IllegalArgumentException("Cannot buildAsyncClient client with empty key credential");
}
this.cosmosKeyCredential = cosmosKeyCredential;
return this;
}
/**
* This method will accept functional interface TokenResolver which helps in generation authorization
* token per request. AsyncDocumentClient can be successfully initialized with this API without passing any MasterKey, ResourceToken or PermissionFeed.
* @param cosmosAuthorizationTokenResolver The tokenResolver
* @return current Builder.
*/
public Builder withTokenResolver(CosmosAuthorizationTokenResolver cosmosAuthorizationTokenResolver) {
this.cosmosAuthorizationTokenResolver = cosmosAuthorizationTokenResolver;
return this;
}
private void ifThrowIllegalArgException(boolean value, String error) {
if (value) {
throw new IllegalArgumentException(error);
}
}
public Configs getConfigs() {
return configs;
}
public void setConfigs(Configs configs) {
this.configs = configs;
}
public ConnectionPolicy getConnectionPolicy() {
return connectionPolicy;
}
public void setConnectionPolicy(ConnectionPolicy connectionPolicy) {
this.connectionPolicy = connectionPolicy;
}
public ConsistencyLevel getDesiredConsistencyLevel() {
return desiredConsistencyLevel;
}
public void setDesiredConsistencyLevel(ConsistencyLevel desiredConsistencyLevel) {
this.desiredConsistencyLevel = desiredConsistencyLevel;
}
public List<Permission> getPermissionFeed() {
return permissionFeed;
}
public void setPermissionFeed(List<Permission> permissionFeed) {
this.permissionFeed = permissionFeed;
}
public String getMasterKeyOrResourceToken() {
return masterKeyOrResourceToken;
}
public void setMasterKeyOrResourceToken(String masterKeyOrResourceToken) {
this.masterKeyOrResourceToken = masterKeyOrResourceToken;
}
public URI getServiceEndpoint() {
return serviceEndpoint;
}
public void setServiceEndpoint(URI serviceEndpoint) {
this.serviceEndpoint = serviceEndpoint;
}
public CosmosAuthorizationTokenResolver getCosmosAuthorizationTokenResolver() {
return cosmosAuthorizationTokenResolver;
}
public void setCosmosAuthorizationTokenResolver(CosmosAuthorizationTokenResolver cosmosAuthorizationTokenResolver) {
this.cosmosAuthorizationTokenResolver = cosmosAuthorizationTokenResolver;
}
public CosmosKeyCredential getCosmosKeyCredential() {
return cosmosKeyCredential;
}
} | class Builder {
Configs configs = new Configs();
ConnectionPolicy connectionPolicy;
ConsistencyLevel desiredConsistencyLevel;
List<Permission> permissionFeed;
String masterKeyOrResourceToken;
URI serviceEndpoint;
CosmosAuthorizationTokenResolver cosmosAuthorizationTokenResolver;
CosmosKeyCredential cosmosKeyCredential;
boolean sessionCapturingOverride;
boolean transportClientSharing;
public Builder withServiceEndpoint(String serviceEndpoint) {
try {
this.serviceEndpoint = new URI(serviceEndpoint);
} catch (URISyntaxException e) {
throw new IllegalArgumentException(e.getMessage());
}
return this;
}
/**
* New method withMasterKeyOrResourceToken will take either master key or resource token
* and perform authentication for accessing resource.
*
* @param masterKeyOrResourceToken MasterKey or resourceToken for authentication.
* @return current Builder.
* @deprecated use {@link
*/
@Deprecated
public Builder withMasterKey(String masterKeyOrResourceToken) {
this.masterKeyOrResourceToken = masterKeyOrResourceToken;
return this;
}
/**
* This method will accept the master key , additionally it can also consume
* resource token too for authentication.
*
* @param masterKeyOrResourceToken MasterKey or resourceToken for authentication.
* @return current Builder.
*/
public Builder withMasterKeyOrResourceToken(String masterKeyOrResourceToken) {
this.masterKeyOrResourceToken = masterKeyOrResourceToken;
return this;
}
/**
* This method will accept the permission list , which contains the
* resource tokens needed to access resources.
*
* @param permissionFeed Permission list for authentication.
* @return current Builder.
*/
public Builder withPermissionFeed(List<Permission> permissionFeed) {
this.permissionFeed = permissionFeed;
return this;
}
public Builder withConsistencyLevel(ConsistencyLevel desiredConsistencyLevel) {
this.desiredConsistencyLevel = desiredConsistencyLevel;
return this;
}
public Builder withConfigs(Configs configs) {
this.configs = configs;
return this;
}
public Builder withSessionCapturingOverride(boolean sessionCapturingOverride) {
this.sessionCapturingOverride = sessionCapturingOverride;
return this;
}
public Builder withConnectionPolicy(ConnectionPolicy connectionPolicy) {
this.connectionPolicy = connectionPolicy;
return this;
}
public Builder withTransportClientSharing(boolean transportClientSharing) {
this.transportClientSharing = transportClientSharing;
return this;
}
public Builder withCosmosKeyCredential(CosmosKeyCredential cosmosKeyCredential) {
if (cosmosKeyCredential != null && StringUtils.isEmpty(cosmosKeyCredential.getKey())) {
throw new IllegalArgumentException("Cannot buildAsyncClient client with empty key credential");
}
this.cosmosKeyCredential = cosmosKeyCredential;
return this;
}
/**
* This method will accept functional interface TokenResolver which helps in generation authorization
* token per request. AsyncDocumentClient can be successfully initialized with this API without passing any MasterKey, ResourceToken or PermissionFeed.
* @param cosmosAuthorizationTokenResolver The tokenResolver
* @return current Builder.
*/
public Builder withTokenResolver(CosmosAuthorizationTokenResolver cosmosAuthorizationTokenResolver) {
this.cosmosAuthorizationTokenResolver = cosmosAuthorizationTokenResolver;
return this;
}
private void ifThrowIllegalArgException(boolean value, String error) {
if (value) {
throw new IllegalArgumentException(error);
}
}
public Configs getConfigs() {
return configs;
}
public void setConfigs(Configs configs) {
this.configs = configs;
}
public ConnectionPolicy getConnectionPolicy() {
return connectionPolicy;
}
public void setConnectionPolicy(ConnectionPolicy connectionPolicy) {
this.connectionPolicy = connectionPolicy;
}
public ConsistencyLevel getDesiredConsistencyLevel() {
return desiredConsistencyLevel;
}
public void setDesiredConsistencyLevel(ConsistencyLevel desiredConsistencyLevel) {
this.desiredConsistencyLevel = desiredConsistencyLevel;
}
public List<Permission> getPermissionFeed() {
return permissionFeed;
}
public void setPermissionFeed(List<Permission> permissionFeed) {
this.permissionFeed = permissionFeed;
}
public String getMasterKeyOrResourceToken() {
return masterKeyOrResourceToken;
}
public void setMasterKeyOrResourceToken(String masterKeyOrResourceToken) {
this.masterKeyOrResourceToken = masterKeyOrResourceToken;
}
public URI getServiceEndpoint() {
return serviceEndpoint;
}
public void setServiceEndpoint(URI serviceEndpoint) {
this.serviceEndpoint = serviceEndpoint;
}
public CosmosAuthorizationTokenResolver getCosmosAuthorizationTokenResolver() {
return cosmosAuthorizationTokenResolver;
}
public void setCosmosAuthorizationTokenResolver(CosmosAuthorizationTokenResolver cosmosAuthorizationTokenResolver) {
this.cosmosAuthorizationTokenResolver = cosmosAuthorizationTokenResolver;
}
public CosmosKeyCredential getCosmosKeyCredential() {
return cosmosKeyCredential;
}
} |
addressed. | public AsyncDocumentClient build() {
ifThrowIllegalArgException(this.serviceEndpoint == null, "cannot buildAsyncClient client without service endpoint");
ifThrowIllegalArgException(
this.masterKeyOrResourceToken == null && (permissionFeed == null || permissionFeed.isEmpty())
&& this.cosmosAuthorizationTokenResolver == null && this.cosmosKeyCredential == null,
"cannot buildAsyncClient client without any one of masterKey, " +
"resource token, permissionFeed, tokenResolver and cosmos key credential");
ifThrowIllegalArgException(cosmosKeyCredential != null && StringUtils.isEmpty(cosmosKeyCredential.getKey()),
"cannot buildAsyncClient client without key credential");
RxDocumentClientImpl client = new RxDocumentClientImpl(serviceEndpoint,
masterKeyOrResourceToken,
permissionFeed,
connectionPolicy,
desiredConsistencyLevel,
configs,
cosmosAuthorizationTokenResolver,
cosmosKeyCredential,
sessionCapturingOverride,
transportClientSharing);
client.init();
return client;
} | cosmosAuthorizationTokenResolver, | public AsyncDocumentClient build() {
ifThrowIllegalArgException(this.serviceEndpoint == null, "cannot buildAsyncClient client without service endpoint");
ifThrowIllegalArgException(
this.masterKeyOrResourceToken == null && (permissionFeed == null || permissionFeed.isEmpty())
&& this.cosmosAuthorizationTokenResolver == null && this.cosmosKeyCredential == null,
"cannot buildAsyncClient client without any one of masterKey, " +
"resource token, permissionFeed, tokenResolver and cosmos key credential");
ifThrowIllegalArgException(cosmosKeyCredential != null && StringUtils.isEmpty(cosmosKeyCredential.getKey()),
"cannot buildAsyncClient client without key credential");
RxDocumentClientImpl client = new RxDocumentClientImpl(serviceEndpoint,
masterKeyOrResourceToken,
permissionFeed,
connectionPolicy,
desiredConsistencyLevel,
configs,
cosmosAuthorizationTokenResolver,
cosmosKeyCredential,
sessionCapturingOverride,
transportClientSharing);
client.init();
return client;
} | class Builder {
Configs configs = new Configs();
ConnectionPolicy connectionPolicy;
ConsistencyLevel desiredConsistencyLevel;
List<Permission> permissionFeed;
String masterKeyOrResourceToken;
URI serviceEndpoint;
CosmosAuthorizationTokenResolver cosmosAuthorizationTokenResolver;
CosmosKeyCredential cosmosKeyCredential;
boolean sessionCapturingOverride;
boolean transportClientSharing;
public Builder withServiceEndpoint(String serviceEndpoint) {
try {
this.serviceEndpoint = new URI(serviceEndpoint);
} catch (URISyntaxException e) {
throw new IllegalArgumentException(e.getMessage());
}
return this;
}
/**
* New method withMasterKeyOrResourceToken will take either master key or resource token
* and perform authentication for accessing resource.
*
* @param masterKeyOrResourceToken MasterKey or resourceToken for authentication.
* @return current Builder.
* @deprecated use {@link
*/
@Deprecated
public Builder withMasterKey(String masterKeyOrResourceToken) {
this.masterKeyOrResourceToken = masterKeyOrResourceToken;
return this;
}
/**
* This method will accept the master key , additionally it can also consume
* resource token too for authentication.
*
* @param masterKeyOrResourceToken MasterKey or resourceToken for authentication.
* @return current Builder.
*/
public Builder withMasterKeyOrResourceToken(String masterKeyOrResourceToken) {
this.masterKeyOrResourceToken = masterKeyOrResourceToken;
return this;
}
/**
* This method will accept the permission list , which contains the
* resource tokens needed to access resources.
*
* @param permissionFeed Permission list for authentication.
* @return current Builder.
*/
public Builder withPermissionFeed(List<Permission> permissionFeed) {
this.permissionFeed = permissionFeed;
return this;
}
public Builder withConsistencyLevel(ConsistencyLevel desiredConsistencyLevel) {
this.desiredConsistencyLevel = desiredConsistencyLevel;
return this;
}
public Builder withConfigs(Configs configs) {
this.configs = configs;
return this;
}
public Builder withSessionCapturingOverride(boolean sessionCapturingOverride) {
this.sessionCapturingOverride = sessionCapturingOverride;
return this;
}
public Builder withConnectionPolicy(ConnectionPolicy connectionPolicy) {
this.connectionPolicy = connectionPolicy;
return this;
}
public Builder withTransportClientSharing(boolean transportClientSharing) {
this.transportClientSharing = transportClientSharing;
return this;
}
public Builder withCosmosKeyCredential(CosmosKeyCredential cosmosKeyCredential) {
if (cosmosKeyCredential != null && StringUtils.isEmpty(cosmosKeyCredential.getKey())) {
throw new IllegalArgumentException("Cannot buildAsyncClient client with empty key credential");
}
this.cosmosKeyCredential = cosmosKeyCredential;
return this;
}
/**
* This method will accept functional interface TokenResolver which helps in generation authorization
* token per request. AsyncDocumentClient can be successfully initialized with this API without passing any MasterKey, ResourceToken or PermissionFeed.
* @param cosmosAuthorizationTokenResolver The tokenResolver
* @return current Builder.
*/
public Builder withTokenResolver(CosmosAuthorizationTokenResolver cosmosAuthorizationTokenResolver) {
this.cosmosAuthorizationTokenResolver = cosmosAuthorizationTokenResolver;
return this;
}
private void ifThrowIllegalArgException(boolean value, String error) {
if (value) {
throw new IllegalArgumentException(error);
}
}
public Configs getConfigs() {
return configs;
}
public void setConfigs(Configs configs) {
this.configs = configs;
}
public ConnectionPolicy getConnectionPolicy() {
return connectionPolicy;
}
public void setConnectionPolicy(ConnectionPolicy connectionPolicy) {
this.connectionPolicy = connectionPolicy;
}
public ConsistencyLevel getDesiredConsistencyLevel() {
return desiredConsistencyLevel;
}
public void setDesiredConsistencyLevel(ConsistencyLevel desiredConsistencyLevel) {
this.desiredConsistencyLevel = desiredConsistencyLevel;
}
public List<Permission> getPermissionFeed() {
return permissionFeed;
}
public void setPermissionFeed(List<Permission> permissionFeed) {
this.permissionFeed = permissionFeed;
}
public String getMasterKeyOrResourceToken() {
return masterKeyOrResourceToken;
}
public void setMasterKeyOrResourceToken(String masterKeyOrResourceToken) {
this.masterKeyOrResourceToken = masterKeyOrResourceToken;
}
public URI getServiceEndpoint() {
return serviceEndpoint;
}
public void setServiceEndpoint(URI serviceEndpoint) {
this.serviceEndpoint = serviceEndpoint;
}
public CosmosAuthorizationTokenResolver getCosmosAuthorizationTokenResolver() {
return cosmosAuthorizationTokenResolver;
}
public void setCosmosAuthorizationTokenResolver(CosmosAuthorizationTokenResolver cosmosAuthorizationTokenResolver) {
this.cosmosAuthorizationTokenResolver = cosmosAuthorizationTokenResolver;
}
public CosmosKeyCredential getCosmosKeyCredential() {
return cosmosKeyCredential;
}
} | class Builder {
Configs configs = new Configs();
ConnectionPolicy connectionPolicy;
ConsistencyLevel desiredConsistencyLevel;
List<Permission> permissionFeed;
String masterKeyOrResourceToken;
URI serviceEndpoint;
CosmosAuthorizationTokenResolver cosmosAuthorizationTokenResolver;
CosmosKeyCredential cosmosKeyCredential;
boolean sessionCapturingOverride;
boolean transportClientSharing;
public Builder withServiceEndpoint(String serviceEndpoint) {
try {
this.serviceEndpoint = new URI(serviceEndpoint);
} catch (URISyntaxException e) {
throw new IllegalArgumentException(e.getMessage());
}
return this;
}
/**
* New method withMasterKeyOrResourceToken will take either master key or resource token
* and perform authentication for accessing resource.
*
* @param masterKeyOrResourceToken MasterKey or resourceToken for authentication.
* @return current Builder.
* @deprecated use {@link
*/
@Deprecated
public Builder withMasterKey(String masterKeyOrResourceToken) {
this.masterKeyOrResourceToken = masterKeyOrResourceToken;
return this;
}
/**
* This method will accept the master key , additionally it can also consume
* resource token too for authentication.
*
* @param masterKeyOrResourceToken MasterKey or resourceToken for authentication.
* @return current Builder.
*/
public Builder withMasterKeyOrResourceToken(String masterKeyOrResourceToken) {
this.masterKeyOrResourceToken = masterKeyOrResourceToken;
return this;
}
/**
* This method will accept the permission list , which contains the
* resource tokens needed to access resources.
*
* @param permissionFeed Permission list for authentication.
* @return current Builder.
*/
public Builder withPermissionFeed(List<Permission> permissionFeed) {
this.permissionFeed = permissionFeed;
return this;
}
public Builder withConsistencyLevel(ConsistencyLevel desiredConsistencyLevel) {
this.desiredConsistencyLevel = desiredConsistencyLevel;
return this;
}
public Builder withConfigs(Configs configs) {
this.configs = configs;
return this;
}
public Builder withSessionCapturingOverride(boolean sessionCapturingOverride) {
this.sessionCapturingOverride = sessionCapturingOverride;
return this;
}
public Builder withConnectionPolicy(ConnectionPolicy connectionPolicy) {
this.connectionPolicy = connectionPolicy;
return this;
}
public Builder withTransportClientSharing(boolean transportClientSharing) {
this.transportClientSharing = transportClientSharing;
return this;
}
public Builder withCosmosKeyCredential(CosmosKeyCredential cosmosKeyCredential) {
if (cosmosKeyCredential != null && StringUtils.isEmpty(cosmosKeyCredential.getKey())) {
throw new IllegalArgumentException("Cannot buildAsyncClient client with empty key credential");
}
this.cosmosKeyCredential = cosmosKeyCredential;
return this;
}
/**
* This method will accept functional interface TokenResolver which helps in generation authorization
* token per request. AsyncDocumentClient can be successfully initialized with this API without passing any MasterKey, ResourceToken or PermissionFeed.
* @param cosmosAuthorizationTokenResolver The tokenResolver
* @return current Builder.
*/
public Builder withTokenResolver(CosmosAuthorizationTokenResolver cosmosAuthorizationTokenResolver) {
this.cosmosAuthorizationTokenResolver = cosmosAuthorizationTokenResolver;
return this;
}
private void ifThrowIllegalArgException(boolean value, String error) {
if (value) {
throw new IllegalArgumentException(error);
}
}
public Configs getConfigs() {
return configs;
}
public void setConfigs(Configs configs) {
this.configs = configs;
}
public ConnectionPolicy getConnectionPolicy() {
return connectionPolicy;
}
public void setConnectionPolicy(ConnectionPolicy connectionPolicy) {
this.connectionPolicy = connectionPolicy;
}
public ConsistencyLevel getDesiredConsistencyLevel() {
return desiredConsistencyLevel;
}
public void setDesiredConsistencyLevel(ConsistencyLevel desiredConsistencyLevel) {
this.desiredConsistencyLevel = desiredConsistencyLevel;
}
public List<Permission> getPermissionFeed() {
return permissionFeed;
}
public void setPermissionFeed(List<Permission> permissionFeed) {
this.permissionFeed = permissionFeed;
}
public String getMasterKeyOrResourceToken() {
return masterKeyOrResourceToken;
}
public void setMasterKeyOrResourceToken(String masterKeyOrResourceToken) {
this.masterKeyOrResourceToken = masterKeyOrResourceToken;
}
public URI getServiceEndpoint() {
return serviceEndpoint;
}
public void setServiceEndpoint(URI serviceEndpoint) {
this.serviceEndpoint = serviceEndpoint;
}
public CosmosAuthorizationTokenResolver getCosmosAuthorizationTokenResolver() {
return cosmosAuthorizationTokenResolver;
}
public void setCosmosAuthorizationTokenResolver(CosmosAuthorizationTokenResolver cosmosAuthorizationTokenResolver) {
this.cosmosAuthorizationTokenResolver = cosmosAuthorizationTokenResolver;
}
public CosmosKeyCredential getCosmosKeyCredential() {
return cosmosKeyCredential;
}
} |
fyi - I am not removing this onBeforeSendRequest , moving it early in the stack to getCreateDocumentRequest method , ensuring we are capturing meta data (collection calls) in retries | private Mono<RxDocumentServiceResponse> replace(RxDocumentServiceRequest request, DocumentClientRetryPolicy documentClientRetryPolicy) {
populateHeaders(request, RequestVerb.PUT);
if(request.requestContext != null && documentClientRetryPolicy.getRetryCount() > 0) {
documentClientRetryPolicy.updateEndTime();
request.requestContext.updateRetryContext(documentClientRetryPolicy, true);
}
return getStoreProxy(request).processMessage(request);
} | return getStoreProxy(request).processMessage(request); | private Mono<RxDocumentServiceResponse> replace(RxDocumentServiceRequest request, DocumentClientRetryPolicy documentClientRetryPolicy) {
populateHeaders(request, RequestVerb.PUT);
if(request.requestContext != null && documentClientRetryPolicy.getRetryCount() > 0) {
documentClientRetryPolicy.updateEndTime();
request.requestContext.updateRetryContext(documentClientRetryPolicy, true);
}
return getStoreProxy(request).processMessage(request);
} | class RxDocumentClientImpl implements AsyncDocumentClient, IAuthorizationTokenProvider {
private final static ObjectMapper mapper = Utils.getSimpleObjectMapper();
private final Logger logger = LoggerFactory.getLogger(RxDocumentClientImpl.class);
private final String masterKeyOrResourceToken;
private final URI serviceEndpoint;
private final ConnectionPolicy connectionPolicy;
private final ConsistencyLevel consistencyLevel;
private final BaseAuthorizationTokenProvider authorizationTokenProvider;
private final UserAgentContainer userAgentContainer;
private final boolean hasAuthKeyResourceToken;
private final Configs configs;
private final boolean connectionSharingAcrossClientsEnabled;
private CosmosKeyCredential cosmosKeyCredential;
private TokenResolver tokenResolver;
private SessionContainer sessionContainer;
private String firstResourceTokenFromPermissionFeed = StringUtils.EMPTY;
private RxClientCollectionCache collectionCache;
private RxStoreModel gatewayProxy;
private RxStoreModel storeModel;
private GlobalAddressResolver addressResolver;
private RxPartitionKeyRangeCache partitionKeyRangeCache;
private Map<String, List<PartitionKeyAndResourceTokenPair>> resourceTokensMap;
private IRetryPolicyFactory resetSessionTokenRetryPolicy;
/**
* Compatibility mode: Allows to specify compatibility mode used by client when
* making query requests. Should be removed when application/sql is no longer
* supported.
*/
private final QueryCompatibilityMode queryCompatibilityMode = QueryCompatibilityMode.Default;
private final HttpClient reactorHttpClient;
private final GlobalEndpointManager globalEndpointManager;
private final RetryPolicy retryPolicy;
private volatile boolean useMultipleWriteLocations;
private StoreClientFactory storeClientFactory;
private GatewayServiceConfigurationReader gatewayConfigurationReader;
public RxDocumentClientImpl(URI serviceEndpoint,
String masterKeyOrResourceToken,
List<Permission> permissionFeed,
ConnectionPolicy connectionPolicy,
ConsistencyLevel consistencyLevel,
Configs configs,
TokenResolver tokenResolver,
CosmosKeyCredential cosmosKeyCredential,
boolean sessionCapturingOverride,
boolean connectionSharingAcrossClientsEnabled) {
this(serviceEndpoint, masterKeyOrResourceToken, permissionFeed, connectionPolicy, consistencyLevel, configs, cosmosKeyCredential, sessionCapturingOverride, connectionSharingAcrossClientsEnabled);
this.tokenResolver = tokenResolver;
}
private RxDocumentClientImpl(URI serviceEndpoint,
String masterKeyOrResourceToken,
List<Permission> permissionFeed,
ConnectionPolicy connectionPolicy,
ConsistencyLevel consistencyLevel,
Configs configs,
CosmosKeyCredential cosmosKeyCredential,
boolean sessionCapturingOverrideEnabled,
boolean connectionSharingAcrossClientsEnabled) {
this(serviceEndpoint, masterKeyOrResourceToken, connectionPolicy, consistencyLevel, configs, cosmosKeyCredential, sessionCapturingOverrideEnabled, connectionSharingAcrossClientsEnabled);
if (permissionFeed != null && permissionFeed.size() > 0) {
this.resourceTokensMap = new HashMap<>();
for (Permission permission : permissionFeed) {
String[] segments = StringUtils.split(permission.getResourceLink(),
Constants.Properties.PATH_SEPARATOR.charAt(0));
if (segments.length <= 0) {
throw new IllegalArgumentException("resourceLink");
}
List<PartitionKeyAndResourceTokenPair> partitionKeyAndResourceTokenPairs = null;
PathInfo pathInfo = new PathInfo(false, StringUtils.EMPTY, StringUtils.EMPTY, false);
if (!PathsHelper.tryParsePathSegments(permission.getResourceLink(), pathInfo, null)) {
throw new IllegalArgumentException(permission.getResourceLink());
}
partitionKeyAndResourceTokenPairs = resourceTokensMap.get(pathInfo.resourceIdOrFullName);
if (partitionKeyAndResourceTokenPairs == null) {
partitionKeyAndResourceTokenPairs = new ArrayList<>();
this.resourceTokensMap.put(pathInfo.resourceIdOrFullName, partitionKeyAndResourceTokenPairs);
}
PartitionKey partitionKey = permission.getResourcePartitionKey();
partitionKeyAndResourceTokenPairs.add(new PartitionKeyAndResourceTokenPair(
partitionKey != null ? BridgeInternal.getPartitionKeyInternal(partitionKey) : PartitionKeyInternal.Empty,
permission.getToken()));
logger.debug("Initializing resource token map , with map key [{}] , partition key [{}] and resource token",
pathInfo.resourceIdOrFullName, partitionKey != null ? partitionKey.toString() : null, permission.getToken());
}
if(this.resourceTokensMap.isEmpty()) {
throw new IllegalArgumentException("permissionFeed");
}
String firstToken = permissionFeed.get(0).getToken();
if(ResourceTokenAuthorizationHelper.isResourceToken(firstToken)) {
this.firstResourceTokenFromPermissionFeed = firstToken;
}
}
}
RxDocumentClientImpl(URI serviceEndpoint,
String masterKeyOrResourceToken,
ConnectionPolicy connectionPolicy,
ConsistencyLevel consistencyLevel,
Configs configs,
CosmosKeyCredential cosmosKeyCredential,
boolean sessionCapturingOverrideEnabled,
boolean connectionSharingAcrossClientsEnabled) {
logger.info(
"Initializing DocumentClient with"
+ " serviceEndpoint [{}], connectionPolicy [{}], consistencyLevel [{}], directModeProtocol [{}]",
serviceEndpoint, connectionPolicy, consistencyLevel, configs.getProtocol());
this.connectionSharingAcrossClientsEnabled = connectionSharingAcrossClientsEnabled;
this.configs = configs;
this.masterKeyOrResourceToken = masterKeyOrResourceToken;
this.serviceEndpoint = serviceEndpoint;
this.cosmosKeyCredential = cosmosKeyCredential;
if (this.cosmosKeyCredential != null) {
hasAuthKeyResourceToken = false;
this.authorizationTokenProvider = new BaseAuthorizationTokenProvider(this.cosmosKeyCredential);
} else if (masterKeyOrResourceToken != null && ResourceTokenAuthorizationHelper.isResourceToken(masterKeyOrResourceToken)) {
this.authorizationTokenProvider = null;
hasAuthKeyResourceToken = true;
} else if(masterKeyOrResourceToken != null && !ResourceTokenAuthorizationHelper.isResourceToken(masterKeyOrResourceToken)){
this.cosmosKeyCredential = new CosmosKeyCredential(this.masterKeyOrResourceToken);
hasAuthKeyResourceToken = false;
this.authorizationTokenProvider = new BaseAuthorizationTokenProvider(this.cosmosKeyCredential);
} else {
hasAuthKeyResourceToken = false;
this.authorizationTokenProvider = null;
}
if (connectionPolicy != null) {
this.connectionPolicy = connectionPolicy;
} else {
this.connectionPolicy = new ConnectionPolicy();
}
boolean disableSessionCapturing = (ConsistencyLevel.SESSION != consistencyLevel && !sessionCapturingOverrideEnabled);
this.sessionContainer = new SessionContainer(this.serviceEndpoint.getHost(), disableSessionCapturing);
this.consistencyLevel = consistencyLevel;
this.userAgentContainer = new UserAgentContainer();
String userAgentSuffix = this.connectionPolicy.getUserAgentSuffix();
if (userAgentSuffix != null && userAgentSuffix.length() > 0) {
userAgentContainer.setSuffix(userAgentSuffix);
}
this.reactorHttpClient = httpClient();
this.globalEndpointManager = new GlobalEndpointManager(asDatabaseAccountManagerInternal(), this.connectionPolicy, /**/configs);
this.retryPolicy = new RetryPolicy(this.globalEndpointManager, this.connectionPolicy);
this.resetSessionTokenRetryPolicy = retryPolicy;
}
private void initializeGatewayConfigurationReader() {
String resourceToken;
if(this.tokenResolver != null) {
resourceToken = this.tokenResolver.getAuthorizationToken(RequestVerb.GET, "", CosmosResourceType.System, null);
} else if(!this.hasAuthKeyResourceToken && this.authorizationTokenProvider == null) {
resourceToken = this.firstResourceTokenFromPermissionFeed;
} else {
assert this.masterKeyOrResourceToken != null || this.cosmosKeyCredential != null;
resourceToken = this.masterKeyOrResourceToken;
}
this.gatewayConfigurationReader = new GatewayServiceConfigurationReader(this.serviceEndpoint,
this.hasAuthKeyResourceToken,
resourceToken,
this.connectionPolicy,
this.authorizationTokenProvider,
this.reactorHttpClient);
DatabaseAccount databaseAccount = this.gatewayConfigurationReader.initializeReaderAsync().block();
this.useMultipleWriteLocations = this.connectionPolicy.getUsingMultipleWriteLocations() && BridgeInternal.isEnableMultipleWriteLocations(databaseAccount);
this.globalEndpointManager.refreshLocationAsync(databaseAccount, false).block();
}
public void init() {
this.gatewayProxy = createRxGatewayProxy(this.sessionContainer,
this.consistencyLevel,
this.queryCompatibilityMode,
this.userAgentContainer,
this.globalEndpointManager,
this.reactorHttpClient);
this.globalEndpointManager.init();
this.initializeGatewayConfigurationReader();
this.collectionCache = new RxClientCollectionCache(this.sessionContainer, this.gatewayProxy, this, this.retryPolicy);
this.resetSessionTokenRetryPolicy = new ResetSessionTokenRetryPolicyFactory(this.sessionContainer, this.collectionCache, this.retryPolicy);
this.partitionKeyRangeCache = new RxPartitionKeyRangeCache(RxDocumentClientImpl.this,
collectionCache);
if (this.connectionPolicy.getConnectionMode() == ConnectionMode.GATEWAY) {
this.storeModel = this.gatewayProxy;
} else {
this.initializeDirectConnectivity();
}
}
private void initializeDirectConnectivity() {
this.storeClientFactory = new StoreClientFactory(
this.configs,
this.connectionPolicy.getRequestTimeoutInMillis() / 1000,
0,
this.userAgentContainer,
this.connectionSharingAcrossClientsEnabled
);
this.addressResolver = new GlobalAddressResolver(
this.reactorHttpClient,
this.globalEndpointManager,
this.configs.getProtocol(),
this,
this.collectionCache,
this.partitionKeyRangeCache,
userAgentContainer,
null,
this.connectionPolicy);
this.createStoreModel(true);
}
// Exposes this client to the endpoint-management layer as a
// DatabaseAccountManagerInternal, delegating every call back to this instance.
DatabaseAccountManagerInternal asDatabaseAccountManagerInternal() {
return new DatabaseAccountManagerInternal() {
@Override
public URI getServiceEndpoint() {
return RxDocumentClientImpl.this.getServiceEndpoint();
}
@Override
public Flux<DatabaseAccount> getDatabaseAccountFromEndpoint(URI endpoint) {
logger.info("Getting database account endpoint from {}", endpoint);
return RxDocumentClientImpl.this.getDatabaseAccountFromEndpoint(endpoint);
}
@Override
public ConnectionPolicy getConnectionPolicy() {
return RxDocumentClientImpl.this.getConnectionPolicy();
}
};
}
// Factory for the gateway store model; package-private so tests can override it.
RxGatewayStoreModel createRxGatewayProxy(ISessionContainer sessionContainer,
ConsistencyLevel consistencyLevel,
QueryCompatibilityMode queryCompatibilityMode,
UserAgentContainer userAgentContainer,
GlobalEndpointManager globalEndpointManager,
HttpClient httpClient) {
return new RxGatewayStoreModel(sessionContainer,
consistencyLevel,
queryCompatibilityMode,
userAgentContainer,
globalEndpointManager,
httpClient);
}
/**
 * Builds the gateway HTTP client from the current connection policy.
 * When connection sharing across clients is enabled, a process-wide shared
 * instance is returned instead of a client-private one.
 */
private HttpClient httpClient() {
    final HttpClientConfig config = new HttpClientConfig(this.configs)
            .withMaxIdleConnectionTimeoutInMillis(this.connectionPolicy.getIdleConnectionTimeoutInMillis())
            .withPoolSize(this.connectionPolicy.getMaxPoolSize())
            .withHttpProxy(this.connectionPolicy.getProxy())
            .withRequestTimeoutInMillis(this.connectionPolicy.getRequestTimeoutInMillis());
    return connectionSharingAcrossClientsEnabled
            ? SharedGatewayHttpClient.getOrCreateInstance(config)
            : HttpClient.createFixed(config);
}
// Builds the direct-mode server store model from the store-client factory.
// NOTE(review): the subscribeRntbdStatus parameter is not used in this body —
// confirm whether it is still needed or can be removed at the call site.
private void createStoreModel(boolean subscribeRntbdStatus) {
StoreClient storeClient = this.storeClientFactory.createStoreClient(
this.addressResolver,
this.sessionContainer,
this.gatewayConfigurationReader,
this,
false
);
this.storeModel = new ServerStoreModel(storeClient);
}
// Returns the account's service endpoint this client was constructed with.
@Override
public URI getServiceEndpoint() {
return this.serviceEndpoint;
}
/**
 * Returns the first (preferred) write region endpoint, or {@code null} when
 * the endpoint manager has not resolved any write endpoints yet.
 */
@Override
public URI getWriteEndpoint() {
    for (URI endpoint : globalEndpointManager.getWriteEndpoints()) {
        return endpoint;
    }
    return null;
}
/**
 * Returns the first (preferred) read region endpoint, or {@code null} when
 * the endpoint manager has not resolved any read endpoints yet.
 */
@Override
public URI getReadEndpoint() {
    for (URI endpoint : globalEndpointManager.getReadEndpoints()) {
        return endpoint;
    }
    return null;
}
// Returns the connection policy this client was constructed with.
@Override
public ConnectionPolicy getConnectionPolicy() {
return this.connectionPolicy;
}
// Public entry point: creates a database, wrapping the internal call in the
// session-token-reset retry policy.
@Override
public Mono<ResourceResponse<Database>> createDatabase(Database database, RequestOptions options) {
DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy();
return ObservableHelper.inlineIfPossibleAsObs(() -> createDatabaseInternal(database, options, retryPolicyInstance), retryPolicyInstance);
}
// Builds and issues the Create-Database request.
// Throws IllegalArgumentException (as a Mono error) for a null database or an
// id containing illegal characters.
private Mono<ResourceResponse<Database>> createDatabaseInternal(Database database, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (database == null) {
throw new IllegalArgumentException("Database");
}
logger.debug("Creating a Database. id: [{}]", database.getId());
validateResource(database);
Map<String, String> requestHeaders = this.getRequestHeaders(options);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Create,
ResourceType.Database, Paths.DATABASES_ROOT, database, requestHeaders, options);
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.create(request, retryPolicyInstance).map(response -> toResourceResponse(response, Database.class));
} catch (Exception e) {
// Synchronous failures are surfaced on the reactive path, not thrown.
logger.debug("Failure in creating a database. due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
// Public entry point: deletes a database by link, with retry handling.
@Override
public Mono<ResourceResponse<Database>> deleteDatabase(String databaseLink, RequestOptions options) {
DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy();
return ObservableHelper.inlineIfPossibleAsObs(() -> deleteDatabaseInternal(databaseLink, options, retryPolicyInstance), retryPolicyInstance);
}
// Builds and issues the Delete-Database request; empty link is rejected as a
// Mono error.
private Mono<ResourceResponse<Database>> deleteDatabaseInternal(String databaseLink, RequestOptions options,
DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (StringUtils.isEmpty(databaseLink)) {
throw new IllegalArgumentException("databaseLink");
}
logger.debug("Deleting a Database. databaseLink: [{}]", databaseLink);
String path = Utils.joinPath(databaseLink, null);
Map<String, String> requestHeaders = this.getRequestHeaders(options);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Delete,
ResourceType.Database, path, requestHeaders, options);
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.delete(request, retryPolicyInstance).map(response -> toResourceResponse(response, Database.class));
} catch (Exception e) {
logger.debug("Failure in deleting a database. due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
// Public entry point: reads a database by link, with retry handling.
@Override
public Mono<ResourceResponse<Database>> readDatabase(String databaseLink, RequestOptions options) {
DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy();
return ObservableHelper.inlineIfPossibleAsObs(() -> readDatabaseInternal(databaseLink, options, retryPolicyInstance), retryPolicyInstance);
}
// Builds and issues the Read-Database request; empty link is rejected as a
// Mono error.
private Mono<ResourceResponse<Database>> readDatabaseInternal(String databaseLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (StringUtils.isEmpty(databaseLink)) {
throw new IllegalArgumentException("databaseLink");
}
logger.debug("Reading a Database. databaseLink: [{}]", databaseLink);
String path = Utils.joinPath(databaseLink, null);
Map<String, String> requestHeaders = this.getRequestHeaders(options);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Read,
ResourceType.Database, path, requestHeaders, options);
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, Database.class));
} catch (Exception e) {
logger.debug("Failure in reading a database. due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
// Reads all databases in the account as a paged feed.
@Override
public Flux<FeedResponse<Database>> readDatabases(FeedOptions options) {
return readFeed(options, ResourceType.Database, Database.class, Paths.DATABASES_ROOT);
}
// Maps a parent resource link plus a child resource type to the feed/query
// link for that child collection (e.g. a collection link + Document ->
// ".../docs"). Account-scoped types (Database, Offer) ignore the parent link.
private String parentResourceLinkToQueryLink(String parentResouceLink, ResourceType resourceTypeEnum) {
switch (resourceTypeEnum) {
case Database:
return Paths.DATABASES_ROOT;
case DocumentCollection:
return Utils.joinPath(parentResouceLink, Paths.COLLECTIONS_PATH_SEGMENT);
case Document:
return Utils.joinPath(parentResouceLink, Paths.DOCUMENTS_PATH_SEGMENT);
case Offer:
return Paths.OFFERS_ROOT;
case User:
return Utils.joinPath(parentResouceLink, Paths.USERS_PATH_SEGMENT);
case Permission:
return Utils.joinPath(parentResouceLink, Paths.PERMISSIONS_PATH_SEGMENT);
case Attachment:
return Utils.joinPath(parentResouceLink, Paths.ATTACHMENTS_PATH_SEGMENT);
case StoredProcedure:
return Utils.joinPath(parentResouceLink, Paths.STORED_PROCEDURES_PATH_SEGMENT);
case Trigger:
return Utils.joinPath(parentResouceLink, Paths.TRIGGERS_PATH_SEGMENT);
case UserDefinedFunction:
return Utils.joinPath(parentResouceLink, Paths.USER_DEFINED_FUNCTIONS_PATH_SEGMENT);
default:
throw new IllegalArgumentException("resource type not supported");
}
}
// Shared implementation behind all query* methods: resolves the feed link for
// the target resource type, creates a query execution context and streams its
// paged results. A fresh activity id correlates all pages of one query.
private <T extends Resource> Flux<FeedResponse<T>> createQuery(
String parentResourceLink,
SqlQuerySpec sqlQuery,
FeedOptions options,
Class<T> klass,
ResourceType resourceTypeEnum) {
String queryResourceLink = parentResourceLinkToQueryLink(parentResourceLink, resourceTypeEnum);
UUID activityId = Utils.randomUUID();
IDocumentQueryClient queryClient = DocumentQueryClientImpl(RxDocumentClientImpl.this);
Flux<? extends IDocumentQueryExecutionContext<T>> executionContext =
DocumentQueryExecutionContextFactory.createDocumentQueryExecutionContextAsync(queryClient, resourceTypeEnum, klass, sqlQuery , options, queryResourceLink, false, activityId);
return executionContext.flatMap(IDocumentQueryExecutionContext::executeAsync);
}
// Convenience overload: wraps the raw query text in a SqlQuerySpec.
@Override
public Flux<FeedResponse<Database>> queryDatabases(String query, FeedOptions options) {
return queryDatabases(new SqlQuerySpec(query), options);
}
// Queries databases at the account root.
@Override
public Flux<FeedResponse<Database>> queryDatabases(SqlQuerySpec querySpec, FeedOptions options) {
return createQuery(Paths.DATABASES_ROOT, querySpec, options, Database.class, ResourceType.Database);
}
// Public entry point: creates a collection in the given database, with retry
// handling.
@Override
public Mono<ResourceResponse<DocumentCollection>> createCollection(String databaseLink,
DocumentCollection collection, RequestOptions options) {
DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy();
return ObservableHelper.inlineIfPossibleAsObs(() -> this.createCollectionInternal(databaseLink, collection, options, retryPolicyInstance), retryPolicyInstance);
}
// Builds and issues the Create-Collection request. On success the session
// token from the response is recorded so subsequent session-consistent reads
// can observe the new collection.
private Mono<ResourceResponse<DocumentCollection>> createCollectionInternal(String databaseLink,
DocumentCollection collection, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (StringUtils.isEmpty(databaseLink)) {
throw new IllegalArgumentException("databaseLink");
}
if (collection == null) {
throw new IllegalArgumentException("collection");
}
logger.debug("Creating a Collection. databaseLink: [{}], Collection id: [{}]", databaseLink,
collection.getId());
validateResource(collection);
String path = Utils.joinPath(databaseLink, Paths.COLLECTIONS_PATH_SEGMENT);
Map<String, String> requestHeaders = this.getRequestHeaders(options);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Create,
ResourceType.DocumentCollection, path, collection, requestHeaders, options);
if (retryPolicyInstance != null){
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.create(request, retryPolicyInstance).map(response -> toResourceResponse(response, DocumentCollection.class))
.doOnNext(resourceResponse -> {
// Record the session token under both rid and alt-link addressing.
this.sessionContainer.setSessionToken(resourceResponse.getResource().getResourceId(),
getAltLink(resourceResponse.getResource()),
resourceResponse.getResponseHeaders());
});
} catch (Exception e) {
logger.debug("Failure in creating a collection. due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
// Public entry point: replaces a collection definition, with retry handling.
@Override
public Mono<ResourceResponse<DocumentCollection>> replaceCollection(DocumentCollection collection,
RequestOptions options) {
DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy();
return ObservableHelper.inlineIfPossibleAsObs(() -> replaceCollectionInternal(collection, options, retryPolicyInstance), retryPolicyInstance);
}
// Builds and issues the Replace-Collection request; the target path comes from
// the collection's self link. Session token is recorded when a resource body
// is returned.
private Mono<ResourceResponse<DocumentCollection>> replaceCollectionInternal(DocumentCollection collection,
RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (collection == null) {
throw new IllegalArgumentException("collection");
}
logger.debug("Replacing a Collection. id: [{}]", collection.getId());
validateResource(collection);
String path = Utils.joinPath(collection.getSelfLink(), null);
Map<String, String> requestHeaders = this.getRequestHeaders(options);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Replace,
ResourceType.DocumentCollection, path, collection, requestHeaders, options);
if (retryPolicyInstance != null){
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.replace(request, retryPolicyInstance).map(response -> toResourceResponse(response, DocumentCollection.class))
.doOnNext(resourceResponse -> {
// Replace may return no body; only record the session token when present.
if (resourceResponse.getResource() != null) {
this.sessionContainer.setSessionToken(resourceResponse.getResource().getResourceId(),
getAltLink(resourceResponse.getResource()),
resourceResponse.getResponseHeaders());
}
});
} catch (Exception e) {
logger.debug("Failure in replacing a collection. due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
// Public entry point: deletes a collection by link, with retry handling.
@Override
public Mono<ResourceResponse<DocumentCollection>> deleteCollection(String collectionLink,
RequestOptions options) {
DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy();
return ObservableHelper.inlineIfPossibleAsObs(() -> deleteCollectionInternal(collectionLink, options, retryPolicyInstance), retryPolicyInstance);
}
// Builds and issues the Delete-Collection request; empty link is rejected as a
// Mono error.
private Mono<ResourceResponse<DocumentCollection>> deleteCollectionInternal(String collectionLink,
RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (StringUtils.isEmpty(collectionLink)) {
throw new IllegalArgumentException("collectionLink");
}
logger.debug("Deleting a Collection. collectionLink: [{}]", collectionLink);
String path = Utils.joinPath(collectionLink, null);
Map<String, String> requestHeaders = this.getRequestHeaders(options);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Delete,
ResourceType.DocumentCollection, path, requestHeaders, options);
if (retryPolicyInstance != null){
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.delete(request, retryPolicyInstance).map(response -> toResourceResponse(response, DocumentCollection.class));
} catch (Exception e) {
logger.debug("Failure in deleting a collection, due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
// Low-level DELETE: stamps auth/date headers, updates retry diagnostics on
// retries, and dispatches to the store proxy selected for this request.
private Mono<RxDocumentServiceResponse> delete(RxDocumentServiceRequest request, DocumentClientRetryPolicy documentClientRetryPolicy) {
populateHeaders(request, RequestVerb.DELETE);
if(request.requestContext != null && documentClientRetryPolicy.getRetryCount() > 0) {
documentClientRetryPolicy.updateEndTime();
request.requestContext.updateRetryContext(documentClientRetryPolicy, true);
}
return getStoreProxy(request).processMessage(request);
}
// Low-level GET: stamps auth/date headers, updates retry diagnostics on
// retries, and dispatches to the store proxy selected for this request.
private Mono<RxDocumentServiceResponse> read(RxDocumentServiceRequest request, DocumentClientRetryPolicy documentClientRetryPolicy) {
populateHeaders(request, RequestVerb.GET);
if(request.requestContext != null && documentClientRetryPolicy.getRetryCount() > 0) {
documentClientRetryPolicy.updateEndTime();
request.requestContext.updateRetryContext(documentClientRetryPolicy, true);
}
return getStoreProxy(request).processMessage(request);
}
// Low-level feed read; always routed through the gateway proxy (not the
// per-request store proxy).
Mono<RxDocumentServiceResponse> readFeed(RxDocumentServiceRequest request) {
populateHeaders(request, RequestVerb.GET);
return gatewayProxy.processMessage(request);
}
// Low-level query (POST): dispatches to the store proxy and records the
// response's session token for session consistency.
private Mono<RxDocumentServiceResponse> query(RxDocumentServiceRequest request) {
populateHeaders(request, RequestVerb.POST);
return this.getStoreProxy(request).processMessage(request)
.map(response -> {
this.captureSessionToken(request, response);
return response;
}
);
}
// Public entry point: reads a collection by link, with retry handling.
@Override
public Mono<ResourceResponse<DocumentCollection>> readCollection(String collectionLink,
RequestOptions options) {
DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy();
return ObservableHelper.inlineIfPossibleAsObs(() -> readCollectionInternal(collectionLink, options, retryPolicyInstance), retryPolicyInstance);
}
// Builds and issues the Read-Collection request; empty link is rejected as a
// Mono error.
private Mono<ResourceResponse<DocumentCollection>> readCollectionInternal(String collectionLink,
RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (StringUtils.isEmpty(collectionLink)) {
throw new IllegalArgumentException("collectionLink");
}
logger.debug("Reading a Collection. collectionLink: [{}]", collectionLink);
String path = Utils.joinPath(collectionLink, null);
Map<String, String> requestHeaders = this.getRequestHeaders(options);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Read,
ResourceType.DocumentCollection, path, requestHeaders, options);
if (retryPolicyInstance != null){
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, DocumentCollection.class));
} catch (Exception e) {
logger.debug("Failure in reading a collection, due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
// Reads all collections of a database as a paged feed.
@Override
public Flux<FeedResponse<DocumentCollection>> readCollections(String databaseLink, FeedOptions options) {
if (StringUtils.isEmpty(databaseLink)) {
throw new IllegalArgumentException("databaseLink");
}
return readFeed(options, ResourceType.DocumentCollection, DocumentCollection.class,
Utils.joinPath(databaseLink, Paths.COLLECTIONS_PATH_SEGMENT));
}
// Convenience overload: wraps raw query text in a SqlQuerySpec.
@Override
public Flux<FeedResponse<DocumentCollection>> queryCollections(String databaseLink, String query,
FeedOptions options) {
return createQuery(databaseLink, new SqlQuerySpec(query), options, DocumentCollection.class, ResourceType.DocumentCollection);
}
// Queries the collections of a database.
@Override
public Flux<FeedResponse<DocumentCollection>> queryCollections(String databaseLink,
SqlQuerySpec querySpec, FeedOptions options) {
return createQuery(databaseLink, querySpec, options, DocumentCollection.class, ResourceType.DocumentCollection);
}
/**
 * Serializes stored-procedure arguments into a single JSON array literal,
 * e.g. {@code [1,"a",{"x":2}]}. JsonSerializable values use their own
 * serializer; everything else goes through the shared Jackson mapper.
 *
 * @throws IllegalArgumentException when a value cannot be serialized
 */
private static String serializeProcedureParams(Object[] objectArray) {
    StringBuilder json = new StringBuilder("[");
    for (int i = 0; i < objectArray.length; ++i) {
        if (i > 0) {
            json.append(',');
        }
        Object value = objectArray[i];
        if (value instanceof JsonSerializable) {
            json.append(((JsonSerializable) value).toJson());
        } else {
            try {
                json.append(mapper.writeValueAsString(value));
            } catch (IOException e) {
                throw new IllegalArgumentException("Can't serialize the object into the json string", e);
            }
        }
    }
    return json.append(']').toString();
}
private static void validateResource(Resource resource) {
if (!StringUtils.isEmpty(resource.getId())) {
if (resource.getId().indexOf('/') != -1 || resource.getId().indexOf('\\') != -1 ||
resource.getId().indexOf('?') != -1 || resource.getId().indexOf('
throw new IllegalArgumentException("Id contains illegal chars.");
}
if (resource.getId().endsWith(" ")) {
throw new IllegalArgumentException("Id ends with a space.");
}
}
}
// Assembles the per-request HTTP headers from the client defaults and the
// caller's RequestOptions. Client-level settings are applied first; option
// values (including custom headers) can override them.
private Map<String, String> getRequestHeaders(RequestOptions options) {
Map<String, String> headers = new HashMap<>();
if (this.useMultipleWriteLocations) {
headers.put(HttpConstants.HttpHeaders.ALLOW_TENTATIVE_WRITES, Boolean.TRUE.toString());
}
if (consistencyLevel != null) {
headers.put(HttpConstants.HttpHeaders.CONSISTENCY_LEVEL, consistencyLevel.toString());
}
if (options == null) {
return headers;
}
// Caller-supplied raw headers go in before typed options, so typed options win.
Map<String, String> customOptions = options.getHeaders();
if (customOptions != null) {
headers.putAll(customOptions);
}
if (options.getAccessCondition() != null) {
if (options.getAccessCondition().getType() == AccessConditionType.IF_MATCH) {
headers.put(HttpConstants.HttpHeaders.IF_MATCH, options.getAccessCondition().getCondition());
} else {
headers.put(HttpConstants.HttpHeaders.IF_NONE_MATCH, options.getAccessCondition().getCondition());
}
}
if (options.getConsistencyLevel() != null) {
headers.put(HttpConstants.HttpHeaders.CONSISTENCY_LEVEL, options.getConsistencyLevel().toString());
}
if (options.getIndexingDirective() != null) {
headers.put(HttpConstants.HttpHeaders.INDEXING_DIRECTIVE, options.getIndexingDirective().toString());
}
if (options.getPostTriggerInclude() != null && options.getPostTriggerInclude().size() > 0) {
String postTriggerInclude = StringUtils.join(options.getPostTriggerInclude(), ",");
headers.put(HttpConstants.HttpHeaders.POST_TRIGGER_INCLUDE, postTriggerInclude);
}
if (options.getPreTriggerInclude() != null && options.getPreTriggerInclude().size() > 0) {
String preTriggerInclude = StringUtils.join(options.getPreTriggerInclude(), ",");
headers.put(HttpConstants.HttpHeaders.PRE_TRIGGER_INCLUDE, preTriggerInclude);
}
if (!Strings.isNullOrEmpty(options.getSessionToken())) {
headers.put(HttpConstants.HttpHeaders.SESSION_TOKEN, options.getSessionToken());
}
if (options.getResourceTokenExpirySeconds() != null) {
headers.put(HttpConstants.HttpHeaders.RESOURCE_TOKEN_EXPIRY,
String.valueOf(options.getResourceTokenExpirySeconds()));
}
// Explicit throughput takes precedence over a named offer type.
if (options.getOfferThroughput() != null && options.getOfferThroughput() >= 0) {
headers.put(HttpConstants.HttpHeaders.OFFER_THROUGHPUT, options.getOfferThroughput().toString());
} else if (options.getOfferType() != null) {
headers.put(HttpConstants.HttpHeaders.OFFER_TYPE, options.getOfferType());
}
if (options.isPopulateQuotaInfo()) {
headers.put(HttpConstants.HttpHeaders.POPULATE_QUOTA_INFO, String.valueOf(true));
}
if (options.isScriptLoggingEnabled()) {
headers.put(HttpConstants.HttpHeaders.SCRIPT_ENABLE_LOGGING, String.valueOf(true));
}
return headers;
}
// Overload that resolves the target collection itself before stamping the
// partition-key header onto the request.
private Mono<RxDocumentServiceRequest> addPartitionKeyInformation(RxDocumentServiceRequest request,
String contentAsString,
Document document,
RequestOptions options) {
Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = this.collectionCache.resolveCollectionAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosResponseDiagnostics), request);
return collectionObs
.map(collectionValueHolder -> {
addPartitionKeyInformation(request, contentAsString, document, options, collectionValueHolder.v);
return request;
});
}
// Overload that takes an already-resolving collection observable and stamps
// the partition-key header once it completes.
private Mono<RxDocumentServiceRequest> addPartitionKeyInformation(RxDocumentServiceRequest request,
String contentAsString,
Object document,
RequestOptions options,
Mono<Utils.ValueHolder<DocumentCollection>> collectionObs) {
return collectionObs.map(collectionValueHolder -> {
addPartitionKeyInformation(request, contentAsString, document, options, collectionValueHolder.v);
return request;
});
}
// Determines the effective partition key for a request and stamps it onto the
// request (both the internal field and the x-ms-documentdb-partitionkey
// header). Precedence: explicit PartitionKey.NONE in options, then an explicit
// key in options, then an empty key for non-partitioned collections, then
// extraction from the document body; otherwise the operation is unsupported.
private void addPartitionKeyInformation(RxDocumentServiceRequest request,
String contentAsString,
Object objectDoc, RequestOptions options,
DocumentCollection collection) {
PartitionKeyDefinition partitionKeyDefinition = collection.getPartitionKey();
PartitionKeyInternal partitionKeyInternal = null;
if (options != null && options.getPartitionKey() != null && options.getPartitionKey().equals(PartitionKey.NONE)){
partitionKeyInternal = BridgeInternal.getNonePartitionKey(partitionKeyDefinition);
} else if (options != null && options.getPartitionKey() != null) {
partitionKeyInternal = BridgeInternal.getPartitionKeyInternal(options.getPartitionKey());
} else if (partitionKeyDefinition == null || partitionKeyDefinition.getPaths().size() == 0) {
// Collection is not partitioned.
partitionKeyInternal = PartitionKeyInternal.getEmpty();
} else if (contentAsString != null) {
// Extract the key from the document body; timed via serialization
// diagnostics so key-extraction cost shows up in request diagnostics.
Callable<PartitionKeyInternal> extractPartitionKeyCallable = () -> {
CosmosItemProperties cosmosItemProperties;
if (objectDoc instanceof CosmosItemProperties) {
cosmosItemProperties = (CosmosItemProperties) objectDoc;
} else {
cosmosItemProperties = new CosmosItemProperties(contentAsString);
}
return extractPartitionKeyValueFromDocument(cosmosItemProperties, partitionKeyDefinition);
};
SerializationDiagnosticsContext serializationDiagnosticsContext = BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosResponseDiagnostics);
partitionKeyInternal = serializationDiagnosticsContext.getResource(extractPartitionKeyCallable, SerializationDiagnosticsContext.SerializationType.PartitionKeyFetchSerialization);
} else {
throw new UnsupportedOperationException("PartitionKey value must be supplied for this operation.");
}
request.setPartitionKeyInternal(partitionKeyInternal);
request.getHeaders().put(HttpConstants.HttpHeaders.PARTITION_KEY, Utils.escapeNonAscii(partitionKeyInternal.toJson()));
}
/**
 * Reads the partition-key value from a document body using the first path of
 * the collection's partition-key definition. A missing value (or a JSON
 * object, which is not a legal scalar key) maps to the "none" partition key.
 * Returns {@code null} when there is no definition or the path is empty.
 */
private static PartitionKeyInternal extractPartitionKeyValueFromDocument(
        CosmosItemProperties document,
        PartitionKeyDefinition partitionKeyDefinition) {
    if (partitionKeyDefinition == null) {
        return null;
    }
    // Only the first partition-key path is considered.
    String firstPath = partitionKeyDefinition.getPaths().iterator().next();
    List<String> pathParts = PathParser.getPathParts(firstPath);
    if (pathParts.isEmpty()) {
        return null;
    }
    Object value = document.getObjectByPath(pathParts);
    if (value == null || value.getClass() == ObjectNode.class) {
        value = BridgeInternal.getNonePartitionKey(partitionKeyDefinition);
    }
    return value instanceof PartitionKeyInternal
            ? (PartitionKeyInternal) value
            : PartitionKeyInternal.fromObjectArray(Collections.singletonList(value), false);
}
// Builds the service request for Create/Upsert document: serializes the body,
// resolves the target collection and stamps the partition-key header.
// NOTE(review): disableAutomaticIdGeneration is not used in this body —
// confirm whether id generation is handled elsewhere.
private Mono<RxDocumentServiceRequest> getCreateDocumentRequest(DocumentClientRetryPolicy requestRetryPolicy,
String documentCollectionLink,
Object document,
RequestOptions options,
boolean disableAutomaticIdGeneration,
OperationType operationType) {
if (StringUtils.isEmpty(documentCollectionLink)) {
throw new IllegalArgumentException("documentCollectionLink");
}
if (document == null) {
throw new IllegalArgumentException("document");
}
String content = toJsonString(document, mapper);
String path = Utils.joinPath(documentCollectionLink, Paths.DOCUMENTS_PATH_SEGMENT);
Map<String, String> requestHeaders = this.getRequestHeaders(options);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(operationType, ResourceType.Document, path,
requestHeaders, options, content);
if (requestRetryPolicy != null) {
requestRetryPolicy.onBeforeSendRequest(request);
}
Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = this.collectionCache.resolveCollectionAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosResponseDiagnostics), request);
return addPartitionKeyInformation(request, content, document, options, collectionObs);
}
// Stamps the common headers required on every request: x-ms-date, the
// authorization token (when any credential is configured), and default
// Content-Type / Accept headers.
private void populateHeaders(RxDocumentServiceRequest request, RequestVerb httpMethod) {
request.getHeaders().put(HttpConstants.HttpHeaders.X_DATE, Utils.nowAsRFC1123());
if (this.masterKeyOrResourceToken != null || this.resourceTokensMap != null
|| this.tokenResolver != null || this.cosmosKeyCredential != null) {
String resourceName = request.getResourceAddress();
String authorization = this.getUserAuthorizationToken(
resourceName, request.getResourceType(), httpMethod, request.getHeaders(),
AuthorizationTokenType.PrimaryMasterKey, request.properties);
try {
// The token must be URL-encoded before going on the wire.
authorization = URLEncoder.encode(authorization, "UTF-8");
} catch (UnsupportedEncodingException e) {
// UTF-8 is guaranteed by the JVM; reaching here is a programming error.
throw new IllegalStateException("Failed to encode authtoken.", e);
}
request.getHeaders().put(HttpConstants.HttpHeaders.AUTHORIZATION, authorization);
}
if ((RequestVerb.POST.equals(httpMethod) || RequestVerb.PUT.equals(httpMethod))
&& !request.getHeaders().containsKey(HttpConstants.HttpHeaders.CONTENT_TYPE)) {
request.getHeaders().put(HttpConstants.HttpHeaders.CONTENT_TYPE, RuntimeConstants.MediaTypes.JSON);
}
if (!request.getHeaders().containsKey(HttpConstants.HttpHeaders.ACCEPT)) {
request.getHeaders().put(HttpConstants.HttpHeaders.ACCEPT, RuntimeConstants.MediaTypes.JSON);
}
}
// Produces the authorization token for a request, in credential precedence
// order: custom token resolver, key credential, raw resource token, and
// finally the per-resource token map.
@Override
public String getUserAuthorizationToken(String resourceName,
ResourceType resourceType,
RequestVerb requestVerb,
Map<String, String> headers,
AuthorizationTokenType tokenType,
Map<String, Object> properties) {
if (this.tokenResolver != null) {
return this.tokenResolver.getAuthorizationToken(requestVerb, resourceName, this.resolveCosmosResourceType(resourceType),
properties != null ? Collections.unmodifiableMap(properties) : null);
} else if (cosmosKeyCredential != null) {
return this.authorizationTokenProvider.generateKeyAuthorizationSignature(requestVerb, resourceName,
resourceType, headers);
} else if (masterKeyOrResourceToken != null && hasAuthKeyResourceToken && resourceTokensMap == null) {
// A single resource token is used verbatim.
return masterKeyOrResourceToken;
} else {
assert resourceTokensMap != null;
if(resourceType.equals(ResourceType.DatabaseAccount)) {
return this.firstResourceTokenFromPermissionFeed;
}
return ResourceTokenAuthorizationHelper.getAuthorizationTokenUsingResourceTokens(resourceTokensMap, requestVerb, resourceName, headers);
}
}
/**
 * Maps an internal {@code ResourceType} to the public {@code CosmosResourceType},
 * falling back to {@code System} for types with no public counterpart.
 */
private CosmosResourceType resolveCosmosResourceType(ResourceType resourceType) {
    final String name = resourceType.toString();
    try {
        return CosmosResourceType.valueOf(name);
    } catch (IllegalArgumentException ignored) {
        // No matching public enum constant — treat it as a system resource.
        return CosmosResourceType.System;
    }
}
// Records the response's session token for session-consistency tracking.
void captureSessionToken(RxDocumentServiceRequest request, RxDocumentServiceResponse response) {
this.sessionContainer.setSessionToken(request, response.getResponseHeaders());
}
// Low-level POST (create): stamps headers, updates retry diagnostics on
// retries, then dispatches to the store proxy selected for this request.
private Mono<RxDocumentServiceResponse> create(RxDocumentServiceRequest request, DocumentClientRetryPolicy retryPolicy) {
populateHeaders(request, RequestVerb.POST);
RxStoreModel storeProxy = this.getStoreProxy(request);
if(request.requestContext != null && retryPolicy.getRetryCount() > 0) {
retryPolicy.updateEndTime();
request.requestContext.updateRetryContext(retryPolicy, true);
}
return storeProxy.processMessage(request);
}
// Low-level upsert: a POST with the x-ms-documentdb-is-upsert header set.
// Captures the session token from the response.
private Mono<RxDocumentServiceResponse> upsert(RxDocumentServiceRequest request, DocumentClientRetryPolicy documentClientRetryPolicy) {
populateHeaders(request, RequestVerb.POST);
Map<String, String> headers = request.getHeaders();
// populateHeaders always installs headers, so this must hold.
assert (headers != null);
headers.put(HttpConstants.HttpHeaders.IS_UPSERT, "true");
if(request.requestContext != null && documentClientRetryPolicy.getRetryCount() > 0) {
documentClientRetryPolicy.updateEndTime();
request.requestContext.updateRetryContext(documentClientRetryPolicy, true);
}
return getStoreProxy(request).processMessage(request)
.map(response -> {
this.captureSessionToken(request, response);
return response;
}
);
}
// Public entry point: creates a document. When no partition key is supplied,
// the retry policy is wrapped so a stale collection cache (partition-key
// mismatch) triggers a refresh-and-retry.
@Override
public Mono<ResourceResponse<Document>> createDocument(String collectionLink, Object document,
RequestOptions options, boolean disableAutomaticIdGeneration) {
DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
if (options == null || options.getPartitionKey() == null) {
requestRetryPolicy = new PartitionKeyMismatchRetryPolicy(collectionCache, requestRetryPolicy, collectionLink, options);
}
DocumentClientRetryPolicy finalRetryPolicyInstance = requestRetryPolicy;
return ObservableHelper.inlineIfPossibleAsObs(() -> createDocumentInternal(collectionLink, document, options, disableAutomaticIdGeneration, finalRetryPolicyInstance), requestRetryPolicy);
}
// Builds the Create-Document request (including partition-key resolution) and
// issues it.
private Mono<ResourceResponse<Document>> createDocumentInternal(String collectionLink, Object document,
RequestOptions options, boolean disableAutomaticIdGeneration, DocumentClientRetryPolicy requestRetryPolicy) {
try {
logger.debug("Creating a Document. collectionLink: [{}]", collectionLink);
Mono<RxDocumentServiceRequest> requestObs = getCreateDocumentRequest(requestRetryPolicy, collectionLink, document,
options, disableAutomaticIdGeneration, OperationType.Create);
Mono<RxDocumentServiceResponse> responseObservable = requestObs.flatMap(request -> {
return create(request, requestRetryPolicy);
});
return responseObservable
.map(serviceResponse -> toResourceResponse(serviceResponse, Document.class));
} catch (Exception e) {
logger.debug("Failure in creating a document due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
// Public entry point: upserts a document; same partition-key-mismatch retry
// wrapping as createDocument.
@Override
public Mono<ResourceResponse<Document>> upsertDocument(String collectionLink, Object document,
RequestOptions options, boolean disableAutomaticIdGeneration) {
DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
if (options == null || options.getPartitionKey() == null) {
requestRetryPolicy = new PartitionKeyMismatchRetryPolicy(collectionCache, requestRetryPolicy, collectionLink, options);
}
DocumentClientRetryPolicy finalRetryPolicyInstance = requestRetryPolicy;
return ObservableHelper.inlineIfPossibleAsObs(() -> upsertDocumentInternal(collectionLink, document, options, disableAutomaticIdGeneration, finalRetryPolicyInstance), finalRetryPolicyInstance);
}
// Builds the Upsert-Document request (including partition-key resolution) and
// issues it.
private Mono<ResourceResponse<Document>> upsertDocumentInternal(String collectionLink, Object document,
RequestOptions options, boolean disableAutomaticIdGeneration, DocumentClientRetryPolicy retryPolicyInstance) {
try {
logger.debug("Upserting a Document. collectionLink: [{}]", collectionLink);
Mono<RxDocumentServiceRequest> reqObs = getCreateDocumentRequest(retryPolicyInstance, collectionLink, document,
options, disableAutomaticIdGeneration, OperationType.Upsert);
Mono<RxDocumentServiceResponse> responseObservable = reqObs.flatMap(request -> {
return upsert(request, retryPolicyInstance);
});
return responseObservable
.map(serviceResponse -> toResourceResponse(serviceResponse, Document.class));
} catch (Exception e) {
logger.debug("Failure in upserting a document due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
// Public entry point: replaces a document addressed by link; same
// partition-key-mismatch retry wrapping as createDocument, with the
// collection link derived from the document link.
@Override
public Mono<ResourceResponse<Document>> replaceDocument(String documentLink, Object document,
RequestOptions options) {
DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
if (options == null || options.getPartitionKey() == null) {
String collectionLink = Utils.getCollectionName(documentLink);
requestRetryPolicy = new PartitionKeyMismatchRetryPolicy(collectionCache, requestRetryPolicy, collectionLink, options);
}
DocumentClientRetryPolicy finalRequestRetryPolicy = requestRetryPolicy;
return ObservableHelper.inlineIfPossibleAsObs(() -> replaceDocumentInternal(documentLink, document, options, finalRequestRetryPolicy), requestRetryPolicy);
}
/**
 * Validates the arguments, normalizes the caller-supplied payload (POJO/JSON)
 * into a typed {@link Document}, and delegates to the typed replace path.
 */
private Mono<ResourceResponse<Document>> replaceDocumentInternal(String documentLink, Object document,
RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(documentLink)) {
            throw new IllegalArgumentException("documentLink");
        }
        if (document == null) {
            throw new IllegalArgumentException("document");
        }
        Document typedDocument = documentFromObject(document, mapper);
        return this.replaceDocumentInternal(documentLink, typedDocument, options, retryPolicyInstance);
    } catch (Exception e) {
        // Pass the throwable as the last argument so SLF4J logs the stack trace
        // (consistent with the sibling create/upsert paths).
        logger.debug("Failure in replacing a document due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Replaces a document using its own self link for addressing.
 */
@Override
public Mono<ResourceResponse<Document>> replaceDocument(Document document, RequestOptions options) {
    DocumentClientRetryPolicy basePolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    DocumentClientRetryPolicy policy = basePolicy;
    if (options == null || options.getPartitionKey() == null) {
        // NOTE(review): the document's self link is passed where
        // PartitionKeyMismatchRetryPolicy expects a collection link (the String
        // overload derives it via Utils.getCollectionName) — confirm intentional.
        String collectionLink = document.getSelfLink();
        policy = new PartitionKeyMismatchRetryPolicy(collectionCache, basePolicy, collectionLink, options);
    }
    final DocumentClientRetryPolicy finalPolicy = policy;
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> replaceDocumentInternal(document, options, finalPolicy), finalPolicy);
}
/**
 * Typed replace entry point: validates the document and delegates using its
 * self link as the address.
 */
private Mono<ResourceResponse<Document>> replaceDocumentInternal(Document document, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (document == null) {
            throw new IllegalArgumentException("document");
        }
        return this.replaceDocumentInternal(document.getSelfLink(), document, options, retryPolicyInstance);
    } catch (Exception e) {
        // Fixed copy/paste error: this path replaces a document, not a database.
        // Also pass the throwable so the stack trace is logged.
        logger.debug("Failure in replacing a document due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Issues the actual Replace request for an already-typed document: serializes
 * the body, resolves the target collection, stamps partition-key information
 * on the request, and sends it through the retry pipeline.
 */
private Mono<ResourceResponse<Document>> replaceDocumentInternal(String documentLink,
Document document,
RequestOptions options,
DocumentClientRetryPolicy retryPolicyInstance) {
    if (document == null) {
        throw new IllegalArgumentException("document");
    }
    logger.debug("Replacing a Document. documentLink: [{}]", documentLink);
    final String path = Utils.joinPath(documentLink, null);
    final Map<String, String> requestHeaders = getRequestHeaders(options);
    String content = toJsonString(document, mapper);
    final RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Replace,
        ResourceType.Document, path, requestHeaders, options, content);
    if (retryPolicyInstance != null) {
        retryPolicyInstance.onBeforeSendRequest(request);
    }
    // Resolve the collection so partition-key information can be attached.
    Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = collectionCache.resolveCollectionAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosResponseDiagnostics), request);
    Mono<RxDocumentServiceRequest> requestObs = addPartitionKeyInformation(request, content, document, options, collectionObs);
    // Send the request emitted by the pipeline (the original sent the captured
    // outer variable, silently ignoring whatever addPartitionKeyInformation emits).
    return requestObs.flatMap(req -> replace(req, retryPolicyInstance)
        .map(resp -> toResourceResponse(resp, Document.class)));
}
/**
 * Deletes the document addressed by the given link.
 */
@Override
public Mono<ResourceResponse<Document>> deleteDocument(String documentLink, RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> deleteDocumentInternal(documentLink, options, retryPolicy), retryPolicy);
}
/**
 * Core delete path: builds the Delete request, resolves the collection to
 * attach partition-key information, and sends through the retry pipeline.
 */
private Mono<ResourceResponse<Document>> deleteDocumentInternal(String documentLink, RequestOptions options,
DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(documentLink)) {
            throw new IllegalArgumentException("documentLink");
        }
        logger.debug("Deleting a Document. documentLink: [{}]", documentLink);
        String path = Utils.joinPath(documentLink, null);
        Map<String, String> requestHeaders = this.getRequestHeaders(options);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Delete,
            ResourceType.Document, path, requestHeaders, options);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        // Resolve the target collection so the partition key can be stamped on the request.
        Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = collectionCache.resolveCollectionAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosResponseDiagnostics), request);
        Mono<RxDocumentServiceRequest> requestObs = addPartitionKeyInformation(request, null, null, options, collectionObs);
        return requestObs.flatMap(req -> this.delete(req, retryPolicyInstance)
            .map(serviceResponse -> toResourceResponse(serviceResponse, Document.class)));
    } catch (Exception e) {
        // Pass the throwable itself so the stack trace is logged (consistent with siblings).
        logger.debug("Failure in deleting a document due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Reads the document addressed by the given link.
 */
@Override
public Mono<ResourceResponse<Document>> readDocument(String documentLink, RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> readDocumentInternal(documentLink, options, retryPolicy), retryPolicy);
}
/**
 * Core read path: builds the Read request, resolves the collection to attach
 * partition-key information, and sends through the retry pipeline.
 */
private Mono<ResourceResponse<Document>> readDocumentInternal(String documentLink, RequestOptions options,
DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(documentLink)) {
            throw new IllegalArgumentException("documentLink");
        }
        logger.debug("Reading a Document. documentLink: [{}]", documentLink);
        String path = Utils.joinPath(documentLink, null);
        Map<String, String> requestHeaders = this.getRequestHeaders(options);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Read,
            ResourceType.Document, path, requestHeaders, options);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = this.collectionCache.resolveCollectionAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosResponseDiagnostics), request);
        Mono<RxDocumentServiceRequest> requestObs = addPartitionKeyInformation(request, null, null, options, collectionObs);
        return requestObs.flatMap(req -> {
            if (retryPolicyInstance != null) {
                // Re-arm the retry policy after the async partition-key resolution,
                // now on the request instance emitted by the pipeline.
                retryPolicyInstance.onBeforeSendRequest(req);
            }
            return this.read(req, retryPolicyInstance)
                .map(serviceResponse -> toResourceResponse(serviceResponse, Document.class));
        });
    } catch (Exception e) {
        // Pass the throwable itself so the stack trace is logged (consistent with siblings).
        logger.debug("Failure in reading a document due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Reads all documents in a collection. Implemented as a full-scan query,
 * which allows reuse of the cross-partition query machinery.
 */
@Override
public Flux<FeedResponse<Document>> readDocuments(String collectionLink, FeedOptions options) {
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }
    return queryDocuments(collectionLink, "SELECT * FROM r", options);
}
/**
 * Point-read optimization for a batch of (id, partition key) pairs: groups the
 * pairs by the partition-key range that owns them, issues one targeted query
 * per range, and merges the pages into a single synthetic feed response with
 * the aggregated request charge.
 */
@Override
public <T> Mono<FeedResponse<T>> readMany(
List<Pair<String, PartitionKey>> itemKeyList,
String collectionLink,
FeedOptions options,
Class<T> klass) {
// Dummy request used only to drive collection resolution below.
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(
OperationType.Query,
ResourceType.Document,
collectionLink, null
);
Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = collectionCache.resolveCollectionAsync(null, request);
return collectionObs
.flatMap(documentCollectionResourceResponse -> {
final DocumentCollection collection = documentCollectionResourceResponse.v;
if (collection == null) {
throw new IllegalStateException("Collection cannot be null");
}
// Routing map translates each effective partition key to its owning range.
Mono<Utils.ValueHolder<CollectionRoutingMap>> valueHolderMono = partitionKeyRangeCache
.tryLookupAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosResponseDiagnostics),
collection.getResourceId(),
null,
null);
return valueHolderMono.flatMap(collectionRoutingMapValueHolder -> {
Map<PartitionKeyRange, List<Pair<String, PartitionKey>>> partitionRangeItemKeyMap =
new HashMap<>();
CollectionRoutingMap routingMap = collectionRoutingMapValueHolder.v;
if (routingMap == null) {
throw new IllegalStateException("Failed to get routing map.");
}
// Bucket each (id, pk) pair under the partition-key range that owns it.
itemKeyList
.forEach(stringPartitionKeyPair -> {
String effectivePartitionKeyString = PartitionKeyInternalHelper
.getEffectivePartitionKeyString(BridgeInternal
.getPartitionKeyInternal(stringPartitionKeyPair
.getRight()),
collection
.getPartitionKey());
PartitionKeyRange range =
routingMap.getRangeByEffectivePartitionKey(effectivePartitionKeyString);
if (partitionRangeItemKeyMap.get(range) == null) {
List<Pair<String, PartitionKey>> list = new ArrayList<>();
list.add(stringPartitionKeyPair);
partitionRangeItemKeyMap.put(range, list);
} else {
List<Pair<String, PartitionKey>> pairs =
partitionRangeItemKeyMap.get(range);
pairs.add(stringPartitionKeyPair);
partitionRangeItemKeyMap.put(range, pairs);
}
});
Set<PartitionKeyRange> partitionKeyRanges = partitionRangeItemKeyMap.keySet();
List<PartitionKeyRange> ranges = new ArrayList<>();
ranges.addAll(partitionKeyRanges);
// One per-range SQL query (id IN (...) or (id AND pk) disjunctions).
Map<PartitionKeyRange, SqlQuerySpec> rangeQueryMap;
rangeQueryMap = getRangeQueryMap(partitionRangeItemKeyMap,
collection.getPartitionKey());
// Placeholder query text: the execution context requires a spec, but the
// real per-range queries come from rangeQueryMap.
String sqlQuery = "this is dummy and only used in creating " +
"ParallelDocumentQueryExecutioncontext, but not used";
return createReadManyQuery(collectionLink,
new SqlQuerySpec(sqlQuery),
options,
Document.class,
ResourceType.Document,
collection,
Collections.unmodifiableMap(rangeQueryMap))
.collectList()
.map(feedList -> {
// Merge all pages: concatenate results and sum the request charges.
List<T> finalList = new ArrayList<T>();
HashMap<String, String> headers = new HashMap<>();
double requestCharge = 0;
for (FeedResponse<Document> page : feedList) {
requestCharge += page.getRequestCharge();
finalList.addAll(page.getResults().stream().map(document -> document.toObject(klass)).collect(Collectors.toList()));
}
headers.put(HttpConstants.HttpHeaders.REQUEST_CHARGE, Double
.toString(requestCharge));
FeedResponse<T> frp = BridgeInternal
.createFeedResponse(finalList, headers);
return frp;
});
});
}
);
}
/**
 * Builds one SQL query spec per partition-key range for a readMany call.
 * When the partition key is the id property itself, a simpler
 * {@code id IN (...)} query is used.
 */
private Map<PartitionKeyRange, SqlQuerySpec> getRangeQueryMap(
Map<PartitionKeyRange, List<Pair<String, PartitionKey>>> partitionRangeItemKeyMap,
PartitionKeyDefinition partitionKeyDefinition) {
    final String partitionKeySelector = createPkSelector(partitionKeyDefinition);
    final boolean pkIsId = partitionKeySelector.equals("[\"id\"]");
    Map<PartitionKeyRange, SqlQuerySpec> rangeQueryMap = new HashMap<>();
    partitionRangeItemKeyMap.forEach((range, pairs) -> {
        SqlQuerySpec spec = pkIsId
            ? createReadManyQuerySpecPartitionKeyIdSame(pairs, partitionKeySelector)
            : createReadManyQuerySpec(pairs, partitionKeySelector);
        rangeQueryMap.put(range, spec);
    });
    return rangeQueryMap;
}
/**
 * Builds an {@code id IN (...)} query for pairs whose id equals the
 * partition-key value. Pairs where the two differ are skipped.
 */
private SqlQuerySpec createReadManyQuerySpecPartitionKeyIdSame(List<Pair<String, PartitionKey>> idPartitionKeyPairList, String partitionKeySelector) {
    SqlParameterList parameters = new SqlParameterList();
    List<String> paramNames = new ArrayList<>();
    for (int i = 0; i < idPartitionKeyPairList.size(); i++) {
        Pair<String, PartitionKey> pair = idPartitionKeyPairList.get(i);
        String idValue = pair.getLeft();
        Object pkValue = BridgeInternal.getPartitionKeyObject(pair.getRight());
        // Only pairs whose id equals the pk value can be served by a pure IN query.
        if (!Objects.equals(idValue, pkValue)) {
            continue;
        }
        String idParamName = "@param" + i;
        parameters.add(new SqlParameter(idParamName, idValue));
        paramNames.add(idParamName);
    }
    // Join the parameter names AFTER filtering. The original appended ", "
    // positionally (i < size-1), so a skipped last pair produced a trailing
    // comma — "IN ( @param0, )" — which is invalid SQL.
    StringBuilder queryStringBuilder = new StringBuilder();
    queryStringBuilder.append("SELECT * FROM c WHERE c.id IN ( ");
    queryStringBuilder.append(String.join(", ", paramNames));
    queryStringBuilder.append(" )");
    return new SqlQuerySpec(queryStringBuilder.toString(), parameters);
}
/**
 * Builds a disjunction query of (id AND partition-key) clauses, one clause per
 * pair, binding two positional parameters per pair (even index = pk, odd = id).
 */
private SqlQuerySpec createReadManyQuerySpec(List<Pair<String, PartitionKey>> idPartitionKeyPairList, String partitionKeySelector) {
    SqlParameterList parameters = new SqlParameterList();
    StringBuilder sb = new StringBuilder("SELECT * FROM c WHERE ( ");
    final int count = idPartitionKeyPairList.size();
    for (int i = 0; i < count; i++) {
        Pair<String, PartitionKey> pair = idPartitionKeyPairList.get(i);
        String pkParamName = "@param" + (2 * i);
        String idParamName = "@param" + (2 * i + 1);
        parameters.add(new SqlParameter(pkParamName, BridgeInternal.getPartitionKeyObject(pair.getRight())));
        parameters.add(new SqlParameter(idParamName, pair.getLeft()));
        sb.append("(")
          .append("c.id = ")
          .append(idParamName)
          .append(" AND ")
          .append(" c")
          .append(partitionKeySelector)
          .append(" = ")
          .append(pkParamName)
          .append(" )");
        if (i < count - 1) {
            sb.append(" OR ");
        }
    }
    sb.append(" )");
    return new SqlQuerySpec(sb.toString(), parameters);
}
/**
 * Converts partition-key paths (e.g. {@code /a/b}) into a bracketed property
 * selector (e.g. {@code ["a"]["b"]}).
 */
private String createPkSelector(PartitionKeyDefinition partitionKeyDefinition) {
    return partitionKeyDefinition.getPaths()
        .stream()
        .map(pathPart -> StringUtils.substring(pathPart, 1)) // drop the leading '/'
        // Escape embedded quotes as \". The original replaced '"' with a lone
        // backslash, dropping the quote and yielding a malformed selector.
        .map(pathPart -> StringUtils.replace(pathPart, "\"", "\\\""))
        .map(part -> "[\"" + part + "\"]")
        .collect(Collectors.joining());
}
// Returns the positional parameter name for the given index, e.g. "@param3".
// NOTE(review): the name contains a typo ("Curent"); kept since out-of-view
// callers may reference it.
private String getCurentParamName(int paramCnt){
    return String.format("@param%d", paramCnt);
}
/**
 * Creates the parallel execution context that runs one pre-built query per
 * partition-key range (from {@code rangeQueryMap}) and returns the merged
 * page stream. The {@code sqlQuery} argument is only a placeholder required
 * by the factory; the per-range specs are what actually execute.
 */
private <T extends Resource> Flux<FeedResponse<T>> createReadManyQuery(
String parentResourceLink,
SqlQuerySpec sqlQuery,
FeedOptions options,
Class<T> klass,
ResourceType resourceTypeEnum,
DocumentCollection collection,
Map<PartitionKeyRange, SqlQuerySpec> rangeQueryMap) {
// Fresh activity id correlates all per-range requests of this readMany call.
UUID activityId = Utils.randomUUID();
IDocumentQueryClient queryClient = DocumentQueryClientImpl(RxDocumentClientImpl.this);
Flux<? extends IDocumentQueryExecutionContext<T>> executionContext =
DocumentQueryExecutionContextFactory.createReadManyQueryAsync(queryClient, collection.getResourceId(),
sqlQuery,
rangeQueryMap,
options,
collection.getResourceId(),
parentResourceLink,
activityId,
klass,
resourceTypeEnum);
return executionContext.flatMap(IDocumentQueryExecutionContext::executeAsync);
}
/**
 * Queries documents with a raw SQL string; delegates to the
 * {@link SqlQuerySpec} overload.
 */
@Override
public Flux<FeedResponse<Document>> queryDocuments(String collectionLink, String query,
FeedOptions options) {
    return queryDocuments(collectionLink, new SqlQuerySpec(query), options);
}
/**
 * Adapts this client to the {@link IDocumentQueryClient} interface consumed by
 * the query execution machinery, delegating to the enclosing instance's caches
 * and policies.
 * NOTE(review): the {@code rxDocumentClientImpl} parameter is unused — the
 * anonymous class references {@code RxDocumentClientImpl.this} directly.
 */
private IDocumentQueryClient DocumentQueryClientImpl(RxDocumentClientImpl rxDocumentClientImpl) {
return new IDocumentQueryClient () {
@Override
public RxCollectionCache getCollectionCache() {
return RxDocumentClientImpl.this.collectionCache;
}
@Override
public RxPartitionKeyRangeCache getPartitionKeyRangeCache() {
return RxDocumentClientImpl.this.partitionKeyRangeCache;
}
@Override
public IRetryPolicyFactory getResetSessionTokenRetryPolicy() {
return RxDocumentClientImpl.this.resetSessionTokenRetryPolicy;
}
@Override
public ConsistencyLevel getDefaultConsistencyLevelAsync() {
// Account-level default consistency from the gateway configuration.
return RxDocumentClientImpl.this.gatewayConfigurationReader.getDefaultConsistencyLevel();
}
@Override
public ConsistencyLevel getDesiredConsistencyLevelAsync() {
// Client-requested consistency, may be null when not overridden.
return RxDocumentClientImpl.this.consistencyLevel;
}
@Override
public Mono<RxDocumentServiceResponse> executeQueryAsync(RxDocumentServiceRequest request) {
return RxDocumentClientImpl.this.query(request).single();
}
@Override
public QueryCompatibilityMode getQueryCompatibilityMode() {
return QueryCompatibilityMode.Default;
}
@Override
public Mono<RxDocumentServiceResponse> readFeedAsync(RxDocumentServiceRequest request) {
// NOTE(review): not implemented — returns null; confirm callers never use it.
return null;
}
};
}
/**
 * Queries documents with a parameterized query spec via the generic query
 * pipeline.
 */
@Override
public Flux<FeedResponse<Document>> queryDocuments(String collectionLink, SqlQuerySpec querySpec,
FeedOptions options) {
    return createQuery(collectionLink, querySpec, options, Document.class, ResourceType.Document);
}
/**
 * Streams the change feed of a collection via the dedicated change-feed query
 * implementation.
 */
@Override
public Flux<FeedResponse<Document>> queryDocumentChangeFeed(final String collectionLink,
final ChangeFeedOptions changeFeedOptions) {
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }
    return new ChangeFeedQueryImpl<Document>(this, ResourceType.Document,
        Document.class, collectionLink, changeFeedOptions).executeAsync();
}
/**
 * Reads the partition-key ranges of a collection as a feed.
 */
@Override
public Flux<FeedResponse<PartitionKeyRange>> readPartitionKeyRanges(final String collectionLink,
FeedOptions options) {
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }
    final String feedPath = Utils.joinPath(collectionLink, Paths.PARTITION_KEY_RANGES_PATH_SEGMENT);
    return readFeed(options, ResourceType.PartitionKeyRange, PartitionKeyRange.class, feedPath);
}
/**
 * Builds a stored-procedure service request under the given collection for the
 * requested operation, after validating the inputs.
 */
private RxDocumentServiceRequest getStoredProcedureRequest(String collectionLink, StoredProcedure storedProcedure,
RequestOptions options, OperationType operationType) {
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }
    if (storedProcedure == null) {
        throw new IllegalArgumentException("storedProcedure");
    }
    validateResource(storedProcedure);
    final String path = Utils.joinPath(collectionLink, Paths.STORED_PROCEDURES_PATH_SEGMENT);
    final Map<String, String> requestHeaders = this.getRequestHeaders(options);
    return RxDocumentServiceRequest.create(operationType, ResourceType.StoredProcedure,
        path, storedProcedure, requestHeaders, options);
}
/**
 * Builds a user-defined-function service request under the given collection
 * for the requested operation, after validating the inputs.
 */
private RxDocumentServiceRequest getUserDefinedFunctionRequest(String collectionLink, UserDefinedFunction udf,
RequestOptions options, OperationType operationType) {
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }
    if (udf == null) {
        throw new IllegalArgumentException("udf");
    }
    validateResource(udf);
    final String path = Utils.joinPath(collectionLink, Paths.USER_DEFINED_FUNCTIONS_PATH_SEGMENT);
    final Map<String, String> requestHeaders = this.getRequestHeaders(options);
    return RxDocumentServiceRequest.create(operationType,
        ResourceType.UserDefinedFunction, path, udf, requestHeaders, options);
}
/**
 * Creates a stored procedure in the given collection.
 */
@Override
public Mono<ResourceResponse<StoredProcedure>> createStoredProcedure(String collectionLink,
StoredProcedure storedProcedure, RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> createStoredProcedureInternal(collectionLink, storedProcedure, options, retryPolicy),
        retryPolicy);
}
/**
 * Core create path for stored procedures: builds the request, arms the retry
 * policy, and maps the raw response to a typed {@link ResourceResponse}.
 */
private Mono<ResourceResponse<StoredProcedure>> createStoredProcedureInternal(String collectionLink,
StoredProcedure storedProcedure, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        logger.debug("Creating a StoredProcedure. collectionLink: [{}], storedProcedure id [{}]",
            collectionLink, storedProcedure.getId());
        final RxDocumentServiceRequest serviceRequest =
            getStoredProcedureRequest(collectionLink, storedProcedure, options, OperationType.Create);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(serviceRequest);
        }
        return this.create(serviceRequest, retryPolicyInstance)
            .map(response -> toResourceResponse(response, StoredProcedure.class));
    } catch (Exception e) {
        logger.debug("Failure in creating a StoredProcedure due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Upserts a stored procedure in the given collection.
 */
@Override
public Mono<ResourceResponse<StoredProcedure>> upsertStoredProcedure(String collectionLink,
StoredProcedure storedProcedure, RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> upsertStoredProcedureInternal(collectionLink, storedProcedure, options, retryPolicy),
        retryPolicy);
}
/**
 * Core upsert path for stored procedures.
 */
private Mono<ResourceResponse<StoredProcedure>> upsertStoredProcedureInternal(String collectionLink,
StoredProcedure storedProcedure, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        logger.debug("Upserting a StoredProcedure. collectionLink: [{}], storedProcedure id [{}]",
            collectionLink, storedProcedure.getId());
        final RxDocumentServiceRequest serviceRequest =
            getStoredProcedureRequest(collectionLink, storedProcedure, options, OperationType.Upsert);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(serviceRequest);
        }
        return this.upsert(serviceRequest, retryPolicyInstance)
            .map(response -> toResourceResponse(response, StoredProcedure.class));
    } catch (Exception e) {
        logger.debug("Failure in upserting a StoredProcedure due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Replaces a stored procedure, addressed via its self link.
 */
@Override
public Mono<ResourceResponse<StoredProcedure>> replaceStoredProcedure(StoredProcedure storedProcedure,
RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> replaceStoredProcedureInternal(storedProcedure, options, retryPolicy), retryPolicy);
}
/**
 * Core replace path for stored procedures: validates, builds the Replace
 * request against the self link, and sends through the retry pipeline.
 */
private Mono<ResourceResponse<StoredProcedure>> replaceStoredProcedureInternal(StoredProcedure storedProcedure,
RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (storedProcedure == null) {
            throw new IllegalArgumentException("storedProcedure");
        }
        logger.debug("Replacing a StoredProcedure. storedProcedure id [{}]", storedProcedure.getId());
        RxDocumentClientImpl.validateResource(storedProcedure);
        final String path = Utils.joinPath(storedProcedure.getSelfLink(), null);
        final Map<String, String> requestHeaders = getRequestHeaders(options);
        final RxDocumentServiceRequest serviceRequest = RxDocumentServiceRequest.create(OperationType.Replace,
            ResourceType.StoredProcedure, path, storedProcedure, requestHeaders, options);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(serviceRequest);
        }
        return this.replace(serviceRequest, retryPolicyInstance)
            .map(response -> toResourceResponse(response, StoredProcedure.class));
    } catch (Exception e) {
        logger.debug("Failure in replacing a StoredProcedure due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Deletes a stored procedure addressed by its link.
 */
@Override
public Mono<ResourceResponse<StoredProcedure>> deleteStoredProcedure(String storedProcedureLink,
RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> deleteStoredProcedureInternal(storedProcedureLink, options, retryPolicy), retryPolicy);
}
/**
 * Core delete path for stored procedures.
 */
private Mono<ResourceResponse<StoredProcedure>> deleteStoredProcedureInternal(String storedProcedureLink,
RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(storedProcedureLink)) {
            throw new IllegalArgumentException("storedProcedureLink");
        }
        logger.debug("Deleting a StoredProcedure. storedProcedureLink [{}]", storedProcedureLink);
        final String path = Utils.joinPath(storedProcedureLink, null);
        final Map<String, String> requestHeaders = this.getRequestHeaders(options);
        final RxDocumentServiceRequest serviceRequest = RxDocumentServiceRequest.create(OperationType.Delete,
            ResourceType.StoredProcedure, path, requestHeaders, options);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(serviceRequest);
        }
        return this.delete(serviceRequest, retryPolicyInstance)
            .map(response -> toResourceResponse(response, StoredProcedure.class));
    } catch (Exception e) {
        logger.debug("Failure in deleting a StoredProcedure due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Reads a stored procedure addressed by its link.
 */
@Override
public Mono<ResourceResponse<StoredProcedure>> readStoredProcedure(String storedProcedureLink,
RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> readStoredProcedureInternal(storedProcedureLink, options, retryPolicy), retryPolicy);
}
/**
 * Core read path for stored procedures.
 */
private Mono<ResourceResponse<StoredProcedure>> readStoredProcedureInternal(String storedProcedureLink,
RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(storedProcedureLink)) {
            throw new IllegalArgumentException("storedProcedureLink");
        }
        logger.debug("Reading a StoredProcedure. storedProcedureLink [{}]", storedProcedureLink);
        final String path = Utils.joinPath(storedProcedureLink, null);
        final Map<String, String> requestHeaders = this.getRequestHeaders(options);
        final RxDocumentServiceRequest serviceRequest = RxDocumentServiceRequest.create(OperationType.Read,
            ResourceType.StoredProcedure, path, requestHeaders, options);
        if (retryPolicyInstance != null){
            retryPolicyInstance.onBeforeSendRequest(serviceRequest);
        }
        return this.read(serviceRequest, retryPolicyInstance)
            .map(response -> toResourceResponse(response, StoredProcedure.class));
    } catch (Exception e) {
        logger.debug("Failure in reading a StoredProcedure due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Reads all stored procedures of a collection as a feed.
 */
@Override
public Flux<FeedResponse<StoredProcedure>> readStoredProcedures(String collectionLink,
FeedOptions options) {
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }
    final String feedPath = Utils.joinPath(collectionLink, Paths.STORED_PROCEDURES_PATH_SEGMENT);
    return readFeed(options, ResourceType.StoredProcedure, StoredProcedure.class, feedPath);
}
/**
 * Queries stored procedures with a raw SQL string; delegates to the
 * {@link SqlQuerySpec} overload.
 */
@Override
public Flux<FeedResponse<StoredProcedure>> queryStoredProcedures(String collectionLink, String query,
FeedOptions options) {
    return queryStoredProcedures(collectionLink, new SqlQuerySpec(query), options);
}
/**
 * Queries stored procedures with a parameterized query spec.
 */
@Override
public Flux<FeedResponse<StoredProcedure>> queryStoredProcedures(String collectionLink,
SqlQuerySpec querySpec, FeedOptions options) {
    return createQuery(collectionLink, querySpec, options, StoredProcedure.class, ResourceType.StoredProcedure);
}
/**
 * Executes a stored procedure with default request options.
 */
@Override
public Mono<StoredProcedureResponse> executeStoredProcedure(String storedProcedureLink,
Object[] procedureParams) {
    return this.executeStoredProcedure(storedProcedureLink, null, procedureParams);
}
/**
 * Executes a stored procedure with explicit request options.
 */
@Override
public Mono<StoredProcedureResponse> executeStoredProcedure(String storedProcedureLink,
RequestOptions options, Object[] procedureParams) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> executeStoredProcedureInternal(storedProcedureLink, options, procedureParams, retryPolicy),
        retryPolicy);
}
/**
 * Core execute path: serializes the procedure parameters into the request
 * body, attaches partition-key information, runs the ExecuteJavaScript
 * operation, and captures the returned session token before mapping the
 * response.
 */
private Mono<StoredProcedureResponse> executeStoredProcedureInternal(String storedProcedureLink,
RequestOptions options, Object[] procedureParams, DocumentClientRetryPolicy retryPolicy) {
try {
logger.debug("Executing a StoredProcedure. storedProcedureLink [{}]", storedProcedureLink);
String path = Utils.joinPath(storedProcedureLink, null);
Map<String, String> requestHeaders = getRequestHeaders(options);
requestHeaders.put(HttpConstants.HttpHeaders.ACCEPT, RuntimeConstants.MediaTypes.JSON);
// Null params are sent as an empty body rather than "null".
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.ExecuteJavaScript,
ResourceType.StoredProcedure, path,
procedureParams != null ? RxDocumentClientImpl.serializeProcedureParams(procedureParams) : "",
requestHeaders, options);
Mono<RxDocumentServiceRequest> reqObs = addPartitionKeyInformation(request, null, null, options);
// NOTE(review): the captured outer `request` is sent, not the emitted `req` —
// presumably addPartitionKeyInformation returns the same mutated instance; confirm.
return reqObs.flatMap(req -> create(request, retryPolicy)
.map(response -> {
// Record the session token so subsequent session-consistent reads see this write.
this.captureSessionToken(request, response);
return toStoredProcedureResponse(response);
}));
} catch (Exception e) {
logger.debug("Failure in executing a StoredProcedure due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
/**
 * Creates a trigger in the given collection.
 */
@Override
public Mono<ResourceResponse<Trigger>> createTrigger(String collectionLink, Trigger trigger,
RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> createTriggerInternal(collectionLink, trigger, options, retryPolicy), retryPolicy);
}
/**
 * Core create path for triggers.
 */
private Mono<ResourceResponse<Trigger>> createTriggerInternal(String collectionLink, Trigger trigger,
RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        logger.debug("Creating a Trigger. collectionLink [{}], trigger id [{}]", collectionLink,
            trigger.getId());
        final RxDocumentServiceRequest serviceRequest =
            getTriggerRequest(collectionLink, trigger, options, OperationType.Create);
        if (retryPolicyInstance != null){
            retryPolicyInstance.onBeforeSendRequest(serviceRequest);
        }
        return this.create(serviceRequest, retryPolicyInstance)
            .map(response -> toResourceResponse(response, Trigger.class));
    } catch (Exception e) {
        logger.debug("Failure in creating a Trigger due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Upserts a trigger in the given collection.
 */
@Override
public Mono<ResourceResponse<Trigger>> upsertTrigger(String collectionLink, Trigger trigger,
RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> upsertTriggerInternal(collectionLink, trigger, options, retryPolicy), retryPolicy);
}
/**
 * Core upsert path for triggers.
 */
private Mono<ResourceResponse<Trigger>> upsertTriggerInternal(String collectionLink, Trigger trigger,
RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        logger.debug("Upserting a Trigger. collectionLink [{}], trigger id [{}]", collectionLink,
            trigger.getId());
        final RxDocumentServiceRequest serviceRequest =
            getTriggerRequest(collectionLink, trigger, options, OperationType.Upsert);
        if (retryPolicyInstance != null){
            retryPolicyInstance.onBeforeSendRequest(serviceRequest);
        }
        return this.upsert(serviceRequest, retryPolicyInstance)
            .map(response -> toResourceResponse(response, Trigger.class));
    } catch (Exception e) {
        logger.debug("Failure in upserting a Trigger due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Builds a trigger service request under the given collection for the
 * requested operation, after validating the inputs.
 */
private RxDocumentServiceRequest getTriggerRequest(String collectionLink, Trigger trigger, RequestOptions options,
OperationType operationType) {
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }
    if (trigger == null) {
        throw new IllegalArgumentException("trigger");
    }
    RxDocumentClientImpl.validateResource(trigger);
    final String path = Utils.joinPath(collectionLink, Paths.TRIGGERS_PATH_SEGMENT);
    final Map<String, String> requestHeaders = getRequestHeaders(options);
    return RxDocumentServiceRequest.create(operationType, ResourceType.Trigger, path,
        trigger, requestHeaders, options);
}
/**
 * Replaces a trigger, addressed via its self link.
 */
@Override
public Mono<ResourceResponse<Trigger>> replaceTrigger(Trigger trigger, RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> replaceTriggerInternal(trigger, options, retryPolicy), retryPolicy);
}
/**
 * Core replace path for triggers: validates, builds the Replace request
 * against the self link, and sends through the retry pipeline.
 */
private Mono<ResourceResponse<Trigger>> replaceTriggerInternal(Trigger trigger, RequestOptions options,
DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (trigger == null) {
            throw new IllegalArgumentException("trigger");
        }
        logger.debug("Replacing a Trigger. trigger id [{}]", trigger.getId());
        RxDocumentClientImpl.validateResource(trigger);
        final String path = Utils.joinPath(trigger.getSelfLink(), null);
        final Map<String, String> requestHeaders = getRequestHeaders(options);
        final RxDocumentServiceRequest serviceRequest = RxDocumentServiceRequest.create(OperationType.Replace,
            ResourceType.Trigger, path, trigger, requestHeaders, options);
        if (retryPolicyInstance != null){
            retryPolicyInstance.onBeforeSendRequest(serviceRequest);
        }
        return this.replace(serviceRequest, retryPolicyInstance)
            .map(response -> toResourceResponse(response, Trigger.class));
    } catch (Exception e) {
        logger.debug("Failure in replacing a Trigger due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
@Override
public Mono<ResourceResponse<Trigger>> deleteTrigger(String triggerLink, RequestOptions options) {
DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy();
return ObservableHelper.inlineIfPossibleAsObs(() -> deleteTriggerInternal(triggerLink, options, retryPolicyInstance), retryPolicyInstance);
}
private Mono<ResourceResponse<Trigger>> deleteTriggerInternal(String triggerLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (StringUtils.isEmpty(triggerLink)) {
throw new IllegalArgumentException("triggerLink");
}
logger.debug("Deleting a Trigger. triggerLink [{}]", triggerLink);
String path = Utils.joinPath(triggerLink, null);
Map<String, String> requestHeaders = getRequestHeaders(options);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Delete,
ResourceType.Trigger, path, requestHeaders, options);
if (retryPolicyInstance != null){
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.delete(request, retryPolicyInstance).map(response -> toResourceResponse(response, Trigger.class));
} catch (Exception e) {
logger.debug("Failure in deleting a Trigger due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
@Override
public Mono<ResourceResponse<Trigger>> readTrigger(String triggerLink, RequestOptions options) {
DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy();
return ObservableHelper.inlineIfPossibleAsObs(() -> readTriggerInternal(triggerLink, options, retryPolicyInstance), retryPolicyInstance);
}
private Mono<ResourceResponse<Trigger>> readTriggerInternal(String triggerLink, RequestOptions options,
DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (StringUtils.isEmpty(triggerLink)) {
throw new IllegalArgumentException("triggerLink");
}
logger.debug("Reading a Trigger. triggerLink [{}]", triggerLink);
String path = Utils.joinPath(triggerLink, null);
Map<String, String> requestHeaders = getRequestHeaders(options);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Read,
ResourceType.Trigger, path, requestHeaders, options);
if (retryPolicyInstance != null){
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, Trigger.class));
} catch (Exception e) {
logger.debug("Failure in reading a Trigger due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
/** Reads the feed of triggers under the given collection. */
@Override
public Flux<FeedResponse<Trigger>> readTriggers(String collectionLink, FeedOptions options) {
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }
    final String triggersFeedPath = Utils.joinPath(collectionLink, Paths.TRIGGERS_PATH_SEGMENT);
    return readFeed(options, ResourceType.Trigger, Trigger.class, triggersFeedPath);
}

/** Queries triggers with a raw SQL string; delegates to the {@link SqlQuerySpec} overload. */
@Override
public Flux<FeedResponse<Trigger>> queryTriggers(String collectionLink, String query,
                                                 FeedOptions options) {
    return queryTriggers(collectionLink, new SqlQuerySpec(query), options);
}

/** Queries triggers with a parameterized query spec. */
@Override
public Flux<FeedResponse<Trigger>> queryTriggers(String collectionLink, SqlQuerySpec querySpec,
                                                 FeedOptions options) {
    return createQuery(collectionLink, querySpec, options, Trigger.class, ResourceType.Trigger);
}
/** Creates a UserDefinedFunction under the given collection; retried via the session-token-reset policy. */
@Override
public Mono<ResourceResponse<UserDefinedFunction>> createUserDefinedFunction(String collectionLink,
UserDefinedFunction udf, RequestOptions options) {
DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy();
return ObservableHelper.inlineIfPossibleAsObs(() -> createUserDefinedFunctionInternal(collectionLink, udf, options, retryPolicyInstance), retryPolicyInstance);
}
// Performs one create attempt; may be re-invoked by the retry policy.
// NOTE(review): a null udf surfaces as an NPE (via udf.getId()) wrapped in the error Mono,
// unlike sibling methods that throw IllegalArgumentException — TODO confirm this is intended.
private Mono<ResourceResponse<UserDefinedFunction>> createUserDefinedFunctionInternal(String collectionLink,
UserDefinedFunction udf, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
logger.debug("Creating a UserDefinedFunction. collectionLink [{}], udf id [{}]", collectionLink,
udf.getId());
RxDocumentServiceRequest request = getUserDefinedFunctionRequest(collectionLink, udf, options,
OperationType.Create);
if (retryPolicyInstance != null){
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.create(request, retryPolicyInstance).map(response -> toResourceResponse(response, UserDefinedFunction.class));
} catch (Exception e) {
logger.debug("Failure in creating a UserDefinedFunction due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
/** Upserts a UserDefinedFunction under the given collection; retried via the session-token-reset policy. */
@Override
public Mono<ResourceResponse<UserDefinedFunction>> upsertUserDefinedFunction(String collectionLink,
UserDefinedFunction udf, RequestOptions options) {
DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy();
return ObservableHelper.inlineIfPossibleAsObs(() -> upsertUserDefinedFunctionInternal(collectionLink, udf, options, retryPolicyInstance), retryPolicyInstance);
}
// Performs one upsert attempt; may be re-invoked by the retry policy.
private Mono<ResourceResponse<UserDefinedFunction>> upsertUserDefinedFunctionInternal(String collectionLink,
UserDefinedFunction udf, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
logger.debug("Upserting a UserDefinedFunction. collectionLink [{}], udf id [{}]", collectionLink,
udf.getId());
RxDocumentServiceRequest request = getUserDefinedFunctionRequest(collectionLink, udf, options,
OperationType.Upsert);
if (retryPolicyInstance != null){
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.upsert(request, retryPolicyInstance).map(response -> toResourceResponse(response, UserDefinedFunction.class));
} catch (Exception e) {
logger.debug("Failure in upserting a UserDefinedFunction due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
/** Replaces a UserDefinedFunction; retried via the session-token-reset policy. */
@Override
public Mono<ResourceResponse<UserDefinedFunction>> replaceUserDefinedFunction(UserDefinedFunction udf,
RequestOptions options) {
DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy();
return ObservableHelper.inlineIfPossibleAsObs(() -> replaceUserDefinedFunctionInternal(udf, options, retryPolicyInstance), retryPolicyInstance);
}
// Performs one replace attempt; may be re-invoked by the retry policy.
private Mono<ResourceResponse<UserDefinedFunction>> replaceUserDefinedFunctionInternal(UserDefinedFunction udf,
RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (udf == null) {
throw new IllegalArgumentException("udf");
}
logger.debug("Replacing a UserDefinedFunction. udf id [{}]", udf.getId());
validateResource(udf);
// Replace addresses the UDF's self link directly.
String path = Utils.joinPath(udf.getSelfLink(), null);
Map<String, String> requestHeaders = this.getRequestHeaders(options);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Replace,
ResourceType.UserDefinedFunction, path, udf, requestHeaders, options);
if (retryPolicyInstance != null){
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.replace(request, retryPolicyInstance).map(response -> toResourceResponse(response, UserDefinedFunction.class));
} catch (Exception e) {
logger.debug("Failure in replacing a UserDefinedFunction due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
/** Deletes the UserDefinedFunction identified by its link; retried via the session-token-reset policy. */
@Override
public Mono<ResourceResponse<UserDefinedFunction>> deleteUserDefinedFunction(String udfLink,
RequestOptions options) {
DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy();
return ObservableHelper.inlineIfPossibleAsObs(() -> deleteUserDefinedFunctionInternal(udfLink, options, retryPolicyInstance), retryPolicyInstance);
}
// Performs one delete attempt; may be re-invoked by the retry policy.
private Mono<ResourceResponse<UserDefinedFunction>> deleteUserDefinedFunctionInternal(String udfLink,
RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (StringUtils.isEmpty(udfLink)) {
throw new IllegalArgumentException("udfLink");
}
logger.debug("Deleting a UserDefinedFunction. udfLink [{}]", udfLink);
String path = Utils.joinPath(udfLink, null);
Map<String, String> requestHeaders = this.getRequestHeaders(options);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Delete,
ResourceType.UserDefinedFunction, path, requestHeaders, options);
if (retryPolicyInstance != null){
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.delete(request, retryPolicyInstance).map(response -> toResourceResponse(response, UserDefinedFunction.class));
} catch (Exception e) {
logger.debug("Failure in deleting a UserDefinedFunction due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
/** Reads the UserDefinedFunction identified by its link; retried via the session-token-reset policy. */
@Override
public Mono<ResourceResponse<UserDefinedFunction>> readUserDefinedFunction(String udfLink,
RequestOptions options) {
DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy();
return ObservableHelper.inlineIfPossibleAsObs(() -> readUserDefinedFunctionInternal(udfLink, options, retryPolicyInstance), retryPolicyInstance);
}
// Performs one read attempt; may be re-invoked by the retry policy.
private Mono<ResourceResponse<UserDefinedFunction>> readUserDefinedFunctionInternal(String udfLink,
RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (StringUtils.isEmpty(udfLink)) {
throw new IllegalArgumentException("udfLink");
}
logger.debug("Reading a UserDefinedFunction. udfLink [{}]", udfLink);
String path = Utils.joinPath(udfLink, null);
Map<String, String> requestHeaders = this.getRequestHeaders(options);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Read,
ResourceType.UserDefinedFunction, path, requestHeaders, options);
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, UserDefinedFunction.class));
} catch (Exception e) {
logger.debug("Failure in reading a UserDefinedFunction due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
/** Reads the feed of user-defined functions under the given collection. */
@Override
public Flux<FeedResponse<UserDefinedFunction>> readUserDefinedFunctions(String collectionLink,
                                                                        FeedOptions options) {
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }
    final String udfFeedPath = Utils.joinPath(collectionLink, Paths.USER_DEFINED_FUNCTIONS_PATH_SEGMENT);
    return readFeed(options, ResourceType.UserDefinedFunction, UserDefinedFunction.class, udfFeedPath);
}

/** Queries UDFs with a raw SQL string; delegates to the {@link SqlQuerySpec} overload. */
@Override
public Flux<FeedResponse<UserDefinedFunction>> queryUserDefinedFunctions(String collectionLink,
                                                                         String query, FeedOptions options) {
    return queryUserDefinedFunctions(collectionLink, new SqlQuerySpec(query), options);
}

/** Queries UDFs with a parameterized query spec. */
@Override
public Flux<FeedResponse<UserDefinedFunction>> queryUserDefinedFunctions(String collectionLink,
                                                                         SqlQuerySpec querySpec, FeedOptions options) {
    return createQuery(collectionLink, querySpec, options, UserDefinedFunction.class, ResourceType.UserDefinedFunction);
}
/** Reads the Conflict identified by its link; retried via the session-token-reset policy. */
@Override
public Mono<ResourceResponse<Conflict>> readConflict(String conflictLink, RequestOptions options) {
    DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(() -> readConflictInternal(conflictLink, options, retryPolicyInstance), retryPolicyInstance);
}

// Performs one read attempt; may be re-invoked by the retry policy.
private Mono<ResourceResponse<Conflict>> readConflictInternal(String conflictLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(conflictLink)) {
            throw new IllegalArgumentException("conflictLink");
        }
        logger.debug("Reading a Conflict. conflictLink [{}]", conflictLink);
        String path = Utils.joinPath(conflictLink, null);
        Map<String, String> requestHeaders = getRequestHeaders(options);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Read,
            ResourceType.Conflict, path, requestHeaders, options);

        Mono<RxDocumentServiceRequest> reqObs = addPartitionKeyInformation(request, null, null, options);
        return reqObs.flatMap(req -> {
            // FIX: use the request emitted by addPartitionKeyInformation ('req'), not the
            // captured 'request' variable — the lambda parameter was previously ignored,
            // which would drop partition-key information if a different/updated request is emitted.
            if (retryPolicyInstance != null) {
                retryPolicyInstance.onBeforeSendRequest(req);
            }
            return this.read(req, retryPolicyInstance).map(response -> toResourceResponse(response, Conflict.class));
        });
    } catch (Exception e) {
        logger.debug("Failure in reading a Conflict due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/** Reads the feed of conflicts under the given collection. */
@Override
public Flux<FeedResponse<Conflict>> readConflicts(String collectionLink, FeedOptions options) {
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }
    final String conflictsFeedPath = Utils.joinPath(collectionLink, Paths.CONFLICTS_PATH_SEGMENT);
    return readFeed(options, ResourceType.Conflict, Conflict.class, conflictsFeedPath);
}

/** Queries conflicts with a raw SQL string; delegates to the {@link SqlQuerySpec} overload. */
@Override
public Flux<FeedResponse<Conflict>> queryConflicts(String collectionLink, String query,
                                                   FeedOptions options) {
    return queryConflicts(collectionLink, new SqlQuerySpec(query), options);
}

/** Queries conflicts with a parameterized query spec. */
@Override
public Flux<FeedResponse<Conflict>> queryConflicts(String collectionLink, SqlQuerySpec querySpec,
                                                   FeedOptions options) {
    return createQuery(collectionLink, querySpec, options, Conflict.class, ResourceType.Conflict);
}
/** Deletes the Conflict identified by its link; retried via the session-token-reset policy. */
@Override
public Mono<ResourceResponse<Conflict>> deleteConflict(String conflictLink, RequestOptions options) {
    DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(() -> deleteConflictInternal(conflictLink, options, retryPolicyInstance), retryPolicyInstance);
}

// Performs one delete attempt; may be re-invoked by the retry policy.
private Mono<ResourceResponse<Conflict>> deleteConflictInternal(String conflictLink, RequestOptions options,
                                                                DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(conflictLink)) {
            throw new IllegalArgumentException("conflictLink");
        }
        logger.debug("Deleting a Conflict. conflictLink [{}]", conflictLink);
        String path = Utils.joinPath(conflictLink, null);
        Map<String, String> requestHeaders = getRequestHeaders(options);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Delete,
            ResourceType.Conflict, path, requestHeaders, options);

        Mono<RxDocumentServiceRequest> reqObs = addPartitionKeyInformation(request, null, null, options);
        return reqObs.flatMap(req -> {
            // FIX: use the request emitted by addPartitionKeyInformation ('req') instead of the
            // captured 'request' — the lambda parameter was previously ignored, which would drop
            // partition-key information if a different/updated request is emitted.
            if (retryPolicyInstance != null) {
                retryPolicyInstance.onBeforeSendRequest(req);
            }
            return this.delete(req, retryPolicyInstance).map(response -> toResourceResponse(response, Conflict.class));
        });
    } catch (Exception e) {
        logger.debug("Failure in deleting a Conflict due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/** Creates a User under the given database; retried via the session-token-reset policy. */
@Override
public Mono<ResourceResponse<User>> createUser(String databaseLink, User user, RequestOptions options) {
DocumentClientRetryPolicy documentClientRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
return ObservableHelper.inlineIfPossibleAsObs(() -> createUserInternal(databaseLink, user, options, documentClientRetryPolicy), documentClientRetryPolicy);
}
// Performs one create attempt; argument validation happens in getUserRequest.
private Mono<ResourceResponse<User>> createUserInternal(String databaseLink, User user, RequestOptions options, DocumentClientRetryPolicy documentClientRetryPolicy) {
try {
logger.debug("Creating a User. databaseLink [{}], user id [{}]", databaseLink, user.getId());
RxDocumentServiceRequest request = getUserRequest(databaseLink, user, options, OperationType.Create);
return this.create(request, documentClientRetryPolicy).map(response -> toResourceResponse(response, User.class));
} catch (Exception e) {
logger.debug("Failure in creating a User due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
/** Upserts a User under the given database; retried via the session-token-reset policy. */
@Override
public Mono<ResourceResponse<User>> upsertUser(String databaseLink, User user, RequestOptions options) {
DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy();
return ObservableHelper.inlineIfPossibleAsObs(() -> upsertUserInternal(databaseLink, user, options, retryPolicyInstance), retryPolicyInstance);
}
// Performs one upsert attempt; may be re-invoked by the retry policy.
private Mono<ResourceResponse<User>> upsertUserInternal(String databaseLink, User user, RequestOptions options,
DocumentClientRetryPolicy retryPolicyInstance) {
try {
logger.debug("Upserting a User. databaseLink [{}], user id [{}]", databaseLink, user.getId());
RxDocumentServiceRequest request = getUserRequest(databaseLink, user, options, OperationType.Upsert);
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.upsert(request, retryPolicyInstance).map(response -> toResourceResponse(response, User.class));
} catch (Exception e) {
logger.debug("Failure in upserting a User due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
// Builds the service request shared by user create/upsert.
// @throws IllegalArgumentException when databaseLink is empty or user is null.
private RxDocumentServiceRequest getUserRequest(String databaseLink, User user, RequestOptions options,
OperationType operationType) {
if (StringUtils.isEmpty(databaseLink)) {
throw new IllegalArgumentException("databaseLink");
}
if (user == null) {
throw new IllegalArgumentException("user");
}
RxDocumentClientImpl.validateResource(user);
String path = Utils.joinPath(databaseLink, Paths.USERS_PATH_SEGMENT);
Map<String, String> requestHeaders = getRequestHeaders(options);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(operationType, ResourceType.User, path, user,
requestHeaders, options);
return request;
}
/** Replaces a User; retried via the session-token-reset policy. */
@Override
public Mono<ResourceResponse<User>> replaceUser(User user, RequestOptions options) {
DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy();
return ObservableHelper.inlineIfPossibleAsObs(() -> replaceUserInternal(user, options, retryPolicyInstance), retryPolicyInstance);
}
// Performs one replace attempt; may be re-invoked by the retry policy.
private Mono<ResourceResponse<User>> replaceUserInternal(User user, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (user == null) {
throw new IllegalArgumentException("user");
}
logger.debug("Replacing a User. user id [{}]", user.getId());
RxDocumentClientImpl.validateResource(user);
// Replace addresses the user's self link directly.
String path = Utils.joinPath(user.getSelfLink(), null);
Map<String, String> requestHeaders = getRequestHeaders(options);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Replace,
ResourceType.User, path, user, requestHeaders, options);
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.replace(request, retryPolicyInstance).map(response -> toResourceResponse(response, User.class));
} catch (Exception e) {
logger.debug("Failure in replacing a User due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
/**
 * Deletes the User identified by its link; retried via the session-token-reset policy.
 *
 * @param userLink self link of the user to delete; must be non-empty.
 * @param options  request options (may be null).
 * @return a Mono emitting the service response, or an error.
 */
@Override  // FIX: was missing; every sibling AsyncDocumentClient method carries @Override.
public Mono<ResourceResponse<User>> deleteUser(String userLink, RequestOptions options) {
    DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(() -> deleteUserInternal(userLink, options, retryPolicyInstance), retryPolicyInstance);
}

// Performs one delete attempt; may be re-invoked by the retry policy.
private Mono<ResourceResponse<User>> deleteUserInternal(String userLink, RequestOptions options,
                                                        DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(userLink)) {
            throw new IllegalArgumentException("userLink");
        }
        logger.debug("Deleting a User. userLink [{}]", userLink);
        String path = Utils.joinPath(userLink, null);
        Map<String, String> requestHeaders = getRequestHeaders(options);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Delete,
            ResourceType.User, path, requestHeaders, options);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        return this.delete(request, retryPolicyInstance).map(response -> toResourceResponse(response, User.class));
    } catch (Exception e) {
        logger.debug("Failure in deleting a User due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/** Reads the User identified by its link; retried via the session-token-reset policy. */
@Override
public Mono<ResourceResponse<User>> readUser(String userLink, RequestOptions options) {
DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy();
return ObservableHelper.inlineIfPossibleAsObs(() -> readUserInternal(userLink, options, retryPolicyInstance), retryPolicyInstance);
}
// Performs one read attempt; may be re-invoked by the retry policy.
private Mono<ResourceResponse<User>> readUserInternal(String userLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (StringUtils.isEmpty(userLink)) {
throw new IllegalArgumentException("userLink");
}
logger.debug("Reading a User. userLink [{}]", userLink);
String path = Utils.joinPath(userLink, null);
Map<String, String> requestHeaders = getRequestHeaders(options);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Read,
ResourceType.User, path, requestHeaders, options);
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, User.class));
} catch (Exception e) {
logger.debug("Failure in reading a User due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
/** Reads the feed of users under the given database. */
@Override
public Flux<FeedResponse<User>> readUsers(String databaseLink, FeedOptions options) {
    if (StringUtils.isEmpty(databaseLink)) {
        throw new IllegalArgumentException("databaseLink");
    }
    final String usersFeedPath = Utils.joinPath(databaseLink, Paths.USERS_PATH_SEGMENT);
    return readFeed(options, ResourceType.User, User.class, usersFeedPath);
}

/** Queries users with a raw SQL string; delegates to the {@link SqlQuerySpec} overload. */
@Override
public Flux<FeedResponse<User>> queryUsers(String databaseLink, String query, FeedOptions options) {
    return queryUsers(databaseLink, new SqlQuerySpec(query), options);
}

/** Queries users with a parameterized query spec. */
@Override
public Flux<FeedResponse<User>> queryUsers(String databaseLink, SqlQuerySpec querySpec,
                                           FeedOptions options) {
    return createQuery(databaseLink, querySpec, options, User.class, ResourceType.User);
}
/** Creates a Permission under the given user; retried via the session-token-reset policy. */
@Override
public Mono<ResourceResponse<Permission>> createPermission(String userLink, Permission permission,
                                                           RequestOptions options) {
    DocumentClientRetryPolicy documentClientRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    // FIX: pass the SAME policy instance to the retry loop that the internal call uses.
    // Previously a second, independent policy was created here, so retry decisions were made on
    // a policy that never observed the outgoing request (inconsistent with every sibling method).
    return ObservableHelper.inlineIfPossibleAsObs(() -> createPermissionInternal(userLink, permission, options, documentClientRetryPolicy), documentClientRetryPolicy);
}

// Performs one create attempt; argument validation happens in getPermissionRequest.
private Mono<ResourceResponse<Permission>> createPermissionInternal(String userLink, Permission permission,
                                                                    RequestOptions options, DocumentClientRetryPolicy documentClientRetryPolicy) {
    try {
        logger.debug("Creating a Permission. userLink [{}], permission id [{}]", userLink, permission.getId());
        RxDocumentServiceRequest request = getPermissionRequest(userLink, permission, options,
            OperationType.Create);
        return this.create(request, documentClientRetryPolicy).map(response -> toResourceResponse(response, Permission.class));
    } catch (Exception e) {
        logger.debug("Failure in creating a Permission due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/** Upserts a Permission under the given user; retried via the session-token-reset policy. */
@Override
public Mono<ResourceResponse<Permission>> upsertPermission(String userLink, Permission permission,
RequestOptions options) {
DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy();
return ObservableHelper.inlineIfPossibleAsObs(() -> upsertPermissionInternal(userLink, permission, options, retryPolicyInstance), retryPolicyInstance);
}
// Performs one upsert attempt; may be re-invoked by the retry policy.
private Mono<ResourceResponse<Permission>> upsertPermissionInternal(String userLink, Permission permission,
RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
logger.debug("Upserting a Permission. userLink [{}], permission id [{}]", userLink, permission.getId());
RxDocumentServiceRequest request = getPermissionRequest(userLink, permission, options,
OperationType.Upsert);
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.upsert(request, retryPolicyInstance).map(response -> toResourceResponse(response, Permission.class));
} catch (Exception e) {
logger.debug("Failure in upserting a Permission due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
// Builds the service request shared by permission create/upsert.
// @throws IllegalArgumentException when userLink is empty or permission is null.
private RxDocumentServiceRequest getPermissionRequest(String userLink, Permission permission,
RequestOptions options, OperationType operationType) {
if (StringUtils.isEmpty(userLink)) {
throw new IllegalArgumentException("userLink");
}
if (permission == null) {
throw new IllegalArgumentException("permission");
}
RxDocumentClientImpl.validateResource(permission);
String path = Utils.joinPath(userLink, Paths.PERMISSIONS_PATH_SEGMENT);
Map<String, String> requestHeaders = getRequestHeaders(options);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(operationType, ResourceType.Permission, path,
permission, requestHeaders, options);
return request;
}
/** Replaces a Permission; retried via the session-token-reset policy. */
@Override
public Mono<ResourceResponse<Permission>> replacePermission(Permission permission, RequestOptions options) {
DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy();
return ObservableHelper.inlineIfPossibleAsObs(() -> replacePermissionInternal(permission, options, retryPolicyInstance), retryPolicyInstance);
}
// Performs one replace attempt; may be re-invoked by the retry policy.
private Mono<ResourceResponse<Permission>> replacePermissionInternal(Permission permission, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (permission == null) {
throw new IllegalArgumentException("permission");
}
logger.debug("Replacing a Permission. permission id [{}]", permission.getId());
RxDocumentClientImpl.validateResource(permission);
// Replace addresses the permission's self link directly.
String path = Utils.joinPath(permission.getSelfLink(), null);
Map<String, String> requestHeaders = getRequestHeaders(options);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Replace,
ResourceType.Permission, path, permission, requestHeaders, options);
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.replace(request, retryPolicyInstance).map(response -> toResourceResponse(response, Permission.class));
} catch (Exception e) {
logger.debug("Failure in replacing a Permission due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
/** Deletes the Permission identified by its link; retried via the session-token-reset policy. */
@Override
public Mono<ResourceResponse<Permission>> deletePermission(String permissionLink, RequestOptions options) {
DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy();
return ObservableHelper.inlineIfPossibleAsObs(() -> deletePermissionInternal(permissionLink, options, retryPolicyInstance), retryPolicyInstance);
}
// Performs one delete attempt; may be re-invoked by the retry policy.
private Mono<ResourceResponse<Permission>> deletePermissionInternal(String permissionLink, RequestOptions options,
DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (StringUtils.isEmpty(permissionLink)) {
throw new IllegalArgumentException("permissionLink");
}
logger.debug("Deleting a Permission. permissionLink [{}]", permissionLink);
String path = Utils.joinPath(permissionLink, null);
Map<String, String> requestHeaders = getRequestHeaders(options);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Delete,
ResourceType.Permission, path, requestHeaders, options);
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.delete(request, retryPolicyInstance).map(response -> toResourceResponse(response, Permission.class));
} catch (Exception e) {
logger.debug("Failure in deleting a Permission due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
/** Reads the Permission identified by its link; retried via the session-token-reset policy. */
@Override
public Mono<ResourceResponse<Permission>> readPermission(String permissionLink, RequestOptions options) {
DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy();
return ObservableHelper.inlineIfPossibleAsObs(() -> readPermissionInternal(permissionLink, options, retryPolicyInstance), retryPolicyInstance);
}
// Performs one read attempt; may be re-invoked by the retry policy.
private Mono<ResourceResponse<Permission>> readPermissionInternal(String permissionLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance ) {
try {
if (StringUtils.isEmpty(permissionLink)) {
throw new IllegalArgumentException("permissionLink");
}
logger.debug("Reading a Permission. permissionLink [{}]", permissionLink);
String path = Utils.joinPath(permissionLink, null);
Map<String, String> requestHeaders = getRequestHeaders(options);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Read,
ResourceType.Permission, path, requestHeaders, options);
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, Permission.class));
} catch (Exception e) {
logger.debug("Failure in reading a Permission due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
/** Reads the feed of permissions under the given user. */
@Override
public Flux<FeedResponse<Permission>> readPermissions(String userLink, FeedOptions options) {
    if (StringUtils.isEmpty(userLink)) {
        throw new IllegalArgumentException("userLink");
    }
    final String permissionsFeedPath = Utils.joinPath(userLink, Paths.PERMISSIONS_PATH_SEGMENT);
    return readFeed(options, ResourceType.Permission, Permission.class, permissionsFeedPath);
}

/** Queries permissions with a raw SQL string; delegates to the {@link SqlQuerySpec} overload. */
@Override
public Flux<FeedResponse<Permission>> queryPermissions(String userLink, String query,
                                                       FeedOptions options) {
    return queryPermissions(userLink, new SqlQuerySpec(query), options);
}

/** Queries permissions with a parameterized query spec. */
@Override
public Flux<FeedResponse<Permission>> queryPermissions(String userLink, SqlQuerySpec querySpec,
                                                       FeedOptions options) {
    return createQuery(userLink, querySpec, options, Permission.class, ResourceType.Permission);
}
/** Replaces an Offer (throughput resource); retried via the session-token-reset policy. */
@Override
public Mono<ResourceResponse<Offer>> replaceOffer(Offer offer) {
DocumentClientRetryPolicy documentClientRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
return ObservableHelper.inlineIfPossibleAsObs(() -> replaceOfferInternal(offer, documentClientRetryPolicy), documentClientRetryPolicy);
}
// Performs one replace attempt; offers take no request options or headers.
private Mono<ResourceResponse<Offer>> replaceOfferInternal(Offer offer, DocumentClientRetryPolicy documentClientRetryPolicy) {
try {
if (offer == null) {
throw new IllegalArgumentException("offer");
}
logger.debug("Replacing an Offer. offer id [{}]", offer.getId());
RxDocumentClientImpl.validateResource(offer);
String path = Utils.joinPath(offer.getSelfLink(), null);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Replace,
ResourceType.Offer, path, offer, null, null);
return this.replace(request, documentClientRetryPolicy).map(response -> toResourceResponse(response, Offer.class));
} catch (Exception e) {
logger.debug("Failure in replacing an Offer due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
/** Reads the Offer identified by its link; retried via the session-token-reset policy. */
@Override
public Mono<ResourceResponse<Offer>> readOffer(String offerLink) {
DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy();
return ObservableHelper.inlineIfPossibleAsObs(() -> readOfferInternal(offerLink, retryPolicyInstance), retryPolicyInstance);
}
// Performs one read attempt; may be re-invoked by the retry policy.
private Mono<ResourceResponse<Offer>> readOfferInternal(String offerLink, DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (StringUtils.isEmpty(offerLink)) {
throw new IllegalArgumentException("offerLink");
}
logger.debug("Reading an Offer. offerLink [{}]", offerLink);
String path = Utils.joinPath(offerLink, null);
// The cast selects the (headers, options) overload with no headers.
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Read,
ResourceType.Offer, path, (HashMap<String, String>)null, null);
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, Offer.class));
} catch (Exception e) {
logger.debug("Failure in reading an Offer due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
/** Reads the feed of all offers for this account. */
@Override
public Flux<FeedResponse<Offer>> readOffers(FeedOptions options) {
return readFeed(options, ResourceType.Offer, Offer.class,
Utils.joinPath(Paths.OFFERS_PATH_SEGMENT, null));
}
// Reads a paginated feed of collection-child resources, resolving the owning collection (via
// the collection cache) and attaching partition-key information before each page request.
// Page size comes from FeedOptions.maxItemCount (-1 = server default); each page is retried
// independently with a fresh session-token-reset retry policy.
private <T extends Resource> Flux<FeedResponse<T>> readFeedCollectionChild(FeedOptions options, ResourceType resourceType,
Class<T> klass, String resourceLink) {
if (options == null) {
options = new FeedOptions();
}
int maxPageSize = options.maxItemCount() != null ? options.maxItemCount() : -1;
final FeedOptions finalFeedOptions = options;
RequestOptions requestOptions = new RequestOptions();
requestOptions.setPartitionKey(options.partitionKey());
// Builds one page request, carrying the continuation token and page-size headers.
BiFunction<String, Integer, RxDocumentServiceRequest> createRequestFunc = (continuationToken, pageSize) -> {
Map<String, String> requestHeaders = new HashMap<>();
if (continuationToken != null) {
requestHeaders.put(HttpConstants.HttpHeaders.CONTINUATION, continuationToken);
}
requestHeaders.put(HttpConstants.HttpHeaders.PAGE_SIZE, Integer.toString(pageSize));
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.ReadFeed,
resourceType, resourceLink, requestHeaders, finalFeedOptions);
return request;
};
// Executes one page request: resolve the collection, add partition-key info, then read the feed.
Function<RxDocumentServiceRequest, Mono<FeedResponse<T>>> executeFunc = request -> {
return ObservableHelper.inlineIfPossibleAsObs(() -> {
Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = this.collectionCache.resolveCollectionAsync(null, request);
Mono<RxDocumentServiceRequest> requestObs = this.addPartitionKeyInformation(request, null, null, requestOptions, collectionObs);
return requestObs.flatMap(req -> this.readFeed(req)
.map(response -> toFeedResponsePage(response, klass)));
}, this.resetSessionTokenRetryPolicy.getRequestPolicy());
};
return Paginator.getPaginatedQueryResultAsObservable(options, createRequestFunc, executeFunc, klass, maxPageSize);
}
/**
 * Reads a paged feed of top-level resources (no collection resolution needed).
 *
 * @param options      feed options; a default instance is used when {@code null}
 * @param resourceType resource type of the feed items
 * @param klass        class used to deserialize each feed page
 * @param resourceLink link of the feed to read
 * @return a {@link Flux} of feed-response pages of {@code T}
 */
private <T extends Resource> Flux<FeedResponse<T>> readFeed(FeedOptions options, ResourceType resourceType, Class<T> klass, String resourceLink) {
    if (options == null) {
        options = new FeedOptions();
    }
    // -1 means "no page-size limit" for the paginator.
    int maxPageSize = options.maxItemCount() != null ? options.maxItemCount() : -1;
    final FeedOptions finalFeedOptions = options;
    // Builds one ReadFeed request per page, propagating the continuation token.
    BiFunction<String, Integer, RxDocumentServiceRequest> createRequestFunc = (continuationToken, pageSize) -> {
        Map<String, String> requestHeaders = new HashMap<>();
        if (continuationToken != null) {
            requestHeaders.put(HttpConstants.HttpHeaders.CONTINUATION, continuationToken);
        }
        requestHeaders.put(HttpConstants.HttpHeaders.PAGE_SIZE, Integer.toString(pageSize));
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.ReadFeed,
            resourceType, resourceLink, requestHeaders, finalFeedOptions);
        return request;
    };
    Function<RxDocumentServiceRequest, Mono<FeedResponse<T>>> executeFunc = request -> {
        return ObservableHelper.inlineIfPossibleAsObs(() -> readFeed(request).map(response -> toFeedResponsePage(response, klass)),
            this.resetSessionTokenRetryPolicy.getRequestPolicy());
    };
    return Paginator.getPaginatedQueryResultAsObservable(options, createRequestFunc, executeFunc, klass, maxPageSize);
}
/** Queries offers with a raw SQL string; delegates to the {@link SqlQuerySpec} overload. */
@Override
public Flux<FeedResponse<Offer>> queryOffers(String query, FeedOptions options) {
    SqlQuerySpec querySpec = new SqlQuerySpec(query);
    return queryOffers(querySpec, options);
}
/** Queries offers with a structured query spec. */
@Override
public Flux<FeedResponse<Offer>> queryOffers(SqlQuerySpec querySpec, FeedOptions options) {
    // Offers are account-level, so there is no parent resource link.
    return createQuery(null, querySpec, options, Offer.class, ResourceType.Offer);
}
/**
 * Reads the {@link DatabaseAccount} for this client's endpoint, retrying per the
 * session-token-reset retry policy.
 */
@Override
public Mono<DatabaseAccount> getDatabaseAccount() {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> getDatabaseAccountInternal(retryPolicy),
        retryPolicy);
}
/**
 * Issues the read of the database account resource.
 *
 * @param documentClientRetryPolicy retry policy applied to the read
 * @return a {@link Mono} emitting the database account, or an error Mono on failure
 */
private Mono<DatabaseAccount> getDatabaseAccountInternal(DocumentClientRetryPolicy documentClientRetryPolicy) {
    try {
        logger.debug("Getting Database Account");
        // The database-account resource lives at the service root, hence the empty path.
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Read,
            ResourceType.DatabaseAccount, "",
            (HashMap<String, String>) null,
            null);
        return this.read(request, documentClientRetryPolicy).map(response -> toDatabaseAccount(response));
    } catch (Exception e) {
        logger.debug("Failure in getting Database Account due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/** Returns the session container tracking session tokens for this client. */
public Object getSession() {
    return this.sessionContainer;
}
/**
 * Replaces this client's session container. The argument must be a
 * {@link SessionContainer}; the {@code Object} signature keeps the internal
 * type off the public surface, so the cast here can throw ClassCastException
 * for any other type.
 */
public void setSession(Object sessionContainer) {
    this.sessionContainer = (SessionContainer) sessionContainer;
}
/** Returns the partition-key-range cache (populated by {@code init()}). */
public RxPartitionKeyRangeCache getPartitionKeyRangeCache() {
    return partitionKeyRangeCache;
}
/**
 * Reads the database account from a specific regional endpoint (used by the global
 * endpoint manager when probing regions).
 *
 * @param endpoint regional endpoint to query
 * @return a deferred {@link Flux} emitting the account; failures are logged and propagated
 */
public Flux<DatabaseAccount> getDatabaseAccountFromEndpoint(URI endpoint) {
    return Flux.defer(() -> {
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Read,
            ResourceType.DatabaseAccount, "", null, (Object) null);
        this.populateHeaders(request, RequestVerb.GET);
        // Force the request to the given region instead of the default endpoint.
        request.setEndpointOverride(endpoint);
        return this.gatewayProxy.processMessage(request).doOnError(e -> {
            String message = String.format("Failed to retrieve database account information. %s",
                e.getCause() != null
                    ? e.getCause().toString()
                    : e.toString());
            logger.warn(message);
        }).map(rsp -> rsp.getResource(DatabaseAccount.class))
            .doOnNext(databaseAccount -> {
                // Refresh the multi-write flag from the freshest account information.
                this.useMultipleWriteLocations = this.connectionPolicy.getUsingMultipleWriteLocations()
                    && BridgeInternal.isEnableMultipleWriteLocations(databaseAccount);
            });
    });
}
/**
 * Certain requests must be routed through the gateway even when the client connectivity mode is direct.
 *
 * @param request the service request being dispatched
 * @return the store model (gateway proxy or direct store model) that should process the request
 */
private RxStoreModel getStoreProxy(RxDocumentServiceRequest request) {
    // An explicit gateway-mode flag on the request always wins.
    if (request.UseGatewayMode) {
        return this.gatewayProxy;
    }

    ResourceType resourceType = request.getResourceType();
    OperationType operationType = request.getOperationType();

    // Offers, scripts (except execution) and partition-key-range reads are gateway-only.
    if (resourceType == ResourceType.Offer
            || (resourceType.isScript() && operationType != OperationType.ExecuteJavaScript)
            || resourceType == ResourceType.PartitionKeyRange) {
        return this.gatewayProxy;
    }

    switch (operationType) {
        case Create:
        case Upsert:
            if (resourceType == ResourceType.Database
                    || resourceType == ResourceType.User
                    || resourceType == ResourceType.DocumentCollection
                    || resourceType == ResourceType.Permission) {
                return this.gatewayProxy;
            }
            return this.storeModel;
        case Delete:
            if (resourceType == ResourceType.Database
                    || resourceType == ResourceType.User
                    || resourceType == ResourceType.DocumentCollection) {
                return this.gatewayProxy;
            }
            return this.storeModel;
        case Replace:
        case Read:
            // Collection metadata reads/replaces go through the gateway.
            return resourceType == ResourceType.DocumentCollection ? this.gatewayProxy : this.storeModel;
        default:
            // Queries over collection children without an explicit partition key
            // range identity are routed through the gateway.
            if ((operationType == OperationType.Query || operationType == OperationType.SqlQuery)
                    && Utils.isCollectionChild(resourceType)
                    && request.getPartitionKeyRangeIdentity() == null) {
                return this.gatewayProxy;
            }
            return this.storeModel;
    }
}
/**
 * Shuts the client down: closes the global endpoint manager, the store client
 * factory and the HTTP client. Each close is quiet/best-effort so one failure
 * does not prevent the remaining resources from being released.
 */
@Override
public void close() {
    logger.info("Shutting down ...");
    logger.info("Closing Global Endpoint Manager ...");
    LifeCycleUtils.closeQuietly(this.globalEndpointManager);
    logger.info("Closing StoreClientFactory ...");
    LifeCycleUtils.closeQuietly(this.storeClientFactory);
    logger.info("Shutting down reactorHttpClient ...");
    try {
        this.reactorHttpClient.shutdown();
    } catch (Exception e) {
        // Log and continue: shutdown should not propagate cleanup failures.
        logger.warn("shutting down reactorHttpClient failed", e);
    }
    logger.info("Shutting down completed.");
}
}

class RxDocumentClientImpl implements AsyncDocumentClient, IAuthorizationTokenProvider {
private final static ObjectMapper mapper = Utils.getSimpleObjectMapper();
private final Logger logger = LoggerFactory.getLogger(RxDocumentClientImpl.class);
private final String masterKeyOrResourceToken;
private final URI serviceEndpoint;
private final ConnectionPolicy connectionPolicy;
private final ConsistencyLevel consistencyLevel;
private final BaseAuthorizationTokenProvider authorizationTokenProvider;
private final UserAgentContainer userAgentContainer;
private final boolean hasAuthKeyResourceToken;
private final Configs configs;
private final boolean connectionSharingAcrossClientsEnabled;
private CosmosKeyCredential cosmosKeyCredential;
private CosmosAuthorizationTokenResolver cosmosAuthorizationTokenResolver;
private SessionContainer sessionContainer;
private String firstResourceTokenFromPermissionFeed = StringUtils.EMPTY;
private RxClientCollectionCache collectionCache;
private RxStoreModel gatewayProxy;
private RxStoreModel storeModel;
private GlobalAddressResolver addressResolver;
private RxPartitionKeyRangeCache partitionKeyRangeCache;
private Map<String, List<PartitionKeyAndResourceTokenPair>> resourceTokensMap;
private IRetryPolicyFactory resetSessionTokenRetryPolicy;
/**
* Compatibility mode: Allows to specify compatibility mode used by client when
* making query requests. Should be removed when application/sql is no longer
* supported.
*/
private final QueryCompatibilityMode queryCompatibilityMode = QueryCompatibilityMode.Default;
private final HttpClient reactorHttpClient;
private final GlobalEndpointManager globalEndpointManager;
private final RetryPolicy retryPolicy;
private volatile boolean useMultipleWriteLocations;
private StoreClientFactory storeClientFactory;
private GatewayServiceConfigurationReader gatewayConfigurationReader;
public RxDocumentClientImpl(URI serviceEndpoint,
String masterKeyOrResourceToken,
List<Permission> permissionFeed,
ConnectionPolicy connectionPolicy,
ConsistencyLevel consistencyLevel,
Configs configs,
CosmosAuthorizationTokenResolver cosmosAuthorizationTokenResolver,
CosmosKeyCredential cosmosKeyCredential,
boolean sessionCapturingOverride,
boolean connectionSharingAcrossClientsEnabled) {
this(serviceEndpoint, masterKeyOrResourceToken, permissionFeed, connectionPolicy, consistencyLevel, configs, cosmosKeyCredential, sessionCapturingOverride, connectionSharingAcrossClientsEnabled);
this.cosmosAuthorizationTokenResolver = cosmosAuthorizationTokenResolver;
}
private RxDocumentClientImpl(URI serviceEndpoint,
String masterKeyOrResourceToken,
List<Permission> permissionFeed,
ConnectionPolicy connectionPolicy,
ConsistencyLevel consistencyLevel,
Configs configs,
CosmosKeyCredential cosmosKeyCredential,
boolean sessionCapturingOverrideEnabled,
boolean connectionSharingAcrossClientsEnabled) {
this(serviceEndpoint, masterKeyOrResourceToken, connectionPolicy, consistencyLevel, configs, cosmosKeyCredential, sessionCapturingOverrideEnabled, connectionSharingAcrossClientsEnabled);
if (permissionFeed != null && permissionFeed.size() > 0) {
this.resourceTokensMap = new HashMap<>();
for (Permission permission : permissionFeed) {
String[] segments = StringUtils.split(permission.getResourceLink(),
Constants.Properties.PATH_SEPARATOR.charAt(0));
if (segments.length <= 0) {
throw new IllegalArgumentException("resourceLink");
}
List<PartitionKeyAndResourceTokenPair> partitionKeyAndResourceTokenPairs = null;
PathInfo pathInfo = new PathInfo(false, StringUtils.EMPTY, StringUtils.EMPTY, false);
if (!PathsHelper.tryParsePathSegments(permission.getResourceLink(), pathInfo, null)) {
throw new IllegalArgumentException(permission.getResourceLink());
}
partitionKeyAndResourceTokenPairs = resourceTokensMap.get(pathInfo.resourceIdOrFullName);
if (partitionKeyAndResourceTokenPairs == null) {
partitionKeyAndResourceTokenPairs = new ArrayList<>();
this.resourceTokensMap.put(pathInfo.resourceIdOrFullName, partitionKeyAndResourceTokenPairs);
}
PartitionKey partitionKey = permission.getResourcePartitionKey();
partitionKeyAndResourceTokenPairs.add(new PartitionKeyAndResourceTokenPair(
partitionKey != null ? BridgeInternal.getPartitionKeyInternal(partitionKey) : PartitionKeyInternal.Empty,
permission.getToken()));
logger.debug("Initializing resource token map , with map key [{}] , partition key [{}] and resource token",
pathInfo.resourceIdOrFullName, partitionKey != null ? partitionKey.toString() : null, permission.getToken());
}
if(this.resourceTokensMap.isEmpty()) {
throw new IllegalArgumentException("permissionFeed");
}
String firstToken = permissionFeed.get(0).getToken();
if(ResourceTokenAuthorizationHelper.isResourceToken(firstToken)) {
this.firstResourceTokenFromPermissionFeed = firstToken;
}
}
}
RxDocumentClientImpl(URI serviceEndpoint,
String masterKeyOrResourceToken,
ConnectionPolicy connectionPolicy,
ConsistencyLevel consistencyLevel,
Configs configs,
CosmosKeyCredential cosmosKeyCredential,
boolean sessionCapturingOverrideEnabled,
boolean connectionSharingAcrossClientsEnabled) {
logger.info(
"Initializing DocumentClient with"
+ " serviceEndpoint [{}], connectionPolicy [{}], consistencyLevel [{}], directModeProtocol [{}]",
serviceEndpoint, connectionPolicy, consistencyLevel, configs.getProtocol());
this.connectionSharingAcrossClientsEnabled = connectionSharingAcrossClientsEnabled;
this.configs = configs;
this.masterKeyOrResourceToken = masterKeyOrResourceToken;
this.serviceEndpoint = serviceEndpoint;
this.cosmosKeyCredential = cosmosKeyCredential;
if (this.cosmosKeyCredential != null) {
hasAuthKeyResourceToken = false;
this.authorizationTokenProvider = new BaseAuthorizationTokenProvider(this.cosmosKeyCredential);
} else if (masterKeyOrResourceToken != null && ResourceTokenAuthorizationHelper.isResourceToken(masterKeyOrResourceToken)) {
this.authorizationTokenProvider = null;
hasAuthKeyResourceToken = true;
} else if(masterKeyOrResourceToken != null && !ResourceTokenAuthorizationHelper.isResourceToken(masterKeyOrResourceToken)){
this.cosmosKeyCredential = new CosmosKeyCredential(this.masterKeyOrResourceToken);
hasAuthKeyResourceToken = false;
this.authorizationTokenProvider = new BaseAuthorizationTokenProvider(this.cosmosKeyCredential);
} else {
hasAuthKeyResourceToken = false;
this.authorizationTokenProvider = null;
}
if (connectionPolicy != null) {
this.connectionPolicy = connectionPolicy;
} else {
this.connectionPolicy = new ConnectionPolicy();
}
boolean disableSessionCapturing = (ConsistencyLevel.SESSION != consistencyLevel && !sessionCapturingOverrideEnabled);
this.sessionContainer = new SessionContainer(this.serviceEndpoint.getHost(), disableSessionCapturing);
this.consistencyLevel = consistencyLevel;
this.userAgentContainer = new UserAgentContainer();
String userAgentSuffix = this.connectionPolicy.getUserAgentSuffix();
if (userAgentSuffix != null && userAgentSuffix.length() > 0) {
userAgentContainer.setSuffix(userAgentSuffix);
}
this.reactorHttpClient = httpClient();
this.globalEndpointManager = new GlobalEndpointManager(asDatabaseAccountManagerInternal(), this.connectionPolicy, /**/configs);
this.retryPolicy = new RetryPolicy(this.globalEndpointManager, this.connectionPolicy);
this.resetSessionTokenRetryPolicy = retryPolicy;
}
/**
 * Creates the gateway service-configuration reader and derives whether
 * multiple write locations should be used.
 * Called from {@code init()} after {@code globalEndpointManager.init()},
 * so a database-account snapshot is expected to be available.
 */
private void initializeGatewayConfigurationReader() {
    this.gatewayConfigurationReader = new GatewayServiceConfigurationReader(this.globalEndpointManager);
    DatabaseAccount databaseAccount = this.globalEndpointManager.getLatestDatabaseAccount();
    // Sanity check: the endpoint manager must have been initialized first,
    // which populates the latest database-account snapshot.
    assert(databaseAccount != null);
    this.useMultipleWriteLocations = this.connectionPolicy.isUsingMultipleWriteLocations() && BridgeInternal.isEnableMultipleWriteLocations(databaseAccount);
}
/**
 * Completes client construction. Order matters:
 * 1) the gateway proxy is created (the endpoint manager fetches the account through it),
 * 2) the endpoint manager is initialized,
 * 3) the gateway configuration reader is built from the fetched account,
 * 4) caches and the retry-policy factory are wired,
 * 5) the store model is selected based on the connection mode.
 */
public void init() {
    this.gatewayProxy = createRxGatewayProxy(this.sessionContainer,
        this.consistencyLevel,
        this.queryCompatibilityMode,
        this.userAgentContainer,
        this.globalEndpointManager,
        this.reactorHttpClient);
    this.globalEndpointManager.init();
    this.initializeGatewayConfigurationReader();
    this.collectionCache = new RxClientCollectionCache(this.sessionContainer, this.gatewayProxy, this, this.retryPolicy);
    this.resetSessionTokenRetryPolicy = new ResetSessionTokenRetryPolicyFactory(this.sessionContainer, this.collectionCache, this.retryPolicy);
    this.partitionKeyRangeCache = new RxPartitionKeyRangeCache(RxDocumentClientImpl.this,
        collectionCache);
    if (this.connectionPolicy.getConnectionMode() == ConnectionMode.GATEWAY) {
        this.storeModel = this.gatewayProxy;
    } else {
        this.initializeDirectConnectivity();
    }
}
/**
 * Sets up direct-mode connectivity: creates the store client factory and the
 * global address resolver, then builds the server store model.
 */
private void initializeDirectConnectivity() {
    this.storeClientFactory = new StoreClientFactory(
        this.configs,
        this.connectionPolicy.getRequestTimeout(),
        0,
        this.userAgentContainer,
        this.connectionSharingAcrossClientsEnabled
    );
    this.addressResolver = new GlobalAddressResolver(
        this.reactorHttpClient,
        this.globalEndpointManager,
        this.configs.getProtocol(),
        this,
        this.collectionCache,
        this.partitionKeyRangeCache,
        userAgentContainer,
        null,
        this.connectionPolicy);
    this.createStoreModel(true);
}
/**
 * Adapts this client to the {@link DatabaseAccountManagerInternal} interface
 * consumed by the global endpoint manager; delegates to this client's members.
 */
DatabaseAccountManagerInternal asDatabaseAccountManagerInternal() {
    return new DatabaseAccountManagerInternal() {
        @Override
        public URI getServiceEndpoint() {
            return RxDocumentClientImpl.this.getServiceEndpoint();
        }
        @Override
        public Flux<DatabaseAccount> getDatabaseAccountFromEndpoint(URI endpoint) {
            logger.info("Getting database account endpoint from {}", endpoint);
            return RxDocumentClientImpl.this.getDatabaseAccountFromEndpoint(endpoint);
        }
        @Override
        public ConnectionPolicy getConnectionPolicy() {
            return RxDocumentClientImpl.this.getConnectionPolicy();
        }
    };
}
/**
 * Factory for the gateway store model. Package-private so it can be
 * overridden (e.g. in tests — NOTE(review): presumed, confirm usage).
 */
RxGatewayStoreModel createRxGatewayProxy(ISessionContainer sessionContainer,
                                         ConsistencyLevel consistencyLevel,
                                         QueryCompatibilityMode queryCompatibilityMode,
                                         UserAgentContainer userAgentContainer,
                                         GlobalEndpointManager globalEndpointManager,
                                         HttpClient httpClient) {
    return new RxGatewayStoreModel(sessionContainer,
        consistencyLevel,
        queryCompatibilityMode,
        userAgentContainer,
        globalEndpointManager,
        httpClient);
}
/**
 * Builds the HTTP client used for gateway traffic, honoring pool size, proxy,
 * idle and request timeouts from the connection policy. When connection sharing
 * across clients is enabled, a process-wide shared instance is used instead of
 * a dedicated one.
 */
private HttpClient httpClient() {
    HttpClientConfig httpClientConfig = new HttpClientConfig(this.configs)
        .withMaxIdleConnectionTimeout(this.connectionPolicy.getIdleConnectionTimeout())
        .withPoolSize(this.connectionPolicy.getMaxPoolSize())
        .withHttpProxy(this.connectionPolicy.getProxy())
        .withRequestTimeout(this.connectionPolicy.getRequestTimeout());
    if (connectionSharingAcrossClientsEnabled) {
        return SharedGatewayHttpClient.getOrCreateInstance(httpClientConfig);
    } else {
        return HttpClient.createFixed(httpClientConfig);
    }
}
/**
 * Creates the direct-mode server store model from the store client factory.
 *
 * @param subscribeRntbdStatus currently unused in this method body
 */
private void createStoreModel(boolean subscribeRntbdStatus) {
    StoreClient storeClient = this.storeClientFactory.createStoreClient(
        this.addressResolver,
        this.sessionContainer,
        this.gatewayConfigurationReader,
        this,
        false
    );
    this.storeModel = new ServerStoreModel(storeClient);
}
/** Returns the service endpoint this client was constructed with. */
@Override
public URI getServiceEndpoint() {
    return this.serviceEndpoint;
}
/** Returns the first available write endpoint, or {@code null} if none is known. */
@Override
public URI getWriteEndpoint() {
    return globalEndpointManager.getWriteEndpoints().stream().findFirst().orElse(null);
}
/** Returns the first available read endpoint, or {@code null} if none is known. */
@Override
public URI getReadEndpoint() {
    return globalEndpointManager.getReadEndpoints().stream().findFirst().orElse(null);
}
/** Returns the effective connection policy for this client. */
@Override
public ConnectionPolicy getConnectionPolicy() {
    return this.connectionPolicy;
}
/**
 * Creates a database, retrying per the session-token-reset retry policy.
 *
 * @param database the database resource to create; must not be {@code null}
 * @param options  request options; may be {@code null}
 * @return a {@link Mono} emitting the created database response
 */
@Override
public Mono<ResourceResponse<Database>> createDatabase(Database database, RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> createDatabaseInternal(database, options, retryPolicy),
        retryPolicy);
}
/**
 * Serializes and creates a database, recording serialization timing diagnostics.
 *
 * @param database            resource to create; must not be {@code null}
 * @param options             request options; may be {@code null}
 * @param retryPolicyInstance retry policy notified before the request is sent; may be null
 * @return a {@link Mono} emitting the response, or an error Mono on failure
 */
private Mono<ResourceResponse<Database>> createDatabaseInternal(Database database, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (database == null) {
            throw new IllegalArgumentException("Database");
        }
        logger.debug("Creating a Database. id: [{}]", database.getId());
        validateResource(database);
        Map<String, String> requestHeaders = this.getRequestHeaders(options);
        // Time the JSON serialization so it can be reported in request diagnostics.
        ZonedDateTime serializationStartTimeUTC = ZonedDateTime.now(ZoneOffset.UTC);
        ByteBuffer byteBuffer = ModelBridgeInternal.serializeJsonToByteBuffer(database);
        ZonedDateTime serializationEndTimeUTC = ZonedDateTime.now(ZoneOffset.UTC);
        SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics = new SerializationDiagnosticsContext.SerializationDiagnostics(
            serializationStartTimeUTC,
            serializationEndTimeUTC,
            SerializationDiagnosticsContext.SerializationType.DATABASE_SERIALIZATION);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Create,
            ResourceType.Database, Paths.DATABASES_ROOT, byteBuffer, requestHeaders, options);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        SerializationDiagnosticsContext serializationDiagnosticsContext = BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosResponseDiagnostics);
        if (serializationDiagnosticsContext != null) {
            serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics);
        }
        return this.create(request, retryPolicyInstance).map(response -> toResourceResponse(response, Database.class));
    } catch (Exception e) {
        logger.debug("Failure in creating a database. due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Deletes the database identified by the given link, retrying per the
 * session-token-reset retry policy.
 */
@Override
public Mono<ResourceResponse<Database>> deleteDatabase(String databaseLink, RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> deleteDatabaseInternal(databaseLink, options, retryPolicy),
        retryPolicy);
}
/**
 * Deletes a database by its link.
 *
 * @param databaseLink        link of the database to delete; must be non-empty
 * @param options             request options; may be {@code null}
 * @param retryPolicyInstance retry policy notified before the request is sent; may be null
 * @return a {@link Mono} emitting the response, or an error Mono on failure
 */
private Mono<ResourceResponse<Database>> deleteDatabaseInternal(String databaseLink, RequestOptions options,
                                                                DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(databaseLink)) {
            throw new IllegalArgumentException("databaseLink");
        }
        logger.debug("Deleting a Database. databaseLink: [{}]", databaseLink);
        String path = Utils.joinPath(databaseLink, null);
        Map<String, String> requestHeaders = this.getRequestHeaders(options);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Delete,
            ResourceType.Database, path, requestHeaders, options);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        return this.delete(request, retryPolicyInstance).map(response -> toResourceResponse(response, Database.class));
    } catch (Exception e) {
        logger.debug("Failure in deleting a database. due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Reads the database identified by the given link, retrying per the
 * session-token-reset retry policy.
 */
@Override
public Mono<ResourceResponse<Database>> readDatabase(String databaseLink, RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> readDatabaseInternal(databaseLink, options, retryPolicy),
        retryPolicy);
}
/**
 * Reads a database by its link.
 *
 * @param databaseLink        link of the database to read; must be non-empty
 * @param options             request options; may be {@code null}
 * @param retryPolicyInstance retry policy notified before the request is sent; may be null
 * @return a {@link Mono} emitting the response, or an error Mono on failure
 */
private Mono<ResourceResponse<Database>> readDatabaseInternal(String databaseLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(databaseLink)) {
            throw new IllegalArgumentException("databaseLink");
        }
        logger.debug("Reading a Database. databaseLink: [{}]", databaseLink);
        String path = Utils.joinPath(databaseLink, null);
        Map<String, String> requestHeaders = this.getRequestHeaders(options);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Read,
            ResourceType.Database, path, requestHeaders, options);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, Database.class));
    } catch (Exception e) {
        logger.debug("Failure in reading a database. due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/** Reads the feed of all databases in the account. */
@Override
public Flux<FeedResponse<Database>> readDatabases(FeedOptions options) {
    return readFeed(options, ResourceType.Database, Database.class, Paths.DATABASES_ROOT);
}
/**
 * Maps a parent resource link to the feed link used when querying children of the
 * given resource type (e.g. a collection link maps to its documents feed).
 *
 * @param parentResourceLink link of the parent resource; ignored for account-level
 *                           types (Database, Offer) which use fixed root paths
 * @param resourceTypeEnum   type of the child resources being queried
 * @return the feed/query link for the child resources
 * @throws IllegalArgumentException if the resource type has no child feed mapping
 */
private String parentResourceLinkToQueryLink(String parentResourceLink, ResourceType resourceTypeEnum) {
    switch (resourceTypeEnum) {
        case Database:
            return Paths.DATABASES_ROOT;
        case DocumentCollection:
            return Utils.joinPath(parentResourceLink, Paths.COLLECTIONS_PATH_SEGMENT);
        case Document:
            return Utils.joinPath(parentResourceLink, Paths.DOCUMENTS_PATH_SEGMENT);
        case Offer:
            return Paths.OFFERS_ROOT;
        case User:
            return Utils.joinPath(parentResourceLink, Paths.USERS_PATH_SEGMENT);
        case Permission:
            return Utils.joinPath(parentResourceLink, Paths.PERMISSIONS_PATH_SEGMENT);
        case Attachment:
            return Utils.joinPath(parentResourceLink, Paths.ATTACHMENTS_PATH_SEGMENT);
        case StoredProcedure:
            return Utils.joinPath(parentResourceLink, Paths.STORED_PROCEDURES_PATH_SEGMENT);
        case Trigger:
            return Utils.joinPath(parentResourceLink, Paths.TRIGGERS_PATH_SEGMENT);
        case UserDefinedFunction:
            return Utils.joinPath(parentResourceLink, Paths.USER_DEFINED_FUNCTIONS_PATH_SEGMENT);
        default:
            // Include the offending type so the failure is diagnosable from the message.
            throw new IllegalArgumentException("resource type not supported: " + resourceTypeEnum);
    }
}
/**
 * Builds and executes a query pipeline for the given resource type.
 *
 * @param parentResourceLink link of the parent resource; {@code null} for account-level types
 * @param sqlQuery           the query to execute
 * @param options            feed options controlling paging/partitioning
 * @param klass              class used to deserialize results
 * @param resourceTypeEnum   type of the resources being queried
 * @return a {@link Flux} of feed-response pages of {@code T}
 */
private <T extends Resource> Flux<FeedResponse<T>> createQuery(
    String parentResourceLink,
    SqlQuerySpec sqlQuery,
    FeedOptions options,
    Class<T> klass,
    ResourceType resourceTypeEnum) {
    String queryResourceLink = parentResourceLinkToQueryLink(parentResourceLink, resourceTypeEnum);
    // A fresh activity id correlates all requests issued by this query execution.
    UUID activityId = Utils.randomUUID();
    IDocumentQueryClient queryClient = documentQueryClientImpl(RxDocumentClientImpl.this);
    Flux<? extends IDocumentQueryExecutionContext<T>> executionContext =
        DocumentQueryExecutionContextFactory.createDocumentQueryExecutionContextAsync(queryClient, resourceTypeEnum, klass, sqlQuery , options, queryResourceLink, false, activityId);
    return executionContext.flatMap(IDocumentQueryExecutionContext<T>::executeAsync);
}
/** Queries databases with a raw SQL string; delegates to the {@link SqlQuerySpec} overload. */
@Override
public Flux<FeedResponse<Database>> queryDatabases(String query, FeedOptions options) {
    SqlQuerySpec querySpec = new SqlQuerySpec(query);
    return queryDatabases(querySpec, options);
}
/** Queries databases with a structured query spec over the databases root feed. */
@Override
public Flux<FeedResponse<Database>> queryDatabases(SqlQuerySpec querySpec, FeedOptions options) {
    return createQuery(Paths.DATABASES_ROOT, querySpec, options, Database.class, ResourceType.Database);
}
/**
 * Creates a collection in the given database, retrying per the
 * session-token-reset retry policy.
 */
@Override
public Mono<ResourceResponse<DocumentCollection>> createCollection(String databaseLink,
                                                                   DocumentCollection collection, RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> this.createCollectionInternal(databaseLink, collection, options, retryPolicy),
        retryPolicy);
}
private Mono<ResourceResponse<DocumentCollection>> createCollectionInternal(String databaseLink,
DocumentCollection collection, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (StringUtils.isEmpty(databaseLink)) {
throw new IllegalArgumentException("databaseLink");
}
if (collection == null) {
throw new IllegalArgumentException("collection");
}
logger.debug("Creating a Collection. databaseLink: [{}], Collection id: [{}]", databaseLink,
collection.getId());
validateResource(collection);
String path = Utils.joinPath(databaseLink, Paths.COLLECTIONS_PATH_SEGMENT);
Map<String, String> requestHeaders = this.getRequestHeaders(options);
ZonedDateTime serializationStartTimeUTC = ZonedDateTime.now(ZoneOffset.UTC);
ByteBuffer byteBuffer = ModelBridgeInternal.serializeJsonToByteBuffer(collection);
ZonedDateTime serializationEndTimeUTC = ZonedDateTime.now(ZoneOffset.UTC);
SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics = new SerializationDiagnosticsContext.SerializationDiagnostics(
serializationStartTimeUTC,
serializationEndTimeUTC,
SerializationDiagnosticsContext.SerializationType.CONTAINER_SERIALIZATION);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Create,
ResourceType.DocumentCollection, path, byteBuffer, requestHeaders, options);
if (retryPolicyInstance != null){
retryPolicyInstance.onBeforeSendRequest(request);
}
SerializationDiagnosticsContext serializationDiagnosticsContext = BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosResponseDiagnostics);
if (serializationDiagnosticsContext != null) {
serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics);
}
return this.create(request, retryPolicyInstance).map(response -> toResourceResponse(response, DocumentCollection.class))
.doOnNext(resourceResponse -> {
this.sessionContainer.setSessionToken(resourceResponse.getResource().getResourceId(),
getAltLink(resourceResponse.getResource()),
resourceResponse.getResponseHeaders());
});
} catch (Exception e) {
logger.debug("Failure in creating a collection. due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
/**
 * Replaces an existing collection, retrying per the session-token-reset retry policy.
 */
@Override
public Mono<ResourceResponse<DocumentCollection>> replaceCollection(DocumentCollection collection,
                                                                    RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> replaceCollectionInternal(collection, options, retryPolicy),
        retryPolicy);
}
private Mono<ResourceResponse<DocumentCollection>> replaceCollectionInternal(DocumentCollection collection,
RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (collection == null) {
throw new IllegalArgumentException("collection");
}
logger.debug("Replacing a Collection. id: [{}]", collection.getId());
validateResource(collection);
String path = Utils.joinPath(collection.getSelfLink(), null);
Map<String, String> requestHeaders = this.getRequestHeaders(options);
ZonedDateTime serializationStartTimeUTC = ZonedDateTime.now(ZoneOffset.UTC);
ByteBuffer byteBuffer = ModelBridgeInternal.serializeJsonToByteBuffer(collection);
ZonedDateTime serializationEndTimeUTC = ZonedDateTime.now(ZoneOffset.UTC);
SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics = new SerializationDiagnosticsContext.SerializationDiagnostics(
serializationStartTimeUTC,
serializationEndTimeUTC,
SerializationDiagnosticsContext.SerializationType.CONTAINER_SERIALIZATION);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Replace,
ResourceType.DocumentCollection, path, byteBuffer, requestHeaders, options);
if (retryPolicyInstance != null){
retryPolicyInstance.onBeforeSendRequest(request);
}
SerializationDiagnosticsContext serializationDiagnosticsContext = BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosResponseDiagnostics);
if (serializationDiagnosticsContext != null) {
serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics);
}
return this.replace(request, retryPolicyInstance).map(response -> toResourceResponse(response, DocumentCollection.class))
.doOnNext(resourceResponse -> {
if (resourceResponse.getResource() != null) {
this.sessionContainer.setSessionToken(resourceResponse.getResource().getResourceId(),
getAltLink(resourceResponse.getResource()),
resourceResponse.getResponseHeaders());
}
});
} catch (Exception e) {
logger.debug("Failure in replacing a collection. due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
/**
 * Deletes the collection identified by the given link, retrying per the
 * session-token-reset retry policy.
 */
@Override
public Mono<ResourceResponse<DocumentCollection>> deleteCollection(String collectionLink,
                                                                   RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> deleteCollectionInternal(collectionLink, options, retryPolicy),
        retryPolicy);
}
/**
 * Deletes a collection by its link.
 *
 * @param collectionLink      link of the collection to delete; must be non-empty
 * @param options             request options; may be {@code null}
 * @param retryPolicyInstance retry policy notified before the request is sent; may be null
 * @return a {@link Mono} emitting the response, or an error Mono on failure
 */
private Mono<ResourceResponse<DocumentCollection>> deleteCollectionInternal(String collectionLink,
                                                                            RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(collectionLink)) {
            throw new IllegalArgumentException("collectionLink");
        }
        logger.debug("Deleting a Collection. collectionLink: [{}]", collectionLink);
        String path = Utils.joinPath(collectionLink, null);
        Map<String, String> requestHeaders = this.getRequestHeaders(options);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Delete,
            ResourceType.DocumentCollection, path, requestHeaders, options);
        if (retryPolicyInstance != null){
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        return this.delete(request, retryPolicyInstance).map(response -> toResourceResponse(response, DocumentCollection.class));
    } catch (Exception e) {
        logger.debug("Failure in deleting a collection, due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Stamps auth/date headers for a DELETE, records retry timing when this is
 * a retry attempt, then dispatches through the resolved store model.
 */
private Mono<RxDocumentServiceResponse> delete(RxDocumentServiceRequest request, DocumentClientRetryPolicy documentClientRetryPolicy) {
    populateHeaders(request, RequestVerb.DELETE);
    boolean isRetry = request.requestContext != null && documentClientRetryPolicy.getRetryCount() > 0;
    if (isRetry) {
        documentClientRetryPolicy.updateEndTime();
        request.requestContext.updateRetryContext(documentClientRetryPolicy, true);
    }
    RxStoreModel storeModel = getStoreProxy(request);
    return storeModel.processMessage(request);
}
/**
 * Stamps auth/date headers for a GET, records retry timing when this is a
 * retry attempt, then dispatches through the resolved store model.
 */
private Mono<RxDocumentServiceResponse> read(RxDocumentServiceRequest request, DocumentClientRetryPolicy documentClientRetryPolicy) {
    populateHeaders(request, RequestVerb.GET);
    boolean isRetry = request.requestContext != null && documentClientRetryPolicy.getRetryCount() > 0;
    if (isRetry) {
        documentClientRetryPolicy.updateEndTime();
        request.requestContext.updateRetryContext(documentClientRetryPolicy, true);
    }
    RxStoreModel storeModel = getStoreProxy(request);
    return storeModel.processMessage(request);
}
// Issues a feed read (GET). Note that feed reads are always routed through
// the gateway store model rather than the per-request store proxy.
Mono<RxDocumentServiceResponse> readFeed(RxDocumentServiceRequest request) {
    populateHeaders(request, RequestVerb.GET);
    return gatewayProxy.processMessage(request);
}
/**
 * Executes a query (POST verb) and captures the response's session token
 * before propagating the response downstream.
 */
private Mono<RxDocumentServiceResponse> query(RxDocumentServiceRequest request) {
    populateHeaders(request, RequestVerb.POST);
    RxStoreModel storeModel = this.getStoreProxy(request);
    return storeModel.processMessage(request)
        .map(response -> {
            captureSessionToken(request, response);
            return response;
        });
}
/**
 * Reads a document collection by link, running under a fresh
 * session-token-reset retry policy.
 */
@Override
public Mono<ResourceResponse<DocumentCollection>> readCollection(String collectionLink,
                                                                 RequestOptions options) {
    DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> readCollectionInternal(collectionLink, options, retryPolicy),
        retryPolicy);
}
/**
 * Validates the link, builds the GET request, and maps the wire response
 * into a typed ResourceResponse. Argument errors surface as an error Mono.
 */
private Mono<ResourceResponse<DocumentCollection>> readCollectionInternal(String collectionLink,
        RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(collectionLink)) {
            throw new IllegalArgumentException("collectionLink");
        }
        logger.debug("Reading a Collection. collectionLink: [{}]", collectionLink);
        Map<String, String> requestHeaders = this.getRequestHeaders(options);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(
            OperationType.Read,
            ResourceType.DocumentCollection,
            Utils.joinPath(collectionLink, null),
            requestHeaders,
            options);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        return this.read(request, retryPolicyInstance)
                   .map(response -> toResourceResponse(response, DocumentCollection.class));
    } catch (Exception e) {
        logger.debug("Failure in reading a collection, due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Reads all collections in a database, implemented as a feed read over the
 * database's collections path segment.
 */
@Override
public Flux<FeedResponse<DocumentCollection>> readCollections(String databaseLink, FeedOptions options) {
    if (StringUtils.isEmpty(databaseLink)) {
        throw new IllegalArgumentException("databaseLink");
    }
    String feedPath = Utils.joinPath(databaseLink, Paths.COLLECTIONS_PATH_SEGMENT);
    return readFeed(options, ResourceType.DocumentCollection, DocumentCollection.class, feedPath);
}
/**
 * Queries collections in a database with a raw SQL string; wraps it into a
 * SqlQuerySpec and delegates to the shared query pipeline.
 */
@Override
public Flux<FeedResponse<DocumentCollection>> queryCollections(String databaseLink, String query,
    FeedOptions options) {
    return createQuery(databaseLink, new SqlQuerySpec(query), options, DocumentCollection.class, ResourceType.DocumentCollection);
}
/**
 * Queries collections in a database with a parameterized SqlQuerySpec via
 * the shared query pipeline.
 */
@Override
public Flux<FeedResponse<DocumentCollection>> queryCollections(String databaseLink,
    SqlQuerySpec querySpec, FeedOptions options) {
    return createQuery(databaseLink, querySpec, options, DocumentCollection.class, ResourceType.DocumentCollection);
}
/**
 * Serializes each stored-procedure argument to JSON and renders the whole
 * parameter list as a single JSON array literal.
 *
 * @throws IllegalArgumentException when an argument cannot be serialized.
 */
private static String serializeProcedureParams(Object[] objectArray) {
    String[] serialized = new String[objectArray.length];
    for (int index = 0; index < objectArray.length; index++) {
        Object value = objectArray[index];
        if (value instanceof JsonSerializable) {
            // SDK model types carry their own JSON rendering.
            serialized[index] = ModelBridgeInternal.toJsonFromJsonSerializable((JsonSerializable) value);
        } else {
            try {
                serialized[index] = mapper.writeValueAsString(value);
            } catch (IOException e) {
                throw new IllegalArgumentException("Can't serialize the object into the json string", e);
            }
        }
    }
    return String.format("[%s]", StringUtils.join(serialized, ","));
}
private static void validateResource(Resource resource) {
if (!StringUtils.isEmpty(resource.getId())) {
if (resource.getId().indexOf('/') != -1 || resource.getId().indexOf('\\') != -1 ||
resource.getId().indexOf('?') != -1 || resource.getId().indexOf('
throw new IllegalArgumentException("Id contains illegal chars.");
}
if (resource.getId().endsWith(" ")) {
throw new IllegalArgumentException("Id ends with a space.");
}
}
}
/**
 * Builds the per-request HTTP headers from client-level defaults plus the
 * given {@link RequestOptions}. Returns a fresh, mutable map.
 * Order matters: custom headers from the options are applied before the
 * typed option values, so the typed values win on conflicts.
 */
private Map<String, String> getRequestHeaders(RequestOptions options) {
    Map<String, String> headers = new HashMap<>();
    // Client-level defaults first.
    if (this.useMultipleWriteLocations) {
        headers.put(HttpConstants.HttpHeaders.ALLOW_TENTATIVE_WRITES, Boolean.TRUE.toString());
    }
    if (consistencyLevel != null) {
        headers.put(HttpConstants.HttpHeaders.CONSISTENCY_LEVEL, consistencyLevel.toString());
    }
    if (options == null) {
        return headers;
    }
    // Free-form custom headers; typed option values below may override them.
    Map<String, String> customOptions = options.getHeaders();
    if (customOptions != null) {
        headers.putAll(customOptions);
    }
    if (options.getAccessCondition() != null) {
        if (options.getAccessCondition().getType() == AccessConditionType.IF_MATCH) {
            headers.put(HttpConstants.HttpHeaders.IF_MATCH, options.getAccessCondition().getCondition());
        } else {
            headers.put(HttpConstants.HttpHeaders.IF_NONE_MATCH, options.getAccessCondition().getCondition());
        }
    }
    // Per-request consistency overrides the client default set above.
    if (options.getConsistencyLevel() != null) {
        headers.put(HttpConstants.HttpHeaders.CONSISTENCY_LEVEL, options.getConsistencyLevel().toString());
    }
    if (options.getIndexingDirective() != null) {
        headers.put(HttpConstants.HttpHeaders.INDEXING_DIRECTIVE, options.getIndexingDirective().toString());
    }
    if (options.getPostTriggerInclude() != null && options.getPostTriggerInclude().size() > 0) {
        String postTriggerInclude = StringUtils.join(options.getPostTriggerInclude(), ",");
        headers.put(HttpConstants.HttpHeaders.POST_TRIGGER_INCLUDE, postTriggerInclude);
    }
    if (options.getPreTriggerInclude() != null && options.getPreTriggerInclude().size() > 0) {
        String preTriggerInclude = StringUtils.join(options.getPreTriggerInclude(), ",");
        headers.put(HttpConstants.HttpHeaders.PRE_TRIGGER_INCLUDE, preTriggerInclude);
    }
    if (!Strings.isNullOrEmpty(options.getSessionToken())) {
        headers.put(HttpConstants.HttpHeaders.SESSION_TOKEN, options.getSessionToken());
    }
    if (options.getResourceTokenExpirySeconds() != null) {
        headers.put(HttpConstants.HttpHeaders.RESOURCE_TOKEN_EXPIRY,
            String.valueOf(options.getResourceTokenExpirySeconds()));
    }
    // An explicit throughput value takes precedence over a named offer type.
    if (options.getOfferThroughput() != null && options.getOfferThroughput() >= 0) {
        headers.put(HttpConstants.HttpHeaders.OFFER_THROUGHPUT, options.getOfferThroughput().toString());
    } else if (options.getOfferType() != null) {
        headers.put(HttpConstants.HttpHeaders.OFFER_TYPE, options.getOfferType());
    }
    if (options.isPopulateQuotaInfo()) {
        headers.put(HttpConstants.HttpHeaders.POPULATE_QUOTA_INFO, String.valueOf(true));
    }
    if (options.isScriptLoggingEnabled()) {
        headers.put(HttpConstants.HttpHeaders.SCRIPT_ENABLE_LOGGING, String.valueOf(true));
    }
    return headers;
}
/**
 * Resolves the target collection, then stamps the partition-key information
 * onto the request. The collection's partition-key definition is required
 * before the header can be computed.
 */
private Mono<RxDocumentServiceRequest> addPartitionKeyInformation(RxDocumentServiceRequest request,
                                                                  ByteBuffer contentAsByteBuffer,
                                                                  Document document,
                                                                  RequestOptions options) {
    Mono<Utils.ValueHolder<DocumentCollection>> collectionObs =
        this.collectionCache.resolveCollectionAsync(
            BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosResponseDiagnostics),
            request);
    return collectionObs.map(holder -> {
        addPartitionKeyInformation(request, contentAsByteBuffer, document, options, holder.v);
        return request;
    });
}
/**
 * Stamps partition-key information onto the request once the given
 * collection resolution completes; emits the same request instance.
 */
private Mono<RxDocumentServiceRequest> addPartitionKeyInformation(RxDocumentServiceRequest request,
                                                                  ByteBuffer contentAsByteBuffer,
                                                                  Object document,
                                                                  RequestOptions options,
                                                                  Mono<Utils.ValueHolder<DocumentCollection>> collectionObs) {
    return collectionObs.map(holder -> {
        addPartitionKeyInformation(request, contentAsByteBuffer, document, options, holder.v);
        return request;
    });
}
/**
 * Derives the partition-key value for the request and stamps it both on the
 * request object and as the PARTITION_KEY header.
 * Precedence: explicit PartitionKey.NONE in options; then an explicit
 * options partition key; then an unpartitioned collection => empty key;
 * then extraction from the serialized document bytes; otherwise the
 * operation cannot proceed.
 *
 * @throws UnsupportedOperationException when no partition key can be derived.
 */
private void addPartitionKeyInformation(RxDocumentServiceRequest request,
                                        ByteBuffer contentAsByteBuffer,
                                        Object objectDoc, RequestOptions options,
                                        DocumentCollection collection) {
    PartitionKeyDefinition partitionKeyDefinition = collection.getPartitionKey();
    PartitionKeyInternal partitionKeyInternal = null;
    if (options != null && options.getPartitionKey() != null && options.getPartitionKey().equals(PartitionKey.NONE)){
        partitionKeyInternal = ModelBridgeInternal.getNonePartitionKey(partitionKeyDefinition);
    } else if (options != null && options.getPartitionKey() != null) {
        partitionKeyInternal = BridgeInternal.getPartitionKeyInternal(options.getPartitionKey());
    } else if (partitionKeyDefinition == null || partitionKeyDefinition.getPaths().size() == 0) {
        // Collection has no partition-key definition.
        partitionKeyInternal = PartitionKeyInternal.getEmpty();
    } else if (contentAsByteBuffer != null) {
        CosmosItemProperties cosmosItemProperties;
        if (objectDoc instanceof CosmosItemProperties) {
            cosmosItemProperties = (CosmosItemProperties) objectDoc;
        } else {
            // Rewind before re-reading: the buffer may already have been
            // consumed by an earlier serialization step.
            contentAsByteBuffer.rewind();
            cosmosItemProperties = new CosmosItemProperties(contentAsByteBuffer);
        }
        // Time the partition-key extraction for diagnostics.
        ZonedDateTime serializationStartTime = ZonedDateTime.now(ZoneOffset.UTC);
        partitionKeyInternal = extractPartitionKeyValueFromDocument(cosmosItemProperties, partitionKeyDefinition);
        ZonedDateTime serializationEndTime = ZonedDateTime.now(ZoneOffset.UTC);
        SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics = new SerializationDiagnosticsContext.SerializationDiagnostics(
            serializationStartTime,
            serializationEndTime,
            SerializationDiagnosticsContext.SerializationType.PARTITION_KEY_FETCH_SERIALIZATION
        );
        SerializationDiagnosticsContext serializationDiagnosticsContext = BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosResponseDiagnostics);
        if (serializationDiagnosticsContext != null) {
            serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics);
        }
    } else {
        throw new UnsupportedOperationException("PartitionKey value must be supplied for this operation.");
    }
    request.setPartitionKeyInternal(partitionKeyInternal);
    request.getHeaders().put(HttpConstants.HttpHeaders.PARTITION_KEY, Utils.escapeNonAscii(partitionKeyInternal.toJson()));
}
/**
 * Pulls the partition-key value out of a document by walking the first
 * (and only supported) partition-key path. Returns null when there is no
 * definition or the path yields no parts.
 */
private static PartitionKeyInternal extractPartitionKeyValueFromDocument(
        CosmosItemProperties document,
        PartitionKeyDefinition partitionKeyDefinition) {
    if (partitionKeyDefinition == null) {
        return null;
    }
    String path = partitionKeyDefinition.getPaths().iterator().next();
    List<String> parts = PathParser.getPathParts(path);
    if (parts.isEmpty()) {
        return null;
    }
    Object value = ModelBridgeInternal.getObjectByPathFromJsonSerializable(document, parts);
    if (value == null || value.getClass() == ObjectNode.class) {
        // A missing value (or an embedded object node) maps to the "none" key.
        value = ModelBridgeInternal.getNonePartitionKey(partitionKeyDefinition);
    }
    if (value instanceof PartitionKeyInternal) {
        return (PartitionKeyInternal) value;
    }
    return PartitionKeyInternal.fromObjectArray(Collections.singletonList(value), false);
}
/**
 * Serializes {@code document} and assembles the service request for a
 * create/upsert against the collection's documents feed, recording
 * serialization diagnostics and resolving the partition key.
 * NOTE(review): disableAutomaticIdGeneration is accepted but not consulted
 * in this visible code path — confirm id generation is handled elsewhere.
 *
 * @throws IllegalArgumentException when the link or document is missing.
 */
private Mono<RxDocumentServiceRequest> getCreateDocumentRequest(DocumentClientRetryPolicy requestRetryPolicy,
                                                                String documentCollectionLink,
                                                                Object document,
                                                                RequestOptions options,
                                                                boolean disableAutomaticIdGeneration,
                                                                OperationType operationType) {
    if (StringUtils.isEmpty(documentCollectionLink)) {
        throw new IllegalArgumentException("documentCollectionLink");
    }
    if (document == null) {
        throw new IllegalArgumentException("document");
    }
    // Time the document serialization for diagnostics.
    ZonedDateTime serializationStartTimeUTC = ZonedDateTime.now(ZoneOffset.UTC);
    ByteBuffer content = BridgeInternal.serializeJsonToByteBuffer(document, mapper);
    ZonedDateTime serializationEndTimeUTC = ZonedDateTime.now(ZoneOffset.UTC);
    SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics = new SerializationDiagnosticsContext.SerializationDiagnostics(
        serializationStartTimeUTC,
        serializationEndTimeUTC,
        SerializationDiagnosticsContext.SerializationType.ITEM_SERIALIZATION);
    String path = Utils.joinPath(documentCollectionLink, Paths.DOCUMENTS_PATH_SEGMENT);
    Map<String, String> requestHeaders = this.getRequestHeaders(options);
    RxDocumentServiceRequest request = RxDocumentServiceRequest.create(operationType, ResourceType.Document, path,
        requestHeaders, options, content);
    if (requestRetryPolicy != null) {
        requestRetryPolicy.onBeforeSendRequest(request);
    }
    SerializationDiagnosticsContext serializationDiagnosticsContext = BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosResponseDiagnostics);
    if (serializationDiagnosticsContext != null) {
        serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics);
    }
    // Resolve the collection to compute and stamp the partition key.
    Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = this.collectionCache.resolveCollectionAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosResponseDiagnostics), request);
    return addPartitionKeyInformation(request, content, document, options, collectionObs);
}
/**
 * Stamps the common outgoing headers: x-date, authorization (when any
 * credential source is configured), and default JSON content negotiation
 * without clobbering caller-set content-type/accept values.
 */
private void populateHeaders(RxDocumentServiceRequest request, RequestVerb httpMethod) {
    request.getHeaders().put(HttpConstants.HttpHeaders.X_DATE, Utils.nowAsRFC1123());
    if (this.masterKeyOrResourceToken != null || this.resourceTokensMap != null
        || this.cosmosAuthorizationTokenResolver != null || this.cosmosKeyCredential != null) {
        String resourceName = request.getResourceAddress();
        String authorization = this.getUserAuthorizationToken(
            resourceName, request.getResourceType(), httpMethod, request.getHeaders(),
            AuthorizationTokenType.PrimaryMasterKey, request.properties);
        try {
            // The token is transmitted URL-encoded.
            authorization = URLEncoder.encode(authorization, "UTF-8");
        } catch (UnsupportedEncodingException e) {
            // UTF-8 is always supported; effectively unreachable.
            throw new IllegalStateException("Failed to encode authtoken.", e);
        }
        request.getHeaders().put(HttpConstants.HttpHeaders.AUTHORIZATION, authorization);
    }
    if ((RequestVerb.POST.equals(httpMethod) || RequestVerb.PUT.equals(httpMethod))
        && !request.getHeaders().containsKey(HttpConstants.HttpHeaders.CONTENT_TYPE)) {
        request.getHeaders().put(HttpConstants.HttpHeaders.CONTENT_TYPE, RuntimeConstants.MediaTypes.JSON);
    }
    if (!request.getHeaders().containsKey(HttpConstants.HttpHeaders.ACCEPT)) {
        request.getHeaders().put(HttpConstants.HttpHeaders.ACCEPT, RuntimeConstants.MediaTypes.JSON);
    }
}
/**
 * Resolves the authorization token for a request, trying credential
 * sources in priority order: custom token resolver, key credential,
 * directly-supplied resource token, then the resource-token map.
 */
@Override
public String getUserAuthorizationToken(String resourceName,
                                        ResourceType resourceType,
                                        RequestVerb requestVerb,
                                        Map<String, String> headers,
                                        AuthorizationTokenType tokenType,
                                        Map<String, Object> properties) {
    if (this.cosmosAuthorizationTokenResolver != null) {
        return this.cosmosAuthorizationTokenResolver.getAuthorizationToken(requestVerb, resourceName, this.resolveCosmosResourceType(resourceType),
            properties != null ? Collections.unmodifiableMap(properties) : null);
    } else if (cosmosKeyCredential != null) {
        return this.authorizationTokenProvider.generateKeyAuthorizationSignature(requestVerb, resourceName,
            resourceType, headers);
    } else if (masterKeyOrResourceToken != null && hasAuthKeyResourceToken && resourceTokensMap == null) {
        // The auth key itself is a single resource token; use it verbatim.
        return masterKeyOrResourceToken;
    } else {
        assert resourceTokensMap != null;
        if(resourceType.equals(ResourceType.DatabaseAccount)) {
            // Account-level calls are authorized with the first token obtained
            // from the permission feed.
            return this.firstResourceTokenFromPermissionFeed;
        }
        return ResourceTokenAuthorizationHelper.getAuthorizationTokenUsingResourceTokens(resourceTokensMap, requestVerb, resourceName, headers);
    }
}
/**
 * Maps the wire-level ResourceType onto the public CosmosResourceType,
 * defaulting to SYSTEM when there is no public counterpart.
 */
private CosmosResourceType resolveCosmosResourceType(ResourceType resourceType) {
    CosmosResourceType resolved = ModelBridgeInternal.fromServiceSerializedFormat(resourceType.toString());
    return resolved != null ? resolved : CosmosResourceType.SYSTEM;
}
// Records the session token from a response into the session container,
// keyed by the request's target, to support session consistency.
void captureSessionToken(RxDocumentServiceRequest request, RxDocumentServiceResponse response) {
    this.sessionContainer.setSessionToken(request, response.getResponseHeaders());
}
/**
 * Executes a create (POST): stamp headers, record retry timing when this is
 * a retry attempt, then dispatch through the resolved store model.
 */
private Mono<RxDocumentServiceResponse> create(RxDocumentServiceRequest request, DocumentClientRetryPolicy retryPolicy) {
    populateHeaders(request, RequestVerb.POST);
    RxStoreModel storeProxy = this.getStoreProxy(request);
    boolean isRetry = request.requestContext != null && retryPolicy.getRetryCount() > 0;
    if (isRetry) {
        retryPolicy.updateEndTime();
        request.requestContext.updateRetryContext(retryPolicy, true);
    }
    return storeProxy.processMessage(request);
}
/**
 * Executes an upsert: rides the POST verb with the IS_UPSERT header set,
 * records retry timing when retrying, and captures the response's session
 * token before propagating it.
 */
private Mono<RxDocumentServiceResponse> upsert(RxDocumentServiceRequest request, DocumentClientRetryPolicy documentClientRetryPolicy) {
    populateHeaders(request, RequestVerb.POST);
    Map<String, String> headers = request.getHeaders();
    assert (headers != null);
    headers.put(HttpConstants.HttpHeaders.IS_UPSERT, "true");
    boolean isRetry = request.requestContext != null && documentClientRetryPolicy.getRetryCount() > 0;
    if (isRetry) {
        documentClientRetryPolicy.updateEndTime();
        request.requestContext.updateRetryContext(documentClientRetryPolicy, true);
    }
    return getStoreProxy(request)
        .processMessage(request)
        .map(response -> {
            captureSessionToken(request, response);
            return response;
        });
}
/**
 * Creates a document. When no explicit partition key is supplied, the retry
 * policy is wrapped so partition-key mismatches caused by a stale
 * collection cache are retried.
 */
@Override
public Mono<ResourceResponse<Document>> createDocument(String collectionLink, Object document,
                                                       RequestOptions options, boolean disableAutomaticIdGeneration) {
    DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    if (options == null || options.getPartitionKey() == null) {
        retryPolicy = new PartitionKeyMismatchRetryPolicy(collectionCache, retryPolicy, collectionLink, options);
    }
    final DocumentClientRetryPolicy effectivePolicy = retryPolicy;
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> createDocumentInternal(collectionLink, document, options, disableAutomaticIdGeneration, effectivePolicy),
        effectivePolicy);
}
/**
 * Builds the create request (serializing the document and resolving the
 * partition key), executes it, and maps the response to a typed result.
 */
private Mono<ResourceResponse<Document>> createDocumentInternal(String collectionLink, Object document,
        RequestOptions options, boolean disableAutomaticIdGeneration, DocumentClientRetryPolicy requestRetryPolicy) {
    try {
        logger.debug("Creating a Document. collectionLink: [{}]", collectionLink);
        return getCreateDocumentRequest(requestRetryPolicy, collectionLink, document,
                options, disableAutomaticIdGeneration, OperationType.Create)
            .flatMap(request -> create(request, requestRetryPolicy))
            .map(serviceResponse -> toResourceResponse(serviceResponse, Document.class));
    } catch (Exception e) {
        logger.debug("Failure in creating a document due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Upserts a document. Same retry wrapping as createDocument: protection
 * against partition-key mismatches when no explicit key is supplied.
 */
@Override
public Mono<ResourceResponse<Document>> upsertDocument(String collectionLink, Object document,
                                                       RequestOptions options, boolean disableAutomaticIdGeneration) {
    DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    if (options == null || options.getPartitionKey() == null) {
        retryPolicy = new PartitionKeyMismatchRetryPolicy(collectionCache, retryPolicy, collectionLink, options);
    }
    final DocumentClientRetryPolicy effectivePolicy = retryPolicy;
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> upsertDocumentInternal(collectionLink, document, options, disableAutomaticIdGeneration, effectivePolicy),
        effectivePolicy);
}
/**
 * Builds the upsert request (serialization plus partition-key resolution),
 * executes it, and maps the response to a typed result.
 */
private Mono<ResourceResponse<Document>> upsertDocumentInternal(String collectionLink, Object document,
        RequestOptions options, boolean disableAutomaticIdGeneration, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        logger.debug("Upserting a Document. collectionLink: [{}]", collectionLink);
        return getCreateDocumentRequest(retryPolicyInstance, collectionLink, document,
                options, disableAutomaticIdGeneration, OperationType.Upsert)
            .flatMap(request -> upsert(request, retryPolicyInstance))
            .map(serviceResponse -> toResourceResponse(serviceResponse, Document.class));
    } catch (Exception e) {
        logger.debug("Failure in upserting a document due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Replaces a document identified by link. Without an explicit partition
 * key, the policy is wrapped to retry PK mismatches caused by a stale
 * collection cache.
 */
@Override
public Mono<ResourceResponse<Document>> replaceDocument(String documentLink, Object document,
                                                        RequestOptions options) {
    DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    if (options == null || options.getPartitionKey() == null) {
        String collectionLink = Utils.getCollectionName(documentLink);
        retryPolicy = new PartitionKeyMismatchRetryPolicy(collectionCache, retryPolicy, collectionLink, options);
    }
    final DocumentClientRetryPolicy effectivePolicy = retryPolicy;
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> replaceDocumentInternal(documentLink, document, options, effectivePolicy),
        effectivePolicy);
}
/**
 * Validates arguments, converts the raw object into a typed Document, and
 * delegates to the Document-based replace path.
 */
private Mono<ResourceResponse<Document>> replaceDocumentInternal(String documentLink, Object document,
        RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(documentLink)) {
            throw new IllegalArgumentException("documentLink");
        }
        if (document == null) {
            throw new IllegalArgumentException("document");
        }
        Document typedDocument = documentFromObject(document, mapper);
        return this.replaceDocumentInternal(documentLink, typedDocument, options, retryPolicyInstance);
    } catch (Exception e) {
        // Pass the throwable itself so the stack trace is logged, consistent
        // with the other *Internal methods in this class.
        logger.debug("Failure in replacing a document due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Replaces a document using its self link. Without an explicit partition
 * key, the policy is wrapped to retry PK mismatches from a stale
 * collection cache.
 */
@Override
public Mono<ResourceResponse<Document>> replaceDocument(Document document, RequestOptions options) {
    DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    if (options == null || options.getPartitionKey() == null) {
        String collectionLink = document.getSelfLink();
        retryPolicy = new PartitionKeyMismatchRetryPolicy(collectionCache, retryPolicy, collectionLink, options);
    }
    final DocumentClientRetryPolicy effectivePolicy = retryPolicy;
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> replaceDocumentInternal(document, options, effectivePolicy),
        effectivePolicy);
}
/**
 * Replaces a document using its self link as the target.
 */
private Mono<ResourceResponse<Document>> replaceDocumentInternal(Document document, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (document == null) {
            throw new IllegalArgumentException("document");
        }
        return this.replaceDocumentInternal(document.getSelfLink(), document, options, retryPolicyInstance);
    } catch (Exception e) {
        // Fixed: the message previously said "replacing a database" in this
        // document path; also pass the throwable so the stack trace is logged.
        logger.debug("Failure in replacing a document due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Serializes the replacement document (recording serialization diagnostics),
 * resolves the collection to stamp the partition-key header, then issues the
 * replace (PUT) and maps the wire response to a typed result.
 */
private Mono<ResourceResponse<Document>> replaceDocumentInternal(String documentLink,
                                                                 Document document,
                                                                 RequestOptions options,
                                                                 DocumentClientRetryPolicy retryPolicyInstance) {
    if (document == null) {
        throw new IllegalArgumentException("document");
    }
    logger.debug("Replacing a Document. documentLink: [{}]", documentLink);
    final String path = Utils.joinPath(documentLink, null);
    final Map<String, String> requestHeaders = getRequestHeaders(options);
    // Time the document serialization for diagnostics.
    ZonedDateTime serializationStartTimeUTC = ZonedDateTime.now(ZoneOffset.UTC);
    ByteBuffer content = serializeJsonToByteBuffer(document);
    ZonedDateTime serializationEndTime = ZonedDateTime.now(ZoneOffset.UTC);
    SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics = new SerializationDiagnosticsContext.SerializationDiagnostics(
        serializationStartTimeUTC,
        serializationEndTime,
        SerializationDiagnosticsContext.SerializationType.ITEM_SERIALIZATION);
    final RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Replace,
        ResourceType.Document, path, requestHeaders, options, content);
    if (retryPolicyInstance != null) {
        retryPolicyInstance.onBeforeSendRequest(request);
    }
    SerializationDiagnosticsContext serializationDiagnosticsContext = BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosResponseDiagnostics);
    if (serializationDiagnosticsContext != null) {
        serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics);
    }
    Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = collectionCache.resolveCollectionAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosResponseDiagnostics), request);
    Mono<RxDocumentServiceRequest> requestObs = addPartitionKeyInformation(request, content, document, options, collectionObs);
    // Fixed: use the request emitted by the pipeline ('req') instead of the
    // captured outer 'request' (same instance today, but this keeps the code
    // consistent with deleteDocumentInternal and robust to pipeline changes).
    return requestObs.flatMap(req -> replace(req, retryPolicyInstance)
        .map(resp -> toResourceResponse(resp, Document.class)));
}
/**
 * Deletes a document by link, running under a fresh session-token-reset
 * retry policy.
 */
@Override
public Mono<ResourceResponse<Document>> deleteDocument(String documentLink, RequestOptions options) {
    DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> deleteDocumentInternal(documentLink, options, retryPolicy),
        retryPolicy);
}
/**
 * Builds and executes the DELETE for a single document. The collection is
 * resolved first so the partition-key header can be stamped (the document
 * body is null here, so the key must come from {@code options}).
 */
private Mono<ResourceResponse<Document>> deleteDocumentInternal(String documentLink, RequestOptions options,
                                                                DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(documentLink)) {
            throw new IllegalArgumentException("documentLink");
        }
        logger.debug("Deleting a Document. documentLink: [{}]", documentLink);
        String path = Utils.joinPath(documentLink, null);
        Map<String, String> requestHeaders = this.getRequestHeaders(options);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Delete,
            ResourceType.Document, path, requestHeaders, options);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = collectionCache.resolveCollectionAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosResponseDiagnostics), request);
        Mono<RxDocumentServiceRequest> requestObs = addPartitionKeyInformation(request, null, null, options, collectionObs);
        return requestObs.flatMap(req ->
            this.delete(req, retryPolicyInstance)
                .map(serviceResponse -> toResourceResponse(serviceResponse, Document.class)));
    } catch (Exception e) {
        // Pass the throwable itself so the stack trace is logged, consistent
        // with the other *Internal methods in this class.
        logger.debug("Failure in deleting a document due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Point-reads a document by link, running under a fresh
 * session-token-reset retry policy.
 */
@Override
public Mono<ResourceResponse<Document>> readDocument(String documentLink, RequestOptions options) {
    DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> readDocumentInternal(documentLink, options, retryPolicy),
        retryPolicy);
}
/**
 * Builds and executes the point-read (GET) for a single document; the
 * collection is resolved first so the partition-key header can be stamped.
 */
private Mono<ResourceResponse<Document>> readDocumentInternal(String documentLink, RequestOptions options,
                                                              DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(documentLink)) {
            throw new IllegalArgumentException("documentLink");
        }
        logger.debug("Reading a Document. documentLink: [{}]", documentLink);
        String path = Utils.joinPath(documentLink, null);
        Map<String, String> requestHeaders = this.getRequestHeaders(options);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Read,
            ResourceType.Document, path, requestHeaders, options);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = this.collectionCache.resolveCollectionAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosResponseDiagnostics), request);
        Mono<RxDocumentServiceRequest> requestObs = addPartitionKeyInformation(request, null, null, options, collectionObs);
        return requestObs.flatMap(req -> {
            // NOTE(review): onBeforeSendRequest is invoked a second time here
            // on the same request instance (unlike deleteDocumentInternal) —
            // presumably harmless, but confirm whether the retry policy
            // depends on this second call before removing it.
            if (retryPolicyInstance != null) {
                retryPolicyInstance.onBeforeSendRequest(request);
            }
            return this.read(request, retryPolicyInstance).map(serviceResponse -> toResourceResponse(serviceResponse, Document.class));
        });
    } catch (Exception e) {
        // Pass the throwable itself so the stack trace is logged, consistent
        // with the other *Internal methods in this class.
        logger.debug("Failure in reading a document due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Reads all documents in a collection; implemented as the trivial
 * SELECT * query so the (cross-partition-aware) query pipeline is reused.
 */
@Override
public Flux<FeedResponse<Document>> readDocuments(String collectionLink, FeedOptions options) {
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }
    return queryDocuments(collectionLink, "SELECT * FROM r", options);
}
/**
 * Reads many items in one logical operation: groups the (id, partition key)
 * pairs by owning physical partition range, issues one query per range, and
 * merges all resulting pages into a single synthetic feed response whose
 * request charge is the sum over all pages.
 */
@Override
public <T> Mono<FeedResponse<T>> readMany(
    List<Pair<String, PartitionKey>> itemKeyList,
    String collectionLink,
    FeedOptions options,
    Class<T> klass) {
    // Placeholder query request, used only to resolve the target collection.
    RxDocumentServiceRequest request = RxDocumentServiceRequest.create(
        OperationType.Query,
        ResourceType.Document,
        collectionLink, null
    );
    Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = collectionCache.resolveCollectionAsync(null, request);
    return collectionObs
        .flatMap(documentCollectionResourceResponse -> {
            final DocumentCollection collection = documentCollectionResourceResponse.v;
            if (collection == null) {
                throw new IllegalStateException("Collection cannot be null");
            }
            // The routing map lets us locate the range that owns each
            // effective partition key.
            Mono<Utils.ValueHolder<CollectionRoutingMap>> valueHolderMono = partitionKeyRangeCache
                .tryLookupAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosResponseDiagnostics),
                    collection.getResourceId(),
                    null,
                    null);
            return valueHolderMono.flatMap(collectionRoutingMapValueHolder -> {
                Map<PartitionKeyRange, List<Pair<String, PartitionKey>>> partitionRangeItemKeyMap =
                    new HashMap<>();
                CollectionRoutingMap routingMap = collectionRoutingMapValueHolder.v;
                if (routingMap == null) {
                    throw new IllegalStateException("Failed to get routing map.");
                }
                // Bucket each (id, pk) pair under the range owning its
                // effective partition key string.
                itemKeyList
                    .forEach(stringPartitionKeyPair -> {
                        String effectivePartitionKeyString = PartitionKeyInternalHelper
                            .getEffectivePartitionKeyString(BridgeInternal
                                    .getPartitionKeyInternal(stringPartitionKeyPair
                                        .getRight()),
                                collection
                                    .getPartitionKey());
                        PartitionKeyRange range =
                            routingMap.getRangeByEffectivePartitionKey(effectivePartitionKeyString);
                        if (partitionRangeItemKeyMap.get(range) == null) {
                            List<Pair<String, PartitionKey>> list = new ArrayList<>();
                            list.add(stringPartitionKeyPair);
                            partitionRangeItemKeyMap.put(range, list);
                        } else {
                            List<Pair<String, PartitionKey>> pairs =
                                partitionRangeItemKeyMap.get(range);
                            pairs.add(stringPartitionKeyPair);
                            partitionRangeItemKeyMap.put(range, pairs);
                        }
                    });
                Set<PartitionKeyRange> partitionKeyRanges = partitionRangeItemKeyMap.keySet();
                List<PartitionKeyRange> ranges = new ArrayList<>();
                ranges.addAll(partitionKeyRanges);
                // One SQL spec per range, restricted to that range's pairs.
                Map<PartitionKeyRange, SqlQuerySpec> rangeQueryMap;
                rangeQueryMap = getRangeQueryMap(partitionRangeItemKeyMap,
                    collection.getPartitionKey());
                // The outer query text is never executed; per-range specs in
                // rangeQueryMap drive the actual work.
                String sqlQuery = "this is dummy and only used in creating " +
                    "ParallelDocumentQueryExecutioncontext, but not used";
                return createReadManyQuery(collectionLink,
                    new SqlQuerySpec(sqlQuery),
                    options,
                    Document.class,
                    ResourceType.Document,
                    collection,
                    Collections.unmodifiableMap(rangeQueryMap))
                    .collectList()
                    .map(feedList -> {
                        // Merge: concatenate results, sum request charges.
                        List<T> finalList = new ArrayList<T>();
                        HashMap<String, String> headers = new HashMap<>();
                        double requestCharge = 0;
                        for (FeedResponse<Document> page : feedList) {
                            requestCharge += page.getRequestCharge();
                            finalList.addAll(page.getResults().stream().map(document ->
                                ModelBridgeInternal.toObjectFromJsonSerializable(document, klass)).collect(Collectors.toList()));
                        }
                        headers.put(HttpConstants.HttpHeaders.REQUEST_CHARGE, Double
                            .toString(requestCharge));
                        FeedResponse<T> frp = BridgeInternal
                            .createFeedResponse(finalList, headers);
                        return frp;
                    });
            });
        }
        );
}
/**
 * Builds, per partition range, the SQL spec that fetches that range's
 * items. When the partition-key path is "id", the values double as ids and
 * the simpler IN-clause form is used.
 */
private Map<PartitionKeyRange, SqlQuerySpec> getRangeQueryMap(
        Map<PartitionKeyRange, List<Pair<String, PartitionKey>>> partitionRangeItemKeyMap,
        PartitionKeyDefinition partitionKeyDefinition) {
    Map<PartitionKeyRange, SqlQuerySpec> rangeQueryMap = new HashMap<>();
    String partitionKeySelector = createPkSelector(partitionKeyDefinition);
    boolean pkIsId = "[\"id\"]".equals(partitionKeySelector);
    for (Map.Entry<PartitionKeyRange, List<Pair<String, PartitionKey>>> entry : partitionRangeItemKeyMap.entrySet()) {
        SqlQuerySpec spec = pkIsId
            ? createReadManyQuerySpecPartitionKeyIdSame(entry.getValue(), partitionKeySelector)
            : createReadManyQuerySpec(entry.getValue(), partitionKeySelector);
        rangeQueryMap.put(entry.getKey(), spec);
    }
    return rangeQueryMap;
}
/**
 * Builds a "SELECT * FROM c WHERE c.id IN ( ... )" query for the case where the
 * partition key path is the document id. Pairs whose id and partition key value
 * disagree are skipped (they cannot match when pk == id).
 *
 * Bug fix: the original appended the "," separator based on the loop index
 * (i < size - 1), so a skipped pair could leave a dangling comma — e.g.
 * "IN ( @param0, )" — which is invalid SQL. Separators are now emitted only
 * between parameters that are actually kept.
 */
private SqlQuerySpec createReadManyQuerySpecPartitionKeyIdSame(List<Pair<String, PartitionKey>> idPartitionKeyPairList, String partitionKeySelector) {
    StringBuilder queryStringBuilder = new StringBuilder();
    List<SqlParameter> parameters = new ArrayList<>();
    queryStringBuilder.append("SELECT * FROM c WHERE c.id IN ( ");
    boolean first = true;
    for (int i = 0; i < idPartitionKeyPairList.size(); i++) {
        Pair<String, PartitionKey> pair = idPartitionKeyPairList.get(i);
        String idValue = pair.getLeft();
        String idParamName = "@param" + i;
        PartitionKey pkValueAsPartitionKey = pair.getRight();
        Object pkValue = ModelBridgeInternal.getPartitionKeyObject(pkValueAsPartitionKey);
        // pk path is /id, so a pair whose pk value differs from its id can never match.
        if (!Objects.equals(idValue, pkValue)) {
            continue;
        }
        if (!first) {
            queryStringBuilder.append(", ");
        }
        first = false;
        parameters.add(new SqlParameter(idParamName, idValue));
        queryStringBuilder.append(idParamName);
    }
    queryStringBuilder.append(" )");
    return new SqlQuerySpec(queryStringBuilder.toString(), parameters);
}
/**
 * Builds a parameterized "SELECT * FROM c WHERE ( (c.id = .. AND c[pk] = ..) OR ... )"
 * query matching each (id, partition key) pair. Parameters are numbered @param{2i}
 * for the pk value and @param{2i+1} for the id.
 */
private SqlQuerySpec createReadManyQuerySpec(List<Pair<String, PartitionKey>> idPartitionKeyPairList, String partitionKeySelector) {
    List<SqlParameter> parameters = new ArrayList<>();
    StringBuilder query = new StringBuilder("SELECT * FROM c WHERE ( ");
    for (int i = 0; i < idPartitionKeyPairList.size(); i++) {
        Pair<String, PartitionKey> pair = idPartitionKeyPairList.get(i);
        String pkParamName = "@param" + (2 * i);
        String idParamName = "@param" + (2 * i + 1);
        parameters.add(new SqlParameter(pkParamName,
            ModelBridgeInternal.getPartitionKeyObject(pair.getRight())));
        parameters.add(new SqlParameter(idParamName, pair.getLeft()));
        if (i > 0) {
            query.append(" OR ");
        }
        query.append("(")
            .append("c.id = ")
            .append(idParamName)
            .append(" AND ")
            .append(" c")
            .append(partitionKeySelector)
            .append(" = ")
            .append(pkParamName)
            .append(" )");
    }
    query.append(" )");
    return new SqlQuerySpec(query.toString(), parameters);
}
/**
 * Converts the partition key definition paths (e.g. "/pk") into a bracketed
 * selector string (e.g. ["pk"]) usable after "c" in a SQL query.
 */
private String createPkSelector(PartitionKeyDefinition partitionKeyDefinition) {
    StringBuilder selector = new StringBuilder();
    for (String pathPart : partitionKeyDefinition.getPaths()) {
        // Drop the leading '/' of the path segment.
        String withoutSlash = StringUtils.substring(pathPart, 1);
        // NOTE(review): this replaces '"' with a lone backslash rather than an
        // escaped quote '\"' — looks suspicious; confirm the intended escaping.
        String escaped = StringUtils.replace(withoutSlash, "\"", "\\");
        selector.append("[\"").append(escaped).append("\"]");
    }
    return selector.toString();
}
/**
 * Creates and executes the per-partition "read many" query contexts built from the
 * supplied range-to-query map, flattening the per-context results into one Flux.
 */
private <T extends Resource> Flux<FeedResponse<T>> createReadManyQuery(
String parentResourceLink,
SqlQuerySpec sqlQuery,
FeedOptions options,
Class<T> klass,
ResourceType resourceTypeEnum,
DocumentCollection collection,
Map<PartitionKeyRange, SqlQuerySpec> rangeQueryMap) {
    IDocumentQueryClient queryClient = documentQueryClientImpl(RxDocumentClientImpl.this);
    UUID activityId = Utils.randomUUID();
    // Collection resource id doubles as both the query context's resource id and link.
    Flux<? extends IDocumentQueryExecutionContext<T>> contexts =
        DocumentQueryExecutionContextFactory.createReadManyQueryAsync(queryClient,
            collection.getResourceId(),
            sqlQuery,
            rangeQueryMap,
            options,
            collection.getResourceId(),
            parentResourceLink,
            activityId,
            klass,
            resourceTypeEnum);
    return contexts.flatMap(context -> context.executeAsync());
}
/** Queries documents using a raw query string; delegates to the {@link SqlQuerySpec} overload. */
@Override
public Flux<FeedResponse<Document>> queryDocuments(String collectionLink, String query,
FeedOptions options) {
    SqlQuerySpec querySpec = new SqlQuerySpec(query);
    return queryDocuments(collectionLink, querySpec, options);
}
/**
 * Builds an {@link IDocumentQueryClient} facade over this client for the query
 * execution pipeline. The anonymous class delegates to the enclosing
 * RxDocumentClientImpl's caches, retry policy and consistency settings.
 * Note: the {@code rxDocumentClientImpl} parameter is unused — the anonymous
 * class captures {@code RxDocumentClientImpl.this} directly.
 */
private IDocumentQueryClient documentQueryClientImpl(RxDocumentClientImpl rxDocumentClientImpl) {
return new IDocumentQueryClient () {
@Override
public RxCollectionCache getCollectionCache() {
return RxDocumentClientImpl.this.collectionCache;
}
@Override
public RxPartitionKeyRangeCache getPartitionKeyRangeCache() {
return RxDocumentClientImpl.this.partitionKeyRangeCache;
}
@Override
public IRetryPolicyFactory getResetSessionTokenRetryPolicy() {
return RxDocumentClientImpl.this.resetSessionTokenRetryPolicy;
}
@Override
public ConsistencyLevel getDefaultConsistencyLevelAsync() {
// Account-level default consistency, read from the gateway configuration.
return RxDocumentClientImpl.this.gatewayConfigurationReader.getDefaultConsistencyLevel();
}
@Override
public ConsistencyLevel getDesiredConsistencyLevelAsync() {
// Consistency level the client was configured with (may be null).
return RxDocumentClientImpl.this.consistencyLevel;
}
@Override
public Mono<RxDocumentServiceResponse> executeQueryAsync(RxDocumentServiceRequest request) {
return RxDocumentClientImpl.this.query(request).single();
}
@Override
public QueryCompatibilityMode getQueryCompatibilityMode() {
// TODO: Expose this publicly via device config
return QueryCompatibilityMode.Default;
}
@Override
public Mono<RxDocumentServiceResponse> readFeedAsync(RxDocumentServiceRequest request) {
// Intentionally unsupported by this facade; query path does not use readFeed.
return null;
}
};
}
/** Queries documents with a parameterized query spec through the generic query pipeline. */
@Override
public Flux<FeedResponse<Document>> queryDocuments(String collectionLink, SqlQuerySpec querySpec,
FeedOptions options) {
    return createQuery(collectionLink, querySpec, options, Document.class, ResourceType.Document);
}
/** Reads the change feed of a collection as a Flux of document pages. */
@Override
public Flux<FeedResponse<Document>> queryDocumentChangeFeed(final String collectionLink,
final ChangeFeedOptions changeFeedOptions) {
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }
    ChangeFeedQueryImpl<Document> changeFeedQuery = new ChangeFeedQueryImpl<Document>(
        this, ResourceType.Document, Document.class, collectionLink, changeFeedOptions);
    return changeFeedQuery.executeAsync();
}
/** Reads the partition key ranges feed of the given collection. */
@Override
public Flux<FeedResponse<PartitionKeyRange>> readPartitionKeyRanges(final String collectionLink,
FeedOptions options) {
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }
    String feedPath = Utils.joinPath(collectionLink, Paths.PARTITION_KEY_RANGES_PATH_SEGMENT);
    return readFeed(options, ResourceType.PartitionKeyRange, PartitionKeyRange.class, feedPath);
}
/**
 * Validates inputs and builds the service request for a stored procedure
 * create/upsert operation under the given collection.
 */
private RxDocumentServiceRequest getStoredProcedureRequest(String collectionLink, StoredProcedure storedProcedure,
RequestOptions options, OperationType operationType) {
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }
    if (storedProcedure == null) {
        throw new IllegalArgumentException("storedProcedure");
    }
    validateResource(storedProcedure);
    String path = Utils.joinPath(collectionLink, Paths.STORED_PROCEDURES_PATH_SEGMENT);
    Map<String, String> requestHeaders = this.getRequestHeaders(options);
    return RxDocumentServiceRequest.create(operationType, ResourceType.StoredProcedure,
        path, storedProcedure, requestHeaders, options);
}
/**
 * Validates inputs and builds the service request for a user defined function
 * create/upsert operation under the given collection.
 */
private RxDocumentServiceRequest getUserDefinedFunctionRequest(String collectionLink, UserDefinedFunction udf,
RequestOptions options, OperationType operationType) {
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }
    if (udf == null) {
        throw new IllegalArgumentException("udf");
    }
    validateResource(udf);
    String path = Utils.joinPath(collectionLink, Paths.USER_DEFINED_FUNCTIONS_PATH_SEGMENT);
    Map<String, String> requestHeaders = this.getRequestHeaders(options);
    return RxDocumentServiceRequest.create(operationType, ResourceType.UserDefinedFunction,
        path, udf, requestHeaders, options);
}
/** Creates a stored procedure, retrying transparently via the session-token-reset retry policy. */
@Override
public Mono<ResourceResponse<StoredProcedure>> createStoredProcedure(String collectionLink,
StoredProcedure storedProcedure, RequestOptions options) {
    DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> createStoredProcedureInternal(collectionLink, storedProcedure, options, retryPolicy),
        retryPolicy);
}
/** Single (retryable) attempt at creating a stored procedure. */
private Mono<ResourceResponse<StoredProcedure>> createStoredProcedureInternal(String collectionLink,
StoredProcedure storedProcedure, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        logger.debug("Creating a StoredProcedure. collectionLink: [{}], storedProcedure id [{}]",
            collectionLink, storedProcedure.getId());
        RxDocumentServiceRequest serviceRequest =
            getStoredProcedureRequest(collectionLink, storedProcedure, options, OperationType.Create);
        // Let the retry policy observe the request before it is dispatched.
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(serviceRequest);
        }
        return this.create(serviceRequest, retryPolicyInstance)
            .map(serviceResponse -> toResourceResponse(serviceResponse, StoredProcedure.class));
    } catch (Exception error) {
        logger.debug("Failure in creating a StoredProcedure due to [{}]", error.getMessage(), error);
        return Mono.error(error);
    }
}
/** Upserts a stored procedure, retrying transparently via the session-token-reset retry policy. */
@Override
public Mono<ResourceResponse<StoredProcedure>> upsertStoredProcedure(String collectionLink,
StoredProcedure storedProcedure, RequestOptions options) {
    DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> upsertStoredProcedureInternal(collectionLink, storedProcedure, options, retryPolicy),
        retryPolicy);
}
/** Single (retryable) attempt at upserting a stored procedure. */
private Mono<ResourceResponse<StoredProcedure>> upsertStoredProcedureInternal(String collectionLink,
StoredProcedure storedProcedure, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        logger.debug("Upserting a StoredProcedure. collectionLink: [{}], storedProcedure id [{}]",
            collectionLink, storedProcedure.getId());
        RxDocumentServiceRequest serviceRequest =
            getStoredProcedureRequest(collectionLink, storedProcedure, options, OperationType.Upsert);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(serviceRequest);
        }
        return this.upsert(serviceRequest, retryPolicyInstance)
            .map(serviceResponse -> toResourceResponse(serviceResponse, StoredProcedure.class));
    } catch (Exception error) {
        logger.debug("Failure in upserting a StoredProcedure due to [{}]", error.getMessage(), error);
        return Mono.error(error);
    }
}
/** Replaces a stored procedure, retrying transparently via the session-token-reset retry policy. */
@Override
public Mono<ResourceResponse<StoredProcedure>> replaceStoredProcedure(StoredProcedure storedProcedure,
RequestOptions options) {
    DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> replaceStoredProcedureInternal(storedProcedure, options, retryPolicy),
        retryPolicy);
}
/** Single (retryable) attempt at replacing a stored procedure via its self link. */
private Mono<ResourceResponse<StoredProcedure>> replaceStoredProcedureInternal(StoredProcedure storedProcedure,
RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (storedProcedure == null) {
            throw new IllegalArgumentException("storedProcedure");
        }
        logger.debug("Replacing a StoredProcedure. storedProcedure id [{}]", storedProcedure.getId());
        RxDocumentClientImpl.validateResource(storedProcedure);
        String path = Utils.joinPath(storedProcedure.getSelfLink(), null);
        Map<String, String> requestHeaders = getRequestHeaders(options);
        RxDocumentServiceRequest serviceRequest = RxDocumentServiceRequest.create(OperationType.Replace,
            ResourceType.StoredProcedure, path, storedProcedure, requestHeaders, options);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(serviceRequest);
        }
        return this.replace(serviceRequest, retryPolicyInstance)
            .map(serviceResponse -> toResourceResponse(serviceResponse, StoredProcedure.class));
    } catch (Exception error) {
        logger.debug("Failure in replacing a StoredProcedure due to [{}]", error.getMessage(), error);
        return Mono.error(error);
    }
}
/** Deletes a stored procedure, retrying transparently via the session-token-reset retry policy. */
@Override
public Mono<ResourceResponse<StoredProcedure>> deleteStoredProcedure(String storedProcedureLink,
RequestOptions options) {
    DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> deleteStoredProcedureInternal(storedProcedureLink, options, retryPolicy),
        retryPolicy);
}
/** Single (retryable) attempt at deleting a stored procedure by link. */
private Mono<ResourceResponse<StoredProcedure>> deleteStoredProcedureInternal(String storedProcedureLink,
RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(storedProcedureLink)) {
            throw new IllegalArgumentException("storedProcedureLink");
        }
        logger.debug("Deleting a StoredProcedure. storedProcedureLink [{}]", storedProcedureLink);
        String path = Utils.joinPath(storedProcedureLink, null);
        Map<String, String> requestHeaders = this.getRequestHeaders(options);
        RxDocumentServiceRequest serviceRequest = RxDocumentServiceRequest.create(OperationType.Delete,
            ResourceType.StoredProcedure, path, requestHeaders, options);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(serviceRequest);
        }
        return this.delete(serviceRequest, retryPolicyInstance)
            .map(serviceResponse -> toResourceResponse(serviceResponse, StoredProcedure.class));
    } catch (Exception error) {
        logger.debug("Failure in deleting a StoredProcedure due to [{}]", error.getMessage(), error);
        return Mono.error(error);
    }
}
/** Reads a stored procedure, retrying transparently via the session-token-reset retry policy. */
@Override
public Mono<ResourceResponse<StoredProcedure>> readStoredProcedure(String storedProcedureLink,
RequestOptions options) {
    DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> readStoredProcedureInternal(storedProcedureLink, options, retryPolicy),
        retryPolicy);
}
/** Single (retryable) attempt at reading a stored procedure by link. */
private Mono<ResourceResponse<StoredProcedure>> readStoredProcedureInternal(String storedProcedureLink,
RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(storedProcedureLink)) {
            throw new IllegalArgumentException("storedProcedureLink");
        }
        logger.debug("Reading a StoredProcedure. storedProcedureLink [{}]", storedProcedureLink);
        String path = Utils.joinPath(storedProcedureLink, null);
        Map<String, String> requestHeaders = this.getRequestHeaders(options);
        RxDocumentServiceRequest serviceRequest = RxDocumentServiceRequest.create(OperationType.Read,
            ResourceType.StoredProcedure, path, requestHeaders, options);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(serviceRequest);
        }
        return this.read(serviceRequest, retryPolicyInstance)
            .map(serviceResponse -> toResourceResponse(serviceResponse, StoredProcedure.class));
    } catch (Exception error) {
        logger.debug("Failure in reading a StoredProcedure due to [{}]", error.getMessage(), error);
        return Mono.error(error);
    }
}
/** Reads the stored procedures feed of the given collection. */
@Override
public Flux<FeedResponse<StoredProcedure>> readStoredProcedures(String collectionLink,
FeedOptions options) {
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }
    String feedPath = Utils.joinPath(collectionLink, Paths.STORED_PROCEDURES_PATH_SEGMENT);
    return readFeed(options, ResourceType.StoredProcedure, StoredProcedure.class, feedPath);
}
/** Queries stored procedures using a raw query string; delegates to the SqlQuerySpec overload. */
@Override
public Flux<FeedResponse<StoredProcedure>> queryStoredProcedures(String collectionLink, String query,
FeedOptions options) {
    SqlQuerySpec querySpec = new SqlQuerySpec(query);
    return queryStoredProcedures(collectionLink, querySpec, options);
}
/** Queries stored procedures through the generic query pipeline. */
@Override
public Flux<FeedResponse<StoredProcedure>> queryStoredProcedures(String collectionLink,
SqlQuerySpec querySpec, FeedOptions options) {
    return createQuery(collectionLink, querySpec, options, StoredProcedure.class, ResourceType.StoredProcedure);
}
/** Executes a stored procedure without request options; delegates to the full overload. */
@Override
public Mono<StoredProcedureResponse> executeStoredProcedure(String storedProcedureLink,
Object[] procedureParams) {
    return this.executeStoredProcedure(storedProcedureLink, null, procedureParams);
}
/** Executes a stored procedure, retrying transparently via the session-token-reset retry policy. */
@Override
public Mono<StoredProcedureResponse> executeStoredProcedure(String storedProcedureLink,
RequestOptions options, Object[] procedureParams) {
    DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> executeStoredProcedureInternal(storedProcedureLink, options, procedureParams, retryPolicy),
        retryPolicy);
}
/**
 * Single (retryable) attempt at executing a stored procedure. Serializes the
 * procedure parameters as the request body, resolves partition key information,
 * dispatches the request and captures the resulting session token.
 */
private Mono<StoredProcedureResponse> executeStoredProcedureInternal(String storedProcedureLink,
RequestOptions options, Object[] procedureParams, DocumentClientRetryPolicy retryPolicy) {
try {
logger.debug("Executing a StoredProcedure. storedProcedureLink [{}]", storedProcedureLink);
String path = Utils.joinPath(storedProcedureLink, null);
Map<String, String> requestHeaders = getRequestHeaders(options);
// Sproc execution responses are JSON regardless of the default content negotiation.
requestHeaders.put(HttpConstants.HttpHeaders.ACCEPT, RuntimeConstants.MediaTypes.JSON);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.ExecuteJavaScript,
ResourceType.StoredProcedure, path,
procedureParams != null ? RxDocumentClientImpl.serializeProcedureParams(procedureParams) : "",
requestHeaders, options);
Mono<RxDocumentServiceRequest> reqObs = addPartitionKeyInformation(request, null, null, options);
// NOTE(review): the chain sends `request`, not the `req` emitted by
// addPartitionKeyInformation — safe only if that helper mutates and returns
// the same instance; confirm, and prefer `req` for clarity.
return reqObs.flatMap(req -> create(request, retryPolicy)
.map(response -> {
this.captureSessionToken(request, response);
return toStoredProcedureResponse(response);
}));
} catch (Exception e) {
logger.debug("Failure in executing a StoredProcedure due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
/** Creates a trigger, retrying transparently via the session-token-reset retry policy. */
@Override
public Mono<ResourceResponse<Trigger>> createTrigger(String collectionLink, Trigger trigger,
RequestOptions options) {
    DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> createTriggerInternal(collectionLink, trigger, options, retryPolicy),
        retryPolicy);
}
/** Single (retryable) attempt at creating a trigger. */
private Mono<ResourceResponse<Trigger>> createTriggerInternal(String collectionLink, Trigger trigger,
RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        logger.debug("Creating a Trigger. collectionLink [{}], trigger id [{}]", collectionLink,
            trigger.getId());
        RxDocumentServiceRequest serviceRequest =
            getTriggerRequest(collectionLink, trigger, options, OperationType.Create);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(serviceRequest);
        }
        return this.create(serviceRequest, retryPolicyInstance)
            .map(serviceResponse -> toResourceResponse(serviceResponse, Trigger.class));
    } catch (Exception error) {
        logger.debug("Failure in creating a Trigger due to [{}]", error.getMessage(), error);
        return Mono.error(error);
    }
}
/** Upserts a trigger, retrying transparently via the session-token-reset retry policy. */
@Override
public Mono<ResourceResponse<Trigger>> upsertTrigger(String collectionLink, Trigger trigger,
RequestOptions options) {
    DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> upsertTriggerInternal(collectionLink, trigger, options, retryPolicy),
        retryPolicy);
}
/** Single (retryable) attempt at upserting a trigger. */
private Mono<ResourceResponse<Trigger>> upsertTriggerInternal(String collectionLink, Trigger trigger,
RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        logger.debug("Upserting a Trigger. collectionLink [{}], trigger id [{}]", collectionLink,
            trigger.getId());
        RxDocumentServiceRequest serviceRequest =
            getTriggerRequest(collectionLink, trigger, options, OperationType.Upsert);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(serviceRequest);
        }
        return this.upsert(serviceRequest, retryPolicyInstance)
            .map(serviceResponse -> toResourceResponse(serviceResponse, Trigger.class));
    } catch (Exception error) {
        logger.debug("Failure in upserting a Trigger due to [{}]", error.getMessage(), error);
        return Mono.error(error);
    }
}
/**
 * Validates inputs and builds the service request for a trigger create/upsert
 * operation under the given collection.
 */
private RxDocumentServiceRequest getTriggerRequest(String collectionLink, Trigger trigger, RequestOptions options,
OperationType operationType) {
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }
    if (trigger == null) {
        throw new IllegalArgumentException("trigger");
    }
    RxDocumentClientImpl.validateResource(trigger);
    String path = Utils.joinPath(collectionLink, Paths.TRIGGERS_PATH_SEGMENT);
    Map<String, String> requestHeaders = getRequestHeaders(options);
    return RxDocumentServiceRequest.create(operationType, ResourceType.Trigger, path,
        trigger, requestHeaders, options);
}
/** Replaces a trigger, retrying transparently via the session-token-reset retry policy. */
@Override
public Mono<ResourceResponse<Trigger>> replaceTrigger(Trigger trigger, RequestOptions options) {
    DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> replaceTriggerInternal(trigger, options, retryPolicy),
        retryPolicy);
}
/** Single (retryable) attempt at replacing a trigger via its self link. */
private Mono<ResourceResponse<Trigger>> replaceTriggerInternal(Trigger trigger, RequestOptions options,
DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (trigger == null) {
            throw new IllegalArgumentException("trigger");
        }
        logger.debug("Replacing a Trigger. trigger id [{}]", trigger.getId());
        RxDocumentClientImpl.validateResource(trigger);
        String path = Utils.joinPath(trigger.getSelfLink(), null);
        Map<String, String> requestHeaders = getRequestHeaders(options);
        RxDocumentServiceRequest serviceRequest = RxDocumentServiceRequest.create(OperationType.Replace,
            ResourceType.Trigger, path, trigger, requestHeaders, options);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(serviceRequest);
        }
        return this.replace(serviceRequest, retryPolicyInstance)
            .map(serviceResponse -> toResourceResponse(serviceResponse, Trigger.class));
    } catch (Exception error) {
        logger.debug("Failure in replacing a Trigger due to [{}]", error.getMessage(), error);
        return Mono.error(error);
    }
}
/** Deletes a trigger, retrying transparently via the session-token-reset retry policy. */
@Override
public Mono<ResourceResponse<Trigger>> deleteTrigger(String triggerLink, RequestOptions options) {
    DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> deleteTriggerInternal(triggerLink, options, retryPolicy),
        retryPolicy);
}
/** Single (retryable) attempt at deleting a trigger by link. */
private Mono<ResourceResponse<Trigger>> deleteTriggerInternal(String triggerLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(triggerLink)) {
            throw new IllegalArgumentException("triggerLink");
        }
        logger.debug("Deleting a Trigger. triggerLink [{}]", triggerLink);
        String path = Utils.joinPath(triggerLink, null);
        Map<String, String> requestHeaders = getRequestHeaders(options);
        RxDocumentServiceRequest serviceRequest = RxDocumentServiceRequest.create(OperationType.Delete,
            ResourceType.Trigger, path, requestHeaders, options);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(serviceRequest);
        }
        return this.delete(serviceRequest, retryPolicyInstance)
            .map(serviceResponse -> toResourceResponse(serviceResponse, Trigger.class));
    } catch (Exception error) {
        logger.debug("Failure in deleting a Trigger due to [{}]", error.getMessage(), error);
        return Mono.error(error);
    }
}
/** Reads a trigger, retrying transparently via the session-token-reset retry policy. */
@Override
public Mono<ResourceResponse<Trigger>> readTrigger(String triggerLink, RequestOptions options) {
    DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> readTriggerInternal(triggerLink, options, retryPolicy),
        retryPolicy);
}
/** Single (retryable) attempt at reading a trigger by link. */
private Mono<ResourceResponse<Trigger>> readTriggerInternal(String triggerLink, RequestOptions options,
DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(triggerLink)) {
            throw new IllegalArgumentException("triggerLink");
        }
        logger.debug("Reading a Trigger. triggerLink [{}]", triggerLink);
        String path = Utils.joinPath(triggerLink, null);
        Map<String, String> requestHeaders = getRequestHeaders(options);
        RxDocumentServiceRequest serviceRequest = RxDocumentServiceRequest.create(OperationType.Read,
            ResourceType.Trigger, path, requestHeaders, options);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(serviceRequest);
        }
        return this.read(serviceRequest, retryPolicyInstance)
            .map(serviceResponse -> toResourceResponse(serviceResponse, Trigger.class));
    } catch (Exception error) {
        logger.debug("Failure in reading a Trigger due to [{}]", error.getMessage(), error);
        return Mono.error(error);
    }
}
/** Reads the triggers feed of the given collection. */
@Override
public Flux<FeedResponse<Trigger>> readTriggers(String collectionLink, FeedOptions options) {
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }
    String feedPath = Utils.joinPath(collectionLink, Paths.TRIGGERS_PATH_SEGMENT);
    return readFeed(options, ResourceType.Trigger, Trigger.class, feedPath);
}
/** Queries triggers using a raw query string; delegates to the SqlQuerySpec overload. */
@Override
public Flux<FeedResponse<Trigger>> queryTriggers(String collectionLink, String query,
FeedOptions options) {
    SqlQuerySpec querySpec = new SqlQuerySpec(query);
    return queryTriggers(collectionLink, querySpec, options);
}
/** Queries triggers through the generic query pipeline. */
@Override
public Flux<FeedResponse<Trigger>> queryTriggers(String collectionLink, SqlQuerySpec querySpec,
FeedOptions options) {
    return createQuery(collectionLink, querySpec, options, Trigger.class, ResourceType.Trigger);
}
/** Creates a UDF, retrying transparently via the session-token-reset retry policy. */
@Override
public Mono<ResourceResponse<UserDefinedFunction>> createUserDefinedFunction(String collectionLink,
UserDefinedFunction udf, RequestOptions options) {
    DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> createUserDefinedFunctionInternal(collectionLink, udf, options, retryPolicy),
        retryPolicy);
}
/** Single (retryable) attempt at creating a user defined function. */
private Mono<ResourceResponse<UserDefinedFunction>> createUserDefinedFunctionInternal(String collectionLink,
UserDefinedFunction udf, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        logger.debug("Creating a UserDefinedFunction. collectionLink [{}], udf id [{}]", collectionLink,
            udf.getId());
        RxDocumentServiceRequest serviceRequest =
            getUserDefinedFunctionRequest(collectionLink, udf, options, OperationType.Create);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(serviceRequest);
        }
        return this.create(serviceRequest, retryPolicyInstance)
            .map(serviceResponse -> toResourceResponse(serviceResponse, UserDefinedFunction.class));
    } catch (Exception error) {
        logger.debug("Failure in creating a UserDefinedFunction due to [{}]", error.getMessage(), error);
        return Mono.error(error);
    }
}
/** Upserts a UDF, retrying transparently via the session-token-reset retry policy. */
@Override
public Mono<ResourceResponse<UserDefinedFunction>> upsertUserDefinedFunction(String collectionLink,
UserDefinedFunction udf, RequestOptions options) {
    DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> upsertUserDefinedFunctionInternal(collectionLink, udf, options, retryPolicy),
        retryPolicy);
}
/** Single (retryable) attempt at upserting a user defined function. */
private Mono<ResourceResponse<UserDefinedFunction>> upsertUserDefinedFunctionInternal(String collectionLink,
UserDefinedFunction udf, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        logger.debug("Upserting a UserDefinedFunction. collectionLink [{}], udf id [{}]", collectionLink,
            udf.getId());
        RxDocumentServiceRequest serviceRequest =
            getUserDefinedFunctionRequest(collectionLink, udf, options, OperationType.Upsert);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(serviceRequest);
        }
        return this.upsert(serviceRequest, retryPolicyInstance)
            .map(serviceResponse -> toResourceResponse(serviceResponse, UserDefinedFunction.class));
    } catch (Exception error) {
        logger.debug("Failure in upserting a UserDefinedFunction due to [{}]", error.getMessage(), error);
        return Mono.error(error);
    }
}
/** Replaces a UDF, retrying transparently via the session-token-reset retry policy. */
@Override
public Mono<ResourceResponse<UserDefinedFunction>> replaceUserDefinedFunction(UserDefinedFunction udf,
RequestOptions options) {
    DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> replaceUserDefinedFunctionInternal(udf, options, retryPolicy),
        retryPolicy);
}
/** Single (retryable) attempt at replacing a user defined function via its self link. */
private Mono<ResourceResponse<UserDefinedFunction>> replaceUserDefinedFunctionInternal(UserDefinedFunction udf,
RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (udf == null) {
            throw new IllegalArgumentException("udf");
        }
        logger.debug("Replacing a UserDefinedFunction. udf id [{}]", udf.getId());
        validateResource(udf);
        String path = Utils.joinPath(udf.getSelfLink(), null);
        Map<String, String> requestHeaders = this.getRequestHeaders(options);
        RxDocumentServiceRequest serviceRequest = RxDocumentServiceRequest.create(OperationType.Replace,
            ResourceType.UserDefinedFunction, path, udf, requestHeaders, options);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(serviceRequest);
        }
        return this.replace(serviceRequest, retryPolicyInstance)
            .map(serviceResponse -> toResourceResponse(serviceResponse, UserDefinedFunction.class));
    } catch (Exception error) {
        logger.debug("Failure in replacing a UserDefinedFunction due to [{}]", error.getMessage(), error);
        return Mono.error(error);
    }
}
/** Deletes a UDF, retrying transparently via the session-token-reset retry policy. */
@Override
public Mono<ResourceResponse<UserDefinedFunction>> deleteUserDefinedFunction(String udfLink,
RequestOptions options) {
    DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> deleteUserDefinedFunctionInternal(udfLink, options, retryPolicy),
        retryPolicy);
}
/** Single (retryable) attempt at deleting a user defined function by link. */
private Mono<ResourceResponse<UserDefinedFunction>> deleteUserDefinedFunctionInternal(String udfLink,
RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(udfLink)) {
            throw new IllegalArgumentException("udfLink");
        }
        logger.debug("Deleting a UserDefinedFunction. udfLink [{}]", udfLink);
        String path = Utils.joinPath(udfLink, null);
        Map<String, String> requestHeaders = this.getRequestHeaders(options);
        RxDocumentServiceRequest serviceRequest = RxDocumentServiceRequest.create(OperationType.Delete,
            ResourceType.UserDefinedFunction, path, requestHeaders, options);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(serviceRequest);
        }
        return this.delete(serviceRequest, retryPolicyInstance)
            .map(serviceResponse -> toResourceResponse(serviceResponse, UserDefinedFunction.class));
    } catch (Exception error) {
        logger.debug("Failure in deleting a UserDefinedFunction due to [{}]", error.getMessage(), error);
        return Mono.error(error);
    }
}
/** Reads a UDF, retrying transparently via the session-token-reset retry policy. */
@Override
public Mono<ResourceResponse<UserDefinedFunction>> readUserDefinedFunction(String udfLink,
RequestOptions options) {
    DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> readUserDefinedFunctionInternal(udfLink, options, retryPolicy),
        retryPolicy);
}
/** Single (retryable) attempt at reading a user defined function by link. */
private Mono<ResourceResponse<UserDefinedFunction>> readUserDefinedFunctionInternal(String udfLink,
RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(udfLink)) {
            throw new IllegalArgumentException("udfLink");
        }
        logger.debug("Reading a UserDefinedFunction. udfLink [{}]", udfLink);
        String path = Utils.joinPath(udfLink, null);
        Map<String, String> requestHeaders = this.getRequestHeaders(options);
        RxDocumentServiceRequest serviceRequest = RxDocumentServiceRequest.create(OperationType.Read,
            ResourceType.UserDefinedFunction, path, requestHeaders, options);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(serviceRequest);
        }
        return this.read(serviceRequest, retryPolicyInstance)
            .map(serviceResponse -> toResourceResponse(serviceResponse, UserDefinedFunction.class));
    } catch (Exception error) {
        logger.debug("Failure in reading a UserDefinedFunction due to [{}]", error.getMessage(), error);
        return Mono.error(error);
    }
}
/** Reads the user defined functions feed of the given collection. */
@Override
public Flux<FeedResponse<UserDefinedFunction>> readUserDefinedFunctions(String collectionLink,
FeedOptions options) {
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }
    String feedPath = Utils.joinPath(collectionLink, Paths.USER_DEFINED_FUNCTIONS_PATH_SEGMENT);
    return readFeed(options, ResourceType.UserDefinedFunction, UserDefinedFunction.class, feedPath);
}
/** Queries UDFs using a raw query string; delegates to the SqlQuerySpec overload. */
@Override
public Flux<FeedResponse<UserDefinedFunction>> queryUserDefinedFunctions(String collectionLink,
String query, FeedOptions options) {
    SqlQuerySpec querySpec = new SqlQuerySpec(query);
    return queryUserDefinedFunctions(collectionLink, querySpec, options);
}
/** Queries UDFs through the generic query pipeline. */
@Override
public Flux<FeedResponse<UserDefinedFunction>> queryUserDefinedFunctions(String collectionLink,
SqlQuerySpec querySpec, FeedOptions options) {
    return createQuery(collectionLink, querySpec, options, UserDefinedFunction.class, ResourceType.UserDefinedFunction);
}
/** Reads a conflict, retrying transparently via the session-token-reset retry policy. */
@Override
public Mono<ResourceResponse<Conflict>> readConflict(String conflictLink, RequestOptions options) {
    DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> readConflictInternal(conflictLink, options, retryPolicy),
        retryPolicy);
}
/**
 * Builds and issues the read request for a Conflict resource.
 * Synchronous argument/setup failures are surfaced as {@code Mono.error} rather than thrown.
 *
 * @param conflictLink self-link of the conflict; must be non-empty
 * @param options request options used for headers and partition-key resolution; may be null
 * @param retryPolicyInstance retry policy to notify before sending; may be null
 * @return the conflict wrapped in a {@link ResourceResponse}
 */
private Mono<ResourceResponse<Conflict>> readConflictInternal(String conflictLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(conflictLink)) {
            throw new IllegalArgumentException("conflictLink");
        }
        logger.debug("Reading a Conflict. conflictLink [{}]", conflictLink);
        String path = Utils.joinPath(conflictLink, null);
        Map<String, String> requestHeaders = getRequestHeaders(options);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Read,
            ResourceType.Conflict, path, requestHeaders, options);
        Mono<RxDocumentServiceRequest> reqObs = addPartitionKeyInformation(request, null, null, options);
        return reqObs.flatMap(req -> {
            // Fix: use the request emitted by addPartitionKeyInformation ('req') instead of the
            // captured 'request'; the original ignored the lambda parameter, which would silently
            // drop partition-key information if the helper ever returned a different instance.
            if (retryPolicyInstance != null) {
                retryPolicyInstance.onBeforeSendRequest(req);
            }
            return this.read(req, retryPolicyInstance).map(response -> toResourceResponse(response, Conflict.class));
        });
    } catch (Exception e) {
        logger.debug("Failure in reading a Conflict due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Reads the feed of conflicts under the given collection.
 *
 * @param collectionLink the parent collection self-link; must be non-empty
 * @param options feed options; may be null
 * @return pages of {@link Conflict} resources
 */
@Override
public Flux<FeedResponse<Conflict>> readConflicts(String collectionLink, FeedOptions options) {
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }
    String feedLink = Utils.joinPath(collectionLink, Paths.CONFLICTS_PATH_SEGMENT);
    return readFeed(options, ResourceType.Conflict, Conflict.class, feedLink);
}
// Queries conflicts with a raw SQL string; delegates to the SqlQuerySpec overload.
@Override
public Flux<FeedResponse<Conflict>> queryConflicts(String collectionLink, String query,
FeedOptions options) {
return queryConflicts(collectionLink, new SqlQuerySpec(query), options);
}
// Queries conflicts with a parameterized query spec via the shared createQuery pipeline.
@Override
public Flux<FeedResponse<Conflict>> queryConflicts(String collectionLink, SqlQuerySpec querySpec,
FeedOptions options) {
return createQuery(collectionLink, querySpec, options, Conflict.class, ResourceType.Conflict);
}
// Deletes a Conflict resource; same retry-policy wrapping pattern as readConflict.
@Override
public Mono<ResourceResponse<Conflict>> deleteConflict(String conflictLink, RequestOptions options) {
DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy();
return ObservableHelper.inlineIfPossibleAsObs(() -> deleteConflictInternal(conflictLink, options, retryPolicyInstance), retryPolicyInstance);
}
/**
 * Builds and issues the delete request for a Conflict resource.
 * Synchronous argument/setup failures are surfaced as {@code Mono.error} rather than thrown.
 *
 * @param conflictLink self-link of the conflict; must be non-empty
 * @param options request options used for headers and partition-key resolution; may be null
 * @param retryPolicyInstance retry policy to notify before sending; may be null
 * @return the deletion result wrapped in a {@link ResourceResponse}
 */
private Mono<ResourceResponse<Conflict>> deleteConflictInternal(String conflictLink, RequestOptions options,
    DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(conflictLink)) {
            throw new IllegalArgumentException("conflictLink");
        }
        logger.debug("Deleting a Conflict. conflictLink [{}]", conflictLink);
        String path = Utils.joinPath(conflictLink, null);
        Map<String, String> requestHeaders = getRequestHeaders(options);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Delete,
            ResourceType.Conflict, path, requestHeaders, options);
        Mono<RxDocumentServiceRequest> reqObs = addPartitionKeyInformation(request, null, null, options);
        return reqObs.flatMap(req -> {
            // Fix: operate on the request emitted by addPartitionKeyInformation ('req'); the
            // original ignored the lambda parameter and reused the captured 'request'.
            if (retryPolicyInstance != null) {
                retryPolicyInstance.onBeforeSendRequest(req);
            }
            return this.delete(req, retryPolicyInstance).map(response -> toResourceResponse(response, Conflict.class));
        });
    } catch (Exception e) {
        logger.debug("Failure in deleting a Conflict due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
// Creates a User resource under the given database; retries are driven by a fresh
// session-token-reset policy shared with the internal call.
@Override
public Mono<ResourceResponse<User>> createUser(String databaseLink, User user, RequestOptions options) {
DocumentClientRetryPolicy documentClientRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
return ObservableHelper.inlineIfPossibleAsObs(() -> createUserInternal(databaseLink, user, options, documentClientRetryPolicy), documentClientRetryPolicy);
}
/**
 * Builds and issues the create request for a User resource.
 *
 * @param databaseLink parent database link; validated by getUserRequest
 * @param user the user to create; validated by getUserRequest
 * @param options request options; may be null
 * @param documentClientRetryPolicy retry policy for the send
 * @return the created user wrapped in a {@link ResourceResponse}; failures as {@code Mono.error}
 */
private Mono<ResourceResponse<User>> createUserInternal(String databaseLink, User user, RequestOptions options, DocumentClientRetryPolicy documentClientRetryPolicy) {
    try {
        // Fix: build the request (which null-checks 'user') before touching user.getId();
        // the original dereferenced user in the log call first, turning a null 'user' into a
        // NullPointerException instead of the IllegalArgumentException thrown by getUserRequest.
        RxDocumentServiceRequest request = getUserRequest(databaseLink, user, options, OperationType.Create);
        logger.debug("Creating a User. databaseLink [{}], user id [{}]", databaseLink, user.getId());
        return this.create(request, documentClientRetryPolicy).map(response -> toResourceResponse(response, User.class));
    } catch (Exception e) {
        logger.debug("Failure in creating a User due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
// Upserts (create-or-replace) a User resource under the given database.
@Override
public Mono<ResourceResponse<User>> upsertUser(String databaseLink, User user, RequestOptions options) {
DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy();
return ObservableHelper.inlineIfPossibleAsObs(() -> upsertUserInternal(databaseLink, user, options, retryPolicyInstance), retryPolicyInstance);
}
// Builds and issues the upsert request; setup failures are returned as Mono.error.
private Mono<ResourceResponse<User>> upsertUserInternal(String databaseLink, User user, RequestOptions options,
DocumentClientRetryPolicy retryPolicyInstance) {
try {
// NOTE(review): user.getId() is dereferenced before getUserRequest null-checks 'user',
// so a null user surfaces as an NPE here rather than IllegalArgumentException — confirm intended.
logger.debug("Upserting a User. databaseLink [{}], user id [{}]", databaseLink, user.getId());
RxDocumentServiceRequest request = getUserRequest(databaseLink, user, options, OperationType.Upsert);
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.upsert(request, retryPolicyInstance).map(response -> toResourceResponse(response, User.class));
} catch (Exception e) {
logger.debug("Failure in upserting a User due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
/**
 * Validates inputs and assembles an {@link RxDocumentServiceRequest} for a User
 * create/upsert operation under the given database.
 *
 * @param databaseLink parent database link; must be non-empty
 * @param user the user payload; must be non-null and a valid resource
 * @param options request options used to derive headers; may be null
 * @param operationType the operation (Create or Upsert) to encode in the request
 * @return the assembled service request
 */
private RxDocumentServiceRequest getUserRequest(String databaseLink, User user, RequestOptions options,
    OperationType operationType) {
    if (StringUtils.isEmpty(databaseLink)) {
        throw new IllegalArgumentException("databaseLink");
    }
    if (user == null) {
        throw new IllegalArgumentException("user");
    }
    RxDocumentClientImpl.validateResource(user);

    String path = Utils.joinPath(databaseLink, Paths.USERS_PATH_SEGMENT);
    Map<String, String> requestHeaders = getRequestHeaders(options);
    return RxDocumentServiceRequest.create(operationType, ResourceType.User, path, user,
        requestHeaders, options);
}
// Replaces an existing User resource in place, addressed by its self-link.
@Override
public Mono<ResourceResponse<User>> replaceUser(User user, RequestOptions options) {
DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy();
return ObservableHelper.inlineIfPossibleAsObs(() -> replaceUserInternal(user, options, retryPolicyInstance), retryPolicyInstance);
}
// Builds and issues the replace request; setup failures are returned as Mono.error.
private Mono<ResourceResponse<User>> replaceUserInternal(User user, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (user == null) {
throw new IllegalArgumentException("user");
}
logger.debug("Replacing a User. user id [{}]", user.getId());
RxDocumentClientImpl.validateResource(user);
// Replace is addressed by the resource's own self-link, not a parent feed path.
String path = Utils.joinPath(user.getSelfLink(), null);
Map<String, String> requestHeaders = getRequestHeaders(options);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Replace,
ResourceType.User, path, user, requestHeaders, options);
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.replace(request, retryPolicyInstance).map(response -> toResourceResponse(response, User.class));
} catch (Exception e) {
logger.debug("Failure in replacing a User due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
/**
 * Deletes a User resource addressed by {@code userLink}.
 *
 * @param userLink self-link of the user to delete
 * @param options request options; may be null
 * @return the deletion result wrapped in a {@link ResourceResponse}
 */
@Override // Fix: annotation was missing; every sibling AsyncDocumentClient method carries it.
public Mono<ResourceResponse<User>> deleteUser(String userLink, RequestOptions options) {
    DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(() -> deleteUserInternal(userLink, options, retryPolicyInstance), retryPolicyInstance);
}
// Builds and issues the delete request for a User; setup failures are returned as Mono.error.
private Mono<ResourceResponse<User>> deleteUserInternal(String userLink, RequestOptions options,
DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (StringUtils.isEmpty(userLink)) {
throw new IllegalArgumentException("userLink");
}
logger.debug("Deleting a User. userLink [{}]", userLink);
String path = Utils.joinPath(userLink, null);
Map<String, String> requestHeaders = getRequestHeaders(options);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Delete,
ResourceType.User, path, requestHeaders, options);
// Give the retry policy a chance to decorate the request (e.g. session token) before send.
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.delete(request, retryPolicyInstance).map(response -> toResourceResponse(response, User.class));
} catch (Exception e) {
logger.debug("Failure in deleting a User due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
// Reads a single User resource addressed by its self-link.
@Override
public Mono<ResourceResponse<User>> readUser(String userLink, RequestOptions options) {
DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy();
return ObservableHelper.inlineIfPossibleAsObs(() -> readUserInternal(userLink, options, retryPolicyInstance), retryPolicyInstance);
}
// Builds and issues the read request; setup failures are returned as Mono.error.
private Mono<ResourceResponse<User>> readUserInternal(String userLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (StringUtils.isEmpty(userLink)) {
throw new IllegalArgumentException("userLink");
}
logger.debug("Reading a User. userLink [{}]", userLink);
String path = Utils.joinPath(userLink, null);
Map<String, String> requestHeaders = getRequestHeaders(options);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Read,
ResourceType.User, path, requestHeaders, options);
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, User.class));
} catch (Exception e) {
logger.debug("Failure in reading a User due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
/**
 * Reads the feed of users under the given database.
 *
 * @param databaseLink the parent database self-link; must be non-empty
 * @param options feed options; may be null
 * @return pages of {@link User} resources
 */
@Override
public Flux<FeedResponse<User>> readUsers(String databaseLink, FeedOptions options) {
    if (StringUtils.isEmpty(databaseLink)) {
        throw new IllegalArgumentException("databaseLink");
    }
    String feedLink = Utils.joinPath(databaseLink, Paths.USERS_PATH_SEGMENT);
    return readFeed(options, ResourceType.User, User.class, feedLink);
}
// Queries users with a raw SQL string; delegates to the SqlQuerySpec overload.
@Override
public Flux<FeedResponse<User>> queryUsers(String databaseLink, String query, FeedOptions options) {
return queryUsers(databaseLink, new SqlQuerySpec(query), options);
}
// Queries users with a parameterized query spec via the shared createQuery pipeline.
@Override
public Flux<FeedResponse<User>> queryUsers(String databaseLink, SqlQuerySpec querySpec,
FeedOptions options) {
return createQuery(databaseLink, querySpec, options, User.class, ResourceType.User);
}
/**
 * Creates a Permission resource under the given user.
 *
 * @param userLink parent user link; validated downstream by getPermissionRequest
 * @param permission the permission to create
 * @param options request options; may be null
 * @return the created permission wrapped in a {@link ResourceResponse}
 */
@Override
public Mono<ResourceResponse<Permission>> createPermission(String userLink, Permission permission,
    RequestOptions options) {
    DocumentClientRetryPolicy documentClientRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    // Fix: reuse the SAME policy instance for both the internal call and the retry driver.
    // The original created a second policy via getRequestPolicy() for inlineIfPossibleAsObs,
    // so onBeforeSendRequest state and retry decisions lived on different objects —
    // inconsistent with every sibling method in this class.
    return ObservableHelper.inlineIfPossibleAsObs(() -> createPermissionInternal(userLink, permission, options, documentClientRetryPolicy), documentClientRetryPolicy);
}
// Builds and issues the create request for a Permission; setup failures are returned as Mono.error.
private Mono<ResourceResponse<Permission>> createPermissionInternal(String userLink, Permission permission,
RequestOptions options, DocumentClientRetryPolicy documentClientRetryPolicy) {
try {
// NOTE(review): permission.getId() is dereferenced before getPermissionRequest null-checks
// 'permission', so a null permission surfaces as an NPE here — confirm intended.
logger.debug("Creating a Permission. userLink [{}], permission id [{}]", userLink, permission.getId());
RxDocumentServiceRequest request = getPermissionRequest(userLink, permission, options,
OperationType.Create);
return this.create(request, documentClientRetryPolicy).map(response -> toResourceResponse(response, Permission.class));
} catch (Exception e) {
logger.debug("Failure in creating a Permission due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
// Upserts (create-or-replace) a Permission resource under the given user.
@Override
public Mono<ResourceResponse<Permission>> upsertPermission(String userLink, Permission permission,
RequestOptions options) {
DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy();
return ObservableHelper.inlineIfPossibleAsObs(() -> upsertPermissionInternal(userLink, permission, options, retryPolicyInstance), retryPolicyInstance);
}
// Builds and issues the upsert request; setup failures are returned as Mono.error.
private Mono<ResourceResponse<Permission>> upsertPermissionInternal(String userLink, Permission permission,
RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
// NOTE(review): permission.getId() is dereferenced before the null check inside
// getPermissionRequest — a null permission NPEs here first. Confirm intended.
logger.debug("Upserting a Permission. userLink [{}], permission id [{}]", userLink, permission.getId());
RxDocumentServiceRequest request = getPermissionRequest(userLink, permission, options,
OperationType.Upsert);
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.upsert(request, retryPolicyInstance).map(response -> toResourceResponse(response, Permission.class));
} catch (Exception e) {
logger.debug("Failure in upserting a Permission due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
/**
 * Validates inputs and assembles an {@link RxDocumentServiceRequest} for a Permission
 * create/upsert operation under the given user.
 *
 * @param userLink parent user link; must be non-empty
 * @param permission the permission payload; must be non-null and a valid resource
 * @param options request options used to derive headers; may be null
 * @param operationType the operation (Create or Upsert) to encode in the request
 * @return the assembled service request
 */
private RxDocumentServiceRequest getPermissionRequest(String userLink, Permission permission,
    RequestOptions options, OperationType operationType) {
    if (StringUtils.isEmpty(userLink)) {
        throw new IllegalArgumentException("userLink");
    }
    if (permission == null) {
        throw new IllegalArgumentException("permission");
    }
    RxDocumentClientImpl.validateResource(permission);

    String path = Utils.joinPath(userLink, Paths.PERMISSIONS_PATH_SEGMENT);
    Map<String, String> requestHeaders = getRequestHeaders(options);
    return RxDocumentServiceRequest.create(operationType, ResourceType.Permission, path,
        permission, requestHeaders, options);
}
// Replaces an existing Permission resource in place, addressed by its self-link.
@Override
public Mono<ResourceResponse<Permission>> replacePermission(Permission permission, RequestOptions options) {
DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy();
return ObservableHelper.inlineIfPossibleAsObs(() -> replacePermissionInternal(permission, options, retryPolicyInstance), retryPolicyInstance);
}
// Builds and issues the replace request; setup failures are returned as Mono.error.
private Mono<ResourceResponse<Permission>> replacePermissionInternal(Permission permission, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (permission == null) {
throw new IllegalArgumentException("permission");
}
logger.debug("Replacing a Permission. permission id [{}]", permission.getId());
RxDocumentClientImpl.validateResource(permission);
// Replace is addressed by the resource's own self-link, not a parent feed path.
String path = Utils.joinPath(permission.getSelfLink(), null);
Map<String, String> requestHeaders = getRequestHeaders(options);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Replace,
ResourceType.Permission, path, permission, requestHeaders, options);
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.replace(request, retryPolicyInstance).map(response -> toResourceResponse(response, Permission.class));
} catch (Exception e) {
logger.debug("Failure in replacing a Permission due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
// Deletes a Permission resource addressed by its self-link.
@Override
public Mono<ResourceResponse<Permission>> deletePermission(String permissionLink, RequestOptions options) {
DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy();
return ObservableHelper.inlineIfPossibleAsObs(() -> deletePermissionInternal(permissionLink, options, retryPolicyInstance), retryPolicyInstance);
}
// Builds and issues the delete request; setup failures are returned as Mono.error.
private Mono<ResourceResponse<Permission>> deletePermissionInternal(String permissionLink, RequestOptions options,
DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (StringUtils.isEmpty(permissionLink)) {
throw new IllegalArgumentException("permissionLink");
}
logger.debug("Deleting a Permission. permissionLink [{}]", permissionLink);
String path = Utils.joinPath(permissionLink, null);
Map<String, String> requestHeaders = getRequestHeaders(options);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Delete,
ResourceType.Permission, path, requestHeaders, options);
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.delete(request, retryPolicyInstance).map(response -> toResourceResponse(response, Permission.class));
} catch (Exception e) {
logger.debug("Failure in deleting a Permission due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
// Reads a single Permission resource addressed by its self-link.
@Override
public Mono<ResourceResponse<Permission>> readPermission(String permissionLink, RequestOptions options) {
DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy();
return ObservableHelper.inlineIfPossibleAsObs(() -> readPermissionInternal(permissionLink, options, retryPolicyInstance), retryPolicyInstance);
}
// Builds and issues the read request; setup failures are returned as Mono.error.
private Mono<ResourceResponse<Permission>> readPermissionInternal(String permissionLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance ) {
try {
if (StringUtils.isEmpty(permissionLink)) {
throw new IllegalArgumentException("permissionLink");
}
logger.debug("Reading a Permission. permissionLink [{}]", permissionLink);
String path = Utils.joinPath(permissionLink, null);
Map<String, String> requestHeaders = getRequestHeaders(options);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Read,
ResourceType.Permission, path, requestHeaders, options);
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, Permission.class));
} catch (Exception e) {
logger.debug("Failure in reading a Permission due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
/**
 * Reads the feed of permissions under the given user.
 *
 * @param userLink the parent user self-link; must be non-empty
 * @param options feed options; may be null
 * @return pages of {@link Permission} resources
 */
@Override
public Flux<FeedResponse<Permission>> readPermissions(String userLink, FeedOptions options) {
    if (StringUtils.isEmpty(userLink)) {
        throw new IllegalArgumentException("userLink");
    }
    String feedLink = Utils.joinPath(userLink, Paths.PERMISSIONS_PATH_SEGMENT);
    return readFeed(options, ResourceType.Permission, Permission.class, feedLink);
}
// Queries permissions with a raw SQL string; delegates to the SqlQuerySpec overload.
@Override
public Flux<FeedResponse<Permission>> queryPermissions(String userLink, String query,
FeedOptions options) {
return queryPermissions(userLink, new SqlQuerySpec(query), options);
}
// Queries permissions with a parameterized query spec via the shared createQuery pipeline.
@Override
public Flux<FeedResponse<Permission>> queryPermissions(String userLink, SqlQuerySpec querySpec,
FeedOptions options) {
return createQuery(userLink, querySpec, options, Permission.class, ResourceType.Permission);
}
// Replaces an Offer (throughput) resource, addressed by its self-link.
@Override
public Mono<ResourceResponse<Offer>> replaceOffer(Offer offer) {
DocumentClientRetryPolicy documentClientRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
return ObservableHelper.inlineIfPossibleAsObs(() -> replaceOfferInternal(offer, documentClientRetryPolicy), documentClientRetryPolicy);
}
// Builds and issues the replace request; setup failures are returned as Mono.error.
private Mono<ResourceResponse<Offer>> replaceOfferInternal(Offer offer, DocumentClientRetryPolicy documentClientRetryPolicy) {
try {
if (offer == null) {
throw new IllegalArgumentException("offer");
}
logger.debug("Replacing an Offer. offer id [{}]", offer.getId());
RxDocumentClientImpl.validateResource(offer);
String path = Utils.joinPath(offer.getSelfLink(), null);
// Offer requests carry no per-request headers or options (last two args are null).
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Replace,
ResourceType.Offer, path, offer, null, null);
return this.replace(request, documentClientRetryPolicy).map(response -> toResourceResponse(response, Offer.class));
} catch (Exception e) {
logger.debug("Failure in replacing an Offer due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
// Reads a single Offer (throughput) resource addressed by its self-link.
@Override
public Mono<ResourceResponse<Offer>> readOffer(String offerLink) {
DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy();
return ObservableHelper.inlineIfPossibleAsObs(() -> readOfferInternal(offerLink, retryPolicyInstance), retryPolicyInstance);
}
// Builds and issues the read request; setup failures are returned as Mono.error.
private Mono<ResourceResponse<Offer>> readOfferInternal(String offerLink, DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (StringUtils.isEmpty(offerLink)) {
throw new IllegalArgumentException("offerLink");
}
logger.debug("Reading an Offer. offerLink [{}]", offerLink);
String path = Utils.joinPath(offerLink, null);
// The HashMap cast disambiguates the create(...) overload; no headers are sent.
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Read,
ResourceType.Offer, path, (HashMap<String, String>)null, null);
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, Offer.class));
} catch (Exception e) {
logger.debug("Failure in reading an Offer due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
/**
 * Reads the account-level feed of Offer (throughput) resources.
 *
 * @param options feed options; may be null
 * @return pages of {@link Offer} resources
 */
@Override
public Flux<FeedResponse<Offer>> readOffers(FeedOptions options) {
    // Offers are a root feed; there is no parent link to validate.
    String feedLink = Utils.joinPath(Paths.OFFERS_PATH_SEGMENT, null);
    return readFeed(options, ResourceType.Offer, Offer.class, feedLink);
}
/**
* Shared paginated ReadFeed pipeline used by all read*s() methods.
* Builds one ReadFeed request per page (carrying continuation token and page size headers)
* and lets the Paginator drive the page-by-page subscription.
*/
private <T extends Resource> Flux<FeedResponse<T>> readFeed(FeedOptions options, ResourceType resourceType, Class<T> klass, String resourceLink) {
if (options == null) {
options = new FeedOptions();
}
// -1 means "let the service pick the page size".
int maxPageSize = options.getMaxItemCount() != null ? options.getMaxItemCount() : -1;
// Effectively-final copy so the lambda below can capture the (possibly reassigned) options.
final FeedOptions finalFeedOptions = options;
BiFunction<String, Integer, RxDocumentServiceRequest> createRequestFunc = (continuationToken, pageSize) -> {
Map<String, String> requestHeaders = new HashMap<>();
if (continuationToken != null) {
requestHeaders.put(HttpConstants.HttpHeaders.CONTINUATION, continuationToken);
}
requestHeaders.put(HttpConstants.HttpHeaders.PAGE_SIZE, Integer.toString(pageSize));
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.ReadFeed,
resourceType, resourceLink, requestHeaders, finalFeedOptions);
return request;
};
// Each page request gets its own fresh retry policy.
Function<RxDocumentServiceRequest, Mono<FeedResponse<T>>> executeFunc = request -> {
return ObservableHelper.inlineIfPossibleAsObs(() -> readFeed(request).map(response -> toFeedResponsePage(response, klass)),
this.resetSessionTokenRetryPolicy.getRequestPolicy());
};
return Paginator.getPaginatedQueryResultAsObservable(options, createRequestFunc, executeFunc, klass, maxPageSize);
}
// Queries offers with a raw SQL string; delegates to the SqlQuerySpec overload.
@Override
public Flux<FeedResponse<Offer>> queryOffers(String query, FeedOptions options) {
return queryOffers(new SqlQuerySpec(query), options);
}
// Queries offers via the shared createQuery pipeline; offers have no parent link (null).
@Override
public Flux<FeedResponse<Offer>> queryOffers(SqlQuerySpec querySpec, FeedOptions options) {
return createQuery(null, querySpec, options, Offer.class, ResourceType.Offer);
}
// Fetches the account metadata (DatabaseAccount) from the service.
@Override
public Mono<DatabaseAccount> getDatabaseAccount() {
DocumentClientRetryPolicy documentClientRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
return ObservableHelper.inlineIfPossibleAsObs(() -> getDatabaseAccountInternal(documentClientRetryPolicy), documentClientRetryPolicy);
}
// Builds and issues the account-read request; setup failures are returned as Mono.error.
private Mono<DatabaseAccount> getDatabaseAccountInternal(DocumentClientRetryPolicy documentClientRetryPolicy) {
try {
logger.debug("Getting Database Account");
// Account metadata lives at the service root ("" path); no headers or options are sent.
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Read,
ResourceType.DatabaseAccount, "",
(HashMap<String, String>) null,
null);
return this.read(request, documentClientRetryPolicy).map(response -> toDatabaseAccount(response));
} catch (Exception e) {
logger.debug("Failure in getting Database Account due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
// Exposes the session-token container; typed as Object at this interface boundary.
public Object getSession() {
return this.sessionContainer;
}
// Installs a session container; caller must pass a SessionContainer (unchecked cast).
public void setSession(Object sessionContainer) {
this.sessionContainer = (SessionContainer) sessionContainer;
}
// Accessor for the partition key range cache used in request routing.
public RxPartitionKeyRangeCache getPartitionKeyRangeCache() {
return partitionKeyRangeCache;
}
// Reads account metadata directly from a specific regional endpoint (bypassing the global
// endpoint manager) and refreshes the multi-write flag as a side effect.
public Flux<DatabaseAccount> getDatabaseAccountFromEndpoint(URI endpoint) {
// defer: build a fresh request per subscription so retries/resubscribes are safe.
return Flux.defer(() -> {
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Read,
ResourceType.DatabaseAccount, "", null, (Object) null);
this.populateHeaders(request, RequestVerb.GET);
// Pin the request to the requested regional endpoint instead of the global one.
request.setEndpointOverride(endpoint);
return this.gatewayProxy.processMessage(request).doOnError(e -> {
String message = String.format("Failed to retrieve database account information. %s",
e.getCause() != null
? e.getCause().toString()
: e.toString());
logger.warn(message);
}).map(rsp -> rsp.getResource(DatabaseAccount.class))
.doOnNext(databaseAccount -> {
// Multi-write is enabled only if both the client policy and the account allow it.
this.useMultipleWriteLocations = this.connectionPolicy.isUsingMultipleWriteLocations()
&& BridgeInternal.isEnableMultipleWriteLocations(databaseAccount);
});
});
}
/**
* Certain requests must be routed through gateway even when the client connectivity mode is direct.
*
* @param request the service request being dispatched
* @return the gateway proxy for metadata-style operations, otherwise the direct store model
*/
private RxStoreModel getStoreProxy(RxDocumentServiceRequest request) {
// Explicit per-request gateway override always wins.
if (request.UseGatewayMode) {
return this.gatewayProxy;
}
ResourceType resourceType = request.getResourceType();
OperationType operationType = request.getOperationType();
// Offers, non-execute script operations, and partition key range reads are gateway-only.
if (resourceType == ResourceType.Offer ||
resourceType.isScript() && operationType != OperationType.ExecuteJavaScript ||
resourceType == ResourceType.PartitionKeyRange) {
return this.gatewayProxy;
}
// Create/Upsert of account-level resources goes through the gateway; data goes direct.
if (operationType == OperationType.Create
|| operationType == OperationType.Upsert) {
if (resourceType == ResourceType.Database ||
resourceType == ResourceType.User ||
resourceType == ResourceType.DocumentCollection ||
resourceType == ResourceType.Permission) {
return this.gatewayProxy;
} else {
return this.storeModel;
}
} else if (operationType == OperationType.Delete) {
if (resourceType == ResourceType.Database ||
resourceType == ResourceType.User ||
resourceType == ResourceType.DocumentCollection) {
return this.gatewayProxy;
} else {
return this.storeModel;
}
} else if (operationType == OperationType.Replace) {
if (resourceType == ResourceType.DocumentCollection) {
return this.gatewayProxy;
} else {
return this.storeModel;
}
} else if (operationType == OperationType.Read) {
if (resourceType == ResourceType.DocumentCollection) {
return this.gatewayProxy;
} else {
return this.storeModel;
}
} else {
// Cross-partition queries without a resolved partition key range go via the gateway.
if ((request.getOperationType() == OperationType.Query || request.getOperationType() == OperationType.SqlQuery) &&
Utils.isCollectionChild(request.getResourceType())) {
if (request.getPartitionKeyRangeIdentity() == null) {
return this.gatewayProxy;
}
}
return this.storeModel;
}
}
// Releases all client-held resources; best-effort — each shutdown step is attempted
// even if an earlier one logs a failure.
@Override
public void close() {
logger.info("Shutting down ...");
logger.info("Closing Global Endpoint Manager ...");
LifeCycleUtils.closeQuietly(this.globalEndpointManager);
logger.info("Closing StoreClientFactory ...");
LifeCycleUtils.closeQuietly(this.storeClientFactory);
logger.info("Shutting down reactorHttpClient ...");
try {
this.reactorHttpClient.shutdown();
} catch (Exception e) {
logger.warn("shutting down reactorHttpClient failed", e);
}
logger.info("Shutting down completed.");
}
} |
Should we convert time to `Instant` type? On the span, would a string formatted datetime make it easier than a long? | private void addSpanRequestAttributes(Span span, Context context, String spanName) {
Objects.requireNonNull(span, "'span' cannot be null.");
// Copy AMQP messaging attributes (destination, peer host, enqueued time) from the SDK
// Context onto the span; each is optional and skipped when absent.
String entityPath = getOrDefault(context, ENTITY_PATH_KEY, null, String.class);
if (entityPath != null) {
span.setAttribute(MESSAGE_BUS_DESTINATION, AttributeValue.stringAttributeValue(entityPath));
}
String hostName = getOrDefault(context, HOST_NAME_KEY, null, String.class);
if (hostName != null) {
span.setAttribute(PEER_ENDPOINT, AttributeValue.stringAttributeValue(hostName));
}
// NOTE(review): the same key is used both to look up the context value and as the span
// attribute name; value is an epoch-style long — consider Instant/ISO-8601 per reviewer note.
Long messageEnqueuedTime = getOrDefault(context, MESSAGE_ENQUEUED_TIME, null, Long.class);
if (messageEnqueuedTime != null) {
span.setAttribute(MESSAGE_ENQUEUED_TIME, messageEnqueuedTime);
}
} | Long messageEnqueuedTime = getOrDefault(context, MESSAGE_ENQUEUED_TIME, null, Long.class); | private void addSpanRequestAttributes(Span span, Context context, String spanName) {
Objects.requireNonNull(span, "'span' cannot be null.");
// Copy AMQP messaging attributes (destination, peer host, enqueued time) from the SDK
// Context onto the span; each is optional and skipped when absent.
String entityPath = getOrDefault(context, ENTITY_PATH_KEY, null, String.class);
if (entityPath != null) {
span.setAttribute(MESSAGE_BUS_DESTINATION, AttributeValue.stringAttributeValue(entityPath));
}
String hostName = getOrDefault(context, HOST_NAME_KEY, null, String.class);
if (hostName != null) {
span.setAttribute(PEER_ENDPOINT, AttributeValue.stringAttributeValue(hostName));
}
// Enqueued time is recorded as a raw long attribute under the same key it is read from.
Long messageEnqueuedTime = getOrDefault(context, MESSAGE_ENQUEUED_TIME, null, Long.class);
if (messageEnqueuedTime != null) {
span.setAttribute(MESSAGE_ENQUEUED_TIME, messageEnqueuedTime);
}
} | class OpenTelemetryTracer implements com.azure.core.util.tracing.Tracer {
// Shared OpenTelemetry tracer instance for all spans created by this adapter.
private static final Tracer TRACER = OpenTelemetry.getTracerFactory().get("Azure-OpenTelemetry");
// Span attribute key for the Azure service namespace.
static final String AZ_NAMESPACE_KEY = "az.namespace";
// Span attribute key for the messaging destination (queue/topic).
static final String MESSAGE_BUS_DESTINATION = "message_bus.destination";
// Span attribute key for the remote peer address.
static final String PEER_ENDPOINT = "peer.address";
// Context/attribute key for the message enqueued time (raw long).
static final String MESSAGE_ENQUEUED_TIME = "x-opt-enqueued-time";
private final ClientLogger logger = new ClientLogger(OpenTelemetryTracer.class);
/**
* {@inheritDoc}
*
* <p>Starts a span named {@code spanName}, tags it with the az.namespace from the
* context when recording, and stores it under PARENT_SPAN_KEY in the returned context.</p>
*/
@Override
public Context start(String spanName, Context context) {
Objects.requireNonNull(spanName, "'spanName' cannot be null.");
Objects.requireNonNull(context, "'context' cannot be null.");
Builder spanBuilder = getSpanBuilder(spanName, context);
Span span = spanBuilder.startSpan();
if (span.isRecording()) {
// Only pay the attribute cost when the span is actually sampled/recording.
String tracingNamespace = getOrDefault(context, "az.tracing.namespace", null, String.class);
if (tracingNamespace != null) {
span.setAttribute(AZ_NAMESPACE_KEY, AttributeValue.stringAttributeValue(tracingNamespace));
}
}
return context.addData(PARENT_SPAN_KEY, span);
}
/**
* {@inheritDoc}
*
* <p>Starts a span whose kind depends on the AMQP processing phase:
* SEND uses a shared builder from the context (CLIENT), MESSAGE creates a PRODUCER span
* and stores its diagnostic context, PROCESS opens a scoped CONSUMER span.</p>
*/
@Override
public Context start(String spanName, Context context, ProcessKind processKind) {
Objects.requireNonNull(spanName, "'spanName' cannot be null.");
Objects.requireNonNull(context, "'context' cannot be null.");
Objects.requireNonNull(processKind, "'processKind' cannot be null.");
Span span;
Builder spanBuilder;
switch (processKind) {
case SEND:
// The builder must have been placed in the context by getSharedSpanBuilder.
spanBuilder = getOrDefault(context, SPAN_BUILDER_KEY, null, Builder.class);
if (spanBuilder == null) {
return Context.NONE;
}
span = spanBuilder.setSpanKind(Span.Kind.CLIENT).startSpan();
if (span.isRecording()) {
addSpanRequestAttributes(span, context, spanName);
}
return context.addData(PARENT_SPAN_KEY, span);
case MESSAGE:
spanBuilder = getSpanBuilder(spanName, context);
span = spanBuilder.setSpanKind(Span.Kind.PRODUCER).startSpan();
if (span.isRecording()) {
span.setAttribute(AZ_NAMESPACE_KEY,
AttributeValue.stringAttributeValue(getOrDefault(context, AZ_TRACING_NAMESPACE_KEY, "",
String.class)));
}
// Propagate the new span's context (e.g. diagnostic id) for downstream consumers.
context = setContextData(span);
return context.addData(PARENT_SPAN_KEY, span);
case PROCESS:
return startScopedSpan(spanName, context);
default:
return Context.NONE;
}
}
/**
* {@inheritDoc}
*
* <p>Ends the span in the context, mapping the HTTP response code (and optional error)
* to an OpenTelemetry status. No-op when no span is present.</p>
*/
@Override
public void end(int responseCode, Throwable throwable, Context context) {
Objects.requireNonNull(context, "'context' cannot be null.");
final Span span = getOrDefault(context, PARENT_SPAN_KEY, null, Span.class);
if (span == null) {
return;
}
if (span.isRecording()) {
span.setStatus(HttpTraceUtil.parseResponseStatus(responseCode, throwable));
}
span.end();
}
/**
* {@inheritDoc}
*
* <p>Sets a string attribute on the context's span; empty values and missing spans
* are logged and skipped.</p>
*/
@Override
public void setAttribute(String key, String value, Context context) {
Objects.requireNonNull(context, "'context' cannot be null");
if (CoreUtils.isNullOrEmpty(value)) {
logger.warning("Failed to set span attribute since value is null or empty.");
return;
}
final Span span = getOrDefault(context, PARENT_SPAN_KEY, null, Span.class);
if (span != null) {
span.setAttribute(key, AttributeValue.stringAttributeValue(value));
} else {
logger.warning("Failed to find span to add attribute.");
}
}
/**
* {@inheritDoc}
*
* <p>Records the caller-supplied span name in the context for later span creation.</p>
*/
@Override
public Context setSpanName(String spanName, Context context) {
return context.addData(USER_SPAN_NAME_KEY, spanName);
}
/**
* {@inheritDoc}
*
* <p>Ends the span in the context, mapping an AMQP status message (and optional error)
* to an OpenTelemetry status. Logs and returns when no span is present.</p>
*/
@Override
public void end(String statusMessage, Throwable throwable, Context context) {
final Span span = getOrDefault(context, PARENT_SPAN_KEY, null, Span.class);
if (span == null) {
logger.warning("Failed to find span to end it.");
return;
}
if (span.isRecording()) {
span.setStatus(AmqpTraceUtil.parseStatusMessage(statusMessage, throwable));
}
span.end();
}
// Links the remote SpanContext stored in the context to the shared span builder
// (used to connect batched messages to their send span); logs and skips when either is absent.
@Override
public void addLink(Context context) {
final Builder spanBuilder = getOrDefault(context, SPAN_BUILDER_KEY, null, Builder.class);
if (spanBuilder == null) {
logger.warning("Failed to find spanBuilder to link it.");
return;
}
final SpanContext spanContext = getOrDefault(context, SPAN_CONTEXT_KEY, null, SpanContext.class);
if (spanContext == null) {
logger.warning("Failed to find span context to link it.");
return;
}
spanBuilder.addLink(spanContext);
}
/**
* {@inheritDoc}
*
* <p>Parses an AMQP diagnostic id into a remote SpanContext stored in the returned context.</p>
*/
@Override
public Context extractContext(String diagnosticId, Context context) {
return AmqpPropagationFormatUtil.extractContext(diagnosticId, context);
}
// Creates a span builder for spanName and stashes it in the context so SEND-phase
// start(...) calls and addLink(...) can reuse it.
@Override
public Context getSharedSpanBuilder(String spanName, Context context) {
return context.addData(SPAN_BUILDER_KEY, getSpanBuilder(spanName, context));
}
/**
* Starts a new child {@link Span} with parent being the remote and uses the {@link Span} is in the current Context,
* to return an object that represents that scope.
* <p>The scope is exited when the returned object is closed.</p>
*
* @param spanName The name of the returned Span.
* @param context The {@link Context} containing the {@link SpanContext}.
*
* @return The returned {@link Span} and the scope in a {@link Context} object.
*/
private Context startScopedSpan(String spanName, Context context) {
Objects.requireNonNull(context, "'context' cannot be null.");
Span span;
SpanContext spanContext = getOrDefault(context, SPAN_CONTEXT_KEY, null, SpanContext.class);
if (spanContext != null) {
span = startSpanWithRemoteParent(spanName, spanContext);
} else {
Builder spanBuilder = getSpanBuilder(spanName, context);
span = spanBuilder.setSpanKind(Span.Kind.CONSUMER).startSpan();
}
if (span.isRecording()) {
addSpanRequestAttributes(span, context, spanName);
}
return context.addData(PARENT_SPAN_KEY, span).addData("scope", TRACER.withSpan(span));
}
/**
* Creates a {@link Builder} to create and start a new child {@link Span} with parent being the remote and
* designated by the {@link SpanContext}.
*
* @param spanName The name of the returned Span.
* @param spanContext The remote parent context of the returned Span.
*
* @return A {@link Span} with parent being the remote {@link Span} designated by the {@link SpanContext}.
*/
private static Span startSpanWithRemoteParent(String spanName, SpanContext spanContext) {
Builder spanBuilder = TRACER.spanBuilder(spanName).setParent(spanContext);
spanBuilder.setSpanKind(Span.Kind.CONSUMER);
return spanBuilder.startSpan();
}
/**
* Extracts the {@link SpanContext trace identifiers} and the {@link SpanContext} of the current tracing span as
* text and returns in a {@link Context} object.
*
* @param span The current tracing span.
*
* @return The {@link Context} containing the {@link SpanContext} and trace-parent of the current span.
*/
private static Context setContextData(Span span) {
SpanContext spanContext = span.getContext();
final String traceparent = AmqpPropagationFormatUtil.getDiagnosticId(spanContext);
return new Context(DIAGNOSTIC_ID_KEY, traceparent).addData(SPAN_CONTEXT_KEY, spanContext);
}
/**
* Extracts request attributes from the given {@code context} and adds it to the started span.
*
* @param span The span to which request attributes are to be added.
* @param context The context containing the request attributes.
* @param spanName The name of the returned Span containing the component value.
*/
/**
* Returns a {@link Builder} to create and start a new child {@link Span} with parent
* being the designated {@code Span}.
*
* @param spanName The name of the returned Span.
* @param context The context containing the span and the span name.
*
* @return A {@code Span.Builder} to create and start a new {@code Span}.
*/
private Builder getSpanBuilder(String spanName, Context context) {
Span parentSpan = getOrDefault(context, PARENT_SPAN_KEY, null, Span.class);
String spanNameKey = getOrDefault(context, USER_SPAN_NAME_KEY, null, String.class);
if (spanNameKey == null) {
spanNameKey = spanName;
}
if (parentSpan == null) {
parentSpan = TRACER.getCurrentSpan();
}
return TRACER.spanBuilder(spanNameKey).setParent(parentSpan);
}
/**
* Returns the value of the specified key from the context.
*
* @param key The name of the attribute that needs to be extracted from the {@code Context}.
* @param defaultValue the value to return in data not found.
* @param clazz clazz the type of raw class to find data for.
* @param context The context containing the specified key.
*
* @return The T type of raw class object
*/
@SuppressWarnings("unchecked")
private <T> T getOrDefault(Context context, String key, T defaultValue, Class<T> clazz) {
final Optional<Object> optional = context.getData(key);
final Object result = optional.filter(value -> clazz.isAssignableFrom(value.getClass())).orElseGet(() -> {
logger.warning("Could not extract key '{}' of type '{}' from context.", key, clazz);
return defaultValue;
});
return (T) result;
}
} | class OpenTelemetryTracer implements com.azure.core.util.tracing.Tracer {
private static final Tracer TRACER = OpenTelemetry.getTracerFactory().get("Azure-OpenTelemetry");
static final String AZ_NAMESPACE_KEY = "az.namespace";
static final String MESSAGE_BUS_DESTINATION = "message_bus.destination";
static final String PEER_ENDPOINT = "peer.address";
private final ClientLogger logger = new ClientLogger(OpenTelemetryTracer.class);
/**
* {@inheritDoc}
*/
@Override
public Context start(String spanName, Context context) {
Objects.requireNonNull(spanName, "'spanName' cannot be null.");
Objects.requireNonNull(context, "'context' cannot be null.");
Builder spanBuilder = getSpanBuilder(spanName, context);
Span span = spanBuilder.startSpan();
if (span.isRecording()) {
String tracingNamespace = getOrDefault(context, "az.tracing.namespace", null, String.class);
if (tracingNamespace != null) {
span.setAttribute(AZ_NAMESPACE_KEY, AttributeValue.stringAttributeValue(tracingNamespace));
}
}
return context.addData(PARENT_SPAN_KEY, span);
}
/**
* {@inheritDoc}
*/
@Override
public Context start(String spanName, Context context, ProcessKind processKind) {
Objects.requireNonNull(spanName, "'spanName' cannot be null.");
Objects.requireNonNull(context, "'context' cannot be null.");
Objects.requireNonNull(processKind, "'processKind' cannot be null.");
Span span;
Builder spanBuilder;
switch (processKind) {
case SEND:
spanBuilder = getOrDefault(context, SPAN_BUILDER_KEY, null, Builder.class);
if (spanBuilder == null) {
return Context.NONE;
}
span = spanBuilder.setSpanKind(Span.Kind.CLIENT).startSpan();
if (span.isRecording()) {
addSpanRequestAttributes(span, context, spanName);
}
return context.addData(PARENT_SPAN_KEY, span);
case MESSAGE:
spanBuilder = getSpanBuilder(spanName, context);
span = spanBuilder.setSpanKind(Span.Kind.PRODUCER).startSpan();
if (span.isRecording()) {
span.setAttribute(AZ_NAMESPACE_KEY,
AttributeValue.stringAttributeValue(getOrDefault(context, AZ_TRACING_NAMESPACE_KEY, "",
String.class)));
}
context = setContextData(span);
return context.addData(PARENT_SPAN_KEY, span);
case PROCESS:
return startScopedSpan(spanName, context);
default:
return Context.NONE;
}
}
/**
* {@inheritDoc}
*/
@Override
public void end(int responseCode, Throwable throwable, Context context) {
Objects.requireNonNull(context, "'context' cannot be null.");
final Span span = getOrDefault(context, PARENT_SPAN_KEY, null, Span.class);
if (span == null) {
return;
}
if (span.isRecording()) {
span.setStatus(HttpTraceUtil.parseResponseStatus(responseCode, throwable));
}
span.end();
}
/**
* {@inheritDoc}
*/
@Override
public void setAttribute(String key, String value, Context context) {
Objects.requireNonNull(context, "'context' cannot be null");
if (CoreUtils.isNullOrEmpty(value)) {
logger.warning("Failed to set span attribute since value is null or empty.");
return;
}
final Span span = getOrDefault(context, PARENT_SPAN_KEY, null, Span.class);
if (span != null) {
span.setAttribute(key, AttributeValue.stringAttributeValue(value));
} else {
logger.warning("Failed to find span to add attribute.");
}
}
/**
* {@inheritDoc}
*/
@Override
public Context setSpanName(String spanName, Context context) {
return context.addData(USER_SPAN_NAME_KEY, spanName);
}
/**
* {@inheritDoc}
*/
@Override
public void end(String statusMessage, Throwable throwable, Context context) {
final Span span = getOrDefault(context, PARENT_SPAN_KEY, null, Span.class);
if (span == null) {
logger.warning("Failed to find span to end it.");
return;
}
if (span.isRecording()) {
span.setStatus(AmqpTraceUtil.parseStatusMessage(statusMessage, throwable));
}
span.end();
}
@Override
public void addLink(Context context) {
final Builder spanBuilder = getOrDefault(context, SPAN_BUILDER_KEY, null, Builder.class);
if (spanBuilder == null) {
logger.warning("Failed to find spanBuilder to link it.");
return;
}
final SpanContext spanContext = getOrDefault(context, SPAN_CONTEXT_KEY, null, SpanContext.class);
if (spanContext == null) {
logger.warning("Failed to find span context to link it.");
return;
}
spanBuilder.addLink(spanContext);
}
/**
* {@inheritDoc}
*/
@Override
public Context extractContext(String diagnosticId, Context context) {
return AmqpPropagationFormatUtil.extractContext(diagnosticId, context);
}
@Override
public Context getSharedSpanBuilder(String spanName, Context context) {
return context.addData(SPAN_BUILDER_KEY, getSpanBuilder(spanName, context));
}
/**
* Starts a new child {@link Span} with parent being the remote and uses the {@link Span} is in the current Context,
* to return an object that represents that scope.
* <p>The scope is exited when the returned object is closed.</p>
*
* @param spanName The name of the returned Span.
* @param context The {@link Context} containing the {@link SpanContext}.
*
* @return The returned {@link Span} and the scope in a {@link Context} object.
*/
private Context startScopedSpan(String spanName, Context context) {
Objects.requireNonNull(context, "'context' cannot be null.");
Span span;
SpanContext spanContext = getOrDefault(context, SPAN_CONTEXT_KEY, null, SpanContext.class);
if (spanContext != null) {
span = startSpanWithRemoteParent(spanName, spanContext);
} else {
Builder spanBuilder = getSpanBuilder(spanName, context);
span = spanBuilder.setSpanKind(Span.Kind.CONSUMER).startSpan();
}
if (span.isRecording()) {
addSpanRequestAttributes(span, context, spanName);
}
return context.addData(PARENT_SPAN_KEY, span).addData("scope", TRACER.withSpan(span));
}
/**
* Creates a {@link Builder} to create and start a new child {@link Span} with parent being the remote and
* designated by the {@link SpanContext}.
*
* @param spanName The name of the returned Span.
* @param spanContext The remote parent context of the returned Span.
*
* @return A {@link Span} with parent being the remote {@link Span} designated by the {@link SpanContext}.
*/
private static Span startSpanWithRemoteParent(String spanName, SpanContext spanContext) {
Builder spanBuilder = TRACER.spanBuilder(spanName).setParent(spanContext);
spanBuilder.setSpanKind(Span.Kind.CONSUMER);
return spanBuilder.startSpan();
}
/**
* Extracts the {@link SpanContext trace identifiers} and the {@link SpanContext} of the current tracing span as
* text and returns in a {@link Context} object.
*
* @param span The current tracing span.
*
* @return The {@link Context} containing the {@link SpanContext} and trace-parent of the current span.
*/
private static Context setContextData(Span span) {
SpanContext spanContext = span.getContext();
final String traceparent = AmqpPropagationFormatUtil.getDiagnosticId(spanContext);
return new Context(DIAGNOSTIC_ID_KEY, traceparent).addData(SPAN_CONTEXT_KEY, spanContext);
}
/**
* Extracts request attributes from the given {@code context} and adds it to the started span.
*
* @param span The span to which request attributes are to be added.
* @param context The context containing the request attributes.
* @param spanName The name of the returned Span containing the component value.
*/
/**
* Returns a {@link Builder} to create and start a new child {@link Span} with parent
* being the designated {@code Span}.
*
* @param spanName The name of the returned Span.
* @param context The context containing the span and the span name.
*
* @return A {@code Span.Builder} to create and start a new {@code Span}.
*/
private Builder getSpanBuilder(String spanName, Context context) {
Span parentSpan = getOrDefault(context, PARENT_SPAN_KEY, null, Span.class);
String spanNameKey = getOrDefault(context, USER_SPAN_NAME_KEY, null, String.class);
if (spanNameKey == null) {
spanNameKey = spanName;
}
if (parentSpan == null) {
parentSpan = TRACER.getCurrentSpan();
}
return TRACER.spanBuilder(spanNameKey).setParent(parentSpan);
}
/**
* Returns the value of the specified key from the context.
*
* @param key The name of the attribute that needs to be extracted from the {@code Context}.
* @param defaultValue the value to return in data not found.
* @param clazz clazz the type of raw class to find data for.
* @param context The context containing the specified key.
*
* @return The T type of raw class object
*/
@SuppressWarnings("unchecked")
private <T> T getOrDefault(Context context, String key, T defaultValue, Class<T> clazz) {
final Optional<Object> optional = context.getData(key);
final Object result = optional.filter(value -> clazz.isAssignableFrom(value.getClass())).orElseGet(() -> {
logger.warning("Could not extract key '{}' of type '{}' from context.", key, clazz);
return defaultValue;
});
return (T) result;
}
} |
the purpose of `getCreateDocumentRequest` is just to create the request. retry-policy interaction should happen outside. why are we moving this? | private Mono<RxDocumentServiceResponse> replace(RxDocumentServiceRequest request, DocumentClientRetryPolicy documentClientRetryPolicy) {
populateHeaders(request, RequestVerb.PUT);
if(request.requestContext != null && documentClientRetryPolicy.getRetryCount() > 0) {
documentClientRetryPolicy.updateEndTime();
request.requestContext.updateRetryContext(documentClientRetryPolicy, true);
}
return getStoreProxy(request).processMessage(request);
} | return getStoreProxy(request).processMessage(request); | private Mono<RxDocumentServiceResponse> replace(RxDocumentServiceRequest request, DocumentClientRetryPolicy documentClientRetryPolicy) {
populateHeaders(request, RequestVerb.PUT);
if(request.requestContext != null && documentClientRetryPolicy.getRetryCount() > 0) {
documentClientRetryPolicy.updateEndTime();
request.requestContext.updateRetryContext(documentClientRetryPolicy, true);
}
return getStoreProxy(request).processMessage(request);
} | class RxDocumentClientImpl implements AsyncDocumentClient, IAuthorizationTokenProvider {
private final static ObjectMapper mapper = Utils.getSimpleObjectMapper();
private final Logger logger = LoggerFactory.getLogger(RxDocumentClientImpl.class);
private final String masterKeyOrResourceToken;
private final URI serviceEndpoint;
private final ConnectionPolicy connectionPolicy;
private final ConsistencyLevel consistencyLevel;
private final BaseAuthorizationTokenProvider authorizationTokenProvider;
private final UserAgentContainer userAgentContainer;
private final boolean hasAuthKeyResourceToken;
private final Configs configs;
private final boolean connectionSharingAcrossClientsEnabled;
private CosmosKeyCredential cosmosKeyCredential;
private TokenResolver tokenResolver;
private SessionContainer sessionContainer;
private String firstResourceTokenFromPermissionFeed = StringUtils.EMPTY;
private RxClientCollectionCache collectionCache;
private RxStoreModel gatewayProxy;
private RxStoreModel storeModel;
private GlobalAddressResolver addressResolver;
private RxPartitionKeyRangeCache partitionKeyRangeCache;
private Map<String, List<PartitionKeyAndResourceTokenPair>> resourceTokensMap;
private IRetryPolicyFactory resetSessionTokenRetryPolicy;
/**
* Compatibility mode: Allows to specify compatibility mode used by client when
* making query requests. Should be removed when application/sql is no longer
* supported.
*/
private final QueryCompatibilityMode queryCompatibilityMode = QueryCompatibilityMode.Default;
private final HttpClient reactorHttpClient;
private final GlobalEndpointManager globalEndpointManager;
private final RetryPolicy retryPolicy;
private volatile boolean useMultipleWriteLocations;
private StoreClientFactory storeClientFactory;
private GatewayServiceConfigurationReader gatewayConfigurationReader;
public RxDocumentClientImpl(URI serviceEndpoint,
String masterKeyOrResourceToken,
List<Permission> permissionFeed,
ConnectionPolicy connectionPolicy,
ConsistencyLevel consistencyLevel,
Configs configs,
TokenResolver tokenResolver,
CosmosKeyCredential cosmosKeyCredential,
boolean sessionCapturingOverride,
boolean connectionSharingAcrossClientsEnabled) {
this(serviceEndpoint, masterKeyOrResourceToken, permissionFeed, connectionPolicy, consistencyLevel, configs, cosmosKeyCredential, sessionCapturingOverride, connectionSharingAcrossClientsEnabled);
this.tokenResolver = tokenResolver;
}
private RxDocumentClientImpl(URI serviceEndpoint,
String masterKeyOrResourceToken,
List<Permission> permissionFeed,
ConnectionPolicy connectionPolicy,
ConsistencyLevel consistencyLevel,
Configs configs,
CosmosKeyCredential cosmosKeyCredential,
boolean sessionCapturingOverrideEnabled,
boolean connectionSharingAcrossClientsEnabled) {
this(serviceEndpoint, masterKeyOrResourceToken, connectionPolicy, consistencyLevel, configs, cosmosKeyCredential, sessionCapturingOverrideEnabled, connectionSharingAcrossClientsEnabled);
if (permissionFeed != null && permissionFeed.size() > 0) {
this.resourceTokensMap = new HashMap<>();
for (Permission permission : permissionFeed) {
String[] segments = StringUtils.split(permission.getResourceLink(),
Constants.Properties.PATH_SEPARATOR.charAt(0));
if (segments.length <= 0) {
throw new IllegalArgumentException("resourceLink");
}
List<PartitionKeyAndResourceTokenPair> partitionKeyAndResourceTokenPairs = null;
PathInfo pathInfo = new PathInfo(false, StringUtils.EMPTY, StringUtils.EMPTY, false);
if (!PathsHelper.tryParsePathSegments(permission.getResourceLink(), pathInfo, null)) {
throw new IllegalArgumentException(permission.getResourceLink());
}
partitionKeyAndResourceTokenPairs = resourceTokensMap.get(pathInfo.resourceIdOrFullName);
if (partitionKeyAndResourceTokenPairs == null) {
partitionKeyAndResourceTokenPairs = new ArrayList<>();
this.resourceTokensMap.put(pathInfo.resourceIdOrFullName, partitionKeyAndResourceTokenPairs);
}
PartitionKey partitionKey = permission.getResourcePartitionKey();
partitionKeyAndResourceTokenPairs.add(new PartitionKeyAndResourceTokenPair(
partitionKey != null ? BridgeInternal.getPartitionKeyInternal(partitionKey) : PartitionKeyInternal.Empty,
permission.getToken()));
logger.debug("Initializing resource token map , with map key [{}] , partition key [{}] and resource token",
pathInfo.resourceIdOrFullName, partitionKey != null ? partitionKey.toString() : null, permission.getToken());
}
if(this.resourceTokensMap.isEmpty()) {
throw new IllegalArgumentException("permissionFeed");
}
String firstToken = permissionFeed.get(0).getToken();
if(ResourceTokenAuthorizationHelper.isResourceToken(firstToken)) {
this.firstResourceTokenFromPermissionFeed = firstToken;
}
}
}
RxDocumentClientImpl(URI serviceEndpoint,
String masterKeyOrResourceToken,
ConnectionPolicy connectionPolicy,
ConsistencyLevel consistencyLevel,
Configs configs,
CosmosKeyCredential cosmosKeyCredential,
boolean sessionCapturingOverrideEnabled,
boolean connectionSharingAcrossClientsEnabled) {
logger.info(
"Initializing DocumentClient with"
+ " serviceEndpoint [{}], connectionPolicy [{}], consistencyLevel [{}], directModeProtocol [{}]",
serviceEndpoint, connectionPolicy, consistencyLevel, configs.getProtocol());
this.connectionSharingAcrossClientsEnabled = connectionSharingAcrossClientsEnabled;
this.configs = configs;
this.masterKeyOrResourceToken = masterKeyOrResourceToken;
this.serviceEndpoint = serviceEndpoint;
this.cosmosKeyCredential = cosmosKeyCredential;
if (this.cosmosKeyCredential != null) {
hasAuthKeyResourceToken = false;
this.authorizationTokenProvider = new BaseAuthorizationTokenProvider(this.cosmosKeyCredential);
} else if (masterKeyOrResourceToken != null && ResourceTokenAuthorizationHelper.isResourceToken(masterKeyOrResourceToken)) {
this.authorizationTokenProvider = null;
hasAuthKeyResourceToken = true;
} else if(masterKeyOrResourceToken != null && !ResourceTokenAuthorizationHelper.isResourceToken(masterKeyOrResourceToken)){
this.cosmosKeyCredential = new CosmosKeyCredential(this.masterKeyOrResourceToken);
hasAuthKeyResourceToken = false;
this.authorizationTokenProvider = new BaseAuthorizationTokenProvider(this.cosmosKeyCredential);
} else {
hasAuthKeyResourceToken = false;
this.authorizationTokenProvider = null;
}
if (connectionPolicy != null) {
this.connectionPolicy = connectionPolicy;
} else {
this.connectionPolicy = new ConnectionPolicy();
}
boolean disableSessionCapturing = (ConsistencyLevel.SESSION != consistencyLevel && !sessionCapturingOverrideEnabled);
this.sessionContainer = new SessionContainer(this.serviceEndpoint.getHost(), disableSessionCapturing);
this.consistencyLevel = consistencyLevel;
this.userAgentContainer = new UserAgentContainer();
String userAgentSuffix = this.connectionPolicy.getUserAgentSuffix();
if (userAgentSuffix != null && userAgentSuffix.length() > 0) {
userAgentContainer.setSuffix(userAgentSuffix);
}
this.reactorHttpClient = httpClient();
this.globalEndpointManager = new GlobalEndpointManager(asDatabaseAccountManagerInternal(), this.connectionPolicy, /**/configs);
this.retryPolicy = new RetryPolicy(this.globalEndpointManager, this.connectionPolicy);
this.resetSessionTokenRetryPolicy = retryPolicy;
}
private void initializeGatewayConfigurationReader() {
String resourceToken;
if(this.tokenResolver != null) {
resourceToken = this.tokenResolver.getAuthorizationToken(RequestVerb.GET, "", CosmosResourceType.System, null);
} else if(!this.hasAuthKeyResourceToken && this.authorizationTokenProvider == null) {
resourceToken = this.firstResourceTokenFromPermissionFeed;
} else {
assert this.masterKeyOrResourceToken != null || this.cosmosKeyCredential != null;
resourceToken = this.masterKeyOrResourceToken;
}
this.gatewayConfigurationReader = new GatewayServiceConfigurationReader(this.serviceEndpoint,
this.hasAuthKeyResourceToken,
resourceToken,
this.connectionPolicy,
this.authorizationTokenProvider,
this.reactorHttpClient);
DatabaseAccount databaseAccount = this.gatewayConfigurationReader.initializeReaderAsync().block();
this.useMultipleWriteLocations = this.connectionPolicy.getUsingMultipleWriteLocations() && BridgeInternal.isEnableMultipleWriteLocations(databaseAccount);
this.globalEndpointManager.refreshLocationAsync(databaseAccount, false).block();
}
public void init() {
this.gatewayProxy = createRxGatewayProxy(this.sessionContainer,
this.consistencyLevel,
this.queryCompatibilityMode,
this.userAgentContainer,
this.globalEndpointManager,
this.reactorHttpClient);
this.globalEndpointManager.init();
this.initializeGatewayConfigurationReader();
this.collectionCache = new RxClientCollectionCache(this.sessionContainer, this.gatewayProxy, this, this.retryPolicy);
this.resetSessionTokenRetryPolicy = new ResetSessionTokenRetryPolicyFactory(this.sessionContainer, this.collectionCache, this.retryPolicy);
this.partitionKeyRangeCache = new RxPartitionKeyRangeCache(RxDocumentClientImpl.this,
collectionCache);
if (this.connectionPolicy.getConnectionMode() == ConnectionMode.GATEWAY) {
this.storeModel = this.gatewayProxy;
} else {
this.initializeDirectConnectivity();
}
}
private void initializeDirectConnectivity() {
this.storeClientFactory = new StoreClientFactory(
this.configs,
this.connectionPolicy.getRequestTimeoutInMillis() / 1000,
0,
this.userAgentContainer,
this.connectionSharingAcrossClientsEnabled
);
this.addressResolver = new GlobalAddressResolver(
this.reactorHttpClient,
this.globalEndpointManager,
this.configs.getProtocol(),
this,
this.collectionCache,
this.partitionKeyRangeCache,
userAgentContainer,
null,
this.connectionPolicy);
this.createStoreModel(true);
}
DatabaseAccountManagerInternal asDatabaseAccountManagerInternal() {
return new DatabaseAccountManagerInternal() {
@Override
public URI getServiceEndpoint() {
return RxDocumentClientImpl.this.getServiceEndpoint();
}
@Override
public Flux<DatabaseAccount> getDatabaseAccountFromEndpoint(URI endpoint) {
logger.info("Getting database account endpoint from {}", endpoint);
return RxDocumentClientImpl.this.getDatabaseAccountFromEndpoint(endpoint);
}
@Override
public ConnectionPolicy getConnectionPolicy() {
return RxDocumentClientImpl.this.getConnectionPolicy();
}
};
}
RxGatewayStoreModel createRxGatewayProxy(ISessionContainer sessionContainer,
ConsistencyLevel consistencyLevel,
QueryCompatibilityMode queryCompatibilityMode,
UserAgentContainer userAgentContainer,
GlobalEndpointManager globalEndpointManager,
HttpClient httpClient) {
return new RxGatewayStoreModel(sessionContainer,
consistencyLevel,
queryCompatibilityMode,
userAgentContainer,
globalEndpointManager,
httpClient);
}
private HttpClient httpClient() {
HttpClientConfig httpClientConfig = new HttpClientConfig(this.configs)
.withMaxIdleConnectionTimeoutInMillis(this.connectionPolicy.getIdleConnectionTimeoutInMillis())
.withPoolSize(this.connectionPolicy.getMaxPoolSize())
.withHttpProxy(this.connectionPolicy.getProxy())
.withRequestTimeoutInMillis(this.connectionPolicy.getRequestTimeoutInMillis());
if (connectionSharingAcrossClientsEnabled) {
return SharedGatewayHttpClient.getOrCreateInstance(httpClientConfig);
} else {
return HttpClient.createFixed(httpClientConfig);
}
}
private void createStoreModel(boolean subscribeRntbdStatus) {
StoreClient storeClient = this.storeClientFactory.createStoreClient(
this.addressResolver,
this.sessionContainer,
this.gatewayConfigurationReader,
this,
false
);
this.storeModel = new ServerStoreModel(storeClient);
}
@Override
public URI getServiceEndpoint() {
return this.serviceEndpoint;
}
@Override
public URI getWriteEndpoint() {
return globalEndpointManager.getWriteEndpoints().stream().findFirst().orElse(null);
}
@Override
public URI getReadEndpoint() {
return globalEndpointManager.getReadEndpoints().stream().findFirst().orElse(null);
}
@Override
public ConnectionPolicy getConnectionPolicy() {
return this.connectionPolicy;
}
@Override
public Mono<ResourceResponse<Database>> createDatabase(Database database, RequestOptions options) {
DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy();
return ObservableHelper.inlineIfPossibleAsObs(() -> createDatabaseInternal(database, options, retryPolicyInstance), retryPolicyInstance);
}
private Mono<ResourceResponse<Database>> createDatabaseInternal(Database database, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (database == null) {
throw new IllegalArgumentException("Database");
}
logger.debug("Creating a Database. id: [{}]", database.getId());
validateResource(database);
Map<String, String> requestHeaders = this.getRequestHeaders(options);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Create,
ResourceType.Database, Paths.DATABASES_ROOT, database, requestHeaders, options);
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.create(request, retryPolicyInstance).map(response -> toResourceResponse(response, Database.class));
} catch (Exception e) {
logger.debug("Failure in creating a database. due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
@Override
public Mono<ResourceResponse<Database>> deleteDatabase(String databaseLink, RequestOptions options) {
DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy();
return ObservableHelper.inlineIfPossibleAsObs(() -> deleteDatabaseInternal(databaseLink, options, retryPolicyInstance), retryPolicyInstance);
}
private Mono<ResourceResponse<Database>> deleteDatabaseInternal(String databaseLink, RequestOptions options,
DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (StringUtils.isEmpty(databaseLink)) {
throw new IllegalArgumentException("databaseLink");
}
logger.debug("Deleting a Database. databaseLink: [{}]", databaseLink);
String path = Utils.joinPath(databaseLink, null);
Map<String, String> requestHeaders = this.getRequestHeaders(options);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Delete,
ResourceType.Database, path, requestHeaders, options);
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.delete(request, retryPolicyInstance).map(response -> toResourceResponse(response, Database.class));
} catch (Exception e) {
logger.debug("Failure in deleting a database. due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
@Override
public Mono<ResourceResponse<Database>> readDatabase(String databaseLink, RequestOptions options) {
DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy();
return ObservableHelper.inlineIfPossibleAsObs(() -> readDatabaseInternal(databaseLink, options, retryPolicyInstance), retryPolicyInstance);
}
private Mono<ResourceResponse<Database>> readDatabaseInternal(String databaseLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (StringUtils.isEmpty(databaseLink)) {
throw new IllegalArgumentException("databaseLink");
}
logger.debug("Reading a Database. databaseLink: [{}]", databaseLink);
String path = Utils.joinPath(databaseLink, null);
Map<String, String> requestHeaders = this.getRequestHeaders(options);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Read,
ResourceType.Database, path, requestHeaders, options);
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, Database.class));
} catch (Exception e) {
logger.debug("Failure in reading a database. due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
@Override
public Flux<FeedResponse<Database>> readDatabases(FeedOptions options) {
return readFeed(options, ResourceType.Database, Database.class, Paths.DATABASES_ROOT);
}
private String parentResourceLinkToQueryLink(String parentResouceLink, ResourceType resourceTypeEnum) {
switch (resourceTypeEnum) {
case Database:
return Paths.DATABASES_ROOT;
case DocumentCollection:
return Utils.joinPath(parentResouceLink, Paths.COLLECTIONS_PATH_SEGMENT);
case Document:
return Utils.joinPath(parentResouceLink, Paths.DOCUMENTS_PATH_SEGMENT);
case Offer:
return Paths.OFFERS_ROOT;
case User:
return Utils.joinPath(parentResouceLink, Paths.USERS_PATH_SEGMENT);
case Permission:
return Utils.joinPath(parentResouceLink, Paths.PERMISSIONS_PATH_SEGMENT);
case Attachment:
return Utils.joinPath(parentResouceLink, Paths.ATTACHMENTS_PATH_SEGMENT);
case StoredProcedure:
return Utils.joinPath(parentResouceLink, Paths.STORED_PROCEDURES_PATH_SEGMENT);
case Trigger:
return Utils.joinPath(parentResouceLink, Paths.TRIGGERS_PATH_SEGMENT);
case UserDefinedFunction:
return Utils.joinPath(parentResouceLink, Paths.USER_DEFINED_FUNCTIONS_PATH_SEGMENT);
default:
throw new IllegalArgumentException("resource type not supported");
}
}
private <T extends Resource> Flux<FeedResponse<T>> createQuery(
String parentResourceLink,
SqlQuerySpec sqlQuery,
FeedOptions options,
Class<T> klass,
ResourceType resourceTypeEnum) {
String queryResourceLink = parentResourceLinkToQueryLink(parentResourceLink, resourceTypeEnum);
UUID activityId = Utils.randomUUID();
IDocumentQueryClient queryClient = DocumentQueryClientImpl(RxDocumentClientImpl.this);
Flux<? extends IDocumentQueryExecutionContext<T>> executionContext =
DocumentQueryExecutionContextFactory.createDocumentQueryExecutionContextAsync(queryClient, resourceTypeEnum, klass, sqlQuery , options, queryResourceLink, false, activityId);
return executionContext.flatMap(IDocumentQueryExecutionContext::executeAsync);
}
@Override
public Flux<FeedResponse<Database>> queryDatabases(String query, FeedOptions options) {
return queryDatabases(new SqlQuerySpec(query), options);
}
@Override
public Flux<FeedResponse<Database>> queryDatabases(SqlQuerySpec querySpec, FeedOptions options) {
return createQuery(Paths.DATABASES_ROOT, querySpec, options, Database.class, ResourceType.Database);
}
@Override
public Mono<ResourceResponse<DocumentCollection>> createCollection(String databaseLink,
    DocumentCollection collection, RequestOptions options) {
    // Acquire a fresh session-token-reset retry policy and run the create under it.
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> this.createCollectionInternal(databaseLink, collection, options, retryPolicy),
        retryPolicy);
}
/**
 * Performs a collection create: validates inputs, builds the service request,
 * and records the resulting session token on success.
 */
private Mono<ResourceResponse<DocumentCollection>> createCollectionInternal(String databaseLink,
    DocumentCollection collection, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        // Argument validation happens synchronously; failures surface as an error Mono below.
        if (StringUtils.isEmpty(databaseLink)) {
            throw new IllegalArgumentException("databaseLink");
        }
        if (collection == null) {
            throw new IllegalArgumentException("collection");
        }
        logger.debug("Creating a Collection. databaseLink: [{}], Collection id: [{}]", databaseLink,
            collection.getId());
        validateResource(collection);

        final Map<String, String> headers = this.getRequestHeaders(options);
        final String path = Utils.joinPath(databaseLink, Paths.COLLECTIONS_PATH_SEGMENT);
        final RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Create,
            ResourceType.DocumentCollection, path, collection, headers, options);

        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }

        return this.create(request, retryPolicyInstance)
            .map(response -> toResourceResponse(response, DocumentCollection.class))
            .doOnNext(resourceResponse -> {
                // Remember the session token issued for the newly created collection.
                this.sessionContainer.setSessionToken(
                    resourceResponse.getResource().getResourceId(),
                    getAltLink(resourceResponse.getResource()),
                    resourceResponse.getResponseHeaders());
            });
    } catch (Exception e) {
        logger.debug("Failure in creating a collection. due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
@Override
public Mono<ResourceResponse<DocumentCollection>> replaceCollection(DocumentCollection collection,
    RequestOptions options) {
    // Run the replace under a fresh session-token-reset retry policy.
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> replaceCollectionInternal(collection, options, retryPolicy),
        retryPolicy);
}
/**
 * Performs a collection replace against the collection's self link and, when the
 * response carries a resource, records the issued session token.
 */
private Mono<ResourceResponse<DocumentCollection>> replaceCollectionInternal(DocumentCollection collection,
    RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (collection == null) {
            throw new IllegalArgumentException("collection");
        }
        logger.debug("Replacing a Collection. id: [{}]", collection.getId());
        validateResource(collection);

        final Map<String, String> headers = this.getRequestHeaders(options);
        final String path = Utils.joinPath(collection.getSelfLink(), null);
        final RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Replace,
            ResourceType.DocumentCollection, path, collection, headers, options);

        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }

        return this.replace(request, retryPolicyInstance)
            .map(response -> toResourceResponse(response, DocumentCollection.class))
            .doOnNext(resourceResponse -> {
                // Only update the session token when the response actually contains the resource.
                if (resourceResponse.getResource() != null) {
                    this.sessionContainer.setSessionToken(
                        resourceResponse.getResource().getResourceId(),
                        getAltLink(resourceResponse.getResource()),
                        resourceResponse.getResponseHeaders());
                }
            });
    } catch (Exception e) {
        logger.debug("Failure in replacing a collection. due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
@Override
public Mono<ResourceResponse<DocumentCollection>> deleteCollection(String collectionLink,
    RequestOptions options) {
    // Run the delete under a fresh session-token-reset retry policy.
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> deleteCollectionInternal(collectionLink, options, retryPolicy),
        retryPolicy);
}
/**
 * Performs a collection delete for the given collection link.
 */
private Mono<ResourceResponse<DocumentCollection>> deleteCollectionInternal(String collectionLink,
    RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(collectionLink)) {
            throw new IllegalArgumentException("collectionLink");
        }
        logger.debug("Deleting a Collection. collectionLink: [{}]", collectionLink);

        final Map<String, String> headers = this.getRequestHeaders(options);
        final String path = Utils.joinPath(collectionLink, null);
        final RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Delete,
            ResourceType.DocumentCollection, path, headers, options);

        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        return this.delete(request, retryPolicyInstance)
            .map(response -> toResourceResponse(response, DocumentCollection.class));
    } catch (Exception e) {
        logger.debug("Failure in deleting a collection, due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/** Sends a DELETE through the appropriate store proxy, stamping retry context on retries. */
private Mono<RxDocumentServiceResponse> delete(RxDocumentServiceRequest request, DocumentClientRetryPolicy documentClientRetryPolicy) {
    populateHeaders(request, RequestVerb.DELETE);
    // On a retry attempt, refresh timing and attach the retry context for diagnostics.
    boolean isRetryAttempt = request.requestContext != null && documentClientRetryPolicy.getRetryCount() > 0;
    if (isRetryAttempt) {
        documentClientRetryPolicy.updateEndTime();
        request.requestContext.updateRetryContext(documentClientRetryPolicy, true);
    }
    RxStoreModel proxy = getStoreProxy(request);
    return proxy.processMessage(request);
}
/** Sends a GET through the appropriate store proxy, stamping retry context on retries. */
private Mono<RxDocumentServiceResponse> read(RxDocumentServiceRequest request, DocumentClientRetryPolicy documentClientRetryPolicy) {
    populateHeaders(request, RequestVerb.GET);
    // On a retry attempt, refresh timing and attach the retry context for diagnostics.
    boolean isRetryAttempt = request.requestContext != null && documentClientRetryPolicy.getRetryCount() > 0;
    if (isRetryAttempt) {
        documentClientRetryPolicy.updateEndTime();
        request.requestContext.updateRetryContext(documentClientRetryPolicy, true);
    }
    RxStoreModel proxy = getStoreProxy(request);
    return proxy.processMessage(request);
}
/** Reads a feed; feed reads are always routed through the gateway proxy. */
Mono<RxDocumentServiceResponse> readFeed(RxDocumentServiceRequest request) {
    populateHeaders(request, RequestVerb.GET);
    return this.gatewayProxy.processMessage(request);
}
/** Executes a query request and captures any session token the response carries. */
private Mono<RxDocumentServiceResponse> query(RxDocumentServiceRequest request) {
    populateHeaders(request, RequestVerb.POST);
    RxStoreModel storeModel = this.getStoreProxy(request);
    return storeModel.processMessage(request)
        .map(response -> {
            // Query responses may carry a fresher session token; record it before handing back.
            this.captureSessionToken(request, response);
            return response;
        });
}
@Override
public Mono<ResourceResponse<DocumentCollection>> readCollection(String collectionLink,
    RequestOptions options) {
    // Run the read under a fresh session-token-reset retry policy.
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> readCollectionInternal(collectionLink, options, retryPolicy),
        retryPolicy);
}
/**
 * Performs a collection read for the given collection link.
 */
private Mono<ResourceResponse<DocumentCollection>> readCollectionInternal(String collectionLink,
    RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(collectionLink)) {
            throw new IllegalArgumentException("collectionLink");
        }
        logger.debug("Reading a Collection. collectionLink: [{}]", collectionLink);

        final Map<String, String> headers = this.getRequestHeaders(options);
        final String path = Utils.joinPath(collectionLink, null);
        final RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Read,
            ResourceType.DocumentCollection, path, headers, options);

        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        return this.read(request, retryPolicyInstance)
            .map(response -> toResourceResponse(response, DocumentCollection.class));
    } catch (Exception e) {
        logger.debug("Failure in reading a collection, due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
@Override
public Flux<FeedResponse<DocumentCollection>> readCollections(String databaseLink, FeedOptions options) {
    if (StringUtils.isEmpty(databaseLink)) {
        throw new IllegalArgumentException("databaseLink");
    }
    // Enumerate the database's collections feed.
    String collectionsLink = Utils.joinPath(databaseLink, Paths.COLLECTIONS_PATH_SEGMENT);
    return readFeed(options, ResourceType.DocumentCollection, DocumentCollection.class, collectionsLink);
}
@Override
public Flux<FeedResponse<DocumentCollection>> queryCollections(String databaseLink, String query,
    FeedOptions options) {
    // Wrap the raw query text and delegate to the spec-based overload's pipeline.
    SqlQuerySpec querySpec = new SqlQuerySpec(query);
    return createQuery(databaseLink, querySpec, options, DocumentCollection.class, ResourceType.DocumentCollection);
}
@Override
public Flux<FeedResponse<DocumentCollection>> queryCollections(String databaseLink,
    SqlQuerySpec querySpec, FeedOptions options) {
    // Collection queries run against the owning database's collections feed.
    return createQuery(databaseLink, querySpec, options, DocumentCollection.class,
        ResourceType.DocumentCollection);
}
/**
 * Serializes stored-procedure arguments into a JSON array literal.
 * JsonSerializable arguments use their own serializer; everything else goes through Jackson.
 *
 * @throws IllegalArgumentException when an argument cannot be serialized to JSON
 */
private static String serializeProcedureParams(Object[] objectArray) {
    String[] serialized = new String[objectArray.length];
    for (int i = 0; i < objectArray.length; ++i) {
        Object param = objectArray[i];
        if (param instanceof JsonSerializable) {
            serialized[i] = ((JsonSerializable) param).toJson();
        } else {
            try {
                serialized[i] = mapper.writeValueAsString(param);
            } catch (IOException e) {
                throw new IllegalArgumentException("Can't serialize the object into the json string", e);
            }
        }
    }
    // The wire format expects a single JSON array of the serialized arguments.
    return String.format("[%s]", StringUtils.join(serialized, ","));
}
private static void validateResource(Resource resource) {
if (!StringUtils.isEmpty(resource.getId())) {
if (resource.getId().indexOf('/') != -1 || resource.getId().indexOf('\\') != -1 ||
resource.getId().indexOf('?') != -1 || resource.getId().indexOf('
throw new IllegalArgumentException("Id contains illegal chars.");
}
if (resource.getId().endsWith(" ")) {
throw new IllegalArgumentException("Id ends with a space.");
}
}
}
/**
 * Translates a RequestOptions instance into the wire-level HTTP headers for one request.
 * Ordering matters: client-level defaults are written first, then any caller-supplied
 * custom headers, then the strongly-typed options — so typed options win on conflict
 * (e.g. a per-request consistency level overrides the client default).
 *
 * @param options per-request options; may be null, in which case only client defaults apply
 * @return a mutable header map (never null)
 */
private Map<String, String> getRequestHeaders(RequestOptions options) {
Map<String, String> headers = new HashMap<>();
// Multi-write accounts advertise tentative-write support on every request.
if (this.useMultipleWriteLocations) {
headers.put(HttpConstants.HttpHeaders.ALLOW_TENTATIVE_WRITES, Boolean.TRUE.toString());
}
// Client-level default consistency; may be overridden per request further below.
if (consistencyLevel != null) {
headers.put(HttpConstants.HttpHeaders.CONSISTENCY_LEVEL, consistencyLevel.toString());
}
if (options == null) {
return headers;
}
// Caller-supplied raw headers are applied before the typed options so typed options win.
Map<String, String> customOptions = options.getHeaders();
if (customOptions != null) {
headers.putAll(customOptions);
}
// Optimistic-concurrency precondition headers.
if (options.getAccessCondition() != null) {
if (options.getAccessCondition().getType() == AccessConditionType.IF_MATCH) {
headers.put(HttpConstants.HttpHeaders.IF_MATCH, options.getAccessCondition().getCondition());
} else {
headers.put(HttpConstants.HttpHeaders.IF_NONE_MATCH, options.getAccessCondition().getCondition());
}
}
// Per-request consistency overrides the client default written earlier.
if (options.getConsistencyLevel() != null) {
headers.put(HttpConstants.HttpHeaders.CONSISTENCY_LEVEL, options.getConsistencyLevel().toString());
}
if (options.getIndexingDirective() != null) {
headers.put(HttpConstants.HttpHeaders.INDEXING_DIRECTIVE, options.getIndexingDirective().toString());
}
// Trigger lists are sent as comma-separated header values.
if (options.getPostTriggerInclude() != null && options.getPostTriggerInclude().size() > 0) {
String postTriggerInclude = StringUtils.join(options.getPostTriggerInclude(), ",");
headers.put(HttpConstants.HttpHeaders.POST_TRIGGER_INCLUDE, postTriggerInclude);
}
if (options.getPreTriggerInclude() != null && options.getPreTriggerInclude().size() > 0) {
String preTriggerInclude = StringUtils.join(options.getPreTriggerInclude(), ",");
headers.put(HttpConstants.HttpHeaders.PRE_TRIGGER_INCLUDE, preTriggerInclude);
}
if (!Strings.isNullOrEmpty(options.getSessionToken())) {
headers.put(HttpConstants.HttpHeaders.SESSION_TOKEN, options.getSessionToken());
}
if (options.getResourceTokenExpirySeconds() != null) {
headers.put(HttpConstants.HttpHeaders.RESOURCE_TOKEN_EXPIRY,
String.valueOf(options.getResourceTokenExpirySeconds()));
}
// Throughput (when non-negative) takes precedence over a named offer type.
if (options.getOfferThroughput() != null && options.getOfferThroughput() >= 0) {
headers.put(HttpConstants.HttpHeaders.OFFER_THROUGHPUT, options.getOfferThroughput().toString());
} else if (options.getOfferType() != null) {
headers.put(HttpConstants.HttpHeaders.OFFER_TYPE, options.getOfferType());
}
if (options.isPopulateQuotaInfo()) {
headers.put(HttpConstants.HttpHeaders.POPULATE_QUOTA_INFO, String.valueOf(true));
}
if (options.isScriptLoggingEnabled()) {
headers.put(HttpConstants.HttpHeaders.SCRIPT_ENABLE_LOGGING, String.valueOf(true));
}
return headers;
}
/**
 * Resolves the target collection from the cache, then stamps the partition-key
 * header on the request and re-emits it.
 */
private Mono<RxDocumentServiceRequest> addPartitionKeyInformation(RxDocumentServiceRequest request,
                                                                  String contentAsString,
                                                                  Document document,
                                                                  RequestOptions options) {
    Mono<Utils.ValueHolder<DocumentCollection>> collectionObs =
        this.collectionCache.resolveCollectionAsync(
            BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosResponseDiagnostics),
            request);
    return collectionObs.map(holder -> {
        // The resolved collection's PK definition drives the header value.
        addPartitionKeyInformation(request, contentAsString, document, options, holder.v);
        return request;
    });
}
/**
 * Variant taking an already-resolving collection Mono: stamps the partition-key
 * header once the collection is available, then re-emits the request.
 */
private Mono<RxDocumentServiceRequest> addPartitionKeyInformation(RxDocumentServiceRequest request,
                                                                  String contentAsString,
                                                                  Object document,
                                                                  RequestOptions options,
                                                                  Mono<Utils.ValueHolder<DocumentCollection>> collectionObs) {
    return collectionObs.map(holder -> {
        addPartitionKeyInformation(request, contentAsString, document, options, holder.v);
        return request;
    });
}
/**
 * Computes the effective partition key for the request and stamps it on the
 * partition-key header. Branch order is significant: an explicit PartitionKey.NONE
 * must be detected before the general explicit-PK case, and a missing/empty PK
 * definition short-circuits to the empty key before any body extraction is attempted.
 *
 * @throws UnsupportedOperationException when no PK can be determined and no body is available
 */
private void addPartitionKeyInformation(RxDocumentServiceRequest request,
String contentAsString,
Object objectDoc, RequestOptions options,
DocumentCollection collection) {
PartitionKeyDefinition partitionKeyDefinition = collection.getPartitionKey();
PartitionKeyInternal partitionKeyInternal = null;
if (options != null && options.getPartitionKey() != null && options.getPartitionKey().equals(PartitionKey.NONE)){
// Caller explicitly requested the "none" partition key.
partitionKeyInternal = BridgeInternal.getNonePartitionKey(partitionKeyDefinition);
} else if (options != null && options.getPartitionKey() != null) {
// Caller supplied an explicit partition key value.
partitionKeyInternal = BridgeInternal.getPartitionKeyInternal(options.getPartitionKey());
} else if (partitionKeyDefinition == null || partitionKeyDefinition.getPaths().size() == 0) {
// Collection has no partition key definition: use the empty key.
partitionKeyInternal = PartitionKeyInternal.getEmpty();
} else if (contentAsString != null) {
// Extract the PK value from the document body; the extraction is wrapped in a
// callable so its (de)serialization cost is recorded in the diagnostics context.
Callable<PartitionKeyInternal> extractPartitionKeyCallable = () -> {
CosmosItemProperties cosmosItemProperties;
if (objectDoc instanceof CosmosItemProperties) {
cosmosItemProperties = (CosmosItemProperties) objectDoc;
} else {
cosmosItemProperties = new CosmosItemProperties(contentAsString);
}
return extractPartitionKeyValueFromDocument(cosmosItemProperties, partitionKeyDefinition);
};
SerializationDiagnosticsContext serializationDiagnosticsContext = BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosResponseDiagnostics);
partitionKeyInternal = serializationDiagnosticsContext.getResource(extractPartitionKeyCallable, SerializationDiagnosticsContext.SerializationType.PartitionKeyFetchSerialization);
} else {
throw new UnsupportedOperationException("PartitionKey value must be supplied for this operation.");
}
request.setPartitionKeyInternal(partitionKeyInternal);
// Header values must be ASCII; escape non-ASCII characters in the JSON form.
request.getHeaders().put(HttpConstants.HttpHeaders.PARTITION_KEY, Utils.escapeNonAscii(partitionKeyInternal.toJson()));
}
/**
 * Reads the partition key value out of a document body using the first path of the
 * PK definition. Missing or object-valued fields map to the "none" partition key.
 * Returns null when the definition is null or has an empty path.
 */
private static PartitionKeyInternal extractPartitionKeyValueFromDocument(
        CosmosItemProperties document,
        PartitionKeyDefinition partitionKeyDefinition) {
    if (partitionKeyDefinition == null) {
        return null;
    }
    // Only the first partition-key path is honored.
    String path = partitionKeyDefinition.getPaths().iterator().next();
    List<String> parts = PathParser.getPathParts(path);
    if (parts.size() < 1) {
        return null;
    }
    Object value = document.getObjectByPath(parts);
    if (value == null || value.getClass() == ObjectNode.class) {
        // Absent or object-typed values are treated as the "none" partition key.
        value = BridgeInternal.getNonePartitionKey(partitionKeyDefinition);
    }
    return value instanceof PartitionKeyInternal
        ? (PartitionKeyInternal) value
        : PartitionKeyInternal.fromObjectArray(Collections.singletonList(value), false);
}
/**
 * Builds the service request for a document create/upsert: serializes the payload,
 * constructs the request, and asynchronously stamps the partition-key header.
 */
private Mono<RxDocumentServiceRequest> getCreateDocumentRequest(DocumentClientRetryPolicy requestRetryPolicy,
                                                                String documentCollectionLink,
                                                                Object document,
                                                                RequestOptions options,
                                                                boolean disableAutomaticIdGeneration,
                                                                OperationType operationType) {
    if (StringUtils.isEmpty(documentCollectionLink)) {
        throw new IllegalArgumentException("documentCollectionLink");
    }
    if (document == null) {
        throw new IllegalArgumentException("document");
    }

    // Serialize once; the same JSON feeds both the request body and PK extraction.
    final String content = toJsonString(document, mapper);
    final String path = Utils.joinPath(documentCollectionLink, Paths.DOCUMENTS_PATH_SEGMENT);
    final Map<String, String> headers = this.getRequestHeaders(options);
    final RxDocumentServiceRequest request = RxDocumentServiceRequest.create(operationType,
        ResourceType.Document, path, headers, options, content);

    if (requestRetryPolicy != null) {
        requestRetryPolicy.onBeforeSendRequest(request);
    }

    Mono<Utils.ValueHolder<DocumentCollection>> collectionObs =
        this.collectionCache.resolveCollectionAsync(
            BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosResponseDiagnostics),
            request);
    return addPartitionKeyInformation(request, content, document, options, collectionObs);
}
/**
 * Stamps the per-request transport headers: date, authorization (when any credential
 * is configured), and default content-type/accept for JSON.
 */
private void populateHeaders(RxDocumentServiceRequest request, RequestVerb httpMethod) {
    Map<String, String> headers = request.getHeaders();
    headers.put(HttpConstants.HttpHeaders.X_DATE, Utils.nowAsRFC1123());

    boolean hasCredentials = this.masterKeyOrResourceToken != null || this.resourceTokensMap != null
        || this.tokenResolver != null || this.cosmosKeyCredential != null;
    if (hasCredentials) {
        String resourceName = request.getResourceAddress();
        String authorization = this.getUserAuthorizationToken(
            resourceName, request.getResourceType(), httpMethod, headers,
            AuthorizationTokenType.PrimaryMasterKey, request.properties);
        try {
            // The token must be URL-encoded before it is placed in the header.
            authorization = URLEncoder.encode(authorization, "UTF-8");
        } catch (UnsupportedEncodingException e) {
            throw new IllegalStateException("Failed to encode authtoken.", e);
        }
        headers.put(HttpConstants.HttpHeaders.AUTHORIZATION, authorization);
    }

    // Body-carrying verbs default to JSON content-type unless the caller set one.
    boolean isBodyVerb = RequestVerb.POST.equals(httpMethod) || RequestVerb.PUT.equals(httpMethod);
    if (isBodyVerb && !headers.containsKey(HttpConstants.HttpHeaders.CONTENT_TYPE)) {
        headers.put(HttpConstants.HttpHeaders.CONTENT_TYPE, RuntimeConstants.MediaTypes.JSON);
    }
    if (!headers.containsKey(HttpConstants.HttpHeaders.ACCEPT)) {
        headers.put(HttpConstants.HttpHeaders.ACCEPT, RuntimeConstants.MediaTypes.JSON);
    }
}
/**
 * Produces the authorization token for a request. Credential precedence (order matters):
 * custom token resolver, key credential, master/resource token, then the resource-token map.
 */
@Override
public String getUserAuthorizationToken(String resourceName,
                                        ResourceType resourceType,
                                        RequestVerb requestVerb,
                                        Map<String, String> headers,
                                        AuthorizationTokenType tokenType,
                                        Map<String, Object> properties) {
    if (this.tokenResolver != null) {
        Map<String, Object> readOnlyProperties =
            properties != null ? Collections.unmodifiableMap(properties) : null;
        return this.tokenResolver.getAuthorizationToken(requestVerb, resourceName,
            this.resolveCosmosResourceType(resourceType), readOnlyProperties);
    }
    if (cosmosKeyCredential != null) {
        return this.authorizationTokenProvider.generateKeyAuthorizationSignature(requestVerb, resourceName,
            resourceType, headers);
    }
    if (masterKeyOrResourceToken != null && hasAuthKeyResourceToken && resourceTokensMap == null) {
        // A single resource token is used verbatim.
        return masterKeyOrResourceToken;
    }
    assert resourceTokensMap != null;
    if (resourceType.equals(ResourceType.DatabaseAccount)) {
        return this.firstResourceTokenFromPermissionFeed;
    }
    return ResourceTokenAuthorizationHelper.getAuthorizationTokenUsingResourceTokens(
        resourceTokensMap, requestVerb, resourceName, headers);
}
/** Maps an internal ResourceType name onto CosmosResourceType, defaulting to System. */
private CosmosResourceType resolveCosmosResourceType(ResourceType resourceType) {
    String name = resourceType.toString();
    try {
        return CosmosResourceType.valueOf(name);
    } catch (IllegalArgumentException ignored) {
        // Resource types with no public mapping are treated as system resources.
        return CosmosResourceType.System;
    }
}
/** Propagates the server-issued session token from a response into the session container. */
void captureSessionToken(RxDocumentServiceRequest request, RxDocumentServiceResponse response) {
    Map<String, String> responseHeaders = response.getResponseHeaders();
    this.sessionContainer.setSessionToken(request, responseHeaders);
}
/** Sends a create (POST) through the appropriate store proxy, stamping retry context on retries. */
private Mono<RxDocumentServiceResponse> create(RxDocumentServiceRequest request, DocumentClientRetryPolicy retryPolicy) {
    populateHeaders(request, RequestVerb.POST);
    RxStoreModel storeProxy = this.getStoreProxy(request);
    // On a retry attempt, refresh timing and attach the retry context for diagnostics.
    boolean isRetryAttempt = request.requestContext != null && retryPolicy.getRetryCount() > 0;
    if (isRetryAttempt) {
        retryPolicy.updateEndTime();
        request.requestContext.updateRetryContext(retryPolicy, true);
    }
    return storeProxy.processMessage(request);
}
/**
 * Sends an upsert: a POST with the is-upsert header set. Captures the session token
 * from the response before handing it back.
 */
private Mono<RxDocumentServiceResponse> upsert(RxDocumentServiceRequest request, DocumentClientRetryPolicy documentClientRetryPolicy) {
    populateHeaders(request, RequestVerb.POST);
    Map<String, String> headers = request.getHeaders();
    // populateHeaders always leaves the request with a header map.
    assert (headers != null);
    headers.put(HttpConstants.HttpHeaders.IS_UPSERT, "true");

    boolean isRetryAttempt = request.requestContext != null && documentClientRetryPolicy.getRetryCount() > 0;
    if (isRetryAttempt) {
        documentClientRetryPolicy.updateEndTime();
        request.requestContext.updateRetryContext(documentClientRetryPolicy, true);
    }

    return getStoreProxy(request).processMessage(request)
        .map(response -> {
            this.captureSessionToken(request, response);
            return response;
        });
}
@Override
public Mono<ResourceResponse<Document>> createDocument(String collectionLink, Object document,
    RequestOptions options, boolean disableAutomaticIdGeneration) {
    DocumentClientRetryPolicy policy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    boolean partitionKeyNotSupplied = options == null || options.getPartitionKey() == null;
    if (partitionKeyNotSupplied) {
        // Without an explicit PK, wrap with a policy that refreshes the cached collection on PK mismatch.
        policy = new PartitionKeyMismatchRetryPolicy(collectionCache, policy, collectionLink, options);
    }
    final DocumentClientRetryPolicy retryPolicy = policy;
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> createDocumentInternal(collectionLink, document, options, disableAutomaticIdGeneration, retryPolicy),
        retryPolicy);
}
/** Builds the create-document request and executes it, mapping to a typed response. */
private Mono<ResourceResponse<Document>> createDocumentInternal(String collectionLink, Object document,
    RequestOptions options, boolean disableAutomaticIdGeneration, DocumentClientRetryPolicy requestRetryPolicy) {
    try {
        logger.debug("Creating a Document. collectionLink: [{}]", collectionLink);
        Mono<RxDocumentServiceRequest> requestObs = getCreateDocumentRequest(requestRetryPolicy,
            collectionLink, document, options, disableAutomaticIdGeneration, OperationType.Create);
        return requestObs
            .flatMap(request -> create(request, requestRetryPolicy))
            .map(serviceResponse -> toResourceResponse(serviceResponse, Document.class));
    } catch (Exception e) {
        logger.debug("Failure in creating a document due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
@Override
public Mono<ResourceResponse<Document>> upsertDocument(String collectionLink, Object document,
    RequestOptions options, boolean disableAutomaticIdGeneration) {
    DocumentClientRetryPolicy policy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    boolean partitionKeyNotSupplied = options == null || options.getPartitionKey() == null;
    if (partitionKeyNotSupplied) {
        // Without an explicit PK, wrap with a policy that refreshes the cached collection on PK mismatch.
        policy = new PartitionKeyMismatchRetryPolicy(collectionCache, policy, collectionLink, options);
    }
    final DocumentClientRetryPolicy retryPolicy = policy;
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> upsertDocumentInternal(collectionLink, document, options, disableAutomaticIdGeneration, retryPolicy),
        retryPolicy);
}
/** Builds the upsert-document request and executes it, mapping to a typed response. */
private Mono<ResourceResponse<Document>> upsertDocumentInternal(String collectionLink, Object document,
    RequestOptions options, boolean disableAutomaticIdGeneration, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        logger.debug("Upserting a Document. collectionLink: [{}]", collectionLink);
        Mono<RxDocumentServiceRequest> requestObs = getCreateDocumentRequest(retryPolicyInstance,
            collectionLink, document, options, disableAutomaticIdGeneration, OperationType.Upsert);
        return requestObs
            .flatMap(request -> upsert(request, retryPolicyInstance))
            .map(serviceResponse -> toResourceResponse(serviceResponse, Document.class));
    } catch (Exception e) {
        logger.debug("Failure in upserting a document due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
@Override
public Mono<ResourceResponse<Document>> replaceDocument(String documentLink, Object document,
    RequestOptions options) {
    DocumentClientRetryPolicy policy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    if (options == null || options.getPartitionKey() == null) {
        // Derive the owning collection link so a PK mismatch can refresh the collection cache.
        String collectionLink = Utils.getCollectionName(documentLink);
        policy = new PartitionKeyMismatchRetryPolicy(collectionCache, policy, collectionLink, options);
    }
    final DocumentClientRetryPolicy retryPolicy = policy;
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> replaceDocumentInternal(documentLink, document, options, retryPolicy),
        retryPolicy);
}
/**
 * Converts an arbitrary payload to a typed Document and delegates to the
 * Document-typed replace path.
 */
private Mono<ResourceResponse<Document>> replaceDocumentInternal(String documentLink, Object document,
    RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(documentLink)) {
            throw new IllegalArgumentException("documentLink");
        }
        if (document == null) {
            throw new IllegalArgumentException("document");
        }
        Document typedDocument = documentFromObject(document, mapper);
        return this.replaceDocumentInternal(documentLink, typedDocument, options, retryPolicyInstance);
    } catch (Exception e) {
        // FIX: pass the throwable itself so the stack trace is preserved, consistent with
        // the collection-level methods above which log the exception, not just its message.
        logger.debug("Failure in replacing a document due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
@Override
public Mono<ResourceResponse<Document>> replaceDocument(Document document, RequestOptions options) {
    DocumentClientRetryPolicy policy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    if (options == null || options.getPartitionKey() == null) {
        // Self-link identifies the owning collection for PK-mismatch cache refresh.
        String collectionLink = document.getSelfLink();
        policy = new PartitionKeyMismatchRetryPolicy(collectionCache, policy, collectionLink, options);
    }
    final DocumentClientRetryPolicy retryPolicy = policy;
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> replaceDocumentInternal(document, options, retryPolicy),
        retryPolicy);
}
/**
 * Replaces a document in place, addressing it via its self link.
 */
private Mono<ResourceResponse<Document>> replaceDocumentInternal(Document document, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (document == null) {
            throw new IllegalArgumentException("document");
        }
        return this.replaceDocumentInternal(document.getSelfLink(), document, options, retryPolicyInstance);
    } catch (Exception e) {
        // FIX: the message previously said "replacing a database" (copy/paste error) —
        // this method replaces a document. Also pass the throwable to keep the stack trace.
        logger.debug("Failure in replacing a document due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Core document-replace path: serializes the document, builds the Replace request,
 * stamps the partition-key header asynchronously, then executes the replace.
 */
private Mono<ResourceResponse<Document>> replaceDocumentInternal(String documentLink,
                                                                 Document document,
                                                                 RequestOptions options,
                                                                 DocumentClientRetryPolicy retryPolicyInstance) {
    if (document == null) {
        throw new IllegalArgumentException("document");
    }
    logger.debug("Replacing a Document. documentLink: [{}]", documentLink);

    final String path = Utils.joinPath(documentLink, null);
    final Map<String, String> requestHeaders = getRequestHeaders(options);
    final String content = toJsonString(document, mapper);
    final RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Replace,
        ResourceType.Document, path, requestHeaders, options, content);

    if (retryPolicyInstance != null) {
        retryPolicyInstance.onBeforeSendRequest(request);
    }

    Mono<Utils.ValueHolder<DocumentCollection>> collectionObs =
        collectionCache.resolveCollectionAsync(
            BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosResponseDiagnostics),
            request);
    // addPartitionKeyInformation re-emits the same request instance once the PK header is set.
    Mono<RxDocumentServiceRequest> requestObs =
        addPartitionKeyInformation(request, content, document, options, collectionObs);

    return requestObs.flatMap(req ->
        replace(req, retryPolicyInstance).map(resp -> toResourceResponse(resp, Document.class)));
}
@Override
public Mono<ResourceResponse<Document>> deleteDocument(String documentLink, RequestOptions options) {
    // Run the delete under a fresh session-token-reset retry policy.
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> deleteDocumentInternal(documentLink, options, retryPolicy),
        retryPolicy);
}
/**
 * Core document-delete path: builds the Delete request, resolves the partition key
 * (from options; there is no body on a delete), then executes.
 */
private Mono<ResourceResponse<Document>> deleteDocumentInternal(String documentLink, RequestOptions options,
    DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(documentLink)) {
            throw new IllegalArgumentException("documentLink");
        }
        logger.debug("Deleting a Document. documentLink: [{}]", documentLink);

        final String path = Utils.joinPath(documentLink, null);
        final Map<String, String> requestHeaders = this.getRequestHeaders(options);
        final RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Delete,
            ResourceType.Document, path, requestHeaders, options);

        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }

        Mono<Utils.ValueHolder<DocumentCollection>> collectionObs =
            collectionCache.resolveCollectionAsync(
                BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosResponseDiagnostics),
                request);
        Mono<RxDocumentServiceRequest> requestObs =
            addPartitionKeyInformation(request, null, null, options, collectionObs);

        return requestObs.flatMap(req ->
            this.delete(req, retryPolicyInstance)
                .map(serviceResponse -> toResourceResponse(serviceResponse, Document.class)));
    } catch (Exception e) {
        logger.debug("Failure in deleting a document due to [{}]", e.getMessage());
        return Mono.error(e);
    }
}
@Override
public Mono<ResourceResponse<Document>> readDocument(String documentLink, RequestOptions options) {
    // Run the read under a fresh session-token-reset retry policy.
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> readDocumentInternal(documentLink, options, retryPolicy),
        retryPolicy);
}
/**
 * Core document-read path: builds the Read request, resolves the partition key
 * (from options; there is no body on a read), then executes the read.
 */
private Mono<ResourceResponse<Document>> readDocumentInternal(String documentLink, RequestOptions options,
    DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(documentLink)) {
            throw new IllegalArgumentException("documentLink");
        }
        logger.debug("Reading a Document. documentLink: [{}]", documentLink);
        String path = Utils.joinPath(documentLink, null);
        Map<String, String> requestHeaders = this.getRequestHeaders(options);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Read,
            ResourceType.Document, path, requestHeaders, options);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        Mono<Utils.ValueHolder<DocumentCollection>> collectionObs =
            this.collectionCache.resolveCollectionAsync(
                BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosResponseDiagnostics),
                request);
        Mono<RxDocumentServiceRequest> requestObs =
            addPartitionKeyInformation(request, null, null, options, collectionObs);
        // FIX: onBeforeSendRequest was also invoked a second time inside this flatMap,
        // so the retry policy saw every attempt twice — unlike the sibling
        // delete/replace paths, which invoke it exactly once per attempt.
        return requestObs.flatMap(req ->
            this.read(req, retryPolicyInstance)
                .map(serviceResponse -> toResourceResponse(serviceResponse, Document.class)));
    } catch (Exception e) {
        logger.debug("Failure in reading a document due to [{}]", e.getMessage());
        return Mono.error(e);
    }
}
@Override
public Flux<FeedResponse<Document>> readDocuments(String collectionLink, FeedOptions options) {
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }
    // A full read of the collection is modeled as the trivial SELECT * query.
    return queryDocuments(collectionLink, "SELECT * FROM r", options);
}
@Override
public <T> Mono<FeedResponse<T>> readMany(
    List<Pair<String, PartitionKey>> itemKeyList,
    String collectionLink,
    FeedOptions options,
    Class<T> klass) {
    // Point-reads many (id, partitionKey) pairs in one logical call: the pairs are grouped by
    // the partition key range that owns them, one query is issued per range, and the per-range
    // pages are merged into a single synthetic FeedResponse with an aggregated request charge.
    RxDocumentServiceRequest request = RxDocumentServiceRequest.create(
        OperationType.Query,
        ResourceType.Document,
        collectionLink, null
    );
    // Resolve the collection first: its resource id and partition key definition drive routing.
    Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = collectionCache.resolveCollectionAsync(null, request);
    return collectionObs
        .flatMap(documentCollectionResourceResponse -> {
                final DocumentCollection collection = documentCollectionResourceResponse.v;
                if (collection == null) {
                    throw new IllegalStateException("Collection cannot be null");
                }
                // Look up the collection's routing map to translate partition keys into ranges.
                Mono<Utils.ValueHolder<CollectionRoutingMap>> valueHolderMono = partitionKeyRangeCache
                                                                                   .tryLookupAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosResponseDiagnostics),
                                                                                       collection.getResourceId(),
                                                                                       null,
                                                                                       null);
                return valueHolderMono.flatMap(collectionRoutingMapValueHolder -> {
                    Map<PartitionKeyRange, List<Pair<String, PartitionKey>>> partitionRangeItemKeyMap =
                        new HashMap<>();
                    CollectionRoutingMap routingMap = collectionRoutingMapValueHolder.v;
                    if (routingMap == null) {
                        throw new IllegalStateException("Failed to get routing map.");
                    }
                    // Bucket each requested (id, partitionKey) pair under its owning range.
                    itemKeyList
                        .forEach(stringPartitionKeyPair -> {
                            // Compute the effective partition key string used for range lookup.
                            String effectivePartitionKeyString =  PartitionKeyInternalHelper
                                                                      .getEffectivePartitionKeyString(BridgeInternal
                                                                                                          .getPartitionKeyInternal(stringPartitionKeyPair
                                                                                                                                       .getRight()),
                                                                          collection
                                                                              .getPartitionKey());
                            //use routing map to find the partitionKeyRangeId of each
                            // effectivePartitionKey
                            PartitionKeyRange range =
                                routingMap.getRangeByEffectivePartitionKey(effectivePartitionKeyString);
                            //group the itemKeyList based on partitionKeyRangeId
                            if (partitionRangeItemKeyMap.get(range) == null) {
                                List<Pair<String, PartitionKey>> list = new ArrayList<>();
                                list.add(stringPartitionKeyPair);
                                partitionRangeItemKeyMap.put(range, list);
                            } else {
                                List<Pair<String, PartitionKey>> pairs =
                                    partitionRangeItemKeyMap.get(range);
                                pairs.add(stringPartitionKeyPair);
                                partitionRangeItemKeyMap.put(range, pairs);
                            }
                        });
                    Set<PartitionKeyRange> partitionKeyRanges = partitionRangeItemKeyMap.keySet();
                    List<PartitionKeyRange> ranges = new ArrayList<>();
                    ranges.addAll(partitionKeyRanges);
                    //Create the range query map that contains the query to be run for that
                    // partitionkeyrange
                    Map<PartitionKeyRange, SqlQuerySpec> rangeQueryMap;
                    rangeQueryMap = getRangeQueryMap(partitionRangeItemKeyMap,
                        collection.getPartitionKey());
                    // The outer query text is never sent as-is: each range runs its own spec
                    // from rangeQueryMap (see createReadManyQuery).
                    String sqlQuery = "this is dummy and only used in creating " +
                                          "ParallelDocumentQueryExecutioncontext, but not used";
                    // create the executable query
                    return createReadManyQuery(collectionLink,
                        new SqlQuerySpec(sqlQuery),
                        options,
                        Document.class,
                        ResourceType.Document,
                        collection,
                        Collections.unmodifiableMap(rangeQueryMap))
                               .collectList()
                               .map(feedList -> {
                                   // Merge the per-range pages into one response, summing charges
                                   // and deserializing each document into the caller's type.
                                   List<T> finalList = new ArrayList<T>();
                                   HashMap<String, String> headers = new HashMap<>();
                                   double requestCharge = 0;
                                   for (FeedResponse<Document> page : feedList) {
                                       requestCharge += page.getRequestCharge();
                                       finalList.addAll(page.getResults().stream().map(document -> document.toObject(klass)).collect(Collectors.toList()));
                                   }
                                   headers.put(HttpConstants.HttpHeaders.REQUEST_CHARGE, Double
                                                                                             .toString(requestCharge));
                                   FeedResponse<T> frp = BridgeInternal
                                                             .createFeedResponse(finalList, headers);
                                   return frp;
                               });
                });
            }
        );
}
private Map<PartitionKeyRange, SqlQuerySpec> getRangeQueryMap(
    Map<PartitionKeyRange, List<Pair<String, PartitionKey>>> partitionRangeItemKeyMap,
    PartitionKeyDefinition partitionKeyDefinition) {
    // Builds one SQL spec per partition key range. When the partition key path is simply
    // "id", a compact IN-clause query is used; otherwise each (id, pk) pair is matched
    // with an explicit OR of (id AND pk) predicates.
    Map<PartitionKeyRange, SqlQuerySpec> queriesByRange = new HashMap<>();
    String pkSelector = createPkSelector(partitionKeyDefinition);
    boolean pkIsId = "[\"id\"]".equals(pkSelector);
    for (Map.Entry<PartitionKeyRange, List<Pair<String, PartitionKey>>> entry : partitionRangeItemKeyMap.entrySet()) {
        SqlQuerySpec spec = pkIsId
            ? createReadManyQuerySpecPartitionKeyIdSame(entry.getValue(), pkSelector)
            : createReadManyQuerySpec(entry.getValue(), pkSelector);
        queriesByRange.put(entry.getKey(), spec);
    }
    return queriesByRange;
}
/**
 * Builds "SELECT * FROM c WHERE c.id IN ( @param0, ... )" for the case where the
 * partition key path is the document id itself. Pairs whose partition key value does
 * not equal the id are skipped (they cannot be expressed by an id-only IN clause).
 *
 * Fix: the previous version appended ", " after every element except the last by
 * index, so a skipped last pair left a dangling ", )" (invalid SQL) and earlier
 * skips mis-aligned the separators. The separator is now emitted before each
 * appended parameter except the first.
 */
private SqlQuerySpec createReadManyQuerySpecPartitionKeyIdSame(List<Pair<String, PartitionKey>> idPartitionKeyPairList, String partitionKeySelector) {
    StringBuilder queryStringBuilder = new StringBuilder();
    SqlParameterList parameters = new SqlParameterList();
    queryStringBuilder.append("SELECT * FROM c WHERE c.id IN ( ");
    boolean first = true;
    for (int i = 0; i < idPartitionKeyPairList.size(); i++) {
        Pair<String, PartitionKey> pair = idPartitionKeyPairList.get(i);
        String idValue = pair.getLeft();
        PartitionKey pkValueAsPartitionKey = pair.getRight();
        Object pkValue = BridgeInternal.getPartitionKeyObject(pkValueAsPartitionKey);
        // Only pairs whose pk value equals the id can use the id-only IN clause.
        if (!Objects.equals(idValue, pkValue)) {
            continue;
        }
        if (!first) {
            queryStringBuilder.append(", ");
        }
        first = false;
        String idParamName = "@param" + i;
        parameters.add(new SqlParameter(idParamName, idValue));
        queryStringBuilder.append(idParamName);
    }
    queryStringBuilder.append(" )");
    return new SqlQuerySpec(queryStringBuilder.toString(), parameters);
}
// Builds "SELECT * FROM c WHERE ( (c.id = @p1 AND c[pk] = @p0) OR ... )" matching each
// (id, partitionKey) pair explicitly; used when the partition key path is not the id.
// Parameters are numbered pairwise: @param(2i) holds the pk value, @param(2i+1) the id.
private SqlQuerySpec createReadManyQuerySpec(List<Pair<String, PartitionKey>> idPartitionKeyPairList, String partitionKeySelector) {
    StringBuilder queryStringBuilder = new StringBuilder();
    SqlParameterList parameters = new SqlParameterList();
    queryStringBuilder.append("SELECT * FROM c WHERE ( ");
    for (int i = 0; i < idPartitionKeyPairList.size(); i++) {
        Pair<String, PartitionKey> pair = idPartitionKeyPairList.get(i);
        PartitionKey pkValueAsPartitionKey = pair.getRight();
        Object pkValue = BridgeInternal.getPartitionKeyObject(pkValueAsPartitionKey);
        String pkParamName = "@param" + (2 * i);
        parameters.add(new SqlParameter(pkParamName, pkValue));
        String idValue = pair.getLeft();
        String idParamName = "@param" + (2 * i + 1);
        parameters.add(new SqlParameter(idParamName, idValue));
        // One "(c.id = @idParam AND c[pk] = @pkParam)" disjunct per pair.
        queryStringBuilder.append("(");
        queryStringBuilder.append("c.id = ");
        queryStringBuilder.append(idParamName);
        queryStringBuilder.append(" AND ");
        queryStringBuilder.append(" c");
        queryStringBuilder.append(partitionKeySelector);
        queryStringBuilder.append((" = "));
        queryStringBuilder.append(pkParamName);
        queryStringBuilder.append(" )");
        if (i < idPartitionKeyPairList.size() - 1) {
            queryStringBuilder.append(" OR ");
        }
    }
    queryStringBuilder.append(" )");
    return new SqlQuerySpec(queryStringBuilder.toString(), parameters);
}
// Converts a partition key definition's paths (e.g. "/pk") into a bracketed selector
// string (e.g. "[\"pk\"]") suitable for appending after "c" in a SQL query.
// NOTE(review): the replace maps a double quote to a lone backslash ("\"" -> "\\"),
// which drops the quote character rather than escaping it — presumably intended to be
// "\\\"" (escaped quote); confirm against upstream before changing.
private String createPkSelector(PartitionKeyDefinition partitionKeyDefinition) {
    return partitionKeyDefinition.getPaths()
               .stream()
               .map(pathPart -> StringUtils.substring(pathPart, 1)) // skip the leading "/"
               .map(pathPart -> StringUtils.replace(pathPart, "\"", "\\")) // escape quote
               .map(part -> "[\"" + part + "\"]")
               .collect(Collectors.joining());
}
// Returns the positional SQL parameter name for the given index, e.g. "@param3".
// NOTE(review): name contains a typo ("Curent"); renaming would require updating any
// callers outside this view, so it is left as-is.
private String getCurentParamName(int paramCnt){
    return "@param" + paramCnt;
}
// Creates and executes the fan-out read-many query: a parallel query execution context
// is built from the per-partition-key-range query map (rangeQueryMap); the sqlQuery
// argument is a placeholder required by the factory and is not sent to the service.
private <T extends Resource> Flux<FeedResponse<T>> createReadManyQuery(
    String parentResourceLink,
    SqlQuerySpec sqlQuery,
    FeedOptions options,
    Class<T> klass,
    ResourceType resourceTypeEnum,
    DocumentCollection collection,
    Map<PartitionKeyRange, SqlQuerySpec> rangeQueryMap) {
    UUID activityId = Utils.randomUUID();
    IDocumentQueryClient queryClient = DocumentQueryClientImpl(RxDocumentClientImpl.this);
    Flux<? extends IDocumentQueryExecutionContext<T>> executionContext =
        DocumentQueryExecutionContextFactory.createReadManyQueryAsync(queryClient, collection.getResourceId(),
                                                                      sqlQuery,
                                                                      rangeQueryMap,
                                                                      options,
                                                                      collection.getResourceId(),
                                                                      parentResourceLink,
                                                                      activityId,
                                                                      klass,
                                                                      resourceTypeEnum);
    // Each emitted context executes its portion of the query; results are flattened.
    return executionContext.flatMap(IDocumentQueryExecutionContext::executeAsync);
}
@Override
public Flux<FeedResponse<Document>> queryDocuments(String collectionLink, String query,
                                                   FeedOptions options) {
    // Wrap the raw query text in a SqlQuerySpec and delegate to the spec-based overload.
    return queryDocuments(collectionLink, new SqlQuerySpec(query), options);
}
// Adapts this client to the IDocumentQueryClient interface consumed by the query
// execution machinery; each accessor forwards to the enclosing RxDocumentClientImpl.
// NOTE(review): the rxDocumentClientImpl parameter is unused — the anonymous class
// captures RxDocumentClientImpl.this directly.
private IDocumentQueryClient DocumentQueryClientImpl(RxDocumentClientImpl rxDocumentClientImpl) {
    return new IDocumentQueryClient () {
        @Override
        public RxCollectionCache getCollectionCache() {
            return RxDocumentClientImpl.this.collectionCache;
        }
        @Override
        public RxPartitionKeyRangeCache getPartitionKeyRangeCache() {
            return RxDocumentClientImpl.this.partitionKeyRangeCache;
        }
        @Override
        public IRetryPolicyFactory getResetSessionTokenRetryPolicy() {
            return RxDocumentClientImpl.this.resetSessionTokenRetryPolicy;
        }
        @Override
        public ConsistencyLevel getDefaultConsistencyLevelAsync() {
            // Account-level default consistency, as reported by the gateway.
            return RxDocumentClientImpl.this.gatewayConfigurationReader.getDefaultConsistencyLevel();
        }
        @Override
        public ConsistencyLevel getDesiredConsistencyLevelAsync() {
            // The consistency level this client instance was configured with.
            return RxDocumentClientImpl.this.consistencyLevel;
        }
        @Override
        public Mono<RxDocumentServiceResponse> executeQueryAsync(RxDocumentServiceRequest request) {
            return RxDocumentClientImpl.this.query(request).single();
        }
        @Override
        public QueryCompatibilityMode getQueryCompatibilityMode() {
            // TODO: Expose the query compatibility to the client.
            return QueryCompatibilityMode.Default;
        }
        @Override
        public Mono<RxDocumentServiceResponse> readFeedAsync(RxDocumentServiceRequest request) {
            // Not supported by this adapter.
            return null;
        }
    };
}
@Override
public Flux<FeedResponse<Document>> queryDocuments(String collectionLink, SqlQuerySpec querySpec,
                                                   FeedOptions options) {
    // Delegate to the generic query pipeline for Document resources.
    return createQuery(collectionLink, querySpec, options, Document.class, ResourceType.Document);
}
@Override
public Flux<FeedResponse<Document>> queryDocumentChangeFeed(final String collectionLink,
                                                            final ChangeFeedOptions changeFeedOptions) {
    // Validate, then run the change feed through its dedicated query implementation.
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }
    ChangeFeedQueryImpl<Document> changeFeedQuery = new ChangeFeedQueryImpl<>(
            this, ResourceType.Document, Document.class, collectionLink, changeFeedOptions);
    return changeFeedQuery.executeAsync();
}
@Override
public Flux<FeedResponse<PartitionKeyRange>> readPartitionKeyRanges(final String collectionLink,
                                                                    FeedOptions options) {
    // Reads the partition key range feed under the given collection.
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }
    String feedPath = Utils.joinPath(collectionLink, Paths.PARTITION_KEY_RANGES_PATH_SEGMENT);
    return readFeed(options, ResourceType.PartitionKeyRange, PartitionKeyRange.class, feedPath);
}
private RxDocumentServiceRequest getStoredProcedureRequest(String collectionLink, StoredProcedure storedProcedure,
                                                           RequestOptions options, OperationType operationType) {
    // Validate inputs, then build a StoredProcedure request rooted at the collection's sprocs path.
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }
    if (storedProcedure == null) {
        throw new IllegalArgumentException("storedProcedure");
    }
    validateResource(storedProcedure);
    String resourcePath = Utils.joinPath(collectionLink, Paths.STORED_PROCEDURES_PATH_SEGMENT);
    Map<String, String> headers = this.getRequestHeaders(options);
    return RxDocumentServiceRequest.create(operationType, ResourceType.StoredProcedure,
            resourcePath, storedProcedure, headers, options);
}
private RxDocumentServiceRequest getUserDefinedFunctionRequest(String collectionLink, UserDefinedFunction udf,
                                                               RequestOptions options, OperationType operationType) {
    // Validate inputs, then build a UDF request rooted at the collection's UDFs path.
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }
    if (udf == null) {
        throw new IllegalArgumentException("udf");
    }
    validateResource(udf);
    String resourcePath = Utils.joinPath(collectionLink, Paths.USER_DEFINED_FUNCTIONS_PATH_SEGMENT);
    Map<String, String> headers = this.getRequestHeaders(options);
    return RxDocumentServiceRequest.create(operationType, ResourceType.UserDefinedFunction,
            resourcePath, udf, headers, options);
}
@Override
public Mono<ResourceResponse<StoredProcedure>> createStoredProcedure(String collectionLink,
                                                                     StoredProcedure storedProcedure, RequestOptions options) {
    // Acquire a per-call retry policy and run the create under retry semantics.
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
            () -> createStoredProcedureInternal(collectionLink, storedProcedure, options, retryPolicy),
            retryPolicy);
}
private Mono<ResourceResponse<StoredProcedure>> createStoredProcedureInternal(String collectionLink,
                                                                              StoredProcedure storedProcedure, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    // Build the create request, notify the retry policy, then dispatch and wrap the response.
    try {
        logger.debug("Creating a StoredProcedure. collectionLink: [{}], storedProcedure id [{}]",
                collectionLink, storedProcedure.getId());
        RxDocumentServiceRequest serviceRequest = getStoredProcedureRequest(collectionLink, storedProcedure, options,
                OperationType.Create);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(serviceRequest);
        }
        return this.create(serviceRequest, retryPolicyInstance)
                .map(serviceResponse -> toResourceResponse(serviceResponse, StoredProcedure.class));
    } catch (Exception e) {
        logger.debug("Failure in creating a StoredProcedure due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
@Override
public Mono<ResourceResponse<StoredProcedure>> upsertStoredProcedure(String collectionLink,
                                                                     StoredProcedure storedProcedure, RequestOptions options) {
    // Acquire a per-call retry policy and run the upsert under retry semantics.
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
            () -> upsertStoredProcedureInternal(collectionLink, storedProcedure, options, retryPolicy),
            retryPolicy);
}
private Mono<ResourceResponse<StoredProcedure>> upsertStoredProcedureInternal(String collectionLink,
                                                                              StoredProcedure storedProcedure, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    // Build the upsert request, notify the retry policy, then dispatch and wrap the response.
    try {
        logger.debug("Upserting a StoredProcedure. collectionLink: [{}], storedProcedure id [{}]",
                collectionLink, storedProcedure.getId());
        RxDocumentServiceRequest serviceRequest = getStoredProcedureRequest(collectionLink, storedProcedure, options,
                OperationType.Upsert);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(serviceRequest);
        }
        return this.upsert(serviceRequest, retryPolicyInstance)
                .map(serviceResponse -> toResourceResponse(serviceResponse, StoredProcedure.class));
    } catch (Exception e) {
        logger.debug("Failure in upserting a StoredProcedure due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
@Override
public Mono<ResourceResponse<StoredProcedure>> replaceStoredProcedure(StoredProcedure storedProcedure,
                                                                      RequestOptions options) {
    // Acquire a per-call retry policy and run the replace under retry semantics.
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
            () -> replaceStoredProcedureInternal(storedProcedure, options, retryPolicy),
            retryPolicy);
}
private Mono<ResourceResponse<StoredProcedure>> replaceStoredProcedureInternal(StoredProcedure storedProcedure,
                                                                               RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    // Validate, build a replace request against the sproc's self link, then dispatch it.
    try {
        if (storedProcedure == null) {
            throw new IllegalArgumentException("storedProcedure");
        }
        logger.debug("Replacing a StoredProcedure. storedProcedure id [{}]", storedProcedure.getId());
        RxDocumentClientImpl.validateResource(storedProcedure);
        String resourcePath = Utils.joinPath(storedProcedure.getSelfLink(), null);
        Map<String, String> headers = getRequestHeaders(options);
        RxDocumentServiceRequest serviceRequest = RxDocumentServiceRequest.create(OperationType.Replace,
                ResourceType.StoredProcedure, resourcePath, storedProcedure, headers, options);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(serviceRequest);
        }
        return this.replace(serviceRequest, retryPolicyInstance)
                .map(serviceResponse -> toResourceResponse(serviceResponse, StoredProcedure.class));
    } catch (Exception e) {
        logger.debug("Failure in replacing a StoredProcedure due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
@Override
public Mono<ResourceResponse<StoredProcedure>> deleteStoredProcedure(String storedProcedureLink,
                                                                     RequestOptions options) {
    // Acquire a per-call retry policy and run the delete under retry semantics.
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
            () -> deleteStoredProcedureInternal(storedProcedureLink, options, retryPolicy),
            retryPolicy);
}
private Mono<ResourceResponse<StoredProcedure>> deleteStoredProcedureInternal(String storedProcedureLink,
                                                                              RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    // Validate the link, build a delete request against it, then dispatch it.
    try {
        if (StringUtils.isEmpty(storedProcedureLink)) {
            throw new IllegalArgumentException("storedProcedureLink");
        }
        logger.debug("Deleting a StoredProcedure. storedProcedureLink [{}]", storedProcedureLink);
        String resourcePath = Utils.joinPath(storedProcedureLink, null);
        Map<String, String> headers = this.getRequestHeaders(options);
        RxDocumentServiceRequest serviceRequest = RxDocumentServiceRequest.create(OperationType.Delete,
                ResourceType.StoredProcedure, resourcePath, headers, options);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(serviceRequest);
        }
        return this.delete(serviceRequest, retryPolicyInstance)
                .map(serviceResponse -> toResourceResponse(serviceResponse, StoredProcedure.class));
    } catch (Exception e) {
        logger.debug("Failure in deleting a StoredProcedure due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
@Override
public Mono<ResourceResponse<StoredProcedure>> readStoredProcedure(String storedProcedureLink,
                                                                   RequestOptions options) {
    // Acquire a per-call retry policy and run the read under retry semantics.
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
            () -> readStoredProcedureInternal(storedProcedureLink, options, retryPolicy),
            retryPolicy);
}
private Mono<ResourceResponse<StoredProcedure>> readStoredProcedureInternal(String storedProcedureLink,
                                                                            RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    // Validate the link, build a read request against it, then dispatch it.
    try {
        if (StringUtils.isEmpty(storedProcedureLink)) {
            throw new IllegalArgumentException("storedProcedureLink");
        }
        logger.debug("Reading a StoredProcedure. storedProcedureLink [{}]", storedProcedureLink);
        String resourcePath = Utils.joinPath(storedProcedureLink, null);
        Map<String, String> headers = this.getRequestHeaders(options);
        RxDocumentServiceRequest serviceRequest = RxDocumentServiceRequest.create(OperationType.Read,
                ResourceType.StoredProcedure, resourcePath, headers, options);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(serviceRequest);
        }
        return this.read(serviceRequest, retryPolicyInstance)
                .map(serviceResponse -> toResourceResponse(serviceResponse, StoredProcedure.class));
    } catch (Exception e) {
        logger.debug("Failure in reading a StoredProcedure due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
@Override
public Flux<FeedResponse<StoredProcedure>> readStoredProcedures(String collectionLink,
                                                                FeedOptions options) {
    // Reads the stored procedure feed under the given collection.
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }
    String feedPath = Utils.joinPath(collectionLink, Paths.STORED_PROCEDURES_PATH_SEGMENT);
    return readFeed(options, ResourceType.StoredProcedure, StoredProcedure.class, feedPath);
}
@Override
public Flux<FeedResponse<StoredProcedure>> queryStoredProcedures(String collectionLink, String query,
                                                                 FeedOptions options) {
    // Wrap the raw query text in a SqlQuerySpec and delegate to the spec-based overload.
    return queryStoredProcedures(collectionLink, new SqlQuerySpec(query), options);
}
@Override
public Flux<FeedResponse<StoredProcedure>> queryStoredProcedures(String collectionLink,
                                                                 SqlQuerySpec querySpec, FeedOptions options) {
    // Delegate to the generic query pipeline for StoredProcedure resources.
    return createQuery(collectionLink, querySpec, options, StoredProcedure.class, ResourceType.StoredProcedure);
}
@Override
public Mono<StoredProcedureResponse> executeStoredProcedure(String storedProcedureLink,
                                                            Object[] procedureParams) {
    // Convenience overload: execute without request options.
    return this.executeStoredProcedure(storedProcedureLink, null, procedureParams);
}
@Override
public Mono<StoredProcedureResponse> executeStoredProcedure(String storedProcedureLink,
                                                            RequestOptions options, Object[] procedureParams) {
    // Acquire a per-call retry policy and run the execution under retry semantics.
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
            () -> executeStoredProcedureInternal(storedProcedureLink, options, procedureParams, retryPolicy),
            retryPolicy);
}
// Executes a server-side stored procedure: serializes the parameters into the request
// body, attaches partition-key routing info, dispatches, and captures the session token
// from the response.
// NOTE(review): the flatMap dispatches the outer `request` (and captures its session
// token) rather than the `req` emitted by addPartitionKeyInformation — presumably the
// same instance; confirm before refactoring.
private Mono<StoredProcedureResponse> executeStoredProcedureInternal(String storedProcedureLink,
                                                                     RequestOptions options, Object[] procedureParams, DocumentClientRetryPolicy retryPolicy) {
    try {
        logger.debug("Executing a StoredProcedure. storedProcedureLink [{}]", storedProcedureLink);
        String path = Utils.joinPath(storedProcedureLink, null);
        Map<String, String> requestHeaders = getRequestHeaders(options);
        // Execution results come back as JSON, not as a resource payload.
        requestHeaders.put(HttpConstants.HttpHeaders.ACCEPT, RuntimeConstants.MediaTypes.JSON);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.ExecuteJavaScript,
                ResourceType.StoredProcedure, path,
                procedureParams != null ? RxDocumentClientImpl.serializeProcedureParams(procedureParams) : "",
                requestHeaders, options);
        Mono<RxDocumentServiceRequest> reqObs = addPartitionKeyInformation(request, null, null, options);
        return reqObs.flatMap(req -> create(request, retryPolicy)
                .map(response -> {
                    // Record the session token so subsequent session-consistent reads see this write.
                    this.captureSessionToken(request, response);
                    return toStoredProcedureResponse(response);
                }));
    } catch (Exception e) {
        logger.debug("Failure in executing a StoredProcedure due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
@Override
public Mono<ResourceResponse<Trigger>> createTrigger(String collectionLink, Trigger trigger,
                                                     RequestOptions options) {
    // Acquire a per-call retry policy and run the create under retry semantics.
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
            () -> createTriggerInternal(collectionLink, trigger, options, retryPolicy),
            retryPolicy);
}
private Mono<ResourceResponse<Trigger>> createTriggerInternal(String collectionLink, Trigger trigger,
                                                              RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    // Build the create request, notify the retry policy, then dispatch and wrap the response.
    try {
        logger.debug("Creating a Trigger. collectionLink [{}], trigger id [{}]", collectionLink,
                trigger.getId());
        RxDocumentServiceRequest serviceRequest = getTriggerRequest(collectionLink, trigger, options,
                OperationType.Create);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(serviceRequest);
        }
        return this.create(serviceRequest, retryPolicyInstance)
                .map(serviceResponse -> toResourceResponse(serviceResponse, Trigger.class));
    } catch (Exception e) {
        logger.debug("Failure in creating a Trigger due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
@Override
public Mono<ResourceResponse<Trigger>> upsertTrigger(String collectionLink, Trigger trigger,
                                                     RequestOptions options) {
    // Acquire a per-call retry policy and run the upsert under retry semantics.
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
            () -> upsertTriggerInternal(collectionLink, trigger, options, retryPolicy),
            retryPolicy);
}
private Mono<ResourceResponse<Trigger>> upsertTriggerInternal(String collectionLink, Trigger trigger,
                                                              RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    // Build the upsert request, notify the retry policy, then dispatch and wrap the response.
    try {
        logger.debug("Upserting a Trigger. collectionLink [{}], trigger id [{}]", collectionLink,
                trigger.getId());
        RxDocumentServiceRequest serviceRequest = getTriggerRequest(collectionLink, trigger, options,
                OperationType.Upsert);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(serviceRequest);
        }
        return this.upsert(serviceRequest, retryPolicyInstance)
                .map(serviceResponse -> toResourceResponse(serviceResponse, Trigger.class));
    } catch (Exception e) {
        logger.debug("Failure in upserting a Trigger due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
private RxDocumentServiceRequest getTriggerRequest(String collectionLink, Trigger trigger, RequestOptions options,
                                                   OperationType operationType) {
    // Validate inputs, then build a Trigger request rooted at the collection's triggers path.
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }
    if (trigger == null) {
        throw new IllegalArgumentException("trigger");
    }
    RxDocumentClientImpl.validateResource(trigger);
    String resourcePath = Utils.joinPath(collectionLink, Paths.TRIGGERS_PATH_SEGMENT);
    Map<String, String> headers = getRequestHeaders(options);
    return RxDocumentServiceRequest.create(operationType, ResourceType.Trigger, resourcePath,
            trigger, headers, options);
}
@Override
public Mono<ResourceResponse<Trigger>> replaceTrigger(Trigger trigger, RequestOptions options) {
    // Acquire a per-call retry policy and run the replace under retry semantics.
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
            () -> replaceTriggerInternal(trigger, options, retryPolicy),
            retryPolicy);
}
private Mono<ResourceResponse<Trigger>> replaceTriggerInternal(Trigger trigger, RequestOptions options,
                                                               DocumentClientRetryPolicy retryPolicyInstance) {
    // Validate, build a replace request against the trigger's self link, then dispatch it.
    try {
        if (trigger == null) {
            throw new IllegalArgumentException("trigger");
        }
        logger.debug("Replacing a Trigger. trigger id [{}]", trigger.getId());
        RxDocumentClientImpl.validateResource(trigger);
        String resourcePath = Utils.joinPath(trigger.getSelfLink(), null);
        Map<String, String> headers = getRequestHeaders(options);
        RxDocumentServiceRequest serviceRequest = RxDocumentServiceRequest.create(OperationType.Replace,
                ResourceType.Trigger, resourcePath, trigger, headers, options);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(serviceRequest);
        }
        return this.replace(serviceRequest, retryPolicyInstance)
                .map(serviceResponse -> toResourceResponse(serviceResponse, Trigger.class));
    } catch (Exception e) {
        logger.debug("Failure in replacing a Trigger due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
@Override
public Mono<ResourceResponse<Trigger>> deleteTrigger(String triggerLink, RequestOptions options) {
    // Acquire a per-call retry policy and run the delete under retry semantics.
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
            () -> deleteTriggerInternal(triggerLink, options, retryPolicy),
            retryPolicy);
}
private Mono<ResourceResponse<Trigger>> deleteTriggerInternal(String triggerLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    // Validate the link, build a delete request against it, then dispatch it.
    try {
        if (StringUtils.isEmpty(triggerLink)) {
            throw new IllegalArgumentException("triggerLink");
        }
        logger.debug("Deleting a Trigger. triggerLink [{}]", triggerLink);
        String resourcePath = Utils.joinPath(triggerLink, null);
        Map<String, String> headers = getRequestHeaders(options);
        RxDocumentServiceRequest serviceRequest = RxDocumentServiceRequest.create(OperationType.Delete,
                ResourceType.Trigger, resourcePath, headers, options);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(serviceRequest);
        }
        return this.delete(serviceRequest, retryPolicyInstance)
                .map(serviceResponse -> toResourceResponse(serviceResponse, Trigger.class));
    } catch (Exception e) {
        logger.debug("Failure in deleting a Trigger due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
@Override
public Mono<ResourceResponse<Trigger>> readTrigger(String triggerLink, RequestOptions options) {
    // Acquire a per-call retry policy and run the read under retry semantics.
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
            () -> readTriggerInternal(triggerLink, options, retryPolicy),
            retryPolicy);
}
private Mono<ResourceResponse<Trigger>> readTriggerInternal(String triggerLink, RequestOptions options,
                                                            DocumentClientRetryPolicy retryPolicyInstance) {
    // Validate the link, build a read request against it, then dispatch it.
    try {
        if (StringUtils.isEmpty(triggerLink)) {
            throw new IllegalArgumentException("triggerLink");
        }
        logger.debug("Reading a Trigger. triggerLink [{}]", triggerLink);
        String resourcePath = Utils.joinPath(triggerLink, null);
        Map<String, String> headers = getRequestHeaders(options);
        RxDocumentServiceRequest serviceRequest = RxDocumentServiceRequest.create(OperationType.Read,
                ResourceType.Trigger, resourcePath, headers, options);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(serviceRequest);
        }
        return this.read(serviceRequest, retryPolicyInstance)
                .map(serviceResponse -> toResourceResponse(serviceResponse, Trigger.class));
    } catch (Exception e) {
        logger.debug("Failure in reading a Trigger due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
@Override
public Flux<FeedResponse<Trigger>> readTriggers(String collectionLink, FeedOptions options) {
    // Reads the trigger feed under the given collection.
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }
    String feedPath = Utils.joinPath(collectionLink, Paths.TRIGGERS_PATH_SEGMENT);
    return readFeed(options, ResourceType.Trigger, Trigger.class, feedPath);
}
@Override
public Flux<FeedResponse<Trigger>> queryTriggers(String collectionLink, String query,
                                                 FeedOptions options) {
    // Wrap the raw query text in a SqlQuerySpec and delegate to the spec-based overload.
    return queryTriggers(collectionLink, new SqlQuerySpec(query), options);
}
@Override
public Flux<FeedResponse<Trigger>> queryTriggers(String collectionLink, SqlQuerySpec querySpec,
                                                 FeedOptions options) {
    // Delegate to the generic query pipeline for Trigger resources.
    return createQuery(collectionLink, querySpec, options, Trigger.class, ResourceType.Trigger);
}
@Override
public Mono<ResourceResponse<UserDefinedFunction>> createUserDefinedFunction(String collectionLink,
                                                                             UserDefinedFunction udf, RequestOptions options) {
    // Acquire a per-call retry policy and run the create under retry semantics.
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
            () -> createUserDefinedFunctionInternal(collectionLink, udf, options, retryPolicy),
            retryPolicy);
}
private Mono<ResourceResponse<UserDefinedFunction>> createUserDefinedFunctionInternal(String collectionLink,
                                                                                      UserDefinedFunction udf, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    // Build the create request, notify the retry policy, then dispatch and wrap the response.
    try {
        logger.debug("Creating a UserDefinedFunction. collectionLink [{}], udf id [{}]", collectionLink,
                udf.getId());
        RxDocumentServiceRequest serviceRequest = getUserDefinedFunctionRequest(collectionLink, udf, options,
                OperationType.Create);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(serviceRequest);
        }
        return this.create(serviceRequest, retryPolicyInstance)
                .map(serviceResponse -> toResourceResponse(serviceResponse, UserDefinedFunction.class));
    } catch (Exception e) {
        logger.debug("Failure in creating a UserDefinedFunction due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
@Override
public Mono<ResourceResponse<UserDefinedFunction>> upsertUserDefinedFunction(String collectionLink,
                                                                             UserDefinedFunction udf, RequestOptions options) {
    // Acquire a per-call retry policy and run the upsert under retry semantics.
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
            () -> upsertUserDefinedFunctionInternal(collectionLink, udf, options, retryPolicy),
            retryPolicy);
}
private Mono<ResourceResponse<UserDefinedFunction>> upsertUserDefinedFunctionInternal(String collectionLink,
                                                                                      UserDefinedFunction udf, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    // Build the upsert request, notify the retry policy, then dispatch and wrap the response.
    try {
        logger.debug("Upserting a UserDefinedFunction. collectionLink [{}], udf id [{}]", collectionLink,
                udf.getId());
        RxDocumentServiceRequest serviceRequest = getUserDefinedFunctionRequest(collectionLink, udf, options,
                OperationType.Upsert);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(serviceRequest);
        }
        return this.upsert(serviceRequest, retryPolicyInstance)
                .map(serviceResponse -> toResourceResponse(serviceResponse, UserDefinedFunction.class));
    } catch (Exception e) {
        logger.debug("Failure in upserting a UserDefinedFunction due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
@Override
public Mono<ResourceResponse<UserDefinedFunction>> replaceUserDefinedFunction(UserDefinedFunction udf,
                                                                              RequestOptions options) {
    // Acquire a per-call retry policy and run the replace under retry semantics.
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
            () -> replaceUserDefinedFunctionInternal(udf, options, retryPolicy),
            retryPolicy);
}
private Mono<ResourceResponse<UserDefinedFunction>> replaceUserDefinedFunctionInternal(UserDefinedFunction udf,
                                                                                       RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    // Validate, build a replace request against the UDF's self link, then dispatch it.
    try {
        if (udf == null) {
            throw new IllegalArgumentException("udf");
        }
        logger.debug("Replacing a UserDefinedFunction. udf id [{}]", udf.getId());
        validateResource(udf);
        String resourcePath = Utils.joinPath(udf.getSelfLink(), null);
        Map<String, String> headers = this.getRequestHeaders(options);
        RxDocumentServiceRequest serviceRequest = RxDocumentServiceRequest.create(OperationType.Replace,
                ResourceType.UserDefinedFunction, resourcePath, udf, headers, options);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(serviceRequest);
        }
        return this.replace(serviceRequest, retryPolicyInstance)
                .map(serviceResponse -> toResourceResponse(serviceResponse, UserDefinedFunction.class));
    } catch (Exception e) {
        logger.debug("Failure in replacing a UserDefinedFunction due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
@Override
public Mono<ResourceResponse<UserDefinedFunction>> deleteUserDefinedFunction(String udfLink,
                                                                             RequestOptions options) {
    // Acquire a per-call retry policy and run the delete under retry semantics.
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
            () -> deleteUserDefinedFunctionInternal(udfLink, options, retryPolicy),
            retryPolicy);
}
/**
 * Builds and issues the Delete request for a user defined function.
 * Validation/argument failures are surfaced as an error Mono rather than thrown synchronously.
 */
private Mono<ResourceResponse<UserDefinedFunction>> deleteUserDefinedFunctionInternal(String udfLink,
        RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(udfLink)) {
            throw new IllegalArgumentException("udfLink");
        }
        logger.debug("Deleting a UserDefinedFunction. udfLink [{}]", udfLink);
        String path = Utils.joinPath(udfLink, null);
        Map<String, String> requestHeaders = this.getRequestHeaders(options);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Delete,
            ResourceType.UserDefinedFunction, path, requestHeaders, options);
        if (retryPolicyInstance != null){
            // let the retry policy observe/adjust the request before it is dispatched
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        return this.delete(request, retryPolicyInstance).map(response -> toResourceResponse(response, UserDefinedFunction.class));
    } catch (Exception e) {
        logger.debug("Failure in deleting a UserDefinedFunction due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/** Reads the user defined function identified by {@code udfLink}. */
@Override
public Mono<ResourceResponse<UserDefinedFunction>> readUserDefinedFunction(String udfLink,
                                                                           RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> readUserDefinedFunctionInternal(udfLink, options, retryPolicy),
        retryPolicy);
}
/**
 * Builds and issues the Read request for a user defined function.
 * Validation/argument failures are surfaced as an error Mono rather than thrown synchronously.
 */
private Mono<ResourceResponse<UserDefinedFunction>> readUserDefinedFunctionInternal(String udfLink,
        RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(udfLink)) {
            throw new IllegalArgumentException("udfLink");
        }
        logger.debug("Reading a UserDefinedFunction. udfLink [{}]", udfLink);
        String path = Utils.joinPath(udfLink, null);
        Map<String, String> requestHeaders = this.getRequestHeaders(options);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Read,
            ResourceType.UserDefinedFunction, path, requestHeaders, options);
        if (retryPolicyInstance != null) {
            // let the retry policy observe/adjust the request before it is dispatched
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, UserDefinedFunction.class));
    } catch (Exception e) {
        logger.debug("Failure in reading a UserDefinedFunction due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/** Reads the feed of user defined functions under the given collection. */
@Override
public Flux<FeedResponse<UserDefinedFunction>> readUserDefinedFunctions(String collectionLink,
                                                                        FeedOptions options) {
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }
    String resourceLink = Utils.joinPath(collectionLink, Paths.USER_DEFINED_FUNCTIONS_PATH_SEGMENT);
    return this.readFeed(options, ResourceType.UserDefinedFunction, UserDefinedFunction.class, resourceLink);
}
/** Queries user defined functions with a raw query string. */
@Override
public Flux<FeedResponse<UserDefinedFunction>> queryUserDefinedFunctions(String collectionLink,
                                                                         String query, FeedOptions options) {
    SqlQuerySpec querySpec = new SqlQuerySpec(query);
    return this.queryUserDefinedFunctions(collectionLink, querySpec, options);
}
/** Queries user defined functions with a parameterized query spec. */
@Override
public Flux<FeedResponse<UserDefinedFunction>> queryUserDefinedFunctions(String collectionLink,
                                                                         SqlQuerySpec querySpec, FeedOptions options) {
    return this.createQuery(collectionLink, querySpec, options, UserDefinedFunction.class, ResourceType.UserDefinedFunction);
}
/** Reads the conflict resource identified by {@code conflictLink}. */
@Override
public Mono<ResourceResponse<Conflict>> readConflict(String conflictLink, RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> readConflictInternal(conflictLink, options, retryPolicy),
        retryPolicy);
}
/**
 * Builds and issues the Read request for a conflict resource.
 * Fix: the continuation now dispatches the request emitted by addPartitionKeyInformation
 * ({@code req}) instead of the originally captured {@code request}. This matches
 * readFeedCollectionChild, which also routes the resolved request; the two references may be
 * the same instance, but only {@code req} is guaranteed to carry the partition-key information.
 */
private Mono<ResourceResponse<Conflict>> readConflictInternal(String conflictLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(conflictLink)) {
            throw new IllegalArgumentException("conflictLink");
        }
        logger.debug("Reading a Conflict. conflictLink [{}]", conflictLink);
        String path = Utils.joinPath(conflictLink, null);
        Map<String, String> requestHeaders = getRequestHeaders(options);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Read,
            ResourceType.Conflict, path, requestHeaders, options);

        Mono<RxDocumentServiceRequest> reqObs = addPartitionKeyInformation(request, null, null, options);

        return reqObs.flatMap(req -> {
            if (retryPolicyInstance != null) {
                retryPolicyInstance.onBeforeSendRequest(req);
            }
            return this.read(req, retryPolicyInstance).map(response -> toResourceResponse(response, Conflict.class));
        });
    } catch (Exception e) {
        logger.debug("Failure in reading a Conflict due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/** Reads the feed of conflicts under the given collection. */
@Override
public Flux<FeedResponse<Conflict>> readConflicts(String collectionLink, FeedOptions options) {
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }
    String resourceLink = Utils.joinPath(collectionLink, Paths.CONFLICTS_PATH_SEGMENT);
    return this.readFeed(options, ResourceType.Conflict, Conflict.class, resourceLink);
}
/** Queries conflicts with a raw query string. */
@Override
public Flux<FeedResponse<Conflict>> queryConflicts(String collectionLink, String query,
                                                   FeedOptions options) {
    SqlQuerySpec querySpec = new SqlQuerySpec(query);
    return this.queryConflicts(collectionLink, querySpec, options);
}
/** Queries conflicts with a parameterized query spec. */
@Override
public Flux<FeedResponse<Conflict>> queryConflicts(String collectionLink, SqlQuerySpec querySpec,
                                                   FeedOptions options) {
    return this.createQuery(collectionLink, querySpec, options, Conflict.class, ResourceType.Conflict);
}
/** Deletes the conflict resource identified by {@code conflictLink}. */
@Override
public Mono<ResourceResponse<Conflict>> deleteConflict(String conflictLink, RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> deleteConflictInternal(conflictLink, options, retryPolicy),
        retryPolicy);
}
/**
 * Builds and issues the Delete request for a conflict resource.
 * Fix: the continuation now dispatches the request emitted by addPartitionKeyInformation
 * ({@code req}) instead of the originally captured {@code request}, consistent with
 * readFeedCollectionChild. Only {@code req} is guaranteed to carry the partition-key
 * information resolved by the async step.
 */
private Mono<ResourceResponse<Conflict>> deleteConflictInternal(String conflictLink, RequestOptions options,
                                                                DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(conflictLink)) {
            throw new IllegalArgumentException("conflictLink");
        }
        logger.debug("Deleting a Conflict. conflictLink [{}]", conflictLink);
        String path = Utils.joinPath(conflictLink, null);
        Map<String, String> requestHeaders = getRequestHeaders(options);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Delete,
            ResourceType.Conflict, path, requestHeaders, options);

        Mono<RxDocumentServiceRequest> reqObs = addPartitionKeyInformation(request, null, null, options);

        return reqObs.flatMap(req -> {
            if (retryPolicyInstance != null) {
                retryPolicyInstance.onBeforeSendRequest(req);
            }
            return this.delete(req, retryPolicyInstance).map(response -> toResourceResponse(response, Conflict.class));
        });
    } catch (Exception e) {
        logger.debug("Failure in deleting a Conflict due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/** Creates a new user under the given database. */
@Override
public Mono<ResourceResponse<User>> createUser(String databaseLink, User user, RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> createUserInternal(databaseLink, user, options, retryPolicy),
        retryPolicy);
}
/**
 * Builds and issues the Create request for a user resource.
 * Validation/argument failures are surfaced as an error Mono rather than thrown synchronously.
 */
private Mono<ResourceResponse<User>> createUserInternal(String databaseLink, User user, RequestOptions options, DocumentClientRetryPolicy documentClientRetryPolicy) {
    try {
        logger.debug("Creating a User. databaseLink [{}], user id [{}]", databaseLink, user.getId());
        RxDocumentServiceRequest request = getUserRequest(databaseLink, user, options, OperationType.Create);
        return this.create(request, documentClientRetryPolicy).map(response -> toResourceResponse(response, User.class));
    } catch (Exception e) {
        logger.debug("Failure in creating a User due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/** Creates or replaces (upserts) a user under the given database. */
@Override
public Mono<ResourceResponse<User>> upsertUser(String databaseLink, User user, RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> upsertUserInternal(databaseLink, user, options, retryPolicy),
        retryPolicy);
}
/**
 * Builds and issues the Upsert request for a user resource.
 * Validation/argument failures are surfaced as an error Mono rather than thrown synchronously.
 */
private Mono<ResourceResponse<User>> upsertUserInternal(String databaseLink, User user, RequestOptions options,
                                                        DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        logger.debug("Upserting a User. databaseLink [{}], user id [{}]", databaseLink, user.getId());
        RxDocumentServiceRequest request = getUserRequest(databaseLink, user, options, OperationType.Upsert);
        if (retryPolicyInstance != null) {
            // let the retry policy observe/adjust the request before it is dispatched
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        return this.upsert(request, retryPolicyInstance).map(response -> toResourceResponse(response, User.class));
    } catch (Exception e) {
        logger.debug("Failure in upserting a User due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Validates the arguments and builds a user-resource service request for the given operation.
 *
 * @throws IllegalArgumentException when {@code databaseLink} is empty or {@code user} is null
 */
private RxDocumentServiceRequest getUserRequest(String databaseLink, User user, RequestOptions options,
                                                OperationType operationType) {
    if (StringUtils.isEmpty(databaseLink)) {
        throw new IllegalArgumentException("databaseLink");
    }
    if (user == null) {
        throw new IllegalArgumentException("user");
    }
    RxDocumentClientImpl.validateResource(user);

    String path = Utils.joinPath(databaseLink, Paths.USERS_PATH_SEGMENT);
    Map<String, String> requestHeaders = getRequestHeaders(options);
    return RxDocumentServiceRequest.create(operationType, ResourceType.User, path, user,
        requestHeaders, options);
}
/** Replaces an existing user resource. */
@Override
public Mono<ResourceResponse<User>> replaceUser(User user, RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> replaceUserInternal(user, options, retryPolicy),
        retryPolicy);
}
/**
 * Builds and issues the Replace request for a user resource.
 * Validation/argument failures are surfaced as an error Mono rather than thrown synchronously.
 */
private Mono<ResourceResponse<User>> replaceUserInternal(User user, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (user == null) {
            throw new IllegalArgumentException("user");
        }
        logger.debug("Replacing a User. user id [{}]", user.getId());
        RxDocumentClientImpl.validateResource(user);
        String path = Utils.joinPath(user.getSelfLink(), null);
        Map<String, String> requestHeaders = getRequestHeaders(options);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Replace,
            ResourceType.User, path, user, requestHeaders, options);
        if (retryPolicyInstance != null) {
            // let the retry policy observe/adjust the request before it is dispatched
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        return this.replace(request, retryPolicyInstance).map(response -> toResourceResponse(response, User.class));
    } catch (Exception e) {
        logger.debug("Failure in replacing a User due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Deletes the user resource identified by {@code userLink}.
 * Fix: added the {@code @Override} annotation for consistency — every sibling API method
 * in this class (readUser, replaceUser, deleteConflict, ...) carries it, and it lets the
 * compiler verify the signature against the interface.
 */
@Override
public Mono<ResourceResponse<User>> deleteUser(String userLink, RequestOptions options) {
    DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(() -> deleteUserInternal(userLink, options, retryPolicyInstance), retryPolicyInstance);
}
/**
 * Builds and issues the Delete request for a user resource.
 * Validation/argument failures are surfaced as an error Mono rather than thrown synchronously.
 */
private Mono<ResourceResponse<User>> deleteUserInternal(String userLink, RequestOptions options,
                                                        DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(userLink)) {
            throw new IllegalArgumentException("userLink");
        }
        logger.debug("Deleting a User. userLink [{}]", userLink);
        String path = Utils.joinPath(userLink, null);
        Map<String, String> requestHeaders = getRequestHeaders(options);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Delete,
            ResourceType.User, path, requestHeaders, options);
        if (retryPolicyInstance != null) {
            // let the retry policy observe/adjust the request before it is dispatched
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        return this.delete(request, retryPolicyInstance).map(response -> toResourceResponse(response, User.class));
    } catch (Exception e) {
        logger.debug("Failure in deleting a User due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/** Reads the user resource identified by {@code userLink}. */
@Override
public Mono<ResourceResponse<User>> readUser(String userLink, RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> readUserInternal(userLink, options, retryPolicy),
        retryPolicy);
}
/**
 * Builds and issues the Read request for a user resource.
 * Validation/argument failures are surfaced as an error Mono rather than thrown synchronously.
 */
private Mono<ResourceResponse<User>> readUserInternal(String userLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(userLink)) {
            throw new IllegalArgumentException("userLink");
        }
        logger.debug("Reading a User. userLink [{}]", userLink);
        String path = Utils.joinPath(userLink, null);
        Map<String, String> requestHeaders = getRequestHeaders(options);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Read,
            ResourceType.User, path, requestHeaders, options);
        if (retryPolicyInstance != null) {
            // let the retry policy observe/adjust the request before it is dispatched
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, User.class));
    } catch (Exception e) {
        logger.debug("Failure in reading a User due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/** Reads the feed of users under the given database. */
@Override
public Flux<FeedResponse<User>> readUsers(String databaseLink, FeedOptions options) {
    if (StringUtils.isEmpty(databaseLink)) {
        throw new IllegalArgumentException("databaseLink");
    }
    String resourceLink = Utils.joinPath(databaseLink, Paths.USERS_PATH_SEGMENT);
    return this.readFeed(options, ResourceType.User, User.class, resourceLink);
}
/** Queries users with a raw query string. */
@Override
public Flux<FeedResponse<User>> queryUsers(String databaseLink, String query, FeedOptions options) {
    SqlQuerySpec querySpec = new SqlQuerySpec(query);
    return this.queryUsers(databaseLink, querySpec, options);
}
/** Queries users with a parameterized query spec. */
@Override
public Flux<FeedResponse<User>> queryUsers(String databaseLink, SqlQuerySpec querySpec,
                                           FeedOptions options) {
    return this.createQuery(databaseLink, querySpec, options, User.class, ResourceType.User);
}
/**
 * Creates a permission under the given user.
 * Fix: the same retry-policy instance is now passed to both the work factory and the retry
 * driver. Previously the driver received a second, freshly created policy from
 * {@code resetSessionTokenRetryPolicy.getRequestPolicy()}, so the policy that observed the
 * request ({@code onBeforeSendRequest} in createPermissionInternal) was not the one deciding
 * retries — inconsistent with every other wrapper in this class (e.g. createUser).
 */
@Override
public Mono<ResourceResponse<Permission>> createPermission(String userLink, Permission permission,
                                                           RequestOptions options) {
    DocumentClientRetryPolicy documentClientRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> createPermissionInternal(userLink, permission, options, documentClientRetryPolicy),
        documentClientRetryPolicy);
}
/**
 * Builds and issues the Create request for a permission resource.
 * Validation/argument failures are surfaced as an error Mono rather than thrown synchronously.
 */
private Mono<ResourceResponse<Permission>> createPermissionInternal(String userLink, Permission permission,
                                                                    RequestOptions options, DocumentClientRetryPolicy documentClientRetryPolicy) {
    try {
        logger.debug("Creating a Permission. userLink [{}], permission id [{}]", userLink, permission.getId());
        RxDocumentServiceRequest request = getPermissionRequest(userLink, permission, options,
            OperationType.Create);
        return this.create(request, documentClientRetryPolicy).map(response -> toResourceResponse(response, Permission.class));
    } catch (Exception e) {
        logger.debug("Failure in creating a Permission due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/** Creates or replaces (upserts) a permission under the given user. */
@Override
public Mono<ResourceResponse<Permission>> upsertPermission(String userLink, Permission permission,
                                                           RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> upsertPermissionInternal(userLink, permission, options, retryPolicy),
        retryPolicy);
}
/**
 * Builds and issues the Upsert request for a permission resource.
 * Validation/argument failures are surfaced as an error Mono rather than thrown synchronously.
 */
private Mono<ResourceResponse<Permission>> upsertPermissionInternal(String userLink, Permission permission,
                                                                    RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        logger.debug("Upserting a Permission. userLink [{}], permission id [{}]", userLink, permission.getId());
        RxDocumentServiceRequest request = getPermissionRequest(userLink, permission, options,
            OperationType.Upsert);
        if (retryPolicyInstance != null) {
            // let the retry policy observe/adjust the request before it is dispatched
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        return this.upsert(request, retryPolicyInstance).map(response -> toResourceResponse(response, Permission.class));
    } catch (Exception e) {
        logger.debug("Failure in upserting a Permission due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Validates the arguments and builds a permission-resource service request for the given operation.
 *
 * @throws IllegalArgumentException when {@code userLink} is empty or {@code permission} is null
 */
private RxDocumentServiceRequest getPermissionRequest(String userLink, Permission permission,
                                                      RequestOptions options, OperationType operationType) {
    if (StringUtils.isEmpty(userLink)) {
        throw new IllegalArgumentException("userLink");
    }
    if (permission == null) {
        throw new IllegalArgumentException("permission");
    }
    RxDocumentClientImpl.validateResource(permission);

    String path = Utils.joinPath(userLink, Paths.PERMISSIONS_PATH_SEGMENT);
    Map<String, String> requestHeaders = getRequestHeaders(options);
    return RxDocumentServiceRequest.create(operationType, ResourceType.Permission, path,
        permission, requestHeaders, options);
}
/** Replaces an existing permission resource. */
@Override
public Mono<ResourceResponse<Permission>> replacePermission(Permission permission, RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> replacePermissionInternal(permission, options, retryPolicy),
        retryPolicy);
}
/**
 * Builds and issues the Replace request for a permission resource.
 * Validation/argument failures are surfaced as an error Mono rather than thrown synchronously.
 */
private Mono<ResourceResponse<Permission>> replacePermissionInternal(Permission permission, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (permission == null) {
            throw new IllegalArgumentException("permission");
        }
        logger.debug("Replacing a Permission. permission id [{}]", permission.getId());
        RxDocumentClientImpl.validateResource(permission);
        String path = Utils.joinPath(permission.getSelfLink(), null);
        Map<String, String> requestHeaders = getRequestHeaders(options);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Replace,
            ResourceType.Permission, path, permission, requestHeaders, options);
        if (retryPolicyInstance != null) {
            // let the retry policy observe/adjust the request before it is dispatched
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        return this.replace(request, retryPolicyInstance).map(response -> toResourceResponse(response, Permission.class));
    } catch (Exception e) {
        logger.debug("Failure in replacing a Permission due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/** Deletes the permission resource identified by {@code permissionLink}. */
@Override
public Mono<ResourceResponse<Permission>> deletePermission(String permissionLink, RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> deletePermissionInternal(permissionLink, options, retryPolicy),
        retryPolicy);
}
/**
 * Builds and issues the Delete request for a permission resource.
 * Validation/argument failures are surfaced as an error Mono rather than thrown synchronously.
 */
private Mono<ResourceResponse<Permission>> deletePermissionInternal(String permissionLink, RequestOptions options,
                                                                    DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(permissionLink)) {
            throw new IllegalArgumentException("permissionLink");
        }
        logger.debug("Deleting a Permission. permissionLink [{}]", permissionLink);
        String path = Utils.joinPath(permissionLink, null);
        Map<String, String> requestHeaders = getRequestHeaders(options);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Delete,
            ResourceType.Permission, path, requestHeaders, options);
        if (retryPolicyInstance != null) {
            // let the retry policy observe/adjust the request before it is dispatched
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        return this.delete(request, retryPolicyInstance).map(response -> toResourceResponse(response, Permission.class));
    } catch (Exception e) {
        logger.debug("Failure in deleting a Permission due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/** Reads the permission resource identified by {@code permissionLink}. */
@Override
public Mono<ResourceResponse<Permission>> readPermission(String permissionLink, RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> readPermissionInternal(permissionLink, options, retryPolicy),
        retryPolicy);
}
/**
 * Builds and issues the Read request for a permission resource.
 * Validation/argument failures are surfaced as an error Mono rather than thrown synchronously.
 */
private Mono<ResourceResponse<Permission>> readPermissionInternal(String permissionLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance ) {
    try {
        if (StringUtils.isEmpty(permissionLink)) {
            throw new IllegalArgumentException("permissionLink");
        }
        logger.debug("Reading a Permission. permissionLink [{}]", permissionLink);
        String path = Utils.joinPath(permissionLink, null);
        Map<String, String> requestHeaders = getRequestHeaders(options);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Read,
            ResourceType.Permission, path, requestHeaders, options);
        if (retryPolicyInstance != null) {
            // let the retry policy observe/adjust the request before it is dispatched
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, Permission.class));
    } catch (Exception e) {
        logger.debug("Failure in reading a Permission due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/** Reads the feed of permissions under the given user. */
@Override
public Flux<FeedResponse<Permission>> readPermissions(String userLink, FeedOptions options) {
    if (StringUtils.isEmpty(userLink)) {
        throw new IllegalArgumentException("userLink");
    }
    String resourceLink = Utils.joinPath(userLink, Paths.PERMISSIONS_PATH_SEGMENT);
    return this.readFeed(options, ResourceType.Permission, Permission.class, resourceLink);
}
/** Queries permissions with a raw query string. */
@Override
public Flux<FeedResponse<Permission>> queryPermissions(String userLink, String query,
                                                       FeedOptions options) {
    SqlQuerySpec querySpec = new SqlQuerySpec(query);
    return this.queryPermissions(userLink, querySpec, options);
}
/** Queries permissions with a parameterized query spec. */
@Override
public Flux<FeedResponse<Permission>> queryPermissions(String userLink, SqlQuerySpec querySpec,
                                                       FeedOptions options) {
    return this.createQuery(userLink, querySpec, options, Permission.class, ResourceType.Permission);
}
/** Replaces an existing offer (throughput) resource. */
@Override
public Mono<ResourceResponse<Offer>> replaceOffer(Offer offer) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> replaceOfferInternal(offer, retryPolicy),
        retryPolicy);
}
/**
 * Builds and issues the Replace request for an offer resource. Note: offers take no
 * RequestOptions, so no per-request headers are attached.
 * Validation/argument failures are surfaced as an error Mono rather than thrown synchronously.
 */
private Mono<ResourceResponse<Offer>> replaceOfferInternal(Offer offer, DocumentClientRetryPolicy documentClientRetryPolicy) {
    try {
        if (offer == null) {
            throw new IllegalArgumentException("offer");
        }
        logger.debug("Replacing an Offer. offer id [{}]", offer.getId());
        RxDocumentClientImpl.validateResource(offer);
        String path = Utils.joinPath(offer.getSelfLink(), null);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Replace,
            ResourceType.Offer, path, offer, null, null);
        return this.replace(request, documentClientRetryPolicy).map(response -> toResourceResponse(response, Offer.class));
    } catch (Exception e) {
        logger.debug("Failure in replacing an Offer due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/** Reads the offer resource identified by {@code offerLink}. */
@Override
public Mono<ResourceResponse<Offer>> readOffer(String offerLink) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> readOfferInternal(offerLink, retryPolicy),
        retryPolicy);
}
/**
 * Builds and issues the Read request for an offer resource. The explicit HashMap cast
 * disambiguates the overloaded create(...) factory for the null headers argument.
 * Validation/argument failures are surfaced as an error Mono rather than thrown synchronously.
 */
private Mono<ResourceResponse<Offer>> readOfferInternal(String offerLink, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(offerLink)) {
            throw new IllegalArgumentException("offerLink");
        }
        logger.debug("Reading an Offer. offerLink [{}]", offerLink);
        String path = Utils.joinPath(offerLink, null);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Read,
            ResourceType.Offer, path, (HashMap<String, String>)null, null);
        if (retryPolicyInstance != null) {
            // let the retry policy observe/adjust the request before it is dispatched
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, Offer.class));
    } catch (Exception e) {
        logger.debug("Failure in reading an Offer due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/** Reads the feed of all offers in the account. */
@Override
public Flux<FeedResponse<Offer>> readOffers(FeedOptions options) {
    String resourceLink = Utils.joinPath(Paths.OFFERS_PATH_SEGMENT, null);
    return this.readFeed(options, ResourceType.Offer, Offer.class, resourceLink);
}
/**
 * Pages through a feed of collection-child resources (documents, attachments, ...).
 * Unlike {@link #readFeed}, each page execution first resolves the owning collection so that
 * partition-key information can be attached to the request before dispatch; the resolved
 * request (req) emitted by addPartitionKeyInformation is the one that is read.
 */
private <T extends Resource> Flux<FeedResponse<T>> readFeedCollectionChild(FeedOptions options, ResourceType resourceType,
                                                                           Class<T> klass, String resourceLink) {
    if (options == null) {
        options = new FeedOptions();
    }

    // -1 means "server default page size"
    int maxPageSize = options.maxItemCount() != null ? options.maxItemCount() : -1;

    final FeedOptions finalFeedOptions = options;
    RequestOptions requestOptions = new RequestOptions();
    requestOptions.setPartitionKey(options.partitionKey());

    // Builds one page request; the paginator supplies the continuation token and page size.
    BiFunction<String, Integer, RxDocumentServiceRequest> createRequestFunc = (continuationToken, pageSize) -> {
        Map<String, String> requestHeaders = new HashMap<>();
        if (continuationToken != null) {
            requestHeaders.put(HttpConstants.HttpHeaders.CONTINUATION, continuationToken);
        }
        requestHeaders.put(HttpConstants.HttpHeaders.PAGE_SIZE, Integer.toString(pageSize));
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.ReadFeed,
            resourceType, resourceLink, requestHeaders, finalFeedOptions);
        return request;
    };

    // Executes one page request with a fresh retry policy per attempt chain.
    Function<RxDocumentServiceRequest, Mono<FeedResponse<T>>> executeFunc = request -> {
        return ObservableHelper.inlineIfPossibleAsObs(() -> {
            Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = this.collectionCache.resolveCollectionAsync(null, request);
            Mono<RxDocumentServiceRequest> requestObs = this.addPartitionKeyInformation(request, null, null, requestOptions, collectionObs);

            return requestObs.flatMap(req -> this.readFeed(req)
                .map(response -> toFeedResponsePage(response, klass)));
        }, this.resetSessionTokenRetryPolicy.getRequestPolicy());
    };

    return Paginator.getPaginatedQueryResultAsObservable(options, createRequestFunc, executeFunc, klass, maxPageSize);
}
/**
 * Pages through a feed of top-level resources (databases, users, offers, ...).
 * No collection resolution or partition-key handling is needed at this level;
 * see {@link #readFeedCollectionChild} for collection-scoped resources.
 */
private <T extends Resource> Flux<FeedResponse<T>> readFeed(FeedOptions options, ResourceType resourceType, Class<T> klass, String resourceLink) {
    if (options == null) {
        options = new FeedOptions();
    }

    // -1 means "server default page size"
    int maxPageSize = options.maxItemCount() != null ? options.maxItemCount() : -1;
    final FeedOptions finalFeedOptions = options;

    // Builds one page request; the paginator supplies the continuation token and page size.
    BiFunction<String, Integer, RxDocumentServiceRequest> createRequestFunc = (continuationToken, pageSize) -> {
        Map<String, String> requestHeaders = new HashMap<>();
        if (continuationToken != null) {
            requestHeaders.put(HttpConstants.HttpHeaders.CONTINUATION, continuationToken);
        }
        requestHeaders.put(HttpConstants.HttpHeaders.PAGE_SIZE, Integer.toString(pageSize));
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.ReadFeed,
            resourceType, resourceLink, requestHeaders, finalFeedOptions);
        return request;
    };

    // Executes one page request with a fresh retry policy per attempt chain.
    Function<RxDocumentServiceRequest, Mono<FeedResponse<T>>> executeFunc = request -> {
        return ObservableHelper.inlineIfPossibleAsObs(() -> readFeed(request).map(response -> toFeedResponsePage(response, klass)),
            this.resetSessionTokenRetryPolicy.getRequestPolicy());
    };

    return Paginator.getPaginatedQueryResultAsObservable(options, createRequestFunc, executeFunc, klass, maxPageSize);
}
/** Queries offers with a raw query string. */
@Override
public Flux<FeedResponse<Offer>> queryOffers(String query, FeedOptions options) {
    SqlQuerySpec querySpec = new SqlQuerySpec(query);
    return this.queryOffers(querySpec, options);
}
/** Queries offers with a parameterized query spec; offers are account-scoped, so no parent link. */
@Override
public Flux<FeedResponse<Offer>> queryOffers(SqlQuerySpec querySpec, FeedOptions options) {
    return this.createQuery(null, querySpec, options, Offer.class, ResourceType.Offer);
}
/** Reads the database-account metadata from the service endpoint. */
@Override
public Mono<DatabaseAccount> getDatabaseAccount() {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> getDatabaseAccountInternal(retryPolicy),
        retryPolicy);
}
/**
 * Builds and issues the Read request for the database account (empty resource path).
 * The explicit HashMap cast disambiguates the overloaded create(...) factory.
 * Failures are surfaced as an error Mono rather than thrown synchronously.
 */
private Mono<DatabaseAccount> getDatabaseAccountInternal(DocumentClientRetryPolicy documentClientRetryPolicy) {
    try {
        logger.debug("Getting Database Account");
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Read,
            ResourceType.DatabaseAccount, "",
            (HashMap<String, String>) null,
            null);
        return this.read(request, documentClientRetryPolicy).map(response -> toDatabaseAccount(response));
    } catch (Exception e) {
        logger.debug("Failure in getting Database Account due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/** Returns the session container used for session-consistency bookkeeping. */
public Object getSession() {
    return sessionContainer;
}
/** Installs the session container; the argument must be a {@code SessionContainer}. */
public void setSession(Object sessionContainer) {
    SessionContainer container = (SessionContainer) sessionContainer;
    this.sessionContainer = container;
}
/** Returns the partition-key-range cache backing routing decisions. */
public RxPartitionKeyRangeCache getPartitionKeyRangeCache() {
    return this.partitionKeyRangeCache;
}
/**
 * Reads the database account from a specific endpoint (used by the global endpoint manager
 * during region discovery). Deferred so headers are populated per subscription.
 * Side effect: refreshes {@code useMultipleWriteLocations} from the returned account,
 * combining the connection-policy setting with the account's multi-write capability.
 * Failures are logged as a warning and propagated.
 */
public Flux<DatabaseAccount> getDatabaseAccountFromEndpoint(URI endpoint) {
    return Flux.defer(() -> {
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Read,
            ResourceType.DatabaseAccount, "", null, (Object) null);
        this.populateHeaders(request, RequestVerb.GET);

        // Route to the requested regional endpoint rather than the default service endpoint.
        request.setEndpointOverride(endpoint);
        return this.gatewayProxy.processMessage(request).doOnError(e -> {
            String message = String.format("Failed to retrieve database account information. %s",
                e.getCause() != null
                    ? e.getCause().toString()
                    : e.toString());
            logger.warn(message);
        }).map(rsp -> rsp.getResource(DatabaseAccount.class))
            .doOnNext(databaseAccount -> {
                this.useMultipleWriteLocations = this.connectionPolicy.getUsingMultipleWriteLocations()
                    && BridgeInternal.isEnableMultipleWriteLocations(databaseAccount);
            });
    });
}
/**
 * Certain requests must be routed through gateway even when the client connectivity mode is direct.
 *
 * @param request the request to route
 * @return the gateway proxy or the direct store model, per resource/operation type
 */
private RxStoreModel getStoreProxy(RxDocumentServiceRequest request) {
    // Explicit per-request override always wins.
    if (request.UseGatewayMode) {
        return this.gatewayProxy;
    }

    ResourceType resourceType = request.getResourceType();
    OperationType operationType = request.getOperationType();

    // Resources the backend only serves through gateway, regardless of operation:
    // offers, partition key ranges, and scripts (except actual script execution).
    boolean gatewayOnlyResource =
        resourceType == ResourceType.Offer
            || (resourceType.isScript() && operationType != OperationType.ExecuteJavaScript)
            || resourceType == ResourceType.PartitionKeyRange;
    if (gatewayOnlyResource) {
        return this.gatewayProxy;
    }

    switch (operationType) {
        case Create:
        case Upsert: {
            boolean viaGateway = resourceType == ResourceType.Database
                || resourceType == ResourceType.User
                || resourceType == ResourceType.DocumentCollection
                || resourceType == ResourceType.Permission;
            return viaGateway ? this.gatewayProxy : this.storeModel;
        }
        case Delete: {
            boolean viaGateway = resourceType == ResourceType.Database
                || resourceType == ResourceType.User
                || resourceType == ResourceType.DocumentCollection;
            return viaGateway ? this.gatewayProxy : this.storeModel;
        }
        case Replace:
        case Read: {
            return resourceType == ResourceType.DocumentCollection ? this.gatewayProxy : this.storeModel;
        }
        default: {
            // Cross-partition queries (no explicit partition key range) need gateway fan-out.
            if ((operationType == OperationType.Query || operationType == OperationType.SqlQuery)
                && Utils.isCollectionChild(resourceType)
                && request.getPartitionKeyRangeIdentity() == null) {
                return this.gatewayProxy;
            }
            return this.storeModel;
        }
    }
}
/**
 * Shuts the client down: closes the global endpoint manager, the store client factory,
 * and finally the shared HTTP client, logging (but not propagating) shutdown failures.
 * The order mirrors the dependency chain — consumers are closed before the transport.
 */
@Override
public void close() {
    logger.info("Shutting down ...");
    logger.info("Closing Global Endpoint Manager ...");
    LifeCycleUtils.closeQuietly(this.globalEndpointManager);
    logger.info("Closing StoreClientFactory ...");
    LifeCycleUtils.closeQuietly(this.storeClientFactory);
    logger.info("Shutting down reactorHttpClient ...");

    try {
        this.reactorHttpClient.shutdown();
    } catch (Exception e) {
        // best-effort shutdown; do not mask earlier cleanup by rethrowing
        logger.warn("shutting down reactorHttpClient failed", e);
    }
    logger.info("Shutting down completed.");
}
}

class RxDocumentClientImpl implements AsyncDocumentClient, IAuthorizationTokenProvider {
// Shared Jackson mapper for ad-hoc JSON serialization (e.g. stored-procedure params).
private final static ObjectMapper mapper = Utils.getSimpleObjectMapper();
private final Logger logger = LoggerFactory.getLogger(RxDocumentClientImpl.class);
// Credential material: the constructors resolve exactly one effective auth source out of
// masterKeyOrResourceToken / cosmosKeyCredential / resourceTokensMap (see ctor branch order).
private final String masterKeyOrResourceToken;
private final URI serviceEndpoint;
private final ConnectionPolicy connectionPolicy;
private final ConsistencyLevel consistencyLevel;
private final BaseAuthorizationTokenProvider authorizationTokenProvider;
private final UserAgentContainer userAgentContainer;
// true when masterKeyOrResourceToken holds a resource token rather than a master key.
private final boolean hasAuthKeyResourceToken;
private final Configs configs;
// when true, the gateway HTTP client is shared across client instances (see httpClient()).
private final boolean connectionSharingAcrossClientsEnabled;
private CosmosKeyCredential cosmosKeyCredential;
private CosmosAuthorizationTokenResolver cosmosAuthorizationTokenResolver;
private SessionContainer sessionContainer;
// first resource token seen in the permission feed; kept for default authorization.
private String firstResourceTokenFromPermissionFeed = StringUtils.EMPTY;
private RxClientCollectionCache collectionCache;
private RxStoreModel gatewayProxy;
private RxStoreModel storeModel;
private GlobalAddressResolver addressResolver;
private RxPartitionKeyRangeCache partitionKeyRangeCache;
// resourceIdOrFullName -> (partition key, resource token) pairs built from the permission feed.
private Map<String, List<PartitionKeyAndResourceTokenPair>> resourceTokensMap;
private IRetryPolicyFactory resetSessionTokenRetryPolicy;
/**
 * Compatibility mode: Allows to specify compatibility mode used by client when
 * making query requests. Should be removed when application/sql is no longer
 * supported.
 */
private final QueryCompatibilityMode queryCompatibilityMode = QueryCompatibilityMode.Default;
private final HttpClient reactorHttpClient;
private final GlobalEndpointManager globalEndpointManager;
private final RetryPolicy retryPolicy;
// derived from the account + connection policy in initializeGatewayConfigurationReader();
// volatile because it is read on request paths (getRequestHeaders) after init.
private volatile boolean useMultipleWriteLocations;
// direct-connectivity members; populated only when ConnectionMode != GATEWAY.
private StoreClientFactory storeClientFactory;
private GatewayServiceConfigurationReader gatewayConfigurationReader;
/**
 * Creates a client that additionally resolves per-request authorization through the
 * supplied {@link CosmosAuthorizationTokenResolver}; all other setup is delegated to
 * the permission-feed constructor below.
 */
public RxDocumentClientImpl(URI serviceEndpoint,
String masterKeyOrResourceToken,
List<Permission> permissionFeed,
ConnectionPolicy connectionPolicy,
ConsistencyLevel consistencyLevel,
Configs configs,
CosmosAuthorizationTokenResolver cosmosAuthorizationTokenResolver,
CosmosKeyCredential cosmosKeyCredential,
boolean sessionCapturingOverride,
boolean connectionSharingAcrossClientsEnabled) {
this(serviceEndpoint, masterKeyOrResourceToken, permissionFeed, connectionPolicy, consistencyLevel, configs, cosmosKeyCredential, sessionCapturingOverride, connectionSharingAcrossClientsEnabled);
// set after delegation: the resolver augments, not replaces, the base credential setup.
this.cosmosAuthorizationTokenResolver = cosmosAuthorizationTokenResolver;
}
/**
 * Builds the resource-token map from a permission feed (after delegating the core
 * setup to the main constructor). Each permission's resource link is parsed and its
 * (partition key, token) pair is indexed by resource id / full name so request
 * authorization can later pick the matching token.
 *
 * @throws IllegalArgumentException if a permission has an empty or unparseable
 *         resource link, or if the feed yields no usable tokens.
 */
private RxDocumentClientImpl(URI serviceEndpoint,
String masterKeyOrResourceToken,
List<Permission> permissionFeed,
ConnectionPolicy connectionPolicy,
ConsistencyLevel consistencyLevel,
Configs configs,
CosmosKeyCredential cosmosKeyCredential,
boolean sessionCapturingOverrideEnabled,
boolean connectionSharingAcrossClientsEnabled) {
this(serviceEndpoint, masterKeyOrResourceToken, connectionPolicy, consistencyLevel, configs, cosmosKeyCredential, sessionCapturingOverrideEnabled, connectionSharingAcrossClientsEnabled);
if (permissionFeed != null && permissionFeed.size() > 0) {
this.resourceTokensMap = new HashMap<>();
for (Permission permission : permissionFeed) {
// sanity check: the link must contain at least one path segment
String[] segments = StringUtils.split(permission.getResourceLink(),
Constants.Properties.PATH_SEPARATOR.charAt(0));
if (segments.length <= 0) {
throw new IllegalArgumentException("resourceLink");
}
List<PartitionKeyAndResourceTokenPair> partitionKeyAndResourceTokenPairs = null;
PathInfo pathInfo = new PathInfo(false, StringUtils.EMPTY, StringUtils.EMPTY, false);
if (!PathsHelper.tryParsePathSegments(permission.getResourceLink(), pathInfo, null)) {
throw new IllegalArgumentException(permission.getResourceLink());
}
// group tokens by the resource they grant access to
partitionKeyAndResourceTokenPairs = resourceTokensMap.get(pathInfo.resourceIdOrFullName);
if (partitionKeyAndResourceTokenPairs == null) {
partitionKeyAndResourceTokenPairs = new ArrayList<>();
this.resourceTokensMap.put(pathInfo.resourceIdOrFullName, partitionKeyAndResourceTokenPairs);
}
// a permission without a partition key maps to PartitionKeyInternal.Empty
PartitionKey partitionKey = permission.getResourcePartitionKey();
partitionKeyAndResourceTokenPairs.add(new PartitionKeyAndResourceTokenPair(
partitionKey != null ? BridgeInternal.getPartitionKeyInternal(partitionKey) : PartitionKeyInternal.Empty,
permission.getToken()));
logger.debug("Initializing resource token map , with map key [{}] , partition key [{}] and resource token",
pathInfo.resourceIdOrFullName, partitionKey != null ? partitionKey.toString() : null, permission.getToken());
}
if(this.resourceTokensMap.isEmpty()) {
throw new IllegalArgumentException("permissionFeed");
}
// remember the first real resource token as the fallback/default credential
String firstToken = permissionFeed.get(0).getToken();
if(ResourceTokenAuthorizationHelper.isResourceToken(firstToken)) {
this.firstResourceTokenFromPermissionFeed = firstToken;
}
}
}
/**
 * Main constructor: wires credentials, connection policy, session container, HTTP
 * client, global endpoint manager and retry policy. Network-dependent setup is
 * deliberately deferred to {@link #init()}.
 *
 * Credential resolution order (first match wins):
 *  1. explicit CosmosKeyCredential,
 *  2. masterKeyOrResourceToken that IS a resource token,
 *  3. masterKeyOrResourceToken that is a master key (wrapped in a CosmosKeyCredential),
 *  4. none (token-resolver / permission-feed based auth).
 */
RxDocumentClientImpl(URI serviceEndpoint,
String masterKeyOrResourceToken,
ConnectionPolicy connectionPolicy,
ConsistencyLevel consistencyLevel,
Configs configs,
CosmosKeyCredential cosmosKeyCredential,
boolean sessionCapturingOverrideEnabled,
boolean connectionSharingAcrossClientsEnabled) {
logger.info(
"Initializing DocumentClient with"
+ " serviceEndpoint [{}], connectionPolicy [{}], consistencyLevel [{}], directModeProtocol [{}]",
serviceEndpoint, connectionPolicy, consistencyLevel, configs.getProtocol());
this.connectionSharingAcrossClientsEnabled = connectionSharingAcrossClientsEnabled;
this.configs = configs;
this.masterKeyOrResourceToken = masterKeyOrResourceToken;
this.serviceEndpoint = serviceEndpoint;
this.cosmosKeyCredential = cosmosKeyCredential;
if (this.cosmosKeyCredential != null) {
hasAuthKeyResourceToken = false;
this.authorizationTokenProvider = new BaseAuthorizationTokenProvider(this.cosmosKeyCredential);
} else if (masterKeyOrResourceToken != null && ResourceTokenAuthorizationHelper.isResourceToken(masterKeyOrResourceToken)) {
// resource tokens are used as-is; no token provider needed
this.authorizationTokenProvider = null;
hasAuthKeyResourceToken = true;
} else if(masterKeyOrResourceToken != null && !ResourceTokenAuthorizationHelper.isResourceToken(masterKeyOrResourceToken)){
this.cosmosKeyCredential = new CosmosKeyCredential(this.masterKeyOrResourceToken);
hasAuthKeyResourceToken = false;
this.authorizationTokenProvider = new BaseAuthorizationTokenProvider(this.cosmosKeyCredential);
} else {
hasAuthKeyResourceToken = false;
this.authorizationTokenProvider = null;
}
if (connectionPolicy != null) {
this.connectionPolicy = connectionPolicy;
} else {
this.connectionPolicy = new ConnectionPolicy();
}
// session tokens are only captured for SESSION consistency, unless explicitly overridden
boolean disableSessionCapturing = (ConsistencyLevel.SESSION != consistencyLevel && !sessionCapturingOverrideEnabled);
this.sessionContainer = new SessionContainer(this.serviceEndpoint.getHost(), disableSessionCapturing);
this.consistencyLevel = consistencyLevel;
this.userAgentContainer = new UserAgentContainer();
String userAgentSuffix = this.connectionPolicy.getUserAgentSuffix();
if (userAgentSuffix != null && userAgentSuffix.length() > 0) {
userAgentContainer.setSuffix(userAgentSuffix);
}
this.reactorHttpClient = httpClient();
this.globalEndpointManager = new GlobalEndpointManager(asDatabaseAccountManagerInternal(), this.connectionPolicy, /**/configs);
this.retryPolicy = new RetryPolicy(this.globalEndpointManager, this.connectionPolicy);
this.resetSessionTokenRetryPolicy = retryPolicy;
}
/**
 * Creates the gateway service-configuration reader and derives whether multi-write
 * is in effect (policy opts in AND the account supports it). Must run after
 * {@code globalEndpointManager.init()} (see {@link #init()}), which caches the
 * database account this method reads.
 */
private void initializeGatewayConfigurationReader() {
this.gatewayConfigurationReader = new GatewayServiceConfigurationReader(this.globalEndpointManager);
DatabaseAccount databaseAccount = this.globalEndpointManager.getLatestDatabaseAccount();
// sanity check: endpoint manager init must already have fetched the account
assert(databaseAccount != null);
this.useMultipleWriteLocations = this.connectionPolicy.isUsingMultipleWriteLocations() && BridgeInternal.isEnableMultipleWriteLocations(databaseAccount);
}
/**
 * Completes client initialization (separate from the constructor because it talks
 * to the service). Ordering is significant: the gateway proxy must exist before the
 * endpoint manager fetches the account, which must happen before the gateway
 * configuration reader runs; the caches are then layered on top.
 */
public void init() {
this.gatewayProxy = createRxGatewayProxy(this.sessionContainer,
this.consistencyLevel,
this.queryCompatibilityMode,
this.userAgentContainer,
this.globalEndpointManager,
this.reactorHttpClient);
this.globalEndpointManager.init();
this.initializeGatewayConfigurationReader();
this.collectionCache = new RxClientCollectionCache(this.sessionContainer, this.gatewayProxy, this, this.retryPolicy);
// from here on, session-token resets go through the collection-aware retry factory
this.resetSessionTokenRetryPolicy = new ResetSessionTokenRetryPolicyFactory(this.sessionContainer, this.collectionCache, this.retryPolicy);
this.partitionKeyRangeCache = new RxPartitionKeyRangeCache(RxDocumentClientImpl.this,
collectionCache);
if (this.connectionPolicy.getConnectionMode() == ConnectionMode.GATEWAY) {
this.storeModel = this.gatewayProxy;
} else {
this.initializeDirectConnectivity();
}
}
/**
 * Sets up direct (TCP) connectivity: a store client factory, the global address
 * resolver used to locate replicas, and the server store model. Only called from
 * {@link #init()} when the connection mode is not GATEWAY.
 */
private void initializeDirectConnectivity() {
this.storeClientFactory = new StoreClientFactory(
this.configs,
this.connectionPolicy.getRequestTimeout(),
0,
this.userAgentContainer,
this.connectionSharingAcrossClientsEnabled
);
this.addressResolver = new GlobalAddressResolver(
this.reactorHttpClient,
this.globalEndpointManager,
this.configs.getProtocol(),
this,
this.collectionCache,
this.partitionKeyRangeCache,
userAgentContainer,
null,
this.connectionPolicy);
this.createStoreModel(true);
}
/**
 * Adapts this client to the {@link DatabaseAccountManagerInternal} interface the
 * GlobalEndpointManager consumes; each call simply delegates back to this instance.
 */
DatabaseAccountManagerInternal asDatabaseAccountManagerInternal() {
return new DatabaseAccountManagerInternal() {
@Override
public URI getServiceEndpoint() {
return RxDocumentClientImpl.this.getServiceEndpoint();
}
@Override
public Flux<DatabaseAccount> getDatabaseAccountFromEndpoint(URI endpoint) {
logger.info("Getting database account endpoint from {}", endpoint);
return RxDocumentClientImpl.this.getDatabaseAccountFromEndpoint(endpoint);
}
@Override
public ConnectionPolicy getConnectionPolicy() {
return RxDocumentClientImpl.this.getConnectionPolicy();
}
};
}
/**
 * Factory for the gateway store model; package-private and overridable so tests can
 * substitute a different proxy. Pure delegation to the RxGatewayStoreModel constructor.
 */
RxGatewayStoreModel createRxGatewayProxy(ISessionContainer sessionContainer,
ConsistencyLevel consistencyLevel,
QueryCompatibilityMode queryCompatibilityMode,
UserAgentContainer userAgentContainer,
GlobalEndpointManager globalEndpointManager,
HttpClient httpClient) {
return new RxGatewayStoreModel(sessionContainer,
consistencyLevel,
queryCompatibilityMode,
userAgentContainer,
globalEndpointManager,
httpClient);
}
/**
 * Builds the gateway HTTP client from the connection policy. When connection
 * sharing across clients is enabled, a process-wide shared instance is obtained
 * (reference-counted); otherwise a dedicated fixed-pool client is created.
 */
private HttpClient httpClient() {
    HttpClientConfig config = new HttpClientConfig(this.configs)
        .withMaxIdleConnectionTimeout(this.connectionPolicy.getIdleConnectionTimeout())
        .withPoolSize(this.connectionPolicy.getMaxPoolSize())
        .withHttpProxy(this.connectionPolicy.getProxy())
        .withRequestTimeout(this.connectionPolicy.getRequestTimeout());
    return this.connectionSharingAcrossClientsEnabled
        ? SharedGatewayHttpClient.getOrCreateInstance(config)
        : HttpClient.createFixed(config);
}
/**
 * Creates the direct-mode server store model around a new store client.
 * NOTE(review): the {@code subscribeRntbdStatus} parameter is not used in this
 * body — presumably a leftover from RNTBD status plumbing; confirm before removal.
 */
private void createStoreModel(boolean subscribeRntbdStatus) {
StoreClient storeClient = this.storeClientFactory.createStoreClient(
this.addressResolver,
this.sessionContainer,
this.gatewayConfigurationReader,
this,
false
);
this.storeModel = new ServerStoreModel(storeClient);
}
/** Returns the account endpoint this client was created with. */
@Override
public URI getServiceEndpoint() {
return this.serviceEndpoint;
}
/** Returns the currently-preferred write endpoint, or null if none is available yet. */
@Override
public URI getWriteEndpoint() {
return globalEndpointManager.getWriteEndpoints().stream().findFirst().orElse(null);
}
/** Returns the currently-preferred read endpoint, or null if none is available yet. */
@Override
public URI getReadEndpoint() {
return globalEndpointManager.getReadEndpoints().stream().findFirst().orElse(null);
}
/** Returns the effective connection policy (never null; defaulted in the constructor). */
@Override
public ConnectionPolicy getConnectionPolicy() {
return this.connectionPolicy;
}
/**
 * Creates a database, wrapping the work in a retry policy obtained per request.
 */
@Override
public Mono<ResourceResponse<Database>> createDatabase(Database database, RequestOptions options) {
DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy();
return ObservableHelper.inlineIfPossibleAsObs(() -> createDatabaseInternal(database, options, retryPolicyInstance), retryPolicyInstance);
}
/**
 * Serializes the database (recording serialization timings into the request
 * diagnostics), builds the Create request and issues it.
 *
 * @throws IllegalArgumentException via the returned Mono if {@code database} is null
 */
private Mono<ResourceResponse<Database>> createDatabaseInternal(Database database, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (database == null) {
throw new IllegalArgumentException("Database");
}
logger.debug("Creating a Database. id: [{}]", database.getId());
validateResource(database);
Map<String, String> requestHeaders = this.getRequestHeaders(options);
// time the serialization so it shows up in response diagnostics
ZonedDateTime serializationStartTimeUTC = ZonedDateTime.now(ZoneOffset.UTC);
ByteBuffer byteBuffer = ModelBridgeInternal.serializeJsonToByteBuffer(database);
ZonedDateTime serializationEndTimeUTC = ZonedDateTime.now(ZoneOffset.UTC);
SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics = new SerializationDiagnosticsContext.SerializationDiagnostics(
serializationStartTimeUTC,
serializationEndTimeUTC,
SerializationDiagnosticsContext.SerializationType.DATABASE_SERIALIZATION);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Create,
ResourceType.Database, Paths.DATABASES_ROOT, byteBuffer, requestHeaders, options);
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
SerializationDiagnosticsContext serializationDiagnosticsContext = BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosResponseDiagnostics);
if (serializationDiagnosticsContext != null) {
serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics);
}
return this.create(request, retryPolicyInstance).map(response -> toResourceResponse(response, Database.class));
} catch (Exception e) {
// surface synchronous failures through the reactive type
logger.debug("Failure in creating a database. due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
/**
 * Deletes a database, wrapping the work in a per-request retry policy.
 */
@Override
public Mono<ResourceResponse<Database>> deleteDatabase(String databaseLink, RequestOptions options) {
DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy();
return ObservableHelper.inlineIfPossibleAsObs(() -> deleteDatabaseInternal(databaseLink, options, retryPolicyInstance), retryPolicyInstance);
}
/**
 * Builds and issues the Delete request for the database link.
 *
 * @throws IllegalArgumentException via the returned Mono if the link is empty
 */
private Mono<ResourceResponse<Database>> deleteDatabaseInternal(String databaseLink, RequestOptions options,
DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (StringUtils.isEmpty(databaseLink)) {
throw new IllegalArgumentException("databaseLink");
}
logger.debug("Deleting a Database. databaseLink: [{}]", databaseLink);
String path = Utils.joinPath(databaseLink, null);
Map<String, String> requestHeaders = this.getRequestHeaders(options);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Delete,
ResourceType.Database, path, requestHeaders, options);
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.delete(request, retryPolicyInstance).map(response -> toResourceResponse(response, Database.class));
} catch (Exception e) {
logger.debug("Failure in deleting a database. due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
/**
 * Reads a database, wrapping the work in a per-request retry policy.
 */
@Override
public Mono<ResourceResponse<Database>> readDatabase(String databaseLink, RequestOptions options) {
DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy();
return ObservableHelper.inlineIfPossibleAsObs(() -> readDatabaseInternal(databaseLink, options, retryPolicyInstance), retryPolicyInstance);
}
/**
 * Builds and issues the Read request for the database link.
 *
 * @throws IllegalArgumentException via the returned Mono if the link is empty
 */
private Mono<ResourceResponse<Database>> readDatabaseInternal(String databaseLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (StringUtils.isEmpty(databaseLink)) {
throw new IllegalArgumentException("databaseLink");
}
logger.debug("Reading a Database. databaseLink: [{}]", databaseLink);
String path = Utils.joinPath(databaseLink, null);
Map<String, String> requestHeaders = this.getRequestHeaders(options);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Read,
ResourceType.Database, path, requestHeaders, options);
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, Database.class));
} catch (Exception e) {
logger.debug("Failure in reading a database. due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
/** Reads all databases in the account as a paged feed. */
@Override
public Flux<FeedResponse<Database>> readDatabases(FeedOptions options) {
return readFeed(options, ResourceType.Database, Database.class, Paths.DATABASES_ROOT);
}
/**
 * Maps a resource type to the feed link queries are issued against.
 * Account-rooted types (Database, Offer) ignore the parent link and use their
 * fixed root path; every child type appends its own path segment to the parent
 * resource link.
 *
 * @throws IllegalArgumentException for resource types that cannot be queried
 */
private String parentResourceLinkToQueryLink(String parentResouceLink, ResourceType resourceTypeEnum) {
    switch (resourceTypeEnum) {
        // account-rooted feeds: parent link is irrelevant
        case Database:
            return Paths.DATABASES_ROOT;
        case Offer:
            return Paths.OFFERS_ROOT;
        // child feeds: parent link + type-specific segment
        case DocumentCollection:
            return Utils.joinPath(parentResouceLink, Paths.COLLECTIONS_PATH_SEGMENT);
        case Document:
            return Utils.joinPath(parentResouceLink, Paths.DOCUMENTS_PATH_SEGMENT);
        case User:
            return Utils.joinPath(parentResouceLink, Paths.USERS_PATH_SEGMENT);
        case Permission:
            return Utils.joinPath(parentResouceLink, Paths.PERMISSIONS_PATH_SEGMENT);
        case Attachment:
            return Utils.joinPath(parentResouceLink, Paths.ATTACHMENTS_PATH_SEGMENT);
        case StoredProcedure:
            return Utils.joinPath(parentResouceLink, Paths.STORED_PROCEDURES_PATH_SEGMENT);
        case Trigger:
            return Utils.joinPath(parentResouceLink, Paths.TRIGGERS_PATH_SEGMENT);
        case UserDefinedFunction:
            return Utils.joinPath(parentResouceLink, Paths.USER_DEFINED_FUNCTIONS_PATH_SEGMENT);
        default:
            throw new IllegalArgumentException("resource type not supported");
    }
}
/**
 * Core query pipeline: resolves the feed link for the resource type, creates a
 * query execution context (which handles partitioning/continuations) and streams
 * its pages.
 */
private <T extends Resource> Flux<FeedResponse<T>> createQuery(
String parentResourceLink,
SqlQuerySpec sqlQuery,
FeedOptions options,
Class<T> klass,
ResourceType resourceTypeEnum) {
String queryResourceLink = parentResourceLinkToQueryLink(parentResourceLink, resourceTypeEnum);
// one activity id per logical query, shared by all pages
UUID activityId = Utils.randomUUID();
IDocumentQueryClient queryClient = documentQueryClientImpl(RxDocumentClientImpl.this);
Flux<? extends IDocumentQueryExecutionContext<T>> executionContext =
DocumentQueryExecutionContextFactory.createDocumentQueryExecutionContextAsync(queryClient, resourceTypeEnum, klass, sqlQuery , options, queryResourceLink, false, activityId);
return executionContext.flatMap(IDocumentQueryExecutionContext<T>::executeAsync);
}
/** Queries databases with a raw query string. */
@Override
public Flux<FeedResponse<Database>> queryDatabases(String query, FeedOptions options) {
return queryDatabases(new SqlQuerySpec(query), options);
}
/** Queries databases with a parameterized query spec. */
@Override
public Flux<FeedResponse<Database>> queryDatabases(SqlQuerySpec querySpec, FeedOptions options) {
return createQuery(Paths.DATABASES_ROOT, querySpec, options, Database.class, ResourceType.Database);
}
/**
 * Creates a collection under the given database, wrapped in a per-request retry policy.
 */
@Override
public Mono<ResourceResponse<DocumentCollection>> createCollection(String databaseLink,
DocumentCollection collection, RequestOptions options) {
DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy();
return ObservableHelper.inlineIfPossibleAsObs(() -> this.createCollectionInternal(databaseLink, collection, options, retryPolicyInstance), retryPolicyInstance);
}
/**
 * Serializes the collection (timed for diagnostics), issues the Create request and,
 * on success, records the returned session token for the new collection.
 *
 * @throws IllegalArgumentException via the returned Mono for empty link / null collection
 */
private Mono<ResourceResponse<DocumentCollection>> createCollectionInternal(String databaseLink,
DocumentCollection collection, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (StringUtils.isEmpty(databaseLink)) {
throw new IllegalArgumentException("databaseLink");
}
if (collection == null) {
throw new IllegalArgumentException("collection");
}
logger.debug("Creating a Collection. databaseLink: [{}], Collection id: [{}]", databaseLink,
collection.getId());
validateResource(collection);
String path = Utils.joinPath(databaseLink, Paths.COLLECTIONS_PATH_SEGMENT);
Map<String, String> requestHeaders = this.getRequestHeaders(options);
// time the serialization so it shows up in response diagnostics
ZonedDateTime serializationStartTimeUTC = ZonedDateTime.now(ZoneOffset.UTC);
ByteBuffer byteBuffer = ModelBridgeInternal.serializeJsonToByteBuffer(collection);
ZonedDateTime serializationEndTimeUTC = ZonedDateTime.now(ZoneOffset.UTC);
SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics = new SerializationDiagnosticsContext.SerializationDiagnostics(
serializationStartTimeUTC,
serializationEndTimeUTC,
SerializationDiagnosticsContext.SerializationType.CONTAINER_SERIALIZATION);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Create,
ResourceType.DocumentCollection, path, byteBuffer, requestHeaders, options);
if (retryPolicyInstance != null){
retryPolicyInstance.onBeforeSendRequest(request);
}
SerializationDiagnosticsContext serializationDiagnosticsContext = BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosResponseDiagnostics);
if (serializationDiagnosticsContext != null) {
serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics);
}
return this.create(request, retryPolicyInstance).map(response -> toResourceResponse(response, DocumentCollection.class))
.doOnNext(resourceResponse -> {
// seed the session container so subsequent session reads on this collection are consistent
this.sessionContainer.setSessionToken(resourceResponse.getResource().getResourceId(),
getAltLink(resourceResponse.getResource()),
resourceResponse.getResponseHeaders());
});
} catch (Exception e) {
logger.debug("Failure in creating a collection. due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
/**
 * Replaces a collection definition, wrapped in a per-request retry policy.
 */
@Override
public Mono<ResourceResponse<DocumentCollection>> replaceCollection(DocumentCollection collection,
RequestOptions options) {
DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy();
return ObservableHelper.inlineIfPossibleAsObs(() -> replaceCollectionInternal(collection, options, retryPolicyInstance), retryPolicyInstance);
}
/**
 * Serializes the collection (timed for diagnostics), issues the Replace request and,
 * when a resource comes back, refreshes the session token for it.
 *
 * @throws IllegalArgumentException via the returned Mono if {@code collection} is null
 */
private Mono<ResourceResponse<DocumentCollection>> replaceCollectionInternal(DocumentCollection collection,
RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (collection == null) {
throw new IllegalArgumentException("collection");
}
logger.debug("Replacing a Collection. id: [{}]", collection.getId());
validateResource(collection);
String path = Utils.joinPath(collection.getSelfLink(), null);
Map<String, String> requestHeaders = this.getRequestHeaders(options);
// time the serialization so it shows up in response diagnostics
ZonedDateTime serializationStartTimeUTC = ZonedDateTime.now(ZoneOffset.UTC);
ByteBuffer byteBuffer = ModelBridgeInternal.serializeJsonToByteBuffer(collection);
ZonedDateTime serializationEndTimeUTC = ZonedDateTime.now(ZoneOffset.UTC);
SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics = new SerializationDiagnosticsContext.SerializationDiagnostics(
serializationStartTimeUTC,
serializationEndTimeUTC,
SerializationDiagnosticsContext.SerializationType.CONTAINER_SERIALIZATION);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Replace,
ResourceType.DocumentCollection, path, byteBuffer, requestHeaders, options);
if (retryPolicyInstance != null){
retryPolicyInstance.onBeforeSendRequest(request);
}
SerializationDiagnosticsContext serializationDiagnosticsContext = BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosResponseDiagnostics);
if (serializationDiagnosticsContext != null) {
serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics);
}
return this.replace(request, retryPolicyInstance).map(response -> toResourceResponse(response, DocumentCollection.class))
.doOnNext(resourceResponse -> {
// unlike create, the resource may be absent; only update the session token when present
if (resourceResponse.getResource() != null) {
this.sessionContainer.setSessionToken(resourceResponse.getResource().getResourceId(),
getAltLink(resourceResponse.getResource()),
resourceResponse.getResponseHeaders());
}
});
} catch (Exception e) {
logger.debug("Failure in replacing a collection. due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
/**
 * Deletes a collection, wrapped in a per-request retry policy.
 */
@Override
public Mono<ResourceResponse<DocumentCollection>> deleteCollection(String collectionLink,
RequestOptions options) {
DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy();
return ObservableHelper.inlineIfPossibleAsObs(() -> deleteCollectionInternal(collectionLink, options, retryPolicyInstance), retryPolicyInstance);
}
/**
 * Builds and issues the Delete request for the collection link.
 *
 * @throws IllegalArgumentException via the returned Mono if the link is empty
 */
private Mono<ResourceResponse<DocumentCollection>> deleteCollectionInternal(String collectionLink,
RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (StringUtils.isEmpty(collectionLink)) {
throw new IllegalArgumentException("collectionLink");
}
logger.debug("Deleting a Collection. collectionLink: [{}]", collectionLink);
String path = Utils.joinPath(collectionLink, null);
Map<String, String> requestHeaders = this.getRequestHeaders(options);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Delete,
ResourceType.DocumentCollection, path, requestHeaders, options);
if (retryPolicyInstance != null){
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.delete(request, retryPolicyInstance).map(response -> toResourceResponse(response, DocumentCollection.class));
} catch (Exception e) {
logger.debug("Failure in deleting a collection, due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
/**
 * Issues a DELETE through the appropriate store model, recording retry context
 * on retried attempts.
 */
private Mono<RxDocumentServiceResponse> delete(RxDocumentServiceRequest request, DocumentClientRetryPolicy documentClientRetryPolicy) {
populateHeaders(request, RequestVerb.DELETE);
if(request.requestContext != null && documentClientRetryPolicy.getRetryCount() > 0) {
documentClientRetryPolicy.updateEndTime();
request.requestContext.updateRetryContext(documentClientRetryPolicy, true);
}
return getStoreProxy(request).processMessage(request);
}
/**
 * Issues a GET through the appropriate store model, recording retry context
 * on retried attempts.
 */
private Mono<RxDocumentServiceResponse> read(RxDocumentServiceRequest request, DocumentClientRetryPolicy documentClientRetryPolicy) {
populateHeaders(request, RequestVerb.GET);
if(request.requestContext != null && documentClientRetryPolicy.getRetryCount() > 0) {
documentClientRetryPolicy.updateEndTime();
request.requestContext.updateRetryContext(documentClientRetryPolicy, true);
}
return getStoreProxy(request).processMessage(request);
}
/** Reads a feed; feeds always go through the gateway proxy, never direct. */
Mono<RxDocumentServiceResponse> readFeed(RxDocumentServiceRequest request) {
populateHeaders(request, RequestVerb.GET);
return gatewayProxy.processMessage(request);
}
/**
 * Issues a query (POST) through the appropriate store model and, unlike the other
 * verbs here, captures the response's session token.
 */
private Mono<RxDocumentServiceResponse> query(RxDocumentServiceRequest request) {
populateHeaders(request, RequestVerb.POST);
return this.getStoreProxy(request).processMessage(request)
.map(response -> {
this.captureSessionToken(request, response);
return response;
}
);
}
/**
 * Reads a collection, wrapped in a per-request retry policy.
 */
@Override
public Mono<ResourceResponse<DocumentCollection>> readCollection(String collectionLink,
RequestOptions options) {
DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy();
return ObservableHelper.inlineIfPossibleAsObs(() -> readCollectionInternal(collectionLink, options, retryPolicyInstance), retryPolicyInstance);
}
/**
 * Builds and issues the Read request for the collection link.
 *
 * @throws IllegalArgumentException via the returned Mono if the link is empty
 */
private Mono<ResourceResponse<DocumentCollection>> readCollectionInternal(String collectionLink,
RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (StringUtils.isEmpty(collectionLink)) {
throw new IllegalArgumentException("collectionLink");
}
logger.debug("Reading a Collection. collectionLink: [{}]", collectionLink);
String path = Utils.joinPath(collectionLink, null);
Map<String, String> requestHeaders = this.getRequestHeaders(options);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Read,
ResourceType.DocumentCollection, path, requestHeaders, options);
if (retryPolicyInstance != null){
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, DocumentCollection.class));
} catch (Exception e) {
logger.debug("Failure in reading a collection, due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
/**
 * Reads all collections under a database as a paged feed.
 *
 * @throws IllegalArgumentException if {@code databaseLink} is empty (thrown eagerly)
 */
@Override
public Flux<FeedResponse<DocumentCollection>> readCollections(String databaseLink, FeedOptions options) {
if (StringUtils.isEmpty(databaseLink)) {
throw new IllegalArgumentException("databaseLink");
}
return readFeed(options, ResourceType.DocumentCollection, DocumentCollection.class,
Utils.joinPath(databaseLink, Paths.COLLECTIONS_PATH_SEGMENT));
}
/** Queries collections with a raw query string. */
@Override
public Flux<FeedResponse<DocumentCollection>> queryCollections(String databaseLink, String query,
FeedOptions options) {
return createQuery(databaseLink, new SqlQuerySpec(query), options, DocumentCollection.class, ResourceType.DocumentCollection);
}
/** Queries collections with a parameterized query spec. */
@Override
public Flux<FeedResponse<DocumentCollection>> queryCollections(String databaseLink,
SqlQuerySpec querySpec, FeedOptions options) {
return createQuery(databaseLink, querySpec, options, DocumentCollection.class, ResourceType.DocumentCollection);
}
/**
 * Serializes stored-procedure parameters into a JSON array string, e.g.
 * {@code [p0,p1,...]} (or {@code []} for no parameters). JsonSerializable values
 * use their own serializer; everything else goes through the shared mapper.
 *
 * @throws IllegalArgumentException if a value cannot be serialized to JSON
 */
private static String serializeProcedureParams(Object[] objectArray) {
    StringBuilder json = new StringBuilder("[");
    for (int i = 0; i < objectArray.length; ++i) {
        if (i > 0) {
            json.append(',');
        }
        Object value = objectArray[i];
        if (value instanceof JsonSerializable) {
            json.append(ModelBridgeInternal.toJsonFromJsonSerializable((JsonSerializable) value));
        } else {
            try {
                json.append(mapper.writeValueAsString(value));
            } catch (IOException e) {
                throw new IllegalArgumentException("Can't serialize the object into the json string", e);
            }
        }
    }
    return json.append(']').toString();
}
private static void validateResource(Resource resource) {
if (!StringUtils.isEmpty(resource.getId())) {
if (resource.getId().indexOf('/') != -1 || resource.getId().indexOf('\\') != -1 ||
resource.getId().indexOf('?') != -1 || resource.getId().indexOf('
throw new IllegalArgumentException("Id contains illegal chars.");
}
if (resource.getId().endsWith(" ")) {
throw new IllegalArgumentException("Id ends with a space.");
}
}
}
/**
 * Assembles the HTTP request headers for an operation: client-level defaults
 * (tentative writes, consistency level) first, then everything carried by the
 * per-request {@link RequestOptions}. Option-level values overwrite client-level
 * ones because they are put into the map later.
 *
 * @param options per-request options; may be null, in which case only the
 *                client-level headers are returned
 * @return a freshly-built, mutable header map
 */
private Map<String, String> getRequestHeaders(RequestOptions options) {
Map<String, String> headers = new HashMap<>();
if (this.useMultipleWriteLocations) {
headers.put(HttpConstants.HttpHeaders.ALLOW_TENTATIVE_WRITES, Boolean.TRUE.toString());
}
if (consistencyLevel != null) {
headers.put(HttpConstants.HttpHeaders.CONSISTENCY_LEVEL, consistencyLevel.toString());
}
if (options == null) {
return headers;
}
// custom headers go in first so the typed options below can override them
Map<String, String> customOptions = options.getHeaders();
if (customOptions != null) {
headers.putAll(customOptions);
}
if (options.getAccessCondition() != null) {
if (options.getAccessCondition().getType() == AccessConditionType.IF_MATCH) {
headers.put(HttpConstants.HttpHeaders.IF_MATCH, options.getAccessCondition().getCondition());
} else {
headers.put(HttpConstants.HttpHeaders.IF_NONE_MATCH, options.getAccessCondition().getCondition());
}
}
if (options.getConsistencyLevel() != null) {
// per-request consistency overrides the client-level value set above
headers.put(HttpConstants.HttpHeaders.CONSISTENCY_LEVEL, options.getConsistencyLevel().toString());
}
if (options.getIndexingDirective() != null) {
headers.put(HttpConstants.HttpHeaders.INDEXING_DIRECTIVE, options.getIndexingDirective().toString());
}
if (options.getPostTriggerInclude() != null && options.getPostTriggerInclude().size() > 0) {
String postTriggerInclude = StringUtils.join(options.getPostTriggerInclude(), ",");
headers.put(HttpConstants.HttpHeaders.POST_TRIGGER_INCLUDE, postTriggerInclude);
}
if (options.getPreTriggerInclude() != null && options.getPreTriggerInclude().size() > 0) {
String preTriggerInclude = StringUtils.join(options.getPreTriggerInclude(), ",");
headers.put(HttpConstants.HttpHeaders.PRE_TRIGGER_INCLUDE, preTriggerInclude);
}
if (!Strings.isNullOrEmpty(options.getSessionToken())) {
headers.put(HttpConstants.HttpHeaders.SESSION_TOKEN, options.getSessionToken());
}
if (options.getResourceTokenExpirySeconds() != null) {
headers.put(HttpConstants.HttpHeaders.RESOURCE_TOKEN_EXPIRY,
String.valueOf(options.getResourceTokenExpirySeconds()));
}
// explicit throughput wins over a named offer type
if (options.getOfferThroughput() != null && options.getOfferThroughput() >= 0) {
headers.put(HttpConstants.HttpHeaders.OFFER_THROUGHPUT, options.getOfferThroughput().toString());
} else if (options.getOfferType() != null) {
headers.put(HttpConstants.HttpHeaders.OFFER_TYPE, options.getOfferType());
}
if (options.isPopulateQuotaInfo()) {
headers.put(HttpConstants.HttpHeaders.POPULATE_QUOTA_INFO, String.valueOf(true));
}
if (options.isScriptLoggingEnabled()) {
headers.put(HttpConstants.HttpHeaders.SCRIPT_ENABLE_LOGGING, String.valueOf(true));
}
return headers;
}
/**
 * Async overload: resolves the collection from the cache, then applies partition
 * key information to the request via the synchronous overload.
 */
private Mono<RxDocumentServiceRequest> addPartitionKeyInformation(RxDocumentServiceRequest request,
ByteBuffer contentAsByteBuffer,
Document document,
RequestOptions options) {
Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = this.collectionCache.resolveCollectionAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosResponseDiagnostics), request);
return collectionObs
.map(collectionValueHolder -> {
addPartitionKeyInformation(request, contentAsByteBuffer, document, options, collectionValueHolder.v);
return request;
});
}
/**
 * Async overload taking an already-resolved (or in-flight) collection lookup;
 * applies partition key information once the collection is available.
 */
private Mono<RxDocumentServiceRequest> addPartitionKeyInformation(RxDocumentServiceRequest request,
ByteBuffer contentAsByteBuffer,
Object document,
RequestOptions options,
Mono<Utils.ValueHolder<DocumentCollection>> collectionObs) {
return collectionObs.map(collectionValueHolder -> {
addPartitionKeyInformation(request, contentAsByteBuffer, document, options, collectionValueHolder.v);
return request;
});
}
/**
 * Determines the effective partition key for the operation and writes it onto the
 * request (both the internal field and the wire header).
 *
 * Resolution precedence (order matters — do not reorder the branches):
 *   1. an explicit PartitionKey.NONE in options,
 *   2. an explicit partition key in options,
 *   3. an empty key when the collection has no partition-key definition,
 *   4. extraction from the document payload,
 *   5. otherwise the operation cannot proceed without a key.
 */
private void addPartitionKeyInformation(RxDocumentServiceRequest request,
                                        ByteBuffer contentAsByteBuffer,
                                        Object objectDoc, RequestOptions options,
                                        DocumentCollection collection) {
    PartitionKeyDefinition partitionKeyDefinition = collection.getPartitionKey();
    PartitionKeyInternal partitionKeyInternal = null;
    if (options != null && options.getPartitionKey() != null && options.getPartitionKey().equals(PartitionKey.NONE)){
        // Caller explicitly targeted the "none" (system) partition.
        partitionKeyInternal = ModelBridgeInternal.getNonePartitionKey(partitionKeyDefinition);
    } else if (options != null && options.getPartitionKey() != null) {
        partitionKeyInternal = BridgeInternal.getPartitionKeyInternal(options.getPartitionKey());
    } else if (partitionKeyDefinition == null || partitionKeyDefinition.getPaths().size() == 0) {
        // Collection is not partitioned; an empty key addresses the single partition.
        partitionKeyInternal = PartitionKeyInternal.getEmpty();
    } else if (contentAsByteBuffer != null) {
        CosmosItemProperties cosmosItemProperties;
        if (objectDoc instanceof CosmosItemProperties) {
            cosmosItemProperties = (CosmosItemProperties) objectDoc;
        } else {
            // rewind: the buffer may already have been consumed by serialization.
            contentAsByteBuffer.rewind();
            cosmosItemProperties = new CosmosItemProperties(contentAsByteBuffer);
        }
        // Time the PK extraction so it shows up in client-side serialization diagnostics.
        ZonedDateTime serializationStartTime = ZonedDateTime.now(ZoneOffset.UTC);
        partitionKeyInternal = extractPartitionKeyValueFromDocument(cosmosItemProperties, partitionKeyDefinition);
        ZonedDateTime serializationEndTime = ZonedDateTime.now(ZoneOffset.UTC);
        SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics = new SerializationDiagnosticsContext.SerializationDiagnostics(
            serializationStartTime,
            serializationEndTime,
            SerializationDiagnosticsContext.SerializationType.PARTITION_KEY_FETCH_SERIALIZATION
        );
        SerializationDiagnosticsContext serializationDiagnosticsContext = BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosResponseDiagnostics);
        if (serializationDiagnosticsContext != null) {
            serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics);
        }
    } else {
        throw new UnsupportedOperationException("PartitionKey value must be supplied for this operation.");
    }
    request.setPartitionKeyInternal(partitionKeyInternal);
    // Header must be ASCII-safe; escape any non-ASCII characters in the JSON form.
    request.getHeaders().put(HttpConstants.HttpHeaders.PARTITION_KEY, Utils.escapeNonAscii(partitionKeyInternal.toJson()));
}
/**
 * Reads the partition-key value out of a document by following the first path of the
 * partition-key definition. Returns {@code null} when no definition/path is available.
 * A missing value or an ObjectNode value maps to the "none" partition key.
 */
private static PartitionKeyInternal extractPartitionKeyValueFromDocument(
    CosmosItemProperties document,
    PartitionKeyDefinition partitionKeyDefinition) {
    if (partitionKeyDefinition == null) {
        return null;
    }
    String firstPath = partitionKeyDefinition.getPaths().iterator().next();
    List<String> pathParts = PathParser.getPathParts(firstPath);
    if (pathParts.isEmpty()) {
        return null;
    }
    Object extracted = ModelBridgeInternal.getObjectByPathFromJsonSerializable(document, pathParts);
    if (extracted == null || extracted.getClass() == ObjectNode.class) {
        // Absent or non-scalar value: treat as the "none" partition key.
        extracted = ModelBridgeInternal.getNonePartitionKey(partitionKeyDefinition);
    }
    if (extracted instanceof PartitionKeyInternal) {
        return (PartitionKeyInternal) extracted;
    }
    return PartitionKeyInternal.fromObjectArray(Collections.singletonList(extracted), false);
}
/**
 * Builds the service request for a document Create/Upsert: serializes the payload
 * (recording serialization diagnostics), constructs the request, and asynchronously
 * attaches the partition-key header via collection resolution.
 *
 * @throws IllegalArgumentException when the collection link or document is missing
 */
private Mono<RxDocumentServiceRequest> getCreateDocumentRequest(DocumentClientRetryPolicy requestRetryPolicy,
                                                                String documentCollectionLink,
                                                                Object document,
                                                                RequestOptions options,
                                                                boolean disableAutomaticIdGeneration,
                                                                OperationType operationType) {
    if (StringUtils.isEmpty(documentCollectionLink)) {
        throw new IllegalArgumentException("documentCollectionLink");
    }
    if (document == null) {
        throw new IllegalArgumentException("document");
    }
    // Time payload serialization for client-side diagnostics.
    ZonedDateTime serializationStartTimeUTC = ZonedDateTime.now(ZoneOffset.UTC);
    ByteBuffer content = BridgeInternal.serializeJsonToByteBuffer(document, mapper);
    ZonedDateTime serializationEndTimeUTC = ZonedDateTime.now(ZoneOffset.UTC);
    SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics = new SerializationDiagnosticsContext.SerializationDiagnostics(
        serializationStartTimeUTC,
        serializationEndTimeUTC,
        SerializationDiagnosticsContext.SerializationType.ITEM_SERIALIZATION);
    String path = Utils.joinPath(documentCollectionLink, Paths.DOCUMENTS_PATH_SEGMENT);
    Map<String, String> requestHeaders = this.getRequestHeaders(options);
    RxDocumentServiceRequest request = RxDocumentServiceRequest.create(operationType, ResourceType.Document, path,
        requestHeaders, options, content);
    // Let the retry policy capture per-request state before the request is sent.
    if (requestRetryPolicy != null) {
        requestRetryPolicy.onBeforeSendRequest(request);
    }
    SerializationDiagnosticsContext serializationDiagnosticsContext = BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosResponseDiagnostics);
    if (serializationDiagnosticsContext != null) {
        serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics);
    }
    Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = this.collectionCache.resolveCollectionAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosResponseDiagnostics), request);
    return addPartitionKeyInformation(request, content, document, options, collectionObs);
}
/**
 * Stamps the standard wire headers on an outgoing request: the RFC-1123 date,
 * the authorization token (when any credential source is configured), and
 * default content-type/accept headers for JSON.
 */
private void populateHeaders(RxDocumentServiceRequest request, RequestVerb httpMethod) {
    // The x-date header participates in the auth signature, so set it first.
    request.getHeaders().put(HttpConstants.HttpHeaders.X_DATE, Utils.nowAsRFC1123());
    if (this.masterKeyOrResourceToken != null || this.resourceTokensMap != null
        || this.cosmosAuthorizationTokenResolver != null || this.cosmosKeyCredential != null) {
        String resourceName = request.getResourceAddress();
        String authorization = this.getUserAuthorizationToken(
            resourceName, request.getResourceType(), httpMethod, request.getHeaders(),
            AuthorizationTokenType.PrimaryMasterKey, request.properties);
        try {
            // The token must be URL-encoded before being placed in the header.
            authorization = URLEncoder.encode(authorization, "UTF-8");
        } catch (UnsupportedEncodingException e) {
            // UTF-8 is guaranteed by the JVM spec; reaching here indicates a broken runtime.
            throw new IllegalStateException("Failed to encode authtoken.", e);
        }
        request.getHeaders().put(HttpConstants.HttpHeaders.AUTHORIZATION, authorization);
    }
    // Only set defaults when the caller has not supplied explicit values.
    if ((RequestVerb.POST.equals(httpMethod) || RequestVerb.PUT.equals(httpMethod))
        && !request.getHeaders().containsKey(HttpConstants.HttpHeaders.CONTENT_TYPE)) {
        request.getHeaders().put(HttpConstants.HttpHeaders.CONTENT_TYPE, RuntimeConstants.MediaTypes.JSON);
    }
    if (!request.getHeaders().containsKey(HttpConstants.HttpHeaders.ACCEPT)) {
        request.getHeaders().put(HttpConstants.HttpHeaders.ACCEPT, RuntimeConstants.MediaTypes.JSON);
    }
}
/**
 * Produces the authorization token for a request. Credential sources are consulted
 * in a fixed precedence order: custom token resolver, key credential, master key /
 * single resource token, and finally the per-resource token map.
 */
@Override
public String getUserAuthorizationToken(String resourceName,
                                        ResourceType resourceType,
                                        RequestVerb requestVerb,
                                        Map<String, String> headers,
                                        AuthorizationTokenType tokenType,
                                        Map<String, Object> properties) {
    if (this.cosmosAuthorizationTokenResolver != null) {
        // Custom resolver wins; hand it an immutable view of the properties.
        return this.cosmosAuthorizationTokenResolver.getAuthorizationToken(requestVerb, resourceName, this.resolveCosmosResourceType(resourceType),
            properties != null ? Collections.unmodifiableMap(properties) : null);
    } else if (cosmosKeyCredential != null) {
        return this.authorizationTokenProvider.generateKeyAuthorizationSignature(requestVerb, resourceName,
            resourceType, headers);
    } else if (masterKeyOrResourceToken != null && hasAuthKeyResourceToken && resourceTokensMap == null) {
        // A single resource token was supplied directly; use it as-is.
        return masterKeyOrResourceToken;
    } else {
        assert resourceTokensMap != null;
        if(resourceType.equals(ResourceType.DatabaseAccount)) {
            // Account-level reads are authorized with the first permission-feed token.
            return this.firstResourceTokenFromPermissionFeed;
        }
        return ResourceTokenAuthorizationHelper.getAuthorizationTokenUsingResourceTokens(resourceTokensMap, requestVerb, resourceName, headers);
    }
}
/**
 * Maps an internal ResourceType to the public CosmosResourceType, defaulting to
 * SYSTEM when the serialized name has no public counterpart.
 */
private CosmosResourceType resolveCosmosResourceType(ResourceType resourceType) {
    CosmosResourceType mapped =
        ModelBridgeInternal.fromServiceSerializedFormat(resourceType.toString());
    return mapped != null ? mapped : CosmosResourceType.SYSTEM;
}
// Records the session token from a response into the session container so
// session-consistent reads observe this request's writes.
void captureSessionToken(RxDocumentServiceRequest request, RxDocumentServiceResponse response) {
    this.sessionContainer.setSessionToken(request, response.getResponseHeaders());
}
/**
 * Sends a POST (create) request through the appropriate store proxy,
 * updating retry-context bookkeeping when this is a retried attempt.
 */
private Mono<RxDocumentServiceResponse> create(RxDocumentServiceRequest request, DocumentClientRetryPolicy retryPolicy) {
    populateHeaders(request, RequestVerb.POST);
    RxStoreModel storeProxy = this.getStoreProxy(request);
    // Only record retry context on actual retries (count > 0).
    if(request.requestContext != null && retryPolicy.getRetryCount() > 0) {
        retryPolicy.updateEndTime();
        request.requestContext.updateRetryContext(retryPolicy, true);
    }
    return storeProxy.processMessage(request);
}
/**
 * Sends an upsert (POST with the is-upsert header) through the store proxy and
 * captures the session token from the response for session consistency.
 */
private Mono<RxDocumentServiceResponse> upsert(RxDocumentServiceRequest request, DocumentClientRetryPolicy documentClientRetryPolicy) {
    populateHeaders(request, RequestVerb.POST);
    Map<String, String> headers = request.getHeaders();
    // populateHeaders always initializes headers on the request.
    assert (headers != null);
    headers.put(HttpConstants.HttpHeaders.IS_UPSERT, "true");
    // Only record retry context on actual retries (count > 0).
    if(request.requestContext != null && documentClientRetryPolicy.getRetryCount() > 0) {
        documentClientRetryPolicy.updateEndTime();
        request.requestContext.updateRetryContext(documentClientRetryPolicy, true);
    }
    return getStoreProxy(request).processMessage(request)
        .map(response -> {
                this.captureSessionToken(request, response);
                return response;
            }
        );
}
/**
 * Creates a document in the given collection. When no partition key is supplied,
 * wraps the retry policy so partition-key-mismatch errors are retried after a
 * collection-cache refresh.
 */
@Override
public Mono<ResourceResponse<Document>> createDocument(String collectionLink, Object document,
                                                       RequestOptions options, boolean disableAutomaticIdGeneration) {
    DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    boolean missingPartitionKey = options == null || options.getPartitionKey() == null;
    if (missingPartitionKey) {
        retryPolicy = new PartitionKeyMismatchRetryPolicy(collectionCache, retryPolicy, collectionLink, options);
    }
    final DocumentClientRetryPolicy effectivePolicy = retryPolicy;
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> createDocumentInternal(collectionLink, document, options, disableAutomaticIdGeneration, effectivePolicy),
        effectivePolicy);
}
/**
 * Core create-document flow: builds the request (including partition-key
 * resolution) and maps the raw service response to a typed resource response.
 * Synchronous failures are converted to an error Mono.
 */
private Mono<ResourceResponse<Document>> createDocumentInternal(String collectionLink, Object document,
                                                                RequestOptions options, boolean disableAutomaticIdGeneration, DocumentClientRetryPolicy requestRetryPolicy) {
    try {
        logger.debug("Creating a Document. collectionLink: [{}]", collectionLink);
        Mono<RxDocumentServiceRequest> requestObs = getCreateDocumentRequest(requestRetryPolicy, collectionLink, document,
            options, disableAutomaticIdGeneration, OperationType.Create);
        Mono<RxDocumentServiceResponse> responseObservable = requestObs.flatMap(request -> {
            return create(request, requestRetryPolicy);
        });
        return responseObservable
            .map(serviceResponse -> toResourceResponse(serviceResponse, Document.class));
    } catch (Exception e) {
        // Surface synchronous setup failures through the reactive error channel.
        logger.debug("Failure in creating a document due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Upserts a document in the given collection. Without an explicit partition key,
 * wraps the retry policy to recover from partition-key-mismatch errors.
 */
@Override
public Mono<ResourceResponse<Document>> upsertDocument(String collectionLink, Object document,
                                                       RequestOptions options, boolean disableAutomaticIdGeneration) {
    DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    if (options == null || options.getPartitionKey() == null) {
        retryPolicy = new PartitionKeyMismatchRetryPolicy(collectionCache, retryPolicy, collectionLink, options);
    }
    final DocumentClientRetryPolicy effectivePolicy = retryPolicy;
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> upsertDocumentInternal(collectionLink, document, options, disableAutomaticIdGeneration, effectivePolicy),
        effectivePolicy);
}
/**
 * Core upsert-document flow: builds the request (Upsert operation type) and maps
 * the raw service response to a typed resource response. Synchronous failures
 * are converted to an error Mono.
 */
private Mono<ResourceResponse<Document>> upsertDocumentInternal(String collectionLink, Object document,
                                                                RequestOptions options, boolean disableAutomaticIdGeneration, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        logger.debug("Upserting a Document. collectionLink: [{}]", collectionLink);
        Mono<RxDocumentServiceRequest> reqObs = getCreateDocumentRequest(retryPolicyInstance, collectionLink, document,
            options, disableAutomaticIdGeneration, OperationType.Upsert);
        Mono<RxDocumentServiceResponse> responseObservable = reqObs.flatMap(request -> {
            return upsert(request, retryPolicyInstance);
        });
        return responseObservable
            .map(serviceResponse -> toResourceResponse(serviceResponse, Document.class));
    } catch (Exception e) {
        logger.debug("Failure in upserting a document due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Replaces a document addressed by its link. Without an explicit partition key,
 * derives the collection link from the document link and wraps the retry policy
 * to recover from partition-key-mismatch errors.
 */
@Override
public Mono<ResourceResponse<Document>> replaceDocument(String documentLink, Object document,
                                                        RequestOptions options) {
    DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    if (options == null || options.getPartitionKey() == null) {
        String collectionLink = Utils.getCollectionName(documentLink);
        retryPolicy = new PartitionKeyMismatchRetryPolicy(collectionCache, retryPolicy, collectionLink, options);
    }
    final DocumentClientRetryPolicy effectivePolicy = retryPolicy;
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> replaceDocumentInternal(documentLink, document, options, effectivePolicy),
        effectivePolicy);
}
/**
 * Validates arguments, converts the arbitrary payload object into a typed
 * Document, and delegates to the Document-based replace path.
 *
 * @throws IllegalArgumentException (as an error Mono) when the link or document is missing
 */
private Mono<ResourceResponse<Document>> replaceDocumentInternal(String documentLink, Object document,
                                                                 RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(documentLink)) {
            throw new IllegalArgumentException("documentLink");
        }
        if (document == null) {
            throw new IllegalArgumentException("document");
        }
        Document typedDocument = documentFromObject(document, mapper);
        return this.replaceDocumentInternal(documentLink, typedDocument, options, retryPolicyInstance);
    } catch (Exception e) {
        // Pass the exception as the last argument so the stack trace is logged,
        // consistent with the create/upsert paths.
        logger.debug("Failure in replacing a document due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Replaces a document using its self-link. Without an explicit partition key,
 * wraps the retry policy to recover from partition-key-mismatch errors.
 */
@Override
public Mono<ResourceResponse<Document>> replaceDocument(Document document, RequestOptions options) {
    DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    if (options == null || options.getPartitionKey() == null) {
        String collectionLink = document.getSelfLink();
        retryPolicy = new PartitionKeyMismatchRetryPolicy(collectionCache, retryPolicy, collectionLink, options);
    }
    final DocumentClientRetryPolicy effectivePolicy = retryPolicy;
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> replaceDocumentInternal(document, options, effectivePolicy),
        effectivePolicy);
}
/**
 * Validates the document and delegates to the link-based replace path using the
 * document's self-link.
 */
private Mono<ResourceResponse<Document>> replaceDocumentInternal(Document document, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (document == null) {
            throw new IllegalArgumentException("document");
        }
        return this.replaceDocumentInternal(document.getSelfLink(), document, options, retryPolicyInstance);
    } catch (Exception e) {
        // Fixed copy/paste: this path replaces a document, not a database; also
        // pass the exception so the stack trace is logged like sibling methods.
        logger.debug("Failure in replacing a document due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Core replace-document flow: serializes the payload (recording serialization
 * diagnostics), builds the Replace request, attaches the partition key via
 * collection resolution, and issues the replace.
 */
private Mono<ResourceResponse<Document>> replaceDocumentInternal(String documentLink,
                                                                 Document document,
                                                                 RequestOptions options,
                                                                 DocumentClientRetryPolicy retryPolicyInstance) {
    if (document == null) {
        throw new IllegalArgumentException("document");
    }
    logger.debug("Replacing a Document. documentLink: [{}]", documentLink);
    final String path = Utils.joinPath(documentLink, null);
    final Map<String, String> requestHeaders = getRequestHeaders(options);
    // Time payload serialization for client-side diagnostics.
    ZonedDateTime serializationStartTimeUTC = ZonedDateTime.now(ZoneOffset.UTC);
    ByteBuffer content = serializeJsonToByteBuffer(document);
    ZonedDateTime serializationEndTime = ZonedDateTime.now(ZoneOffset.UTC);
    SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics = new SerializationDiagnosticsContext.SerializationDiagnostics(
        serializationStartTimeUTC,
        serializationEndTime,
        SerializationDiagnosticsContext.SerializationType.ITEM_SERIALIZATION);
    final RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Replace,
        ResourceType.Document, path, requestHeaders, options, content);
    if (retryPolicyInstance != null) {
        retryPolicyInstance.onBeforeSendRequest(request);
    }
    SerializationDiagnosticsContext serializationDiagnosticsContext = BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosResponseDiagnostics);
    if (serializationDiagnosticsContext != null) {
        serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics);
    }
    Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = collectionCache.resolveCollectionAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosResponseDiagnostics), request);
    Mono<RxDocumentServiceRequest> requestObs = addPartitionKeyInformation(request, content, document, options, collectionObs);
    // Use the emitted request (req) rather than capturing the outer variable,
    // consistent with deleteDocumentInternal/readDocumentInternal.
    return requestObs.flatMap(req -> {
        return replace(req, retryPolicyInstance)
            .map(resp -> toResourceResponse(resp, Document.class));} );
}
// Deletes the document addressed by documentLink, applying the standard
// session-token-reset retry policy.
@Override
public Mono<ResourceResponse<Document>> deleteDocument(String documentLink, RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> deleteDocumentInternal(documentLink, options, retryPolicy),
        retryPolicy);
}
/**
 * Core delete-document flow: builds the Delete request, attaches the partition
 * key via collection resolution (no payload, so options must carry the key or
 * the collection must be unpartitioned), and issues the delete.
 */
private Mono<ResourceResponse<Document>> deleteDocumentInternal(String documentLink, RequestOptions options,
                                                                DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(documentLink)) {
            throw new IllegalArgumentException("documentLink");
        }
        logger.debug("Deleting a Document. documentLink: [{}]", documentLink);
        String path = Utils.joinPath(documentLink, null);
        Map<String, String> requestHeaders = this.getRequestHeaders(options);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Delete,
            ResourceType.Document, path, requestHeaders, options);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = collectionCache.resolveCollectionAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosResponseDiagnostics), request);
        // Deletes carry no body: content and document are null here.
        Mono<RxDocumentServiceRequest> requestObs = addPartitionKeyInformation(request, null, null, options, collectionObs);
        return requestObs.flatMap(req -> {
            return this.delete(req, retryPolicyInstance)
                .map(serviceResponse -> toResourceResponse(serviceResponse, Document.class));});
    } catch (Exception e) {
        logger.debug("Failure in deleting a document due to [{}]", e.getMessage());
        return Mono.error(e);
    }
}
// Reads the document addressed by documentLink, applying the standard
// session-token-reset retry policy.
@Override
public Mono<ResourceResponse<Document>> readDocument(String documentLink, RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> readDocumentInternal(documentLink, options, retryPolicy),
        retryPolicy);
}
/**
 * Core read-document flow: builds the Read request, attaches the partition key
 * via collection resolution, and issues the point read.
 */
private Mono<ResourceResponse<Document>> readDocumentInternal(String documentLink, RequestOptions options,
                                                              DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(documentLink)) {
            throw new IllegalArgumentException("documentLink");
        }
        logger.debug("Reading a Document. documentLink: [{}]", documentLink);
        String path = Utils.joinPath(documentLink, null);
        Map<String, String> requestHeaders = this.getRequestHeaders(options);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Read,
            ResourceType.Document, path, requestHeaders, options);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = this.collectionCache.resolveCollectionAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosResponseDiagnostics), request);
        // Reads carry no body: content and document are null here.
        Mono<RxDocumentServiceRequest> requestObs = addPartitionKeyInformation(request, null, null, options, collectionObs);
        return requestObs.flatMap(req -> {
            // NOTE(review): onBeforeSendRequest is invoked a second time here (it
            // already ran above before collection resolution) — looks redundant;
            // confirm whether the retry policy relies on the later invocation
            // before removing either call.
            if (retryPolicyInstance != null) {
                retryPolicyInstance.onBeforeSendRequest(request);
            }
            return this.read(request, retryPolicyInstance).map(serviceResponse -> toResourceResponse(serviceResponse, Document.class));
        });
    } catch (Exception e) {
        logger.debug("Failure in reading a document due to [{}]", e.getMessage());
        return Mono.error(e);
    }
}
// Reads all documents in a collection by delegating to a SELECT * query.
@Override
public Flux<FeedResponse<Document>> readDocuments(String collectionLink, FeedOptions options) {
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }
    final String readAllQuery = "SELECT * FROM r";
    return queryDocuments(collectionLink, readAllQuery, options);
}
/**
 * Point-read of many items in one call. Groups the (id, partitionKey) pairs by
 * the partition-key range that owns each key, builds one query per range, runs
 * them via a read-many query execution context, and merges all pages into a
 * single FeedResponse (summing request charges).
 */
@Override
public <T> Mono<FeedResponse<T>> readMany(
    List<Pair<String, PartitionKey>> itemKeyList,
    String collectionLink,
    FeedOptions options,
    Class<T> klass) {
    RxDocumentServiceRequest request = RxDocumentServiceRequest.create(
        OperationType.Query,
        ResourceType.Document,
        collectionLink, null
    );
    Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = collectionCache.resolveCollectionAsync(null, request);
    return collectionObs
        .flatMap(documentCollectionResourceResponse -> {
                final DocumentCollection collection = documentCollectionResourceResponse.v;
                if (collection == null) {
                    throw new IllegalStateException("Collection cannot be null");
                }
                // Need the routing map to know which physical partition owns each key.
                Mono<Utils.ValueHolder<CollectionRoutingMap>> valueHolderMono = partitionKeyRangeCache
                    .tryLookupAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosResponseDiagnostics),
                        collection.getResourceId(),
                        null,
                        null);
                return valueHolderMono.flatMap(collectionRoutingMapValueHolder -> {
                    // Bucket each (id, pk) pair by its owning partition-key range.
                    Map<PartitionKeyRange, List<Pair<String, PartitionKey>>> partitionRangeItemKeyMap =
                        new HashMap<>();
                    CollectionRoutingMap routingMap = collectionRoutingMapValueHolder.v;
                    if (routingMap == null) {
                        throw new IllegalStateException("Failed to get routing map.");
                    }
                    itemKeyList
                        .forEach(stringPartitionKeyPair -> {
                            String effectivePartitionKeyString = PartitionKeyInternalHelper
                                .getEffectivePartitionKeyString(BridgeInternal
                                        .getPartitionKeyInternal(stringPartitionKeyPair
                                            .getRight()),
                                    collection
                                        .getPartitionKey());
                            PartitionKeyRange range =
                                routingMap.getRangeByEffectivePartitionKey(effectivePartitionKeyString);
                            if (partitionRangeItemKeyMap.get(range) == null) {
                                List<Pair<String, PartitionKey>> list = new ArrayList<>();
                                list.add(stringPartitionKeyPair);
                                partitionRangeItemKeyMap.put(range, list);
                            } else {
                                List<Pair<String, PartitionKey>> pairs =
                                    partitionRangeItemKeyMap.get(range);
                                pairs.add(stringPartitionKeyPair);
                                partitionRangeItemKeyMap.put(range, pairs);
                            }
                        });
                    Set<PartitionKeyRange> partitionKeyRanges = partitionRangeItemKeyMap.keySet();
                    List<PartitionKeyRange> ranges = new ArrayList<>();
                    ranges.addAll(partitionKeyRanges);
                    // One SQL query per range, matching only the items owned by that range.
                    Map<PartitionKeyRange, SqlQuerySpec> rangeQueryMap;
                    rangeQueryMap = getRangeQueryMap(partitionRangeItemKeyMap,
                        collection.getPartitionKey());
                    // The execution context requires a query string but routes via
                    // rangeQueryMap; this placeholder text is never sent.
                    String sqlQuery = "this is dummy and only used in creating " +
                        "ParallelDocumentQueryExecutioncontext, but not used";
                    return createReadManyQuery(collectionLink,
                        new SqlQuerySpec(sqlQuery),
                        options,
                        Document.class,
                        ResourceType.Document,
                        collection,
                        Collections.unmodifiableMap(rangeQueryMap))
                        .collectList()
                        .map(feedList -> {
                            // Merge all per-range pages into one response, summing charges.
                            List<T> finalList = new ArrayList<T>();
                            HashMap<String, String> headers = new HashMap<>();
                            double requestCharge = 0;
                            for (FeedResponse<Document> page : feedList) {
                                requestCharge += page.getRequestCharge();
                                finalList.addAll(page.getResults().stream().map(document ->
                                    ModelBridgeInternal.toObjectFromJsonSerializable(document, klass)).collect(Collectors.toList()));
                            }
                            headers.put(HttpConstants.HttpHeaders.REQUEST_CHARGE, Double
                                .toString(requestCharge));
                            FeedResponse<T> frp = BridgeInternal
                                .createFeedResponse(finalList, headers);
                            return frp;
                        });
                });
            }
        );
}
/**
 * Builds one SqlQuerySpec per partition-key range for a read-many operation.
 * When the partition key path is "id", a compact id-only IN query is used;
 * otherwise each item is matched on both id and partition-key value.
 */
private Map<PartitionKeyRange, SqlQuerySpec> getRangeQueryMap(
    Map<PartitionKeyRange, List<Pair<String, PartitionKey>>> partitionRangeItemKeyMap,
    PartitionKeyDefinition partitionKeyDefinition) {
    final String partitionKeySelector = createPkSelector(partitionKeyDefinition);
    final boolean pkIsId = partitionKeySelector.equals("[\"id\"]");
    Map<PartitionKeyRange, SqlQuerySpec> rangeQueryMap = new HashMap<>();
    for (Map.Entry<PartitionKeyRange, List<Pair<String, PartitionKey>>> entry : partitionRangeItemKeyMap.entrySet()) {
        SqlQuerySpec spec = pkIsId
            ? createReadManyQuerySpecPartitionKeyIdSame(entry.getValue(), partitionKeySelector)
            : createReadManyQuerySpec(entry.getValue(), partitionKeySelector);
        rangeQueryMap.put(entry.getKey(), spec);
    }
    return rangeQueryMap;
}
/**
 * Builds "SELECT * FROM c WHERE c.id IN ( @param0, ... )" for the pairs whose id
 * equals their partition-key value (the only pairs this id-as-pk fast path serves).
 * Pairs where the two differ are skipped.
 */
private SqlQuerySpec createReadManyQuerySpecPartitionKeyIdSame(List<Pair<String, PartitionKey>> idPartitionKeyPairList, String partitionKeySelector) {
    List<SqlParameter> parameters = new ArrayList<>();
    List<String> paramNames = new ArrayList<>();
    for (int i = 0; i < idPartitionKeyPairList.size(); i++) {
        Pair<String, PartitionKey> pair = idPartitionKeyPairList.get(i);
        String idValue = pair.getLeft();
        PartitionKey pkValueAsPartitionKey = pair.getRight();
        Object pkValue = ModelBridgeInternal.getPartitionKeyObject(pkValueAsPartitionKey);
        // Only pairs whose id equals the partition-key value can be served by an id-only query.
        if (!Objects.equals(idValue, pkValue)) {
            continue;
        }
        String idParamName = "@param" + i;
        parameters.add(new SqlParameter(idParamName, idValue));
        paramNames.add(idParamName);
    }
    // Join AFTER filtering: the previous index-based separator logic emitted a
    // dangling ", " (invalid SQL) when a skipped pair was last in the list.
    StringBuilder queryStringBuilder = new StringBuilder();
    queryStringBuilder.append("SELECT * FROM c WHERE c.id IN ( ");
    queryStringBuilder.append(String.join(", ", paramNames));
    queryStringBuilder.append(" )");
    return new SqlQuerySpec(queryStringBuilder.toString(), parameters);
}
/**
 * Builds a read-many query of the form
 * "SELECT * FROM c WHERE ( (c.id = @p1 AND  c[pk] = @p0 ) OR ... )",
 * with two parameters per item: even indices hold partition-key values,
 * odd indices hold ids.
 */
private SqlQuerySpec createReadManyQuerySpec(List<Pair<String, PartitionKey>> idPartitionKeyPairList, String partitionKeySelector) {
    StringBuilder queryStringBuilder = new StringBuilder();
    List<SqlParameter> parameters = new ArrayList<>();
    queryStringBuilder.append("SELECT * FROM c WHERE ( ");
    for (int i = 0; i < idPartitionKeyPairList.size(); i++) {
        Pair<String, PartitionKey> pair = idPartitionKeyPairList.get(i);
        PartitionKey pkValueAsPartitionKey = pair.getRight();
        Object pkValue = ModelBridgeInternal.getPartitionKeyObject(pkValueAsPartitionKey);
        // Parameter naming scheme: @param(2i) = partition key, @param(2i+1) = id.
        String pkParamName = "@param" + (2 * i);
        parameters.add(new SqlParameter(pkParamName, pkValue));
        String idValue = pair.getLeft();
        String idParamName = "@param" + (2 * i + 1);
        parameters.add(new SqlParameter(idParamName, idValue));
        queryStringBuilder.append("(");
        queryStringBuilder.append("c.id = ");
        queryStringBuilder.append(idParamName);
        queryStringBuilder.append(" AND ");
        queryStringBuilder.append(" c");
        queryStringBuilder.append(partitionKeySelector);
        queryStringBuilder.append((" = "));
        queryStringBuilder.append(pkParamName);
        queryStringBuilder.append(" )");
        // Separator only between clauses, not after the last one.
        if (i < idPartitionKeyPairList.size() - 1) {
            queryStringBuilder.append(" OR ");
        }
    }
    queryStringBuilder.append(" )");
    return new SqlQuerySpec(queryStringBuilder.toString(), parameters);
}
/**
 * Converts partition-key paths like "/pk" into the bracketed selector form
 * used in generated SQL, e.g. ["pk"]; multiple paths are concatenated.
 */
private String createPkSelector(PartitionKeyDefinition partitionKeyDefinition) {
    StringBuilder selector = new StringBuilder();
    for (String path : partitionKeyDefinition.getPaths()) {
        // Drop the leading '/' and escape embedded double quotes.
        String trimmed = StringUtils.substring(path, 1);
        String escaped = StringUtils.replace(trimmed, "\"", "\\");
        selector.append("[\"").append(escaped).append("\"]");
    }
    return selector.toString();
}
/**
 * Creates and runs a read-many query execution context. The per-range queries in
 * {@code rangeQueryMap} drive the actual requests; {@code sqlQuery} is only the
 * placeholder required by the context factory.
 */
private <T extends Resource> Flux<FeedResponse<T>> createReadManyQuery(
    String parentResourceLink,
    SqlQuerySpec sqlQuery,
    FeedOptions options,
    Class<T> klass,
    ResourceType resourceTypeEnum,
    DocumentCollection collection,
    Map<PartitionKeyRange, SqlQuerySpec> rangeQueryMap) {
    UUID activityId = Utils.randomUUID();
    IDocumentQueryClient queryClient = documentQueryClientImpl(RxDocumentClientImpl.this);
    Flux<? extends IDocumentQueryExecutionContext<T>> executionContext =
        DocumentQueryExecutionContextFactory.createReadManyQueryAsync(queryClient, collection.getResourceId(),
            sqlQuery,
            rangeQueryMap,
            options,
            collection.getResourceId(),
            parentResourceLink,
            activityId,
            klass,
            resourceTypeEnum);
    return executionContext.flatMap(IDocumentQueryExecutionContext<T>::executeAsync);
}
// Convenience overload: wraps the raw query string in a SqlQuerySpec.
@Override
public Flux<FeedResponse<Document>> queryDocuments(String collectionLink, String query,
                                                   FeedOptions options) {
    SqlQuerySpec querySpec = new SqlQuerySpec(query);
    return queryDocuments(collectionLink, querySpec, options);
}
/**
 * Adapts this client to the IDocumentQueryClient interface consumed by the query
 * execution pipeline, delegating caches, retry policy, consistency levels and
 * query execution back to the enclosing RxDocumentClientImpl.
 */
private IDocumentQueryClient documentQueryClientImpl(RxDocumentClientImpl rxDocumentClientImpl) {
    return new IDocumentQueryClient () {
        @Override
        public RxCollectionCache getCollectionCache() {
            return RxDocumentClientImpl.this.collectionCache;
        }
        @Override
        public RxPartitionKeyRangeCache getPartitionKeyRangeCache() {
            return RxDocumentClientImpl.this.partitionKeyRangeCache;
        }
        @Override
        public IRetryPolicyFactory getResetSessionTokenRetryPolicy() {
            return RxDocumentClientImpl.this.resetSessionTokenRetryPolicy;
        }
        @Override
        public ConsistencyLevel getDefaultConsistencyLevelAsync() {
            // Account-level default consistency, as reported by the gateway.
            return RxDocumentClientImpl.this.gatewayConfigurationReader.getDefaultConsistencyLevel();
        }
        @Override
        public ConsistencyLevel getDesiredConsistencyLevelAsync() {
            // Client-configured consistency override (may be null).
            return RxDocumentClientImpl.this.consistencyLevel;
        }
        @Override
        public Mono<RxDocumentServiceResponse> executeQueryAsync(RxDocumentServiceRequest request) {
            return RxDocumentClientImpl.this.query(request).single();
        }
        @Override
        public QueryCompatibilityMode getQueryCompatibilityMode() {
            return QueryCompatibilityMode.Default;
        }
        @Override
        public Mono<RxDocumentServiceResponse> readFeedAsync(RxDocumentServiceRequest request) {
            // Not supported through this adapter.
            return null;
        }
    };
}
// Executes a parameterized document query against the given collection.
@Override
public Flux<FeedResponse<Document>> queryDocuments(String collectionLink, SqlQuerySpec querySpec,
                                                   FeedOptions options) {
    return createQuery(collectionLink, querySpec, options, Document.class, ResourceType.Document);
}
// Starts a change-feed query over the given collection.
@Override
public Flux<FeedResponse<Document>> queryDocumentChangeFeed(final String collectionLink,
                                                            final ChangeFeedOptions changeFeedOptions) {
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }
    ChangeFeedQueryImpl<Document> changeFeedQuery =
        new ChangeFeedQueryImpl<>(this, ResourceType.Document, Document.class, collectionLink, changeFeedOptions);
    return changeFeedQuery.executeAsync();
}
// Reads the partition-key-range feed of a collection.
@Override
public Flux<FeedResponse<PartitionKeyRange>> readPartitionKeyRanges(final String collectionLink,
                                                                    FeedOptions options) {
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }
    String feedPath = Utils.joinPath(collectionLink, Paths.PARTITION_KEY_RANGES_PATH_SEGMENT);
    return readFeed(options, ResourceType.PartitionKeyRange, PartitionKeyRange.class, feedPath);
}
/**
 * Validates inputs and builds a service request targeting the stored-procedures
 * feed of the given collection.
 */
private RxDocumentServiceRequest getStoredProcedureRequest(String collectionLink, StoredProcedure storedProcedure,
                                                           RequestOptions options, OperationType operationType) {
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }
    if (storedProcedure == null) {
        throw new IllegalArgumentException("storedProcedure");
    }
    validateResource(storedProcedure);
    String requestPath = Utils.joinPath(collectionLink, Paths.STORED_PROCEDURES_PATH_SEGMENT);
    Map<String, String> headers = this.getRequestHeaders(options);
    return RxDocumentServiceRequest.create(operationType, ResourceType.StoredProcedure,
        requestPath, storedProcedure, headers, options);
}
/**
 * Validates inputs and builds a service request targeting the user-defined
 * functions feed of the given collection.
 */
private RxDocumentServiceRequest getUserDefinedFunctionRequest(String collectionLink, UserDefinedFunction udf,
                                                               RequestOptions options, OperationType operationType) {
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }
    if (udf == null) {
        throw new IllegalArgumentException("udf");
    }
    validateResource(udf);
    String requestPath = Utils.joinPath(collectionLink, Paths.USER_DEFINED_FUNCTIONS_PATH_SEGMENT);
    Map<String, String> headers = this.getRequestHeaders(options);
    return RxDocumentServiceRequest.create(operationType, ResourceType.UserDefinedFunction,
        requestPath, udf, headers, options);
}
// Creates a stored procedure in the given collection under the standard
// session-token-reset retry policy.
@Override
public Mono<ResourceResponse<StoredProcedure>> createStoredProcedure(String collectionLink,
                                                                     StoredProcedure storedProcedure, RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> createStoredProcedureInternal(collectionLink, storedProcedure, options, retryPolicy),
        retryPolicy);
}
private Mono<ResourceResponse<StoredProcedure>> createStoredProcedureInternal(String collectionLink,
StoredProcedure storedProcedure, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
logger.debug("Creating a StoredProcedure. collectionLink: [{}], storedProcedure id [{}]",
collectionLink, storedProcedure.getId());
RxDocumentServiceRequest request = getStoredProcedureRequest(collectionLink, storedProcedure, options,
OperationType.Create);
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.create(request, retryPolicyInstance).map(response -> toResourceResponse(response, StoredProcedure.class));
} catch (Exception e) {
logger.debug("Failure in creating a StoredProcedure due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
/**
 * Upserts a stored procedure in the collection identified by {@code collectionLink},
 * running the operation under a fresh session-token-resetting retry policy.
 */
@Override
public Mono<ResourceResponse<StoredProcedure>> upsertStoredProcedure(String collectionLink,
StoredProcedure storedProcedure, RequestOptions options) {
DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
return ObservableHelper.inlineIfPossibleAsObs(() -> upsertStoredProcedureInternal(collectionLink, storedProcedure, options, requestRetryPolicy), requestRetryPolicy);
}
// Builds the Upsert request, lets the retry policy observe it, issues the call, and maps
// the wire response to a typed ResourceResponse. Synchronous failures surface as Mono.error.
private Mono<ResourceResponse<StoredProcedure>> upsertStoredProcedureInternal(String collectionLink,
StoredProcedure storedProcedure, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
logger.debug("Upserting a StoredProcedure. collectionLink: [{}], storedProcedure id [{}]",
collectionLink, storedProcedure.getId());
RxDocumentServiceRequest request = getStoredProcedureRequest(collectionLink, storedProcedure, options,
OperationType.Upsert);
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.upsert(request, retryPolicyInstance).map(response -> toResourceResponse(response, StoredProcedure.class));
} catch (Exception e) {
logger.debug("Failure in upserting a StoredProcedure due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
/**
 * Replaces an existing stored procedure, addressing it via its self link,
 * under a fresh session-token-resetting retry policy.
 */
@Override
public Mono<ResourceResponse<StoredProcedure>> replaceStoredProcedure(StoredProcedure storedProcedure,
RequestOptions options) {
DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
return ObservableHelper.inlineIfPossibleAsObs(() -> replaceStoredProcedureInternal(storedProcedure, options, requestRetryPolicy), requestRetryPolicy);
}
// Validates the resource, builds the Replace request against the self link, lets the retry
// policy observe it, and maps the response. Synchronous failures surface as Mono.error.
private Mono<ResourceResponse<StoredProcedure>> replaceStoredProcedureInternal(StoredProcedure storedProcedure,
RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (storedProcedure == null) {
throw new IllegalArgumentException("storedProcedure");
}
logger.debug("Replacing a StoredProcedure. storedProcedure id [{}]", storedProcedure.getId());
RxDocumentClientImpl.validateResource(storedProcedure);
String path = Utils.joinPath(storedProcedure.getSelfLink(), null);
Map<String, String> requestHeaders = getRequestHeaders(options);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Replace,
ResourceType.StoredProcedure, path, storedProcedure, requestHeaders, options);
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.replace(request, retryPolicyInstance).map(response -> toResourceResponse(response, StoredProcedure.class));
} catch (Exception e) {
logger.debug("Failure in replacing a StoredProcedure due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
/**
 * Deletes the stored procedure identified by {@code storedProcedureLink}
 * under a fresh session-token-resetting retry policy.
 */
@Override
public Mono<ResourceResponse<StoredProcedure>> deleteStoredProcedure(String storedProcedureLink,
RequestOptions options) {
DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
return ObservableHelper.inlineIfPossibleAsObs(() -> deleteStoredProcedureInternal(storedProcedureLink, options, requestRetryPolicy), requestRetryPolicy);
}
// Builds the Delete request, lets the retry policy observe it, and maps the response.
// Synchronous failures surface as Mono.error.
private Mono<ResourceResponse<StoredProcedure>> deleteStoredProcedureInternal(String storedProcedureLink,
RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (StringUtils.isEmpty(storedProcedureLink)) {
throw new IllegalArgumentException("storedProcedureLink");
}
logger.debug("Deleting a StoredProcedure. storedProcedureLink [{}]", storedProcedureLink);
String path = Utils.joinPath(storedProcedureLink, null);
Map<String, String> requestHeaders = this.getRequestHeaders(options);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Delete,
ResourceType.StoredProcedure, path, requestHeaders, options);
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.delete(request, retryPolicyInstance).map(response -> toResourceResponse(response, StoredProcedure.class));
} catch (Exception e) {
logger.debug("Failure in deleting a StoredProcedure due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
/**
 * Reads the stored procedure identified by {@code storedProcedureLink}
 * under a fresh session-token-resetting retry policy.
 */
@Override
public Mono<ResourceResponse<StoredProcedure>> readStoredProcedure(String storedProcedureLink,
RequestOptions options) {
DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy();
return ObservableHelper.inlineIfPossibleAsObs(() -> readStoredProcedureInternal(storedProcedureLink, options, retryPolicyInstance), retryPolicyInstance);
}
// Builds the Read request, lets the retry policy observe it, and maps the response.
// Synchronous failures surface as Mono.error.
private Mono<ResourceResponse<StoredProcedure>> readStoredProcedureInternal(String storedProcedureLink,
RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (StringUtils.isEmpty(storedProcedureLink)) {
throw new IllegalArgumentException("storedProcedureLink");
}
logger.debug("Reading a StoredProcedure. storedProcedureLink [{}]", storedProcedureLink);
String path = Utils.joinPath(storedProcedureLink, null);
Map<String, String> requestHeaders = this.getRequestHeaders(options);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Read,
ResourceType.StoredProcedure, path, requestHeaders, options);
if (retryPolicyInstance != null){
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, StoredProcedure.class));
} catch (Exception e) {
logger.debug("Failure in reading a StoredProcedure due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
/**
 * Reads the stored-procedure feed of the collection identified by {@code collectionLink}.
 *
 * @throws IllegalArgumentException when {@code collectionLink} is empty.
 */
@Override
public Flux<FeedResponse<StoredProcedure>> readStoredProcedures(String collectionLink,
FeedOptions options) {
if (StringUtils.isEmpty(collectionLink)) {
throw new IllegalArgumentException("collectionLink");
}
return readFeed(options, ResourceType.StoredProcedure, StoredProcedure.class,
Utils.joinPath(collectionLink, Paths.STORED_PROCEDURES_PATH_SEGMENT));
}
/** Queries stored procedures with a raw query string; delegates to the SqlQuerySpec overload. */
@Override
public Flux<FeedResponse<StoredProcedure>> queryStoredProcedures(String collectionLink, String query,
FeedOptions options) {
return queryStoredProcedures(collectionLink, new SqlQuerySpec(query), options);
}
/** Queries stored procedures with a parameterized query spec. */
@Override
public Flux<FeedResponse<StoredProcedure>> queryStoredProcedures(String collectionLink,
SqlQuerySpec querySpec, FeedOptions options) {
return createQuery(collectionLink, querySpec, options, StoredProcedure.class, ResourceType.StoredProcedure);
}
/** Executes a stored procedure with default request options; delegates to the full overload. */
@Override
public Mono<StoredProcedureResponse> executeStoredProcedure(String storedProcedureLink,
Object[] procedureParams) {
return this.executeStoredProcedure(storedProcedureLink, null, procedureParams);
}
/**
 * Executes the stored procedure identified by {@code storedProcedureLink} with the given
 * parameters, under a fresh session-token-resetting retry policy.
 */
@Override
public Mono<StoredProcedureResponse> executeStoredProcedure(String storedProcedureLink,
RequestOptions options, Object[] procedureParams) {
DocumentClientRetryPolicy documentClientRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
return ObservableHelper.inlineIfPossibleAsObs(() -> executeStoredProcedureInternal(storedProcedureLink, options, procedureParams, documentClientRetryPolicy), documentClientRetryPolicy);
}
/**
 * Builds and issues the ExecuteJavaScript request for a stored procedure.
 * Parameters are serialized to a JSON array body (empty string when null); the request is
 * enriched with partition-key information before being sent, and the response's session
 * token is captured before mapping to a StoredProcedureResponse.
 * Synchronous failures surface as Mono.error.
 */
private Mono<StoredProcedureResponse> executeStoredProcedureInternal(String storedProcedureLink,
    RequestOptions options, Object[] procedureParams, DocumentClientRetryPolicy retryPolicy) {
    try {
        logger.debug("Executing a StoredProcedure. storedProcedureLink [{}]", storedProcedureLink);
        String path = Utils.joinPath(storedProcedureLink, null);
        Map<String, String> requestHeaders = getRequestHeaders(options);
        requestHeaders.put(HttpConstants.HttpHeaders.ACCEPT, RuntimeConstants.MediaTypes.JSON);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.ExecuteJavaScript,
            ResourceType.StoredProcedure, path,
            procedureParams != null ? RxDocumentClientImpl.serializeProcedureParams(procedureParams) : "",
            requestHeaders, options);
        Mono<RxDocumentServiceRequest> reqObs = addPartitionKeyInformation(request, null, null, options);
        // Use the request emitted by addPartitionKeyInformation ("req") rather than the
        // captured local: the partition-key-enriched request must be the one that is sent
        // and whose session token is captured.
        return reqObs.flatMap(req -> create(req, retryPolicy)
            .map(response -> {
                this.captureSessionToken(req, response);
                return toStoredProcedureResponse(response);
            }));
    } catch (Exception e) {
        logger.debug("Failure in executing a StoredProcedure due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Creates a trigger in the collection identified by {@code collectionLink}
 * under a fresh session-token-resetting retry policy.
 */
@Override
public Mono<ResourceResponse<Trigger>> createTrigger(String collectionLink, Trigger trigger,
RequestOptions options) {
DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy();
return ObservableHelper.inlineIfPossibleAsObs(() -> createTriggerInternal(collectionLink, trigger, options, retryPolicyInstance), retryPolicyInstance);
}
// Builds the Create request, lets the retry policy observe it, and maps the response.
// Synchronous failures surface as Mono.error.
private Mono<ResourceResponse<Trigger>> createTriggerInternal(String collectionLink, Trigger trigger,
RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
logger.debug("Creating a Trigger. collectionLink [{}], trigger id [{}]", collectionLink,
trigger.getId());
RxDocumentServiceRequest request = getTriggerRequest(collectionLink, trigger, options,
OperationType.Create);
if (retryPolicyInstance != null){
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.create(request, retryPolicyInstance).map(response -> toResourceResponse(response, Trigger.class));
} catch (Exception e) {
logger.debug("Failure in creating a Trigger due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
/**
 * Upserts a trigger in the collection identified by {@code collectionLink}
 * under a fresh session-token-resetting retry policy.
 */
@Override
public Mono<ResourceResponse<Trigger>> upsertTrigger(String collectionLink, Trigger trigger,
RequestOptions options) {
DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy();
return ObservableHelper.inlineIfPossibleAsObs(() -> upsertTriggerInternal(collectionLink, trigger, options, retryPolicyInstance), retryPolicyInstance);
}
// Builds the Upsert request, lets the retry policy observe it, and maps the response.
// Synchronous failures surface as Mono.error.
private Mono<ResourceResponse<Trigger>> upsertTriggerInternal(String collectionLink, Trigger trigger,
RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
logger.debug("Upserting a Trigger. collectionLink [{}], trigger id [{}]", collectionLink,
trigger.getId());
RxDocumentServiceRequest request = getTriggerRequest(collectionLink, trigger, options,
OperationType.Upsert);
if (retryPolicyInstance != null){
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.upsert(request, retryPolicyInstance).map(response -> toResourceResponse(response, Trigger.class));
} catch (Exception e) {
logger.debug("Failure in upserting a Trigger due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
/**
 * Builds the service request for a trigger write (create/upsert) against the collection
 * identified by {@code collectionLink}.
 *
 * @throws IllegalArgumentException when {@code collectionLink} is empty or {@code trigger} is null.
 */
private RxDocumentServiceRequest getTriggerRequest(String collectionLink, Trigger trigger, RequestOptions options,
OperationType operationType) {
if (StringUtils.isEmpty(collectionLink)) {
throw new IllegalArgumentException("collectionLink");
}
if (trigger == null) {
throw new IllegalArgumentException("trigger");
}
RxDocumentClientImpl.validateResource(trigger);
String path = Utils.joinPath(collectionLink, Paths.TRIGGERS_PATH_SEGMENT);
Map<String, String> requestHeaders = getRequestHeaders(options);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(operationType, ResourceType.Trigger, path,
trigger, requestHeaders, options);
return request;
}
/**
 * Replaces an existing trigger, addressing it via its self link,
 * under a fresh session-token-resetting retry policy.
 */
@Override
public Mono<ResourceResponse<Trigger>> replaceTrigger(Trigger trigger, RequestOptions options) {
DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy();
return ObservableHelper.inlineIfPossibleAsObs(() -> replaceTriggerInternal(trigger, options, retryPolicyInstance), retryPolicyInstance);
}
// Validates the resource, builds the Replace request against the self link, lets the retry
// policy observe it, and maps the response. Synchronous failures surface as Mono.error.
private Mono<ResourceResponse<Trigger>> replaceTriggerInternal(Trigger trigger, RequestOptions options,
DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (trigger == null) {
throw new IllegalArgumentException("trigger");
}
logger.debug("Replacing a Trigger. trigger id [{}]", trigger.getId());
RxDocumentClientImpl.validateResource(trigger);
String path = Utils.joinPath(trigger.getSelfLink(), null);
Map<String, String> requestHeaders = getRequestHeaders(options);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Replace,
ResourceType.Trigger, path, trigger, requestHeaders, options);
if (retryPolicyInstance != null){
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.replace(request, retryPolicyInstance).map(response -> toResourceResponse(response, Trigger.class));
} catch (Exception e) {
logger.debug("Failure in replacing a Trigger due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
/**
 * Deletes the trigger identified by {@code triggerLink}
 * under a fresh session-token-resetting retry policy.
 */
@Override
public Mono<ResourceResponse<Trigger>> deleteTrigger(String triggerLink, RequestOptions options) {
DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy();
return ObservableHelper.inlineIfPossibleAsObs(() -> deleteTriggerInternal(triggerLink, options, retryPolicyInstance), retryPolicyInstance);
}
// Builds the Delete request, lets the retry policy observe it, and maps the response.
// Synchronous failures surface as Mono.error.
private Mono<ResourceResponse<Trigger>> deleteTriggerInternal(String triggerLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (StringUtils.isEmpty(triggerLink)) {
throw new IllegalArgumentException("triggerLink");
}
logger.debug("Deleting a Trigger. triggerLink [{}]", triggerLink);
String path = Utils.joinPath(triggerLink, null);
Map<String, String> requestHeaders = getRequestHeaders(options);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Delete,
ResourceType.Trigger, path, requestHeaders, options);
if (retryPolicyInstance != null){
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.delete(request, retryPolicyInstance).map(response -> toResourceResponse(response, Trigger.class));
} catch (Exception e) {
logger.debug("Failure in deleting a Trigger due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
/**
 * Reads the trigger identified by {@code triggerLink}
 * under a fresh session-token-resetting retry policy.
 */
@Override
public Mono<ResourceResponse<Trigger>> readTrigger(String triggerLink, RequestOptions options) {
DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy();
return ObservableHelper.inlineIfPossibleAsObs(() -> readTriggerInternal(triggerLink, options, retryPolicyInstance), retryPolicyInstance);
}
// Builds the Read request, lets the retry policy observe it, and maps the response.
// Synchronous failures surface as Mono.error.
private Mono<ResourceResponse<Trigger>> readTriggerInternal(String triggerLink, RequestOptions options,
DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (StringUtils.isEmpty(triggerLink)) {
throw new IllegalArgumentException("triggerLink");
}
logger.debug("Reading a Trigger. triggerLink [{}]", triggerLink);
String path = Utils.joinPath(triggerLink, null);
Map<String, String> requestHeaders = getRequestHeaders(options);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Read,
ResourceType.Trigger, path, requestHeaders, options);
if (retryPolicyInstance != null){
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, Trigger.class));
} catch (Exception e) {
logger.debug("Failure in reading a Trigger due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
/**
 * Reads the trigger feed of the collection identified by {@code collectionLink}.
 *
 * @throws IllegalArgumentException when {@code collectionLink} is empty.
 */
@Override
public Flux<FeedResponse<Trigger>> readTriggers(String collectionLink, FeedOptions options) {
if (StringUtils.isEmpty(collectionLink)) {
throw new IllegalArgumentException("collectionLink");
}
return readFeed(options, ResourceType.Trigger, Trigger.class,
Utils.joinPath(collectionLink, Paths.TRIGGERS_PATH_SEGMENT));
}
/** Queries triggers with a raw query string; delegates to the SqlQuerySpec overload. */
@Override
public Flux<FeedResponse<Trigger>> queryTriggers(String collectionLink, String query,
FeedOptions options) {
return queryTriggers(collectionLink, new SqlQuerySpec(query), options);
}
/** Queries triggers with a parameterized query spec. */
@Override
public Flux<FeedResponse<Trigger>> queryTriggers(String collectionLink, SqlQuerySpec querySpec,
FeedOptions options) {
return createQuery(collectionLink, querySpec, options, Trigger.class, ResourceType.Trigger);
}
/**
 * Creates a user-defined function in the collection identified by {@code collectionLink}
 * under a fresh session-token-resetting retry policy.
 */
@Override
public Mono<ResourceResponse<UserDefinedFunction>> createUserDefinedFunction(String collectionLink,
UserDefinedFunction udf, RequestOptions options) {
DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy();
return ObservableHelper.inlineIfPossibleAsObs(() -> createUserDefinedFunctionInternal(collectionLink, udf, options, retryPolicyInstance), retryPolicyInstance);
}
// Builds the Create request, lets the retry policy observe it, and maps the response.
// Synchronous failures surface as Mono.error.
private Mono<ResourceResponse<UserDefinedFunction>> createUserDefinedFunctionInternal(String collectionLink,
UserDefinedFunction udf, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
logger.debug("Creating a UserDefinedFunction. collectionLink [{}], udf id [{}]", collectionLink,
udf.getId());
RxDocumentServiceRequest request = getUserDefinedFunctionRequest(collectionLink, udf, options,
OperationType.Create);
if (retryPolicyInstance != null){
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.create(request, retryPolicyInstance).map(response -> toResourceResponse(response, UserDefinedFunction.class));
} catch (Exception e) {
logger.debug("Failure in creating a UserDefinedFunction due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
/**
 * Upserts a user-defined function in the collection identified by {@code collectionLink}
 * under a fresh session-token-resetting retry policy.
 */
@Override
public Mono<ResourceResponse<UserDefinedFunction>> upsertUserDefinedFunction(String collectionLink,
UserDefinedFunction udf, RequestOptions options) {
DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy();
return ObservableHelper.inlineIfPossibleAsObs(() -> upsertUserDefinedFunctionInternal(collectionLink, udf, options, retryPolicyInstance), retryPolicyInstance);
}
// Builds the Upsert request, lets the retry policy observe it, and maps the response.
// Synchronous failures surface as Mono.error.
private Mono<ResourceResponse<UserDefinedFunction>> upsertUserDefinedFunctionInternal(String collectionLink,
UserDefinedFunction udf, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
logger.debug("Upserting a UserDefinedFunction. collectionLink [{}], udf id [{}]", collectionLink,
udf.getId());
RxDocumentServiceRequest request = getUserDefinedFunctionRequest(collectionLink, udf, options,
OperationType.Upsert);
if (retryPolicyInstance != null){
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.upsert(request, retryPolicyInstance).map(response -> toResourceResponse(response, UserDefinedFunction.class));
} catch (Exception e) {
logger.debug("Failure in upserting a UserDefinedFunction due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
/**
 * Replaces an existing user-defined function, addressing it via its self link,
 * under a fresh session-token-resetting retry policy.
 */
@Override
public Mono<ResourceResponse<UserDefinedFunction>> replaceUserDefinedFunction(UserDefinedFunction udf,
RequestOptions options) {
DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy();
return ObservableHelper.inlineIfPossibleAsObs(() -> replaceUserDefinedFunctionInternal(udf, options, retryPolicyInstance), retryPolicyInstance);
}
// Validates the resource, builds the Replace request against the self link, lets the retry
// policy observe it, and maps the response. Synchronous failures surface as Mono.error.
private Mono<ResourceResponse<UserDefinedFunction>> replaceUserDefinedFunctionInternal(UserDefinedFunction udf,
RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (udf == null) {
throw new IllegalArgumentException("udf");
}
logger.debug("Replacing a UserDefinedFunction. udf id [{}]", udf.getId());
validateResource(udf);
String path = Utils.joinPath(udf.getSelfLink(), null);
Map<String, String> requestHeaders = this.getRequestHeaders(options);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Replace,
ResourceType.UserDefinedFunction, path, udf, requestHeaders, options);
if (retryPolicyInstance != null){
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.replace(request, retryPolicyInstance).map(response -> toResourceResponse(response, UserDefinedFunction.class));
} catch (Exception e) {
logger.debug("Failure in replacing a UserDefinedFunction due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
/**
 * Deletes the user-defined function identified by {@code udfLink}
 * under a fresh session-token-resetting retry policy.
 */
@Override
public Mono<ResourceResponse<UserDefinedFunction>> deleteUserDefinedFunction(String udfLink,
RequestOptions options) {
DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy();
return ObservableHelper.inlineIfPossibleAsObs(() -> deleteUserDefinedFunctionInternal(udfLink, options, retryPolicyInstance), retryPolicyInstance);
}
// Builds the Delete request, lets the retry policy observe it, and maps the response.
// Synchronous failures surface as Mono.error.
private Mono<ResourceResponse<UserDefinedFunction>> deleteUserDefinedFunctionInternal(String udfLink,
RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (StringUtils.isEmpty(udfLink)) {
throw new IllegalArgumentException("udfLink");
}
logger.debug("Deleting a UserDefinedFunction. udfLink [{}]", udfLink);
String path = Utils.joinPath(udfLink, null);
Map<String, String> requestHeaders = this.getRequestHeaders(options);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Delete,
ResourceType.UserDefinedFunction, path, requestHeaders, options);
if (retryPolicyInstance != null){
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.delete(request, retryPolicyInstance).map(response -> toResourceResponse(response, UserDefinedFunction.class));
} catch (Exception e) {
logger.debug("Failure in deleting a UserDefinedFunction due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
/**
 * Reads the user-defined function identified by {@code udfLink}
 * under a fresh session-token-resetting retry policy.
 */
@Override
public Mono<ResourceResponse<UserDefinedFunction>> readUserDefinedFunction(String udfLink,
RequestOptions options) {
DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy();
return ObservableHelper.inlineIfPossibleAsObs(() -> readUserDefinedFunctionInternal(udfLink, options, retryPolicyInstance), retryPolicyInstance);
}
// Builds the Read request, lets the retry policy observe it, and maps the response.
// Synchronous failures surface as Mono.error.
private Mono<ResourceResponse<UserDefinedFunction>> readUserDefinedFunctionInternal(String udfLink,
RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (StringUtils.isEmpty(udfLink)) {
throw new IllegalArgumentException("udfLink");
}
logger.debug("Reading a UserDefinedFunction. udfLink [{}]", udfLink);
String path = Utils.joinPath(udfLink, null);
Map<String, String> requestHeaders = this.getRequestHeaders(options);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Read,
ResourceType.UserDefinedFunction, path, requestHeaders, options);
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, UserDefinedFunction.class));
} catch (Exception e) {
logger.debug("Failure in reading a UserDefinedFunction due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
/**
 * Reads the user-defined-function feed of the collection identified by {@code collectionLink}.
 *
 * @throws IllegalArgumentException when {@code collectionLink} is empty.
 */
@Override
public Flux<FeedResponse<UserDefinedFunction>> readUserDefinedFunctions(String collectionLink,
FeedOptions options) {
if (StringUtils.isEmpty(collectionLink)) {
throw new IllegalArgumentException("collectionLink");
}
return readFeed(options, ResourceType.UserDefinedFunction, UserDefinedFunction.class,
Utils.joinPath(collectionLink, Paths.USER_DEFINED_FUNCTIONS_PATH_SEGMENT));
}
/** Queries user-defined functions with a raw query string; delegates to the SqlQuerySpec overload. */
@Override
public Flux<FeedResponse<UserDefinedFunction>> queryUserDefinedFunctions(String collectionLink,
String query, FeedOptions options) {
return queryUserDefinedFunctions(collectionLink, new SqlQuerySpec(query), options);
}
/** Queries user-defined functions with a parameterized query spec. */
@Override
public Flux<FeedResponse<UserDefinedFunction>> queryUserDefinedFunctions(String collectionLink,
SqlQuerySpec querySpec, FeedOptions options) {
return createQuery(collectionLink, querySpec, options, UserDefinedFunction.class, ResourceType.UserDefinedFunction);
}
/**
 * Reads the conflict identified by {@code conflictLink}
 * under a fresh session-token-resetting retry policy.
 */
@Override
public Mono<ResourceResponse<Conflict>> readConflict(String conflictLink, RequestOptions options) {
    DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(() -> readConflictInternal(conflictLink, options, retryPolicyInstance), retryPolicyInstance);
}

/**
 * Builds the Read request, enriches it with partition-key information, lets the retry
 * policy observe it, and maps the wire response to ResourceResponse&lt;Conflict&gt;.
 * Synchronous failures surface as Mono.error.
 */
private Mono<ResourceResponse<Conflict>> readConflictInternal(String conflictLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(conflictLink)) {
            throw new IllegalArgumentException("conflictLink");
        }
        logger.debug("Reading a Conflict. conflictLink [{}]", conflictLink);
        String path = Utils.joinPath(conflictLink, null);
        Map<String, String> requestHeaders = getRequestHeaders(options);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Read,
            ResourceType.Conflict, path, requestHeaders, options);
        Mono<RxDocumentServiceRequest> reqObs = addPartitionKeyInformation(request, null, null, options);
        return reqObs.flatMap(req -> {
            // Use the request emitted by addPartitionKeyInformation ("req") rather than the
            // captured local, so the partition-key-enriched request is the one inspected and sent.
            if (retryPolicyInstance != null) {
                retryPolicyInstance.onBeforeSendRequest(req);
            }
            return this.read(req, retryPolicyInstance).map(response -> toResourceResponse(response, Conflict.class));
        });
    } catch (Exception e) {
        logger.debug("Failure in reading a Conflict due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Reads the conflict feed of the collection identified by {@code collectionLink}.
 *
 * @throws IllegalArgumentException when {@code collectionLink} is empty.
 */
@Override
public Flux<FeedResponse<Conflict>> readConflicts(String collectionLink, FeedOptions options) {
if (StringUtils.isEmpty(collectionLink)) {
throw new IllegalArgumentException("collectionLink");
}
return readFeed(options, ResourceType.Conflict, Conflict.class,
Utils.joinPath(collectionLink, Paths.CONFLICTS_PATH_SEGMENT));
}
/** Queries conflicts with a raw query string; delegates to the SqlQuerySpec overload. */
@Override
public Flux<FeedResponse<Conflict>> queryConflicts(String collectionLink, String query,
FeedOptions options) {
return queryConflicts(collectionLink, new SqlQuerySpec(query), options);
}
/** Queries conflicts with a parameterized query spec. */
@Override
public Flux<FeedResponse<Conflict>> queryConflicts(String collectionLink, SqlQuerySpec querySpec,
FeedOptions options) {
return createQuery(collectionLink, querySpec, options, Conflict.class, ResourceType.Conflict);
}
/**
 * Deletes the conflict identified by {@code conflictLink}
 * under a fresh session-token-resetting retry policy.
 */
@Override
public Mono<ResourceResponse<Conflict>> deleteConflict(String conflictLink, RequestOptions options) {
    DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(() -> deleteConflictInternal(conflictLink, options, retryPolicyInstance), retryPolicyInstance);
}

/**
 * Builds the Delete request, enriches it with partition-key information, lets the retry
 * policy observe it, and maps the wire response to ResourceResponse&lt;Conflict&gt;.
 * Synchronous failures surface as Mono.error.
 */
private Mono<ResourceResponse<Conflict>> deleteConflictInternal(String conflictLink, RequestOptions options,
    DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(conflictLink)) {
            throw new IllegalArgumentException("conflictLink");
        }
        logger.debug("Deleting a Conflict. conflictLink [{}]", conflictLink);
        String path = Utils.joinPath(conflictLink, null);
        Map<String, String> requestHeaders = getRequestHeaders(options);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Delete,
            ResourceType.Conflict, path, requestHeaders, options);
        Mono<RxDocumentServiceRequest> reqObs = addPartitionKeyInformation(request, null, null, options);
        return reqObs.flatMap(req -> {
            // Use the request emitted by addPartitionKeyInformation ("req") rather than the
            // captured local, so the partition-key-enriched request is the one inspected and sent.
            if (retryPolicyInstance != null) {
                retryPolicyInstance.onBeforeSendRequest(req);
            }
            return this.delete(req, retryPolicyInstance).map(response -> toResourceResponse(response, Conflict.class));
        });
    } catch (Exception e) {
        logger.debug("Failure in deleting a Conflict due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Creates a user in the database identified by {@code databaseLink}
 * under a fresh session-token-resetting retry policy.
 */
@Override
public Mono<ResourceResponse<User>> createUser(String databaseLink, User user, RequestOptions options) {
    DocumentClientRetryPolicy documentClientRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(() -> createUserInternal(databaseLink, user, options, documentClientRetryPolicy), documentClientRetryPolicy);
}

/**
 * Builds the Create request and issues it, mapping the wire response to
 * ResourceResponse&lt;User&gt;. Synchronous failures surface as Mono.error.
 */
private Mono<ResourceResponse<User>> createUserInternal(String databaseLink, User user, RequestOptions options, DocumentClientRetryPolicy documentClientRetryPolicy) {
    try {
        logger.debug("Creating a User. databaseLink [{}], user id [{}]", databaseLink, user.getId());
        RxDocumentServiceRequest request = getUserRequest(databaseLink, user, options, OperationType.Create);
        // Let the retry policy observe the request before it is sent — every sibling
        // *Internal method (e.g. upsertUserInternal) does this; previously this was missing here.
        if (documentClientRetryPolicy != null) {
            documentClientRetryPolicy.onBeforeSendRequest(request);
        }
        return this.create(request, documentClientRetryPolicy).map(response -> toResourceResponse(response, User.class));
    } catch (Exception e) {
        logger.debug("Failure in creating a User due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Upserts a user in the database identified by {@code databaseLink}
 * under a fresh session-token-resetting retry policy.
 */
@Override
public Mono<ResourceResponse<User>> upsertUser(String databaseLink, User user, RequestOptions options) {
DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy();
return ObservableHelper.inlineIfPossibleAsObs(() -> upsertUserInternal(databaseLink, user, options, retryPolicyInstance), retryPolicyInstance);
}
// Builds the Upsert request, lets the retry policy observe it, and maps the response.
// Synchronous failures surface as Mono.error.
private Mono<ResourceResponse<User>> upsertUserInternal(String databaseLink, User user, RequestOptions options,
DocumentClientRetryPolicy retryPolicyInstance) {
try {
logger.debug("Upserting a User. databaseLink [{}], user id [{}]", databaseLink, user.getId());
RxDocumentServiceRequest request = getUserRequest(databaseLink, user, options, OperationType.Upsert);
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.upsert(request, retryPolicyInstance).map(response -> toResourceResponse(response, User.class));
} catch (Exception e) {
logger.debug("Failure in upserting a User due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
/**
 * Builds the service request for a user write (create/upsert) against the database
 * identified by {@code databaseLink}.
 *
 * @throws IllegalArgumentException when {@code databaseLink} is empty or {@code user} is null.
 */
private RxDocumentServiceRequest getUserRequest(String databaseLink, User user, RequestOptions options,
OperationType operationType) {
if (StringUtils.isEmpty(databaseLink)) {
throw new IllegalArgumentException("databaseLink");
}
if (user == null) {
throw new IllegalArgumentException("user");
}
RxDocumentClientImpl.validateResource(user);
String path = Utils.joinPath(databaseLink, Paths.USERS_PATH_SEGMENT);
Map<String, String> requestHeaders = getRequestHeaders(options);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(operationType, ResourceType.User, path, user,
requestHeaders, options);
return request;
}
/**
 * Replaces an existing user, addressing it via its self link,
 * under a fresh session-token-resetting retry policy.
 */
@Override
public Mono<ResourceResponse<User>> replaceUser(User user, RequestOptions options) {
DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy();
return ObservableHelper.inlineIfPossibleAsObs(() -> replaceUserInternal(user, options, retryPolicyInstance), retryPolicyInstance);
}
// Validates the resource, builds the Replace request against the self link, lets the retry
// policy observe it, and maps the response. Synchronous failures surface as Mono.error.
private Mono<ResourceResponse<User>> replaceUserInternal(User user, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (user == null) {
throw new IllegalArgumentException("user");
}
logger.debug("Replacing a User. user id [{}]", user.getId());
RxDocumentClientImpl.validateResource(user);
String path = Utils.joinPath(user.getSelfLink(), null);
Map<String, String> requestHeaders = getRequestHeaders(options);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Replace,
ResourceType.User, path, user, requestHeaders, options);
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.replace(request, retryPolicyInstance).map(response -> toResourceResponse(response, User.class));
} catch (Exception e) {
logger.debug("Failure in replacing a User due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
public Mono<ResourceResponse<User>> deleteUser(String userLink, RequestOptions options) {
DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy();
return ObservableHelper.inlineIfPossibleAsObs(() -> deleteUserInternal(userLink, options, retryPolicyInstance), retryPolicyInstance);
}
/**
 * Builds and issues the Delete request for a User resource.
 *
 * @param userLink            link of the user to delete; must be non-empty.
 * @param options             request options used to populate headers (may be null).
 * @param retryPolicyInstance notified before the request is dispatched (may be null).
 * @return a Mono emitting the deletion response, or an error.
 */
private Mono<ResourceResponse<User>> deleteUserInternal(String userLink, RequestOptions options,
DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (StringUtils.isEmpty(userLink)) {
throw new IllegalArgumentException("userLink");
}
logger.debug("Deleting a User. userLink [{}]", userLink);
String path = Utils.joinPath(userLink, null);
Map<String, String> requestHeaders = getRequestHeaders(options);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Delete,
ResourceType.User, path, requestHeaders, options);
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.delete(request, retryPolicyInstance).map(response -> toResourceResponse(response, User.class));
} catch (Exception e) {
// Synchronous failures are surfaced through the reactive error channel.
logger.debug("Failure in deleting a User due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
/**
 * Reads the User addressed by {@code userLink}, retrying under a
 * session-token-aware retry policy.
 *
 * @param userLink link of the user to read.
 * @param options  request options (may be null).
 * @return a {@link Mono} emitting the user, or an error.
 */
@Override
public Mono<ResourceResponse<User>> readUser(String userLink, RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> readUserInternal(userLink, options, retryPolicy),
        retryPolicy);
}
/**
 * Builds and issues the Read request for a User resource.
 *
 * @param userLink            link of the user to read; must be non-empty.
 * @param options             request options used to populate headers (may be null).
 * @param retryPolicyInstance notified before the request is dispatched (may be null).
 * @return a Mono emitting the user, or an error.
 */
private Mono<ResourceResponse<User>> readUserInternal(String userLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (StringUtils.isEmpty(userLink)) {
throw new IllegalArgumentException("userLink");
}
logger.debug("Reading a User. userLink [{}]", userLink);
String path = Utils.joinPath(userLink, null);
Map<String, String> requestHeaders = getRequestHeaders(options);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Read,
ResourceType.User, path, requestHeaders, options);
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, User.class));
} catch (Exception e) {
// Synchronous failures are surfaced through the reactive error channel.
logger.debug("Failure in reading a User due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
/**
 * Reads the feed of Users under a database as a paged stream.
 *
 * @param databaseLink link of the owning database; must be non-empty.
 * @param options      feed options (may be null).
 * @return a {@link Flux} of feed pages of users.
 */
@Override
public Flux<FeedResponse<User>> readUsers(String databaseLink, FeedOptions options) {
    // The feed cannot be addressed without its parent link, so reject early.
    if (StringUtils.isEmpty(databaseLink)) {
        throw new IllegalArgumentException("databaseLink");
    }
    final String usersFeedLink = Utils.joinPath(databaseLink, Paths.USERS_PATH_SEGMENT);
    return readFeed(options, ResourceType.User, User.class, usersFeedLink);
}
/**
 * Queries Users under a database using raw query text.
 *
 * @param databaseLink link of the owning database.
 * @param query        the SQL query text.
 * @param options      feed options (may be null).
 * @return a {@link Flux} of feed pages of matching users.
 */
@Override
public Flux<FeedResponse<User>> queryUsers(String databaseLink, String query, FeedOptions options) {
    // Wrap the raw text and delegate to the SqlQuerySpec overload.
    final SqlQuerySpec querySpec = new SqlQuerySpec(query);
    return queryUsers(databaseLink, querySpec, options);
}
/**
 * Queries Users under a database using a parameterized query spec.
 *
 * @param databaseLink link of the owning database.
 * @param querySpec    the parameterized SQL query.
 * @param options      feed options (may be null).
 * @return a Flux of feed pages of matching users.
 */
@Override
public Flux<FeedResponse<User>> queryUsers(String databaseLink, SqlQuerySpec querySpec,
FeedOptions options) {
return createQuery(databaseLink, querySpec, options, User.class, ResourceType.User);
}
/**
 * Creates a Permission under the given user, retrying under a
 * session-token-aware retry policy.
 * <p>
 * Fix: previously this method passed a second, freshly created retry policy to
 * {@code inlineIfPossibleAsObs} while the request callback used
 * {@code documentClientRetryPolicy}. Every sibling method (upsert/replace/
 * delete/read) shares one instance for both roles, and the retry loop must
 * consult the same policy that observed the outgoing request via
 * {@code onBeforeSendRequest}; the single instance is now reused.
 *
 * @param userLink   link of the owning user.
 * @param permission the permission to create.
 * @param options    request options (may be null).
 * @return a {@link Mono} emitting the created permission, or an error.
 */
@Override
public Mono<ResourceResponse<Permission>> createPermission(String userLink, Permission permission,
    RequestOptions options) {
    DocumentClientRetryPolicy documentClientRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> createPermissionInternal(userLink, permission, options, documentClientRetryPolicy),
        documentClientRetryPolicy);
}
/**
 * Builds and issues the Create request for a Permission resource.
 *
 * @param userLink                  link of the owning user.
 * @param permission                the permission to create (validated by getPermissionRequest).
 * @param options                   request options (may be null).
 * @param documentClientRetryPolicy retry policy associated with this operation.
 * @return a Mono emitting the created permission, or an error.
 */
private Mono<ResourceResponse<Permission>> createPermissionInternal(String userLink, Permission permission,
RequestOptions options, DocumentClientRetryPolicy documentClientRetryPolicy) {
try {
logger.debug("Creating a Permission. userLink [{}], permission id [{}]", userLink, permission.getId());
// Argument validation happens inside getPermissionRequest.
RxDocumentServiceRequest request = getPermissionRequest(userLink, permission, options,
OperationType.Create);
return this.create(request, documentClientRetryPolicy).map(response -> toResourceResponse(response, Permission.class));
} catch (Exception e) {
// Synchronous failures are surfaced through the reactive error channel.
logger.debug("Failure in creating a Permission due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
/**
 * Upserts a Permission under the given user, retrying under a
 * session-token-aware retry policy.
 *
 * @param userLink   link of the owning user.
 * @param permission the permission to upsert.
 * @param options    request options (may be null).
 * @return a {@link Mono} emitting the upserted permission, or an error.
 */
@Override
public Mono<ResourceResponse<Permission>> upsertPermission(String userLink, Permission permission,
    RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> upsertPermissionInternal(userLink, permission, options, retryPolicy),
        retryPolicy);
}
/**
 * Builds and issues the Upsert request for a Permission resource.
 *
 * @param userLink            link of the owning user.
 * @param permission          the permission to upsert (validated by getPermissionRequest).
 * @param options             request options (may be null).
 * @param retryPolicyInstance notified before the request is dispatched (may be null).
 * @return a Mono emitting the upserted permission, or an error.
 */
private Mono<ResourceResponse<Permission>> upsertPermissionInternal(String userLink, Permission permission,
RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
logger.debug("Upserting a Permission. userLink [{}], permission id [{}]", userLink, permission.getId());
RxDocumentServiceRequest request = getPermissionRequest(userLink, permission, options,
OperationType.Upsert);
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.upsert(request, retryPolicyInstance).map(response -> toResourceResponse(response, Permission.class));
} catch (Exception e) {
// Synchronous failures are surfaced through the reactive error channel.
logger.debug("Failure in upserting a Permission due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
/**
 * Builds the service request shared by the create/upsert Permission paths.
 *
 * @param userLink      link of the owning user; must be non-empty.
 * @param permission    the permission payload; must be non-null and valid.
 * @param options       request options used to populate headers (may be null).
 * @param operationType Create or Upsert.
 * @return the request addressing the user's permissions feed.
 */
private RxDocumentServiceRequest getPermissionRequest(String userLink, Permission permission,
    RequestOptions options, OperationType operationType) {
    if (StringUtils.isEmpty(userLink)) {
        throw new IllegalArgumentException("userLink");
    }
    if (permission == null) {
        throw new IllegalArgumentException("permission");
    }
    RxDocumentClientImpl.validateResource(permission);
    final String permissionsFeedPath = Utils.joinPath(userLink, Paths.PERMISSIONS_PATH_SEGMENT);
    final Map<String, String> headers = getRequestHeaders(options);
    // Return the request directly instead of binding it to a temporary first.
    return RxDocumentServiceRequest.create(operationType, ResourceType.Permission,
        permissionsFeedPath, permission, headers, options);
}
/**
 * Replaces a Permission resource, retrying under a session-token-aware retry policy.
 *
 * @param permission the permission to replace; its self-link identifies the target.
 * @param options    request options (may be null).
 * @return a {@link Mono} emitting the replaced permission, or an error.
 */
@Override
public Mono<ResourceResponse<Permission>> replacePermission(Permission permission, RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> replacePermissionInternal(permission, options, retryPolicy),
        retryPolicy);
}
/**
 * Builds and issues the Replace request for a Permission resource.
 *
 * @param permission          the permission to replace (must be non-null).
 * @param options             request options used to populate headers (may be null).
 * @param retryPolicyInstance notified before the request is dispatched (may be null).
 * @return a Mono emitting the replaced permission, or an error.
 */
private Mono<ResourceResponse<Permission>> replacePermissionInternal(Permission permission, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (permission == null) {
throw new IllegalArgumentException("permission");
}
logger.debug("Replacing a Permission. permission id [{}]", permission.getId());
// Sanity-check the resource before building the request.
RxDocumentClientImpl.validateResource(permission);
String path = Utils.joinPath(permission.getSelfLink(), null);
Map<String, String> requestHeaders = getRequestHeaders(options);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Replace,
ResourceType.Permission, path, permission, requestHeaders, options);
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.replace(request, retryPolicyInstance).map(response -> toResourceResponse(response, Permission.class));
} catch (Exception e) {
// Synchronous failures are surfaced through the reactive error channel.
logger.debug("Failure in replacing a Permission due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
/**
 * Deletes the Permission addressed by {@code permissionLink}, retrying under a
 * session-token-aware retry policy.
 *
 * @param permissionLink link of the permission to delete.
 * @param options        request options (may be null).
 * @return a {@link Mono} emitting the deletion response, or an error.
 */
@Override
public Mono<ResourceResponse<Permission>> deletePermission(String permissionLink, RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> deletePermissionInternal(permissionLink, options, retryPolicy),
        retryPolicy);
}
/**
 * Builds and issues the Delete request for a Permission resource.
 *
 * @param permissionLink      link of the permission to delete; must be non-empty.
 * @param options             request options used to populate headers (may be null).
 * @param retryPolicyInstance notified before the request is dispatched (may be null).
 * @return a Mono emitting the deletion response, or an error.
 */
private Mono<ResourceResponse<Permission>> deletePermissionInternal(String permissionLink, RequestOptions options,
DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (StringUtils.isEmpty(permissionLink)) {
throw new IllegalArgumentException("permissionLink");
}
logger.debug("Deleting a Permission. permissionLink [{}]", permissionLink);
String path = Utils.joinPath(permissionLink, null);
Map<String, String> requestHeaders = getRequestHeaders(options);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Delete,
ResourceType.Permission, path, requestHeaders, options);
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.delete(request, retryPolicyInstance).map(response -> toResourceResponse(response, Permission.class));
} catch (Exception e) {
// Synchronous failures are surfaced through the reactive error channel.
logger.debug("Failure in deleting a Permission due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
/**
 * Reads the Permission addressed by {@code permissionLink}, retrying under a
 * session-token-aware retry policy.
 *
 * @param permissionLink link of the permission to read.
 * @param options        request options (may be null).
 * @return a {@link Mono} emitting the permission, or an error.
 */
@Override
public Mono<ResourceResponse<Permission>> readPermission(String permissionLink, RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> readPermissionInternal(permissionLink, options, retryPolicy),
        retryPolicy);
}
/**
 * Builds and issues the Read request for a Permission resource.
 *
 * @param permissionLink      link of the permission to read; must be non-empty.
 * @param options             request options used to populate headers (may be null).
 * @param retryPolicyInstance notified before the request is dispatched (may be null).
 * @return a Mono emitting the permission, or an error.
 */
private Mono<ResourceResponse<Permission>> readPermissionInternal(String permissionLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance ) {
try {
if (StringUtils.isEmpty(permissionLink)) {
throw new IllegalArgumentException("permissionLink");
}
logger.debug("Reading a Permission. permissionLink [{}]", permissionLink);
String path = Utils.joinPath(permissionLink, null);
Map<String, String> requestHeaders = getRequestHeaders(options);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Read,
ResourceType.Permission, path, requestHeaders, options);
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, Permission.class));
} catch (Exception e) {
// Synchronous failures are surfaced through the reactive error channel.
logger.debug("Failure in reading a Permission due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
/**
 * Reads the feed of Permissions under a user as a paged stream.
 *
 * @param userLink link of the owning user; must be non-empty.
 * @param options  feed options (may be null).
 * @return a {@link Flux} of feed pages of permissions.
 */
@Override
public Flux<FeedResponse<Permission>> readPermissions(String userLink, FeedOptions options) {
    // The feed cannot be addressed without its parent link, so reject early.
    if (StringUtils.isEmpty(userLink)) {
        throw new IllegalArgumentException("userLink");
    }
    final String permissionsFeedLink = Utils.joinPath(userLink, Paths.PERMISSIONS_PATH_SEGMENT);
    return readFeed(options, ResourceType.Permission, Permission.class, permissionsFeedLink);
}
/**
 * Queries Permissions under a user using raw query text; delegates to the
 * SqlQuerySpec overload.
 *
 * @param userLink link of the owning user.
 * @param query    the SQL query text.
 * @param options  feed options (may be null).
 * @return a Flux of feed pages of matching permissions.
 */
@Override
public Flux<FeedResponse<Permission>> queryPermissions(String userLink, String query,
FeedOptions options) {
return queryPermissions(userLink, new SqlQuerySpec(query), options);
}
/**
 * Queries Permissions under a user using a parameterized query spec.
 *
 * @param userLink  link of the owning user.
 * @param querySpec the parameterized SQL query.
 * @param options   feed options (may be null).
 * @return a Flux of feed pages of matching permissions.
 */
@Override
public Flux<FeedResponse<Permission>> queryPermissions(String userLink, SqlQuerySpec querySpec,
FeedOptions options) {
return createQuery(userLink, querySpec, options, Permission.class, ResourceType.Permission);
}
/**
 * Replaces an Offer resource, retrying under a session-token-aware retry policy.
 *
 * @param offer the offer to replace; its self-link identifies the target.
 * @return a {@link Mono} emitting the replaced offer, or an error.
 */
@Override
public Mono<ResourceResponse<Offer>> replaceOffer(Offer offer) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> replaceOfferInternal(offer, retryPolicy),
        retryPolicy);
}
/**
 * Builds and issues the Replace request for an Offer resource.
 *
 * @param offer                     the offer to replace (must be non-null).
 * @param documentClientRetryPolicy retry policy associated with this operation.
 * @return a Mono emitting the replaced offer, or an error.
 */
private Mono<ResourceResponse<Offer>> replaceOfferInternal(Offer offer, DocumentClientRetryPolicy documentClientRetryPolicy) {
try {
if (offer == null) {
throw new IllegalArgumentException("offer");
}
logger.debug("Replacing an Offer. offer id [{}]", offer.getId());
RxDocumentClientImpl.validateResource(offer);
String path = Utils.joinPath(offer.getSelfLink(), null);
// Offer requests carry no request headers or options.
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Replace,
ResourceType.Offer, path, offer, null, null);
return this.replace(request, documentClientRetryPolicy).map(response -> toResourceResponse(response, Offer.class));
} catch (Exception e) {
// Synchronous failures are surfaced through the reactive error channel.
logger.debug("Failure in replacing an Offer due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
/**
 * Reads the Offer addressed by {@code offerLink}, retrying under a
 * session-token-aware retry policy.
 *
 * @param offerLink link of the offer to read.
 * @return a {@link Mono} emitting the offer, or an error.
 */
@Override
public Mono<ResourceResponse<Offer>> readOffer(String offerLink) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> readOfferInternal(offerLink, retryPolicy),
        retryPolicy);
}
/**
 * Builds and issues the Read request for an Offer resource.
 *
 * @param offerLink           link of the offer to read; must be non-empty.
 * @param retryPolicyInstance notified before the request is dispatched (may be null).
 * @return a Mono emitting the offer, or an error.
 */
private Mono<ResourceResponse<Offer>> readOfferInternal(String offerLink, DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (StringUtils.isEmpty(offerLink)) {
throw new IllegalArgumentException("offerLink");
}
logger.debug("Reading an Offer. offerLink [{}]", offerLink);
String path = Utils.joinPath(offerLink, null);
// The cast disambiguates the overload taking a header map; no headers are sent.
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Read,
ResourceType.Offer, path, (HashMap<String, String>)null, null);
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, Offer.class));
} catch (Exception e) {
// Synchronous failures are surfaced through the reactive error channel.
logger.debug("Failure in reading an Offer due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
/**
 * Reads the account-level feed of Offers as a paged stream.
 *
 * @param options feed options (may be null).
 * @return a Flux of feed pages of offers.
 */
@Override
public Flux<FeedResponse<Offer>> readOffers(FeedOptions options) {
return readFeed(options, ResourceType.Offer, Offer.class,
Utils.joinPath(Paths.OFFERS_PATH_SEGMENT, null));
}
/**
 * Generic paged read over a resource feed. Each page is fetched with a
 * ReadFeed request carrying the continuation token and page size, and
 * retried under a fresh session-token-aware retry policy.
 *
 * @param options      feed options; a default instance is used when null.
 * @param resourceType the type of resource in the feed.
 * @param klass        the resource class used to deserialize results.
 * @param resourceLink the feed link to read.
 * @return a Flux of feed pages.
 */
private <T extends Resource> Flux<FeedResponse<T>> readFeed(FeedOptions options, ResourceType resourceType, Class<T> klass, String resourceLink) {
if (options == null) {
options = new FeedOptions();
}
// -1 means "let the service choose" the page size.
int maxPageSize = options.getMaxItemCount() != null ? options.getMaxItemCount() : -1;
// Effectively-final copy so the lambdas below can capture it.
final FeedOptions finalFeedOptions = options;
BiFunction<String, Integer, RxDocumentServiceRequest> createRequestFunc = (continuationToken, pageSize) -> {
Map<String, String> requestHeaders = new HashMap<>();
if (continuationToken != null) {
requestHeaders.put(HttpConstants.HttpHeaders.CONTINUATION, continuationToken);
}
requestHeaders.put(HttpConstants.HttpHeaders.PAGE_SIZE, Integer.toString(pageSize));
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.ReadFeed,
resourceType, resourceLink, requestHeaders, finalFeedOptions);
return request;
};
Function<RxDocumentServiceRequest, Mono<FeedResponse<T>>> executeFunc = request -> {
// A fresh retry policy per page keeps retries independent across pages.
return ObservableHelper.inlineIfPossibleAsObs(() -> readFeed(request).map(response -> toFeedResponsePage(response, klass)),
this.resetSessionTokenRetryPolicy.getRequestPolicy());
};
return Paginator.getPaginatedQueryResultAsObservable(options, createRequestFunc, executeFunc, klass, maxPageSize);
}
/**
 * Queries Offers using raw query text; delegates to the SqlQuerySpec overload.
 *
 * @param query   the SQL query text.
 * @param options feed options (may be null).
 * @return a Flux of feed pages of matching offers.
 */
@Override
public Flux<FeedResponse<Offer>> queryOffers(String query, FeedOptions options) {
return queryOffers(new SqlQuerySpec(query), options);
}
/**
 * Queries Offers using a parameterized query spec. Offers are account-level,
 * hence the null parent link.
 *
 * @param querySpec the parameterized SQL query.
 * @param options   feed options (may be null).
 * @return a Flux of feed pages of matching offers.
 */
@Override
public Flux<FeedResponse<Offer>> queryOffers(SqlQuerySpec querySpec, FeedOptions options) {
return createQuery(null, querySpec, options, Offer.class, ResourceType.Offer);
}
/**
 * Reads the database-account metadata, retrying under a session-token-aware
 * retry policy.
 *
 * @return a {@link Mono} emitting the account metadata, or an error.
 */
@Override
public Mono<DatabaseAccount> getDatabaseAccount() {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> getDatabaseAccountInternal(retryPolicy),
        retryPolicy);
}
/**
 * Builds and issues the Read request for the database-account metadata.
 *
 * @param documentClientRetryPolicy retry policy associated with this operation.
 * @return a Mono emitting the account metadata, or an error.
 */
private Mono<DatabaseAccount> getDatabaseAccountInternal(DocumentClientRetryPolicy documentClientRetryPolicy) {
try {
logger.debug("Getting Database Account");
// Empty path addresses the account root; the cast disambiguates the
// overload taking a header map, and no headers are sent.
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Read,
ResourceType.DatabaseAccount, "",
(HashMap<String, String>) null,
null);
return this.read(request, documentClientRetryPolicy).map(response -> toDatabaseAccount(response));
} catch (Exception e) {
// Synchronous failures are surfaced through the reactive error channel.
logger.debug("Failure in getting Database Account due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
/**
 * Returns the session container tracking per-collection session tokens.
 * Typed as Object to avoid exposing the internal SessionContainer type.
 */
public Object getSession() {
return this.sessionContainer;
}
/**
 * Replaces the session container.
 *
 * @param sessionContainer must be a SessionContainer; a ClassCastException is
 *                         thrown otherwise.
 */
public void setSession(Object sessionContainer) {
this.sessionContainer = (SessionContainer) sessionContainer;
}
/** Returns the cache of partition-key-range metadata used for routing. */
public RxPartitionKeyRangeCache getPartitionKeyRangeCache() {
return partitionKeyRangeCache;
}
/**
 * Reads the database-account metadata directly from the given endpoint
 * (bypassing endpoint resolution) and refreshes the cached
 * multi-write-locations flag from the response.
 *
 * @param endpoint the endpoint to query.
 * @return a Flux emitting the account metadata; errors are logged and propagated.
 */
public Flux<DatabaseAccount> getDatabaseAccountFromEndpoint(URI endpoint) {
return Flux.defer(() -> {
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Read,
ResourceType.DatabaseAccount, "", null, (Object) null);
this.populateHeaders(request, RequestVerb.GET);
// Force the request to the specified endpoint instead of the resolved one.
request.setEndpointOverride(endpoint);
return this.gatewayProxy.processMessage(request).doOnError(e -> {
String message = String.format("Failed to retrieve database account information. %s",
e.getCause() != null
? e.getCause().toString()
: e.toString());
logger.warn(message);
}).map(rsp -> rsp.getResource(DatabaseAccount.class))
.doOnNext(databaseAccount -> {
// Multi-write is effective only when both the client policy and the
// account enable it.
this.useMultipleWriteLocations = this.connectionPolicy.isUsingMultipleWriteLocations()
&& BridgeInternal.isEnableMultipleWriteLocations(databaseAccount);
});
});
}
/**
 * Selects the transport for a request. Certain requests must be routed through
 * gateway even when the client connectivity mode is direct.
 *
 * @param request the request to route.
 * @return the gateway proxy or the direct store model.
 */
private RxStoreModel getStoreProxy(RxDocumentServiceRequest request) {
// Explicit per-request override always wins.
if (request.UseGatewayMode) {
return this.gatewayProxy;
}
ResourceType resourceType = request.getResourceType();
OperationType operationType = request.getOperationType();
// Offers, non-execute script operations, and partition-key-range reads are
// gateway-only regardless of operation.
if (resourceType == ResourceType.Offer ||
resourceType.isScript() && operationType != OperationType.ExecuteJavaScript ||
resourceType == ResourceType.PartitionKeyRange) {
return this.gatewayProxy;
}
if (operationType == OperationType.Create
|| operationType == OperationType.Upsert) {
if (resourceType == ResourceType.Database ||
resourceType == ResourceType.User ||
resourceType == ResourceType.DocumentCollection ||
resourceType == ResourceType.Permission) {
return this.gatewayProxy;
} else {
return this.storeModel;
}
} else if (operationType == OperationType.Delete) {
if (resourceType == ResourceType.Database ||
resourceType == ResourceType.User ||
resourceType == ResourceType.DocumentCollection) {
return this.gatewayProxy;
} else {
return this.storeModel;
}
} else if (operationType == OperationType.Replace) {
if (resourceType == ResourceType.DocumentCollection) {
return this.gatewayProxy;
} else {
return this.storeModel;
}
} else if (operationType == OperationType.Read) {
if (resourceType == ResourceType.DocumentCollection) {
return this.gatewayProxy;
} else {
return this.storeModel;
}
} else {
// Queries over collection children without a resolved partition key range
// must go through gateway so the range can be resolved.
if ((request.getOperationType() == OperationType.Query || request.getOperationType() == OperationType.SqlQuery) &&
Utils.isCollectionChild(request.getResourceType())) {
if (request.getPartitionKeyRangeIdentity() == null) {
return this.gatewayProxy;
}
}
return this.storeModel;
}
}
/**
 * Shuts the client down, releasing the endpoint manager, the store client
 * factory and the HTTP client. Shutdown is best-effort: component failures
 * are logged and do not abort the remaining steps.
 */
@Override
public void close() {
logger.info("Shutting down ...");
logger.info("Closing Global Endpoint Manager ...");
LifeCycleUtils.closeQuietly(this.globalEndpointManager);
logger.info("Closing StoreClientFactory ...");
LifeCycleUtils.closeQuietly(this.storeClientFactory);
logger.info("Shutting down reactorHttpClient ...");
try {
this.reactorHttpClient.shutdown();
} catch (Exception e) {
// Swallowed deliberately so the rest of the shutdown still runs.
logger.warn("shutting down reactorHttpClient failed", e);
}
logger.info("Shutting down completed.");
}
} |
Did you mean to change it from eventData2 -> eventData1? The same event data returns two different sequence values? | public void testProcessSpans() throws Exception {
final Tracer tracer1 = mock(Tracer.class);
final List<Tracer> tracers = Collections.singletonList(tracer1);
TracerProvider tracerProvider = new TracerProvider(tracers);
when(eventHubClientBuilder.buildAsyncClient()).thenReturn(eventHubAsyncClient);
when(eventHubAsyncClient.getFullyQualifiedNamespace()).thenReturn("test-ns");
when(eventHubAsyncClient.getEventHubName()).thenReturn("test-eh");
when(eventHubAsyncClient.getPartitionIds()).thenReturn(Flux.just("1"));
when(eventHubAsyncClient
.createConsumer(anyString(), anyInt()))
.thenReturn(consumer1);
when(eventData1.getSequenceNumber()).thenReturn(1L);
when(eventData1.getSequenceNumber()).thenReturn(2L);
when(eventData1.getOffset()).thenReturn(1L);
when(eventData1.getOffset()).thenReturn(100L);
when(eventData1.getEnqueuedTime()).thenReturn(Instant.ofEpochSecond(1560639208));
String diagnosticId = "00-08ee063508037b1719dddcbf248e30e2-1365c684eb25daed-01";
Map<String, Object> properties = new HashMap<>();
properties.put(DIAGNOSTIC_ID_KEY, diagnosticId);
when(eventData1.getProperties()).thenReturn(properties);
when(consumer1.receiveFromPartition(anyString(), any(EventPosition.class), any(ReceiveOptions.class))).thenReturn(Flux.just(getEvent(eventData1)));
when(tracer1.extractContext(eq(diagnosticId), any())).thenAnswer(
invocation -> {
Context passed = invocation.getArgument(1, Context.class);
return passed.addData(SPAN_CONTEXT_KEY, "value");
}
);
when(tracer1.start(eq("EventHubs.process"), any(), eq(ProcessKind.PROCESS))).thenAnswer(
invocation -> {
Context passed = invocation.getArgument(1, Context.class);
assertTrue(passed.getData(MESSAGE_ENQUEUED_TIME).isPresent());
return passed.addData(SPAN_CONTEXT_KEY, "value1").addData("scope", (Closeable) () -> {
return;
}).addData(PARENT_SPAN_KEY, "value2");
}
);
final InMemoryCheckpointStore checkpointStore = new InMemoryCheckpointStore();
final EventProcessorClient eventProcessorClient = new EventProcessorClient(eventHubClientBuilder, "test-consumer",
TestPartitionProcessor::new, checkpointStore, false, tracerProvider, ec -> { }, new HashMap<>());
eventProcessorClient.start();
TimeUnit.SECONDS.sleep(10);
eventProcessorClient.stop();
verify(tracer1, times(1)).extractContext(eq(diagnosticId), any());
verify(tracer1, times(1)).start(eq("EventHubs.process"), any(), eq(ProcessKind.PROCESS));
verify(tracer1, times(1)).end(eq("success"), isNull(), any());
} | when(eventData1.getSequenceNumber()).thenReturn(2L); | public void testProcessSpans() throws Exception {
final Tracer tracer1 = mock(Tracer.class);
final List<Tracer> tracers = Collections.singletonList(tracer1);
TracerProvider tracerProvider = new TracerProvider(tracers);
when(eventHubClientBuilder.buildAsyncClient()).thenReturn(eventHubAsyncClient);
when(eventHubAsyncClient.getFullyQualifiedNamespace()).thenReturn("test-ns");
when(eventHubAsyncClient.getEventHubName()).thenReturn("test-eh");
when(eventHubAsyncClient.getPartitionIds()).thenReturn(Flux.just("1"));
when(eventHubAsyncClient
.createConsumer(anyString(), anyInt()))
.thenReturn(consumer1);
when(eventData1.getSequenceNumber()).thenReturn(1L);
when(eventData1.getOffset()).thenReturn(1L);
when(eventData1.getOffset()).thenReturn(100L);
when(eventData1.getEnqueuedTime()).thenReturn(Instant.ofEpochSecond(1560639208));
String diagnosticId = "00-08ee063508037b1719dddcbf248e30e2-1365c684eb25daed-01";
Map<String, Object> properties = new HashMap<>();
properties.put(DIAGNOSTIC_ID_KEY, diagnosticId);
when(eventData1.getProperties()).thenReturn(properties);
when(consumer1.receiveFromPartition(anyString(), any(EventPosition.class), any(ReceiveOptions.class))).thenReturn(Flux.just(getEvent(eventData1)));
when(tracer1.extractContext(eq(diagnosticId), any())).thenAnswer(
invocation -> {
Context passed = invocation.getArgument(1, Context.class);
return passed.addData(SPAN_CONTEXT_KEY, "value");
}
);
when(tracer1.start(eq("EventHubs.process"), any(), eq(ProcessKind.PROCESS))).thenAnswer(
invocation -> {
Context passed = invocation.getArgument(1, Context.class);
assertTrue(passed.getData(MESSAGE_ENQUEUED_TIME).isPresent());
return passed.addData(SPAN_CONTEXT_KEY, "value1").addData("scope", (Closeable) () -> {
return;
}).addData(PARENT_SPAN_KEY, "value2");
}
);
final InMemoryCheckpointStore checkpointStore = new InMemoryCheckpointStore();
final EventProcessorClient eventProcessorClient = new EventProcessorClient(eventHubClientBuilder, "test-consumer",
TestPartitionProcessor::new, checkpointStore, false, tracerProvider, ec -> { }, new HashMap<>());
eventProcessorClient.start();
TimeUnit.SECONDS.sleep(10);
eventProcessorClient.stop();
verify(tracer1, times(1)).extractContext(eq(diagnosticId), any());
verify(tracer1, times(1)).start(eq("EventHubs.process"), any(), eq(ProcessKind.PROCESS));
verify(tracer1, times(1)).end(eq("success"), isNull(), any());
} | class EventProcessorClientTest {
@Mock
private EventHubClientBuilder eventHubClientBuilder;
@Mock
private EventHubAsyncClient eventHubAsyncClient;
@Mock
private EventHubConsumerAsyncClient consumer1, consumer2, consumer3;
@Mock
private EventData eventData1, eventData2, eventData3, eventData4;
@BeforeEach
public void setup() {
// Initialize the @Mock-annotated fields of this test class before each test.
MockitoAnnotations.initMocks(this);
}
@AfterEach
public void teardown() {
// Drop mock references and clear Mockito's inline-mock state so stubbing
// cannot leak between tests.
consumer1 = null;
consumer2 = null;
consumer3 = null;
eventData1 = null;
eventData2 = null;
eventData3 = null;
eventData4 = null;
eventHubAsyncClient = null;
Mockito.framework().clearInlineMocks();
}
/**
 * Tests all the happy cases for {@link EventProcessorClient}: the processor
 * claims ownership of the single partition, pumps both events, and releases
 * ownership (empty owner id) on stop.
 *
 * @throws Exception if an error occurs while running the test.
 */
@Test
public void testWithSimplePartitionProcessor() throws Exception {
final Tracer tracer1 = mock(Tracer.class);
final List<Tracer> tracers = Collections.singletonList(tracer1);
TracerProvider tracerProvider = new TracerProvider(tracers);
when(eventHubClientBuilder.buildAsyncClient()).thenReturn(eventHubAsyncClient);
when(eventHubAsyncClient.getFullyQualifiedNamespace()).thenReturn("test-ns");
when(eventHubAsyncClient.getEventHubName()).thenReturn("test-eh");
when(eventHubAsyncClient.getPartitionIds()).thenReturn(Flux.just("1"));
when(eventHubAsyncClient
.createConsumer(anyString(), anyInt()))
.thenReturn(consumer1);
when(consumer1.receiveFromPartition(anyString(), any(EventPosition.class), any(ReceiveOptions.class))).thenReturn(Flux.just(getEvent(eventData1), getEvent(eventData2)));
when(eventData1.getSequenceNumber()).thenReturn(1L);
when(eventData2.getSequenceNumber()).thenReturn(2L);
when(eventData1.getOffset()).thenReturn(1L);
when(eventData2.getOffset()).thenReturn(100L);
final InMemoryCheckpointStore checkpointStore = new InMemoryCheckpointStore();
final TestPartitionProcessor testPartitionProcessor = new TestPartitionProcessor();
final long beforeTest = System.currentTimeMillis();
String diagnosticId = "00-08ee063508037b1719dddcbf248e30e2-1365c684eb25daed-01";
when(tracer1.extractContext(eq(diagnosticId), any())).thenAnswer(
invocation -> {
Context passed = invocation.getArgument(1, Context.class);
return passed.addData(SPAN_CONTEXT_KEY, "value");
}
);
when(tracer1.start(eq("EventHubs.process"), any(), eq(ProcessKind.PROCESS))).thenAnswer(
invocation -> {
Context passed = invocation.getArgument(1, Context.class);
return passed.addData(SPAN_CONTEXT_KEY, "value1")
.addData("scope", (Closeable) () -> {
})
.addData(PARENT_SPAN_KEY, "value2");
}
);
final EventProcessorClient eventProcessorClient = new EventProcessorClient(eventHubClientBuilder, "test-consumer",
() -> testPartitionProcessor, checkpointStore, false, tracerProvider, ec -> { }, new HashMap<>());
eventProcessorClient.start();
// Give the load balancer time to claim the partition and pump the events.
TimeUnit.SECONDS.sleep(10);
assertNotNull(eventProcessorClient.getIdentifier());
StepVerifier.create(checkpointStore.listOwnership("test-ns", "test-eh", "test-consumer"))
.expectNextCount(1).verifyComplete();
// While running, the ownership record carries this processor's id.
StepVerifier.create(checkpointStore.listOwnership("test-ns", "test-eh", "test-consumer"))
.assertNext(partitionOwnership -> {
assertEquals("1", partitionOwnership.getPartitionId(), "Partition");
assertEquals("test-consumer", partitionOwnership.getConsumerGroup(), "Consumer");
assertEquals("test-eh", partitionOwnership.getEventHubName(), "EventHub name");
assertEquals(eventProcessorClient.getIdentifier(), partitionOwnership.getOwnerId(), "OwnerId");
assertTrue(partitionOwnership.getLastModifiedTime() >= beforeTest, "LastModifiedTime");
assertTrue(partitionOwnership.getLastModifiedTime() <= System.currentTimeMillis(), "LastModifiedTime");
assertNotNull(partitionOwnership.getETag());
}).verifyComplete();
verify(eventHubAsyncClient, atLeastOnce()).getPartitionIds();
verify(eventHubAsyncClient, atLeastOnce())
.createConsumer(anyString(), anyInt());
verify(consumer1, atLeastOnce()).receiveFromPartition(anyString(), any(EventPosition.class),
any(ReceiveOptions.class));
verify(consumer1, atLeastOnce()).close();
eventProcessorClient.stop();
// After stop, ownership is relinquished: the owner id becomes empty.
StepVerifier.create(checkpointStore.listOwnership("test-ns", "test-eh", "test-consumer"))
.assertNext(partitionOwnership -> {
assertEquals("1", partitionOwnership.getPartitionId(), "Partition");
assertEquals("test-consumer", partitionOwnership.getConsumerGroup(), "Consumer");
assertEquals("test-eh", partitionOwnership.getEventHubName(), "EventHub name");
assertEquals("", partitionOwnership.getOwnerId(), "Owner Id");
assertTrue(partitionOwnership.getLastModifiedTime() >= beforeTest, "LastModifiedTime");
assertTrue(partitionOwnership.getLastModifiedTime() <= System.currentTimeMillis(), "LastModifiedTime");
assertNotNull(partitionOwnership.getETag());
}).verifyComplete();
}
/**
* Tests process start spans invoked for {@link EventProcessorClient}.
*
* @throws Exception if an error occurs while running the test.
*/
@Test
/**
 * Tests {@link EventProcessorClient} that processes events from an Event Hub configured with multiple partitions.
 *
 * @throws Exception if an error occurs while running the test.
 */
@Test
public void testWithMultiplePartitions() throws Exception {
final CountDownLatch count = new CountDownLatch(1);
// The argThat matchers below remove the claimed partition id from this set,
// so the remaining size tells how many partitions were never claimed.
final Set<String> identifiers = new HashSet<>();
identifiers.add("1");
identifiers.add("2");
identifiers.add("3");
final EventPosition position = EventPosition.latest();
when(eventHubClientBuilder.buildAsyncClient()).thenReturn(eventHubAsyncClient);
when(eventHubAsyncClient.getPartitionIds()).thenReturn(Flux.just("1", "2", "3"));
when(eventHubAsyncClient.getFullyQualifiedNamespace()).thenReturn("test-ns");
when(eventHubAsyncClient.getEventHubName()).thenReturn("test-eh");
when(eventHubAsyncClient
.createConsumer(anyString(), anyInt()))
.thenReturn(consumer1, consumer2, consumer3);
when(eventHubAsyncClient.getPartitionIds()).thenReturn(Flux.fromIterable(identifiers));
when(eventHubAsyncClient.getEventHubName()).thenReturn("test-eh");
when(consumer1.receiveFromPartition(argThat(arg -> identifiers.remove(arg)), eq(position), any()))
.thenReturn(Mono.fromRunnable(() -> count.countDown())
.thenMany(Flux.just(getEvent(eventData1), getEvent(eventData2))));
when(eventData1.getSequenceNumber()).thenReturn(1L);
when(eventData2.getSequenceNumber()).thenReturn(2L);
when(eventData1.getOffset()).thenReturn(1L);
when(eventData2.getOffset()).thenReturn(100L);
when(consumer2.receiveFromPartition(argThat(arg -> identifiers.remove(arg)), eq(position), any()))
.thenReturn(Mono.fromRunnable(() -> count.countDown()).thenMany(Flux.just(getEvent(eventData3))));
when(eventData3.getSequenceNumber()).thenReturn(1L);
when(eventData3.getOffset()).thenReturn(1L);
when(consumer3.receiveFromPartition(argThat(arg -> identifiers.remove(arg)), eq(position), any()))
.thenReturn(Mono.fromRunnable(() -> count.countDown()).thenMany(Flux.just(getEvent(eventData4))));
when(eventData4.getSequenceNumber()).thenReturn(1L);
when(eventData4.getOffset()).thenReturn(1L);
final InMemoryCheckpointStore checkpointStore = new InMemoryCheckpointStore();
final TracerProvider tracerProvider = new TracerProvider(Collections.emptyList());
final EventProcessorClient eventProcessorClient = new EventProcessorClient(eventHubClientBuilder,
"test-consumer",
TestPartitionProcessor::new, checkpointStore, false, tracerProvider, ec -> { }, new HashMap<>());
eventProcessorClient.start();
final boolean completed = count.await(10, TimeUnit.SECONDS);
eventProcessorClient.stop();
Assertions.assertTrue(completed);
StepVerifier.create(checkpointStore.listOwnership("test-ns", "test-eh", "test-consumer"))
.expectNextCount(1).verifyComplete();
verify(eventHubAsyncClient, atLeast(1)).getPartitionIds();
verify(eventHubAsyncClient, times(1))
.createConsumer(anyString(), anyInt());
// Exactly one partition was claimed within the window, so two ids remain.
Assertions.assertEquals(2, identifiers.size());
StepVerifier.create(checkpointStore.listOwnership("test-ns", "test-eh", "test-consumer"))
.assertNext(po -> {
String partitionId = po.getPartitionId();
verify(consumer1, atLeastOnce()).receiveFromPartition(eq(partitionId), any(EventPosition.class), any());
}).verifyComplete();
}
/**
 * Wraps an event in a fixed test partition context; last-enqueued-event
 * properties are not populated.
 */
private PartitionEvent getEvent(EventData event) {
    final PartitionContext partitionContext = new PartitionContext("test-ns", "foo", "bar", "baz");
    return new PartitionEvent(partitionContext, event, null);
}
/**
 * Minimal processor used by the tests: checkpoints every event and ignores
 * errors.
 */
private static final class TestPartitionProcessor extends PartitionProcessor {
@Override
public void processEvent(EventContext eventContext) {
// Checkpoint each event so ownership/checkpoint records get updated.
eventContext.updateCheckpoint();
}
@Override
public void processError(ErrorContext errorContext) {
// Errors are deliberately ignored in this test processor.
return;
}
}
} | class EventProcessorClientTest {
@Mock
private EventHubClientBuilder eventHubClientBuilder;
@Mock
private EventHubAsyncClient eventHubAsyncClient;
@Mock
private EventHubConsumerAsyncClient consumer1, consumer2, consumer3;
@Mock
private EventData eventData1, eventData2, eventData3, eventData4;
@BeforeEach
public void setup() {
MockitoAnnotations.initMocks(this);
}
@AfterEach
public void teardown() {
consumer1 = null;
consumer2 = null;
consumer3 = null;
eventData1 = null;
eventData2 = null;
eventData3 = null;
eventData4 = null;
eventHubAsyncClient = null;
Mockito.framework().clearInlineMocks();
}
/**
* Tests all the happy cases for {@link EventProcessorClient}.
*
* @throws Exception if an error occurs while running the test.
*/
@Test
public void testWithSimplePartitionProcessor() throws Exception {
final Tracer tracer1 = mock(Tracer.class);
final List<Tracer> tracers = Collections.singletonList(tracer1);
TracerProvider tracerProvider = new TracerProvider(tracers);
when(eventHubClientBuilder.buildAsyncClient()).thenReturn(eventHubAsyncClient);
when(eventHubAsyncClient.getFullyQualifiedNamespace()).thenReturn("test-ns");
when(eventHubAsyncClient.getEventHubName()).thenReturn("test-eh");
when(eventHubAsyncClient.getPartitionIds()).thenReturn(Flux.just("1"));
when(eventHubAsyncClient
.createConsumer(anyString(), anyInt()))
.thenReturn(consumer1);
when(consumer1.receiveFromPartition(anyString(), any(EventPosition.class), any(ReceiveOptions.class))).thenReturn(Flux.just(getEvent(eventData1), getEvent(eventData2)));
when(eventData1.getSequenceNumber()).thenReturn(1L);
when(eventData2.getSequenceNumber()).thenReturn(2L);
when(eventData1.getOffset()).thenReturn(1L);
when(eventData2.getOffset()).thenReturn(100L);
final InMemoryCheckpointStore checkpointStore = new InMemoryCheckpointStore();
final TestPartitionProcessor testPartitionProcessor = new TestPartitionProcessor();
final long beforeTest = System.currentTimeMillis();
String diagnosticId = "00-08ee063508037b1719dddcbf248e30e2-1365c684eb25daed-01";
when(tracer1.extractContext(eq(diagnosticId), any())).thenAnswer(
invocation -> {
Context passed = invocation.getArgument(1, Context.class);
return passed.addData(SPAN_CONTEXT_KEY, "value");
}
);
when(tracer1.start(eq("EventHubs.process"), any(), eq(ProcessKind.PROCESS))).thenAnswer(
invocation -> {
Context passed = invocation.getArgument(1, Context.class);
return passed.addData(SPAN_CONTEXT_KEY, "value1")
.addData("scope", (Closeable) () -> {
})
.addData(PARENT_SPAN_KEY, "value2");
}
);
final EventProcessorClient eventProcessorClient = new EventProcessorClient(eventHubClientBuilder, "test-consumer",
() -> testPartitionProcessor, checkpointStore, false, tracerProvider, ec -> { }, new HashMap<>());
eventProcessorClient.start();
TimeUnit.SECONDS.sleep(10);
assertNotNull(eventProcessorClient.getIdentifier());
StepVerifier.create(checkpointStore.listOwnership("test-ns", "test-eh", "test-consumer"))
.expectNextCount(1).verifyComplete();
StepVerifier.create(checkpointStore.listOwnership("test-ns", "test-eh", "test-consumer"))
.assertNext(partitionOwnership -> {
assertEquals("1", partitionOwnership.getPartitionId(), "Partition");
assertEquals("test-consumer", partitionOwnership.getConsumerGroup(), "Consumer");
assertEquals("test-eh", partitionOwnership.getEventHubName(), "EventHub name");
assertEquals(eventProcessorClient.getIdentifier(), partitionOwnership.getOwnerId(), "OwnerId");
assertTrue(partitionOwnership.getLastModifiedTime() >= beforeTest, "LastModifiedTime");
assertTrue(partitionOwnership.getLastModifiedTime() <= System.currentTimeMillis(), "LastModifiedTime");
assertNotNull(partitionOwnership.getETag());
}).verifyComplete();
verify(eventHubAsyncClient, atLeastOnce()).getPartitionIds();
verify(eventHubAsyncClient, atLeastOnce())
.createConsumer(anyString(), anyInt());
verify(consumer1, atLeastOnce()).receiveFromPartition(anyString(), any(EventPosition.class),
any(ReceiveOptions.class));
verify(consumer1, atLeastOnce()).close();
eventProcessorClient.stop();
StepVerifier.create(checkpointStore.listOwnership("test-ns", "test-eh", "test-consumer"))
.assertNext(partitionOwnership -> {
assertEquals("1", partitionOwnership.getPartitionId(), "Partition");
assertEquals("test-consumer", partitionOwnership.getConsumerGroup(), "Consumer");
assertEquals("test-eh", partitionOwnership.getEventHubName(), "EventHub name");
assertEquals("", partitionOwnership.getOwnerId(), "Owner Id");
assertTrue(partitionOwnership.getLastModifiedTime() >= beforeTest, "LastModifiedTime");
assertTrue(partitionOwnership.getLastModifiedTime() <= System.currentTimeMillis(), "LastModifiedTime");
assertNotNull(partitionOwnership.getETag());
}).verifyComplete();
}
/**
* Tests process start spans invoked for {@link EventProcessorClient}.
*
* @throws Exception if an error occurs while running the test.
*/
@Test
/**
* Tests {@link EventProcessorClient} that processes events from an Event Hub configured with multiple partitions.
*
* @throws Exception if an error occurs while running the test.
*/
@Test
public void testWithMultiplePartitions() throws Exception {
final CountDownLatch count = new CountDownLatch(1);
final Set<String> identifiers = new HashSet<>();
identifiers.add("1");
identifiers.add("2");
identifiers.add("3");
final EventPosition position = EventPosition.latest();
when(eventHubClientBuilder.buildAsyncClient()).thenReturn(eventHubAsyncClient);
when(eventHubAsyncClient.getPartitionIds()).thenReturn(Flux.just("1", "2", "3"));
when(eventHubAsyncClient.getFullyQualifiedNamespace()).thenReturn("test-ns");
when(eventHubAsyncClient.getEventHubName()).thenReturn("test-eh");
when(eventHubAsyncClient
.createConsumer(anyString(), anyInt()))
.thenReturn(consumer1, consumer2, consumer3);
when(eventHubAsyncClient.getPartitionIds()).thenReturn(Flux.fromIterable(identifiers));
when(eventHubAsyncClient.getEventHubName()).thenReturn("test-eh");
when(consumer1.receiveFromPartition(argThat(arg -> identifiers.remove(arg)), eq(position), any()))
.thenReturn(Mono.fromRunnable(() -> count.countDown())
.thenMany(Flux.just(getEvent(eventData1), getEvent(eventData2))));
when(eventData1.getSequenceNumber()).thenReturn(1L);
when(eventData2.getSequenceNumber()).thenReturn(2L);
when(eventData1.getOffset()).thenReturn(1L);
when(eventData2.getOffset()).thenReturn(100L);
when(consumer2.receiveFromPartition(argThat(arg -> identifiers.remove(arg)), eq(position), any()))
.thenReturn(Mono.fromRunnable(() -> count.countDown()).thenMany(Flux.just(getEvent(eventData3))));
when(eventData3.getSequenceNumber()).thenReturn(1L);
when(eventData3.getOffset()).thenReturn(1L);
when(consumer3.receiveFromPartition(argThat(arg -> identifiers.remove(arg)), eq(position), any()))
.thenReturn(Mono.fromRunnable(() -> count.countDown()).thenMany(Flux.just(getEvent(eventData4))));
when(eventData4.getSequenceNumber()).thenReturn(1L);
when(eventData4.getOffset()).thenReturn(1L);
final InMemoryCheckpointStore checkpointStore = new InMemoryCheckpointStore();
final TracerProvider tracerProvider = new TracerProvider(Collections.emptyList());
final EventProcessorClient eventProcessorClient = new EventProcessorClient(eventHubClientBuilder,
"test-consumer",
TestPartitionProcessor::new, checkpointStore, false, tracerProvider, ec -> { }, new HashMap<>());
eventProcessorClient.start();
final boolean completed = count.await(10, TimeUnit.SECONDS);
eventProcessorClient.stop();
Assertions.assertTrue(completed);
StepVerifier.create(checkpointStore.listOwnership("test-ns", "test-eh", "test-consumer"))
.expectNextCount(1).verifyComplete();
verify(eventHubAsyncClient, atLeast(1)).getPartitionIds();
verify(eventHubAsyncClient, times(1))
.createConsumer(anyString(), anyInt());
Assertions.assertEquals(2, identifiers.size());
StepVerifier.create(checkpointStore.listOwnership("test-ns", "test-eh", "test-consumer"))
.assertNext(po -> {
String partitionId = po.getPartitionId();
verify(consumer1, atLeastOnce()).receiveFromPartition(eq(partitionId), any(EventPosition.class), any());
}).verifyComplete();
}
private PartitionEvent getEvent(EventData event) {
PartitionContext context = new PartitionContext("test-ns", "foo", "bar", "baz");
return new PartitionEvent(context, event, null);
}
private static final class TestPartitionProcessor extends PartitionProcessor {
@Override
public void processEvent(EventContext eventContext) {
eventContext.updateCheckpoint();
}
@Override
public void processError(ErrorContext errorContext) {
return;
}
}
} |
Was meaning to remove eventData2 altogether. Removed now. | public void testProcessSpans() throws Exception {
final Tracer tracer1 = mock(Tracer.class);
final List<Tracer> tracers = Collections.singletonList(tracer1);
TracerProvider tracerProvider = new TracerProvider(tracers);
when(eventHubClientBuilder.buildAsyncClient()).thenReturn(eventHubAsyncClient);
when(eventHubAsyncClient.getFullyQualifiedNamespace()).thenReturn("test-ns");
when(eventHubAsyncClient.getEventHubName()).thenReturn("test-eh");
when(eventHubAsyncClient.getPartitionIds()).thenReturn(Flux.just("1"));
when(eventHubAsyncClient
.createConsumer(anyString(), anyInt()))
.thenReturn(consumer1);
when(eventData1.getSequenceNumber()).thenReturn(1L);
when(eventData1.getSequenceNumber()).thenReturn(2L);
when(eventData1.getOffset()).thenReturn(1L);
when(eventData1.getOffset()).thenReturn(100L);
when(eventData1.getEnqueuedTime()).thenReturn(Instant.ofEpochSecond(1560639208));
String diagnosticId = "00-08ee063508037b1719dddcbf248e30e2-1365c684eb25daed-01";
Map<String, Object> properties = new HashMap<>();
properties.put(DIAGNOSTIC_ID_KEY, diagnosticId);
when(eventData1.getProperties()).thenReturn(properties);
when(consumer1.receiveFromPartition(anyString(), any(EventPosition.class), any(ReceiveOptions.class))).thenReturn(Flux.just(getEvent(eventData1)));
when(tracer1.extractContext(eq(diagnosticId), any())).thenAnswer(
invocation -> {
Context passed = invocation.getArgument(1, Context.class);
return passed.addData(SPAN_CONTEXT_KEY, "value");
}
);
when(tracer1.start(eq("EventHubs.process"), any(), eq(ProcessKind.PROCESS))).thenAnswer(
invocation -> {
Context passed = invocation.getArgument(1, Context.class);
assertTrue(passed.getData(MESSAGE_ENQUEUED_TIME).isPresent());
return passed.addData(SPAN_CONTEXT_KEY, "value1").addData("scope", (Closeable) () -> {
return;
}).addData(PARENT_SPAN_KEY, "value2");
}
);
final InMemoryCheckpointStore checkpointStore = new InMemoryCheckpointStore();
final EventProcessorClient eventProcessorClient = new EventProcessorClient(eventHubClientBuilder, "test-consumer",
TestPartitionProcessor::new, checkpointStore, false, tracerProvider, ec -> { }, new HashMap<>());
eventProcessorClient.start();
TimeUnit.SECONDS.sleep(10);
eventProcessorClient.stop();
verify(tracer1, times(1)).extractContext(eq(diagnosticId), any());
verify(tracer1, times(1)).start(eq("EventHubs.process"), any(), eq(ProcessKind.PROCESS));
verify(tracer1, times(1)).end(eq("success"), isNull(), any());
} | when(eventData1.getSequenceNumber()).thenReturn(2L); | public void testProcessSpans() throws Exception {
final Tracer tracer1 = mock(Tracer.class);
final List<Tracer> tracers = Collections.singletonList(tracer1);
TracerProvider tracerProvider = new TracerProvider(tracers);
when(eventHubClientBuilder.buildAsyncClient()).thenReturn(eventHubAsyncClient);
when(eventHubAsyncClient.getFullyQualifiedNamespace()).thenReturn("test-ns");
when(eventHubAsyncClient.getEventHubName()).thenReturn("test-eh");
when(eventHubAsyncClient.getPartitionIds()).thenReturn(Flux.just("1"));
when(eventHubAsyncClient
.createConsumer(anyString(), anyInt()))
.thenReturn(consumer1);
when(eventData1.getSequenceNumber()).thenReturn(1L);
when(eventData1.getOffset()).thenReturn(1L);
when(eventData1.getOffset()).thenReturn(100L);
when(eventData1.getEnqueuedTime()).thenReturn(Instant.ofEpochSecond(1560639208));
String diagnosticId = "00-08ee063508037b1719dddcbf248e30e2-1365c684eb25daed-01";
Map<String, Object> properties = new HashMap<>();
properties.put(DIAGNOSTIC_ID_KEY, diagnosticId);
when(eventData1.getProperties()).thenReturn(properties);
when(consumer1.receiveFromPartition(anyString(), any(EventPosition.class), any(ReceiveOptions.class))).thenReturn(Flux.just(getEvent(eventData1)));
when(tracer1.extractContext(eq(diagnosticId), any())).thenAnswer(
invocation -> {
Context passed = invocation.getArgument(1, Context.class);
return passed.addData(SPAN_CONTEXT_KEY, "value");
}
);
when(tracer1.start(eq("EventHubs.process"), any(), eq(ProcessKind.PROCESS))).thenAnswer(
invocation -> {
Context passed = invocation.getArgument(1, Context.class);
assertTrue(passed.getData(MESSAGE_ENQUEUED_TIME).isPresent());
return passed.addData(SPAN_CONTEXT_KEY, "value1").addData("scope", (Closeable) () -> {
return;
}).addData(PARENT_SPAN_KEY, "value2");
}
);
final InMemoryCheckpointStore checkpointStore = new InMemoryCheckpointStore();
final EventProcessorClient eventProcessorClient = new EventProcessorClient(eventHubClientBuilder, "test-consumer",
TestPartitionProcessor::new, checkpointStore, false, tracerProvider, ec -> { }, new HashMap<>());
eventProcessorClient.start();
TimeUnit.SECONDS.sleep(10);
eventProcessorClient.stop();
verify(tracer1, times(1)).extractContext(eq(diagnosticId), any());
verify(tracer1, times(1)).start(eq("EventHubs.process"), any(), eq(ProcessKind.PROCESS));
verify(tracer1, times(1)).end(eq("success"), isNull(), any());
} | class EventProcessorClientTest {
@Mock
private EventHubClientBuilder eventHubClientBuilder;
@Mock
private EventHubAsyncClient eventHubAsyncClient;
@Mock
private EventHubConsumerAsyncClient consumer1, consumer2, consumer3;
@Mock
private EventData eventData1, eventData2, eventData3, eventData4;
@BeforeEach
public void setup() {
MockitoAnnotations.initMocks(this);
}
@AfterEach
public void teardown() {
consumer1 = null;
consumer2 = null;
consumer3 = null;
eventData1 = null;
eventData2 = null;
eventData3 = null;
eventData4 = null;
eventHubAsyncClient = null;
Mockito.framework().clearInlineMocks();
}
/**
* Tests all the happy cases for {@link EventProcessorClient}.
*
* @throws Exception if an error occurs while running the test.
*/
@Test
public void testWithSimplePartitionProcessor() throws Exception {
final Tracer tracer1 = mock(Tracer.class);
final List<Tracer> tracers = Collections.singletonList(tracer1);
TracerProvider tracerProvider = new TracerProvider(tracers);
when(eventHubClientBuilder.buildAsyncClient()).thenReturn(eventHubAsyncClient);
when(eventHubAsyncClient.getFullyQualifiedNamespace()).thenReturn("test-ns");
when(eventHubAsyncClient.getEventHubName()).thenReturn("test-eh");
when(eventHubAsyncClient.getPartitionIds()).thenReturn(Flux.just("1"));
when(eventHubAsyncClient
.createConsumer(anyString(), anyInt()))
.thenReturn(consumer1);
when(consumer1.receiveFromPartition(anyString(), any(EventPosition.class), any(ReceiveOptions.class))).thenReturn(Flux.just(getEvent(eventData1), getEvent(eventData2)));
when(eventData1.getSequenceNumber()).thenReturn(1L);
when(eventData2.getSequenceNumber()).thenReturn(2L);
when(eventData1.getOffset()).thenReturn(1L);
when(eventData2.getOffset()).thenReturn(100L);
final InMemoryCheckpointStore checkpointStore = new InMemoryCheckpointStore();
final TestPartitionProcessor testPartitionProcessor = new TestPartitionProcessor();
final long beforeTest = System.currentTimeMillis();
String diagnosticId = "00-08ee063508037b1719dddcbf248e30e2-1365c684eb25daed-01";
when(tracer1.extractContext(eq(diagnosticId), any())).thenAnswer(
invocation -> {
Context passed = invocation.getArgument(1, Context.class);
return passed.addData(SPAN_CONTEXT_KEY, "value");
}
);
when(tracer1.start(eq("EventHubs.process"), any(), eq(ProcessKind.PROCESS))).thenAnswer(
invocation -> {
Context passed = invocation.getArgument(1, Context.class);
return passed.addData(SPAN_CONTEXT_KEY, "value1")
.addData("scope", (Closeable) () -> {
})
.addData(PARENT_SPAN_KEY, "value2");
}
);
final EventProcessorClient eventProcessorClient = new EventProcessorClient(eventHubClientBuilder, "test-consumer",
() -> testPartitionProcessor, checkpointStore, false, tracerProvider, ec -> { }, new HashMap<>());
eventProcessorClient.start();
TimeUnit.SECONDS.sleep(10);
assertNotNull(eventProcessorClient.getIdentifier());
StepVerifier.create(checkpointStore.listOwnership("test-ns", "test-eh", "test-consumer"))
.expectNextCount(1).verifyComplete();
StepVerifier.create(checkpointStore.listOwnership("test-ns", "test-eh", "test-consumer"))
.assertNext(partitionOwnership -> {
assertEquals("1", partitionOwnership.getPartitionId(), "Partition");
assertEquals("test-consumer", partitionOwnership.getConsumerGroup(), "Consumer");
assertEquals("test-eh", partitionOwnership.getEventHubName(), "EventHub name");
assertEquals(eventProcessorClient.getIdentifier(), partitionOwnership.getOwnerId(), "OwnerId");
assertTrue(partitionOwnership.getLastModifiedTime() >= beforeTest, "LastModifiedTime");
assertTrue(partitionOwnership.getLastModifiedTime() <= System.currentTimeMillis(), "LastModifiedTime");
assertNotNull(partitionOwnership.getETag());
}).verifyComplete();
verify(eventHubAsyncClient, atLeastOnce()).getPartitionIds();
verify(eventHubAsyncClient, atLeastOnce())
.createConsumer(anyString(), anyInt());
verify(consumer1, atLeastOnce()).receiveFromPartition(anyString(), any(EventPosition.class),
any(ReceiveOptions.class));
verify(consumer1, atLeastOnce()).close();
eventProcessorClient.stop();
StepVerifier.create(checkpointStore.listOwnership("test-ns", "test-eh", "test-consumer"))
.assertNext(partitionOwnership -> {
assertEquals("1", partitionOwnership.getPartitionId(), "Partition");
assertEquals("test-consumer", partitionOwnership.getConsumerGroup(), "Consumer");
assertEquals("test-eh", partitionOwnership.getEventHubName(), "EventHub name");
assertEquals("", partitionOwnership.getOwnerId(), "Owner Id");
assertTrue(partitionOwnership.getLastModifiedTime() >= beforeTest, "LastModifiedTime");
assertTrue(partitionOwnership.getLastModifiedTime() <= System.currentTimeMillis(), "LastModifiedTime");
assertNotNull(partitionOwnership.getETag());
}).verifyComplete();
}
/**
* Tests process start spans invoked for {@link EventProcessorClient}.
*
* @throws Exception if an error occurs while running the test.
*/
@Test
/**
* Tests {@link EventProcessorClient} that processes events from an Event Hub configured with multiple partitions.
*
* @throws Exception if an error occurs while running the test.
*/
@Test
public void testWithMultiplePartitions() throws Exception {
final CountDownLatch count = new CountDownLatch(1);
final Set<String> identifiers = new HashSet<>();
identifiers.add("1");
identifiers.add("2");
identifiers.add("3");
final EventPosition position = EventPosition.latest();
when(eventHubClientBuilder.buildAsyncClient()).thenReturn(eventHubAsyncClient);
when(eventHubAsyncClient.getPartitionIds()).thenReturn(Flux.just("1", "2", "3"));
when(eventHubAsyncClient.getFullyQualifiedNamespace()).thenReturn("test-ns");
when(eventHubAsyncClient.getEventHubName()).thenReturn("test-eh");
when(eventHubAsyncClient
.createConsumer(anyString(), anyInt()))
.thenReturn(consumer1, consumer2, consumer3);
when(eventHubAsyncClient.getPartitionIds()).thenReturn(Flux.fromIterable(identifiers));
when(eventHubAsyncClient.getEventHubName()).thenReturn("test-eh");
when(consumer1.receiveFromPartition(argThat(arg -> identifiers.remove(arg)), eq(position), any()))
.thenReturn(Mono.fromRunnable(() -> count.countDown())
.thenMany(Flux.just(getEvent(eventData1), getEvent(eventData2))));
when(eventData1.getSequenceNumber()).thenReturn(1L);
when(eventData2.getSequenceNumber()).thenReturn(2L);
when(eventData1.getOffset()).thenReturn(1L);
when(eventData2.getOffset()).thenReturn(100L);
when(consumer2.receiveFromPartition(argThat(arg -> identifiers.remove(arg)), eq(position), any()))
.thenReturn(Mono.fromRunnable(() -> count.countDown()).thenMany(Flux.just(getEvent(eventData3))));
when(eventData3.getSequenceNumber()).thenReturn(1L);
when(eventData3.getOffset()).thenReturn(1L);
when(consumer3.receiveFromPartition(argThat(arg -> identifiers.remove(arg)), eq(position), any()))
.thenReturn(Mono.fromRunnable(() -> count.countDown()).thenMany(Flux.just(getEvent(eventData4))));
when(eventData4.getSequenceNumber()).thenReturn(1L);
when(eventData4.getOffset()).thenReturn(1L);
final InMemoryCheckpointStore checkpointStore = new InMemoryCheckpointStore();
final TracerProvider tracerProvider = new TracerProvider(Collections.emptyList());
final EventProcessorClient eventProcessorClient = new EventProcessorClient(eventHubClientBuilder,
"test-consumer",
TestPartitionProcessor::new, checkpointStore, false, tracerProvider, ec -> { }, new HashMap<>());
eventProcessorClient.start();
final boolean completed = count.await(10, TimeUnit.SECONDS);
eventProcessorClient.stop();
Assertions.assertTrue(completed);
StepVerifier.create(checkpointStore.listOwnership("test-ns", "test-eh", "test-consumer"))
.expectNextCount(1).verifyComplete();
verify(eventHubAsyncClient, atLeast(1)).getPartitionIds();
verify(eventHubAsyncClient, times(1))
.createConsumer(anyString(), anyInt());
Assertions.assertEquals(2, identifiers.size());
StepVerifier.create(checkpointStore.listOwnership("test-ns", "test-eh", "test-consumer"))
.assertNext(po -> {
String partitionId = po.getPartitionId();
verify(consumer1, atLeastOnce()).receiveFromPartition(eq(partitionId), any(EventPosition.class), any());
}).verifyComplete();
}
private PartitionEvent getEvent(EventData event) {
PartitionContext context = new PartitionContext("test-ns", "foo", "bar", "baz");
return new PartitionEvent(context, event, null);
}
private static final class TestPartitionProcessor extends PartitionProcessor {
@Override
public void processEvent(EventContext eventContext) {
eventContext.updateCheckpoint();
}
@Override
public void processError(ErrorContext errorContext) {
return;
}
}
} | class EventProcessorClientTest {
@Mock
private EventHubClientBuilder eventHubClientBuilder;
@Mock
private EventHubAsyncClient eventHubAsyncClient;
@Mock
private EventHubConsumerAsyncClient consumer1, consumer2, consumer3;
@Mock
private EventData eventData1, eventData2, eventData3, eventData4;
@BeforeEach
public void setup() {
MockitoAnnotations.initMocks(this);
}
@AfterEach
public void teardown() {
consumer1 = null;
consumer2 = null;
consumer3 = null;
eventData1 = null;
eventData2 = null;
eventData3 = null;
eventData4 = null;
eventHubAsyncClient = null;
Mockito.framework().clearInlineMocks();
}
/**
* Tests all the happy cases for {@link EventProcessorClient}.
*
* @throws Exception if an error occurs while running the test.
*/
@Test
public void testWithSimplePartitionProcessor() throws Exception {
final Tracer tracer1 = mock(Tracer.class);
final List<Tracer> tracers = Collections.singletonList(tracer1);
TracerProvider tracerProvider = new TracerProvider(tracers);
when(eventHubClientBuilder.buildAsyncClient()).thenReturn(eventHubAsyncClient);
when(eventHubAsyncClient.getFullyQualifiedNamespace()).thenReturn("test-ns");
when(eventHubAsyncClient.getEventHubName()).thenReturn("test-eh");
when(eventHubAsyncClient.getPartitionIds()).thenReturn(Flux.just("1"));
when(eventHubAsyncClient
.createConsumer(anyString(), anyInt()))
.thenReturn(consumer1);
when(consumer1.receiveFromPartition(anyString(), any(EventPosition.class), any(ReceiveOptions.class))).thenReturn(Flux.just(getEvent(eventData1), getEvent(eventData2)));
when(eventData1.getSequenceNumber()).thenReturn(1L);
when(eventData2.getSequenceNumber()).thenReturn(2L);
when(eventData1.getOffset()).thenReturn(1L);
when(eventData2.getOffset()).thenReturn(100L);
final InMemoryCheckpointStore checkpointStore = new InMemoryCheckpointStore();
final TestPartitionProcessor testPartitionProcessor = new TestPartitionProcessor();
final long beforeTest = System.currentTimeMillis();
String diagnosticId = "00-08ee063508037b1719dddcbf248e30e2-1365c684eb25daed-01";
when(tracer1.extractContext(eq(diagnosticId), any())).thenAnswer(
invocation -> {
Context passed = invocation.getArgument(1, Context.class);
return passed.addData(SPAN_CONTEXT_KEY, "value");
}
);
when(tracer1.start(eq("EventHubs.process"), any(), eq(ProcessKind.PROCESS))).thenAnswer(
invocation -> {
Context passed = invocation.getArgument(1, Context.class);
return passed.addData(SPAN_CONTEXT_KEY, "value1")
.addData("scope", (Closeable) () -> {
})
.addData(PARENT_SPAN_KEY, "value2");
}
);
final EventProcessorClient eventProcessorClient = new EventProcessorClient(eventHubClientBuilder, "test-consumer",
() -> testPartitionProcessor, checkpointStore, false, tracerProvider, ec -> { }, new HashMap<>());
eventProcessorClient.start();
TimeUnit.SECONDS.sleep(10);
assertNotNull(eventProcessorClient.getIdentifier());
StepVerifier.create(checkpointStore.listOwnership("test-ns", "test-eh", "test-consumer"))
.expectNextCount(1).verifyComplete();
StepVerifier.create(checkpointStore.listOwnership("test-ns", "test-eh", "test-consumer"))
.assertNext(partitionOwnership -> {
assertEquals("1", partitionOwnership.getPartitionId(), "Partition");
assertEquals("test-consumer", partitionOwnership.getConsumerGroup(), "Consumer");
assertEquals("test-eh", partitionOwnership.getEventHubName(), "EventHub name");
assertEquals(eventProcessorClient.getIdentifier(), partitionOwnership.getOwnerId(), "OwnerId");
assertTrue(partitionOwnership.getLastModifiedTime() >= beforeTest, "LastModifiedTime");
assertTrue(partitionOwnership.getLastModifiedTime() <= System.currentTimeMillis(), "LastModifiedTime");
assertNotNull(partitionOwnership.getETag());
}).verifyComplete();
verify(eventHubAsyncClient, atLeastOnce()).getPartitionIds();
verify(eventHubAsyncClient, atLeastOnce())
.createConsumer(anyString(), anyInt());
verify(consumer1, atLeastOnce()).receiveFromPartition(anyString(), any(EventPosition.class),
any(ReceiveOptions.class));
verify(consumer1, atLeastOnce()).close();
eventProcessorClient.stop();
StepVerifier.create(checkpointStore.listOwnership("test-ns", "test-eh", "test-consumer"))
.assertNext(partitionOwnership -> {
assertEquals("1", partitionOwnership.getPartitionId(), "Partition");
assertEquals("test-consumer", partitionOwnership.getConsumerGroup(), "Consumer");
assertEquals("test-eh", partitionOwnership.getEventHubName(), "EventHub name");
assertEquals("", partitionOwnership.getOwnerId(), "Owner Id");
assertTrue(partitionOwnership.getLastModifiedTime() >= beforeTest, "LastModifiedTime");
assertTrue(partitionOwnership.getLastModifiedTime() <= System.currentTimeMillis(), "LastModifiedTime");
assertNotNull(partitionOwnership.getETag());
}).verifyComplete();
}
/**
* Tests process start spans invoked for {@link EventProcessorClient}.
*
* @throws Exception if an error occurs while running the test.
*/
@Test
/**
* Tests {@link EventProcessorClient} that processes events from an Event Hub configured with multiple partitions.
*
* @throws Exception if an error occurs while running the test.
*/
@Test
public void testWithMultiplePartitions() throws Exception {
final CountDownLatch count = new CountDownLatch(1);
final Set<String> identifiers = new HashSet<>();
identifiers.add("1");
identifiers.add("2");
identifiers.add("3");
final EventPosition position = EventPosition.latest();
when(eventHubClientBuilder.buildAsyncClient()).thenReturn(eventHubAsyncClient);
when(eventHubAsyncClient.getPartitionIds()).thenReturn(Flux.just("1", "2", "3"));
when(eventHubAsyncClient.getFullyQualifiedNamespace()).thenReturn("test-ns");
when(eventHubAsyncClient.getEventHubName()).thenReturn("test-eh");
when(eventHubAsyncClient
.createConsumer(anyString(), anyInt()))
.thenReturn(consumer1, consumer2, consumer3);
when(eventHubAsyncClient.getPartitionIds()).thenReturn(Flux.fromIterable(identifiers));
when(eventHubAsyncClient.getEventHubName()).thenReturn("test-eh");
when(consumer1.receiveFromPartition(argThat(arg -> identifiers.remove(arg)), eq(position), any()))
.thenReturn(Mono.fromRunnable(() -> count.countDown())
.thenMany(Flux.just(getEvent(eventData1), getEvent(eventData2))));
when(eventData1.getSequenceNumber()).thenReturn(1L);
when(eventData2.getSequenceNumber()).thenReturn(2L);
when(eventData1.getOffset()).thenReturn(1L);
when(eventData2.getOffset()).thenReturn(100L);
when(consumer2.receiveFromPartition(argThat(arg -> identifiers.remove(arg)), eq(position), any()))
.thenReturn(Mono.fromRunnable(() -> count.countDown()).thenMany(Flux.just(getEvent(eventData3))));
when(eventData3.getSequenceNumber()).thenReturn(1L);
when(eventData3.getOffset()).thenReturn(1L);
when(consumer3.receiveFromPartition(argThat(arg -> identifiers.remove(arg)), eq(position), any()))
.thenReturn(Mono.fromRunnable(() -> count.countDown()).thenMany(Flux.just(getEvent(eventData4))));
when(eventData4.getSequenceNumber()).thenReturn(1L);
when(eventData4.getOffset()).thenReturn(1L);
final InMemoryCheckpointStore checkpointStore = new InMemoryCheckpointStore();
final TracerProvider tracerProvider = new TracerProvider(Collections.emptyList());
final EventProcessorClient eventProcessorClient = new EventProcessorClient(eventHubClientBuilder,
"test-consumer",
TestPartitionProcessor::new, checkpointStore, false, tracerProvider, ec -> { }, new HashMap<>());
eventProcessorClient.start();
final boolean completed = count.await(10, TimeUnit.SECONDS);
eventProcessorClient.stop();
Assertions.assertTrue(completed);
StepVerifier.create(checkpointStore.listOwnership("test-ns", "test-eh", "test-consumer"))
.expectNextCount(1).verifyComplete();
verify(eventHubAsyncClient, atLeast(1)).getPartitionIds();
verify(eventHubAsyncClient, times(1))
.createConsumer(anyString(), anyInt());
Assertions.assertEquals(2, identifiers.size());
StepVerifier.create(checkpointStore.listOwnership("test-ns", "test-eh", "test-consumer"))
.assertNext(po -> {
String partitionId = po.getPartitionId();
verify(consumer1, atLeastOnce()).receiveFromPartition(eq(partitionId), any(EventPosition.class), any());
}).verifyComplete();
}
private PartitionEvent getEvent(EventData event) {
PartitionContext context = new PartitionContext("test-ns", "foo", "bar", "baz");
return new PartitionEvent(context, event, null);
}
private static final class TestPartitionProcessor extends PartitionProcessor {
@Override
public void processEvent(EventContext eventContext) {
eventContext.updateCheckpoint();
}
@Override
public void processError(ErrorContext errorContext) {
return;
}
}
} |
There is a direct specification to have this as a long value. It could be directly be used as-is if kept long, maybe? >Please add enqueuedTime attribute on each link when processing messages: unix epoch time with milliseconds precision representing when message was enqueued (x-opt-enqueued-time system property). Attribute value should have long type. | private void addSpanRequestAttributes(Span span, Context context, String spanName) {
Objects.requireNonNull(span, "'span' cannot be null.");
String entityPath = getOrDefault(context, ENTITY_PATH_KEY, null, String.class);
if (entityPath != null) {
span.setAttribute(MESSAGE_BUS_DESTINATION, AttributeValue.stringAttributeValue(entityPath));
}
String hostName = getOrDefault(context, HOST_NAME_KEY, null, String.class);
if (hostName != null) {
span.setAttribute(PEER_ENDPOINT, AttributeValue.stringAttributeValue(hostName));
}
Long messageEnqueuedTime = getOrDefault(context, MESSAGE_ENQUEUED_TIME, null, Long.class);
if (messageEnqueuedTime != null) {
span.setAttribute(MESSAGE_ENQUEUED_TIME, messageEnqueuedTime);
}
} | Long messageEnqueuedTime = getOrDefault(context, MESSAGE_ENQUEUED_TIME, null, Long.class); | private void addSpanRequestAttributes(Span span, Context context, String spanName) {
Objects.requireNonNull(span, "'span' cannot be null.");
String entityPath = getOrDefault(context, ENTITY_PATH_KEY, null, String.class);
if (entityPath != null) {
span.setAttribute(MESSAGE_BUS_DESTINATION, AttributeValue.stringAttributeValue(entityPath));
}
String hostName = getOrDefault(context, HOST_NAME_KEY, null, String.class);
if (hostName != null) {
span.setAttribute(PEER_ENDPOINT, AttributeValue.stringAttributeValue(hostName));
}
Long messageEnqueuedTime = getOrDefault(context, MESSAGE_ENQUEUED_TIME, null, Long.class);
if (messageEnqueuedTime != null) {
span.setAttribute(MESSAGE_ENQUEUED_TIME, messageEnqueuedTime);
}
} | class OpenTelemetryTracer implements com.azure.core.util.tracing.Tracer {
private static final Tracer TRACER = OpenTelemetry.getTracerFactory().get("Azure-OpenTelemetry");
static final String AZ_NAMESPACE_KEY = "az.namespace";
static final String MESSAGE_BUS_DESTINATION = "message_bus.destination";
static final String PEER_ENDPOINT = "peer.address";
static final String MESSAGE_ENQUEUED_TIME = "x-opt-enqueued-time";
private final ClientLogger logger = new ClientLogger(OpenTelemetryTracer.class);
/**
 * {@inheritDoc}
 *
 * <p>Starts a span named {@code spanName}, parented to the span in {@code context} when one is
 * present, and returns a context carrying the started span under {@code PARENT_SPAN_KEY}. When the
 * span is recording, the Azure namespace attribute is copied from the context onto the span.</p>
 */
@Override
public Context start(String spanName, Context context) {
    Objects.requireNonNull(spanName, "'spanName' cannot be null.");
    Objects.requireNonNull(context, "'context' cannot be null.");
    Builder spanBuilder = getSpanBuilder(spanName, context);
    Span span = spanBuilder.startSpan();
    if (span.isRecording()) {
        // Use the shared AZ_TRACING_NAMESPACE_KEY constant (same value as the previously
        // hard-coded "az.tracing.namespace" literal) for consistency with the
        // start(String, Context, ProcessKind) overload, which already uses the constant.
        String tracingNamespace = getOrDefault(context, AZ_TRACING_NAMESPACE_KEY, null, String.class);
        if (tracingNamespace != null) {
            span.setAttribute(AZ_NAMESPACE_KEY, AttributeValue.stringAttributeValue(tracingNamespace));
        }
    }
    return context.addData(PARENT_SPAN_KEY, span);
}
/**
 * {@inheritDoc}
 *
 * <p>Starts a span whose kind depends on the AMQP processing stage: SEND reuses the builder shared
 * via {@code getSharedSpanBuilder} and adds messaging attributes; MESSAGE starts a PRODUCER span and
 * serializes its span context for cross-process propagation; PROCESS starts a scoped CONSUMER
 * span.</p>
 */
@Override
public Context start(String spanName, Context context, ProcessKind processKind) {
Objects.requireNonNull(spanName, "'spanName' cannot be null.");
Objects.requireNonNull(context, "'context' cannot be null.");
Objects.requireNonNull(processKind, "'processKind' cannot be null.");
Span span;
Builder spanBuilder;
switch (processKind) {
case SEND:
// The builder must already have been stashed in the context by getSharedSpanBuilder;
// without it there is nothing to start, so return an empty context rather than failing.
spanBuilder = getOrDefault(context, SPAN_BUILDER_KEY, null, Builder.class);
if (spanBuilder == null) {
return Context.NONE;
}
span = spanBuilder.setSpanKind(Span.Kind.CLIENT).startSpan();
if (span.isRecording()) {
// Only pay for attribute extraction when the span is actually sampled.
addSpanRequestAttributes(span, context, spanName);
}
return context.addData(PARENT_SPAN_KEY, span);
case MESSAGE:
spanBuilder = getSpanBuilder(spanName, context);
span = spanBuilder.setSpanKind(Span.Kind.PRODUCER).startSpan();
if (span.isRecording()) {
span.setAttribute(AZ_NAMESPACE_KEY,
AttributeValue.stringAttributeValue(getOrDefault(context, AZ_TRACING_NAMESPACE_KEY, "",
String.class)));
}
// Replace the context with one carrying the traceparent/span-context so it can be
// attached to the outgoing message for propagation to the consumer side.
context = setContextData(span);
return context.addData(PARENT_SPAN_KEY, span);
case PROCESS:
return startScopedSpan(spanName, context);
default:
// Unknown processing stage; deliberately return an empty context instead of throwing.
return Context.NONE;
}
}
/**
 * {@inheritDoc}
 *
 * <p>Ends the span stored in {@code context}, first recording the HTTP status derived from
 * {@code responseCode}/{@code throwable} when the span is recording. Does nothing when the context
 * holds no span.</p>
 */
@Override
public void end(int responseCode, Throwable throwable, Context context) {
    Objects.requireNonNull(context, "'context' cannot be null.");
    final Span current = getOrDefault(context, PARENT_SPAN_KEY, null, Span.class);
    if (current != null) {
        if (current.isRecording()) {
            current.setStatus(HttpTraceUtil.parseResponseStatus(responseCode, throwable));
        }
        current.end();
    }
}
/**
 * {@inheritDoc}
 *
 * <p>Adds a string attribute to the span stored in {@code context}. Logs a warning and does nothing
 * when the value is null/empty or when the context holds no span.</p>
 */
@Override
public void setAttribute(String key, String value, Context context) {
    Objects.requireNonNull(context, "'context' cannot be null");
    if (CoreUtils.isNullOrEmpty(value)) {
        logger.warning("Failed to set span attribute since value is null or empty.");
        return;
    }
    final Span target = getOrDefault(context, PARENT_SPAN_KEY, null, Span.class);
    if (target == null) {
        logger.warning("Failed to find span to add attribute.");
        return;
    }
    target.setAttribute(key, AttributeValue.stringAttributeValue(value));
}
/**
 * {@inheritDoc}
 *
 * <p>Stores the user-provided span name in the returned context; {@code getSpanBuilder} prefers this
 * value over the library-generated name when the span is eventually created.</p>
 */
@Override
public Context setSpanName(String spanName, Context context) {
return context.addData(USER_SPAN_NAME_KEY, spanName);
}
/**
 * {@inheritDoc}
 *
 * <p>Ends the span stored in {@code context}, first recording the AMQP status derived from
 * {@code statusMessage}/{@code throwable} when the span is recording. Logs a warning when the
 * context holds no span.</p>
 */
@Override
public void end(String statusMessage, Throwable throwable, Context context) {
    final Span current = getOrDefault(context, PARENT_SPAN_KEY, null, Span.class);
    if (current != null) {
        if (current.isRecording()) {
            current.setStatus(AmqpTraceUtil.parseStatusMessage(statusMessage, throwable));
        }
        current.end();
    } else {
        logger.warning("Failed to find span to end it.");
    }
}
/**
 * Links the {@link SpanContext} stored in {@code context} to the shared span builder stored
 * alongside it. Logs a warning and returns without linking when either piece is missing.
 */
@Override
public void addLink(Context context) {
    final Builder sharedBuilder = getOrDefault(context, SPAN_BUILDER_KEY, null, Builder.class);
    if (sharedBuilder == null) {
        logger.warning("Failed to find spanBuilder to link it.");
        return;
    }
    // Only look up the span context once we know there is a builder to attach it to, so the
    // missing-builder case produces exactly one extraction warning.
    final SpanContext linkTarget = getOrDefault(context, SPAN_CONTEXT_KEY, null, SpanContext.class);
    if (linkTarget == null) {
        logger.warning("Failed to find span context to link it.");
        return;
    }
    sharedBuilder.addLink(linkTarget);
}
/**
 * {@inheritDoc}
 *
 * <p>Delegates to {@link AmqpPropagationFormatUtil} to parse the propagated diagnostic id and
 * surface the resulting span context in the returned {@link Context}.</p>
 */
@Override
public Context extractContext(String diagnosticId, Context context) {
return AmqpPropagationFormatUtil.extractContext(diagnosticId, context);
}
/**
 * Creates a span builder for {@code spanName} and stashes it in the returned context under
 * {@code SPAN_BUILDER_KEY}, so that {@code addLink} and the SEND phase of
 * {@code start(String, Context, ProcessKind)} can share one builder across multiple messages.
 */
@Override
public Context getSharedSpanBuilder(String spanName, Context context) {
return context.addData(SPAN_BUILDER_KEY, getSpanBuilder(spanName, context));
}
/**
 * Starts a new CONSUMER {@link Span}, parented to the remote {@link SpanContext} stored in the
 * context when one is present, and makes it the current span for the calling thread.
 * <p>The scope is exited when the scope object placed in the returned {@link Context} is closed.</p>
 *
 * @param spanName The name of the returned Span.
 * @param context The {@link Context} containing the {@link SpanContext}.
 *
 * @return The returned {@link Span} and the scope in a {@link Context} object.
 */
private Context startScopedSpan(String spanName, Context context) {
Objects.requireNonNull(context, "'context' cannot be null.");
Span span;
SpanContext spanContext = getOrDefault(context, SPAN_CONTEXT_KEY, null, SpanContext.class);
if (spanContext != null) {
// A remote parent was propagated with the message; continue that trace.
span = startSpanWithRemoteParent(spanName, spanContext);
} else {
Builder spanBuilder = getSpanBuilder(spanName, context);
span = spanBuilder.setSpanKind(Span.Kind.CONSUMER).startSpan();
}
if (span.isRecording()) {
addSpanRequestAttributes(span, context, spanName);
}
// NOTE(review): the scope is stored under the bare string literal "scope" rather than a named
// constant; callers must close it to release the current-span binding — consider a constant.
return context.addData(PARENT_SPAN_KEY, span).addData("scope", TRACER.withSpan(span));
}
/**
 * Creates and starts a CONSUMER {@link Span} whose parent is the remote span identified by the
 * given {@link SpanContext}.
 *
 * @param spanName The name of the returned Span.
 * @param spanContext The remote parent context of the returned Span.
 *
 * @return A started {@link Span} parented to the remote {@link SpanContext}.
 */
private static Span startSpanWithRemoteParent(String spanName, SpanContext spanContext) {
    return TRACER.spanBuilder(spanName)
        .setParent(spanContext)
        .setSpanKind(Span.Kind.CONSUMER)
        .startSpan();
}
/**
 * Extracts the trace identifiers of the given span and packages them for cross-process propagation.
 *
 * @param span The current tracing span.
 *
 * @return A new {@link Context} carrying the span's diagnostic id under {@code DIAGNOSTIC_ID_KEY}
 * and its {@link SpanContext} under {@code SPAN_CONTEXT_KEY}.
 */
private static Context setContextData(Span span) {
SpanContext spanContext = span.getContext();
final String traceparent = AmqpPropagationFormatUtil.getDiagnosticId(spanContext);
return new Context(DIAGNOSTIC_ID_KEY, traceparent).addData(SPAN_CONTEXT_KEY, spanContext);
}
/**
* Extracts request attributes from the given {@code context} and adds it to the started span.
*
* @param span The span to which request attributes are to be added.
* @param context The context containing the request attributes.
* @param spanName The name of the returned Span containing the component value.
*/
/**
 * Returns a {@link Builder} for a new child {@link Span}. The span name is the user-provided name
 * stored in the context when present, otherwise {@code spanName}; the parent is the span stored in
 * the context when present, otherwise the tracer's current span.
 *
 * @param spanName The fallback name for the returned Span.
 * @param context The context that may carry an explicit parent span and/or a user span name.
 *
 * @return A {@code Span.Builder} to create and start a new {@code Span}.
 */
private Builder getSpanBuilder(String spanName, Context context) {
    final Span explicitParent = getOrDefault(context, PARENT_SPAN_KEY, null, Span.class);
    final String userSpanName = getOrDefault(context, USER_SPAN_NAME_KEY, null, String.class);
    final String name = (userSpanName != null) ? userSpanName : spanName;
    final Span parent = (explicitParent != null) ? explicitParent : TRACER.getCurrentSpan();
    return TRACER.spanBuilder(name).setParent(parent);
}
/**
 * Fetches the value stored under {@code key} from the context when it is an instance of
 * {@code clazz}; otherwise logs a warning and returns {@code defaultValue}.
 *
 * @param context The context containing the specified key.
 * @param key The name of the attribute that needs to be extracted from the {@code Context}.
 * @param defaultValue The value to return when the data is not found or has the wrong type.
 * @param clazz The raw class to find data for.
 *
 * @return The stored value of type {@code T}, or {@code defaultValue}.
 */
@SuppressWarnings("unchecked")
private <T> T getOrDefault(Context context, String key, T defaultValue, Class<T> clazz) {
    final Object raw = context.getData(key).orElse(null);
    if (raw != null && clazz.isAssignableFrom(raw.getClass())) {
        return (T) raw;
    }
    logger.warning("Could not extract key '{}' of type '{}' from context.", key, clazz);
    return defaultValue;
}
} | class OpenTelemetryTracer implements com.azure.core.util.tracing.Tracer {
private static final Tracer TRACER = OpenTelemetry.getTracerFactory().get("Azure-OpenTelemetry");
static final String AZ_NAMESPACE_KEY = "az.namespace";
static final String MESSAGE_BUS_DESTINATION = "message_bus.destination";
static final String PEER_ENDPOINT = "peer.address";
private final ClientLogger logger = new ClientLogger(OpenTelemetryTracer.class);
/**
* {@inheritDoc}
*/
@Override
public Context start(String spanName, Context context) {
Objects.requireNonNull(spanName, "'spanName' cannot be null.");
Objects.requireNonNull(context, "'context' cannot be null.");
Builder spanBuilder = getSpanBuilder(spanName, context);
Span span = spanBuilder.startSpan();
if (span.isRecording()) {
String tracingNamespace = getOrDefault(context, "az.tracing.namespace", null, String.class);
if (tracingNamespace != null) {
span.setAttribute(AZ_NAMESPACE_KEY, AttributeValue.stringAttributeValue(tracingNamespace));
}
}
return context.addData(PARENT_SPAN_KEY, span);
}
/**
* {@inheritDoc}
*/
@Override
public Context start(String spanName, Context context, ProcessKind processKind) {
Objects.requireNonNull(spanName, "'spanName' cannot be null.");
Objects.requireNonNull(context, "'context' cannot be null.");
Objects.requireNonNull(processKind, "'processKind' cannot be null.");
Span span;
Builder spanBuilder;
switch (processKind) {
case SEND:
spanBuilder = getOrDefault(context, SPAN_BUILDER_KEY, null, Builder.class);
if (spanBuilder == null) {
return Context.NONE;
}
span = spanBuilder.setSpanKind(Span.Kind.CLIENT).startSpan();
if (span.isRecording()) {
addSpanRequestAttributes(span, context, spanName);
}
return context.addData(PARENT_SPAN_KEY, span);
case MESSAGE:
spanBuilder = getSpanBuilder(spanName, context);
span = spanBuilder.setSpanKind(Span.Kind.PRODUCER).startSpan();
if (span.isRecording()) {
span.setAttribute(AZ_NAMESPACE_KEY,
AttributeValue.stringAttributeValue(getOrDefault(context, AZ_TRACING_NAMESPACE_KEY, "",
String.class)));
}
context = setContextData(span);
return context.addData(PARENT_SPAN_KEY, span);
case PROCESS:
return startScopedSpan(spanName, context);
default:
return Context.NONE;
}
}
/**
* {@inheritDoc}
*/
@Override
public void end(int responseCode, Throwable throwable, Context context) {
Objects.requireNonNull(context, "'context' cannot be null.");
final Span span = getOrDefault(context, PARENT_SPAN_KEY, null, Span.class);
if (span == null) {
return;
}
if (span.isRecording()) {
span.setStatus(HttpTraceUtil.parseResponseStatus(responseCode, throwable));
}
span.end();
}
/**
* {@inheritDoc}
*/
@Override
public void setAttribute(String key, String value, Context context) {
Objects.requireNonNull(context, "'context' cannot be null");
if (CoreUtils.isNullOrEmpty(value)) {
logger.warning("Failed to set span attribute since value is null or empty.");
return;
}
final Span span = getOrDefault(context, PARENT_SPAN_KEY, null, Span.class);
if (span != null) {
span.setAttribute(key, AttributeValue.stringAttributeValue(value));
} else {
logger.warning("Failed to find span to add attribute.");
}
}
/**
* {@inheritDoc}
*/
@Override
public Context setSpanName(String spanName, Context context) {
return context.addData(USER_SPAN_NAME_KEY, spanName);
}
/**
* {@inheritDoc}
*/
@Override
public void end(String statusMessage, Throwable throwable, Context context) {
final Span span = getOrDefault(context, PARENT_SPAN_KEY, null, Span.class);
if (span == null) {
logger.warning("Failed to find span to end it.");
return;
}
if (span.isRecording()) {
span.setStatus(AmqpTraceUtil.parseStatusMessage(statusMessage, throwable));
}
span.end();
}
@Override
public void addLink(Context context) {
final Builder spanBuilder = getOrDefault(context, SPAN_BUILDER_KEY, null, Builder.class);
if (spanBuilder == null) {
logger.warning("Failed to find spanBuilder to link it.");
return;
}
final SpanContext spanContext = getOrDefault(context, SPAN_CONTEXT_KEY, null, SpanContext.class);
if (spanContext == null) {
logger.warning("Failed to find span context to link it.");
return;
}
spanBuilder.addLink(spanContext);
}
/**
* {@inheritDoc}
*/
@Override
public Context extractContext(String diagnosticId, Context context) {
return AmqpPropagationFormatUtil.extractContext(diagnosticId, context);
}
@Override
public Context getSharedSpanBuilder(String spanName, Context context) {
return context.addData(SPAN_BUILDER_KEY, getSpanBuilder(spanName, context));
}
/**
* Starts a new child {@link Span} with parent being the remote and uses the {@link Span} is in the current Context,
* to return an object that represents that scope.
* <p>The scope is exited when the returned object is closed.</p>
*
* @param spanName The name of the returned Span.
* @param context The {@link Context} containing the {@link SpanContext}.
*
* @return The returned {@link Span} and the scope in a {@link Context} object.
*/
private Context startScopedSpan(String spanName, Context context) {
Objects.requireNonNull(context, "'context' cannot be null.");
Span span;
SpanContext spanContext = getOrDefault(context, SPAN_CONTEXT_KEY, null, SpanContext.class);
if (spanContext != null) {
span = startSpanWithRemoteParent(spanName, spanContext);
} else {
Builder spanBuilder = getSpanBuilder(spanName, context);
span = spanBuilder.setSpanKind(Span.Kind.CONSUMER).startSpan();
}
if (span.isRecording()) {
addSpanRequestAttributes(span, context, spanName);
}
return context.addData(PARENT_SPAN_KEY, span).addData("scope", TRACER.withSpan(span));
}
/**
* Creates a {@link Builder} to create and start a new child {@link Span} with parent being the remote and
* designated by the {@link SpanContext}.
*
* @param spanName The name of the returned Span.
* @param spanContext The remote parent context of the returned Span.
*
* @return A {@link Span} with parent being the remote {@link Span} designated by the {@link SpanContext}.
*/
private static Span startSpanWithRemoteParent(String spanName, SpanContext spanContext) {
Builder spanBuilder = TRACER.spanBuilder(spanName).setParent(spanContext);
spanBuilder.setSpanKind(Span.Kind.CONSUMER);
return spanBuilder.startSpan();
}
/**
* Extracts the {@link SpanContext trace identifiers} and the {@link SpanContext} of the current tracing span as
* text and returns in a {@link Context} object.
*
* @param span The current tracing span.
*
* @return The {@link Context} containing the {@link SpanContext} and trace-parent of the current span.
*/
private static Context setContextData(Span span) {
SpanContext spanContext = span.getContext();
final String traceparent = AmqpPropagationFormatUtil.getDiagnosticId(spanContext);
return new Context(DIAGNOSTIC_ID_KEY, traceparent).addData(SPAN_CONTEXT_KEY, spanContext);
}
/**
* Extracts request attributes from the given {@code context} and adds it to the started span.
*
* @param span The span to which request attributes are to be added.
* @param context The context containing the request attributes.
* @param spanName The name of the returned Span containing the component value.
*/
/**
* Returns a {@link Builder} to create and start a new child {@link Span} with parent
* being the designated {@code Span}.
*
* @param spanName The name of the returned Span.
* @param context The context containing the span and the span name.
*
* @return A {@code Span.Builder} to create and start a new {@code Span}.
*/
private Builder getSpanBuilder(String spanName, Context context) {
Span parentSpan = getOrDefault(context, PARENT_SPAN_KEY, null, Span.class);
String spanNameKey = getOrDefault(context, USER_SPAN_NAME_KEY, null, String.class);
if (spanNameKey == null) {
spanNameKey = spanName;
}
if (parentSpan == null) {
parentSpan = TRACER.getCurrentSpan();
}
return TRACER.spanBuilder(spanNameKey).setParent(parentSpan);
}
/**
* Returns the value of the specified key from the context.
*
* @param key The name of the attribute that needs to be extracted from the {@code Context}.
* @param defaultValue the value to return in data not found.
* @param clazz clazz the type of raw class to find data for.
* @param context The context containing the specified key.
*
* @return The T type of raw class object
*/
@SuppressWarnings("unchecked")
private <T> T getOrDefault(Context context, String key, T defaultValue, Class<T> clazz) {
final Optional<Object> optional = context.getData(key);
final Object result = optional.filter(value -> clazz.isAssignableFrom(value.getClass())).orElseGet(() -> {
logger.warning("Could not extract key '{}' of type '{}' from context.", key, clazz);
return defaultValue;
});
return (T) result;
}
} |
@alzimmermsft In the next month or so, we'll be deploying a service update that will set both `client-request-id` and `x-ms-client-request-id` on the response if either `return-client-request-id` or `x-ms-return-client-request-id` is specified. | public void getServiceStatsReturnsRequestId() {
SearchServiceClient serviceClient = getSearchServiceClientBuilder().buildClient();
UUID expectedUuid = UUID.randomUUID();
RequestOptions requestOptions = new RequestOptions().setClientRequestId(expectedUuid);
Response<ServiceStatistics> response = serviceClient.getServiceStatisticsWithResponse(requestOptions, Context.NONE);
assertEquals(expectedUuid.toString(), response.getHeaders().getValue("client-request-id"));
assertReflectionEquals(response.getValue(), getExpectedServiceStatistics(), IGNORE_DEFAULTS);
} | assertEquals(expectedUuid.toString(), response.getHeaders().getValue("client-request-id")); | public void getServiceStatsReturnsRequestId() {
SearchServiceClient serviceClient = getSearchServiceClientBuilder().buildClient();
RequestOptions requestOptions = new RequestOptions().setClientRequestId(UUID.randomUUID());
Response<ServiceStatistics> response = serviceClient.getServiceStatisticsWithResponse(requestOptions, Context.NONE);
/*
* The service will always return a request-id and will conditionally return client-request-id if
* return-client-request-id is set to true. If client-request-id is sent in the request then request-id will
* have the same value. This test validates that client-request-id is returned and that request-id is equal to
* it.
*/
String actualRequestId = response.getHeaders().getValue("request-id");
String actualClientRequestId = response.getHeaders().getValue("client-request-id");
Assertions.assertNotNull(actualClientRequestId);
Assertions.assertEquals(actualClientRequestId, actualRequestId);
assertReflectionEquals(response.getValue(), getExpectedServiceStatistics(), IGNORE_DEFAULTS);
} | class SearchServiceSyncTests extends SearchServiceTestBase {
// Verifies that the synchronous client's getServiceStatistics() result matches the expected
// service statistics (compared by reflection, ignoring default-valued fields).
@Test
public void getServiceStatsReturnsCorrectDefinition() {
SearchServiceClient serviceClient = getSearchServiceClientBuilder().buildClient();
ServiceStatistics serviceStatistics = serviceClient.getServiceStatistics();
assertReflectionEquals(serviceStatistics, getExpectedServiceStatistics(), IGNORE_DEFAULTS);
}
/**
 * Verifies that getServiceStatisticsWithResponse returns the expected service statistics when
 * called with request options and an explicit context (reflection comparison, defaults ignored).
 */
@Test
public void getServiceStatsReturnsCorrectDefinitionWithResponse() {
    SearchServiceClient serviceClient = getSearchServiceClientBuilder().buildClient();
    assertReflectionEquals(
        serviceClient.getServiceStatisticsWithResponse(generateRequestOptions(), Context.NONE).getValue(),
        getExpectedServiceStatistics(), IGNORE_DEFAULTS);
}
@Test
} | class SearchServiceSyncTests extends SearchServiceTestBase {
@Test
public void getServiceStatsReturnsCorrectDefinition() {
SearchServiceClient serviceClient = getSearchServiceClientBuilder().buildClient();
ServiceStatistics serviceStatistics = serviceClient.getServiceStatistics();
assertReflectionEquals(serviceStatistics, getExpectedServiceStatistics(), IGNORE_DEFAULTS);
}
@Test
public void getServiceStatsReturnsCorrectDefinitionWithResponse() {
SearchServiceClient serviceClient = getSearchServiceClientBuilder().buildClient();
ServiceStatistics serviceStatistics = serviceClient.getServiceStatisticsWithResponse(generateRequestOptions(), Context.NONE).getValue();
assertReflectionEquals(serviceStatistics, getExpectedServiceStatistics(), IGNORE_DEFAULTS);
}
@Test
} |
This can now be deleted. | protected void beforeTest() {
beforeTestSetup();
client = clientSetup(pipeline -> new KeyClientBuilder()
.pipeline(pipeline)
.vaultUrl(getEndpoint())
.buildAsyncClient());
} | .buildAsyncClient()); | protected void beforeTest() {
beforeTestSetup();
} | class KeyAsyncClientTest extends KeyClientTestBase {
private KeyAsyncClient client;
/**
 * Builds the KeyAsyncClient under test for the given HTTP client and service version, falling back
 * to the interceptor manager's playback client when no HTTP client is supplied (playback mode).
 *
 * NOTE: the stray {@code @Override} annotation was removed — a private method cannot override a
 * supertype method, so the annotation is a compile error.
 */
private void getKeyAsyncClient(HttpClient httpClient,
    KeyServiceVersion serviceVersion) {
    client = clientSetup(pipeline -> new KeyClientBuilder()
        .vaultUrl(getEndpoint())
        .pipeline(pipeline)
        .httpClient(httpClient == null ? interceptorManager.getPlaybackClient() : httpClient)
        .serviceVersion(serviceVersion)
        .buildAsyncClient());
}
/**
 * Tests that a key can be created in the key vault.
 * Runs once per HttpClient/service-version combination supplied by getTestParameters; the runner
 * provides the key definition and the StepVerifier asserts the created key matches it.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void setKey(HttpClient httpClient, KeyServiceVersion serviceVersion) {
getKeyAsyncClient(httpClient, serviceVersion);
setKeyRunner((expected) -> StepVerifier.create(client.createKey(expected))
.assertNext(response -> assertKeyEquals(expected, response))
.verifyComplete());
}
/**
* Tests that we cannot create a key when the key is an empty string.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void setKeyEmptyName(HttpClient httpClient, KeyServiceVersion serviceVersion) {
getKeyAsyncClient(httpClient, serviceVersion);
StepVerifier.create(client.createKey("", KeyType.RSA))
.verifyErrorSatisfies(ex -> assertRestException(ex, ResourceModifiedException.class, HttpURLConnection.HTTP_BAD_REQUEST));
}
/**
* Tests that we can create keys when value is not null or an empty string.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void setKeyNullType(HttpClient httpClient, KeyServiceVersion serviceVersion) {
getKeyAsyncClient(httpClient, serviceVersion);
setKeyEmptyValueRunner((key) -> {
StepVerifier.create(client.createKey(key))
.verifyErrorSatisfies(ex -> assertRestException(ex, ResourceModifiedException.class, HttpURLConnection.HTTP_BAD_REQUEST));
});
}
/**
* Verifies that an exception is thrown when null key object is passed for creation.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void setKeyNull(HttpClient httpClient, KeyServiceVersion serviceVersion) {
getKeyAsyncClient(httpClient, serviceVersion);
StepVerifier.create(client.createKey(null))
.verifyError(NullPointerException.class);
}
/**
* Tests that a key is able to be updated when it exists.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void updateKey(HttpClient httpClient, KeyServiceVersion serviceVersion) {
getKeyAsyncClient(httpClient, serviceVersion);
updateKeyRunner((original, updated) -> {
StepVerifier.create(client.createKey(original))
.assertNext(response -> assertKeyEquals(original, response))
.verifyComplete();
KeyVaultKey keyToUpdate = client.getKey(original.getName()).block();
StepVerifier.create(client.updateKeyProperties(keyToUpdate.getProperties().setExpiresOn(updated.getExpiresOn())))
.assertNext(response -> {
assertNotNull(response);
assertEquals(original.getName(), response.getName());
}).verifyComplete();
StepVerifier.create(client.getKey(original.getName()))
.assertNext(updatedKeyResponse -> assertKeyEquals(updated, updatedKeyResponse))
.verifyComplete();
});
}
/**
* Tests that a key is not able to be updated when it is disabled. 403 error is expected.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void updateDisabledKey(HttpClient httpClient, KeyServiceVersion serviceVersion) {
getKeyAsyncClient(httpClient, serviceVersion);
updateDisabledKeyRunner((original, updated) -> {
StepVerifier.create(client.createKey(original))
.assertNext(response -> assertKeyEquals(original, response))
.verifyComplete();
KeyVaultKey keyToUpdate = client.getKey(original.getName()).block();
StepVerifier.create(client.updateKeyProperties(keyToUpdate.getProperties().setExpiresOn(updated.getExpiresOn())))
.assertNext(response -> {
assertNotNull(response);
assertEquals(original.getName(), response.getName());
}).verifyComplete();
StepVerifier.create(client.getKey(original.getName()))
.assertNext(updatedKeyResponse -> assertKeyEquals(updated, updatedKeyResponse))
.verifyComplete();
});
}
/**
* Tests that an existing key can be retrieved.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void getKey(HttpClient httpClient, KeyServiceVersion serviceVersion) {
getKeyAsyncClient(httpClient, serviceVersion);
getKeyRunner((original) -> {
StepVerifier.create(client.createKey(original))
.assertNext(response -> assertKeyEquals(original, response))
.verifyComplete();
StepVerifier.create(client.getKey(original.getName()))
.assertNext(response -> assertKeyEquals(original, response))
.verifyComplete();
});
}
/**
* Tests that a specific version of the key can be retrieved.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void getKeySpecificVersion(HttpClient httpClient, KeyServiceVersion serviceVersion) {
getKeyAsyncClient(httpClient, serviceVersion);
getKeySpecificVersionRunner((key, keyWithNewVal) -> {
final KeyVaultKey keyVersionOne = client.createKey(key).block();
final KeyVaultKey keyVersionTwo = client.createKey(keyWithNewVal).block();
StepVerifier.create(client.getKey(key.getName(), keyVersionOne.getProperties().getVersion()))
.assertNext(response -> assertKeyEquals(key, response))
.verifyComplete();
StepVerifier.create(client.getKey(keyWithNewVal.getName(), keyVersionTwo.getProperties().getVersion()))
.assertNext(response -> assertKeyEquals(keyWithNewVal, response))
.verifyComplete();
});
}
/**
* Tests that an attempt to get a non-existing key throws an error.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void getKeyNotFound(HttpClient httpClient, KeyServiceVersion serviceVersion) {
getKeyAsyncClient(httpClient, serviceVersion);
StepVerifier.create(client.getKey("non-existing"))
.verifyErrorSatisfies(ex -> assertRestException(ex, ResourceNotFoundException.class, HttpURLConnection.HTTP_NOT_FOUND));
}
/**
* Tests that an existing key can be deleted.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void deleteKey(HttpClient httpClient, KeyServiceVersion serviceVersion) {
getKeyAsyncClient(httpClient, serviceVersion);
deleteKeyRunner((keyToDelete) -> {
StepVerifier.create(client.createKey(keyToDelete))
.assertNext(keyResponse -> assertKeyEquals(keyToDelete, keyResponse)).verifyComplete();
PollerFlux<DeletedKey, Void> poller = client.beginDeleteKey(keyToDelete.getName());
AsyncPollResponse<DeletedKey, Void> deletedKeyPollResponse = poller
.takeUntil(apr -> apr.getStatus() == LongRunningOperationStatus.SUCCESSFULLY_COMPLETED)
.blockLast();
DeletedKey deletedKeyResponse = deletedKeyPollResponse.getValue();
assertNotNull(deletedKeyResponse.getDeletedOn());
assertNotNull(deletedKeyResponse.getRecoveryId());
assertNotNull(deletedKeyResponse.getScheduledPurgeDate());
assertEquals(keyToDelete.getName(), deletedKeyResponse.getName());
StepVerifier.create(client.purgeDeletedKeyWithResponse(keyToDelete.getName()))
.assertNext(voidResponse -> {
assertEquals(HttpURLConnection.HTTP_NO_CONTENT, voidResponse.getStatusCode());
}).verifyComplete();
sleepInRecordMode(15000);
});
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void deleteKeyNotFound(HttpClient httpClient, KeyServiceVersion serviceVersion) {
getKeyAsyncClient(httpClient, serviceVersion);
StepVerifier.create(client.beginDeleteKey("non-existing"))
.verifyErrorSatisfies(ex -> assertRestException(ex, ResourceNotFoundException.class, HttpURLConnection.HTTP_NOT_FOUND));
}
/**
* Tests that an attempt to retrieve a non existing deleted key throws an error on a soft-delete enabled vault.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void getDeletedKeyNotFound(HttpClient httpClient, KeyServiceVersion serviceVersion) {
getKeyAsyncClient(httpClient, serviceVersion);
StepVerifier.create(client.getDeletedKey("non-existing"))
.verifyErrorSatisfies(ex -> assertRestException(ex, ResourceNotFoundException.class, HttpURLConnection.HTTP_NOT_FOUND));
}
/**
* Tests that a deleted key can be recovered on a soft-delete enabled vault.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void recoverDeletedKey(HttpClient httpClient, KeyServiceVersion serviceVersion) {
getKeyAsyncClient(httpClient, serviceVersion);
recoverDeletedKeyRunner((keyToDeleteAndRecover) -> {
StepVerifier.create(client.createKey(keyToDeleteAndRecover))
.assertNext(keyResponse -> assertKeyEquals(keyToDeleteAndRecover, keyResponse)).verifyComplete();
PollerFlux<DeletedKey, Void> poller = client.beginDeleteKey(keyToDeleteAndRecover.getName());
AsyncPollResponse<DeletedKey, Void> deleteKeyPollResponse
= poller.takeUntil(apr -> apr.getStatus() == LongRunningOperationStatus.SUCCESSFULLY_COMPLETED)
.blockLast();
assertNotNull(deleteKeyPollResponse.getValue());
PollerFlux<KeyVaultKey, Void> recoverPoller = client.beginRecoverDeletedKey(keyToDeleteAndRecover.getName());
AsyncPollResponse<KeyVaultKey, Void> recoverKeyPollResponse
= recoverPoller.takeUntil(apr -> apr.getStatus() == LongRunningOperationStatus.SUCCESSFULLY_COMPLETED)
.blockLast();
KeyVaultKey keyResponse = recoverKeyPollResponse.getValue();
assertEquals(keyToDeleteAndRecover.getName(), keyResponse.getName());
assertEquals(keyToDeleteAndRecover.getNotBefore(), keyResponse.getProperties().getNotBefore());
assertEquals(keyToDeleteAndRecover.getExpiresOn(), keyResponse.getProperties().getExpiresOn());
});
}
/**
* Tests that an attempt to recover a non existing deleted key throws an error on a soft-delete enabled vault.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void recoverDeletedKeyNotFound(HttpClient httpClient, KeyServiceVersion serviceVersion) {
getKeyAsyncClient(httpClient, serviceVersion);
StepVerifier.create(client.beginRecoverDeletedKey("non-existing"))
.verifyErrorSatisfies(ex -> assertRestException(ex, ResourceNotFoundException.class, HttpURLConnection.HTTP_NOT_FOUND));
}
/**
* Tests that a key can be backed up in the key vault.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void backupKey(HttpClient httpClient, KeyServiceVersion serviceVersion) {
getKeyAsyncClient(httpClient, serviceVersion);
backupKeyRunner((keyToBackup) -> {
StepVerifier.create(client.createKey(keyToBackup))
.assertNext(keyResponse -> assertKeyEquals(keyToBackup, keyResponse)).verifyComplete();
StepVerifier.create(client.backupKey(keyToBackup.getName()))
.assertNext(response -> {
assertNotNull(response);
assertTrue(response.length > 0);
}).verifyComplete();
});
}
/**
* Tests that an attempt to backup a non existing key throws an error.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void backupKeyNotFound(HttpClient httpClient, KeyServiceVersion serviceVersion) {
getKeyAsyncClient(httpClient, serviceVersion);
StepVerifier.create(client.backupKey("non-existing"))
.verifyErrorSatisfies(ex -> assertRestException(ex, ResourceNotFoundException.class, HttpURLConnection.HTTP_NOT_FOUND));
}
/**
* Tests that a key can be backed up in the key vault.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void restoreKey(HttpClient httpClient, KeyServiceVersion serviceVersion) {
getKeyAsyncClient(httpClient, serviceVersion);
restoreKeyRunner((keyToBackupAndRestore) -> {
StepVerifier.create(client.createKey(keyToBackupAndRestore))
.assertNext(keyResponse -> assertKeyEquals(keyToBackupAndRestore, keyResponse)).verifyComplete();
byte[] backup = client.backupKey(keyToBackupAndRestore.getName()).block();
PollerFlux<DeletedKey, Void> poller = client.beginDeleteKey(keyToBackupAndRestore.getName());
AsyncPollResponse<DeletedKey, Void> pollResponse = poller
.takeUntil(apr -> apr.getStatus() == LongRunningOperationStatus.SUCCESSFULLY_COMPLETED)
.blockLast();
assertNotNull(pollResponse.getValue());
StepVerifier.create(client.purgeDeletedKeyWithResponse(keyToBackupAndRestore.getName()))
.assertNext(voidResponse -> {
assertEquals(HttpURLConnection.HTTP_NO_CONTENT, voidResponse.getStatusCode());
}).verifyComplete();
pollOnKeyPurge(keyToBackupAndRestore.getName());
sleepInRecordMode(60000);
StepVerifier.create(client.restoreKeyBackup(backup))
.assertNext(response -> {
assertEquals(keyToBackupAndRestore.getName(), response.getName());
assertEquals(keyToBackupAndRestore.getNotBefore(), response.getProperties().getNotBefore());
assertEquals(keyToBackupAndRestore.getExpiresOn(), response.getProperties().getExpiresOn());
}).verifyComplete();
});
}
/**
* Tests that an attempt to restore a key from malformed backup bytes throws an error.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void restoreKeyFromMalformedBackup(HttpClient httpClient, KeyServiceVersion serviceVersion) {
getKeyAsyncClient(httpClient, serviceVersion);
byte[] keyBackupBytes = "non-existing".getBytes();
StepVerifier.create(client.restoreKeyBackup(keyBackupBytes))
.verifyErrorSatisfies(ex -> assertRestException(ex, ResourceModifiedException.class, HttpURLConnection.HTTP_BAD_REQUEST));
}
/**
* Tests that a deleted key can be retrieved on a soft-delete enabled vault.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void getDeletedKey(HttpClient httpClient, KeyServiceVersion serviceVersion) {
getKeyAsyncClient(httpClient, serviceVersion);
getDeletedKeyRunner((keyToDeleteAndGet) -> {
StepVerifier.create(client.createKey(keyToDeleteAndGet))
.assertNext(keyResponse -> assertKeyEquals(keyToDeleteAndGet, keyResponse)).verifyComplete();
PollerFlux<DeletedKey, Void> poller = client.beginDeleteKey(keyToDeleteAndGet.getName());
AsyncPollResponse<DeletedKey, Void> pollResponse = poller
.takeUntil(apr -> apr.getStatus() == LongRunningOperationStatus.SUCCESSFULLY_COMPLETED)
.blockLast();
assertNotNull(pollResponse.getValue());
StepVerifier.create(client.getDeletedKey(keyToDeleteAndGet.getName()))
.assertNext(deletedKeyResponse -> {
assertNotNull(deletedKeyResponse.getDeletedOn());
assertNotNull(deletedKeyResponse.getRecoveryId());
assertNotNull(deletedKeyResponse.getScheduledPurgeDate());
assertEquals(keyToDeleteAndGet.getName(), deletedKeyResponse.getName());
}).verifyComplete();
StepVerifier.create(client.purgeDeletedKeyWithResponse(keyToDeleteAndGet.getName()))
.assertNext(voidResponse -> {
assertEquals(HttpURLConnection.HTTP_NO_CONTENT, voidResponse.getStatusCode());
}).verifyComplete();
pollOnKeyPurge(keyToDeleteAndGet.getName());
sleepInRecordMode(15000);
});
}
/**
* Tests that deleted keys can be listed in the key vault.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void listDeletedKeys(HttpClient httpClient, KeyServiceVersion serviceVersion) {
getKeyAsyncClient(httpClient, serviceVersion);
if (!interceptorManager.isPlaybackMode()) {
return;
}
listDeletedKeysRunner((keys) -> {
List<DeletedKey> deletedKeys = new ArrayList<>();
for (CreateKeyOptions key : keys.values()) {
StepVerifier.create(client.createKey(key))
.assertNext(keyResponse -> assertKeyEquals(key, keyResponse)).verifyComplete();
}
sleepInRecordMode(10000);
for (CreateKeyOptions key : keys.values()) {
PollerFlux<DeletedKey, Void> poller = client.beginDeleteKey(key.getName());
AsyncPollResponse<DeletedKey, Void> response = poller.blockLast();
assertNotNull(response.getValue());
}
sleepInRecordMode(90000);
DeletedKey deletedKey = client.listDeletedKeys().map(actualKey -> {
deletedKeys.add(actualKey);
assertNotNull(actualKey.getDeletedOn());
assertNotNull(actualKey.getRecoveryId());
return actualKey;
}).blockLast();
assertNotNull(deletedKey);
});
}
/**
* Tests that key versions can be listed in the key vault.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void listKeyVersions(HttpClient httpClient, KeyServiceVersion serviceVersion) {
getKeyAsyncClient(httpClient, serviceVersion);
listKeyVersionsRunner((keys) -> {
List<KeyProperties> output = new ArrayList<>();
String keyName = null;
for (CreateKeyOptions key : keys) {
keyName = key.getName();
StepVerifier.create(client.createKey(key))
.assertNext(keyResponse -> assertKeyEquals(key, keyResponse)).verifyComplete();
}
sleepInRecordMode(30000);
client.listPropertiesOfKeyVersions(keyName).subscribe(output::add);
sleepInRecordMode(30000);
assertEquals(keys.size(), output.size());
PollerFlux<DeletedKey, Void> poller = client.beginDeleteKey(keyName);
AsyncPollResponse<DeletedKey, Void> pollResponse = poller.blockLast();
assertNotNull(pollResponse.getValue());
StepVerifier.create(client.purgeDeletedKeyWithResponse(keyName))
.assertNext(voidResponse -> {
assertEquals(HttpURLConnection.HTTP_NO_CONTENT, voidResponse.getStatusCode());
}).verifyComplete();
pollOnKeyPurge(keyName);
});
}
/**
* Tests that keys can be listed in the key vault.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void listKeys(HttpClient httpClient, KeyServiceVersion serviceVersion) {
getKeyAsyncClient(httpClient, serviceVersion);
listKeysRunner((keys) -> {
for (CreateKeyOptions key : keys.values()) {
assertKeyEquals(key, client.createKey(key).block());
}
sleepInRecordMode(10000);
client.listPropertiesOfKeys().map(actualKey -> {
if (keys.containsKey(actualKey.getName())) {
CreateKeyOptions expectedKey = keys.get(actualKey.getName());
assertEquals(expectedKey.getExpiresOn(), actualKey.getExpiresOn());
assertEquals(expectedKey.getNotBefore(), actualKey.getNotBefore());
keys.remove(actualKey.getName());
}
return actualKey;
}).blockLast();
assertEquals(0, keys.size());
});
}
private void pollOnKeyDeletion(String keyName) {
int pendingPollCount = 0;
while (pendingPollCount < 30) {
DeletedKey deletedKey = null;
try {
deletedKey = client.getDeletedKeyWithResponse(keyName).block().getValue();
} catch (ResourceNotFoundException e) {
}
if (deletedKey == null) {
sleepInRecordMode(2000);
pendingPollCount += 1;
} else {
return;
}
}
System.err.printf("Deleted Key %s not found \n", keyName);
}
private void pollOnKeyPurge(String keyName) {
int pendingPollCount = 0;
while (pendingPollCount < 10) {
DeletedKey deletedKey = null;
try {
deletedKey = client.getDeletedKeyWithResponse(keyName).block().getValue();
} catch (ResourceNotFoundException e) {
}
if (deletedKey != null) {
sleepInRecordMode(2000);
pendingPollCount += 1;
} else {
return;
}
}
System.err.printf("Deleted Key %s was not purged \n", keyName);
}
} | class KeyAsyncClientTest extends KeyClientTestBase {
private KeyAsyncClient client;
@Override
private void createKeyAsyncClient(HttpClient httpClient, KeyServiceVersion serviceVersion) {
HttpPipeline httpPipeline = getHttpPipeline(httpClient, serviceVersion);
client = new KeyClientBuilder()
.vaultUrl(getEndpoint())
.pipeline(httpPipeline)
.serviceVersion(serviceVersion)
.buildAsyncClient();
}
/**
* Tests that a key can be created in the key vault.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void setKey(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyAsyncClient(httpClient, serviceVersion);
setKeyRunner((expected) -> StepVerifier.create(client.createKey(expected))
.assertNext(response -> assertKeyEquals(expected, response))
.verifyComplete());
}
/**
* Tests that we cannot create a key when the key is an empty string.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void setKeyEmptyName(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyAsyncClient(httpClient, serviceVersion);
StepVerifier.create(client.createKey("", KeyType.RSA))
.verifyErrorSatisfies(ex -> assertRestException(ex, ResourceModifiedException.class, HttpURLConnection.HTTP_BAD_REQUEST));
}
/**
* Tests that we can create keys when value is not null or an empty string.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void setKeyNullType(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyAsyncClient(httpClient, serviceVersion);
setKeyEmptyValueRunner((key) -> {
StepVerifier.create(client.createKey(key))
.verifyErrorSatisfies(ex -> assertRestException(ex, ResourceModifiedException.class, HttpURLConnection.HTTP_BAD_REQUEST));
});
}
/**
* Verifies that an exception is thrown when null key object is passed for creation.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void setKeyNull(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyAsyncClient(httpClient, serviceVersion);
StepVerifier.create(client.createKey(null))
.verifyError(NullPointerException.class);
}
/**
* Tests that a key is able to be updated when it exists.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void updateKey(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyAsyncClient(httpClient, serviceVersion);
updateKeyRunner((original, updated) -> {
StepVerifier.create(client.createKey(original))
.assertNext(response -> assertKeyEquals(original, response))
.verifyComplete();
KeyVaultKey keyToUpdate = client.getKey(original.getName()).block();
StepVerifier.create(client.updateKeyProperties(keyToUpdate.getProperties().setExpiresOn(updated.getExpiresOn())))
.assertNext(response -> {
assertNotNull(response);
assertEquals(original.getName(), response.getName());
}).verifyComplete();
StepVerifier.create(client.getKey(original.getName()))
.assertNext(updatedKeyResponse -> assertKeyEquals(updated, updatedKeyResponse))
.verifyComplete();
});
}
/**
* Tests that a key is not able to be updated when it is disabled. 403 error is expected.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void updateDisabledKey(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyAsyncClient(httpClient, serviceVersion);
updateDisabledKeyRunner((original, updated) -> {
StepVerifier.create(client.createKey(original))
.assertNext(response -> assertKeyEquals(original, response))
.verifyComplete();
KeyVaultKey keyToUpdate = client.getKey(original.getName()).block();
StepVerifier.create(client.updateKeyProperties(keyToUpdate.getProperties().setExpiresOn(updated.getExpiresOn())))
.assertNext(response -> {
assertNotNull(response);
assertEquals(original.getName(), response.getName());
}).verifyComplete();
StepVerifier.create(client.getKey(original.getName()))
.assertNext(updatedKeyResponse -> assertKeyEquals(updated, updatedKeyResponse))
.verifyComplete();
});
}
/**
* Tests that an existing key can be retrieved.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void getKey(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyAsyncClient(httpClient, serviceVersion);
getKeyRunner((original) -> {
StepVerifier.create(client.createKey(original))
.assertNext(response -> assertKeyEquals(original, response))
.verifyComplete();
StepVerifier.create(client.getKey(original.getName()))
.assertNext(response -> assertKeyEquals(original, response))
.verifyComplete();
});
}
/**
* Tests that a specific version of the key can be retrieved.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void getKeySpecificVersion(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyAsyncClient(httpClient, serviceVersion);
getKeySpecificVersionRunner((key, keyWithNewVal) -> {
final KeyVaultKey keyVersionOne = client.createKey(key).block();
final KeyVaultKey keyVersionTwo = client.createKey(keyWithNewVal).block();
StepVerifier.create(client.getKey(key.getName(), keyVersionOne.getProperties().getVersion()))
.assertNext(response -> assertKeyEquals(key, response))
.verifyComplete();
StepVerifier.create(client.getKey(keyWithNewVal.getName(), keyVersionTwo.getProperties().getVersion()))
.assertNext(response -> assertKeyEquals(keyWithNewVal, response))
.verifyComplete();
});
}
/**
* Tests that an attempt to get a non-existing key throws an error.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void getKeyNotFound(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyAsyncClient(httpClient, serviceVersion);
StepVerifier.create(client.getKey("non-existing"))
.verifyErrorSatisfies(ex -> assertRestException(ex, ResourceNotFoundException.class, HttpURLConnection.HTTP_NOT_FOUND));
}
/**
* Tests that an existing key can be deleted.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void deleteKey(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyAsyncClient(httpClient, serviceVersion);
deleteKeyRunner((keyToDelete) -> {
StepVerifier.create(client.createKey(keyToDelete))
.assertNext(keyResponse -> assertKeyEquals(keyToDelete, keyResponse)).verifyComplete();
PollerFlux<DeletedKey, Void> poller = client.beginDeleteKey(keyToDelete.getName());
AsyncPollResponse<DeletedKey, Void> deletedKeyPollResponse = poller
.takeUntil(apr -> apr.getStatus() == LongRunningOperationStatus.SUCCESSFULLY_COMPLETED)
.blockLast();
DeletedKey deletedKeyResponse = deletedKeyPollResponse.getValue();
assertNotNull(deletedKeyResponse.getDeletedOn());
assertNotNull(deletedKeyResponse.getRecoveryId());
assertNotNull(deletedKeyResponse.getScheduledPurgeDate());
assertEquals(keyToDelete.getName(), deletedKeyResponse.getName());
StepVerifier.create(client.purgeDeletedKeyWithResponse(keyToDelete.getName()))
.assertNext(voidResponse -> {
assertEquals(HttpURLConnection.HTTP_NO_CONTENT, voidResponse.getStatusCode());
}).verifyComplete();
sleepInRecordMode(15000);
});
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void deleteKeyNotFound(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyAsyncClient(httpClient, serviceVersion);
StepVerifier.create(client.beginDeleteKey("non-existing"))
.verifyErrorSatisfies(ex -> assertRestException(ex, ResourceNotFoundException.class, HttpURLConnection.HTTP_NOT_FOUND));
}
/**
* Tests that an attempt to retrieve a non existing deleted key throws an error on a soft-delete enabled vault.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void getDeletedKeyNotFound(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyAsyncClient(httpClient, serviceVersion);
StepVerifier.create(client.getDeletedKey("non-existing"))
.verifyErrorSatisfies(ex -> assertRestException(ex, ResourceNotFoundException.class, HttpURLConnection.HTTP_NOT_FOUND));
}
/**
* Tests that a deleted key can be recovered on a soft-delete enabled vault.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void recoverDeletedKey(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyAsyncClient(httpClient, serviceVersion);
recoverDeletedKeyRunner((keyToDeleteAndRecover) -> {
StepVerifier.create(client.createKey(keyToDeleteAndRecover))
.assertNext(keyResponse -> assertKeyEquals(keyToDeleteAndRecover, keyResponse)).verifyComplete();
PollerFlux<DeletedKey, Void> poller = client.beginDeleteKey(keyToDeleteAndRecover.getName());
AsyncPollResponse<DeletedKey, Void> deleteKeyPollResponse
= poller.takeUntil(apr -> apr.getStatus() == LongRunningOperationStatus.SUCCESSFULLY_COMPLETED)
.blockLast();
assertNotNull(deleteKeyPollResponse.getValue());
PollerFlux<KeyVaultKey, Void> recoverPoller = client.beginRecoverDeletedKey(keyToDeleteAndRecover.getName());
AsyncPollResponse<KeyVaultKey, Void> recoverKeyPollResponse
= recoverPoller.takeUntil(apr -> apr.getStatus() == LongRunningOperationStatus.SUCCESSFULLY_COMPLETED)
.blockLast();
KeyVaultKey keyResponse = recoverKeyPollResponse.getValue();
assertEquals(keyToDeleteAndRecover.getName(), keyResponse.getName());
assertEquals(keyToDeleteAndRecover.getNotBefore(), keyResponse.getProperties().getNotBefore());
assertEquals(keyToDeleteAndRecover.getExpiresOn(), keyResponse.getProperties().getExpiresOn());
});
}
/**
* Tests that an attempt to recover a non existing deleted key throws an error on a soft-delete enabled vault.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void recoverDeletedKeyNotFound(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyAsyncClient(httpClient, serviceVersion);
StepVerifier.create(client.beginRecoverDeletedKey("non-existing"))
.verifyErrorSatisfies(ex -> assertRestException(ex, ResourceNotFoundException.class, HttpURLConnection.HTTP_NOT_FOUND));
}
/**
* Tests that a key can be backed up in the key vault.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void backupKey(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyAsyncClient(httpClient, serviceVersion);
backupKeyRunner((keyToBackup) -> {
StepVerifier.create(client.createKey(keyToBackup))
.assertNext(keyResponse -> assertKeyEquals(keyToBackup, keyResponse)).verifyComplete();
StepVerifier.create(client.backupKey(keyToBackup.getName()))
.assertNext(response -> {
assertNotNull(response);
assertTrue(response.length > 0);
}).verifyComplete();
});
}
/**
* Tests that an attempt to backup a non existing key throws an error.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void backupKeyNotFound(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyAsyncClient(httpClient, serviceVersion);
StepVerifier.create(client.backupKey("non-existing"))
.verifyErrorSatisfies(ex -> assertRestException(ex, ResourceNotFoundException.class, HttpURLConnection.HTTP_NOT_FOUND));
}
/**
* Tests that a key can be backed up in the key vault.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void restoreKey(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyAsyncClient(httpClient, serviceVersion);
restoreKeyRunner((keyToBackupAndRestore) -> {
StepVerifier.create(client.createKey(keyToBackupAndRestore))
.assertNext(keyResponse -> assertKeyEquals(keyToBackupAndRestore, keyResponse)).verifyComplete();
byte[] backup = client.backupKey(keyToBackupAndRestore.getName()).block();
PollerFlux<DeletedKey, Void> poller = client.beginDeleteKey(keyToBackupAndRestore.getName());
AsyncPollResponse<DeletedKey, Void> pollResponse = poller
.takeUntil(apr -> apr.getStatus() == LongRunningOperationStatus.SUCCESSFULLY_COMPLETED)
.blockLast();
assertNotNull(pollResponse.getValue());
StepVerifier.create(client.purgeDeletedKeyWithResponse(keyToBackupAndRestore.getName()))
.assertNext(voidResponse -> {
assertEquals(HttpURLConnection.HTTP_NO_CONTENT, voidResponse.getStatusCode());
}).verifyComplete();
pollOnKeyPurge(keyToBackupAndRestore.getName());
sleepInRecordMode(60000);
StepVerifier.create(client.restoreKeyBackup(backup))
.assertNext(response -> {
assertEquals(keyToBackupAndRestore.getName(), response.getName());
assertEquals(keyToBackupAndRestore.getNotBefore(), response.getProperties().getNotBefore());
assertEquals(keyToBackupAndRestore.getExpiresOn(), response.getProperties().getExpiresOn());
}).verifyComplete();
});
}
/**
* Tests that an attempt to restore a key from malformed backup bytes throws an error.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void restoreKeyFromMalformedBackup(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyAsyncClient(httpClient, serviceVersion);
byte[] keyBackupBytes = "non-existing".getBytes();
StepVerifier.create(client.restoreKeyBackup(keyBackupBytes))
.verifyErrorSatisfies(ex -> assertRestException(ex, ResourceModifiedException.class, HttpURLConnection.HTTP_BAD_REQUEST));
}
/**
* Tests that a deleted key can be retrieved on a soft-delete enabled vault.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void getDeletedKey(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyAsyncClient(httpClient, serviceVersion);
getDeletedKeyRunner((keyToDeleteAndGet) -> {
StepVerifier.create(client.createKey(keyToDeleteAndGet))
.assertNext(keyResponse -> assertKeyEquals(keyToDeleteAndGet, keyResponse)).verifyComplete();
PollerFlux<DeletedKey, Void> poller = client.beginDeleteKey(keyToDeleteAndGet.getName());
AsyncPollResponse<DeletedKey, Void> pollResponse = poller
.takeUntil(apr -> apr.getStatus() == LongRunningOperationStatus.SUCCESSFULLY_COMPLETED)
.blockLast();
assertNotNull(pollResponse.getValue());
StepVerifier.create(client.getDeletedKey(keyToDeleteAndGet.getName()))
.assertNext(deletedKeyResponse -> {
assertNotNull(deletedKeyResponse.getDeletedOn());
assertNotNull(deletedKeyResponse.getRecoveryId());
assertNotNull(deletedKeyResponse.getScheduledPurgeDate());
assertEquals(keyToDeleteAndGet.getName(), deletedKeyResponse.getName());
}).verifyComplete();
StepVerifier.create(client.purgeDeletedKeyWithResponse(keyToDeleteAndGet.getName()))
.assertNext(voidResponse -> {
assertEquals(HttpURLConnection.HTTP_NO_CONTENT, voidResponse.getStatusCode());
}).verifyComplete();
pollOnKeyPurge(keyToDeleteAndGet.getName());
sleepInRecordMode(15000);
});
}
/**
* Tests that deleted keys can be listed in the key vault.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void listDeletedKeys(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyAsyncClient(httpClient, serviceVersion);
if (!interceptorManager.isPlaybackMode()) {
return;
}
listDeletedKeysRunner((keys) -> {
List<DeletedKey> deletedKeys = new ArrayList<>();
for (CreateKeyOptions key : keys.values()) {
StepVerifier.create(client.createKey(key))
.assertNext(keyResponse -> assertKeyEquals(key, keyResponse)).verifyComplete();
}
sleepInRecordMode(10000);
for (CreateKeyOptions key : keys.values()) {
PollerFlux<DeletedKey, Void> poller = client.beginDeleteKey(key.getName());
AsyncPollResponse<DeletedKey, Void> response = poller.blockLast();
assertNotNull(response.getValue());
}
sleepInRecordMode(90000);
DeletedKey deletedKey = client.listDeletedKeys().map(actualKey -> {
deletedKeys.add(actualKey);
assertNotNull(actualKey.getDeletedOn());
assertNotNull(actualKey.getRecoveryId());
return actualKey;
}).blockLast();
assertNotNull(deletedKey);
});
}
/**
* Tests that key versions can be listed in the key vault.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void listKeyVersions(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyAsyncClient(httpClient, serviceVersion);
listKeyVersionsRunner((keys) -> {
List<KeyProperties> output = new ArrayList<>();
String keyName = null;
for (CreateKeyOptions key : keys) {
keyName = key.getName();
StepVerifier.create(client.createKey(key))
.assertNext(keyResponse -> assertKeyEquals(key, keyResponse)).verifyComplete();
}
sleepInRecordMode(30000);
client.listPropertiesOfKeyVersions(keyName).subscribe(output::add);
sleepInRecordMode(30000);
assertEquals(keys.size(), output.size());
PollerFlux<DeletedKey, Void> poller = client.beginDeleteKey(keyName);
AsyncPollResponse<DeletedKey, Void> pollResponse = poller.blockLast();
assertNotNull(pollResponse.getValue());
StepVerifier.create(client.purgeDeletedKeyWithResponse(keyName))
.assertNext(voidResponse -> {
assertEquals(HttpURLConnection.HTTP_NO_CONTENT, voidResponse.getStatusCode());
}).verifyComplete();
pollOnKeyPurge(keyName);
});
}
/**
* Tests that keys can be listed in the key vault.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void listKeys(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyAsyncClient(httpClient, serviceVersion);
listKeysRunner((keys) -> {
for (CreateKeyOptions key : keys.values()) {
assertKeyEquals(key, client.createKey(key).block());
}
sleepInRecordMode(10000);
client.listPropertiesOfKeys().map(actualKey -> {
if (keys.containsKey(actualKey.getName())) {
CreateKeyOptions expectedKey = keys.get(actualKey.getName());
assertEquals(expectedKey.getExpiresOn(), actualKey.getExpiresOn());
assertEquals(expectedKey.getNotBefore(), actualKey.getNotBefore());
keys.remove(actualKey.getName());
}
return actualKey;
}).blockLast();
assertEquals(0, keys.size());
});
}
private void pollOnKeyDeletion(String keyName) {
int pendingPollCount = 0;
while (pendingPollCount < 30) {
DeletedKey deletedKey = null;
try {
deletedKey = client.getDeletedKeyWithResponse(keyName).block().getValue();
} catch (ResourceNotFoundException e) {
}
if (deletedKey == null) {
sleepInRecordMode(2000);
pendingPollCount += 1;
} else {
return;
}
}
System.err.printf("Deleted Key %s not found \n", keyName);
}
private void pollOnKeyPurge(String keyName) {
int pendingPollCount = 0;
while (pendingPollCount < 10) {
DeletedKey deletedKey = null;
try {
deletedKey = client.getDeletedKeyWithResponse(keyName).block().getValue();
} catch (ResourceNotFoundException e) {
}
if (deletedKey != null) {
sleepInRecordMode(2000);
pendingPollCount += 1;
} else {
return;
}
}
System.err.printf("Deleted Key %s was not purged \n", keyName);
}
} |
This can be deleted. | protected void beforeTest() {
beforeTestSetup();
client = clientSetup(pipeline -> new KeyClientBuilder()
.vaultUrl(getEndpoint())
.pipeline(pipeline)
.buildClient());
} | .buildClient()); | protected void beforeTest() {
beforeTestSetup();
} | class KeyClientTest extends KeyClientTestBase {
static final String DISPLAY_NAME_WITH_ARGUMENTS = "{displayName} with [{arguments}]";
private KeyClient client;
@Override
private void getKeyClient(HttpClient httpClient,
KeyServiceVersion serviceVersion) {
client = clientSetup(pipeline -> new KeyClientBuilder()
.vaultUrl(getEndpoint())
.pipeline(pipeline)
.httpClient(httpClient == null ? interceptorManager.getPlaybackClient() : httpClient)
.serviceVersion(serviceVersion)
.buildClient());
}
/**
* Tests that a key can be created in the key vault.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void setKey(HttpClient httpClient, KeyServiceVersion serviceVersion) {
getKeyClient(httpClient, serviceVersion);
setKeyRunner((expected) -> assertKeyEquals(expected, client.createKey(expected)));
}
/**
* Tests that an attempt to create a key with empty string name throws an error.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void setKeyEmptyName(HttpClient httpClient, KeyServiceVersion serviceVersion) {
getKeyClient(httpClient, serviceVersion);
assertRestException(() -> client.createKey("", KeyType.RSA), ResourceModifiedException.class, HttpURLConnection.HTTP_BAD_REQUEST);
}
/**
* Tests that we cannot create keys when key type is null.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void setKeyNullType(HttpClient httpClient, KeyServiceVersion serviceVersion) {
getKeyClient(httpClient, serviceVersion);
setKeyEmptyValueRunner((key) -> {
assertRestException(() -> client.createKey(key.getName(), key.getKeyType()), ResourceModifiedException.class, HttpURLConnection.HTTP_BAD_REQUEST);
});
}
/**
* Verifies that an exception is thrown when null key object is passed for creation.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void setKeyNull(HttpClient httpClient, KeyServiceVersion serviceVersion) {
getKeyClient(httpClient, serviceVersion);
assertRunnableThrowsException(() -> client.createKey(null), NullPointerException.class);
assertRunnableThrowsException(() -> client.createKey(null), NullPointerException.class);
}
/**
* Tests that a key is able to be updated when it exists.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void updateKey(HttpClient httpClient, KeyServiceVersion serviceVersion) {
getKeyClient(httpClient, serviceVersion);
updateKeyRunner((original, updated) -> {
assertKeyEquals(original, client.createKey(original));
KeyVaultKey keyToUpdate = client.getKey(original.getName());
client.updateKeyProperties(keyToUpdate.getProperties().setExpiresOn(updated.getExpiresOn()));
assertKeyEquals(updated, client.getKey(original.getName()));
});
}
/**
* Tests that a key is able to be updated when it is disabled.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void updateDisabledKey(HttpClient httpClient, KeyServiceVersion serviceVersion) {
getKeyClient(httpClient, serviceVersion);
updateDisabledKeyRunner((original, updated) -> {
assertKeyEquals(original, client.createKey(original));
KeyVaultKey keyToUpdate = client.getKey(original.getName());
client.updateKeyProperties(keyToUpdate.getProperties().setExpiresOn(updated.getExpiresOn()));
assertKeyEquals(updated, client.getKey(original.getName()));
});
}
/**
* Tests that an existing key can be retrieved.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void getKey(HttpClient httpClient, KeyServiceVersion serviceVersion) {
getKeyClient(httpClient, serviceVersion);
getKeyRunner((original) -> {
client.createKey(original);
assertKeyEquals(original, client.getKey(original.getName()));
});
}
/**
* Tests that a specific version of the key can be retrieved.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void getKeySpecificVersion(HttpClient httpClient, KeyServiceVersion serviceVersion) {
getKeyClient(httpClient, serviceVersion);
getKeySpecificVersionRunner((key, keyWithNewVal) -> {
KeyVaultKey keyVersionOne = client.createKey(key);
KeyVaultKey keyVersionTwo = client.createKey(keyWithNewVal);
assertKeyEquals(key, client.getKey(keyVersionOne.getName(), keyVersionOne.getProperties().getVersion()));
assertKeyEquals(keyWithNewVal, client.getKey(keyVersionTwo.getName(), keyVersionTwo.getProperties().getVersion()));
});
}
/**
* Tests that an attempt to get a non-existing key throws an error.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void getKeyNotFound(HttpClient httpClient, KeyServiceVersion serviceVersion) {
getKeyClient(httpClient, serviceVersion);
assertRestException(() -> client.getKey("non-existing"), ResourceNotFoundException.class, HttpURLConnection.HTTP_NOT_FOUND);
}
/**
* Tests that an existing key can be deleted.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void deleteKey(HttpClient httpClient, KeyServiceVersion serviceVersion) {
    getKeyClient(httpClient, serviceVersion);
    deleteKeyRunner((keyToDelete) -> {
        assertKeyEquals(keyToDelete, client.createKey(keyToDelete));
        SyncPoller<DeletedKey, Void> deletedKeyPoller = client.beginDeleteKey(keyToDelete.getName());
        PollResponse<DeletedKey> pollResponse = deletedKeyPoller.poll();
        while (!pollResponse.getStatus().isComplete()) {
            sleepInRecordMode(2000);
            pollResponse = deletedKeyPoller.poll();
        }
        // Bug fix: read the value only after the operation completes. The original captured
        // it from the very first poll, which may be stale or null while the delete is in flight.
        DeletedKey deletedKey = pollResponse.getValue();
        assertNotNull(deletedKey.getDeletedOn());
        assertNotNull(deletedKey.getRecoveryId());
        assertNotNull(deletedKey.getScheduledPurgeDate());
        assertEquals(keyToDelete.getName(), deletedKey.getName());
        // Purge so the soft-deleted key does not linger and break later runs.
        client.purgeDeletedKey(keyToDelete.getName());
        pollOnKeyPurge(keyToDelete.getName());
    });
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void deleteKeyNotFound(HttpClient httpClient, KeyServiceVersion serviceVersion) {
    getKeyClient(httpClient, serviceVersion);
    // Deleting a key that does not exist must fail with a 404.
    final String missingKeyName = "non-existing";
    assertRestException(() -> client.beginDeleteKey(missingKeyName), ResourceNotFoundException.class, HttpURLConnection.HTTP_NOT_FOUND);
}
/**
* Tests that an attempt to retrieve a non existing deleted key throws an error on a soft-delete enabled vault.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void getDeletedKeyNotFound(HttpClient httpClient, KeyServiceVersion serviceVersion) {
    getKeyClient(httpClient, serviceVersion);
    // Looking up a deleted key that never existed must fail with a 404.
    final String missingKeyName = "non-existing";
    assertRestException(() -> client.getDeletedKey(missingKeyName), ResourceNotFoundException.class, HttpURLConnection.HTTP_NOT_FOUND);
}
/**
* Tests that a deleted key can be recovered on a soft-delete enabled vault.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void recoverDeletedKey(HttpClient httpClient, KeyServiceVersion serviceVersion) {
    getKeyClient(httpClient, serviceVersion);
    recoverDeletedKeyRunner((keyToDeleteAndRecover) -> {
        assertKeyEquals(keyToDeleteAndRecover, client.createKey(keyToDeleteAndRecover));
        // Soft-delete the key and wait for the delete operation to finish.
        SyncPoller<DeletedKey, Void> poller = client.beginDeleteKey(keyToDeleteAndRecover.getName());
        PollResponse<DeletedKey> pollResponse = poller.poll();
        while (!pollResponse.getStatus().isComplete()) {
            sleepInRecordMode(1000);
            pollResponse = poller.poll();
        }
        assertNotNull(pollResponse.getValue());
        // Recover the key, polling until the recovery completes.
        SyncPoller<KeyVaultKey, Void> recoverPoller = client.beginRecoverDeletedKey(keyToDeleteAndRecover.getName());
        PollResponse<KeyVaultKey> recoverPollResponse = recoverPoller.poll();
        while (!recoverPollResponse.getStatus().isComplete()) {
            sleepInRecordMode(1000);
            recoverPollResponse = recoverPoller.poll();
        }
        // Bug fix: capture the recovered key AFTER polling completes. The original read the
        // value from the very first poll (and issued a redundant extra poll), so the asserts
        // could run against a stale in-progress value.
        KeyVaultKey recoveredKey = recoverPollResponse.getValue();
        assertEquals(keyToDeleteAndRecover.getName(), recoveredKey.getName());
        assertEquals(keyToDeleteAndRecover.getNotBefore(), recoveredKey.getProperties().getNotBefore());
        assertEquals(keyToDeleteAndRecover.getExpiresOn(), recoveredKey.getProperties().getExpiresOn());
    });
}
/**
* Tests that an attempt to recover a non existing deleted key throws an error on a soft-delete enabled vault.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void recoverDeletedKeyNotFound(HttpClient httpClient, KeyServiceVersion serviceVersion) {
    getKeyClient(httpClient, serviceVersion);
    // Recovering a key that was never deleted (or never existed) must fail with a 404.
    final String missingKeyName = "non-existing";
    assertRestException(() -> client.beginRecoverDeletedKey(missingKeyName), ResourceNotFoundException.class, HttpURLConnection.HTTP_NOT_FOUND);
}
/**
* Tests that a key can be backed up in the key vault.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void backupKey(HttpClient httpClient, KeyServiceVersion serviceVersion) {
    getKeyClient(httpClient, serviceVersion);
    backupKeyRunner((keyToBackup) -> {
        assertKeyEquals(keyToBackup, client.createKey(keyToBackup));
        // A successful backup yields a non-empty opaque blob.
        byte[] backup = client.backupKey(keyToBackup.getName());
        assertNotNull(backup);
        assertTrue(backup.length > 0);
    });
}
/**
* Tests that an attempt to backup a non existing key throws an error.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void backupKeyNotFound(HttpClient httpClient, KeyServiceVersion serviceVersion) {
    getKeyClient(httpClient, serviceVersion);
    // Backing up a key that does not exist must fail with a 404.
    final String missingKeyName = "non-existing";
    assertRestException(() -> client.backupKey(missingKeyName), ResourceNotFoundException.class, HttpURLConnection.HTTP_NOT_FOUND);
}
/**
* Tests that a key can be backed up in the key vault.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void restoreKey(HttpClient httpClient, KeyServiceVersion serviceVersion) {
getKeyClient(httpClient, serviceVersion);
restoreKeyRunner((keyToBackupAndRestore) -> {
assertKeyEquals(keyToBackupAndRestore, client.createKey(keyToBackupAndRestore));
// Take a backup; it must be a non-empty opaque blob.
byte[] backupBytes = (client.backupKey(keyToBackupAndRestore.getName()));
assertNotNull(backupBytes);
assertTrue(backupBytes.length > 0);
// Delete and purge the original so the restore below truly recreates it from the blob.
SyncPoller<DeletedKey, Void> poller = client.beginDeleteKey(keyToBackupAndRestore.getName());
PollResponse<DeletedKey> pollResponse = poller.poll();
while (!pollResponse.getStatus().isComplete()) {
sleepInRecordMode(1000);
pollResponse = poller.poll();
}
client.purgeDeletedKey(keyToBackupAndRestore.getName());
pollOnKeyPurge(keyToBackupAndRestore.getName());
// Extra settle time after the purge; restoring too soon can conflict with the
// still-propagating purge. NOTE(review): 60s looks empirical - confirm it is needed.
sleepInRecordMode(60000);
KeyVaultKey restoredKey = client.restoreKeyBackup(backupBytes);
assertEquals(keyToBackupAndRestore.getName(), restoredKey.getName());
assertEquals(keyToBackupAndRestore.getExpiresOn(), restoredKey.getProperties().getExpiresOn());
});
}
/**
* Tests that an attempt to restore a key from malformed backup bytes throws an error.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void restoreKeyFromMalformedBackup(HttpClient httpClient, KeyServiceVersion serviceVersion) {
    getKeyClient(httpClient, serviceVersion);
    // Bytes that are not a valid backup blob must be rejected with a 400.
    final byte[] malformedBackup = "non-existing".getBytes();
    assertRestException(() -> client.restoreKeyBackup(malformedBackup), ResourceModifiedException.class, HttpURLConnection.HTTP_BAD_REQUEST);
}
/**
* Tests that keys can be listed in the key vault.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void listKeys(HttpClient httpClient, KeyServiceVersion serviceVersion) {
getKeyClient(httpClient, serviceVersion);
listKeysRunner((keys) -> {
HashMap<String, CreateKeyOptions> keysToList = keys;
// Create every expected key; the pause gives the service time to index each
// one before we list. NOTE(review): 5s per key looks empirical - confirm.
for (CreateKeyOptions key : keysToList.values()) {
assertKeyEquals(key, client.createKey(key));
sleepInRecordMode(5000);
}
// Cross each listed key off the expected map; an empty map means all were listed.
for (KeyProperties actualKey : client.listPropertiesOfKeys()) {
if (keys.containsKey(actualKey.getName())) {
CreateKeyOptions expectedKey = keys.get(actualKey.getName());
assertEquals(expectedKey.getExpiresOn(), actualKey.getExpiresOn());
assertEquals(expectedKey.getNotBefore(), actualKey.getNotBefore());
keys.remove(actualKey.getName());
}
}
assertEquals(0, keys.size());
});
}
/**
* Tests that a deleted key can be retrieved on a soft-delete enabled vault.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void getDeletedKey(HttpClient httpClient, KeyServiceVersion serviceVersion) {
getKeyClient(httpClient, serviceVersion);
getDeletedKeyRunner((keyToDeleteAndGet) -> {
assertKeyEquals(keyToDeleteAndGet, client.createKey(keyToDeleteAndGet));
// Soft-delete the key and poll until the delete operation finishes.
SyncPoller<DeletedKey, Void> poller = client.beginDeleteKey(keyToDeleteAndGet.getName());
PollResponse<DeletedKey> pollResponse = poller.poll();
while (!pollResponse.getStatus().isComplete()) {
sleepInRecordMode(1000);
pollResponse = poller.poll();
}
// Allow the soft-deleted key to propagate before fetching it directly.
sleepInRecordMode(30000);
DeletedKey deletedKey = client.getDeletedKey(keyToDeleteAndGet.getName());
assertNotNull(deletedKey.getDeletedOn());
assertNotNull(deletedKey.getRecoveryId());
assertNotNull(deletedKey.getScheduledPurgeDate());
assertEquals(keyToDeleteAndGet.getName(), deletedKey.getName());
// Clean up: purge the soft-deleted key and wait for the purge to finish.
client.purgeDeletedKey(keyToDeleteAndGet.getName());
pollOnKeyPurge(keyToDeleteAndGet.getName());
sleepInRecordMode(10000);
});
}
/**
* Tests that deleted keys can be listed in the key vault.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void listDeletedKeys(HttpClient httpClient, KeyServiceVersion serviceVersion) {
getKeyClient(httpClient, serviceVersion);
listDeletedKeysRunner((keys) -> {
HashMap<String, CreateKeyOptions> keysToDelete = keys;
for (CreateKeyOptions key : keysToDelete.values()) {
assertKeyEquals(key, client.createKey(key));
}
// Soft-delete every created key, waiting for each delete to complete.
for (CreateKeyOptions key : keysToDelete.values()) {
SyncPoller<DeletedKey, Void> poller = client.beginDeleteKey(key.getName());
PollResponse<DeletedKey> pollResponse = poller.poll();
while (!pollResponse.getStatus().isComplete()) {
sleepInRecordMode(1000);
pollResponse = poller.poll();
}
}
// Deleted keys can take a while to appear in the list; this waits a worst-case
// five minutes. NOTE(review): looks empirical - confirm the delay is still needed.
sleepInRecordMode(300000);
Iterable<DeletedKey> deletedKeys = client.listDeletedKeys();
assertTrue(deletedKeys.iterator().hasNext());
// Every listed entry must carry soft-delete metadata.
for (DeletedKey deletedKey : deletedKeys) {
assertNotNull(deletedKey.getDeletedOn());
assertNotNull(deletedKey.getRecoveryId());
}
});
}
/**
* Tests that key versions can be listed in the key vault.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void listKeyVersions(HttpClient httpClient, KeyServiceVersion serviceVersion) {
getKeyClient(httpClient, serviceVersion);
listKeyVersionsRunner((keys) -> {
List<CreateKeyOptions> keyVersions = keys;
String keyName = null;
// All runner-supplied options share one key name; each create adds a version.
// NOTE(review): assumes every element has the same name - confirm against the runner.
for (CreateKeyOptions key : keyVersions) {
keyName = key.getName();
assertKeyEquals(key, client.createKey(key));
}
// The service must report exactly one version per create call.
Iterable<KeyProperties> keyVersionsOutput = client.listPropertiesOfKeyVersions(keyName);
List<KeyProperties> keyVersionsList = new ArrayList<>();
keyVersionsOutput.forEach(keyVersionsList::add);
assertEquals(keyVersions.size(), keyVersionsList.size());
// Clean up: delete and purge the key so later runs are unaffected.
SyncPoller<DeletedKey, Void> poller = client.beginDeleteKey(keyName);
PollResponse<DeletedKey> pollResponse = poller.poll();
while (!pollResponse.getStatus().isComplete()) {
sleepInRecordMode(1000);
pollResponse = poller.poll();
}
client.purgeDeletedKey(keyName);
pollOnKeyPurge(keyName);
});
}
// Polls until the soft-deleted key becomes visible via getDeletedKey, waiting up to
// 30 attempts x 2s (~60s). Returns the DeletedKey once visible, or null on timeout.
private DeletedKey pollOnKeyDeletion(String keyName) {
int pendingPollCount = 0;
while (pendingPollCount < 30) {
DeletedKey deletedKey = null;
try {
deletedKey = client.getDeletedKey(keyName);
} catch (ResourceNotFoundException e) {
// Intentionally swallowed: a 404 means the delete has not propagated yet.
}
if (deletedKey == null) {
sleepInRecordMode(2000);
pendingPollCount += 1;
continue;
} else {
return deletedKey;
}
}
System.err.printf("Deleted Key %s not found \n", keyName);
return null;
}
// Polls until a purged key stops being returned by getDeletedKey (i.e. the purge
// finished), waiting up to 10 attempts x 2s (~20s). Always returns null: either the
// 404 confirmed the purge (deletedKey is null in that branch) or the wait timed out.
private DeletedKey pollOnKeyPurge(String keyName) {
int pendingPollCount = 0;
while (pendingPollCount < 10) {
DeletedKey deletedKey = null;
try {
deletedKey = client.getDeletedKey(keyName);
} catch (ResourceNotFoundException e) {
// Intentionally swallowed: a 404 is the success signal - the key is purged.
}
if (deletedKey != null) {
sleepInRecordMode(2000);
pendingPollCount += 1;
continue;
} else {
return deletedKey;
}
}
System.err.printf("Deleted Key %s was not purged \n", keyName);
return null;
}
} | class KeyClientTest extends KeyClientTestBase {
private KeyClient client;
// Builds the sync KeyClient under test against the given pipeline and service version.
// NOTE(review): the original carried @Override, but a private method cannot override
// anything - that annotation is a compile error. If the test base declares
// getKeyClient, widen this method's visibility instead of re-adding the annotation.
private void getKeyClient(HttpClient httpClient, KeyServiceVersion serviceVersion) {
    HttpPipeline httpPipeline = getHttpPipeline(httpClient, serviceVersion);
    client = new KeyClientBuilder()
        .vaultUrl(getEndpoint())
        .pipeline(httpPipeline)
        .serviceVersion(serviceVersion)
        .buildClient();
}
/**
* Tests that a key can be created in the key vault.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void setKey(HttpClient httpClient, KeyServiceVersion serviceVersion) {
    getKeyClient(httpClient, serviceVersion);
    // Creating a key must echo back an equivalent key.
    setKeyRunner((keyToSet) -> assertKeyEquals(keyToSet, client.createKey(keyToSet)));
}
/**
* Tests that an attempt to create a key with empty string name throws an error.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void setKeyEmptyName(HttpClient httpClient, KeyServiceVersion serviceVersion) {
    getKeyClient(httpClient, serviceVersion);
    // An empty key name is rejected by the service with a 400.
    final String emptyName = "";
    assertRestException(() -> client.createKey(emptyName, KeyType.RSA), ResourceModifiedException.class, HttpURLConnection.HTTP_BAD_REQUEST);
}
/**
* Tests that we cannot create keys when key type is null.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void setKeyNullType(HttpClient httpClient, KeyServiceVersion serviceVersion) {
getKeyClient(httpClient, serviceVersion);
// NOTE(review): despite the method name this uses setKeyEmptyValueRunner; presumably
// the runner supplies a key whose getKeyType() is null - confirm against the test base.
setKeyEmptyValueRunner((key) -> {
assertRestException(() -> client.createKey(key.getName(), key.getKeyType()), ResourceModifiedException.class, HttpURLConnection.HTTP_BAD_REQUEST);
});
}
/**
* Verifies that an exception is thrown when null key object is passed for creation.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void setKeyNull(HttpClient httpClient, KeyServiceVersion serviceVersion) {
    getKeyClient(httpClient, serviceVersion);
    // A null options object must be rejected with an NPE before any request is made.
    // The original asserted the exact same call twice; the duplicate added no coverage.
    assertRunnableThrowsException(() -> client.createKey(null), NullPointerException.class);
}
/**
* Tests that a key is able to be updated when it exists.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void updateKey(HttpClient httpClient, KeyServiceVersion serviceVersion) {
    getKeyClient(httpClient, serviceVersion);
    updateKeyRunner((original, updated) -> {
        assertKeyEquals(original, client.createKey(original));
        // Fetch the stored key, push the new expiry, then verify the update stuck.
        KeyVaultKey createdKey = client.getKey(original.getName());
        client.updateKeyProperties(createdKey.getProperties().setExpiresOn(updated.getExpiresOn()));
        assertKeyEquals(updated, client.getKey(original.getName()));
    });
}
/**
* Tests that a key is able to be updated when it is disabled.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void updateDisabledKey(HttpClient httpClient, KeyServiceVersion serviceVersion) {
getKeyClient(httpClient, serviceVersion);
// NOTE(review): presumably the runner supplies a disabled key pair - confirm.
updateDisabledKeyRunner((original, updated) -> {
assertKeyEquals(original, client.createKey(original));
KeyVaultKey keyToUpdate = client.getKey(original.getName());
// Only the expiry is changed; the update must succeed even though the key is disabled.
client.updateKeyProperties(keyToUpdate.getProperties().setExpiresOn(updated.getExpiresOn()));
assertKeyEquals(updated, client.getKey(original.getName()));
});
}
/**
* Tests that an existing key can be retrieved.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void getKey(HttpClient httpClient, KeyServiceVersion serviceVersion) {
getKeyClient(httpClient, serviceVersion);
// Round-trip: create the key, then fetch it by name and compare.
getKeyRunner((original) -> {
client.createKey(original);
assertKeyEquals(original, client.getKey(original.getName()));
});
}
/**
* Tests that a specific version of the key can be retrieved.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void getKeySpecificVersion(HttpClient httpClient, KeyServiceVersion serviceVersion) {
getKeyClient(httpClient, serviceVersion);
getKeySpecificVersionRunner((key, keyWithNewVal) -> {
// Creating the same key name twice produces two distinct versions.
KeyVaultKey keyVersionOne = client.createKey(key);
KeyVaultKey keyVersionTwo = client.createKey(keyWithNewVal);
// Each version must be retrievable independently by its version id.
assertKeyEquals(key, client.getKey(keyVersionOne.getName(), keyVersionOne.getProperties().getVersion()));
assertKeyEquals(keyWithNewVal, client.getKey(keyVersionTwo.getName(), keyVersionTwo.getProperties().getVersion()));
});
}
/**
* Tests that an attempt to get a non-existing key throws an error.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void getKeyNotFound(HttpClient httpClient, KeyServiceVersion serviceVersion) {
getKeyClient(httpClient, serviceVersion);
// A key that was never created must surface the service 404 as ResourceNotFoundException.
assertRestException(() -> client.getKey("non-existing"), ResourceNotFoundException.class, HttpURLConnection.HTTP_NOT_FOUND);
}
/**
* Tests that an existing key can be deleted.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void deleteKey(HttpClient httpClient, KeyServiceVersion serviceVersion) {
    getKeyClient(httpClient, serviceVersion);
    deleteKeyRunner((keyToDelete) -> {
        assertKeyEquals(keyToDelete, client.createKey(keyToDelete));
        SyncPoller<DeletedKey, Void> deletedKeyPoller = client.beginDeleteKey(keyToDelete.getName());
        PollResponse<DeletedKey> pollResponse = deletedKeyPoller.poll();
        while (!pollResponse.getStatus().isComplete()) {
            sleepInRecordMode(2000);
            pollResponse = deletedKeyPoller.poll();
        }
        // Bug fix: read the value only after the operation completes. The original captured
        // it from the very first poll, which may be stale or null while the delete is in flight.
        DeletedKey deletedKey = pollResponse.getValue();
        assertNotNull(deletedKey.getDeletedOn());
        assertNotNull(deletedKey.getRecoveryId());
        assertNotNull(deletedKey.getScheduledPurgeDate());
        assertEquals(keyToDelete.getName(), deletedKey.getName());
        // Purge so the soft-deleted key does not linger and break later runs.
        client.purgeDeletedKey(keyToDelete.getName());
        pollOnKeyPurge(keyToDelete.getName());
    });
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void deleteKeyNotFound(HttpClient httpClient, KeyServiceVersion serviceVersion) {
getKeyClient(httpClient, serviceVersion);
// Deleting a key that does not exist must fail with a 404.
assertRestException(() -> client.beginDeleteKey("non-existing"), ResourceNotFoundException.class, HttpURLConnection.HTTP_NOT_FOUND);
}
/**
* Tests that an attempt to retrieve a non existing deleted key throws an error on a soft-delete enabled vault.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void getDeletedKeyNotFound(HttpClient httpClient, KeyServiceVersion serviceVersion) {
getKeyClient(httpClient, serviceVersion);
// Looking up a deleted key that never existed must fail with a 404.
assertRestException(() -> client.getDeletedKey("non-existing"), ResourceNotFoundException.class, HttpURLConnection.HTTP_NOT_FOUND);
}
/**
* Tests that a deleted key can be recovered on a soft-delete enabled vault.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void recoverDeletedKey(HttpClient httpClient, KeyServiceVersion serviceVersion) {
    getKeyClient(httpClient, serviceVersion);
    recoverDeletedKeyRunner((keyToDeleteAndRecover) -> {
        assertKeyEquals(keyToDeleteAndRecover, client.createKey(keyToDeleteAndRecover));
        // Soft-delete the key and wait for the delete operation to finish.
        SyncPoller<DeletedKey, Void> poller = client.beginDeleteKey(keyToDeleteAndRecover.getName());
        PollResponse<DeletedKey> pollResponse = poller.poll();
        while (!pollResponse.getStatus().isComplete()) {
            sleepInRecordMode(1000);
            pollResponse = poller.poll();
        }
        assertNotNull(pollResponse.getValue());
        // Recover the key, polling until the recovery completes.
        SyncPoller<KeyVaultKey, Void> recoverPoller = client.beginRecoverDeletedKey(keyToDeleteAndRecover.getName());
        PollResponse<KeyVaultKey> recoverPollResponse = recoverPoller.poll();
        while (!recoverPollResponse.getStatus().isComplete()) {
            sleepInRecordMode(1000);
            recoverPollResponse = recoverPoller.poll();
        }
        // Bug fix: capture the recovered key AFTER polling completes. The original read the
        // value from the very first poll (and issued a redundant extra poll), so the asserts
        // could run against a stale in-progress value.
        KeyVaultKey recoveredKey = recoverPollResponse.getValue();
        assertEquals(keyToDeleteAndRecover.getName(), recoveredKey.getName());
        assertEquals(keyToDeleteAndRecover.getNotBefore(), recoveredKey.getProperties().getNotBefore());
        assertEquals(keyToDeleteAndRecover.getExpiresOn(), recoveredKey.getProperties().getExpiresOn());
    });
}
/**
* Tests that an attempt to recover a non existing deleted key throws an error on a soft-delete enabled vault.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void recoverDeletedKeyNotFound(HttpClient httpClient, KeyServiceVersion serviceVersion) {
getKeyClient(httpClient, serviceVersion);
// Recovering a key that was never deleted (or never existed) must fail with a 404.
assertRestException(() -> client.beginRecoverDeletedKey("non-existing"), ResourceNotFoundException.class, HttpURLConnection.HTTP_NOT_FOUND);
}
/**
* Tests that a key can be backed up in the key vault.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void backupKey(HttpClient httpClient, KeyServiceVersion serviceVersion) {
getKeyClient(httpClient, serviceVersion);
backupKeyRunner((keyToBackup) -> {
assertKeyEquals(keyToBackup, client.createKey(keyToBackup));
// A successful backup yields a non-empty opaque blob.
byte[] backupBytes = (client.backupKey(keyToBackup.getName()));
assertNotNull(backupBytes);
assertTrue(backupBytes.length > 0);
});
}
/**
* Tests that an attempt to backup a non existing key throws an error.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void backupKeyNotFound(HttpClient httpClient, KeyServiceVersion serviceVersion) {
getKeyClient(httpClient, serviceVersion);
// Backing up a key that does not exist must fail with a 404.
assertRestException(() -> client.backupKey("non-existing"), ResourceNotFoundException.class, HttpURLConnection.HTTP_NOT_FOUND);
}
/**
* Tests that a key can be backed up in the key vault.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void restoreKey(HttpClient httpClient, KeyServiceVersion serviceVersion) {
getKeyClient(httpClient, serviceVersion);
restoreKeyRunner((keyToBackupAndRestore) -> {
assertKeyEquals(keyToBackupAndRestore, client.createKey(keyToBackupAndRestore));
// Take a backup; it must be a non-empty opaque blob.
byte[] backupBytes = (client.backupKey(keyToBackupAndRestore.getName()));
assertNotNull(backupBytes);
assertTrue(backupBytes.length > 0);
// Delete and purge the original so the restore below truly recreates it from the blob.
SyncPoller<DeletedKey, Void> poller = client.beginDeleteKey(keyToBackupAndRestore.getName());
PollResponse<DeletedKey> pollResponse = poller.poll();
while (!pollResponse.getStatus().isComplete()) {
sleepInRecordMode(1000);
pollResponse = poller.poll();
}
client.purgeDeletedKey(keyToBackupAndRestore.getName());
pollOnKeyPurge(keyToBackupAndRestore.getName());
// Extra settle time after the purge; restoring too soon can conflict with the
// still-propagating purge. NOTE(review): 60s looks empirical - confirm it is needed.
sleepInRecordMode(60000);
KeyVaultKey restoredKey = client.restoreKeyBackup(backupBytes);
assertEquals(keyToBackupAndRestore.getName(), restoredKey.getName());
assertEquals(keyToBackupAndRestore.getExpiresOn(), restoredKey.getProperties().getExpiresOn());
});
}
/**
* Tests that an attempt to restore a key from malformed backup bytes throws an error.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void restoreKeyFromMalformedBackup(HttpClient httpClient, KeyServiceVersion serviceVersion) {
getKeyClient(httpClient, serviceVersion);
// Bytes that are not a valid backup blob must be rejected with a 400.
byte[] keyBackupBytes = "non-existing".getBytes();
assertRestException(() -> client.restoreKeyBackup(keyBackupBytes), ResourceModifiedException.class, HttpURLConnection.HTTP_BAD_REQUEST);
}
/**
* Tests that keys can be listed in the key vault.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void listKeys(HttpClient httpClient, KeyServiceVersion serviceVersion) {
getKeyClient(httpClient, serviceVersion);
listKeysRunner((keys) -> {
HashMap<String, CreateKeyOptions> keysToList = keys;
// Create every expected key; the pause gives the service time to index each
// one before we list. NOTE(review): 5s per key looks empirical - confirm.
for (CreateKeyOptions key : keysToList.values()) {
assertKeyEquals(key, client.createKey(key));
sleepInRecordMode(5000);
}
// Cross each listed key off the expected map; an empty map means all were listed.
for (KeyProperties actualKey : client.listPropertiesOfKeys()) {
if (keys.containsKey(actualKey.getName())) {
CreateKeyOptions expectedKey = keys.get(actualKey.getName());
assertEquals(expectedKey.getExpiresOn(), actualKey.getExpiresOn());
assertEquals(expectedKey.getNotBefore(), actualKey.getNotBefore());
keys.remove(actualKey.getName());
}
}
assertEquals(0, keys.size());
});
}
/**
* Tests that a deleted key can be retrieved on a soft-delete enabled vault.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void getDeletedKey(HttpClient httpClient, KeyServiceVersion serviceVersion) {
getKeyClient(httpClient, serviceVersion);
getDeletedKeyRunner((keyToDeleteAndGet) -> {
assertKeyEquals(keyToDeleteAndGet, client.createKey(keyToDeleteAndGet));
// Soft-delete the key and poll until the delete operation finishes.
SyncPoller<DeletedKey, Void> poller = client.beginDeleteKey(keyToDeleteAndGet.getName());
PollResponse<DeletedKey> pollResponse = poller.poll();
while (!pollResponse.getStatus().isComplete()) {
sleepInRecordMode(1000);
pollResponse = poller.poll();
}
// Allow the soft-deleted key to propagate before fetching it directly.
sleepInRecordMode(30000);
DeletedKey deletedKey = client.getDeletedKey(keyToDeleteAndGet.getName());
assertNotNull(deletedKey.getDeletedOn());
assertNotNull(deletedKey.getRecoveryId());
assertNotNull(deletedKey.getScheduledPurgeDate());
assertEquals(keyToDeleteAndGet.getName(), deletedKey.getName());
// Clean up: purge the soft-deleted key and wait for the purge to finish.
client.purgeDeletedKey(keyToDeleteAndGet.getName());
pollOnKeyPurge(keyToDeleteAndGet.getName());
sleepInRecordMode(10000);
});
}
/**
* Tests that deleted keys can be listed in the key vault.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void listDeletedKeys(HttpClient httpClient, KeyServiceVersion serviceVersion) {
getKeyClient(httpClient, serviceVersion);
listDeletedKeysRunner((keys) -> {
HashMap<String, CreateKeyOptions> keysToDelete = keys;
for (CreateKeyOptions key : keysToDelete.values()) {
assertKeyEquals(key, client.createKey(key));
}
// Soft-delete every created key, waiting for each delete to complete.
for (CreateKeyOptions key : keysToDelete.values()) {
SyncPoller<DeletedKey, Void> poller = client.beginDeleteKey(key.getName());
PollResponse<DeletedKey> pollResponse = poller.poll();
while (!pollResponse.getStatus().isComplete()) {
sleepInRecordMode(1000);
pollResponse = poller.poll();
}
}
// Deleted keys can take a while to appear in the list; this waits a worst-case
// five minutes. NOTE(review): looks empirical - confirm the delay is still needed.
sleepInRecordMode(300000);
Iterable<DeletedKey> deletedKeys = client.listDeletedKeys();
assertTrue(deletedKeys.iterator().hasNext());
// Every listed entry must carry soft-delete metadata.
for (DeletedKey deletedKey : deletedKeys) {
assertNotNull(deletedKey.getDeletedOn());
assertNotNull(deletedKey.getRecoveryId());
}
});
}
/**
* Tests that key versions can be listed in the key vault.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void listKeyVersions(HttpClient httpClient, KeyServiceVersion serviceVersion) {
getKeyClient(httpClient, serviceVersion);
listKeyVersionsRunner((keys) -> {
List<CreateKeyOptions> keyVersions = keys;
String keyName = null;
// All runner-supplied options share one key name; each create adds a version.
// NOTE(review): assumes every element has the same name - confirm against the runner.
for (CreateKeyOptions key : keyVersions) {
keyName = key.getName();
assertKeyEquals(key, client.createKey(key));
}
// The service must report exactly one version per create call.
Iterable<KeyProperties> keyVersionsOutput = client.listPropertiesOfKeyVersions(keyName);
List<KeyProperties> keyVersionsList = new ArrayList<>();
keyVersionsOutput.forEach(keyVersionsList::add);
assertEquals(keyVersions.size(), keyVersionsList.size());
// Clean up: delete and purge the key so later runs are unaffected.
SyncPoller<DeletedKey, Void> poller = client.beginDeleteKey(keyName);
PollResponse<DeletedKey> pollResponse = poller.poll();
while (!pollResponse.getStatus().isComplete()) {
sleepInRecordMode(1000);
pollResponse = poller.poll();
}
client.purgeDeletedKey(keyName);
pollOnKeyPurge(keyName);
});
}
// Polls until the soft-deleted key becomes visible via getDeletedKey, waiting up to
// 30 attempts x 2s (~60s). Returns the DeletedKey once visible, or null on timeout.
private DeletedKey pollOnKeyDeletion(String keyName) {
int pendingPollCount = 0;
while (pendingPollCount < 30) {
DeletedKey deletedKey = null;
try {
deletedKey = client.getDeletedKey(keyName);
} catch (ResourceNotFoundException e) {
// Intentionally swallowed: a 404 means the delete has not propagated yet.
}
if (deletedKey == null) {
sleepInRecordMode(2000);
pendingPollCount += 1;
continue;
} else {
return deletedKey;
}
}
System.err.printf("Deleted Key %s not found \n", keyName);
return null;
}
// Polls until a purged key stops being returned by getDeletedKey (i.e. the purge
// finished), waiting up to 10 attempts x 2s (~20s). Always returns null: either the
// 404 confirmed the purge (deletedKey is null in that branch) or the wait timed out.
private DeletedKey pollOnKeyPurge(String keyName) {
int pendingPollCount = 0;
while (pendingPollCount < 10) {
DeletedKey deletedKey = null;
try {
deletedKey = client.getDeletedKey(keyName);
} catch (ResourceNotFoundException e) {
// Intentionally swallowed: a 404 is the success signal - the key is purged.
}
if (deletedKey != null) {
sleepInRecordMode(2000);
pendingPollCount += 1;
continue;
} else {
return deletedKey;
}
}
System.err.printf("Deleted Key %s was not purged \n", keyName);
return null;
}
} |
The service version should be set on the builder otherwise it will always use the latest. | private void getKeyClient(HttpClient httpClient, KeyServiceVersion serviceVersion) {
HttpPipeline httpPipeline = getHttpPipeline(httpClient, serviceVersion);
client = new KeyClientBuilder()
.vaultUrl(getEndpoint())
.pipeline(httpPipeline)
.buildClient();
} | .buildClient(); | private void getKeyClient(HttpClient httpClient, KeyServiceVersion serviceVersion) {
HttpPipeline httpPipeline = getHttpPipeline(httpClient, serviceVersion);
client = new KeyClientBuilder()
.vaultUrl(getEndpoint())
.pipeline(httpPipeline)
.serviceVersion(serviceVersion)
.buildClient();
} | class KeyClientTest extends KeyClientTestBase {
private KeyClient client;
@Override
protected void beforeTest() {
// Per-test setup is delegated to the shared base-class fixture.
beforeTestSetup();
}
/**
* Tests that a key can be created in the key vault.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void setKey(HttpClient httpClient, KeyServiceVersion serviceVersion) {
getKeyClient(httpClient, serviceVersion);
// Creating a key must echo back an equivalent key.
setKeyRunner((expected) -> assertKeyEquals(expected, client.createKey(expected)));
}
/**
* Tests that an attempt to create a key with empty string name throws an error.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void setKeyEmptyName(HttpClient httpClient, KeyServiceVersion serviceVersion) {
getKeyClient(httpClient, serviceVersion);
// An empty key name is rejected by the service with a 400.
assertRestException(() -> client.createKey("", KeyType.RSA), ResourceModifiedException.class, HttpURLConnection.HTTP_BAD_REQUEST);
}
/**
* Tests that we cannot create keys when key type is null.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void setKeyNullType(HttpClient httpClient, KeyServiceVersion serviceVersion) {
getKeyClient(httpClient, serviceVersion);
// NOTE(review): despite the method name this uses setKeyEmptyValueRunner; presumably
// the runner supplies a key whose getKeyType() is null - confirm against the test base.
setKeyEmptyValueRunner((key) -> {
assertRestException(() -> client.createKey(key.getName(), key.getKeyType()), ResourceModifiedException.class, HttpURLConnection.HTTP_BAD_REQUEST);
});
}
/**
* Verifies that an exception is thrown when null key object is passed for creation.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void setKeyNull(HttpClient httpClient, KeyServiceVersion serviceVersion) {
    getKeyClient(httpClient, serviceVersion);
    // A null options object must be rejected with an NPE before any request is made.
    // The original asserted the exact same call twice; the duplicate added no coverage.
    assertRunnableThrowsException(() -> client.createKey(null), NullPointerException.class);
}
/**
* Tests that a key is able to be updated when it exists.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void updateKey(HttpClient httpClient, KeyServiceVersion serviceVersion) {
getKeyClient(httpClient, serviceVersion);
updateKeyRunner((original, updated) -> {
assertKeyEquals(original, client.createKey(original));
// Fetch the stored key, push the new expiry, then verify the update stuck.
KeyVaultKey keyToUpdate = client.getKey(original.getName());
client.updateKeyProperties(keyToUpdate.getProperties().setExpiresOn(updated.getExpiresOn()));
assertKeyEquals(updated, client.getKey(original.getName()));
});
}
/**
* Tests that a key is able to be updated when it is disabled.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void updateDisabledKey(HttpClient httpClient, KeyServiceVersion serviceVersion) {
getKeyClient(httpClient, serviceVersion);
// NOTE(review): presumably the runner supplies a disabled key pair - confirm.
updateDisabledKeyRunner((original, updated) -> {
assertKeyEquals(original, client.createKey(original));
KeyVaultKey keyToUpdate = client.getKey(original.getName());
// Only the expiry is changed; the update must succeed even though the key is disabled.
client.updateKeyProperties(keyToUpdate.getProperties().setExpiresOn(updated.getExpiresOn()));
assertKeyEquals(updated, client.getKey(original.getName()));
});
}
/**
* Tests that an existing key can be retrieved.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void getKey(HttpClient httpClient, KeyServiceVersion serviceVersion) {
getKeyClient(httpClient, serviceVersion);
// Round-trip: create the key, then fetch it by name and compare.
getKeyRunner((original) -> {
client.createKey(original);
assertKeyEquals(original, client.getKey(original.getName()));
});
}
/**
* Tests that a specific version of the key can be retrieved.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void getKeySpecificVersion(HttpClient httpClient, KeyServiceVersion serviceVersion) {
getKeyClient(httpClient, serviceVersion);
getKeySpecificVersionRunner((key, keyWithNewVal) -> {
// Creating the same key name twice produces two distinct versions.
KeyVaultKey keyVersionOne = client.createKey(key);
KeyVaultKey keyVersionTwo = client.createKey(keyWithNewVal);
// Each version must be retrievable independently by its version id.
assertKeyEquals(key, client.getKey(keyVersionOne.getName(), keyVersionOne.getProperties().getVersion()));
assertKeyEquals(keyWithNewVal, client.getKey(keyVersionTwo.getName(), keyVersionTwo.getProperties().getVersion()));
});
}
/**
 * Requesting a key that does not exist must surface a 404 as {@code ResourceNotFoundException}.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void getKeyNotFound(HttpClient httpClient, KeyServiceVersion serviceVersion) {
    getKeyClient(httpClient, serviceVersion);
    assertRestException(
        () -> client.getKey("non-existing"),
        ResourceNotFoundException.class,
        HttpURLConnection.HTTP_NOT_FOUND);
}
/**
 * Tests that an existing key can be deleted and subsequently purged.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void deleteKey(HttpClient httpClient, KeyServiceVersion serviceVersion) {
    getKeyClient(httpClient, serviceVersion);
    deleteKeyRunner((keyToDelete) -> {
        assertKeyEquals(keyToDelete, client.createKey(keyToDelete));
        SyncPoller<DeletedKey, Void> deletedKeyPoller = client.beginDeleteKey(keyToDelete.getName());
        PollResponse<DeletedKey> pollResponse = deletedKeyPoller.poll();
        while (!pollResponse.getStatus().isComplete()) {
            sleepInRecordMode(2000);
            pollResponse = deletedKeyPoller.poll();
        }
        // Fix: read the deleted key from the FINAL poll response. The old code captured it
        // from the very first poll, before the delete operation had completed, so the
        // asserted metadata (deletedOn, recoveryId, ...) could be stale or null.
        DeletedKey deletedKey = pollResponse.getValue();
        assertNotNull(deletedKey.getDeletedOn());
        assertNotNull(deletedKey.getRecoveryId());
        assertNotNull(deletedKey.getScheduledPurgeDate());
        assertEquals(keyToDelete.getName(), deletedKey.getName());
        // Clean up: purge so the key name can be reused.
        client.purgeDeletedKey(keyToDelete.getName());
        pollOnKeyPurge(keyToDelete.getName());
    });
}
/**
 * Deleting a key that does not exist must surface a 404 as {@code ResourceNotFoundException}.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void deleteKeyNotFound(HttpClient httpClient, KeyServiceVersion serviceVersion) {
    getKeyClient(httpClient, serviceVersion);
    assertRestException(
        () -> client.beginDeleteKey("non-existing"),
        ResourceNotFoundException.class,
        HttpURLConnection.HTTP_NOT_FOUND);
}
/**
 * Tests that fetching a non-existing deleted key fails with 404 on a soft-delete enabled vault.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void getDeletedKeyNotFound(HttpClient httpClient, KeyServiceVersion serviceVersion) {
    getKeyClient(httpClient, serviceVersion);
    assertRestException(
        () -> client.getDeletedKey("non-existing"),
        ResourceNotFoundException.class,
        HttpURLConnection.HTTP_NOT_FOUND);
}
/**
 * Tests that a deleted key can be recovered on a soft-delete enabled vault.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void recoverDeletedKey(HttpClient httpClient, KeyServiceVersion serviceVersion) {
    getKeyClient(httpClient, serviceVersion);
    recoverDeletedKeyRunner((keyToDeleteAndRecover) -> {
        assertKeyEquals(keyToDeleteAndRecover, client.createKey(keyToDeleteAndRecover));
        // Delete the key and wait for the delete operation to finish.
        SyncPoller<DeletedKey, Void> poller = client.beginDeleteKey(keyToDeleteAndRecover.getName());
        PollResponse<DeletedKey> pollResponse = poller.poll();
        while (!pollResponse.getStatus().isComplete()) {
            sleepInRecordMode(1000);
            pollResponse = poller.poll();
        }
        assertNotNull(pollResponse.getValue());
        // Recover the deleted key and wait for the recovery to finish.
        SyncPoller<KeyVaultKey, Void> recoverPoller = client.beginRecoverDeletedKey(keyToDeleteAndRecover.getName());
        PollResponse<KeyVaultKey> recoverPollResponse = recoverPoller.poll();
        while (!recoverPollResponse.getStatus().isComplete()) {
            sleepInRecordMode(1000);
            recoverPollResponse = recoverPoller.poll();
        }
        // Fix: read the recovered key from the FINAL poll response. The old code captured it
        // from the very first poll (then polled again before the loop), so the asserted value
        // could predate completion of the recovery operation.
        KeyVaultKey recoveredKey = recoverPollResponse.getValue();
        assertEquals(keyToDeleteAndRecover.getName(), recoveredKey.getName());
        assertEquals(keyToDeleteAndRecover.getNotBefore(), recoveredKey.getProperties().getNotBefore());
        assertEquals(keyToDeleteAndRecover.getExpiresOn(), recoveredKey.getProperties().getExpiresOn());
    });
}
/**
 * Tests that recovering a non-existing deleted key fails with 404 on a soft-delete enabled vault.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void recoverDeletedKeyNotFound(HttpClient httpClient, KeyServiceVersion serviceVersion) {
    getKeyClient(httpClient, serviceVersion);
    assertRestException(
        () -> client.beginRecoverDeletedKey("non-existing"),
        ResourceNotFoundException.class,
        HttpURLConnection.HTTP_NOT_FOUND);
}
/**
 * Verifies that a created key can be backed up and yields a non-empty backup blob.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void backupKey(HttpClient httpClient, KeyServiceVersion serviceVersion) {
    getKeyClient(httpClient, serviceVersion);
    backupKeyRunner((keyToBackup) -> {
        assertKeyEquals(keyToBackup, client.createKey(keyToBackup));
        byte[] backupBlob = client.backupKey(keyToBackup.getName());
        assertNotNull(backupBlob);
        assertTrue(backupBlob.length > 0);
    });
}
/**
 * Tests that backing up a non-existing key fails with 404.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void backupKeyNotFound(HttpClient httpClient, KeyServiceVersion serviceVersion) {
    getKeyClient(httpClient, serviceVersion);
    assertRestException(
        () -> client.backupKey("non-existing"),
        ResourceNotFoundException.class,
        HttpURLConnection.HTTP_NOT_FOUND);
}
/**
 * Tests that a key can be backed up, purged and then restored from the backup blob.
 * (Doc fix: the old summary only mentioned backup; this test exercises restore end-to-end.)
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void restoreKey(HttpClient httpClient, KeyServiceVersion serviceVersion) {
getKeyClient(httpClient, serviceVersion);
restoreKeyRunner((keyToBackupAndRestore) -> {
assertKeyEquals(keyToBackupAndRestore, client.createKey(keyToBackupAndRestore));
// Take a backup of the freshly created key.
byte[] backupBytes = (client.backupKey(keyToBackupAndRestore.getName()));
assertNotNull(backupBytes);
assertTrue(backupBytes.length > 0);
// Delete and purge the key so that the restore below recreates it from the backup.
SyncPoller<DeletedKey, Void> poller = client.beginDeleteKey(keyToBackupAndRestore.getName());
PollResponse<DeletedKey> pollResponse = poller.poll();
while (!pollResponse.getStatus().isComplete()) {
sleepInRecordMode(1000);
pollResponse = poller.poll();
}
client.purgeDeletedKey(keyToBackupAndRestore.getName());
pollOnKeyPurge(keyToBackupAndRestore.getName());
// Extra wait for the purge to propagate service-side before restoring
// (sleepInRecordMode presumably no-ops in playback -- confirm in the test base).
sleepInRecordMode(60000);
KeyVaultKey restoredKey = client.restoreKeyBackup(backupBytes);
// The restored key must retain its original name and expiry.
assertEquals(keyToBackupAndRestore.getName(), restoredKey.getName());
assertEquals(keyToBackupAndRestore.getExpiresOn(), restoredKey.getProperties().getExpiresOn());
});
}
/**
 * Tests that restoring a key from malformed backup bytes fails with 400.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void restoreKeyFromMalformedBackup(HttpClient httpClient, KeyServiceVersion serviceVersion) {
    getKeyClient(httpClient, serviceVersion);
    byte[] malformedBackup = "non-existing".getBytes();
    assertRestException(
        () -> client.restoreKeyBackup(malformedBackup),
        ResourceModifiedException.class,
        HttpURLConnection.HTTP_BAD_REQUEST);
}
/**
 * Tests that keys can be listed in the key vault: every created key must show up
 * in {@code listPropertiesOfKeys} with matching expiry and not-before attributes.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void listKeys(HttpClient httpClient, KeyServiceVersion serviceVersion) {
    getKeyClient(httpClient, serviceVersion);
    listKeysRunner((keys) -> {
        // Create every expected key up front. (Removed the pointless `keysToList` alias.)
        for (CreateKeyOptions key : keys.values()) {
            assertKeyEquals(key, client.createKey(key));
            sleepInRecordMode(5000);
        }
        // Cross each listed key off the expected map; single get() instead of
        // containsKey()+get() double lookup. The map must end up empty.
        for (KeyProperties actualKey : client.listPropertiesOfKeys()) {
            CreateKeyOptions expectedKey = keys.get(actualKey.getName());
            if (expectedKey != null) {
                assertEquals(expectedKey.getExpiresOn(), actualKey.getExpiresOn());
                assertEquals(expectedKey.getNotBefore(), actualKey.getNotBefore());
                keys.remove(actualKey.getName());
            }
        }
        assertEquals(0, keys.size());
    });
}
/**
 * Tests that a deleted key can be retrieved on a soft-delete enabled vault, and that
 * it carries the expected deletion metadata.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void getDeletedKey(HttpClient httpClient, KeyServiceVersion serviceVersion) {
getKeyClient(httpClient, serviceVersion);
getDeletedKeyRunner((keyToDeleteAndGet) -> {
assertKeyEquals(keyToDeleteAndGet, client.createKey(keyToDeleteAndGet));
// Delete the key and poll until the delete operation reports completion.
SyncPoller<DeletedKey, Void> poller = client.beginDeleteKey(keyToDeleteAndGet.getName());
PollResponse<DeletedKey> pollResponse = poller.poll();
while (!pollResponse.getStatus().isComplete()) {
sleepInRecordMode(1000);
pollResponse = poller.poll();
}
// Extra wait for service-side propagation before reading the deleted key
// (sleepInRecordMode presumably no-ops in playback -- confirm in the test base).
sleepInRecordMode(30000);
DeletedKey deletedKey = client.getDeletedKey(keyToDeleteAndGet.getName());
// A deleted key must expose deletion metadata and keep its original name.
assertNotNull(deletedKey.getDeletedOn());
assertNotNull(deletedKey.getRecoveryId());
assertNotNull(deletedKey.getScheduledPurgeDate());
assertEquals(keyToDeleteAndGet.getName(), deletedKey.getName());
// Clean up: purge so the key name can be reused by other tests.
client.purgeDeletedKey(keyToDeleteAndGet.getName());
pollOnKeyPurge(keyToDeleteAndGet.getName());
sleepInRecordMode(10000);
});
}
/**
 * Tests that deleted keys can be listed in the key vault and that each listed entry
 * carries deletion metadata.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void listDeletedKeys(HttpClient httpClient, KeyServiceVersion serviceVersion) {
getKeyClient(httpClient, serviceVersion);
listDeletedKeysRunner((keys) -> {
// NOTE(review): `keysToDelete` is just an alias for `keys`; could be removed.
HashMap<String, CreateKeyOptions> keysToDelete = keys;
for (CreateKeyOptions key : keysToDelete.values()) {
assertKeyEquals(key, client.createKey(key));
}
// Delete every key, waiting for each delete operation to complete.
for (CreateKeyOptions key : keysToDelete.values()) {
SyncPoller<DeletedKey, Void> poller = client.beginDeleteKey(key.getName());
PollResponse<DeletedKey> pollResponse = poller.poll();
while (!pollResponse.getStatus().isComplete()) {
sleepInRecordMode(1000);
pollResponse = poller.poll();
}
}
// Long propagation wait before listing (sleepInRecordMode presumably no-ops in playback).
sleepInRecordMode(300000);
Iterable<DeletedKey> deletedKeys = client.listDeletedKeys();
assertTrue(deletedKeys.iterator().hasNext());
// Every listed deleted key must expose deletion metadata.
for (DeletedKey deletedKey : deletedKeys) {
assertNotNull(deletedKey.getDeletedOn());
assertNotNull(deletedKey.getRecoveryId());
}
});
}
/**
 * Tests that all versions of a key can be listed: creating the same key N times must
 * produce exactly N entries in {@code listPropertiesOfKeyVersions}.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void listKeyVersions(HttpClient httpClient, KeyServiceVersion serviceVersion) {
getKeyClient(httpClient, serviceVersion);
listKeyVersionsRunner((keys) -> {
List<CreateKeyOptions> keyVersions = keys;
String keyName = null;
// All entries share the same name; each create adds a new version.
for (CreateKeyOptions key : keyVersions) {
keyName = key.getName();
assertKeyEquals(key, client.createKey(key));
}
Iterable<KeyProperties> keyVersionsOutput = client.listPropertiesOfKeyVersions(keyName);
List<KeyProperties> keyVersionsList = new ArrayList<>();
keyVersionsOutput.forEach(keyVersionsList::add);
assertEquals(keyVersions.size(), keyVersionsList.size());
// Clean up: delete, wait for completion, then purge so the name can be reused.
SyncPoller<DeletedKey, Void> poller = client.beginDeleteKey(keyName);
PollResponse<DeletedKey> pollResponse = poller.poll();
while (!pollResponse.getStatus().isComplete()) {
sleepInRecordMode(1000);
pollResponse = poller.poll();
}
client.purgeDeletedKey(keyName);
pollOnKeyPurge(keyName);
});
}
/**
 * Polls until the given key shows up as deleted (up to 30 attempts, ~60s in record mode).
 *
 * @param keyName name of the key expected to appear as deleted.
 * @return the deleted key, or {@code null} if it never appeared within the attempt budget.
 */
private DeletedKey pollOnKeyDeletion(String keyName) {
    for (int attempt = 0; attempt < 30; attempt++) {
        try {
            return client.getDeletedKey(keyName);
        } catch (ResourceNotFoundException ignored) {
            // Deletion has not propagated yet; back off and retry. (The old code
            // swallowed this silently with an empty catch block.)
        }
        sleepInRecordMode(2000);
    }
    System.err.printf("Deleted Key %s not found \n", keyName);
    return null;
}
/**
 * Polls until the given deleted key has been purged, i.e. is no longer retrievable
 * (up to 10 attempts, ~20s in record mode).
 *
 * @param keyName name of the key expected to be purged.
 * @return always {@code null}; retained for signature compatibility with existing callers.
 */
private DeletedKey pollOnKeyPurge(String keyName) {
    for (int attempt = 0; attempt < 10; attempt++) {
        DeletedKey deletedKey = null;
        try {
            deletedKey = client.getDeletedKey(keyName);
        } catch (ResourceNotFoundException ignored) {
            // Not-found means the purge has completed -- this is the success path.
        }
        if (deletedKey == null) {
            // Purge confirmed. (The old code returned the provably-null `deletedKey`
            // here, which read as if a value could come back.)
            return null;
        }
        sleepInRecordMode(2000);
    }
    System.err.printf("Deleted Key %s was not purged \n", keyName);
    return null;
}
} | class KeyClientTest extends KeyClientTestBase {
private KeyClient client;
@Override
protected void beforeTest() {
beforeTestSetup();
}
/**
* Tests that a key can be created in the key vault.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void setKey(HttpClient httpClient, KeyServiceVersion serviceVersion) {
getKeyClient(httpClient, serviceVersion);
setKeyRunner((expected) -> assertKeyEquals(expected, client.createKey(expected)));
}
/**
* Tests that an attempt to create a key with empty string name throws an error.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void setKeyEmptyName(HttpClient httpClient, KeyServiceVersion serviceVersion) {
getKeyClient(httpClient, serviceVersion);
assertRestException(() -> client.createKey("", KeyType.RSA), ResourceModifiedException.class, HttpURLConnection.HTTP_BAD_REQUEST);
}
/**
* Tests that we cannot create keys when key type is null.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void setKeyNullType(HttpClient httpClient, KeyServiceVersion serviceVersion) {
getKeyClient(httpClient, serviceVersion);
setKeyEmptyValueRunner((key) -> {
assertRestException(() -> client.createKey(key.getName(), key.getKeyType()), ResourceModifiedException.class, HttpURLConnection.HTTP_BAD_REQUEST);
});
}
/**
* Verifies that an exception is thrown when null key object is passed for creation.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void setKeyNull(HttpClient httpClient, KeyServiceVersion serviceVersion) {
getKeyClient(httpClient, serviceVersion);
assertRunnableThrowsException(() -> client.createKey(null), NullPointerException.class);
assertRunnableThrowsException(() -> client.createKey(null), NullPointerException.class);
}
/**
* Tests that a key is able to be updated when it exists.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void updateKey(HttpClient httpClient, KeyServiceVersion serviceVersion) {
getKeyClient(httpClient, serviceVersion);
updateKeyRunner((original, updated) -> {
assertKeyEquals(original, client.createKey(original));
KeyVaultKey keyToUpdate = client.getKey(original.getName());
client.updateKeyProperties(keyToUpdate.getProperties().setExpiresOn(updated.getExpiresOn()));
assertKeyEquals(updated, client.getKey(original.getName()));
});
}
/**
* Tests that a key is able to be updated when it is disabled.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void updateDisabledKey(HttpClient httpClient, KeyServiceVersion serviceVersion) {
getKeyClient(httpClient, serviceVersion);
updateDisabledKeyRunner((original, updated) -> {
assertKeyEquals(original, client.createKey(original));
KeyVaultKey keyToUpdate = client.getKey(original.getName());
client.updateKeyProperties(keyToUpdate.getProperties().setExpiresOn(updated.getExpiresOn()));
assertKeyEquals(updated, client.getKey(original.getName()));
});
}
/**
* Tests that an existing key can be retrieved.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void getKey(HttpClient httpClient, KeyServiceVersion serviceVersion) {
getKeyClient(httpClient, serviceVersion);
getKeyRunner((original) -> {
client.createKey(original);
assertKeyEquals(original, client.getKey(original.getName()));
});
}
/**
* Tests that a specific version of the key can be retrieved.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void getKeySpecificVersion(HttpClient httpClient, KeyServiceVersion serviceVersion) {
getKeyClient(httpClient, serviceVersion);
getKeySpecificVersionRunner((key, keyWithNewVal) -> {
KeyVaultKey keyVersionOne = client.createKey(key);
KeyVaultKey keyVersionTwo = client.createKey(keyWithNewVal);
assertKeyEquals(key, client.getKey(keyVersionOne.getName(), keyVersionOne.getProperties().getVersion()));
assertKeyEquals(keyWithNewVal, client.getKey(keyVersionTwo.getName(), keyVersionTwo.getProperties().getVersion()));
});
}
/**
* Tests that an attempt to get a non-existing key throws an error.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void getKeyNotFound(HttpClient httpClient, KeyServiceVersion serviceVersion) {
getKeyClient(httpClient, serviceVersion);
assertRestException(() -> client.getKey("non-existing"), ResourceNotFoundException.class, HttpURLConnection.HTTP_NOT_FOUND);
}
/**
* Tests that an existing key can be deleted.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void deleteKey(HttpClient httpClient, KeyServiceVersion serviceVersion) {
getKeyClient(httpClient, serviceVersion);
deleteKeyRunner((keyToDelete) -> {
assertKeyEquals(keyToDelete, client.createKey(keyToDelete));
SyncPoller<DeletedKey, Void> deletedKeyPoller = client.beginDeleteKey(keyToDelete.getName());
PollResponse<DeletedKey> pollResponse = deletedKeyPoller.poll();
DeletedKey deletedKey = pollResponse.getValue();
while (!pollResponse.getStatus().isComplete()) {
sleepInRecordMode(2000);
pollResponse = deletedKeyPoller.poll();
}
assertNotNull(deletedKey.getDeletedOn());
assertNotNull(deletedKey.getRecoveryId());
assertNotNull(deletedKey.getScheduledPurgeDate());
assertEquals(keyToDelete.getName(), deletedKey.getName());
client.purgeDeletedKey(keyToDelete.getName());
pollOnKeyPurge(keyToDelete.getName());
});
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void deleteKeyNotFound(HttpClient httpClient, KeyServiceVersion serviceVersion) {
getKeyClient(httpClient, serviceVersion);
assertRestException(() -> client.beginDeleteKey("non-existing"), ResourceNotFoundException.class, HttpURLConnection.HTTP_NOT_FOUND);
}
/**
* Tests that an attempt to retrieve a non existing deleted key throws an error on a soft-delete enabled vault.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void getDeletedKeyNotFound(HttpClient httpClient, KeyServiceVersion serviceVersion) {
getKeyClient(httpClient, serviceVersion);
assertRestException(() -> client.getDeletedKey("non-existing"), ResourceNotFoundException.class, HttpURLConnection.HTTP_NOT_FOUND);
}
/**
* Tests that a deleted key can be recovered on a soft-delete enabled vault.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void recoverDeletedKey(HttpClient httpClient, KeyServiceVersion serviceVersion) {
getKeyClient(httpClient, serviceVersion);
recoverDeletedKeyRunner((keyToDeleteAndRecover) -> {
assertKeyEquals(keyToDeleteAndRecover, client.createKey(keyToDeleteAndRecover));
SyncPoller<DeletedKey, Void> poller = client.beginDeleteKey(keyToDeleteAndRecover.getName());
PollResponse<DeletedKey> pollResponse = poller.poll();
while (!pollResponse.getStatus().isComplete()) {
sleepInRecordMode(1000);
pollResponse = poller.poll();
}
assertNotNull(pollResponse.getValue());
SyncPoller<KeyVaultKey, Void> recoverPoller = client.beginRecoverDeletedKey(keyToDeleteAndRecover.getName());
PollResponse<KeyVaultKey> recoverPollResponse = recoverPoller.poll();
KeyVaultKey recoveredKey = recoverPollResponse.getValue();
recoverPollResponse = recoverPoller.poll();
while (!recoverPollResponse.getStatus().isComplete()) {
sleepInRecordMode(1000);
recoverPollResponse = recoverPoller.poll();
}
assertEquals(keyToDeleteAndRecover.getName(), recoveredKey.getName());
assertEquals(keyToDeleteAndRecover.getNotBefore(), recoveredKey.getProperties().getNotBefore());
assertEquals(keyToDeleteAndRecover.getExpiresOn(), recoveredKey.getProperties().getExpiresOn());
});
}
/**
* Tests that an attempt to recover a non existing deleted key throws an error on a soft-delete enabled vault.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void recoverDeletedKeyNotFound(HttpClient httpClient, KeyServiceVersion serviceVersion) {
getKeyClient(httpClient, serviceVersion);
assertRestException(() -> client.beginRecoverDeletedKey("non-existing"), ResourceNotFoundException.class, HttpURLConnection.HTTP_NOT_FOUND);
}
/**
* Tests that a key can be backed up in the key vault.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void backupKey(HttpClient httpClient, KeyServiceVersion serviceVersion) {
getKeyClient(httpClient, serviceVersion);
backupKeyRunner((keyToBackup) -> {
assertKeyEquals(keyToBackup, client.createKey(keyToBackup));
byte[] backupBytes = (client.backupKey(keyToBackup.getName()));
assertNotNull(backupBytes);
assertTrue(backupBytes.length > 0);
});
}
/**
* Tests that an attempt to backup a non existing key throws an error.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void backupKeyNotFound(HttpClient httpClient, KeyServiceVersion serviceVersion) {
getKeyClient(httpClient, serviceVersion);
assertRestException(() -> client.backupKey("non-existing"), ResourceNotFoundException.class, HttpURLConnection.HTTP_NOT_FOUND);
}
/**
* Tests that a key can be backed up in the key vault.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void restoreKey(HttpClient httpClient, KeyServiceVersion serviceVersion) {
getKeyClient(httpClient, serviceVersion);
restoreKeyRunner((keyToBackupAndRestore) -> {
assertKeyEquals(keyToBackupAndRestore, client.createKey(keyToBackupAndRestore));
byte[] backupBytes = (client.backupKey(keyToBackupAndRestore.getName()));
assertNotNull(backupBytes);
assertTrue(backupBytes.length > 0);
SyncPoller<DeletedKey, Void> poller = client.beginDeleteKey(keyToBackupAndRestore.getName());
PollResponse<DeletedKey> pollResponse = poller.poll();
while (!pollResponse.getStatus().isComplete()) {
sleepInRecordMode(1000);
pollResponse = poller.poll();
}
client.purgeDeletedKey(keyToBackupAndRestore.getName());
pollOnKeyPurge(keyToBackupAndRestore.getName());
sleepInRecordMode(60000);
KeyVaultKey restoredKey = client.restoreKeyBackup(backupBytes);
assertEquals(keyToBackupAndRestore.getName(), restoredKey.getName());
assertEquals(keyToBackupAndRestore.getExpiresOn(), restoredKey.getProperties().getExpiresOn());
});
}
/**
* Tests that an attempt to restore a key from malformed backup bytes throws an error.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void restoreKeyFromMalformedBackup(HttpClient httpClient, KeyServiceVersion serviceVersion) {
getKeyClient(httpClient, serviceVersion);
byte[] keyBackupBytes = "non-existing".getBytes();
assertRestException(() -> client.restoreKeyBackup(keyBackupBytes), ResourceModifiedException.class, HttpURLConnection.HTTP_BAD_REQUEST);
}
/**
* Tests that keys can be listed in the key vault.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void listKeys(HttpClient httpClient, KeyServiceVersion serviceVersion) {
getKeyClient(httpClient, serviceVersion);
listKeysRunner((keys) -> {
HashMap<String, CreateKeyOptions> keysToList = keys;
for (CreateKeyOptions key : keysToList.values()) {
assertKeyEquals(key, client.createKey(key));
sleepInRecordMode(5000);
}
for (KeyProperties actualKey : client.listPropertiesOfKeys()) {
if (keys.containsKey(actualKey.getName())) {
CreateKeyOptions expectedKey = keys.get(actualKey.getName());
assertEquals(expectedKey.getExpiresOn(), actualKey.getExpiresOn());
assertEquals(expectedKey.getNotBefore(), actualKey.getNotBefore());
keys.remove(actualKey.getName());
}
}
assertEquals(0, keys.size());
});
}
/**
* Tests that a deleted key can be retrieved on a soft-delete enabled vault.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void getDeletedKey(HttpClient httpClient, KeyServiceVersion serviceVersion) {
getKeyClient(httpClient, serviceVersion);
getDeletedKeyRunner((keyToDeleteAndGet) -> {
assertKeyEquals(keyToDeleteAndGet, client.createKey(keyToDeleteAndGet));
SyncPoller<DeletedKey, Void> poller = client.beginDeleteKey(keyToDeleteAndGet.getName());
PollResponse<DeletedKey> pollResponse = poller.poll();
while (!pollResponse.getStatus().isComplete()) {
sleepInRecordMode(1000);
pollResponse = poller.poll();
}
sleepInRecordMode(30000);
DeletedKey deletedKey = client.getDeletedKey(keyToDeleteAndGet.getName());
assertNotNull(deletedKey.getDeletedOn());
assertNotNull(deletedKey.getRecoveryId());
assertNotNull(deletedKey.getScheduledPurgeDate());
assertEquals(keyToDeleteAndGet.getName(), deletedKey.getName());
client.purgeDeletedKey(keyToDeleteAndGet.getName());
pollOnKeyPurge(keyToDeleteAndGet.getName());
sleepInRecordMode(10000);
});
}
/**
* Tests that deleted keys can be listed in the key vault.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void listDeletedKeys(HttpClient httpClient, KeyServiceVersion serviceVersion) {
getKeyClient(httpClient, serviceVersion);
listDeletedKeysRunner((keys) -> {
HashMap<String, CreateKeyOptions> keysToDelete = keys;
for (CreateKeyOptions key : keysToDelete.values()) {
assertKeyEquals(key, client.createKey(key));
}
for (CreateKeyOptions key : keysToDelete.values()) {
SyncPoller<DeletedKey, Void> poller = client.beginDeleteKey(key.getName());
PollResponse<DeletedKey> pollResponse = poller.poll();
while (!pollResponse.getStatus().isComplete()) {
sleepInRecordMode(1000);
pollResponse = poller.poll();
}
}
sleepInRecordMode(300000);
Iterable<DeletedKey> deletedKeys = client.listDeletedKeys();
assertTrue(deletedKeys.iterator().hasNext());
for (DeletedKey deletedKey : deletedKeys) {
assertNotNull(deletedKey.getDeletedOn());
assertNotNull(deletedKey.getRecoveryId());
}
});
}
/**
* Tests that key versions can be listed in the key vault.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void listKeyVersions(HttpClient httpClient, KeyServiceVersion serviceVersion) {
getKeyClient(httpClient, serviceVersion);
listKeyVersionsRunner((keys) -> {
List<CreateKeyOptions> keyVersions = keys;
String keyName = null;
for (CreateKeyOptions key : keyVersions) {
keyName = key.getName();
assertKeyEquals(key, client.createKey(key));
}
Iterable<KeyProperties> keyVersionsOutput = client.listPropertiesOfKeyVersions(keyName);
List<KeyProperties> keyVersionsList = new ArrayList<>();
keyVersionsOutput.forEach(keyVersionsList::add);
assertEquals(keyVersions.size(), keyVersionsList.size());
SyncPoller<DeletedKey, Void> poller = client.beginDeleteKey(keyName);
PollResponse<DeletedKey> pollResponse = poller.poll();
while (!pollResponse.getStatus().isComplete()) {
sleepInRecordMode(1000);
pollResponse = poller.poll();
}
client.purgeDeletedKey(keyName);
pollOnKeyPurge(keyName);
});
}
private DeletedKey pollOnKeyDeletion(String keyName) {
int pendingPollCount = 0;
while (pendingPollCount < 30) {
DeletedKey deletedKey = null;
try {
deletedKey = client.getDeletedKey(keyName);
} catch (ResourceNotFoundException e) {
}
if (deletedKey == null) {
sleepInRecordMode(2000);
pendingPollCount += 1;
continue;
} else {
return deletedKey;
}
}
System.err.printf("Deleted Key %s not found \n", keyName);
return null;
}
private DeletedKey pollOnKeyPurge(String keyName) {
int pendingPollCount = 0;
while (pendingPollCount < 10) {
DeletedKey deletedKey = null;
try {
deletedKey = client.getDeletedKey(keyName);
} catch (ResourceNotFoundException e) {
}
if (deletedKey != null) {
sleepInRecordMode(2000);
pendingPollCount += 1;
continue;
} else {
return deletedKey;
}
}
System.err.printf("Deleted Key %s was not purged \n", keyName);
return null;
}
} |
Why's getCertificateClient() called twice? | public void createCertificate(HttpClient httpClient, CertificateServiceVersion serviceVersion) {
getCertificateClient(httpClient, serviceVersion);
createCertificateRunner((policy) -> {
getCertificateClient(httpClient, serviceVersion);
String certName = generateResourceId("testCer");
SyncPoller<CertificateOperation, KeyVaultCertificateWithPolicy> certPoller = client.beginCreateCertificate(certName,
policy);
certPoller.waitForCompletion();
KeyVaultCertificateWithPolicy expected = certPoller.getFinalResult();
assertEquals(certName, expected.getName());
assertNotNull(expected.getProperties().getCreatedOn());
deleteAndPurgeCertificate(certName);
});
} | getCertificateClient(httpClient, serviceVersion); | public void createCertificate(HttpClient httpClient, CertificateServiceVersion serviceVersion) {
createCertificateClient(httpClient, serviceVersion);
createCertificateRunner((policy) -> {
String certName = generateResourceId("testCer");
SyncPoller<CertificateOperation, KeyVaultCertificateWithPolicy> certPoller = client.beginCreateCertificate(certName,
policy);
certPoller.waitForCompletion();
KeyVaultCertificateWithPolicy expected = certPoller.getFinalResult();
assertEquals(certName, expected.getName());
assertNotNull(expected.getProperties().getCreatedOn());
deleteAndPurgeCertificate(certName);
});
} | class CertificateClientTest extends CertificateClientTestBase {
private CertificateClient client;
@Override
protected void beforeTest() {
// Per-test setup shared by all certificate tests; presumably wires up the
// recording/playback infrastructure -- see KeyVault test base for details.
beforeTestSetup();
}
/**
 * Builds the synchronous certificate client under test against the configured vault
 * endpoint, using the HTTP pipeline for the given HTTP client and service version,
 * and stores it in the {@code client} field.
 */
private void getCertificateClient(HttpClient httpClient,
CertificateServiceVersion serviceVersion) {
HttpPipeline httpPipeline = getHttpPipeline(httpClient, serviceVersion);
client = new CertificateClientBuilder()
.vaultUrl(getEndpoint())
.pipeline(httpPipeline)
.serviceVersion(serviceVersion)
.buildClient();
}
/**
 * Helper that deletes the given certificate, waits for the deletion to complete, and
 * then purges it so the name can be reused.
 * Fix: removed the misplaced {@code @ParameterizedTest}/{@code @MethodSource} annotations --
 * this is a private helper, not a test method (JUnit 5 test methods must not be private,
 * and this signature does not match the parameter provider).
 */
private void deleteAndPurgeCertificate(String certName) {
    SyncPoller<DeletedCertificate, Void> deletePoller = client.beginDeleteCertificate(certName);
    deletePoller.poll();
    deletePoller.waitForCompletion();
    client.purgeDeletedCertificate(certName);
    pollOnCertificatePurge(certName);
}
/**
 * An empty certificate name must be rejected by the service.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void createCertificateEmptyName(HttpClient httpClient, CertificateServiceVersion serviceVersion) {
    getCertificateClient(httpClient, serviceVersion);
    assertRestException(
        () -> client.beginCreateCertificate("", CertificatePolicy.getDefault()),
        HttpResponseException.class,
        HttpURLConnection.HTTP_BAD_METHOD);
}
/**
 * A {@code null} certificate policy must be rejected client-side with an NPE.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void createCertificateNullPolicy(HttpClient httpClient, CertificateServiceVersion serviceVersion) {
    getCertificateClient(httpClient, serviceVersion);
    assertRunnableThrowsException(
        () -> client.beginCreateCertificate(generateResourceId("tempCert"), null),
        NullPointerException.class);
}
/**
 * A {@code null} certificate name and policy must be rejected client-side with an NPE.
 * Fix: renamed from {@code createCertoificateNull} (typo); JUnit discovers test methods
 * by annotation, so no callers are affected.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void createCertificateNull(HttpClient httpClient, CertificateServiceVersion serviceVersion) {
    getCertificateClient(httpClient, serviceVersion);
    assertRunnableThrowsException(() -> client.beginCreateCertificate(null, null),
        NullPointerException.class);
}
/**
 * Verifies that the tags on an (enabled) certificate can be replaced via
 * updateCertificateProperties.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void updateCertificate(HttpClient httpClient, CertificateServiceVersion serviceVersion) {
getCertificateClient(httpClient, serviceVersion);
updateCertificateRunner((tags, updatedTags) -> {
String certName = generateResourceId("testCertificate2");
// Create an enabled certificate with the initial tags and wait for the operation.
SyncPoller<CertificateOperation, KeyVaultCertificateWithPolicy> certPoller = client.beginCreateCertificate(certName,
CertificatePolicy.getDefault(), true, tags);
certPoller.waitForCompletion();
KeyVaultCertificateWithPolicy certificate = certPoller.getFinalResult();
// Replace the tags and verify the service echoes them back.
KeyVaultCertificate keyVaultCertificate = client.updateCertificateProperties(certificate.getProperties().setTags(updatedTags));
Map<String, String> returnedTags = keyVaultCertificate.getProperties().getTags();
// validateMapResponse only checks the expected entries; extras are ignored.
validateMapResponse(updatedTags, returnedTags);
// Clean up: delete and purge so the certificate name can be reused.
deleteAndPurgeCertificate(certName);
});
}
/**
 * Asserts that every entry of {@code expected} appears with the same value in
 * {@code returned}. Extra entries in {@code returned} are not checked.
 */
private void validateMapResponse(Map<String, String> expected, Map<String, String> returned) {
    // Iterate entries instead of keySet()+get() to avoid the double lookup per key.
    for (Map.Entry<String, String> entry : expected.entrySet()) {
        assertEquals(entry.getValue(), returned.get(entry.getKey()));
    }
}
/**
 * Verifies that the tags on a DISABLED certificate can still be replaced, and that the
 * certificate remains disabled afterwards.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void updateDisabledCertificate(HttpClient httpClient, CertificateServiceVersion serviceVersion) {
getCertificateClient(httpClient, serviceVersion);
updateDisabledCertificateRunner((tags, updatedTags) -> {
String certName = generateResourceId("testCertificate3");
// `false` creates the certificate in the disabled state.
SyncPoller<CertificateOperation, KeyVaultCertificateWithPolicy> certPoller = client.beginCreateCertificate(certName,
CertificatePolicy.getDefault(), false, tags);
certPoller.waitForCompletion();
KeyVaultCertificateWithPolicy certificate = certPoller.getFinalResult();
KeyVaultCertificate keyVaultCertificate = client.updateCertificateProperties(certificate.getProperties().setTags(updatedTags));
Map<String, String> returnedTags = keyVaultCertificate.getProperties().getTags();
validateMapResponse(updatedTags, returnedTags);
// The update must not flip the enabled flag.
assertFalse(keyVaultCertificate.getProperties().isEnabled());
deleteAndPurgeCertificate(certName);
});
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void getCertificate(HttpClient httpClient, CertificateServiceVersion serviceVersion) {
getCertificateClient(httpClient, serviceVersion);
getCertificateRunner((certificateName) -> {
CertificatePolicy initialPolicy = setupPolicy();
SyncPoller<CertificateOperation, KeyVaultCertificateWithPolicy> certPoller = client.beginCreateCertificate(certificateName,
initialPolicy);
certPoller.waitForCompletion();
KeyVaultCertificateWithPolicy certificate = certPoller.getFinalResult();
KeyVaultCertificateWithPolicy getCertificate = client.getCertificate(certificateName);
validatePolicy(certificate.getPolicy(), getCertificate.getPolicy());
deleteAndPurgeCertificate(certificateName);
});
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void getCertificateSpecificVersion(HttpClient httpClient, CertificateServiceVersion serviceVersion) {
getCertificateClient(httpClient, serviceVersion);
getCertificateSpecificVersionRunner((certificateName) -> {
CertificatePolicy initialPolicy = setupPolicy();
SyncPoller<CertificateOperation, KeyVaultCertificateWithPolicy> certPoller = client.beginCreateCertificate(certificateName,
initialPolicy);
certPoller.waitForCompletion();
KeyVaultCertificateWithPolicy certificate = certPoller.getFinalResult();
KeyVaultCertificate getCertificate = client.getCertificateVersion(certificateName, certificate.getProperties().getVersion());
validateCertificate(certificate, getCertificate);
deleteAndPurgeCertificate(certificateName);
});
}
// Fetching a non-existent certificate must surface HTTP 404 as ResourceNotFoundException.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void getCertificateNotFound(HttpClient httpClient, CertificateServiceVersion serviceVersion) {
getCertificateClient(httpClient, serviceVersion);
assertRestException(() -> client.getCertificate("non-existing"), ResourceNotFoundException.class, HttpURLConnection.HTTP_NOT_FOUND);
}
// Deletes a freshly created certificate and checks the deletion metadata
// (deletedOn, recoveryId, scheduledPurgeDate) before purging it.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void deleteCertificate(HttpClient httpClient, CertificateServiceVersion serviceVersion) {
getCertificateClient(httpClient, serviceVersion);
deleteCertificateRunner((certificateName) -> {
CertificatePolicy initialPolicy = setupPolicy();
SyncPoller<CertificateOperation, KeyVaultCertificateWithPolicy> certPoller = client.beginCreateCertificate(certificateName,
initialPolicy);
certPoller.waitForCompletion();
SyncPoller<DeletedCertificate, Void> deletedKeyPoller = client.beginDeleteCertificate(certificateName);
PollResponse<DeletedCertificate> pollResponse = deletedKeyPoller.poll();
DeletedCertificate deletedCertificate = pollResponse.getValue();
deletedKeyPoller.waitForCompletion();
assertNotNull(deletedCertificate.getDeletedOn());
assertNotNull(deletedCertificate.getRecoveryId());
assertNotNull(deletedCertificate.getScheduledPurgeDate());
assertEquals(certificateName, deletedCertificate.getName());
client.purgeDeletedCertificate(certificateName);
pollOnCertificatePurge(certificateName);
});
}
// Deleting a non-existent certificate must surface HTTP 404 as ResourceNotFoundException.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void deleteCertificateNotFound(HttpClient httpClient, CertificateServiceVersion serviceVersion) {
getCertificateClient(httpClient, serviceVersion);
assertRestException(() -> client.beginDeleteCertificate("non-existing"), ResourceNotFoundException.class, HttpURLConnection.HTTP_NOT_FOUND);
}
// Deletes a certificate, then retrieves it via getDeletedCertificate and
// verifies the soft-delete metadata before purging.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void getDeletedCertificate(HttpClient httpClient, CertificateServiceVersion serviceVersion) {
getCertificateClient(httpClient, serviceVersion);
getDeletedCertificateRunner((certificateName) -> {
CertificatePolicy initialPolicy = setupPolicy();
SyncPoller<CertificateOperation, KeyVaultCertificateWithPolicy> certPoller = client.beginCreateCertificate(certificateName,
initialPolicy);
certPoller.waitForCompletion();
SyncPoller<DeletedCertificate, Void> deletedKeyPoller = client.beginDeleteCertificate(certificateName);
PollResponse<DeletedCertificate> pollResponse = deletedKeyPoller.poll();
DeletedCertificate deletedCertificate = pollResponse.getValue();
deletedKeyPoller.waitForCompletion();
deletedCertificate = client.getDeletedCertificate(certificateName);
assertNotNull(deletedCertificate.getDeletedOn());
assertNotNull(deletedCertificate.getRecoveryId());
assertNotNull(deletedCertificate.getScheduledPurgeDate());
assertEquals(certificateName, deletedCertificate.getName());
client.purgeDeletedCertificate(certificateName);
pollOnCertificatePurge(certificateName);
});
}
// getDeletedCertificate on a name that was never deleted must surface 404.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void getDeletedCertificateNotFound(HttpClient httpClient, CertificateServiceVersion serviceVersion) {
getCertificateClient(httpClient, serviceVersion);
assertRestException(() -> client.getDeletedCertificate("non-existing"), ResourceNotFoundException.class, HttpURLConnection.HTTP_NOT_FOUND);
}
// Deletes a certificate, recovers it via beginRecoverDeletedCertificate, and
// verifies the recovered certificate matches the original.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void recoverDeletedCertificate(HttpClient httpClient, CertificateServiceVersion serviceVersion) {
getCertificateClient(httpClient, serviceVersion);
recoverDeletedKeyRunner((certificateName) -> {
CertificatePolicy initialPolicy = setupPolicy();
SyncPoller<CertificateOperation, KeyVaultCertificateWithPolicy> certPoller = client.beginCreateCertificate(certificateName,
initialPolicy);
certPoller.waitForCompletion();
KeyVaultCertificate createdCertificate = certPoller.getFinalResult();
SyncPoller<DeletedCertificate, Void> deletedKeyPoller = client.beginDeleteCertificate(certificateName);
PollResponse<DeletedCertificate> pollResponse = deletedKeyPoller.poll();
deletedKeyPoller.waitForCompletion();
SyncPoller<KeyVaultCertificateWithPolicy, Void> recoverPoller = client.beginRecoverDeletedCertificate(certificateName);
PollResponse<KeyVaultCertificateWithPolicy> recoverPollResponse = recoverPoller.poll();
KeyVaultCertificate recoveredCert = recoverPollResponse.getValue();
recoverPoller.waitForCompletion();
assertEquals(certificateName, recoveredCert.getName());
validateCertificate(createdCertificate, recoveredCert);
deleteAndPurgeCertificate(certificateName);
});
}
// Recovering a certificate that was never deleted must surface 404.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void recoverDeletedCertificateNotFound(HttpClient httpClient, CertificateServiceVersion serviceVersion) {
getCertificateClient(httpClient, serviceVersion);
assertRestException(() -> client.beginRecoverDeletedCertificate("non-existing"), ResourceNotFoundException.class, HttpURLConnection.HTTP_NOT_FOUND);
}
// backupCertificate on an existing certificate must return a non-empty blob.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void backupCertificate(HttpClient httpClient, CertificateServiceVersion serviceVersion) {
getCertificateClient(httpClient, serviceVersion);
backupCertificateRunner((certificateName) -> {
CertificatePolicy initialPolicy = setupPolicy();
SyncPoller<CertificateOperation, KeyVaultCertificateWithPolicy> certPoller = client.beginCreateCertificate(certificateName,
initialPolicy);
certPoller.waitForCompletion();
byte[] backupBytes = (client.backupCertificate(certificateName));
assertNotNull(backupBytes);
assertTrue(backupBytes.length > 0);
});
}
// Backing up a non-existent certificate must surface 404.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void backupCertificateNotFound(HttpClient httpClient, CertificateServiceVersion serviceVersion) {
getCertificateClient(httpClient, serviceVersion);
assertRestException(() -> client.backupCertificate("non-existing"), ResourceNotFoundException.class, HttpURLConnection.HTTP_NOT_FOUND);
}
// Backs up a certificate, deletes/purges it, then restores it from the backup
// blob and verifies the restored policy matches the original.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void restoreCertificate(HttpClient httpClient, CertificateServiceVersion serviceVersion) {
getCertificateClient(httpClient, serviceVersion);
restoreCertificateRunner((certificateName) -> {
CertificatePolicy initialPolicy = setupPolicy();
SyncPoller<CertificateOperation, KeyVaultCertificateWithPolicy> certPoller = client.beginCreateCertificate(certificateName,
initialPolicy);
certPoller.waitForCompletion();
KeyVaultCertificateWithPolicy createdCert = certPoller.getFinalResult();
byte[] backupBytes = (client.backupCertificate(certificateName));
assertNotNull(backupBytes);
assertTrue(backupBytes.length > 0);
deleteAndPurgeCertificate(certificateName);
// Give the service time to finish the purge before restoring the same name.
sleepInRecordMode(40000);
KeyVaultCertificateWithPolicy restoredCertificate = client.restoreCertificateBackup(backupBytes);
assertEquals(certificateName, restoredCertificate.getName());
validatePolicy(restoredCertificate.getPolicy(), createdCert.getPolicy());
deleteAndPurgeCertificate(certificateName);
});
}
// Starts a create operation, retrieves the same operation via
// getCertificateOperation, and verifies both pollers yield the same certificate.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void getCertificateOperation(HttpClient httpClient, CertificateServiceVersion serviceVersion) {
getCertificateClient(httpClient, serviceVersion);
getCertificateOperationRunner((certName) -> {
SyncPoller<CertificateOperation, KeyVaultCertificateWithPolicy> certPoller =
client.beginCreateCertificate(certName, setupPolicy());
SyncPoller<CertificateOperation, KeyVaultCertificateWithPolicy> retrievePoller = client.getCertificateOperation(certName);
retrievePoller.waitForCompletion();
certPoller.waitForCompletion();
KeyVaultCertificateWithPolicy reteievedCert = retrievePoller.getFinalResult();
KeyVaultCertificateWithPolicy expectedCert = certPoller.getFinalResult();
validateCertificate(expectedCert, reteievedCert);
validatePolicy(expectedCert.getPolicy(),
reteievedCert.getPolicy());
deleteAndPurgeCertificate(certName);
});
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void cancelCertificateOperation(HttpClient httpClient, CertificateServiceVersion serviceVersion) {
    getCertificateClient(httpClient, serviceVersion);
    cancelCertificateOperationRunner((certName) -> {
        SyncPoller<CertificateOperation, KeyVaultCertificateWithPolicy> certPoller =
            client.beginCreateCertificate(certName, CertificatePolicy.getDefault());
        certPoller.poll();
        certPoller.cancelOperation();
        certPoller.waitForCompletion();
        KeyVaultCertificateWithPolicy certificate = certPoller.getFinalResult();
        // A cancelled create operation must leave the partial certificate disabled.
        assertFalse(certificate.getProperties().isEnabled());
    });
}
// Deletes a completed certificate operation and verifies a second delete 404s.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void deleteCertificateOperation(HttpClient httpClient, CertificateServiceVersion serviceVersion) {
getCertificateClient(httpClient, serviceVersion);
deleteCertificateOperationRunner((certName) -> {
SyncPoller<CertificateOperation, KeyVaultCertificateWithPolicy> certPoller =
client.beginCreateCertificate(certName, CertificatePolicy.getDefault());
certPoller.waitForCompletion();
CertificateOperation certificateOperation = client.deleteCertificateOperation(certName);
assertEquals("completed", certificateOperation.getStatus());
assertRestException(() -> client.deleteCertificateOperation(certName), ResourceNotFoundException.class, HttpURLConnection.HTTP_NOT_FOUND);
});
}
// Creates a certificate with a custom policy and verifies the stored policy
// matches what was submitted.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void getCertificatePolicy(HttpClient httpClient, CertificateServiceVersion serviceVersion) {
getCertificateClient(httpClient, serviceVersion);
getCertificatePolicyRunner((certName) -> {
SyncPoller<CertificateOperation, KeyVaultCertificateWithPolicy> certPoller =
client.beginCreateCertificate(certName, setupPolicy());
certPoller.waitForCompletion();
KeyVaultCertificateWithPolicy certificate = certPoller.getFinalResult();
validatePolicy(setupPolicy(), certificate.getPolicy());
deleteAndPurgeCertificate(certName);
});
}
// Updates the exportable flag of a certificate's policy and verifies the
// service returns the updated policy.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void updateCertificatePolicy(HttpClient httpClient, CertificateServiceVersion serviceVersion) {
getCertificateClient(httpClient, serviceVersion);
updateCertificatePolicyRunner((certName) -> {
SyncPoller<CertificateOperation, KeyVaultCertificateWithPolicy> certPoller =
client.beginCreateCertificate(certName, setupPolicy());
certPoller.waitForCompletion();
KeyVaultCertificateWithPolicy certificate = certPoller.getFinalResult();
certificate.getPolicy().setExportable(false);
CertificatePolicy policy = client.updateCertificatePolicy(certName, certificate.getPolicy());
validatePolicy(certificate.getPolicy(), policy);
deleteAndPurgeCertificate(certName);
});
}
// Restoring from a malformed backup blob must surface HTTP 400.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void restoreCertificateFromMalformedBackup(HttpClient httpClient, CertificateServiceVersion serviceVersion) {
getCertificateClient(httpClient, serviceVersion);
byte[] keyBackupBytes = "non-existing".getBytes();
assertRestException(() -> client.restoreCertificateBackup(keyBackupBytes), ResourceModifiedException.class, HttpURLConnection.HTTP_BAD_REQUEST);
}
// Creates a set of certificates and verifies that listPropertiesOfCertificates
// eventually reports all of them.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void listCertificates(HttpClient httpClient, CertificateServiceVersion serviceVersion) {
getCertificateClient(httpClient, serviceVersion);
listCertificatesRunner((certificates) -> {
HashSet<String> certificatesToList = new HashSet<>(certificates);
for (String certName : certificatesToList) {
SyncPoller<CertificateOperation, KeyVaultCertificateWithPolicy> certPoller = client.beginCreateCertificate(certName,
CertificatePolicy.getDefault());
certPoller.waitForCompletion();
}
// Allow the service listing to become consistent with the creations above.
sleepInRecordMode(90000);
for (CertificateProperties actualKey : client.listPropertiesOfCertificates()) {
if (certificatesToList.contains(actualKey.getName())) {
certificatesToList.remove(actualKey.getName());
}
}
// Every created certificate must have appeared in the listing.
assertEquals(0, certificatesToList.size());
for (String certName : certificates) {
deleteAndPurgeCertificate(certName);
}
});
}
// Creates a certificate issuer and verifies the returned issuer matches.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void createIssuer(HttpClient httpClient, CertificateServiceVersion serviceVersion) {
getCertificateClient(httpClient, serviceVersion);
createIssuereRunner((issuer) -> {
CertificateIssuer createdIssuer = client.createIssuer(issuer);
validateIssuer(issuer, createdIssuer);
});
}
// An issuer with an empty name must be rejected by the service with 405.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void createIssuerEmptyName(HttpClient httpClient, CertificateServiceVersion serviceVersion) {
getCertificateClient(httpClient, serviceVersion);
assertRestException(() -> client.createIssuer(new CertificateIssuer("", "")),
HttpResponseException.class, HttpURLConnection.HTTP_BAD_METHOD);
}
// An issuer with a null provider must be rejected by the service with 405.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void createIssuerNullProvider(HttpClient httpClient, CertificateServiceVersion serviceVersion) {
getCertificateClient(httpClient, serviceVersion);
assertRestException(() -> client.createIssuer(new CertificateIssuer("", null)),
HttpResponseException.class, HttpURLConnection.HTTP_BAD_METHOD);
}
// A null issuer must fail fast, client-side, with NullPointerException.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void createIssuerNull(HttpClient httpClient, CertificateServiceVersion serviceVersion) {
getCertificateClient(httpClient, serviceVersion);
assertRunnableThrowsException(() -> client.createIssuer(null), NullPointerException.class);
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void getCertificateIssuer(HttpClient httpClient, CertificateServiceVersion serviceVersion) {
    getCertificateClient(httpClient, serviceVersion);
    getCertificateIssuerRunner((issuer) -> {
        // Create the issuer, then fetch it back and verify the round trip.
        // (The create result itself is not needed; the unused local was removed.)
        client.createIssuer(issuer);
        CertificateIssuer retrievedIssuer = client.getIssuer(issuer.getName());
        validateIssuer(issuer, retrievedIssuer);
    });
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void getCertificateIssuerNotFound(HttpClient httpClient, CertificateServiceVersion serviceVersion) {
    getCertificateClient(httpClient, serviceVersion);
    // BUG FIX: this test previously exercised backupCertificate (copy/paste);
    // the API under test here is getIssuer.
    assertRestException(() -> client.getIssuer("non-existing"), ResourceNotFoundException.class, HttpURLConnection.HTTP_NOT_FOUND);
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void deleteCertificateIssuer(HttpClient httpClient, CertificateServiceVersion serviceVersion) {
    getCertificateClient(httpClient, serviceVersion);
    deleteCertificateIssuerRunner((issuer) -> {
        // Create then delete; deleteIssuer returns the deleted issuer, which
        // must match what was created. (Unused local from create was removed.)
        client.createIssuer(issuer);
        CertificateIssuer deletedIssuer = client.deleteIssuer(issuer.getName());
        validateIssuer(issuer, deletedIssuer);
    });
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void deleteCertificateIssuerNotFound(HttpClient httpClient, CertificateServiceVersion serviceVersion) {
    getCertificateClient(httpClient, serviceVersion);
    // BUG FIX: this test previously exercised backupCertificate (copy/paste);
    // the API under test here is deleteIssuer.
    assertRestException(() -> client.deleteIssuer("non-existing"), ResourceNotFoundException.class, HttpURLConnection.HTTP_NOT_FOUND);
}
// Creates several issuers and verifies listPropertiesOfIssuers reports them all.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void listCertificateIssuers(HttpClient httpClient, CertificateServiceVersion serviceVersion) {
getCertificateClient(httpClient, serviceVersion);
listCertificateIssuersRunner((certificateIssuers) -> {
HashMap<String, CertificateIssuer> certificateIssuersToList = new HashMap<>(certificateIssuers);
for (CertificateIssuer issuer : certificateIssuersToList.values()) {
CertificateIssuer certificateIssuer = client.createIssuer(issuer);
validateIssuer(issuer, certificateIssuer);
}
for (IssuerProperties issuerProperties : client.listPropertiesOfIssuers()) {
if (certificateIssuersToList.containsKey(issuerProperties.getName())) {
certificateIssuersToList.remove(issuerProperties.getName());
}
}
// Every created issuer must have appeared in the listing.
assertEquals(0, certificateIssuersToList.size());
for (CertificateIssuer issuer : certificateIssuers.values()) {
client.deleteIssuer(issuer.getName());
}
});
}
// setContacts must echo back the submitted contact list.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void setContacts(HttpClient httpClient, CertificateServiceVersion serviceVersion) {
getCertificateClient(httpClient, serviceVersion);
List<CertificateContact> contacts = Arrays.asList(setupContact());
client.setContacts(contacts).forEach((retrievedContact) -> validateContact(setupContact(), retrievedContact));
client.deleteContacts();
}
// listContacts must return the contacts previously set.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void listContacts(HttpClient httpClient, CertificateServiceVersion serviceVersion) {
getCertificateClient(httpClient, serviceVersion);
List<CertificateContact> contacts = Arrays.asList(setupContact());
client.setContacts(contacts).forEach((retrievedContact) -> validateContact(setupContact(), retrievedContact));
// Give the service time to persist the contacts before listing.
sleepInRecordMode(6000);
client.listContacts().stream().forEach((retrievedContact) -> validateContact(setupContact(), retrievedContact));
}
// deleteContacts must return the contacts being removed.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void deleteContacts(HttpClient httpClient, CertificateServiceVersion serviceVersion) {
getCertificateClient(httpClient, serviceVersion);
List<CertificateContact> contacts = Arrays.asList(setupContact());
client.setContacts(contacts).forEach((retrievedContact) -> validateContact(setupContact(), retrievedContact));
PagedIterable<CertificateContact> certificateContacts = client.deleteContacts();
validateContact(setupContact(), certificateContacts.iterator().next());
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
// Renamed from "getCertificateOperatioNotFound" (typo).
public void getCertificateOperationNotFound(HttpClient httpClient, CertificateServiceVersion serviceVersion) {
    getCertificateClient(httpClient, serviceVersion);
    // Polling the operation of a non-existent certificate must surface 404.
    assertRestException(() -> client.getCertificateOperation("non-existing").poll(), ResourceNotFoundException.class, HttpURLConnection.HTTP_NOT_FOUND);
}
// Fetching the policy of a non-existent certificate must surface 404.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void getCertificatePolicyNotFound(HttpClient httpClient, CertificateServiceVersion serviceVersion) {
getCertificateClient(httpClient, serviceVersion);
assertRestException(() -> client.getCertificatePolicy("non-existing"), ResourceNotFoundException.class, HttpURLConnection.HTTP_NOT_FOUND);
}
// Recreates the same certificate several times and verifies that
// listPropertiesOfCertificateVersions reports one entry per version.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void listCertificateVersions(HttpClient httpClient, CertificateServiceVersion serviceVersion) {
getCertificateClient(httpClient, serviceVersion);
String certName = generateResourceId("testListCertVersion");
int counter = 5;
for (int i = 0; i < counter; i++) {
SyncPoller<CertificateOperation, KeyVaultCertificateWithPolicy> certPoller = client.beginCreateCertificate(certName,
CertificatePolicy.getDefault());
certPoller.waitForCompletion();
}
int countRecv = 0;
for (CertificateProperties certificateProperties : client.listPropertiesOfCertificateVersions(certName)) {
countRecv++;
assertEquals(certificateProperties.getName(), certName);
}
assertEquals(counter, countRecv);
deleteAndPurgeCertificate(certName);
}
// Creates then deletes a set of certificates and verifies they all appear in
// listDeletedCertificates with soft-delete metadata.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void listDeletedCertificates(HttpClient httpClient, CertificateServiceVersion serviceVersion) {
getCertificateClient(httpClient, serviceVersion);
listDeletedCertificatesRunner((certificates) -> {
HashSet<String> certificatesToDelete = new HashSet<>(certificates);
for (String certName : certificatesToDelete) {
SyncPoller<CertificateOperation, KeyVaultCertificateWithPolicy> certPoller = client.beginCreateCertificate(certName,
CertificatePolicy.getDefault());
PollResponse<CertificateOperation> pollResponse = certPoller.poll();
// Manually poll the create operation to completion.
while (!pollResponse.getStatus().isComplete()) {
sleepInRecordMode(1000);
pollResponse = certPoller.poll();
}
}
for (String certName : certificates) {
SyncPoller<DeletedCertificate, Void> poller = client.beginDeleteCertificate(certName);
PollResponse<DeletedCertificate> pollResponse = poller.poll();
// Manually poll each deletion to completion.
while (!pollResponse.getStatus().isComplete()) {
sleepInRecordMode(1000);
pollResponse = poller.poll();
}
assertNotNull(pollResponse.getValue());
}
// Allow the deleted-certificates listing to become consistent.
sleepInRecordMode(90000);
Iterable<DeletedCertificate> deletedCertificates = client.listDeletedCertificates();
assertTrue(deletedCertificates.iterator().hasNext());
for (DeletedCertificate deletedCertificate : deletedCertificates) {
assertNotNull(deletedCertificate.getDeletedOn());
assertNotNull(deletedCertificate.getRecoveryId());
certificatesToDelete.remove(deletedCertificate.getName());
}
});
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void importCertificate(HttpClient httpClient, CertificateServiceVersion serviceVersion) {
    getCertificateClient(httpClient, serviceVersion);
    importCertificateRunner((importCertificateOptions) -> {
        KeyVaultCertificateWithPolicy importedCertificate = client.importCertificate(importCertificateOptions);
        // The imported certificate fixture has a fixed, known thumbprint.
        assertTrue(toHexString(importedCertificate.getProperties().getX509Thumbprint())
            .equalsIgnoreCase("7cb8b7539d87ba7215357b9b9049dff2d3fa59ba"));
        assertEquals(importCertificateOptions.isEnabled(), importedCertificate.getProperties().isEnabled());

        // Validate the CER content: parse it and check subject/issuer.
        X509Certificate x509Certificate = null;
        try {
            x509Certificate = loadCerToX509Certificate(importedCertificate);
        } catch (CertificateException | IOException e) {
            // Multi-catch replaces the two duplicated catch blocks; keep the
            // stack trace for diagnosis, then fail the test.
            e.printStackTrace();
            fail();
        }
        // assertEquals replaces assertTrue(a.equals(b)) for better failure messages.
        assertEquals("CN=KeyVaultTest", x509Certificate.getSubjectX500Principal().getName());
        assertEquals("CN=Root Agency", x509Certificate.getIssuerX500Principal().getName());
        deleteAndPurgeCertificate(importCertificateOptions.getName());
    });
}
// Merging into a non-existent pending certificate must surface 404.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void mergeCertificateNotFound(HttpClient httpClient, CertificateServiceVersion serviceVersion) {
getCertificateClient(httpClient, serviceVersion);
assertRestException(() -> client.mergeCertificate(new MergeCertificateOptions(generateResourceId("testCert16"), Arrays.asList("test".getBytes()))),
HttpResponseException.class, HttpURLConnection.HTTP_NOT_FOUND);
}
// Imports a PEM certificate and verifies the content type is preserved as PEM.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void importPemCertificate(HttpClient httpClient, CertificateServiceVersion serviceVersion) throws IOException {
getCertificateClient(httpClient, serviceVersion);
importPemCertificateRunner((importCertificateOptions) -> {
KeyVaultCertificateWithPolicy importedCertificate = client.importCertificate(importCertificateOptions);
assertEquals(importCertificateOptions.isEnabled(), importedCertificate.getProperties().isEnabled());
assertEquals(CertificateContentType.PEM, importedCertificate.getPolicy().getContentType());
deleteAndPurgeCertificate(importCertificateOptions.getName());
});
}
/**
 * Polls {@code getDeletedCertificate} until the purge completes (i.e. the
 * deleted certificate can no longer be found), giving up after 10 attempts
 * of ~2 seconds each.
 *
 * @param certificateName name of the certificate being purged
 * @return {@code null} once the certificate is gone, or {@code null} after
 *         timing out (with a warning printed to stderr)
 */
private DeletedCertificate pollOnCertificatePurge(String certificateName) {
    int pendingPollCount = 0;
    while (pendingPollCount < 10) {
        DeletedCertificate deletedCertificate = null;
        try {
            deletedCertificate = client.getDeletedCertificate(certificateName);
        } catch (ResourceNotFoundException ignored) {
            // Expected once the purge has completed: the certificate is gone.
        }
        if (deletedCertificate == null) {
            return null;
        }
        // Still present — wait and retry.
        sleepInRecordMode(2000);
        pendingPollCount += 1;
    }
    System.err.printf("Deleted Key %s was not purged \n", certificateName);
    return null;
}
} | class CertificateClientTest extends CertificateClientTestBase {
// Client under test; (re)built per test case by createCertificateClient.
private CertificateClient client;
@Override
protected void beforeTest() {
// Shared per-test setup from the test base.
beforeTestSetup();
}
// Builds a synchronous CertificateClient against the configured vault endpoint
// using the given HTTP client and service version, storing it in 'client'.
private void createCertificateClient(HttpClient httpClient,
CertificateServiceVersion serviceVersion) {
HttpPipeline httpPipeline = getHttpPipeline(httpClient, serviceVersion);
client = new CertificateClientBuilder()
.vaultUrl(getEndpoint())
.pipeline(httpPipeline)
.serviceVersion(serviceVersion)
.buildClient();
}
// BUG FIX: removed spurious @ParameterizedTest/@MethodSource annotations —
// this is a private helper, not a test, and its single String parameter does
// not match the (HttpClient, CertificateServiceVersion) test parameter source.
// Deletes the named certificate, waits for deletion to complete, purges it,
// and polls until the purge finishes.
private void deleteAndPurgeCertificate(String certName) {
    SyncPoller<DeletedCertificate, Void> deletePoller = client.beginDeleteCertificate(certName);
    deletePoller.poll();
    deletePoller.waitForCompletion();
    client.purgeDeletedCertificate(certName);
    pollOnCertificatePurge(certName);
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void createCertificateEmptyName(HttpClient httpClient, CertificateServiceVersion serviceVersion) {
    createCertificateClient(httpClient, serviceVersion);
    // An empty certificate name is rejected by the service with 405.
    assertRestException(() -> client.beginCreateCertificate("", CertificatePolicy.getDefault()),
        HttpResponseException.class, HttpURLConnection.HTTP_BAD_METHOD);
}

@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void createCertificateNullPolicy(HttpClient httpClient, CertificateServiceVersion serviceVersion) {
    createCertificateClient(httpClient, serviceVersion);
    // A null policy must be rejected client-side with NullPointerException.
    assertRunnableThrowsException(() -> client.beginCreateCertificate(generateResourceId("tempCert"), null),
        NullPointerException.class);
}

@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
// Renamed from "createCertoificateNull" (typo).
public void createCertificateNull(HttpClient httpClient, CertificateServiceVersion serviceVersion) {
    createCertificateClient(httpClient, serviceVersion);
    // Both a null name and a null policy must fail fast with NullPointerException.
    assertRunnableThrowsException(() -> client.beginCreateCertificate(null, null),
        NullPointerException.class);
}
// Verifies that the tags of an existing (enabled) certificate can be replaced
// via updateCertificateProperties and that the returned properties reflect them.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void updateCertificate(HttpClient httpClient, CertificateServiceVersion serviceVersion) {
createCertificateClient(httpClient, serviceVersion);
updateCertificateRunner((tags, updatedTags) -> {
String certName = generateResourceId("testCertificate2");
SyncPoller<CertificateOperation, KeyVaultCertificateWithPolicy> certPoller = client.beginCreateCertificate(certName,
CertificatePolicy.getDefault(), true, tags);
certPoller.waitForCompletion();
KeyVaultCertificateWithPolicy certificate = certPoller.getFinalResult();
KeyVaultCertificate keyVaultCertificate = client.updateCertificateProperties(certificate.getProperties().setTags(updatedTags));
Map<String, String> returnedTags = keyVaultCertificate.getProperties().getTags();
validateMapResponse(updatedTags, returnedTags);
deleteAndPurgeCertificate(certName);
});
}
// Asserts every expected key/value pair is present in the returned map
// (extra keys in 'returned' are not checked).
private void validateMapResponse(Map<String, String> expected, Map<String, String> returned) {
for (String key : expected.keySet()) {
String val = returned.get(key);
String expectedVal = expected.get(key);
assertEquals(expectedVal, val);
}
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void updateDisabledCertificate(HttpClient httpClient, CertificateServiceVersion serviceVersion) {
createCertificateClient(httpClient, serviceVersion);
updateDisabledCertificateRunner((tags, updatedTags) -> {
String certName = generateResourceId("testCertificate3");
SyncPoller<CertificateOperation, KeyVaultCertificateWithPolicy> certPoller = client.beginCreateCertificate(certName,
CertificatePolicy.getDefault(), false, tags);
certPoller.waitForCompletion();
KeyVaultCertificateWithPolicy certificate = certPoller.getFinalResult();
KeyVaultCertificate keyVaultCertificate = client.updateCertificateProperties(certificate.getProperties().setTags(updatedTags));
Map<String, String> returnedTags = keyVaultCertificate.getProperties().getTags();
validateMapResponse(updatedTags, returnedTags);
assertFalse(keyVaultCertificate.getProperties().isEnabled());
deleteAndPurgeCertificate(certName);
});
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void getCertificate(HttpClient httpClient, CertificateServiceVersion serviceVersion) {
createCertificateClient(httpClient, serviceVersion);
getCertificateRunner((certificateName) -> {
CertificatePolicy initialPolicy = setupPolicy();
SyncPoller<CertificateOperation, KeyVaultCertificateWithPolicy> certPoller = client.beginCreateCertificate(certificateName,
initialPolicy);
certPoller.waitForCompletion();
KeyVaultCertificateWithPolicy certificate = certPoller.getFinalResult();
KeyVaultCertificateWithPolicy getCertificate = client.getCertificate(certificateName);
validatePolicy(certificate.getPolicy(), getCertificate.getPolicy());
deleteAndPurgeCertificate(certificateName);
});
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void getCertificateSpecificVersion(HttpClient httpClient, CertificateServiceVersion serviceVersion) {
createCertificateClient(httpClient, serviceVersion);
getCertificateSpecificVersionRunner((certificateName) -> {
CertificatePolicy initialPolicy = setupPolicy();
SyncPoller<CertificateOperation, KeyVaultCertificateWithPolicy> certPoller = client.beginCreateCertificate(certificateName,
initialPolicy);
certPoller.waitForCompletion();
KeyVaultCertificateWithPolicy certificate = certPoller.getFinalResult();
KeyVaultCertificate getCertificate = client.getCertificateVersion(certificateName, certificate.getProperties().getVersion());
validateCertificate(certificate, getCertificate);
deleteAndPurgeCertificate(certificateName);
});
}
// Fetching a certificate that was never created must surface the service 404
// as a ResourceNotFoundException.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void getCertificateNotFound(HttpClient httpClient, CertificateServiceVersion serviceVersion) {
    createCertificateClient(httpClient, serviceVersion);
    final String missingName = "non-existing";
    assertRestException(() -> client.getCertificate(missingName),
        ResourceNotFoundException.class, HttpURLConnection.HTTP_NOT_FOUND);
}
// Creates a certificate, deletes it via the delete poller, and verifies the deleted
// entity carries deletion metadata (deletedOn, recoveryId, scheduledPurgeDate);
// then purges and waits for the purge to land.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void deleteCertificate(HttpClient httpClient, CertificateServiceVersion serviceVersion) {
createCertificateClient(httpClient, serviceVersion);
deleteCertificateRunner((certificateName) -> {
CertificatePolicy initialPolicy = setupPolicy();
SyncPoller<CertificateOperation, KeyVaultCertificateWithPolicy> certPoller = client.beginCreateCertificate(certificateName,
initialPolicy);
certPoller.waitForCompletion();
SyncPoller<DeletedCertificate, Void> deletedKeyPoller = client.beginDeleteCertificate(certificateName);
// The deleted certificate is taken from the first poll response, before the
// poller is driven to completion below.
PollResponse<DeletedCertificate> pollResponse = deletedKeyPoller.poll();
DeletedCertificate deletedCertificate = pollResponse.getValue();
deletedKeyPoller.waitForCompletion();
assertNotNull(deletedCertificate.getDeletedOn());
assertNotNull(deletedCertificate.getRecoveryId());
assertNotNull(deletedCertificate.getScheduledPurgeDate());
assertEquals(certificateName, deletedCertificate.getName());
client.purgeDeletedCertificate(certificateName);
pollOnCertificatePurge(certificateName);
});
}
// Deleting a certificate that does not exist must fail with HTTP 404.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void deleteCertificateNotFound(HttpClient httpClient, CertificateServiceVersion serviceVersion) {
    createCertificateClient(httpClient, serviceVersion);
    final String missingName = "non-existing";
    assertRestException(() -> client.beginDeleteCertificate(missingName),
        ResourceNotFoundException.class, HttpURLConnection.HTTP_NOT_FOUND);
}
// Creates and deletes a certificate, then retrieves it via getDeletedCertificate and
// verifies the deletion metadata; purges afterwards.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void getDeletedCertificate(HttpClient httpClient, CertificateServiceVersion serviceVersion) {
createCertificateClient(httpClient, serviceVersion);
getDeletedCertificateRunner((certificateName) -> {
CertificatePolicy initialPolicy = setupPolicy();
SyncPoller<CertificateOperation, KeyVaultCertificateWithPolicy> certPoller = client.beginCreateCertificate(certificateName,
initialPolicy);
certPoller.waitForCompletion();
SyncPoller<DeletedCertificate, Void> deletedKeyPoller = client.beginDeleteCertificate(certificateName);
PollResponse<DeletedCertificate> pollResponse = deletedKeyPoller.poll();
DeletedCertificate deletedCertificate = pollResponse.getValue();
deletedKeyPoller.waitForCompletion();
// Re-fetch through the service; the value from the poll above is overwritten.
deletedCertificate = client.getDeletedCertificate(certificateName);
assertNotNull(deletedCertificate.getDeletedOn());
assertNotNull(deletedCertificate.getRecoveryId());
assertNotNull(deletedCertificate.getScheduledPurgeDate());
assertEquals(certificateName, deletedCertificate.getName());
client.purgeDeletedCertificate(certificateName);
pollOnCertificatePurge(certificateName);
});
}
// Looking up a deleted certificate that does not exist must fail with HTTP 404.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void getDeletedCertificateNotFound(HttpClient httpClient, CertificateServiceVersion serviceVersion) {
createCertificateClient(httpClient, serviceVersion);
assertRestException(() -> client.getDeletedCertificate("non-existing"), ResourceNotFoundException.class, HttpURLConnection.HTTP_NOT_FOUND);
}
// Creates, deletes, then recovers a certificate and checks the recovered certificate
// matches the original; cleans up by delete + purge.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void recoverDeletedCertificate(HttpClient httpClient, CertificateServiceVersion serviceVersion) {
createCertificateClient(httpClient, serviceVersion);
recoverDeletedKeyRunner((certificateName) -> {
CertificatePolicy initialPolicy = setupPolicy();
SyncPoller<CertificateOperation, KeyVaultCertificateWithPolicy> certPoller = client.beginCreateCertificate(certificateName,
initialPolicy);
certPoller.waitForCompletion();
KeyVaultCertificate createdCertificate = certPoller.getFinalResult();
SyncPoller<DeletedCertificate, Void> deletedKeyPoller = client.beginDeleteCertificate(certificateName);
// NOTE(review): pollResponse is never read; the poll() call drives the deletion forward.
PollResponse<DeletedCertificate> pollResponse = deletedKeyPoller.poll();
deletedKeyPoller.waitForCompletion();
SyncPoller<KeyVaultCertificateWithPolicy, Void> recoverPoller = client.beginRecoverDeletedCertificate(certificateName);
PollResponse<KeyVaultCertificateWithPolicy> recoverPollResponse = recoverPoller.poll();
KeyVaultCertificate recoveredCert = recoverPollResponse.getValue();
recoverPoller.waitForCompletion();
assertEquals(certificateName, recoveredCert.getName());
validateCertificate(createdCertificate, recoveredCert);
deleteAndPurgeCertificate(certificateName);
});
}
// Recovering a deleted certificate that does not exist must fail with HTTP 404.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void recoverDeletedCertificateNotFound(HttpClient httpClient, CertificateServiceVersion serviceVersion) {
createCertificateClient(httpClient, serviceVersion);
assertRestException(() -> client.beginRecoverDeletedCertificate("non-existing"), ResourceNotFoundException.class, HttpURLConnection.HTTP_NOT_FOUND);
}
// Creates a certificate and verifies backupCertificate returns a non-empty backup blob.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void backupCertificate(HttpClient httpClient, CertificateServiceVersion serviceVersion) {
createCertificateClient(httpClient, serviceVersion);
backupCertificateRunner((certificateName) -> {
CertificatePolicy initialPolicy = setupPolicy();
SyncPoller<CertificateOperation, KeyVaultCertificateWithPolicy> certPoller = client.beginCreateCertificate(certificateName,
initialPolicy);
certPoller.waitForCompletion();
byte[] backupBytes = (client.backupCertificate(certificateName));
assertNotNull(backupBytes);
assertTrue(backupBytes.length > 0);
});
}
// Backing up a certificate that does not exist must fail with HTTP 404.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void backupCertificateNotFound(HttpClient httpClient, CertificateServiceVersion serviceVersion) {
createCertificateClient(httpClient, serviceVersion);
assertRestException(() -> client.backupCertificate("non-existing"), ResourceNotFoundException.class, HttpURLConnection.HTTP_NOT_FOUND);
}
// Round-trips a certificate through backup -> delete+purge -> restore and verifies
// the restored certificate's name and policy match the original.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void restoreCertificate(HttpClient httpClient, CertificateServiceVersion serviceVersion) {
createCertificateClient(httpClient, serviceVersion);
restoreCertificateRunner((certificateName) -> {
CertificatePolicy initialPolicy = setupPolicy();
SyncPoller<CertificateOperation, KeyVaultCertificateWithPolicy> certPoller = client.beginCreateCertificate(certificateName,
initialPolicy);
certPoller.waitForCompletion();
KeyVaultCertificateWithPolicy createdCert = certPoller.getFinalResult();
byte[] backupBytes = (client.backupCertificate(certificateName));
assertNotNull(backupBytes);
assertTrue(backupBytes.length > 0);
deleteAndPurgeCertificate(certificateName);
// Allow the purge to fully propagate before restoring (record mode only).
sleepInRecordMode(40000);
KeyVaultCertificateWithPolicy restoredCertificate = client.restoreCertificateBackup(backupBytes);
assertEquals(certificateName, restoredCertificate.getName());
validatePolicy(restoredCertificate.getPolicy(), createdCert.getPolicy());
deleteAndPurgeCertificate(certificateName);
});
}
// Starts a certificate creation, retrieves the same in-flight operation through
// getCertificateOperation, and verifies both pollers resolve to an identical
// certificate and policy.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void getCertificateOperation(HttpClient httpClient, CertificateServiceVersion serviceVersion) {
    createCertificateClient(httpClient, serviceVersion);
    getCertificateOperationRunner((certName) -> {
        SyncPoller<CertificateOperation, KeyVaultCertificateWithPolicy> createPoller =
            client.beginCreateCertificate(certName, setupPolicy());
        SyncPoller<CertificateOperation, KeyVaultCertificateWithPolicy> retrievePoller = client.getCertificateOperation(certName);
        retrievePoller.waitForCompletion();
        createPoller.waitForCompletion();
        KeyVaultCertificateWithPolicy retrievedCert = retrievePoller.getFinalResult();
        KeyVaultCertificateWithPolicy expectedCert = createPoller.getFinalResult();
        validateCertificate(expectedCert, retrievedCert);
        validatePolicy(expectedCert.getPolicy(), retrievedCert.getPolicy());
        deleteAndPurgeCertificate(certName);
    });
}
// Cancels an in-flight certificate creation and verifies the partially created
// certificate comes back disabled.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void cancelCertificateOperation(HttpClient httpClient, CertificateServiceVersion serviceVersion) {
    createCertificateClient(httpClient, serviceVersion);
    cancelCertificateOperationRunner((certName) -> {
        SyncPoller<CertificateOperation, KeyVaultCertificateWithPolicy> certPoller =
            client.beginCreateCertificate(certName, CertificatePolicy.getDefault());
        certPoller.poll();
        certPoller.cancelOperation();
        certPoller.waitForCompletion();
        KeyVaultCertificateWithPolicy certificate = certPoller.getFinalResult();
        // assertFalse reads better than assertEquals(false, ...) and avoids boxing;
        // same assertion style as the other tests in this file.
        assertFalse(certificate.getProperties().isEnabled());
    });
}
// Deletes the completed creation operation and verifies a second delete of the
// same operation fails with HTTP 404.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void deleteCertificateOperation(HttpClient httpClient, CertificateServiceVersion serviceVersion) {
createCertificateClient(httpClient, serviceVersion);
deleteCertificateOperationRunner((certName) -> {
SyncPoller<CertificateOperation, KeyVaultCertificateWithPolicy> certPoller =
client.beginCreateCertificate(certName, CertificatePolicy.getDefault());
certPoller.waitForCompletion();
CertificateOperation certificateOperation = client.deleteCertificateOperation(certName);
assertEquals("completed", certificateOperation.getStatus());
assertRestException(() -> client.deleteCertificateOperation(certName), ResourceNotFoundException.class, HttpURLConnection.HTTP_NOT_FOUND);
});
}
// Creates a certificate with a custom policy and verifies the stored policy
// equals the policy that was submitted.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void getCertificatePolicy(HttpClient httpClient, CertificateServiceVersion serviceVersion) {
createCertificateClient(httpClient, serviceVersion);
getCertificatePolicyRunner((certName) -> {
SyncPoller<CertificateOperation, KeyVaultCertificateWithPolicy> certPoller =
client.beginCreateCertificate(certName, setupPolicy());
certPoller.waitForCompletion();
KeyVaultCertificateWithPolicy certificate = certPoller.getFinalResult();
validatePolicy(setupPolicy(), certificate.getPolicy());
deleteAndPurgeCertificate(certName);
});
}
// Updates the certificate's policy (flips exportable to false) and verifies the
// service returns the updated policy.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void updateCertificatePolicy(HttpClient httpClient, CertificateServiceVersion serviceVersion) {
createCertificateClient(httpClient, serviceVersion);
updateCertificatePolicyRunner((certName) -> {
SyncPoller<CertificateOperation, KeyVaultCertificateWithPolicy> certPoller =
client.beginCreateCertificate(certName, setupPolicy());
certPoller.waitForCompletion();
KeyVaultCertificateWithPolicy certificate = certPoller.getFinalResult();
certificate.getPolicy().setExportable(false);
CertificatePolicy policy = client.updateCertificatePolicy(certName, certificate.getPolicy());
validatePolicy(certificate.getPolicy(), policy);
deleteAndPurgeCertificate(certName);
});
}
// Restoring from bytes that are not a valid backup blob must fail with HTTP 400.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void restoreCertificateFromMalformedBackup(HttpClient httpClient, CertificateServiceVersion serviceVersion) {
createCertificateClient(httpClient, serviceVersion);
// NOTE(review): getBytes() uses the platform default charset; harmless for this
// ASCII literal, but StandardCharsets.UTF_8 would be the defensive choice.
byte[] keyBackupBytes = "non-existing".getBytes();
assertRestException(() -> client.restoreCertificateBackup(keyBackupBytes), ResourceModifiedException.class, HttpURLConnection.HTTP_BAD_REQUEST);
}
// Creates a batch of certificates and verifies every one appears in
// listPropertiesOfCertificates; cleans up each afterwards.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void listCertificates(HttpClient httpClient, CertificateServiceVersion serviceVersion) {
createCertificateClient(httpClient, serviceVersion);
listCertificatesRunner((certificates) -> {
HashSet<String> certificatesToList = new HashSet<>(certificates);
for (String certName : certificatesToList) {
SyncPoller<CertificateOperation, KeyVaultCertificateWithPolicy> certPoller = client.beginCreateCertificate(certName,
CertificatePolicy.getDefault());
certPoller.waitForCompletion();
}
// Listing is eventually consistent; wait before enumerating (record mode only).
sleepInRecordMode(90000);
for (CertificateProperties actualKey : client.listPropertiesOfCertificates()) {
if (certificatesToList.contains(actualKey.getName())) {
certificatesToList.remove(actualKey.getName());
}
}
// Every created certificate must have been seen in the listing.
assertEquals(0, certificatesToList.size());
for (String certName : certificates) {
deleteAndPurgeCertificate(certName);
}
});
}
// Creates a certificate issuer and verifies the returned issuer matches the input.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void createIssuer(HttpClient httpClient, CertificateServiceVersion serviceVersion) {
createCertificateClient(httpClient, serviceVersion);
createIssuereRunner((issuer) -> {
CertificateIssuer createdIssuer = client.createIssuer(issuer);
validateIssuer(issuer, createdIssuer);
});
}
// Creating an issuer with an empty name must be rejected by the service (HTTP 405).
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void createIssuerEmptyName(HttpClient httpClient, CertificateServiceVersion serviceVersion) {
createCertificateClient(httpClient, serviceVersion);
assertRestException(() -> client.createIssuer(new CertificateIssuer("", "")),
HttpResponseException.class, HttpURLConnection.HTTP_BAD_METHOD);
}
// Creating an issuer with a null provider must be rejected by the service.
// NOTE(review): the name here is also empty, so this may be failing on the name
// rather than the null provider — consider using a valid name to isolate the case.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void createIssuerNullProvider(HttpClient httpClient, CertificateServiceVersion serviceVersion) {
createCertificateClient(httpClient, serviceVersion);
assertRestException(() -> client.createIssuer(new CertificateIssuer("", null)),
HttpResponseException.class, HttpURLConnection.HTTP_BAD_METHOD);
}
// Passing null to createIssuer must throw NullPointerException client-side.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void createIssuerNull(HttpClient httpClient, CertificateServiceVersion serviceVersion) {
createCertificateClient(httpClient, serviceVersion);
assertRunnableThrowsException(() -> client.createIssuer(null), NullPointerException.class);
}
// Creates an issuer, fetches it back by name, and verifies the fields match.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void getCertificateIssuer(HttpClient httpClient, CertificateServiceVersion serviceVersion) {
createCertificateClient(httpClient, serviceVersion);
getCertificateIssuerRunner((issuer) -> {
CertificateIssuer createdIssuer = client.createIssuer(issuer);
CertificateIssuer retrievedIssuer = client.getIssuer(issuer.getName());
validateIssuer(issuer, retrievedIssuer);
});
}
// Fetching an issuer that does not exist must fail with HTTP 404.
// Bug fix: the original called client.backupCertificate("non-existing") — a
// copy-paste from the backup tests — so the issuer lookup path was never exercised.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void getCertificateIssuerNotFound(HttpClient httpClient, CertificateServiceVersion serviceVersion) {
    createCertificateClient(httpClient, serviceVersion);
    assertRestException(() -> client.getIssuer("non-existing"),
        ResourceNotFoundException.class, HttpURLConnection.HTTP_NOT_FOUND);
}
// Creates an issuer, deletes it, and verifies the deleted entity echoes the input.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void deleteCertificateIssuer(HttpClient httpClient, CertificateServiceVersion serviceVersion) {
createCertificateClient(httpClient, serviceVersion);
deleteCertificateIssuerRunner((issuer) -> {
CertificateIssuer createdIssuer = client.createIssuer(issuer);
CertificateIssuer deletedIssuer = client.deleteIssuer(issuer.getName());
validateIssuer(issuer, deletedIssuer);
});
}
// Deleting an issuer that does not exist must fail with HTTP 404.
// Bug fix: the original called client.backupCertificate("non-existing") — a
// copy-paste from the backup tests — so issuer deletion was never exercised.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void deleteCertificateIssuerNotFound(HttpClient httpClient, CertificateServiceVersion serviceVersion) {
    createCertificateClient(httpClient, serviceVersion);
    assertRestException(() -> client.deleteIssuer("non-existing"),
        ResourceNotFoundException.class, HttpURLConnection.HTTP_NOT_FOUND);
}
// Creates a batch of issuers and verifies every one appears in
// listPropertiesOfIssuers; deletes them all afterwards.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void listCertificateIssuers(HttpClient httpClient, CertificateServiceVersion serviceVersion) {
createCertificateClient(httpClient, serviceVersion);
listCertificateIssuersRunner((certificateIssuers) -> {
HashMap<String, CertificateIssuer> certificateIssuersToList = new HashMap<>(certificateIssuers);
for (CertificateIssuer issuer : certificateIssuersToList.values()) {
CertificateIssuer certificateIssuer = client.createIssuer(issuer);
validateIssuer(issuer, certificateIssuer);
}
for (IssuerProperties issuerProperties : client.listPropertiesOfIssuers()) {
if (certificateIssuersToList.containsKey(issuerProperties.getName())) {
certificateIssuersToList.remove(issuerProperties.getName());
}
}
// Every created issuer must have been seen in the listing.
assertEquals(0, certificateIssuersToList.size());
for (CertificateIssuer issuer : certificateIssuers.values()) {
client.deleteIssuer(issuer.getName());
}
});
}
// Sets the vault's certificate contacts and verifies the returned contacts match;
// removes the contacts afterwards.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void setContacts(HttpClient httpClient, CertificateServiceVersion serviceVersion) {
createCertificateClient(httpClient, serviceVersion);
List<CertificateContact> contacts = Arrays.asList(setupContact());
client.setContacts(contacts).forEach((retrievedContact) -> validateContact(setupContact(), retrievedContact));
client.deleteContacts();
}
// Sets contacts, waits for propagation, then verifies listContacts returns them.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void listContacts(HttpClient httpClient, CertificateServiceVersion serviceVersion) {
createCertificateClient(httpClient, serviceVersion);
List<CertificateContact> contacts = Arrays.asList(setupContact());
client.setContacts(contacts).forEach((retrievedContact) -> validateContact(setupContact(), retrievedContact));
// Contact propagation lag (record mode only).
sleepInRecordMode(6000);
client.listContacts().stream().forEach((retrievedContact) -> validateContact(setupContact(), retrievedContact));
}
// Sets contacts then deletes them, verifying the delete response echoes the contact.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void deleteContacts(HttpClient httpClient, CertificateServiceVersion serviceVersion) {
createCertificateClient(httpClient, serviceVersion);
List<CertificateContact> contacts = Arrays.asList(setupContact());
client.setContacts(contacts).forEach((retrievedContact) -> validateContact(setupContact(), retrievedContact));
PagedIterable<CertificateContact> certificateContacts = client.deleteContacts();
validateContact(setupContact(), certificateContacts.iterator().next());
}
// Polling the operation of a non-existent certificate must fail with HTTP 404.
// NOTE(review): method name is missing an 'n' ("Operatio"); the matching abstract
// declaration in the base class shares the typo, so renaming must be done in both.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void getCertificateOperatioNotFound(HttpClient httpClient, CertificateServiceVersion serviceVersion) {
createCertificateClient(httpClient, serviceVersion);
assertRestException(() -> client.getCertificateOperation("non-existing").poll(), ResourceNotFoundException.class, HttpURLConnection.HTTP_NOT_FOUND);
}
// Fetching the policy of a non-existent certificate must fail with HTTP 404.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void getCertificatePolicyNotFound(HttpClient httpClient, CertificateServiceVersion serviceVersion) {
createCertificateClient(httpClient, serviceVersion);
assertRestException(() -> client.getCertificatePolicy("non-existing"), ResourceNotFoundException.class, HttpURLConnection.HTTP_NOT_FOUND);
}
// Creates the same certificate name five times (five versions) and verifies
// listPropertiesOfCertificateVersions returns exactly that many versions.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void listCertificateVersions(HttpClient httpClient, CertificateServiceVersion serviceVersion) {
createCertificateClient(httpClient, serviceVersion);
String certName = generateResourceId("testListCertVersion");
int counter = 5;
for (int i = 0; i < counter; i++) {
SyncPoller<CertificateOperation, KeyVaultCertificateWithPolicy> certPoller = client.beginCreateCertificate(certName,
CertificatePolicy.getDefault());
certPoller.waitForCompletion();
}
int countRecv = 0;
for (CertificateProperties certificateProperties : client.listPropertiesOfCertificateVersions(certName)) {
countRecv++;
assertEquals(certificateProperties.getName(), certName);
}
assertEquals(counter, countRecv);
deleteAndPurgeCertificate(certName);
}
// Creates then deletes a batch of certificates and verifies each one shows up in
// the deleted-certificates listing with its recovery metadata populated.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void listDeletedCertificates(HttpClient httpClient, CertificateServiceVersion serviceVersion) {
    createCertificateClient(httpClient, serviceVersion);
    listDeletedCertificatesRunner((certificates) -> {
        HashSet<String> certificatesToDelete = new HashSet<>(certificates);
        for (String certName : certificatesToDelete) {
            SyncPoller<CertificateOperation, KeyVaultCertificateWithPolicy> certPoller = client.beginCreateCertificate(certName,
                CertificatePolicy.getDefault());
            PollResponse<CertificateOperation> pollResponse = certPoller.poll();
            while (!pollResponse.getStatus().isComplete()) {
                sleepInRecordMode(1000);
                pollResponse = certPoller.poll();
            }
        }
        for (String certName : certificates) {
            SyncPoller<DeletedCertificate, Void> poller = client.beginDeleteCertificate(certName);
            PollResponse<DeletedCertificate> pollResponse = poller.poll();
            while (!pollResponse.getStatus().isComplete()) {
                sleepInRecordMode(1000);
                pollResponse = poller.poll();
            }
            assertNotNull(pollResponse.getValue());
        }
        // The deleted listing is eventually consistent (record mode only).
        sleepInRecordMode(90000);
        Iterable<DeletedCertificate> deletedCertificates = client.listDeletedCertificates();
        assertTrue(deletedCertificates.iterator().hasNext());
        for (DeletedCertificate deletedCertificate : deletedCertificates) {
            assertNotNull(deletedCertificate.getDeletedOn());
            assertNotNull(deletedCertificate.getRecoveryId());
            certificatesToDelete.remove(deletedCertificate.getName());
        }
        // Bug fix: the original drained certificatesToDelete but never asserted it was
        // empty, so a certificate missing from the listing went undetected. This now
        // matches the pattern used by listCertificates.
        assertEquals(0, certificatesToDelete.size());
    });
}
// Imports a PFX certificate and verifies the thumbprint, enabled flag, and the
// X.509 subject/issuer of the imported leaf certificate; cleans up afterwards.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void importCertificate(HttpClient httpClient, CertificateServiceVersion serviceVersion) {
    createCertificateClient(httpClient, serviceVersion);
    importCertificateRunner((importCertificateOptions) -> {
        KeyVaultCertificateWithPolicy importedCertificate = client.importCertificate(importCertificateOptions);
        assertTrue(toHexString(importedCertificate.getProperties().getX509Thumbprint()).equalsIgnoreCase("7cb8b7539d87ba7215357b9b9049dff2d3fa59ba"));
        assertEquals(importCertificateOptions.isEnabled(), importedCertificate.getProperties().isEnabled());
        X509Certificate x509Certificate = null;
        try {
            x509Certificate = loadCerToX509Certificate(importedCertificate);
        } catch (CertificateException | IOException e) {
            // Multi-catch replaces the two duplicated catch blocks from the original.
            e.printStackTrace();
            fail();
        }
        // assertEquals reports both values on mismatch, unlike assertTrue(x.equals(y)).
        assertEquals("CN=KeyVaultTest", x509Certificate.getSubjectX500Principal().getName());
        assertEquals("CN=Root Agency", x509Certificate.getIssuerX500Principal().getName());
        deleteAndPurgeCertificate(importCertificateOptions.getName());
    });
}
// Merging into a certificate with no pending operation must fail with HTTP 404.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void mergeCertificateNotFound(HttpClient httpClient, CertificateServiceVersion serviceVersion) {
createCertificateClient(httpClient, serviceVersion);
assertRestException(() -> client.mergeCertificate(new MergeCertificateOptions(generateResourceId("testCert16"), Arrays.asList("test".getBytes()))),
HttpResponseException.class, HttpURLConnection.HTTP_NOT_FOUND);
}
// Imports a PEM certificate and verifies the enabled flag and that the stored
// policy reports PEM content type; cleans up afterwards.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void importPemCertificate(HttpClient httpClient, CertificateServiceVersion serviceVersion) throws IOException {
createCertificateClient(httpClient, serviceVersion);
importPemCertificateRunner((importCertificateOptions) -> {
KeyVaultCertificateWithPolicy importedCertificate = client.importCertificate(importCertificateOptions);
assertEquals(importCertificateOptions.isEnabled(), importedCertificate.getProperties().isEnabled());
assertEquals(CertificateContentType.PEM, importedCertificate.getPolicy().getContentType());
deleteAndPurgeCertificate(importCertificateOptions.getName());
});
}
// Polls until the purged certificate stops resolving (up to 10 attempts, ~2s apart
// in record mode). Returns null either way: immediately once the certificate is
// gone, or after logging a warning when the purge never landed.
private DeletedCertificate pollOnCertificatePurge(String certificateName) {
    for (int attempt = 0; attempt < 10; attempt++) {
        DeletedCertificate deletedCertificate = null;
        try {
            deletedCertificate = client.getDeletedCertificate(certificateName);
        } catch (ResourceNotFoundException e) {
            // Expected once the purge completes — the deleted record disappears.
        }
        if (deletedCertificate == null) {
            return null;
        }
        sleepInRecordMode(2000);
    }
    System.err.printf("Deleted Key %s was not purged \n", certificateName);
    return null;
}
} |
nit: delete this line | HttpPipeline getHttpPipeline(HttpClient httpClient, CertificateServiceVersion serviceVersion) {
TokenCredential credential = null;
if (!interceptorManager.isPlaybackMode()) {
String clientId = System.getenv("ARM_CLIENTID");
String clientKey = System.getenv("ARM_CLIENTKEY");
String tenantId = System.getenv("AZURE_TENANT_ID");
Objects.requireNonNull(clientId, "The client id cannot be null");
Objects.requireNonNull(clientKey, "The client key cannot be null");
Objects.requireNonNull(tenantId, "The tenant id cannot be null");
credential = new ClientSecretCredentialBuilder()
.clientSecret(clientKey)
.clientId(clientId)
.tenantId(tenantId)
.build();
}
final List<HttpPipelinePolicy> policies = new ArrayList<>();
policies.add(new UserAgentPolicy(SDK_NAME, SDK_VERSION,
Configuration.getGlobalConfiguration().clone(), serviceVersion));
HttpPolicyProviders.addBeforeRetryPolicies(policies);
policies.add(new RetryPolicy());
if (credential != null) {
policies.add(new BearerTokenAuthenticationPolicy(credential, CertificateAsyncClient.KEY_VAULT_SCOPE));
}
HttpPolicyProviders.addAfterRetryPolicies(policies);
policies.add(new HttpLoggingPolicy(new HttpLogOptions().setLogLevel(HttpLogDetailLevel.BODY_AND_HEADERS)));
if (!interceptorManager.isPlaybackMode()) {
policies.add(interceptorManager.getRecordPolicy());
}
HttpPipeline pipeline = new HttpPipelineBuilder()
.policies(policies.toArray(new HttpPipelinePolicy[0]))
.httpClient(httpClient == null ? interceptorManager.getPlaybackClient() : httpClient)
.build();
return pipeline;
}
@Test
public abstract void createCertificate(HttpClient httpClient, CertificateServiceVersion serviceVersion);
// Supplies the default certificate policy to the create-certificate test body.
void createCertificateRunner(Consumer<CertificatePolicy> testRunner) {
final CertificatePolicy certificatePolicy = CertificatePolicy.getDefault();
testRunner.accept(certificatePolicy);
}
@Test
public abstract void createCertificateEmptyName(HttpClient httpClient, CertificateServiceVersion serviceVersion);
@Test
public abstract void createCertificateNullPolicy(HttpClient httpClient, CertificateServiceVersion serviceVersion);
// NOTE(review): "createCertoificateNull" is misspelled; renaming requires updating
// every subclass override in the same change, so it is left as-is here.
@Test public abstract void createCertoificateNull(HttpClient httpClient, CertificateServiceVersion serviceVersion);
@Test
public abstract void updateCertificate(HttpClient httpClient, CertificateServiceVersion serviceVersion);
// Supplies the initial tag map and the updated tag map to the update-certificate test.
void updateCertificateRunner(BiConsumer<Map<String, String>, Map<String, String>> testRunner) {
    final Map<String, String> tags = new HashMap<>();
    tags.put("first tag", "first value");
    final Map<String, String> updatedTags = new HashMap<>();
    // Bug fix: the original put both entries into `tags` and left `updatedTags`
    // empty, so the update step effectively validated against an empty map.
    updatedTags.put("first tag", "first value");
    updatedTags.put("second tag", "second value");
    testRunner.accept(tags, updatedTags);
}
@Test
public abstract void updateDisabledCertificate(HttpClient httpClient, CertificateServiceVersion serviceVersion);
// Supplies the initial tag map and the updated tag map to the disabled-certificate
// update test.
void updateDisabledCertificateRunner(BiConsumer<Map<String, String>, Map<String, String>> testRunner) {
    final Map<String, String> tags = new HashMap<>();
    tags.put("first tag", "first value");
    final Map<String, String> updatedTags = new HashMap<>();
    // Bug fix: mirrors updateCertificateRunner — the original populated `tags`
    // twice and never filled `updatedTags`.
    updatedTags.put("first tag", "first value");
    updatedTags.put("second tag", "second value");
    testRunner.accept(tags, updatedTags);
}
// Abstract test declarations plus runners supplying unique resource names to each
// certificate lifecycle test.
@Test
public abstract void getCertificate(HttpClient httpClient, CertificateServiceVersion serviceVersion);
void getCertificateRunner(Consumer<String> testRunner) {
testRunner.accept(generateResourceId("testCertificate4"));
}
@Test
public abstract void getCertificateSpecificVersion(HttpClient httpClient, CertificateServiceVersion serviceVersion);
// NOTE(review): "testCertificate9" is also used by restoreCertificateRunner below;
// consider giving one of them a distinct id to keep the tests isolated.
void getCertificateSpecificVersionRunner(Consumer<String> testRunner) {
testRunner.accept(generateResourceId("testCertificate9"));
}
@Test
public abstract void getCertificateNotFound(HttpClient httpClient, CertificateServiceVersion serviceVersion);
@Test
public abstract void deleteCertificate(HttpClient httpClient, CertificateServiceVersion serviceVersion);
void deleteCertificateRunner(Consumer<String> testRunner) {
testRunner.accept(generateResourceId("testCert5"));
}
@Test
public abstract void deleteCertificateNotFound(HttpClient httpClient, CertificateServiceVersion serviceVersion);
@Test
public abstract void getDeletedCertificate(HttpClient httpClient, CertificateServiceVersion serviceVersion);
void getDeletedCertificateRunner(Consumer<String> testRunner) {
testRunner.accept(generateResourceId("testCert6"));
}
@Test
public abstract void getDeletedCertificateNotFound(HttpClient httpClient, CertificateServiceVersion serviceVersion);
@Test
public abstract void recoverDeletedCertificate(HttpClient httpClient, CertificateServiceVersion serviceVersion);
// NOTE(review): named "...KeyRunner" although it serves the certificate test;
// renaming requires updating the caller in the same change.
void recoverDeletedKeyRunner(Consumer<String> testRunner) {
testRunner.accept(generateResourceId("testCert7"));
}
@Test
public abstract void recoverDeletedCertificateNotFound(HttpClient httpClient, CertificateServiceVersion serviceVersion);
@Test
public abstract void backupCertificate(HttpClient httpClient, CertificateServiceVersion serviceVersion);
void backupCertificateRunner(Consumer<String> testRunner) {
testRunner.accept(generateResourceId("testCert8"));
}
@Test
public abstract void backupCertificateNotFound(HttpClient httpClient, CertificateServiceVersion serviceVersion);
@Test
public abstract void restoreCertificate(HttpClient httpClient, CertificateServiceVersion serviceVersion);
void restoreCertificateRunner(Consumer<String> testRunner) {
testRunner.accept(generateResourceId("testCertificate9"));
}
// Abstract declarations plus name-supplying runners for the certificate-operation
// and policy tests.
@Test
public abstract void getCertificateOperation(HttpClient httpClient, CertificateServiceVersion serviceVersion);
void getCertificateOperationRunner(Consumer<String> testRunner) {
testRunner.accept(generateResourceId("testCertificate10"));
}
@Test
public abstract void cancelCertificateOperation(HttpClient httpClient, CertificateServiceVersion serviceVersion);
void cancelCertificateOperationRunner(Consumer<String> testRunner) {
testRunner.accept(generateResourceId("testCertificate11"));
}
@Test
public abstract void deleteCertificateOperation(HttpClient httpClient, CertificateServiceVersion serviceVersion);
void deleteCertificateOperationRunner(Consumer<String> testRunner) {
testRunner.accept(generateResourceId("testCertificate12"));
}
@Test
public abstract void getCertificatePolicy(HttpClient httpClient, CertificateServiceVersion serviceVersion);
void getCertificatePolicyRunner(Consumer<String> testRunner) {
testRunner.accept(generateResourceId("testCertificate13"));
}
@Test
public abstract void updateCertificatePolicy(HttpClient httpClient, CertificateServiceVersion serviceVersion);
void updateCertificatePolicyRunner(Consumer<String> testRunner) {
testRunner.accept(generateResourceId("testCertificate14"));
}
@Test
public abstract void restoreCertificateFromMalformedBackup(HttpClient httpClient, CertificateServiceVersion serviceVersion);
@Test
public abstract void listCertificates(HttpClient httpClient, CertificateServiceVersion serviceVersion);
// Supplies two unique certificate names for the list test.
void listCertificatesRunner(Consumer<List<String>> testRunner) {
List<String> certificates = new ArrayList<>();
String certificateName;
for (int i = 0; i < 2; i++) {
certificateName = generateResourceId("listCertKey" + i);
certificates.add(certificateName);
}
testRunner.accept(certificates);
}
@Test
public abstract void createIssuer(HttpClient httpClient, CertificateServiceVersion serviceVersion);
// NOTE(review): "createIssuere" is misspelled; renaming requires updating the
// caller in the same change.
void createIssuereRunner(Consumer<CertificateIssuer> testRunner) {
final CertificateIssuer certificateIssuer = setupIssuer(generateResourceId("testIssuer01"));
testRunner.accept(certificateIssuer);
}
// Abstract declarations plus issuer-supplying runners for the issuer tests.
@Test
public abstract void createIssuerNull(HttpClient httpClient, CertificateServiceVersion serviceVersion);
@Test
public abstract void getCertificateIssuer(HttpClient httpClient, CertificateServiceVersion serviceVersion);
@Test
public abstract void getCertificateIssuerNotFound(HttpClient httpClient, CertificateServiceVersion serviceVersion);
void getCertificateIssuerRunner(Consumer<CertificateIssuer> testRunner) {
final CertificateIssuer certificateIssuer = setupIssuer(generateResourceId("testIssuer02"));
testRunner.accept(certificateIssuer);
}
@Test
public abstract void deleteCertificateIssuer(HttpClient httpClient, CertificateServiceVersion serviceVersion);
@Test
public abstract void deleteCertificateIssuerNotFound(HttpClient httpClient, CertificateServiceVersion serviceVersion);
void deleteCertificateIssuerRunner(Consumer<CertificateIssuer> testRunner) {
final CertificateIssuer certificateIssuer = setupIssuer(generateResourceId("testIssuer03"));
testRunner.accept(certificateIssuer);
}
@Test
public abstract void listCertificateIssuers(HttpClient httpClient, CertificateServiceVersion serviceVersion);
// Supplies ten uniquely named issuers for the list-issuers test.
void listCertificateIssuersRunner(Consumer<HashMap<String, CertificateIssuer>> testRunner) {
HashMap<String, CertificateIssuer> certificateIssuers = new HashMap<>();
String certificateIssuerName;
for (int i = 0; i < 10; i++) {
certificateIssuerName = generateResourceId("listCertIssuer" + i);
certificateIssuers.put(certificateIssuerName, setupIssuer(certificateIssuerName));
}
testRunner.accept(certificateIssuers);
}
@Test
public abstract void setContacts(HttpClient httpClient, CertificateServiceVersion serviceVersion);
@Test
public abstract void listContacts(HttpClient httpClient, CertificateServiceVersion serviceVersion);
@Test
public abstract void deleteContacts(HttpClient httpClient, CertificateServiceVersion serviceVersion);
@Test
public abstract void getCertificateOperatioNotFound(HttpClient httpClient, CertificateServiceVersion serviceVersion);
@Test
public abstract void getCertificatePolicyNotFound(HttpClient httpClient, CertificateServiceVersion serviceVersion);
CertificateContact setupContact() {
return new CertificateContact().setName("name").setEmail("first.last@gmail.com").setPhone("2323-31232");
}
Boolean validateContact(CertificateContact expected, CertificateContact actual) {
return expected.getEmail().equals(actual.getEmail())
&& expected.getName().equals(actual.getName())
&& expected.getPhone().equals(actual.getPhone());
}
@Test
public abstract void listCertificateVersions(HttpClient httpClient, CertificateServiceVersion serviceVersion);
void listCertificateVersionsRunner(Consumer<List<String>> testRunner) {
List<String> certificates = new ArrayList<>();
String certificateName = generateResourceId("listCertVersionTest");
for (int i = 1; i < 5; i++) {
certificates.add(certificateName);
}
testRunner.accept(certificates);
}
@Test
public abstract void listDeletedCertificates(HttpClient httpClient, CertificateServiceVersion serviceVersion);
void listDeletedCertificatesRunner(Consumer<List<String>> testRunner) {
List<String> certificates = new ArrayList<>();
String certificateName;
for (int i = 0; i < 3; i++) {
certificateName = generateResourceId("listDeletedCertificate" + i);
certificates.add(certificateName);
}
testRunner.accept(certificates);
}
@Test
public abstract void importCertificate(HttpClient httpClient, CertificateServiceVersion serviceVersion);
void importCertificateRunner(Consumer<ImportCertificateOptions> testRunner) {
String certificateContent = "MIIJOwIBAzCCCPcGCSqGSIb3DQEHAaCCCOgEggjkMIII4DCCBgkGCSqGSIb3DQEHAaCCBfoEggX2MIIF8jCCBe4GCyqGSIb3DQEMCgECoIIE/jCCBPowHAYKKoZIhvcNAQwBAzAOBAj15YH9pOE58AICB9AEggTYLrI+SAru2dBZRQRlJY7XQ3LeLkah2FcRR3dATDshZ2h0IA2oBrkQIdsLyAAWZ32qYR1qkWxLHn9AqXgu27AEbOk35+pITZaiy63YYBkkpR+pDdngZt19Z0PWrGwHEq5z6BHS2GLyyN8SSOCbdzCz7blj3+7IZYoMj4WOPgOm/tQ6U44SFWek46QwN2zeA4i97v7ftNNns27ms52jqfhOvTA9c/wyfZKAY4aKJfYYUmycKjnnRl012ldS2lOkASFt+lu4QCa72IY6ePtRudPCvmzRv2pkLYS6z3cI7omT8nHP3DymNOqLbFqr5O2M1ZYaLC63Q3xt3eVvbcPh3N08D1hHkhz/KDTvkRAQpvrW8ISKmgDdmzN55Pe55xHfSWGB7gPw8sZea57IxFzWHTK2yvTslooWoosmGxanYY2IG/no3EbPOWDKjPZ4ilYJe5JJ2immlxPz+2e2EOCKpDI+7fzQcRz3PTd3BK+budZ8aXX8aW/lOgKS8WmxZoKnOJBNWeTNWQFugmktXfdPHAdxMhjUXqeGQd8wTvZ4EzQNNafovwkI7IV/ZYoa++RGofVR3ZbRSiBNF6TDj/qXFt0wN/CQnsGAmQAGNiN+D4mY7i25dtTu/Jc7OxLdhAUFpHyJpyrYWLfvOiS5WYBeEDHkiPUa/8eZSPA3MXWZR1RiuDvuNqMjct1SSwdXADTtF68l/US1ksU657+XSC+6ly1A/upz+X71+C4Ho6W0751j5ZMT6xKjGh5pee7MVuduxIzXjWIy3YSd0fIT3U0A5NLEvJ9rfkx6JiHjRLx6V1tqsrtT6BsGtmCQR1UCJPLqsKVDvAINx3cPA/CGqr5OX2BGZlAihGmN6n7gv8w4O0k0LPTAe5YefgXN3m9pE867N31GtHVZaJ/UVgDNYS2jused4rw76ZWN41akx2QN0JSeMJqHXqVz6AKfz8ICS/dFnEGyBNpXiMRxrY/QPKi/wONwqsbDxRW7vZRVKs78pBkE0ksaShlZk5GkeayDWC/7Hi/NqUFtIloK9XB3paLxo1DGu5qqaF34jZdktzkXp0uZqpp+FfKZaiovMjt8F7yHCPk+LYpRsU2Cyc9DVoDA6rIgf+uEP4jppgehsxyT0lJHax2t869R2jYdsXwYUXjgwHIV0voj7bJYPGFlFjXOp6ZW86scsHM5xfsGQoK2Fp838VT34SHE1ZXU/puM7rviREHYW72pfpgGZUILQMohuTPnd8tFtAkbrmjLDo+k9xx7HUvgoFTiNNWuq/cRjr70FKNguMMTIrid+HwfmbRoaxENWdLcOTNeascER2a+37UQolKD5ksrPJG6RdNA7O2pzp3micDYRs/+s28cCIxO
String certificatePassword = "123";
String certificateName = generateResourceId("importCertPkcs");
HashMap<String, String> tags = new HashMap<>();
tags.put("key", "val");
ImportCertificateOptions importCertificateOptions = new ImportCertificateOptions(certificateName, Base64.getDecoder().decode(certificateContent))
.setPassword(certificatePassword)
.setEnabled(true)
.setTags(tags);
testRunner.accept(importCertificateOptions);
}
@Test
public abstract void importPemCertificate(HttpClient httpClient, CertificateServiceVersion serviceVersion) throws IOException;
void importPemCertificateRunner(Consumer<ImportCertificateOptions> testRunner) throws IOException {
byte[] certificateContent = readCertificate("certificate.pem");
String certificateName = generateResourceId("importCertPem");
HashMap<String, String> tags = new HashMap<>();
tags.put("key", "val");
ImportCertificateOptions importCertificateOptions = new ImportCertificateOptions(certificateName, certificateContent)
.setPolicy(new CertificatePolicy("Self", "CN=AzureSDK")
.setContentType(CertificateContentType.PEM))
.setEnabled(true)
.setTags(tags);
testRunner.accept(importCertificateOptions);
}
@Test
public abstract void mergeCertificateNotFound(HttpClient httpClient, CertificateServiceVersion serviceVersion);
private byte[] readCertificate(String certName) throws IOException {
String pemPath = getClass().getClassLoader().getResource(certName).getPath();
if (pemPath.contains(":")) {
pemPath = pemPath.substring(1);
}
return Files.readAllBytes(Paths.get(pemPath));
}
CertificateIssuer setupIssuer(String issuerName) {
return new CertificateIssuer(issuerName, "Test")
.setAdministratorContacts(Arrays.asList(new AdministratorContact().setFirstName("first").setLastName("last").setEmail("first.last@hotmail.com").setPhone("12345")))
.setAccountId("issuerAccountId")
.setEnabled(true)
.setOrganizationId("orgId")
.setPassword("test123");
}
String toHexString(byte[] x5t) {
if (x5t == null) {
return "";
}
StringBuilder hexString = new StringBuilder();
for (int i = 0; i < x5t.length; i++) {
String hex = Integer.toHexString(0xFF & x5t[i]);
if (hex.length() == 1) {
hexString.append('0');
}
hexString.append(hex);
}
return hexString.toString().replace("-", "");
}
X509Certificate loadCerToX509Certificate(KeyVaultCertificateWithPolicy certificate) throws CertificateException, IOException {
assertNotNull(certificate.getCer());
ByteArrayInputStream cerStream = new ByteArrayInputStream(certificate.getCer());
CertificateFactory certificateFactory = CertificateFactory.getInstance("X.509");
X509Certificate x509Certificate = (X509Certificate) certificateFactory.generateCertificate(cerStream);
cerStream.close();
return x509Certificate;
}
Boolean validateIssuer(CertificateIssuer expected, CertificateIssuer actual) {
return expected.getAccountId().equals(actual.getAccountId())
&& expected.isEnabled().equals(actual.isEnabled())
&& (actual.getCreatedOn() != null)
&& (actual.getUpdatedOn() != null)
&& (actual.getId() != null)
&& (actual.getId().length() > 0)
&& expected.getName().equals(actual.getName())
&& expected.getOrganizationId().equals(actual.getOrganizationId())
&& expected.getAdministratorContacts().size() == actual.getAdministratorContacts().size();
}
CertificatePolicy setupPolicy() {
return new CertificatePolicy(WellKnownIssuerNames.SELF, "CN=default")
.setKeyUsage(CertificateKeyUsage.KEY_CERT_SIGN, CertificateKeyUsage.KEY_AGREEMENT)
.setContentType(CertificateContentType.PKCS12)
.setExportable(true)
.setKeyType(CertificateKeyType.EC)
.setCertificateTransparent(false)
.setEnabled(true)
.setKeyCurveName(CertificateKeyCurveName.P_384)
.setKeyReusable(true)
.setValidityInMonths(24)
.setLifetimeActions(new LifetimeAction(CertificatePolicyAction.AUTO_RENEW).setDaysBeforeExpiry(40));
}
boolean validatePolicy(CertificatePolicy expected, CertificatePolicy actual) {
return expected.getKeyType().equals(actual.getKeyType())
&& expected.getContentType().equals(actual.getContentType())
&& actual.getCreatedOn() != null
&& expected.getIssuerName().equals(actual.getIssuerName())
&& expected.getKeyCurveName().equals(actual.getKeyCurveName())
&& expected.isExportable().equals(actual.isExportable())
&& expected.isCertificateTransparent().equals(actual.isCertificateTransparent())
&& expected.isEnabled().equals(actual.isEnabled())
&& expected.isKeyReusable().equals(actual.isKeyReusable())
&& expected.getValidityInMonths().equals(actual.getValidityInMonths())
&& expected.getLifetimeActions().size() == actual.getLifetimeActions().size()
&& expected.getKeyUsage().size() == actual.getKeyUsage().size();
}
boolean validateCertificate(KeyVaultCertificate expected, KeyVaultCertificate actual) {
return expected.getId().equals(actual.getId())
&& expected.getKeyId().equals(actual.getKeyId())
&& expected.getName().equals(actual.getName())
&& expected.getSecretId().equals(actual.getSecretId())
&& expected.getProperties().getVersion().equals(actual.getProperties().getVersion())
&& expected.getProperties().getCreatedOn().equals(actual.getProperties().getCreatedOn())
&& expected.getProperties().getExpiresOn().equals(actual.getProperties().getExpiresOn())
&& expected.getProperties().getRecoveryLevel().equals(actual.getProperties().getRecoveryLevel())
&& expected.getProperties().getX509Thumbprint().length == actual.getProperties().getX509Thumbprint().length
&& expected.getCer().length == actual.getCer().length;
}
public String getEndpoint() {
final String endpoint = interceptorManager.isPlaybackMode()
? "http:
: System.getenv("AZURE_KEYVAULT_ENDPOINT");
Objects.requireNonNull(endpoint);
return endpoint;
}
static void assertRestException(Runnable exceptionThrower, int expectedStatusCode) {
assertRestException(exceptionThrower, HttpResponseException.class, expectedStatusCode);
}
static void assertRestException(Runnable exceptionThrower, Class<? extends HttpResponseException> expectedExceptionType, int expectedStatusCode) {
try {
exceptionThrower.run();
fail();
} catch (Throwable ex) {
assertRestException(ex, expectedExceptionType, expectedStatusCode);
}
}
String generateResourceId(String suffix) {
if (interceptorManager.isPlaybackMode()) {
return suffix;
}
String id = UUID.randomUUID().toString();
return suffix.length() > 0 ? id + "-" + suffix : id;
}
/**
* Helper method to verify the error was a HttpRequestException and it has a specific HTTP response code.
*
* @param exception Expected error thrown during the test
* @param expectedStatusCode Expected HTTP status code contained in the error response
*/
static void assertRestException(Throwable exception, int expectedStatusCode) {
assertRestException(exception, HttpResponseException.class, expectedStatusCode);
}
static void assertRestException(Throwable exception, Class<? extends HttpResponseException> expectedExceptionType, int expectedStatusCode) {
assertEquals(expectedExceptionType, exception.getClass());
assertEquals(expectedStatusCode, ((HttpResponseException) exception).getResponse().getStatusCode());
}
/**
* Helper method to verify that a command throws an IllegalArgumentException.
*
* @param exceptionThrower Command that should throw the exception
*/
static <T> void assertRunnableThrowsException(Runnable exceptionThrower, Class<T> exception) {
try {
exceptionThrower.run();
fail();
} catch (Exception ex) {
assertEquals(exception, ex.getClass());
}
}
public void sleepInRecordMode(long millis) {
if (interceptorManager.isPlaybackMode()) {
return;
}
try {
Thread.sleep(millis);
} catch (InterruptedException e) {
e.printStackTrace();
}
}
public void sleep(long millis) {
try {
Thread.sleep(millis);
} catch (InterruptedException e) {
e.printStackTrace();
}
}
/**
* Returns a stream of arguments that includes all combinations of eligible {@link HttpClient HttpClients} and
* service versions that should be tested.
*
* @return A stream of HttpClient and service version combinations to test.
*/
static Stream<Arguments> getTestParameters() {
List<Arguments> argumentsList = new ArrayList<>();
getHttpClients()
.forEach(httpClient -> {
Arrays.stream(CertificateServiceVersion.values()).filter(
CertificateClientTestBase::shouldServiceVersionBeTested)
.forEach(serviceVersion -> argumentsList.add(Arguments.of(httpClient, serviceVersion)));
});
return argumentsList.stream();
}
static boolean shouldServiceVersionBeTested(CertificateServiceVersion serviceVersion) {
if (Configuration.getGlobalConfiguration().get(AZURE_TEST_SERVICE_VERSIONS) == null) {
return CertificateServiceVersion.getLatest().equals(serviceVersion);
}
return true;
}
} | HttpPipeline getHttpPipeline(HttpClient httpClient, CertificateServiceVersion serviceVersion) {
TokenCredential credential = null;
if (!interceptorManager.isPlaybackMode()) {
String clientId = System.getenv("ARM_CLIENTID");
String clientKey = System.getenv("ARM_CLIENTKEY");
String tenantId = System.getenv("AZURE_TENANT_ID");
Objects.requireNonNull(clientId, "The client id cannot be null");
Objects.requireNonNull(clientKey, "The client key cannot be null");
Objects.requireNonNull(tenantId, "The tenant id cannot be null");
credential = new ClientSecretCredentialBuilder()
.clientSecret(clientKey)
.clientId(clientId)
.tenantId(tenantId)
.build();
}
final List<HttpPipelinePolicy> policies = new ArrayList<>();
policies.add(new UserAgentPolicy(SDK_NAME, SDK_VERSION,
Configuration.getGlobalConfiguration().clone(), serviceVersion));
HttpPolicyProviders.addBeforeRetryPolicies(policies);
policies.add(new RetryPolicy());
if (credential != null) {
policies.add(new BearerTokenAuthenticationPolicy(credential, CertificateAsyncClient.KEY_VAULT_SCOPE));
}
HttpPolicyProviders.addAfterRetryPolicies(policies);
policies.add(new HttpLoggingPolicy(new HttpLogOptions().setLogLevel(HttpLogDetailLevel.BODY_AND_HEADERS)));
if (!interceptorManager.isPlaybackMode()) {
policies.add(interceptorManager.getRecordPolicy());
}
HttpPipeline pipeline = new HttpPipelineBuilder()
.policies(policies.toArray(new HttpPipelinePolicy[0]))
.httpClient(httpClient == null ? interceptorManager.getPlaybackClient() : httpClient)
.build();
return pipeline;
} | class CertificateClientTestBase extends TestBase {
static final String DISPLAY_NAME_WITH_ARGUMENTS = "{displayName} with [{arguments}]";
private static final String SDK_NAME = "client_name";
private static final String SDK_VERSION = "client_version";
@Override
protected String getTestName() {
return "";
}
void beforeTestSetup() {
} | class CertificateClientTestBase extends TestBase {
static final String DISPLAY_NAME_WITH_ARGUMENTS = "{displayName} with [{arguments}]";
private static final String SDK_NAME = "client_name";
private static final String SDK_VERSION = "client_version";
private static final String AZURE_KEYVAULT_TEST_CERTIFICATE_SERVICE_VERSIONS = "AZURE_KEYVAULT_TEST_CERTIFICATE_SERVICE_VERSIONS";
private static final String SERVICE_VERSION_FROM_ENV =
Configuration.getGlobalConfiguration().get(AZURE_KEYVAULT_TEST_CERTIFICATE_SERVICE_VERSIONS);
private static final String AZURE_TEST_SERVICE_VERSIONS_VALUE_ALL = "ALL";
@Override
protected String getTestName() {
return "";
}
void beforeTestSetup() {
}
@Test
public abstract void createCertificate(HttpClient httpClient, CertificateServiceVersion serviceVersion);
void createCertificateRunner(Consumer<CertificatePolicy> testRunner) {
final CertificatePolicy certificatePolicy = CertificatePolicy.getDefault();
testRunner.accept(certificatePolicy);
}
@Test
public abstract void createCertificateEmptyName(HttpClient httpClient, CertificateServiceVersion serviceVersion);
@Test
public abstract void createCertificateNullPolicy(HttpClient httpClient, CertificateServiceVersion serviceVersion);
@Test public abstract void createCertoificateNull(HttpClient httpClient, CertificateServiceVersion serviceVersion);
@Test
public abstract void updateCertificate(HttpClient httpClient, CertificateServiceVersion serviceVersion);
void updateCertificateRunner(BiConsumer<Map<String, String>, Map<String, String>> testRunner) {
final Map<String, String> tags = new HashMap<>();
tags.put("first tag", "first value");
final Map<String, String> updatedTags = new HashMap<>();
tags.put("first tag", "first value");
tags.put("second tag", "second value");
testRunner.accept(tags, updatedTags);
}
@Test
public abstract void updateDisabledCertificate(HttpClient httpClient, CertificateServiceVersion serviceVersion);
void updateDisabledCertificateRunner(BiConsumer<Map<String, String>, Map<String, String>> testRunner) {
final Map<String, String> tags = new HashMap<>();
tags.put("first tag", "first value");
final Map<String, String> updatedTags = new HashMap<>();
tags.put("first tag", "first value");
tags.put("second tag", "second value");
testRunner.accept(tags, updatedTags);
}
@Test
public abstract void getCertificate(HttpClient httpClient, CertificateServiceVersion serviceVersion);
void getCertificateRunner(Consumer<String> testRunner) {
testRunner.accept(generateResourceId("testCertificate4"));
}
@Test
public abstract void getCertificateSpecificVersion(HttpClient httpClient, CertificateServiceVersion serviceVersion);
void getCertificateSpecificVersionRunner(Consumer<String> testRunner) {
testRunner.accept(generateResourceId("testCertificate9"));
}
@Test
public abstract void getCertificateNotFound(HttpClient httpClient, CertificateServiceVersion serviceVersion);
@Test
public abstract void deleteCertificate(HttpClient httpClient, CertificateServiceVersion serviceVersion);
void deleteCertificateRunner(Consumer<String> testRunner) {
testRunner.accept(generateResourceId("testCert5"));
}
@Test
public abstract void deleteCertificateNotFound(HttpClient httpClient, CertificateServiceVersion serviceVersion);
@Test
public abstract void getDeletedCertificate(HttpClient httpClient, CertificateServiceVersion serviceVersion);
void getDeletedCertificateRunner(Consumer<String> testRunner) {
testRunner.accept(generateResourceId("testCert6"));
}
@Test
public abstract void getDeletedCertificateNotFound(HttpClient httpClient, CertificateServiceVersion serviceVersion);
@Test
public abstract void recoverDeletedCertificate(HttpClient httpClient, CertificateServiceVersion serviceVersion);
void recoverDeletedKeyRunner(Consumer<String> testRunner) {
testRunner.accept(generateResourceId("testCert7"));
}
@Test
public abstract void recoverDeletedCertificateNotFound(HttpClient httpClient, CertificateServiceVersion serviceVersion);
@Test
public abstract void backupCertificate(HttpClient httpClient, CertificateServiceVersion serviceVersion);
void backupCertificateRunner(Consumer<String> testRunner) {
testRunner.accept(generateResourceId("testCert8"));
}
@Test
public abstract void backupCertificateNotFound(HttpClient httpClient, CertificateServiceVersion serviceVersion);
@Test
public abstract void restoreCertificate(HttpClient httpClient, CertificateServiceVersion serviceVersion);
void restoreCertificateRunner(Consumer<String> testRunner) {
testRunner.accept(generateResourceId("testCertificate9"));
}
@Test
public abstract void getCertificateOperation(HttpClient httpClient, CertificateServiceVersion serviceVersion);
void getCertificateOperationRunner(Consumer<String> testRunner) {
testRunner.accept(generateResourceId("testCertificate10"));
}
@Test
public abstract void cancelCertificateOperation(HttpClient httpClient, CertificateServiceVersion serviceVersion);
void cancelCertificateOperationRunner(Consumer<String> testRunner) {
testRunner.accept(generateResourceId("testCertificate11"));
}
@Test
public abstract void deleteCertificateOperation(HttpClient httpClient, CertificateServiceVersion serviceVersion);
void deleteCertificateOperationRunner(Consumer<String> testRunner) {
testRunner.accept(generateResourceId("testCertificate12"));
}
@Test
public abstract void getCertificatePolicy(HttpClient httpClient, CertificateServiceVersion serviceVersion);
void getCertificatePolicyRunner(Consumer<String> testRunner) {
testRunner.accept(generateResourceId("testCertificate13"));
}
@Test
public abstract void updateCertificatePolicy(HttpClient httpClient, CertificateServiceVersion serviceVersion);
void updateCertificatePolicyRunner(Consumer<String> testRunner) {
testRunner.accept(generateResourceId("testCertificate14"));
}
@Test
public abstract void restoreCertificateFromMalformedBackup(HttpClient httpClient, CertificateServiceVersion serviceVersion);
@Test
public abstract void listCertificates(HttpClient httpClient, CertificateServiceVersion serviceVersion);
void listCertificatesRunner(Consumer<List<String>> testRunner) {
List<String> certificates = new ArrayList<>();
String certificateName;
for (int i = 0; i < 2; i++) {
certificateName = generateResourceId("listCertKey" + i);
certificates.add(certificateName);
}
testRunner.accept(certificates);
}
@Test
public abstract void createIssuer(HttpClient httpClient, CertificateServiceVersion serviceVersion);
void createIssuereRunner(Consumer<CertificateIssuer> testRunner) {
final CertificateIssuer certificateIssuer = setupIssuer(generateResourceId("testIssuer01"));
testRunner.accept(certificateIssuer);
}
@Test
public abstract void createIssuerNull(HttpClient httpClient, CertificateServiceVersion serviceVersion);
@Test
public abstract void getCertificateIssuer(HttpClient httpClient, CertificateServiceVersion serviceVersion);
@Test
public abstract void getCertificateIssuerNotFound(HttpClient httpClient, CertificateServiceVersion serviceVersion);
void getCertificateIssuerRunner(Consumer<CertificateIssuer> testRunner) {
final CertificateIssuer certificateIssuer = setupIssuer(generateResourceId("testIssuer02"));
testRunner.accept(certificateIssuer);
}
@Test
public abstract void deleteCertificateIssuer(HttpClient httpClient, CertificateServiceVersion serviceVersion);
@Test
public abstract void deleteCertificateIssuerNotFound(HttpClient httpClient, CertificateServiceVersion serviceVersion);
void deleteCertificateIssuerRunner(Consumer<CertificateIssuer> testRunner) {
final CertificateIssuer certificateIssuer = setupIssuer(generateResourceId("testIssuer03"));
testRunner.accept(certificateIssuer);
}
@Test
public abstract void listCertificateIssuers(HttpClient httpClient, CertificateServiceVersion serviceVersion);
void listCertificateIssuersRunner(Consumer<HashMap<String, CertificateIssuer>> testRunner) {
HashMap<String, CertificateIssuer> certificateIssuers = new HashMap<>();
String certificateIssuerName;
for (int i = 0; i < 10; i++) {
certificateIssuerName = generateResourceId("listCertIssuer" + i);
certificateIssuers.put(certificateIssuerName, setupIssuer(certificateIssuerName));
}
testRunner.accept(certificateIssuers);
}
@Test
public abstract void setContacts(HttpClient httpClient, CertificateServiceVersion serviceVersion);
@Test
public abstract void listContacts(HttpClient httpClient, CertificateServiceVersion serviceVersion);
@Test
public abstract void deleteContacts(HttpClient httpClient, CertificateServiceVersion serviceVersion);
@Test
public abstract void getCertificateOperatioNotFound(HttpClient httpClient, CertificateServiceVersion serviceVersion);
@Test
public abstract void getCertificatePolicyNotFound(HttpClient httpClient, CertificateServiceVersion serviceVersion);
CertificateContact setupContact() {
return new CertificateContact().setName("name").setEmail("first.last@gmail.com").setPhone("2323-31232");
}
Boolean validateContact(CertificateContact expected, CertificateContact actual) {
return expected.getEmail().equals(actual.getEmail())
&& expected.getName().equals(actual.getName())
&& expected.getPhone().equals(actual.getPhone());
}
@Test
public abstract void listCertificateVersions(HttpClient httpClient, CertificateServiceVersion serviceVersion);
void listCertificateVersionsRunner(Consumer<List<String>> testRunner) {
List<String> certificates = new ArrayList<>();
String certificateName = generateResourceId("listCertVersionTest");
for (int i = 1; i < 5; i++) {
certificates.add(certificateName);
}
testRunner.accept(certificates);
}
@Test
public abstract void listDeletedCertificates(HttpClient httpClient, CertificateServiceVersion serviceVersion);
void listDeletedCertificatesRunner(Consumer<List<String>> testRunner) {
List<String> certificates = new ArrayList<>();
String certificateName;
for (int i = 0; i < 3; i++) {
certificateName = generateResourceId("listDeletedCertificate" + i);
certificates.add(certificateName);
}
testRunner.accept(certificates);
}
@Test
public abstract void importCertificate(HttpClient httpClient, CertificateServiceVersion serviceVersion);
void importCertificateRunner(Consumer<ImportCertificateOptions> testRunner) {
String certificateContent = "MIIJOwIBAzCCCPcGCSqGSIb3DQEHAaCCCOgEggjkMIII4DCCBgkGCSqGSIb3DQEHAaCCBfoEggX2MIIF8jCCBe4GCyqGSIb3DQEMCgECoIIE/jCCBPowHAYKKoZIhvcNAQwBAzAOBAj15YH9pOE58AICB9AEggTYLrI+SAru2dBZRQRlJY7XQ3LeLkah2FcRR3dATDshZ2h0IA2oBrkQIdsLyAAWZ32qYR1qkWxLHn9AqXgu27AEbOk35+pITZaiy63YYBkkpR+pDdngZt19Z0PWrGwHEq5z6BHS2GLyyN8SSOCbdzCz7blj3+7IZYoMj4WOPgOm/tQ6U44SFWek46QwN2zeA4i97v7ftNNns27ms52jqfhOvTA9c/wyfZKAY4aKJfYYUmycKjnnRl012ldS2lOkASFt+lu4QCa72IY6ePtRudPCvmzRv2pkLYS6z3cI7omT8nHP3DymNOqLbFqr5O2M1ZYaLC63Q3xt3eVvbcPh3N08D1hHkhz/KDTvkRAQpvrW8ISKmgDdmzN55Pe55xHfSWGB7gPw8sZea57IxFzWHTK2yvTslooWoosmGxanYY2IG/no3EbPOWDKjPZ4ilYJe5JJ2immlxPz+2e2EOCKpDI+7fzQcRz3PTd3BK+budZ8aXX8aW/lOgKS8WmxZoKnOJBNWeTNWQFugmktXfdPHAdxMhjUXqeGQd8wTvZ4EzQNNafovwkI7IV/ZYoa++RGofVR3ZbRSiBNF6TDj/qXFt0wN/CQnsGAmQAGNiN+D4mY7i25dtTu/Jc7OxLdhAUFpHyJpyrYWLfvOiS5WYBeEDHkiPUa/8eZSPA3MXWZR1RiuDvuNqMjct1SSwdXADTtF68l/US1ksU657+XSC+6ly1A/upz+X71+C4Ho6W0751j5ZMT6xKjGh5pee7MVuduxIzXjWIy3YSd0fIT3U0A5NLEvJ9rfkx6JiHjRLx6V1tqsrtT6BsGtmCQR1UCJPLqsKVDvAINx3cPA/CGqr5OX2BGZlAihGmN6n7gv8w4O0k0LPTAe5YefgXN3m9pE867N31GtHVZaJ/UVgDNYS2jused4rw76ZWN41akx2QN0JSeMJqHXqVz6AKfz8ICS/dFnEGyBNpXiMRxrY/QPKi/wONwqsbDxRW7vZRVKs78pBkE0ksaShlZk5GkeayDWC/7Hi/NqUFtIloK9XB3paLxo1DGu5qqaF34jZdktzkXp0uZqpp+FfKZaiovMjt8F7yHCPk+LYpRsU2Cyc9DVoDA6rIgf+uEP4jppgehsxyT0lJHax2t869R2jYdsXwYUXjgwHIV0voj7bJYPGFlFjXOp6ZW86scsHM5xfsGQoK2Fp838VT34SHE1ZXU/puM7rviREHYW72pfpgGZUILQMohuTPnd8tFtAkbrmjLDo+k9xx7HUvgoFTiNNWuq/cRjr70FKNguMMTIrid+HwfmbRoaxENWdLcOTNeascER2a+37UQolKD5ksrPJG6RdNA7O2pzp3micDYRs/+s28cCIxO
String certificatePassword = "123";
String certificateName = generateResourceId("importCertPkcs");
HashMap<String, String> tags = new HashMap<>();
tags.put("key", "val");
ImportCertificateOptions importCertificateOptions = new ImportCertificateOptions(certificateName, Base64.getDecoder().decode(certificateContent))
.setPassword(certificatePassword)
.setEnabled(true)
.setTags(tags);
testRunner.accept(importCertificateOptions);
}
@Test
public abstract void importPemCertificate(HttpClient httpClient, CertificateServiceVersion serviceVersion) throws IOException;
void importPemCertificateRunner(Consumer<ImportCertificateOptions> testRunner) throws IOException {
byte[] certificateContent = readCertificate("pemCert.pem");
String certificateName = generateResourceId("importCertPem");
HashMap<String, String> tags = new HashMap<>();
tags.put("key", "val");
ImportCertificateOptions importCertificateOptions = new ImportCertificateOptions(certificateName, certificateContent)
.setPolicy(new CertificatePolicy("Self", "CN=AzureSDK")
.setContentType(CertificateContentType.PEM))
.setEnabled(true)
.setTags(tags);
testRunner.accept(importCertificateOptions);
}
@Test
public abstract void mergeCertificateNotFound(HttpClient httpClient, CertificateServiceVersion serviceVersion);
private byte[] readCertificate(String certName) throws IOException {
String pemPath = getClass().getClassLoader().getResource(certName).getPath();
String pemCert = "";
BufferedReader br = new BufferedReader(new FileReader(pemPath));
try {
String line;
while ((line = br.readLine()) != null) {
pemCert += line + "\n";
}
} finally {
br.close();
}
return pemCert.getBytes();
}
CertificateIssuer setupIssuer(String issuerName) {
return new CertificateIssuer(issuerName, "Test")
.setAdministratorContacts(Arrays.asList(new AdministratorContact().setFirstName("first").setLastName("last").setEmail("first.last@hotmail.com").setPhone("12345")))
.setAccountId("issuerAccountId")
.setEnabled(true)
.setOrganizationId("orgId")
.setPassword("test123");
}
String toHexString(byte[] x5t) {
if (x5t == null) {
return "";
}
StringBuilder hexString = new StringBuilder();
for (int i = 0; i < x5t.length; i++) {
String hex = Integer.toHexString(0xFF & x5t[i]);
if (hex.length() == 1) {
hexString.append('0');
}
hexString.append(hex);
}
return hexString.toString().replace("-", "");
}
X509Certificate loadCerToX509Certificate(KeyVaultCertificateWithPolicy certificate) throws CertificateException, IOException {
assertNotNull(certificate.getCer());
ByteArrayInputStream cerStream = new ByteArrayInputStream(certificate.getCer());
CertificateFactory certificateFactory = CertificateFactory.getInstance("X.509");
X509Certificate x509Certificate = (X509Certificate) certificateFactory.generateCertificate(cerStream);
cerStream.close();
return x509Certificate;
}
Boolean validateIssuer(CertificateIssuer expected, CertificateIssuer actual) {
return expected.getAccountId().equals(actual.getAccountId())
&& expected.isEnabled().equals(actual.isEnabled())
&& (actual.getCreatedOn() != null)
&& (actual.getUpdatedOn() != null)
&& (actual.getId() != null)
&& (actual.getId().length() > 0)
&& expected.getName().equals(actual.getName())
&& expected.getOrganizationId().equals(actual.getOrganizationId())
&& expected.getAdministratorContacts().size() == actual.getAdministratorContacts().size();
}
CertificatePolicy setupPolicy() {
return new CertificatePolicy(WellKnownIssuerNames.SELF, "CN=default")
.setKeyUsage(CertificateKeyUsage.KEY_CERT_SIGN, CertificateKeyUsage.KEY_AGREEMENT)
.setContentType(CertificateContentType.PKCS12)
.setExportable(true)
.setKeyType(CertificateKeyType.EC)
.setCertificateTransparent(false)
.setEnabled(true)
.setKeyCurveName(CertificateKeyCurveName.P_384)
.setKeyReusable(true)
.setValidityInMonths(24)
.setLifetimeActions(new LifetimeAction(CertificatePolicyAction.AUTO_RENEW).setDaysBeforeExpiry(40));
}
boolean validatePolicy(CertificatePolicy expected, CertificatePolicy actual) {
return expected.getKeyType().equals(actual.getKeyType())
&& expected.getContentType().equals(actual.getContentType())
&& actual.getCreatedOn() != null
&& expected.getIssuerName().equals(actual.getIssuerName())
&& expected.getKeyCurveName().equals(actual.getKeyCurveName())
&& expected.isExportable().equals(actual.isExportable())
&& expected.isCertificateTransparent().equals(actual.isCertificateTransparent())
&& expected.isEnabled().equals(actual.isEnabled())
&& expected.isKeyReusable().equals(actual.isKeyReusable())
&& expected.getValidityInMonths().equals(actual.getValidityInMonths())
&& expected.getLifetimeActions().size() == actual.getLifetimeActions().size()
&& expected.getKeyUsage().size() == actual.getKeyUsage().size();
}
boolean validateCertificate(KeyVaultCertificate expected, KeyVaultCertificate actual) {
return expected.getId().equals(actual.getId())
&& expected.getKeyId().equals(actual.getKeyId())
&& expected.getName().equals(actual.getName())
&& expected.getSecretId().equals(actual.getSecretId())
&& expected.getProperties().getVersion().equals(actual.getProperties().getVersion())
&& expected.getProperties().getCreatedOn().equals(actual.getProperties().getCreatedOn())
&& expected.getProperties().getExpiresOn().equals(actual.getProperties().getExpiresOn())
&& expected.getProperties().getRecoveryLevel().equals(actual.getProperties().getRecoveryLevel())
&& expected.getProperties().getX509Thumbprint().length == actual.getProperties().getX509Thumbprint().length
&& expected.getCer().length == actual.getCer().length;
}
// Resolves the Key Vault endpoint: a fixed local URL in playback mode, or the
// AZURE_KEYVAULT_ENDPOINT environment variable when recording/running live.
// NOTE(review): the playback literal below is truncated/unterminated ("http:) —
// almost certainly a full URL such as "http://localhost:8080" that lost its
// "//..." tail to comment stripping. Restore it from VCS history; TODO confirm.
public String getEndpoint() {
    final String endpoint = interceptorManager.isPlaybackMode()
        ? "http:
        : System.getenv("AZURE_KEYVAULT_ENDPOINT");
    Objects.requireNonNull(endpoint);
    return endpoint;
}
// Convenience overload: asserts the action throws the base HttpResponseException
// type with the given HTTP status code.
static void assertRestException(Runnable exceptionThrower, int expectedStatusCode) {
    assertRestException(exceptionThrower, HttpResponseException.class, expectedStatusCode);
}
// Runs the action and asserts it throws exactly the given HttpResponseException
// subtype carrying the given HTTP status code; fails the test if nothing is thrown.
// NOTE(review): if the action completes normally, the AssertionError from fail()
// is itself caught by the catch (Throwable) below and re-asserted, so the test
// still fails but with a misleading "wrong exception type" message — consider
// rethrowing AssertionError before delegating.
static void assertRestException(Runnable exceptionThrower, Class<? extends HttpResponseException> expectedExceptionType, int expectedStatusCode) {
    try {
        exceptionThrower.run();
        fail();
    } catch (Throwable ex) {
        assertRestException(ex, expectedExceptionType, expectedStatusCode);
    }
}
/**
 * Produces a resource name for the test run.
 *
 * <p>In playback mode the recorded session expects stable names, so the suffix
 * is returned unchanged. Otherwise a random UUID is used, with the suffix
 * appended (when non-empty) to keep the name human-readable.
 *
 * @param suffix Descriptive suffix to embed in the generated name.
 * @return The resource identifier to use for this run.
 */
String generateResourceId(String suffix) {
    if (interceptorManager.isPlaybackMode()) {
        return suffix;
    }
    final String id = UUID.randomUUID().toString();
    return suffix.isEmpty() ? id : id + "-" + suffix;
}
/**
 * Helper method to verify the error was an {@link HttpResponseException} and it
 * has a specific HTTP response status code. Delegates to the typed overload
 * using the base exception type.
 *
 * @param exception Expected error thrown during the test
 * @param expectedStatusCode Expected HTTP status code contained in the error response
 */
static void assertRestException(Throwable exception, int expectedStatusCode) {
    assertRestException(exception, HttpResponseException.class, expectedStatusCode);
}
/**
 * Asserts that the given throwable is exactly the expected
 * {@link HttpResponseException} subtype (not merely assignable to it) and that
 * its HTTP response carries the expected status code.
 *
 * @param exception The throwable captured during the test.
 * @param expectedExceptionType The exact exception class expected.
 * @param expectedStatusCode The HTTP status code expected on the response.
 */
static void assertRestException(Throwable exception, Class<? extends HttpResponseException> expectedExceptionType, int expectedStatusCode) {
    assertEquals(expectedExceptionType, exception.getClass());
    final int actualStatusCode = ((HttpResponseException) exception).getResponse().getStatusCode();
    assertEquals(expectedStatusCode, actualStatusCode);
}
/**
 * Helper method to verify that running the command throws exactly the given
 * exception class. The test fails if the command completes normally or throws
 * a different exception type.
 *
 * @param exceptionThrower Command that should throw the exception
 * @param exception The exact exception class expected
 */
static <T> void assertRunnableThrowsException(Runnable exceptionThrower, Class<T> exception) {
    try {
        exceptionThrower.run();
    } catch (Exception ex) {
        // Thrown as expected — verify it is precisely the requested class.
        assertEquals(exception, ex.getClass());
        return;
    }
    // Reached only when the command completed without throwing.
    fail();
}
/**
 * Sleeps for the given duration, but only when recording or running live;
 * playback replays recorded sessions instantly, so waiting is skipped.
 *
 * @param millis Number of milliseconds to sleep.
 */
public void sleepInRecordMode(long millis) {
    if (interceptorManager.isPlaybackMode()) {
        return;
    }
    try {
        Thread.sleep(millis);
    } catch (InterruptedException e) {
        // Restore the interrupt status instead of swallowing it with
        // printStackTrace(), so callers and the test framework can observe
        // that the thread was interrupted.
        Thread.currentThread().interrupt();
    }
}
/**
 * Sleeps for the given duration in both record and playback modes.
 *
 * @param millis Number of milliseconds to sleep.
 */
public void sleep(long millis) {
    try {
        Thread.sleep(millis);
    } catch (InterruptedException e) {
        // Restore the interrupt status instead of swallowing it with
        // printStackTrace(); dropping the flag hides the interruption from
        // callers and the test framework.
        Thread.currentThread().interrupt();
    }
}
/**
 * Returns a stream of arguments that includes all combinations of eligible {@link HttpClient HttpClients} and
 * service versions that should be tested.
 *
 * @return A stream of HttpClient and service version combinations to test.
 */
static Stream<Arguments> getTestParameters() {
    List<Arguments> argumentsList = new ArrayList<>();
    // Cross-product: every eligible HttpClient is paired with every service
    // version that passes the environment-variable filter.
    getHttpClients()
        .forEach(httpClient -> {
            Arrays.stream(CertificateServiceVersion.values()).filter(
                CertificateClientTestBase::shouldServiceVersionBeTested)
                .forEach(serviceVersion -> argumentsList.add(Arguments.of(httpClient, serviceVersion)));
        });
    return argumentsList.stream();
}
/**
 * Decides whether the given service version should be exercised by the test
 * framework, based on the service-versions environment variable.
 *
 * <ul>
 * <li>When the variable is unset or empty, only the latest service version is tested.</li>
 * <li>When it is set to {@code ALL}, every {@link CertificateServiceVersion} is tested.</li>
 * <li>Otherwise it is treated as a comma-separated list of version names to match.</li>
 * </ul>
 *
 * e.g. {@code set AZURE_TEST_SERVICE_VERSIONS = V1_0, V2_0}
 *
 * @param serviceVersion ServiceVersion to check.
 * @return Whether this service version should be tested.
 */
private static boolean shouldServiceVersionBeTested(CertificateServiceVersion serviceVersion) {
    if (CoreUtils.isNullOrEmpty(SERVICE_VERSION_FROM_ENV)) {
        return serviceVersion.equals(CertificateServiceVersion.getLatest());
    }
    if (AZURE_TEST_SERVICE_VERSIONS_VALUE_ALL.equalsIgnoreCase(SERVICE_VERSION_FROM_ENV)) {
        return true;
    }
    // Comma-separated allow-list: match on the trimmed version name.
    for (String configuredServiceVersion : SERVICE_VERSION_FROM_ENV.split(",")) {
        if (serviceVersion.toString().equals(configuredServiceVersion.trim())) {
            return true;
        }
    }
    return false;
}
} | |
This should check the environment variable before returning true. | HttpPipeline getHttpPipeline(HttpClient httpClient, CertificateServiceVersion serviceVersion) {
TokenCredential credential = null;
if (!interceptorManager.isPlaybackMode()) {
String clientId = System.getenv("ARM_CLIENTID");
String clientKey = System.getenv("ARM_CLIENTKEY");
String tenantId = System.getenv("AZURE_TENANT_ID");
Objects.requireNonNull(clientId, "The client id cannot be null");
Objects.requireNonNull(clientKey, "The client key cannot be null");
Objects.requireNonNull(tenantId, "The tenant id cannot be null");
credential = new ClientSecretCredentialBuilder()
.clientSecret(clientKey)
.clientId(clientId)
.tenantId(tenantId)
.build();
}
final List<HttpPipelinePolicy> policies = new ArrayList<>();
policies.add(new UserAgentPolicy(SDK_NAME, SDK_VERSION,
Configuration.getGlobalConfiguration().clone(), serviceVersion));
HttpPolicyProviders.addBeforeRetryPolicies(policies);
policies.add(new RetryPolicy());
if (credential != null) {
policies.add(new BearerTokenAuthenticationPolicy(credential, CertificateAsyncClient.KEY_VAULT_SCOPE));
}
HttpPolicyProviders.addAfterRetryPolicies(policies);
policies.add(new HttpLoggingPolicy(new HttpLogOptions().setLogLevel(HttpLogDetailLevel.BODY_AND_HEADERS)));
if (!interceptorManager.isPlaybackMode()) {
policies.add(interceptorManager.getRecordPolicy());
}
HttpPipeline pipeline = new HttpPipelineBuilder()
.policies(policies.toArray(new HttpPipelinePolicy[0]))
.httpClient(httpClient == null ? interceptorManager.getPlaybackClient() : httpClient)
.build();
return pipeline;
}
@Test
public abstract void createCertificate(HttpClient httpClient, CertificateServiceVersion serviceVersion);
void createCertificateRunner(Consumer<CertificatePolicy> testRunner) {
final CertificatePolicy certificatePolicy = CertificatePolicy.getDefault();
testRunner.accept(certificatePolicy);
}
@Test
public abstract void createCertificateEmptyName(HttpClient httpClient, CertificateServiceVersion serviceVersion);
@Test
public abstract void createCertificateNullPolicy(HttpClient httpClient, CertificateServiceVersion serviceVersion);
@Test public abstract void createCertoificateNull(HttpClient httpClient, CertificateServiceVersion serviceVersion);
@Test
public abstract void updateCertificate(HttpClient httpClient, CertificateServiceVersion serviceVersion);
/**
 * Supplies an (original tags, updated tags) pair to the update-certificate test.
 *
 * @param testRunner Test body receiving the original and updated tag maps.
 */
void updateCertificateRunner(BiConsumer<Map<String, String>, Map<String, String>> testRunner) {
    final Map<String, String> tags = new HashMap<>();
    tags.put("first tag", "first value");
    // BUG FIX: the two entries below were previously put into 'tags' again
    // (copy-paste error), leaving 'updatedTags' empty so the update was never
    // actually exercised.
    final Map<String, String> updatedTags = new HashMap<>();
    updatedTags.put("first tag", "first value");
    updatedTags.put("second tag", "second value");
    testRunner.accept(tags, updatedTags);
}
@Test
public abstract void updateDisabledCertificate(HttpClient httpClient, CertificateServiceVersion serviceVersion);
/**
 * Supplies an (original tags, updated tags) pair to the
 * update-disabled-certificate test.
 *
 * @param testRunner Test body receiving the original and updated tag maps.
 */
void updateDisabledCertificateRunner(BiConsumer<Map<String, String>, Map<String, String>> testRunner) {
    final Map<String, String> tags = new HashMap<>();
    tags.put("first tag", "first value");
    // BUG FIX: the two entries below were previously put into 'tags' again
    // (copy-paste error), leaving 'updatedTags' empty so the update was never
    // actually exercised.
    final Map<String, String> updatedTags = new HashMap<>();
    updatedTags.put("first tag", "first value");
    updatedTags.put("second tag", "second value");
    testRunner.accept(tags, updatedTags);
}
@Test
public abstract void getCertificate(HttpClient httpClient, CertificateServiceVersion serviceVersion);
void getCertificateRunner(Consumer<String> testRunner) {
testRunner.accept(generateResourceId("testCertificate4"));
}
@Test
public abstract void getCertificateSpecificVersion(HttpClient httpClient, CertificateServiceVersion serviceVersion);
void getCertificateSpecificVersionRunner(Consumer<String> testRunner) {
testRunner.accept(generateResourceId("testCertificate9"));
}
@Test
public abstract void getCertificateNotFound(HttpClient httpClient, CertificateServiceVersion serviceVersion);
@Test
public abstract void deleteCertificate(HttpClient httpClient, CertificateServiceVersion serviceVersion);
void deleteCertificateRunner(Consumer<String> testRunner) {
testRunner.accept(generateResourceId("testCert5"));
}
@Test
public abstract void deleteCertificateNotFound(HttpClient httpClient, CertificateServiceVersion serviceVersion);
@Test
public abstract void getDeletedCertificate(HttpClient httpClient, CertificateServiceVersion serviceVersion);
void getDeletedCertificateRunner(Consumer<String> testRunner) {
testRunner.accept(generateResourceId("testCert6"));
}
@Test
public abstract void getDeletedCertificateNotFound(HttpClient httpClient, CertificateServiceVersion serviceVersion);
@Test
public abstract void recoverDeletedCertificate(HttpClient httpClient, CertificateServiceVersion serviceVersion);
void recoverDeletedKeyRunner(Consumer<String> testRunner) {
testRunner.accept(generateResourceId("testCert7"));
}
@Test
public abstract void recoverDeletedCertificateNotFound(HttpClient httpClient, CertificateServiceVersion serviceVersion);
@Test
public abstract void backupCertificate(HttpClient httpClient, CertificateServiceVersion serviceVersion);
void backupCertificateRunner(Consumer<String> testRunner) {
testRunner.accept(generateResourceId("testCert8"));
}
@Test
public abstract void backupCertificateNotFound(HttpClient httpClient, CertificateServiceVersion serviceVersion);
@Test
public abstract void restoreCertificate(HttpClient httpClient, CertificateServiceVersion serviceVersion);
void restoreCertificateRunner(Consumer<String> testRunner) {
testRunner.accept(generateResourceId("testCertificate9"));
}
@Test
public abstract void getCertificateOperation(HttpClient httpClient, CertificateServiceVersion serviceVersion);
void getCertificateOperationRunner(Consumer<String> testRunner) {
testRunner.accept(generateResourceId("testCertificate10"));
}
@Test
public abstract void cancelCertificateOperation(HttpClient httpClient, CertificateServiceVersion serviceVersion);
void cancelCertificateOperationRunner(Consumer<String> testRunner) {
testRunner.accept(generateResourceId("testCertificate11"));
}
@Test
public abstract void deleteCertificateOperation(HttpClient httpClient, CertificateServiceVersion serviceVersion);
void deleteCertificateOperationRunner(Consumer<String> testRunner) {
testRunner.accept(generateResourceId("testCertificate12"));
}
@Test
public abstract void getCertificatePolicy(HttpClient httpClient, CertificateServiceVersion serviceVersion);
void getCertificatePolicyRunner(Consumer<String> testRunner) {
testRunner.accept(generateResourceId("testCertificate13"));
}
@Test
public abstract void updateCertificatePolicy(HttpClient httpClient, CertificateServiceVersion serviceVersion);
void updateCertificatePolicyRunner(Consumer<String> testRunner) {
testRunner.accept(generateResourceId("testCertificate14"));
}
@Test
public abstract void restoreCertificateFromMalformedBackup(HttpClient httpClient, CertificateServiceVersion serviceVersion);
@Test
public abstract void listCertificates(HttpClient httpClient, CertificateServiceVersion serviceVersion);
void listCertificatesRunner(Consumer<List<String>> testRunner) {
List<String> certificates = new ArrayList<>();
String certificateName;
for (int i = 0; i < 2; i++) {
certificateName = generateResourceId("listCertKey" + i);
certificates.add(certificateName);
}
testRunner.accept(certificates);
}
@Test
public abstract void createIssuer(HttpClient httpClient, CertificateServiceVersion serviceVersion);
void createIssuereRunner(Consumer<CertificateIssuer> testRunner) {
final CertificateIssuer certificateIssuer = setupIssuer(generateResourceId("testIssuer01"));
testRunner.accept(certificateIssuer);
}
@Test
public abstract void createIssuerNull(HttpClient httpClient, CertificateServiceVersion serviceVersion);
@Test
public abstract void getCertificateIssuer(HttpClient httpClient, CertificateServiceVersion serviceVersion);
@Test
public abstract void getCertificateIssuerNotFound(HttpClient httpClient, CertificateServiceVersion serviceVersion);
void getCertificateIssuerRunner(Consumer<CertificateIssuer> testRunner) {
final CertificateIssuer certificateIssuer = setupIssuer(generateResourceId("testIssuer02"));
testRunner.accept(certificateIssuer);
}
@Test
public abstract void deleteCertificateIssuer(HttpClient httpClient, CertificateServiceVersion serviceVersion);
@Test
public abstract void deleteCertificateIssuerNotFound(HttpClient httpClient, CertificateServiceVersion serviceVersion);
void deleteCertificateIssuerRunner(Consumer<CertificateIssuer> testRunner) {
final CertificateIssuer certificateIssuer = setupIssuer(generateResourceId("testIssuer03"));
testRunner.accept(certificateIssuer);
}
@Test
public abstract void listCertificateIssuers(HttpClient httpClient, CertificateServiceVersion serviceVersion);
void listCertificateIssuersRunner(Consumer<HashMap<String, CertificateIssuer>> testRunner) {
HashMap<String, CertificateIssuer> certificateIssuers = new HashMap<>();
String certificateIssuerName;
for (int i = 0; i < 10; i++) {
certificateIssuerName = generateResourceId("listCertIssuer" + i);
certificateIssuers.put(certificateIssuerName, setupIssuer(certificateIssuerName));
}
testRunner.accept(certificateIssuers);
}
@Test
public abstract void setContacts(HttpClient httpClient, CertificateServiceVersion serviceVersion);
@Test
public abstract void listContacts(HttpClient httpClient, CertificateServiceVersion serviceVersion);
@Test
public abstract void deleteContacts(HttpClient httpClient, CertificateServiceVersion serviceVersion);
@Test
public abstract void getCertificateOperatioNotFound(HttpClient httpClient, CertificateServiceVersion serviceVersion);
@Test
public abstract void getCertificatePolicyNotFound(HttpClient httpClient, CertificateServiceVersion serviceVersion);
CertificateContact setupContact() {
return new CertificateContact().setName("name").setEmail("first.last@gmail.com").setPhone("2323-31232");
}
Boolean validateContact(CertificateContact expected, CertificateContact actual) {
return expected.getEmail().equals(actual.getEmail())
&& expected.getName().equals(actual.getName())
&& expected.getPhone().equals(actual.getPhone());
}
@Test
public abstract void listCertificateVersions(HttpClient httpClient, CertificateServiceVersion serviceVersion);
void listCertificateVersionsRunner(Consumer<List<String>> testRunner) {
List<String> certificates = new ArrayList<>();
String certificateName = generateResourceId("listCertVersionTest");
for (int i = 1; i < 5; i++) {
certificates.add(certificateName);
}
testRunner.accept(certificates);
}
@Test
public abstract void listDeletedCertificates(HttpClient httpClient, CertificateServiceVersion serviceVersion);
void listDeletedCertificatesRunner(Consumer<List<String>> testRunner) {
List<String> certificates = new ArrayList<>();
String certificateName;
for (int i = 0; i < 3; i++) {
certificateName = generateResourceId("listDeletedCertificate" + i);
certificates.add(certificateName);
}
testRunner.accept(certificates);
}
@Test
public abstract void importCertificate(HttpClient httpClient, CertificateServiceVersion serviceVersion);
void importCertificateRunner(Consumer<ImportCertificateOptions> testRunner) {
String certificateContent = "MIIJOwIBAzCCCPcGCSqGSIb3DQEHAaCCCOgEggjkMIII4DCCBgkGCSqGSIb3DQEHAaCCBfoEggX2MIIF8jCCBe4GCyqGSIb3DQEMCgECoIIE/jCCBPowHAYKKoZIhvcNAQwBAzAOBAj15YH9pOE58AICB9AEggTYLrI+SAru2dBZRQRlJY7XQ3LeLkah2FcRR3dATDshZ2h0IA2oBrkQIdsLyAAWZ32qYR1qkWxLHn9AqXgu27AEbOk35+pITZaiy63YYBkkpR+pDdngZt19Z0PWrGwHEq5z6BHS2GLyyN8SSOCbdzCz7blj3+7IZYoMj4WOPgOm/tQ6U44SFWek46QwN2zeA4i97v7ftNNns27ms52jqfhOvTA9c/wyfZKAY4aKJfYYUmycKjnnRl012ldS2lOkASFt+lu4QCa72IY6ePtRudPCvmzRv2pkLYS6z3cI7omT8nHP3DymNOqLbFqr5O2M1ZYaLC63Q3xt3eVvbcPh3N08D1hHkhz/KDTvkRAQpvrW8ISKmgDdmzN55Pe55xHfSWGB7gPw8sZea57IxFzWHTK2yvTslooWoosmGxanYY2IG/no3EbPOWDKjPZ4ilYJe5JJ2immlxPz+2e2EOCKpDI+7fzQcRz3PTd3BK+budZ8aXX8aW/lOgKS8WmxZoKnOJBNWeTNWQFugmktXfdPHAdxMhjUXqeGQd8wTvZ4EzQNNafovwkI7IV/ZYoa++RGofVR3ZbRSiBNF6TDj/qXFt0wN/CQnsGAmQAGNiN+D4mY7i25dtTu/Jc7OxLdhAUFpHyJpyrYWLfvOiS5WYBeEDHkiPUa/8eZSPA3MXWZR1RiuDvuNqMjct1SSwdXADTtF68l/US1ksU657+XSC+6ly1A/upz+X71+C4Ho6W0751j5ZMT6xKjGh5pee7MVuduxIzXjWIy3YSd0fIT3U0A5NLEvJ9rfkx6JiHjRLx6V1tqsrtT6BsGtmCQR1UCJPLqsKVDvAINx3cPA/CGqr5OX2BGZlAihGmN6n7gv8w4O0k0LPTAe5YefgXN3m9pE867N31GtHVZaJ/UVgDNYS2jused4rw76ZWN41akx2QN0JSeMJqHXqVz6AKfz8ICS/dFnEGyBNpXiMRxrY/QPKi/wONwqsbDxRW7vZRVKs78pBkE0ksaShlZk5GkeayDWC/7Hi/NqUFtIloK9XB3paLxo1DGu5qqaF34jZdktzkXp0uZqpp+FfKZaiovMjt8F7yHCPk+LYpRsU2Cyc9DVoDA6rIgf+uEP4jppgehsxyT0lJHax2t869R2jYdsXwYUXjgwHIV0voj7bJYPGFlFjXOp6ZW86scsHM5xfsGQoK2Fp838VT34SHE1ZXU/puM7rviREHYW72pfpgGZUILQMohuTPnd8tFtAkbrmjLDo+k9xx7HUvgoFTiNNWuq/cRjr70FKNguMMTIrid+HwfmbRoaxENWdLcOTNeascER2a+37UQolKD5ksrPJG6RdNA7O2pzp3micDYRs/+s28cCIxO
String certificatePassword = "123";
String certificateName = generateResourceId("importCertPkcs");
HashMap<String, String> tags = new HashMap<>();
tags.put("key", "val");
ImportCertificateOptions importCertificateOptions = new ImportCertificateOptions(certificateName, Base64.getDecoder().decode(certificateContent))
.setPassword(certificatePassword)
.setEnabled(true)
.setTags(tags);
testRunner.accept(importCertificateOptions);
}
@Test
public abstract void importPemCertificate(HttpClient httpClient, CertificateServiceVersion serviceVersion) throws IOException;
void importPemCertificateRunner(Consumer<ImportCertificateOptions> testRunner) throws IOException {
byte[] certificateContent = readCertificate("certificate.pem");
String certificateName = generateResourceId("importCertPem");
HashMap<String, String> tags = new HashMap<>();
tags.put("key", "val");
ImportCertificateOptions importCertificateOptions = new ImportCertificateOptions(certificateName, certificateContent)
.setPolicy(new CertificatePolicy("Self", "CN=AzureSDK")
.setContentType(CertificateContentType.PEM))
.setEnabled(true)
.setTags(tags);
testRunner.accept(importCertificateOptions);
}
@Test
public abstract void mergeCertificateNotFound(HttpClient httpClient, CertificateServiceVersion serviceVersion);
// Reads a certificate test resource from the classpath into raw bytes.
// NOTE(review): stripping the leading character when the path contains ':'
// looks like a Windows-specific workaround for resource URL paths of the form
// "/C:/..." — presumably; Paths.get(resource.toURI()) would be more robust.
// TODO confirm behavior on non-Windows CI.
private byte[] readCertificate(String certName) throws IOException {
    String pemPath = getClass().getClassLoader().getResource(certName).getPath();
    if (pemPath.contains(":")) {
        pemPath = pemPath.substring(1);
    }
    return Files.readAllBytes(Paths.get(pemPath));
}
CertificateIssuer setupIssuer(String issuerName) {
return new CertificateIssuer(issuerName, "Test")
.setAdministratorContacts(Arrays.asList(new AdministratorContact().setFirstName("first").setLastName("last").setEmail("first.last@hotmail.com").setPhone("12345")))
.setAccountId("issuerAccountId")
.setEnabled(true)
.setOrganizationId("orgId")
.setPassword("test123");
}
/**
 * Converts a byte array (e.g. an X.509 thumbprint) to its lowercase hex
 * representation, two characters per byte.
 *
 * @param x5t The bytes to encode; may be {@code null}.
 * @return The hex string, or an empty string when the input is {@code null}.
 */
String toHexString(byte[] x5t) {
    if (x5t == null) {
        return "";
    }
    // Pre-size: exactly two hex characters are produced per input byte.
    StringBuilder hexString = new StringBuilder(x5t.length * 2);
    for (byte b : x5t) {
        String hex = Integer.toHexString(0xFF & b);
        if (hex.length() == 1) {
            hexString.append('0');
        }
        hexString.append(hex);
    }
    // The original also ran .replace("-", ""), but Integer.toHexString of a
    // masked (non-negative) value never contains '-', so that was dead code.
    return hexString.toString();
}
X509Certificate loadCerToX509Certificate(KeyVaultCertificateWithPolicy certificate) throws CertificateException, IOException {
assertNotNull(certificate.getCer());
ByteArrayInputStream cerStream = new ByteArrayInputStream(certificate.getCer());
CertificateFactory certificateFactory = CertificateFactory.getInstance("X.509");
X509Certificate x509Certificate = (X509Certificate) certificateFactory.generateCertificate(cerStream);
cerStream.close();
return x509Certificate;
}
Boolean validateIssuer(CertificateIssuer expected, CertificateIssuer actual) {
return expected.getAccountId().equals(actual.getAccountId())
&& expected.isEnabled().equals(actual.isEnabled())
&& (actual.getCreatedOn() != null)
&& (actual.getUpdatedOn() != null)
&& (actual.getId() != null)
&& (actual.getId().length() > 0)
&& expected.getName().equals(actual.getName())
&& expected.getOrganizationId().equals(actual.getOrganizationId())
&& expected.getAdministratorContacts().size() == actual.getAdministratorContacts().size();
}
CertificatePolicy setupPolicy() {
return new CertificatePolicy(WellKnownIssuerNames.SELF, "CN=default")
.setKeyUsage(CertificateKeyUsage.KEY_CERT_SIGN, CertificateKeyUsage.KEY_AGREEMENT)
.setContentType(CertificateContentType.PKCS12)
.setExportable(true)
.setKeyType(CertificateKeyType.EC)
.setCertificateTransparent(false)
.setEnabled(true)
.setKeyCurveName(CertificateKeyCurveName.P_384)
.setKeyReusable(true)
.setValidityInMonths(24)
.setLifetimeActions(new LifetimeAction(CertificatePolicyAction.AUTO_RENEW).setDaysBeforeExpiry(40));
}
boolean validatePolicy(CertificatePolicy expected, CertificatePolicy actual) {
return expected.getKeyType().equals(actual.getKeyType())
&& expected.getContentType().equals(actual.getContentType())
&& actual.getCreatedOn() != null
&& expected.getIssuerName().equals(actual.getIssuerName())
&& expected.getKeyCurveName().equals(actual.getKeyCurveName())
&& expected.isExportable().equals(actual.isExportable())
&& expected.isCertificateTransparent().equals(actual.isCertificateTransparent())
&& expected.isEnabled().equals(actual.isEnabled())
&& expected.isKeyReusable().equals(actual.isKeyReusable())
&& expected.getValidityInMonths().equals(actual.getValidityInMonths())
&& expected.getLifetimeActions().size() == actual.getLifetimeActions().size()
&& expected.getKeyUsage().size() == actual.getKeyUsage().size();
}
boolean validateCertificate(KeyVaultCertificate expected, KeyVaultCertificate actual) {
return expected.getId().equals(actual.getId())
&& expected.getKeyId().equals(actual.getKeyId())
&& expected.getName().equals(actual.getName())
&& expected.getSecretId().equals(actual.getSecretId())
&& expected.getProperties().getVersion().equals(actual.getProperties().getVersion())
&& expected.getProperties().getCreatedOn().equals(actual.getProperties().getCreatedOn())
&& expected.getProperties().getExpiresOn().equals(actual.getProperties().getExpiresOn())
&& expected.getProperties().getRecoveryLevel().equals(actual.getProperties().getRecoveryLevel())
&& expected.getProperties().getX509Thumbprint().length == actual.getProperties().getX509Thumbprint().length
&& expected.getCer().length == actual.getCer().length;
}
// Resolves the Key Vault endpoint: a fixed local URL in playback mode, or the
// AZURE_KEYVAULT_ENDPOINT environment variable when recording/running live.
// NOTE(review): the playback literal below is truncated/unterminated ("http:) —
// almost certainly a full URL such as "http://localhost:8080" that lost its
// "//..." tail to comment stripping. Restore it from VCS history; TODO confirm.
public String getEndpoint() {
    final String endpoint = interceptorManager.isPlaybackMode()
        ? "http:
        : System.getenv("AZURE_KEYVAULT_ENDPOINT");
    Objects.requireNonNull(endpoint);
    return endpoint;
}
static void assertRestException(Runnable exceptionThrower, int expectedStatusCode) {
assertRestException(exceptionThrower, HttpResponseException.class, expectedStatusCode);
}
static void assertRestException(Runnable exceptionThrower, Class<? extends HttpResponseException> expectedExceptionType, int expectedStatusCode) {
try {
exceptionThrower.run();
fail();
} catch (Throwable ex) {
assertRestException(ex, expectedExceptionType, expectedStatusCode);
}
}
String generateResourceId(String suffix) {
if (interceptorManager.isPlaybackMode()) {
return suffix;
}
String id = UUID.randomUUID().toString();
return suffix.length() > 0 ? id + "-" + suffix : id;
}
/**
* Helper method to verify the error was a HttpRequestException and it has a specific HTTP response code.
*
* @param exception Expected error thrown during the test
* @param expectedStatusCode Expected HTTP status code contained in the error response
*/
static void assertRestException(Throwable exception, int expectedStatusCode) {
assertRestException(exception, HttpResponseException.class, expectedStatusCode);
}
static void assertRestException(Throwable exception, Class<? extends HttpResponseException> expectedExceptionType, int expectedStatusCode) {
assertEquals(expectedExceptionType, exception.getClass());
assertEquals(expectedStatusCode, ((HttpResponseException) exception).getResponse().getStatusCode());
}
/**
* Helper method to verify that a command throws an IllegalArgumentException.
*
* @param exceptionThrower Command that should throw the exception
*/
static <T> void assertRunnableThrowsException(Runnable exceptionThrower, Class<T> exception) {
try {
exceptionThrower.run();
fail();
} catch (Exception ex) {
assertEquals(exception, ex.getClass());
}
}
/**
 * Sleeps for the given duration, but only when recording or running live;
 * playback replays recorded sessions instantly, so waiting is skipped.
 *
 * @param millis Number of milliseconds to sleep.
 */
public void sleepInRecordMode(long millis) {
    if (interceptorManager.isPlaybackMode()) {
        return;
    }
    try {
        Thread.sleep(millis);
    } catch (InterruptedException e) {
        // Restore the interrupt status instead of swallowing it with
        // printStackTrace(), so callers and the test framework can observe
        // that the thread was interrupted.
        Thread.currentThread().interrupt();
    }
}
/**
 * Sleeps for the given duration in both record and playback modes.
 *
 * @param millis Number of milliseconds to sleep.
 */
public void sleep(long millis) {
    try {
        Thread.sleep(millis);
    } catch (InterruptedException e) {
        // Restore the interrupt status instead of swallowing it with
        // printStackTrace(); dropping the flag hides the interruption from
        // callers and the test framework.
        Thread.currentThread().interrupt();
    }
}
/**
* Returns a stream of arguments that includes all combinations of eligible {@link HttpClient HttpClients} and
* service versions that should be tested.
*
* @return A stream of HttpClient and service version combinations to test.
*/
static Stream<Arguments> getTestParameters() {
List<Arguments> argumentsList = new ArrayList<>();
getHttpClients()
.forEach(httpClient -> {
Arrays.stream(CertificateServiceVersion.values()).filter(
CertificateClientTestBase::shouldServiceVersionBeTested)
.forEach(serviceVersion -> argumentsList.add(Arguments.of(httpClient, serviceVersion)));
});
return argumentsList.stream();
}
/**
 * Decides whether the given service version should be exercised, based on the
 * {@code AZURE_TEST_SERVICE_VERSIONS} configuration value.
 *
 * <p>BUG FIX: the previous implementation returned {@code true} for every
 * version whenever the variable was merely non-null, ignoring its content.
 * It now checks the configured value: unset means "latest only", {@code ALL}
 * means every version, and otherwise the value is treated as a comma-separated
 * list of version names to match.
 *
 * @param serviceVersion ServiceVersion to check.
 * @return Whether this service version should be tested.
 */
static boolean shouldServiceVersionBeTested(CertificateServiceVersion serviceVersion) {
    String configuredVersions = Configuration.getGlobalConfiguration().get(AZURE_TEST_SERVICE_VERSIONS);
    if (configuredVersions == null) {
        return CertificateServiceVersion.getLatest().equals(serviceVersion);
    }
    if ("ALL".equalsIgnoreCase(configuredVersions.trim())) {
        return true;
    }
    for (String configuredVersion : configuredVersions.split(",")) {
        if (serviceVersion.toString().equals(configuredVersion.trim())) {
            return true;
        }
    }
    return false;
}
} | return true; | HttpPipeline getHttpPipeline(HttpClient httpClient, CertificateServiceVersion serviceVersion) {
TokenCredential credential = null;
if (!interceptorManager.isPlaybackMode()) {
String clientId = System.getenv("ARM_CLIENTID");
String clientKey = System.getenv("ARM_CLIENTKEY");
String tenantId = System.getenv("AZURE_TENANT_ID");
Objects.requireNonNull(clientId, "The client id cannot be null");
Objects.requireNonNull(clientKey, "The client key cannot be null");
Objects.requireNonNull(tenantId, "The tenant id cannot be null");
credential = new ClientSecretCredentialBuilder()
.clientSecret(clientKey)
.clientId(clientId)
.tenantId(tenantId)
.build();
}
final List<HttpPipelinePolicy> policies = new ArrayList<>();
policies.add(new UserAgentPolicy(SDK_NAME, SDK_VERSION,
Configuration.getGlobalConfiguration().clone(), serviceVersion));
HttpPolicyProviders.addBeforeRetryPolicies(policies);
policies.add(new RetryPolicy());
if (credential != null) {
policies.add(new BearerTokenAuthenticationPolicy(credential, CertificateAsyncClient.KEY_VAULT_SCOPE));
}
HttpPolicyProviders.addAfterRetryPolicies(policies);
policies.add(new HttpLoggingPolicy(new HttpLogOptions().setLogLevel(HttpLogDetailLevel.BODY_AND_HEADERS)));
if (!interceptorManager.isPlaybackMode()) {
policies.add(interceptorManager.getRecordPolicy());
}
HttpPipeline pipeline = new HttpPipelineBuilder()
.policies(policies.toArray(new HttpPipelinePolicy[0]))
.httpClient(httpClient == null ? interceptorManager.getPlaybackClient() : httpClient)
.build();
return pipeline;
} | class CertificateClientTestBase extends TestBase {
static final String DISPLAY_NAME_WITH_ARGUMENTS = "{displayName} with [{arguments}]";
private static final String SDK_NAME = "client_name";
private static final String SDK_VERSION = "client_version";
@Override
protected String getTestName() {
return "";
}
void beforeTestSetup() {
} | class CertificateClientTestBase extends TestBase {
static final String DISPLAY_NAME_WITH_ARGUMENTS = "{displayName} with [{arguments}]";
private static final String SDK_NAME = "client_name";
private static final String SDK_VERSION = "client_version";
private static final String AZURE_KEYVAULT_TEST_CERTIFICATE_SERVICE_VERSIONS = "AZURE_KEYVAULT_TEST_CERTIFICATE_SERVICE_VERSIONS";
private static final String SERVICE_VERSION_FROM_ENV =
Configuration.getGlobalConfiguration().get(AZURE_KEYVAULT_TEST_CERTIFICATE_SERVICE_VERSIONS);
private static final String AZURE_TEST_SERVICE_VERSIONS_VALUE_ALL = "ALL";
@Override
protected String getTestName() {
return "";
}
void beforeTestSetup() {
}
@Test
public abstract void createCertificate(HttpClient httpClient, CertificateServiceVersion serviceVersion);
void createCertificateRunner(Consumer<CertificatePolicy> testRunner) {
final CertificatePolicy certificatePolicy = CertificatePolicy.getDefault();
testRunner.accept(certificatePolicy);
}
@Test
public abstract void createCertificateEmptyName(HttpClient httpClient, CertificateServiceVersion serviceVersion);
@Test
public abstract void createCertificateNullPolicy(HttpClient httpClient, CertificateServiceVersion serviceVersion);
@Test public abstract void createCertoificateNull(HttpClient httpClient, CertificateServiceVersion serviceVersion);
@Test
public abstract void updateCertificate(HttpClient httpClient, CertificateServiceVersion serviceVersion);
void updateCertificateRunner(BiConsumer<Map<String, String>, Map<String, String>> testRunner) {
final Map<String, String> tags = new HashMap<>();
tags.put("first tag", "first value");
final Map<String, String> updatedTags = new HashMap<>();
tags.put("first tag", "first value");
tags.put("second tag", "second value");
testRunner.accept(tags, updatedTags);
}
@Test
public abstract void updateDisabledCertificate(HttpClient httpClient, CertificateServiceVersion serviceVersion);
void updateDisabledCertificateRunner(BiConsumer<Map<String, String>, Map<String, String>> testRunner) {
final Map<String, String> tags = new HashMap<>();
tags.put("first tag", "first value");
final Map<String, String> updatedTags = new HashMap<>();
tags.put("first tag", "first value");
tags.put("second tag", "second value");
testRunner.accept(tags, updatedTags);
}
@Test
public abstract void getCertificate(HttpClient httpClient, CertificateServiceVersion serviceVersion);
void getCertificateRunner(Consumer<String> testRunner) {
testRunner.accept(generateResourceId("testCertificate4"));
}
@Test
public abstract void getCertificateSpecificVersion(HttpClient httpClient, CertificateServiceVersion serviceVersion);
void getCertificateSpecificVersionRunner(Consumer<String> testRunner) {
testRunner.accept(generateResourceId("testCertificate9"));
}
@Test
public abstract void getCertificateNotFound(HttpClient httpClient, CertificateServiceVersion serviceVersion);
// Abstract scenario hooks implemented by the concrete sync/async client test
// subclasses. Each *Runner helper hands the subclass-supplied test body a
// uniquely generated certificate name (stable in playback mode).
@Test
public abstract void deleteCertificate(HttpClient httpClient, CertificateServiceVersion serviceVersion);
void deleteCertificateRunner(Consumer<String> testRunner) {
testRunner.accept(generateResourceId("testCert5"));
}
@Test
public abstract void deleteCertificateNotFound(HttpClient httpClient, CertificateServiceVersion serviceVersion);
@Test
public abstract void getDeletedCertificate(HttpClient httpClient, CertificateServiceVersion serviceVersion);
void getDeletedCertificateRunner(Consumer<String> testRunner) {
testRunner.accept(generateResourceId("testCert6"));
}
@Test
public abstract void getDeletedCertificateNotFound(HttpClient httpClient, CertificateServiceVersion serviceVersion);
@Test
public abstract void recoverDeletedCertificate(HttpClient httpClient, CertificateServiceVersion serviceVersion);
// NOTE(review): named "...KeyRunner" but serves certificate tests — looks like a
// copy-paste from the keys test base; renaming would break subclasses, so left as-is.
void recoverDeletedKeyRunner(Consumer<String> testRunner) {
testRunner.accept(generateResourceId("testCert7"));
}
@Test
public abstract void recoverDeletedCertificateNotFound(HttpClient httpClient, CertificateServiceVersion serviceVersion);
@Test
public abstract void backupCertificate(HttpClient httpClient, CertificateServiceVersion serviceVersion);
void backupCertificateRunner(Consumer<String> testRunner) {
testRunner.accept(generateResourceId("testCert8"));
}
@Test
public abstract void backupCertificateNotFound(HttpClient httpClient, CertificateServiceVersion serviceVersion);
@Test
public abstract void restoreCertificate(HttpClient httpClient, CertificateServiceVersion serviceVersion);
void restoreCertificateRunner(Consumer<String> testRunner) {
testRunner.accept(generateResourceId("testCertificate9"));
}
// Certificate-operation lifecycle hooks (get/cancel/delete) and policy hooks.
@Test
public abstract void getCertificateOperation(HttpClient httpClient, CertificateServiceVersion serviceVersion);
void getCertificateOperationRunner(Consumer<String> testRunner) {
testRunner.accept(generateResourceId("testCertificate10"));
}
@Test
public abstract void cancelCertificateOperation(HttpClient httpClient, CertificateServiceVersion serviceVersion);
void cancelCertificateOperationRunner(Consumer<String> testRunner) {
testRunner.accept(generateResourceId("testCertificate11"));
}
@Test
public abstract void deleteCertificateOperation(HttpClient httpClient, CertificateServiceVersion serviceVersion);
void deleteCertificateOperationRunner(Consumer<String> testRunner) {
testRunner.accept(generateResourceId("testCertificate12"));
}
@Test
public abstract void getCertificatePolicy(HttpClient httpClient, CertificateServiceVersion serviceVersion);
void getCertificatePolicyRunner(Consumer<String> testRunner) {
testRunner.accept(generateResourceId("testCertificate13"));
}
@Test
public abstract void updateCertificatePolicy(HttpClient httpClient, CertificateServiceVersion serviceVersion);
void updateCertificatePolicyRunner(Consumer<String> testRunner) {
testRunner.accept(generateResourceId("testCertificate14"));
}
@Test
public abstract void restoreCertificateFromMalformedBackup(HttpClient httpClient, CertificateServiceVersion serviceVersion);
@Test
public abstract void listCertificates(HttpClient httpClient, CertificateServiceVersion serviceVersion);
// Supplies two unique certificate names for the list test.
void listCertificatesRunner(Consumer<List<String>> testRunner) {
List<String> certificates = new ArrayList<>();
String certificateName;
for (int i = 0; i < 2; i++) {
certificateName = generateResourceId("listCertKey" + i);
certificates.add(certificateName);
}
testRunner.accept(certificates);
}
// Issuer- and contact-management hooks. Runner helpers build issuer fixtures
// via setupIssuer with unique names.
@Test
public abstract void createIssuer(HttpClient httpClient, CertificateServiceVersion serviceVersion);
// NOTE(review): "createIssuereRunner" has a typo; subclasses call it by this
// name, so it is not renamed here.
void createIssuereRunner(Consumer<CertificateIssuer> testRunner) {
final CertificateIssuer certificateIssuer = setupIssuer(generateResourceId("testIssuer01"));
testRunner.accept(certificateIssuer);
}
@Test
public abstract void createIssuerNull(HttpClient httpClient, CertificateServiceVersion serviceVersion);
@Test
public abstract void getCertificateIssuer(HttpClient httpClient, CertificateServiceVersion serviceVersion);
@Test
public abstract void getCertificateIssuerNotFound(HttpClient httpClient, CertificateServiceVersion serviceVersion);
void getCertificateIssuerRunner(Consumer<CertificateIssuer> testRunner) {
final CertificateIssuer certificateIssuer = setupIssuer(generateResourceId("testIssuer02"));
testRunner.accept(certificateIssuer);
}
@Test
public abstract void deleteCertificateIssuer(HttpClient httpClient, CertificateServiceVersion serviceVersion);
@Test
public abstract void deleteCertificateIssuerNotFound(HttpClient httpClient, CertificateServiceVersion serviceVersion);
void deleteCertificateIssuerRunner(Consumer<CertificateIssuer> testRunner) {
final CertificateIssuer certificateIssuer = setupIssuer(generateResourceId("testIssuer03"));
testRunner.accept(certificateIssuer);
}
@Test
public abstract void listCertificateIssuers(HttpClient httpClient, CertificateServiceVersion serviceVersion);
// Supplies ten issuer fixtures keyed by their generated names.
void listCertificateIssuersRunner(Consumer<HashMap<String, CertificateIssuer>> testRunner) {
HashMap<String, CertificateIssuer> certificateIssuers = new HashMap<>();
String certificateIssuerName;
for (int i = 0; i < 10; i++) {
certificateIssuerName = generateResourceId("listCertIssuer" + i);
certificateIssuers.put(certificateIssuerName, setupIssuer(certificateIssuerName));
}
testRunner.accept(certificateIssuers);
}
@Test
public abstract void setContacts(HttpClient httpClient, CertificateServiceVersion serviceVersion);
@Test
public abstract void listContacts(HttpClient httpClient, CertificateServiceVersion serviceVersion);
@Test
public abstract void deleteContacts(HttpClient httpClient, CertificateServiceVersion serviceVersion);
// NOTE(review): "OperatioNotFound" typo kept — subclasses override by this name.
@Test
public abstract void getCertificateOperatioNotFound(HttpClient httpClient, CertificateServiceVersion serviceVersion);
@Test
public abstract void getCertificatePolicyNotFound(HttpClient httpClient, CertificateServiceVersion serviceVersion);
/** Builds the fixture contact used by the contact-management tests. */
CertificateContact setupContact() {
    CertificateContact contact = new CertificateContact();
    // Fluent setters return the contact itself, so the chain mutates 'contact'.
    contact.setName("name").setEmail("first.last@gmail.com").setPhone("2323-31232");
    return contact;
}
/** Field-by-field comparison of two contacts (email, name, phone). */
Boolean validateContact(CertificateContact expected, CertificateContact actual) {
    // Guard clauses preserve the original short-circuit evaluation order.
    if (!expected.getEmail().equals(actual.getEmail())) {
        return false;
    }
    if (!expected.getName().equals(actual.getName())) {
        return false;
    }
    return expected.getPhone().equals(actual.getPhone());
}
@Test
public abstract void listCertificateVersions(HttpClient httpClient, CertificateServiceVersion serviceVersion);
// Repeats one certificate name four times: each create produces a new version
// of the same certificate, exercising the list-versions API.
void listCertificateVersionsRunner(Consumer<List<String>> testRunner) {
List<String> certificates = new ArrayList<>();
String certificateName = generateResourceId("listCertVersionTest");
for (int i = 1; i < 5; i++) {
certificates.add(certificateName);
}
testRunner.accept(certificates);
}
@Test
public abstract void listDeletedCertificates(HttpClient httpClient, CertificateServiceVersion serviceVersion);
// Supplies three unique certificate names to create and then delete.
void listDeletedCertificatesRunner(Consumer<List<String>> testRunner) {
List<String> certificates = new ArrayList<>();
String certificateName;
for (int i = 0; i < 3; i++) {
certificateName = generateResourceId("listDeletedCertificate" + i);
certificates.add(certificateName);
}
testRunner.accept(certificates);
}
@Test
public abstract void importCertificate(HttpClient httpClient, CertificateServiceVersion serviceVersion);
// Builds import options for a password-protected PKCS#12 certificate embedded
// as base64.
// NOTE(review): the base64 literal below appears truncated (no closing quote /
// semicolon) — likely damaged during extraction; verify against upstream.
void importCertificateRunner(Consumer<ImportCertificateOptions> testRunner) {
String certificateContent = "MIIJOwIBAzCCCPcGCSqGSIb3DQEHAaCCCOgEggjkMIII4DCCBgkGCSqGSIb3DQEHAaCCBfoEggX2MIIF8jCCBe4GCyqGSIb3DQEMCgECoIIE/jCCBPowHAYKKoZIhvcNAQwBAzAOBAj15YH9pOE58AICB9AEggTYLrI+SAru2dBZRQRlJY7XQ3LeLkah2FcRR3dATDshZ2h0IA2oBrkQIdsLyAAWZ32qYR1qkWxLHn9AqXgu27AEbOk35+pITZaiy63YYBkkpR+pDdngZt19Z0PWrGwHEq5z6BHS2GLyyN8SSOCbdzCz7blj3+7IZYoMj4WOPgOm/tQ6U44SFWek46QwN2zeA4i97v7ftNNns27ms52jqfhOvTA9c/wyfZKAY4aKJfYYUmycKjnnRl012ldS2lOkASFt+lu4QCa72IY6ePtRudPCvmzRv2pkLYS6z3cI7omT8nHP3DymNOqLbFqr5O2M1ZYaLC63Q3xt3eVvbcPh3N08D1hHkhz/KDTvkRAQpvrW8ISKmgDdmzN55Pe55xHfSWGB7gPw8sZea57IxFzWHTK2yvTslooWoosmGxanYY2IG/no3EbPOWDKjPZ4ilYJe5JJ2immlxPz+2e2EOCKpDI+7fzQcRz3PTd3BK+budZ8aXX8aW/lOgKS8WmxZoKnOJBNWeTNWQFugmktXfdPHAdxMhjUXqeGQd8wTvZ4EzQNNafovwkI7IV/ZYoa++RGofVR3ZbRSiBNF6TDj/qXFt0wN/CQnsGAmQAGNiN+D4mY7i25dtTu/Jc7OxLdhAUFpHyJpyrYWLfvOiS5WYBeEDHkiPUa/8eZSPA3MXWZR1RiuDvuNqMjct1SSwdXADTtF68l/US1ksU657+XSC+6ly1A/upz+X71+C4Ho6W0751j5ZMT6xKjGh5pee7MVuduxIzXjWIy3YSd0fIT3U0A5NLEvJ9rfkx6JiHjRLx6V1tqsrtT6BsGtmCQR1UCJPLqsKVDvAINx3cPA/CGqr5OX2BGZlAihGmN6n7gv8w4O0k0LPTAe5YefgXN3m9pE867N31GtHVZaJ/UVgDNYS2jused4rw76ZWN41akx2QN0JSeMJqHXqVz6AKfz8ICS/dFnEGyBNpXiMRxrY/QPKi/wONwqsbDxRW7vZRVKs78pBkE0ksaShlZk5GkeayDWC/7Hi/NqUFtIloK9XB3paLxo1DGu5qqaF34jZdktzkXp0uZqpp+FfKZaiovMjt8F7yHCPk+LYpRsU2Cyc9DVoDA6rIgf+uEP4jppgehsxyT0lJHax2t869R2jYdsXwYUXjgwHIV0voj7bJYPGFlFjXOp6ZW86scsHM5xfsGQoK2Fp838VT34SHE1ZXU/puM7rviREHYW72pfpgGZUILQMohuTPnd8tFtAkbrmjLDo+k9xx7HUvgoFTiNNWuq/cRjr70FKNguMMTIrid+HwfmbRoaxENWdLcOTNeascER2a+37UQolKD5ksrPJG6RdNA7O2pzp3micDYRs/+s28cCIxO
String certificatePassword = "123";
String certificateName = generateResourceId("importCertPkcs");
HashMap<String, String> tags = new HashMap<>();
tags.put("key", "val");
ImportCertificateOptions importCertificateOptions = new ImportCertificateOptions(certificateName, Base64.getDecoder().decode(certificateContent))
.setPassword(certificatePassword)
.setEnabled(true)
.setTags(tags);
testRunner.accept(importCertificateOptions);
}
@Test
public abstract void importPemCertificate(HttpClient httpClient, CertificateServiceVersion serviceVersion) throws IOException;
// Builds import options for a PEM certificate loaded from test resources.
void importPemCertificateRunner(Consumer<ImportCertificateOptions> testRunner) throws IOException {
byte[] certificateContent = readCertificate("pemCert.pem");
String certificateName = generateResourceId("importCertPem");
HashMap<String, String> tags = new HashMap<>();
tags.put("key", "val");
ImportCertificateOptions importCertificateOptions = new ImportCertificateOptions(certificateName, certificateContent)
.setPolicy(new CertificatePolicy("Self", "CN=AzureSDK")
.setContentType(CertificateContentType.PEM))
.setEnabled(true)
.setTags(tags);
testRunner.accept(importCertificateOptions);
}
@Test
public abstract void mergeCertificateNotFound(HttpClient httpClient, CertificateServiceVersion serviceVersion);
/**
 * Reads a certificate file from the test resources and returns its content as
 * bytes, with lines normalized to end in '\n'.
 *
 * @param certName resource name of the certificate file on the test classpath
 * @return the certificate content as bytes
 * @throws IOException if the resource cannot be read
 */
private byte[] readCertificate(String certName) throws IOException {
    String pemPath = getClass().getClassLoader().getResource(certName).getPath();
    // StringBuilder avoids the O(n^2) String '+=' of the old loop, and
    // try-with-resources guarantees the reader is closed on all paths.
    StringBuilder pemCert = new StringBuilder();
    try (BufferedReader br = new BufferedReader(new FileReader(pemPath))) {
        String line;
        while ((line = br.readLine()) != null) {
            pemCert.append(line).append("\n");
        }
    }
    return pemCert.toString().getBytes();
}
/** Builds a fixture issuer named {@code issuerName} with one administrator contact. */
CertificateIssuer setupIssuer(String issuerName) {
    AdministratorContact admin = new AdministratorContact()
        .setFirstName("first")
        .setLastName("last")
        .setEmail("first.last@hotmail.com")
        .setPhone("12345");
    CertificateIssuer issuer = new CertificateIssuer(issuerName, "Test");
    // Fluent setters mutate and return 'issuer'.
    issuer.setAdministratorContacts(Arrays.asList(admin))
        .setAccountId("issuerAccountId")
        .setEnabled(true)
        .setOrganizationId("orgId")
        .setPassword("test123");
    return issuer;
}
/**
 * Renders a thumbprint as a lowercase hex string; {@code null} maps to "".
 *
 * @param x5t the raw thumbprint bytes, may be null
 * @return lowercase, zero-padded hex encoding of the bytes
 */
String toHexString(byte[] x5t) {
    if (x5t == null) {
        return "";
    }
    StringBuilder hexString = new StringBuilder(x5t.length * 2);
    for (byte b : x5t) {
        // %02x zero-pads single-digit values, replacing the manual '0' append;
        // for byte arguments it prints the unsigned two's-complement value,
        // matching Integer.toHexString(0xFF & b).
        hexString.append(String.format("%02x", b));
    }
    // The old trailing replace("-", "") was dead code: hex output never contains '-'.
    return hexString.toString();
}
/**
 * Parses the CER bytes of a Key Vault certificate into an {@link X509Certificate}.
 *
 * @param certificate the certificate whose {@code getCer()} bytes are parsed
 * @return the parsed X.509 certificate
 * @throws CertificateException if the bytes are not a valid certificate
 * @throws IOException if closing the stream fails
 */
X509Certificate loadCerToX509Certificate(KeyVaultCertificateWithPolicy certificate) throws CertificateException, IOException {
    assertNotNull(certificate.getCer());
    // try-with-resources closes the stream even when generateCertificate throws
    // (the old code only closed it on the success path).
    try (ByteArrayInputStream cerStream = new ByteArrayInputStream(certificate.getCer())) {
        CertificateFactory certificateFactory = CertificateFactory.getInstance("X.509");
        return (X509Certificate) certificateFactory.generateCertificate(cerStream);
    }
}
/**
 * Compares the fields configured by setupIssuer and checks the service-populated
 * metadata (timestamps, id) is present. Guard clauses keep the original
 * left-to-right short-circuit order.
 */
Boolean validateIssuer(CertificateIssuer expected, CertificateIssuer actual) {
    if (!expected.getAccountId().equals(actual.getAccountId())) {
        return false;
    }
    if (!expected.isEnabled().equals(actual.isEnabled())) {
        return false;
    }
    if (actual.getCreatedOn() == null || actual.getUpdatedOn() == null) {
        return false;
    }
    if (actual.getId() == null || actual.getId().length() == 0) {
        return false;
    }
    return expected.getName().equals(actual.getName())
        && expected.getOrganizationId().equals(actual.getOrganizationId())
        && expected.getAdministratorContacts().size() == actual.getAdministratorContacts().size();
}
/** Builds the self-signed EC (P-384) PKCS#12 policy fixture used by the policy tests. */
CertificatePolicy setupPolicy() {
    CertificatePolicy policy = new CertificatePolicy(WellKnownIssuerNames.SELF, "CN=default");
    // Fluent setters mutate 'policy' in place; split out for readability.
    policy.setKeyUsage(CertificateKeyUsage.KEY_CERT_SIGN, CertificateKeyUsage.KEY_AGREEMENT);
    policy.setContentType(CertificateContentType.PKCS12);
    policy.setExportable(true);
    policy.setKeyType(CertificateKeyType.EC);
    policy.setCertificateTransparent(false);
    policy.setEnabled(true);
    policy.setKeyCurveName(CertificateKeyCurveName.P_384);
    policy.setKeyReusable(true);
    policy.setValidityInMonths(24);
    policy.setLifetimeActions(new LifetimeAction(CertificatePolicyAction.AUTO_RENEW).setDaysBeforeExpiry(40));
    return policy;
}
/**
 * Compares the fields configured by setupPolicy; also requires the service to
 * have stamped a creation timestamp. Guard clauses preserve the original
 * short-circuit evaluation order.
 */
boolean validatePolicy(CertificatePolicy expected, CertificatePolicy actual) {
    if (!expected.getKeyType().equals(actual.getKeyType())) {
        return false;
    }
    if (!expected.getContentType().equals(actual.getContentType())) {
        return false;
    }
    if (actual.getCreatedOn() == null) {
        return false;
    }
    if (!expected.getIssuerName().equals(actual.getIssuerName())) {
        return false;
    }
    if (!expected.getKeyCurveName().equals(actual.getKeyCurveName())) {
        return false;
    }
    if (!expected.isExportable().equals(actual.isExportable())) {
        return false;
    }
    if (!expected.isCertificateTransparent().equals(actual.isCertificateTransparent())) {
        return false;
    }
    if (!expected.isEnabled().equals(actual.isEnabled())) {
        return false;
    }
    if (!expected.isKeyReusable().equals(actual.isKeyReusable())) {
        return false;
    }
    if (!expected.getValidityInMonths().equals(actual.getValidityInMonths())) {
        return false;
    }
    // Collections compared by size only, matching the original check.
    return expected.getLifetimeActions().size() == actual.getLifetimeActions().size()
        && expected.getKeyUsage().size() == actual.getKeyUsage().size();
}
/**
 * Compares two certificates field by field. Note: thumbprint and CER content
 * are compared by length only, matching the original check.
 */
boolean validateCertificate(KeyVaultCertificate expected, KeyVaultCertificate actual) {
    if (!expected.getId().equals(actual.getId())) {
        return false;
    }
    if (!expected.getKeyId().equals(actual.getKeyId())) {
        return false;
    }
    if (!expected.getName().equals(actual.getName())) {
        return false;
    }
    if (!expected.getSecretId().equals(actual.getSecretId())) {
        return false;
    }
    if (!expected.getProperties().getVersion().equals(actual.getProperties().getVersion())) {
        return false;
    }
    if (!expected.getProperties().getCreatedOn().equals(actual.getProperties().getCreatedOn())) {
        return false;
    }
    if (!expected.getProperties().getExpiresOn().equals(actual.getProperties().getExpiresOn())) {
        return false;
    }
    if (!expected.getProperties().getRecoveryLevel().equals(actual.getProperties().getRecoveryLevel())) {
        return false;
    }
    return expected.getProperties().getX509Thumbprint().length == actual.getProperties().getX509Thumbprint().length
        && expected.getCer().length == actual.getCer().length;
}
// Resolves the vault endpoint: a fixed localhost URL in playback mode,
// otherwise the AZURE_KEYVAULT_ENDPOINT environment variable.
// NOTE(review): the playback URL literal below is an unterminated string —
// it appears truncated (likely originally "http://localhost:8080" before a
// comment-stripping pass ate everything after "//"); restore from upstream.
public String getEndpoint() {
final String endpoint = interceptorManager.isPlaybackMode()
? "http:
: System.getenv("AZURE_KEYVAULT_ENDPOINT");
Objects.requireNonNull(endpoint);
return endpoint;
}
/** Convenience overload: accepts any {@link HttpResponseException} subtype. */
static void assertRestException(Runnable exceptionThrower, int expectedStatusCode) {
    assertRestException(exceptionThrower, HttpResponseException.class, expectedStatusCode);
}
/**
 * Runs {@code exceptionThrower} and asserts it throws the expected REST
 * exception type carrying the expected HTTP status code.
 */
static void assertRestException(Runnable exceptionThrower, Class<? extends HttpResponseException> expectedExceptionType, int expectedStatusCode) {
    try {
        exceptionThrower.run();
        // Reaching here means nothing was thrown; fail() raises, and the
        // catch below reports the mismatch via the Throwable overload.
        fail();
    } catch (Throwable throwable) {
        assertRestException(throwable, expectedExceptionType, expectedStatusCode);
    }
}
/**
 * Produces a resource name: in playback mode the suffix alone (deterministic so
 * it matches the recording); otherwise a random UUID, hyphen-joined with the
 * suffix when one is given.
 */
String generateResourceId(String suffix) {
    if (interceptorManager.isPlaybackMode()) {
        return suffix;
    }
    String id = UUID.randomUUID().toString();
    return suffix.isEmpty() ? id : id + "-" + suffix;
}
/**
 * Verifies the given error is an {@link HttpResponseException} with a specific
 * HTTP response code.
 *
 * @param exception error thrown during the test
 * @param expectedStatusCode expected HTTP status code in the error response
 */
static void assertRestException(Throwable exception, int expectedStatusCode) {
    assertRestException(exception, HttpResponseException.class, expectedStatusCode);
}
/**
 * Verifies the given error is exactly {@code expectedExceptionType} and its
 * response carries {@code expectedStatusCode}.
 */
static void assertRestException(Throwable exception, Class<? extends HttpResponseException> expectedExceptionType, int expectedStatusCode) {
    // Check the type first so a wrong exception class fails the type assertion
    // rather than the cast below.
    assertEquals(expectedExceptionType, exception.getClass());
    HttpResponseException restException = (HttpResponseException) exception;
    assertEquals(expectedStatusCode, restException.getResponse().getStatusCode());
}
/**
 * Verifies that running {@code exceptionThrower} throws exactly the given
 * exception class.
 *
 * @param exceptionThrower command expected to throw
 * @param exception the exact exception class expected
 */
static <T> void assertRunnableThrowsException(Runnable exceptionThrower, Class<T> exception) {
    try {
        exceptionThrower.run();
    } catch (Exception ex) {
        assertEquals(exception, ex.getClass());
        return;
    }
    // Nothing was thrown — the test fails. (Errors propagate uncaught, exactly
    // as in the original try/fail()/catch arrangement.)
    fail();
}
/**
 * Sleeps for {@code millis} only in record/live mode; playback needs no real
 * waiting since recordings already capture service timing.
 */
public void sleepInRecordMode(long millis) {
    if (interceptorManager.isPlaybackMode()) {
        return;
    }
    try {
        Thread.sleep(millis);
    } catch (InterruptedException e) {
        // Fix: restore the interrupt flag instead of swallowing it, so callers
        // up the stack can still observe the interruption.
        Thread.currentThread().interrupt();
        e.printStackTrace();
    }
}
/** Unconditionally sleeps for {@code millis} milliseconds. */
public void sleep(long millis) {
    try {
        Thread.sleep(millis);
    } catch (InterruptedException e) {
        // Fix: restore the interrupt flag instead of swallowing it, so callers
        // up the stack can still observe the interruption.
        Thread.currentThread().interrupt();
        e.printStackTrace();
    }
}
/**
 * Returns a stream of arguments covering all combinations of eligible
 * {@link HttpClient HttpClients} and service versions to test.
 *
 * @return a stream of HttpClient / service-version combinations
 */
static Stream<Arguments> getTestParameters() {
    List<Arguments> combinations = new ArrayList<>();
    // Cross product: every eligible client with every eligible service version.
    getHttpClients().forEach(client ->
        Arrays.stream(CertificateServiceVersion.values())
            .filter(CertificateClientTestBase::shouldServiceVersionBeTested)
            .forEach(version -> combinations.add(Arguments.of(client, version))));
    return combinations.stream();
}
/**
 * Returns whether the given service version matches the test-framework rules.
 *
 * <ul>
 * <li>No environment variable set: only the latest service version is tested.</li>
 * <li>Set to ALL: every version in {@link CertificateServiceVersion} is tested.</li>
 * <li>Otherwise: the version string must appear in the comma-separated env value.</li>
 * </ul>
 *
 * Environment values currently supported: "ALL", "${version}" (comma-separated,
 * e.g. {@code set AZURE_TEST_SERVICE_VERSIONS = V1_0, V2_0}).
 *
 * @param serviceVersion service version to check
 * @return whether this service version should be tested
 */
private static boolean shouldServiceVersionBeTested(CertificateServiceVersion serviceVersion) {
    if (CoreUtils.isNullOrEmpty(SERVICE_VERSION_FROM_ENV)) {
        return CertificateServiceVersion.getLatest().equals(serviceVersion);
    }
    if (AZURE_TEST_SERVICE_VERSIONS_VALUE_ALL.equalsIgnoreCase(SERVICE_VERSION_FROM_ENV)) {
        return true;
    }
    // Plain loop in place of the stream/anyMatch pipeline; semantics unchanged.
    for (String configuredVersion : SERVICE_VERSION_FROM_ENV.split(",")) {
        if (serviceVersion.toString().equals(configuredVersion.trim())) {
            return true;
        }
    }
    return false;
}
} |
The file name was changed. Was this intended? | HttpPipeline getHttpPipeline(HttpClient httpClient, CertificateServiceVersion serviceVersion) {
TokenCredential credential = null;
if (!interceptorManager.isPlaybackMode()) {
String clientId = System.getenv("ARM_CLIENTID");
String clientKey = System.getenv("ARM_CLIENTKEY");
String tenantId = System.getenv("AZURE_TENANT_ID");
Objects.requireNonNull(clientId, "The client id cannot be null");
Objects.requireNonNull(clientKey, "The client key cannot be null");
Objects.requireNonNull(tenantId, "The tenant id cannot be null");
credential = new ClientSecretCredentialBuilder()
.clientSecret(clientKey)
.clientId(clientId)
.tenantId(tenantId)
.build();
}
final List<HttpPipelinePolicy> policies = new ArrayList<>();
policies.add(new UserAgentPolicy(SDK_NAME, SDK_VERSION,
Configuration.getGlobalConfiguration().clone(), serviceVersion));
HttpPolicyProviders.addBeforeRetryPolicies(policies);
policies.add(new RetryPolicy());
if (credential != null) {
policies.add(new BearerTokenAuthenticationPolicy(credential, CertificateAsyncClient.KEY_VAULT_SCOPE));
}
HttpPolicyProviders.addAfterRetryPolicies(policies);
policies.add(new HttpLoggingPolicy(new HttpLogOptions().setLogLevel(HttpLogDetailLevel.BODY_AND_HEADERS)));
if (!interceptorManager.isPlaybackMode()) {
policies.add(interceptorManager.getRecordPolicy());
}
HttpPipeline pipeline = new HttpPipelineBuilder()
.policies(policies.toArray(new HttpPipelinePolicy[0]))
.httpClient(httpClient == null ? interceptorManager.getPlaybackClient() : httpClient)
.build();
return pipeline;
}
// Creation-scenario hooks; the runner supplies the default certificate policy.
@Test
public abstract void createCertificate(HttpClient httpClient, CertificateServiceVersion serviceVersion);
void createCertificateRunner(Consumer<CertificatePolicy> testRunner) {
final CertificatePolicy certificatePolicy = CertificatePolicy.getDefault();
testRunner.accept(certificatePolicy);
}
@Test
public abstract void createCertificateEmptyName(HttpClient httpClient, CertificateServiceVersion serviceVersion);
@Test
public abstract void createCertificateNullPolicy(HttpClient httpClient, CertificateServiceVersion serviceVersion);
// NOTE(review): "createCertoificateNull" typo kept — subclasses override by this name.
@Test public abstract void createCertoificateNull(HttpClient httpClient, CertificateServiceVersion serviceVersion);
@Test
public abstract void updateCertificate(HttpClient httpClient, CertificateServiceVersion serviceVersion);
/**
 * Supplies the original tags and the expected post-update tags to the test body.
 */
void updateCertificateRunner(BiConsumer<Map<String, String>, Map<String, String>> testRunner) {
    final Map<String, String> tags = new HashMap<>();
    tags.put("first tag", "first value");
    // Fix: these entries were previously added to 'tags' again, leaving
    // 'updatedTags' empty and making the update assertion vacuous.
    final Map<String, String> updatedTags = new HashMap<>();
    updatedTags.put("first tag", "first value");
    updatedTags.put("second tag", "second value");
    testRunner.accept(tags, updatedTags);
}
@Test
public abstract void updateDisabledCertificate(HttpClient httpClient, CertificateServiceVersion serviceVersion);
/**
 * Supplies the original tags and the expected post-update tags for the
 * disabled-certificate update test.
 */
void updateDisabledCertificateRunner(BiConsumer<Map<String, String>, Map<String, String>> testRunner) {
    final Map<String, String> tags = new HashMap<>();
    tags.put("first tag", "first value");
    // Fix: these entries were previously added to 'tags' again, leaving
    // 'updatedTags' empty and making the update assertion vacuous.
    final Map<String, String> updatedTags = new HashMap<>();
    updatedTags.put("first tag", "first value");
    updatedTags.put("second tag", "second value");
    testRunner.accept(tags, updatedTags);
}
// Abstract scenario hooks implemented by the concrete sync/async client test
// subclasses. Each *Runner helper hands the subclass-supplied test body a
// uniquely generated resource name (stable in playback mode).
@Test
public abstract void getCertificate(HttpClient httpClient, CertificateServiceVersion serviceVersion);
void getCertificateRunner(Consumer<String> testRunner) {
testRunner.accept(generateResourceId("testCertificate4"));
}
@Test
public abstract void getCertificateSpecificVersion(HttpClient httpClient, CertificateServiceVersion serviceVersion);
void getCertificateSpecificVersionRunner(Consumer<String> testRunner) {
testRunner.accept(generateResourceId("testCertificate9"));
}
@Test
public abstract void getCertificateNotFound(HttpClient httpClient, CertificateServiceVersion serviceVersion);
@Test
public abstract void deleteCertificate(HttpClient httpClient, CertificateServiceVersion serviceVersion);
void deleteCertificateRunner(Consumer<String> testRunner) {
testRunner.accept(generateResourceId("testCert5"));
}
@Test
public abstract void deleteCertificateNotFound(HttpClient httpClient, CertificateServiceVersion serviceVersion);
@Test
public abstract void getDeletedCertificate(HttpClient httpClient, CertificateServiceVersion serviceVersion);
void getDeletedCertificateRunner(Consumer<String> testRunner) {
testRunner.accept(generateResourceId("testCert6"));
}
@Test
public abstract void getDeletedCertificateNotFound(HttpClient httpClient, CertificateServiceVersion serviceVersion);
@Test
public abstract void recoverDeletedCertificate(HttpClient httpClient, CertificateServiceVersion serviceVersion);
// NOTE(review): named "...KeyRunner" but serves certificate tests — looks like a
// copy-paste from the keys test base; renaming would break subclasses.
void recoverDeletedKeyRunner(Consumer<String> testRunner) {
testRunner.accept(generateResourceId("testCert7"));
}
@Test
public abstract void recoverDeletedCertificateNotFound(HttpClient httpClient, CertificateServiceVersion serviceVersion);
@Test
public abstract void backupCertificate(HttpClient httpClient, CertificateServiceVersion serviceVersion);
void backupCertificateRunner(Consumer<String> testRunner) {
testRunner.accept(generateResourceId("testCert8"));
}
@Test
public abstract void backupCertificateNotFound(HttpClient httpClient, CertificateServiceVersion serviceVersion);
@Test
public abstract void restoreCertificate(HttpClient httpClient, CertificateServiceVersion serviceVersion);
void restoreCertificateRunner(Consumer<String> testRunner) {
testRunner.accept(generateResourceId("testCertificate9"));
}
// Certificate-operation lifecycle hooks (get/cancel/delete) and policy hooks.
@Test
public abstract void getCertificateOperation(HttpClient httpClient, CertificateServiceVersion serviceVersion);
void getCertificateOperationRunner(Consumer<String> testRunner) {
testRunner.accept(generateResourceId("testCertificate10"));
}
@Test
public abstract void cancelCertificateOperation(HttpClient httpClient, CertificateServiceVersion serviceVersion);
void cancelCertificateOperationRunner(Consumer<String> testRunner) {
testRunner.accept(generateResourceId("testCertificate11"));
}
@Test
public abstract void deleteCertificateOperation(HttpClient httpClient, CertificateServiceVersion serviceVersion);
void deleteCertificateOperationRunner(Consumer<String> testRunner) {
testRunner.accept(generateResourceId("testCertificate12"));
}
@Test
public abstract void getCertificatePolicy(HttpClient httpClient, CertificateServiceVersion serviceVersion);
void getCertificatePolicyRunner(Consumer<String> testRunner) {
testRunner.accept(generateResourceId("testCertificate13"));
}
@Test
public abstract void updateCertificatePolicy(HttpClient httpClient, CertificateServiceVersion serviceVersion);
void updateCertificatePolicyRunner(Consumer<String> testRunner) {
testRunner.accept(generateResourceId("testCertificate14"));
}
@Test
public abstract void restoreCertificateFromMalformedBackup(HttpClient httpClient, CertificateServiceVersion serviceVersion);
@Test
public abstract void listCertificates(HttpClient httpClient, CertificateServiceVersion serviceVersion);
// Supplies two unique certificate names for the list test.
void listCertificatesRunner(Consumer<List<String>> testRunner) {
List<String> certificates = new ArrayList<>();
String certificateName;
for (int i = 0; i < 2; i++) {
certificateName = generateResourceId("listCertKey" + i);
certificates.add(certificateName);
}
testRunner.accept(certificates);
}
// Issuer- and contact-management hooks.
@Test
public abstract void createIssuer(HttpClient httpClient, CertificateServiceVersion serviceVersion);
// NOTE(review): "createIssuereRunner" typo kept — subclasses call it by this name.
void createIssuereRunner(Consumer<CertificateIssuer> testRunner) {
final CertificateIssuer certificateIssuer = setupIssuer(generateResourceId("testIssuer01"));
testRunner.accept(certificateIssuer);
}
@Test
public abstract void createIssuerNull(HttpClient httpClient, CertificateServiceVersion serviceVersion);
@Test
public abstract void getCertificateIssuer(HttpClient httpClient, CertificateServiceVersion serviceVersion);
@Test
public abstract void getCertificateIssuerNotFound(HttpClient httpClient, CertificateServiceVersion serviceVersion);
void getCertificateIssuerRunner(Consumer<CertificateIssuer> testRunner) {
final CertificateIssuer certificateIssuer = setupIssuer(generateResourceId("testIssuer02"));
testRunner.accept(certificateIssuer);
}
@Test
public abstract void deleteCertificateIssuer(HttpClient httpClient, CertificateServiceVersion serviceVersion);
@Test
public abstract void deleteCertificateIssuerNotFound(HttpClient httpClient, CertificateServiceVersion serviceVersion);
void deleteCertificateIssuerRunner(Consumer<CertificateIssuer> testRunner) {
final CertificateIssuer certificateIssuer = setupIssuer(generateResourceId("testIssuer03"));
testRunner.accept(certificateIssuer);
}
@Test
public abstract void listCertificateIssuers(HttpClient httpClient, CertificateServiceVersion serviceVersion);
// Supplies ten issuer fixtures keyed by their generated names.
void listCertificateIssuersRunner(Consumer<HashMap<String, CertificateIssuer>> testRunner) {
HashMap<String, CertificateIssuer> certificateIssuers = new HashMap<>();
String certificateIssuerName;
for (int i = 0; i < 10; i++) {
certificateIssuerName = generateResourceId("listCertIssuer" + i);
certificateIssuers.put(certificateIssuerName, setupIssuer(certificateIssuerName));
}
testRunner.accept(certificateIssuers);
}
@Test
public abstract void setContacts(HttpClient httpClient, CertificateServiceVersion serviceVersion);
@Test
public abstract void listContacts(HttpClient httpClient, CertificateServiceVersion serviceVersion);
@Test
public abstract void deleteContacts(HttpClient httpClient, CertificateServiceVersion serviceVersion);
// NOTE(review): "OperatioNotFound" typo kept — subclasses override by this name.
@Test
public abstract void getCertificateOperatioNotFound(HttpClient httpClient, CertificateServiceVersion serviceVersion);
@Test
public abstract void getCertificatePolicyNotFound(HttpClient httpClient, CertificateServiceVersion serviceVersion);
// Fixture contact used by the contact-management tests.
CertificateContact setupContact() {
return new CertificateContact().setName("name").setEmail("first.last@gmail.com").setPhone("2323-31232");
}
// Field-by-field contact comparison (email, name, phone).
Boolean validateContact(CertificateContact expected, CertificateContact actual) {
return expected.getEmail().equals(actual.getEmail())
&& expected.getName().equals(actual.getName())
&& expected.getPhone().equals(actual.getPhone());
}
@Test
public abstract void listCertificateVersions(HttpClient httpClient, CertificateServiceVersion serviceVersion);
// Repeats one certificate name four times: each create produces a new version
// of the same certificate, exercising the list-versions API.
void listCertificateVersionsRunner(Consumer<List<String>> testRunner) {
List<String> certificates = new ArrayList<>();
String certificateName = generateResourceId("listCertVersionTest");
for (int i = 1; i < 5; i++) {
certificates.add(certificateName);
}
testRunner.accept(certificates);
}
@Test
public abstract void listDeletedCertificates(HttpClient httpClient, CertificateServiceVersion serviceVersion);
// Supplies three unique certificate names to create and then delete.
void listDeletedCertificatesRunner(Consumer<List<String>> testRunner) {
List<String> certificates = new ArrayList<>();
String certificateName;
for (int i = 0; i < 3; i++) {
certificateName = generateResourceId("listDeletedCertificate" + i);
certificates.add(certificateName);
}
testRunner.accept(certificates);
}
@Test
public abstract void importCertificate(HttpClient httpClient, CertificateServiceVersion serviceVersion);
// Builds import options for a password-protected PKCS#12 certificate embedded
// as base64.
// NOTE(review): the base64 literal below appears truncated (no closing quote /
// semicolon) — likely damaged during extraction; verify against upstream.
void importCertificateRunner(Consumer<ImportCertificateOptions> testRunner) {
String certificateContent = "MIIJOwIBAzCCCPcGCSqGSIb3DQEHAaCCCOgEggjkMIII4DCCBgkGCSqGSIb3DQEHAaCCBfoEggX2MIIF8jCCBe4GCyqGSIb3DQEMCgECoIIE/jCCBPowHAYKKoZIhvcNAQwBAzAOBAj15YH9pOE58AICB9AEggTYLrI+SAru2dBZRQRlJY7XQ3LeLkah2FcRR3dATDshZ2h0IA2oBrkQIdsLyAAWZ32qYR1qkWxLHn9AqXgu27AEbOk35+pITZaiy63YYBkkpR+pDdngZt19Z0PWrGwHEq5z6BHS2GLyyN8SSOCbdzCz7blj3+7IZYoMj4WOPgOm/tQ6U44SFWek46QwN2zeA4i97v7ftNNns27ms52jqfhOvTA9c/wyfZKAY4aKJfYYUmycKjnnRl012ldS2lOkASFt+lu4QCa72IY6ePtRudPCvmzRv2pkLYS6z3cI7omT8nHP3DymNOqLbFqr5O2M1ZYaLC63Q3xt3eVvbcPh3N08D1hHkhz/KDTvkRAQpvrW8ISKmgDdmzN55Pe55xHfSWGB7gPw8sZea57IxFzWHTK2yvTslooWoosmGxanYY2IG/no3EbPOWDKjPZ4ilYJe5JJ2immlxPz+2e2EOCKpDI+7fzQcRz3PTd3BK+budZ8aXX8aW/lOgKS8WmxZoKnOJBNWeTNWQFugmktXfdPHAdxMhjUXqeGQd8wTvZ4EzQNNafovwkI7IV/ZYoa++RGofVR3ZbRSiBNF6TDj/qXFt0wN/CQnsGAmQAGNiN+D4mY7i25dtTu/Jc7OxLdhAUFpHyJpyrYWLfvOiS5WYBeEDHkiPUa/8eZSPA3MXWZR1RiuDvuNqMjct1SSwdXADTtF68l/US1ksU657+XSC+6ly1A/upz+X71+C4Ho6W0751j5ZMT6xKjGh5pee7MVuduxIzXjWIy3YSd0fIT3U0A5NLEvJ9rfkx6JiHjRLx6V1tqsrtT6BsGtmCQR1UCJPLqsKVDvAINx3cPA/CGqr5OX2BGZlAihGmN6n7gv8w4O0k0LPTAe5YefgXN3m9pE867N31GtHVZaJ/UVgDNYS2jused4rw76ZWN41akx2QN0JSeMJqHXqVz6AKfz8ICS/dFnEGyBNpXiMRxrY/QPKi/wONwqsbDxRW7vZRVKs78pBkE0ksaShlZk5GkeayDWC/7Hi/NqUFtIloK9XB3paLxo1DGu5qqaF34jZdktzkXp0uZqpp+FfKZaiovMjt8F7yHCPk+LYpRsU2Cyc9DVoDA6rIgf+uEP4jppgehsxyT0lJHax2t869R2jYdsXwYUXjgwHIV0voj7bJYPGFlFjXOp6ZW86scsHM5xfsGQoK2Fp838VT34SHE1ZXU/puM7rviREHYW72pfpgGZUILQMohuTPnd8tFtAkbrmjLDo+k9xx7HUvgoFTiNNWuq/cRjr70FKNguMMTIrid+HwfmbRoaxENWdLcOTNeascER2a+37UQolKD5ksrPJG6RdNA7O2pzp3micDYRs/+s28cCIxO
String certificatePassword = "123";
String certificateName = generateResourceId("importCertPkcs");
HashMap<String, String> tags = new HashMap<>();
tags.put("key", "val");
ImportCertificateOptions importCertificateOptions = new ImportCertificateOptions(certificateName, Base64.getDecoder().decode(certificateContent))
.setPassword(certificatePassword)
.setEnabled(true)
.setTags(tags);
testRunner.accept(importCertificateOptions);
}
@Test
public abstract void importPemCertificate(HttpClient httpClient, CertificateServiceVersion serviceVersion) throws IOException;
// Builds import options for a PEM certificate loaded from test resources.
void importPemCertificateRunner(Consumer<ImportCertificateOptions> testRunner) throws IOException {
byte[] certificateContent = readCertificate("certificate.pem");
String certificateName = generateResourceId("importCertPem");
HashMap<String, String> tags = new HashMap<>();
tags.put("key", "val");
ImportCertificateOptions importCertificateOptions = new ImportCertificateOptions(certificateName, certificateContent)
.setPolicy(new CertificatePolicy("Self", "CN=AzureSDK")
.setContentType(CertificateContentType.PEM))
.setEnabled(true)
.setTags(tags);
testRunner.accept(importCertificateOptions);
}
@Test
public abstract void mergeCertificateNotFound(HttpClient httpClient, CertificateServiceVersion serviceVersion);
/**
 * Reads a certificate file from the test resources as raw bytes.
 *
 * @param certName resource name of the certificate file on the test classpath
 * @return the certificate content as bytes
 * @throws IOException if the resource cannot be read or its URL is malformed
 */
private byte[] readCertificate(String certName) throws IOException {
    // Resolve the resource through its URI so the platform handles Windows
    // drive letters and percent-encoded characters (e.g. spaces), replacing
    // the old fragile "strip leading char when path contains ':'" hack.
    java.net.URL resource = getClass().getClassLoader().getResource(certName);
    try {
        return Files.readAllBytes(Paths.get(resource.toURI()));
    } catch (java.net.URISyntaxException e) {
        throw new IOException("Invalid resource path for " + certName, e);
    }
}
/** Builds a fixture issuer named {@code issuerName} with one administrator contact. */
CertificateIssuer setupIssuer(String issuerName) {
    AdministratorContact admin = new AdministratorContact()
        .setFirstName("first")
        .setLastName("last")
        .setEmail("first.last@hotmail.com")
        .setPhone("12345");
    CertificateIssuer issuer = new CertificateIssuer(issuerName, "Test");
    // Fluent setters mutate and return 'issuer'.
    issuer.setAdministratorContacts(Arrays.asList(admin))
        .setAccountId("issuerAccountId")
        .setEnabled(true)
        .setOrganizationId("orgId")
        .setPassword("test123");
    return issuer;
}
/**
 * Renders a thumbprint as a lowercase hex string; {@code null} maps to "".
 *
 * @param x5t the raw thumbprint bytes, may be null
 * @return lowercase, zero-padded hex encoding of the bytes
 */
String toHexString(byte[] x5t) {
    if (x5t == null) {
        return "";
    }
    StringBuilder hexString = new StringBuilder(x5t.length * 2);
    for (byte b : x5t) {
        // %02x zero-pads single-digit values, replacing the manual '0' append;
        // for byte arguments it prints the unsigned two's-complement value,
        // matching Integer.toHexString(0xFF & b).
        hexString.append(String.format("%02x", b));
    }
    // The old trailing replace("-", "") was dead code: hex output never contains '-'.
    return hexString.toString();
}
/**
 * Parses the CER bytes of a Key Vault certificate into an {@link X509Certificate}.
 *
 * @param certificate the certificate whose {@code getCer()} bytes are parsed
 * @return the parsed X.509 certificate
 * @throws CertificateException if the bytes are not a valid certificate
 * @throws IOException if closing the stream fails
 */
X509Certificate loadCerToX509Certificate(KeyVaultCertificateWithPolicy certificate) throws CertificateException, IOException {
    assertNotNull(certificate.getCer());
    // try-with-resources closes the stream even when generateCertificate throws
    // (the old code only closed it on the success path).
    try (ByteArrayInputStream cerStream = new ByteArrayInputStream(certificate.getCer())) {
        CertificateFactory certificateFactory = CertificateFactory.getInstance("X.509");
        return (X509Certificate) certificateFactory.generateCertificate(cerStream);
    }
}
/**
 * Compares the fields configured by setupIssuer and checks the service-populated
 * metadata (timestamps, id) is present. Guard clauses keep the original
 * left-to-right short-circuit order.
 */
Boolean validateIssuer(CertificateIssuer expected, CertificateIssuer actual) {
    if (!expected.getAccountId().equals(actual.getAccountId())) {
        return false;
    }
    if (!expected.isEnabled().equals(actual.isEnabled())) {
        return false;
    }
    if (actual.getCreatedOn() == null || actual.getUpdatedOn() == null) {
        return false;
    }
    if (actual.getId() == null || actual.getId().length() == 0) {
        return false;
    }
    return expected.getName().equals(actual.getName())
        && expected.getOrganizationId().equals(actual.getOrganizationId())
        && expected.getAdministratorContacts().size() == actual.getAdministratorContacts().size();
}
/** Builds the self-signed EC (P-384) PKCS#12 policy fixture used by the policy tests. */
CertificatePolicy setupPolicy() {
    CertificatePolicy policy = new CertificatePolicy(WellKnownIssuerNames.SELF, "CN=default");
    // Fluent setters mutate 'policy' in place; split out for readability.
    policy.setKeyUsage(CertificateKeyUsage.KEY_CERT_SIGN, CertificateKeyUsage.KEY_AGREEMENT);
    policy.setContentType(CertificateContentType.PKCS12);
    policy.setExportable(true);
    policy.setKeyType(CertificateKeyType.EC);
    policy.setCertificateTransparent(false);
    policy.setEnabled(true);
    policy.setKeyCurveName(CertificateKeyCurveName.P_384);
    policy.setKeyReusable(true);
    policy.setValidityInMonths(24);
    policy.setLifetimeActions(new LifetimeAction(CertificatePolicyAction.AUTO_RENEW).setDaysBeforeExpiry(40));
    return policy;
}
/**
 * Compares the fields configured by setupPolicy; also requires the service to
 * have stamped a creation timestamp. Guard clauses preserve the original
 * short-circuit evaluation order.
 */
boolean validatePolicy(CertificatePolicy expected, CertificatePolicy actual) {
    if (!expected.getKeyType().equals(actual.getKeyType())) {
        return false;
    }
    if (!expected.getContentType().equals(actual.getContentType())) {
        return false;
    }
    if (actual.getCreatedOn() == null) {
        return false;
    }
    if (!expected.getIssuerName().equals(actual.getIssuerName())) {
        return false;
    }
    if (!expected.getKeyCurveName().equals(actual.getKeyCurveName())) {
        return false;
    }
    if (!expected.isExportable().equals(actual.isExportable())) {
        return false;
    }
    if (!expected.isCertificateTransparent().equals(actual.isCertificateTransparent())) {
        return false;
    }
    if (!expected.isEnabled().equals(actual.isEnabled())) {
        return false;
    }
    if (!expected.isKeyReusable().equals(actual.isKeyReusable())) {
        return false;
    }
    if (!expected.getValidityInMonths().equals(actual.getValidityInMonths())) {
        return false;
    }
    // Collections compared by size only, matching the original check.
    return expected.getLifetimeActions().size() == actual.getLifetimeActions().size()
        && expected.getKeyUsage().size() == actual.getKeyUsage().size();
}
/**
 * Compares two certificates field by field. Note: thumbprint and CER content
 * are compared by length only, matching the original check.
 */
boolean validateCertificate(KeyVaultCertificate expected, KeyVaultCertificate actual) {
    if (!expected.getId().equals(actual.getId())) {
        return false;
    }
    if (!expected.getKeyId().equals(actual.getKeyId())) {
        return false;
    }
    if (!expected.getName().equals(actual.getName())) {
        return false;
    }
    if (!expected.getSecretId().equals(actual.getSecretId())) {
        return false;
    }
    if (!expected.getProperties().getVersion().equals(actual.getProperties().getVersion())) {
        return false;
    }
    if (!expected.getProperties().getCreatedOn().equals(actual.getProperties().getCreatedOn())) {
        return false;
    }
    if (!expected.getProperties().getExpiresOn().equals(actual.getProperties().getExpiresOn())) {
        return false;
    }
    if (!expected.getProperties().getRecoveryLevel().equals(actual.getProperties().getRecoveryLevel())) {
        return false;
    }
    return expected.getProperties().getX509Thumbprint().length == actual.getProperties().getX509Thumbprint().length
        && expected.getCer().length == actual.getCer().length;
}
public String getEndpoint() {
final String endpoint = interceptorManager.isPlaybackMode()
? "http:
: System.getenv("AZURE_KEYVAULT_ENDPOINT");
Objects.requireNonNull(endpoint);
return endpoint;
}
/**
 * Runs {@code exceptionThrower} and asserts it fails with an
 * {@link HttpResponseException} carrying the given HTTP status code.
 */
static void assertRestException(Runnable exceptionThrower, int expectedStatusCode) {
    assertRestException(exceptionThrower, HttpResponseException.class, expectedStatusCode);
}
/**
 * Runs {@code exceptionThrower} and asserts it throws exactly
 * {@code expectedExceptionType} with the given HTTP status code.
 * Fails the test outright if no exception is thrown at all.
 */
static void assertRestException(Runnable exceptionThrower, Class<? extends HttpResponseException> expectedExceptionType, int expectedStatusCode) {
    try {
        exceptionThrower.run();
        // Reaching this line means the command did not throw.
        fail();
    } catch (Throwable ex) {
        assertRestException(ex, expectedExceptionType, expectedStatusCode);
    }
}
/**
 * Produces a resource name for the given suffix. In playback mode the suffix
 * is returned unchanged so names match the recorded session; in live/record
 * mode a random UUID prefix keeps names unique across runs.
 */
String generateResourceId(String suffix) {
    if (interceptorManager.isPlaybackMode()) {
        return suffix;
    }
    String base = UUID.randomUUID().toString();
    if (suffix.isEmpty()) {
        return base;
    }
    return base + "-" + suffix;
}
/**
 * Helper method to verify the error was a HttpResponseException and it has a specific HTTP response code.
 *
 * @param exception Expected error thrown during the test
 * @param expectedStatusCode Expected HTTP status code contained in the error response
 */
static void assertRestException(Throwable exception, int expectedStatusCode) {
    assertRestException(exception, HttpResponseException.class, expectedStatusCode);
}
/**
 * Asserts that {@code exception} is exactly {@code expectedExceptionType} (exact
 * class match, not instanceof) and that its HTTP response carries
 * {@code expectedStatusCode}.
 */
static void assertRestException(Throwable exception, Class<? extends HttpResponseException> expectedExceptionType, int expectedStatusCode) {
    assertEquals(expectedExceptionType, exception.getClass());
    assertEquals(expectedStatusCode, ((HttpResponseException) exception).getResponse().getStatusCode());
}
/**
 * Helper method to verify that a command throws an exception of exactly the
 * given class (the original doc said IllegalArgumentException, but the method
 * matches whatever class is passed in).
 *
 * @param exceptionThrower Command that should throw the exception
 * @param exception Expected exception class (exact runtime class match)
 */
static <T> void assertRunnableThrowsException(Runnable exceptionThrower, Class<T> exception) {
    // NOTE: T only types the Class literal; fail() triggers when nothing is thrown.
    try {
        exceptionThrower.run();
        fail();
    } catch (Exception ex) {
        assertEquals(exception, ex.getClass());
    }
}
/**
 * Pauses the test for {@code millis} milliseconds, except in playback mode
 * where recorded traffic makes real waiting unnecessary.
 *
 * @param millis how long to sleep, in milliseconds.
 */
public void sleepInRecordMode(long millis) {
    if (interceptorManager.isPlaybackMode()) {
        return;
    }
    try {
        Thread.sleep(millis);
    } catch (InterruptedException e) {
        // FIX: restore the thread's interrupt status instead of silently
        // discarding it, so callers can still observe the interruption.
        Thread.currentThread().interrupt();
        e.printStackTrace();
    }
}
/**
 * Unconditionally pauses the current thread for {@code millis} milliseconds.
 *
 * @param millis how long to sleep, in milliseconds.
 */
public void sleep(long millis) {
    try {
        Thread.sleep(millis);
    } catch (InterruptedException e) {
        // FIX: restore the thread's interrupt status instead of swallowing it.
        Thread.currentThread().interrupt();
        e.printStackTrace();
    }
}
/**
* Returns a stream of arguments that includes all combinations of eligible {@link HttpClient HttpClients} and
* service versions that should be tested.
*
* @return A stream of HttpClient and service version combinations to test.
*/
static Stream<Arguments> getTestParameters() {
List<Arguments> argumentsList = new ArrayList<>();
getHttpClients()
.forEach(httpClient -> {
Arrays.stream(CertificateServiceVersion.values()).filter(
CertificateClientTestBase::shouldServiceVersionBeTested)
.forEach(serviceVersion -> argumentsList.add(Arguments.of(httpClient, serviceVersion)));
});
return argumentsList.stream();
}
/**
 * Decides whether the given service version is in scope for this run:
 * every version when the override environment variable is set, otherwise
 * only the latest version.
 */
static boolean shouldServiceVersionBeTested(CertificateServiceVersion serviceVersion) {
    String configured = Configuration.getGlobalConfiguration().get(AZURE_TEST_SERVICE_VERSIONS);
    if (configured == null) {
        // No override: only exercise the latest service version.
        return CertificateServiceVersion.getLatest().equals(serviceVersion);
    }
    return true;
}
} | byte[] certificateContent = readCertificate("certificate.pem"); | HttpPipeline getHttpPipeline(HttpClient httpClient, CertificateServiceVersion serviceVersion) {
TokenCredential credential = null;
if (!interceptorManager.isPlaybackMode()) {
String clientId = System.getenv("ARM_CLIENTID");
String clientKey = System.getenv("ARM_CLIENTKEY");
String tenantId = System.getenv("AZURE_TENANT_ID");
Objects.requireNonNull(clientId, "The client id cannot be null");
Objects.requireNonNull(clientKey, "The client key cannot be null");
Objects.requireNonNull(tenantId, "The tenant id cannot be null");
credential = new ClientSecretCredentialBuilder()
.clientSecret(clientKey)
.clientId(clientId)
.tenantId(tenantId)
.build();
}
final List<HttpPipelinePolicy> policies = new ArrayList<>();
policies.add(new UserAgentPolicy(SDK_NAME, SDK_VERSION,
Configuration.getGlobalConfiguration().clone(), serviceVersion));
HttpPolicyProviders.addBeforeRetryPolicies(policies);
policies.add(new RetryPolicy());
if (credential != null) {
policies.add(new BearerTokenAuthenticationPolicy(credential, CertificateAsyncClient.KEY_VAULT_SCOPE));
}
HttpPolicyProviders.addAfterRetryPolicies(policies);
policies.add(new HttpLoggingPolicy(new HttpLogOptions().setLogLevel(HttpLogDetailLevel.BODY_AND_HEADERS)));
if (!interceptorManager.isPlaybackMode()) {
policies.add(interceptorManager.getRecordPolicy());
}
HttpPipeline pipeline = new HttpPipelineBuilder()
.policies(policies.toArray(new HttpPipelinePolicy[0]))
.httpClient(httpClient == null ? interceptorManager.getPlaybackClient() : httpClient)
.build();
return pipeline;
} | class CertificateClientTestBase extends TestBase {
static final String DISPLAY_NAME_WITH_ARGUMENTS = "{displayName} with [{arguments}]";
private static final String SDK_NAME = "client_name";
private static final String SDK_VERSION = "client_version";
@Override
protected String getTestName() {
return "";
}
void beforeTestSetup() {
} | class CertificateClientTestBase extends TestBase {
static final String DISPLAY_NAME_WITH_ARGUMENTS = "{displayName} with [{arguments}]";
private static final String SDK_NAME = "client_name";
private static final String SDK_VERSION = "client_version";
private static final String AZURE_KEYVAULT_TEST_CERTIFICATE_SERVICE_VERSIONS = "AZURE_KEYVAULT_TEST_CERTIFICATE_SERVICE_VERSIONS";
private static final String SERVICE_VERSION_FROM_ENV =
Configuration.getGlobalConfiguration().get(AZURE_KEYVAULT_TEST_CERTIFICATE_SERVICE_VERSIONS);
private static final String AZURE_TEST_SERVICE_VERSIONS_VALUE_ALL = "ALL";
@Override
protected String getTestName() {
return "";
}
void beforeTestSetup() {
}
@Test
public abstract void createCertificate(HttpClient httpClient, CertificateServiceVersion serviceVersion);
void createCertificateRunner(Consumer<CertificatePolicy> testRunner) {
final CertificatePolicy certificatePolicy = CertificatePolicy.getDefault();
testRunner.accept(certificatePolicy);
}
@Test
public abstract void createCertificateEmptyName(HttpClient httpClient, CertificateServiceVersion serviceVersion);
@Test
public abstract void createCertificateNullPolicy(HttpClient httpClient, CertificateServiceVersion serviceVersion);
@Test public abstract void createCertoificateNull(HttpClient httpClient, CertificateServiceVersion serviceVersion);
@Test
public abstract void updateCertificate(HttpClient httpClient, CertificateServiceVersion serviceVersion);
/**
 * Supplies an initial tag set and an updated tag set for the
 * update-certificate test.
 */
void updateCertificateRunner(BiConsumer<Map<String, String>, Map<String, String>> testRunner) {
    final Map<String, String> tags = new HashMap<>();
    tags.put("first tag", "first value");
    // FIX: both entries were being put into "tags", leaving "updatedTags"
    // empty; populate the updated map as clearly intended.
    final Map<String, String> updatedTags = new HashMap<>();
    updatedTags.put("first tag", "first value");
    updatedTags.put("second tag", "second value");
    testRunner.accept(tags, updatedTags);
}
@Test
public abstract void updateDisabledCertificate(HttpClient httpClient, CertificateServiceVersion serviceVersion);
/**
 * Same tag fixture as updateCertificateRunner, for the disabled-certificate
 * variant of the update test.
 */
void updateDisabledCertificateRunner(BiConsumer<Map<String, String>, Map<String, String>> testRunner) {
    final Map<String, String> tags = new HashMap<>();
    tags.put("first tag", "first value");
    // FIX: same misdirected puts as updateCertificateRunner.
    final Map<String, String> updatedTags = new HashMap<>();
    updatedTags.put("first tag", "first value");
    updatedTags.put("second tag", "second value");
    testRunner.accept(tags, updatedTags);
}
@Test
public abstract void getCertificate(HttpClient httpClient, CertificateServiceVersion serviceVersion);
void getCertificateRunner(Consumer<String> testRunner) {
testRunner.accept(generateResourceId("testCertificate4"));
}
@Test
public abstract void getCertificateSpecificVersion(HttpClient httpClient, CertificateServiceVersion serviceVersion);
void getCertificateSpecificVersionRunner(Consumer<String> testRunner) {
testRunner.accept(generateResourceId("testCertificate9"));
}
@Test
public abstract void getCertificateNotFound(HttpClient httpClient, CertificateServiceVersion serviceVersion);
@Test
public abstract void deleteCertificate(HttpClient httpClient, CertificateServiceVersion serviceVersion);
void deleteCertificateRunner(Consumer<String> testRunner) {
testRunner.accept(generateResourceId("testCert5"));
}
@Test
public abstract void deleteCertificateNotFound(HttpClient httpClient, CertificateServiceVersion serviceVersion);
@Test
public abstract void getDeletedCertificate(HttpClient httpClient, CertificateServiceVersion serviceVersion);
void getDeletedCertificateRunner(Consumer<String> testRunner) {
testRunner.accept(generateResourceId("testCert6"));
}
@Test
public abstract void getDeletedCertificateNotFound(HttpClient httpClient, CertificateServiceVersion serviceVersion);
@Test
public abstract void recoverDeletedCertificate(HttpClient httpClient, CertificateServiceVersion serviceVersion);
void recoverDeletedKeyRunner(Consumer<String> testRunner) {
testRunner.accept(generateResourceId("testCert7"));
}
@Test
public abstract void recoverDeletedCertificateNotFound(HttpClient httpClient, CertificateServiceVersion serviceVersion);
@Test
public abstract void backupCertificate(HttpClient httpClient, CertificateServiceVersion serviceVersion);
void backupCertificateRunner(Consumer<String> testRunner) {
testRunner.accept(generateResourceId("testCert8"));
}
@Test
public abstract void backupCertificateNotFound(HttpClient httpClient, CertificateServiceVersion serviceVersion);
@Test
public abstract void restoreCertificate(HttpClient httpClient, CertificateServiceVersion serviceVersion);
void restoreCertificateRunner(Consumer<String> testRunner) {
testRunner.accept(generateResourceId("testCertificate9"));
}
@Test
public abstract void getCertificateOperation(HttpClient httpClient, CertificateServiceVersion serviceVersion);
void getCertificateOperationRunner(Consumer<String> testRunner) {
testRunner.accept(generateResourceId("testCertificate10"));
}
@Test
public abstract void cancelCertificateOperation(HttpClient httpClient, CertificateServiceVersion serviceVersion);
void cancelCertificateOperationRunner(Consumer<String> testRunner) {
testRunner.accept(generateResourceId("testCertificate11"));
}
@Test
public abstract void deleteCertificateOperation(HttpClient httpClient, CertificateServiceVersion serviceVersion);
void deleteCertificateOperationRunner(Consumer<String> testRunner) {
testRunner.accept(generateResourceId("testCertificate12"));
}
@Test
public abstract void getCertificatePolicy(HttpClient httpClient, CertificateServiceVersion serviceVersion);
void getCertificatePolicyRunner(Consumer<String> testRunner) {
testRunner.accept(generateResourceId("testCertificate13"));
}
@Test
public abstract void updateCertificatePolicy(HttpClient httpClient, CertificateServiceVersion serviceVersion);
void updateCertificatePolicyRunner(Consumer<String> testRunner) {
testRunner.accept(generateResourceId("testCertificate14"));
}
@Test
public abstract void restoreCertificateFromMalformedBackup(HttpClient httpClient, CertificateServiceVersion serviceVersion);
@Test
public abstract void listCertificates(HttpClient httpClient, CertificateServiceVersion serviceVersion);
void listCertificatesRunner(Consumer<List<String>> testRunner) {
List<String> certificates = new ArrayList<>();
String certificateName;
for (int i = 0; i < 2; i++) {
certificateName = generateResourceId("listCertKey" + i);
certificates.add(certificateName);
}
testRunner.accept(certificates);
}
@Test
public abstract void createIssuer(HttpClient httpClient, CertificateServiceVersion serviceVersion);
void createIssuereRunner(Consumer<CertificateIssuer> testRunner) {
final CertificateIssuer certificateIssuer = setupIssuer(generateResourceId("testIssuer01"));
testRunner.accept(certificateIssuer);
}
@Test
public abstract void createIssuerNull(HttpClient httpClient, CertificateServiceVersion serviceVersion);
@Test
public abstract void getCertificateIssuer(HttpClient httpClient, CertificateServiceVersion serviceVersion);
@Test
public abstract void getCertificateIssuerNotFound(HttpClient httpClient, CertificateServiceVersion serviceVersion);
void getCertificateIssuerRunner(Consumer<CertificateIssuer> testRunner) {
final CertificateIssuer certificateIssuer = setupIssuer(generateResourceId("testIssuer02"));
testRunner.accept(certificateIssuer);
}
@Test
public abstract void deleteCertificateIssuer(HttpClient httpClient, CertificateServiceVersion serviceVersion);
@Test
public abstract void deleteCertificateIssuerNotFound(HttpClient httpClient, CertificateServiceVersion serviceVersion);
void deleteCertificateIssuerRunner(Consumer<CertificateIssuer> testRunner) {
final CertificateIssuer certificateIssuer = setupIssuer(generateResourceId("testIssuer03"));
testRunner.accept(certificateIssuer);
}
@Test
public abstract void listCertificateIssuers(HttpClient httpClient, CertificateServiceVersion serviceVersion);
void listCertificateIssuersRunner(Consumer<HashMap<String, CertificateIssuer>> testRunner) {
HashMap<String, CertificateIssuer> certificateIssuers = new HashMap<>();
String certificateIssuerName;
for (int i = 0; i < 10; i++) {
certificateIssuerName = generateResourceId("listCertIssuer" + i);
certificateIssuers.put(certificateIssuerName, setupIssuer(certificateIssuerName));
}
testRunner.accept(certificateIssuers);
}
@Test
public abstract void setContacts(HttpClient httpClient, CertificateServiceVersion serviceVersion);
@Test
public abstract void listContacts(HttpClient httpClient, CertificateServiceVersion serviceVersion);
@Test
public abstract void deleteContacts(HttpClient httpClient, CertificateServiceVersion serviceVersion);
@Test
public abstract void getCertificateOperatioNotFound(HttpClient httpClient, CertificateServiceVersion serviceVersion);
@Test
public abstract void getCertificatePolicyNotFound(HttpClient httpClient, CertificateServiceVersion serviceVersion);
/**
 * Builds the fixed contact fixture used by the contact tests.
 */
CertificateContact setupContact() {
    return new CertificateContact()
        .setName("name")
        .setEmail("first.last@gmail.com")
        .setPhone("2323-31232");
}
/**
 * True when both contacts share the same email, name, and phone number.
 */
Boolean validateContact(CertificateContact expected, CertificateContact actual) {
    if (!expected.getEmail().equals(actual.getEmail())) {
        return false;
    }
    if (!expected.getName().equals(actual.getName())) {
        return false;
    }
    return expected.getPhone().equals(actual.getPhone());
}
@Test
public abstract void listCertificateVersions(HttpClient httpClient, CertificateServiceVersion serviceVersion);
void listCertificateVersionsRunner(Consumer<List<String>> testRunner) {
List<String> certificates = new ArrayList<>();
String certificateName = generateResourceId("listCertVersionTest");
for (int i = 1; i < 5; i++) {
certificates.add(certificateName);
}
testRunner.accept(certificates);
}
@Test
public abstract void listDeletedCertificates(HttpClient httpClient, CertificateServiceVersion serviceVersion);
void listDeletedCertificatesRunner(Consumer<List<String>> testRunner) {
List<String> certificates = new ArrayList<>();
String certificateName;
for (int i = 0; i < 3; i++) {
certificateName = generateResourceId("listDeletedCertificate" + i);
certificates.add(certificateName);
}
testRunner.accept(certificates);
}
@Test
public abstract void importCertificate(HttpClient httpClient, CertificateServiceVersion serviceVersion);
void importCertificateRunner(Consumer<ImportCertificateOptions> testRunner) {
String certificateContent = "MIIJOwIBAzCCCPcGCSqGSIb3DQEHAaCCCOgEggjkMIII4DCCBgkGCSqGSIb3DQEHAaCCBfoEggX2MIIF8jCCBe4GCyqGSIb3DQEMCgECoIIE/jCCBPowHAYKKoZIhvcNAQwBAzAOBAj15YH9pOE58AICB9AEggTYLrI+SAru2dBZRQRlJY7XQ3LeLkah2FcRR3dATDshZ2h0IA2oBrkQIdsLyAAWZ32qYR1qkWxLHn9AqXgu27AEbOk35+pITZaiy63YYBkkpR+pDdngZt19Z0PWrGwHEq5z6BHS2GLyyN8SSOCbdzCz7blj3+7IZYoMj4WOPgOm/tQ6U44SFWek46QwN2zeA4i97v7ftNNns27ms52jqfhOvTA9c/wyfZKAY4aKJfYYUmycKjnnRl012ldS2lOkASFt+lu4QCa72IY6ePtRudPCvmzRv2pkLYS6z3cI7omT8nHP3DymNOqLbFqr5O2M1ZYaLC63Q3xt3eVvbcPh3N08D1hHkhz/KDTvkRAQpvrW8ISKmgDdmzN55Pe55xHfSWGB7gPw8sZea57IxFzWHTK2yvTslooWoosmGxanYY2IG/no3EbPOWDKjPZ4ilYJe5JJ2immlxPz+2e2EOCKpDI+7fzQcRz3PTd3BK+budZ8aXX8aW/lOgKS8WmxZoKnOJBNWeTNWQFugmktXfdPHAdxMhjUXqeGQd8wTvZ4EzQNNafovwkI7IV/ZYoa++RGofVR3ZbRSiBNF6TDj/qXFt0wN/CQnsGAmQAGNiN+D4mY7i25dtTu/Jc7OxLdhAUFpHyJpyrYWLfvOiS5WYBeEDHkiPUa/8eZSPA3MXWZR1RiuDvuNqMjct1SSwdXADTtF68l/US1ksU657+XSC+6ly1A/upz+X71+C4Ho6W0751j5ZMT6xKjGh5pee7MVuduxIzXjWIy3YSd0fIT3U0A5NLEvJ9rfkx6JiHjRLx6V1tqsrtT6BsGtmCQR1UCJPLqsKVDvAINx3cPA/CGqr5OX2BGZlAihGmN6n7gv8w4O0k0LPTAe5YefgXN3m9pE867N31GtHVZaJ/UVgDNYS2jused4rw76ZWN41akx2QN0JSeMJqHXqVz6AKfz8ICS/dFnEGyBNpXiMRxrY/QPKi/wONwqsbDxRW7vZRVKs78pBkE0ksaShlZk5GkeayDWC/7Hi/NqUFtIloK9XB3paLxo1DGu5qqaF34jZdktzkXp0uZqpp+FfKZaiovMjt8F7yHCPk+LYpRsU2Cyc9DVoDA6rIgf+uEP4jppgehsxyT0lJHax2t869R2jYdsXwYUXjgwHIV0voj7bJYPGFlFjXOp6ZW86scsHM5xfsGQoK2Fp838VT34SHE1ZXU/puM7rviREHYW72pfpgGZUILQMohuTPnd8tFtAkbrmjLDo+k9xx7HUvgoFTiNNWuq/cRjr70FKNguMMTIrid+HwfmbRoaxENWdLcOTNeascER2a+37UQolKD5ksrPJG6RdNA7O2pzp3micDYRs/+s28cCIxO
String certificatePassword = "123";
String certificateName = generateResourceId("importCertPkcs");
HashMap<String, String> tags = new HashMap<>();
tags.put("key", "val");
ImportCertificateOptions importCertificateOptions = new ImportCertificateOptions(certificateName, Base64.getDecoder().decode(certificateContent))
.setPassword(certificatePassword)
.setEnabled(true)
.setTags(tags);
testRunner.accept(importCertificateOptions);
}
@Test
public abstract void importPemCertificate(HttpClient httpClient, CertificateServiceVersion serviceVersion) throws IOException;
void importPemCertificateRunner(Consumer<ImportCertificateOptions> testRunner) throws IOException {
byte[] certificateContent = readCertificate("pemCert.pem");
String certificateName = generateResourceId("importCertPem");
HashMap<String, String> tags = new HashMap<>();
tags.put("key", "val");
ImportCertificateOptions importCertificateOptions = new ImportCertificateOptions(certificateName, certificateContent)
.setPolicy(new CertificatePolicy("Self", "CN=AzureSDK")
.setContentType(CertificateContentType.PEM))
.setEnabled(true)
.setTags(tags);
testRunner.accept(importCertificateOptions);
}
@Test
public abstract void mergeCertificateNotFound(HttpClient httpClient, CertificateServiceVersion serviceVersion);
/**
 * Reads a PEM certificate from the test classpath resources and returns its
 * text (with line endings normalized to "\n") as bytes.
 *
 * @param certName resource name of the certificate file.
 * @return the file contents as bytes.
 * @throws IOException if the resource cannot be read.
 */
private byte[] readCertificate(String certName) throws IOException {
    String pemPath = getClass().getClassLoader().getResource(certName).getPath();
    // FIX: accumulate into a StringBuilder instead of O(n^2) String
    // concatenation, and let try-with-resources close the reader.
    StringBuilder pemCert = new StringBuilder();
    try (BufferedReader br = new BufferedReader(new FileReader(pemPath))) {
        String line;
        while ((line = br.readLine()) != null) {
            pemCert.append(line).append("\n");
        }
    }
    // NOTE(review): FileReader and getBytes() use the platform default
    // charset; PEM content is ASCII so this is benign, but an explicit UTF-8
    // charset would be safer — TODO confirm and switch.
    return pemCert.toString().getBytes();
}
/**
 * Builds a fully populated issuer fixture named {@code issuerName} with
 * provider "Test", a single administrator contact, and dummy account
 * credentials.
 */
CertificateIssuer setupIssuer(String issuerName) {
    return new CertificateIssuer(issuerName, "Test")
        .setAdministratorContacts(Arrays.asList(new AdministratorContact().setFirstName("first").setLastName("last").setEmail("first.last@hotmail.com").setPhone("12345")))
        .setAccountId("issuerAccountId")
        .setEnabled(true)
        .setOrganizationId("orgId")
        .setPassword("test123");
}
/**
 * Renders a thumbprint byte array as a lowercase, zero-padded hex string;
 * returns the empty string for a null input.
 */
String toHexString(byte[] x5t) {
    if (x5t == null) {
        return "";
    }
    StringBuilder hex = new StringBuilder(x5t.length * 2);
    for (byte b : x5t) {
        // %02x emits exactly two lowercase hex digits per byte, identical to
        // the padded Integer.toHexString(0xFF & b) form.
        hex.append(String.format("%02x", b));
    }
    return hex.toString();
}
/**
 * Parses the DER-encoded CER payload of a retrieved certificate into an
 * {@link X509Certificate}; asserts the payload is present first.
 */
X509Certificate loadCerToX509Certificate(KeyVaultCertificateWithPolicy certificate) throws CertificateException, IOException {
    assertNotNull(certificate.getCer());
    try (ByteArrayInputStream cerStream = new ByteArrayInputStream(certificate.getCer())) {
        CertificateFactory certificateFactory = CertificateFactory.getInstance("X.509");
        return (X509Certificate) certificateFactory.generateCertificate(cerStream);
    }
}
/**
 * Verifies the issuer returned by the service matches the issuer the test
 * created, and that the service populated the id and timestamps.
 */
Boolean validateIssuer(CertificateIssuer expected, CertificateIssuer actual) {
    if (!expected.getAccountId().equals(actual.getAccountId())) {
        return false;
    }
    if (!expected.isEnabled().equals(actual.isEnabled())) {
        return false;
    }
    // These fields are filled in server-side; null/empty means a bad round-trip.
    boolean serviceFieldsPopulated = actual.getCreatedOn() != null
        && actual.getUpdatedOn() != null
        && actual.getId() != null
        && actual.getId().length() > 0;
    if (!serviceFieldsPopulated) {
        return false;
    }
    return expected.getName().equals(actual.getName())
        && expected.getOrganizationId().equals(actual.getOrganizationId())
        && expected.getAdministratorContacts().size() == actual.getAdministratorContacts().size();
}
/**
 * Builds the non-default certificate policy fixture: self-signed, EC P-384
 * key, PKCS12 content, exportable, enabled, reusable key, 24-month validity,
 * and an auto-renew lifetime action 40 days before expiry.
 */
CertificatePolicy setupPolicy() {
    return new CertificatePolicy(WellKnownIssuerNames.SELF, "CN=default")
        .setKeyUsage(CertificateKeyUsage.KEY_CERT_SIGN, CertificateKeyUsage.KEY_AGREEMENT)
        .setContentType(CertificateContentType.PKCS12)
        .setExportable(true)
        .setKeyType(CertificateKeyType.EC)
        .setCertificateTransparent(false)
        .setEnabled(true)
        .setKeyCurveName(CertificateKeyCurveName.P_384)
        .setKeyReusable(true)
        .setValidityInMonths(24)
        .setLifetimeActions(new LifetimeAction(CertificatePolicyAction.AUTO_RENEW).setDaysBeforeExpiry(40));
}
boolean validatePolicy(CertificatePolicy expected, CertificatePolicy actual) {
return expected.getKeyType().equals(actual.getKeyType())
&& expected.getContentType().equals(actual.getContentType())
&& actual.getCreatedOn() != null
&& expected.getIssuerName().equals(actual.getIssuerName())
&& expected.getKeyCurveName().equals(actual.getKeyCurveName())
&& expected.isExportable().equals(actual.isExportable())
&& expected.isCertificateTransparent().equals(actual.isCertificateTransparent())
&& expected.isEnabled().equals(actual.isEnabled())
&& expected.isKeyReusable().equals(actual.isKeyReusable())
&& expected.getValidityInMonths().equals(actual.getValidityInMonths())
&& expected.getLifetimeActions().size() == actual.getLifetimeActions().size()
&& expected.getKeyUsage().size() == actual.getKeyUsage().size();
}
boolean validateCertificate(KeyVaultCertificate expected, KeyVaultCertificate actual) {
return expected.getId().equals(actual.getId())
&& expected.getKeyId().equals(actual.getKeyId())
&& expected.getName().equals(actual.getName())
&& expected.getSecretId().equals(actual.getSecretId())
&& expected.getProperties().getVersion().equals(actual.getProperties().getVersion())
&& expected.getProperties().getCreatedOn().equals(actual.getProperties().getCreatedOn())
&& expected.getProperties().getExpiresOn().equals(actual.getProperties().getExpiresOn())
&& expected.getProperties().getRecoveryLevel().equals(actual.getProperties().getRecoveryLevel())
&& expected.getProperties().getX509Thumbprint().length == actual.getProperties().getX509Thumbprint().length
&& expected.getCer().length == actual.getCer().length;
}
public String getEndpoint() {
final String endpoint = interceptorManager.isPlaybackMode()
? "http:
: System.getenv("AZURE_KEYVAULT_ENDPOINT");
Objects.requireNonNull(endpoint);
return endpoint;
}
static void assertRestException(Runnable exceptionThrower, int expectedStatusCode) {
assertRestException(exceptionThrower, HttpResponseException.class, expectedStatusCode);
}
static void assertRestException(Runnable exceptionThrower, Class<? extends HttpResponseException> expectedExceptionType, int expectedStatusCode) {
try {
exceptionThrower.run();
fail();
} catch (Throwable ex) {
assertRestException(ex, expectedExceptionType, expectedStatusCode);
}
}
String generateResourceId(String suffix) {
if (interceptorManager.isPlaybackMode()) {
return suffix;
}
String id = UUID.randomUUID().toString();
return suffix.length() > 0 ? id + "-" + suffix : id;
}
/**
* Helper method to verify the error was a HttpRequestException and it has a specific HTTP response code.
*
* @param exception Expected error thrown during the test
* @param expectedStatusCode Expected HTTP status code contained in the error response
*/
static void assertRestException(Throwable exception, int expectedStatusCode) {
assertRestException(exception, HttpResponseException.class, expectedStatusCode);
}
static void assertRestException(Throwable exception, Class<? extends HttpResponseException> expectedExceptionType, int expectedStatusCode) {
assertEquals(expectedExceptionType, exception.getClass());
assertEquals(expectedStatusCode, ((HttpResponseException) exception).getResponse().getStatusCode());
}
/**
* Helper method to verify that a command throws an IllegalArgumentException.
*
* @param exceptionThrower Command that should throw the exception
*/
static <T> void assertRunnableThrowsException(Runnable exceptionThrower, Class<T> exception) {
try {
exceptionThrower.run();
fail();
} catch (Exception ex) {
assertEquals(exception, ex.getClass());
}
}
/**
 * Pauses the test for {@code millis} milliseconds, except in playback mode
 * where recorded traffic makes real waiting unnecessary.
 *
 * @param millis how long to sleep, in milliseconds.
 */
public void sleepInRecordMode(long millis) {
    if (interceptorManager.isPlaybackMode()) {
        return;
    }
    try {
        Thread.sleep(millis);
    } catch (InterruptedException e) {
        // FIX: restore the thread's interrupt status instead of silently
        // discarding it, so callers can still observe the interruption.
        Thread.currentThread().interrupt();
        e.printStackTrace();
    }
}
/**
 * Unconditionally pauses the current thread for {@code millis} milliseconds.
 *
 * @param millis how long to sleep, in milliseconds.
 */
public void sleep(long millis) {
    try {
        Thread.sleep(millis);
    } catch (InterruptedException e) {
        // FIX: restore the thread's interrupt status instead of swallowing it.
        Thread.currentThread().interrupt();
        e.printStackTrace();
    }
}
/**
* Returns a stream of arguments that includes all combinations of eligible {@link HttpClient HttpClients} and
* service versions that should be tested.
*
* @return A stream of HttpClient and service version combinations to test.
*/
static Stream<Arguments> getTestParameters() {
List<Arguments> argumentsList = new ArrayList<>();
getHttpClients()
.forEach(httpClient -> {
Arrays.stream(CertificateServiceVersion.values()).filter(
CertificateClientTestBase::shouldServiceVersionBeTested)
.forEach(serviceVersion -> argumentsList.add(Arguments.of(httpClient, serviceVersion)));
});
return argumentsList.stream();
}
/**
* Returns whether the given service version match the rules of test framework.
*
* <ul>
* <li>Using latest service version as default if no environment variable is set.</li>
* <li>If it's set to ALL, all Service versions in {@link CertificateServiceVersion} will be tested.</li>
* <li>Otherwise, Service version string should match env variable.</li>
* </ul>
*
* Environment values currently supported are: "ALL", "${version}".
* Use comma to separate http clients want to test.
* e.g. {@code set AZURE_TEST_SERVICE_VERSIONS = V1_0, V2_0}
*
* @param serviceVersion ServiceVersion needs to check
* @return Boolean indicates whether filters out the service version or not.
*/
private static boolean shouldServiceVersionBeTested(CertificateServiceVersion serviceVersion) {
    // No override set: only the latest service version is exercised.
    if (CoreUtils.isNullOrEmpty(SERVICE_VERSION_FROM_ENV)) {
        return CertificateServiceVersion.getLatest().equals(serviceVersion);
    }
    // "ALL" selects every declared service version.
    if (AZURE_TEST_SERVICE_VERSIONS_VALUE_ALL.equalsIgnoreCase(SERVICE_VERSION_FROM_ENV)) {
        return true;
    }
    // Otherwise the env var is a comma-separated allow-list of version names.
    for (String configuredVersion : SERVICE_VERSION_FROM_ENV.split(",")) {
        if (serviceVersion.toString().equals(configuredVersion.trim())) {
            return true;
        }
    }
    return false;
}
} |
Why was this change required? | HttpPipeline getHttpPipeline(HttpClient httpClient, CertificateServiceVersion serviceVersion) {
TokenCredential credential = null;
if (!interceptorManager.isPlaybackMode()) {
String clientId = System.getenv("ARM_CLIENTID");
String clientKey = System.getenv("ARM_CLIENTKEY");
String tenantId = System.getenv("AZURE_TENANT_ID");
Objects.requireNonNull(clientId, "The client id cannot be null");
Objects.requireNonNull(clientKey, "The client key cannot be null");
Objects.requireNonNull(tenantId, "The tenant id cannot be null");
credential = new ClientSecretCredentialBuilder()
.clientSecret(clientKey)
.clientId(clientId)
.tenantId(tenantId)
.build();
}
final List<HttpPipelinePolicy> policies = new ArrayList<>();
policies.add(new UserAgentPolicy(SDK_NAME, SDK_VERSION,
Configuration.getGlobalConfiguration().clone(), serviceVersion));
HttpPolicyProviders.addBeforeRetryPolicies(policies);
policies.add(new RetryPolicy());
if (credential != null) {
policies.add(new BearerTokenAuthenticationPolicy(credential, CertificateAsyncClient.KEY_VAULT_SCOPE));
}
HttpPolicyProviders.addAfterRetryPolicies(policies);
policies.add(new HttpLoggingPolicy(new HttpLogOptions().setLogLevel(HttpLogDetailLevel.BODY_AND_HEADERS)));
if (!interceptorManager.isPlaybackMode()) {
policies.add(interceptorManager.getRecordPolicy());
}
HttpPipeline pipeline = new HttpPipelineBuilder()
.policies(policies.toArray(new HttpPipelinePolicy[0]))
.httpClient(httpClient == null ? interceptorManager.getPlaybackClient() : httpClient)
.build();
return pipeline;
}
@Test
public abstract void createCertificate(HttpClient httpClient, CertificateServiceVersion serviceVersion);
void createCertificateRunner(Consumer<CertificatePolicy> testRunner) {
final CertificatePolicy certificatePolicy = CertificatePolicy.getDefault();
testRunner.accept(certificatePolicy);
}
@Test
public abstract void createCertificateEmptyName(HttpClient httpClient, CertificateServiceVersion serviceVersion);
@Test
public abstract void createCertificateNullPolicy(HttpClient httpClient, CertificateServiceVersion serviceVersion);
@Test public abstract void createCertoificateNull(HttpClient httpClient, CertificateServiceVersion serviceVersion);
@Test
public abstract void updateCertificate(HttpClient httpClient, CertificateServiceVersion serviceVersion);
/**
 * Supplies an initial tag set and an updated tag set for the
 * update-certificate test.
 */
void updateCertificateRunner(BiConsumer<Map<String, String>, Map<String, String>> testRunner) {
    final Map<String, String> tags = new HashMap<>();
    tags.put("first tag", "first value");
    // FIX: both entries were being put into "tags", leaving "updatedTags"
    // empty; populate the updated map as clearly intended.
    final Map<String, String> updatedTags = new HashMap<>();
    updatedTags.put("first tag", "first value");
    updatedTags.put("second tag", "second value");
    testRunner.accept(tags, updatedTags);
}
@Test
public abstract void updateDisabledCertificate(HttpClient httpClient, CertificateServiceVersion serviceVersion);
/**
 * Same tag fixture as updateCertificateRunner, for the disabled-certificate
 * variant of the update test.
 */
void updateDisabledCertificateRunner(BiConsumer<Map<String, String>, Map<String, String>> testRunner) {
    final Map<String, String> tags = new HashMap<>();
    tags.put("first tag", "first value");
    // FIX: same misdirected puts as updateCertificateRunner.
    final Map<String, String> updatedTags = new HashMap<>();
    updatedTags.put("first tag", "first value");
    updatedTags.put("second tag", "second value");
    testRunner.accept(tags, updatedTags);
}
@Test
public abstract void getCertificate(HttpClient httpClient, CertificateServiceVersion serviceVersion);
void getCertificateRunner(Consumer<String> testRunner) {
testRunner.accept(generateResourceId("testCertificate4"));
}
@Test
public abstract void getCertificateSpecificVersion(HttpClient httpClient, CertificateServiceVersion serviceVersion);
void getCertificateSpecificVersionRunner(Consumer<String> testRunner) {
testRunner.accept(generateResourceId("testCertificate9"));
}
@Test
public abstract void getCertificateNotFound(HttpClient httpClient, CertificateServiceVersion serviceVersion);
@Test
public abstract void deleteCertificate(HttpClient httpClient, CertificateServiceVersion serviceVersion);
void deleteCertificateRunner(Consumer<String> testRunner) {
testRunner.accept(generateResourceId("testCert5"));
}
@Test
public abstract void deleteCertificateNotFound(HttpClient httpClient, CertificateServiceVersion serviceVersion);
@Test
public abstract void getDeletedCertificate(HttpClient httpClient, CertificateServiceVersion serviceVersion);
void getDeletedCertificateRunner(Consumer<String> testRunner) {
testRunner.accept(generateResourceId("testCert6"));
}
@Test
public abstract void getDeletedCertificateNotFound(HttpClient httpClient, CertificateServiceVersion serviceVersion);
@Test
public abstract void recoverDeletedCertificate(HttpClient httpClient, CertificateServiceVersion serviceVersion);
void recoverDeletedKeyRunner(Consumer<String> testRunner) {
testRunner.accept(generateResourceId("testCert7"));
}
@Test
public abstract void recoverDeletedCertificateNotFound(HttpClient httpClient, CertificateServiceVersion serviceVersion);
@Test
public abstract void backupCertificate(HttpClient httpClient, CertificateServiceVersion serviceVersion);
void backupCertificateRunner(Consumer<String> testRunner) {
testRunner.accept(generateResourceId("testCert8"));
}
@Test
public abstract void backupCertificateNotFound(HttpClient httpClient, CertificateServiceVersion serviceVersion);
@Test
public abstract void restoreCertificate(HttpClient httpClient, CertificateServiceVersion serviceVersion);
void restoreCertificateRunner(Consumer<String> testRunner) {
testRunner.accept(generateResourceId("testCertificate9"));
}
@Test
public abstract void getCertificateOperation(HttpClient httpClient, CertificateServiceVersion serviceVersion);
void getCertificateOperationRunner(Consumer<String> testRunner) {
testRunner.accept(generateResourceId("testCertificate10"));
}
@Test
public abstract void cancelCertificateOperation(HttpClient httpClient, CertificateServiceVersion serviceVersion);
void cancelCertificateOperationRunner(Consumer<String> testRunner) {
testRunner.accept(generateResourceId("testCertificate11"));
}
@Test
public abstract void deleteCertificateOperation(HttpClient httpClient, CertificateServiceVersion serviceVersion);
void deleteCertificateOperationRunner(Consumer<String> testRunner) {
testRunner.accept(generateResourceId("testCertificate12"));
}
@Test
public abstract void getCertificatePolicy(HttpClient httpClient, CertificateServiceVersion serviceVersion);
void getCertificatePolicyRunner(Consumer<String> testRunner) {
testRunner.accept(generateResourceId("testCertificate13"));
}
@Test
public abstract void updateCertificatePolicy(HttpClient httpClient, CertificateServiceVersion serviceVersion);
void updateCertificatePolicyRunner(Consumer<String> testRunner) {
testRunner.accept(generateResourceId("testCertificate14"));
}
@Test
public abstract void restoreCertificateFromMalformedBackup(HttpClient httpClient, CertificateServiceVersion serviceVersion);
@Test
public abstract void listCertificates(HttpClient httpClient, CertificateServiceVersion serviceVersion);
void listCertificatesRunner(Consumer<List<String>> testRunner) {
List<String> certificates = new ArrayList<>();
String certificateName;
for (int i = 0; i < 2; i++) {
certificateName = generateResourceId("listCertKey" + i);
certificates.add(certificateName);
}
testRunner.accept(certificates);
}
@Test
public abstract void createIssuer(HttpClient httpClient, CertificateServiceVersion serviceVersion);
void createIssuereRunner(Consumer<CertificateIssuer> testRunner) {
final CertificateIssuer certificateIssuer = setupIssuer(generateResourceId("testIssuer01"));
testRunner.accept(certificateIssuer);
}
@Test
public abstract void createIssuerNull(HttpClient httpClient, CertificateServiceVersion serviceVersion);
@Test
public abstract void getCertificateIssuer(HttpClient httpClient, CertificateServiceVersion serviceVersion);
@Test
public abstract void getCertificateIssuerNotFound(HttpClient httpClient, CertificateServiceVersion serviceVersion);
void getCertificateIssuerRunner(Consumer<CertificateIssuer> testRunner) {
final CertificateIssuer certificateIssuer = setupIssuer(generateResourceId("testIssuer02"));
testRunner.accept(certificateIssuer);
}
@Test
public abstract void deleteCertificateIssuer(HttpClient httpClient, CertificateServiceVersion serviceVersion);
@Test
public abstract void deleteCertificateIssuerNotFound(HttpClient httpClient, CertificateServiceVersion serviceVersion);
void deleteCertificateIssuerRunner(Consumer<CertificateIssuer> testRunner) {
final CertificateIssuer certificateIssuer = setupIssuer(generateResourceId("testIssuer03"));
testRunner.accept(certificateIssuer);
}
@Test
public abstract void listCertificateIssuers(HttpClient httpClient, CertificateServiceVersion serviceVersion);
void listCertificateIssuersRunner(Consumer<HashMap<String, CertificateIssuer>> testRunner) {
HashMap<String, CertificateIssuer> certificateIssuers = new HashMap<>();
String certificateIssuerName;
for (int i = 0; i < 10; i++) {
certificateIssuerName = generateResourceId("listCertIssuer" + i);
certificateIssuers.put(certificateIssuerName, setupIssuer(certificateIssuerName));
}
testRunner.accept(certificateIssuers);
}
@Test
public abstract void setContacts(HttpClient httpClient, CertificateServiceVersion serviceVersion);
@Test
public abstract void listContacts(HttpClient httpClient, CertificateServiceVersion serviceVersion);
@Test
public abstract void deleteContacts(HttpClient httpClient, CertificateServiceVersion serviceVersion);
@Test
public abstract void getCertificateOperatioNotFound(HttpClient httpClient, CertificateServiceVersion serviceVersion);
@Test
public abstract void getCertificatePolicyNotFound(HttpClient httpClient, CertificateServiceVersion serviceVersion);
CertificateContact setupContact() {
return new CertificateContact().setName("name").setEmail("first.last@gmail.com").setPhone("2323-31232");
}
Boolean validateContact(CertificateContact expected, CertificateContact actual) {
return expected.getEmail().equals(actual.getEmail())
&& expected.getName().equals(actual.getName())
&& expected.getPhone().equals(actual.getPhone());
}
@Test
public abstract void listCertificateVersions(HttpClient httpClient, CertificateServiceVersion serviceVersion);
// Hands the runner four copies of a single certificate name - one per version
// that the listCertificateVersions test is expected to create.
void listCertificateVersionsRunner(Consumer<List<String>> testRunner) {
    String certificateName = generateResourceId("listCertVersionTest");
    List<String> certificates = new ArrayList<>(4);
    int versionCount = 0;
    while (versionCount++ < 4) {
        certificates.add(certificateName);
    }
    testRunner.accept(certificates);
}
@Test
public abstract void listDeletedCertificates(HttpClient httpClient, CertificateServiceVersion serviceVersion);
// Supplies three distinct certificate names for the deleted-certificate
// listing test.
void listDeletedCertificatesRunner(Consumer<List<String>> testRunner) {
    List<String> certificates = new ArrayList<>(3);
    for (int index = 0; index < 3; index++) {
        certificates.add(generateResourceId("listDeletedCertificate" + index));
    }
    testRunner.accept(certificates);
}
@Test
public abstract void importCertificate(HttpClient httpClient, CertificateServiceVersion serviceVersion);
void importCertificateRunner(Consumer<ImportCertificateOptions> testRunner) {
String certificateContent = "MIIJOwIBAzCCCPcGCSqGSIb3DQEHAaCCCOgEggjkMIII4DCCBgkGCSqGSIb3DQEHAaCCBfoEggX2MIIF8jCCBe4GCyqGSIb3DQEMCgECoIIE/jCCBPowHAYKKoZIhvcNAQwBAzAOBAj15YH9pOE58AICB9AEggTYLrI+SAru2dBZRQRlJY7XQ3LeLkah2FcRR3dATDshZ2h0IA2oBrkQIdsLyAAWZ32qYR1qkWxLHn9AqXgu27AEbOk35+pITZaiy63YYBkkpR+pDdngZt19Z0PWrGwHEq5z6BHS2GLyyN8SSOCbdzCz7blj3+7IZYoMj4WOPgOm/tQ6U44SFWek46QwN2zeA4i97v7ftNNns27ms52jqfhOvTA9c/wyfZKAY4aKJfYYUmycKjnnRl012ldS2lOkASFt+lu4QCa72IY6ePtRudPCvmzRv2pkLYS6z3cI7omT8nHP3DymNOqLbFqr5O2M1ZYaLC63Q3xt3eVvbcPh3N08D1hHkhz/KDTvkRAQpvrW8ISKmgDdmzN55Pe55xHfSWGB7gPw8sZea57IxFzWHTK2yvTslooWoosmGxanYY2IG/no3EbPOWDKjPZ4ilYJe5JJ2immlxPz+2e2EOCKpDI+7fzQcRz3PTd3BK+budZ8aXX8aW/lOgKS8WmxZoKnOJBNWeTNWQFugmktXfdPHAdxMhjUXqeGQd8wTvZ4EzQNNafovwkI7IV/ZYoa++RGofVR3ZbRSiBNF6TDj/qXFt0wN/CQnsGAmQAGNiN+D4mY7i25dtTu/Jc7OxLdhAUFpHyJpyrYWLfvOiS5WYBeEDHkiPUa/8eZSPA3MXWZR1RiuDvuNqMjct1SSwdXADTtF68l/US1ksU657+XSC+6ly1A/upz+X71+C4Ho6W0751j5ZMT6xKjGh5pee7MVuduxIzXjWIy3YSd0fIT3U0A5NLEvJ9rfkx6JiHjRLx6V1tqsrtT6BsGtmCQR1UCJPLqsKVDvAINx3cPA/CGqr5OX2BGZlAihGmN6n7gv8w4O0k0LPTAe5YefgXN3m9pE867N31GtHVZaJ/UVgDNYS2jused4rw76ZWN41akx2QN0JSeMJqHXqVz6AKfz8ICS/dFnEGyBNpXiMRxrY/QPKi/wONwqsbDxRW7vZRVKs78pBkE0ksaShlZk5GkeayDWC/7Hi/NqUFtIloK9XB3paLxo1DGu5qqaF34jZdktzkXp0uZqpp+FfKZaiovMjt8F7yHCPk+LYpRsU2Cyc9DVoDA6rIgf+uEP4jppgehsxyT0lJHax2t869R2jYdsXwYUXjgwHIV0voj7bJYPGFlFjXOp6ZW86scsHM5xfsGQoK2Fp838VT34SHE1ZXU/puM7rviREHYW72pfpgGZUILQMohuTPnd8tFtAkbrmjLDo+k9xx7HUvgoFTiNNWuq/cRjr70FKNguMMTIrid+HwfmbRoaxENWdLcOTNeascER2a+37UQolKD5ksrPJG6RdNA7O2pzp3micDYRs/+s28cCIxO
String certificatePassword = "123";
String certificateName = generateResourceId("importCertPkcs");
HashMap<String, String> tags = new HashMap<>();
tags.put("key", "val");
ImportCertificateOptions importCertificateOptions = new ImportCertificateOptions(certificateName, Base64.getDecoder().decode(certificateContent))
.setPassword(certificatePassword)
.setEnabled(true)
.setTags(tags);
testRunner.accept(importCertificateOptions);
}
@Test
public abstract void importPemCertificate(HttpClient httpClient, CertificateServiceVersion serviceVersion) throws IOException;
// Builds import options for a PEM-format certificate loaded from the test
// classpath and hands them to the test body. The content type is set to PEM
// explicitly on the policy so the service treats the payload as PEM.
void importPemCertificateRunner(Consumer<ImportCertificateOptions> testRunner) throws IOException {
    byte[] certificateContent = readCertificate("certificate.pem");
    String certificateName = generateResourceId("importCertPem");
    HashMap<String, String> tags = new HashMap<>();
    tags.put("key", "val");
    ImportCertificateOptions importCertificateOptions = new ImportCertificateOptions(certificateName, certificateContent)
        .setPolicy(new CertificatePolicy("Self", "CN=AzureSDK")
            .setContentType(CertificateContentType.PEM))
        .setEnabled(true)
        .setTags(tags);
    testRunner.accept(importCertificateOptions);
}
@Test
public abstract void mergeCertificateNotFound(HttpClient httpClient, CertificateServiceVersion serviceVersion);
/**
 * Reads a certificate file from the test classpath as raw bytes.
 *
 * @param certName Name of the classpath resource to read.
 * @return The raw bytes of the resource.
 * @throws IOException If the resource is missing or cannot be read.
 */
private byte[] readCertificate(String certName) throws IOException {
    java.net.URL resource = getClass().getClassLoader().getResource(certName);
    if (resource == null) {
        // Fail fast with a clear message instead of an NPE on getPath().
        throw new IOException("Classpath resource not found: " + certName);
    }
    try {
        // Resolving through the URI decodes URL-escaped characters (e.g. %20 for
        // spaces) and correctly handles the leading slash before drive letters on
        // Windows, replacing the fragile manual ':'-based substring trimming.
        return Files.readAllBytes(Paths.get(resource.toURI()));
    } catch (java.net.URISyntaxException e) {
        throw new IOException("Invalid resource location for " + certName, e);
    }
}
// Builds a fully-populated issuer (provider "Test") with a single
// administrator contact, used as the input for the issuer CRUD tests.
CertificateIssuer setupIssuer(String issuerName) {
    return new CertificateIssuer(issuerName, "Test")
        .setAdministratorContacts(Arrays.asList(new AdministratorContact().setFirstName("first").setLastName("last").setEmail("first.last@hotmail.com").setPhone("12345")))
        .setAccountId("issuerAccountId")
        .setEnabled(true)
        .setOrganizationId("orgId")
        .setPassword("test123");
}
/**
 * Renders a certificate thumbprint as a lowercase hex string.
 *
 * @param x5t Thumbprint bytes; may be {@code null}.
 * @return Lowercase hex encoding of the bytes, or an empty string when the
 * input is {@code null}.
 */
String toHexString(byte[] x5t) {
    if (x5t == null) {
        return "";
    }
    StringBuilder builder = new StringBuilder(x5t.length * 2);
    for (byte value : x5t) {
        // %02x zero-pads single-digit bytes, matching the manual '0' padding.
        builder.append(String.format("%02x", value));
    }
    return builder.toString().replace("-", "");
}
// Parses the DER-encoded public cer bytes of a Key Vault certificate into an
// X509Certificate. Fails the test (via assertNotNull) when no cer blob is set.
X509Certificate loadCerToX509Certificate(KeyVaultCertificateWithPolicy certificate) throws CertificateException, IOException {
    assertNotNull(certificate.getCer());
    ByteArrayInputStream cerStream = new ByteArrayInputStream(certificate.getCer());
    CertificateFactory certificateFactory = CertificateFactory.getInstance("X.509");
    X509Certificate x509Certificate = (X509Certificate) certificateFactory.generateCertificate(cerStream);
    // ByteArrayInputStream.close() is a no-op; the explicit close is harmless.
    cerStream.close();
    return x509Certificate;
}
// Compares the writable fields of two issuers and additionally checks that
// service-assigned metadata (created/updated timestamps, non-empty id) was
// populated on the returned issuer. Administrator contacts are compared by
// size only. Assumes both arguments and their compared fields are non-null.
Boolean validateIssuer(CertificateIssuer expected, CertificateIssuer actual) {
    return expected.getAccountId().equals(actual.getAccountId())
        && expected.isEnabled().equals(actual.isEnabled())
        && (actual.getCreatedOn() != null)
        && (actual.getUpdatedOn() != null)
        && (actual.getId() != null)
        && (actual.getId().length() > 0)
        && expected.getName().equals(actual.getName())
        && expected.getOrganizationId().equals(actual.getOrganizationId())
        && expected.getAdministratorContacts().size() == actual.getAdministratorContacts().size();
}
// Builds the non-default policy used by the policy tests: self-signed,
// exportable, reusable EC P-384 key, PKCS12 content, 24-month validity,
// auto-renew 40 days before expiry.
CertificatePolicy setupPolicy() {
    return new CertificatePolicy(WellKnownIssuerNames.SELF, "CN=default")
        .setKeyUsage(CertificateKeyUsage.KEY_CERT_SIGN, CertificateKeyUsage.KEY_AGREEMENT)
        .setContentType(CertificateContentType.PKCS12)
        .setExportable(true)
        .setKeyType(CertificateKeyType.EC)
        .setCertificateTransparent(false)
        .setEnabled(true)
        .setKeyCurveName(CertificateKeyCurveName.P_384)
        .setKeyReusable(true)
        .setValidityInMonths(24)
        .setLifetimeActions(new LifetimeAction(CertificatePolicyAction.AUTO_RENEW).setDaysBeforeExpiry(40));
}

// Field-by-field comparison of two policies. Collections (lifetime actions,
// key usage) are compared by size only; createdOn is merely required to be
// populated on the returned policy.
boolean validatePolicy(CertificatePolicy expected, CertificatePolicy actual) {
    return expected.getKeyType().equals(actual.getKeyType())
        && expected.getContentType().equals(actual.getContentType())
        && actual.getCreatedOn() != null
        && expected.getIssuerName().equals(actual.getIssuerName())
        && expected.getKeyCurveName().equals(actual.getKeyCurveName())
        && expected.isExportable().equals(actual.isExportable())
        && expected.isCertificateTransparent().equals(actual.isCertificateTransparent())
        && expected.isEnabled().equals(actual.isEnabled())
        && expected.isKeyReusable().equals(actual.isKeyReusable())
        && expected.getValidityInMonths().equals(actual.getValidityInMonths())
        && expected.getLifetimeActions().size() == actual.getLifetimeActions().size()
        && expected.getKeyUsage().size() == actual.getKeyUsage().size();
}
// Deep comparison of two certificates: identifiers, version and timestamps must
// match exactly; the thumbprint and cer blobs are compared by length only.
// NOTE(review): assumes expiresOn and recoveryLevel are non-null on both sides
// - confirm against the flows that call this.
boolean validateCertificate(KeyVaultCertificate expected, KeyVaultCertificate actual) {
    return expected.getId().equals(actual.getId())
        && expected.getKeyId().equals(actual.getKeyId())
        && expected.getName().equals(actual.getName())
        && expected.getSecretId().equals(actual.getSecretId())
        && expected.getProperties().getVersion().equals(actual.getProperties().getVersion())
        && expected.getProperties().getCreatedOn().equals(actual.getProperties().getCreatedOn())
        && expected.getProperties().getExpiresOn().equals(actual.getProperties().getExpiresOn())
        && expected.getProperties().getRecoveryLevel().equals(actual.getProperties().getRecoveryLevel())
        && expected.getProperties().getX509Thumbprint().length == actual.getProperties().getX509Thumbprint().length
        && expected.getCer().length == actual.getCer().length;
}
public String getEndpoint() {
final String endpoint = interceptorManager.isPlaybackMode()
? "http:
: System.getenv("AZURE_KEYVAULT_ENDPOINT");
Objects.requireNonNull(endpoint);
return endpoint;
}
// Runs the action and asserts it throws an HttpResponseException with the
// expected status code.
static void assertRestException(Runnable exceptionThrower, int expectedStatusCode) {
    assertRestException(exceptionThrower, HttpResponseException.class, expectedStatusCode);
}

// Runs the action and asserts it throws exactly expectedExceptionType carrying
// the expected HTTP status code. If the action completes normally, fail()
// raises an AssertionError, which the catch below forwards to the class-equality
// check and therefore still fails the test.
static void assertRestException(Runnable exceptionThrower, Class<? extends HttpResponseException> expectedExceptionType, int expectedStatusCode) {
    try {
        exceptionThrower.run();
        fail();
    } catch (Throwable ex) {
        assertRestException(ex, expectedExceptionType, expectedStatusCode);
    }
}
// Returns the bare suffix during playback (so ids match the recorded session)
// and a random UUID-prefixed id during live/record runs.
String generateResourceId(String suffix) {
    if (interceptorManager.isPlaybackMode()) {
        return suffix;
    }
    final String id = UUID.randomUUID().toString();
    return suffix.isEmpty() ? id : id + "-" + suffix;
}
/**
 * Helper method to verify the error was a HttpRequestException and it has a specific HTTP response code.
 *
 * @param exception Expected error thrown during the test
 * @param expectedStatusCode Expected HTTP status code contained in the error response
 */
static void assertRestException(Throwable exception, int expectedStatusCode) {
    assertRestException(exception, HttpResponseException.class, expectedStatusCode);
}

// Asserts the throwable is exactly the expected exception class (subtypes do
// not match) and that its HTTP response carries the expected status code.
static void assertRestException(Throwable exception, Class<? extends HttpResponseException> expectedExceptionType, int expectedStatusCode) {
    assertEquals(expectedExceptionType, exception.getClass());
    assertEquals(expectedStatusCode, ((HttpResponseException) exception).getResponse().getStatusCode());
}
/**
 * Verifies that running the given command throws exactly the expected
 * exception type.
 *
 * @param exceptionThrower Command that should throw the exception.
 * @param exception Exact exception class the command is expected to throw.
 */
static <T> void assertRunnableThrowsException(Runnable exceptionThrower, Class<T> exception) {
    try {
        exceptionThrower.run();
    } catch (Exception ex) {
        assertEquals(exception, ex.getClass());
        return;
    }
    // Completing normally means the expected exception never surfaced.
    fail();
}
/**
 * Sleeps for the given duration, but only in live/record mode; playback
 * replays recorded traffic and needs no real waiting.
 *
 * @param millis Duration to sleep, in milliseconds.
 */
public void sleepInRecordMode(long millis) {
    if (interceptorManager.isPlaybackMode()) {
        return;
    }
    // Delegate instead of duplicating the sleep/interrupt handling.
    sleep(millis);
}

/**
 * Sleeps for the given duration.
 *
 * @param millis Duration to sleep, in milliseconds.
 */
public void sleep(long millis) {
    try {
        Thread.sleep(millis);
    } catch (InterruptedException e) {
        // Restore the interrupt flag rather than swallowing it with a stack
        // trace, so callers up the stack can still observe the interruption.
        Thread.currentThread().interrupt();
    }
}
/**
 * Returns a stream of arguments that includes all combinations of eligible {@link HttpClient HttpClients} and
 * service versions that should be tested.
 *
 * @return A stream of HttpClient and service version combinations to test.
 */
static Stream<Arguments> getTestParameters() {
    List<Arguments> argumentsList = new ArrayList<>();
    // Cross product: every eligible HttpClient paired with every service
    // version admitted by shouldServiceVersionBeTested.
    getHttpClients()
        .forEach(httpClient -> {
            Arrays.stream(CertificateServiceVersion.values()).filter(
                CertificateClientTestBase::shouldServiceVersionBeTested)
                .forEach(serviceVersion -> argumentsList.add(Arguments.of(httpClient, serviceVersion)));
        });
    return argumentsList.stream();
}

// Only the latest service version is tested unless AZURE_TEST_SERVICE_VERSIONS
// is configured, in which case every version passes the filter.
// NOTE(review): the configured value itself is never inspected - confirm that
// selecting specific versions is not intended here.
static boolean shouldServiceVersionBeTested(CertificateServiceVersion serviceVersion) {
    if (Configuration.getGlobalConfiguration().get(AZURE_TEST_SERVICE_VERSIONS) == null) {
        return CertificateServiceVersion.getLatest().equals(serviceVersion);
    }
    return true;
}
} | pemPath = pemPath.substring(1); | HttpPipeline getHttpPipeline(HttpClient httpClient, CertificateServiceVersion serviceVersion) {
TokenCredential credential = null;
if (!interceptorManager.isPlaybackMode()) {
String clientId = System.getenv("ARM_CLIENTID");
String clientKey = System.getenv("ARM_CLIENTKEY");
String tenantId = System.getenv("AZURE_TENANT_ID");
Objects.requireNonNull(clientId, "The client id cannot be null");
Objects.requireNonNull(clientKey, "The client key cannot be null");
Objects.requireNonNull(tenantId, "The tenant id cannot be null");
credential = new ClientSecretCredentialBuilder()
.clientSecret(clientKey)
.clientId(clientId)
.tenantId(tenantId)
.build();
}
final List<HttpPipelinePolicy> policies = new ArrayList<>();
policies.add(new UserAgentPolicy(SDK_NAME, SDK_VERSION,
Configuration.getGlobalConfiguration().clone(), serviceVersion));
HttpPolicyProviders.addBeforeRetryPolicies(policies);
policies.add(new RetryPolicy());
if (credential != null) {
policies.add(new BearerTokenAuthenticationPolicy(credential, CertificateAsyncClient.KEY_VAULT_SCOPE));
}
HttpPolicyProviders.addAfterRetryPolicies(policies);
policies.add(new HttpLoggingPolicy(new HttpLogOptions().setLogLevel(HttpLogDetailLevel.BODY_AND_HEADERS)));
if (!interceptorManager.isPlaybackMode()) {
policies.add(interceptorManager.getRecordPolicy());
}
HttpPipeline pipeline = new HttpPipelineBuilder()
.policies(policies.toArray(new HttpPipelinePolicy[0]))
.httpClient(httpClient == null ? interceptorManager.getPlaybackClient() : httpClient)
.build();
return pipeline;
} | class CertificateClientTestBase extends TestBase {
static final String DISPLAY_NAME_WITH_ARGUMENTS = "{displayName} with [{arguments}]";
private static final String SDK_NAME = "client_name";
private static final String SDK_VERSION = "client_version";
@Override
protected String getTestName() {
return "";
}
void beforeTestSetup() {
} | class CertificateClientTestBase extends TestBase {
static final String DISPLAY_NAME_WITH_ARGUMENTS = "{displayName} with [{arguments}]";
private static final String SDK_NAME = "client_name";
private static final String SDK_VERSION = "client_version";
private static final String AZURE_KEYVAULT_TEST_CERTIFICATE_SERVICE_VERSIONS = "AZURE_KEYVAULT_TEST_CERTIFICATE_SERVICE_VERSIONS";
private static final String SERVICE_VERSION_FROM_ENV =
Configuration.getGlobalConfiguration().get(AZURE_KEYVAULT_TEST_CERTIFICATE_SERVICE_VERSIONS);
private static final String AZURE_TEST_SERVICE_VERSIONS_VALUE_ALL = "ALL";
@Override
protected String getTestName() {
return "";
}
void beforeTestSetup() {
}
@Test
public abstract void createCertificate(HttpClient httpClient, CertificateServiceVersion serviceVersion);
void createCertificateRunner(Consumer<CertificatePolicy> testRunner) {
final CertificatePolicy certificatePolicy = CertificatePolicy.getDefault();
testRunner.accept(certificatePolicy);
}
@Test
public abstract void createCertificateEmptyName(HttpClient httpClient, CertificateServiceVersion serviceVersion);
@Test
public abstract void createCertificateNullPolicy(HttpClient httpClient, CertificateServiceVersion serviceVersion);
@Test public abstract void createCertoificateNull(HttpClient httpClient, CertificateServiceVersion serviceVersion);
@Test
public abstract void updateCertificate(HttpClient httpClient, CertificateServiceVersion serviceVersion);
void updateCertificateRunner(BiConsumer<Map<String, String>, Map<String, String>> testRunner) {
final Map<String, String> tags = new HashMap<>();
tags.put("first tag", "first value");
final Map<String, String> updatedTags = new HashMap<>();
tags.put("first tag", "first value");
tags.put("second tag", "second value");
testRunner.accept(tags, updatedTags);
}
@Test
public abstract void updateDisabledCertificate(HttpClient httpClient, CertificateServiceVersion serviceVersion);
void updateDisabledCertificateRunner(BiConsumer<Map<String, String>, Map<String, String>> testRunner) {
final Map<String, String> tags = new HashMap<>();
tags.put("first tag", "first value");
final Map<String, String> updatedTags = new HashMap<>();
tags.put("first tag", "first value");
tags.put("second tag", "second value");
testRunner.accept(tags, updatedTags);
}
@Test
public abstract void getCertificate(HttpClient httpClient, CertificateServiceVersion serviceVersion);
void getCertificateRunner(Consumer<String> testRunner) {
testRunner.accept(generateResourceId("testCertificate4"));
}
@Test
public abstract void getCertificateSpecificVersion(HttpClient httpClient, CertificateServiceVersion serviceVersion);
void getCertificateSpecificVersionRunner(Consumer<String> testRunner) {
testRunner.accept(generateResourceId("testCertificate9"));
}
@Test
public abstract void getCertificateNotFound(HttpClient httpClient, CertificateServiceVersion serviceVersion);
@Test
public abstract void deleteCertificate(HttpClient httpClient, CertificateServiceVersion serviceVersion);
void deleteCertificateRunner(Consumer<String> testRunner) {
testRunner.accept(generateResourceId("testCert5"));
}
@Test
public abstract void deleteCertificateNotFound(HttpClient httpClient, CertificateServiceVersion serviceVersion);
@Test
public abstract void getDeletedCertificate(HttpClient httpClient, CertificateServiceVersion serviceVersion);
void getDeletedCertificateRunner(Consumer<String> testRunner) {
testRunner.accept(generateResourceId("testCert6"));
}
@Test
public abstract void getDeletedCertificateNotFound(HttpClient httpClient, CertificateServiceVersion serviceVersion);
@Test
public abstract void recoverDeletedCertificate(HttpClient httpClient, CertificateServiceVersion serviceVersion);
void recoverDeletedKeyRunner(Consumer<String> testRunner) {
testRunner.accept(generateResourceId("testCert7"));
}
@Test
public abstract void recoverDeletedCertificateNotFound(HttpClient httpClient, CertificateServiceVersion serviceVersion);
@Test
public abstract void backupCertificate(HttpClient httpClient, CertificateServiceVersion serviceVersion);
void backupCertificateRunner(Consumer<String> testRunner) {
testRunner.accept(generateResourceId("testCert8"));
}
@Test
public abstract void backupCertificateNotFound(HttpClient httpClient, CertificateServiceVersion serviceVersion);
@Test
public abstract void restoreCertificate(HttpClient httpClient, CertificateServiceVersion serviceVersion);
void restoreCertificateRunner(Consumer<String> testRunner) {
testRunner.accept(generateResourceId("testCertificate9"));
}
@Test
public abstract void getCertificateOperation(HttpClient httpClient, CertificateServiceVersion serviceVersion);
void getCertificateOperationRunner(Consumer<String> testRunner) {
testRunner.accept(generateResourceId("testCertificate10"));
}
@Test
public abstract void cancelCertificateOperation(HttpClient httpClient, CertificateServiceVersion serviceVersion);
void cancelCertificateOperationRunner(Consumer<String> testRunner) {
testRunner.accept(generateResourceId("testCertificate11"));
}
@Test
public abstract void deleteCertificateOperation(HttpClient httpClient, CertificateServiceVersion serviceVersion);
void deleteCertificateOperationRunner(Consumer<String> testRunner) {
testRunner.accept(generateResourceId("testCertificate12"));
}
@Test
public abstract void getCertificatePolicy(HttpClient httpClient, CertificateServiceVersion serviceVersion);
void getCertificatePolicyRunner(Consumer<String> testRunner) {
testRunner.accept(generateResourceId("testCertificate13"));
}
@Test
public abstract void updateCertificatePolicy(HttpClient httpClient, CertificateServiceVersion serviceVersion);
void updateCertificatePolicyRunner(Consumer<String> testRunner) {
testRunner.accept(generateResourceId("testCertificate14"));
}
@Test
public abstract void restoreCertificateFromMalformedBackup(HttpClient httpClient, CertificateServiceVersion serviceVersion);
@Test
public abstract void listCertificates(HttpClient httpClient, CertificateServiceVersion serviceVersion);
void listCertificatesRunner(Consumer<List<String>> testRunner) {
List<String> certificates = new ArrayList<>();
String certificateName;
for (int i = 0; i < 2; i++) {
certificateName = generateResourceId("listCertKey" + i);
certificates.add(certificateName);
}
testRunner.accept(certificates);
}
@Test
public abstract void createIssuer(HttpClient httpClient, CertificateServiceVersion serviceVersion);
void createIssuereRunner(Consumer<CertificateIssuer> testRunner) {
final CertificateIssuer certificateIssuer = setupIssuer(generateResourceId("testIssuer01"));
testRunner.accept(certificateIssuer);
}
@Test
public abstract void createIssuerNull(HttpClient httpClient, CertificateServiceVersion serviceVersion);
@Test
public abstract void getCertificateIssuer(HttpClient httpClient, CertificateServiceVersion serviceVersion);
@Test
public abstract void getCertificateIssuerNotFound(HttpClient httpClient, CertificateServiceVersion serviceVersion);
void getCertificateIssuerRunner(Consumer<CertificateIssuer> testRunner) {
final CertificateIssuer certificateIssuer = setupIssuer(generateResourceId("testIssuer02"));
testRunner.accept(certificateIssuer);
}
@Test
public abstract void deleteCertificateIssuer(HttpClient httpClient, CertificateServiceVersion serviceVersion);
@Test
public abstract void deleteCertificateIssuerNotFound(HttpClient httpClient, CertificateServiceVersion serviceVersion);
void deleteCertificateIssuerRunner(Consumer<CertificateIssuer> testRunner) {
final CertificateIssuer certificateIssuer = setupIssuer(generateResourceId("testIssuer03"));
testRunner.accept(certificateIssuer);
}
@Test
public abstract void listCertificateIssuers(HttpClient httpClient, CertificateServiceVersion serviceVersion);
void listCertificateIssuersRunner(Consumer<HashMap<String, CertificateIssuer>> testRunner) {
HashMap<String, CertificateIssuer> certificateIssuers = new HashMap<>();
String certificateIssuerName;
for (int i = 0; i < 10; i++) {
certificateIssuerName = generateResourceId("listCertIssuer" + i);
certificateIssuers.put(certificateIssuerName, setupIssuer(certificateIssuerName));
}
testRunner.accept(certificateIssuers);
}
@Test
public abstract void setContacts(HttpClient httpClient, CertificateServiceVersion serviceVersion);
@Test
public abstract void listContacts(HttpClient httpClient, CertificateServiceVersion serviceVersion);
@Test
public abstract void deleteContacts(HttpClient httpClient, CertificateServiceVersion serviceVersion);
@Test
public abstract void getCertificateOperatioNotFound(HttpClient httpClient, CertificateServiceVersion serviceVersion);
@Test
public abstract void getCertificatePolicyNotFound(HttpClient httpClient, CertificateServiceVersion serviceVersion);
CertificateContact setupContact() {
return new CertificateContact().setName("name").setEmail("first.last@gmail.com").setPhone("2323-31232");
}
Boolean validateContact(CertificateContact expected, CertificateContact actual) {
return expected.getEmail().equals(actual.getEmail())
&& expected.getName().equals(actual.getName())
&& expected.getPhone().equals(actual.getPhone());
}
@Test
public abstract void listCertificateVersions(HttpClient httpClient, CertificateServiceVersion serviceVersion);
void listCertificateVersionsRunner(Consumer<List<String>> testRunner) {
List<String> certificates = new ArrayList<>();
String certificateName = generateResourceId("listCertVersionTest");
for (int i = 1; i < 5; i++) {
certificates.add(certificateName);
}
testRunner.accept(certificates);
}
@Test
public abstract void listDeletedCertificates(HttpClient httpClient, CertificateServiceVersion serviceVersion);
void listDeletedCertificatesRunner(Consumer<List<String>> testRunner) {
List<String> certificates = new ArrayList<>();
String certificateName;
for (int i = 0; i < 3; i++) {
certificateName = generateResourceId("listDeletedCertificate" + i);
certificates.add(certificateName);
}
testRunner.accept(certificates);
}
@Test
public abstract void importCertificate(HttpClient httpClient, CertificateServiceVersion serviceVersion);
void importCertificateRunner(Consumer<ImportCertificateOptions> testRunner) {
String certificateContent = "MIIJOwIBAzCCCPcGCSqGSIb3DQEHAaCCCOgEggjkMIII4DCCBgkGCSqGSIb3DQEHAaCCBfoEggX2MIIF8jCCBe4GCyqGSIb3DQEMCgECoIIE/jCCBPowHAYKKoZIhvcNAQwBAzAOBAj15YH9pOE58AICB9AEggTYLrI+SAru2dBZRQRlJY7XQ3LeLkah2FcRR3dATDshZ2h0IA2oBrkQIdsLyAAWZ32qYR1qkWxLHn9AqXgu27AEbOk35+pITZaiy63YYBkkpR+pDdngZt19Z0PWrGwHEq5z6BHS2GLyyN8SSOCbdzCz7blj3+7IZYoMj4WOPgOm/tQ6U44SFWek46QwN2zeA4i97v7ftNNns27ms52jqfhOvTA9c/wyfZKAY4aKJfYYUmycKjnnRl012ldS2lOkASFt+lu4QCa72IY6ePtRudPCvmzRv2pkLYS6z3cI7omT8nHP3DymNOqLbFqr5O2M1ZYaLC63Q3xt3eVvbcPh3N08D1hHkhz/KDTvkRAQpvrW8ISKmgDdmzN55Pe55xHfSWGB7gPw8sZea57IxFzWHTK2yvTslooWoosmGxanYY2IG/no3EbPOWDKjPZ4ilYJe5JJ2immlxPz+2e2EOCKpDI+7fzQcRz3PTd3BK+budZ8aXX8aW/lOgKS8WmxZoKnOJBNWeTNWQFugmktXfdPHAdxMhjUXqeGQd8wTvZ4EzQNNafovwkI7IV/ZYoa++RGofVR3ZbRSiBNF6TDj/qXFt0wN/CQnsGAmQAGNiN+D4mY7i25dtTu/Jc7OxLdhAUFpHyJpyrYWLfvOiS5WYBeEDHkiPUa/8eZSPA3MXWZR1RiuDvuNqMjct1SSwdXADTtF68l/US1ksU657+XSC+6ly1A/upz+X71+C4Ho6W0751j5ZMT6xKjGh5pee7MVuduxIzXjWIy3YSd0fIT3U0A5NLEvJ9rfkx6JiHjRLx6V1tqsrtT6BsGtmCQR1UCJPLqsKVDvAINx3cPA/CGqr5OX2BGZlAihGmN6n7gv8w4O0k0LPTAe5YefgXN3m9pE867N31GtHVZaJ/UVgDNYS2jused4rw76ZWN41akx2QN0JSeMJqHXqVz6AKfz8ICS/dFnEGyBNpXiMRxrY/QPKi/wONwqsbDxRW7vZRVKs78pBkE0ksaShlZk5GkeayDWC/7Hi/NqUFtIloK9XB3paLxo1DGu5qqaF34jZdktzkXp0uZqpp+FfKZaiovMjt8F7yHCPk+LYpRsU2Cyc9DVoDA6rIgf+uEP4jppgehsxyT0lJHax2t869R2jYdsXwYUXjgwHIV0voj7bJYPGFlFjXOp6ZW86scsHM5xfsGQoK2Fp838VT34SHE1ZXU/puM7rviREHYW72pfpgGZUILQMohuTPnd8tFtAkbrmjLDo+k9xx7HUvgoFTiNNWuq/cRjr70FKNguMMTIrid+HwfmbRoaxENWdLcOTNeascER2a+37UQolKD5ksrPJG6RdNA7O2pzp3micDYRs/+s28cCIxO
String certificatePassword = "123";
String certificateName = generateResourceId("importCertPkcs");
HashMap<String, String> tags = new HashMap<>();
tags.put("key", "val");
ImportCertificateOptions importCertificateOptions = new ImportCertificateOptions(certificateName, Base64.getDecoder().decode(certificateContent))
.setPassword(certificatePassword)
.setEnabled(true)
.setTags(tags);
testRunner.accept(importCertificateOptions);
}
@Test
public abstract void importPemCertificate(HttpClient httpClient, CertificateServiceVersion serviceVersion) throws IOException;
void importPemCertificateRunner(Consumer<ImportCertificateOptions> testRunner) throws IOException {
byte[] certificateContent = readCertificate("pemCert.pem");
String certificateName = generateResourceId("importCertPem");
HashMap<String, String> tags = new HashMap<>();
tags.put("key", "val");
ImportCertificateOptions importCertificateOptions = new ImportCertificateOptions(certificateName, certificateContent)
.setPolicy(new CertificatePolicy("Self", "CN=AzureSDK")
.setContentType(CertificateContentType.PEM))
.setEnabled(true)
.setTags(tags);
testRunner.accept(importCertificateOptions);
}
@Test
public abstract void mergeCertificateNotFound(HttpClient httpClient, CertificateServiceVersion serviceVersion);
/**
 * Reads a PEM certificate from the test classpath, normalizing every line
 * ending to '\n' (a trailing '\n' is appended after the last line too).
 *
 * @param certName Name of the classpath resource to read.
 * @return The normalized resource content as bytes.
 * @throws IOException If the resource cannot be read.
 */
private byte[] readCertificate(String certName) throws IOException {
    // NOTE(review): getResource().getPath() is URL-encoded; a path containing
    // spaces (%20) would break FileReader here. Left as-is to preserve
    // behavior - confirm test paths never contain spaces.
    String pemPath = getClass().getClassLoader().getResource(certName).getPath();
    StringBuilder pemCert = new StringBuilder();
    // try-with-resources replaces the manual close(); StringBuilder avoids the
    // O(n^2) cost of repeated String concatenation in the read loop.
    try (BufferedReader br = new BufferedReader(new FileReader(pemPath))) {
        String line;
        while ((line = br.readLine()) != null) {
            pemCert.append(line).append('\n');
        }
    }
    return pemCert.toString().getBytes();
}
CertificateIssuer setupIssuer(String issuerName) {
return new CertificateIssuer(issuerName, "Test")
.setAdministratorContacts(Arrays.asList(new AdministratorContact().setFirstName("first").setLastName("last").setEmail("first.last@hotmail.com").setPhone("12345")))
.setAccountId("issuerAccountId")
.setEnabled(true)
.setOrganizationId("orgId")
.setPassword("test123");
}
String toHexString(byte[] x5t) {
if (x5t == null) {
return "";
}
StringBuilder hexString = new StringBuilder();
for (int i = 0; i < x5t.length; i++) {
String hex = Integer.toHexString(0xFF & x5t[i]);
if (hex.length() == 1) {
hexString.append('0');
}
hexString.append(hex);
}
return hexString.toString().replace("-", "");
}
X509Certificate loadCerToX509Certificate(KeyVaultCertificateWithPolicy certificate) throws CertificateException, IOException {
assertNotNull(certificate.getCer());
ByteArrayInputStream cerStream = new ByteArrayInputStream(certificate.getCer());
CertificateFactory certificateFactory = CertificateFactory.getInstance("X.509");
X509Certificate x509Certificate = (X509Certificate) certificateFactory.generateCertificate(cerStream);
cerStream.close();
return x509Certificate;
}
Boolean validateIssuer(CertificateIssuer expected, CertificateIssuer actual) {
return expected.getAccountId().equals(actual.getAccountId())
&& expected.isEnabled().equals(actual.isEnabled())
&& (actual.getCreatedOn() != null)
&& (actual.getUpdatedOn() != null)
&& (actual.getId() != null)
&& (actual.getId().length() > 0)
&& expected.getName().equals(actual.getName())
&& expected.getOrganizationId().equals(actual.getOrganizationId())
&& expected.getAdministratorContacts().size() == actual.getAdministratorContacts().size();
}
CertificatePolicy setupPolicy() {
return new CertificatePolicy(WellKnownIssuerNames.SELF, "CN=default")
.setKeyUsage(CertificateKeyUsage.KEY_CERT_SIGN, CertificateKeyUsage.KEY_AGREEMENT)
.setContentType(CertificateContentType.PKCS12)
.setExportable(true)
.setKeyType(CertificateKeyType.EC)
.setCertificateTransparent(false)
.setEnabled(true)
.setKeyCurveName(CertificateKeyCurveName.P_384)
.setKeyReusable(true)
.setValidityInMonths(24)
.setLifetimeActions(new LifetimeAction(CertificatePolicyAction.AUTO_RENEW).setDaysBeforeExpiry(40));
}
boolean validatePolicy(CertificatePolicy expected, CertificatePolicy actual) {
return expected.getKeyType().equals(actual.getKeyType())
&& expected.getContentType().equals(actual.getContentType())
&& actual.getCreatedOn() != null
&& expected.getIssuerName().equals(actual.getIssuerName())
&& expected.getKeyCurveName().equals(actual.getKeyCurveName())
&& expected.isExportable().equals(actual.isExportable())
&& expected.isCertificateTransparent().equals(actual.isCertificateTransparent())
&& expected.isEnabled().equals(actual.isEnabled())
&& expected.isKeyReusable().equals(actual.isKeyReusable())
&& expected.getValidityInMonths().equals(actual.getValidityInMonths())
&& expected.getLifetimeActions().size() == actual.getLifetimeActions().size()
&& expected.getKeyUsage().size() == actual.getKeyUsage().size();
}
boolean validateCertificate(KeyVaultCertificate expected, KeyVaultCertificate actual) {
return expected.getId().equals(actual.getId())
&& expected.getKeyId().equals(actual.getKeyId())
&& expected.getName().equals(actual.getName())
&& expected.getSecretId().equals(actual.getSecretId())
&& expected.getProperties().getVersion().equals(actual.getProperties().getVersion())
&& expected.getProperties().getCreatedOn().equals(actual.getProperties().getCreatedOn())
&& expected.getProperties().getExpiresOn().equals(actual.getProperties().getExpiresOn())
&& expected.getProperties().getRecoveryLevel().equals(actual.getProperties().getRecoveryLevel())
&& expected.getProperties().getX509Thumbprint().length == actual.getProperties().getX509Thumbprint().length
&& expected.getCer().length == actual.getCer().length;
}
public String getEndpoint() {
final String endpoint = interceptorManager.isPlaybackMode()
? "http:
: System.getenv("AZURE_KEYVAULT_ENDPOINT");
Objects.requireNonNull(endpoint);
return endpoint;
}
// Convenience overload: asserts the runnable throws any HttpResponseException
// carrying the given HTTP status code.
static void assertRestException(Runnable exceptionThrower, int expectedStatusCode) {
    assertRestException(exceptionThrower, HttpResponseException.class, expectedStatusCode);
}
/**
 * Runs the given action and asserts it throws exactly the expected
 * HttpResponseException subtype with the expected HTTP status code.
 *
 * @param exceptionThrower action expected to throw
 * @param expectedExceptionType exact exception class expected
 * @param expectedStatusCode HTTP status code expected on the response
 */
static void assertRestException(Runnable exceptionThrower, Class<? extends HttpResponseException> expectedExceptionType, int expectedStatusCode) {
    try {
        exceptionThrower.run();
    } catch (Throwable ex) {
        assertRestException(ex, expectedExceptionType, expectedStatusCode);
        return;
    }
    // Moved out of the try block: previously the AssertionError thrown by
    // fail() was itself caught by catch (Throwable) above and re-asserted,
    // masking the plain "expected an exception but none was thrown" failure.
    fail();
}
/**
 * Produces a resource name for the test: the bare suffix when replaying a
 * recording (names must match the recorded session), otherwise a random UUID
 * optionally tagged with the suffix.
 */
String generateResourceId(String suffix) {
    if (interceptorManager.isPlaybackMode()) {
        return suffix;
    }
    final String base = UUID.randomUUID().toString();
    return suffix.isEmpty() ? base : base + "-" + suffix;
}
/**
 * Helper method to verify the error was a HttpResponseException and it has a specific HTTP response code.
 *
 * @param exception Expected error thrown during the test
 * @param expectedStatusCode Expected HTTP status code contained in the error response
 */
static void assertRestException(Throwable exception, int expectedStatusCode) {
    // Convenience overload: any HttpResponseException subtype is acceptable.
    assertRestException(exception, HttpResponseException.class, expectedStatusCode);
}
/**
 * Asserts the throwable is exactly the expected HttpResponseException subtype
 * (class equality, not instanceof) and that its response carries the expected
 * HTTP status code.
 *
 * @param exception the throwable captured during the test
 * @param expectedExceptionType exact exception class expected
 * @param expectedStatusCode HTTP status code expected on the response
 */
static void assertRestException(Throwable exception, Class<? extends HttpResponseException> expectedExceptionType, int expectedStatusCode) {
    assertEquals(expectedExceptionType, exception.getClass());
    assertEquals(expectedStatusCode, ((HttpResponseException) exception).getResponse().getStatusCode());
}
/**
 * Helper method to verify that a command throws exactly the given exception
 * class (compared by class equality, not instanceof).
 *
 * @param exceptionThrower Command that should throw the exception
 * @param exception Exact exception class expected
 */
static <T> void assertRunnableThrowsException(Runnable exceptionThrower, Class<T> exception) {
    try {
        exceptionThrower.run();
    } catch (Exception ex) {
        assertEquals(exception, ex.getClass());
        return;
    }
    // Reached only when no exception was thrown; behavior matches the original,
    // since fail()'s AssertionError is an Error and was never caught there either.
    fail();
}
/**
 * Sleeps for the given duration, but only when recording or running live —
 * playback replays recorded traffic and needs no real waiting.
 *
 * @param millis sleep duration in milliseconds
 */
public void sleepInRecordMode(long millis) {
    if (interceptorManager.isPlaybackMode()) {
        return;
    }
    try {
        Thread.sleep(millis);
    } catch (InterruptedException e) {
        // Restore the interrupt status instead of swallowing it (the original
        // only printed the stack trace), so callers can observe interruption.
        Thread.currentThread().interrupt();
    }
}
/**
 * Sleeps unconditionally for the given duration.
 *
 * @param millis sleep duration in milliseconds
 */
public void sleep(long millis) {
    try {
        Thread.sleep(millis);
    } catch (InterruptedException e) {
        // Restore the interrupt status instead of swallowing it (the original
        // only printed the stack trace), so callers can observe interruption.
        Thread.currentThread().interrupt();
    }
}
/**
 * Returns a stream of arguments that includes all combinations of eligible
 * {@link HttpClient HttpClients} and service versions that should be tested.
 *
 * @return A stream of HttpClient and service version combinations to test.
 */
static Stream<Arguments> getTestParameters() {
    List<Arguments> combinations = new ArrayList<>();
    getHttpClients().forEach(httpClient -> {
        // Pair each eligible HTTP client with every service version that the
        // environment configuration allows.
        for (CertificateServiceVersion serviceVersion : CertificateServiceVersion.values()) {
            if (shouldServiceVersionBeTested(serviceVersion)) {
                combinations.add(Arguments.of(httpClient, serviceVersion));
            }
        }
    });
    return combinations.stream();
}
/**
 * Returns whether the given service version matches the rules of the test framework.
 *
 * <ul>
 * <li>Uses the latest service version as default if no environment variable is set.</li>
 * <li>If set to ALL, all service versions in {@link CertificateServiceVersion} will be tested.</li>
 * <li>Otherwise, the service version string should match the env variable.</li>
 * </ul>
 *
 * Environment values currently supported are: "ALL", "${version}".
 * Use a comma to separate versions, e.g. {@code set AZURE_TEST_SERVICE_VERSIONS = V1_0, V2_0}
 *
 * @param serviceVersion ServiceVersion to check
 * @return whether this service version should be tested.
 */
private static boolean shouldServiceVersionBeTested(CertificateServiceVersion serviceVersion) {
    if (CoreUtils.isNullOrEmpty(SERVICE_VERSION_FROM_ENV)) {
        return CertificateServiceVersion.getLatest().equals(serviceVersion);
    }
    if (AZURE_TEST_SERVICE_VERSIONS_VALUE_ALL.equalsIgnoreCase(SERVICE_VERSION_FROM_ENV)) {
        return true;
    }
    // Comma-separated allow-list; entries are trimmed before comparison.
    for (String configuredVersion : SERVICE_VERSION_FROM_ENV.split(",")) {
        if (serviceVersion.toString().equals(configuredVersion.trim())) {
            return true;
        }
    }
    return false;
}
} |
class CertificateClientTest extends CertificateClientTestBase {

    @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
    @MethodSource("getTestParameters")
    // Happy-path creation: the finished poller must yield a certificate with the
    // requested name and a creation timestamp.
    // NOTE(review): this span contained paste residue ("Paste error. Removed.")
    // with two fused duplicate versions of this method; reconstructed as one
    // method using getCertificateClient, the helper this class actually declares.
    public void createCertificate(HttpClient httpClient, CertificateServiceVersion serviceVersion) {
        getCertificateClient(httpClient, serviceVersion);
        createCertificateRunner((policy) -> {
            String certName = generateResourceId("testCer");
            SyncPoller<CertificateOperation, KeyVaultCertificateWithPolicy> certPoller =
                client.beginCreateCertificate(certName, policy);
            certPoller.waitForCompletion();
            KeyVaultCertificateWithPolicy expected = certPoller.getFinalResult();
            assertEquals(certName, expected.getName());
            assertNotNull(expected.getProperties().getCreatedOn());
            deleteAndPurgeCertificate(certName);
        });
    }
private CertificateClient client;
@Override
protected void beforeTest() {
    // Framework hook invoked before each test; delegates to the shared setup
    // defined on CertificateClientTestBase.
    beforeTestSetup();
}
// Builds the synchronous CertificateClient under test for the given HTTP
// client / service-version combination and stores it in the shared 'client'
// field used by every test method.
private void getCertificateClient(HttpClient httpClient,
    CertificateServiceVersion serviceVersion) {
    HttpPipeline httpPipeline = getHttpPipeline(httpClient, serviceVersion);
    client = new CertificateClientBuilder()
        .vaultUrl(getEndpoint())
        .pipeline(httpPipeline)
        .serviceVersion(serviceVersion)
        .buildClient();
}
// Deletes the named certificate, purges it, and waits until the purge is
// observable, leaving the vault clean for the next test.
// NOTE(review): this private helper previously carried @ParameterizedTest/
// @MethodSource annotations, apparently pasted from a neighboring test method.
// JUnit 5 rejects private test methods and the (String) signature does not
// match the provider, so the misplaced annotations have been removed.
private void deleteAndPurgeCertificate(String certName) {
    SyncPoller<DeletedCertificate, Void> deletePoller = client.beginDeleteCertificate(certName);
    deletePoller.poll();
    deletePoller.waitForCompletion();
    client.purgeDeletedCertificate(certName);
    pollOnCertificatePurge(certName);
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
// Creating a certificate with an empty name must be rejected by the service.
public void createCertificateEmptyName(HttpClient httpClient, CertificateServiceVersion serviceVersion) {
    getCertificateClient(httpClient, serviceVersion);
    assertRestException(() -> client.beginCreateCertificate("", CertificatePolicy.getDefault()),
        HttpResponseException.class, HttpURLConnection.HTTP_BAD_METHOD);
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
// A null policy must fail fast client-side with NullPointerException.
public void createCertificateNullPolicy(HttpClient httpClient, CertificateServiceVersion serviceVersion) {
    getCertificateClient(httpClient, serviceVersion);
    assertRunnableThrowsException(() -> client.beginCreateCertificate(generateResourceId("tempCert"), null),
        NullPointerException.class);
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
// Null name and policy must fail fast. NOTE(review): the method name contains
// a typo ("Certoificate"); left as-is since it is part of the test's identity.
public void createCertoificateNull(HttpClient httpClient, CertificateServiceVersion serviceVersion) {
    getCertificateClient(httpClient, serviceVersion);
    assertRunnableThrowsException(() -> client.beginCreateCertificate(null, null),
        NullPointerException.class);
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
// Updating certificate properties must round-trip the replacement tag map.
public void updateCertificate(HttpClient httpClient, CertificateServiceVersion serviceVersion) {
    getCertificateClient(httpClient, serviceVersion);
    updateCertificateRunner((tags, updatedTags) -> {
        String certName = generateResourceId("testCertificate2");
        SyncPoller<CertificateOperation, KeyVaultCertificateWithPolicy> certPoller = client.beginCreateCertificate(certName,
            CertificatePolicy.getDefault(), true, tags);
        certPoller.waitForCompletion();
        KeyVaultCertificateWithPolicy certificate = certPoller.getFinalResult();
        KeyVaultCertificate keyVaultCertificate = client.updateCertificateProperties(certificate.getProperties().setTags(updatedTags));
        Map<String, String> returnedTags = keyVaultCertificate.getProperties().getTags();
        validateMapResponse(updatedTags, returnedTags);
        deleteAndPurgeCertificate(certName);
    });
}
// Asserts every expected key/value pair appears in the returned map.
// (Extra keys in 'returned' are intentionally tolerated.)
private void validateMapResponse(Map<String, String> expected, Map<String, String> returned) {
    for (String key : expected.keySet()) {
        String val = returned.get(key);
        String expectedVal = expected.get(key);
        assertEquals(expectedVal, val);
    }
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
// A certificate created disabled must stay disabled after a properties update.
public void updateDisabledCertificate(HttpClient httpClient, CertificateServiceVersion serviceVersion) {
    getCertificateClient(httpClient, serviceVersion);
    updateDisabledCertificateRunner((tags, updatedTags) -> {
        String certName = generateResourceId("testCertificate3");
        SyncPoller<CertificateOperation, KeyVaultCertificateWithPolicy> certPoller = client.beginCreateCertificate(certName,
            CertificatePolicy.getDefault(), false, tags);
        certPoller.waitForCompletion();
        KeyVaultCertificateWithPolicy certificate = certPoller.getFinalResult();
        KeyVaultCertificate keyVaultCertificate = client.updateCertificateProperties(certificate.getProperties().setTags(updatedTags));
        Map<String, String> returnedTags = keyVaultCertificate.getProperties().getTags();
        validateMapResponse(updatedTags, returnedTags);
        assertFalse(keyVaultCertificate.getProperties().isEnabled());
        deleteAndPurgeCertificate(certName);
    });
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
// Fetching the latest version must return the same policy the cert was created with.
public void getCertificate(HttpClient httpClient, CertificateServiceVersion serviceVersion) {
    getCertificateClient(httpClient, serviceVersion);
    getCertificateRunner((certificateName) -> {
        CertificatePolicy initialPolicy = setupPolicy();
        SyncPoller<CertificateOperation, KeyVaultCertificateWithPolicy> certPoller = client.beginCreateCertificate(certificateName,
            initialPolicy);
        certPoller.waitForCompletion();
        KeyVaultCertificateWithPolicy certificate = certPoller.getFinalResult();
        KeyVaultCertificateWithPolicy getCertificate = client.getCertificate(certificateName);
        validatePolicy(certificate.getPolicy(), getCertificate.getPolicy());
        deleteAndPurgeCertificate(certificateName);
    });
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
// Fetching an explicit version must return the matching certificate.
public void getCertificateSpecificVersion(HttpClient httpClient, CertificateServiceVersion serviceVersion) {
    getCertificateClient(httpClient, serviceVersion);
    getCertificateSpecificVersionRunner((certificateName) -> {
        CertificatePolicy initialPolicy = setupPolicy();
        SyncPoller<CertificateOperation, KeyVaultCertificateWithPolicy> certPoller = client.beginCreateCertificate(certificateName,
            initialPolicy);
        certPoller.waitForCompletion();
        KeyVaultCertificateWithPolicy certificate = certPoller.getFinalResult();
        KeyVaultCertificate getCertificate = client.getCertificateVersion(certificateName, certificate.getProperties().getVersion());
        validateCertificate(certificate, getCertificate);
        deleteAndPurgeCertificate(certificateName);
    });
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
// Looking up a certificate that does not exist must return 404.
public void getCertificateNotFound(HttpClient httpClient, CertificateServiceVersion serviceVersion) {
    getCertificateClient(httpClient, serviceVersion);
    assertRestException(() -> client.getCertificate("non-existing"), ResourceNotFoundException.class, HttpURLConnection.HTTP_NOT_FOUND);
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
// Deleting a certificate must expose deletion metadata (timestamps, recovery id).
public void deleteCertificate(HttpClient httpClient, CertificateServiceVersion serviceVersion) {
    getCertificateClient(httpClient, serviceVersion);
    deleteCertificateRunner((certificateName) -> {
        CertificatePolicy initialPolicy = setupPolicy();
        SyncPoller<CertificateOperation, KeyVaultCertificateWithPolicy> certPoller = client.beginCreateCertificate(certificateName,
            initialPolicy);
        certPoller.waitForCompletion();
        SyncPoller<DeletedCertificate, Void> deletedKeyPoller = client.beginDeleteCertificate(certificateName);
        PollResponse<DeletedCertificate> pollResponse = deletedKeyPoller.poll();
        DeletedCertificate deletedCertificate = pollResponse.getValue();
        deletedKeyPoller.waitForCompletion();
        assertNotNull(deletedCertificate.getDeletedOn());
        assertNotNull(deletedCertificate.getRecoveryId());
        assertNotNull(deletedCertificate.getScheduledPurgeDate());
        assertEquals(certificateName, deletedCertificate.getName());
        client.purgeDeletedCertificate(certificateName);
        pollOnCertificatePurge(certificateName);
    });
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
// Deleting a missing certificate must return 404.
public void deleteCertificateNotFound(HttpClient httpClient, CertificateServiceVersion serviceVersion) {
    getCertificateClient(httpClient, serviceVersion);
    assertRestException(() -> client.beginDeleteCertificate("non-existing"), ResourceNotFoundException.class, HttpURLConnection.HTTP_NOT_FOUND);
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
// A soft-deleted certificate must be retrievable via getDeletedCertificate.
public void getDeletedCertificate(HttpClient httpClient, CertificateServiceVersion serviceVersion) {
    getCertificateClient(httpClient, serviceVersion);
    getDeletedCertificateRunner((certificateName) -> {
        CertificatePolicy initialPolicy = setupPolicy();
        SyncPoller<CertificateOperation, KeyVaultCertificateWithPolicy> certPoller = client.beginCreateCertificate(certificateName,
            initialPolicy);
        certPoller.waitForCompletion();
        SyncPoller<DeletedCertificate, Void> deletedKeyPoller = client.beginDeleteCertificate(certificateName);
        PollResponse<DeletedCertificate> pollResponse = deletedKeyPoller.poll();
        DeletedCertificate deletedCertificate = pollResponse.getValue();
        deletedKeyPoller.waitForCompletion();
        // Re-fetch through the service to validate the GET path, not the poller value.
        deletedCertificate = client.getDeletedCertificate(certificateName);
        assertNotNull(deletedCertificate.getDeletedOn());
        assertNotNull(deletedCertificate.getRecoveryId());
        assertNotNull(deletedCertificate.getScheduledPurgeDate());
        assertEquals(certificateName, deletedCertificate.getName());
        client.purgeDeletedCertificate(certificateName);
        pollOnCertificatePurge(certificateName);
    });
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
// Fetching a deleted certificate that never existed must return 404.
public void getDeletedCertificateNotFound(HttpClient httpClient, CertificateServiceVersion serviceVersion) {
    getCertificateClient(httpClient, serviceVersion);
    assertRestException(() -> client.getDeletedCertificate("non-existing"), ResourceNotFoundException.class, HttpURLConnection.HTTP_NOT_FOUND);
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
// A soft-deleted certificate must be recoverable to its original content.
public void recoverDeletedCertificate(HttpClient httpClient, CertificateServiceVersion serviceVersion) {
    getCertificateClient(httpClient, serviceVersion);
    recoverDeletedKeyRunner((certificateName) -> {
        CertificatePolicy initialPolicy = setupPolicy();
        SyncPoller<CertificateOperation, KeyVaultCertificateWithPolicy> certPoller = client.beginCreateCertificate(certificateName,
            initialPolicy);
        certPoller.waitForCompletion();
        KeyVaultCertificate createdCertificate = certPoller.getFinalResult();
        SyncPoller<DeletedCertificate, Void> deletedKeyPoller = client.beginDeleteCertificate(certificateName);
        // First poll kicks the deletion long-running operation; its value is unused.
        PollResponse<DeletedCertificate> pollResponse = deletedKeyPoller.poll();
        deletedKeyPoller.waitForCompletion();
        SyncPoller<KeyVaultCertificateWithPolicy, Void> recoverPoller = client.beginRecoverDeletedCertificate(certificateName);
        PollResponse<KeyVaultCertificateWithPolicy> recoverPollResponse = recoverPoller.poll();
        KeyVaultCertificate recoveredCert = recoverPollResponse.getValue();
        recoverPoller.waitForCompletion();
        assertEquals(certificateName, recoveredCert.getName());
        validateCertificate(createdCertificate, recoveredCert);
        deleteAndPurgeCertificate(certificateName);
    });
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
// Recovering a non-existent deleted certificate must return 404.
public void recoverDeletedCertificateNotFound(HttpClient httpClient, CertificateServiceVersion serviceVersion) {
    getCertificateClient(httpClient, serviceVersion);
    assertRestException(() -> client.beginRecoverDeletedCertificate("non-existing"), ResourceNotFoundException.class, HttpURLConnection.HTTP_NOT_FOUND);
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
// Backing up an existing certificate must yield a non-empty blob.
public void backupCertificate(HttpClient httpClient, CertificateServiceVersion serviceVersion) {
    getCertificateClient(httpClient, serviceVersion);
    backupCertificateRunner((certificateName) -> {
        CertificatePolicy initialPolicy = setupPolicy();
        SyncPoller<CertificateOperation, KeyVaultCertificateWithPolicy> certPoller = client.beginCreateCertificate(certificateName,
            initialPolicy);
        certPoller.waitForCompletion();
        byte[] backupBytes = (client.backupCertificate(certificateName));
        assertNotNull(backupBytes);
        assertTrue(backupBytes.length > 0);
    });
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
// Backing up a missing certificate must return 404.
public void backupCertificateNotFound(HttpClient httpClient, CertificateServiceVersion serviceVersion) {
    getCertificateClient(httpClient, serviceVersion);
    assertRestException(() -> client.backupCertificate("non-existing"), ResourceNotFoundException.class, HttpURLConnection.HTTP_NOT_FOUND);
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
// Backup, purge, then restore: the restored certificate must keep name and policy.
public void restoreCertificate(HttpClient httpClient, CertificateServiceVersion serviceVersion) {
    getCertificateClient(httpClient, serviceVersion);
    restoreCertificateRunner((certificateName) -> {
        CertificatePolicy initialPolicy = setupPolicy();
        SyncPoller<CertificateOperation, KeyVaultCertificateWithPolicy> certPoller = client.beginCreateCertificate(certificateName,
            initialPolicy);
        certPoller.waitForCompletion();
        KeyVaultCertificateWithPolicy createdCert = certPoller.getFinalResult();
        byte[] backupBytes = (client.backupCertificate(certificateName));
        assertNotNull(backupBytes);
        assertTrue(backupBytes.length > 0);
        deleteAndPurgeCertificate(certificateName);
        // Give the service time to finish the purge before restoring the same name.
        sleepInRecordMode(40000);
        KeyVaultCertificateWithPolicy restoredCertificate = client.restoreCertificateBackup(backupBytes);
        assertEquals(certificateName, restoredCertificate.getName());
        validatePolicy(restoredCertificate.getPolicy(), createdCert.getPolicy());
        deleteAndPurgeCertificate(certificateName);
    });
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
// A second poller obtained via getCertificateOperation must observe the same result.
public void getCertificateOperation(HttpClient httpClient, CertificateServiceVersion serviceVersion) {
    getCertificateClient(httpClient, serviceVersion);
    getCertificateOperationRunner((certName) -> {
        SyncPoller<CertificateOperation, KeyVaultCertificateWithPolicy> certPoller =
            client.beginCreateCertificate(certName, setupPolicy());
        SyncPoller<CertificateOperation, KeyVaultCertificateWithPolicy> retrievePoller = client.getCertificateOperation(certName);
        retrievePoller.waitForCompletion();
        certPoller.waitForCompletion();
        KeyVaultCertificateWithPolicy reteievedCert = retrievePoller.getFinalResult();
        KeyVaultCertificateWithPolicy expectedCert = certPoller.getFinalResult();
        validateCertificate(expectedCert, reteievedCert);
        validatePolicy(expectedCert.getPolicy(),
            reteievedCert.getPolicy());
        deleteAndPurgeCertificate(certName);
    });
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
// Cancelling an in-flight creation must leave the certificate disabled.
public void cancelCertificateOperation(HttpClient httpClient, CertificateServiceVersion serviceVersion) {
    getCertificateClient(httpClient, serviceVersion);
    cancelCertificateOperationRunner((certName) -> {
        SyncPoller<CertificateOperation, KeyVaultCertificateWithPolicy> certPoller =
            client.beginCreateCertificate(certName, CertificatePolicy.getDefault());
        certPoller.poll();
        certPoller.cancelOperation();
        certPoller.waitForCompletion();
        KeyVaultCertificateWithPolicy certificate = certPoller.getFinalResult();
        assertEquals(false, certificate.getProperties().isEnabled());
    });
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
// Deleting a completed operation must succeed once, then 404 on a second delete.
public void deleteCertificateOperation(HttpClient httpClient, CertificateServiceVersion serviceVersion) {
    getCertificateClient(httpClient, serviceVersion);
    deleteCertificateOperationRunner((certName) -> {
        SyncPoller<CertificateOperation, KeyVaultCertificateWithPolicy> certPoller =
            client.beginCreateCertificate(certName, CertificatePolicy.getDefault());
        certPoller.waitForCompletion();
        CertificateOperation certificateOperation = client.deleteCertificateOperation(certName);
        assertEquals("completed", certificateOperation.getStatus());
        assertRestException(() -> client.deleteCertificateOperation(certName), ResourceNotFoundException.class, HttpURLConnection.HTTP_NOT_FOUND);
    });
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
// The policy returned with the created certificate must match the one submitted.
public void getCertificatePolicy(HttpClient httpClient, CertificateServiceVersion serviceVersion) {
    getCertificateClient(httpClient, serviceVersion);
    getCertificatePolicyRunner((certName) -> {
        SyncPoller<CertificateOperation, KeyVaultCertificateWithPolicy> certPoller =
            client.beginCreateCertificate(certName, setupPolicy());
        certPoller.waitForCompletion();
        KeyVaultCertificateWithPolicy certificate = certPoller.getFinalResult();
        validatePolicy(setupPolicy(), certificate.getPolicy());
        deleteAndPurgeCertificate(certName);
    });
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
// Updating the policy (exportable=false) must be reflected in the service response.
public void updateCertificatePolicy(HttpClient httpClient, CertificateServiceVersion serviceVersion) {
    getCertificateClient(httpClient, serviceVersion);
    updateCertificatePolicyRunner((certName) -> {
        SyncPoller<CertificateOperation, KeyVaultCertificateWithPolicy> certPoller =
            client.beginCreateCertificate(certName, setupPolicy());
        certPoller.waitForCompletion();
        KeyVaultCertificateWithPolicy certificate = certPoller.getFinalResult();
        certificate.getPolicy().setExportable(false);
        CertificatePolicy policy = client.updateCertificatePolicy(certName, certificate.getPolicy());
        validatePolicy(certificate.getPolicy(), policy);
        deleteAndPurgeCertificate(certName);
    });
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
// Restoring from a malformed backup blob must fail with 400.
public void restoreCertificateFromMalformedBackup(HttpClient httpClient, CertificateServiceVersion serviceVersion) {
    getCertificateClient(httpClient, serviceVersion);
    byte[] keyBackupBytes = "non-existing".getBytes();
    assertRestException(() -> client.restoreCertificateBackup(keyBackupBytes), ResourceModifiedException.class, HttpURLConnection.HTTP_BAD_REQUEST);
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
// Every created certificate must appear in the certificates listing.
public void listCertificates(HttpClient httpClient, CertificateServiceVersion serviceVersion) {
    getCertificateClient(httpClient, serviceVersion);
    listCertificatesRunner((certificates) -> {
        HashSet<String> certificatesToList = new HashSet<>(certificates);
        for (String certName : certificatesToList) {
            SyncPoller<CertificateOperation, KeyVaultCertificateWithPolicy> certPoller = client.beginCreateCertificate(certName,
                CertificatePolicy.getDefault());
            certPoller.waitForCompletion();
        }
        // Allow the service listing to become consistent with the new certs.
        sleepInRecordMode(90000);
        for (CertificateProperties actualKey : client.listPropertiesOfCertificates()) {
            if (certificatesToList.contains(actualKey.getName())) {
                certificatesToList.remove(actualKey.getName());
            }
        }
        assertEquals(0, certificatesToList.size());
        for (String certName : certificates) {
            deleteAndPurgeCertificate(certName);
        }
    });
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
// Creating an issuer must echo back the submitted issuer data.
public void createIssuer(HttpClient httpClient, CertificateServiceVersion serviceVersion) {
    getCertificateClient(httpClient, serviceVersion);
    createIssuereRunner((issuer) -> {
        CertificateIssuer createdIssuer = client.createIssuer(issuer);
        validateIssuer(issuer, createdIssuer);
    });
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
// An issuer with an empty name must be rejected.
public void createIssuerEmptyName(HttpClient httpClient, CertificateServiceVersion serviceVersion) {
    getCertificateClient(httpClient, serviceVersion);
    assertRestException(() -> client.createIssuer(new CertificateIssuer("", "")),
        HttpResponseException.class, HttpURLConnection.HTTP_BAD_METHOD);
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
// An issuer with a null provider must be rejected.
public void createIssuerNullProvider(HttpClient httpClient, CertificateServiceVersion serviceVersion) {
    getCertificateClient(httpClient, serviceVersion);
    assertRestException(() -> client.createIssuer(new CertificateIssuer("", null)),
        HttpResponseException.class, HttpURLConnection.HTTP_BAD_METHOD);
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
// A null issuer must fail fast client-side with NullPointerException.
public void createIssuerNull(HttpClient httpClient, CertificateServiceVersion serviceVersion) {
    getCertificateClient(httpClient, serviceVersion);
    assertRunnableThrowsException(() -> client.createIssuer(null), NullPointerException.class);
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
// A created issuer must be retrievable by name with identical content.
public void getCertificateIssuer(HttpClient httpClient, CertificateServiceVersion serviceVersion) {
    getCertificateClient(httpClient, serviceVersion);
    getCertificateIssuerRunner((issuer) -> {
        CertificateIssuer createdIssuer = client.createIssuer(issuer);
        CertificateIssuer retrievedIssuer = client.getIssuer(issuer.getName());
        validateIssuer(issuer, retrievedIssuer);
    });
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
// Fetching an issuer that does not exist must return 404.
public void getCertificateIssuerNotFound(HttpClient httpClient, CertificateServiceVersion serviceVersion) {
    getCertificateClient(httpClient, serviceVersion);
    // Fixed copy-paste bug: this test previously exercised backupCertificate
    // instead of the issuer lookup it is named for.
    assertRestException(() -> client.getIssuer("non-existing"), ResourceNotFoundException.class, HttpURLConnection.HTTP_NOT_FOUND);
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
// Deleting an issuer must return the deleted issuer's data.
public void deleteCertificateIssuer(HttpClient httpClient, CertificateServiceVersion serviceVersion) {
    getCertificateClient(httpClient, serviceVersion);
    deleteCertificateIssuerRunner((issuer) -> {
        CertificateIssuer createdIssuer = client.createIssuer(issuer);
        CertificateIssuer deletedIssuer = client.deleteIssuer(issuer.getName());
        validateIssuer(issuer, deletedIssuer);
    });
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
// Deleting an issuer that does not exist must return 404.
public void deleteCertificateIssuerNotFound(HttpClient httpClient, CertificateServiceVersion serviceVersion) {
    getCertificateClient(httpClient, serviceVersion);
    // Fixed copy-paste bug: this test previously exercised backupCertificate
    // instead of the issuer deletion it is named for.
    assertRestException(() -> client.deleteIssuer("non-existing"), ResourceNotFoundException.class, HttpURLConnection.HTTP_NOT_FOUND);
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
// Every created issuer must appear in the issuers listing.
public void listCertificateIssuers(HttpClient httpClient, CertificateServiceVersion serviceVersion) {
    getCertificateClient(httpClient, serviceVersion);
    listCertificateIssuersRunner((certificateIssuers) -> {
        HashMap<String, CertificateIssuer> certificateIssuersToList = new HashMap<>(certificateIssuers);
        for (CertificateIssuer issuer : certificateIssuersToList.values()) {
            CertificateIssuer certificateIssuer = client.createIssuer(issuer);
            validateIssuer(issuer, certificateIssuer);
        }
        for (IssuerProperties issuerProperties : client.listPropertiesOfIssuers()) {
            if (certificateIssuersToList.containsKey(issuerProperties.getName())) {
                certificateIssuersToList.remove(issuerProperties.getName());
            }
        }
        assertEquals(0, certificateIssuersToList.size());
        for (CertificateIssuer issuer : certificateIssuers.values()) {
            client.deleteIssuer(issuer.getName());
        }
    });
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
// Setting contacts must echo the submitted contact back.
public void setContacts(HttpClient httpClient, CertificateServiceVersion serviceVersion) {
    getCertificateClient(httpClient, serviceVersion);
    List<CertificateContact> contacts = Arrays.asList(setupContact());
    client.setContacts(contacts).forEach((retrievedContact) -> validateContact(setupContact(), retrievedContact));
    client.deleteContacts();
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
// A set contact must be returned by listContacts.
public void listContacts(HttpClient httpClient, CertificateServiceVersion serviceVersion) {
    getCertificateClient(httpClient, serviceVersion);
    List<CertificateContact> contacts = Arrays.asList(setupContact());
    client.setContacts(contacts).forEach((retrievedContact) -> validateContact(setupContact(), retrievedContact));
    // Brief pause so the listing reflects the contact just set.
    sleepInRecordMode(6000);
    client.listContacts().stream().forEach((retrievedContact) -> validateContact(setupContact(), retrievedContact));
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
// Deleting contacts must return the contacts that were removed.
public void deleteContacts(HttpClient httpClient, CertificateServiceVersion serviceVersion) {
    getCertificateClient(httpClient, serviceVersion);
    List<CertificateContact> contacts = Arrays.asList(setupContact());
    client.setContacts(contacts).forEach((retrievedContact) -> validateContact(setupContact(), retrievedContact));
    PagedIterable<CertificateContact> certificateContacts = client.deleteContacts();
    validateContact(setupContact(), certificateContacts.iterator().next());
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
// Polling a non-existent operation must return 404.
// NOTE(review): method name contains a typo ("OperatioNotFound"); left as-is.
public void getCertificateOperatioNotFound(HttpClient httpClient, CertificateServiceVersion serviceVersion) {
    getCertificateClient(httpClient, serviceVersion);
    assertRestException(() -> client.getCertificateOperation("non-existing").poll(), ResourceNotFoundException.class, HttpURLConnection.HTTP_NOT_FOUND);
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
// Fetching the policy of a non-existent certificate must return 404.
public void getCertificatePolicyNotFound(HttpClient httpClient, CertificateServiceVersion serviceVersion) {
    getCertificateClient(httpClient, serviceVersion);
    assertRestException(() -> client.getCertificatePolicy("non-existing"), ResourceNotFoundException.class, HttpURLConnection.HTTP_NOT_FOUND);
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
// Creating the same certificate repeatedly must yield one listed version per creation.
public void listCertificateVersions(HttpClient httpClient, CertificateServiceVersion serviceVersion) {
    getCertificateClient(httpClient, serviceVersion);
    String certName = generateResourceId("testListCertVersion");
    int counter = 5;
    for (int i = 0; i < counter; i++) {
        SyncPoller<CertificateOperation, KeyVaultCertificateWithPolicy> certPoller = client.beginCreateCertificate(certName,
            CertificatePolicy.getDefault());
        certPoller.waitForCompletion();
    }
    int countRecv = 0;
    for (CertificateProperties certificateProperties : client.listPropertiesOfCertificateVersions(certName)) {
        countRecv++;
        assertEquals(certificateProperties.getName(), certName);
    }
    assertEquals(counter, countRecv);
    deleteAndPurgeCertificate(certName);
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
// Deleted certificates must show up in the deleted-certificates listing with
// deletion metadata.
public void listDeletedCertificates(HttpClient httpClient, CertificateServiceVersion serviceVersion) {
    getCertificateClient(httpClient, serviceVersion);
    listDeletedCertificatesRunner((certificates) -> {
        HashSet<String> certificatesToDelete = new HashSet<>(certificates);
        for (String certName : certificatesToDelete) {
            SyncPoller<CertificateOperation, KeyVaultCertificateWithPolicy> certPoller = client.beginCreateCertificate(certName,
                CertificatePolicy.getDefault());
            PollResponse<CertificateOperation> pollResponse = certPoller.poll();
            // Manual poll loop (instead of waitForCompletion) so playback
            // recordings capture each intermediate poll.
            while (!pollResponse.getStatus().isComplete()) {
                sleepInRecordMode(1000);
                pollResponse = certPoller.poll();
            }
        }
        for (String certName : certificates) {
            SyncPoller<DeletedCertificate, Void> poller = client.beginDeleteCertificate(certName);
            PollResponse<DeletedCertificate> pollResponse = poller.poll();
            while (!pollResponse.getStatus().isComplete()) {
                sleepInRecordMode(1000);
                pollResponse = poller.poll();
            }
            assertNotNull(pollResponse.getValue());
        }
        // Allow the deleted-certificates listing to become consistent.
        sleepInRecordMode(90000);
        Iterable<DeletedCertificate> deletedCertificates = client.listDeletedCertificates();
        assertTrue(deletedCertificates.iterator().hasNext());
        for (DeletedCertificate deletedCertificate : deletedCertificates) {
            assertNotNull(deletedCertificate.getDeletedOn());
            assertNotNull(deletedCertificate.getRecoveryId());
            certificatesToDelete.remove(deletedCertificate.getName());
        }
    });
}
/**
 * Imports the fixture PFX certificate and verifies its thumbprint, enabled state, and
 * X.509 subject/issuer names, then cleans the certificate up.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void importCertificate(HttpClient httpClient, CertificateServiceVersion serviceVersion) {
    getCertificateClient(httpClient, serviceVersion);
    importCertificateRunner((importCertificateOptions) -> {
        KeyVaultCertificateWithPolicy importedCertificate = client.importCertificate(importCertificateOptions);
        // The fixture certificate has a fixed, known thumbprint.
        assertTrue(toHexString(importedCertificate.getProperties().getX509Thumbprint()).equalsIgnoreCase("7cb8b7539d87ba7215357b9b9049dff2d3fa59ba"));
        assertEquals(importCertificateOptions.isEnabled(), importedCertificate.getProperties().isEnabled());
        // Multi-catch replaces the two previously identical catch blocks.
        X509Certificate x509Certificate = null;
        try {
            x509Certificate = loadCerToX509Certificate(importedCertificate);
        } catch (CertificateException | IOException e) {
            e.printStackTrace();
            fail();
        }
        // assertEquals gives a useful failure message, unlike assertTrue(equals(...)).
        assertEquals("CN=KeyVaultTest", x509Certificate.getSubjectX500Principal().getName());
        assertEquals("CN=Root Agency", x509Certificate.getIssuerX500Principal().getName());
        deleteAndPurgeCertificate(importCertificateOptions.getName());
    });
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void mergeCertificateNotFound(HttpClient httpClient, CertificateServiceVersion serviceVersion) {
    getCertificateClient(httpClient, serviceVersion);
    // Merging into a certificate with no pending operation must return the service's 404.
    assertRestException(() -> client.mergeCertificate(new MergeCertificateOptions(generateResourceId("testCert16"), Arrays.asList("test".getBytes()))),
        HttpResponseException.class, HttpURLConnection.HTTP_NOT_FOUND);
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void importPemCertificate(HttpClient httpClient, CertificateServiceVersion serviceVersion) throws IOException {
    getCertificateClient(httpClient, serviceVersion);
    importPemCertificateRunner((importCertificateOptions) -> {
        // A PEM import must round-trip the enabled flag and report PEM as the content type.
        KeyVaultCertificateWithPolicy importedCertificate = client.importCertificate(importCertificateOptions);
        assertEquals(importCertificateOptions.isEnabled(), importedCertificate.getProperties().isEnabled());
        assertEquals(CertificateContentType.PEM, importedCertificate.getPolicy().getContentType());
        deleteAndPurgeCertificate(importCertificateOptions.getName());
    });
}
/**
 * Polls up to 10 times (~20s) until a purge completes; the certificate is gone once
 * getDeletedCertificate starts throwing ResourceNotFoundException. Always returns null
 * (the original returned the always-null local in the success branch); behavior unchanged.
 */
private DeletedCertificate pollOnCertificatePurge(String certificateName) {
    for (int attempt = 0; attempt < 10; attempt++) {
        try {
            client.getDeletedCertificate(certificateName);
        } catch (ResourceNotFoundException e) {
            // Purge finished — the deleted certificate no longer exists.
            return null;
        }
        // Still present: wait and poll again (sleep only applies in record mode).
        sleepInRecordMode(2000);
    }
    System.err.printf("Deleted Key %s was not purged \n", certificateName);
    return null;
}
}

class CertificateClientTest extends CertificateClientTestBase {
private CertificateClient client;
// Per-test hook from the shared test base; prepares recorded/live test state.
@Override
protected void beforeTest() {
    beforeTestSetup();
}
/**
 * Builds the synchronous {@code CertificateClient} under test against the configured
 * vault endpoint, using the record/playback pipeline for the given service version.
 */
private void createCertificateClient(HttpClient httpClient,
    CertificateServiceVersion serviceVersion) {
    HttpPipeline httpPipeline = getHttpPipeline(httpClient, serviceVersion);
    client = new CertificateClientBuilder()
        .vaultUrl(getEndpoint())
        .pipeline(httpPipeline)
        .serviceVersion(serviceVersion)
        .buildClient();
}
/**
 * Test helper (not itself a test): deletes the named certificate, waits for the delete
 * to finish, purges it, and polls until the purge completes.
 *
 * Fix: the original carried stray {@code @ParameterizedTest}/{@code @MethodSource}
 * annotations; JUnit 5 rejects private parameterized test methods, so they are removed.
 */
private void deleteAndPurgeCertificate(String certName) {
    SyncPoller<DeletedCertificate, Void> deletePoller = client.beginDeleteCertificate(certName);
    deletePoller.poll();
    deletePoller.waitForCompletion();
    client.purgeDeletedCertificate(certName);
    pollOnCertificatePurge(certName);
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void createCertificateEmptyName(HttpClient httpClient, CertificateServiceVersion serviceVersion) {
    createCertificateClient(httpClient, serviceVersion);
    // An empty certificate name is rejected by the service (surfaces as HTTP 405 here).
    assertRestException(() -> client.beginCreateCertificate("", CertificatePolicy.getDefault()),
        HttpResponseException.class, HttpURLConnection.HTTP_BAD_METHOD);
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void createCertificateNullPolicy(HttpClient httpClient, CertificateServiceVersion serviceVersion) {
    createCertificateClient(httpClient, serviceVersion);
    // A null policy must fail client-side with NullPointerException before any request is sent.
    assertRunnableThrowsException(() -> client.beginCreateCertificate(generateResourceId("tempCert"), null),
        NullPointerException.class);
}
// NOTE(review): method name has a typo ("Certoificate"); left unchanged because the test
// name identifies its recorded session — renaming would break playback. Verify before fixing.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void createCertoificateNull(HttpClient httpClient, CertificateServiceVersion serviceVersion) {
    createCertificateClient(httpClient, serviceVersion);
    // Null name and null policy must fail client-side with NullPointerException.
    assertRunnableThrowsException(() -> client.beginCreateCertificate(null, null),
        NullPointerException.class);
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void updateCertificate(HttpClient httpClient, CertificateServiceVersion serviceVersion) {
    createCertificateClient(httpClient, serviceVersion);
    updateCertificateRunner((tags, updatedTags) -> {
        String certName = generateResourceId("testCertificate2");
        // Create an enabled certificate with the initial tags, then replace them with updatedTags.
        SyncPoller<CertificateOperation, KeyVaultCertificateWithPolicy> certPoller = client.beginCreateCertificate(certName,
            CertificatePolicy.getDefault(), true, tags);
        certPoller.waitForCompletion();
        KeyVaultCertificateWithPolicy certificate = certPoller.getFinalResult();
        KeyVaultCertificate keyVaultCertificate = client.updateCertificateProperties(certificate.getProperties().setTags(updatedTags));
        Map<String, String> returnedTags = keyVaultCertificate.getProperties().getTags();
        validateMapResponse(updatedTags, returnedTags);
        deleteAndPurgeCertificate(certName);
    });
}
/**
 * Asserts that every entry of {@code expected} appears in {@code returned} with the same
 * value. Iterating the entry set avoids the second per-key lookup the original performed.
 */
private void validateMapResponse(Map<String, String> expected, Map<String, String> returned) {
    for (Map.Entry<String, String> entry : expected.entrySet()) {
        assertEquals(entry.getValue(), returned.get(entry.getKey()));
    }
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void updateDisabledCertificate(HttpClient httpClient, CertificateServiceVersion serviceVersion) {
    createCertificateClient(httpClient, serviceVersion);
    updateDisabledCertificateRunner((tags, updatedTags) -> {
        String certName = generateResourceId("testCertificate3");
        // Create a DISABLED certificate; updating its tags must not re-enable it.
        SyncPoller<CertificateOperation, KeyVaultCertificateWithPolicy> certPoller = client.beginCreateCertificate(certName,
            CertificatePolicy.getDefault(), false, tags);
        certPoller.waitForCompletion();
        KeyVaultCertificateWithPolicy certificate = certPoller.getFinalResult();
        KeyVaultCertificate keyVaultCertificate = client.updateCertificateProperties(certificate.getProperties().setTags(updatedTags));
        Map<String, String> returnedTags = keyVaultCertificate.getProperties().getTags();
        validateMapResponse(updatedTags, returnedTags);
        assertFalse(keyVaultCertificate.getProperties().isEnabled());
        deleteAndPurgeCertificate(certName);
    });
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void getCertificate(HttpClient httpClient, CertificateServiceVersion serviceVersion) {
    createCertificateClient(httpClient, serviceVersion);
    getCertificateRunner((certificateName) -> {
        // Create with a custom policy, then fetch the latest version and compare policies.
        CertificatePolicy initialPolicy = setupPolicy();
        SyncPoller<CertificateOperation, KeyVaultCertificateWithPolicy> certPoller = client.beginCreateCertificate(certificateName,
            initialPolicy);
        certPoller.waitForCompletion();
        KeyVaultCertificateWithPolicy certificate = certPoller.getFinalResult();
        KeyVaultCertificateWithPolicy getCertificate = client.getCertificate(certificateName);
        validatePolicy(certificate.getPolicy(), getCertificate.getPolicy());
        deleteAndPurgeCertificate(certificateName);
    });
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void getCertificateSpecificVersion(HttpClient httpClient, CertificateServiceVersion serviceVersion) {
    createCertificateClient(httpClient, serviceVersion);
    getCertificateSpecificVersionRunner((certificateName) -> {
        // Fetch by the exact version id returned at creation time, not just the latest.
        CertificatePolicy initialPolicy = setupPolicy();
        SyncPoller<CertificateOperation, KeyVaultCertificateWithPolicy> certPoller = client.beginCreateCertificate(certificateName,
            initialPolicy);
        certPoller.waitForCompletion();
        KeyVaultCertificateWithPolicy certificate = certPoller.getFinalResult();
        KeyVaultCertificate getCertificate = client.getCertificateVersion(certificateName, certificate.getProperties().getVersion());
        validateCertificate(certificate, getCertificate);
        deleteAndPurgeCertificate(certificateName);
    });
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void getCertificateNotFound(HttpClient httpClient, CertificateServiceVersion serviceVersion) {
    createCertificateClient(httpClient, serviceVersion);
    // Fetching a certificate that was never created must surface the service's 404.
    assertRestException(() -> client.getCertificate("non-existing"), ResourceNotFoundException.class, HttpURLConnection.HTTP_NOT_FOUND);
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void deleteCertificate(HttpClient httpClient, CertificateServiceVersion serviceVersion) {
    createCertificateClient(httpClient, serviceVersion);
    deleteCertificateRunner((certificateName) -> {
        CertificatePolicy initialPolicy = setupPolicy();
        SyncPoller<CertificateOperation, KeyVaultCertificateWithPolicy> certPoller = client.beginCreateCertificate(certificateName,
            initialPolicy);
        certPoller.waitForCompletion();
        // Begin the delete and inspect the first poll result, then wait for completion.
        SyncPoller<DeletedCertificate, Void> deletedKeyPoller = client.beginDeleteCertificate(certificateName);
        PollResponse<DeletedCertificate> pollResponse = deletedKeyPoller.poll();
        DeletedCertificate deletedCertificate = pollResponse.getValue();
        deletedKeyPoller.waitForCompletion();
        // Soft-delete metadata must be populated on the deleted certificate.
        assertNotNull(deletedCertificate.getDeletedOn());
        assertNotNull(deletedCertificate.getRecoveryId());
        assertNotNull(deletedCertificate.getScheduledPurgeDate());
        assertEquals(certificateName, deletedCertificate.getName());
        client.purgeDeletedCertificate(certificateName);
        pollOnCertificatePurge(certificateName);
    });
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void deleteCertificateNotFound(HttpClient httpClient, CertificateServiceVersion serviceVersion) {
    createCertificateClient(httpClient, serviceVersion);
    // Deleting a non-existent certificate must surface the service's 404.
    assertRestException(() -> client.beginDeleteCertificate("non-existing"), ResourceNotFoundException.class, HttpURLConnection.HTTP_NOT_FOUND);
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void getDeletedCertificate(HttpClient httpClient, CertificateServiceVersion serviceVersion) {
    createCertificateClient(httpClient, serviceVersion);
    getDeletedCertificateRunner((certificateName) -> {
        CertificatePolicy initialPolicy = setupPolicy();
        SyncPoller<CertificateOperation, KeyVaultCertificateWithPolicy> certPoller = client.beginCreateCertificate(certificateName,
            initialPolicy);
        certPoller.waitForCompletion();
        SyncPoller<DeletedCertificate, Void> deletedKeyPoller = client.beginDeleteCertificate(certificateName);
        PollResponse<DeletedCertificate> pollResponse = deletedKeyPoller.poll();
        DeletedCertificate deletedCertificate = pollResponse.getValue();
        deletedKeyPoller.waitForCompletion();
        // Re-fetch via getDeletedCertificate and verify soft-delete metadata is populated.
        deletedCertificate = client.getDeletedCertificate(certificateName);
        assertNotNull(deletedCertificate.getDeletedOn());
        assertNotNull(deletedCertificate.getRecoveryId());
        assertNotNull(deletedCertificate.getScheduledPurgeDate());
        assertEquals(certificateName, deletedCertificate.getName());
        client.purgeDeletedCertificate(certificateName);
        pollOnCertificatePurge(certificateName);
    });
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void getDeletedCertificateNotFound(HttpClient httpClient, CertificateServiceVersion serviceVersion) {
    createCertificateClient(httpClient, serviceVersion);
    // Fetching a deleted certificate that does not exist must surface the service's 404.
    assertRestException(() -> client.getDeletedCertificate("non-existing"), ResourceNotFoundException.class, HttpURLConnection.HTTP_NOT_FOUND);
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void recoverDeletedCertificate(HttpClient httpClient, CertificateServiceVersion serviceVersion) {
    createCertificateClient(httpClient, serviceVersion);
    recoverDeletedKeyRunner((certificateName) -> {
        CertificatePolicy initialPolicy = setupPolicy();
        SyncPoller<CertificateOperation, KeyVaultCertificateWithPolicy> certPoller = client.beginCreateCertificate(certificateName,
            initialPolicy);
        certPoller.waitForCompletion();
        KeyVaultCertificate createdCertificate = certPoller.getFinalResult();
        // Delete, then recover; the recovered certificate must match the original.
        SyncPoller<DeletedCertificate, Void> deletedKeyPoller = client.beginDeleteCertificate(certificateName);
        PollResponse<DeletedCertificate> pollResponse = deletedKeyPoller.poll();
        deletedKeyPoller.waitForCompletion();
        SyncPoller<KeyVaultCertificateWithPolicy, Void> recoverPoller = client.beginRecoverDeletedCertificate(certificateName);
        PollResponse<KeyVaultCertificateWithPolicy> recoverPollResponse = recoverPoller.poll();
        KeyVaultCertificate recoveredCert = recoverPollResponse.getValue();
        recoverPoller.waitForCompletion();
        assertEquals(certificateName, recoveredCert.getName());
        validateCertificate(createdCertificate, recoveredCert);
        deleteAndPurgeCertificate(certificateName);
    });
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void recoverDeletedCertificateNotFound(HttpClient httpClient, CertificateServiceVersion serviceVersion) {
    createCertificateClient(httpClient, serviceVersion);
    // Recovering a certificate that was never deleted must surface the service's 404.
    assertRestException(() -> client.beginRecoverDeletedCertificate("non-existing"), ResourceNotFoundException.class, HttpURLConnection.HTTP_NOT_FOUND);
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void backupCertificate(HttpClient httpClient, CertificateServiceVersion serviceVersion) {
    createCertificateClient(httpClient, serviceVersion);
    backupCertificateRunner((certificateName) -> {
        CertificatePolicy initialPolicy = setupPolicy();
        SyncPoller<CertificateOperation, KeyVaultCertificateWithPolicy> certPoller = client.beginCreateCertificate(certificateName,
            initialPolicy);
        certPoller.waitForCompletion();
        // A backup of an existing certificate must yield a non-empty blob.
        // NOTE(review): this test does not delete/purge its certificate — confirm whether cleanup was intentional.
        byte[] backupBytes = (client.backupCertificate(certificateName));
        assertNotNull(backupBytes);
        assertTrue(backupBytes.length > 0);
    });
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void backupCertificateNotFound(HttpClient httpClient, CertificateServiceVersion serviceVersion) {
    createCertificateClient(httpClient, serviceVersion);
    // Backing up a non-existent certificate must surface the service's 404.
    assertRestException(() -> client.backupCertificate("non-existing"), ResourceNotFoundException.class, HttpURLConnection.HTTP_NOT_FOUND);
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void restoreCertificate(HttpClient httpClient, CertificateServiceVersion serviceVersion) {
    createCertificateClient(httpClient, serviceVersion);
    restoreCertificateRunner((certificateName) -> {
        CertificatePolicy initialPolicy = setupPolicy();
        SyncPoller<CertificateOperation, KeyVaultCertificateWithPolicy> certPoller = client.beginCreateCertificate(certificateName,
            initialPolicy);
        certPoller.waitForCompletion();
        KeyVaultCertificateWithPolicy createdCert = certPoller.getFinalResult();
        byte[] backupBytes = (client.backupCertificate(certificateName));
        assertNotNull(backupBytes);
        assertTrue(backupBytes.length > 0);
        // Remove the certificate entirely, then restore it from the backup blob.
        deleteAndPurgeCertificate(certificateName);
        sleepInRecordMode(40000);
        KeyVaultCertificateWithPolicy restoredCertificate = client.restoreCertificateBackup(backupBytes);
        assertEquals(certificateName, restoredCertificate.getName());
        validatePolicy(restoredCertificate.getPolicy(), createdCert.getPolicy());
        deleteAndPurgeCertificate(certificateName);
    });
}
/**
 * Starts a certificate creation and retrieves the same in-flight operation by name;
 * both pollers must converge on the same certificate and policy.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void getCertificateOperation(HttpClient httpClient, CertificateServiceVersion serviceVersion) {
    createCertificateClient(httpClient, serviceVersion);
    getCertificateOperationRunner((certName) -> {
        SyncPoller<CertificateOperation, KeyVaultCertificateWithPolicy> createPoller =
            client.beginCreateCertificate(certName, setupPolicy());
        SyncPoller<CertificateOperation, KeyVaultCertificateWithPolicy> retrievePoller = client.getCertificateOperation(certName);
        // Wait on both pollers; they track the same server-side operation.
        retrievePoller.waitForCompletion();
        createPoller.waitForCompletion();
        KeyVaultCertificateWithPolicy retrievedCert = retrievePoller.getFinalResult();
        KeyVaultCertificateWithPolicy expectedCert = createPoller.getFinalResult();
        validateCertificate(expectedCert, retrievedCert);
        validatePolicy(expectedCert.getPolicy(), retrievedCert.getPolicy());
        deleteAndPurgeCertificate(certName);
    });
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void cancelCertificateOperation(HttpClient httpClient, CertificateServiceVersion serviceVersion) {
    createCertificateClient(httpClient, serviceVersion);
    cancelCertificateOperationRunner((certName) -> {
        SyncPoller<CertificateOperation, KeyVaultCertificateWithPolicy> certPoller =
            client.beginCreateCertificate(certName, CertificatePolicy.getDefault());
        certPoller.poll();
        // Cancel the in-flight creation; the resulting certificate must come back disabled.
        certPoller.cancelOperation();
        certPoller.waitForCompletion();
        KeyVaultCertificateWithPolicy certificate = certPoller.getFinalResult();
        assertEquals(false, certificate.getProperties().isEnabled());
    });
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void deleteCertificateOperation(HttpClient httpClient, CertificateServiceVersion serviceVersion) {
    createCertificateClient(httpClient, serviceVersion);
    deleteCertificateOperationRunner((certName) -> {
        SyncPoller<CertificateOperation, KeyVaultCertificateWithPolicy> certPoller =
            client.beginCreateCertificate(certName, CertificatePolicy.getDefault());
        certPoller.waitForCompletion();
        // Deleting a completed operation returns it; a second delete must 404.
        CertificateOperation certificateOperation = client.deleteCertificateOperation(certName);
        assertEquals("completed", certificateOperation.getStatus());
        assertRestException(() -> client.deleteCertificateOperation(certName), ResourceNotFoundException.class, HttpURLConnection.HTTP_NOT_FOUND);
    });
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void getCertificatePolicy(HttpClient httpClient, CertificateServiceVersion serviceVersion) {
    createCertificateClient(httpClient, serviceVersion);
    getCertificatePolicyRunner((certName) -> {
        SyncPoller<CertificateOperation, KeyVaultCertificateWithPolicy> certPoller =
            client.beginCreateCertificate(certName, setupPolicy());
        certPoller.waitForCompletion();
        // The stored policy must match the one the certificate was created with.
        KeyVaultCertificateWithPolicy certificate = certPoller.getFinalResult();
        validatePolicy(setupPolicy(), certificate.getPolicy());
        deleteAndPurgeCertificate(certName);
    });
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void updateCertificatePolicy(HttpClient httpClient, CertificateServiceVersion serviceVersion) {
    createCertificateClient(httpClient, serviceVersion);
    updateCertificatePolicyRunner((certName) -> {
        SyncPoller<CertificateOperation, KeyVaultCertificateWithPolicy> certPoller =
            client.beginCreateCertificate(certName, setupPolicy());
        certPoller.waitForCompletion();
        KeyVaultCertificateWithPolicy certificate = certPoller.getFinalResult();
        // Flip the exportable flag and verify the service persists the updated policy.
        certificate.getPolicy().setExportable(false);
        CertificatePolicy policy = client.updateCertificatePolicy(certName, certificate.getPolicy());
        validatePolicy(certificate.getPolicy(), policy);
        deleteAndPurgeCertificate(certName);
    });
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void restoreCertificateFromMalformedBackup(HttpClient httpClient, CertificateServiceVersion serviceVersion) {
    createCertificateClient(httpClient, serviceVersion);
    // Restoring from a blob that is not a valid backup must be rejected with HTTP 400.
    byte[] keyBackupBytes = "non-existing".getBytes();
    assertRestException(() -> client.restoreCertificateBackup(keyBackupBytes), ResourceModifiedException.class, HttpURLConnection.HTTP_BAD_REQUEST);
}
/**
 * Creates a batch of certificates and verifies each one shows up in the listing,
 * then cleans them all up.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void listCertificates(HttpClient httpClient, CertificateServiceVersion serviceVersion) {
    createCertificateClient(httpClient, serviceVersion);
    listCertificatesRunner((certificates) -> {
        HashSet<String> certificatesToList = new HashSet<>(certificates);
        for (String certName : certificatesToList) {
            SyncPoller<CertificateOperation, KeyVaultCertificateWithPolicy> certPoller = client.beginCreateCertificate(certName,
                CertificatePolicy.getDefault());
            certPoller.waitForCompletion();
        }
        // Give the service time to reflect the new certificates in the listing.
        sleepInRecordMode(90000);
        // Set.remove is already a no-op for unknown names, so the redundant
        // contains() pre-check from the original was dropped.
        for (CertificateProperties actualKey : client.listPropertiesOfCertificates()) {
            certificatesToList.remove(actualKey.getName());
        }
        assertEquals(0, certificatesToList.size());
        for (String certName : certificates) {
            deleteAndPurgeCertificate(certName);
        }
    });
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void createIssuer(HttpClient httpClient, CertificateServiceVersion serviceVersion) {
    createCertificateClient(httpClient, serviceVersion);
    createIssuereRunner((issuer) -> {
        // The created issuer returned by the service must match what was submitted.
        CertificateIssuer createdIssuer = client.createIssuer(issuer);
        validateIssuer(issuer, createdIssuer);
    });
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void createIssuerEmptyName(HttpClient httpClient, CertificateServiceVersion serviceVersion) {
    createCertificateClient(httpClient, serviceVersion);
    // An empty issuer name is rejected by the service (surfaces as HTTP 405 here).
    assertRestException(() -> client.createIssuer(new CertificateIssuer("", "")),
        HttpResponseException.class, HttpURLConnection.HTTP_BAD_METHOD);
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void createIssuerNullProvider(HttpClient httpClient, CertificateServiceVersion serviceVersion) {
    createCertificateClient(httpClient, serviceVersion);
    // A null provider (with empty name) is rejected the same way as an empty name.
    assertRestException(() -> client.createIssuer(new CertificateIssuer("", null)),
        HttpResponseException.class, HttpURLConnection.HTTP_BAD_METHOD);
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void createIssuerNull(HttpClient httpClient, CertificateServiceVersion serviceVersion) {
    createCertificateClient(httpClient, serviceVersion);
    // A null issuer must fail client-side with NullPointerException before any request is sent.
    assertRunnableThrowsException(() -> client.createIssuer(null), NullPointerException.class);
}
/**
 * Creates an issuer and retrieves it by name; both the create response and the lookup
 * must match the submitted issuer. (The original stored the create response but never
 * validated it — it is now asserted too.)
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void getCertificateIssuer(HttpClient httpClient, CertificateServiceVersion serviceVersion) {
    createCertificateClient(httpClient, serviceVersion);
    getCertificateIssuerRunner((issuer) -> {
        CertificateIssuer createdIssuer = client.createIssuer(issuer);
        validateIssuer(issuer, createdIssuer);
        CertificateIssuer retrievedIssuer = client.getIssuer(issuer.getName());
        validateIssuer(issuer, retrievedIssuer);
    });
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void getCertificateIssuerNotFound(HttpClient httpClient, CertificateServiceVersion serviceVersion) {
    createCertificateClient(httpClient, serviceVersion);
    // Fix: the original exercised backupCertificate (copy/paste); this test's subject is getIssuer.
    assertRestException(() -> client.getIssuer("non-existing"), ResourceNotFoundException.class, HttpURLConnection.HTTP_NOT_FOUND);
}
/**
 * Creates an issuer and deletes it; the delete response must echo the submitted issuer.
 * (The original stored the create response but never validated it — now asserted too.)
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void deleteCertificateIssuer(HttpClient httpClient, CertificateServiceVersion serviceVersion) {
    createCertificateClient(httpClient, serviceVersion);
    deleteCertificateIssuerRunner((issuer) -> {
        CertificateIssuer createdIssuer = client.createIssuer(issuer);
        validateIssuer(issuer, createdIssuer);
        CertificateIssuer deletedIssuer = client.deleteIssuer(issuer.getName());
        validateIssuer(issuer, deletedIssuer);
    });
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void deleteCertificateIssuerNotFound(HttpClient httpClient, CertificateServiceVersion serviceVersion) {
    createCertificateClient(httpClient, serviceVersion);
    // Fix: the original exercised backupCertificate (copy/paste); this test's subject is deleteIssuer.
    assertRestException(() -> client.deleteIssuer("non-existing"), ResourceNotFoundException.class, HttpURLConnection.HTTP_NOT_FOUND);
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void listCertificateIssuers(HttpClient httpClient, CertificateServiceVersion serviceVersion) {
    createCertificateClient(httpClient, serviceVersion);
    listCertificateIssuersRunner((certificateIssuers) -> {
        HashMap<String, CertificateIssuer> certificateIssuersToList = new HashMap<>(certificateIssuers);
        // Create every issuer, then cross each one off as it appears in the listing.
        for (CertificateIssuer issuer : certificateIssuersToList.values()) {
            CertificateIssuer certificateIssuer = client.createIssuer(issuer);
            validateIssuer(issuer, certificateIssuer);
        }
        for (IssuerProperties issuerProperties : client.listPropertiesOfIssuers()) {
            if (certificateIssuersToList.containsKey(issuerProperties.getName())) {
                certificateIssuersToList.remove(issuerProperties.getName());
            }
        }
        assertEquals(0, certificateIssuersToList.size());
        for (CertificateIssuer issuer : certificateIssuers.values()) {
            client.deleteIssuer(issuer.getName());
        }
    });
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void setContacts(HttpClient httpClient, CertificateServiceVersion serviceVersion) {
    createCertificateClient(httpClient, serviceVersion);
    // Setting vault contacts must echo back the submitted contact; clean up afterwards.
    List<CertificateContact> contacts = Arrays.asList(setupContact());
    client.setContacts(contacts).forEach((retrievedContact) -> validateContact(setupContact(), retrievedContact));
    client.deleteContacts();
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void listContacts(HttpClient httpClient, CertificateServiceVersion serviceVersion) {
    createCertificateClient(httpClient, serviceVersion);
    List<CertificateContact> contacts = Arrays.asList(setupContact());
    client.setContacts(contacts).forEach((retrievedContact) -> validateContact(setupContact(), retrievedContact));
    // Brief pause so the listing reflects the newly set contacts.
    sleepInRecordMode(6000);
    client.listContacts().stream().forEach((retrievedContact) -> validateContact(setupContact(), retrievedContact));
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void deleteContacts(HttpClient httpClient, CertificateServiceVersion serviceVersion) {
    createCertificateClient(httpClient, serviceVersion);
    List<CertificateContact> contacts = Arrays.asList(setupContact());
    client.setContacts(contacts).forEach((retrievedContact) -> validateContact(setupContact(), retrievedContact));
    // Deleting contacts returns the removed entries; verify they match what was set.
    PagedIterable<CertificateContact> certificateContacts = client.deleteContacts();
    validateContact(setupContact(), certificateContacts.iterator().next());
}
// NOTE(review): method name has a typo ("Operatio"); left unchanged because the test name
// identifies its recorded session — renaming would break playback. Verify before fixing.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void getCertificateOperatioNotFound(HttpClient httpClient, CertificateServiceVersion serviceVersion) {
    createCertificateClient(httpClient, serviceVersion);
    // Polling the operation of a non-existent certificate must surface the service's 404.
    assertRestException(() -> client.getCertificateOperation("non-existing").poll(), ResourceNotFoundException.class, HttpURLConnection.HTTP_NOT_FOUND);
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void getCertificatePolicyNotFound(HttpClient httpClient, CertificateServiceVersion serviceVersion) {
    createCertificateClient(httpClient, serviceVersion);
    // Fetching the policy of a non-existent certificate must surface the service's 404.
    assertRestException(() -> client.getCertificatePolicy("non-existing"), ResourceNotFoundException.class, HttpURLConnection.HTTP_NOT_FOUND);
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void listCertificateVersions(HttpClient httpClient, CertificateServiceVersion serviceVersion) {
    createCertificateClient(httpClient, serviceVersion);
    String certName = generateResourceId("testListCertVersion");
    // Creating the same certificate repeatedly produces one new version per creation.
    int counter = 5;
    for (int i = 0; i < counter; i++) {
        SyncPoller<CertificateOperation, KeyVaultCertificateWithPolicy> certPoller = client.beginCreateCertificate(certName,
            CertificatePolicy.getDefault());
        certPoller.waitForCompletion();
    }
    int countRecv = 0;
    for (CertificateProperties certificateProperties : client.listPropertiesOfCertificateVersions(certName)) {
        countRecv++;
        assertEquals(certificateProperties.getName(), certName);
    }
    assertEquals(counter, countRecv);
    deleteAndPurgeCertificate(certName);
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void listDeletedCertificates(HttpClient httpClient, CertificateServiceVersion serviceVersion) {
    createCertificateClient(httpClient, serviceVersion);
    listDeletedCertificatesRunner((certificates) -> {
        HashSet<String> certificatesToDelete = new HashSet<>(certificates);
        // Create each certificate and manually poll until the create operation completes.
        for (String certName : certificatesToDelete) {
            SyncPoller<CertificateOperation, KeyVaultCertificateWithPolicy> certPoller = client.beginCreateCertificate(certName,
                CertificatePolicy.getDefault());
            PollResponse<CertificateOperation> pollResponse = certPoller.poll();
            while (!pollResponse.getStatus().isComplete()) {
                sleepInRecordMode(1000);
                pollResponse = certPoller.poll();
            }
        }
        // Delete every certificate, again polling each delete to completion.
        for (String certName : certificates) {
            SyncPoller<DeletedCertificate, Void> poller = client.beginDeleteCertificate(certName);
            PollResponse<DeletedCertificate> pollResponse = poller.poll();
            while (!pollResponse.getStatus().isComplete()) {
                sleepInRecordMode(1000);
                pollResponse = poller.poll();
            }
            assertNotNull(pollResponse.getValue());
        }
        // Give the service time to surface the deletions in the deleted-certificates listing.
        sleepInRecordMode(90000);
        Iterable<DeletedCertificate> deletedCertificates = client.listDeletedCertificates();
        assertTrue(deletedCertificates.iterator().hasNext());
        for (DeletedCertificate deletedCertificate : deletedCertificates) {
            assertNotNull(deletedCertificate.getDeletedOn());
            assertNotNull(deletedCertificate.getRecoveryId());
            certificatesToDelete.remove(deletedCertificate.getName());
        }
    });
}
/**
 * Imports the fixture PFX certificate and verifies its thumbprint, enabled state, and
 * X.509 subject/issuer names, then cleans the certificate up.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void importCertificate(HttpClient httpClient, CertificateServiceVersion serviceVersion) {
    createCertificateClient(httpClient, serviceVersion);
    importCertificateRunner((importCertificateOptions) -> {
        KeyVaultCertificateWithPolicy importedCertificate = client.importCertificate(importCertificateOptions);
        // The fixture certificate has a fixed, known thumbprint.
        assertTrue(toHexString(importedCertificate.getProperties().getX509Thumbprint()).equalsIgnoreCase("7cb8b7539d87ba7215357b9b9049dff2d3fa59ba"));
        assertEquals(importCertificateOptions.isEnabled(), importedCertificate.getProperties().isEnabled());
        // Multi-catch replaces the two previously identical catch blocks.
        X509Certificate x509Certificate = null;
        try {
            x509Certificate = loadCerToX509Certificate(importedCertificate);
        } catch (CertificateException | IOException e) {
            e.printStackTrace();
            fail();
        }
        // assertEquals gives a useful failure message, unlike assertTrue(equals(...)).
        assertEquals("CN=KeyVaultTest", x509Certificate.getSubjectX500Principal().getName());
        assertEquals("CN=Root Agency", x509Certificate.getIssuerX500Principal().getName());
        deleteAndPurgeCertificate(importCertificateOptions.getName());
    });
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void mergeCertificateNotFound(HttpClient httpClient, CertificateServiceVersion serviceVersion) {
    createCertificateClient(httpClient, serviceVersion);
    // Merging into a certificate with no pending operation must return the service's 404.
    assertRestException(() -> client.mergeCertificate(new MergeCertificateOptions(generateResourceId("testCert16"), Arrays.asList("test".getBytes()))),
        HttpResponseException.class, HttpURLConnection.HTTP_NOT_FOUND);
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void importPemCertificate(HttpClient httpClient, CertificateServiceVersion serviceVersion) throws IOException {
    createCertificateClient(httpClient, serviceVersion);
    importPemCertificateRunner((importCertificateOptions) -> {
        // A PEM import must round-trip the enabled flag and report PEM as the content type.
        KeyVaultCertificateWithPolicy importedCertificate = client.importCertificate(importCertificateOptions);
        assertEquals(importCertificateOptions.isEnabled(), importedCertificate.getProperties().isEnabled());
        assertEquals(CertificateContentType.PEM, importedCertificate.getPolicy().getContentType());
        deleteAndPurgeCertificate(importCertificateOptions.getName());
    });
}
/**
 * Polls the deleted-certificate endpoint until the purge of {@code certificateName}
 * completes, waiting up to 10 iterations of 2 seconds each (live mode only).
 *
 * @param certificateName name of the certificate whose purge is awaited
 * @return {@code null} once the certificate is gone (purge complete) or after the
 *     poll budget is exhausted; a warning is printed in the latter case
 */
private DeletedCertificate pollOnCertificatePurge(String certificateName) {
    int pendingPollCount = 0;
    while (pendingPollCount < 10) {
        DeletedCertificate deletedCertificate = null;
        try {
            deletedCertificate = client.getDeletedCertificate(certificateName);
        } catch (ResourceNotFoundException ignored) {
            // A 404 means the purge has completed; deletedCertificate stays null.
        }
        if (deletedCertificate == null) {
            // Purge finished — nothing remains in the deleted state.
            return null;
        }
        // Still present: wait (no-op in playback mode) and poll again.
        sleepInRecordMode(2000);
        pendingPollCount += 1;
    }
    // BUG FIX: the message previously said "Deleted Key"; this helper polls certificates.
    System.err.printf("Deleted Certificate %s was not purged \n", certificateName);
    return null;
}
} |
This is merge issue. Thanks for catching this. | HttpPipeline getHttpPipeline(HttpClient httpClient, CertificateServiceVersion serviceVersion) {
// Builds the HTTP pipeline used by the certificate clients under test.
// Live/record mode authenticates with a client-secret credential read from the
// environment; playback mode attaches no credential and replays recorded traffic.
TokenCredential credential = null;
if (!interceptorManager.isPlaybackMode()) {
String clientId = System.getenv("ARM_CLIENTID");
String clientKey = System.getenv("ARM_CLIENTKEY");
String tenantId = System.getenv("AZURE_TENANT_ID");
Objects.requireNonNull(clientId, "The client id cannot be null");
Objects.requireNonNull(clientKey, "The client key cannot be null");
Objects.requireNonNull(tenantId, "The tenant id cannot be null");
credential = new ClientSecretCredentialBuilder()
.clientSecret(clientKey)
.clientId(clientId)
.tenantId(tenantId)
.build();
}
// Policy order matters: user agent, then retry, then auth, then logging/recording.
final List<HttpPipelinePolicy> policies = new ArrayList<>();
policies.add(new UserAgentPolicy(SDK_NAME, SDK_VERSION,
Configuration.getGlobalConfiguration().clone(), serviceVersion));
HttpPolicyProviders.addBeforeRetryPolicies(policies);
policies.add(new RetryPolicy());
if (credential != null) {
policies.add(new BearerTokenAuthenticationPolicy(credential, CertificateAsyncClient.KEY_VAULT_SCOPE));
}
HttpPolicyProviders.addAfterRetryPolicies(policies);
policies.add(new HttpLoggingPolicy(new HttpLogOptions().setLogLevel(HttpLogDetailLevel.BODY_AND_HEADERS)));
if (!interceptorManager.isPlaybackMode()) {
// Record mode captures the live traffic for later playback runs.
policies.add(interceptorManager.getRecordPolicy());
}
HttpPipeline pipeline = new HttpPipelineBuilder()
.policies(policies.toArray(new HttpPipelinePolicy[0]))
.httpClient(httpClient == null ? interceptorManager.getPlaybackClient() : httpClient)
.build();
return pipeline;
}
@Test
public abstract void createCertificate(HttpClient httpClient, CertificateServiceVersion serviceVersion);
void createCertificateRunner(Consumer<CertificatePolicy> testRunner) {
final CertificatePolicy certificatePolicy = CertificatePolicy.getDefault();
testRunner.accept(certificatePolicy);
}
@Test
public abstract void createCertificateEmptyName(HttpClient httpClient, CertificateServiceVersion serviceVersion);
@Test
public abstract void createCertificateNullPolicy(HttpClient httpClient, CertificateServiceVersion serviceVersion);
@Test public abstract void createCertoificateNull(HttpClient httpClient, CertificateServiceVersion serviceVersion);
@Test
public abstract void updateCertificate(HttpClient httpClient, CertificateServiceVersion serviceVersion);
/**
 * Supplies an initial tag set and an updated tag set to the update-certificate test.
 *
 * @param testRunner consumer receiving (original tags, updated tags)
 */
void updateCertificateRunner(BiConsumer<Map<String, String>, Map<String, String>> testRunner) {
    final Map<String, String> tags = new HashMap<>();
    tags.put("first tag", "first value");
    final Map<String, String> updatedTags = new HashMap<>();
    // BUG FIX: these two entries were previously added to `tags` (copy-paste error),
    // leaving `updatedTags` empty and the update assertion vacuous.
    updatedTags.put("first tag", "first value");
    updatedTags.put("second tag", "second value");
    testRunner.accept(tags, updatedTags);
}
@Test
public abstract void updateDisabledCertificate(HttpClient httpClient, CertificateServiceVersion serviceVersion);
/**
 * Supplies an initial tag set and an updated tag set to the update-disabled-certificate test.
 *
 * @param testRunner consumer receiving (original tags, updated tags)
 */
void updateDisabledCertificateRunner(BiConsumer<Map<String, String>, Map<String, String>> testRunner) {
    final Map<String, String> tags = new HashMap<>();
    tags.put("first tag", "first value");
    final Map<String, String> updatedTags = new HashMap<>();
    // BUG FIX: these two entries were previously added to `tags` (copy-paste error),
    // leaving `updatedTags` empty and the update assertion vacuous.
    updatedTags.put("first tag", "first value");
    updatedTags.put("second tag", "second value");
    testRunner.accept(tags, updatedTags);
}
@Test
public abstract void getCertificate(HttpClient httpClient, CertificateServiceVersion serviceVersion);
void getCertificateRunner(Consumer<String> testRunner) {
testRunner.accept(generateResourceId("testCertificate4"));
}
@Test
public abstract void getCertificateSpecificVersion(HttpClient httpClient, CertificateServiceVersion serviceVersion);
void getCertificateSpecificVersionRunner(Consumer<String> testRunner) {
testRunner.accept(generateResourceId("testCertificate9"));
}
@Test
public abstract void getCertificateNotFound(HttpClient httpClient, CertificateServiceVersion serviceVersion);
@Test
public abstract void deleteCertificate(HttpClient httpClient, CertificateServiceVersion serviceVersion);
void deleteCertificateRunner(Consumer<String> testRunner) {
testRunner.accept(generateResourceId("testCert5"));
}
@Test
public abstract void deleteCertificateNotFound(HttpClient httpClient, CertificateServiceVersion serviceVersion);
@Test
public abstract void getDeletedCertificate(HttpClient httpClient, CertificateServiceVersion serviceVersion);
void getDeletedCertificateRunner(Consumer<String> testRunner) {
testRunner.accept(generateResourceId("testCert6"));
}
@Test
public abstract void getDeletedCertificateNotFound(HttpClient httpClient, CertificateServiceVersion serviceVersion);
@Test
public abstract void recoverDeletedCertificate(HttpClient httpClient, CertificateServiceVersion serviceVersion);
void recoverDeletedKeyRunner(Consumer<String> testRunner) {
testRunner.accept(generateResourceId("testCert7"));
}
@Test
public abstract void recoverDeletedCertificateNotFound(HttpClient httpClient, CertificateServiceVersion serviceVersion);
@Test
public abstract void backupCertificate(HttpClient httpClient, CertificateServiceVersion serviceVersion);
void backupCertificateRunner(Consumer<String> testRunner) {
testRunner.accept(generateResourceId("testCert8"));
}
@Test
public abstract void backupCertificateNotFound(HttpClient httpClient, CertificateServiceVersion serviceVersion);
@Test
public abstract void restoreCertificate(HttpClient httpClient, CertificateServiceVersion serviceVersion);
void restoreCertificateRunner(Consumer<String> testRunner) {
testRunner.accept(generateResourceId("testCertificate9"));
}
@Test
public abstract void getCertificateOperation(HttpClient httpClient, CertificateServiceVersion serviceVersion);
void getCertificateOperationRunner(Consumer<String> testRunner) {
testRunner.accept(generateResourceId("testCertificate10"));
}
@Test
public abstract void cancelCertificateOperation(HttpClient httpClient, CertificateServiceVersion serviceVersion);
void cancelCertificateOperationRunner(Consumer<String> testRunner) {
testRunner.accept(generateResourceId("testCertificate11"));
}
@Test
public abstract void deleteCertificateOperation(HttpClient httpClient, CertificateServiceVersion serviceVersion);
void deleteCertificateOperationRunner(Consumer<String> testRunner) {
testRunner.accept(generateResourceId("testCertificate12"));
}
@Test
public abstract void getCertificatePolicy(HttpClient httpClient, CertificateServiceVersion serviceVersion);
void getCertificatePolicyRunner(Consumer<String> testRunner) {
testRunner.accept(generateResourceId("testCertificate13"));
}
@Test
public abstract void updateCertificatePolicy(HttpClient httpClient, CertificateServiceVersion serviceVersion);
void updateCertificatePolicyRunner(Consumer<String> testRunner) {
testRunner.accept(generateResourceId("testCertificate14"));
}
@Test
public abstract void restoreCertificateFromMalformedBackup(HttpClient httpClient, CertificateServiceVersion serviceVersion);
@Test
public abstract void listCertificates(HttpClient httpClient, CertificateServiceVersion serviceVersion);
void listCertificatesRunner(Consumer<List<String>> testRunner) {
List<String> certificates = new ArrayList<>();
String certificateName;
for (int i = 0; i < 2; i++) {
certificateName = generateResourceId("listCertKey" + i);
certificates.add(certificateName);
}
testRunner.accept(certificates);
}
@Test
public abstract void createIssuer(HttpClient httpClient, CertificateServiceVersion serviceVersion);
void createIssuereRunner(Consumer<CertificateIssuer> testRunner) {
final CertificateIssuer certificateIssuer = setupIssuer(generateResourceId("testIssuer01"));
testRunner.accept(certificateIssuer);
}
@Test
public abstract void createIssuerNull(HttpClient httpClient, CertificateServiceVersion serviceVersion);
@Test
public abstract void getCertificateIssuer(HttpClient httpClient, CertificateServiceVersion serviceVersion);
@Test
public abstract void getCertificateIssuerNotFound(HttpClient httpClient, CertificateServiceVersion serviceVersion);
void getCertificateIssuerRunner(Consumer<CertificateIssuer> testRunner) {
final CertificateIssuer certificateIssuer = setupIssuer(generateResourceId("testIssuer02"));
testRunner.accept(certificateIssuer);
}
@Test
public abstract void deleteCertificateIssuer(HttpClient httpClient, CertificateServiceVersion serviceVersion);
@Test
public abstract void deleteCertificateIssuerNotFound(HttpClient httpClient, CertificateServiceVersion serviceVersion);
void deleteCertificateIssuerRunner(Consumer<CertificateIssuer> testRunner) {
final CertificateIssuer certificateIssuer = setupIssuer(generateResourceId("testIssuer03"));
testRunner.accept(certificateIssuer);
}
@Test
public abstract void listCertificateIssuers(HttpClient httpClient, CertificateServiceVersion serviceVersion);
void listCertificateIssuersRunner(Consumer<HashMap<String, CertificateIssuer>> testRunner) {
HashMap<String, CertificateIssuer> certificateIssuers = new HashMap<>();
String certificateIssuerName;
for (int i = 0; i < 10; i++) {
certificateIssuerName = generateResourceId("listCertIssuer" + i);
certificateIssuers.put(certificateIssuerName, setupIssuer(certificateIssuerName));
}
testRunner.accept(certificateIssuers);
}
@Test
public abstract void setContacts(HttpClient httpClient, CertificateServiceVersion serviceVersion);
@Test
public abstract void listContacts(HttpClient httpClient, CertificateServiceVersion serviceVersion);
@Test
public abstract void deleteContacts(HttpClient httpClient, CertificateServiceVersion serviceVersion);
@Test
public abstract void getCertificateOperatioNotFound(HttpClient httpClient, CertificateServiceVersion serviceVersion);
@Test
public abstract void getCertificatePolicyNotFound(HttpClient httpClient, CertificateServiceVersion serviceVersion);
CertificateContact setupContact() {
return new CertificateContact().setName("name").setEmail("first.last@gmail.com").setPhone("2323-31232");
}
Boolean validateContact(CertificateContact expected, CertificateContact actual) {
return expected.getEmail().equals(actual.getEmail())
&& expected.getName().equals(actual.getName())
&& expected.getPhone().equals(actual.getPhone());
}
@Test
public abstract void listCertificateVersions(HttpClient httpClient, CertificateServiceVersion serviceVersion);
void listCertificateVersionsRunner(Consumer<List<String>> testRunner) {
List<String> certificates = new ArrayList<>();
String certificateName = generateResourceId("listCertVersionTest");
for (int i = 1; i < 5; i++) {
certificates.add(certificateName);
}
testRunner.accept(certificates);
}
@Test
public abstract void listDeletedCertificates(HttpClient httpClient, CertificateServiceVersion serviceVersion);
void listDeletedCertificatesRunner(Consumer<List<String>> testRunner) {
List<String> certificates = new ArrayList<>();
String certificateName;
for (int i = 0; i < 3; i++) {
certificateName = generateResourceId("listDeletedCertificate" + i);
certificates.add(certificateName);
}
testRunner.accept(certificates);
}
@Test
public abstract void importCertificate(HttpClient httpClient, CertificateServiceVersion serviceVersion);
void importCertificateRunner(Consumer<ImportCertificateOptions> testRunner) {
String certificateContent = "MIIJOwIBAzCCCPcGCSqGSIb3DQEHAaCCCOgEggjkMIII4DCCBgkGCSqGSIb3DQEHAaCCBfoEggX2MIIF8jCCBe4GCyqGSIb3DQEMCgECoIIE/jCCBPowHAYKKoZIhvcNAQwBAzAOBAj15YH9pOE58AICB9AEggTYLrI+SAru2dBZRQRlJY7XQ3LeLkah2FcRR3dATDshZ2h0IA2oBrkQIdsLyAAWZ32qYR1qkWxLHn9AqXgu27AEbOk35+pITZaiy63YYBkkpR+pDdngZt19Z0PWrGwHEq5z6BHS2GLyyN8SSOCbdzCz7blj3+7IZYoMj4WOPgOm/tQ6U44SFWek46QwN2zeA4i97v7ftNNns27ms52jqfhOvTA9c/wyfZKAY4aKJfYYUmycKjnnRl012ldS2lOkASFt+lu4QCa72IY6ePtRudPCvmzRv2pkLYS6z3cI7omT8nHP3DymNOqLbFqr5O2M1ZYaLC63Q3xt3eVvbcPh3N08D1hHkhz/KDTvkRAQpvrW8ISKmgDdmzN55Pe55xHfSWGB7gPw8sZea57IxFzWHTK2yvTslooWoosmGxanYY2IG/no3EbPOWDKjPZ4ilYJe5JJ2immlxPz+2e2EOCKpDI+7fzQcRz3PTd3BK+budZ8aXX8aW/lOgKS8WmxZoKnOJBNWeTNWQFugmktXfdPHAdxMhjUXqeGQd8wTvZ4EzQNNafovwkI7IV/ZYoa++RGofVR3ZbRSiBNF6TDj/qXFt0wN/CQnsGAmQAGNiN+D4mY7i25dtTu/Jc7OxLdhAUFpHyJpyrYWLfvOiS5WYBeEDHkiPUa/8eZSPA3MXWZR1RiuDvuNqMjct1SSwdXADTtF68l/US1ksU657+XSC+6ly1A/upz+X71+C4Ho6W0751j5ZMT6xKjGh5pee7MVuduxIzXjWIy3YSd0fIT3U0A5NLEvJ9rfkx6JiHjRLx6V1tqsrtT6BsGtmCQR1UCJPLqsKVDvAINx3cPA/CGqr5OX2BGZlAihGmN6n7gv8w4O0k0LPTAe5YefgXN3m9pE867N31GtHVZaJ/UVgDNYS2jused4rw76ZWN41akx2QN0JSeMJqHXqVz6AKfz8ICS/dFnEGyBNpXiMRxrY/QPKi/wONwqsbDxRW7vZRVKs78pBkE0ksaShlZk5GkeayDWC/7Hi/NqUFtIloK9XB3paLxo1DGu5qqaF34jZdktzkXp0uZqpp+FfKZaiovMjt8F7yHCPk+LYpRsU2Cyc9DVoDA6rIgf+uEP4jppgehsxyT0lJHax2t869R2jYdsXwYUXjgwHIV0voj7bJYPGFlFjXOp6ZW86scsHM5xfsGQoK2Fp838VT34SHE1ZXU/puM7rviREHYW72pfpgGZUILQMohuTPnd8tFtAkbrmjLDo+k9xx7HUvgoFTiNNWuq/cRjr70FKNguMMTIrid+HwfmbRoaxENWdLcOTNeascER2a+37UQolKD5ksrPJG6RdNA7O2pzp3micDYRs/+s28cCIxO
String certificatePassword = "123";
String certificateName = generateResourceId("importCertPkcs");
HashMap<String, String> tags = new HashMap<>();
tags.put("key", "val");
ImportCertificateOptions importCertificateOptions = new ImportCertificateOptions(certificateName, Base64.getDecoder().decode(certificateContent))
.setPassword(certificatePassword)
.setEnabled(true)
.setTags(tags);
testRunner.accept(importCertificateOptions);
}
@Test
public abstract void importPemCertificate(HttpClient httpClient, CertificateServiceVersion serviceVersion) throws IOException;
void importPemCertificateRunner(Consumer<ImportCertificateOptions> testRunner) throws IOException {
byte[] certificateContent = readCertificate("certificate.pem");
String certificateName = generateResourceId("importCertPem");
HashMap<String, String> tags = new HashMap<>();
tags.put("key", "val");
ImportCertificateOptions importCertificateOptions = new ImportCertificateOptions(certificateName, certificateContent)
.setPolicy(new CertificatePolicy("Self", "CN=AzureSDK")
.setContentType(CertificateContentType.PEM))
.setEnabled(true)
.setTags(tags);
testRunner.accept(importCertificateOptions);
}
@Test
public abstract void mergeCertificateNotFound(HttpClient httpClient, CertificateServiceVersion serviceVersion);
// Loads a certificate test resource from the classpath as raw bytes.
// NOTE(review): the ":" check strips the leading "/" from Windows-style paths such
// as "/C:/..." returned by getResource().getPath(); presumably resource paths are
// never URL-encoded (no spaces) — confirm against the test resources layout.
private byte[] readCertificate(String certName) throws IOException {
String pemPath = getClass().getClassLoader().getResource(certName).getPath();
if (pemPath.contains(":")) {
pemPath = pemPath.substring(1);
}
return Files.readAllBytes(Paths.get(pemPath));
}
CertificateIssuer setupIssuer(String issuerName) {
return new CertificateIssuer(issuerName, "Test")
.setAdministratorContacts(Arrays.asList(new AdministratorContact().setFirstName("first").setLastName("last").setEmail("first.last@hotmail.com").setPhone("12345")))
.setAccountId("issuerAccountId")
.setEnabled(true)
.setOrganizationId("orgId")
.setPassword("test123");
}
/**
 * Renders a byte array (e.g. an X.509 thumbprint) as a lowercase hex string.
 *
 * @param x5t the bytes to render; may be {@code null}
 * @return the hex representation, or an empty string when {@code x5t} is {@code null}
 */
String toHexString(byte[] x5t) {
    if (x5t == null) {
        return "";
    }
    StringBuilder hexString = new StringBuilder(x5t.length * 2);
    for (byte b : x5t) {
        // %02x zero-pads single-digit values, replacing the manual '0' append.
        hexString.append(String.format("%02x", b & 0xFF));
    }
    // Dead code removed: the previous .replace("-", "") could never match, since
    // hex output contains only [0-9a-f].
    return hexString.toString();
}
/**
 * Parses the DER-encoded {@code cer} bytes of a Key Vault certificate into an
 * {@link X509Certificate}.
 *
 * @param certificate the certificate whose raw bytes are parsed; its cer must be non-null
 * @return the parsed X.509 certificate
 * @throws CertificateException if the bytes are not a valid X.509 certificate
 * @throws IOException if closing the in-memory stream fails (effectively never)
 */
X509Certificate loadCerToX509Certificate(KeyVaultCertificateWithPolicy certificate) throws CertificateException, IOException {
    assertNotNull(certificate.getCer());
    CertificateFactory factory = CertificateFactory.getInstance("X.509");
    try (ByteArrayInputStream derStream = new ByteArrayInputStream(certificate.getCer())) {
        return (X509Certificate) factory.generateCertificate(derStream);
    }
}
Boolean validateIssuer(CertificateIssuer expected, CertificateIssuer actual) {
return expected.getAccountId().equals(actual.getAccountId())
&& expected.isEnabled().equals(actual.isEnabled())
&& (actual.getCreatedOn() != null)
&& (actual.getUpdatedOn() != null)
&& (actual.getId() != null)
&& (actual.getId().length() > 0)
&& expected.getName().equals(actual.getName())
&& expected.getOrganizationId().equals(actual.getOrganizationId())
&& expected.getAdministratorContacts().size() == actual.getAdministratorContacts().size();
}
CertificatePolicy setupPolicy() {
return new CertificatePolicy(WellKnownIssuerNames.SELF, "CN=default")
.setKeyUsage(CertificateKeyUsage.KEY_CERT_SIGN, CertificateKeyUsage.KEY_AGREEMENT)
.setContentType(CertificateContentType.PKCS12)
.setExportable(true)
.setKeyType(CertificateKeyType.EC)
.setCertificateTransparent(false)
.setEnabled(true)
.setKeyCurveName(CertificateKeyCurveName.P_384)
.setKeyReusable(true)
.setValidityInMonths(24)
.setLifetimeActions(new LifetimeAction(CertificatePolicyAction.AUTO_RENEW).setDaysBeforeExpiry(40));
}
boolean validatePolicy(CertificatePolicy expected, CertificatePolicy actual) {
return expected.getKeyType().equals(actual.getKeyType())
&& expected.getContentType().equals(actual.getContentType())
&& actual.getCreatedOn() != null
&& expected.getIssuerName().equals(actual.getIssuerName())
&& expected.getKeyCurveName().equals(actual.getKeyCurveName())
&& expected.isExportable().equals(actual.isExportable())
&& expected.isCertificateTransparent().equals(actual.isCertificateTransparent())
&& expected.isEnabled().equals(actual.isEnabled())
&& expected.isKeyReusable().equals(actual.isKeyReusable())
&& expected.getValidityInMonths().equals(actual.getValidityInMonths())
&& expected.getLifetimeActions().size() == actual.getLifetimeActions().size()
&& expected.getKeyUsage().size() == actual.getKeyUsage().size();
}
boolean validateCertificate(KeyVaultCertificate expected, KeyVaultCertificate actual) {
return expected.getId().equals(actual.getId())
&& expected.getKeyId().equals(actual.getKeyId())
&& expected.getName().equals(actual.getName())
&& expected.getSecretId().equals(actual.getSecretId())
&& expected.getProperties().getVersion().equals(actual.getProperties().getVersion())
&& expected.getProperties().getCreatedOn().equals(actual.getProperties().getCreatedOn())
&& expected.getProperties().getExpiresOn().equals(actual.getProperties().getExpiresOn())
&& expected.getProperties().getRecoveryLevel().equals(actual.getProperties().getRecoveryLevel())
&& expected.getProperties().getX509Thumbprint().length == actual.getProperties().getX509Thumbprint().length
&& expected.getCer().length == actual.getCer().length;
}
public String getEndpoint() {
final String endpoint = interceptorManager.isPlaybackMode()
? "http:
: System.getenv("AZURE_KEYVAULT_ENDPOINT");
Objects.requireNonNull(endpoint);
return endpoint;
}
static void assertRestException(Runnable exceptionThrower, int expectedStatusCode) {
assertRestException(exceptionThrower, HttpResponseException.class, expectedStatusCode);
}
static void assertRestException(Runnable exceptionThrower, Class<? extends HttpResponseException> expectedExceptionType, int expectedStatusCode) {
try {
exceptionThrower.run();
fail();
} catch (Throwable ex) {
assertRestException(ex, expectedExceptionType, expectedStatusCode);
}
}
String generateResourceId(String suffix) {
if (interceptorManager.isPlaybackMode()) {
return suffix;
}
String id = UUID.randomUUID().toString();
return suffix.length() > 0 ? id + "-" + suffix : id;
}
/**
* Helper method to verify the error was a HttpRequestException and it has a specific HTTP response code.
*
* @param exception Expected error thrown during the test
* @param expectedStatusCode Expected HTTP status code contained in the error response
*/
static void assertRestException(Throwable exception, int expectedStatusCode) {
assertRestException(exception, HttpResponseException.class, expectedStatusCode);
}
static void assertRestException(Throwable exception, Class<? extends HttpResponseException> expectedExceptionType, int expectedStatusCode) {
assertEquals(expectedExceptionType, exception.getClass());
assertEquals(expectedStatusCode, ((HttpResponseException) exception).getResponse().getStatusCode());
}
/**
* Helper method to verify that a command throws an IllegalArgumentException.
*
* @param exceptionThrower Command that should throw the exception
*/
static <T> void assertRunnableThrowsException(Runnable exceptionThrower, Class<T> exception) {
try {
exceptionThrower.run();
fail();
} catch (Exception ex) {
assertEquals(exception, ex.getClass());
}
}
/**
 * Sleeps for the given duration, but only when running against the live service;
 * playback mode replays recorded traffic and needs no real delays.
 *
 * @param millis how long to sleep, in milliseconds
 */
public void sleepInRecordMode(long millis) {
    if (interceptorManager.isPlaybackMode()) {
        return;
    }
    try {
        Thread.sleep(millis);
    } catch (InterruptedException e) {
        // BUG FIX: restore the interrupt flag instead of swallowing it, so code
        // higher up the stack can still observe the interruption.
        Thread.currentThread().interrupt();
    }
}
/**
 * Sleeps unconditionally for the given duration.
 *
 * @param millis how long to sleep, in milliseconds
 */
public void sleep(long millis) {
    try {
        Thread.sleep(millis);
    } catch (InterruptedException e) {
        // BUG FIX: restore the interrupt flag instead of swallowing it, so code
        // higher up the stack can still observe the interruption.
        Thread.currentThread().interrupt();
    }
}
/**
* Returns a stream of arguments that includes all combinations of eligible {@link HttpClient HttpClients} and
* service versions that should be tested.
*
* @return A stream of HttpClient and service version combinations to test.
*/
static Stream<Arguments> getTestParameters() {
List<Arguments> argumentsList = new ArrayList<>();
getHttpClients()
.forEach(httpClient -> {
Arrays.stream(CertificateServiceVersion.values()).filter(
CertificateClientTestBase::shouldServiceVersionBeTested)
.forEach(serviceVersion -> argumentsList.add(Arguments.of(httpClient, serviceVersion)));
});
return argumentsList.stream();
}
static boolean shouldServiceVersionBeTested(CertificateServiceVersion serviceVersion) {
if (Configuration.getGlobalConfiguration().get(AZURE_TEST_SERVICE_VERSIONS) == null) {
return CertificateServiceVersion.getLatest().equals(serviceVersion);
}
return true;
}
} | byte[] certificateContent = readCertificate("certificate.pem"); | HttpPipeline getHttpPipeline(HttpClient httpClient, CertificateServiceVersion serviceVersion) {
TokenCredential credential = null;
if (!interceptorManager.isPlaybackMode()) {
String clientId = System.getenv("ARM_CLIENTID");
String clientKey = System.getenv("ARM_CLIENTKEY");
String tenantId = System.getenv("AZURE_TENANT_ID");
Objects.requireNonNull(clientId, "The client id cannot be null");
Objects.requireNonNull(clientKey, "The client key cannot be null");
Objects.requireNonNull(tenantId, "The tenant id cannot be null");
credential = new ClientSecretCredentialBuilder()
.clientSecret(clientKey)
.clientId(clientId)
.tenantId(tenantId)
.build();
}
final List<HttpPipelinePolicy> policies = new ArrayList<>();
policies.add(new UserAgentPolicy(SDK_NAME, SDK_VERSION,
Configuration.getGlobalConfiguration().clone(), serviceVersion));
HttpPolicyProviders.addBeforeRetryPolicies(policies);
policies.add(new RetryPolicy());
if (credential != null) {
policies.add(new BearerTokenAuthenticationPolicy(credential, CertificateAsyncClient.KEY_VAULT_SCOPE));
}
HttpPolicyProviders.addAfterRetryPolicies(policies);
policies.add(new HttpLoggingPolicy(new HttpLogOptions().setLogLevel(HttpLogDetailLevel.BODY_AND_HEADERS)));
if (!interceptorManager.isPlaybackMode()) {
policies.add(interceptorManager.getRecordPolicy());
}
HttpPipeline pipeline = new HttpPipelineBuilder()
.policies(policies.toArray(new HttpPipelinePolicy[0]))
.httpClient(httpClient == null ? interceptorManager.getPlaybackClient() : httpClient)
.build();
return pipeline;
} | class CertificateClientTestBase extends TestBase {
static final String DISPLAY_NAME_WITH_ARGUMENTS = "{displayName} with [{arguments}]";
private static final String SDK_NAME = "client_name";
private static final String SDK_VERSION = "client_version";
@Override
protected String getTestName() {
return "";
}
void beforeTestSetup() {
} | class CertificateClientTestBase extends TestBase {
static final String DISPLAY_NAME_WITH_ARGUMENTS = "{displayName} with [{arguments}]";
private static final String SDK_NAME = "client_name";
private static final String SDK_VERSION = "client_version";
private static final String AZURE_KEYVAULT_TEST_CERTIFICATE_SERVICE_VERSIONS = "AZURE_KEYVAULT_TEST_CERTIFICATE_SERVICE_VERSIONS";
private static final String SERVICE_VERSION_FROM_ENV =
Configuration.getGlobalConfiguration().get(AZURE_KEYVAULT_TEST_CERTIFICATE_SERVICE_VERSIONS);
private static final String AZURE_TEST_SERVICE_VERSIONS_VALUE_ALL = "ALL";
@Override
protected String getTestName() {
return "";
}
void beforeTestSetup() {
}
@Test
public abstract void createCertificate(HttpClient httpClient, CertificateServiceVersion serviceVersion);
void createCertificateRunner(Consumer<CertificatePolicy> testRunner) {
final CertificatePolicy certificatePolicy = CertificatePolicy.getDefault();
testRunner.accept(certificatePolicy);
}
@Test
public abstract void createCertificateEmptyName(HttpClient httpClient, CertificateServiceVersion serviceVersion);
@Test
public abstract void createCertificateNullPolicy(HttpClient httpClient, CertificateServiceVersion serviceVersion);
@Test public abstract void createCertoificateNull(HttpClient httpClient, CertificateServiceVersion serviceVersion);
@Test
public abstract void updateCertificate(HttpClient httpClient, CertificateServiceVersion serviceVersion);
/**
 * Supplies an initial tag set and an updated tag set to the update-certificate test.
 *
 * @param testRunner consumer receiving (original tags, updated tags)
 */
void updateCertificateRunner(BiConsumer<Map<String, String>, Map<String, String>> testRunner) {
    final Map<String, String> tags = new HashMap<>();
    tags.put("first tag", "first value");
    final Map<String, String> updatedTags = new HashMap<>();
    // BUG FIX: these two entries were previously added to `tags` (copy-paste error),
    // leaving `updatedTags` empty and the update assertion vacuous.
    updatedTags.put("first tag", "first value");
    updatedTags.put("second tag", "second value");
    testRunner.accept(tags, updatedTags);
}
@Test
public abstract void updateDisabledCertificate(HttpClient httpClient, CertificateServiceVersion serviceVersion);
/**
 * Supplies an initial tag set and an updated tag set to the update-disabled-certificate test.
 *
 * @param testRunner consumer receiving (original tags, updated tags)
 */
void updateDisabledCertificateRunner(BiConsumer<Map<String, String>, Map<String, String>> testRunner) {
    final Map<String, String> tags = new HashMap<>();
    tags.put("first tag", "first value");
    final Map<String, String> updatedTags = new HashMap<>();
    // BUG FIX: these two entries were previously added to `tags` (copy-paste error),
    // leaving `updatedTags` empty and the update assertion vacuous.
    updatedTags.put("first tag", "first value");
    updatedTags.put("second tag", "second value");
    testRunner.accept(tags, updatedTags);
}
@Test
public abstract void getCertificate(HttpClient httpClient, CertificateServiceVersion serviceVersion);
void getCertificateRunner(Consumer<String> testRunner) {
testRunner.accept(generateResourceId("testCertificate4"));
}
@Test
public abstract void getCertificateSpecificVersion(HttpClient httpClient, CertificateServiceVersion serviceVersion);
void getCertificateSpecificVersionRunner(Consumer<String> testRunner) {
testRunner.accept(generateResourceId("testCertificate9"));
}
@Test
public abstract void getCertificateNotFound(HttpClient httpClient, CertificateServiceVersion serviceVersion);
@Test
public abstract void deleteCertificate(HttpClient httpClient, CertificateServiceVersion serviceVersion);
void deleteCertificateRunner(Consumer<String> testRunner) {
testRunner.accept(generateResourceId("testCert5"));
}
@Test
public abstract void deleteCertificateNotFound(HttpClient httpClient, CertificateServiceVersion serviceVersion);
@Test
public abstract void getDeletedCertificate(HttpClient httpClient, CertificateServiceVersion serviceVersion);
void getDeletedCertificateRunner(Consumer<String> testRunner) {
testRunner.accept(generateResourceId("testCert6"));
}
@Test
public abstract void getDeletedCertificateNotFound(HttpClient httpClient, CertificateServiceVersion serviceVersion);
@Test
public abstract void recoverDeletedCertificate(HttpClient httpClient, CertificateServiceVersion serviceVersion);
void recoverDeletedKeyRunner(Consumer<String> testRunner) {
testRunner.accept(generateResourceId("testCert7"));
}
@Test
public abstract void recoverDeletedCertificateNotFound(HttpClient httpClient, CertificateServiceVersion serviceVersion);
@Test
public abstract void backupCertificate(HttpClient httpClient, CertificateServiceVersion serviceVersion);
void backupCertificateRunner(Consumer<String> testRunner) {
testRunner.accept(generateResourceId("testCert8"));
}
@Test
public abstract void backupCertificateNotFound(HttpClient httpClient, CertificateServiceVersion serviceVersion);
@Test
public abstract void restoreCertificate(HttpClient httpClient, CertificateServiceVersion serviceVersion);
void restoreCertificateRunner(Consumer<String> testRunner) {
testRunner.accept(generateResourceId("testCertificate9"));
}
@Test
public abstract void getCertificateOperation(HttpClient httpClient, CertificateServiceVersion serviceVersion);
void getCertificateOperationRunner(Consumer<String> testRunner) {
testRunner.accept(generateResourceId("testCertificate10"));
}
@Test
public abstract void cancelCertificateOperation(HttpClient httpClient, CertificateServiceVersion serviceVersion);
void cancelCertificateOperationRunner(Consumer<String> testRunner) {
testRunner.accept(generateResourceId("testCertificate11"));
}
@Test
public abstract void deleteCertificateOperation(HttpClient httpClient, CertificateServiceVersion serviceVersion);
void deleteCertificateOperationRunner(Consumer<String> testRunner) {
testRunner.accept(generateResourceId("testCertificate12"));
}
@Test
public abstract void getCertificatePolicy(HttpClient httpClient, CertificateServiceVersion serviceVersion);
void getCertificatePolicyRunner(Consumer<String> testRunner) {
testRunner.accept(generateResourceId("testCertificate13"));
}
@Test
public abstract void updateCertificatePolicy(HttpClient httpClient, CertificateServiceVersion serviceVersion);
void updateCertificatePolicyRunner(Consumer<String> testRunner) {
testRunner.accept(generateResourceId("testCertificate14"));
}
@Test
public abstract void restoreCertificateFromMalformedBackup(HttpClient httpClient, CertificateServiceVersion serviceVersion);
@Test
public abstract void listCertificates(HttpClient httpClient, CertificateServiceVersion serviceVersion);
void listCertificatesRunner(Consumer<List<String>> testRunner) {
List<String> certificates = new ArrayList<>();
String certificateName;
for (int i = 0; i < 2; i++) {
certificateName = generateResourceId("listCertKey" + i);
certificates.add(certificateName);
}
testRunner.accept(certificates);
}
@Test
public abstract void createIssuer(HttpClient httpClient, CertificateServiceVersion serviceVersion);
// Supplies a freshly configured issuer ("testIssuer01") to the concrete createIssuer test.
// NOTE(review): "Issuere" is a typo, but renaming would break concrete subclasses/callers.
void createIssuereRunner(Consumer<CertificateIssuer> testRunner) {
final CertificateIssuer certificateIssuer = setupIssuer(generateResourceId("testIssuer01"));
testRunner.accept(certificateIssuer);
}
// Abstract test hooks: implemented per sync/async client in concrete subclasses.
@Test
public abstract void createIssuerNull(HttpClient httpClient, CertificateServiceVersion serviceVersion);
@Test
public abstract void getCertificateIssuer(HttpClient httpClient, CertificateServiceVersion serviceVersion);
@Test
public abstract void getCertificateIssuerNotFound(HttpClient httpClient, CertificateServiceVersion serviceVersion);
// Supplies an issuer fixture for the get-issuer tests.
void getCertificateIssuerRunner(Consumer<CertificateIssuer> testRunner) {
final CertificateIssuer certificateIssuer = setupIssuer(generateResourceId("testIssuer02"));
testRunner.accept(certificateIssuer);
}
@Test
public abstract void deleteCertificateIssuer(HttpClient httpClient, CertificateServiceVersion serviceVersion);
@Test
public abstract void deleteCertificateIssuerNotFound(HttpClient httpClient, CertificateServiceVersion serviceVersion);
// Supplies an issuer fixture for the delete-issuer tests.
void deleteCertificateIssuerRunner(Consumer<CertificateIssuer> testRunner) {
final CertificateIssuer certificateIssuer = setupIssuer(generateResourceId("testIssuer03"));
testRunner.accept(certificateIssuer);
}
@Test
public abstract void listCertificateIssuers(HttpClient httpClient, CertificateServiceVersion serviceVersion);
// Builds 10 issuer fixtures keyed by generated name for the list-issuers test.
void listCertificateIssuersRunner(Consumer<HashMap<String, CertificateIssuer>> testRunner) {
HashMap<String, CertificateIssuer> certificateIssuers = new HashMap<>();
String certificateIssuerName;
for (int i = 0; i < 10; i++) {
certificateIssuerName = generateResourceId("listCertIssuer" + i);
certificateIssuers.put(certificateIssuerName, setupIssuer(certificateIssuerName));
}
testRunner.accept(certificateIssuers);
}
@Test
public abstract void setContacts(HttpClient httpClient, CertificateServiceVersion serviceVersion);
@Test
public abstract void listContacts(HttpClient httpClient, CertificateServiceVersion serviceVersion);
@Test
public abstract void deleteContacts(HttpClient httpClient, CertificateServiceVersion serviceVersion);
// NOTE(review): "Operatio" is a typo in the method name; kept for subclass compatibility.
@Test
public abstract void getCertificateOperatioNotFound(HttpClient httpClient, CertificateServiceVersion serviceVersion);
@Test
public abstract void getCertificatePolicyNotFound(HttpClient httpClient, CertificateServiceVersion serviceVersion);
// Builds a single certificate-contact fixture with fixed name/email/phone.
CertificateContact setupContact() {
return new CertificateContact().setName("name").setEmail("first.last@gmail.com").setPhone("2323-31232");
}
/**
 * Verifies that the observed contact carries the same email, name and phone number
 * as the expected fixture. Short-circuits on the first mismatch.
 */
Boolean validateContact(CertificateContact expected, CertificateContact actual) {
    if (!expected.getEmail().equals(actual.getEmail())) {
        return false;
    }
    if (!expected.getName().equals(actual.getName())) {
        return false;
    }
    return expected.getPhone().equals(actual.getPhone());
}
@Test
public abstract void listCertificateVersions(HttpClient httpClient, CertificateServiceVersion serviceVersion);
// Repeats the same certificate name four times: each create produces a new version of it.
void listCertificateVersionsRunner(Consumer<List<String>> testRunner) {
List<String> certificates = new ArrayList<>();
String certificateName = generateResourceId("listCertVersionTest");
for (int i = 1; i < 5; i++) {
certificates.add(certificateName);
}
testRunner.accept(certificates);
}
@Test
public abstract void listDeletedCertificates(HttpClient httpClient, CertificateServiceVersion serviceVersion);
// Supplies three distinct certificate names to create and then delete in the test.
void listDeletedCertificatesRunner(Consumer<List<String>> testRunner) {
List<String> certificates = new ArrayList<>();
String certificateName;
for (int i = 0; i < 3; i++) {
certificateName = generateResourceId("listDeletedCertificate" + i);
certificates.add(certificateName);
}
testRunner.accept(certificates);
}
@Test
public abstract void importCertificate(HttpClient httpClient, CertificateServiceVersion serviceVersion);
void importCertificateRunner(Consumer<ImportCertificateOptions> testRunner) {
String certificateContent = "MIIJOwIBAzCCCPcGCSqGSIb3DQEHAaCCCOgEggjkMIII4DCCBgkGCSqGSIb3DQEHAaCCBfoEggX2MIIF8jCCBe4GCyqGSIb3DQEMCgECoIIE/jCCBPowHAYKKoZIhvcNAQwBAzAOBAj15YH9pOE58AICB9AEggTYLrI+SAru2dBZRQRlJY7XQ3LeLkah2FcRR3dATDshZ2h0IA2oBrkQIdsLyAAWZ32qYR1qkWxLHn9AqXgu27AEbOk35+pITZaiy63YYBkkpR+pDdngZt19Z0PWrGwHEq5z6BHS2GLyyN8SSOCbdzCz7blj3+7IZYoMj4WOPgOm/tQ6U44SFWek46QwN2zeA4i97v7ftNNns27ms52jqfhOvTA9c/wyfZKAY4aKJfYYUmycKjnnRl012ldS2lOkASFt+lu4QCa72IY6ePtRudPCvmzRv2pkLYS6z3cI7omT8nHP3DymNOqLbFqr5O2M1ZYaLC63Q3xt3eVvbcPh3N08D1hHkhz/KDTvkRAQpvrW8ISKmgDdmzN55Pe55xHfSWGB7gPw8sZea57IxFzWHTK2yvTslooWoosmGxanYY2IG/no3EbPOWDKjPZ4ilYJe5JJ2immlxPz+2e2EOCKpDI+7fzQcRz3PTd3BK+budZ8aXX8aW/lOgKS8WmxZoKnOJBNWeTNWQFugmktXfdPHAdxMhjUXqeGQd8wTvZ4EzQNNafovwkI7IV/ZYoa++RGofVR3ZbRSiBNF6TDj/qXFt0wN/CQnsGAmQAGNiN+D4mY7i25dtTu/Jc7OxLdhAUFpHyJpyrYWLfvOiS5WYBeEDHkiPUa/8eZSPA3MXWZR1RiuDvuNqMjct1SSwdXADTtF68l/US1ksU657+XSC+6ly1A/upz+X71+C4Ho6W0751j5ZMT6xKjGh5pee7MVuduxIzXjWIy3YSd0fIT3U0A5NLEvJ9rfkx6JiHjRLx6V1tqsrtT6BsGtmCQR1UCJPLqsKVDvAINx3cPA/CGqr5OX2BGZlAihGmN6n7gv8w4O0k0LPTAe5YefgXN3m9pE867N31GtHVZaJ/UVgDNYS2jused4rw76ZWN41akx2QN0JSeMJqHXqVz6AKfz8ICS/dFnEGyBNpXiMRxrY/QPKi/wONwqsbDxRW7vZRVKs78pBkE0ksaShlZk5GkeayDWC/7Hi/NqUFtIloK9XB3paLxo1DGu5qqaF34jZdktzkXp0uZqpp+FfKZaiovMjt8F7yHCPk+LYpRsU2Cyc9DVoDA6rIgf+uEP4jppgehsxyT0lJHax2t869R2jYdsXwYUXjgwHIV0voj7bJYPGFlFjXOp6ZW86scsHM5xfsGQoK2Fp838VT34SHE1ZXU/puM7rviREHYW72pfpgGZUILQMohuTPnd8tFtAkbrmjLDo+k9xx7HUvgoFTiNNWuq/cRjr70FKNguMMTIrid+HwfmbRoaxENWdLcOTNeascER2a+37UQolKD5ksrPJG6RdNA7O2pzp3micDYRs/+s28cCIxO
String certificatePassword = "123";
String certificateName = generateResourceId("importCertPkcs");
HashMap<String, String> tags = new HashMap<>();
tags.put("key", "val");
ImportCertificateOptions importCertificateOptions = new ImportCertificateOptions(certificateName, Base64.getDecoder().decode(certificateContent))
.setPassword(certificatePassword)
.setEnabled(true)
.setTags(tags);
testRunner.accept(importCertificateOptions);
}
@Test
public abstract void importPemCertificate(HttpClient httpClient, CertificateServiceVersion serviceVersion) throws IOException;
// Loads the PEM fixture from the test classpath and wraps it in import options with a
// self-signed policy; the PEM content type must be set explicitly on the policy.
void importPemCertificateRunner(Consumer<ImportCertificateOptions> testRunner) throws IOException {
byte[] certificateContent = readCertificate("pemCert.pem");
String certificateName = generateResourceId("importCertPem");
HashMap<String, String> tags = new HashMap<>();
tags.put("key", "val");
ImportCertificateOptions importCertificateOptions = new ImportCertificateOptions(certificateName, certificateContent)
.setPolicy(new CertificatePolicy("Self", "CN=AzureSDK")
.setContentType(CertificateContentType.PEM))
.setEnabled(true)
.setTags(tags);
testRunner.accept(importCertificateOptions);
}
@Test
public abstract void mergeCertificateNotFound(HttpClient httpClient, CertificateServiceVersion serviceVersion);
/**
 * Reads a certificate resource from the test classpath as text, normalizing every line
 * ending to '\n' (each line, including the last, is followed by a single '\n').
 *
 * Fixes over the previous version: the reader is now closed via try-with-resources even
 * if readLine throws; the O(n^2) String concatenation in the loop is replaced with a
 * StringBuilder; the bytes are produced with an explicit UTF-8 charset instead of the
 * platform default; and a missing resource now fails with a descriptive exception
 * instead of a bare NullPointerException.
 *
 * @param certName classpath-relative resource name of the certificate file
 * @return the normalized file content as UTF-8 bytes
 * @throws IOException if the resource is missing or cannot be read
 */
private byte[] readCertificate(String certName) throws IOException {
    java.net.URL resource = getClass().getClassLoader().getResource(certName);
    if (resource == null) {
        throw new java.io.FileNotFoundException("Certificate resource not found on classpath: " + certName);
    }
    StringBuilder pemCert = new StringBuilder();
    try (BufferedReader br = new BufferedReader(new FileReader(resource.getPath()))) {
        String line;
        while ((line = br.readLine()) != null) {
            pemCert.append(line).append("\n");
        }
    }
    return pemCert.toString().getBytes(java.nio.charset.StandardCharsets.UTF_8);
}
// Builds a "Test" provider issuer fixture with one administrator contact and fixed
// account/organization/password values; only the issuer name varies per test.
CertificateIssuer setupIssuer(String issuerName) {
return new CertificateIssuer(issuerName, "Test")
.setAdministratorContacts(Arrays.asList(new AdministratorContact().setFirstName("first").setLastName("last").setEmail("first.last@hotmail.com").setPhone("12345")))
.setAccountId("issuerAccountId")
.setEnabled(true)
.setOrganizationId("orgId")
.setPassword("test123");
}
/**
 * Renders a byte array (e.g. an X.509 thumbprint) as a lowercase hex string, two digits
 * per byte. A null input yields the empty string.
 */
String toHexString(byte[] x5t) {
    if (x5t == null) {
        return "";
    }
    StringBuilder hex = new StringBuilder(x5t.length * 2);
    for (byte b : x5t) {
        int unsigned = b & 0xFF;
        // Left-pad single-digit values so every byte occupies exactly two characters.
        if (unsigned < 0x10) {
            hex.append('0');
        }
        hex.append(Integer.toHexString(unsigned));
    }
    // Kept from the original for behavioral parity (hex output never contains '-').
    return hex.toString().replace("-", "");
}
// Parses the raw CER bytes of a retrieved certificate into an X509Certificate so tests
// can inspect subject/issuer fields. Fails the test if the CER payload is absent.
X509Certificate loadCerToX509Certificate(KeyVaultCertificateWithPolicy certificate) throws CertificateException, IOException {
assertNotNull(certificate.getCer());
ByteArrayInputStream cerStream = new ByteArrayInputStream(certificate.getCer());
CertificateFactory certificateFactory = CertificateFactory.getInstance("X.509");
X509Certificate x509Certificate = (X509Certificate) certificateFactory.generateCertificate(cerStream);
cerStream.close();
return x509Certificate;
}
// Structural comparison of issuers: fixed fields must match the expected fixture, while
// service-populated fields (timestamps, id) only need to be present.
Boolean validateIssuer(CertificateIssuer expected, CertificateIssuer actual) {
return expected.getAccountId().equals(actual.getAccountId())
&& expected.isEnabled().equals(actual.isEnabled())
&& (actual.getCreatedOn() != null)
&& (actual.getUpdatedOn() != null)
&& (actual.getId() != null)
&& (actual.getId().length() > 0)
&& expected.getName().equals(actual.getName())
&& expected.getOrganizationId().equals(actual.getOrganizationId())
&& expected.getAdministratorContacts().size() == actual.getAdministratorContacts().size();
}
// Builds a fully-populated self-signed policy fixture (EC P-384 key, PKCS#12 content,
// auto-renew 40 days before expiry) used by the policy round-trip tests.
CertificatePolicy setupPolicy() {
return new CertificatePolicy(WellKnownIssuerNames.SELF, "CN=default")
.setKeyUsage(CertificateKeyUsage.KEY_CERT_SIGN, CertificateKeyUsage.KEY_AGREEMENT)
.setContentType(CertificateContentType.PKCS12)
.setExportable(true)
.setKeyType(CertificateKeyType.EC)
.setCertificateTransparent(false)
.setEnabled(true)
.setKeyCurveName(CertificateKeyCurveName.P_384)
.setKeyReusable(true)
.setValidityInMonths(24)
.setLifetimeActions(new LifetimeAction(CertificatePolicyAction.AUTO_RENEW).setDaysBeforeExpiry(40));
}
// Field-by-field comparison of policies; collection-valued fields are compared by size only.
boolean validatePolicy(CertificatePolicy expected, CertificatePolicy actual) {
return expected.getKeyType().equals(actual.getKeyType())
&& expected.getContentType().equals(actual.getContentType())
&& actual.getCreatedOn() != null
&& expected.getIssuerName().equals(actual.getIssuerName())
&& expected.getKeyCurveName().equals(actual.getKeyCurveName())
&& expected.isExportable().equals(actual.isExportable())
&& expected.isCertificateTransparent().equals(actual.isCertificateTransparent())
&& expected.isEnabled().equals(actual.isEnabled())
&& expected.isKeyReusable().equals(actual.isKeyReusable())
&& expected.getValidityInMonths().equals(actual.getValidityInMonths())
&& expected.getLifetimeActions().size() == actual.getLifetimeActions().size()
&& expected.getKeyUsage().size() == actual.getKeyUsage().size();
}
// Compares two retrieved certificates; binary fields (thumbprint, CER) are compared by
// length only, not content.
boolean validateCertificate(KeyVaultCertificate expected, KeyVaultCertificate actual) {
return expected.getId().equals(actual.getId())
&& expected.getKeyId().equals(actual.getKeyId())
&& expected.getName().equals(actual.getName())
&& expected.getSecretId().equals(actual.getSecretId())
&& expected.getProperties().getVersion().equals(actual.getProperties().getVersion())
&& expected.getProperties().getCreatedOn().equals(actual.getProperties().getCreatedOn())
&& expected.getProperties().getExpiresOn().equals(actual.getProperties().getExpiresOn())
&& expected.getProperties().getRecoveryLevel().equals(actual.getProperties().getRecoveryLevel())
&& expected.getProperties().getX509Thumbprint().length == actual.getProperties().getX509Thumbprint().length
&& expected.getCer().length == actual.getCer().length;
}
public String getEndpoint() {
final String endpoint = interceptorManager.isPlaybackMode()
? "http:
: System.getenv("AZURE_KEYVAULT_ENDPOINT");
Objects.requireNonNull(endpoint);
return endpoint;
}
// Runs the command and asserts it throws an HttpResponseException with the given status.
static void assertRestException(Runnable exceptionThrower, int expectedStatusCode) {
assertRestException(exceptionThrower, HttpResponseException.class, expectedStatusCode);
}
// Runs the command and asserts it throws the given exception type with the given status;
// if the command completes normally the test fails via fail().
static void assertRestException(Runnable exceptionThrower, Class<? extends HttpResponseException> expectedExceptionType, int expectedStatusCode) {
try {
exceptionThrower.run();
fail();
} catch (Throwable ex) {
assertRestException(ex, expectedExceptionType, expectedStatusCode);
}
}
// In playback mode returns the suffix verbatim so recorded sessions match; otherwise
// prefixes a random UUID to avoid collisions between live runs.
String generateResourceId(String suffix) {
if (interceptorManager.isPlaybackMode()) {
return suffix;
}
String id = UUID.randomUUID().toString();
return suffix.length() > 0 ? id + "-" + suffix : id;
}
/**
 * Helper method to verify the error was an HttpResponseException carrying a specific
 * HTTP response code.
 *
 * @param exception Expected error thrown during the test
 * @param expectedStatusCode Expected HTTP status code contained in the error response
 */
static void assertRestException(Throwable exception, int expectedStatusCode) {
assertRestException(exception, HttpResponseException.class, expectedStatusCode);
}
// Asserts the exact exception class (not a subtype) and the HTTP status of its response.
static void assertRestException(Throwable exception, Class<? extends HttpResponseException> expectedExceptionType, int expectedStatusCode) {
assertEquals(expectedExceptionType, exception.getClass());
assertEquals(expectedStatusCode, ((HttpResponseException) exception).getResponse().getStatusCode());
}
/**
* Helper method to verify that a command throws an IllegalArgumentException.
*
* @param exceptionThrower Command that should throw the exception
*/
static <T> void assertRunnableThrowsException(Runnable exceptionThrower, Class<T> exception) {
try {
exceptionThrower.run();
fail();
} catch (Exception ex) {
assertEquals(exception, ex.getClass());
}
}
// Sleeps only when talking to the live service; playback runs skip the delay to stay fast.
public void sleepInRecordMode(long millis) {
if (interceptorManager.isPlaybackMode()) {
return;
}
try {
Thread.sleep(millis);
} catch (InterruptedException e) {
e.printStackTrace();
}
}
// Unconditional sleep. NOTE(review): swallows InterruptedException without re-interrupting
// the thread; acceptable in test code but loses the interrupt flag.
public void sleep(long millis) {
try {
Thread.sleep(millis);
} catch (InterruptedException e) {
e.printStackTrace();
}
}
/**
 * Returns a stream of arguments that includes all combinations of eligible {@link HttpClient HttpClients} and
 * service versions that should be tested.
 *
 * @return A stream of HttpClient and service version combinations to test.
 */
static Stream<Arguments> getTestParameters() {
List<Arguments> argumentsList = new ArrayList<>();
// Cross product: every eligible HttpClient x every service version that passes the filter.
getHttpClients()
.forEach(httpClient -> {
Arrays.stream(CertificateServiceVersion.values()).filter(
CertificateClientTestBase::shouldServiceVersionBeTested)
.forEach(serviceVersion -> argumentsList.add(Arguments.of(httpClient, serviceVersion)));
});
return argumentsList.stream();
}
/**
 * Returns whether the given service version matches the rules of the test framework.
 *
 * <ul>
 * <li>Uses the latest service version as the default if no environment variable is set.</li>
 * <li>If it's set to ALL, all service versions in {@link CertificateServiceVersion} will be tested.</li>
 * <li>Otherwise, the service version string should match the env variable.</li>
 * </ul>
 *
 * Environment values currently supported are: "ALL", "${version}".
 * Use a comma to separate the service versions to test,
 * e.g. {@code set AZURE_TEST_SERVICE_VERSIONS = V1_0, V2_0}
 *
 * @param serviceVersion ServiceVersion to check
 * @return Boolean indicating whether the service version should be tested.
 */
private static boolean shouldServiceVersionBeTested(CertificateServiceVersion serviceVersion) {
if (CoreUtils.isNullOrEmpty(SERVICE_VERSION_FROM_ENV)) {
return CertificateServiceVersion.getLatest().equals(serviceVersion);
}
if (AZURE_TEST_SERVICE_VERSIONS_VALUE_ALL.equalsIgnoreCase(SERVICE_VERSION_FROM_ENV)) {
return true;
}
// Comma-separated list: test any version whose name matches one of the entries.
String[] configuredServiceVersionList = SERVICE_VERSION_FROM_ENV.split(",");
return Arrays.stream(configuredServiceVersionList).anyMatch(configuredServiceVersion ->
serviceVersion.toString().equals(configuredServiceVersion.trim()));
}
} |
Not merge the most recent. Changed to master one. | HttpPipeline getHttpPipeline(HttpClient httpClient, CertificateServiceVersion serviceVersion) {
TokenCredential credential = null;
if (!interceptorManager.isPlaybackMode()) {
String clientId = System.getenv("ARM_CLIENTID");
String clientKey = System.getenv("ARM_CLIENTKEY");
String tenantId = System.getenv("AZURE_TENANT_ID");
Objects.requireNonNull(clientId, "The client id cannot be null");
Objects.requireNonNull(clientKey, "The client key cannot be null");
Objects.requireNonNull(tenantId, "The tenant id cannot be null");
credential = new ClientSecretCredentialBuilder()
.clientSecret(clientKey)
.clientId(clientId)
.tenantId(tenantId)
.build();
}
final List<HttpPipelinePolicy> policies = new ArrayList<>();
policies.add(new UserAgentPolicy(SDK_NAME, SDK_VERSION,
Configuration.getGlobalConfiguration().clone(), serviceVersion));
HttpPolicyProviders.addBeforeRetryPolicies(policies);
policies.add(new RetryPolicy());
if (credential != null) {
policies.add(new BearerTokenAuthenticationPolicy(credential, CertificateAsyncClient.KEY_VAULT_SCOPE));
}
HttpPolicyProviders.addAfterRetryPolicies(policies);
policies.add(new HttpLoggingPolicy(new HttpLogOptions().setLogLevel(HttpLogDetailLevel.BODY_AND_HEADERS)));
if (!interceptorManager.isPlaybackMode()) {
policies.add(interceptorManager.getRecordPolicy());
}
HttpPipeline pipeline = new HttpPipelineBuilder()
.policies(policies.toArray(new HttpPipelinePolicy[0]))
.httpClient(httpClient == null ? interceptorManager.getPlaybackClient() : httpClient)
.build();
return pipeline;
}
@Test
public abstract void createCertificate(HttpClient httpClient, CertificateServiceVersion serviceVersion);
// Supplies the default self-signed policy to the concrete create-certificate test.
void createCertificateRunner(Consumer<CertificatePolicy> testRunner) {
final CertificatePolicy certificatePolicy = CertificatePolicy.getDefault();
testRunner.accept(certificatePolicy);
}
@Test
public abstract void createCertificateEmptyName(HttpClient httpClient, CertificateServiceVersion serviceVersion);
@Test
public abstract void createCertificateNullPolicy(HttpClient httpClient, CertificateServiceVersion serviceVersion);
// NOTE(review): "Certoificate" is a typo, but renaming would break concrete subclasses.
@Test public abstract void createCertoificateNull(HttpClient httpClient, CertificateServiceVersion serviceVersion);
@Test
public abstract void updateCertificate(HttpClient httpClient, CertificateServiceVersion serviceVersion);
/**
 * Supplies an (original tags, updated tags) pair to the update-certificate test.
 *
 * FIX(review): the second and third puts went into {@code tags} instead of
 * {@code updatedTags}, so the "updated" map was always empty and the update test could
 * not observe any tag change. The updated map now carries both entries.
 */
void updateCertificateRunner(BiConsumer<Map<String, String>, Map<String, String>> testRunner) {
    final Map<String, String> tags = new HashMap<>();
    tags.put("first tag", "first value");
    final Map<String, String> updatedTags = new HashMap<>();
    updatedTags.put("first tag", "first value");
    updatedTags.put("second tag", "second value");
    testRunner.accept(tags, updatedTags);
}
@Test
public abstract void updateDisabledCertificate(HttpClient httpClient, CertificateServiceVersion serviceVersion);
/**
 * Supplies an (original tags, updated tags) pair to the update-disabled-certificate test.
 *
 * FIX(review): same defect as updateCertificateRunner — the puts intended for
 * {@code updatedTags} mutated {@code tags}, leaving the updated map empty.
 */
void updateDisabledCertificateRunner(BiConsumer<Map<String, String>, Map<String, String>> testRunner) {
    final Map<String, String> tags = new HashMap<>();
    tags.put("first tag", "first value");
    final Map<String, String> updatedTags = new HashMap<>();
    updatedTags.put("first tag", "first value");
    updatedTags.put("second tag", "second value");
    testRunner.accept(tags, updatedTags);
}
@Test
public abstract void getCertificate(HttpClient httpClient, CertificateServiceVersion serviceVersion);
// The runners below each hand a deterministic-in-playback resource name to the concrete test.
void getCertificateRunner(Consumer<String> testRunner) {
testRunner.accept(generateResourceId("testCertificate4"));
}
@Test
public abstract void getCertificateSpecificVersion(HttpClient httpClient, CertificateServiceVersion serviceVersion);
void getCertificateSpecificVersionRunner(Consumer<String> testRunner) {
testRunner.accept(generateResourceId("testCertificate9"));
}
@Test
public abstract void getCertificateNotFound(HttpClient httpClient, CertificateServiceVersion serviceVersion);
@Test
public abstract void deleteCertificate(HttpClient httpClient, CertificateServiceVersion serviceVersion);
void deleteCertificateRunner(Consumer<String> testRunner) {
testRunner.accept(generateResourceId("testCert5"));
}
@Test
public abstract void deleteCertificateNotFound(HttpClient httpClient, CertificateServiceVersion serviceVersion);
@Test
public abstract void getDeletedCertificate(HttpClient httpClient, CertificateServiceVersion serviceVersion);
void getDeletedCertificateRunner(Consumer<String> testRunner) {
testRunner.accept(generateResourceId("testCert6"));
}
@Test
public abstract void getDeletedCertificateNotFound(HttpClient httpClient, CertificateServiceVersion serviceVersion);
@Test
public abstract void recoverDeletedCertificate(HttpClient httpClient, CertificateServiceVersion serviceVersion);
// NOTE(review): named "...KeyRunner" although it serves the certificate-recovery test.
void recoverDeletedKeyRunner(Consumer<String> testRunner) {
testRunner.accept(generateResourceId("testCert7"));
}
@Test
public abstract void recoverDeletedCertificateNotFound(HttpClient httpClient, CertificateServiceVersion serviceVersion);
@Test
public abstract void backupCertificate(HttpClient httpClient, CertificateServiceVersion serviceVersion);
void backupCertificateRunner(Consumer<String> testRunner) {
testRunner.accept(generateResourceId("testCert8"));
}
@Test
public abstract void backupCertificateNotFound(HttpClient httpClient, CertificateServiceVersion serviceVersion);
@Test
public abstract void restoreCertificate(HttpClient httpClient, CertificateServiceVersion serviceVersion);
void restoreCertificateRunner(Consumer<String> testRunner) {
testRunner.accept(generateResourceId("testCertificate9"));
}
@Test
public abstract void getCertificateOperation(HttpClient httpClient, CertificateServiceVersion serviceVersion);
void getCertificateOperationRunner(Consumer<String> testRunner) {
testRunner.accept(generateResourceId("testCertificate10"));
}
@Test
public abstract void cancelCertificateOperation(HttpClient httpClient, CertificateServiceVersion serviceVersion);
void cancelCertificateOperationRunner(Consumer<String> testRunner) {
testRunner.accept(generateResourceId("testCertificate11"));
}
@Test
public abstract void deleteCertificateOperation(HttpClient httpClient, CertificateServiceVersion serviceVersion);
void deleteCertificateOperationRunner(Consumer<String> testRunner) {
testRunner.accept(generateResourceId("testCertificate12"));
}
@Test
public abstract void getCertificatePolicy(HttpClient httpClient, CertificateServiceVersion serviceVersion);
void getCertificatePolicyRunner(Consumer<String> testRunner) {
testRunner.accept(generateResourceId("testCertificate13"));
}
@Test
public abstract void updateCertificatePolicy(HttpClient httpClient, CertificateServiceVersion serviceVersion);
void updateCertificatePolicyRunner(Consumer<String> testRunner) {
testRunner.accept(generateResourceId("testCertificate14"));
}
@Test
public abstract void restoreCertificateFromMalformedBackup(HttpClient httpClient, CertificateServiceVersion serviceVersion);
@Test
public abstract void listCertificates(HttpClient httpClient, CertificateServiceVersion serviceVersion);
// Supplies two distinct certificate names for the list test.
void listCertificatesRunner(Consumer<List<String>> testRunner) {
List<String> certificates = new ArrayList<>();
String certificateName;
for (int i = 0; i < 2; i++) {
certificateName = generateResourceId("listCertKey" + i);
certificates.add(certificateName);
}
testRunner.accept(certificates);
}
@Test
public abstract void createIssuer(HttpClient httpClient, CertificateServiceVersion serviceVersion);
// Supplies a freshly configured issuer fixture ("testIssuer01") to the create-issuer test.
// NOTE(review): "Issuere" is a typo, kept for compatibility with concrete subclasses.
void createIssuereRunner(Consumer<CertificateIssuer> testRunner) {
final CertificateIssuer certificateIssuer = setupIssuer(generateResourceId("testIssuer01"));
testRunner.accept(certificateIssuer);
}
// Abstract test hooks implemented per sync/async client in concrete subclasses.
@Test
public abstract void createIssuerNull(HttpClient httpClient, CertificateServiceVersion serviceVersion);
@Test
public abstract void getCertificateIssuer(HttpClient httpClient, CertificateServiceVersion serviceVersion);
@Test
public abstract void getCertificateIssuerNotFound(HttpClient httpClient, CertificateServiceVersion serviceVersion);
void getCertificateIssuerRunner(Consumer<CertificateIssuer> testRunner) {
final CertificateIssuer certificateIssuer = setupIssuer(generateResourceId("testIssuer02"));
testRunner.accept(certificateIssuer);
}
@Test
public abstract void deleteCertificateIssuer(HttpClient httpClient, CertificateServiceVersion serviceVersion);
@Test
public abstract void deleteCertificateIssuerNotFound(HttpClient httpClient, CertificateServiceVersion serviceVersion);
void deleteCertificateIssuerRunner(Consumer<CertificateIssuer> testRunner) {
final CertificateIssuer certificateIssuer = setupIssuer(generateResourceId("testIssuer03"));
testRunner.accept(certificateIssuer);
}
@Test
public abstract void listCertificateIssuers(HttpClient httpClient, CertificateServiceVersion serviceVersion);
// Builds 10 issuer fixtures keyed by generated name for the list-issuers test.
void listCertificateIssuersRunner(Consumer<HashMap<String, CertificateIssuer>> testRunner) {
HashMap<String, CertificateIssuer> certificateIssuers = new HashMap<>();
String certificateIssuerName;
for (int i = 0; i < 10; i++) {
certificateIssuerName = generateResourceId("listCertIssuer" + i);
certificateIssuers.put(certificateIssuerName, setupIssuer(certificateIssuerName));
}
testRunner.accept(certificateIssuers);
}
@Test
public abstract void setContacts(HttpClient httpClient, CertificateServiceVersion serviceVersion);
@Test
public abstract void listContacts(HttpClient httpClient, CertificateServiceVersion serviceVersion);
@Test
public abstract void deleteContacts(HttpClient httpClient, CertificateServiceVersion serviceVersion);
@Test
public abstract void getCertificateOperatioNotFound(HttpClient httpClient, CertificateServiceVersion serviceVersion);
@Test
public abstract void getCertificatePolicyNotFound(HttpClient httpClient, CertificateServiceVersion serviceVersion);
// Builds a single certificate-contact fixture with fixed name/email/phone.
CertificateContact setupContact() {
return new CertificateContact().setName("name").setEmail("first.last@gmail.com").setPhone("2323-31232");
}
// Field-by-field comparison of contacts (email, name, phone).
Boolean validateContact(CertificateContact expected, CertificateContact actual) {
return expected.getEmail().equals(actual.getEmail())
&& expected.getName().equals(actual.getName())
&& expected.getPhone().equals(actual.getPhone());
}
@Test
public abstract void listCertificateVersions(HttpClient httpClient, CertificateServiceVersion serviceVersion);
// Repeats the same certificate name four times: each create produces a new version of it.
void listCertificateVersionsRunner(Consumer<List<String>> testRunner) {
List<String> certificates = new ArrayList<>();
String certificateName = generateResourceId("listCertVersionTest");
for (int i = 1; i < 5; i++) {
certificates.add(certificateName);
}
testRunner.accept(certificates);
}
@Test
public abstract void listDeletedCertificates(HttpClient httpClient, CertificateServiceVersion serviceVersion);
// Supplies three distinct certificate names to create and then delete in the test.
void listDeletedCertificatesRunner(Consumer<List<String>> testRunner) {
List<String> certificates = new ArrayList<>();
String certificateName;
for (int i = 0; i < 3; i++) {
certificateName = generateResourceId("listDeletedCertificate" + i);
certificates.add(certificateName);
}
testRunner.accept(certificates);
}
@Test
public abstract void importCertificate(HttpClient httpClient, CertificateServiceVersion serviceVersion);
void importCertificateRunner(Consumer<ImportCertificateOptions> testRunner) {
String certificateContent = "MIIJOwIBAzCCCPcGCSqGSIb3DQEHAaCCCOgEggjkMIII4DCCBgkGCSqGSIb3DQEHAaCCBfoEggX2MIIF8jCCBe4GCyqGSIb3DQEMCgECoIIE/jCCBPowHAYKKoZIhvcNAQwBAzAOBAj15YH9pOE58AICB9AEggTYLrI+SAru2dBZRQRlJY7XQ3LeLkah2FcRR3dATDshZ2h0IA2oBrkQIdsLyAAWZ32qYR1qkWxLHn9AqXgu27AEbOk35+pITZaiy63YYBkkpR+pDdngZt19Z0PWrGwHEq5z6BHS2GLyyN8SSOCbdzCz7blj3+7IZYoMj4WOPgOm/tQ6U44SFWek46QwN2zeA4i97v7ftNNns27ms52jqfhOvTA9c/wyfZKAY4aKJfYYUmycKjnnRl012ldS2lOkASFt+lu4QCa72IY6ePtRudPCvmzRv2pkLYS6z3cI7omT8nHP3DymNOqLbFqr5O2M1ZYaLC63Q3xt3eVvbcPh3N08D1hHkhz/KDTvkRAQpvrW8ISKmgDdmzN55Pe55xHfSWGB7gPw8sZea57IxFzWHTK2yvTslooWoosmGxanYY2IG/no3EbPOWDKjPZ4ilYJe5JJ2immlxPz+2e2EOCKpDI+7fzQcRz3PTd3BK+budZ8aXX8aW/lOgKS8WmxZoKnOJBNWeTNWQFugmktXfdPHAdxMhjUXqeGQd8wTvZ4EzQNNafovwkI7IV/ZYoa++RGofVR3ZbRSiBNF6TDj/qXFt0wN/CQnsGAmQAGNiN+D4mY7i25dtTu/Jc7OxLdhAUFpHyJpyrYWLfvOiS5WYBeEDHkiPUa/8eZSPA3MXWZR1RiuDvuNqMjct1SSwdXADTtF68l/US1ksU657+XSC+6ly1A/upz+X71+C4Ho6W0751j5ZMT6xKjGh5pee7MVuduxIzXjWIy3YSd0fIT3U0A5NLEvJ9rfkx6JiHjRLx6V1tqsrtT6BsGtmCQR1UCJPLqsKVDvAINx3cPA/CGqr5OX2BGZlAihGmN6n7gv8w4O0k0LPTAe5YefgXN3m9pE867N31GtHVZaJ/UVgDNYS2jused4rw76ZWN41akx2QN0JSeMJqHXqVz6AKfz8ICS/dFnEGyBNpXiMRxrY/QPKi/wONwqsbDxRW7vZRVKs78pBkE0ksaShlZk5GkeayDWC/7Hi/NqUFtIloK9XB3paLxo1DGu5qqaF34jZdktzkXp0uZqpp+FfKZaiovMjt8F7yHCPk+LYpRsU2Cyc9DVoDA6rIgf+uEP4jppgehsxyT0lJHax2t869R2jYdsXwYUXjgwHIV0voj7bJYPGFlFjXOp6ZW86scsHM5xfsGQoK2Fp838VT34SHE1ZXU/puM7rviREHYW72pfpgGZUILQMohuTPnd8tFtAkbrmjLDo+k9xx7HUvgoFTiNNWuq/cRjr70FKNguMMTIrid+HwfmbRoaxENWdLcOTNeascER2a+37UQolKD5ksrPJG6RdNA7O2pzp3micDYRs/+s28cCIxO
String certificatePassword = "123";
String certificateName = generateResourceId("importCertPkcs");
HashMap<String, String> tags = new HashMap<>();
tags.put("key", "val");
ImportCertificateOptions importCertificateOptions = new ImportCertificateOptions(certificateName, Base64.getDecoder().decode(certificateContent))
.setPassword(certificatePassword)
.setEnabled(true)
.setTags(tags);
testRunner.accept(importCertificateOptions);
}
@Test
public abstract void importPemCertificate(HttpClient httpClient, CertificateServiceVersion serviceVersion) throws IOException;
// Loads the PEM fixture from the test classpath and wraps it in import options with a
// self-signed policy; the PEM content type must be set explicitly on the policy.
// NOTE(review): this copy reads "certificate.pem" while the other copy of this runner
// reads "pemCert.pem" — confirm which resource name actually ships with the tests.
void importPemCertificateRunner(Consumer<ImportCertificateOptions> testRunner) throws IOException {
byte[] certificateContent = readCertificate("certificate.pem");
String certificateName = generateResourceId("importCertPem");
HashMap<String, String> tags = new HashMap<>();
tags.put("key", "val");
ImportCertificateOptions importCertificateOptions = new ImportCertificateOptions(certificateName, certificateContent)
.setPolicy(new CertificatePolicy("Self", "CN=AzureSDK")
.setContentType(CertificateContentType.PEM))
.setEnabled(true)
.setTags(tags);
testRunner.accept(importCertificateOptions);
}
@Test
public abstract void mergeCertificateNotFound(HttpClient httpClient, CertificateServiceVersion serviceVersion);
/**
 * Reads a certificate resource from the test classpath as raw bytes.
 *
 * Fixes over the previous version: reads through the resource URL's stream instead of
 * converting it to a file-system path, which removes the fragile ':'-based heuristic for
 * stripping the leading slash on Windows and the breakage on URL-encoded paths (e.g.
 * spaces encoded as %20); a missing resource now fails with a descriptive exception
 * instead of a bare NullPointerException.
 *
 * @param certName classpath-relative resource name of the certificate file
 * @return the file content as raw bytes
 * @throws IOException if the resource is missing or cannot be read
 */
private byte[] readCertificate(String certName) throws IOException {
    java.net.URL resource = getClass().getClassLoader().getResource(certName);
    if (resource == null) {
        throw new java.io.FileNotFoundException("Certificate resource not found on classpath: " + certName);
    }
    try (java.io.InputStream in = resource.openStream();
         java.io.ByteArrayOutputStream out = new java.io.ByteArrayOutputStream()) {
        byte[] buffer = new byte[8192];
        int read;
        while ((read = in.read(buffer)) != -1) {
            out.write(buffer, 0, read);
        }
        return out.toByteArray();
    }
}
// Builds a "Test" provider issuer fixture with one administrator contact and fixed
// account/organization/password values; only the issuer name varies per test.
CertificateIssuer setupIssuer(String issuerName) {
return new CertificateIssuer(issuerName, "Test")
.setAdministratorContacts(Arrays.asList(new AdministratorContact().setFirstName("first").setLastName("last").setEmail("first.last@hotmail.com").setPhone("12345")))
.setAccountId("issuerAccountId")
.setEnabled(true)
.setOrganizationId("orgId")
.setPassword("test123");
}
// Renders a byte array (e.g. an X.509 thumbprint) as lowercase hex, two digits per byte;
// null yields the empty string. The trailing replace("-", "") is a no-op kept as-is.
String toHexString(byte[] x5t) {
if (x5t == null) {
return "";
}
StringBuilder hexString = new StringBuilder();
for (int i = 0; i < x5t.length; i++) {
String hex = Integer.toHexString(0xFF & x5t[i]);
if (hex.length() == 1) {
hexString.append('0');
}
hexString.append(hex);
}
return hexString.toString().replace("-", "");
}
// Parses the raw CER bytes of a retrieved certificate into an X509Certificate; fails the
// test if the CER payload is absent.
X509Certificate loadCerToX509Certificate(KeyVaultCertificateWithPolicy certificate) throws CertificateException, IOException {
assertNotNull(certificate.getCer());
ByteArrayInputStream cerStream = new ByteArrayInputStream(certificate.getCer());
CertificateFactory certificateFactory = CertificateFactory.getInstance("X.509");
X509Certificate x509Certificate = (X509Certificate) certificateFactory.generateCertificate(cerStream);
cerStream.close();
return x509Certificate;
}
// Structural comparison of issuers: fixed fields must match the expected fixture, while
// service-populated fields (timestamps, id) only need to be present.
Boolean validateIssuer(CertificateIssuer expected, CertificateIssuer actual) {
return expected.getAccountId().equals(actual.getAccountId())
&& expected.isEnabled().equals(actual.isEnabled())
&& (actual.getCreatedOn() != null)
&& (actual.getUpdatedOn() != null)
&& (actual.getId() != null)
&& (actual.getId().length() > 0)
&& expected.getName().equals(actual.getName())
&& expected.getOrganizationId().equals(actual.getOrganizationId())
&& expected.getAdministratorContacts().size() == actual.getAdministratorContacts().size();
}
// Builds a fully-populated self-signed policy fixture (EC P-384 key, PKCS#12 content,
// auto-renew 40 days before expiry) used by the policy round-trip tests.
CertificatePolicy setupPolicy() {
return new CertificatePolicy(WellKnownIssuerNames.SELF, "CN=default")
.setKeyUsage(CertificateKeyUsage.KEY_CERT_SIGN, CertificateKeyUsage.KEY_AGREEMENT)
.setContentType(CertificateContentType.PKCS12)
.setExportable(true)
.setKeyType(CertificateKeyType.EC)
.setCertificateTransparent(false)
.setEnabled(true)
.setKeyCurveName(CertificateKeyCurveName.P_384)
.setKeyReusable(true)
.setValidityInMonths(24)
.setLifetimeActions(new LifetimeAction(CertificatePolicyAction.AUTO_RENEW).setDaysBeforeExpiry(40));
}
// Field-by-field comparison of policies; collection-valued fields compared by size only.
boolean validatePolicy(CertificatePolicy expected, CertificatePolicy actual) {
return expected.getKeyType().equals(actual.getKeyType())
&& expected.getContentType().equals(actual.getContentType())
&& actual.getCreatedOn() != null
&& expected.getIssuerName().equals(actual.getIssuerName())
&& expected.getKeyCurveName().equals(actual.getKeyCurveName())
&& expected.isExportable().equals(actual.isExportable())
&& expected.isCertificateTransparent().equals(actual.isCertificateTransparent())
&& expected.isEnabled().equals(actual.isEnabled())
&& expected.isKeyReusable().equals(actual.isKeyReusable())
&& expected.getValidityInMonths().equals(actual.getValidityInMonths())
&& expected.getLifetimeActions().size() == actual.getLifetimeActions().size()
&& expected.getKeyUsage().size() == actual.getKeyUsage().size();
}
// Compares two retrieved certificates; binary fields (thumbprint, CER) compared by length.
boolean validateCertificate(KeyVaultCertificate expected, KeyVaultCertificate actual) {
return expected.getId().equals(actual.getId())
&& expected.getKeyId().equals(actual.getKeyId())
&& expected.getName().equals(actual.getName())
&& expected.getSecretId().equals(actual.getSecretId())
&& expected.getProperties().getVersion().equals(actual.getProperties().getVersion())
&& expected.getProperties().getCreatedOn().equals(actual.getProperties().getCreatedOn())
&& expected.getProperties().getExpiresOn().equals(actual.getProperties().getExpiresOn())
&& expected.getProperties().getRecoveryLevel().equals(actual.getProperties().getRecoveryLevel())
&& expected.getProperties().getX509Thumbprint().length == actual.getProperties().getX509Thumbprint().length
&& expected.getCer().length == actual.getCer().length;
}
public String getEndpoint() {
final String endpoint = interceptorManager.isPlaybackMode()
? "http:
: System.getenv("AZURE_KEYVAULT_ENDPOINT");
Objects.requireNonNull(endpoint);
return endpoint;
}
// Runs the command and asserts it throws an HttpResponseException with the given status.
static void assertRestException(Runnable exceptionThrower, int expectedStatusCode) {
assertRestException(exceptionThrower, HttpResponseException.class, expectedStatusCode);
}
// Runs the command and asserts it throws the given exception type with the given status;
// if the command completes normally the test fails via fail().
static void assertRestException(Runnable exceptionThrower, Class<? extends HttpResponseException> expectedExceptionType, int expectedStatusCode) {
try {
exceptionThrower.run();
fail();
} catch (Throwable ex) {
assertRestException(ex, expectedExceptionType, expectedStatusCode);
}
}
// In playback mode returns the suffix verbatim so recorded sessions match; otherwise
// prefixes a random UUID to avoid collisions between live runs.
String generateResourceId(String suffix) {
if (interceptorManager.isPlaybackMode()) {
return suffix;
}
String id = UUID.randomUUID().toString();
return suffix.length() > 0 ? id + "-" + suffix : id;
}
/**
 * Helper method to verify the error was an HttpResponseException carrying a specific
 * HTTP response code.
 *
 * @param exception Expected error thrown during the test
 * @param expectedStatusCode Expected HTTP status code contained in the error response
 */
static void assertRestException(Throwable exception, int expectedStatusCode) {
assertRestException(exception, HttpResponseException.class, expectedStatusCode);
}
// Asserts the exact exception class (not a subtype) and the HTTP status of its response.
static void assertRestException(Throwable exception, Class<? extends HttpResponseException> expectedExceptionType, int expectedStatusCode) {
assertEquals(expectedExceptionType, exception.getClass());
assertEquals(expectedStatusCode, ((HttpResponseException) exception).getResponse().getStatusCode());
}
/**
* Helper method to verify that a command throws an IllegalArgumentException.
*
* @param exceptionThrower Command that should throw the exception
*/
static <T> void assertRunnableThrowsException(Runnable exceptionThrower, Class<T> exception) {
try {
exceptionThrower.run();
fail();
} catch (Exception ex) {
assertEquals(exception, ex.getClass());
}
}
public void sleepInRecordMode(long millis) {
if (interceptorManager.isPlaybackMode()) {
return;
}
try {
Thread.sleep(millis);
} catch (InterruptedException e) {
e.printStackTrace();
}
}
public void sleep(long millis) {
try {
Thread.sleep(millis);
} catch (InterruptedException e) {
e.printStackTrace();
}
}
/**
* Returns a stream of arguments that includes all combinations of eligible {@link HttpClient HttpClients} and
* service versions that should be tested.
*
* @return A stream of HttpClient and service version combinations to test.
*/
static Stream<Arguments> getTestParameters() {
List<Arguments> argumentsList = new ArrayList<>();
getHttpClients()
.forEach(httpClient -> {
Arrays.stream(CertificateServiceVersion.values()).filter(
CertificateClientTestBase::shouldServiceVersionBeTested)
.forEach(serviceVersion -> argumentsList.add(Arguments.of(httpClient, serviceVersion)));
});
return argumentsList.stream();
}
static boolean shouldServiceVersionBeTested(CertificateServiceVersion serviceVersion) {
if (Configuration.getGlobalConfiguration().get(AZURE_TEST_SERVICE_VERSIONS) == null) {
return CertificateServiceVersion.getLatest().equals(serviceVersion);
}
return true;
}
} | pemPath = pemPath.substring(1); | HttpPipeline getHttpPipeline(HttpClient httpClient, CertificateServiceVersion serviceVersion) {
TokenCredential credential = null;
if (!interceptorManager.isPlaybackMode()) {
String clientId = System.getenv("ARM_CLIENTID");
String clientKey = System.getenv("ARM_CLIENTKEY");
String tenantId = System.getenv("AZURE_TENANT_ID");
Objects.requireNonNull(clientId, "The client id cannot be null");
Objects.requireNonNull(clientKey, "The client key cannot be null");
Objects.requireNonNull(tenantId, "The tenant id cannot be null");
credential = new ClientSecretCredentialBuilder()
.clientSecret(clientKey)
.clientId(clientId)
.tenantId(tenantId)
.build();
}
final List<HttpPipelinePolicy> policies = new ArrayList<>();
policies.add(new UserAgentPolicy(SDK_NAME, SDK_VERSION,
Configuration.getGlobalConfiguration().clone(), serviceVersion));
HttpPolicyProviders.addBeforeRetryPolicies(policies);
policies.add(new RetryPolicy());
if (credential != null) {
policies.add(new BearerTokenAuthenticationPolicy(credential, CertificateAsyncClient.KEY_VAULT_SCOPE));
}
HttpPolicyProviders.addAfterRetryPolicies(policies);
policies.add(new HttpLoggingPolicy(new HttpLogOptions().setLogLevel(HttpLogDetailLevel.BODY_AND_HEADERS)));
if (!interceptorManager.isPlaybackMode()) {
policies.add(interceptorManager.getRecordPolicy());
}
HttpPipeline pipeline = new HttpPipelineBuilder()
.policies(policies.toArray(new HttpPipelinePolicy[0]))
.httpClient(httpClient == null ? interceptorManager.getPlaybackClient() : httpClient)
.build();
return pipeline;
} | class CertificateClientTestBase extends TestBase {
static final String DISPLAY_NAME_WITH_ARGUMENTS = "{displayName} with [{arguments}]";
private static final String SDK_NAME = "client_name";
private static final String SDK_VERSION = "client_version";
@Override
protected String getTestName() {
return "";
}
void beforeTestSetup() {
} | class CertificateClientTestBase extends TestBase {
static final String DISPLAY_NAME_WITH_ARGUMENTS = "{displayName} with [{arguments}]";
private static final String SDK_NAME = "client_name";
private static final String SDK_VERSION = "client_version";
private static final String AZURE_KEYVAULT_TEST_CERTIFICATE_SERVICE_VERSIONS = "AZURE_KEYVAULT_TEST_CERTIFICATE_SERVICE_VERSIONS";
private static final String SERVICE_VERSION_FROM_ENV =
Configuration.getGlobalConfiguration().get(AZURE_KEYVAULT_TEST_CERTIFICATE_SERVICE_VERSIONS);
private static final String AZURE_TEST_SERVICE_VERSIONS_VALUE_ALL = "ALL";
@Override
protected String getTestName() {
return "";
}
void beforeTestSetup() {
}
@Test
public abstract void createCertificate(HttpClient httpClient, CertificateServiceVersion serviceVersion);
void createCertificateRunner(Consumer<CertificatePolicy> testRunner) {
final CertificatePolicy certificatePolicy = CertificatePolicy.getDefault();
testRunner.accept(certificatePolicy);
}
@Test
public abstract void createCertificateEmptyName(HttpClient httpClient, CertificateServiceVersion serviceVersion);
@Test
public abstract void createCertificateNullPolicy(HttpClient httpClient, CertificateServiceVersion serviceVersion);
@Test public abstract void createCertoificateNull(HttpClient httpClient, CertificateServiceVersion serviceVersion);
@Test
public abstract void updateCertificate(HttpClient httpClient, CertificateServiceVersion serviceVersion);
void updateCertificateRunner(BiConsumer<Map<String, String>, Map<String, String>> testRunner) {
final Map<String, String> tags = new HashMap<>();
tags.put("first tag", "first value");
final Map<String, String> updatedTags = new HashMap<>();
tags.put("first tag", "first value");
tags.put("second tag", "second value");
testRunner.accept(tags, updatedTags);
}
@Test
public abstract void updateDisabledCertificate(HttpClient httpClient, CertificateServiceVersion serviceVersion);
void updateDisabledCertificateRunner(BiConsumer<Map<String, String>, Map<String, String>> testRunner) {
final Map<String, String> tags = new HashMap<>();
tags.put("first tag", "first value");
final Map<String, String> updatedTags = new HashMap<>();
tags.put("first tag", "first value");
tags.put("second tag", "second value");
testRunner.accept(tags, updatedTags);
}
@Test
public abstract void getCertificate(HttpClient httpClient, CertificateServiceVersion serviceVersion);
void getCertificateRunner(Consumer<String> testRunner) {
testRunner.accept(generateResourceId("testCertificate4"));
}
@Test
public abstract void getCertificateSpecificVersion(HttpClient httpClient, CertificateServiceVersion serviceVersion);
void getCertificateSpecificVersionRunner(Consumer<String> testRunner) {
testRunner.accept(generateResourceId("testCertificate9"));
}
@Test
public abstract void getCertificateNotFound(HttpClient httpClient, CertificateServiceVersion serviceVersion);
@Test
public abstract void deleteCertificate(HttpClient httpClient, CertificateServiceVersion serviceVersion);
void deleteCertificateRunner(Consumer<String> testRunner) {
testRunner.accept(generateResourceId("testCert5"));
}
@Test
public abstract void deleteCertificateNotFound(HttpClient httpClient, CertificateServiceVersion serviceVersion);
@Test
public abstract void getDeletedCertificate(HttpClient httpClient, CertificateServiceVersion serviceVersion);
void getDeletedCertificateRunner(Consumer<String> testRunner) {
testRunner.accept(generateResourceId("testCert6"));
}
@Test
public abstract void getDeletedCertificateNotFound(HttpClient httpClient, CertificateServiceVersion serviceVersion);
@Test
public abstract void recoverDeletedCertificate(HttpClient httpClient, CertificateServiceVersion serviceVersion);
void recoverDeletedKeyRunner(Consumer<String> testRunner) {
testRunner.accept(generateResourceId("testCert7"));
}
@Test
public abstract void recoverDeletedCertificateNotFound(HttpClient httpClient, CertificateServiceVersion serviceVersion);
@Test
public abstract void backupCertificate(HttpClient httpClient, CertificateServiceVersion serviceVersion);
void backupCertificateRunner(Consumer<String> testRunner) {
testRunner.accept(generateResourceId("testCert8"));
}
@Test
public abstract void backupCertificateNotFound(HttpClient httpClient, CertificateServiceVersion serviceVersion);
@Test
public abstract void restoreCertificate(HttpClient httpClient, CertificateServiceVersion serviceVersion);
void restoreCertificateRunner(Consumer<String> testRunner) {
testRunner.accept(generateResourceId("testCertificate9"));
}
@Test
public abstract void getCertificateOperation(HttpClient httpClient, CertificateServiceVersion serviceVersion);
void getCertificateOperationRunner(Consumer<String> testRunner) {
testRunner.accept(generateResourceId("testCertificate10"));
}
@Test
public abstract void cancelCertificateOperation(HttpClient httpClient, CertificateServiceVersion serviceVersion);
void cancelCertificateOperationRunner(Consumer<String> testRunner) {
testRunner.accept(generateResourceId("testCertificate11"));
}
@Test
public abstract void deleteCertificateOperation(HttpClient httpClient, CertificateServiceVersion serviceVersion);
void deleteCertificateOperationRunner(Consumer<String> testRunner) {
testRunner.accept(generateResourceId("testCertificate12"));
}
@Test
public abstract void getCertificatePolicy(HttpClient httpClient, CertificateServiceVersion serviceVersion);
void getCertificatePolicyRunner(Consumer<String> testRunner) {
testRunner.accept(generateResourceId("testCertificate13"));
}
@Test
public abstract void updateCertificatePolicy(HttpClient httpClient, CertificateServiceVersion serviceVersion);
void updateCertificatePolicyRunner(Consumer<String> testRunner) {
testRunner.accept(generateResourceId("testCertificate14"));
}
@Test
public abstract void restoreCertificateFromMalformedBackup(HttpClient httpClient, CertificateServiceVersion serviceVersion);
@Test
public abstract void listCertificates(HttpClient httpClient, CertificateServiceVersion serviceVersion);
void listCertificatesRunner(Consumer<List<String>> testRunner) {
List<String> certificates = new ArrayList<>();
String certificateName;
for (int i = 0; i < 2; i++) {
certificateName = generateResourceId("listCertKey" + i);
certificates.add(certificateName);
}
testRunner.accept(certificates);
}
@Test
public abstract void createIssuer(HttpClient httpClient, CertificateServiceVersion serviceVersion);
void createIssuereRunner(Consumer<CertificateIssuer> testRunner) {
final CertificateIssuer certificateIssuer = setupIssuer(generateResourceId("testIssuer01"));
testRunner.accept(certificateIssuer);
}
@Test
public abstract void createIssuerNull(HttpClient httpClient, CertificateServiceVersion serviceVersion);
@Test
public abstract void getCertificateIssuer(HttpClient httpClient, CertificateServiceVersion serviceVersion);
@Test
public abstract void getCertificateIssuerNotFound(HttpClient httpClient, CertificateServiceVersion serviceVersion);
void getCertificateIssuerRunner(Consumer<CertificateIssuer> testRunner) {
final CertificateIssuer certificateIssuer = setupIssuer(generateResourceId("testIssuer02"));
testRunner.accept(certificateIssuer);
}
@Test
public abstract void deleteCertificateIssuer(HttpClient httpClient, CertificateServiceVersion serviceVersion);
@Test
public abstract void deleteCertificateIssuerNotFound(HttpClient httpClient, CertificateServiceVersion serviceVersion);
void deleteCertificateIssuerRunner(Consumer<CertificateIssuer> testRunner) {
final CertificateIssuer certificateIssuer = setupIssuer(generateResourceId("testIssuer03"));
testRunner.accept(certificateIssuer);
}
@Test
public abstract void listCertificateIssuers(HttpClient httpClient, CertificateServiceVersion serviceVersion);
void listCertificateIssuersRunner(Consumer<HashMap<String, CertificateIssuer>> testRunner) {
HashMap<String, CertificateIssuer> certificateIssuers = new HashMap<>();
String certificateIssuerName;
for (int i = 0; i < 10; i++) {
certificateIssuerName = generateResourceId("listCertIssuer" + i);
certificateIssuers.put(certificateIssuerName, setupIssuer(certificateIssuerName));
}
testRunner.accept(certificateIssuers);
}
@Test
public abstract void setContacts(HttpClient httpClient, CertificateServiceVersion serviceVersion);
@Test
public abstract void listContacts(HttpClient httpClient, CertificateServiceVersion serviceVersion);
@Test
public abstract void deleteContacts(HttpClient httpClient, CertificateServiceVersion serviceVersion);
@Test
public abstract void getCertificateOperatioNotFound(HttpClient httpClient, CertificateServiceVersion serviceVersion);
@Test
public abstract void getCertificatePolicyNotFound(HttpClient httpClient, CertificateServiceVersion serviceVersion);
CertificateContact setupContact() {
return new CertificateContact().setName("name").setEmail("first.last@gmail.com").setPhone("2323-31232");
}
Boolean validateContact(CertificateContact expected, CertificateContact actual) {
return expected.getEmail().equals(actual.getEmail())
&& expected.getName().equals(actual.getName())
&& expected.getPhone().equals(actual.getPhone());
}
@Test
public abstract void listCertificateVersions(HttpClient httpClient, CertificateServiceVersion serviceVersion);
void listCertificateVersionsRunner(Consumer<List<String>> testRunner) {
List<String> certificates = new ArrayList<>();
String certificateName = generateResourceId("listCertVersionTest");
for (int i = 1; i < 5; i++) {
certificates.add(certificateName);
}
testRunner.accept(certificates);
}
@Test
public abstract void listDeletedCertificates(HttpClient httpClient, CertificateServiceVersion serviceVersion);
void listDeletedCertificatesRunner(Consumer<List<String>> testRunner) {
List<String> certificates = new ArrayList<>();
String certificateName;
for (int i = 0; i < 3; i++) {
certificateName = generateResourceId("listDeletedCertificate" + i);
certificates.add(certificateName);
}
testRunner.accept(certificates);
}
@Test
public abstract void importCertificate(HttpClient httpClient, CertificateServiceVersion serviceVersion);
void importCertificateRunner(Consumer<ImportCertificateOptions> testRunner) {
String certificateContent = "MIIJOwIBAzCCCPcGCSqGSIb3DQEHAaCCCOgEggjkMIII4DCCBgkGCSqGSIb3DQEHAaCCBfoEggX2MIIF8jCCBe4GCyqGSIb3DQEMCgECoIIE/jCCBPowHAYKKoZIhvcNAQwBAzAOBAj15YH9pOE58AICB9AEggTYLrI+SAru2dBZRQRlJY7XQ3LeLkah2FcRR3dATDshZ2h0IA2oBrkQIdsLyAAWZ32qYR1qkWxLHn9AqXgu27AEbOk35+pITZaiy63YYBkkpR+pDdngZt19Z0PWrGwHEq5z6BHS2GLyyN8SSOCbdzCz7blj3+7IZYoMj4WOPgOm/tQ6U44SFWek46QwN2zeA4i97v7ftNNns27ms52jqfhOvTA9c/wyfZKAY4aKJfYYUmycKjnnRl012ldS2lOkASFt+lu4QCa72IY6ePtRudPCvmzRv2pkLYS6z3cI7omT8nHP3DymNOqLbFqr5O2M1ZYaLC63Q3xt3eVvbcPh3N08D1hHkhz/KDTvkRAQpvrW8ISKmgDdmzN55Pe55xHfSWGB7gPw8sZea57IxFzWHTK2yvTslooWoosmGxanYY2IG/no3EbPOWDKjPZ4ilYJe5JJ2immlxPz+2e2EOCKpDI+7fzQcRz3PTd3BK+budZ8aXX8aW/lOgKS8WmxZoKnOJBNWeTNWQFugmktXfdPHAdxMhjUXqeGQd8wTvZ4EzQNNafovwkI7IV/ZYoa++RGofVR3ZbRSiBNF6TDj/qXFt0wN/CQnsGAmQAGNiN+D4mY7i25dtTu/Jc7OxLdhAUFpHyJpyrYWLfvOiS5WYBeEDHkiPUa/8eZSPA3MXWZR1RiuDvuNqMjct1SSwdXADTtF68l/US1ksU657+XSC+6ly1A/upz+X71+C4Ho6W0751j5ZMT6xKjGh5pee7MVuduxIzXjWIy3YSd0fIT3U0A5NLEvJ9rfkx6JiHjRLx6V1tqsrtT6BsGtmCQR1UCJPLqsKVDvAINx3cPA/CGqr5OX2BGZlAihGmN6n7gv8w4O0k0LPTAe5YefgXN3m9pE867N31GtHVZaJ/UVgDNYS2jused4rw76ZWN41akx2QN0JSeMJqHXqVz6AKfz8ICS/dFnEGyBNpXiMRxrY/QPKi/wONwqsbDxRW7vZRVKs78pBkE0ksaShlZk5GkeayDWC/7Hi/NqUFtIloK9XB3paLxo1DGu5qqaF34jZdktzkXp0uZqpp+FfKZaiovMjt8F7yHCPk+LYpRsU2Cyc9DVoDA6rIgf+uEP4jppgehsxyT0lJHax2t869R2jYdsXwYUXjgwHIV0voj7bJYPGFlFjXOp6ZW86scsHM5xfsGQoK2Fp838VT34SHE1ZXU/puM7rviREHYW72pfpgGZUILQMohuTPnd8tFtAkbrmjLDo+k9xx7HUvgoFTiNNWuq/cRjr70FKNguMMTIrid+HwfmbRoaxENWdLcOTNeascER2a+37UQolKD5ksrPJG6RdNA7O2pzp3micDYRs/+s28cCIxO
String certificatePassword = "123";
String certificateName = generateResourceId("importCertPkcs");
HashMap<String, String> tags = new HashMap<>();
tags.put("key", "val");
ImportCertificateOptions importCertificateOptions = new ImportCertificateOptions(certificateName, Base64.getDecoder().decode(certificateContent))
.setPassword(certificatePassword)
.setEnabled(true)
.setTags(tags);
testRunner.accept(importCertificateOptions);
}
@Test
public abstract void importPemCertificate(HttpClient httpClient, CertificateServiceVersion serviceVersion) throws IOException;
void importPemCertificateRunner(Consumer<ImportCertificateOptions> testRunner) throws IOException {
byte[] certificateContent = readCertificate("pemCert.pem");
String certificateName = generateResourceId("importCertPem");
HashMap<String, String> tags = new HashMap<>();
tags.put("key", "val");
ImportCertificateOptions importCertificateOptions = new ImportCertificateOptions(certificateName, certificateContent)
.setPolicy(new CertificatePolicy("Self", "CN=AzureSDK")
.setContentType(CertificateContentType.PEM))
.setEnabled(true)
.setTags(tags);
testRunner.accept(importCertificateOptions);
}
@Test
public abstract void mergeCertificateNotFound(HttpClient httpClient, CertificateServiceVersion serviceVersion);
private byte[] readCertificate(String certName) throws IOException {
String pemPath = getClass().getClassLoader().getResource(certName).getPath();
String pemCert = "";
BufferedReader br = new BufferedReader(new FileReader(pemPath));
try {
String line;
while ((line = br.readLine()) != null) {
pemCert += line + "\n";
}
} finally {
br.close();
}
return pemCert.getBytes();
}
CertificateIssuer setupIssuer(String issuerName) {
return new CertificateIssuer(issuerName, "Test")
.setAdministratorContacts(Arrays.asList(new AdministratorContact().setFirstName("first").setLastName("last").setEmail("first.last@hotmail.com").setPhone("12345")))
.setAccountId("issuerAccountId")
.setEnabled(true)
.setOrganizationId("orgId")
.setPassword("test123");
}
String toHexString(byte[] x5t) {
if (x5t == null) {
return "";
}
StringBuilder hexString = new StringBuilder();
for (int i = 0; i < x5t.length; i++) {
String hex = Integer.toHexString(0xFF & x5t[i]);
if (hex.length() == 1) {
hexString.append('0');
}
hexString.append(hex);
}
return hexString.toString().replace("-", "");
}
X509Certificate loadCerToX509Certificate(KeyVaultCertificateWithPolicy certificate) throws CertificateException, IOException {
assertNotNull(certificate.getCer());
ByteArrayInputStream cerStream = new ByteArrayInputStream(certificate.getCer());
CertificateFactory certificateFactory = CertificateFactory.getInstance("X.509");
X509Certificate x509Certificate = (X509Certificate) certificateFactory.generateCertificate(cerStream);
cerStream.close();
return x509Certificate;
}
Boolean validateIssuer(CertificateIssuer expected, CertificateIssuer actual) {
return expected.getAccountId().equals(actual.getAccountId())
&& expected.isEnabled().equals(actual.isEnabled())
&& (actual.getCreatedOn() != null)
&& (actual.getUpdatedOn() != null)
&& (actual.getId() != null)
&& (actual.getId().length() > 0)
&& expected.getName().equals(actual.getName())
&& expected.getOrganizationId().equals(actual.getOrganizationId())
&& expected.getAdministratorContacts().size() == actual.getAdministratorContacts().size();
}
CertificatePolicy setupPolicy() {
return new CertificatePolicy(WellKnownIssuerNames.SELF, "CN=default")
.setKeyUsage(CertificateKeyUsage.KEY_CERT_SIGN, CertificateKeyUsage.KEY_AGREEMENT)
.setContentType(CertificateContentType.PKCS12)
.setExportable(true)
.setKeyType(CertificateKeyType.EC)
.setCertificateTransparent(false)
.setEnabled(true)
.setKeyCurveName(CertificateKeyCurveName.P_384)
.setKeyReusable(true)
.setValidityInMonths(24)
.setLifetimeActions(new LifetimeAction(CertificatePolicyAction.AUTO_RENEW).setDaysBeforeExpiry(40));
}
boolean validatePolicy(CertificatePolicy expected, CertificatePolicy actual) {
return expected.getKeyType().equals(actual.getKeyType())
&& expected.getContentType().equals(actual.getContentType())
&& actual.getCreatedOn() != null
&& expected.getIssuerName().equals(actual.getIssuerName())
&& expected.getKeyCurveName().equals(actual.getKeyCurveName())
&& expected.isExportable().equals(actual.isExportable())
&& expected.isCertificateTransparent().equals(actual.isCertificateTransparent())
&& expected.isEnabled().equals(actual.isEnabled())
&& expected.isKeyReusable().equals(actual.isKeyReusable())
&& expected.getValidityInMonths().equals(actual.getValidityInMonths())
&& expected.getLifetimeActions().size() == actual.getLifetimeActions().size()
&& expected.getKeyUsage().size() == actual.getKeyUsage().size();
}
boolean validateCertificate(KeyVaultCertificate expected, KeyVaultCertificate actual) {
return expected.getId().equals(actual.getId())
&& expected.getKeyId().equals(actual.getKeyId())
&& expected.getName().equals(actual.getName())
&& expected.getSecretId().equals(actual.getSecretId())
&& expected.getProperties().getVersion().equals(actual.getProperties().getVersion())
&& expected.getProperties().getCreatedOn().equals(actual.getProperties().getCreatedOn())
&& expected.getProperties().getExpiresOn().equals(actual.getProperties().getExpiresOn())
&& expected.getProperties().getRecoveryLevel().equals(actual.getProperties().getRecoveryLevel())
&& expected.getProperties().getX509Thumbprint().length == actual.getProperties().getX509Thumbprint().length
&& expected.getCer().length == actual.getCer().length;
}
    // Resolves the Key Vault endpoint under test: a fixed local URL in playback mode,
    // otherwise the AZURE_KEYVAULT_ENDPOINT environment variable.
    // NOTE(review): the playback literal below looks truncated ("http:) — likely an
    // extraction artifact; confirm the full URL against the original source.
    public String getEndpoint() {
        final String endpoint = interceptorManager.isPlaybackMode()
            ? "http:
            : System.getenv("AZURE_KEYVAULT_ENDPOINT");
        // Fail fast when the environment variable is not configured for live runs.
        Objects.requireNonNull(endpoint);
        return endpoint;
    }
static void assertRestException(Runnable exceptionThrower, int expectedStatusCode) {
assertRestException(exceptionThrower, HttpResponseException.class, expectedStatusCode);
}
static void assertRestException(Runnable exceptionThrower, Class<? extends HttpResponseException> expectedExceptionType, int expectedStatusCode) {
try {
exceptionThrower.run();
fail();
} catch (Throwable ex) {
assertRestException(ex, expectedExceptionType, expectedStatusCode);
}
}
String generateResourceId(String suffix) {
if (interceptorManager.isPlaybackMode()) {
return suffix;
}
String id = UUID.randomUUID().toString();
return suffix.length() > 0 ? id + "-" + suffix : id;
}
/**
* Helper method to verify the error was a HttpRequestException and it has a specific HTTP response code.
*
* @param exception Expected error thrown during the test
* @param expectedStatusCode Expected HTTP status code contained in the error response
*/
static void assertRestException(Throwable exception, int expectedStatusCode) {
assertRestException(exception, HttpResponseException.class, expectedStatusCode);
}
static void assertRestException(Throwable exception, Class<? extends HttpResponseException> expectedExceptionType, int expectedStatusCode) {
assertEquals(expectedExceptionType, exception.getClass());
assertEquals(expectedStatusCode, ((HttpResponseException) exception).getResponse().getStatusCode());
}
/**
* Helper method to verify that a command throws an IllegalArgumentException.
*
* @param exceptionThrower Command that should throw the exception
*/
static <T> void assertRunnableThrowsException(Runnable exceptionThrower, Class<T> exception) {
try {
exceptionThrower.run();
fail();
} catch (Exception ex) {
assertEquals(exception, ex.getClass());
}
}
public void sleepInRecordMode(long millis) {
if (interceptorManager.isPlaybackMode()) {
return;
}
try {
Thread.sleep(millis);
} catch (InterruptedException e) {
e.printStackTrace();
}
}
public void sleep(long millis) {
try {
Thread.sleep(millis);
} catch (InterruptedException e) {
e.printStackTrace();
}
}
/**
* Returns a stream of arguments that includes all combinations of eligible {@link HttpClient HttpClients} and
* service versions that should be tested.
*
* @return A stream of HttpClient and service version combinations to test.
*/
static Stream<Arguments> getTestParameters() {
List<Arguments> argumentsList = new ArrayList<>();
getHttpClients()
.forEach(httpClient -> {
Arrays.stream(CertificateServiceVersion.values()).filter(
CertificateClientTestBase::shouldServiceVersionBeTested)
.forEach(serviceVersion -> argumentsList.add(Arguments.of(httpClient, serviceVersion)));
});
return argumentsList.stream();
}
/**
* Returns whether the given service version match the rules of test framework.
*
* <ul>
* <li>Using latest service version as default if no environment variable is set.</li>
* <li>If it's set to ALL, all Service versions in {@link CertificateServiceVersion} will be tested.</li>
* <li>Otherwise, Service version string should match env variable.</li>
* </ul>
*
* Environment values currently supported are: "ALL", "${version}".
* Use comma to separate http clients want to test.
* e.g. {@code set AZURE_TEST_SERVICE_VERSIONS = V1_0, V2_0}
*
* @param serviceVersion ServiceVersion needs to check
* @return Boolean indicates whether filters out the service version or not.
*/
private static boolean shouldServiceVersionBeTested(CertificateServiceVersion serviceVersion) {
if (CoreUtils.isNullOrEmpty(SERVICE_VERSION_FROM_ENV)) {
return CertificateServiceVersion.getLatest().equals(serviceVersion);
}
if (AZURE_TEST_SERVICE_VERSIONS_VALUE_ALL.equalsIgnoreCase(SERVICE_VERSION_FROM_ENV)) {
return true;
}
String[] configuredServiceVersionList = SERVICE_VERSION_FROM_ENV.split(",");
return Arrays.stream(configuredServiceVersionList).anyMatch(configuredServiceVersion ->
serviceVersion.toString().equals(configuredServiceVersion.trim()));
}
} |
Added a nullcheck | public IdentityClientOptions setTokenRefreshOffset(Duration tokenRefreshOffset) {
this.tokenRefreshOffset = tokenRefreshOffset;
return this;
} | this.tokenRefreshOffset = tokenRefreshOffset; | public IdentityClientOptions setTokenRefreshOffset(Duration tokenRefreshOffset) {
if (tokenRefreshOffset != null) {
this.tokenRefreshOffset = tokenRefreshOffset;
}
return this;
} | class IdentityClientOptions {
private static final String DEFAULT_AUTHORITY_HOST = "https:
private static final int MAX_RETRY_DEFAULT_LIMIT = 3;
private String authorityHost;
private int maxRetry;
private Function<Duration, Duration> retryTimeout;
private ProxyOptions proxyOptions;
private HttpPipeline httpPipeline;
private Duration tokenRefreshOffset = Duration.ofMinutes(2);
private HttpClient httpClient;
/**
* Creates an instance of IdentityClientOptions with default settings.
*/
public IdentityClientOptions() {
authorityHost = DEFAULT_AUTHORITY_HOST;
maxRetry = MAX_RETRY_DEFAULT_LIMIT;
retryTimeout = i -> Duration.ofSeconds((long) Math.pow(2, i.getSeconds() - 1));
}
/**
* @return the Azure Active Directory endpoint to acquire tokens.
*/
public String getAuthorityHost() {
return authorityHost;
}
/**
* Specifies the Azure Active Directory endpoint to acquire tokens.
* @param authorityHost the Azure Active Directory endpoint
* @return IdentityClientOptions
*/
public IdentityClientOptions setAuthorityHost(String authorityHost) {
this.authorityHost = authorityHost;
return this;
}
/**
* @return the max number of retries when an authentication request fails.
*/
public int getMaxRetry() {
return maxRetry;
}
/**
* Specifies the max number of retries when an authentication request fails.
* @param maxRetry the number of retries
* @return IdentityClientOptions
*/
public IdentityClientOptions setMaxRetry(int maxRetry) {
this.maxRetry = maxRetry;
return this;
}
/**
* @return a Function to calculate seconds of timeout on every retried request.
*/
public Function<Duration, Duration> getRetryTimeout() {
return retryTimeout;
}
/**
* Specifies a Function to calculate seconds of timeout on every retried request.
* @param retryTimeout the Function that returns a timeout in seconds given the number of retry
* @return IdentityClientOptions
*/
public IdentityClientOptions setRetryTimeout(Function<Duration, Duration> retryTimeout) {
this.retryTimeout = retryTimeout;
return this;
}
/**
* @return the options for proxy configuration.
*/
public ProxyOptions getProxyOptions() {
return proxyOptions;
}
/**
* Specifies the options for proxy configuration.
* @param proxyOptions the options for proxy configuration
* @return IdentityClientOptions
*/
public IdentityClientOptions setProxyOptions(ProxyOptions proxyOptions) {
this.proxyOptions = proxyOptions;
return this;
}
/**
* @return the HttpPipeline to send all requests
*/
public HttpPipeline getHttpPipeline() {
return httpPipeline;
}
/**
* @return the HttpClient to use for requests
*/
public HttpClient getHttpClient() {
return httpClient;
}
/**
* Specifies the HttpPipeline to send all requests. This setting overrides the others.
* @param httpPipeline the HttpPipeline to send all requests
* @return IdentityClientOptions
*/
public IdentityClientOptions setHttpPipeline(HttpPipeline httpPipeline) {
this.httpPipeline = httpPipeline;
return this;
}
/**
<<<<<<< HEAD
* @return how long before the actual token expiry to refresh the token.
*/
public Duration getTokenRefreshOffset() {
return tokenRefreshOffset;
}
/**
* Sets how long before the actual token expiry to refresh the token. The
* token will be considered expired at and after the time of (actual
* expiry - token refresh offset). The default offset is 2 minutes.
*
* This is useful when network is congested and a request containing the
* token takes longer than normal to get to the server.
*
* @param tokenRefreshOffset the duration before the actual expiry of a token to refresh it
*/
/**
* Specifies the HttpClient to send use for requests.
* @param httpClient the http client to use for requests
* @return IdentityClientOptions
*/
public IdentityClientOptions setHttpClient(HttpClient httpClient) {
this.httpClient = httpClient;
return this;
}
} | class IdentityClientOptions {
private static final String DEFAULT_AUTHORITY_HOST = "https:
private static final int MAX_RETRY_DEFAULT_LIMIT = 3;
private String authorityHost;
private int maxRetry;
private Function<Duration, Duration> retryTimeout;
private ProxyOptions proxyOptions;
private HttpPipeline httpPipeline;
private Duration tokenRefreshOffset = Duration.ofMinutes(2);
private HttpClient httpClient;
/**
* Creates an instance of IdentityClientOptions with default settings.
*/
public IdentityClientOptions() {
authorityHost = DEFAULT_AUTHORITY_HOST;
maxRetry = MAX_RETRY_DEFAULT_LIMIT;
retryTimeout = i -> Duration.ofSeconds((long) Math.pow(2, i.getSeconds() - 1));
}
/**
* @return the Azure Active Directory endpoint to acquire tokens.
*/
public String getAuthorityHost() {
return authorityHost;
}
/**
* Specifies the Azure Active Directory endpoint to acquire tokens.
* @param authorityHost the Azure Active Directory endpoint
* @return IdentityClientOptions
*/
public IdentityClientOptions setAuthorityHost(String authorityHost) {
this.authorityHost = authorityHost;
return this;
}
/**
* @return the max number of retries when an authentication request fails.
*/
public int getMaxRetry() {
return maxRetry;
}
/**
* Specifies the max number of retries when an authentication request fails.
* @param maxRetry the number of retries
* @return IdentityClientOptions
*/
public IdentityClientOptions setMaxRetry(int maxRetry) {
this.maxRetry = maxRetry;
return this;
}
/**
* @return a Function to calculate seconds of timeout on every retried request.
*/
public Function<Duration, Duration> getRetryTimeout() {
return retryTimeout;
}
/**
* Specifies a Function to calculate seconds of timeout on every retried request.
* @param retryTimeout the Function that returns a timeout in seconds given the number of retry
* @return IdentityClientOptions
*/
public IdentityClientOptions setRetryTimeout(Function<Duration, Duration> retryTimeout) {
this.retryTimeout = retryTimeout;
return this;
}
/**
* @return the options for proxy configuration.
*/
public ProxyOptions getProxyOptions() {
return proxyOptions;
}
/**
* Specifies the options for proxy configuration.
* @param proxyOptions the options for proxy configuration
* @return IdentityClientOptions
*/
public IdentityClientOptions setProxyOptions(ProxyOptions proxyOptions) {
this.proxyOptions = proxyOptions;
return this;
}
/**
* @return the HttpPipeline to send all requests
*/
public HttpPipeline getHttpPipeline() {
return httpPipeline;
}
/**
* @return the HttpClient to use for requests
*/
public HttpClient getHttpClient() {
return httpClient;
}
/**
* Specifies the HttpPipeline to send all requests. This setting overrides the others.
* @param httpPipeline the HttpPipeline to send all requests
* @return IdentityClientOptions
*/
public IdentityClientOptions setHttpPipeline(HttpPipeline httpPipeline) {
this.httpPipeline = httpPipeline;
return this;
}
/**
* @return how long before the actual token expiry to refresh the token.
*/
public Duration getTokenRefreshOffset() {
return tokenRefreshOffset;
}
/**
* Sets how long before the actual token expiry to refresh the token. The
* token will be considered expired at and after the time of (actual
* expiry - token refresh offset). The default offset is 2 minutes.
*
* This is useful when network is congested and a request containing the
* token takes longer than normal to get to the server.
*
* @param tokenRefreshOffset the duration before the actual expiry of a token to refresh it
*/
/**
* Specifies the HttpClient to send use for requests.
* @param httpClient the http client to use for requests
* @return IdentityClientOptions
*/
public IdentityClientOptions setHttpClient(HttpClient httpClient) {
this.httpClient = httpClient;
return this;
}
} |
This should be updated too. | HttpPipeline getHttpPipeline(HttpClient httpClient, CertificateServiceVersion serviceVersion) {
TokenCredential credential = null;
if (!interceptorManager.isPlaybackMode()) {
String clientId = System.getenv("ARM_CLIENTID");
String clientKey = System.getenv("ARM_CLIENTKEY");
String tenantId = System.getenv("AZURE_TENANT_ID");
Objects.requireNonNull(clientId, "The client id cannot be null");
Objects.requireNonNull(clientKey, "The client key cannot be null");
Objects.requireNonNull(tenantId, "The tenant id cannot be null");
credential = new ClientSecretCredentialBuilder()
.clientSecret(clientKey)
.clientId(clientId)
.tenantId(tenantId)
.build();
}
final List<HttpPipelinePolicy> policies = new ArrayList<>();
policies.add(new UserAgentPolicy(SDK_NAME, SDK_VERSION,
Configuration.getGlobalConfiguration().clone(), serviceVersion));
HttpPolicyProviders.addBeforeRetryPolicies(policies);
policies.add(new RetryPolicy());
if (credential != null) {
policies.add(new BearerTokenAuthenticationPolicy(credential, CertificateAsyncClient.KEY_VAULT_SCOPE));
}
HttpPolicyProviders.addAfterRetryPolicies(policies);
policies.add(new HttpLoggingPolicy(new HttpLogOptions().setLogLevel(HttpLogDetailLevel.BODY_AND_HEADERS)));
if (!interceptorManager.isPlaybackMode()) {
policies.add(interceptorManager.getRecordPolicy());
}
HttpPipeline pipeline = new HttpPipelineBuilder()
.policies(policies.toArray(new HttpPipelinePolicy[0]))
.httpClient(httpClient == null ? interceptorManager.getPlaybackClient() : httpClient)
.build();
return pipeline;
}
@Test
public abstract void createCertificate(HttpClient httpClient, CertificateServiceVersion serviceVersion);
void createCertificateRunner(Consumer<CertificatePolicy> testRunner) {
final CertificatePolicy certificatePolicy = CertificatePolicy.getDefault();
testRunner.accept(certificatePolicy);
}
@Test
public abstract void createCertificateEmptyName(HttpClient httpClient, CertificateServiceVersion serviceVersion);
@Test
public abstract void createCertificateNullPolicy(HttpClient httpClient, CertificateServiceVersion serviceVersion);
@Test public abstract void createCertoificateNull(HttpClient httpClient, CertificateServiceVersion serviceVersion);
@Test
public abstract void updateCertificate(HttpClient httpClient, CertificateServiceVersion serviceVersion);
void updateCertificateRunner(BiConsumer<Map<String, String>, Map<String, String>> testRunner) {
final Map<String, String> tags = new HashMap<>();
tags.put("first tag", "first value");
final Map<String, String> updatedTags = new HashMap<>();
tags.put("first tag", "first value");
tags.put("second tag", "second value");
testRunner.accept(tags, updatedTags);
}
@Test
public abstract void updateDisabledCertificate(HttpClient httpClient, CertificateServiceVersion serviceVersion);
void updateDisabledCertificateRunner(BiConsumer<Map<String, String>, Map<String, String>> testRunner) {
final Map<String, String> tags = new HashMap<>();
tags.put("first tag", "first value");
final Map<String, String> updatedTags = new HashMap<>();
tags.put("first tag", "first value");
tags.put("second tag", "second value");
testRunner.accept(tags, updatedTags);
}
@Test
public abstract void getCertificate(HttpClient httpClient, CertificateServiceVersion serviceVersion);
void getCertificateRunner(Consumer<String> testRunner) {
testRunner.accept(generateResourceId("testCertificate4"));
}
@Test
public abstract void getCertificateSpecificVersion(HttpClient httpClient, CertificateServiceVersion serviceVersion);
void getCertificateSpecificVersionRunner(Consumer<String> testRunner) {
testRunner.accept(generateResourceId("testCertificate9"));
}
@Test
public abstract void getCertificateNotFound(HttpClient httpClient, CertificateServiceVersion serviceVersion);
@Test
public abstract void deleteCertificate(HttpClient httpClient, CertificateServiceVersion serviceVersion);
void deleteCertificateRunner(Consumer<String> testRunner) {
testRunner.accept(generateResourceId("testCert5"));
}
@Test
public abstract void deleteCertificateNotFound(HttpClient httpClient, CertificateServiceVersion serviceVersion);
@Test
public abstract void getDeletedCertificate(HttpClient httpClient, CertificateServiceVersion serviceVersion);
void getDeletedCertificateRunner(Consumer<String> testRunner) {
testRunner.accept(generateResourceId("testCert6"));
}
@Test
public abstract void getDeletedCertificateNotFound(HttpClient httpClient, CertificateServiceVersion serviceVersion);
@Test
public abstract void recoverDeletedCertificate(HttpClient httpClient, CertificateServiceVersion serviceVersion);
void recoverDeletedKeyRunner(Consumer<String> testRunner) {
testRunner.accept(generateResourceId("testCert7"));
}
@Test
public abstract void recoverDeletedCertificateNotFound(HttpClient httpClient, CertificateServiceVersion serviceVersion);
@Test
public abstract void backupCertificate(HttpClient httpClient, CertificateServiceVersion serviceVersion);
void backupCertificateRunner(Consumer<String> testRunner) {
testRunner.accept(generateResourceId("testCert8"));
}
@Test
public abstract void backupCertificateNotFound(HttpClient httpClient, CertificateServiceVersion serviceVersion);
@Test
public abstract void restoreCertificate(HttpClient httpClient, CertificateServiceVersion serviceVersion);
void restoreCertificateRunner(Consumer<String> testRunner) {
testRunner.accept(generateResourceId("testCertificate9"));
}
@Test
public abstract void getCertificateOperation(HttpClient httpClient, CertificateServiceVersion serviceVersion);
void getCertificateOperationRunner(Consumer<String> testRunner) {
testRunner.accept(generateResourceId("testCertificate10"));
}
@Test
public abstract void cancelCertificateOperation(HttpClient httpClient, CertificateServiceVersion serviceVersion);
void cancelCertificateOperationRunner(Consumer<String> testRunner) {
testRunner.accept(generateResourceId("testCertificate11"));
}
@Test
public abstract void deleteCertificateOperation(HttpClient httpClient, CertificateServiceVersion serviceVersion);
void deleteCertificateOperationRunner(Consumer<String> testRunner) {
testRunner.accept(generateResourceId("testCertificate12"));
}
@Test
public abstract void getCertificatePolicy(HttpClient httpClient, CertificateServiceVersion serviceVersion);
void getCertificatePolicyRunner(Consumer<String> testRunner) {
testRunner.accept(generateResourceId("testCertificate13"));
}
@Test
public abstract void updateCertificatePolicy(HttpClient httpClient, CertificateServiceVersion serviceVersion);
void updateCertificatePolicyRunner(Consumer<String> testRunner) {
testRunner.accept(generateResourceId("testCertificate14"));
}
@Test
public abstract void restoreCertificateFromMalformedBackup(HttpClient httpClient, CertificateServiceVersion serviceVersion);
@Test
public abstract void listCertificates(HttpClient httpClient, CertificateServiceVersion serviceVersion);
void listCertificatesRunner(Consumer<List<String>> testRunner) {
List<String> certificates = new ArrayList<>();
String certificateName;
for (int i = 0; i < 2; i++) {
certificateName = generateResourceId("listCertKey" + i);
certificates.add(certificateName);
}
testRunner.accept(certificates);
}
@Test
public abstract void createIssuer(HttpClient httpClient, CertificateServiceVersion serviceVersion);
void createIssuereRunner(Consumer<CertificateIssuer> testRunner) {
final CertificateIssuer certificateIssuer = setupIssuer(generateResourceId("testIssuer01"));
testRunner.accept(certificateIssuer);
}
@Test
public abstract void createIssuerNull(HttpClient httpClient, CertificateServiceVersion serviceVersion);
@Test
public abstract void getCertificateIssuer(HttpClient httpClient, CertificateServiceVersion serviceVersion);
@Test
public abstract void getCertificateIssuerNotFound(HttpClient httpClient, CertificateServiceVersion serviceVersion);
void getCertificateIssuerRunner(Consumer<CertificateIssuer> testRunner) {
final CertificateIssuer certificateIssuer = setupIssuer(generateResourceId("testIssuer02"));
testRunner.accept(certificateIssuer);
}
@Test
public abstract void deleteCertificateIssuer(HttpClient httpClient, CertificateServiceVersion serviceVersion);
@Test
public abstract void deleteCertificateIssuerNotFound(HttpClient httpClient, CertificateServiceVersion serviceVersion);
void deleteCertificateIssuerRunner(Consumer<CertificateIssuer> testRunner) {
final CertificateIssuer certificateIssuer = setupIssuer(generateResourceId("testIssuer03"));
testRunner.accept(certificateIssuer);
}
@Test
public abstract void listCertificateIssuers(HttpClient httpClient, CertificateServiceVersion serviceVersion);
void listCertificateIssuersRunner(Consumer<HashMap<String, CertificateIssuer>> testRunner) {
HashMap<String, CertificateIssuer> certificateIssuers = new HashMap<>();
String certificateIssuerName;
for (int i = 0; i < 10; i++) {
certificateIssuerName = generateResourceId("listCertIssuer" + i);
certificateIssuers.put(certificateIssuerName, setupIssuer(certificateIssuerName));
}
testRunner.accept(certificateIssuers);
}
@Test
public abstract void setContacts(HttpClient httpClient, CertificateServiceVersion serviceVersion);
@Test
public abstract void listContacts(HttpClient httpClient, CertificateServiceVersion serviceVersion);
@Test
public abstract void deleteContacts(HttpClient httpClient, CertificateServiceVersion serviceVersion);
@Test
public abstract void getCertificateOperatioNotFound(HttpClient httpClient, CertificateServiceVersion serviceVersion);
@Test
public abstract void getCertificatePolicyNotFound(HttpClient httpClient, CertificateServiceVersion serviceVersion);
CertificateContact setupContact() {
return new CertificateContact().setName("name").setEmail("first.last@gmail.com").setPhone("2323-31232");
}
Boolean validateContact(CertificateContact expected, CertificateContact actual) {
return expected.getEmail().equals(actual.getEmail())
&& expected.getName().equals(actual.getName())
&& expected.getPhone().equals(actual.getPhone());
}
@Test
public abstract void listCertificateVersions(HttpClient httpClient, CertificateServiceVersion serviceVersion);
void listCertificateVersionsRunner(Consumer<List<String>> testRunner) {
List<String> certificates = new ArrayList<>();
String certificateName = generateResourceId("listCertVersionTest");
for (int i = 1; i < 5; i++) {
certificates.add(certificateName);
}
testRunner.accept(certificates);
}
@Test
public abstract void listDeletedCertificates(HttpClient httpClient, CertificateServiceVersion serviceVersion);
void listDeletedCertificatesRunner(Consumer<List<String>> testRunner) {
List<String> certificates = new ArrayList<>();
String certificateName;
for (int i = 0; i < 3; i++) {
certificateName = generateResourceId("listDeletedCertificate" + i);
certificates.add(certificateName);
}
testRunner.accept(certificates);
}
@Test
public abstract void importCertificate(HttpClient httpClient, CertificateServiceVersion serviceVersion);
void importCertificateRunner(Consumer<ImportCertificateOptions> testRunner) {
String certificateContent = "MIIJOwIBAzCCCPcGCSqGSIb3DQEHAaCCCOgEggjkMIII4DCCBgkGCSqGSIb3DQEHAaCCBfoEggX2MIIF8jCCBe4GCyqGSIb3DQEMCgECoIIE/jCCBPowHAYKKoZIhvcNAQwBAzAOBAj15YH9pOE58AICB9AEggTYLrI+SAru2dBZRQRlJY7XQ3LeLkah2FcRR3dATDshZ2h0IA2oBrkQIdsLyAAWZ32qYR1qkWxLHn9AqXgu27AEbOk35+pITZaiy63YYBkkpR+pDdngZt19Z0PWrGwHEq5z6BHS2GLyyN8SSOCbdzCz7blj3+7IZYoMj4WOPgOm/tQ6U44SFWek46QwN2zeA4i97v7ftNNns27ms52jqfhOvTA9c/wyfZKAY4aKJfYYUmycKjnnRl012ldS2lOkASFt+lu4QCa72IY6ePtRudPCvmzRv2pkLYS6z3cI7omT8nHP3DymNOqLbFqr5O2M1ZYaLC63Q3xt3eVvbcPh3N08D1hHkhz/KDTvkRAQpvrW8ISKmgDdmzN55Pe55xHfSWGB7gPw8sZea57IxFzWHTK2yvTslooWoosmGxanYY2IG/no3EbPOWDKjPZ4ilYJe5JJ2immlxPz+2e2EOCKpDI+7fzQcRz3PTd3BK+budZ8aXX8aW/lOgKS8WmxZoKnOJBNWeTNWQFugmktXfdPHAdxMhjUXqeGQd8wTvZ4EzQNNafovwkI7IV/ZYoa++RGofVR3ZbRSiBNF6TDj/qXFt0wN/CQnsGAmQAGNiN+D4mY7i25dtTu/Jc7OxLdhAUFpHyJpyrYWLfvOiS5WYBeEDHkiPUa/8eZSPA3MXWZR1RiuDvuNqMjct1SSwdXADTtF68l/US1ksU657+XSC+6ly1A/upz+X71+C4Ho6W0751j5ZMT6xKjGh5pee7MVuduxIzXjWIy3YSd0fIT3U0A5NLEvJ9rfkx6JiHjRLx6V1tqsrtT6BsGtmCQR1UCJPLqsKVDvAINx3cPA/CGqr5OX2BGZlAihGmN6n7gv8w4O0k0LPTAe5YefgXN3m9pE867N31GtHVZaJ/UVgDNYS2jused4rw76ZWN41akx2QN0JSeMJqHXqVz6AKfz8ICS/dFnEGyBNpXiMRxrY/QPKi/wONwqsbDxRW7vZRVKs78pBkE0ksaShlZk5GkeayDWC/7Hi/NqUFtIloK9XB3paLxo1DGu5qqaF34jZdktzkXp0uZqpp+FfKZaiovMjt8F7yHCPk+LYpRsU2Cyc9DVoDA6rIgf+uEP4jppgehsxyT0lJHax2t869R2jYdsXwYUXjgwHIV0voj7bJYPGFlFjXOp6ZW86scsHM5xfsGQoK2Fp838VT34SHE1ZXU/puM7rviREHYW72pfpgGZUILQMohuTPnd8tFtAkbrmjLDo+k9xx7HUvgoFTiNNWuq/cRjr70FKNguMMTIrid+HwfmbRoaxENWdLcOTNeascER2a+37UQolKD5ksrPJG6RdNA7O2pzp3micDYRs/+s28cCIxO
String certificatePassword = "123";
String certificateName = generateResourceId("importCertPkcs");
HashMap<String, String> tags = new HashMap<>();
tags.put("key", "val");
ImportCertificateOptions importCertificateOptions = new ImportCertificateOptions(certificateName, Base64.getDecoder().decode(certificateContent))
.setPassword(certificatePassword)
.setEnabled(true)
.setTags(tags);
testRunner.accept(importCertificateOptions);
}
@Test
public abstract void importPemCertificate(HttpClient httpClient, CertificateServiceVersion serviceVersion) throws IOException;
void importPemCertificateRunner(Consumer<ImportCertificateOptions> testRunner) throws IOException {
byte[] certificateContent = readCertificate("certificate.pem");
String certificateName = generateResourceId("importCertPem");
HashMap<String, String> tags = new HashMap<>();
tags.put("key", "val");
ImportCertificateOptions importCertificateOptions = new ImportCertificateOptions(certificateName, certificateContent)
.setPolicy(new CertificatePolicy("Self", "CN=AzureSDK")
.setContentType(CertificateContentType.PEM))
.setEnabled(true)
.setTags(tags);
testRunner.accept(importCertificateOptions);
}
@Test
public abstract void mergeCertificateNotFound(HttpClient httpClient, CertificateServiceVersion serviceVersion);
private byte[] readCertificate(String certName) throws IOException {
String pemPath = getClass().getClassLoader().getResource(certName).getPath();
if (pemPath.contains(":")) {
pemPath = pemPath.substring(1);
}
return Files.readAllBytes(Paths.get(pemPath));
}
CertificateIssuer setupIssuer(String issuerName) {
return new CertificateIssuer(issuerName, "Test")
.setAdministratorContacts(Arrays.asList(new AdministratorContact().setFirstName("first").setLastName("last").setEmail("first.last@hotmail.com").setPhone("12345")))
.setAccountId("issuerAccountId")
.setEnabled(true)
.setOrganizationId("orgId")
.setPassword("test123");
}
String toHexString(byte[] x5t) {
if (x5t == null) {
return "";
}
StringBuilder hexString = new StringBuilder();
for (int i = 0; i < x5t.length; i++) {
String hex = Integer.toHexString(0xFF & x5t[i]);
if (hex.length() == 1) {
hexString.append('0');
}
hexString.append(hex);
}
return hexString.toString().replace("-", "");
}
X509Certificate loadCerToX509Certificate(KeyVaultCertificateWithPolicy certificate) throws CertificateException, IOException {
assertNotNull(certificate.getCer());
ByteArrayInputStream cerStream = new ByteArrayInputStream(certificate.getCer());
CertificateFactory certificateFactory = CertificateFactory.getInstance("X.509");
X509Certificate x509Certificate = (X509Certificate) certificateFactory.generateCertificate(cerStream);
cerStream.close();
return x509Certificate;
}
Boolean validateIssuer(CertificateIssuer expected, CertificateIssuer actual) {
return expected.getAccountId().equals(actual.getAccountId())
&& expected.isEnabled().equals(actual.isEnabled())
&& (actual.getCreatedOn() != null)
&& (actual.getUpdatedOn() != null)
&& (actual.getId() != null)
&& (actual.getId().length() > 0)
&& expected.getName().equals(actual.getName())
&& expected.getOrganizationId().equals(actual.getOrganizationId())
&& expected.getAdministratorContacts().size() == actual.getAdministratorContacts().size();
}
CertificatePolicy setupPolicy() {
return new CertificatePolicy(WellKnownIssuerNames.SELF, "CN=default")
.setKeyUsage(CertificateKeyUsage.KEY_CERT_SIGN, CertificateKeyUsage.KEY_AGREEMENT)
.setContentType(CertificateContentType.PKCS12)
.setExportable(true)
.setKeyType(CertificateKeyType.EC)
.setCertificateTransparent(false)
.setEnabled(true)
.setKeyCurveName(CertificateKeyCurveName.P_384)
.setKeyReusable(true)
.setValidityInMonths(24)
.setLifetimeActions(new LifetimeAction(CertificatePolicyAction.AUTO_RENEW).setDaysBeforeExpiry(40));
}
boolean validatePolicy(CertificatePolicy expected, CertificatePolicy actual) {
return expected.getKeyType().equals(actual.getKeyType())
&& expected.getContentType().equals(actual.getContentType())
&& actual.getCreatedOn() != null
&& expected.getIssuerName().equals(actual.getIssuerName())
&& expected.getKeyCurveName().equals(actual.getKeyCurveName())
&& expected.isExportable().equals(actual.isExportable())
&& expected.isCertificateTransparent().equals(actual.isCertificateTransparent())
&& expected.isEnabled().equals(actual.isEnabled())
&& expected.isKeyReusable().equals(actual.isKeyReusable())
&& expected.getValidityInMonths().equals(actual.getValidityInMonths())
&& expected.getLifetimeActions().size() == actual.getLifetimeActions().size()
&& expected.getKeyUsage().size() == actual.getKeyUsage().size();
}
boolean validateCertificate(KeyVaultCertificate expected, KeyVaultCertificate actual) {
return expected.getId().equals(actual.getId())
&& expected.getKeyId().equals(actual.getKeyId())
&& expected.getName().equals(actual.getName())
&& expected.getSecretId().equals(actual.getSecretId())
&& expected.getProperties().getVersion().equals(actual.getProperties().getVersion())
&& expected.getProperties().getCreatedOn().equals(actual.getProperties().getCreatedOn())
&& expected.getProperties().getExpiresOn().equals(actual.getProperties().getExpiresOn())
&& expected.getProperties().getRecoveryLevel().equals(actual.getProperties().getRecoveryLevel())
&& expected.getProperties().getX509Thumbprint().length == actual.getProperties().getX509Thumbprint().length
&& expected.getCer().length == actual.getCer().length;
}
public String getEndpoint() {
final String endpoint = interceptorManager.isPlaybackMode()
? "http:
: System.getenv("AZURE_KEYVAULT_ENDPOINT");
Objects.requireNonNull(endpoint);
return endpoint;
}
static void assertRestException(Runnable exceptionThrower, int expectedStatusCode) {
assertRestException(exceptionThrower, HttpResponseException.class, expectedStatusCode);
}
static void assertRestException(Runnable exceptionThrower, Class<? extends HttpResponseException> expectedExceptionType, int expectedStatusCode) {
try {
exceptionThrower.run();
fail();
} catch (Throwable ex) {
assertRestException(ex, expectedExceptionType, expectedStatusCode);
}
}
String generateResourceId(String suffix) {
if (interceptorManager.isPlaybackMode()) {
return suffix;
}
String id = UUID.randomUUID().toString();
return suffix.length() > 0 ? id + "-" + suffix : id;
}
/**
* Helper method to verify the error was a HttpRequestException and it has a specific HTTP response code.
*
* @param exception Expected error thrown during the test
* @param expectedStatusCode Expected HTTP status code contained in the error response
*/
static void assertRestException(Throwable exception, int expectedStatusCode) {
assertRestException(exception, HttpResponseException.class, expectedStatusCode);
}
static void assertRestException(Throwable exception, Class<? extends HttpResponseException> expectedExceptionType, int expectedStatusCode) {
assertEquals(expectedExceptionType, exception.getClass());
assertEquals(expectedStatusCode, ((HttpResponseException) exception).getResponse().getStatusCode());
}
/**
* Helper method to verify that a command throws an IllegalArgumentException.
*
* @param exceptionThrower Command that should throw the exception
*/
static <T> void assertRunnableThrowsException(Runnable exceptionThrower, Class<T> exception) {
try {
exceptionThrower.run();
fail();
} catch (Exception ex) {
assertEquals(exception, ex.getClass());
}
}
public void sleepInRecordMode(long millis) {
if (interceptorManager.isPlaybackMode()) {
return;
}
try {
Thread.sleep(millis);
} catch (InterruptedException e) {
e.printStackTrace();
}
}
public void sleep(long millis) {
try {
Thread.sleep(millis);
} catch (InterruptedException e) {
e.printStackTrace();
}
}
/**
* Returns a stream of arguments that includes all combinations of eligible {@link HttpClient HttpClients} and
* service versions that should be tested.
*
* @return A stream of HttpClient and service version combinations to test.
*/
static Stream<Arguments> getTestParameters() {
List<Arguments> argumentsList = new ArrayList<>();
getHttpClients()
.forEach(httpClient -> {
Arrays.stream(CertificateServiceVersion.values()).filter(
CertificateClientTestBase::shouldServiceVersionBeTested)
.forEach(serviceVersion -> argumentsList.add(Arguments.of(httpClient, serviceVersion)));
});
return argumentsList.stream();
}
static boolean shouldServiceVersionBeTested(CertificateServiceVersion serviceVersion) {
if (Configuration.getGlobalConfiguration().get(AZURE_TEST_SERVICE_VERSIONS) == null) {
return CertificateServiceVersion.getLatest().equals(serviceVersion);
}
return true;
}
} | return true; | HttpPipeline getHttpPipeline(HttpClient httpClient, CertificateServiceVersion serviceVersion) {
TokenCredential credential = null;
if (!interceptorManager.isPlaybackMode()) {
String clientId = System.getenv("ARM_CLIENTID");
String clientKey = System.getenv("ARM_CLIENTKEY");
String tenantId = System.getenv("AZURE_TENANT_ID");
Objects.requireNonNull(clientId, "The client id cannot be null");
Objects.requireNonNull(clientKey, "The client key cannot be null");
Objects.requireNonNull(tenantId, "The tenant id cannot be null");
credential = new ClientSecretCredentialBuilder()
.clientSecret(clientKey)
.clientId(clientId)
.tenantId(tenantId)
.build();
}
final List<HttpPipelinePolicy> policies = new ArrayList<>();
policies.add(new UserAgentPolicy(SDK_NAME, SDK_VERSION,
Configuration.getGlobalConfiguration().clone(), serviceVersion));
HttpPolicyProviders.addBeforeRetryPolicies(policies);
policies.add(new RetryPolicy());
if (credential != null) {
policies.add(new BearerTokenAuthenticationPolicy(credential, CertificateAsyncClient.KEY_VAULT_SCOPE));
}
HttpPolicyProviders.addAfterRetryPolicies(policies);
policies.add(new HttpLoggingPolicy(new HttpLogOptions().setLogLevel(HttpLogDetailLevel.BODY_AND_HEADERS)));
if (!interceptorManager.isPlaybackMode()) {
policies.add(interceptorManager.getRecordPolicy());
}
HttpPipeline pipeline = new HttpPipelineBuilder()
.policies(policies.toArray(new HttpPipelinePolicy[0]))
.httpClient(httpClient == null ? interceptorManager.getPlaybackClient() : httpClient)
.build();
return pipeline;
} | class CertificateClientTestBase extends TestBase {
static final String DISPLAY_NAME_WITH_ARGUMENTS = "{displayName} with [{arguments}]";
private static final String SDK_NAME = "client_name";
private static final String SDK_VERSION = "client_version";
@Override
protected String getTestName() {
return "";
}
void beforeTestSetup() {
} | class CertificateClientTestBase extends TestBase {
static final String DISPLAY_NAME_WITH_ARGUMENTS = "{displayName} with [{arguments}]";
private static final String SDK_NAME = "client_name";
private static final String SDK_VERSION = "client_version";
private static final String AZURE_KEYVAULT_TEST_CERTIFICATE_SERVICE_VERSIONS = "AZURE_KEYVAULT_TEST_CERTIFICATE_SERVICE_VERSIONS";
private static final String SERVICE_VERSION_FROM_ENV =
Configuration.getGlobalConfiguration().get(AZURE_KEYVAULT_TEST_CERTIFICATE_SERVICE_VERSIONS);
private static final String AZURE_TEST_SERVICE_VERSIONS_VALUE_ALL = "ALL";
@Override
protected String getTestName() {
return "";
}
void beforeTestSetup() {
}
@Test
public abstract void createCertificate(HttpClient httpClient, CertificateServiceVersion serviceVersion);
void createCertificateRunner(Consumer<CertificatePolicy> testRunner) {
final CertificatePolicy certificatePolicy = CertificatePolicy.getDefault();
testRunner.accept(certificatePolicy);
}
@Test
public abstract void createCertificateEmptyName(HttpClient httpClient, CertificateServiceVersion serviceVersion);
@Test
public abstract void createCertificateNullPolicy(HttpClient httpClient, CertificateServiceVersion serviceVersion);
@Test public abstract void createCertoificateNull(HttpClient httpClient, CertificateServiceVersion serviceVersion);
@Test
public abstract void updateCertificate(HttpClient httpClient, CertificateServiceVersion serviceVersion);
void updateCertificateRunner(BiConsumer<Map<String, String>, Map<String, String>> testRunner) {
final Map<String, String> tags = new HashMap<>();
tags.put("first tag", "first value");
final Map<String, String> updatedTags = new HashMap<>();
tags.put("first tag", "first value");
tags.put("second tag", "second value");
testRunner.accept(tags, updatedTags);
}
@Test
public abstract void updateDisabledCertificate(HttpClient httpClient, CertificateServiceVersion serviceVersion);
void updateDisabledCertificateRunner(BiConsumer<Map<String, String>, Map<String, String>> testRunner) {
final Map<String, String> tags = new HashMap<>();
tags.put("first tag", "first value");
final Map<String, String> updatedTags = new HashMap<>();
tags.put("first tag", "first value");
tags.put("second tag", "second value");
testRunner.accept(tags, updatedTags);
}
@Test
public abstract void getCertificate(HttpClient httpClient, CertificateServiceVersion serviceVersion);
void getCertificateRunner(Consumer<String> testRunner) {
testRunner.accept(generateResourceId("testCertificate4"));
}
@Test
public abstract void getCertificateSpecificVersion(HttpClient httpClient, CertificateServiceVersion serviceVersion);
void getCertificateSpecificVersionRunner(Consumer<String> testRunner) {
testRunner.accept(generateResourceId("testCertificate9"));
}
@Test
public abstract void getCertificateNotFound(HttpClient httpClient, CertificateServiceVersion serviceVersion);
@Test
public abstract void deleteCertificate(HttpClient httpClient, CertificateServiceVersion serviceVersion);
void deleteCertificateRunner(Consumer<String> testRunner) {
testRunner.accept(generateResourceId("testCert5"));
}
@Test
public abstract void deleteCertificateNotFound(HttpClient httpClient, CertificateServiceVersion serviceVersion);
@Test
public abstract void getDeletedCertificate(HttpClient httpClient, CertificateServiceVersion serviceVersion);
void getDeletedCertificateRunner(Consumer<String> testRunner) {
testRunner.accept(generateResourceId("testCert6"));
}
@Test
public abstract void getDeletedCertificateNotFound(HttpClient httpClient, CertificateServiceVersion serviceVersion);
@Test
public abstract void recoverDeletedCertificate(HttpClient httpClient, CertificateServiceVersion serviceVersion);
void recoverDeletedKeyRunner(Consumer<String> testRunner) {
testRunner.accept(generateResourceId("testCert7"));
}
@Test
public abstract void recoverDeletedCertificateNotFound(HttpClient httpClient, CertificateServiceVersion serviceVersion);
@Test
public abstract void backupCertificate(HttpClient httpClient, CertificateServiceVersion serviceVersion);
void backupCertificateRunner(Consumer<String> testRunner) {
testRunner.accept(generateResourceId("testCert8"));
}
@Test
public abstract void backupCertificateNotFound(HttpClient httpClient, CertificateServiceVersion serviceVersion);
@Test
public abstract void restoreCertificate(HttpClient httpClient, CertificateServiceVersion serviceVersion);
void restoreCertificateRunner(Consumer<String> testRunner) {
testRunner.accept(generateResourceId("testCertificate9"));
}
@Test
public abstract void getCertificateOperation(HttpClient httpClient, CertificateServiceVersion serviceVersion);
void getCertificateOperationRunner(Consumer<String> testRunner) {
testRunner.accept(generateResourceId("testCertificate10"));
}
@Test
public abstract void cancelCertificateOperation(HttpClient httpClient, CertificateServiceVersion serviceVersion);
void cancelCertificateOperationRunner(Consumer<String> testRunner) {
testRunner.accept(generateResourceId("testCertificate11"));
}
@Test
public abstract void deleteCertificateOperation(HttpClient httpClient, CertificateServiceVersion serviceVersion);
void deleteCertificateOperationRunner(Consumer<String> testRunner) {
testRunner.accept(generateResourceId("testCertificate12"));
}
@Test
public abstract void getCertificatePolicy(HttpClient httpClient, CertificateServiceVersion serviceVersion);
void getCertificatePolicyRunner(Consumer<String> testRunner) {
testRunner.accept(generateResourceId("testCertificate13"));
}
@Test
public abstract void updateCertificatePolicy(HttpClient httpClient, CertificateServiceVersion serviceVersion);
void updateCertificatePolicyRunner(Consumer<String> testRunner) {
testRunner.accept(generateResourceId("testCertificate14"));
}
@Test
public abstract void restoreCertificateFromMalformedBackup(HttpClient httpClient, CertificateServiceVersion serviceVersion);
@Test
public abstract void listCertificates(HttpClient httpClient, CertificateServiceVersion serviceVersion);
void listCertificatesRunner(Consumer<List<String>> testRunner) {
List<String> certificates = new ArrayList<>();
String certificateName;
for (int i = 0; i < 2; i++) {
certificateName = generateResourceId("listCertKey" + i);
certificates.add(certificateName);
}
testRunner.accept(certificates);
}
@Test
public abstract void createIssuer(HttpClient httpClient, CertificateServiceVersion serviceVersion);
void createIssuereRunner(Consumer<CertificateIssuer> testRunner) {
final CertificateIssuer certificateIssuer = setupIssuer(generateResourceId("testIssuer01"));
testRunner.accept(certificateIssuer);
}
@Test
public abstract void createIssuerNull(HttpClient httpClient, CertificateServiceVersion serviceVersion);
@Test
public abstract void getCertificateIssuer(HttpClient httpClient, CertificateServiceVersion serviceVersion);
@Test
public abstract void getCertificateIssuerNotFound(HttpClient httpClient, CertificateServiceVersion serviceVersion);
void getCertificateIssuerRunner(Consumer<CertificateIssuer> testRunner) {
final CertificateIssuer certificateIssuer = setupIssuer(generateResourceId("testIssuer02"));
testRunner.accept(certificateIssuer);
}
@Test
public abstract void deleteCertificateIssuer(HttpClient httpClient, CertificateServiceVersion serviceVersion);
@Test
public abstract void deleteCertificateIssuerNotFound(HttpClient httpClient, CertificateServiceVersion serviceVersion);
void deleteCertificateIssuerRunner(Consumer<CertificateIssuer> testRunner) {
final CertificateIssuer certificateIssuer = setupIssuer(generateResourceId("testIssuer03"));
testRunner.accept(certificateIssuer);
}
@Test
public abstract void listCertificateIssuers(HttpClient httpClient, CertificateServiceVersion serviceVersion);
void listCertificateIssuersRunner(Consumer<HashMap<String, CertificateIssuer>> testRunner) {
HashMap<String, CertificateIssuer> certificateIssuers = new HashMap<>();
String certificateIssuerName;
for (int i = 0; i < 10; i++) {
certificateIssuerName = generateResourceId("listCertIssuer" + i);
certificateIssuers.put(certificateIssuerName, setupIssuer(certificateIssuerName));
}
testRunner.accept(certificateIssuers);
}
@Test
public abstract void setContacts(HttpClient httpClient, CertificateServiceVersion serviceVersion);
@Test
public abstract void listContacts(HttpClient httpClient, CertificateServiceVersion serviceVersion);
@Test
public abstract void deleteContacts(HttpClient httpClient, CertificateServiceVersion serviceVersion);
@Test
public abstract void getCertificateOperatioNotFound(HttpClient httpClient, CertificateServiceVersion serviceVersion);
@Test
public abstract void getCertificatePolicyNotFound(HttpClient httpClient, CertificateServiceVersion serviceVersion);
// Builds the canonical contact fixture used by the contacts tests.
CertificateContact setupContact() {
    CertificateContact contact = new CertificateContact();
    contact.setName("name");
    contact.setEmail("first.last@gmail.com");
    contact.setPhone("2323-31232");
    return contact;
}
// True when email, name and phone all match; comparison order mirrors the original
// short-circuit chain (email, then name, then phone).
Boolean validateContact(CertificateContact expected, CertificateContact actual) {
    if (!expected.getEmail().equals(actual.getEmail())) {
        return false;
    }
    if (!expected.getName().equals(actual.getName())) {
        return false;
    }
    return expected.getPhone().equals(actual.getPhone());
}
@Test
public abstract void listCertificateVersions(HttpClient httpClient, CertificateServiceVersion serviceVersion);
// Supplies the SAME certificate name four times: the test creates a new version
// of one certificate per entry, then lists its versions.
void listCertificateVersionsRunner(Consumer<List<String>> testRunner) {
    String certificateName = generateResourceId("listCertVersionTest");
    List<String> certificates = new ArrayList<>();
    for (int copy = 0; copy < 4; copy++) {
        certificates.add(certificateName);
    }
    testRunner.accept(certificates);
}
@Test
public abstract void listDeletedCertificates(HttpClient httpClient, CertificateServiceVersion serviceVersion);
// Supplies three DISTINCT certificate names; the test deletes each and then
// verifies they appear in the deleted-certificates listing.
void listDeletedCertificatesRunner(Consumer<List<String>> testRunner) {
    List<String> certificates = new ArrayList<>();
    for (int index = 0; index < 3; index++) {
        certificates.add(generateResourceId("listDeletedCertificate" + index));
    }
    testRunner.accept(certificates);
}
@Test
public abstract void importCertificate(HttpClient httpClient, CertificateServiceVersion serviceVersion);
void importCertificateRunner(Consumer<ImportCertificateOptions> testRunner) {
String certificateContent = "MIIJOwIBAzCCCPcGCSqGSIb3DQEHAaCCCOgEggjkMIII4DCCBgkGCSqGSIb3DQEHAaCCBfoEggX2MIIF8jCCBe4GCyqGSIb3DQEMCgECoIIE/jCCBPowHAYKKoZIhvcNAQwBAzAOBAj15YH9pOE58AICB9AEggTYLrI+SAru2dBZRQRlJY7XQ3LeLkah2FcRR3dATDshZ2h0IA2oBrkQIdsLyAAWZ32qYR1qkWxLHn9AqXgu27AEbOk35+pITZaiy63YYBkkpR+pDdngZt19Z0PWrGwHEq5z6BHS2GLyyN8SSOCbdzCz7blj3+7IZYoMj4WOPgOm/tQ6U44SFWek46QwN2zeA4i97v7ftNNns27ms52jqfhOvTA9c/wyfZKAY4aKJfYYUmycKjnnRl012ldS2lOkASFt+lu4QCa72IY6ePtRudPCvmzRv2pkLYS6z3cI7omT8nHP3DymNOqLbFqr5O2M1ZYaLC63Q3xt3eVvbcPh3N08D1hHkhz/KDTvkRAQpvrW8ISKmgDdmzN55Pe55xHfSWGB7gPw8sZea57IxFzWHTK2yvTslooWoosmGxanYY2IG/no3EbPOWDKjPZ4ilYJe5JJ2immlxPz+2e2EOCKpDI+7fzQcRz3PTd3BK+budZ8aXX8aW/lOgKS8WmxZoKnOJBNWeTNWQFugmktXfdPHAdxMhjUXqeGQd8wTvZ4EzQNNafovwkI7IV/ZYoa++RGofVR3ZbRSiBNF6TDj/qXFt0wN/CQnsGAmQAGNiN+D4mY7i25dtTu/Jc7OxLdhAUFpHyJpyrYWLfvOiS5WYBeEDHkiPUa/8eZSPA3MXWZR1RiuDvuNqMjct1SSwdXADTtF68l/US1ksU657+XSC+6ly1A/upz+X71+C4Ho6W0751j5ZMT6xKjGh5pee7MVuduxIzXjWIy3YSd0fIT3U0A5NLEvJ9rfkx6JiHjRLx6V1tqsrtT6BsGtmCQR1UCJPLqsKVDvAINx3cPA/CGqr5OX2BGZlAihGmN6n7gv8w4O0k0LPTAe5YefgXN3m9pE867N31GtHVZaJ/UVgDNYS2jused4rw76ZWN41akx2QN0JSeMJqHXqVz6AKfz8ICS/dFnEGyBNpXiMRxrY/QPKi/wONwqsbDxRW7vZRVKs78pBkE0ksaShlZk5GkeayDWC/7Hi/NqUFtIloK9XB3paLxo1DGu5qqaF34jZdktzkXp0uZqpp+FfKZaiovMjt8F7yHCPk+LYpRsU2Cyc9DVoDA6rIgf+uEP4jppgehsxyT0lJHax2t869R2jYdsXwYUXjgwHIV0voj7bJYPGFlFjXOp6ZW86scsHM5xfsGQoK2Fp838VT34SHE1ZXU/puM7rviREHYW72pfpgGZUILQMohuTPnd8tFtAkbrmjLDo+k9xx7HUvgoFTiNNWuq/cRjr70FKNguMMTIrid+HwfmbRoaxENWdLcOTNeascER2a+37UQolKD5ksrPJG6RdNA7O2pzp3micDYRs/+s28cCIxO
String certificatePassword = "123";
String certificateName = generateResourceId("importCertPkcs");
HashMap<String, String> tags = new HashMap<>();
tags.put("key", "val");
ImportCertificateOptions importCertificateOptions = new ImportCertificateOptions(certificateName, Base64.getDecoder().decode(certificateContent))
.setPassword(certificatePassword)
.setEnabled(true)
.setTags(tags);
testRunner.accept(importCertificateOptions);
}
@Test
public abstract void importPemCertificate(HttpClient httpClient, CertificateServiceVersion serviceVersion) throws IOException;
// Builds import options for a PEM certificate loaded from test resources and
// passes them to the test body.
void importPemCertificateRunner(Consumer<ImportCertificateOptions> testRunner) throws IOException {
    byte[] certificateContent = readCertificate("pemCert.pem");
    String certificateName = generateResourceId("importCertPem");
    HashMap<String, String> tags = new HashMap<>();
    tags.put("key", "val");
    // PEM imports must carry a policy whose content type is explicitly PEM.
    CertificatePolicy pemPolicy = new CertificatePolicy("Self", "CN=AzureSDK")
        .setContentType(CertificateContentType.PEM);
    ImportCertificateOptions importOptions = new ImportCertificateOptions(certificateName, certificateContent)
        .setPolicy(pemPolicy)
        .setEnabled(true)
        .setTags(tags);
    testRunner.accept(importOptions);
}
@Test
public abstract void mergeCertificateNotFound(HttpClient httpClient, CertificateServiceVersion serviceVersion);
/**
 * Loads a certificate file from the test classpath, normalizing line endings to
 * '\n' (every line, including the last, gets a trailing newline).
 *
 * @param certName classpath resource name of the certificate file
 * @return the normalized file contents as bytes
 * @throws IOException if the resource cannot be read
 */
private byte[] readCertificate(String certName) throws IOException {
    String pemPath = getClass().getClassLoader().getResource(certName).getPath();
    // StringBuilder instead of repeated String '+' (the original was O(n^2)),
    // and try-with-resources instead of a manual finally-close.
    StringBuilder pemCert = new StringBuilder();
    try (BufferedReader br = new BufferedReader(new FileReader(pemPath))) {
        String line;
        while ((line = br.readLine()) != null) {
            pemCert.append(line).append("\n");
        }
    }
    // NOTE(review): FileReader/getBytes use the platform charset, as before —
    // presumably the test resources are ASCII; confirm before tightening to UTF-8.
    return pemCert.toString().getBytes();
}
// Builds a fully populated issuer fixture with the given name and provider "Test";
// used by the create/get/delete/list issuer tests.
CertificateIssuer setupIssuer(String issuerName) {
return new CertificateIssuer(issuerName, "Test")
.setAdministratorContacts(Arrays.asList(new AdministratorContact().setFirstName("first").setLastName("last").setEmail("first.last@hotmail.com").setPhone("12345")))
.setAccountId("issuerAccountId")
.setEnabled(true)
.setOrganizationId("orgId")
.setPassword("test123");
}
/**
 * Renders a byte array (e.g. an x5t certificate thumbprint) as a lower-case hex
 * string; returns "" for null input.
 *
 * @param x5t the bytes to render, may be null
 * @return two hex characters per byte, zero-padded
 */
String toHexString(byte[] x5t) {
    if (x5t == null) {
        return "";
    }
    StringBuilder hexString = new StringBuilder(x5t.length * 2);
    for (byte b : x5t) {
        String hex = Integer.toHexString(0xFF & b);
        if (hex.length() == 1) {
            hexString.append('0');    // zero-pad single-digit values
        }
        hexString.append(hex);
    }
    // The original also did .replace("-", ""), but Integer.toHexString of a
    // masked byte can never produce '-', so that call was dead code and is removed.
    return hexString.toString();
}
/**
 * Parses the CER bytes of a Key Vault certificate into an X509Certificate.
 * Asserts the certificate actually carries CER content first.
 *
 * @param certificate the Key Vault certificate whose CER bytes to parse
 * @return the parsed X.509 certificate
 * @throws CertificateException if the bytes are not a valid certificate
 * @throws IOException if closing the stream fails
 */
X509Certificate loadCerToX509Certificate(KeyVaultCertificateWithPolicy certificate) throws CertificateException, IOException {
    assertNotNull(certificate.getCer());
    // try-with-resources: the original closed the stream only on the success path.
    try (ByteArrayInputStream cerStream = new ByteArrayInputStream(certificate.getCer())) {
        CertificateFactory certificateFactory = CertificateFactory.getInstance("X.509");
        return (X509Certificate) certificateFactory.generateCertificate(cerStream);
    }
}
// Compares a service-returned issuer against the locally built fixture: configured
// fields must match exactly, while server-populated fields (timestamps, id) only
// need to be present. Contacts are compared by count, not content.
Boolean validateIssuer(CertificateIssuer expected, CertificateIssuer actual) {
return expected.getAccountId().equals(actual.getAccountId())
&& expected.isEnabled().equals(actual.isEnabled())
&& (actual.getCreatedOn() != null)
&& (actual.getUpdatedOn() != null)
&& (actual.getId() != null)
&& (actual.getId().length() > 0)
&& expected.getName().equals(actual.getName())
&& expected.getOrganizationId().equals(actual.getOrganizationId())
&& expected.getAdministratorContacts().size() == actual.getAdministratorContacts().size();
}
// Builds a self-signed EC (P-384) PKCS12 policy fixture with a 24-month validity
// and an auto-renew action 40 days before expiry; exercised by the policy tests.
CertificatePolicy setupPolicy() {
return new CertificatePolicy(WellKnownIssuerNames.SELF, "CN=default")
.setKeyUsage(CertificateKeyUsage.KEY_CERT_SIGN, CertificateKeyUsage.KEY_AGREEMENT)
.setContentType(CertificateContentType.PKCS12)
.setExportable(true)
.setKeyType(CertificateKeyType.EC)
.setCertificateTransparent(false)
.setEnabled(true)
.setKeyCurveName(CertificateKeyCurveName.P_384)
.setKeyReusable(true)
.setValidityInMonths(24)
.setLifetimeActions(new LifetimeAction(CertificatePolicyAction.AUTO_RENEW).setDaysBeforeExpiry(40));
}
// Compares a service-returned policy against the local fixture. Scalar settings
// must match exactly; createdOn only needs to be populated; collection-valued
// settings (lifetime actions, key usage) are compared by size only.
boolean validatePolicy(CertificatePolicy expected, CertificatePolicy actual) {
return expected.getKeyType().equals(actual.getKeyType())
&& expected.getContentType().equals(actual.getContentType())
&& actual.getCreatedOn() != null
&& expected.getIssuerName().equals(actual.getIssuerName())
&& expected.getKeyCurveName().equals(actual.getKeyCurveName())
&& expected.isExportable().equals(actual.isExportable())
&& expected.isCertificateTransparent().equals(actual.isCertificateTransparent())
&& expected.isEnabled().equals(actual.isEnabled())
&& expected.isKeyReusable().equals(actual.isKeyReusable())
&& expected.getValidityInMonths().equals(actual.getValidityInMonths())
&& expected.getLifetimeActions().size() == actual.getLifetimeActions().size()
&& expected.getKeyUsage().size() == actual.getKeyUsage().size();
}
// Compares two certificates field by field: identifiers and property metadata must
// match exactly; binary content (thumbprint, CER) is compared by length only.
boolean validateCertificate(KeyVaultCertificate expected, KeyVaultCertificate actual) {
return expected.getId().equals(actual.getId())
&& expected.getKeyId().equals(actual.getKeyId())
&& expected.getName().equals(actual.getName())
&& expected.getSecretId().equals(actual.getSecretId())
&& expected.getProperties().getVersion().equals(actual.getProperties().getVersion())
&& expected.getProperties().getCreatedOn().equals(actual.getProperties().getCreatedOn())
&& expected.getProperties().getExpiresOn().equals(actual.getProperties().getExpiresOn())
&& expected.getProperties().getRecoveryLevel().equals(actual.getProperties().getRecoveryLevel())
&& expected.getProperties().getX509Thumbprint().length == actual.getProperties().getX509Thumbprint().length
&& expected.getCer().length == actual.getCer().length;
}
public String getEndpoint() {
final String endpoint = interceptorManager.isPlaybackMode()
? "http:
: System.getenv("AZURE_KEYVAULT_ENDPOINT");
Objects.requireNonNull(endpoint);
return endpoint;
}
// Convenience overload: expects the generic HttpResponseException type.
static void assertRestException(Runnable exceptionThrower, int expectedStatusCode) {
assertRestException(exceptionThrower, HttpResponseException.class, expectedStatusCode);
}
// Runs the action and asserts it throws the expected REST exception with the
// expected status code. NOTE: if the action does NOT throw, fail()'s AssertionError
// is itself caught by the catch (Throwable) below and fed to the type assertion,
// which then fails — the net effect (test failure) is correct, but the reported
// message comes from the type mismatch rather than from fail().
static void assertRestException(Runnable exceptionThrower, Class<? extends HttpResponseException> expectedExceptionType, int expectedStatusCode) {
try {
exceptionThrower.run();
fail();
} catch (Throwable ex) {
assertRestException(ex, expectedExceptionType, expectedStatusCode);
}
}
// Produces a resource name that is random per run but stable in playback mode
// (recorded sessions must replay against the names captured at record time).
String generateResourceId(String suffix) {
    if (interceptorManager.isPlaybackMode()) {
        return suffix;
    }
    String randomPrefix = UUID.randomUUID().toString();
    return suffix.isEmpty() ? randomPrefix : randomPrefix + "-" + suffix;
}
/**
* Helper method to verify the error was a HttpRequestException and it has a specific HTTP response code.
*
* @param exception Expected error thrown during the test
* @param expectedStatusCode Expected HTTP status code contained in the error response
*/
// Convenience overload: expects the generic HttpResponseException type.
static void assertRestException(Throwable exception, int expectedStatusCode) {
assertRestException(exception, HttpResponseException.class, expectedStatusCode);
}
// Asserts the throwable is exactly the expected REST exception type and that it
// carries the expected HTTP status code.
static void assertRestException(Throwable exception, Class<? extends HttpResponseException> expectedExceptionType, int expectedStatusCode) {
    assertEquals(expectedExceptionType, exception.getClass());
    HttpResponseException restException = (HttpResponseException) exception;
    assertEquals(expectedStatusCode, restException.getResponse().getStatusCode());
}
/**
* Helper method to verify that a command throws an IllegalArgumentException.
*
* @param exceptionThrower Command that should throw the exception
*/
// Runs the action and asserts it throws exactly the given exception class.
// fail() throws an AssertionError (an Error, not an Exception), so it escapes the
// catch below and correctly fails the test when the runnable does not throw.
static <T> void assertRunnableThrowsException(Runnable exceptionThrower, Class<T> exception) {
try {
exceptionThrower.run();
fail();
} catch (Exception ex) {
assertEquals(exception, ex.getClass());
}
}
/**
 * Sleeps only when running against the live service (record mode); playback
 * replays recorded traffic, so there is nothing to wait for.
 *
 * @param millis how long to sleep outside playback mode
 */
public void sleepInRecordMode(long millis) {
    if (interceptorManager.isPlaybackMode()) {
        return;
    }
    // Delegate to sleep(...) instead of duplicating the try/catch logic.
    sleep(millis);
}
/**
 * Sleeps for the given duration. If interrupted, restores the thread's interrupt
 * flag (instead of swallowing it with printStackTrace) so callers can still
 * observe cancellation.
 *
 * @param millis how long to sleep
 */
public void sleep(long millis) {
    try {
        Thread.sleep(millis);
    } catch (InterruptedException e) {
        Thread.currentThread().interrupt();
    }
}
/**
* Returns a stream of arguments that includes all combinations of eligible {@link HttpClient HttpClients} and
* service versions that should be tested.
*
* @return A stream of HttpClient and service version combinations to test.
*/
/**
 * Returns a stream of arguments that includes all combinations of eligible
 * {@link HttpClient HttpClients} and service versions that should be tested.
 *
 * @return A stream of HttpClient and service version combinations to test.
 */
static Stream<Arguments> getTestParameters() {
    List<Arguments> combinations = new ArrayList<>();
    getHttpClients().forEach(httpClient ->
        Arrays.stream(CertificateServiceVersion.values())
            .filter(CertificateClientTestBase::shouldServiceVersionBeTested)
            .forEach(serviceVersion -> combinations.add(Arguments.of(httpClient, serviceVersion))));
    return combinations.stream();
}
/**
* Returns whether the given service version match the rules of test framework.
*
* <ul>
* <li>Using latest service version as default if no environment variable is set.</li>
* <li>If it's set to ALL, all Service versions in {@link CertificateServiceVersion} will be tested.</li>
* <li>Otherwise, Service version string should match env variable.</li>
* </ul>
*
* Environment values currently supported are: "ALL", "${version}".
* Use comma to separate http clients want to test.
* e.g. {@code set AZURE_TEST_SERVICE_VERSIONS = V1_0, V2_0}
*
* @param serviceVersion ServiceVersion needs to check
* @return Boolean indicates whether filters out the service version or not.
*/
// Decides whether a service version participates in this run, driven by the
// AZURE_TEST_SERVICE_VERSIONS environment value: unset -> latest only;
// "ALL" -> every version; otherwise a comma-separated allow-list.
private static boolean shouldServiceVersionBeTested(CertificateServiceVersion serviceVersion) {
    if (CoreUtils.isNullOrEmpty(SERVICE_VERSION_FROM_ENV)) {
        return serviceVersion.equals(CertificateServiceVersion.getLatest());
    }
    if (AZURE_TEST_SERVICE_VERSIONS_VALUE_ALL.equalsIgnoreCase(SERVICE_VERSION_FROM_ENV)) {
        return true;
    }
    for (String configuredVersion : SERVICE_VERSION_FROM_ENV.split(",")) {
        if (serviceVersion.toString().equals(configuredVersion.trim())) {
            return true;
        }
    }
    return false;
}
} |
Here we need to validate the scope passed in doesn't contain any characters which could cause us to execute something unintended. See https://github.com/Azure/azure-sdk-for-net/blob/23cc9455cf501d6a65ce8faaedb27242b27c3b51/sdk/identity/Azure.Identity/src/ScopeUtilities.cs#L46-L54 | public Mono<AccessToken> authenticateWithAzureCli(TokenRequestContext request) {
String azCommand = "az account get-access-token --output json --resource ";
StringBuilder command = new StringBuilder();
command.append(azCommand);
String scopes = ScopeUtil.scopesToResource(request.getScopes());
command.append(scopes);
AccessToken token = null;
BufferedReader reader = null;
try {
String starter;
String switcher;
if (isWindowsPlatform()) {
starter = WINDOWS_STARTER;
switcher = WINDOWS_SWITCHER;
} else {
starter = LINUX_MAC_STARTER;
switcher = LINUX_MAC_SWITCHER;
}
ProcessBuilder builder = new ProcessBuilder(starter, switcher, command.toString());
String workingDirectory = getSafeWorkingDirectory();
if (workingDirectory != null) {
builder.directory(new File(workingDirectory));
} else {
throw logger.logExceptionAsError(new IllegalStateException("A Safe Working directory could not be" +
" found to execute CLI command from."));
}
builder.redirectErrorStream(true);
Process process = builder.start();
reader = new BufferedReader(new InputStreamReader(process.getInputStream(), "UTF-8"));
String line;
StringBuilder output = new StringBuilder();
while (true) {
line = reader.readLine();
if (line == null) {
break;
}
if (line.startsWith(WINDOWS_PROCESS_ERROR_MESSAGE) || line.matches(LINUX_MAC_PROCESS_ERROR_MESSAGE)) {
throw logger.logExceptionAsError(
new ClientAuthenticationException("Azure CLI not installed", null));
}
output.append(line);
}
String processOutput = output.toString();
if (process.exitValue() != 0) {
if (processOutput.length() > 0) {
String redactedOutput = redactInfo("\"accessToken\": \"(.*?)(\"|$)", processOutput);
throw logger.logExceptionAsError(new ClientAuthenticationException(processOutput
.replaceAll(redactedOutput, "****"), null));
} else {
throw logger.logExceptionAsError(
new ClientAuthenticationException("Failed to invoke Azure CLI ", null));
}
}
Map<String, String> objectMap = SERIALIZER_ADAPTER.deserialize(processOutput, Map.class,
SerializerEncoding.JSON);
String accessToken = objectMap.get("accessToken");
String time = objectMap.get("expiresOn");
String timeToSecond = time.substring(0, time.indexOf("."));
String timeJoinedWithT = String.join("T", timeToSecond.split(" "));
OffsetDateTime expiresOn = LocalDateTime.parse(timeJoinedWithT, DateTimeFormatter.ISO_LOCAL_DATE_TIME)
.atZone(ZoneId.systemDefault())
.toOffsetDateTime().withOffsetSameInstant(ZoneOffset.UTC);
token = new IdentityToken(accessToken, expiresOn, options);
} catch (IOException e) {
throw logger.logExceptionAsError(new IllegalStateException(e));
} catch (RuntimeException e) {
return Mono.error(logger.logExceptionAsError(e));
} finally {
try {
if (reader != null) {
reader.close();
}
} catch (IOException ex) {
return Mono.error(logger.logExceptionAsError(new IllegalStateException(ex)));
}
}
return Mono.just(token);
} | command.append(scopes); | public Mono<AccessToken> authenticateWithAzureCli(TokenRequestContext request) {
String azCommand = "az account get-access-token --output json --resource ";
StringBuilder command = new StringBuilder();
command.append(azCommand);
String scopes = ScopeUtil.scopesToResource(request.getScopes());
try {
ScopeUtil.validateScope(scopes);
} catch (IllegalArgumentException ex) {
return Mono.error(logger.logExceptionAsError(ex));
}
command.append(scopes);
AccessToken token = null;
BufferedReader reader = null;
try {
String starter;
String switcher;
if (isWindowsPlatform()) {
starter = WINDOWS_STARTER;
switcher = WINDOWS_SWITCHER;
} else {
starter = LINUX_MAC_STARTER;
switcher = LINUX_MAC_SWITCHER;
}
ProcessBuilder builder = new ProcessBuilder(starter, switcher, command.toString());
String workingDirectory = getSafeWorkingDirectory();
if (workingDirectory != null) {
builder.directory(new File(workingDirectory));
} else {
throw logger.logExceptionAsError(new IllegalStateException("A Safe Working directory could not be"
+ " found to execute CLI command from."));
}
builder.redirectErrorStream(true);
Process process = builder.start();
reader = new BufferedReader(new InputStreamReader(process.getInputStream(), "UTF-8"));
String line;
StringBuilder output = new StringBuilder();
while (true) {
line = reader.readLine();
if (line == null) {
break;
}
if (line.startsWith(WINDOWS_PROCESS_ERROR_MESSAGE) || line.matches(LINUX_MAC_PROCESS_ERROR_MESSAGE)) {
throw logger.logExceptionAsError(
new ClientAuthenticationException("Azure CLI not installed", null));
}
output.append(line);
}
String processOutput = output.toString();
if (process.exitValue() != 0) {
if (processOutput.length() > 0) {
String redactedOutput = redactInfo("\"accessToken\": \"(.*?)(\"|$)", processOutput);
throw logger.logExceptionAsError(new ClientAuthenticationException(redactedOutput, null));
} else {
throw logger.logExceptionAsError(
new ClientAuthenticationException("Failed to invoke Azure CLI ", null));
}
}
Map<String, String> objectMap = SERIALIZER_ADAPTER.deserialize(processOutput, Map.class,
SerializerEncoding.JSON);
String accessToken = objectMap.get("accessToken");
String time = objectMap.get("expiresOn");
String timeToSecond = time.substring(0, time.indexOf("."));
String timeJoinedWithT = String.join("T", timeToSecond.split(" "));
OffsetDateTime expiresOn = LocalDateTime.parse(timeJoinedWithT, DateTimeFormatter.ISO_LOCAL_DATE_TIME)
.atZone(ZoneId.systemDefault())
.toOffsetDateTime().withOffsetSameInstant(ZoneOffset.UTC);
token = new IdentityToken(accessToken, expiresOn, options);
} catch (IOException e) {
throw logger.logExceptionAsError(new IllegalStateException(e));
} catch (RuntimeException e) {
return Mono.error(logger.logExceptionAsError(e));
} finally {
try {
if (reader != null) {
reader.close();
}
} catch (IOException ex) {
return Mono.error(logger.logExceptionAsError(new IllegalStateException(ex)));
}
}
return Mono.just(token);
} | class IdentityClient {
private static final SerializerAdapter SERIALIZER_ADAPTER = JacksonAdapter.createDefaultSerializerAdapter();
private static final Random RANDOM = new Random();
private static final String WINDOWS_STARTER = "cmd.exe";
private static final String LINUX_MAC_STARTER = "/bin/sh";
private static final String WINDOWS_SWITCHER = "/c";
private static final String LINUX_MAC_SWITCHER = "-c";
private static final String WINDOWS_PROCESS_ERROR_MESSAGE = "'az' is not recognized";
private static final String LINUX_MAC_PROCESS_ERROR_MESSAGE = "(.*)az:(.*)not found";
private static final String DEFAULT_WINDOWS_SYSTEM_ROOT = System.getenv("SystemRoot");
private static final String DEFAULT_MAC_LINUX_PATH = "/bin/";
private final ClientLogger logger = new ClientLogger(IdentityClient.class);
private final IdentityClientOptions options;
private final PublicClientApplication publicClientApplication;
private final String tenantId;
private final String clientId;
private HttpPipelineAdapter httpPipelineAdapter;
/**
* Creates an IdentityClient with the given options.
*
* @param tenantId the tenant ID of the application.
* @param clientId the client ID of the application.
* @param options the options configuring the client.
*/
IdentityClient(String tenantId, String clientId, IdentityClientOptions options) {
// Default to the multi-tenant "common" authority and default options when not supplied.
if (tenantId == null) {
tenantId = "common";
}
if (options == null) {
options = new IdentityClientOptions();
}
this.tenantId = tenantId;
this.clientId = clientId;
this.options = options;
// Without a client id no MSAL public client can be built; flows that need one
// are expected not to be invoked in that case.
if (clientId == null) {
this.publicClientApplication = null;
} else {
String authorityUrl = options.getAuthorityHost().replaceAll("/+$", "") + "/organizations/" + tenantId;
PublicClientApplication.Builder publicClientApplicationBuilder = PublicClientApplication.builder(clientId);
try {
publicClientApplicationBuilder = publicClientApplicationBuilder.authority(authorityUrl);
} catch (MalformedURLException e) {
throw logger.logExceptionAsWarning(new IllegalStateException(e));
}
// HTTP transport selection for MSAL, in priority order: explicit pipeline,
// explicit HttpClient (wrapped in the standard pipeline), proxy options,
// and finally a default client wrapped in the standard pipeline.
HttpPipeline httpPipeline = options.getHttpPipeline();
if (httpPipeline != null) {
httpPipelineAdapter = new HttpPipelineAdapter(httpPipeline);
publicClientApplicationBuilder.httpClient(httpPipelineAdapter);
} else {
HttpClient httpClient = options.getHttpClient();
if (httpClient != null) {
httpPipelineAdapter = new HttpPipelineAdapter(setupPipeline(httpClient));
publicClientApplicationBuilder.httpClient(httpPipelineAdapter);
} else if (options.getProxyOptions() != null) {
publicClientApplicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions()));
} else {
httpPipelineAdapter = new HttpPipelineAdapter(setupPipeline(HttpClient.createDefault()));
publicClientApplicationBuilder.httpClient(httpPipelineAdapter);
}
}
this.publicClientApplication = publicClientApplicationBuilder.build();
}
}
/**
* Asynchronously acquire a token from Active Directory with Azure CLI.
*
* @param request the details of the token request
* @return a Publisher that emits an AccessToken
*/
/**
* Asynchronously acquire a token from Active Directory with a client secret.
*
* @param clientSecret the client secret of the application
* @param request the details of the token request
* @return a Publisher that emits an AccessToken
*/
public Mono<AccessToken> authenticateWithClientSecret(String clientSecret, TokenRequestContext request) {
// Tenant-specific authority; trailing slashes on the configured host are stripped.
String authorityUrl = options.getAuthorityHost().replaceAll("/+$", "") + "/" + tenantId;
try {
ConfidentialClientApplication.Builder applicationBuilder =
ConfidentialClientApplication.builder(clientId, ClientCredentialFactory.createFromSecret(clientSecret))
.authority(authorityUrl);
// Route MSAL's HTTP traffic through the shared pipeline when one was built;
// otherwise honor the configured proxy, if any.
if (httpPipelineAdapter != null) {
applicationBuilder.httpClient(httpPipelineAdapter);
} else if (options.getProxyOptions() != null) {
applicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions()));
}
ConfidentialClientApplication application = applicationBuilder.build();
return Mono.fromFuture(application.acquireToken(
ClientCredentialParameters.builder(new HashSet<>(request.getScopes()))
.build()))
.map(ar -> new MsalToken(ar, options));
} catch (MalformedURLException e) {
return Mono.error(e);
}
}
// Wraps the given HttpClient in the standard policy chain: retry bracketed by the
// shared before/after policy providers, with request/response logging last.
private HttpPipeline setupPipeline(HttpClient httpClient) {
    List<HttpPipelinePolicy> policies = new ArrayList<>();
    HttpPolicyProviders.addBeforeRetryPolicies(policies);
    policies.add(new RetryPolicy());
    HttpPolicyProviders.addAfterRetryPolicies(policies);
    policies.add(new HttpLoggingPolicy(new HttpLogOptions()));
    return new HttpPipelineBuilder()
        .httpClient(httpClient)
        .policies(policies.toArray(new HttpPipelinePolicy[0]))
        .build();
}
/**
* Asynchronously acquire a token from Active Directory with a PKCS12 certificate.
*
* @param pfxCertificatePath the path to the PKCS12 certificate of the application
* @param pfxCertificatePassword the password protecting the PFX certificate
* @param request the details of the token request
* @return a Publisher that emits an AccessToken
*/
public Mono<AccessToken> authenticateWithPfxCertificate(String pfxCertificatePath, String pfxCertificatePassword,
    TokenRequestContext request) {
    String authorityUrl = options.getAuthorityHost().replaceAll("/+$", "") + "/" + tenantId;
    return Mono.fromCallable(() -> {
        ConfidentialClientApplication.Builder applicationBuilder;
        // Close the certificate stream once the credential is created — the
        // original leaked the FileInputStream. (Assumes createFromCertificate
        // consumes the stream eagerly, which its returned credential implies —
        // confirm against the MSAL4J docs.)
        try (FileInputStream pfxCertificateStream = new FileInputStream(pfxCertificatePath)) {
            applicationBuilder = ConfidentialClientApplication.builder(clientId,
                ClientCredentialFactory.createFromCertificate(pfxCertificateStream, pfxCertificatePassword))
                .authority(authorityUrl);
        }
        // Shared pipeline first; otherwise honor configured proxy options.
        if (httpPipelineAdapter != null) {
            applicationBuilder.httpClient(httpPipelineAdapter);
        } else if (options.getProxyOptions() != null) {
            applicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions()));
        }
        return applicationBuilder.build();
    }).flatMap(application -> Mono.fromFuture(application.acquireToken(
        ClientCredentialParameters.builder(new HashSet<>(request.getScopes())).build())))
        .map(ar -> new MsalToken(ar, options));
}
/**
* Asynchronously acquire a token from Active Directory with a PEM certificate.
*
* @param pemCertificatePath the path to the PEM certificate of the application
* @param request the details of the token request
* @return a Publisher that emits an AccessToken
*/
public Mono<AccessToken> authenticateWithPemCertificate(String pemCertificatePath, TokenRequestContext request) {
String authorityUrl = options.getAuthorityHost().replaceAll("/+$", "") + "/" + tenantId;
try {
// The PEM file is expected to contain both the private key and the certificate;
// both halves are extracted from the same bytes below.
byte[] pemCertificateBytes = Files.readAllBytes(Paths.get(pemCertificatePath));
ConfidentialClientApplication.Builder applicationBuilder =
ConfidentialClientApplication.builder(clientId, ClientCredentialFactory.createFromCertificate(
CertificateUtil.privateKeyFromPem(pemCertificateBytes),
CertificateUtil.publicKeyFromPem(pemCertificateBytes)))
.authority(authorityUrl);
// Shared pipeline first; otherwise honor configured proxy options.
if (httpPipelineAdapter != null) {
applicationBuilder.httpClient(httpPipelineAdapter);
} else if (options.getProxyOptions() != null) {
applicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions()));
}
ConfidentialClientApplication application = applicationBuilder.build();
return Mono.fromFuture(application.acquireToken(
ClientCredentialParameters.builder(new HashSet<>(request.getScopes()))
.build()))
.map(ar -> new MsalToken(ar, options));
} catch (IOException e) {
return Mono.error(e);
}
}
/**
* Asynchronously acquire a token from Active Directory with a username and a password.
*
* @param request the details of the token request
* @param username the username of the user
* @param password the password of the user
* @return a Publisher that emits an AccessToken
*/
public Mono<MsalToken> authenticateWithUsernamePassword(TokenRequestContext request,
String username, String password) {
// Resource Owner Password Credentials flow via MSAL's public client application.
return Mono.fromFuture(publicClientApplication.acquireToken(
UserNamePasswordParameters.builder(new HashSet<>(request.getScopes()), username, password.toCharArray())
.build()))
.map(ar -> new MsalToken(ar, options));
}
/**
* Asynchronously acquire a token from the currently logged in client.
*
* @param request the details of the token request
* @return a Publisher that emits an AccessToken
*/
public Mono<MsalToken> authenticateWithUserRefreshToken(TokenRequestContext request, MsalToken msalToken) {
// Silent acquisition: reuse MSAL's token cache, pinned to the account from a
// previous result when one is available.
SilentParameters parameters;
if (msalToken.getAccount() != null) {
parameters = SilentParameters.builder(new HashSet<>(request.getScopes()), msalToken.getAccount()).build();
} else {
parameters = SilentParameters.builder(new HashSet<>(request.getScopes())).build();
}
// Defer so the checked MalformedURLException surfaces through the Mono at
// subscription time rather than at assembly time.
return Mono.defer(() -> {
try {
return Mono.fromFuture(publicClientApplication.acquireTokenSilently(parameters))
.map(ar -> new MsalToken(ar, options));
} catch (MalformedURLException e) {
return Mono.error(e);
}
});
}
/**
* Asynchronously acquire a token from Active Directory with a device code challenge. Active Directory will provide
* a device code for login and the user must meet the challenge by authenticating in a browser on the current or a
* different device.
*
* @param request the details of the token request
* @param deviceCodeConsumer the user provided closure that will consume the device code challenge
* @return a Publisher that emits an AccessToken when the device challenge is met, or an exception if the device
* code expires
*/
public Mono<MsalToken> authenticateWithDeviceCode(TokenRequestContext request,
Consumer<DeviceCodeInfo> deviceCodeConsumer) {
return Mono.fromFuture(() -> {
// Relay the device-code challenge to the caller's consumer; the challenge's
// absolute expiry is derived here from the relative expires-in seconds.
DeviceCodeFlowParameters parameters = DeviceCodeFlowParameters.builder(new HashSet<>(request.getScopes()),
dc -> deviceCodeConsumer.accept(new DeviceCodeInfo(dc.userCode(), dc.deviceCode(),
dc.verificationUri(), OffsetDateTime.now().plusSeconds(dc.expiresIn()), dc.message()))).build();
return publicClientApplication.acquireToken(parameters);
}).map(ar -> new MsalToken(ar, options));
}
/**
* Asynchronously acquire a token from Active Directory with an authorization code from an oauth flow.
*
* @param request the details of the token request
* @param authorizationCode the oauth2 authorization code
* @param redirectUrl the redirectUrl where the authorization code is sent to
* @return a Publisher that emits an AccessToken
*/
public Mono<MsalToken> authenticateWithAuthorizationCode(TokenRequestContext request, String authorizationCode,
URI redirectUrl) {
// Exchange the OAuth2 authorization code for tokens; redirectUrl must match the
// redirect used when the code was issued.
return Mono.fromFuture(() -> publicClientApplication.acquireToken(
AuthorizationCodeParameters.builder(authorizationCode, redirectUrl)
.scopes(new HashSet<>(request.getScopes()))
.build()))
.map(ar -> new MsalToken(ar, options));
}
/**
* Asynchronously acquire a token from Active Directory by opening a browser and wait for the user to login. The
* credential will run a minimal local HttpServer at the given port, so {@code http:
* listed as a valid reply URL for the application.
*
* @param request the details of the token request
* @param port the port on which the HTTP server is listening
* @return a Publisher that emits an AccessToken
*/
public Mono<MsalToken> authenticateWithBrowserInteraction(TokenRequestContext request, int port) {
String authorityUrl = options.getAuthorityHost().replaceAll("/+$", "") + "/" + tenantId;
return AuthorizationCodeListener.create(port)
.flatMap(server -> {
URI redirectUri;
String browserUri;
try {
redirectUri = new URI(String.format("http:
browserUri =
String.format("%s/oauth2/v2.0/authorize?response_type=code&response_mode=query&prompt"
+ "=select_account&client_id=%s&redirect_uri=%s&state=%s&scope=%s",
authorityUrl,
clientId,
redirectUri.toString(),
UUID.randomUUID(),
String.join(" ", request.getScopes()));
} catch (URISyntaxException e) {
return server.dispose().then(Mono.error(e));
}
return server.listen()
.mergeWith(Mono.<String>fromRunnable(() -> {
try {
openUrl(browserUri);
} catch (IOException e) {
throw logger.logExceptionAsError(new IllegalStateException(e));
}
}).subscribeOn(Schedulers.newSingle("browser")))
.next()
.flatMap(code -> authenticateWithAuthorizationCode(request, code, redirectUri))
.onErrorResume(t -> server.dispose().then(Mono.error(t)))
.flatMap(msalToken -> server.dispose().then(Mono.just(msalToken)));
});
}
/**
* Asynchronously acquire a token from the App Service Managed Service Identity endpoint.
*
* @param msiEndpoint the endpoint to acquire token from
* @param msiSecret the secret to acquire token with
* @param request the details of the token request
* @return a Publisher that emits an AccessToken
*/
public Mono<AccessToken> authenticateToManagedIdentityEndpoint(String msiEndpoint, String msiSecret,
TokenRequestContext request) {
return Mono.fromCallable(() -> {
String resource = ScopeUtil.scopesToResource(request.getScopes());
HttpURLConnection connection = null;
StringBuilder payload = new StringBuilder();
payload.append("resource=");
payload.append(URLEncoder.encode(resource, "UTF-8"));
payload.append("&api-version=");
payload.append(URLEncoder.encode("2017-09-01", "UTF-8"));
if (clientId != null) {
payload.append("&clientid=");
payload.append(URLEncoder.encode(clientId, "UTF-8"));
}
try {
URL url = new URL(String.format("%s?%s", msiEndpoint, payload));
connection = (HttpURLConnection) url.openConnection();
connection.setRequestMethod("GET");
if (msiSecret != null) {
connection.setRequestProperty("Secret", msiSecret);
}
connection.setRequestProperty("Metadata", "true");
connection.connect();
Scanner s = new Scanner(connection.getInputStream(), StandardCharsets.UTF_8.name())
.useDelimiter("\\A");
String result = s.hasNext() ? s.next() : "";
MSIToken msiToken = SERIALIZER_ADAPTER.deserialize(result, MSIToken.class, SerializerEncoding.JSON);
return new IdentityToken(msiToken.getToken(), msiToken.getExpiresAt(), options);
} finally {
if (connection != null) {
connection.disconnect();
}
}
});
}
/**
* Asynchronously acquire a token from the Virtual Machine IMDS endpoint.
*
* @param request the details of the token request
* @return a Publisher that emits an AccessToken
*/
public Mono<AccessToken> authenticateToIMDSEndpoint(TokenRequestContext request) {
String resource = ScopeUtil.scopesToResource(request.getScopes());
StringBuilder payload = new StringBuilder();
final int imdsUpgradeTimeInMs = 70 * 1000;
try {
payload.append("api-version=");
payload.append(URLEncoder.encode("2018-02-01", "UTF-8"));
payload.append("&resource=");
payload.append(URLEncoder.encode(resource, "UTF-8"));
if (clientId != null) {
payload.append("&client_id=");
payload.append(URLEncoder.encode(clientId, "UTF-8"));
}
} catch (IOException exception) {
return Mono.error(exception);
}
return checkIMDSAvailable().flatMap(available -> Mono.fromCallable(() -> {
int retry = 1;
while (retry <= options.getMaxRetry()) {
URL url = null;
HttpURLConnection connection = null;
try {
url =
new URL(String.format("http:
payload.toString()));
connection = (HttpURLConnection) url.openConnection();
connection.setRequestMethod("GET");
connection.setRequestProperty("Metadata", "true");
connection.connect();
Scanner s = new Scanner(connection.getInputStream(), StandardCharsets.UTF_8.name())
.useDelimiter("\\A");
String result = s.hasNext() ? s.next() : "";
MSIToken msiToken = SERIALIZER_ADAPTER.deserialize(result,
MSIToken.class, SerializerEncoding.JSON);
return new IdentityToken(msiToken.getToken(), msiToken.getExpiresAt(), options);
} catch (IOException exception) {
if (connection == null) {
throw logger.logExceptionAsError(new RuntimeException(
String.format("Could not connect to the url: %s.", url), exception));
}
int responseCode = connection.getResponseCode();
if (responseCode == 410
|| responseCode == 429
|| responseCode == 404
|| (responseCode >= 500 && responseCode <= 599)) {
int retryTimeoutInMs = options.getRetryTimeout()
.apply(Duration.ofSeconds(RANDOM.nextInt(retry))).getNano() / 1000;
retryTimeoutInMs =
(responseCode == 410 && retryTimeoutInMs < imdsUpgradeTimeInMs) ? imdsUpgradeTimeInMs
: retryTimeoutInMs;
retry++;
if (retry > options.getMaxRetry()) {
break;
} else {
sleep(retryTimeoutInMs);
}
} else {
throw logger.logExceptionAsError(new RuntimeException(
"Couldn't acquire access token from IMDS, verify your objectId, "
+ "clientId or msiResourceId", exception));
}
} finally {
if (connection != null) {
connection.disconnect();
}
}
}
throw logger.logExceptionAsError(new RuntimeException(
String.format("MSI: Failed to acquire tokens after retrying %s times",
options.getMaxRetry())));
}));
}
private Mono<Boolean> checkIMDSAvailable() {
StringBuilder payload = new StringBuilder();
try {
payload.append("api-version=");
payload.append(URLEncoder.encode("2018-02-01", "UTF-8"));
} catch (IOException exception) {
return Mono.error(exception);
}
return Mono.fromCallable(() -> {
HttpURLConnection connection = null;
URL url = new URL(String.format("http:
payload.toString()));
try {
connection = (HttpURLConnection) url.openConnection();
connection.setRequestMethod("GET");
connection.setConnectTimeout(500);
connection.connect();
} finally {
if (connection != null) {
connection.disconnect();
}
}
return true;
});
}
private static void sleep(int millis) {
try {
Thread.sleep(millis);
} catch (InterruptedException ex) {
throw new IllegalStateException(ex);
}
}
private static Proxy proxyOptionsToJavaNetProxy(ProxyOptions options) {
switch (options.getType()) {
case SOCKS4:
case SOCKS5:
return new Proxy(Type.SOCKS, options.getAddress());
case HTTP:
default:
return new Proxy(Type.HTTP, options.getAddress());
}
}
private String getSafeWorkingDirectory() {
if (CoreUtils.isNullOrEmpty(DEFAULT_WINDOWS_SYSTEM_ROOT)) {
return null;
}
String defaultWindowsPath = DEFAULT_WINDOWS_SYSTEM_ROOT + "\\system32";
return isWindowsPlatform() ? defaultWindowsPath : DEFAULT_MAC_LINUX_PATH;
}
private boolean isWindowsPlatform() {
return System.getProperty("os.name").contains("Windows");
}
private String redactInfo(String regex, String input) {
return input.replaceAll(regex, "****");
}
void openUrl(String url) throws IOException {
Runtime rt = Runtime.getRuntime();
String os = System.getProperty("os.name").toLowerCase(Locale.ROOT);
if (os.contains("win")) {
rt.exec("rundll32 url.dll,FileProtocolHandler " + url);
} else if (os.contains("mac")) {
rt.exec("open " + url);
} else if (os.contains("nix") || os.contains("nux")) {
rt.exec("xdg-open " + url);
} else {
logger.error("Browser could not be opened - please open {} in a browser on this device.", url);
}
}
} | class IdentityClient {
private static final SerializerAdapter SERIALIZER_ADAPTER = JacksonAdapter.createDefaultSerializerAdapter();
private static final Random RANDOM = new Random();
private static final String WINDOWS_STARTER = "cmd.exe";
private static final String LINUX_MAC_STARTER = "/bin/sh";
private static final String WINDOWS_SWITCHER = "/c";
private static final String LINUX_MAC_SWITCHER = "-c";
private static final String WINDOWS_PROCESS_ERROR_MESSAGE = "'az' is not recognized";
private static final String LINUX_MAC_PROCESS_ERROR_MESSAGE = "(.*)az:(.*)not found";
private static final String DEFAULT_WINDOWS_SYSTEM_ROOT = System.getenv("SystemRoot");
private static final String DEFAULT_MAC_LINUX_PATH = "/bin/";
private final ClientLogger logger = new ClientLogger(IdentityClient.class);
private final IdentityClientOptions options;
private final PublicClientApplication publicClientApplication;
private final String tenantId;
private final String clientId;
private HttpPipelineAdapter httpPipelineAdapter;
/**
* Creates an IdentityClient with the given options.
*
* @param tenantId the tenant ID of the application.
* @param clientId the client ID of the application.
* @param options the options configuring the client.
*/
IdentityClient(String tenantId, String clientId, IdentityClientOptions options) {
if (tenantId == null) {
tenantId = "common";
}
if (options == null) {
options = new IdentityClientOptions();
}
this.tenantId = tenantId;
this.clientId = clientId;
this.options = options;
if (clientId == null) {
this.publicClientApplication = null;
} else {
String authorityUrl = options.getAuthorityHost().replaceAll("/+$", "") + "/organizations/" + tenantId;
PublicClientApplication.Builder publicClientApplicationBuilder = PublicClientApplication.builder(clientId);
try {
publicClientApplicationBuilder = publicClientApplicationBuilder.authority(authorityUrl);
} catch (MalformedURLException e) {
throw logger.logExceptionAsWarning(new IllegalStateException(e));
}
HttpPipeline httpPipeline = options.getHttpPipeline();
if (httpPipeline != null) {
httpPipelineAdapter = new HttpPipelineAdapter(httpPipeline);
publicClientApplicationBuilder.httpClient(httpPipelineAdapter);
} else {
HttpClient httpClient = options.getHttpClient();
if (httpClient != null) {
httpPipelineAdapter = new HttpPipelineAdapter(setupPipeline(httpClient));
publicClientApplicationBuilder.httpClient(httpPipelineAdapter);
} else if (options.getProxyOptions() != null) {
publicClientApplicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions()));
} else {
httpPipelineAdapter = new HttpPipelineAdapter(setupPipeline(HttpClient.createDefault()));
publicClientApplicationBuilder.httpClient(httpPipelineAdapter);
}
}
if (options.getExecutorService() != null) {
publicClientApplicationBuilder.executorService(options.getExecutorService());
}
this.publicClientApplication = publicClientApplicationBuilder.build();
}
}
/**
* Asynchronously acquire a token from Active Directory with Azure CLI.
*
* @param request the details of the token request
* @return a Publisher that emits an AccessToken
*/
/**
* Asynchronously acquire a token from Active Directory with a client secret.
*
* @param clientSecret the client secret of the application
* @param request the details of the token request
* @return a Publisher that emits an AccessToken
*/
public Mono<AccessToken> authenticateWithClientSecret(String clientSecret, TokenRequestContext request) {
String authorityUrl = options.getAuthorityHost().replaceAll("/+$", "") + "/" + tenantId;
try {
ConfidentialClientApplication.Builder applicationBuilder =
ConfidentialClientApplication.builder(clientId, ClientCredentialFactory.createFromSecret(clientSecret))
.authority(authorityUrl);
if (httpPipelineAdapter != null) {
applicationBuilder.httpClient(httpPipelineAdapter);
} else if (options.getProxyOptions() != null) {
applicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions()));
}
if (options.getExecutorService() != null) {
applicationBuilder.executorService(options.getExecutorService());
}
ConfidentialClientApplication application = applicationBuilder.build();
return Mono.fromFuture(application.acquireToken(
ClientCredentialParameters.builder(new HashSet<>(request.getScopes()))
.build()))
.map(ar -> new MsalToken(ar, options));
} catch (MalformedURLException e) {
return Mono.error(e);
}
}
private HttpPipeline setupPipeline(HttpClient httpClient) {
List<HttpPipelinePolicy> policies = new ArrayList<>();
HttpLogOptions httpLogOptions = new HttpLogOptions();
HttpPolicyProviders.addBeforeRetryPolicies(policies);
policies.add(new RetryPolicy());
HttpPolicyProviders.addAfterRetryPolicies(policies);
policies.add(new HttpLoggingPolicy(httpLogOptions));
return new HttpPipelineBuilder().httpClient(httpClient)
.policies(policies.toArray(new HttpPipelinePolicy[0])).build();
}
/**
* Asynchronously acquire a token from Active Directory with a PKCS12 certificate.
*
* @param pfxCertificatePath the path to the PKCS12 certificate of the application
* @param pfxCertificatePassword the password protecting the PFX certificate
* @param request the details of the token request
* @return a Publisher that emits an AccessToken
*/
public Mono<AccessToken> authenticateWithPfxCertificate(String pfxCertificatePath, String pfxCertificatePassword,
TokenRequestContext request) {
String authorityUrl = options.getAuthorityHost().replaceAll("/+$", "") + "/" + tenantId;
return Mono.fromCallable(() -> {
ConfidentialClientApplication.Builder applicationBuilder =
ConfidentialClientApplication.builder(clientId, ClientCredentialFactory.createFromCertificate(
new FileInputStream(pfxCertificatePath), pfxCertificatePassword))
.authority(authorityUrl);
if (httpPipelineAdapter != null) {
applicationBuilder.httpClient(httpPipelineAdapter);
} else if (options.getProxyOptions() != null) {
applicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions()));
}
if (options.getExecutorService() != null) {
applicationBuilder.executorService(options.getExecutorService());
}
return applicationBuilder.build();
}).flatMap(application -> Mono.fromFuture(application.acquireToken(
ClientCredentialParameters.builder(new HashSet<>(request.getScopes())).build())))
.map(ar -> new MsalToken(ar, options));
}
/**
* Asynchronously acquire a token from Active Directory with a PEM certificate.
*
* @param pemCertificatePath the path to the PEM certificate of the application
* @param request the details of the token request
* @return a Publisher that emits an AccessToken
*/
public Mono<AccessToken> authenticateWithPemCertificate(String pemCertificatePath, TokenRequestContext request) {
String authorityUrl = options.getAuthorityHost().replaceAll("/+$", "") + "/" + tenantId;
try {
byte[] pemCertificateBytes = Files.readAllBytes(Paths.get(pemCertificatePath));
ConfidentialClientApplication.Builder applicationBuilder =
ConfidentialClientApplication.builder(clientId, ClientCredentialFactory.createFromCertificate(
CertificateUtil.privateKeyFromPem(pemCertificateBytes),
CertificateUtil.publicKeyFromPem(pemCertificateBytes)))
.authority(authorityUrl);
if (httpPipelineAdapter != null) {
applicationBuilder.httpClient(httpPipelineAdapter);
} else if (options.getProxyOptions() != null) {
applicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions()));
}
if (options.getExecutorService() != null) {
applicationBuilder.executorService(options.getExecutorService());
}
ConfidentialClientApplication application = applicationBuilder.build();
return Mono.fromFuture(application.acquireToken(
ClientCredentialParameters.builder(new HashSet<>(request.getScopes()))
.build()))
.map(ar -> new MsalToken(ar, options));
} catch (IOException e) {
return Mono.error(e);
}
}
/**
* Asynchronously acquire a token from Active Directory with a username and a password.
*
* @param request the details of the token request
* @param username the username of the user
* @param password the password of the user
* @return a Publisher that emits an AccessToken
*/
public Mono<MsalToken> authenticateWithUsernamePassword(TokenRequestContext request,
String username, String password) {
return Mono.fromFuture(publicClientApplication.acquireToken(
UserNamePasswordParameters.builder(new HashSet<>(request.getScopes()), username, password.toCharArray())
.build()))
.map(ar -> new MsalToken(ar, options));
}
/**
* Asynchronously acquire a token from the currently logged in client.
*
* @param request the details of the token request
* @return a Publisher that emits an AccessToken
*/
public Mono<MsalToken> authenticateWithUserRefreshToken(TokenRequestContext request, MsalToken msalToken) {
SilentParameters parameters;
if (msalToken.getAccount() != null) {
parameters = SilentParameters.builder(new HashSet<>(request.getScopes()), msalToken.getAccount()).build();
} else {
parameters = SilentParameters.builder(new HashSet<>(request.getScopes())).build();
}
return Mono.defer(() -> {
try {
return Mono.fromFuture(publicClientApplication.acquireTokenSilently(parameters))
.map(ar -> new MsalToken(ar, options));
} catch (MalformedURLException e) {
return Mono.error(e);
}
});
}
/**
* Asynchronously acquire a token from Active Directory with a device code challenge. Active Directory will provide
* a device code for login and the user must meet the challenge by authenticating in a browser on the current or a
* different device.
*
* @param request the details of the token request
* @param deviceCodeConsumer the user provided closure that will consume the device code challenge
* @return a Publisher that emits an AccessToken when the device challenge is met, or an exception if the device
* code expires
*/
public Mono<MsalToken> authenticateWithDeviceCode(TokenRequestContext request,
Consumer<DeviceCodeInfo> deviceCodeConsumer) {
return Mono.fromFuture(() -> {
DeviceCodeFlowParameters parameters = DeviceCodeFlowParameters.builder(new HashSet<>(request.getScopes()),
dc -> deviceCodeConsumer.accept(new DeviceCodeInfo(dc.userCode(), dc.deviceCode(),
dc.verificationUri(), OffsetDateTime.now().plusSeconds(dc.expiresIn()), dc.message()))).build();
return publicClientApplication.acquireToken(parameters);
}).map(ar -> new MsalToken(ar, options));
}
/**
* Asynchronously acquire a token from Active Directory with an authorization code from an oauth flow.
*
* @param request the details of the token request
* @param authorizationCode the oauth2 authorization code
* @param redirectUrl the redirectUrl where the authorization code is sent to
* @return a Publisher that emits an AccessToken
*/
public Mono<MsalToken> authenticateWithAuthorizationCode(TokenRequestContext request, String authorizationCode,
URI redirectUrl) {
return Mono.fromFuture(() -> publicClientApplication.acquireToken(
AuthorizationCodeParameters.builder(authorizationCode, redirectUrl)
.scopes(new HashSet<>(request.getScopes()))
.build()))
.map(ar -> new MsalToken(ar, options));
}
/**
* Asynchronously acquire a token from Active Directory by opening a browser and wait for the user to login. The
* credential will run a minimal local HttpServer at the given port, so {@code http:
* listed as a valid reply URL for the application.
*
* @param request the details of the token request
* @param port the port on which the HTTP server is listening
* @return a Publisher that emits an AccessToken
*/
public Mono<MsalToken> authenticateWithBrowserInteraction(TokenRequestContext request, int port) {
String authorityUrl = options.getAuthorityHost().replaceAll("/+$", "") + "/" + tenantId;
return AuthorizationCodeListener.create(port)
.flatMap(server -> {
URI redirectUri;
String browserUri;
try {
redirectUri = new URI(String.format("http:
browserUri =
String.format("%s/oauth2/v2.0/authorize?response_type=code&response_mode=query&prompt"
+ "=select_account&client_id=%s&redirect_uri=%s&state=%s&scope=%s",
authorityUrl,
clientId,
redirectUri.toString(),
UUID.randomUUID(),
String.join(" ", request.getScopes()));
} catch (URISyntaxException e) {
return server.dispose().then(Mono.error(e));
}
return server.listen()
.mergeWith(Mono.<String>fromRunnable(() -> {
try {
openUrl(browserUri);
} catch (IOException e) {
throw logger.logExceptionAsError(new IllegalStateException(e));
}
}).subscribeOn(Schedulers.newSingle("browser")))
.next()
.flatMap(code -> authenticateWithAuthorizationCode(request, code, redirectUri))
.onErrorResume(t -> server.dispose().then(Mono.error(t)))
.flatMap(msalToken -> server.dispose().then(Mono.just(msalToken)));
});
}
/**
* Asynchronously acquire a token from the App Service Managed Service Identity endpoint.
*
* @param msiEndpoint the endpoint to acquire token from
* @param msiSecret the secret to acquire token with
* @param request the details of the token request
* @return a Publisher that emits an AccessToken
*/
public Mono<AccessToken> authenticateToManagedIdentityEndpoint(String msiEndpoint, String msiSecret,
TokenRequestContext request) {
return Mono.fromCallable(() -> {
String resource = ScopeUtil.scopesToResource(request.getScopes());
HttpURLConnection connection = null;
StringBuilder payload = new StringBuilder();
payload.append("resource=");
payload.append(URLEncoder.encode(resource, "UTF-8"));
payload.append("&api-version=");
payload.append(URLEncoder.encode("2017-09-01", "UTF-8"));
if (clientId != null) {
payload.append("&clientid=");
payload.append(URLEncoder.encode(clientId, "UTF-8"));
}
try {
URL url = new URL(String.format("%s?%s", msiEndpoint, payload));
connection = (HttpURLConnection) url.openConnection();
connection.setRequestMethod("GET");
if (msiSecret != null) {
connection.setRequestProperty("Secret", msiSecret);
}
connection.setRequestProperty("Metadata", "true");
connection.connect();
Scanner s = new Scanner(connection.getInputStream(), StandardCharsets.UTF_8.name())
.useDelimiter("\\A");
String result = s.hasNext() ? s.next() : "";
MSIToken msiToken = SERIALIZER_ADAPTER.deserialize(result, MSIToken.class, SerializerEncoding.JSON);
return new IdentityToken(msiToken.getToken(), msiToken.getExpiresAt(), options);
} finally {
if (connection != null) {
connection.disconnect();
}
}
});
}
/**
* Asynchronously acquire a token from the Virtual Machine IMDS endpoint.
*
* @param request the details of the token request
* @return a Publisher that emits an AccessToken
*/
public Mono<AccessToken> authenticateToIMDSEndpoint(TokenRequestContext request) {
String resource = ScopeUtil.scopesToResource(request.getScopes());
StringBuilder payload = new StringBuilder();
final int imdsUpgradeTimeInMs = 70 * 1000;
try {
payload.append("api-version=");
payload.append(URLEncoder.encode("2018-02-01", "UTF-8"));
payload.append("&resource=");
payload.append(URLEncoder.encode(resource, "UTF-8"));
if (clientId != null) {
payload.append("&client_id=");
payload.append(URLEncoder.encode(clientId, "UTF-8"));
}
} catch (IOException exception) {
return Mono.error(exception);
}
return checkIMDSAvailable().flatMap(available -> Mono.fromCallable(() -> {
int retry = 1;
while (retry <= options.getMaxRetry()) {
URL url = null;
HttpURLConnection connection = null;
try {
url =
new URL(String.format("http:
payload.toString()));
connection = (HttpURLConnection) url.openConnection();
connection.setRequestMethod("GET");
connection.setRequestProperty("Metadata", "true");
connection.connect();
Scanner s = new Scanner(connection.getInputStream(), StandardCharsets.UTF_8.name())
.useDelimiter("\\A");
String result = s.hasNext() ? s.next() : "";
MSIToken msiToken = SERIALIZER_ADAPTER.deserialize(result,
MSIToken.class, SerializerEncoding.JSON);
return new IdentityToken(msiToken.getToken(), msiToken.getExpiresAt(), options);
} catch (IOException exception) {
if (connection == null) {
throw logger.logExceptionAsError(new RuntimeException(
String.format("Could not connect to the url: %s.", url), exception));
}
int responseCode = connection.getResponseCode();
if (responseCode == 410
|| responseCode == 429
|| responseCode == 404
|| (responseCode >= 500 && responseCode <= 599)) {
int retryTimeoutInMs = options.getRetryTimeout()
.apply(Duration.ofSeconds(RANDOM.nextInt(retry))).getNano() / 1000;
retryTimeoutInMs =
(responseCode == 410 && retryTimeoutInMs < imdsUpgradeTimeInMs) ? imdsUpgradeTimeInMs
: retryTimeoutInMs;
retry++;
if (retry > options.getMaxRetry()) {
break;
} else {
sleep(retryTimeoutInMs);
}
} else {
throw logger.logExceptionAsError(new RuntimeException(
"Couldn't acquire access token from IMDS, verify your objectId, "
+ "clientId or msiResourceId", exception));
}
} finally {
if (connection != null) {
connection.disconnect();
}
}
}
throw logger.logExceptionAsError(new RuntimeException(
String.format("MSI: Failed to acquire tokens after retrying %s times",
options.getMaxRetry())));
}));
}
private Mono<Boolean> checkIMDSAvailable() {
StringBuilder payload = new StringBuilder();
try {
payload.append("api-version=");
payload.append(URLEncoder.encode("2018-02-01", "UTF-8"));
} catch (IOException exception) {
return Mono.error(exception);
}
return Mono.fromCallable(() -> {
HttpURLConnection connection = null;
URL url = new URL(String.format("http:
payload.toString()));
try {
connection = (HttpURLConnection) url.openConnection();
connection.setRequestMethod("GET");
connection.setConnectTimeout(500);
connection.connect();
} finally {
if (connection != null) {
connection.disconnect();
}
}
return true;
});
}
private static void sleep(int millis) {
try {
Thread.sleep(millis);
} catch (InterruptedException ex) {
throw new IllegalStateException(ex);
}
}
private static Proxy proxyOptionsToJavaNetProxy(ProxyOptions options) {
switch (options.getType()) {
case SOCKS4:
case SOCKS5:
return new Proxy(Type.SOCKS, options.getAddress());
case HTTP:
default:
return new Proxy(Type.HTTP, options.getAddress());
}
}
private String getSafeWorkingDirectory() {
if (isWindowsPlatform()) {
if (CoreUtils.isNullOrEmpty(DEFAULT_WINDOWS_SYSTEM_ROOT)) {
return null;
}
return DEFAULT_WINDOWS_SYSTEM_ROOT + "\\system32";
} else {
return DEFAULT_MAC_LINUX_PATH;
}
}
private boolean isWindowsPlatform() {
return System.getProperty("os.name").contains("Windows");
}
private String redactInfo(String regex, String input) {
return input.replaceAll(regex, "****");
}
void openUrl(String url) throws IOException {
Runtime rt = Runtime.getRuntime();
String os = System.getProperty("os.name").toLowerCase(Locale.ROOT);
if (os.contains("win")) {
rt.exec("rundll32 url.dll,FileProtocolHandler " + url);
} else if (os.contains("mac")) {
rt.exec("open " + url);
} else if (os.contains("nix") || os.contains("nux")) {
rt.exec("xdg-open " + url);
} else {
logger.error("Browser could not be opened - please open {} in a browser on this device.", url);
}
}
} |
Put null checking in method at the first place. | public ScoringParameter(String name, GeoPoint value) {
this(name, toLonLatStrings(value));
} | this(name, toLonLatStrings(value)); | public ScoringParameter(String name, GeoPoint value) {
this(name, toLonLatStrings(value));
} | class with the given name and GeographyPoint value.
*
* @param name Name of the scoring parameter.
* @param value Value of the scoring parameter.
*/ | class with the given name and GeographyPoint value.
*
* @param name Name of the scoring parameter.
* @param value Value of the scoring parameter.
*/ |
Reverts the minus 2 minutes in AccessToken: https://github.com/Azure/azure-sdk-for-java/blob/master/sdk/core/azure-core/src/main/java/com/azure/core/credential/AccessToken.java#L22 | public MsalToken(IAuthenticationResult msalResult, IdentityClientOptions options) {
super(msalResult.accessToken(), OffsetDateTime.ofInstant(
msalResult.expiresOnDate().toInstant().minus(options.getRefreshBeforeExpiry()), ZoneOffset.UTC)
.plusMinutes(2));
this.account = msalResult.account();
} | .plusMinutes(2)); | public MsalToken(IAuthenticationResult msalResult, IdentityClientOptions options) {
super(msalResult.accessToken(),
OffsetDateTime.ofInstant(msalResult.expiresOnDate().toInstant(), ZoneOffset.UTC),
options);
this.account = msalResult.account();
} | class MsalToken extends AccessToken {
private IAccount account;
/**
* Creates an access token instance.
*
* @param msalResult the raw authentication result returned by MSAL
*/
/**
* @return the signed in account
*/
public IAccount getAccount() {
return account;
}
} | class MsalToken extends IdentityToken {
private IAccount account;
/**
* Creates an access token instance.
*
* @param msalResult the raw authentication result returned by MSAL
*/
/**
* @return the signed in account
*/
public IAccount getAccount() {
return account;
}
} |
There is duplicate code here and above for creating the `AccessToken`, which is a possible source of bugs in the future if they are not kept in sync. Consider creating a method to centralise it. | public Mono<AccessToken> authenticateToIMDSEndpoint(TokenRequestContext request) {
String resource = ScopeUtil.scopesToResource(request.getScopes());
StringBuilder payload = new StringBuilder();
final int imdsUpgradeTimeInMs = 70 * 1000;
try {
payload.append("api-version=");
payload.append(URLEncoder.encode("2018-02-01", "UTF-8"));
payload.append("&resource=");
payload.append(URLEncoder.encode(resource, "UTF-8"));
if (clientId != null) {
payload.append("&client_id=");
payload.append(URLEncoder.encode(clientId, "UTF-8"));
}
} catch (IOException exception) {
return Mono.error(exception);
}
return checkIMDSAvailable().flatMap(available -> Mono.fromCallable(() -> {
int retry = 1;
while (retry <= options.getMaxRetry()) {
URL url = null;
HttpURLConnection connection = null;
try {
url =
new URL(String.format("http:
payload.toString()));
connection = (HttpURLConnection) url.openConnection();
connection.setRequestMethod("GET");
connection.setRequestProperty("Metadata", "true");
connection.connect();
Scanner s = new Scanner(connection.getInputStream(), StandardCharsets.UTF_8.name())
.useDelimiter("\\A");
String result = s.hasNext() ? s.next() : "";
MSIToken msiToken = SERIALIZER_ADAPTER.<MSIToken>deserialize(result, MSIToken.class, SerializerEncoding.JSON);
return new AccessToken(msiToken.getToken(),
msiToken.getExpiresAt().plusMinutes(2).minus(options.getRefreshBeforeExpiry()));
} catch (IOException exception) {
if (connection == null) {
throw logger.logExceptionAsError(new RuntimeException(
String.format("Could not connect to the url: %s.", url), exception));
}
int responseCode = connection.getResponseCode();
if (responseCode == 410
|| responseCode == 429
|| responseCode == 404
|| (responseCode >= 500 && responseCode <= 599)) {
int retryTimeoutInMs = options.getRetryTimeout()
.apply(Duration.ofSeconds(RANDOM.nextInt(retry))).getNano() / 1000;
retryTimeoutInMs =
(responseCode == 410 && retryTimeoutInMs < imdsUpgradeTimeInMs) ? imdsUpgradeTimeInMs
: retryTimeoutInMs;
retry++;
if (retry > options.getMaxRetry()) {
break;
} else {
sleep(retryTimeoutInMs);
}
} else {
throw logger.logExceptionAsError(new RuntimeException(
"Couldn't acquire access token from IMDS, verify your objectId, "
+ "clientId or msiResourceId", exception));
}
} finally {
if (connection != null) {
connection.disconnect();
}
}
}
throw logger.logExceptionAsError(new RuntimeException(
String.format("MSI: Failed to acquire tokens after retrying %s times",
options.getMaxRetry())));
}));
} | msiToken.getExpiresAt().plusMinutes(2).minus(options.getRefreshBeforeExpiry())); | public Mono<AccessToken> authenticateToIMDSEndpoint(TokenRequestContext request) {
String resource = ScopeUtil.scopesToResource(request.getScopes());
StringBuilder payload = new StringBuilder();
final int imdsUpgradeTimeInMs = 70 * 1000;
try {
payload.append("api-version=");
payload.append(URLEncoder.encode("2018-02-01", "UTF-8"));
payload.append("&resource=");
payload.append(URLEncoder.encode(resource, "UTF-8"));
if (clientId != null) {
payload.append("&client_id=");
payload.append(URLEncoder.encode(clientId, "UTF-8"));
}
} catch (IOException exception) {
return Mono.error(exception);
}
return checkIMDSAvailable().flatMap(available -> Mono.fromCallable(() -> {
int retry = 1;
while (retry <= options.getMaxRetry()) {
URL url = null;
HttpURLConnection connection = null;
try {
url =
new URL(String.format("http:
payload.toString()));
connection = (HttpURLConnection) url.openConnection();
connection.setRequestMethod("GET");
connection.setRequestProperty("Metadata", "true");
connection.connect();
Scanner s = new Scanner(connection.getInputStream(), StandardCharsets.UTF_8.name())
.useDelimiter("\\A");
String result = s.hasNext() ? s.next() : "";
MSIToken msiToken = SERIALIZER_ADAPTER.deserialize(result,
MSIToken.class, SerializerEncoding.JSON);
return new IdentityToken(msiToken.getToken(), msiToken.getExpiresAt(), options);
} catch (IOException exception) {
if (connection == null) {
throw logger.logExceptionAsError(new RuntimeException(
String.format("Could not connect to the url: %s.", url), exception));
}
int responseCode = connection.getResponseCode();
if (responseCode == 410
|| responseCode == 429
|| responseCode == 404
|| (responseCode >= 500 && responseCode <= 599)) {
int retryTimeoutInMs = options.getRetryTimeout()
.apply(Duration.ofSeconds(RANDOM.nextInt(retry))).getNano() / 1000;
retryTimeoutInMs =
(responseCode == 410 && retryTimeoutInMs < imdsUpgradeTimeInMs) ? imdsUpgradeTimeInMs
: retryTimeoutInMs;
retry++;
if (retry > options.getMaxRetry()) {
break;
} else {
sleep(retryTimeoutInMs);
}
} else {
throw logger.logExceptionAsError(new RuntimeException(
"Couldn't acquire access token from IMDS, verify your objectId, "
+ "clientId or msiResourceId", exception));
}
} finally {
if (connection != null) {
connection.disconnect();
}
}
}
throw logger.logExceptionAsError(new RuntimeException(
String.format("MSI: Failed to acquire tokens after retrying %s times",
options.getMaxRetry())));
}));
} | class IdentityClient {
// Shared, thread-safe JSON serializer used to deserialize token responses.
private static final SerializerAdapter SERIALIZER_ADAPTER = JacksonAdapter.createDefaultSerializerAdapter();
// Used only to randomize the IMDS retry back-off; not security sensitive.
private static final Random RANDOM = new Random();
private final ClientLogger logger = new ClientLogger(IdentityClient.class);
private final IdentityClientOptions options;
// Null when no clientId was supplied; public-client flows must not be used in that case.
private final PublicClientApplication publicClientApplication;
private final String tenantId;
private final String clientId;
/**
* Creates an IdentityClient with the given options.
*
* @param tenantId the tenant ID of the application.
* @param clientId the client ID of the application.
* @param options the options configuring the client.
*/
IdentityClient(String tenantId, String clientId, IdentityClientOptions options) {
// Fall back to the multi-tenant "common" authority when no tenant is supplied.
if (tenantId == null) {
tenantId = "common";
}
if (options == null) {
options = new IdentityClientOptions();
}
this.tenantId = tenantId;
this.clientId = clientId;
this.options = options;
if (clientId == null) {
// Without a client id there is nothing to build a public client for.
this.publicClientApplication = null;
} else {
// Strip trailing slashes so the tenant path segment is appended exactly once.
String authorityUrl = options.getAuthorityHost().replaceAll("/+$", "") + "/organizations/" + tenantId;
PublicClientApplication.Builder publicClientApplicationBuilder = PublicClientApplication.builder(clientId);
// Route MSAL's HTTP traffic through the configured azure-core pipeline when one is provided.
if (options.getHttpPipeline() != null) {
publicClientApplicationBuilder = publicClientApplicationBuilder
.httpClient(new HttpPipelineAdapter(options.getHttpPipeline()));
}
try {
publicClientApplicationBuilder = publicClientApplicationBuilder.authority(authorityUrl);
} catch (MalformedURLException e) {
// A bad authority host is a configuration error; surface it immediately.
throw logger.logExceptionAsWarning(new IllegalStateException(e));
}
if (options.getProxyOptions() != null) {
publicClientApplicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions()));
}
this.publicClientApplication = publicClientApplicationBuilder.build();
}
}
/**
* Asynchronously acquire a token from Active Directory with a client secret.
*
* @param clientSecret the client secret of the application
* @param request the details of the token request
* @return a Publisher that emits an AccessToken
*/
public Mono<AccessToken> authenticateWithClientSecret(String clientSecret, TokenRequestContext request) {
String authorityUrl = options.getAuthorityHost().replaceAll("/+$", "") + "/" + tenantId;
try {
ConfidentialClientApplication.Builder applicationBuilder =
ConfidentialClientApplication.builder(clientId, ClientCredentialFactory.createFromSecret(clientSecret))
.authority(authorityUrl);
if (options.getProxyOptions() != null) {
applicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions()));
}
if (options.getHttpPipeline() != null) {
applicationBuilder.httpClient(new HttpPipelineAdapter(options.getHttpPipeline()));
}
ConfidentialClientApplication application = applicationBuilder.build();
// NOTE(review): acquireToken is invoked eagerly here (fromFuture takes the already-started
// future), so the token request begins when this method is called, not on subscription.
return Mono.fromFuture(application.acquireToken(
ClientCredentialParameters.builder(new HashSet<>(request.getScopes()))
.build()))
.map(ar -> new MsalToken(ar, options));
} catch (MalformedURLException e) {
return Mono.error(e);
}
}
/**
* Asynchronously acquire a token from Active Directory with a PKCS12 certificate.
*
* @param pfxCertificatePath the path to the PKCS12 certificate of the application
* @param pfxCertificatePassword the password protecting the PFX certificate
* @param request the details of the token request
* @return a Publisher that emits an AccessToken
*/
public Mono<AccessToken> authenticateWithPfxCertificate(String pfxCertificatePath, String pfxCertificatePassword,
        TokenRequestContext request) {
    String authorityUrl = options.getAuthorityHost().replaceAll("/+$", "") + "/" + tenantId;
    return Mono.fromCallable(() -> {
        ConfidentialClientApplication.Builder applicationBuilder;
        // try-with-resources closes the PKCS12 stream once MSAL has read the certificate;
        // previously the FileInputStream was never closed (file-handle leak).
        try (FileInputStream pfxCertificateStream = new FileInputStream(pfxCertificatePath)) {
            applicationBuilder =
                ConfidentialClientApplication.builder(clientId, ClientCredentialFactory.createFromCertificate(
                    pfxCertificateStream, pfxCertificatePassword))
                    .authority(authorityUrl);
        }
        if (options.getProxyOptions() != null) {
            applicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions()));
        }
        if (options.getHttpPipeline() != null) {
            applicationBuilder.httpClient(new HttpPipelineAdapter(options.getHttpPipeline()));
        }
        return applicationBuilder.build();
    }).flatMap(application -> Mono.fromFuture(application.acquireToken(
        ClientCredentialParameters.builder(new HashSet<>(request.getScopes())).build())))
        .map(ar -> new MsalToken(ar, options));
}
/**
* Asynchronously acquire a token from Active Directory with a PEM certificate.
*
* @param pemCertificatePath the path to the PEM certificate of the application
* @param request the details of the token request
* @return a Publisher that emits an AccessToken
*/
public Mono<AccessToken> authenticateWithPemCertificate(String pemCertificatePath, TokenRequestContext request) {
String authorityUrl = options.getAuthorityHost().replaceAll("/+$", "") + "/" + tenantId;
try {
// The PEM file must contain both the private key and the certificate; both halves
// are parsed from the same byte array.
byte[] pemCertificateBytes = Files.readAllBytes(Paths.get(pemCertificatePath));
ConfidentialClientApplication.Builder applicationBuilder =
ConfidentialClientApplication.builder(clientId, ClientCredentialFactory.createFromCertificate(
CertificateUtil.privateKeyFromPem(pemCertificateBytes),
CertificateUtil.publicKeyFromPem(pemCertificateBytes)))
.authority(authorityUrl);
if (options.getProxyOptions() != null) {
applicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions()));
}
if (options.getHttpPipeline() != null) {
applicationBuilder.httpClient(new HttpPipelineAdapter(options.getHttpPipeline()));
}
ConfidentialClientApplication application = applicationBuilder.build();
// NOTE(review): file read and acquireToken both happen eagerly at call time rather than
// on subscription; consider deferring as the PFX overload does with Mono.fromCallable.
return Mono.fromFuture(application.acquireToken(
ClientCredentialParameters.builder(new HashSet<>(request.getScopes()))
.build()))
.map(ar -> new MsalToken(ar, options));
} catch (IOException e) {
return Mono.error(e);
}
}
/**
* Asynchronously acquire a token from Active Directory with a username and a password.
*
* @param request the details of the token request
* @param username the username of the user
* @param password the password of the user
* @return a Publisher that emits an AccessToken
*/
public Mono<MsalToken> authenticateWithUsernamePassword(TokenRequestContext request,
        String username, String password) {
    // Build the ROPC (resource-owner password credential) request up front, then hand the
    // already-started MSAL future to Reactor.
    UserNamePasswordParameters parameters = UserNamePasswordParameters
        .builder(new HashSet<>(request.getScopes()), username, password.toCharArray())
        .build();
    return Mono.fromFuture(publicClientApplication.acquireToken(parameters))
        .map(result -> new MsalToken(result, options));
}
/**
* Asynchronously acquire a token from the currently logged in client.
*
* @param request the details of the token request
* @return a Publisher that emits an AccessToken
*/
public Mono<MsalToken> authenticateWithUserRefreshToken(TokenRequestContext request, MsalToken msalToken) {
SilentParameters parameters;
// Scope the silent request to the previously signed-in account when one is available.
if (msalToken.getAccount() != null) {
parameters = SilentParameters.builder(new HashSet<>(request.getScopes()), msalToken.getAccount()).build();
} else {
parameters = SilentParameters.builder(new HashSet<>(request.getScopes())).build();
}
// Deferred so the cache lookup/refresh runs on subscription, not at assembly time.
return Mono.defer(() -> {
try {
return Mono.fromFuture(publicClientApplication.acquireTokenSilently(parameters)).map(ar -> new MsalToken(ar, options));
} catch (MalformedURLException e) {
return Mono.error(e);
}
});
}
/**
* Asynchronously acquire a token from Active Directory with a device code challenge. Active Directory will provide
* a device code for login and the user must meet the challenge by authenticating in a browser on the current or a
* different device.
*
* @param request the details of the token request
* @param deviceCodeConsumer the user provided closure that will consume the device code challenge
* @return a Publisher that emits an AccessToken when the device challenge is met, or an exception if the device
* code expires
*/
public Mono<MsalToken> authenticateWithDeviceCode(TokenRequestContext request,
Consumer<DeviceCodeInfo> deviceCodeConsumer) {
// Supplier overload defers the device-code request until subscription; the consumer is
// invoked by MSAL with the challenge details once Active Directory issues a device code.
return Mono.fromFuture(() -> {
DeviceCodeFlowParameters parameters = DeviceCodeFlowParameters.builder(new HashSet<>(request.getScopes()),
dc -> deviceCodeConsumer.accept(new DeviceCodeInfo(dc.userCode(), dc.deviceCode(),
dc.verificationUri(), OffsetDateTime.now().plusSeconds(dc.expiresIn()), dc.message()))).build();
return publicClientApplication.acquireToken(parameters);
}).map(ar -> new MsalToken(ar, options));
}
/**
* Asynchronously acquire a token from Active Directory with an authorization code from an oauth flow.
*
* @param request the details of the token request
* @param authorizationCode the oauth2 authorization code
* @param redirectUrl the redirectUrl where the authorization code is sent to
* @return a Publisher that emits an AccessToken
*/
public Mono<MsalToken> authenticateWithAuthorizationCode(TokenRequestContext request, String authorizationCode,
        URI redirectUrl) {
    // The supplier keeps the code-redemption deferred until subscription, exactly as before;
    // the parameter object is simply built in a named local for readability.
    return Mono.fromFuture(() -> {
        AuthorizationCodeParameters parameters = AuthorizationCodeParameters
            .builder(authorizationCode, redirectUrl)
            .scopes(new HashSet<>(request.getScopes()))
            .build();
        return publicClientApplication.acquireToken(parameters);
    }).map(result -> new MsalToken(result, options));
}
/**
* Asynchronously acquire a token from Active Directory by opening a browser and wait for the user to login. The
* credential will run a minimal local HttpServer at the given port, so {@code http:
* listed as a valid reply URL for the application.
*
* @param request the details of the token request
* @param port the port on which the HTTP server is listening
* @return a Publisher that emits an AccessToken
*/
public Mono<MsalToken> authenticateWithBrowserInteraction(TokenRequestContext request, int port) {
String authorityUrl = options.getAuthorityHost().replaceAll("/+$", "") + "/" + tenantId;
// Start a local listener for the auth-code redirect, open the system browser at the
// authorize endpoint, then redeem the returned code. The server is disposed on every path.
return AuthorizationCodeListener.create(port)
.flatMap(server -> {
URI redirectUri;
String browserUri;
try {
// NOTE(review): this line is truncated in this copy (the format-string literal is cut
// off after "http:"); restore the localhost redirect URI from the upstream source.
redirectUri = new URI(String.format("http:
browserUri =
String.format("%s/oauth2/v2.0/authorize?response_type=code&response_mode=query&prompt"
+ "=select_account&client_id=%s&redirect_uri=%s&state=%s&scope=%s",
authorityUrl,
clientId,
redirectUri.toString(),
UUID.randomUUID(),
String.join(" ", request.getScopes()));
} catch (URISyntaxException e) {
return server.dispose().then(Mono.error(e));
}
// Race the listener against the browser launch; take the first emitted auth code.
return server.listen()
.mergeWith(Mono.<String>fromRunnable(() -> {
try {
openUrl(browserUri);
} catch (IOException e) {
throw logger.logExceptionAsError(new IllegalStateException(e));
}
}).subscribeOn(Schedulers.newSingle("browser")))
.next()
.flatMap(code -> authenticateWithAuthorizationCode(request, code, redirectUri))
.onErrorResume(t -> server.dispose().then(Mono.error(t)))
.flatMap(msalToken -> server.dispose().then(Mono.just(msalToken)));
});
}
/**
* Asynchronously acquire a token from the App Service Managed Service Identity endpoint.
*
* @param msiEndpoint the endpoint to acquire token from
* @param msiSecret the secret to acquire token with
* @param request the details of the token request
* @return a Publisher that emits an AccessToken
*/
public Mono<AccessToken> authenticateToManagedIdentityEndpoint(String msiEndpoint, String msiSecret,
TokenRequestContext request) {
// NOTE(review): all of the blocking HTTP work below runs eagerly when this method is
// called, not on subscription; consider wrapping in Mono.fromCallable.
String resource = ScopeUtil.scopesToResource(request.getScopes());
HttpURLConnection connection = null;
StringBuilder payload = new StringBuilder();
try {
payload.append("resource=");
payload.append(URLEncoder.encode(resource, "UTF-8"));
payload.append("&api-version=");
payload.append(URLEncoder.encode("2017-09-01", "UTF-8"));
if (clientId != null) {
payload.append("&clientid=");
payload.append(URLEncoder.encode(clientId, "UTF-8"));
}
} catch (IOException exception) {
return Mono.error(exception);
}
try {
URL url = new URL(String.format("%s?%s", msiEndpoint, payload));
connection = (HttpURLConnection) url.openConnection();
connection.setRequestMethod("GET");
// The App Service MSI endpoint authenticates callers via this shared-secret header.
if (msiSecret != null) {
connection.setRequestProperty("Secret", msiSecret);
}
connection.setRequestProperty("Metadata", "true");
connection.connect();
// "\\A" delimiter makes the Scanner read the whole response body in one token.
Scanner s = new Scanner(connection.getInputStream(), StandardCharsets.UTF_8.name()).useDelimiter("\\A");
String result = s.hasNext() ? s.next() : "";
MSIToken msiToken = SERIALIZER_ADAPTER.deserialize(result, MSIToken.class, SerializerEncoding.JSON);
// Pads expiry by two minutes before subtracting the configured refresh offset;
// see the review note about centralizing this two-minute constant.
return Mono.just(new AccessToken(msiToken.getToken(),
msiToken.getExpiresAt().plusMinutes(2).minus(options.getRefreshBeforeExpiry())));
} catch (IOException e) {
return Mono.error(e);
} finally {
if (connection != null) {
connection.disconnect();
}
}
}
/**
* Asynchronously acquire a token from the Virtual Machine IMDS endpoint.
*
* @param request the details of the token request
* @return a Publisher that emits an AccessToken
*/
// Probes the IMDS endpoint with a short (500ms) connect timeout; emits true on success and
// an error (IOException from the callable) when IMDS is unreachable.
private Mono<Boolean> checkIMDSAvailable() {
StringBuilder payload = new StringBuilder();
try {
payload.append("api-version=");
payload.append(URLEncoder.encode("2018-02-01", "UTF-8"));
} catch (IOException exception) {
return Mono.error(exception);
}
return Mono.fromCallable(() -> {
HttpURLConnection connection = null;
// NOTE(review): this line is truncated in this copy (the IMDS URL literal is cut off
// after "http:"); restore the full endpoint from the upstream source.
URL url = new URL(String.format("http:
payload.toString()));
try {
connection = (HttpURLConnection) url.openConnection();
connection.setRequestMethod("GET");
connection.setConnectTimeout(500);
connection.connect();
} finally {
if (connection != null) {
connection.disconnect();
}
}
return true;
});
}
// Sleeps the current thread for the given number of milliseconds, used for retry back-off.
private static void sleep(int millis) {
    try {
        Thread.sleep(millis);
    } catch (InterruptedException ex) {
        // Restore the interrupt status so callers further up the stack can observe the
        // interruption; previously it was silently cleared.
        Thread.currentThread().interrupt();
        throw new IllegalStateException(ex);
    }
}
// Maps azure-core proxy configuration onto java.net.Proxy. Both SOCKS variants collapse to
// java.net's single SOCKS type; HTTP and anything else map to an HTTP proxy.
private static Proxy proxyOptionsToJavaNetProxy(ProxyOptions options) {
    Type javaProxyType;
    switch (options.getType()) {
        case SOCKS4:
        case SOCKS5:
            javaProxyType = Type.SOCKS;
            break;
        default:
            javaProxyType = Type.HTTP;
            break;
    }
    return new Proxy(javaProxyType, options.getAddress());
}
// Opens the platform's default browser at the given URL. Arguments are passed as a String[]
// so the URL reaches the OS as a single argv element instead of being re-tokenized on
// whitespace by Runtime.exec(String).
void openUrl(String url) throws IOException {
    Runtime rt = Runtime.getRuntime();
    String os = System.getProperty("os.name").toLowerCase(Locale.ROOT);
    if (os.contains("win")) {
        rt.exec(new String[]{"rundll32", "url.dll,FileProtocolHandler", url});
    } else if (os.contains("mac")) {
        rt.exec(new String[]{"open", url});
    } else if (os.contains("nix") || os.contains("nux")) {
        rt.exec(new String[]{"xdg-open", url});
    } else {
        logger.error("Browser could not be opened - please open {} in a browser on this device.", url);
    }
}
} | class IdentityClient {
// Shared, thread-safe JSON serializer used to deserialize token responses.
private static final SerializerAdapter SERIALIZER_ADAPTER = JacksonAdapter.createDefaultSerializerAdapter();
// Used only to randomize the IMDS retry back-off; not security sensitive.
private static final Random RANDOM = new Random();
private final ClientLogger logger = new ClientLogger(IdentityClient.class);
private final IdentityClientOptions options;
// Null when no clientId was supplied; public-client flows must not be used in that case.
private final PublicClientApplication publicClientApplication;
private final String tenantId;
private final String clientId;
// Set by the constructor when an azure-core pipeline (explicit or default) backs MSAL's HTTP
// traffic; null only when a plain java.net proxy was configured instead.
private HttpPipelineAdapter httpPipelineAdapter;
/**
* Creates an IdentityClient with the given options.
*
* @param tenantId the tenant ID of the application.
* @param clientId the client ID of the application.
* @param options the options configuring the client.
*/
IdentityClient(String tenantId, String clientId, IdentityClientOptions options) {
// Fall back to the multi-tenant "common" authority when no tenant is supplied.
if (tenantId == null) {
tenantId = "common";
}
if (options == null) {
options = new IdentityClientOptions();
}
this.tenantId = tenantId;
this.clientId = clientId;
this.options = options;
if (clientId == null) {
// Without a client id there is nothing to build a public client for.
this.publicClientApplication = null;
} else {
// Strip trailing slashes so the tenant path segment is appended exactly once.
String authorityUrl = options.getAuthorityHost().replaceAll("/+$", "") + "/organizations/" + tenantId;
PublicClientApplication.Builder publicClientApplicationBuilder = PublicClientApplication.builder(clientId);
try {
publicClientApplicationBuilder = publicClientApplicationBuilder.authority(authorityUrl);
} catch (MalformedURLException e) {
throw logger.logExceptionAsWarning(new IllegalStateException(e));
}
// HTTP transport precedence: explicit pipeline > explicit HttpClient (wrapped in a
// default pipeline) > plain java.net proxy > default HttpClient in a default pipeline.
HttpPipeline httpPipeline = options.getHttpPipeline();
if (httpPipeline != null) {
httpPipelineAdapter = new HttpPipelineAdapter(httpPipeline);
publicClientApplicationBuilder.httpClient(httpPipelineAdapter);
} else {
HttpClient httpClient = options.getHttpClient();
if (httpClient != null) {
httpPipelineAdapter = new HttpPipelineAdapter(setupPipeline(httpClient));
publicClientApplicationBuilder.httpClient(httpPipelineAdapter);
} else if (options.getProxyOptions() != null) {
publicClientApplicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions()));
} else {
httpPipelineAdapter = new HttpPipelineAdapter(setupPipeline(HttpClient.createDefault()));
publicClientApplicationBuilder.httpClient(httpPipelineAdapter);
}
}
this.publicClientApplication = publicClientApplicationBuilder.build();
}
}
/**
* Asynchronously acquire a token from Active Directory with a client secret.
*
* @param clientSecret the client secret of the application
* @param request the details of the token request
* @return a Publisher that emits an AccessToken
*/
public Mono<AccessToken> authenticateWithClientSecret(String clientSecret, TokenRequestContext request) {
String authorityUrl = options.getAuthorityHost().replaceAll("/+$", "") + "/" + tenantId;
try {
ConfidentialClientApplication.Builder applicationBuilder =
ConfidentialClientApplication.builder(clientId, ClientCredentialFactory.createFromSecret(clientSecret))
.authority(authorityUrl);
// Prefer the adapter built in the constructor; only fall back to a raw proxy when no
// azure-core pipeline backs this client.
if (httpPipelineAdapter != null) {
applicationBuilder.httpClient(httpPipelineAdapter);
} else if (options.getProxyOptions() != null) {
applicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions()));
}
ConfidentialClientApplication application = applicationBuilder.build();
// NOTE(review): acquireToken is invoked eagerly here, before subscription.
return Mono.fromFuture(application.acquireToken(
ClientCredentialParameters.builder(new HashSet<>(request.getScopes()))
.build()))
.map(ar -> new MsalToken(ar, options));
} catch (MalformedURLException e) {
return Mono.error(e);
}
}
// Wraps a raw HttpClient in the standard azure-core policy chain:
// before-retry policies, retry, after-retry policies, then logging.
private HttpPipeline setupPipeline(HttpClient httpClient) {
    List<HttpPipelinePolicy> policyChain = new ArrayList<>();
    HttpPolicyProviders.addBeforeRetryPolicies(policyChain);
    policyChain.add(new RetryPolicy());
    HttpPolicyProviders.addAfterRetryPolicies(policyChain);
    policyChain.add(new HttpLoggingPolicy(new HttpLogOptions()));
    HttpPipelinePolicy[] orderedPolicies = policyChain.toArray(new HttpPipelinePolicy[0]);
    return new HttpPipelineBuilder()
        .httpClient(httpClient)
        .policies(orderedPolicies)
        .build();
}
/**
* Asynchronously acquire a token from Active Directory with a PKCS12 certificate.
*
* @param pfxCertificatePath the path to the PKCS12 certificate of the application
* @param pfxCertificatePassword the password protecting the PFX certificate
* @param request the details of the token request
* @return a Publisher that emits an AccessToken
*/
public Mono<AccessToken> authenticateWithPfxCertificate(String pfxCertificatePath, String pfxCertificatePassword,
        TokenRequestContext request) {
    String authorityUrl = options.getAuthorityHost().replaceAll("/+$", "") + "/" + tenantId;
    return Mono.fromCallable(() -> {
        ConfidentialClientApplication.Builder applicationBuilder;
        // try-with-resources closes the PKCS12 stream once MSAL has read the certificate;
        // previously the FileInputStream was never closed (file-handle leak).
        try (FileInputStream pfxCertificateStream = new FileInputStream(pfxCertificatePath)) {
            applicationBuilder =
                ConfidentialClientApplication.builder(clientId, ClientCredentialFactory.createFromCertificate(
                    pfxCertificateStream, pfxCertificatePassword))
                    .authority(authorityUrl);
        }
        // Prefer the adapter built in the constructor; only fall back to a raw proxy when no
        // azure-core pipeline backs this client.
        if (httpPipelineAdapter != null) {
            applicationBuilder.httpClient(httpPipelineAdapter);
        } else if (options.getProxyOptions() != null) {
            applicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions()));
        }
        return applicationBuilder.build();
    }).flatMap(application -> Mono.fromFuture(application.acquireToken(
        ClientCredentialParameters.builder(new HashSet<>(request.getScopes())).build())))
        .map(ar -> new MsalToken(ar, options));
}
/**
* Asynchronously acquire a token from Active Directory with a PEM certificate.
*
* @param pemCertificatePath the path to the PEM certificate of the application
* @param request the details of the token request
* @return a Publisher that emits an AccessToken
*/
public Mono<AccessToken> authenticateWithPemCertificate(String pemCertificatePath, TokenRequestContext request) {
String authorityUrl = options.getAuthorityHost().replaceAll("/+$", "") + "/" + tenantId;
try {
// The PEM file must contain both the private key and the certificate; both halves
// are parsed from the same byte array.
byte[] pemCertificateBytes = Files.readAllBytes(Paths.get(pemCertificatePath));
ConfidentialClientApplication.Builder applicationBuilder =
ConfidentialClientApplication.builder(clientId, ClientCredentialFactory.createFromCertificate(
CertificateUtil.privateKeyFromPem(pemCertificateBytes),
CertificateUtil.publicKeyFromPem(pemCertificateBytes)))
.authority(authorityUrl);
if (httpPipelineAdapter != null) {
applicationBuilder.httpClient(httpPipelineAdapter);
} else if (options.getProxyOptions() != null) {
applicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions()));
}
ConfidentialClientApplication application = applicationBuilder.build();
// NOTE(review): file read and acquireToken both happen eagerly at call time rather than
// on subscription; consider deferring as the PFX overload does with Mono.fromCallable.
return Mono.fromFuture(application.acquireToken(
ClientCredentialParameters.builder(new HashSet<>(request.getScopes()))
.build()))
.map(ar -> new MsalToken(ar, options));
} catch (IOException e) {
return Mono.error(e);
}
}
/**
* Asynchronously acquire a token from Active Directory with a username and a password.
*
* @param request the details of the token request
* @param username the username of the user
* @param password the password of the user
* @return a Publisher that emits an AccessToken
*/
public Mono<MsalToken> authenticateWithUsernamePassword(TokenRequestContext request,
String username, String password) {
// Resource-owner password credential flow; the MSAL future is started eagerly here.
return Mono.fromFuture(publicClientApplication.acquireToken(
UserNamePasswordParameters.builder(new HashSet<>(request.getScopes()), username, password.toCharArray())
.build()))
.map(ar -> new MsalToken(ar, options));
}
/**
* Asynchronously acquire a token from the currently logged in client.
*
* @param request the details of the token request
* @return a Publisher that emits an AccessToken
*/
public Mono<MsalToken> authenticateWithUserRefreshToken(TokenRequestContext request, MsalToken msalToken) {
SilentParameters parameters;
// Scope the silent request to the previously signed-in account when one is available.
if (msalToken.getAccount() != null) {
parameters = SilentParameters.builder(new HashSet<>(request.getScopes()), msalToken.getAccount()).build();
} else {
parameters = SilentParameters.builder(new HashSet<>(request.getScopes())).build();
}
// Deferred so the cache lookup/refresh runs on subscription, not at assembly time.
return Mono.defer(() -> {
try {
return Mono.fromFuture(publicClientApplication.acquireTokenSilently(parameters))
.map(ar -> new MsalToken(ar, options));
} catch (MalformedURLException e) {
return Mono.error(e);
}
});
}
/**
* Asynchronously acquire a token from Active Directory with a device code challenge. Active Directory will provide
* a device code for login and the user must meet the challenge by authenticating in a browser on the current or a
* different device.
*
* @param request the details of the token request
* @param deviceCodeConsumer the user provided closure that will consume the device code challenge
* @return a Publisher that emits an AccessToken when the device challenge is met, or an exception if the device
* code expires
*/
public Mono<MsalToken> authenticateWithDeviceCode(TokenRequestContext request,
Consumer<DeviceCodeInfo> deviceCodeConsumer) {
// Supplier overload defers the device-code request until subscription; the consumer is
// invoked by MSAL with the challenge details once Active Directory issues a device code.
return Mono.fromFuture(() -> {
DeviceCodeFlowParameters parameters = DeviceCodeFlowParameters.builder(new HashSet<>(request.getScopes()),
dc -> deviceCodeConsumer.accept(new DeviceCodeInfo(dc.userCode(), dc.deviceCode(),
dc.verificationUri(), OffsetDateTime.now().plusSeconds(dc.expiresIn()), dc.message()))).build();
return publicClientApplication.acquireToken(parameters);
}).map(ar -> new MsalToken(ar, options));
}
/**
* Asynchronously acquire a token from Active Directory with an authorization code from an oauth flow.
*
* @param request the details of the token request
* @param authorizationCode the oauth2 authorization code
* @param redirectUrl the redirectUrl where the authorization code is sent to
* @return a Publisher that emits an AccessToken
*/
public Mono<MsalToken> authenticateWithAuthorizationCode(TokenRequestContext request, String authorizationCode,
URI redirectUrl) {
// Supplier overload defers redemption of the auth code until subscription.
return Mono.fromFuture(() -> publicClientApplication.acquireToken(
AuthorizationCodeParameters.builder(authorizationCode, redirectUrl)
.scopes(new HashSet<>(request.getScopes()))
.build()))
.map(ar -> new MsalToken(ar, options));
}
/**
* Asynchronously acquire a token from Active Directory by opening a browser and wait for the user to login. The
* credential will run a minimal local HttpServer at the given port, so {@code http:
* listed as a valid reply URL for the application.
*
* @param request the details of the token request
* @param port the port on which the HTTP server is listening
* @return a Publisher that emits an AccessToken
*/
public Mono<MsalToken> authenticateWithBrowserInteraction(TokenRequestContext request, int port) {
String authorityUrl = options.getAuthorityHost().replaceAll("/+$", "") + "/" + tenantId;
// Start a local listener for the auth-code redirect, open the system browser at the
// authorize endpoint, then redeem the returned code. The server is disposed on every path.
return AuthorizationCodeListener.create(port)
.flatMap(server -> {
URI redirectUri;
String browserUri;
try {
// NOTE(review): this line is truncated in this copy (the format-string literal is cut
// off after "http:"); restore the localhost redirect URI from the upstream source.
redirectUri = new URI(String.format("http:
browserUri =
String.format("%s/oauth2/v2.0/authorize?response_type=code&response_mode=query&prompt"
+ "=select_account&client_id=%s&redirect_uri=%s&state=%s&scope=%s",
authorityUrl,
clientId,
redirectUri.toString(),
UUID.randomUUID(),
String.join(" ", request.getScopes()));
} catch (URISyntaxException e) {
return server.dispose().then(Mono.error(e));
}
// Race the listener against the browser launch; take the first emitted auth code.
return server.listen()
.mergeWith(Mono.<String>fromRunnable(() -> {
try {
openUrl(browserUri);
} catch (IOException e) {
throw logger.logExceptionAsError(new IllegalStateException(e));
}
}).subscribeOn(Schedulers.newSingle("browser")))
.next()
.flatMap(code -> authenticateWithAuthorizationCode(request, code, redirectUri))
.onErrorResume(t -> server.dispose().then(Mono.error(t)))
.flatMap(msalToken -> server.dispose().then(Mono.just(msalToken)));
});
}
/**
* Asynchronously acquire a token from the App Service Managed Service Identity endpoint.
*
* @param msiEndpoint the endpoint to acquire token from
* @param msiSecret the secret to acquire token with
* @param request the details of the token request
* @return a Publisher that emits an AccessToken
*/
public Mono<AccessToken> authenticateToManagedIdentityEndpoint(String msiEndpoint, String msiSecret,
TokenRequestContext request) {
// Unlike the earlier revision, all blocking work is wrapped in Mono.fromCallable so it
// runs on subscription; checked exceptions surface as Mono errors.
return Mono.fromCallable(() -> {
String resource = ScopeUtil.scopesToResource(request.getScopes());
HttpURLConnection connection = null;
StringBuilder payload = new StringBuilder();
payload.append("resource=");
payload.append(URLEncoder.encode(resource, "UTF-8"));
payload.append("&api-version=");
payload.append(URLEncoder.encode("2017-09-01", "UTF-8"));
if (clientId != null) {
payload.append("&clientid=");
payload.append(URLEncoder.encode(clientId, "UTF-8"));
}
try {
URL url = new URL(String.format("%s?%s", msiEndpoint, payload));
connection = (HttpURLConnection) url.openConnection();
connection.setRequestMethod("GET");
// The App Service MSI endpoint authenticates callers via this shared-secret header.
if (msiSecret != null) {
connection.setRequestProperty("Secret", msiSecret);
}
connection.setRequestProperty("Metadata", "true");
connection.connect();
// "\\A" delimiter makes the Scanner read the whole response body in one token.
Scanner s = new Scanner(connection.getInputStream(), StandardCharsets.UTF_8.name())
.useDelimiter("\\A");
String result = s.hasNext() ? s.next() : "";
MSIToken msiToken = SERIALIZER_ADAPTER.deserialize(result, MSIToken.class, SerializerEncoding.JSON);
// Expiry padding now lives in IdentityToken rather than being inlined here.
return new IdentityToken(msiToken.getToken(), msiToken.getExpiresAt(), options);
} finally {
if (connection != null) {
connection.disconnect();
}
}
});
}
/**
* Asynchronously acquire a token from the Virtual Machine IMDS endpoint.
*
* @param request the details of the token request
* @return a Publisher that emits an AccessToken
*/
// Probes the IMDS endpoint with a short (500ms) connect timeout; emits true on success and
// an error (IOException from the callable) when IMDS is unreachable.
private Mono<Boolean> checkIMDSAvailable() {
StringBuilder payload = new StringBuilder();
try {
payload.append("api-version=");
payload.append(URLEncoder.encode("2018-02-01", "UTF-8"));
} catch (IOException exception) {
return Mono.error(exception);
}
return Mono.fromCallable(() -> {
HttpURLConnection connection = null;
// NOTE(review): this line is truncated in this copy (the IMDS URL literal is cut off
// after "http:"); restore the full endpoint from the upstream source.
URL url = new URL(String.format("http:
payload.toString()));
try {
connection = (HttpURLConnection) url.openConnection();
connection.setRequestMethod("GET");
connection.setConnectTimeout(500);
connection.connect();
} finally {
if (connection != null) {
connection.disconnect();
}
}
return true;
});
}
// Sleeps the current thread for the given number of milliseconds, used for retry back-off.
private static void sleep(int millis) {
    try {
        Thread.sleep(millis);
    } catch (InterruptedException ex) {
        // Restore the interrupt status so callers further up the stack can observe the
        // interruption; previously it was silently cleared.
        Thread.currentThread().interrupt();
        throw new IllegalStateException(ex);
    }
}
// Maps azure-core proxy configuration onto java.net.Proxy. Both SOCKS variants collapse to
// java.net's single SOCKS type; HTTP and anything else map to an HTTP proxy.
private static Proxy proxyOptionsToJavaNetProxy(ProxyOptions options) {
    Type javaProxyType;
    switch (options.getType()) {
        case SOCKS4:
        case SOCKS5:
            javaProxyType = Type.SOCKS;
            break;
        default:
            javaProxyType = Type.HTTP;
            break;
    }
    return new Proxy(javaProxyType, options.getAddress());
}
// Opens the platform's default browser at the given URL. Arguments are passed as a String[]
// so the URL reaches the OS as a single argv element instead of being re-tokenized on
// whitespace by Runtime.exec(String).
void openUrl(String url) throws IOException {
    Runtime rt = Runtime.getRuntime();
    String os = System.getProperty("os.name").toLowerCase(Locale.ROOT);
    if (os.contains("win")) {
        rt.exec(new String[]{"rundll32", "url.dll,FileProtocolHandler", url});
    } else if (os.contains("mac")) {
        rt.exec(new String[]{"open", url});
    } else if (os.contains("nix") || os.contains("nux")) {
        rt.exec(new String[]{"xdg-open", url});
    } else {
        logger.error("Browser could not be opened - please open {} in a browser on this device.", url);
    }
}
} |
I feel like we are spreading the two-minute concept throughout the code base. Can you make it a private static final constant that is referred to everywhere? | public MsalToken(IAuthenticationResult msalResult, IdentityClientOptions options) {
super(msalResult.accessToken(), OffsetDateTime.ofInstant(
msalResult.expiresOnDate().toInstant().minus(options.getRefreshBeforeExpiry()), ZoneOffset.UTC)
.plusMinutes(2));
this.account = msalResult.account();
} | .plusMinutes(2)); | public MsalToken(IAuthenticationResult msalResult, IdentityClientOptions options) {
super(msalResult.accessToken(),
OffsetDateTime.ofInstant(msalResult.expiresOnDate().toInstant(), ZoneOffset.UTC),
options);
this.account = msalResult.account();
} | class MsalToken extends AccessToken {
private IAccount account;
/**
* Creates an access token instance.
*
* @param msalResult the raw authentication result returned by MSAL
*/
/**
* @return the signed in account
*/
// Exposes the MSAL account so silent flows (acquireTokenSilently) can target this user.
public IAccount getAccount() {
return account;
}
} | class MsalToken extends IdentityToken {
private IAccount account;
/**
* Creates an access token instance.
*
* @param msalResult the raw authentication result returned by MSAL
*/
/**
* @return the signed in account
*/
// Exposes the MSAL account so silent flows (acquireTokenSilently) can target this user.
public IAccount getAccount() {
return account;
}
} |
Moved to a common base class IdentityToken | public Mono<AccessToken> authenticateToIMDSEndpoint(TokenRequestContext request) {
String resource = ScopeUtil.scopesToResource(request.getScopes());
StringBuilder payload = new StringBuilder();
final int imdsUpgradeTimeInMs = 70 * 1000;
try {
payload.append("api-version=");
payload.append(URLEncoder.encode("2018-02-01", "UTF-8"));
payload.append("&resource=");
payload.append(URLEncoder.encode(resource, "UTF-8"));
if (clientId != null) {
payload.append("&client_id=");
payload.append(URLEncoder.encode(clientId, "UTF-8"));
}
} catch (IOException exception) {
return Mono.error(exception);
}
return checkIMDSAvailable().flatMap(available -> Mono.fromCallable(() -> {
int retry = 1;
while (retry <= options.getMaxRetry()) {
URL url = null;
HttpURLConnection connection = null;
try {
url =
new URL(String.format("http:
payload.toString()));
connection = (HttpURLConnection) url.openConnection();
connection.setRequestMethod("GET");
connection.setRequestProperty("Metadata", "true");
connection.connect();
Scanner s = new Scanner(connection.getInputStream(), StandardCharsets.UTF_8.name())
.useDelimiter("\\A");
String result = s.hasNext() ? s.next() : "";
MSIToken msiToken = SERIALIZER_ADAPTER.<MSIToken>deserialize(result, MSIToken.class, SerializerEncoding.JSON);
return new AccessToken(msiToken.getToken(),
msiToken.getExpiresAt().plusMinutes(2).minus(options.getRefreshBeforeExpiry()));
} catch (IOException exception) {
if (connection == null) {
throw logger.logExceptionAsError(new RuntimeException(
String.format("Could not connect to the url: %s.", url), exception));
}
int responseCode = connection.getResponseCode();
if (responseCode == 410
|| responseCode == 429
|| responseCode == 404
|| (responseCode >= 500 && responseCode <= 599)) {
int retryTimeoutInMs = options.getRetryTimeout()
.apply(Duration.ofSeconds(RANDOM.nextInt(retry))).getNano() / 1000;
retryTimeoutInMs =
(responseCode == 410 && retryTimeoutInMs < imdsUpgradeTimeInMs) ? imdsUpgradeTimeInMs
: retryTimeoutInMs;
retry++;
if (retry > options.getMaxRetry()) {
break;
} else {
sleep(retryTimeoutInMs);
}
} else {
throw logger.logExceptionAsError(new RuntimeException(
"Couldn't acquire access token from IMDS, verify your objectId, "
+ "clientId or msiResourceId", exception));
}
} finally {
if (connection != null) {
connection.disconnect();
}
}
}
throw logger.logExceptionAsError(new RuntimeException(
String.format("MSI: Failed to acquire tokens after retrying %s times",
options.getMaxRetry())));
}));
} | msiToken.getExpiresAt().plusMinutes(2).minus(options.getRefreshBeforeExpiry())); | public Mono<AccessToken> authenticateToIMDSEndpoint(TokenRequestContext request) {
String resource = ScopeUtil.scopesToResource(request.getScopes());
StringBuilder payload = new StringBuilder();
final int imdsUpgradeTimeInMs = 70 * 1000;
try {
payload.append("api-version=");
payload.append(URLEncoder.encode("2018-02-01", "UTF-8"));
payload.append("&resource=");
payload.append(URLEncoder.encode(resource, "UTF-8"));
if (clientId != null) {
payload.append("&client_id=");
payload.append(URLEncoder.encode(clientId, "UTF-8"));
}
} catch (IOException exception) {
return Mono.error(exception);
}
return checkIMDSAvailable().flatMap(available -> Mono.fromCallable(() -> {
int retry = 1;
while (retry <= options.getMaxRetry()) {
URL url = null;
HttpURLConnection connection = null;
try {
url =
new URL(String.format("http:
payload.toString()));
connection = (HttpURLConnection) url.openConnection();
connection.setRequestMethod("GET");
connection.setRequestProperty("Metadata", "true");
connection.connect();
Scanner s = new Scanner(connection.getInputStream(), StandardCharsets.UTF_8.name())
.useDelimiter("\\A");
String result = s.hasNext() ? s.next() : "";
MSIToken msiToken = SERIALIZER_ADAPTER.deserialize(result,
MSIToken.class, SerializerEncoding.JSON);
return new IdentityToken(msiToken.getToken(), msiToken.getExpiresAt(), options);
} catch (IOException exception) {
if (connection == null) {
throw logger.logExceptionAsError(new RuntimeException(
String.format("Could not connect to the url: %s.", url), exception));
}
int responseCode = connection.getResponseCode();
if (responseCode == 410
|| responseCode == 429
|| responseCode == 404
|| (responseCode >= 500 && responseCode <= 599)) {
int retryTimeoutInMs = options.getRetryTimeout()
.apply(Duration.ofSeconds(RANDOM.nextInt(retry))).getNano() / 1000;
retryTimeoutInMs =
(responseCode == 410 && retryTimeoutInMs < imdsUpgradeTimeInMs) ? imdsUpgradeTimeInMs
: retryTimeoutInMs;
retry++;
if (retry > options.getMaxRetry()) {
break;
} else {
sleep(retryTimeoutInMs);
}
} else {
throw logger.logExceptionAsError(new RuntimeException(
"Couldn't acquire access token from IMDS, verify your objectId, "
+ "clientId or msiResourceId", exception));
}
} finally {
if (connection != null) {
connection.disconnect();
}
}
}
throw logger.logExceptionAsError(new RuntimeException(
String.format("MSI: Failed to acquire tokens after retrying %s times",
options.getMaxRetry())));
}));
} | class IdentityClient {
private static final SerializerAdapter SERIALIZER_ADAPTER = JacksonAdapter.createDefaultSerializerAdapter();
private static final Random RANDOM = new Random();
private final ClientLogger logger = new ClientLogger(IdentityClient.class);
private final IdentityClientOptions options;
private final PublicClientApplication publicClientApplication;
private final String tenantId;
private final String clientId;
/**
* Creates an IdentityClient with the given options.
*
* @param tenantId the tenant ID of the application.
* @param clientId the client ID of the application.
* @param options the options configuring the client.
*/
IdentityClient(String tenantId, String clientId, IdentityClientOptions options) {
if (tenantId == null) {
tenantId = "common";
}
if (options == null) {
options = new IdentityClientOptions();
}
this.tenantId = tenantId;
this.clientId = clientId;
this.options = options;
if (clientId == null) {
this.publicClientApplication = null;
} else {
String authorityUrl = options.getAuthorityHost().replaceAll("/+$", "") + "/organizations/" + tenantId;
PublicClientApplication.Builder publicClientApplicationBuilder = PublicClientApplication.builder(clientId);
if (options.getHttpPipeline() != null) {
publicClientApplicationBuilder = publicClientApplicationBuilder
.httpClient(new HttpPipelineAdapter(options.getHttpPipeline()));
}
try {
publicClientApplicationBuilder = publicClientApplicationBuilder.authority(authorityUrl);
} catch (MalformedURLException e) {
throw logger.logExceptionAsWarning(new IllegalStateException(e));
}
if (options.getProxyOptions() != null) {
publicClientApplicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions()));
}
this.publicClientApplication = publicClientApplicationBuilder.build();
}
}
/**
* Asynchronously acquire a token from Active Directory with a client secret.
*
* @param clientSecret the client secret of the application
* @param request the details of the token request
* @return a Publisher that emits an AccessToken
*/
public Mono<AccessToken> authenticateWithClientSecret(String clientSecret, TokenRequestContext request) {
String authorityUrl = options.getAuthorityHost().replaceAll("/+$", "") + "/" + tenantId;
try {
ConfidentialClientApplication.Builder applicationBuilder =
ConfidentialClientApplication.builder(clientId, ClientCredentialFactory.createFromSecret(clientSecret))
.authority(authorityUrl);
if (options.getProxyOptions() != null) {
applicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions()));
}
if (options.getHttpPipeline() != null) {
applicationBuilder.httpClient(new HttpPipelineAdapter(options.getHttpPipeline()));
}
ConfidentialClientApplication application = applicationBuilder.build();
return Mono.fromFuture(application.acquireToken(
ClientCredentialParameters.builder(new HashSet<>(request.getScopes()))
.build()))
.map(ar -> new MsalToken(ar, options));
} catch (MalformedURLException e) {
return Mono.error(e);
}
}
/**
* Asynchronously acquire a token from Active Directory with a PKCS12 certificate.
*
* @param pfxCertificatePath the path to the PKCS12 certificate of the application
* @param pfxCertificatePassword the password protecting the PFX certificate
* @param request the details of the token request
* @return a Publisher that emits an AccessToken
*/
public Mono<AccessToken> authenticateWithPfxCertificate(String pfxCertificatePath, String pfxCertificatePassword,
TokenRequestContext request) {
String authorityUrl = options.getAuthorityHost().replaceAll("/+$", "") + "/" + tenantId;
return Mono.fromCallable(() -> {
ConfidentialClientApplication.Builder applicationBuilder =
ConfidentialClientApplication.builder(clientId, ClientCredentialFactory.createFromCertificate(
new FileInputStream(pfxCertificatePath), pfxCertificatePassword))
.authority(authorityUrl);
if (options.getProxyOptions() != null) {
applicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions()));
}
if (options.getHttpPipeline() != null) {
applicationBuilder.httpClient(new HttpPipelineAdapter(options.getHttpPipeline()));
}
return applicationBuilder.build();
}).flatMap(application -> Mono.fromFuture(application.acquireToken(
ClientCredentialParameters.builder(new HashSet<>(request.getScopes())).build())))
.map(ar -> new MsalToken(ar, options));
}
/**
* Asynchronously acquire a token from Active Directory with a PEM certificate.
*
* @param pemCertificatePath the path to the PEM certificate of the application
* @param request the details of the token request
* @return a Publisher that emits an AccessToken
*/
public Mono<AccessToken> authenticateWithPemCertificate(String pemCertificatePath, TokenRequestContext request) {
String authorityUrl = options.getAuthorityHost().replaceAll("/+$", "") + "/" + tenantId;
try {
byte[] pemCertificateBytes = Files.readAllBytes(Paths.get(pemCertificatePath));
ConfidentialClientApplication.Builder applicationBuilder =
ConfidentialClientApplication.builder(clientId, ClientCredentialFactory.createFromCertificate(
CertificateUtil.privateKeyFromPem(pemCertificateBytes),
CertificateUtil.publicKeyFromPem(pemCertificateBytes)))
.authority(authorityUrl);
if (options.getProxyOptions() != null) {
applicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions()));
}
if (options.getHttpPipeline() != null) {
applicationBuilder.httpClient(new HttpPipelineAdapter(options.getHttpPipeline()));
}
ConfidentialClientApplication application = applicationBuilder.build();
return Mono.fromFuture(application.acquireToken(
ClientCredentialParameters.builder(new HashSet<>(request.getScopes()))
.build()))
.map(ar -> new MsalToken(ar, options));
} catch (IOException e) {
return Mono.error(e);
}
}
/**
* Asynchronously acquire a token from Active Directory with a username and a password.
*
* @param request the details of the token request
* @param username the username of the user
* @param password the password of the user
* @return a Publisher that emits an AccessToken
*/
public Mono<MsalToken> authenticateWithUsernamePassword(TokenRequestContext request,
String username, String password) {
return Mono.fromFuture(publicClientApplication.acquireToken(
UserNamePasswordParameters.builder(new HashSet<>(request.getScopes()), username, password.toCharArray())
.build()))
.map(ar -> new MsalToken(ar, options));
}
/**
* Asynchronously acquire a token from the currently logged in client.
*
* @param request the details of the token request
* @return a Publisher that emits an AccessToken
*/
public Mono<MsalToken> authenticateWithUserRefreshToken(TokenRequestContext request, MsalToken msalToken) {
SilentParameters parameters;
if (msalToken.getAccount() != null) {
parameters = SilentParameters.builder(new HashSet<>(request.getScopes()), msalToken.getAccount()).build();
} else {
parameters = SilentParameters.builder(new HashSet<>(request.getScopes())).build();
}
return Mono.defer(() -> {
try {
return Mono.fromFuture(publicClientApplication.acquireTokenSilently(parameters)).map(ar -> new MsalToken(ar, options));
} catch (MalformedURLException e) {
return Mono.error(e);
}
});
}
/**
* Asynchronously acquire a token from Active Directory with a device code challenge. Active Directory will provide
* a device code for login and the user must meet the challenge by authenticating in a browser on the current or a
* different device.
*
* @param request the details of the token request
* @param deviceCodeConsumer the user provided closure that will consume the device code challenge
* @return a Publisher that emits an AccessToken when the device challenge is met, or an exception if the device
* code expires
*/
public Mono<MsalToken> authenticateWithDeviceCode(TokenRequestContext request,
Consumer<DeviceCodeInfo> deviceCodeConsumer) {
return Mono.fromFuture(() -> {
DeviceCodeFlowParameters parameters = DeviceCodeFlowParameters.builder(new HashSet<>(request.getScopes()),
dc -> deviceCodeConsumer.accept(new DeviceCodeInfo(dc.userCode(), dc.deviceCode(),
dc.verificationUri(), OffsetDateTime.now().plusSeconds(dc.expiresIn()), dc.message()))).build();
return publicClientApplication.acquireToken(parameters);
}).map(ar -> new MsalToken(ar, options));
}
/**
* Asynchronously acquire a token from Active Directory with an authorization code from an oauth flow.
*
* @param request the details of the token request
* @param authorizationCode the oauth2 authorization code
* @param redirectUrl the redirectUrl where the authorization code is sent to
* @return a Publisher that emits an AccessToken
*/
public Mono<MsalToken> authenticateWithAuthorizationCode(TokenRequestContext request, String authorizationCode,
URI redirectUrl) {
return Mono.fromFuture(() -> publicClientApplication.acquireToken(
AuthorizationCodeParameters.builder(authorizationCode, redirectUrl)
.scopes(new HashSet<>(request.getScopes()))
.build()))
.map(ar -> new MsalToken(ar, options));
}
/**
* Asynchronously acquire a token from Active Directory by opening a browser and wait for the user to login. The
* credential will run a minimal local HttpServer at the given port, so {@code http:
* listed as a valid reply URL for the application.
*
* @param request the details of the token request
* @param port the port on which the HTTP server is listening
* @return a Publisher that emits an AccessToken
*/
public Mono<MsalToken> authenticateWithBrowserInteraction(TokenRequestContext request, int port) {
String authorityUrl = options.getAuthorityHost().replaceAll("/+$", "") + "/" + tenantId;
return AuthorizationCodeListener.create(port)
.flatMap(server -> {
URI redirectUri;
String browserUri;
try {
redirectUri = new URI(String.format("http:
browserUri =
String.format("%s/oauth2/v2.0/authorize?response_type=code&response_mode=query&prompt"
+ "=select_account&client_id=%s&redirect_uri=%s&state=%s&scope=%s",
authorityUrl,
clientId,
redirectUri.toString(),
UUID.randomUUID(),
String.join(" ", request.getScopes()));
} catch (URISyntaxException e) {
return server.dispose().then(Mono.error(e));
}
return server.listen()
.mergeWith(Mono.<String>fromRunnable(() -> {
try {
openUrl(browserUri);
} catch (IOException e) {
throw logger.logExceptionAsError(new IllegalStateException(e));
}
}).subscribeOn(Schedulers.newSingle("browser")))
.next()
.flatMap(code -> authenticateWithAuthorizationCode(request, code, redirectUri))
.onErrorResume(t -> server.dispose().then(Mono.error(t)))
.flatMap(msalToken -> server.dispose().then(Mono.just(msalToken)));
});
}
/**
* Asynchronously acquire a token from the App Service Managed Service Identity endpoint.
*
* @param msiEndpoint the endpoint to acquire token from
* @param msiSecret the secret to acquire token with
* @param request the details of the token request
* @return a Publisher that emits an AccessToken
*/
public Mono<AccessToken> authenticateToManagedIdentityEndpoint(String msiEndpoint, String msiSecret,
TokenRequestContext request) {
String resource = ScopeUtil.scopesToResource(request.getScopes());
HttpURLConnection connection = null;
StringBuilder payload = new StringBuilder();
try {
payload.append("resource=");
payload.append(URLEncoder.encode(resource, "UTF-8"));
payload.append("&api-version=");
payload.append(URLEncoder.encode("2017-09-01", "UTF-8"));
if (clientId != null) {
payload.append("&clientid=");
payload.append(URLEncoder.encode(clientId, "UTF-8"));
}
} catch (IOException exception) {
return Mono.error(exception);
}
try {
URL url = new URL(String.format("%s?%s", msiEndpoint, payload));
connection = (HttpURLConnection) url.openConnection();
connection.setRequestMethod("GET");
if (msiSecret != null) {
connection.setRequestProperty("Secret", msiSecret);
}
connection.setRequestProperty("Metadata", "true");
connection.connect();
Scanner s = new Scanner(connection.getInputStream(), StandardCharsets.UTF_8.name()).useDelimiter("\\A");
String result = s.hasNext() ? s.next() : "";
MSIToken msiToken = SERIALIZER_ADAPTER.deserialize(result, MSIToken.class, SerializerEncoding.JSON);
return Mono.just(new AccessToken(msiToken.getToken(),
msiToken.getExpiresAt().plusMinutes(2).minus(options.getRefreshBeforeExpiry())));
} catch (IOException e) {
return Mono.error(e);
} finally {
if (connection != null) {
connection.disconnect();
}
}
}
/**
* Asynchronously acquire a token from the Virtual Machine IMDS endpoint.
*
* @param request the details of the token request
* @return a Publisher that emits an AccessToken
*/
private Mono<Boolean> checkIMDSAvailable() {
StringBuilder payload = new StringBuilder();
try {
payload.append("api-version=");
payload.append(URLEncoder.encode("2018-02-01", "UTF-8"));
} catch (IOException exception) {
return Mono.error(exception);
}
return Mono.fromCallable(() -> {
HttpURLConnection connection = null;
URL url = new URL(String.format("http:
payload.toString()));
try {
connection = (HttpURLConnection) url.openConnection();
connection.setRequestMethod("GET");
connection.setConnectTimeout(500);
connection.connect();
} finally {
if (connection != null) {
connection.disconnect();
}
}
return true;
});
}
private static void sleep(int millis) {
try {
Thread.sleep(millis);
} catch (InterruptedException ex) {
throw new IllegalStateException(ex);
}
}
private static Proxy proxyOptionsToJavaNetProxy(ProxyOptions options) {
switch (options.getType()) {
case SOCKS4:
case SOCKS5:
return new Proxy(Type.SOCKS, options.getAddress());
case HTTP:
default:
return new Proxy(Type.HTTP, options.getAddress());
}
}
void openUrl(String url) throws IOException {
Runtime rt = Runtime.getRuntime();
String os = System.getProperty("os.name").toLowerCase(Locale.ROOT);
if (os.contains("win")) {
rt.exec("rundll32 url.dll,FileProtocolHandler " + url);
} else if (os.contains("mac")) {
rt.exec("open " + url);
} else if (os.contains("nix") || os.contains("nux")) {
rt.exec("xdg-open " + url);
} else {
logger.error("Browser could not be opened - please open {} in a browser on this device.", url);
}
}
} | class IdentityClient {
private static final SerializerAdapter SERIALIZER_ADAPTER = JacksonAdapter.createDefaultSerializerAdapter();
private static final Random RANDOM = new Random();
private final ClientLogger logger = new ClientLogger(IdentityClient.class);
private final IdentityClientOptions options;
private final PublicClientApplication publicClientApplication;
private final String tenantId;
private final String clientId;
private HttpPipelineAdapter httpPipelineAdapter;
/**
* Creates an IdentityClient with the given options.
*
* @param tenantId the tenant ID of the application.
* @param clientId the client ID of the application.
* @param options the options configuring the client.
*/
IdentityClient(String tenantId, String clientId, IdentityClientOptions options) {
if (tenantId == null) {
tenantId = "common";
}
if (options == null) {
options = new IdentityClientOptions();
}
this.tenantId = tenantId;
this.clientId = clientId;
this.options = options;
if (clientId == null) {
this.publicClientApplication = null;
} else {
String authorityUrl = options.getAuthorityHost().replaceAll("/+$", "") + "/organizations/" + tenantId;
PublicClientApplication.Builder publicClientApplicationBuilder = PublicClientApplication.builder(clientId);
try {
publicClientApplicationBuilder = publicClientApplicationBuilder.authority(authorityUrl);
} catch (MalformedURLException e) {
throw logger.logExceptionAsWarning(new IllegalStateException(e));
}
HttpPipeline httpPipeline = options.getHttpPipeline();
if (httpPipeline != null) {
httpPipelineAdapter = new HttpPipelineAdapter(httpPipeline);
publicClientApplicationBuilder.httpClient(httpPipelineAdapter);
} else {
HttpClient httpClient = options.getHttpClient();
if (httpClient != null) {
httpPipelineAdapter = new HttpPipelineAdapter(setupPipeline(httpClient));
publicClientApplicationBuilder.httpClient(httpPipelineAdapter);
} else if (options.getProxyOptions() != null) {
publicClientApplicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions()));
} else {
httpPipelineAdapter = new HttpPipelineAdapter(setupPipeline(HttpClient.createDefault()));
publicClientApplicationBuilder.httpClient(httpPipelineAdapter);
}
}
this.publicClientApplication = publicClientApplicationBuilder.build();
}
}
/**
* Asynchronously acquire a token from Active Directory with a client secret.
*
* @param clientSecret the client secret of the application
* @param request the details of the token request
* @return a Publisher that emits an AccessToken
*/
public Mono<AccessToken> authenticateWithClientSecret(String clientSecret, TokenRequestContext request) {
String authorityUrl = options.getAuthorityHost().replaceAll("/+$", "") + "/" + tenantId;
try {
ConfidentialClientApplication.Builder applicationBuilder =
ConfidentialClientApplication.builder(clientId, ClientCredentialFactory.createFromSecret(clientSecret))
.authority(authorityUrl);
if (httpPipelineAdapter != null) {
applicationBuilder.httpClient(httpPipelineAdapter);
} else if (options.getProxyOptions() != null) {
applicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions()));
}
ConfidentialClientApplication application = applicationBuilder.build();
return Mono.fromFuture(application.acquireToken(
ClientCredentialParameters.builder(new HashSet<>(request.getScopes()))
.build()))
.map(ar -> new MsalToken(ar, options));
} catch (MalformedURLException e) {
return Mono.error(e);
}
}
private HttpPipeline setupPipeline(HttpClient httpClient) {
List<HttpPipelinePolicy> policies = new ArrayList<>();
HttpLogOptions httpLogOptions = new HttpLogOptions();
HttpPolicyProviders.addBeforeRetryPolicies(policies);
policies.add(new RetryPolicy());
HttpPolicyProviders.addAfterRetryPolicies(policies);
policies.add(new HttpLoggingPolicy(httpLogOptions));
return new HttpPipelineBuilder().httpClient(httpClient)
.policies(policies.toArray(new HttpPipelinePolicy[0])).build();
}
/**
* Asynchronously acquire a token from Active Directory with a PKCS12 certificate.
*
* @param pfxCertificatePath the path to the PKCS12 certificate of the application
* @param pfxCertificatePassword the password protecting the PFX certificate
* @param request the details of the token request
* @return a Publisher that emits an AccessToken
*/
public Mono<AccessToken> authenticateWithPfxCertificate(String pfxCertificatePath, String pfxCertificatePassword,
TokenRequestContext request) {
String authorityUrl = options.getAuthorityHost().replaceAll("/+$", "") + "/" + tenantId;
return Mono.fromCallable(() -> {
ConfidentialClientApplication.Builder applicationBuilder =
ConfidentialClientApplication.builder(clientId, ClientCredentialFactory.createFromCertificate(
new FileInputStream(pfxCertificatePath), pfxCertificatePassword))
.authority(authorityUrl);
if (httpPipelineAdapter != null) {
applicationBuilder.httpClient(httpPipelineAdapter);
} else if (options.getProxyOptions() != null) {
applicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions()));
}
return applicationBuilder.build();
}).flatMap(application -> Mono.fromFuture(application.acquireToken(
ClientCredentialParameters.builder(new HashSet<>(request.getScopes())).build())))
.map(ar -> new MsalToken(ar, options));
}
/**
* Asynchronously acquire a token from Active Directory with a PEM certificate.
*
* @param pemCertificatePath the path to the PEM certificate of the application
* @param request the details of the token request
* @return a Publisher that emits an AccessToken
*/
public Mono<AccessToken> authenticateWithPemCertificate(String pemCertificatePath, TokenRequestContext request) {
String authorityUrl = options.getAuthorityHost().replaceAll("/+$", "") + "/" + tenantId;
try {
byte[] pemCertificateBytes = Files.readAllBytes(Paths.get(pemCertificatePath));
ConfidentialClientApplication.Builder applicationBuilder =
ConfidentialClientApplication.builder(clientId, ClientCredentialFactory.createFromCertificate(
CertificateUtil.privateKeyFromPem(pemCertificateBytes),
CertificateUtil.publicKeyFromPem(pemCertificateBytes)))
.authority(authorityUrl);
if (httpPipelineAdapter != null) {
applicationBuilder.httpClient(httpPipelineAdapter);
} else if (options.getProxyOptions() != null) {
applicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions()));
}
ConfidentialClientApplication application = applicationBuilder.build();
return Mono.fromFuture(application.acquireToken(
ClientCredentialParameters.builder(new HashSet<>(request.getScopes()))
.build()))
.map(ar -> new MsalToken(ar, options));
} catch (IOException e) {
return Mono.error(e);
}
}
/**
* Asynchronously acquire a token from Active Directory with a username and a password.
*
* @param request the details of the token request
* @param username the username of the user
* @param password the password of the user
* @return a Publisher that emits an AccessToken
*/
public Mono<MsalToken> authenticateWithUsernamePassword(TokenRequestContext request,
String username, String password) {
return Mono.fromFuture(publicClientApplication.acquireToken(
UserNamePasswordParameters.builder(new HashSet<>(request.getScopes()), username, password.toCharArray())
.build()))
.map(ar -> new MsalToken(ar, options));
}
/**
* Asynchronously acquire a token from the currently logged in client.
*
* @param request the details of the token request
* @return a Publisher that emits an AccessToken
*/
public Mono<MsalToken> authenticateWithUserRefreshToken(TokenRequestContext request, MsalToken msalToken) {
SilentParameters parameters;
if (msalToken.getAccount() != null) {
parameters = SilentParameters.builder(new HashSet<>(request.getScopes()), msalToken.getAccount()).build();
} else {
parameters = SilentParameters.builder(new HashSet<>(request.getScopes())).build();
}
return Mono.defer(() -> {
try {
return Mono.fromFuture(publicClientApplication.acquireTokenSilently(parameters))
.map(ar -> new MsalToken(ar, options));
} catch (MalformedURLException e) {
return Mono.error(e);
}
});
}
/**
* Asynchronously acquire a token from Active Directory with a device code challenge. Active Directory will provide
* a device code for login and the user must meet the challenge by authenticating in a browser on the current or a
* different device.
*
* @param request the details of the token request
* @param deviceCodeConsumer the user provided closure that will consume the device code challenge
* @return a Publisher that emits an AccessToken when the device challenge is met, or an exception if the device
* code expires
*/
public Mono<MsalToken> authenticateWithDeviceCode(TokenRequestContext request,
Consumer<DeviceCodeInfo> deviceCodeConsumer) {
return Mono.fromFuture(() -> {
DeviceCodeFlowParameters parameters = DeviceCodeFlowParameters.builder(new HashSet<>(request.getScopes()),
dc -> deviceCodeConsumer.accept(new DeviceCodeInfo(dc.userCode(), dc.deviceCode(),
dc.verificationUri(), OffsetDateTime.now().plusSeconds(dc.expiresIn()), dc.message()))).build();
return publicClientApplication.acquireToken(parameters);
}).map(ar -> new MsalToken(ar, options));
}
/**
* Asynchronously acquire a token from Active Directory with an authorization code from an oauth flow.
*
* @param request the details of the token request
* @param authorizationCode the oauth2 authorization code
* @param redirectUrl the redirectUrl where the authorization code is sent to
* @return a Publisher that emits an AccessToken
*/
public Mono<MsalToken> authenticateWithAuthorizationCode(TokenRequestContext request, String authorizationCode,
URI redirectUrl) {
return Mono.fromFuture(() -> publicClientApplication.acquireToken(
AuthorizationCodeParameters.builder(authorizationCode, redirectUrl)
.scopes(new HashSet<>(request.getScopes()))
.build()))
.map(ar -> new MsalToken(ar, options));
}
/**
* Asynchronously acquire a token from Active Directory by opening a browser and wait for the user to login. The
* credential will run a minimal local HttpServer at the given port, so {@code http:
* listed as a valid reply URL for the application.
*
* @param request the details of the token request
* @param port the port on which the HTTP server is listening
* @return a Publisher that emits an AccessToken
*/
public Mono<MsalToken> authenticateWithBrowserInteraction(TokenRequestContext request, int port) {
String authorityUrl = options.getAuthorityHost().replaceAll("/+$", "") + "/" + tenantId;
return AuthorizationCodeListener.create(port)
.flatMap(server -> {
URI redirectUri;
String browserUri;
try {
redirectUri = new URI(String.format("http:
browserUri =
String.format("%s/oauth2/v2.0/authorize?response_type=code&response_mode=query&prompt"
+ "=select_account&client_id=%s&redirect_uri=%s&state=%s&scope=%s",
authorityUrl,
clientId,
redirectUri.toString(),
UUID.randomUUID(),
String.join(" ", request.getScopes()));
} catch (URISyntaxException e) {
return server.dispose().then(Mono.error(e));
}
return server.listen()
.mergeWith(Mono.<String>fromRunnable(() -> {
try {
openUrl(browserUri);
} catch (IOException e) {
throw logger.logExceptionAsError(new IllegalStateException(e));
}
}).subscribeOn(Schedulers.newSingle("browser")))
.next()
.flatMap(code -> authenticateWithAuthorizationCode(request, code, redirectUri))
.onErrorResume(t -> server.dispose().then(Mono.error(t)))
.flatMap(msalToken -> server.dispose().then(Mono.just(msalToken)));
});
}
/**
* Asynchronously acquire a token from the App Service Managed Service Identity endpoint.
*
* @param msiEndpoint the endpoint to acquire token from
* @param msiSecret the secret to acquire token with
* @param request the details of the token request
* @return a Publisher that emits an AccessToken
*/
public Mono<AccessToken> authenticateToManagedIdentityEndpoint(String msiEndpoint, String msiSecret,
TokenRequestContext request) {
return Mono.fromCallable(() -> {
String resource = ScopeUtil.scopesToResource(request.getScopes());
HttpURLConnection connection = null;
StringBuilder payload = new StringBuilder();
payload.append("resource=");
payload.append(URLEncoder.encode(resource, "UTF-8"));
payload.append("&api-version=");
payload.append(URLEncoder.encode("2017-09-01", "UTF-8"));
if (clientId != null) {
payload.append("&clientid=");
payload.append(URLEncoder.encode(clientId, "UTF-8"));
}
try {
URL url = new URL(String.format("%s?%s", msiEndpoint, payload));
connection = (HttpURLConnection) url.openConnection();
connection.setRequestMethod("GET");
if (msiSecret != null) {
connection.setRequestProperty("Secret", msiSecret);
}
connection.setRequestProperty("Metadata", "true");
connection.connect();
Scanner s = new Scanner(connection.getInputStream(), StandardCharsets.UTF_8.name())
.useDelimiter("\\A");
String result = s.hasNext() ? s.next() : "";
MSIToken msiToken = SERIALIZER_ADAPTER.deserialize(result, MSIToken.class, SerializerEncoding.JSON);
return new IdentityToken(msiToken.getToken(), msiToken.getExpiresAt(), options);
} finally {
if (connection != null) {
connection.disconnect();
}
}
});
}
/**
* Asynchronously acquire a token from the Virtual Machine IMDS endpoint.
*
* @param request the details of the token request
* @return a Publisher that emits an AccessToken
*/
private Mono<Boolean> checkIMDSAvailable() {
StringBuilder payload = new StringBuilder();
try {
payload.append("api-version=");
payload.append(URLEncoder.encode("2018-02-01", "UTF-8"));
} catch (IOException exception) {
return Mono.error(exception);
}
return Mono.fromCallable(() -> {
HttpURLConnection connection = null;
URL url = new URL(String.format("http:
payload.toString()));
try {
connection = (HttpURLConnection) url.openConnection();
connection.setRequestMethod("GET");
connection.setConnectTimeout(500);
connection.connect();
} finally {
if (connection != null) {
connection.disconnect();
}
}
return true;
});
}
private static void sleep(int millis) {
try {
Thread.sleep(millis);
} catch (InterruptedException ex) {
throw new IllegalStateException(ex);
}
}
private static Proxy proxyOptionsToJavaNetProxy(ProxyOptions options) {
switch (options.getType()) {
case SOCKS4:
case SOCKS5:
return new Proxy(Type.SOCKS, options.getAddress());
case HTTP:
default:
return new Proxy(Type.HTTP, options.getAddress());
}
}
void openUrl(String url) throws IOException {
Runtime rt = Runtime.getRuntime();
String os = System.getProperty("os.name").toLowerCase(Locale.ROOT);
if (os.contains("win")) {
rt.exec("rundll32 url.dll,FileProtocolHandler " + url);
} else if (os.contains("mac")) {
rt.exec("open " + url);
} else if (os.contains("nix") || os.contains("nux")) {
rt.exec("xdg-open " + url);
} else {
logger.error("Browser could not be opened - please open {} in a browser on this device.", url);
}
}
} |
nit; `null` check. | public IdentityClientOptions setTokenRefreshOffset(Duration tokenRefreshOffset) {
this.tokenRefreshOffset = tokenRefreshOffset;
return this;
} | this.tokenRefreshOffset = tokenRefreshOffset; | public IdentityClientOptions setTokenRefreshOffset(Duration tokenRefreshOffset) {
if (tokenRefreshOffset != null) {
this.tokenRefreshOffset = tokenRefreshOffset;
}
return this;
} | class IdentityClientOptions {
private static final String DEFAULT_AUTHORITY_HOST = "https:
private static final int MAX_RETRY_DEFAULT_LIMIT = 3;
private String authorityHost;
private int maxRetry;
private Function<Duration, Duration> retryTimeout;
private ProxyOptions proxyOptions;
private HttpPipeline httpPipeline;
private Duration tokenRefreshOffset = Duration.ofMinutes(2);
private HttpClient httpClient;
/**
* Creates an instance of IdentityClientOptions with default settings.
*/
public IdentityClientOptions() {
authorityHost = DEFAULT_AUTHORITY_HOST;
maxRetry = MAX_RETRY_DEFAULT_LIMIT;
retryTimeout = i -> Duration.ofSeconds((long) Math.pow(2, i.getSeconds() - 1));
}
/**
* @return the Azure Active Directory endpoint to acquire tokens.
*/
public String getAuthorityHost() {
return authorityHost;
}
/**
* Specifies the Azure Active Directory endpoint to acquire tokens.
* @param authorityHost the Azure Active Directory endpoint
* @return IdentityClientOptions
*/
public IdentityClientOptions setAuthorityHost(String authorityHost) {
this.authorityHost = authorityHost;
return this;
}
/**
* @return the max number of retries when an authentication request fails.
*/
public int getMaxRetry() {
return maxRetry;
}
/**
* Specifies the max number of retries when an authentication request fails.
* @param maxRetry the number of retries
* @return IdentityClientOptions
*/
public IdentityClientOptions setMaxRetry(int maxRetry) {
this.maxRetry = maxRetry;
return this;
}
/**
* @return a Function to calculate seconds of timeout on every retried request.
*/
public Function<Duration, Duration> getRetryTimeout() {
return retryTimeout;
}
/**
* Specifies a Function to calculate seconds of timeout on every retried request.
* @param retryTimeout the Function that returns a timeout in seconds given the number of retry
* @return IdentityClientOptions
*/
public IdentityClientOptions setRetryTimeout(Function<Duration, Duration> retryTimeout) {
this.retryTimeout = retryTimeout;
return this;
}
/**
* @return the options for proxy configuration.
*/
public ProxyOptions getProxyOptions() {
return proxyOptions;
}
/**
* Specifies the options for proxy configuration.
* @param proxyOptions the options for proxy configuration
* @return IdentityClientOptions
*/
public IdentityClientOptions setProxyOptions(ProxyOptions proxyOptions) {
this.proxyOptions = proxyOptions;
return this;
}
/**
* @return the HttpPipeline to send all requests
*/
public HttpPipeline getHttpPipeline() {
return httpPipeline;
}
/**
* @return the HttpClient to use for requests
*/
public HttpClient getHttpClient() {
return httpClient;
}
/**
* Specifies the HttpPipeline to send all requests. This setting overrides the others.
* @param httpPipeline the HttpPipeline to send all requests
* @return IdentityClientOptions
*/
public IdentityClientOptions setHttpPipeline(HttpPipeline httpPipeline) {
this.httpPipeline = httpPipeline;
return this;
}
/**
 * Gets the duration subtracted from a token's actual expiry when deciding
 * whether it must be refreshed.
 *
 * @return how long before the actual token expiry to refresh the token.
 */
public Duration getTokenRefreshOffset() {
    return tokenRefreshOffset;
}
/**
 * Sets how long before the actual token expiry to refresh the token. The
 * token will be considered expired at and after the time of (actual
 * expiry - token refresh offset). The default offset is 2 minutes.
 *
 * This is useful when network is congested and a request containing the
 * token takes longer than normal to get to the server.
 *
 * @param tokenRefreshOffset the duration before the actual expiry of a token to refresh it
 * @return IdentityClientOptions
 */
public IdentityClientOptions setTokenRefreshOffset(Duration tokenRefreshOffset) {
    // Ignore null so the 2-minute default offset is preserved.
    if (tokenRefreshOffset != null) {
        this.tokenRefreshOffset = tokenRefreshOffset;
    }
    return this;
}
/**
 * Specifies the HttpClient to use for requests.
 *
 * @param httpClient the http client to use for requests
 * @return IdentityClientOptions
 */
public IdentityClientOptions setHttpClient(HttpClient httpClient) {
    this.httpClient = httpClient;
    return this;
}
} | class IdentityClientOptions {
// NOTE(review): the original literal was truncated at "https:" (unterminated string,
// would not compile). Restored to the Azure AD public-cloud authority, the standard
// Azure Identity default — TODO confirm against the upstream source.
private static final String DEFAULT_AUTHORITY_HOST = "https://login.microsoftonline.com/";
private static final int MAX_RETRY_DEFAULT_LIMIT = 3;
private String authorityHost;
private int maxRetry;
// Back-off function: duration holding the retry attempt number -> wait before next attempt.
private Function<Duration, Duration> retryTimeout;
private ProxyOptions proxyOptions;
private HttpPipeline httpPipeline;
// Tokens are treated as expired this long before their real expiry (default 2 minutes).
private Duration tokenRefreshOffset = Duration.ofMinutes(2);
private HttpClient httpClient;
/**
 * Creates an instance of IdentityClientOptions with default settings.
 */
public IdentityClientOptions() {
    authorityHost = DEFAULT_AUTHORITY_HOST;
    maxRetry = MAX_RETRY_DEFAULT_LIMIT;
    // Exponential back-off: 2^(n-1) seconds for the n-th retry.
    retryTimeout = i -> Duration.ofSeconds((long) Math.pow(2, i.getSeconds() - 1));
}
/**
* @return the Azure Active Directory endpoint to acquire tokens.
*/
public String getAuthorityHost() {
return authorityHost;
}
/**
* Specifies the Azure Active Directory endpoint to acquire tokens.
* @param authorityHost the Azure Active Directory endpoint
* @return IdentityClientOptions
*/
public IdentityClientOptions setAuthorityHost(String authorityHost) {
this.authorityHost = authorityHost;
return this;
}
/**
* @return the max number of retries when an authentication request fails.
*/
public int getMaxRetry() {
return maxRetry;
}
/**
* Specifies the max number of retries when an authentication request fails.
* @param maxRetry the number of retries
* @return IdentityClientOptions
*/
public IdentityClientOptions setMaxRetry(int maxRetry) {
this.maxRetry = maxRetry;
return this;
}
/**
* @return a Function to calculate seconds of timeout on every retried request.
*/
public Function<Duration, Duration> getRetryTimeout() {
return retryTimeout;
}
/**
* Specifies a Function to calculate seconds of timeout on every retried request.
* @param retryTimeout the Function that returns a timeout in seconds given the number of retry
* @return IdentityClientOptions
*/
public IdentityClientOptions setRetryTimeout(Function<Duration, Duration> retryTimeout) {
this.retryTimeout = retryTimeout;
return this;
}
/**
* @return the options for proxy configuration.
*/
public ProxyOptions getProxyOptions() {
return proxyOptions;
}
/**
* Specifies the options for proxy configuration.
* @param proxyOptions the options for proxy configuration
* @return IdentityClientOptions
*/
public IdentityClientOptions setProxyOptions(ProxyOptions proxyOptions) {
this.proxyOptions = proxyOptions;
return this;
}
/**
* @return the HttpPipeline to send all requests
*/
public HttpPipeline getHttpPipeline() {
return httpPipeline;
}
/**
* @return the HttpClient to use for requests
*/
public HttpClient getHttpClient() {
return httpClient;
}
/**
* Specifies the HttpPipeline to send all requests. This setting overrides the others.
* @param httpPipeline the HttpPipeline to send all requests
* @return IdentityClientOptions
*/
public IdentityClientOptions setHttpPipeline(HttpPipeline httpPipeline) {
this.httpPipeline = httpPipeline;
return this;
}
/**
* @return how long before the actual token expiry to refresh the token.
*/
public Duration getTokenRefreshOffset() {
return tokenRefreshOffset;
}
/**
 * Sets how long before the actual token expiry to refresh the token. The
 * token will be considered expired at and after the time of (actual
 * expiry - token refresh offset). The default offset is 2 minutes.
 *
 * This is useful when network is congested and a request containing the
 * token takes longer than normal to get to the server.
 *
 * @param tokenRefreshOffset the duration before the actual expiry of a token to refresh it
 * @return IdentityClientOptions
 */
public IdentityClientOptions setTokenRefreshOffset(Duration tokenRefreshOffset) {
    // Ignore null so the 2-minute default offset is preserved.
    if (tokenRefreshOffset != null) {
        this.tokenRefreshOffset = tokenRefreshOffset;
    }
    return this;
}
/**
 * Specifies the HttpClient to use for requests.
 *
 * @param httpClient the http client to use for requests
 * @return IdentityClientOptions
 */
public IdentityClientOptions setHttpClient(HttpClient httpClient) {
    this.httpClient = httpClient;
    return this;
}
} |
Along with the other comment, this property name could be changed to be more generic. | public HttpLoggingPolicy(HttpLogOptions httpLogOptions) {
if (httpLogOptions == null) {
this.httpLogDetailLevel = HttpLogDetailLevel.NONE;
this.allowedHeaderNames = Collections.emptySet();
this.allowedQueryParameterNames = Collections.emptySet();
this.prettyPrintJson = false;
} else {
this.httpLogDetailLevel = httpLogOptions.getLogLevel();
this.allowedHeaderNames = httpLogOptions.getAllowedHeaderNames()
.stream()
.map(headerName -> headerName.toLowerCase(Locale.ROOT))
.collect(Collectors.toSet());
this.allowedQueryParameterNames = httpLogOptions.getAllowedQueryParamNames()
.stream()
.map(queryParamName -> queryParamName.toLowerCase(Locale.ROOT))
.collect(Collectors.toSet());
this.prettyPrintJson = httpLogOptions.isPrettyPrintJson();
}
} | this.prettyPrintJson = false; | public HttpLoggingPolicy(HttpLogOptions httpLogOptions) {
if (httpLogOptions == null) {
this.httpLogDetailLevel = HttpLogDetailLevel.NONE;
this.allowedHeaderNames = Collections.emptySet();
this.allowedQueryParameterNames = Collections.emptySet();
this.prettyPrintBody = false;
} else {
this.httpLogDetailLevel = httpLogOptions.getLogLevel();
this.allowedHeaderNames = httpLogOptions.getAllowedHeaderNames()
.stream()
.map(headerName -> headerName.toLowerCase(Locale.ROOT))
.collect(Collectors.toSet());
this.allowedQueryParameterNames = httpLogOptions.getAllowedQueryParamNames()
.stream()
.map(queryParamName -> queryParamName.toLowerCase(Locale.ROOT))
.collect(Collectors.toSet());
this.prettyPrintBody = httpLogOptions.isPrettyPrintBody();
}
} | class HttpLoggingPolicy implements HttpPipelinePolicy {
private static final ObjectMapper PRETTY_PRINTER = new ObjectMapper().enable(SerializationFeature.INDENT_OUTPUT);
private static final int MAX_BODY_LOG_SIZE = 1024 * 16;
private static final String REDACTED_PLACEHOLDER = "REDACTED";
private final HttpLogDetailLevel httpLogDetailLevel;
private final Set<String> allowedHeaderNames;
private final Set<String> allowedQueryParameterNames;
private final boolean prettyPrintJson;
/**
 * Logs the outgoing HTTP request and the corresponding response according to the
 * configured {@link HttpLogDetailLevel}. When logging is disabled the pipeline is
 * invoked directly with no overhead.
 *
 * (NOTE(review): the Javadoc previously attached here — "Creates an HttpLoggingPolicy
 * with the given log configurations" with {@code @param httpLogOptions} — documented
 * the constructor, not this method, so Javadoc tooling bound a non-existent parameter
 * to {@code process}. Replaced with an accurate doc comment.)
 *
 * @param context The request context; also used to look up the calling method for the logger name.
 * @param next The next policy in the pipeline.
 * @return A Mono that emits the HTTP response once it has been logged.
 */
@Override
public Mono<HttpResponse> process(HttpPipelineCallContext context, HttpPipelineNextPolicy next) {
    if (httpLogDetailLevel == HttpLogDetailLevel.NONE) {
        return next.process();
    }
    final ClientLogger logger = new ClientLogger((String) context.getData("caller-method").orElse(""));
    final long startNs = System.nanoTime();
    return logRequest(logger, context.getHttpRequest())
        .then(next.process())
        .flatMap(response -> logResponse(logger, response, startNs))
        .doOnError(throwable -> logger.warning("<-- HTTP FAILED: ", throwable));
}
/*
* Logs the HTTP request.
*
* @param logger Logger used to log the request.
* @param request HTTP request being sent to Azure.
* @return A Mono which completes once the request has been logged (the return type is Mono&lt;Void&gt;; no string is emitted).
*/
private Mono<Void> logRequest(final ClientLogger logger, final HttpRequest request) {
if (!logger.canLogAtLevel(LogLevel.INFORMATIONAL)) {
return Mono.empty();
}
StringBuilder requestLogMessage = new StringBuilder();
if (httpLogDetailLevel.shouldLogUrl()) {
requestLogMessage.append("--> ")
.append(request.getHttpMethod())
.append(" ")
.append(getRedactedUrl(request.getUrl()))
.append(System.lineSeparator());
}
addHeadersToLogMessage(logger, request.getHeaders(), requestLogMessage);
if (!httpLogDetailLevel.shouldLogBody()) {
return logAndReturn(logger, requestLogMessage, null);
}
if (request.getBody() == null) {
requestLogMessage.append("(empty body)")
.append(System.lineSeparator())
.append("--> END ")
.append(request.getHttpMethod())
.append(System.lineSeparator());
return logAndReturn(logger, requestLogMessage, null);
}
String contentType = request.getHeaders().getValue("Content-Type");
long contentLength = getContentLength(logger, request.getHeaders());
if (shouldBodyBeLogged(contentType, contentLength)) {
ByteArrayOutputStream outputStream = new ByteArrayOutputStream((int) contentLength);
request.setBody(
request.getBody()
.doOnNext(byteBuffer -> {
for (int i = byteBuffer.position(); i < byteBuffer.limit(); i++) {
outputStream.write(byteBuffer.get(i));
}
})
.doFinally(ignored -> {
requestLogMessage.append(contentLength)
.append("-byte body:")
.append(System.lineSeparator())
.append(prettyPrintIfNeeded(logger, contentType,
new String(outputStream.toByteArray(), StandardCharsets.UTF_8)))
.append(System.lineSeparator())
.append("--> END ")
.append(request.getHttpMethod())
.append(System.lineSeparator());
logger.info(requestLogMessage.toString());
}));
return Mono.empty();
} else {
requestLogMessage.append(contentLength)
.append("-byte body: (content not logged)")
.append(System.lineSeparator())
.append("--> END ")
.append(request.getHttpMethod())
.append(System.lineSeparator());
return logAndReturn(logger, requestLogMessage, null);
}
}
/*
* Logs the HTTP response.
*
* @param logger Logger used to log the response.
* @param response HTTP response returned from Azure.
* @param startNs Nanosecond representation of when the request was sent.
* @return A Mono containing the HTTP response.
*/
private Mono<HttpResponse> logResponse(final ClientLogger logger, final HttpResponse response, long startNs) {
// Skip all message building when INFO logging is disabled for this logger.
if (!logger.canLogAtLevel(LogLevel.INFORMATIONAL)) {
return Mono.just(response);
}
long tookMs = TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - startNs);
String contentLengthString = response.getHeaderValue("Content-Length");
String bodySize = (CoreUtils.isNullOrEmpty(contentLengthString))
? "unknown-length body"
: contentLengthString + "-byte body";
StringBuilder responseLogMessage = new StringBuilder();
if (httpLogDetailLevel.shouldLogUrl()) {
responseLogMessage.append("<-- ")
.append(response.getStatusCode())
.append(" ")
.append(getRedactedUrl(response.getRequest().getUrl()))
.append(" (")
.append(tookMs)
.append(" ms, ")
.append(bodySize)
.append(")")
.append(System.lineSeparator());
}
addHeadersToLogMessage(logger, response.getHeaders(), responseLogMessage);
if (!httpLogDetailLevel.shouldLogBody()) {
responseLogMessage.append("<-- END HTTP");
return logAndReturn(logger, responseLogMessage, response);
}
String contentTypeHeader = response.getHeaderValue("Content-Type");
long contentLength = getContentLength(logger, response.getHeaders());
if (shouldBodyBeLogged(contentTypeHeader, contentLength)) {
// Buffer the response so the body can be consumed here for logging and still
// be re-read by the caller.
HttpResponse bufferedResponse = response.buffer();
ByteArrayOutputStream outputStream = new ByteArrayOutputStream((int) contentLength);
return bufferedResponse.getBody()
.doOnNext(byteBuffer -> {
// Copy via absolute get(i) so the buffer's read position is left untouched.
for (int i = byteBuffer.position(); i < byteBuffer.limit(); i++) {
outputStream.write(byteBuffer.get(i));
}
})
.doFinally(ignored -> {
// Emit the complete message only after the whole body has streamed through.
responseLogMessage.append("Response body:")
.append(System.lineSeparator())
.append(prettyPrintIfNeeded(logger, contentTypeHeader,
new String(outputStream.toByteArray(), StandardCharsets.UTF_8)))
.append(System.lineSeparator())
.append("<-- END HTTP");
logger.info(responseLogMessage.toString());
}).then(Mono.just(bufferedResponse));
} else {
responseLogMessage.append("(body content not logged)")
.append(System.lineSeparator())
.append("<-- END HTTP");
return logAndReturn(logger, responseLogMessage, response);
}
}
/*
 * Emits the accumulated log message at INFO level, then passes the given value
 * through as a Mono (empty when the value is null).
 */
private <T> Mono<T> logAndReturn(ClientLogger logger, StringBuilder logMessageBuilder, T data) {
    final String message = logMessageBuilder.toString();
    logger.info(message);
    return Mono.justOrEmpty(data);
}
/*
* Generates the redacted URL for logging.
*
* Only query parameters in the configured allow-list keep their values; all
* others have their values replaced with a placeholder by getAllowedQueryString.
*
* @param url URL where the request is being sent.
* @return A URL with query parameters redacted based on configurations in this policy.
*/
private String getRedactedUrl(URL url) {
return UrlBuilder.parse(url)
.setQuery(getAllowedQueryString(url.getQuery()))
.toString();
}
/*
 * Builds a logging-safe query string: parameters whose (lower-cased) name is in
 * the allow-list are kept verbatim, all other values are replaced with the
 * redaction placeholder. Fragments without '=' are passed through unchanged.
 *
 * @param queryString Query parameter string from the request URL.
 * @return A query parameter string redacted based on the configurations in this policy.
 */
private String getAllowedQueryString(String queryString) {
    if (CoreUtils.isNullOrEmpty(queryString)) {
        return "";
    }
    StringBuilder redacted = new StringBuilder();
    for (String pair : queryString.split("&")) {
        if (redacted.length() > 0) {
            redacted.append("&");
        }
        String[] nameValue = pair.split("=", 2);
        if (nameValue.length != 2) {
            // No '=' present; emit the fragment untouched.
            redacted.append(pair);
            continue;
        }
        boolean allowed = allowedQueryParameterNames.contains(nameValue[0].toLowerCase(Locale.ROOT));
        if (allowed) {
            redacted.append(pair);
        } else {
            redacted.append(nameValue[0]).append("=").append(REDACTED_PLACEHOLDER);
        }
    }
    return redacted.toString();
}
/*
* Adds HTTP headers into the StringBuilder that is generating the log message.
*
* @param headers HTTP headers on the request or response.
* @param sb StringBuilder that is generating the log message.
* @param logLevel Log level the environment is configured to use.
*/
private void addHeadersToLogMessage(ClientLogger logger, HttpHeaders headers, StringBuilder sb) {
if (!httpLogDetailLevel.shouldLogHeaders() || logger.canLogAtLevel(LogLevel.VERBOSE)) {
return;
}
for (HttpHeader header : headers) {
String headerName = header.getName();
sb.append(headerName).append(":");
if (allowedHeaderNames.contains(headerName.toLowerCase(Locale.ROOT))) {
sb.append(header.getValue());
} else {
sb.append(REDACTED_PLACEHOLDER);
}
sb.append(System.lineSeparator());
}
}
/*
* Determines and attempts to pretty print the body if it is JSON.
*
* <p>The body is pretty printed if the Content-Type is JSON and the policy is configured to pretty print JSON.</p>
*
* @param logger Logger used to log a warning if the body fails to pretty print as JSON.
* @param contentType Content-Type header.
* @param body Body of the request or response.
* @return The body pretty printed if it is JSON, otherwise the unmodified body.
*/
private String prettyPrintIfNeeded(ClientLogger logger, String contentType, String body) {
String result = body;
if (prettyPrintJson && contentType != null
&& (contentType.startsWith(ContentType.APPLICATION_JSON) || contentType.startsWith("text/json"))) {
try {
final Object deserialized = PRETTY_PRINTER.readTree(body);
result = PRETTY_PRINTER.writeValueAsString(deserialized);
} catch (Exception e) {
logger.warning("Failed to pretty print JSON: {}", e.getMessage());
}
}
return result;
}
/*
 * Attempts to retrieve and parse the Content-Length header into a numeric representation.
 *
 * @param logger Logger used to log a warning if the Content-Length header is an invalid number.
 * @param headers HTTP headers that are checked for containing Content-Length.
 * @return The parsed Content-Length value, or 0 when the header is absent, empty,
 * or not a valid number.
 */
private long getContentLength(ClientLogger logger, HttpHeaders headers) {
    String contentLengthString = headers.getValue("Content-Length");
    if (CoreUtils.isNullOrEmpty(contentLengthString)) {
        return 0;
    }
    try {
        return Long.parseLong(contentLengthString);
    } catch (NumberFormatException e) {
        // NullPointerException was previously caught here too, but the isNullOrEmpty
        // guard above makes it unreachable. Also reuse the value already fetched
        // instead of re-reading the header with different casing.
        logger.warning("Could not parse the HTTP header content-length: '{}'.",
            contentLengthString, e);
        return 0;
    }
}
/*
 * Decides whether a request or response body is loggable: the payload must not
 * be "application/octet-stream", must be non-empty, and must be smaller than
 * the 16KB logging cap.
 *
 * @param contentTypeHeader Content-Type header value.
 * @param contentLength Content-Length header represented as a numeric.
 * @return A flag indicating if the request or response body should be logged.
 */
private boolean shouldBodyBeLogged(String contentTypeHeader, long contentLength) {
    boolean isOctetStream = ContentType.APPLICATION_OCTET_STREAM.equalsIgnoreCase(contentTypeHeader);
    boolean sizeIsLoggable = contentLength != 0 && contentLength < MAX_BODY_LOG_SIZE;
    return !isOctetStream && sizeIsLoggable;
}
} | class HttpLoggingPolicy implements HttpPipelinePolicy {
private static final ObjectMapper PRETTY_PRINTER = new ObjectMapper().enable(SerializationFeature.INDENT_OUTPUT);
private static final int MAX_BODY_LOG_SIZE = 1024 * 16;
private static final String REDACTED_PLACEHOLDER = "REDACTED";
private final HttpLogDetailLevel httpLogDetailLevel;
private final Set<String> allowedHeaderNames;
private final Set<String> allowedQueryParameterNames;
private final boolean prettyPrintBody;
/**
* Creates an HttpLoggingPolicy with the given log configurations.
*
* @param httpLogOptions The HTTP logging configuration options.
*/
@Override
public Mono<HttpResponse> process(HttpPipelineCallContext context, HttpPipelineNextPolicy next) {
if (httpLogDetailLevel == HttpLogDetailLevel.NONE) {
return next.process();
}
final ClientLogger logger = new ClientLogger((String) context.getData("caller-method").orElse(""));
final long startNs = System.nanoTime();
return logRequest(logger, context.getHttpRequest())
.then(next.process())
.flatMap(response -> logResponse(logger, response, startNs))
.doOnError(throwable -> logger.warning("<-- HTTP FAILED: ", throwable));
}
/*
* Logs the HTTP request.
*
* @param logger Logger used to log the request.
* @param request HTTP request being sent to Azure.
* @return A Mono which completes once the request has been logged (the return type is Mono&lt;Void&gt;; no string is emitted).
*/
private Mono<Void> logRequest(final ClientLogger logger, final HttpRequest request) {
if (!logger.canLogAtLevel(LogLevel.INFORMATIONAL)) {
return Mono.empty();
}
StringBuilder requestLogMessage = new StringBuilder();
if (httpLogDetailLevel.shouldLogUrl()) {
requestLogMessage.append("--> ")
.append(request.getHttpMethod())
.append(" ")
.append(getRedactedUrl(request.getUrl()))
.append(System.lineSeparator());
}
addHeadersToLogMessage(logger, request.getHeaders(), requestLogMessage);
if (!httpLogDetailLevel.shouldLogBody()) {
return logAndReturn(logger, requestLogMessage, null);
}
if (request.getBody() == null) {
requestLogMessage.append("(empty body)")
.append(System.lineSeparator())
.append("--> END ")
.append(request.getHttpMethod())
.append(System.lineSeparator());
return logAndReturn(logger, requestLogMessage, null);
}
String contentType = request.getHeaders().getValue("Content-Type");
long contentLength = getContentLength(logger, request.getHeaders());
if (shouldBodyBeLogged(contentType, contentLength)) {
ByteArrayOutputStream outputStream = new ByteArrayOutputStream((int) contentLength);
request.setBody(
request.getBody()
.doOnNext(byteBuffer -> {
for (int i = byteBuffer.position(); i < byteBuffer.limit(); i++) {
outputStream.write(byteBuffer.get(i));
}
})
.doFinally(ignored -> {
requestLogMessage.append(contentLength)
.append("-byte body:")
.append(System.lineSeparator())
.append(prettyPrintIfNeeded(logger, contentType,
new String(outputStream.toByteArray(), StandardCharsets.UTF_8)))
.append(System.lineSeparator())
.append("--> END ")
.append(request.getHttpMethod())
.append(System.lineSeparator());
logger.info(requestLogMessage.toString());
}));
return Mono.empty();
} else {
requestLogMessage.append(contentLength)
.append("-byte body: (content not logged)")
.append(System.lineSeparator())
.append("--> END ")
.append(request.getHttpMethod())
.append(System.lineSeparator());
return logAndReturn(logger, requestLogMessage, null);
}
}
/*
* Logs the HTTP response.
*
* @param logger Logger used to log the response.
* @param response HTTP response returned from Azure.
* @param startNs Nanosecond representation of when the request was sent.
* @return A Mono containing the HTTP response.
*/
private Mono<HttpResponse> logResponse(final ClientLogger logger, final HttpResponse response, long startNs) {
if (!logger.canLogAtLevel(LogLevel.INFORMATIONAL)) {
return Mono.just(response);
}
long tookMs = TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - startNs);
String contentLengthString = response.getHeaderValue("Content-Length");
String bodySize = (CoreUtils.isNullOrEmpty(contentLengthString))
? "unknown-length body"
: contentLengthString + "-byte body";
StringBuilder responseLogMessage = new StringBuilder();
if (httpLogDetailLevel.shouldLogUrl()) {
responseLogMessage.append("<-- ")
.append(response.getStatusCode())
.append(" ")
.append(getRedactedUrl(response.getRequest().getUrl()))
.append(" (")
.append(tookMs)
.append(" ms, ")
.append(bodySize)
.append(")")
.append(System.lineSeparator());
}
addHeadersToLogMessage(logger, response.getHeaders(), responseLogMessage);
if (!httpLogDetailLevel.shouldLogBody()) {
responseLogMessage.append("<-- END HTTP");
return logAndReturn(logger, responseLogMessage, response);
}
String contentTypeHeader = response.getHeaderValue("Content-Type");
long contentLength = getContentLength(logger, response.getHeaders());
if (shouldBodyBeLogged(contentTypeHeader, contentLength)) {
HttpResponse bufferedResponse = response.buffer();
ByteArrayOutputStream outputStream = new ByteArrayOutputStream((int) contentLength);
return bufferedResponse.getBody()
.doOnNext(byteBuffer -> {
for (int i = byteBuffer.position(); i < byteBuffer.limit(); i++) {
outputStream.write(byteBuffer.get(i));
}
})
.doFinally(ignored -> {
responseLogMessage.append("Response body:")
.append(System.lineSeparator())
.append(prettyPrintIfNeeded(logger, contentTypeHeader,
new String(outputStream.toByteArray(), StandardCharsets.UTF_8)))
.append(System.lineSeparator())
.append("<-- END HTTP");
logger.info(responseLogMessage.toString());
}).then(Mono.just(bufferedResponse));
} else {
responseLogMessage.append("(body content not logged)")
.append(System.lineSeparator())
.append("<-- END HTTP");
return logAndReturn(logger, responseLogMessage, response);
}
}
private <T> Mono<T> logAndReturn(ClientLogger logger, StringBuilder logMessageBuilder, T data) {
logger.info(logMessageBuilder.toString());
return Mono.justOrEmpty(data);
}
/*
* Generates the redacted URL for logging.
*
* @param url URL where the request is being sent.
* @return A URL with query parameters redacted based on configurations in this policy.
*/
private String getRedactedUrl(URL url) {
return UrlBuilder.parse(url)
.setQuery(getAllowedQueryString(url.getQuery()))
.toString();
}
/*
* Generates the logging safe query parameters string.
*
* @param queryString Query parameter string from the request URL.
* @return A query parameter string redacted based on the configurations in this policy.
*/
private String getAllowedQueryString(String queryString) {
if (CoreUtils.isNullOrEmpty(queryString)) {
return "";
}
StringBuilder queryStringBuilder = new StringBuilder();
String[] queryParams = queryString.split("&");
for (String queryParam : queryParams) {
if (queryStringBuilder.length() > 0) {
queryStringBuilder.append("&");
}
String[] queryPair = queryParam.split("=", 2);
if (queryPair.length == 2) {
String queryName = queryPair[0];
if (allowedQueryParameterNames.contains(queryName.toLowerCase(Locale.ROOT))) {
queryStringBuilder.append(queryParam);
} else {
queryStringBuilder.append(queryPair[0]).append("=").append(REDACTED_PLACEHOLDER);
}
} else {
queryStringBuilder.append(queryParam);
}
}
return queryStringBuilder.toString();
}
/*
* Adds HTTP headers into the StringBuilder that is generating the log message.
*
* @param headers HTTP headers on the request or response.
* @param sb StringBuilder that is generating the log message.
* @param logLevel Log level the environment is configured to use.
*/
private void addHeadersToLogMessage(ClientLogger logger, HttpHeaders headers, StringBuilder sb) {
if (!httpLogDetailLevel.shouldLogHeaders() || logger.canLogAtLevel(LogLevel.VERBOSE)) {
return;
}
for (HttpHeader header : headers) {
String headerName = header.getName();
sb.append(headerName).append(":");
if (allowedHeaderNames.contains(headerName.toLowerCase(Locale.ROOT))) {
sb.append(header.getValue());
} else {
sb.append(REDACTED_PLACEHOLDER);
}
sb.append(System.lineSeparator());
}
}
/*
* Determines and attempts to pretty print the body if it is JSON.
*
* <p>The body is pretty printed if the Content-Type is JSON and the policy is configured to pretty print JSON.</p>
*
* @param logger Logger used to log a warning if the body fails to pretty print as JSON.
* @param contentType Content-Type header.
* @param body Body of the request or response.
* @return The body pretty printed if it is JSON, otherwise the unmodified body.
*/
private String prettyPrintIfNeeded(ClientLogger logger, String contentType, String body) {
String result = body;
if (prettyPrintBody && contentType != null
&& (contentType.startsWith(ContentType.APPLICATION_JSON) || contentType.startsWith("text/json"))) {
try {
final Object deserialized = PRETTY_PRINTER.readTree(body);
result = PRETTY_PRINTER.writeValueAsString(deserialized);
} catch (Exception e) {
logger.warning("Failed to pretty print JSON: {}", e.getMessage());
}
}
return result;
}
/*
* Attempts to retrieve and parse the Content-Length header into a numeric representation.
*
* @param logger Logger used to log a warning if the Content-Length header is an invalid number.
* @param headers HTTP headers that are checked for containing Content-Length.
* @return The parsed Content-Length value, or 0 when the header is absent, empty, or not a valid number.
*/
private long getContentLength(ClientLogger logger, HttpHeaders headers) {
long contentLength = 0;
String contentLengthString = headers.getValue("Content-Length");
if (CoreUtils.isNullOrEmpty(contentLengthString)) {
return contentLength;
}
try {
contentLength = Long.parseLong(contentLengthString);
} catch (NumberFormatException | NullPointerException e) {
logger.warning("Could not parse the HTTP header content-length: '{}'.",
headers.getValue("content-length"), e);
}
return contentLength;
}
/*
* Determines if the request or response body should be logged.
*
* <p>The request or response body is logged if the Content-Type is not "application/octet-stream" and the body
* isn't empty and is less than 16KB in size.</p>
*
* @param contentTypeHeader Content-Type header value.
* @param contentLength Content-Length header represented as a numeric.
* @return A flag indicating if the request or response body should be logged.
*/
private boolean shouldBodyBeLogged(String contentTypeHeader, long contentLength) {
return !ContentType.APPLICATION_OCTET_STREAM.equalsIgnoreCase(contentTypeHeader)
&& contentLength != 0
&& contentLength < MAX_BODY_LOG_SIZE;
}
} |
Renamed this property as well. | public HttpLoggingPolicy(HttpLogOptions httpLogOptions) {
if (httpLogOptions == null) {
this.httpLogDetailLevel = HttpLogDetailLevel.NONE;
this.allowedHeaderNames = Collections.emptySet();
this.allowedQueryParameterNames = Collections.emptySet();
this.prettyPrintJson = false;
} else {
this.httpLogDetailLevel = httpLogOptions.getLogLevel();
this.allowedHeaderNames = httpLogOptions.getAllowedHeaderNames()
.stream()
.map(headerName -> headerName.toLowerCase(Locale.ROOT))
.collect(Collectors.toSet());
this.allowedQueryParameterNames = httpLogOptions.getAllowedQueryParamNames()
.stream()
.map(queryParamName -> queryParamName.toLowerCase(Locale.ROOT))
.collect(Collectors.toSet());
this.prettyPrintJson = httpLogOptions.isPrettyPrintJson();
}
} | this.prettyPrintJson = false; | public HttpLoggingPolicy(HttpLogOptions httpLogOptions) {
if (httpLogOptions == null) {
this.httpLogDetailLevel = HttpLogDetailLevel.NONE;
this.allowedHeaderNames = Collections.emptySet();
this.allowedQueryParameterNames = Collections.emptySet();
this.prettyPrintBody = false;
} else {
this.httpLogDetailLevel = httpLogOptions.getLogLevel();
this.allowedHeaderNames = httpLogOptions.getAllowedHeaderNames()
.stream()
.map(headerName -> headerName.toLowerCase(Locale.ROOT))
.collect(Collectors.toSet());
this.allowedQueryParameterNames = httpLogOptions.getAllowedQueryParamNames()
.stream()
.map(queryParamName -> queryParamName.toLowerCase(Locale.ROOT))
.collect(Collectors.toSet());
this.prettyPrintBody = httpLogOptions.isPrettyPrintBody();
}
} | class HttpLoggingPolicy implements HttpPipelinePolicy {
private static final ObjectMapper PRETTY_PRINTER = new ObjectMapper().enable(SerializationFeature.INDENT_OUTPUT);
private static final int MAX_BODY_LOG_SIZE = 1024 * 16;
private static final String REDACTED_PLACEHOLDER = "REDACTED";
private final HttpLogDetailLevel httpLogDetailLevel;
private final Set<String> allowedHeaderNames;
private final Set<String> allowedQueryParameterNames;
private final boolean prettyPrintJson;
/**
* Creates an HttpLoggingPolicy with the given log configurations.
*
* @param httpLogOptions The HTTP logging configuration options.
*/
@Override
public Mono<HttpResponse> process(HttpPipelineCallContext context, HttpPipelineNextPolicy next) {
if (httpLogDetailLevel == HttpLogDetailLevel.NONE) {
return next.process();
}
final ClientLogger logger = new ClientLogger((String) context.getData("caller-method").orElse(""));
final long startNs = System.nanoTime();
return logRequest(logger, context.getHttpRequest())
.then(next.process())
.flatMap(response -> logResponse(logger, response, startNs))
.doOnError(throwable -> logger.warning("<-- HTTP FAILED: ", throwable));
}
/*
* Logs the HTTP request.
*
* @param logger Logger used to log the request.
* @param request HTTP request being sent to Azure.
* @return A Mono which will emit the string to log.
*/
private Mono<Void> logRequest(final ClientLogger logger, final HttpRequest request) {
if (!logger.canLogAtLevel(LogLevel.INFORMATIONAL)) {
return Mono.empty();
}
StringBuilder requestLogMessage = new StringBuilder();
if (httpLogDetailLevel.shouldLogUrl()) {
requestLogMessage.append("--> ")
.append(request.getHttpMethod())
.append(" ")
.append(getRedactedUrl(request.getUrl()))
.append(System.lineSeparator());
}
addHeadersToLogMessage(logger, request.getHeaders(), requestLogMessage);
if (!httpLogDetailLevel.shouldLogBody()) {
return logAndReturn(logger, requestLogMessage, null);
}
if (request.getBody() == null) {
requestLogMessage.append("(empty body)")
.append(System.lineSeparator())
.append("--> END ")
.append(request.getHttpMethod())
.append(System.lineSeparator());
return logAndReturn(logger, requestLogMessage, null);
}
String contentType = request.getHeaders().getValue("Content-Type");
long contentLength = getContentLength(logger, request.getHeaders());
if (shouldBodyBeLogged(contentType, contentLength)) {
ByteArrayOutputStream outputStream = new ByteArrayOutputStream((int) contentLength);
request.setBody(
request.getBody()
.doOnNext(byteBuffer -> {
for (int i = byteBuffer.position(); i < byteBuffer.limit(); i++) {
outputStream.write(byteBuffer.get(i));
}
})
.doFinally(ignored -> {
requestLogMessage.append(contentLength)
.append("-byte body:")
.append(System.lineSeparator())
.append(prettyPrintIfNeeded(logger, contentType,
new String(outputStream.toByteArray(), StandardCharsets.UTF_8)))
.append(System.lineSeparator())
.append("--> END ")
.append(request.getHttpMethod())
.append(System.lineSeparator());
logger.info(requestLogMessage.toString());
}));
return Mono.empty();
} else {
requestLogMessage.append(contentLength)
.append("-byte body: (content not logged)")
.append(System.lineSeparator())
.append("--> END ")
.append(request.getHttpMethod())
.append(System.lineSeparator());
return logAndReturn(logger, requestLogMessage, null);
}
}
/*
* Logs thr HTTP response.
*
* @param logger Logger used to log the response.
* @param response HTTP response returned from Azure.
* @param startNs Nanosecond representation of when the request was sent.
* @return A Mono containing the HTTP response.
*/
private Mono<HttpResponse> logResponse(final ClientLogger logger, final HttpResponse response, long startNs) {
if (!logger.canLogAtLevel(LogLevel.INFORMATIONAL)) {
return Mono.just(response);
}
long tookMs = TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - startNs);
String contentLengthString = response.getHeaderValue("Content-Length");
String bodySize = (CoreUtils.isNullOrEmpty(contentLengthString))
? "unknown-length body"
: contentLengthString + "-byte body";
StringBuilder responseLogMessage = new StringBuilder();
if (httpLogDetailLevel.shouldLogUrl()) {
responseLogMessage.append("<-- ")
.append(response.getStatusCode())
.append(" ")
.append(getRedactedUrl(response.getRequest().getUrl()))
.append(" (")
.append(tookMs)
.append(" ms, ")
.append(bodySize)
.append(")")
.append(System.lineSeparator());
}
addHeadersToLogMessage(logger, response.getHeaders(), responseLogMessage);
if (!httpLogDetailLevel.shouldLogBody()) {
responseLogMessage.append("<-- END HTTP");
return logAndReturn(logger, responseLogMessage, response);
}
String contentTypeHeader = response.getHeaderValue("Content-Type");
long contentLength = getContentLength(logger, response.getHeaders());
if (shouldBodyBeLogged(contentTypeHeader, contentLength)) {
HttpResponse bufferedResponse = response.buffer();
ByteArrayOutputStream outputStream = new ByteArrayOutputStream((int) contentLength);
return bufferedResponse.getBody()
.doOnNext(byteBuffer -> {
for (int i = byteBuffer.position(); i < byteBuffer.limit(); i++) {
outputStream.write(byteBuffer.get(i));
}
})
.doFinally(ignored -> {
responseLogMessage.append("Response body:")
.append(System.lineSeparator())
.append(prettyPrintIfNeeded(logger, contentTypeHeader,
new String(outputStream.toByteArray(), StandardCharsets.UTF_8)))
.append(System.lineSeparator())
.append("<-- END HTTP");
logger.info(responseLogMessage.toString());
}).then(Mono.just(bufferedResponse));
} else {
responseLogMessage.append("(body content not logged)")
.append(System.lineSeparator())
.append("<-- END HTTP");
return logAndReturn(logger, responseLogMessage, response);
}
}
private <T> Mono<T> logAndReturn(ClientLogger logger, StringBuilder logMessageBuilder, T data) {
logger.info(logMessageBuilder.toString());
return Mono.justOrEmpty(data);
}
/*
* Generates the redacted URL for logging.
*
* @param url URL where the request is being sent.
* @return A URL with query parameters redacted based on configurations in this policy.
*/
private String getRedactedUrl(URL url) {
return UrlBuilder.parse(url)
.setQuery(getAllowedQueryString(url.getQuery()))
.toString();
}
/*
* Generates the logging safe query parameters string.
*
* @param queryString Query parameter string from the request URL.
* @return A query parameter string redacted based on the configurations in this policy.
*/
private String getAllowedQueryString(String queryString) {
if (CoreUtils.isNullOrEmpty(queryString)) {
return "";
}
StringBuilder queryStringBuilder = new StringBuilder();
String[] queryParams = queryString.split("&");
for (String queryParam : queryParams) {
if (queryStringBuilder.length() > 0) {
queryStringBuilder.append("&");
}
String[] queryPair = queryParam.split("=", 2);
if (queryPair.length == 2) {
String queryName = queryPair[0];
if (allowedQueryParameterNames.contains(queryName.toLowerCase(Locale.ROOT))) {
queryStringBuilder.append(queryParam);
} else {
queryStringBuilder.append(queryPair[0]).append("=").append(REDACTED_PLACEHOLDER);
}
} else {
queryStringBuilder.append(queryParam);
}
}
return queryStringBuilder.toString();
}
/*
* Adds HTTP headers into the StringBuilder that is generating the log message.
*
* @param headers HTTP headers on the request or response.
* @param sb StringBuilder that is generating the log message.
* @param logLevel Log level the environment is configured to use.
*/
private void addHeadersToLogMessage(ClientLogger logger, HttpHeaders headers, StringBuilder sb) {
if (!httpLogDetailLevel.shouldLogHeaders() || logger.canLogAtLevel(LogLevel.VERBOSE)) {
return;
}
for (HttpHeader header : headers) {
String headerName = header.getName();
sb.append(headerName).append(":");
if (allowedHeaderNames.contains(headerName.toLowerCase(Locale.ROOT))) {
sb.append(header.getValue());
} else {
sb.append(REDACTED_PLACEHOLDER);
}
sb.append(System.lineSeparator());
}
}
/*
* Determines and attempts to pretty print the body if it is JSON.
*
* <p>The body is pretty printed if the Content-Type is JSON and the policy is configured to pretty print JSON.</p>
*
* @param logger Logger used to log a warning if the body fails to pretty print as JSON.
* @param contentType Content-Type header.
* @param body Body of the request or response.
* @return The body pretty printed if it is JSON, otherwise the unmodified body.
*/
private String prettyPrintIfNeeded(ClientLogger logger, String contentType, String body) {
String result = body;
if (prettyPrintJson && contentType != null
&& (contentType.startsWith(ContentType.APPLICATION_JSON) || contentType.startsWith("text/json"))) {
try {
final Object deserialized = PRETTY_PRINTER.readTree(body);
result = PRETTY_PRINTER.writeValueAsString(deserialized);
} catch (Exception e) {
logger.warning("Failed to pretty print JSON: {}", e.getMessage());
}
}
return result;
}
/*
* Attempts to retrieve and parse the Content-Length header into a numeric representation.
*
* @param logger Logger used to log a warning if the Content-Length header is an invalid number.
* @param headers HTTP headers that are checked for containing Content-Length.
* @return
*/
private long getContentLength(ClientLogger logger, HttpHeaders headers) {
long contentLength = 0;
String contentLengthString = headers.getValue("Content-Length");
if (CoreUtils.isNullOrEmpty(contentLengthString)) {
return contentLength;
}
try {
contentLength = Long.parseLong(contentLengthString);
} catch (NumberFormatException | NullPointerException e) {
logger.warning("Could not parse the HTTP header content-length: '{}'.",
headers.getValue("content-length"), e);
}
return contentLength;
}
/*
* Determines if the request or response body should be logged.
*
* <p>The request or response body is logged if the Content-Type is not "application/octet-stream" and the body
* isn't empty and is less than 16KB in size.</p>
*
* @param contentTypeHeader Content-Type header value.
* @param contentLength Content-Length header represented as a numeric.
* @return A flag indicating if the request or response body should be logged.
*/
private boolean shouldBodyBeLogged(String contentTypeHeader, long contentLength) {
return !ContentType.APPLICATION_OCTET_STREAM.equalsIgnoreCase(contentTypeHeader)
&& contentLength != 0
&& contentLength < MAX_BODY_LOG_SIZE;
}
} | class HttpLoggingPolicy implements HttpPipelinePolicy {
private static final ObjectMapper PRETTY_PRINTER = new ObjectMapper().enable(SerializationFeature.INDENT_OUTPUT);
private static final int MAX_BODY_LOG_SIZE = 1024 * 16;
private static final String REDACTED_PLACEHOLDER = "REDACTED";
private final HttpLogDetailLevel httpLogDetailLevel;
private final Set<String> allowedHeaderNames;
private final Set<String> allowedQueryParameterNames;
private final boolean prettyPrintBody;
/**
* Creates an HttpLoggingPolicy with the given log configurations.
*
* @param httpLogOptions The HTTP logging configuration options.
*/
@Override
public Mono<HttpResponse> process(HttpPipelineCallContext context, HttpPipelineNextPolicy next) {
if (httpLogDetailLevel == HttpLogDetailLevel.NONE) {
return next.process();
}
final ClientLogger logger = new ClientLogger((String) context.getData("caller-method").orElse(""));
final long startNs = System.nanoTime();
return logRequest(logger, context.getHttpRequest())
.then(next.process())
.flatMap(response -> logResponse(logger, response, startNs))
.doOnError(throwable -> logger.warning("<-- HTTP FAILED: ", throwable));
}
/*
* Logs the HTTP request.
*
* @param logger Logger used to log the request.
* @param request HTTP request being sent to Azure.
* @return A Mono which will emit the string to log.
*/
private Mono<Void> logRequest(final ClientLogger logger, final HttpRequest request) {
if (!logger.canLogAtLevel(LogLevel.INFORMATIONAL)) {
return Mono.empty();
}
StringBuilder requestLogMessage = new StringBuilder();
if (httpLogDetailLevel.shouldLogUrl()) {
requestLogMessage.append("--> ")
.append(request.getHttpMethod())
.append(" ")
.append(getRedactedUrl(request.getUrl()))
.append(System.lineSeparator());
}
addHeadersToLogMessage(logger, request.getHeaders(), requestLogMessage);
if (!httpLogDetailLevel.shouldLogBody()) {
return logAndReturn(logger, requestLogMessage, null);
}
if (request.getBody() == null) {
requestLogMessage.append("(empty body)")
.append(System.lineSeparator())
.append("--> END ")
.append(request.getHttpMethod())
.append(System.lineSeparator());
return logAndReturn(logger, requestLogMessage, null);
}
String contentType = request.getHeaders().getValue("Content-Type");
long contentLength = getContentLength(logger, request.getHeaders());
if (shouldBodyBeLogged(contentType, contentLength)) {
ByteArrayOutputStream outputStream = new ByteArrayOutputStream((int) contentLength);
request.setBody(
request.getBody()
.doOnNext(byteBuffer -> {
for (int i = byteBuffer.position(); i < byteBuffer.limit(); i++) {
outputStream.write(byteBuffer.get(i));
}
})
.doFinally(ignored -> {
requestLogMessage.append(contentLength)
.append("-byte body:")
.append(System.lineSeparator())
.append(prettyPrintIfNeeded(logger, contentType,
new String(outputStream.toByteArray(), StandardCharsets.UTF_8)))
.append(System.lineSeparator())
.append("--> END ")
.append(request.getHttpMethod())
.append(System.lineSeparator());
logger.info(requestLogMessage.toString());
}));
return Mono.empty();
} else {
requestLogMessage.append(contentLength)
.append("-byte body: (content not logged)")
.append(System.lineSeparator())
.append("--> END ")
.append(request.getHttpMethod())
.append(System.lineSeparator());
return logAndReturn(logger, requestLogMessage, null);
}
}
/*
* Logs thr HTTP response.
*
* @param logger Logger used to log the response.
* @param response HTTP response returned from Azure.
* @param startNs Nanosecond representation of when the request was sent.
* @return A Mono containing the HTTP response.
*/
private Mono<HttpResponse> logResponse(final ClientLogger logger, final HttpResponse response, long startNs) {
if (!logger.canLogAtLevel(LogLevel.INFORMATIONAL)) {
return Mono.just(response);
}
long tookMs = TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - startNs);
String contentLengthString = response.getHeaderValue("Content-Length");
String bodySize = (CoreUtils.isNullOrEmpty(contentLengthString))
? "unknown-length body"
: contentLengthString + "-byte body";
StringBuilder responseLogMessage = new StringBuilder();
if (httpLogDetailLevel.shouldLogUrl()) {
responseLogMessage.append("<-- ")
.append(response.getStatusCode())
.append(" ")
.append(getRedactedUrl(response.getRequest().getUrl()))
.append(" (")
.append(tookMs)
.append(" ms, ")
.append(bodySize)
.append(")")
.append(System.lineSeparator());
}
addHeadersToLogMessage(logger, response.getHeaders(), responseLogMessage);
if (!httpLogDetailLevel.shouldLogBody()) {
responseLogMessage.append("<-- END HTTP");
return logAndReturn(logger, responseLogMessage, response);
}
String contentTypeHeader = response.getHeaderValue("Content-Type");
long contentLength = getContentLength(logger, response.getHeaders());
if (shouldBodyBeLogged(contentTypeHeader, contentLength)) {
HttpResponse bufferedResponse = response.buffer();
ByteArrayOutputStream outputStream = new ByteArrayOutputStream((int) contentLength);
return bufferedResponse.getBody()
.doOnNext(byteBuffer -> {
for (int i = byteBuffer.position(); i < byteBuffer.limit(); i++) {
outputStream.write(byteBuffer.get(i));
}
})
.doFinally(ignored -> {
responseLogMessage.append("Response body:")
.append(System.lineSeparator())
.append(prettyPrintIfNeeded(logger, contentTypeHeader,
new String(outputStream.toByteArray(), StandardCharsets.UTF_8)))
.append(System.lineSeparator())
.append("<-- END HTTP");
logger.info(responseLogMessage.toString());
}).then(Mono.just(bufferedResponse));
} else {
responseLogMessage.append("(body content not logged)")
.append(System.lineSeparator())
.append("<-- END HTTP");
return logAndReturn(logger, responseLogMessage, response);
}
}
private <T> Mono<T> logAndReturn(ClientLogger logger, StringBuilder logMessageBuilder, T data) {
logger.info(logMessageBuilder.toString());
return Mono.justOrEmpty(data);
}
/*
* Generates the redacted URL for logging.
*
* @param url URL where the request is being sent.
* @return A URL with query parameters redacted based on configurations in this policy.
*/
private String getRedactedUrl(URL url) {
return UrlBuilder.parse(url)
.setQuery(getAllowedQueryString(url.getQuery()))
.toString();
}
/*
* Generates the logging safe query parameters string.
*
* @param queryString Query parameter string from the request URL.
* @return A query parameter string redacted based on the configurations in this policy.
*/
private String getAllowedQueryString(String queryString) {
if (CoreUtils.isNullOrEmpty(queryString)) {
return "";
}
StringBuilder queryStringBuilder = new StringBuilder();
String[] queryParams = queryString.split("&");
for (String queryParam : queryParams) {
if (queryStringBuilder.length() > 0) {
queryStringBuilder.append("&");
}
String[] queryPair = queryParam.split("=", 2);
if (queryPair.length == 2) {
String queryName = queryPair[0];
if (allowedQueryParameterNames.contains(queryName.toLowerCase(Locale.ROOT))) {
queryStringBuilder.append(queryParam);
} else {
queryStringBuilder.append(queryPair[0]).append("=").append(REDACTED_PLACEHOLDER);
}
} else {
queryStringBuilder.append(queryParam);
}
}
return queryStringBuilder.toString();
}
/*
* Adds HTTP headers into the StringBuilder that is generating the log message.
*
* @param headers HTTP headers on the request or response.
* @param sb StringBuilder that is generating the log message.
* @param logLevel Log level the environment is configured to use.
*/
private void addHeadersToLogMessage(ClientLogger logger, HttpHeaders headers, StringBuilder sb) {
if (!httpLogDetailLevel.shouldLogHeaders() || logger.canLogAtLevel(LogLevel.VERBOSE)) {
return;
}
for (HttpHeader header : headers) {
String headerName = header.getName();
sb.append(headerName).append(":");
if (allowedHeaderNames.contains(headerName.toLowerCase(Locale.ROOT))) {
sb.append(header.getValue());
} else {
sb.append(REDACTED_PLACEHOLDER);
}
sb.append(System.lineSeparator());
}
}
/*
* Determines and attempts to pretty print the body if it is JSON.
*
* <p>The body is pretty printed if the Content-Type is JSON and the policy is configured to pretty print JSON.</p>
*
* @param logger Logger used to log a warning if the body fails to pretty print as JSON.
* @param contentType Content-Type header.
* @param body Body of the request or response.
* @return The body pretty printed if it is JSON, otherwise the unmodified body.
*/
private String prettyPrintIfNeeded(ClientLogger logger, String contentType, String body) {
String result = body;
if (prettyPrintBody && contentType != null
&& (contentType.startsWith(ContentType.APPLICATION_JSON) || contentType.startsWith("text/json"))) {
try {
final Object deserialized = PRETTY_PRINTER.readTree(body);
result = PRETTY_PRINTER.writeValueAsString(deserialized);
} catch (Exception e) {
logger.warning("Failed to pretty print JSON: {}", e.getMessage());
}
}
return result;
}
/*
* Attempts to retrieve and parse the Content-Length header into a numeric representation.
*
* @param logger Logger used to log a warning if the Content-Length header is an invalid number.
* @param headers HTTP headers that are checked for containing Content-Length.
* @return
*/
private long getContentLength(ClientLogger logger, HttpHeaders headers) {
long contentLength = 0;
String contentLengthString = headers.getValue("Content-Length");
if (CoreUtils.isNullOrEmpty(contentLengthString)) {
return contentLength;
}
try {
contentLength = Long.parseLong(contentLengthString);
} catch (NumberFormatException | NullPointerException e) {
logger.warning("Could not parse the HTTP header content-length: '{}'.",
headers.getValue("content-length"), e);
}
return contentLength;
}
/*
* Determines if the request or response body should be logged.
*
* <p>The request or response body is logged if the Content-Type is not "application/octet-stream" and the body
* isn't empty and is less than 16KB in size.</p>
*
* @param contentTypeHeader Content-Type header value.
* @param contentLength Content-Length header represented as a numeric.
* @return A flag indicating if the request or response body should be logged.
*/
private boolean shouldBodyBeLogged(String contentTypeHeader, long contentLength) {
return !ContentType.APPLICATION_OCTET_STREAM.equalsIgnoreCase(contentTypeHeader)
&& contentLength != 0
&& contentLength < MAX_BODY_LOG_SIZE;
}
} |
So the `retryCount` is global for the `Download` not per `read()` invocation? | private Flux<ByteBuffer> tryContinueFlux(Throwable t, int retryCount, DownloadRetryOptions options) {
if (retryCount >= options.getMaxRetryRequests()
|| !(t instanceof IOException || t instanceof TimeoutException)) {
return Flux.error(t);
} else {
/*
We wrap this in a try catch because we don't know the behavior of the getter. Most errors would probably
come from an unsuccessful request, which would be propagated through the onError methods. However, it is
possible the method call that returns a Single is what throws (like how our apis throw some exceptions at
call time rather than at subscription time.
*/
try {
/*Get a new stream from the new response and try reading from it.
Do not compound the number of retries by calling getValue on the DownloadResponse; just get
the raw body.
*/
return getter.apply(info)
.flatMapMany(newResponse ->
applyReliableDownload(newResponse.rawResponse.getValue(), retryCount, options));
} catch (Exception e) {
return Flux.error(e);
}
}
} | if (retryCount >= options.getMaxRetryRequests() | private Flux<ByteBuffer> tryContinueFlux(Throwable t, int retryCount, DownloadRetryOptions options) {
if (retryCount >= options.getMaxRetryRequests()
|| !(t instanceof IOException || t instanceof TimeoutException)) {
return Flux.error(t);
} else {
/*
We wrap this in a try catch because we don't know the behavior of the getter. Most errors would probably
come from an unsuccessful request, which would be propagated through the onError methods. However, it is
possible the method call that returns a Single is what throws (like how our apis throw some exceptions at
call time rather than at subscription time.
*/
try {
/*Get a new stream from the new response and try reading from it.
Do not compound the number of retries by calling getValue on the DownloadResponse; just get
the raw body.
*/
return getter.apply(info)
.flatMapMany(newResponse ->
applyReliableDownload(newResponse.rawResponse.getValue(), retryCount, options));
} catch (Exception e) {
return Flux.error(e);
}
}
} | class ReliableDownload {
private static final Duration TIMEOUT_VALUE = Duration.ofSeconds(60);
private final BlobsDownloadResponse rawResponse;
private final DownloadRetryOptions options;
private final HttpGetterInfo info;
private final Function<HttpGetterInfo, Mono<ReliableDownload>> getter;
ReliableDownload(BlobsDownloadResponse rawResponse, DownloadRetryOptions options, HttpGetterInfo info,
Function<HttpGetterInfo, Mono<ReliableDownload>> getter) {
StorageImplUtils.assertNotNull("getter", getter);
StorageImplUtils.assertNotNull("info", info);
StorageImplUtils.assertNotNull("info.eTag", info.getETag());
this.rawResponse = rawResponse;
this.options = (options == null) ? new DownloadRetryOptions() : options;
this.info = info;
this.getter = getter;
}
HttpRequest getRequest() {
return rawResponse.getRequest();
}
int getStatusCode() {
return rawResponse.getStatusCode();
}
HttpHeaders getHeaders() {
return rawResponse.getHeaders();
}
BlobDownloadHeaders getDeserializedHeaders() {
return rawResponse.getDeserializedHeaders();
}
Flux<ByteBuffer> getValue() {
/*
We pass -1 for currentRetryCount because we want tryContinueFlux to receive a value of 0 for number of
retries as we have not actually retried yet, only made the initial try. Because applyReliableDownload() will
add 1 before calling into tryContinueFlux, we set the initial value to -1.
*/
Flux<ByteBuffer> value = (options.getMaxRetryRequests() == 0)
? rawResponse.getValue().timeout(TIMEOUT_VALUE)
: applyReliableDownload(rawResponse.getValue(), -1, options);
return value.switchIfEmpty(Flux.just(ByteBuffer.wrap(new byte[0])));
}
private Flux<ByteBuffer> applyReliableDownload(Flux<ByteBuffer> data, int currentRetryCount,
DownloadRetryOptions options) {
return data
.timeout(TIMEOUT_VALUE)
.doOnNext(buffer -> {
/*
Update how much data we have received in case we need to retry and propagate to the user the data we
have received.
*/
this.info.setOffset(this.info.getOffset() + buffer.remaining());
if (this.info.getCount() != null) {
this.info.setCount(this.info.getCount() - buffer.remaining());
}
}).onErrorResume(t2 -> {
return tryContinueFlux(t2, currentRetryCount + 1, options);
});
}
} | class ReliableDownload {
private static final Duration TIMEOUT_VALUE = Duration.ofSeconds(60);
private final BlobsDownloadResponse rawResponse;
private final DownloadRetryOptions options;
private final HttpGetterInfo info;
private final Function<HttpGetterInfo, Mono<ReliableDownload>> getter;
ReliableDownload(BlobsDownloadResponse rawResponse, DownloadRetryOptions options, HttpGetterInfo info,
Function<HttpGetterInfo, Mono<ReliableDownload>> getter) {
StorageImplUtils.assertNotNull("getter", getter);
StorageImplUtils.assertNotNull("info", info);
StorageImplUtils.assertNotNull("info.eTag", info.getETag());
this.rawResponse = rawResponse;
this.options = (options == null) ? new DownloadRetryOptions() : options;
this.info = info;
this.getter = getter;
}
HttpRequest getRequest() {
return rawResponse.getRequest();
}
int getStatusCode() {
return rawResponse.getStatusCode();
}
HttpHeaders getHeaders() {
return rawResponse.getHeaders();
}
BlobDownloadHeaders getDeserializedHeaders() {
return rawResponse.getDeserializedHeaders();
}
Flux<ByteBuffer> getValue() {
/*
We pass -1 for currentRetryCount because we want tryContinueFlux to receive a value of 0 for number of
retries as we have not actually retried yet, only made the initial try. Because applyReliableDownload() will
add 1 before calling into tryContinueFlux, we set the initial value to -1.
*/
Flux<ByteBuffer> value = (options.getMaxRetryRequests() == 0)
? rawResponse.getValue().timeout(TIMEOUT_VALUE)
: applyReliableDownload(rawResponse.getValue(), -1, options);
return value.switchIfEmpty(Flux.just(ByteBuffer.wrap(new byte[0])));
}
private Flux<ByteBuffer> applyReliableDownload(Flux<ByteBuffer> data, int currentRetryCount,
DownloadRetryOptions options) {
return data
.timeout(TIMEOUT_VALUE)
.doOnNext(buffer -> {
/*
Update how much data we have received in case we need to retry and propagate to the user the data we
have received.
*/
this.info.setOffset(this.info.getOffset() + buffer.remaining());
if (this.info.getCount() != null) {
this.info.setCount(this.info.getCount() - buffer.remaining());
}
}).onErrorResume(t2 -> {
return tryContinueFlux(t2, currentRetryCount + 1, options);
});
}
} |
It is as I've coded it here. I picked this a little bit arbitrarily. Since this was easier/faster and both of us seemed a little unsure that one option was strictly better than the other, I figured sticking with the existing plan would make it more likely to be ready to ship next week. We also see these stale streams pretty rarely, and I think even one retry per download operation is enough in the vast vast majority of cases. If you feel strongly that resetting the count on each successful item is better or if we see some Timeouts still trickling through, I can update it in a later release. | private Flux<ByteBuffer> tryContinueFlux(Throwable t, int retryCount, DownloadRetryOptions options) {
if (retryCount >= options.getMaxRetryRequests()
|| !(t instanceof IOException || t instanceof TimeoutException)) {
return Flux.error(t);
} else {
/*
We wrap this in a try catch because we don't know the behavior of the getter. Most errors would probably
come from an unsuccessful request, which would be propagated through the onError methods. However, it is
possible the method call that returns a Single is what throws (like how our apis throw some exceptions at
call time rather than at subscription time.
*/
try {
/*Get a new stream from the new response and try reading from it.
Do not compound the number of retries by calling getValue on the DownloadResponse; just get
the raw body.
*/
return getter.apply(info)
.flatMapMany(newResponse ->
applyReliableDownload(newResponse.rawResponse.getValue(), retryCount, options));
} catch (Exception e) {
return Flux.error(e);
}
}
} | if (retryCount >= options.getMaxRetryRequests() | private Flux<ByteBuffer> tryContinueFlux(Throwable t, int retryCount, DownloadRetryOptions options) {
if (retryCount >= options.getMaxRetryRequests()
|| !(t instanceof IOException || t instanceof TimeoutException)) {
return Flux.error(t);
} else {
/*
We wrap this in a try catch because we don't know the behavior of the getter. Most errors would probably
come from an unsuccessful request, which would be propagated through the onError methods. However, it is
possible the method call that returns a Single is what throws (like how our apis throw some exceptions at
call time rather than at subscription time.
*/
try {
/*Get a new stream from the new response and try reading from it.
Do not compound the number of retries by calling getValue on the DownloadResponse; just get
the raw body.
*/
return getter.apply(info)
.flatMapMany(newResponse ->
applyReliableDownload(newResponse.rawResponse.getValue(), retryCount, options));
} catch (Exception e) {
return Flux.error(e);
}
}
} | class ReliableDownload {
private static final Duration TIMEOUT_VALUE = Duration.ofSeconds(60);
private final BlobsDownloadResponse rawResponse;
private final DownloadRetryOptions options;
private final HttpGetterInfo info;
private final Function<HttpGetterInfo, Mono<ReliableDownload>> getter;
ReliableDownload(BlobsDownloadResponse rawResponse, DownloadRetryOptions options, HttpGetterInfo info,
Function<HttpGetterInfo, Mono<ReliableDownload>> getter) {
StorageImplUtils.assertNotNull("getter", getter);
StorageImplUtils.assertNotNull("info", info);
StorageImplUtils.assertNotNull("info.eTag", info.getETag());
this.rawResponse = rawResponse;
this.options = (options == null) ? new DownloadRetryOptions() : options;
this.info = info;
this.getter = getter;
}
HttpRequest getRequest() {
return rawResponse.getRequest();
}
int getStatusCode() {
return rawResponse.getStatusCode();
}
HttpHeaders getHeaders() {
return rawResponse.getHeaders();
}
BlobDownloadHeaders getDeserializedHeaders() {
return rawResponse.getDeserializedHeaders();
}
Flux<ByteBuffer> getValue() {
/*
We pass -1 for currentRetryCount because we want tryContinueFlux to receive a value of 0 for number of
retries as we have not actually retried yet, only made the initial try. Because applyReliableDownload() will
add 1 before calling into tryContinueFlux, we set the initial value to -1.
*/
Flux<ByteBuffer> value = (options.getMaxRetryRequests() == 0)
? rawResponse.getValue().timeout(TIMEOUT_VALUE)
: applyReliableDownload(rawResponse.getValue(), -1, options);
return value.switchIfEmpty(Flux.just(ByteBuffer.wrap(new byte[0])));
}
private Flux<ByteBuffer> applyReliableDownload(Flux<ByteBuffer> data, int currentRetryCount,
DownloadRetryOptions options) {
return data
.timeout(TIMEOUT_VALUE)
.doOnNext(buffer -> {
/*
Update how much data we have received in case we need to retry and propagate to the user the data we
have received.
*/
this.info.setOffset(this.info.getOffset() + buffer.remaining());
if (this.info.getCount() != null) {
this.info.setCount(this.info.getCount() - buffer.remaining());
}
}).onErrorResume(t2 -> {
return tryContinueFlux(t2, currentRetryCount + 1, options);
});
}
} | class ReliableDownload {
private static final Duration TIMEOUT_VALUE = Duration.ofSeconds(60);
private final BlobsDownloadResponse rawResponse;
private final DownloadRetryOptions options;
private final HttpGetterInfo info;
private final Function<HttpGetterInfo, Mono<ReliableDownload>> getter;
ReliableDownload(BlobsDownloadResponse rawResponse, DownloadRetryOptions options, HttpGetterInfo info,
Function<HttpGetterInfo, Mono<ReliableDownload>> getter) {
StorageImplUtils.assertNotNull("getter", getter);
StorageImplUtils.assertNotNull("info", info);
StorageImplUtils.assertNotNull("info.eTag", info.getETag());
this.rawResponse = rawResponse;
this.options = (options == null) ? new DownloadRetryOptions() : options;
this.info = info;
this.getter = getter;
}
HttpRequest getRequest() {
return rawResponse.getRequest();
}
int getStatusCode() {
return rawResponse.getStatusCode();
}
HttpHeaders getHeaders() {
return rawResponse.getHeaders();
}
BlobDownloadHeaders getDeserializedHeaders() {
return rawResponse.getDeserializedHeaders();
}
Flux<ByteBuffer> getValue() {
/*
We pass -1 for currentRetryCount because we want tryContinueFlux to receive a value of 0 for number of
retries as we have not actually retried yet, only made the initial try. Because applyReliableDownload() will
add 1 before calling into tryContinueFlux, we set the initial value to -1.
*/
Flux<ByteBuffer> value = (options.getMaxRetryRequests() == 0)
? rawResponse.getValue().timeout(TIMEOUT_VALUE)
: applyReliableDownload(rawResponse.getValue(), -1, options);
return value.switchIfEmpty(Flux.just(ByteBuffer.wrap(new byte[0])));
}
private Flux<ByteBuffer> applyReliableDownload(Flux<ByteBuffer> data, int currentRetryCount,
DownloadRetryOptions options) {
return data
.timeout(TIMEOUT_VALUE)
.doOnNext(buffer -> {
/*
Update how much data we have received in case we need to retry and propagate to the user the data we
have received.
*/
this.info.setOffset(this.info.getOffset() + buffer.remaining());
if (this.info.getCount() != null) {
this.info.setCount(this.info.getCount() - buffer.remaining());
}
}).onErrorResume(t2 -> {
return tryContinueFlux(t2, currentRetryCount + 1, options);
});
}
} |
sounds good. | private Flux<ByteBuffer> tryContinueFlux(Throwable t, int retryCount, DownloadRetryOptions options) {
if (retryCount >= options.getMaxRetryRequests()
|| !(t instanceof IOException || t instanceof TimeoutException)) {
return Flux.error(t);
} else {
/*
We wrap this in a try catch because we don't know the behavior of the getter. Most errors would probably
come from an unsuccessful request, which would be propagated through the onError methods. However, it is
possible the method call that returns a Single is what throws (like how our apis throw some exceptions at
call time rather than at subscription time.
*/
try {
/*Get a new stream from the new response and try reading from it.
Do not compound the number of retries by calling getValue on the DownloadResponse; just get
the raw body.
*/
return getter.apply(info)
.flatMapMany(newResponse ->
applyReliableDownload(newResponse.rawResponse.getValue(), retryCount, options));
} catch (Exception e) {
return Flux.error(e);
}
}
} | if (retryCount >= options.getMaxRetryRequests() | private Flux<ByteBuffer> tryContinueFlux(Throwable t, int retryCount, DownloadRetryOptions options) {
if (retryCount >= options.getMaxRetryRequests()
|| !(t instanceof IOException || t instanceof TimeoutException)) {
return Flux.error(t);
} else {
/*
We wrap this in a try catch because we don't know the behavior of the getter. Most errors would probably
come from an unsuccessful request, which would be propagated through the onError methods. However, it is
possible the method call that returns a Single is what throws (like how our apis throw some exceptions at
call time rather than at subscription time.
*/
try {
/*Get a new stream from the new response and try reading from it.
Do not compound the number of retries by calling getValue on the DownloadResponse; just get
the raw body.
*/
return getter.apply(info)
.flatMapMany(newResponse ->
applyReliableDownload(newResponse.rawResponse.getValue(), retryCount, options));
} catch (Exception e) {
return Flux.error(e);
}
}
} | class ReliableDownload {
private static final Duration TIMEOUT_VALUE = Duration.ofSeconds(60);
private final BlobsDownloadResponse rawResponse;
private final DownloadRetryOptions options;
private final HttpGetterInfo info;
private final Function<HttpGetterInfo, Mono<ReliableDownload>> getter;
ReliableDownload(BlobsDownloadResponse rawResponse, DownloadRetryOptions options, HttpGetterInfo info,
Function<HttpGetterInfo, Mono<ReliableDownload>> getter) {
StorageImplUtils.assertNotNull("getter", getter);
StorageImplUtils.assertNotNull("info", info);
StorageImplUtils.assertNotNull("info.eTag", info.getETag());
this.rawResponse = rawResponse;
this.options = (options == null) ? new DownloadRetryOptions() : options;
this.info = info;
this.getter = getter;
}
HttpRequest getRequest() {
return rawResponse.getRequest();
}
int getStatusCode() {
return rawResponse.getStatusCode();
}
HttpHeaders getHeaders() {
return rawResponse.getHeaders();
}
BlobDownloadHeaders getDeserializedHeaders() {
return rawResponse.getDeserializedHeaders();
}
Flux<ByteBuffer> getValue() {
/*
We pass -1 for currentRetryCount because we want tryContinueFlux to receive a value of 0 for number of
retries as we have not actually retried yet, only made the initial try. Because applyReliableDownload() will
add 1 before calling into tryContinueFlux, we set the initial value to -1.
*/
Flux<ByteBuffer> value = (options.getMaxRetryRequests() == 0)
? rawResponse.getValue().timeout(TIMEOUT_VALUE)
: applyReliableDownload(rawResponse.getValue(), -1, options);
return value.switchIfEmpty(Flux.just(ByteBuffer.wrap(new byte[0])));
}
private Flux<ByteBuffer> applyReliableDownload(Flux<ByteBuffer> data, int currentRetryCount,
DownloadRetryOptions options) {
return data
.timeout(TIMEOUT_VALUE)
.doOnNext(buffer -> {
/*
Update how much data we have received in case we need to retry and propagate to the user the data we
have received.
*/
this.info.setOffset(this.info.getOffset() + buffer.remaining());
if (this.info.getCount() != null) {
this.info.setCount(this.info.getCount() - buffer.remaining());
}
}).onErrorResume(t2 -> {
return tryContinueFlux(t2, currentRetryCount + 1, options);
});
}
} | class ReliableDownload {
private static final Duration TIMEOUT_VALUE = Duration.ofSeconds(60);
private final BlobsDownloadResponse rawResponse;
private final DownloadRetryOptions options;
private final HttpGetterInfo info;
private final Function<HttpGetterInfo, Mono<ReliableDownload>> getter;
ReliableDownload(BlobsDownloadResponse rawResponse, DownloadRetryOptions options, HttpGetterInfo info,
Function<HttpGetterInfo, Mono<ReliableDownload>> getter) {
StorageImplUtils.assertNotNull("getter", getter);
StorageImplUtils.assertNotNull("info", info);
StorageImplUtils.assertNotNull("info.eTag", info.getETag());
this.rawResponse = rawResponse;
this.options = (options == null) ? new DownloadRetryOptions() : options;
this.info = info;
this.getter = getter;
}
HttpRequest getRequest() {
return rawResponse.getRequest();
}
int getStatusCode() {
return rawResponse.getStatusCode();
}
HttpHeaders getHeaders() {
return rawResponse.getHeaders();
}
BlobDownloadHeaders getDeserializedHeaders() {
return rawResponse.getDeserializedHeaders();
}
Flux<ByteBuffer> getValue() {
/*
We pass -1 for currentRetryCount because we want tryContinueFlux to receive a value of 0 for number of
retries as we have not actually retried yet, only made the initial try. Because applyReliableDownload() will
add 1 before calling into tryContinueFlux, we set the initial value to -1.
*/
Flux<ByteBuffer> value = (options.getMaxRetryRequests() == 0)
? rawResponse.getValue().timeout(TIMEOUT_VALUE)
: applyReliableDownload(rawResponse.getValue(), -1, options);
return value.switchIfEmpty(Flux.just(ByteBuffer.wrap(new byte[0])));
}
private Flux<ByteBuffer> applyReliableDownload(Flux<ByteBuffer> data, int currentRetryCount,
DownloadRetryOptions options) {
return data
.timeout(TIMEOUT_VALUE)
.doOnNext(buffer -> {
/*
Update how much data we have received in case we need to retry and propagate to the user the data we
have received.
*/
this.info.setOffset(this.info.getOffset() + buffer.remaining());
if (this.info.getCount() != null) {
this.info.setCount(this.info.getCount() - buffer.remaining());
}
}).onErrorResume(t2 -> {
return tryContinueFlux(t2, currentRetryCount + 1, options);
});
}
} |
Can we check to ensure one isn't already present? I suppose it wouldn't matter because we remove the encryption metadata, so it wouldn't double decrypt, but it's probably safer if we check. | public EncryptedBlobClientBuilder pipeline(HttpPipeline httpPipeline) {
if (this.httpPipeline != null && httpPipeline == null) {
logger.info("HttpPipeline is being set to 'null' when it was previously configured.");
}
checkValidEncryptionParameters();
HttpPipeline pipeline = null;
if (httpPipeline != null) {
List<HttpPipelinePolicy> policies = new ArrayList<>();
policies.add(new BlobDecryptionPolicy(keyWrapper, keyResolver));
for (int i = 0; i < httpPipeline.getPolicyCount(); i++) {
HttpPipelinePolicy currPolicy = httpPipeline.getPolicy(i);
policies.add(currPolicy);
}
pipeline = new HttpPipelineBuilder()
.httpClient(httpPipeline.getHttpClient())
.policies(policies.toArray(new HttpPipelinePolicy[0]))
.build();
}
this.httpPipeline = pipeline;
return this;
} | for (int i = 0; i < httpPipeline.getPolicyCount(); i++) { | public EncryptedBlobClientBuilder pipeline(HttpPipeline httpPipeline) {
if (this.httpPipeline != null && httpPipeline == null) {
logger.info("HttpPipeline is being set to 'null' when it was previously configured.");
}
checkValidEncryptionParameters();
HttpPipeline pipeline = null;
if (httpPipeline != null) {
List<HttpPipelinePolicy> policies = new ArrayList<>();
boolean decryptionPolicyPresent = false;
for (int i = 0; i < httpPipeline.getPolicyCount(); i++) {
HttpPipelinePolicy currPolicy = httpPipeline.getPolicy(i);
decryptionPolicyPresent |= currPolicy instanceof BlobDecryptionPolicy;
policies.add(currPolicy);
}
if (!decryptionPolicyPresent) {
policies.add(0, new BlobDecryptionPolicy(keyWrapper, keyResolver));
}
pipeline = new HttpPipelineBuilder()
.httpClient(httpPipeline.getHttpClient())
.policies(policies.toArray(new HttpPipelinePolicy[0]))
.build();
}
this.httpPipeline = pipeline;
return this;
} | class EncryptedBlobClientBuilder {
private final ClientLogger logger = new ClientLogger(EncryptedBlobClientBuilder.class);
private static final String SDK_NAME = "name";
private static final String SDK_VERSION = "version";
private String endpoint;
private String accountName;
private String containerName;
private String blobName;
private String snapshot;
private StorageSharedKeyCredential storageSharedKeyCredential;
private TokenCredential tokenCredential;
private SasTokenCredential sasTokenCredential;
private HttpClient httpClient;
private final List<HttpPipelinePolicy> additionalPolicies = new ArrayList<>();
private HttpLogOptions logOptions;
private RequestRetryOptions retryOptions = new RequestRetryOptions();
private HttpPipeline httpPipeline;
private Configuration configuration;
private AsyncKeyEncryptionKey keyWrapper;
private AsyncKeyEncryptionKeyResolver keyResolver;
private String keyWrapAlgorithm;
private BlobServiceVersion version;
private CpkInfo customerProvidedKey;
/**
* Creates a new instance of the EncryptedBlobClientBuilder
*/
public EncryptedBlobClientBuilder() {
logOptions = getDefaultHttpLogOptions();
}
/**
* Creates a {@link EncryptedBlobClient} based on options set in the Builder.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.cryptography.EncryptedBlobClientBuilder.buildEncryptedBlobAsyncClient}
*
* @return a {@link EncryptedBlobClient} created from the configurations in this builder.
* @throws NullPointerException If {@code endpoint}, {@code containerName}, or {@code blobName} is {@code null}.
*/
public EncryptedBlobClient buildEncryptedBlobClient() {
return new EncryptedBlobClient(buildEncryptedBlobAsyncClient());
}
/**
* Creates a {@link EncryptedBlobAsyncClient} based on options set in the Builder.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.cryptography.EncryptedBlobClientBuilder.buildEncryptedBlobClient}
*
* @return a {@link EncryptedBlobAsyncClient} created from the configurations in this builder.
* @throws NullPointerException If {@code endpoint}, {@code containerName}, or {@code blobName} is {@code null}.
*/
public EncryptedBlobAsyncClient buildEncryptedBlobAsyncClient() {
Objects.requireNonNull(blobName, "'blobName' cannot be null.");
checkValidEncryptionParameters();
/*
Implicit and explicit root container access are functionally equivalent, but explicit references are easier
to read and debug.
*/
if (CoreUtils.isNullOrEmpty(containerName)) {
containerName = BlobContainerAsyncClient.ROOT_CONTAINER_NAME;
}
BlobServiceVersion serviceVersion = version != null ? version : BlobServiceVersion.getLatest();
return new EncryptedBlobAsyncClient(getHttpPipeline(),
String.format("%s/%s/%s", endpoint, containerName, blobName), serviceVersion, accountName, containerName,
blobName, snapshot, customerProvidedKey, keyWrapper, keyWrapAlgorithm);
}
private HttpPipeline getHttpPipeline() {
if (httpPipeline != null) {
return httpPipeline;
}
Configuration userAgentConfiguration = (configuration == null) ? Configuration.NONE : configuration;
List<HttpPipelinePolicy> policies = new ArrayList<>();
policies.add(new BlobDecryptionPolicy(keyWrapper, keyResolver));
String clientName = USER_AGENT_PROPERTIES.getOrDefault(SDK_NAME, "UnknownName");
String clientVersion = USER_AGENT_PROPERTIES.getOrDefault(SDK_VERSION, "UnknownVersion");
policies.add(new UserAgentPolicy(logOptions.getApplicationId(), clientName, clientVersion,
userAgentConfiguration));
policies.add(new RequestIdPolicy());
policies.add(new AddDatePolicy());
if (storageSharedKeyCredential != null) {
policies.add(new StorageSharedKeyCredentialPolicy(storageSharedKeyCredential));
} else if (tokenCredential != null) {
BuilderHelper.httpsValidation(tokenCredential, "bearer token", endpoint, logger);
policies.add(new BearerTokenAuthenticationPolicy(tokenCredential, String.format("%s/.default", endpoint)));
} else if (sasTokenCredential != null) {
policies.add(new SasTokenCredentialPolicy(sasTokenCredential));
}
HttpPolicyProviders.addBeforeRetryPolicies(policies);
policies.add(new RequestRetryPolicy(retryOptions));
policies.addAll(additionalPolicies);
HttpPolicyProviders.addAfterRetryPolicies(policies);
policies.add(new ResponseValidationPolicyBuilder()
.addOptionalEcho(Constants.HeaderConstants.CLIENT_REQUEST_ID)
.addOptionalEcho(Constants.HeaderConstants.ENCRYPTION_KEY_SHA256)
.build());
policies.add(new HttpLoggingPolicy(logOptions));
policies.add(new ScrubEtagPolicy());
return new HttpPipelineBuilder()
.policies(policies.toArray(new HttpPipelinePolicy[0]))
.httpClient(httpClient)
.build();
}
/**
* Sets the encryption key parameters for the client
*
* @param key An object of type {@link AsyncKeyEncryptionKey} that is used to wrap/unwrap the content encryption
* key
* @param keyWrapAlgorithm The {@link String} used to wrap the key.
* @return the updated EncryptedBlobClientBuilder object
*/
public EncryptedBlobClientBuilder key(AsyncKeyEncryptionKey key, String keyWrapAlgorithm) {
this.keyWrapper = key;
this.keyWrapAlgorithm = keyWrapAlgorithm;
return this;
}
/**
* Sets the encryption parameters for this client
*
* @param keyResolver The key resolver used to select the correct key for decrypting existing blobs.
* @return the updated EncryptedBlobClientBuilder object
*/
public EncryptedBlobClientBuilder keyResolver(AsyncKeyEncryptionKeyResolver keyResolver) {
this.keyResolver = keyResolver;
return this;
}
private void checkValidEncryptionParameters() {
if (this.keyWrapper == null && this.keyResolver == null) {
throw logger.logExceptionAsError(new IllegalArgumentException("Key and KeyResolver cannot both be null"));
}
if (this.keyWrapper != null && this.keyWrapAlgorithm == null) {
throw logger.logExceptionAsError(
new IllegalArgumentException("Key Wrap Algorithm must be specified with a Key."));
}
}
/**
* Sets the {@link StorageSharedKeyCredential} used to authorize requests sent to the service.
*
* @param credential {@link StorageSharedKeyCredential}.
* @return the updated EncryptedBlobClientBuilder
* @throws NullPointerException If {@code credential} is {@code null}.
*/
public EncryptedBlobClientBuilder credential(StorageSharedKeyCredential credential) {
this.storageSharedKeyCredential = Objects.requireNonNull(credential, "'credential' cannot be null.");
this.tokenCredential = null;
this.sasTokenCredential = null;
return this;
}
/**
* Sets the {@link TokenCredential} used to authorize requests sent to the service.
*
* @param credential {@link TokenCredential}.
* @return the updated EncryptedBlobClientBuilder
* @throws NullPointerException If {@code credential} is {@code null}.
*/
public EncryptedBlobClientBuilder credential(TokenCredential credential) {
this.tokenCredential = Objects.requireNonNull(credential, "'credential' cannot be null.");
this.storageSharedKeyCredential = null;
this.sasTokenCredential = null;
return this;
}
/**
* Sets the SAS token used to authorize requests sent to the service.
*
* @param sasToken The SAS token to use for authenticating requests.
* @return the updated EncryptedBlobClientBuilder
* @throws NullPointerException If {@code sasToken} is {@code null}.
*/
public EncryptedBlobClientBuilder sasToken(String sasToken) {
this.sasTokenCredential = new SasTokenCredential(Objects.requireNonNull(sasToken,
"'sasToken' cannot be null."));
this.storageSharedKeyCredential = null;
this.tokenCredential = null;
return this;
}
/**
* Clears the credential used to authorize the request.
*
* <p>This is for blobs that are publicly accessible.</p>
*
* @return the updated EncryptedBlobClientBuilder
*/
public EncryptedBlobClientBuilder setAnonymousAccess() {
this.storageSharedKeyCredential = null;
this.tokenCredential = null;
this.sasTokenCredential = null;
return this;
}
/**
* Sets the connection string to connect to the service.
*
* @param connectionString Connection string of the storage account.
* @return the updated EncryptedBlobClientBuilder
* @throws IllegalArgumentException If {@code connectionString} is invalid.
*/
public EncryptedBlobClientBuilder connectionString(String connectionString) {
StorageConnectionString storageConnectionString
= StorageConnectionString.create(connectionString, logger);
StorageEndpoint endpoint = storageConnectionString.getBlobEndpoint();
if (endpoint == null || endpoint.getPrimaryUri() == null) {
throw logger
.logExceptionAsError(new IllegalArgumentException(
"connectionString missing required settings to derive blob service endpoint."));
}
this.endpoint(endpoint.getPrimaryUri());
if (storageConnectionString.getAccountName() != null) {
this.accountName = storageConnectionString.getAccountName();
}
StorageAuthenticationSettings authSettings = storageConnectionString.getStorageAuthSettings();
if (authSettings.getType() == StorageAuthenticationSettings.Type.ACCOUNT_NAME_KEY) {
this.credential(new StorageSharedKeyCredential(authSettings.getAccount().getName(),
authSettings.getAccount().getAccessKey()));
} else if (authSettings.getType() == StorageAuthenticationSettings.Type.SAS_TOKEN) {
this.sasToken(authSettings.getSasToken());
}
return this;
}
/**
* Sets the service endpoint, additionally parses it for information (SAS token, container name, blob name)
*
* <p>If the endpoint is to a blob in the root container, this method will fail as it will interpret the blob name
* as the container name. With only one path element, it is impossible to distinguish between a container name and a
* blob in the root container, so it is assumed to be the container name as this is much more common. When working
* with blobs in the root container, it is best to set the endpoint to the account url and specify the blob name
* separately using the {@link EncryptedBlobClientBuilder
*
* @param endpoint URL of the service
* @return the updated EncryptedBlobClientBuilder object
* @throws IllegalArgumentException If {@code endpoint} is {@code null} or is a malformed URL.
*/
public EncryptedBlobClientBuilder endpoint(String endpoint) {
try {
URL url = new URL(endpoint);
BlobUrlParts parts = BlobUrlParts.parse(url);
this.accountName = parts.getAccountName();
this.endpoint = BuilderHelper.getEndpoint(parts);
this.containerName = parts.getBlobContainerName();
this.blobName = Utility.urlEncode(parts.getBlobName());
this.snapshot = parts.getSnapshot();
String sasToken = parts.getCommonSasQueryParameters().encode();
if (!CoreUtils.isNullOrEmpty(sasToken)) {
this.sasToken(sasToken);
}
} catch (MalformedURLException ex) {
throw logger.logExceptionAsError(
new IllegalArgumentException("The Azure Storage Blob endpoint url is malformed."));
}
return this;
}
/**
* Sets the name of the container that contains the blob.
*
* @param containerName Name of the container. If the value {@code null} or empty the root container, {@code $root},
* will be used.
* @return the updated EncryptedBlobClientBuilder object
*/
public EncryptedBlobClientBuilder containerName(String containerName) {
this.containerName = containerName;
return this;
}
/**
* Sets the name of the blob.
*
* @param blobName Name of the blob.
* @return the updated EncryptedBlobClientBuilder object
* @throws NullPointerException If {@code blobName} is {@code null}
*/
public EncryptedBlobClientBuilder blobName(String blobName) {
this.blobName = Utility.urlEncode(Utility.urlDecode(Objects.requireNonNull(blobName,
"'blobName' cannot be null.")));
return this;
}
/**
* Sets the snapshot identifier of the blob.
*
* @param snapshot Snapshot identifier for the blob.
* @return the updated EncryptedBlobClientBuilder object
*/
public EncryptedBlobClientBuilder snapshot(String snapshot) {
this.snapshot = snapshot;
return this;
}
/**
* Sets the {@link HttpClient} to use for sending a receiving requests to and from the service.
*
* @param httpClient HttpClient to use for requests.
* @return the updated EncryptedBlobClientBuilder object
*/
public EncryptedBlobClientBuilder httpClient(HttpClient httpClient) {
if (this.httpClient != null && httpClient == null) {
logger.info("'httpClient' is being set to 'null' when it was previously configured.");
}
this.httpClient = httpClient;
return this;
}
/**
* Adds a {@link HttpPipelinePolicy} to apply on each request sent. The policy will be added after the retry policy.
* If the method is called multiple times, all policies will be added and their order preserved.
*
* @param pipelinePolicy a pipeline policy
* @return the updated EncryptedBlobClientBuilder object
* @throws NullPointerException If {@code pipelinePolicy} is {@code null}.
*/
public EncryptedBlobClientBuilder addPolicy(HttpPipelinePolicy pipelinePolicy) {
this.additionalPolicies.add(Objects.requireNonNull(pipelinePolicy, "'pipelinePolicy' cannot be null"));
return this;
}
/**
* Sets the {@link HttpLogOptions} for service requests.
*
* @param logOptions The logging configuration to use when sending and receiving HTTP requests/responses.
* @return the updated EncryptedBlobClientBuilder object
* @throws NullPointerException If {@code logOptions} is {@code null}.
*/
public EncryptedBlobClientBuilder httpLogOptions(HttpLogOptions logOptions) {
this.logOptions = Objects.requireNonNull(logOptions, "'logOptions' cannot be null.");
return this;
}
/**
* Gets the default Storage whitelist log headers and query parameters.
*
* @return the default http log options.
*/
public static HttpLogOptions getDefaultHttpLogOptions() {
return BuilderHelper.getDefaultHttpLogOptions();
}
/**
* Sets the configuration object used to retrieve environment configuration values during building of the client.
*
* @param configuration Configuration store used to retrieve environment configurations.
* @return the updated EncryptedBlobClientBuilder object
*/
public EncryptedBlobClientBuilder configuration(Configuration configuration) {
this.configuration = configuration;
return this;
}
/**
* Sets the request retry options for all the requests made through the client.
*
* @param retryOptions {@link RequestRetryOptions}.
* @return the updated EncryptedBlobClientBuilder object
* @throws NullPointerException If {@code retryOptions} is {@code null}.
*/
public EncryptedBlobClientBuilder retryOptions(RequestRetryOptions retryOptions) {
this.retryOptions = Objects.requireNonNull(retryOptions, "'retryOptions' cannot be null.");
return this;
}
/**
* Sets the {@link HttpPipeline} to use for the service client, and adds a decryption policy.
*
* If {@code pipeline} is set, all other settings are ignored, aside from {@link
*
* <p>Use this method after setting the key in {@link
* {@link
*
* @param httpPipeline HttpPipeline to use for sending service requests and receiving responses.
* @return the updated EncryptedBlobClientBuilder object
*/
/**
* Sets the {@link BlobServiceVersion} that is used when making API requests.
* <p>
* If a service version is not provided, the service version that will be used will be the latest known service
* version based on the version of the client library being used. If no service version is specified, updating to a
* newer version of the client library will have the result of potentially moving to a newer service version.
* <p>
* Targeting a specific service version may also mean that the service will return an error for newer APIs.
*
* @param version {@link BlobServiceVersion} of the service to be used when making requests.
* @return the updated EncryptedBlobClientBuilder object
*/
public EncryptedBlobClientBuilder serviceVersion(BlobServiceVersion version) {
this.version = version;
return this;
}
/**
* Sets the {@link CustomerProvidedKey customer provided key} that is used to encrypt blob contents on the server.
*
* @param customerProvidedKey {@link CustomerProvidedKey}
* @return the updated EncryptedBlobClientBuilder object
*/
public EncryptedBlobClientBuilder customerProvidedKey(CustomerProvidedKey customerProvidedKey) {
if (customerProvidedKey == null) {
this.customerProvidedKey = null;
} else {
this.customerProvidedKey = new CpkInfo()
.setEncryptionKey(customerProvidedKey.getKey())
.setEncryptionKeySha256(customerProvidedKey.getKeySha256())
.setEncryptionAlgorithm(customerProvidedKey.getEncryptionAlgorithm());
}
return this;
}
/**
* Configures the builder based on the passed {@link BlobClient}. This will set the {@link HttpPipeline},
* {@link URL} and {@link BlobServiceVersion} that are used to interact with the service.
*
* <p>Use this method after setting the key in {@link
* {@link
*
* <p>If {@code pipeline} is set, all other settings are ignored, aside from {@link
* {@link
*
* <p>Note that this method does not copy over the {@link CustomerProvidedKey} and encryption scope properties
* from the provided client. To set CPK, please use {@link
*
* @param blobClient BlobClient used to configure the builder.
* @return the updated EncryptedBlobClientBuilder object
* @throws NullPointerException If {@code containerClient} is {@code null}.
*/
public EncryptedBlobClientBuilder blobClient(BlobClient blobClient) {
Objects.requireNonNull(blobClient);
return client(blobClient.getHttpPipeline(), blobClient.getBlobUrl(), blobClient.getServiceVersion());
}
/**
* Configures the builder based on the passed {@link BlobAsyncClient}. This will set the {@link HttpPipeline},
* {@link URL} and {@link BlobServiceVersion} that are used to interact with the service.
*
* <p>Use this method after setting the key in {@link
* {@link
*
* <p>If {@code pipeline} is set, all other settings are ignored, aside from {@link
* {@link
*
* <p>Note that this method does not copy over the {@link CustomerProvidedKey} and encryption scope properties
* from the provided client. To set CPK, please use {@link
*
* @param blobAsyncClient BlobAsyncClient used to configure the builder.
* @return the updated EncryptedBlobClientBuilder object
* @throws NullPointerException If {@code containerClient} is {@code null}.
*/
public EncryptedBlobClientBuilder blobAsyncClient(BlobAsyncClient blobAsyncClient) {
Objects.requireNonNull(blobAsyncClient);
return client(blobAsyncClient.getHttpPipeline(), blobAsyncClient.getBlobUrl(),
blobAsyncClient.getServiceVersion());
}
/**
* Helper method to transform a regular client into an encrypted client
* @param httpPipeline {@link HttpPipeline}
* @param endpoint The endpoint.
* @param version {@link BlobServiceVersion} of the service to be used when making requests.
* @return the updated EncryptedBlobClientBuilder object
*/
private EncryptedBlobClientBuilder client(HttpPipeline httpPipeline, String endpoint, BlobServiceVersion version) {
this.endpoint(endpoint);
this.serviceVersion(version);
return this.pipeline(httpPipeline);
}
} | class EncryptedBlobClientBuilder {
private final ClientLogger logger = new ClientLogger(EncryptedBlobClientBuilder.class);
private static final String SDK_NAME = "name";
private static final String SDK_VERSION = "version";
private String endpoint;
private String accountName;
private String containerName;
private String blobName;
private String snapshot;
private StorageSharedKeyCredential storageSharedKeyCredential;
private TokenCredential tokenCredential;
private SasTokenCredential sasTokenCredential;
private HttpClient httpClient;
private final List<HttpPipelinePolicy> additionalPolicies = new ArrayList<>();
private HttpLogOptions logOptions;
private RequestRetryOptions retryOptions = new RequestRetryOptions();
private HttpPipeline httpPipeline;
private Configuration configuration;
private AsyncKeyEncryptionKey keyWrapper;
private AsyncKeyEncryptionKeyResolver keyResolver;
private String keyWrapAlgorithm;
private BlobServiceVersion version;
private CpkInfo customerProvidedKey;
/**
* Creates a new instance of the EncryptedBlobClientBuilder
*/
public EncryptedBlobClientBuilder() {
logOptions = getDefaultHttpLogOptions();
}
/**
* Creates a {@link EncryptedBlobClient} based on options set in the Builder.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.cryptography.EncryptedBlobClientBuilder.buildEncryptedBlobAsyncClient}
*
* @return a {@link EncryptedBlobClient} created from the configurations in this builder.
* @throws NullPointerException If {@code endpoint}, {@code containerName}, or {@code blobName} is {@code null}.
*/
public EncryptedBlobClient buildEncryptedBlobClient() {
return new EncryptedBlobClient(buildEncryptedBlobAsyncClient());
}
/**
* Creates a {@link EncryptedBlobAsyncClient} based on options set in the Builder.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.cryptography.EncryptedBlobClientBuilder.buildEncryptedBlobClient}
*
* @return a {@link EncryptedBlobAsyncClient} created from the configurations in this builder.
* @throws NullPointerException If {@code endpoint}, {@code containerName}, or {@code blobName} is {@code null}.
*/
public EncryptedBlobAsyncClient buildEncryptedBlobAsyncClient() {
Objects.requireNonNull(blobName, "'blobName' cannot be null.");
checkValidEncryptionParameters();
/*
Implicit and explicit root container access are functionally equivalent, but explicit references are easier
to read and debug.
*/
if (CoreUtils.isNullOrEmpty(containerName)) {
containerName = BlobContainerAsyncClient.ROOT_CONTAINER_NAME;
}
BlobServiceVersion serviceVersion = version != null ? version : BlobServiceVersion.getLatest();
return new EncryptedBlobAsyncClient(getHttpPipeline(),
String.format("%s/%s/%s", endpoint, containerName, blobName), serviceVersion, accountName, containerName,
blobName, snapshot, customerProvidedKey, keyWrapper, keyWrapAlgorithm);
}
private HttpPipeline getHttpPipeline() {
if (httpPipeline != null) {
return httpPipeline;
}
Configuration userAgentConfiguration = (configuration == null) ? Configuration.NONE : configuration;
List<HttpPipelinePolicy> policies = new ArrayList<>();
policies.add(new BlobDecryptionPolicy(keyWrapper, keyResolver));
String clientName = USER_AGENT_PROPERTIES.getOrDefault(SDK_NAME, "UnknownName");
String clientVersion = USER_AGENT_PROPERTIES.getOrDefault(SDK_VERSION, "UnknownVersion");
policies.add(new UserAgentPolicy(logOptions.getApplicationId(), clientName, clientVersion,
userAgentConfiguration));
policies.add(new RequestIdPolicy());
policies.add(new AddDatePolicy());
if (storageSharedKeyCredential != null) {
policies.add(new StorageSharedKeyCredentialPolicy(storageSharedKeyCredential));
} else if (tokenCredential != null) {
BuilderHelper.httpsValidation(tokenCredential, "bearer token", endpoint, logger);
policies.add(new BearerTokenAuthenticationPolicy(tokenCredential, String.format("%s/.default", endpoint)));
} else if (sasTokenCredential != null) {
policies.add(new SasTokenCredentialPolicy(sasTokenCredential));
}
HttpPolicyProviders.addBeforeRetryPolicies(policies);
policies.add(new RequestRetryPolicy(retryOptions));
policies.addAll(additionalPolicies);
HttpPolicyProviders.addAfterRetryPolicies(policies);
policies.add(new ResponseValidationPolicyBuilder()
.addOptionalEcho(Constants.HeaderConstants.CLIENT_REQUEST_ID)
.addOptionalEcho(Constants.HeaderConstants.ENCRYPTION_KEY_SHA256)
.build());
policies.add(new HttpLoggingPolicy(logOptions));
policies.add(new ScrubEtagPolicy());
return new HttpPipelineBuilder()
.policies(policies.toArray(new HttpPipelinePolicy[0]))
.httpClient(httpClient)
.build();
}
/**
 * Configures the key encryption key and the algorithm used to wrap the generated
 * content encryption key.
 *
 * @param key The {@link AsyncKeyEncryptionKey} used to wrap/unwrap the content encryption key.
 * @param keyWrapAlgorithm The {@link String} used to wrap the key.
 * @return the updated EncryptedBlobClientBuilder object
 */
public EncryptedBlobClientBuilder key(AsyncKeyEncryptionKey key, String keyWrapAlgorithm) {
this.keyWrapAlgorithm = keyWrapAlgorithm;
this.keyWrapper = key;
return this;
}
/**
 * Configures the resolver used to pick the correct key when decrypting existing blobs.
 *
 * @param keyResolver The key resolver used to select the correct key for decrypting existing blobs.
 * @return the updated EncryptedBlobClientBuilder object
 */
public EncryptedBlobClientBuilder keyResolver(AsyncKeyEncryptionKeyResolver keyResolver) {
this.keyResolver = keyResolver;
return this;
}
// Validates the client-side encryption configuration: at least one of key/keyResolver
// must be present, and a key always requires a wrap algorithm.
private void checkValidEncryptionParameters() {
if (this.keyWrapper == null) {
if (this.keyResolver == null) {
throw logger.logExceptionAsError(new IllegalArgumentException("Key and KeyResolver cannot both be null"));
}
} else if (this.keyWrapAlgorithm == null) {
throw logger.logExceptionAsError(
new IllegalArgumentException("Key Wrap Algorithm must be specified with a Key."));
}
}
/**
 * Authorizes requests with a {@link StorageSharedKeyCredential}; clears any previously
 * configured token or SAS credential.
 *
 * @param credential {@link StorageSharedKeyCredential}.
 * @return the updated EncryptedBlobClientBuilder
 * @throws NullPointerException If {@code credential} is {@code null}.
 */
public EncryptedBlobClientBuilder credential(StorageSharedKeyCredential credential) {
Objects.requireNonNull(credential, "'credential' cannot be null.");
this.storageSharedKeyCredential = credential;
this.tokenCredential = null;
this.sasTokenCredential = null;
return this;
}
/**
 * Authorizes requests with a {@link TokenCredential}; clears any previously configured
 * shared-key or SAS credential.
 *
 * @param credential {@link TokenCredential}.
 * @return the updated EncryptedBlobClientBuilder
 * @throws NullPointerException If {@code credential} is {@code null}.
 */
public EncryptedBlobClientBuilder credential(TokenCredential credential) {
Objects.requireNonNull(credential, "'credential' cannot be null.");
this.tokenCredential = credential;
this.storageSharedKeyCredential = null;
this.sasTokenCredential = null;
return this;
}
/**
 * Authorizes requests with a SAS token; clears any previously configured shared-key or
 * token credential.
 *
 * @param sasToken The SAS token to use for authenticating requests.
 * @return the updated EncryptedBlobClientBuilder
 * @throws NullPointerException If {@code sasToken} is {@code null}.
 */
public EncryptedBlobClientBuilder sasToken(String sasToken) {
Objects.requireNonNull(sasToken, "'sasToken' cannot be null.");
this.sasTokenCredential = new SasTokenCredential(sasToken);
this.storageSharedKeyCredential = null;
this.tokenCredential = null;
return this;
}
/**
 * Clears every configured credential so requests are sent unauthenticated.
 *
 * <p>Intended for blobs that are publicly accessible.</p>
 *
 * @return the updated EncryptedBlobClientBuilder
 */
public EncryptedBlobClientBuilder setAnonymousAccess() {
this.sasTokenCredential = null;
this.tokenCredential = null;
this.storageSharedKeyCredential = null;
return this;
}
/**
 * Configures the builder from a storage account connection string: derives the blob
 * endpoint, the account name, and whichever credential (account key or SAS token) the
 * string carries.
 *
 * @param connectionString Connection string of the storage account.
 * @return the updated EncryptedBlobClientBuilder
 * @throws IllegalArgumentException If {@code connectionString} is invalid.
 */
public EncryptedBlobClientBuilder connectionString(String connectionString) {
StorageConnectionString storageConnectionString
= StorageConnectionString.create(connectionString, logger);
StorageEndpoint blobEndpoint = storageConnectionString.getBlobEndpoint();
if (blobEndpoint == null || blobEndpoint.getPrimaryUri() == null) {
throw logger
.logExceptionAsError(new IllegalArgumentException(
"connectionString missing required settings to derive blob service endpoint."));
}
this.endpoint(blobEndpoint.getPrimaryUri());
String parsedAccountName = storageConnectionString.getAccountName();
if (parsedAccountName != null) {
this.accountName = parsedAccountName;
}
StorageAuthenticationSettings authSettings = storageConnectionString.getStorageAuthSettings();
switch (authSettings.getType()) {
case ACCOUNT_NAME_KEY:
this.credential(new StorageSharedKeyCredential(authSettings.getAccount().getName(),
authSettings.getAccount().getAccessKey()));
break;
case SAS_TOKEN:
this.sasToken(authSettings.getSasToken());
break;
default:
break;
}
return this;
}
/**
 * Sets the service endpoint and additionally parses it for information (SAS token, container
 * name, blob name, snapshot).
 *
 * <p>If the endpoint points to a blob in the root container, the single path element is
 * interpreted as the container name, since that is the far more common case. When working with
 * blobs in the root container, set the endpoint to the account url and supply the blob name
 * separately via the blobName setter.</p>
 *
 * @param endpoint URL of the service
 * @return the updated EncryptedBlobClientBuilder object
 * @throws IllegalArgumentException If {@code endpoint} is {@code null} or is a malformed URL.
 */
public EncryptedBlobClientBuilder endpoint(String endpoint) {
try {
URL url = new URL(endpoint);
BlobUrlParts parts = BlobUrlParts.parse(url);
this.accountName = parts.getAccountName();
this.endpoint = BuilderHelper.getEndpoint(parts);
this.containerName = parts.getBlobContainerName();
this.blobName = Utility.urlEncode(parts.getBlobName());
this.snapshot = parts.getSnapshot();
String sasToken = parts.getCommonSasQueryParameters().encode();
if (!CoreUtils.isNullOrEmpty(sasToken)) {
this.sasToken(sasToken);
}
} catch (MalformedURLException ex) {
// Fix: chain the MalformedURLException as the cause instead of dropping it, so
// callers can see why the URL failed to parse.
throw logger.logExceptionAsError(
new IllegalArgumentException("The Azure Storage Blob endpoint url is malformed.", ex));
}
return this;
}
/**
 * Sets the name of the container that holds the blob.
 *
 * @param containerName Name of the container. If the value {@code null} or empty the root
 * container, {@code $root}, will be used.
 * @return the updated EncryptedBlobClientBuilder object
 */
public EncryptedBlobClientBuilder containerName(String containerName) {
this.containerName = containerName;
return this;
}
/**
 * Sets the name of the blob. The name is decoded then re-encoded so that both raw and
 * pre-encoded names end up consistently URL-encoded.
 *
 * @param blobName Name of the blob.
 * @return the updated EncryptedBlobClientBuilder object
 * @throws NullPointerException If {@code blobName} is {@code null}
 */
public EncryptedBlobClientBuilder blobName(String blobName) {
Objects.requireNonNull(blobName, "'blobName' cannot be null.");
this.blobName = Utility.urlEncode(Utility.urlDecode(blobName));
return this;
}
/**
 * Sets the snapshot identifier of the blob.
 *
 * @param snapshot Snapshot identifier for the blob.
 * @return the updated EncryptedBlobClientBuilder object
 */
public EncryptedBlobClientBuilder snapshot(String snapshot) {
this.snapshot = snapshot;
return this;
}
/**
 * Sets the {@link HttpClient} used to send and receive service requests.
 *
 * @param httpClient HttpClient to use for requests.
 * @return the updated EncryptedBlobClientBuilder object
 */
public EncryptedBlobClientBuilder httpClient(HttpClient httpClient) {
// Warn when an explicitly configured client is being cleared.
if (httpClient == null && this.httpClient != null) {
logger.info("'httpClient' is being set to 'null' when it was previously configured.");
}
this.httpClient = httpClient;
return this;
}
/**
 * Adds a {@link HttpPipelinePolicy} applied to each request, after the retry policy.
 * Repeated calls accumulate policies in insertion order.
 *
 * @param pipelinePolicy a pipeline policy
 * @return the updated EncryptedBlobClientBuilder object
 * @throws NullPointerException If {@code pipelinePolicy} is {@code null}.
 */
public EncryptedBlobClientBuilder addPolicy(HttpPipelinePolicy pipelinePolicy) {
Objects.requireNonNull(pipelinePolicy, "'pipelinePolicy' cannot be null");
this.additionalPolicies.add(pipelinePolicy);
return this;
}
/**
 * Sets the {@link HttpLogOptions} used when logging service requests and responses.
 *
 * @param logOptions The logging configuration to use when sending and receiving HTTP requests/responses.
 * @return the updated EncryptedBlobClientBuilder object
 * @throws NullPointerException If {@code logOptions} is {@code null}.
 */
public EncryptedBlobClientBuilder httpLogOptions(HttpLogOptions logOptions) {
Objects.requireNonNull(logOptions, "'logOptions' cannot be null.");
this.logOptions = logOptions;
return this;
}
/**
 * Gets the default Storage whitelist log headers and query parameters.
 *
 * @return the default http log options.
 */
public static HttpLogOptions getDefaultHttpLogOptions() {
return BuilderHelper.getDefaultHttpLogOptions();
}
/**
 * Sets the configuration store consulted for environment values while building the client.
 *
 * @param configuration Configuration store used to retrieve environment configurations.
 * @return the updated EncryptedBlobClientBuilder object
 */
public EncryptedBlobClientBuilder configuration(Configuration configuration) {
this.configuration = configuration;
return this;
}
/**
 * Sets the request retry options applied to every request made through the client.
 *
 * @param retryOptions {@link RequestRetryOptions}.
 * @return the updated EncryptedBlobClientBuilder object
 * @throws NullPointerException If {@code retryOptions} is {@code null}.
 */
public EncryptedBlobClientBuilder retryOptions(RequestRetryOptions retryOptions) {
Objects.requireNonNull(retryOptions, "'retryOptions' cannot be null.");
this.retryOptions = retryOptions;
return this;
}
/**
* Sets the {@link HttpPipeline} to use for the service client, and adds a decryption policy if one is not present.
*
* If {@code pipeline} is set, all other settings are ignored, aside from {@link
* and {@link
*
* <p>Use this method after setting the key in {@link
* {@link
*
* @param httpPipeline HttpPipeline to use for sending service requests and receiving responses.
* @return the updated EncryptedBlobClientBuilder object
*/
/**
 * Sets the {@link BlobServiceVersion} used for API requests.
 *
 * <p>When unset, the latest service version known to this client library is used, so
 * upgrading the library may implicitly target a newer service version. Pinning a version
 * may cause the service to reject newer APIs.</p>
 *
 * @param version {@link BlobServiceVersion} of the service to be used when making requests.
 * @return the updated EncryptedBlobClientBuilder object
 */
public EncryptedBlobClientBuilder serviceVersion(BlobServiceVersion version) {
this.version = version;
return this;
}
/**
 * Sets the {@link CustomerProvidedKey customer provided key} used to encrypt blob contents
 * on the server; {@code null} clears any previously configured key.
 *
 * @param customerProvidedKey {@link CustomerProvidedKey}
 * @return the updated EncryptedBlobClientBuilder object
 */
public EncryptedBlobClientBuilder customerProvidedKey(CustomerProvidedKey customerProvidedKey) {
this.customerProvidedKey = (customerProvidedKey == null)
? null
: new CpkInfo()
.setEncryptionKey(customerProvidedKey.getKey())
.setEncryptionKeySha256(customerProvidedKey.getKeySha256())
.setEncryptionAlgorithm(customerProvidedKey.getEncryptionAlgorithm());
return this;
}
/**
 * Configures this builder from an existing {@link BlobClient}: copies its {@link HttpPipeline},
 * blob {@link URL} and {@link BlobServiceVersion}.
 *
 * <p>Set the encryption key (or key resolver) before or after calling this. For security
 * reasons the {@link CustomerProvidedKey} and encryption scope are NOT copied from the
 * provided client; configure CPK on this builder explicitly if needed.</p>
 *
 * @param blobClient BlobClient used to configure the builder.
 * @return the updated EncryptedBlobClientBuilder object
 * @throws NullPointerException If {@code blobClient} is {@code null}.
 */
public EncryptedBlobClientBuilder blobClient(BlobClient blobClient) {
Objects.requireNonNull(blobClient);
return client(blobClient.getHttpPipeline(), blobClient.getBlobUrl(), blobClient.getServiceVersion());
}
/**
 * Configures this builder from an existing {@link BlobAsyncClient}: copies its
 * {@link HttpPipeline}, blob {@link URL} and {@link BlobServiceVersion}.
 *
 * <p>Set the encryption key (or key resolver) before or after calling this. For security
 * reasons the {@link CustomerProvidedKey} and encryption scope are NOT copied from the
 * provided client; configure CPK on this builder explicitly if needed.</p>
 *
 * @param blobAsyncClient BlobAsyncClient used to configure the builder.
 * @return the updated EncryptedBlobClientBuilder object
 * @throws NullPointerException If {@code blobAsyncClient} is {@code null}.
 */
public EncryptedBlobClientBuilder blobAsyncClient(BlobAsyncClient blobAsyncClient) {
Objects.requireNonNull(blobAsyncClient);
return client(blobAsyncClient.getHttpPipeline(), blobAsyncClient.getBlobUrl(),
blobAsyncClient.getServiceVersion());
}
/**
 * Helper that turns a regular client's configuration into this encrypted builder's state.
 *
 * @param httpPipeline {@link HttpPipeline}
 * @param endpoint The endpoint.
 * @param version {@link BlobServiceVersion} of the service to be used when making requests.
 * @return the updated EncryptedBlobClientBuilder object
 */
private EncryptedBlobClientBuilder client(HttpPipeline httpPipeline, String endpoint, BlobServiceVersion version) {
this.serviceVersion(version);
this.endpoint(endpoint);
return this.pipeline(httpPipeline);
}
} |
will do | public EncryptedBlobClientBuilder pipeline(HttpPipeline httpPipeline) {
if (this.httpPipeline != null && httpPipeline == null) {
logger.info("HttpPipeline is being set to 'null' when it was previously configured.");
}
checkValidEncryptionParameters();
HttpPipeline pipeline = null;
if (httpPipeline != null) {
List<HttpPipelinePolicy> policies = new ArrayList<>();
policies.add(new BlobDecryptionPolicy(keyWrapper, keyResolver));
for (int i = 0; i < httpPipeline.getPolicyCount(); i++) {
HttpPipelinePolicy currPolicy = httpPipeline.getPolicy(i);
policies.add(currPolicy);
}
pipeline = new HttpPipelineBuilder()
.httpClient(httpPipeline.getHttpClient())
.policies(policies.toArray(new HttpPipelinePolicy[0]))
.build();
}
this.httpPipeline = pipeline;
return this;
} | for (int i = 0; i < httpPipeline.getPolicyCount(); i++) { | public EncryptedBlobClientBuilder pipeline(HttpPipeline httpPipeline) {
if (this.httpPipeline != null && httpPipeline == null) {
logger.info("HttpPipeline is being set to 'null' when it was previously configured.");
}
checkValidEncryptionParameters();
HttpPipeline pipeline = null;
if (httpPipeline != null) {
List<HttpPipelinePolicy> policies = new ArrayList<>();
boolean decryptionPolicyPresent = false;
for (int i = 0; i < httpPipeline.getPolicyCount(); i++) {
HttpPipelinePolicy currPolicy = httpPipeline.getPolicy(i);
decryptionPolicyPresent |= currPolicy instanceof BlobDecryptionPolicy;
policies.add(currPolicy);
}
if (!decryptionPolicyPresent) {
policies.add(0, new BlobDecryptionPolicy(keyWrapper, keyResolver));
}
pipeline = new HttpPipelineBuilder()
.httpClient(httpPipeline.getHttpClient())
.policies(policies.toArray(new HttpPipelinePolicy[0]))
.build();
}
this.httpPipeline = pipeline;
return this;
} | class EncryptedBlobClientBuilder {
private final ClientLogger logger = new ClientLogger(EncryptedBlobClientBuilder.class);
// Keys used to look up the client name/version from USER_AGENT_PROPERTIES.
private static final String SDK_NAME = "name";
private static final String SDK_VERSION = "version";
// Blob identity, parsed from endpoint(String) or set via the individual setters.
private String endpoint;
private String accountName;
private String containerName;
private String blobName;
private String snapshot;
// Mutually exclusive credential styles; the setters null out the other two.
private StorageSharedKeyCredential storageSharedKeyCredential;
private TokenCredential tokenCredential;
private SasTokenCredential sasTokenCredential;
// Pipeline construction inputs; httpPipeline, when set, overrides all of them.
private HttpClient httpClient;
private final List<HttpPipelinePolicy> additionalPolicies = new ArrayList<>();
private HttpLogOptions logOptions;
private RequestRetryOptions retryOptions = new RequestRetryOptions();
private HttpPipeline httpPipeline;
private Configuration configuration;
// Client-side encryption configuration (key/resolver/wrap algorithm) and server-side CPK.
private AsyncKeyEncryptionKey keyWrapper;
private AsyncKeyEncryptionKeyResolver keyResolver;
private String keyWrapAlgorithm;
private BlobServiceVersion version;
private CpkInfo customerProvidedKey;
/**
 * Creates a new instance of the EncryptedBlobClientBuilder with default log options.
 */
public EncryptedBlobClientBuilder() {
this.logOptions = getDefaultHttpLogOptions();
}
/**
 * Creates a {@link EncryptedBlobClient} based on options set in the Builder, by wrapping
 * the async client built from the same configuration.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.cryptography.EncryptedBlobClientBuilder.buildEncryptedBlobClient}
 *
 * @return a {@link EncryptedBlobClient} created from the configurations in this builder.
 * @throws NullPointerException If {@code endpoint}, {@code containerName}, or {@code blobName} is {@code null}.
 */
public EncryptedBlobClient buildEncryptedBlobClient() {
return new EncryptedBlobClient(buildEncryptedBlobAsyncClient());
}
/**
 * Creates a {@link EncryptedBlobAsyncClient} based on options set in the Builder.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.cryptography.EncryptedBlobClientBuilder.buildEncryptedBlobAsyncClient}
 *
 * @return a {@link EncryptedBlobAsyncClient} created from the configurations in this builder.
 * @throws NullPointerException If {@code endpoint}, {@code containerName}, or {@code blobName} is {@code null}.
 */
public EncryptedBlobAsyncClient buildEncryptedBlobAsyncClient() {
Objects.requireNonNull(blobName, "'blobName' cannot be null.");
checkValidEncryptionParameters();
/*
Implicit and explicit root container access are functionally equivalent, but explicit references are easier
to read and debug.
*/
if (CoreUtils.isNullOrEmpty(containerName)) {
containerName = BlobContainerAsyncClient.ROOT_CONTAINER_NAME;
}
BlobServiceVersion serviceVersion = (version == null) ? BlobServiceVersion.getLatest() : version;
String blobUrl = String.format("%s/%s/%s", endpoint, containerName, blobName);
return new EncryptedBlobAsyncClient(getHttpPipeline(), blobUrl, serviceVersion, accountName, containerName,
blobName, snapshot, customerProvidedKey, keyWrapper, keyWrapAlgorithm);
}
// Lazily builds the HttpPipeline for the encrypted client. An explicitly configured
// pipeline (set via pipeline(HttpPipeline)) takes precedence; otherwise one is assembled
// here. Policy order matters and is preserved deliberately below.
private HttpPipeline getHttpPipeline() {
if (httpPipeline != null) {
return httpPipeline;
}
Configuration userAgentConfiguration = (configuration == null) ? Configuration.NONE : configuration;
List<HttpPipelinePolicy> policies = new ArrayList<>();
// Decryption is added first so every response body is decrypted before any other
// policy (validation, logging) observes it.
policies.add(new BlobDecryptionPolicy(keyWrapper, keyResolver));
String clientName = USER_AGENT_PROPERTIES.getOrDefault(SDK_NAME, "UnknownName");
String clientVersion = USER_AGENT_PROPERTIES.getOrDefault(SDK_VERSION, "UnknownVersion");
policies.add(new UserAgentPolicy(logOptions.getApplicationId(), clientName, clientVersion,
userAgentConfiguration));
policies.add(new RequestIdPolicy());
policies.add(new AddDatePolicy());
// At most one credential style is applied, in priority order: shared key, then AAD
// bearer token (scope "<endpoint>/.default"), then SAS token. Anonymous if none set.
if (storageSharedKeyCredential != null) {
policies.add(new StorageSharedKeyCredentialPolicy(storageSharedKeyCredential));
} else if (tokenCredential != null) {
BuilderHelper.httpsValidation(tokenCredential, "bearer token", endpoint, logger);
policies.add(new BearerTokenAuthenticationPolicy(tokenCredential, String.format("%s/.default", endpoint)));
} else if (sasTokenCredential != null) {
policies.add(new SasTokenCredentialPolicy(sasTokenCredential));
}
HttpPolicyProviders.addBeforeRetryPolicies(policies);
policies.add(new RequestRetryPolicy(retryOptions));
// User-supplied policies run after the retry policy (see addPolicy javadoc).
policies.addAll(additionalPolicies);
HttpPolicyProviders.addAfterRetryPolicies(policies);
// Echo-back validation of request id and CPK SHA-256 headers on responses.
policies.add(new ResponseValidationPolicyBuilder()
.addOptionalEcho(Constants.HeaderConstants.CLIENT_REQUEST_ID)
.addOptionalEcho(Constants.HeaderConstants.ENCRYPTION_KEY_SHA256)
.build());
policies.add(new HttpLoggingPolicy(logOptions));
policies.add(new ScrubEtagPolicy());
return new HttpPipelineBuilder()
.policies(policies.toArray(new HttpPipelinePolicy[0]))
.httpClient(httpClient)
.build();
}
/**
 * Configures the key encryption key and the algorithm used to wrap the generated
 * content encryption key.
 *
 * @param key The {@link AsyncKeyEncryptionKey} used to wrap/unwrap the content encryption key.
 * @param keyWrapAlgorithm The {@link String} used to wrap the key.
 * @return the updated EncryptedBlobClientBuilder object
 */
public EncryptedBlobClientBuilder key(AsyncKeyEncryptionKey key, String keyWrapAlgorithm) {
this.keyWrapAlgorithm = keyWrapAlgorithm;
this.keyWrapper = key;
return this;
}
/**
 * Configures the resolver used to pick the correct key when decrypting existing blobs.
 *
 * @param keyResolver The key resolver used to select the correct key for decrypting existing blobs.
 * @return the updated EncryptedBlobClientBuilder object
 */
public EncryptedBlobClientBuilder keyResolver(AsyncKeyEncryptionKeyResolver keyResolver) {
this.keyResolver = keyResolver;
return this;
}
// Validates the client-side encryption configuration: at least one of key/keyResolver
// must be present, and a key always requires a wrap algorithm.
private void checkValidEncryptionParameters() {
if (this.keyWrapper == null) {
if (this.keyResolver == null) {
throw logger.logExceptionAsError(new IllegalArgumentException("Key and KeyResolver cannot both be null"));
}
} else if (this.keyWrapAlgorithm == null) {
throw logger.logExceptionAsError(
new IllegalArgumentException("Key Wrap Algorithm must be specified with a Key."));
}
}
/**
 * Authorizes requests with a {@link StorageSharedKeyCredential}; clears any previously
 * configured token or SAS credential.
 *
 * @param credential {@link StorageSharedKeyCredential}.
 * @return the updated EncryptedBlobClientBuilder
 * @throws NullPointerException If {@code credential} is {@code null}.
 */
public EncryptedBlobClientBuilder credential(StorageSharedKeyCredential credential) {
Objects.requireNonNull(credential, "'credential' cannot be null.");
this.storageSharedKeyCredential = credential;
this.tokenCredential = null;
this.sasTokenCredential = null;
return this;
}
/**
 * Authorizes requests with a {@link TokenCredential}; clears any previously configured
 * shared-key or SAS credential.
 *
 * @param credential {@link TokenCredential}.
 * @return the updated EncryptedBlobClientBuilder
 * @throws NullPointerException If {@code credential} is {@code null}.
 */
public EncryptedBlobClientBuilder credential(TokenCredential credential) {
Objects.requireNonNull(credential, "'credential' cannot be null.");
this.tokenCredential = credential;
this.storageSharedKeyCredential = null;
this.sasTokenCredential = null;
return this;
}
/**
 * Authorizes requests with a SAS token; clears any previously configured shared-key or
 * token credential.
 *
 * @param sasToken The SAS token to use for authenticating requests.
 * @return the updated EncryptedBlobClientBuilder
 * @throws NullPointerException If {@code sasToken} is {@code null}.
 */
public EncryptedBlobClientBuilder sasToken(String sasToken) {
Objects.requireNonNull(sasToken, "'sasToken' cannot be null.");
this.sasTokenCredential = new SasTokenCredential(sasToken);
this.storageSharedKeyCredential = null;
this.tokenCredential = null;
return this;
}
/**
 * Clears every configured credential so requests are sent unauthenticated.
 *
 * <p>Intended for blobs that are publicly accessible.</p>
 *
 * @return the updated EncryptedBlobClientBuilder
 */
public EncryptedBlobClientBuilder setAnonymousAccess() {
this.sasTokenCredential = null;
this.tokenCredential = null;
this.storageSharedKeyCredential = null;
return this;
}
/**
 * Configures the builder from a storage account connection string: derives the blob
 * endpoint, the account name, and whichever credential (account key or SAS token) the
 * string carries.
 *
 * @param connectionString Connection string of the storage account.
 * @return the updated EncryptedBlobClientBuilder
 * @throws IllegalArgumentException If {@code connectionString} is invalid.
 */
public EncryptedBlobClientBuilder connectionString(String connectionString) {
StorageConnectionString storageConnectionString
= StorageConnectionString.create(connectionString, logger);
StorageEndpoint blobEndpoint = storageConnectionString.getBlobEndpoint();
if (blobEndpoint == null || blobEndpoint.getPrimaryUri() == null) {
throw logger
.logExceptionAsError(new IllegalArgumentException(
"connectionString missing required settings to derive blob service endpoint."));
}
this.endpoint(blobEndpoint.getPrimaryUri());
String parsedAccountName = storageConnectionString.getAccountName();
if (parsedAccountName != null) {
this.accountName = parsedAccountName;
}
StorageAuthenticationSettings authSettings = storageConnectionString.getStorageAuthSettings();
switch (authSettings.getType()) {
case ACCOUNT_NAME_KEY:
this.credential(new StorageSharedKeyCredential(authSettings.getAccount().getName(),
authSettings.getAccount().getAccessKey()));
break;
case SAS_TOKEN:
this.sasToken(authSettings.getSasToken());
break;
default:
break;
}
return this;
}
/**
 * Sets the service endpoint and additionally parses it for information (SAS token, container
 * name, blob name, snapshot).
 *
 * <p>If the endpoint points to a blob in the root container, the single path element is
 * interpreted as the container name, since that is the far more common case. When working with
 * blobs in the root container, set the endpoint to the account url and supply the blob name
 * separately via the blobName setter.</p>
 *
 * @param endpoint URL of the service
 * @return the updated EncryptedBlobClientBuilder object
 * @throws IllegalArgumentException If {@code endpoint} is {@code null} or is a malformed URL.
 */
public EncryptedBlobClientBuilder endpoint(String endpoint) {
try {
URL url = new URL(endpoint);
BlobUrlParts parts = BlobUrlParts.parse(url);
this.accountName = parts.getAccountName();
this.endpoint = BuilderHelper.getEndpoint(parts);
this.containerName = parts.getBlobContainerName();
this.blobName = Utility.urlEncode(parts.getBlobName());
this.snapshot = parts.getSnapshot();
String sasToken = parts.getCommonSasQueryParameters().encode();
if (!CoreUtils.isNullOrEmpty(sasToken)) {
this.sasToken(sasToken);
}
} catch (MalformedURLException ex) {
// Fix: chain the MalformedURLException as the cause instead of dropping it, so
// callers can see why the URL failed to parse.
throw logger.logExceptionAsError(
new IllegalArgumentException("The Azure Storage Blob endpoint url is malformed.", ex));
}
return this;
}
/**
 * Sets the name of the container that holds the blob.
 *
 * @param containerName Name of the container. If the value {@code null} or empty the root
 * container, {@code $root}, will be used.
 * @return the updated EncryptedBlobClientBuilder object
 */
public EncryptedBlobClientBuilder containerName(String containerName) {
this.containerName = containerName;
return this;
}
/**
 * Sets the name of the blob. The name is decoded then re-encoded so that both raw and
 * pre-encoded names end up consistently URL-encoded.
 *
 * @param blobName Name of the blob.
 * @return the updated EncryptedBlobClientBuilder object
 * @throws NullPointerException If {@code blobName} is {@code null}
 */
public EncryptedBlobClientBuilder blobName(String blobName) {
Objects.requireNonNull(blobName, "'blobName' cannot be null.");
this.blobName = Utility.urlEncode(Utility.urlDecode(blobName));
return this;
}
/**
 * Sets the snapshot identifier of the blob.
 *
 * @param snapshot Snapshot identifier for the blob.
 * @return the updated EncryptedBlobClientBuilder object
 */
public EncryptedBlobClientBuilder snapshot(String snapshot) {
this.snapshot = snapshot;
return this;
}
/**
 * Sets the {@link HttpClient} used to send and receive service requests.
 *
 * @param httpClient HttpClient to use for requests.
 * @return the updated EncryptedBlobClientBuilder object
 */
public EncryptedBlobClientBuilder httpClient(HttpClient httpClient) {
// Warn when an explicitly configured client is being cleared.
if (httpClient == null && this.httpClient != null) {
logger.info("'httpClient' is being set to 'null' when it was previously configured.");
}
this.httpClient = httpClient;
return this;
}
/**
 * Adds a {@link HttpPipelinePolicy} applied to each request, after the retry policy.
 * Repeated calls accumulate policies in insertion order.
 *
 * @param pipelinePolicy a pipeline policy
 * @return the updated EncryptedBlobClientBuilder object
 * @throws NullPointerException If {@code pipelinePolicy} is {@code null}.
 */
public EncryptedBlobClientBuilder addPolicy(HttpPipelinePolicy pipelinePolicy) {
Objects.requireNonNull(pipelinePolicy, "'pipelinePolicy' cannot be null");
this.additionalPolicies.add(pipelinePolicy);
return this;
}
/**
 * Sets the {@link HttpLogOptions} used when logging service requests and responses.
 *
 * @param logOptions The logging configuration to use when sending and receiving HTTP requests/responses.
 * @return the updated EncryptedBlobClientBuilder object
 * @throws NullPointerException If {@code logOptions} is {@code null}.
 */
public EncryptedBlobClientBuilder httpLogOptions(HttpLogOptions logOptions) {
Objects.requireNonNull(logOptions, "'logOptions' cannot be null.");
this.logOptions = logOptions;
return this;
}
/**
 * Gets the default Storage whitelist log headers and query parameters.
 *
 * @return the default http log options.
 */
public static HttpLogOptions getDefaultHttpLogOptions() {
return BuilderHelper.getDefaultHttpLogOptions();
}
/**
 * Sets the configuration store consulted for environment values while building the client.
 *
 * @param configuration Configuration store used to retrieve environment configurations.
 * @return the updated EncryptedBlobClientBuilder object
 */
public EncryptedBlobClientBuilder configuration(Configuration configuration) {
this.configuration = configuration;
return this;
}
/**
 * Sets the request retry options applied to every request made through the client.
 *
 * @param retryOptions {@link RequestRetryOptions}.
 * @return the updated EncryptedBlobClientBuilder object
 * @throws NullPointerException If {@code retryOptions} is {@code null}.
 */
public EncryptedBlobClientBuilder retryOptions(RequestRetryOptions retryOptions) {
Objects.requireNonNull(retryOptions, "'retryOptions' cannot be null.");
this.retryOptions = retryOptions;
return this;
}
/**
* Sets the {@link HttpPipeline} to use for the service client, and adds a decryption policy.
*
* If {@code pipeline} is set, all other settings are ignored, aside from {@link
*
* <p>Use this method after setting the key in {@link
* {@link
*
* @param httpPipeline HttpPipeline to use for sending service requests and receiving responses.
* @return the updated EncryptedBlobClientBuilder object
*/
/**
 * Sets the {@link BlobServiceVersion} used for API requests.
 *
 * <p>When unset, the latest service version known to this client library is used, so
 * upgrading the library may implicitly target a newer service version. Pinning a version
 * may cause the service to reject newer APIs.</p>
 *
 * @param version {@link BlobServiceVersion} of the service to be used when making requests.
 * @return the updated EncryptedBlobClientBuilder object
 */
public EncryptedBlobClientBuilder serviceVersion(BlobServiceVersion version) {
this.version = version;
return this;
}
/**
 * Sets the {@link CustomerProvidedKey customer provided key} used to encrypt blob contents
 * on the server; {@code null} clears any previously configured key.
 *
 * @param customerProvidedKey {@link CustomerProvidedKey}
 * @return the updated EncryptedBlobClientBuilder object
 */
public EncryptedBlobClientBuilder customerProvidedKey(CustomerProvidedKey customerProvidedKey) {
this.customerProvidedKey = (customerProvidedKey == null)
? null
: new CpkInfo()
.setEncryptionKey(customerProvidedKey.getKey())
.setEncryptionKeySha256(customerProvidedKey.getKeySha256())
.setEncryptionAlgorithm(customerProvidedKey.getEncryptionAlgorithm());
return this;
}
/**
 * Configures this builder from an existing {@link BlobClient}: copies its {@link HttpPipeline},
 * blob {@link URL} and {@link BlobServiceVersion}.
 *
 * <p>Set the encryption key (or key resolver) before or after calling this. The
 * {@link CustomerProvidedKey} and encryption scope are NOT copied from the provided client;
 * configure CPK on this builder explicitly if needed.</p>
 *
 * @param blobClient BlobClient used to configure the builder.
 * @return the updated EncryptedBlobClientBuilder object
 * @throws NullPointerException If {@code blobClient} is {@code null}.
 */
public EncryptedBlobClientBuilder blobClient(BlobClient blobClient) {
Objects.requireNonNull(blobClient);
return client(blobClient.getHttpPipeline(), blobClient.getBlobUrl(), blobClient.getServiceVersion());
}
/**
 * Configures the builder based on the passed {@link BlobAsyncClient}. This will set the {@link HttpPipeline},
 * {@link URL} and {@link BlobServiceVersion} that are used to interact with the service.
 *
 * <p>Use this method after setting the key in {@link #key(AsyncKeyEncryptionKey, String)} and, if needed,
 * {@link #keyResolver(AsyncKeyEncryptionKeyResolver)}.</p>
 *
 * <p>If {@code pipeline} is set, all other settings are ignored, aside from {@link #endpoint(String) endpoint} and
 * {@link #serviceVersion(BlobServiceVersion) serviceVersion}.</p>
 *
 * <p>Note that this method does not copy over the {@link CustomerProvidedKey} and encryption scope properties
 * from the provided client. To set CPK, please use {@link #customerProvidedKey(CustomerProvidedKey)}.</p>
 *
 * <p>NOTE(review): the original {@code {@link}} tags were truncated by extraction; the targets above were
 * reconstructed from the setters visible on this builder — confirm against upstream documentation.</p>
 *
 * @param blobAsyncClient BlobAsyncClient used to configure the builder.
 * @return the updated EncryptedBlobClientBuilder object
 * @throws NullPointerException If {@code blobAsyncClient} is {@code null}.
 */
public EncryptedBlobClientBuilder blobAsyncClient(BlobAsyncClient blobAsyncClient) {
    Objects.requireNonNull(blobAsyncClient);
    return client(blobAsyncClient.getHttpPipeline(), blobAsyncClient.getBlobUrl(),
        blobAsyncClient.getServiceVersion());
}
/**
 * Helper method to transform a regular client into an encrypted client.
 *
 * @param httpPipeline {@link HttpPipeline}
 * @param endpoint The endpoint.
 * @param version {@link BlobServiceVersion} of the service to be used when making requests.
 * @return the updated EncryptedBlobClientBuilder object
 */
private EncryptedBlobClientBuilder client(HttpPipeline httpPipeline, String endpoint, BlobServiceVersion version) {
    // Each setter returns this builder, so the three updates collapse into one fluent chain
    // applied in the same order as before: endpoint, then service version, then pipeline.
    return this.endpoint(endpoint)
        .serviceVersion(version)
        .pipeline(httpPipeline);
}
} | class EncryptedBlobClientBuilder {
private final ClientLogger logger = new ClientLogger(EncryptedBlobClientBuilder.class);
private static final String SDK_NAME = "name";
private static final String SDK_VERSION = "version";
private String endpoint;
private String accountName;
private String containerName;
private String blobName;
private String snapshot;
private StorageSharedKeyCredential storageSharedKeyCredential;
private TokenCredential tokenCredential;
private SasTokenCredential sasTokenCredential;
private HttpClient httpClient;
private final List<HttpPipelinePolicy> additionalPolicies = new ArrayList<>();
private HttpLogOptions logOptions;
private RequestRetryOptions retryOptions = new RequestRetryOptions();
private HttpPipeline httpPipeline;
private Configuration configuration;
private AsyncKeyEncryptionKey keyWrapper;
private AsyncKeyEncryptionKeyResolver keyResolver;
private String keyWrapAlgorithm;
private BlobServiceVersion version;
private CpkInfo customerProvidedKey;
/**
* Creates a new instance of the EncryptedBlobClientBuilder
*/
public EncryptedBlobClientBuilder() {
logOptions = getDefaultHttpLogOptions();
}
/**
* Creates a {@link EncryptedBlobClient} based on options set in the Builder.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.cryptography.EncryptedBlobClientBuilder.buildEncryptedBlobAsyncClient}
*
* @return a {@link EncryptedBlobClient} created from the configurations in this builder.
* @throws NullPointerException If {@code endpoint}, {@code containerName}, or {@code blobName} is {@code null}.
*/
public EncryptedBlobClient buildEncryptedBlobClient() {
return new EncryptedBlobClient(buildEncryptedBlobAsyncClient());
}
/**
 * Creates a {@link EncryptedBlobAsyncClient} based on options set in the Builder.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.cryptography.EncryptedBlobClientBuilder.buildEncryptedBlobClient}
 *
 * @return a {@link EncryptedBlobAsyncClient} created from the configurations in this builder.
 * @throws NullPointerException If {@code endpoint} or {@code blobName} is {@code null}. (A null or empty
 *         {@code containerName} is valid and resolves to the root container.)
 */
public EncryptedBlobAsyncClient buildEncryptedBlobAsyncClient() {
    // The previous javadoc already advertised an NPE for a null endpoint, but nothing enforced it; without
    // this check a null endpoint silently produced the literal string "null/..." in the blob URL below.
    Objects.requireNonNull(endpoint, "'endpoint' cannot be null.");
    Objects.requireNonNull(blobName, "'blobName' cannot be null.");
    checkValidEncryptionParameters();
    /*
    Implicit and explicit root container access are functionally equivalent, but explicit references are easier
    to read and debug.
    */
    if (CoreUtils.isNullOrEmpty(containerName)) {
        containerName = BlobContainerAsyncClient.ROOT_CONTAINER_NAME;
    }
    // Fall back to the newest service version known to this library when none was pinned.
    BlobServiceVersion serviceVersion = version != null ? version : BlobServiceVersion.getLatest();
    return new EncryptedBlobAsyncClient(getHttpPipeline(),
        String.format("%s/%s/%s", endpoint, containerName, blobName), serviceVersion, accountName, containerName,
        blobName, snapshot, customerProvidedKey, keyWrapper, keyWrapAlgorithm);
}
/**
 * Builds the {@link HttpPipeline} for the constructed client, or returns the caller-supplied pipeline
 * verbatim when one was set. Policy order below is significant.
 */
private HttpPipeline getHttpPipeline() {
    // An explicitly configured pipeline overrides everything else.
    if (httpPipeline != null) {
        return httpPipeline;
    }
    Configuration userAgentConfiguration = (configuration == null) ? Configuration.NONE : configuration;
    List<HttpPipelinePolicy> policies = new ArrayList<>();
    // The decryption policy is installed ahead of every other policy — NOTE(review): placement appears
    // intentional so it processes response bodies before/after the rest of the chain; confirm upstream.
    policies.add(new BlobDecryptionPolicy(keyWrapper, keyResolver));
    String clientName = USER_AGENT_PROPERTIES.getOrDefault(SDK_NAME, "UnknownName");
    String clientVersion = USER_AGENT_PROPERTIES.getOrDefault(SDK_VERSION, "UnknownVersion");
    policies.add(new UserAgentPolicy(logOptions.getApplicationId(), clientName, clientVersion,
        userAgentConfiguration));
    policies.add(new RequestIdPolicy());
    policies.add(new AddDatePolicy());
    // At most one credential type is active; the credential setters null out the competing ones.
    if (storageSharedKeyCredential != null) {
        policies.add(new StorageSharedKeyCredentialPolicy(storageSharedKeyCredential));
    } else if (tokenCredential != null) {
        // Bearer tokens must only travel over HTTPS; this validates the endpoint scheme.
        BuilderHelper.httpsValidation(tokenCredential, "bearer token", endpoint, logger);
        policies.add(new BearerTokenAuthenticationPolicy(tokenCredential, String.format("%s/.default", endpoint)));
    } else if (sasTokenCredential != null) {
        policies.add(new SasTokenCredentialPolicy(sasTokenCredential));
    }
    HttpPolicyProviders.addBeforeRetryPolicies(policies);
    policies.add(new RequestRetryPolicy(retryOptions));
    // User-added policies sit after the retry policy, so (per azure-core convention) they run per attempt.
    policies.addAll(additionalPolicies);
    HttpPolicyProviders.addAfterRetryPolicies(policies);
    // Validate that echoed request-id / CPK-hash headers match what was sent, when present.
    policies.add(new ResponseValidationPolicyBuilder()
        .addOptionalEcho(Constants.HeaderConstants.CLIENT_REQUEST_ID)
        .addOptionalEcho(Constants.HeaderConstants.ENCRYPTION_KEY_SHA256)
        .build());
    policies.add(new HttpLoggingPolicy(logOptions));
    policies.add(new ScrubEtagPolicy());
    return new HttpPipelineBuilder()
        .policies(policies.toArray(new HttpPipelinePolicy[0]))
        .httpClient(httpClient)
        .build();
}
/**
* Sets the encryption key parameters for the client
*
* @param key An object of type {@link AsyncKeyEncryptionKey} that is used to wrap/unwrap the content encryption
* key
* @param keyWrapAlgorithm The {@link String} used to wrap the key.
* @return the updated EncryptedBlobClientBuilder object
*/
public EncryptedBlobClientBuilder key(AsyncKeyEncryptionKey key, String keyWrapAlgorithm) {
this.keyWrapper = key;
this.keyWrapAlgorithm = keyWrapAlgorithm;
return this;
}
/**
* Sets the encryption parameters for this client
*
* @param keyResolver The key resolver used to select the correct key for decrypting existing blobs.
* @return the updated EncryptedBlobClientBuilder object
*/
public EncryptedBlobClientBuilder keyResolver(AsyncKeyEncryptionKeyResolver keyResolver) {
this.keyResolver = keyResolver;
return this;
}
/**
 * Verifies the encryption configuration is usable before a client is built.
 * At least one of key/keyResolver must be set, and a key requires a wrap algorithm.
 */
private void checkValidEncryptionParameters() {
    boolean hasKey = this.keyWrapper != null;
    boolean hasResolver = this.keyResolver != null;
    // Without either a key or a resolver there is no way to obtain the key-encryption key.
    if (!hasKey && !hasResolver) {
        throw logger.logExceptionAsError(new IllegalArgumentException("Key and KeyResolver cannot both be null"));
    }
    // A key alone is insufficient: the wrap algorithm must accompany it.
    if (hasKey && this.keyWrapAlgorithm == null) {
        throw logger.logExceptionAsError(
            new IllegalArgumentException("Key Wrap Algorithm must be specified with a Key."));
    }
}
/**
* Sets the {@link StorageSharedKeyCredential} used to authorize requests sent to the service.
*
* @param credential {@link StorageSharedKeyCredential}.
* @return the updated EncryptedBlobClientBuilder
* @throws NullPointerException If {@code credential} is {@code null}.
*/
public EncryptedBlobClientBuilder credential(StorageSharedKeyCredential credential) {
this.storageSharedKeyCredential = Objects.requireNonNull(credential, "'credential' cannot be null.");
this.tokenCredential = null;
this.sasTokenCredential = null;
return this;
}
/**
* Sets the {@link TokenCredential} used to authorize requests sent to the service.
*
* @param credential {@link TokenCredential}.
* @return the updated EncryptedBlobClientBuilder
* @throws NullPointerException If {@code credential} is {@code null}.
*/
public EncryptedBlobClientBuilder credential(TokenCredential credential) {
this.tokenCredential = Objects.requireNonNull(credential, "'credential' cannot be null.");
this.storageSharedKeyCredential = null;
this.sasTokenCredential = null;
return this;
}
/**
* Sets the SAS token used to authorize requests sent to the service.
*
* @param sasToken The SAS token to use for authenticating requests.
* @return the updated EncryptedBlobClientBuilder
* @throws NullPointerException If {@code sasToken} is {@code null}.
*/
public EncryptedBlobClientBuilder sasToken(String sasToken) {
this.sasTokenCredential = new SasTokenCredential(Objects.requireNonNull(sasToken,
"'sasToken' cannot be null."));
this.storageSharedKeyCredential = null;
this.tokenCredential = null;
return this;
}
/**
* Clears the credential used to authorize the request.
*
* <p>This is for blobs that are publicly accessible.</p>
*
* @return the updated EncryptedBlobClientBuilder
*/
public EncryptedBlobClientBuilder setAnonymousAccess() {
this.storageSharedKeyCredential = null;
this.tokenCredential = null;
this.sasTokenCredential = null;
return this;
}
/**
* Sets the connection string to connect to the service.
*
* @param connectionString Connection string of the storage account.
* @return the updated EncryptedBlobClientBuilder
* @throws IllegalArgumentException If {@code connectionString} is invalid.
*/
public EncryptedBlobClientBuilder connectionString(String connectionString) {
StorageConnectionString storageConnectionString
= StorageConnectionString.create(connectionString, logger);
StorageEndpoint endpoint = storageConnectionString.getBlobEndpoint();
if (endpoint == null || endpoint.getPrimaryUri() == null) {
throw logger
.logExceptionAsError(new IllegalArgumentException(
"connectionString missing required settings to derive blob service endpoint."));
}
this.endpoint(endpoint.getPrimaryUri());
if (storageConnectionString.getAccountName() != null) {
this.accountName = storageConnectionString.getAccountName();
}
StorageAuthenticationSettings authSettings = storageConnectionString.getStorageAuthSettings();
if (authSettings.getType() == StorageAuthenticationSettings.Type.ACCOUNT_NAME_KEY) {
this.credential(new StorageSharedKeyCredential(authSettings.getAccount().getName(),
authSettings.getAccount().getAccessKey()));
} else if (authSettings.getType() == StorageAuthenticationSettings.Type.SAS_TOKEN) {
this.sasToken(authSettings.getSasToken());
}
return this;
}
/**
* Sets the service endpoint, additionally parses it for information (SAS token, container name, blob name)
*
* <p>If the endpoint is to a blob in the root container, this method will fail as it will interpret the blob name
* as the container name. With only one path element, it is impossible to distinguish between a container name and a
* blob in the root container, so it is assumed to be the container name as this is much more common. When working
* with blobs in the root container, it is best to set the endpoint to the account url and specify the blob name
* separately using the {@link EncryptedBlobClientBuilder
*
* @param endpoint URL of the service
* @return the updated EncryptedBlobClientBuilder object
* @throws IllegalArgumentException If {@code endpoint} is {@code null} or is a malformed URL.
*/
public EncryptedBlobClientBuilder endpoint(String endpoint) {
try {
URL url = new URL(endpoint);
BlobUrlParts parts = BlobUrlParts.parse(url);
this.accountName = parts.getAccountName();
this.endpoint = BuilderHelper.getEndpoint(parts);
this.containerName = parts.getBlobContainerName();
this.blobName = Utility.urlEncode(parts.getBlobName());
this.snapshot = parts.getSnapshot();
String sasToken = parts.getCommonSasQueryParameters().encode();
if (!CoreUtils.isNullOrEmpty(sasToken)) {
this.sasToken(sasToken);
}
} catch (MalformedURLException ex) {
throw logger.logExceptionAsError(
new IllegalArgumentException("The Azure Storage Blob endpoint url is malformed."));
}
return this;
}
/**
* Sets the name of the container that contains the blob.
*
* @param containerName Name of the container. If the value {@code null} or empty the root container, {@code $root},
* will be used.
* @return the updated EncryptedBlobClientBuilder object
*/
public EncryptedBlobClientBuilder containerName(String containerName) {
this.containerName = containerName;
return this;
}
/**
* Sets the name of the blob.
*
* @param blobName Name of the blob.
* @return the updated EncryptedBlobClientBuilder object
* @throws NullPointerException If {@code blobName} is {@code null}
*/
public EncryptedBlobClientBuilder blobName(String blobName) {
this.blobName = Utility.urlEncode(Utility.urlDecode(Objects.requireNonNull(blobName,
"'blobName' cannot be null.")));
return this;
}
/**
* Sets the snapshot identifier of the blob.
*
* @param snapshot Snapshot identifier for the blob.
* @return the updated EncryptedBlobClientBuilder object
*/
public EncryptedBlobClientBuilder snapshot(String snapshot) {
this.snapshot = snapshot;
return this;
}
/**
* Sets the {@link HttpClient} to use for sending a receiving requests to and from the service.
*
* @param httpClient HttpClient to use for requests.
* @return the updated EncryptedBlobClientBuilder object
*/
public EncryptedBlobClientBuilder httpClient(HttpClient httpClient) {
if (this.httpClient != null && httpClient == null) {
logger.info("'httpClient' is being set to 'null' when it was previously configured.");
}
this.httpClient = httpClient;
return this;
}
/**
* Adds a {@link HttpPipelinePolicy} to apply on each request sent. The policy will be added after the retry policy.
* If the method is called multiple times, all policies will be added and their order preserved.
*
* @param pipelinePolicy a pipeline policy
* @return the updated EncryptedBlobClientBuilder object
* @throws NullPointerException If {@code pipelinePolicy} is {@code null}.
*/
public EncryptedBlobClientBuilder addPolicy(HttpPipelinePolicy pipelinePolicy) {
this.additionalPolicies.add(Objects.requireNonNull(pipelinePolicy, "'pipelinePolicy' cannot be null"));
return this;
}
/**
* Sets the {@link HttpLogOptions} for service requests.
*
* @param logOptions The logging configuration to use when sending and receiving HTTP requests/responses.
* @return the updated EncryptedBlobClientBuilder object
* @throws NullPointerException If {@code logOptions} is {@code null}.
*/
public EncryptedBlobClientBuilder httpLogOptions(HttpLogOptions logOptions) {
this.logOptions = Objects.requireNonNull(logOptions, "'logOptions' cannot be null.");
return this;
}
/**
* Gets the default Storage whitelist log headers and query parameters.
*
* @return the default http log options.
*/
public static HttpLogOptions getDefaultHttpLogOptions() {
return BuilderHelper.getDefaultHttpLogOptions();
}
/**
* Sets the configuration object used to retrieve environment configuration values during building of the client.
*
* @param configuration Configuration store used to retrieve environment configurations.
* @return the updated EncryptedBlobClientBuilder object
*/
public EncryptedBlobClientBuilder configuration(Configuration configuration) {
this.configuration = configuration;
return this;
}
/**
* Sets the request retry options for all the requests made through the client.
*
* @param retryOptions {@link RequestRetryOptions}.
* @return the updated EncryptedBlobClientBuilder object
* @throws NullPointerException If {@code retryOptions} is {@code null}.
*/
public EncryptedBlobClientBuilder retryOptions(RequestRetryOptions retryOptions) {
this.retryOptions = Objects.requireNonNull(retryOptions, "'retryOptions' cannot be null.");
return this;
}
/**
* Sets the {@link HttpPipeline} to use for the service client, and adds a decryption policy if one is not present.
*
* If {@code pipeline} is set, all other settings are ignored, aside from {@link
* and {@link
*
* <p>Use this method after setting the key in {@link
* {@link
*
* @param httpPipeline HttpPipeline to use for sending service requests and receiving responses.
* @return the updated EncryptedBlobClientBuilder object
*/
/**
* Sets the {@link BlobServiceVersion} that is used when making API requests.
* <p>
* If a service version is not provided, the service version that will be used will be the latest known service
* version based on the version of the client library being used. If no service version is specified, updating to a
* newer version of the client library will have the result of potentially moving to a newer service version.
* <p>
* Targeting a specific service version may also mean that the service will return an error for newer APIs.
*
* @param version {@link BlobServiceVersion} of the service to be used when making requests.
* @return the updated EncryptedBlobClientBuilder object
*/
public EncryptedBlobClientBuilder serviceVersion(BlobServiceVersion version) {
this.version = version;
return this;
}
/**
* Sets the {@link CustomerProvidedKey customer provided key} that is used to encrypt blob contents on the server.
*
* @param customerProvidedKey {@link CustomerProvidedKey}
* @return the updated EncryptedBlobClientBuilder object
*/
public EncryptedBlobClientBuilder customerProvidedKey(CustomerProvidedKey customerProvidedKey) {
if (customerProvidedKey == null) {
this.customerProvidedKey = null;
} else {
this.customerProvidedKey = new CpkInfo()
.setEncryptionKey(customerProvidedKey.getKey())
.setEncryptionKeySha256(customerProvidedKey.getKeySha256())
.setEncryptionAlgorithm(customerProvidedKey.getEncryptionAlgorithm());
}
return this;
}
/**
* Configures the builder based on the passed {@link BlobClient}. This will set the {@link HttpPipeline},
* {@link URL} and {@link BlobServiceVersion} that are used to interact with the service.
*
* <p>Use this method after setting the key in {@link
* {@link
*
* <p>If {@code pipeline} is set, all other settings are ignored, aside from {@link
* {@link
*
* <p>Note that for security reasons, this method does not copy over the {@link CustomerProvidedKey} and
* encryption scope properties from the provided client. To set CPK, please use
* {@link
*
* @param blobClient BlobClient used to configure the builder.
* @return the updated EncryptedBlobClientBuilder object
* @throws NullPointerException If {@code containerClient} is {@code null}.
*/
public EncryptedBlobClientBuilder blobClient(BlobClient blobClient) {
Objects.requireNonNull(blobClient);
return client(blobClient.getHttpPipeline(), blobClient.getBlobUrl(), blobClient.getServiceVersion());
}
/**
* Configures the builder based on the passed {@link BlobAsyncClient}. This will set the {@link HttpPipeline},
* {@link URL} and {@link BlobServiceVersion} that are used to interact with the service.
*
* <p>Use this method after setting the key in {@link
* {@link
*
* <p>If {@code pipeline} is set, all other settings are ignored, aside from {@link
* {@link
*
* <p>Note that for security reasons, this method does not copy over the {@link CustomerProvidedKey} and
* encryption scope properties from the provided client. To set CPK, please use
* {@link
*
* @param blobAsyncClient BlobAsyncClient used to configure the builder.
* @return the updated EncryptedBlobClientBuilder object
* @throws NullPointerException If {@code containerClient} is {@code null}.
*/
public EncryptedBlobClientBuilder blobAsyncClient(BlobAsyncClient blobAsyncClient) {
Objects.requireNonNull(blobAsyncClient);
return client(blobAsyncClient.getHttpPipeline(), blobAsyncClient.getBlobUrl(),
blobAsyncClient.getServiceVersion());
}
/**
* Helper method to transform a regular client into an encrypted client
* @param httpPipeline {@link HttpPipeline}
* @param endpoint The endpoint.
* @param version {@link BlobServiceVersion} of the service to be used when making requests.
* @return the updated EncryptedBlobClientBuilder object
*/
private EncryptedBlobClientBuilder client(HttpPipeline httpPipeline, String endpoint, BlobServiceVersion version) {
this.endpoint(endpoint);
this.serviceVersion(version);
return this.pipeline(httpPipeline);
}
} |
Oops! | BlobClient toBlobClient() throws IOException {
Path root = this.normalize().toAbsolutePath().getRoot();
if (root == null) {
throw Utility.logError(logger,
new IllegalStateException("Root should never be null after calling toAbsolutePath."));
}
String fileStoreName = this.rootToFileStore(root.toString());
BlobContainerClient containerClient =
((AzureFileStore) this.parentFileSystem.getFileStore(fileStoreName)).getContainerClient();
String blobName = this.withoutRoot();
if (blobName.isEmpty()) {
<<<<<<< HEAD
throw new IOException("Cannot get a blob client to a path that only contains the root or is an empty path");
}
return containerClient.getBlobClient(blobName);
} | <<<<<<< HEAD | BlobClient toBlobClient() throws IOException {
Path root = this.normalize().toAbsolutePath().getRoot();
if (root == null) {
throw Utility.logError(logger,
new IllegalStateException("Root should never be null after calling toAbsolutePath."));
}
String fileStoreName = this.rootToFileStore(root.toString());
BlobContainerClient containerClient =
((AzureFileStore) this.parentFileSystem.getFileStore(fileStoreName)).getContainerClient();
String blobName = this.withoutRoot();
if (blobName.isEmpty()) {
throw new IOException("Cannot get a blob client to a path that only contains the root or is an empty path");
}
return containerClient.getBlobClient(blobName);
} | class AzurePath implements Path {
private final ClientLogger logger = new ClientLogger(AzurePath.class);
static final String ROOT_DIR_SUFFIX = ":";
private final AzureFileSystem parentFileSystem;
private final String pathString;
AzurePath(AzureFileSystem parentFileSystem, String first, String... more) {
this.parentFileSystem = parentFileSystem;
/*
Break all strings into their respective elements and remove empty elements. This has the effect of stripping
any trailing, leading, or internal delimiters so there are no duplicates/empty elements when we join.
*/
List<String> elements = new ArrayList<>(Arrays.asList(first.split(parentFileSystem.getSeparator())));
if (more != null) {
for (String next : more) {
elements.addAll(Arrays.asList(next.split(parentFileSystem.getSeparator())));
}
}
elements.removeIf(String::isEmpty);
this.pathString = String.join(this.parentFileSystem.getSeparator(), elements);
for (int i = 0; i < elements.size(); i++) {
String element = elements.get(i);
/*
If there is a root component, it must be the first element. A root component takes the format of
"<fileStoreName>:". The ':', or ROOT_DIR_SUFFIX, if present, can only appear once, and can only be the last
character of the first element.
*/
if (i == 0) {
if (element.contains(ROOT_DIR_SUFFIX) && element.indexOf(ROOT_DIR_SUFFIX) < element.length() - 1) {
throw Utility.logError(logger, new InvalidPathException(this.pathString, ROOT_DIR_SUFFIX + " may"
+ " only be used as the last character in the root component of a path"));
}
} else if (element.contains(ROOT_DIR_SUFFIX)) {
throw Utility.logError(logger, new InvalidPathException(this.pathString, ROOT_DIR_SUFFIX + " is an "
+ "invalid character except to identify the root element of this path if there is one."));
}
}
}
/**
* {@inheritDoc}
*/
@Override
public FileSystem getFileSystem() {
return this.parentFileSystem;
}
/**
* A path is considered absolute in this file system if it contains a root component.
*
* {@inheritDoc}
*/
@Override
public boolean isAbsolute() {
return this.getRoot() != null;
}
/**
* The root component of this path also identifies the Azure Storage Container in which the file is stored. This
* method will not validate that the root component corresponds to an actual file store/container in this
* file system. It will simply return the root component of the path if one is present and syntactically valid.
*
* {@inheritDoc}
*/
@Override
public Path getRoot() {
String[] elements = this.splitToElements();
if (elements.length > 0 && elements[0].endsWith(ROOT_DIR_SUFFIX)) {
return this.parentFileSystem.getPath(elements[0]);
}
return null;
}
/**
* {@inheritDoc}
*/
@Override
public Path getFileName() {
if (this.isRoot()) {
return null;
} else if (this.pathString.isEmpty()) {
return this;
} else {
List<String> elements = Arrays.asList(this.splitToElements());
return this.parentFileSystem.getPath(elements.get(elements.size() - 1));
}
}
/**
* {@inheritDoc}
*/
@Override
public Path getParent() {
/*
If this path only has one element or is empty, there is no parent. Note the root is included in the parent, so
we don't use getNameCount here.
*/
String[] elements = this.splitToElements();
if (elements.length == 1 || elements.length == 0) {
return null;
}
return this.parentFileSystem.getPath(
this.pathString.substring(0, this.pathString.lastIndexOf(this.parentFileSystem.getSeparator())));
}
/**
* {@inheritDoc}
*/
@Override
public int getNameCount() {
if (this.pathString.isEmpty()) {
return 1;
}
return this.splitToElements(this.withoutRoot()).length;
}
/**
* {@inheritDoc}
*/
@Override
public Path getName(int i) {
if (i < 0 || i >= this.getNameCount()) {
throw Utility.logError(logger, new IllegalArgumentException(String.format("Index %d is out of bounds", i)));
}
if (this.pathString.isEmpty()) {
return this;
}
return this.parentFileSystem.getPath(this.splitToElements(this.withoutRoot())[i]);
}
/**
* {@inheritDoc}
*/
@Override
public Path subpath(int begin, int end) {
if (begin < 0 || begin >= this.getNameCount()
|| end <= begin || end > this.getNameCount()) {
throw Utility.logError(logger,
new IllegalArgumentException(String.format("Values of begin: %d and end: %d are invalid", begin, end)));
}
String[] subnames = Stream.of(this.splitToElements(this.withoutRoot()))
.skip(begin)
.limit(end - begin)
.toArray(String[]::new);
return this.parentFileSystem.getPath(String.join(this.parentFileSystem.getSeparator(), subnames));
}
/**
* In this implementation, a root component starts with another root component if the two root components are
* equivalent strings. In other words, if the files are stored in the same container.
*
* {@inheritDoc}
*/
@Override
public boolean startsWith(Path path) {
if (!path.getFileSystem().equals(this.parentFileSystem)) {
return false;
}
if (this.pathString.isEmpty() ^ ((AzurePath) path).pathString.isEmpty()) {
return false;
}
String[] thisPathElements = this.splitToElements();
String[] otherPathElements = ((AzurePath) path).splitToElements();
if (otherPathElements.length > thisPathElements.length) {
return false;
}
for (int i = 0; i < otherPathElements.length; i++) {
if (!otherPathElements[i].equals(thisPathElements[i])) {
return false;
}
}
return true;
}
/**
* {@inheritDoc}
*/
@Override
public boolean startsWith(String s) {
return this.startsWith(this.parentFileSystem.getPath(s));
}
/**
* In this implementation, a root component ends with another root component if the two root components are
* equivalent strings. In other words, if the files are stored in the same container.
*
* {@inheritDoc}
*/
@Override
public boolean endsWith(Path path) {
/*
There can only be one instance of a file system with a given id, so comparing object identity is equivalent
to checking ids here.
*/
if (path.getFileSystem() != this.parentFileSystem) {
return false;
}
if (this.pathString.isEmpty() ^ ((AzurePath) path).pathString.isEmpty()) {
return false;
}
String[] thisPathElements = this.splitToElements();
String[] otherPathElements = ((AzurePath) path).splitToElements();
if (otherPathElements.length > thisPathElements.length) {
return false;
}
if (path.getRoot() != null && otherPathElements.length != thisPathElements.length) {
return false;
}
for (int i = 1; i <= otherPathElements.length; i++) {
if (!otherPathElements[otherPathElements.length - i]
.equals(thisPathElements[thisPathElements.length - i])) {
return false;
}
}
return true;
}
/**
* {@inheritDoc}
*/
@Override
public boolean endsWith(String s) {
return this.endsWith(this.parentFileSystem.getPath(s));
}
/**
 * This file system follows the standard practice mentioned in the original docs.
 *
 * {@inheritDoc}
 */
@Override
public Path normalize() {
    Deque<String> stack = new ArrayDeque<>();
    String[] pathElements = this.splitToElements();
    Path root = this.getRoot();
    String rootStr = root == null ? null : root.toString();
    for (String element : pathElements) {
        if (element.equals(".")) {
            // "." never contributes to the normalized path.
            continue;
        } else if (element.equals("..")) {
            if (rootStr != null) {
                // Absolute path: ".." directly under the root is a no-op; otherwise drop the last element.
                if (!stack.isEmpty() && stack.peekLast().equals(rootStr)) {
                    continue;
                } else {
                    stack.removeLast();
                }
            } else {
                // Relative path: ".." cancels the most recently appended element, unless the result so far
                // is empty or already ends in "..", in which case this ".." must be preserved.
                if (stack.isEmpty()) {
                    stack.addLast(element);
                } else if (stack.peekLast().equals("..")) {
                    // BUGFIX: was stack.peek() (deque HEAD), but elements are appended with addLast (TAIL),
                    // so "../a/.." wrongly normalized to "../a/.." instead of "..".
                    stack.addLast(element);
                } else {
                    stack.removeLast();
                }
            }
        } else {
            stack.addLast(element);
        }
    }
    return this.parentFileSystem.getPath("", stack.toArray(new String[0]));
}
/**
* If the other path has a root component, it is considered absolute, and it is returned.
*
* {@inheritDoc}
*/
@Override
public Path resolve(Path path) {
if (path.isAbsolute()) {
return path;
}
if (path.getNameCount() == 0) {
return this;
}
return this.parentFileSystem.getPath(this.toString(), path.toString());
}
/**
* {@inheritDoc}
*/
@Override
public Path resolve(String s) {
return this.resolve(this.parentFileSystem.getPath(s));
}
/**
* {@inheritDoc}
*/
@Override
public Path resolveSibling(Path path) {
if (path.isAbsolute()) {
return path;
}
Path parent = this.getParent();
return parent == null ? path : parent.resolve(path);
}
/**
* {@inheritDoc}
*/
@Override
public Path resolveSibling(String s) {
return this.resolveSibling(this.parentFileSystem.getPath(s));
}
/**
 * If both paths have a root component, it is still possible to relativize one against the other.
 *
 * {@inheritDoc}
 */
@Override
public Path relativize(Path path) {
    // Relativization is only defined when both paths are absolute or both are relative.
    if (path.getRoot() == null ^ this.getRoot() == null) {
        throw Utility.logError(logger,
            new IllegalArgumentException("Both paths must be absolute or neither can be"));
    }
    AzurePath thisNormalized = (AzurePath) this.normalize();
    Path otherNormalized = path.normalize();
    Deque<String> deque = new ArrayDeque<>(
        Arrays.asList(otherNormalized.toString().split(this.parentFileSystem.getSeparator())));
    int i = 0;
    String[] thisElements = thisNormalized.splitToElements();
    // Strip the common prefix shared by both paths.
    while (i < thisElements.length && !deque.isEmpty() && thisElements[i].equals(deque.peekFirst())) {
        deque.removeFirst();
        i++;
    }
    // Each remaining element of this path must be climbed out of with a "..".
    while (i < thisElements.length) {
        deque.addFirst("..");
        i++;
    }
    return this.parentFileSystem.getPath("", deque.toArray(new String[0]));
}
/**
* No authority component is defined for the {@code URI} returned by this method. This implementation offers the
* same equivalence guarantee as the default provider.
*
* {@inheritDoc}
*/
@Override
public URI toUri() {
try {
return new URI(this.parentFileSystem.provider().getScheme(), null, "/" + this.toAbsolutePath().toString(),
null, null);
} catch (URISyntaxException e) {
throw Utility.logError(logger, new IllegalStateException("Unable to create valid URI from path", e));
}
}
/**
* {@inheritDoc}
*/
@Override
public Path toAbsolutePath() {
if (this.isAbsolute()) {
return this;
}
return this.parentFileSystem.getDefaultDirectory().resolve(this);
}
/**
* Unsupported.
* <p>
* {@inheritDoc}
*/
@Override
public Path toRealPath(LinkOption... linkOptions) throws IOException {
throw new UnsupportedOperationException("Symbolic links are not supported.");
}
/**
* {@inheritDoc}
*/
@Override
public File toFile() {
throw new UnsupportedOperationException();
}
/**
* Unsupported.
* <p>
* {@inheritDoc}
*/
@Override
public WatchKey register(WatchService watchService, WatchEvent.Kind<?>[] kinds, WatchEvent.Modifier... modifiers)
throws IOException {
throw new UnsupportedOperationException("WatchEvents are not supported.");
}
/**
* Unsupported.
* <p>
* {@inheritDoc}
*/
@Override
public WatchKey register(WatchService watchService, WatchEvent.Kind<?>... kinds) throws IOException {
throw new UnsupportedOperationException("WatchEvents are not supported.");
}
/**
 * Returns an iterator over the name elements of this path; the root component, if any, is not
 * included. An empty path iterates over itself as its single element, per the {@link Path}
 * contract. (The previous javadoc said "Unsupported" — a copy/paste error from the watch-service
 * methods above; this method is fully implemented.)
 *
 * {@inheritDoc}
 */
@Override
public Iterator<Path> iterator() {
    if (this.pathString.isEmpty()) {
        // The empty path has exactly one name element: itself.
        return Collections.singletonList((Path) this).iterator();
    }
    // Strip the root, then wrap each remaining element as a single-element Path.
    return Arrays.asList(Stream.of(this.splitToElements(this.withoutRoot()))
        .map(s -> this.parentFileSystem.getPath(s))
        .toArray(Path[]::new))
        .iterator();
}
/**
* This result of this method is identical to a string comparison on the underlying path strings.
*
* {@inheritDoc}
*/
@Override
public int compareTo(Path path) {
if (!(path instanceof AzurePath)) {
throw Utility.logError(logger, new ClassCastException("Other path is not an instance of AzurePath."));
}
return this.pathString.compareTo(((AzurePath) path).pathString);
}
/**
* {@inheritDoc}
*/
@Override
public String toString() {
return this.pathString;
}
/**
* A path is considered equal to another path if it is associated with the same file system instance and if the
* path strings are equivalent.
*
* {@inheritDoc}
*/
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
}
if (o == null || getClass() != o.getClass()) {
return false;
}
AzurePath paths = (AzurePath) o;
return Objects.equals(parentFileSystem, paths.parentFileSystem)
&& Objects.equals(pathString, paths.pathString);
}
@Override
public int hashCode() {
return Objects.hash(parentFileSystem, pathString);
}
/*
We don't store the blob client because unlike other types in this package, a Path does not actually indicate the
existence or even validity of any remote resource. It is purely a representation of a path. Therefore, we do not
construct the client or perform any validation until it is requested.
*/
/**
* @return Whether this path consists of only a root component.
*/
boolean isRoot() {
return this.equals(this.getRoot());
=======
throw new IOException("Cannot get a blob client to a path that only contains the root");
}
return containerClient.getBlobClient(blobName);
>>>>>>> upstream/master
} | class AzurePath implements Path {
private final ClientLogger logger = new ClientLogger(AzurePath.class);
static final String ROOT_DIR_SUFFIX = ":";
private final AzureFileSystem parentFileSystem;
private final String pathString;
/**
 * Builds a path from one or more raw strings. Each string is split on the file system separator,
 * empty elements are dropped, and the result is re-joined into a single canonical path string
 * (no leading, trailing, or doubled separators).
 *
 * @param parentFileSystem the file system this path belongs to
 * @param first first portion of the path string; may carry the root component
 * @param more further portions, joined in order; may be {@code null}
 * @throws InvalidPathException if ROOT_DIR_SUFFIX (':') appears anywhere other than as the last
 * character of the first element
 */
AzurePath(AzureFileSystem parentFileSystem, String first, String... more) {
    this.parentFileSystem = parentFileSystem;
    /*
    Break all strings into their respective elements and remove empty elements. This has the effect of stripping
    any trailing, leading, or internal delimiters so there are no duplicates/empty elements when we join.
    */
    List<String> elements = new ArrayList<>(Arrays.asList(first.split(parentFileSystem.getSeparator())));
    if (more != null) {
        for (String next : more) {
            elements.addAll(Arrays.asList(next.split(parentFileSystem.getSeparator())));
        }
    }
    elements.removeIf(String::isEmpty);
    // Canonical form: elements joined by exactly one separator.
    this.pathString = String.join(this.parentFileSystem.getSeparator(), elements);
    for (int i = 0; i < elements.size(); i++) {
        String element = elements.get(i);
        /*
        If there is a root component, it must be the first element. A root component takes the format of
        "<fileStoreName>:". The ':', or ROOT_DIR_SUFFIX, if present, can only appear once, and can only be the last
        character of the first element.
        */
        if (i == 0) {
            if (element.contains(ROOT_DIR_SUFFIX) && element.indexOf(ROOT_DIR_SUFFIX) < element.length() - 1) {
                throw Utility.logError(logger, new InvalidPathException(this.pathString, ROOT_DIR_SUFFIX + " may"
                    + " only be used as the last character in the root component of a path"));
            }
        } else if (element.contains(ROOT_DIR_SUFFIX)) {
            throw Utility.logError(logger, new InvalidPathException(this.pathString, ROOT_DIR_SUFFIX + " is an "
                + "invalid character except to identify the root element of this path if there is one."));
        }
    }
}
/**
* {@inheritDoc}
*/
@Override
public FileSystem getFileSystem() {
return this.parentFileSystem;
}
/**
* A path is considered absolute in this file system if it contains a root component.
*
* {@inheritDoc}
*/
@Override
public boolean isAbsolute() {
return this.getRoot() != null;
}
/**
 * The root component of this path also identifies the Azure Storage Container in which the file is
 * stored. This method does not validate that the root corresponds to an actual file store/container
 * in this file system; it simply returns the root component if one is present and syntactically
 * valid.
 *
 * {@inheritDoc}
 */
@Override
public Path getRoot() {
    String[] parts = this.splitToElements();
    // A syntactically valid root is a first element ending in ROOT_DIR_SUFFIX (':').
    if (parts.length == 0 || !parts[0].endsWith(ROOT_DIR_SUFFIX)) {
        return null;
    }
    return this.parentFileSystem.getPath(parts[0]);
}
/**
 * Returns the element farthest from the root, or {@code null} for a pure root path. The empty
 * path is its own file name.
 *
 * {@inheritDoc}
 */
@Override
public Path getFileName() {
    if (this.isRoot()) {
        return null; // A root-only path has no file name.
    }
    if (this.pathString.isEmpty()) {
        return this; // The empty path names itself.
    }
    String[] parts = this.splitToElements();
    return this.parentFileSystem.getPath(parts[parts.length - 1]);
}
/**
 * Returns this path without its farthest element. The root component, when present, is part of
 * the parent, so a one-element path (root-only or a single name) has no parent.
 *
 * {@inheritDoc}
 */
@Override
public Path getParent() {
    /*
    If this path only has one element or is empty, there is no parent. Note the root is included in the parent, so
    we don't use getNameCount here.
    */
    String[] elements = this.splitToElements();
    if (elements.length == 1 || elements.length == 0) {
        return null;
    }
    // Trim from the last separator onward; the canonical path string guarantees the separator
    // before the final element is the last separator present.
    return this.parentFileSystem.getPath(
        this.pathString.substring(0, this.pathString.lastIndexOf(this.parentFileSystem.getSeparator())));
}
/**
* {@inheritDoc}
*/
@Override
public int getNameCount() {
if (this.pathString.isEmpty()) {
return 1;
}
return this.splitToElements(this.withoutRoot()).length;
}
/**
 * Returns the name element at the given index, root excluded. The empty path has a single
 * element: itself.
 *
 * {@inheritDoc}
 */
@Override
public Path getName(int i) {
    int count = this.getNameCount();
    if (i < 0 || i >= count) {
        throw Utility.logError(logger, new IllegalArgumentException(String.format("Index %d is out of bounds", i)));
    }
    if (this.pathString.isEmpty()) {
        return this;
    }
    String element = this.splitToElements(this.withoutRoot())[i];
    return this.parentFileSystem.getPath(element);
}
/**
 * Returns the sub-path of name elements in {@code [begin, end)}, root excluded.
 *
 * {@inheritDoc}
 */
@Override
public Path subpath(int begin, int end) {
    if (begin < 0 || begin >= this.getNameCount()
        || end <= begin || end > this.getNameCount()) {
        throw Utility.logError(logger,
            new IllegalArgumentException(String.format("Values of begin: %d and end: %d are invalid", begin, end)));
    }
    // Copy the requested window of name elements and re-join them with the separator.
    String[] elements = this.splitToElements(this.withoutRoot());
    String[] window = Arrays.copyOfRange(elements, begin, end);
    return this.parentFileSystem.getPath(String.join(this.parentFileSystem.getSeparator(), window));
}
/**
* In this implementation, a root component starts with another root component if the two root components are
* equivalent strings. In other words, if the files are stored in the same container.
*
* {@inheritDoc}
*/
@Override
public boolean startsWith(Path path) {
if (!path.getFileSystem().equals(this.parentFileSystem)) {
return false;
}
if (this.pathString.isEmpty() ^ ((AzurePath) path).pathString.isEmpty()) {
return false;
}
String[] thisPathElements = this.splitToElements();
String[] otherPathElements = ((AzurePath) path).splitToElements();
if (otherPathElements.length > thisPathElements.length) {
return false;
}
for (int i = 0; i < otherPathElements.length; i++) {
if (!otherPathElements[i].equals(thisPathElements[i])) {
return false;
}
}
return true;
}
/**
* {@inheritDoc}
*/
@Override
public boolean startsWith(String s) {
return this.startsWith(this.parentFileSystem.getPath(s));
}
/**
 * In this implementation, a root component ends with another root component if the two root components are
 * equivalent strings. In other words, if the files are stored in the same container.
 *
 * {@inheritDoc}
 */
@Override
public boolean endsWith(Path path) {
    /*
    There can only be one instance of a file system with a given id, so comparing object identity is equivalent
    to checking ids here.
    */
    if (path.getFileSystem() != this.parentFileSystem) {
        return false;
    }
    // An empty path only ends with another empty path (and vice versa).
    if (this.pathString.isEmpty() ^ ((AzurePath) path).pathString.isEmpty()) {
        return false;
    }
    String[] thisPathElements = this.splitToElements();
    String[] otherPathElements = ((AzurePath) path).splitToElements();
    if (otherPathElements.length > thisPathElements.length) {
        return false;
    }
    // If the other path is absolute, the match must cover this entire path, root included.
    if (path.getRoot() != null && otherPathElements.length != thisPathElements.length) {
        return false;
    }
    // Compare element-wise from the tail.
    for (int i = 1; i <= otherPathElements.length; i++) {
        if (!otherPathElements[otherPathElements.length - i]
            .equals(thisPathElements[thisPathElements.length - i])) {
            return false;
        }
    }
    return true;
}
/**
* {@inheritDoc}
*/
@Override
public boolean endsWith(String s) {
return this.endsWith(this.parentFileSystem.getPath(s));
}
/**
 * This file system follows the standard practice mentioned in the original docs: redundant "."
 * elements are removed, ".." cancels the preceding real name where possible, leading ".."
 * elements of a relative path are preserved, and ".." never ascends above the root of an
 * absolute path.
 *
 * {@inheritDoc}
 */
@Override
public Path normalize() {
    Deque<String> stack = new ArrayDeque<>();
    String[] pathElements = this.splitToElements();
    Path root = this.getRoot();
    String rootStr = root == null ? null : root.toString();
    for (String element : pathElements) {
        if (element.equals(".")) {
            continue; // "." is always redundant.
        } else if (element.equals("..")) {
            if (rootStr != null) {
                // Absolute path: ".." directly under the root is a no-op; otherwise pop the
                // most recent element. The root itself is never popped.
                if (!stack.isEmpty() && stack.peekLast().equals(rootStr)) {
                    continue;
                } else {
                    stack.removeLast();
                }
            } else {
                /*
                Relative path: ".." cancels a preceding real name but accumulates when there is
                nothing left to cancel. BUGFIX: this must inspect the most recently pushed
                element via peekLast(); the previous code used peek(), which on ArrayDeque is
                peekFirst(), so e.g. "../a/.." normalized to "../a/.." instead of "..".
                */
                if (stack.isEmpty()) {
                    stack.addLast(element);
                } else if (stack.peekLast().equals("..")) {
                    stack.addLast(element);
                } else {
                    stack.removeLast();
                }
            }
        } else {
            stack.addLast(element);
        }
    }
    return this.parentFileSystem.getPath("", stack.toArray(new String[0]));
}
/**
* If the other path has a root component, it is considered absolute, and it is returned.
*
* {@inheritDoc}
*/
@Override
public Path resolve(Path path) {
if (path.isAbsolute()) {
return path;
}
if (path.getNameCount() == 0) {
return this;
}
return this.parentFileSystem.getPath(this.toString(), path.toString());
}
/**
* {@inheritDoc}
*/
@Override
public Path resolve(String s) {
return this.resolve(this.parentFileSystem.getPath(s));
}
/**
* {@inheritDoc}
*/
@Override
public Path resolveSibling(Path path) {
if (path.isAbsolute()) {
return path;
}
Path parent = this.getParent();
return parent == null ? path : parent.resolve(path);
}
/**
* {@inheritDoc}
*/
@Override
public Path resolveSibling(String s) {
return this.resolveSibling(this.parentFileSystem.getPath(s));
}
/**
 * If both paths have a root component, it is still possible to relativize one against the other.
 * Both paths must be absolute, or neither may be.
 *
 * {@inheritDoc}
 */
@Override
public Path relativize(Path path) {
    // XOR: exactly one of the two paths having a root is the invalid combination.
    if (path.getRoot() == null ^ this.getRoot() == null) {
        throw Utility.logError(logger,
            new IllegalArgumentException("Both paths must be absolute or neither can be"));
    }
    AzurePath thisNormalized = (AzurePath) this.normalize();
    Path otherNormalized = path.normalize();
    Deque<String> deque = new ArrayDeque<>(
        Arrays.asList(otherNormalized.toString().split(this.parentFileSystem.getSeparator())));
    // Drop the prefix common to both paths.
    int i = 0;
    String[] thisElements = thisNormalized.splitToElements();
    while (i < thisElements.length && !deque.isEmpty() && thisElements[i].equals(deque.peekFirst())) {
        deque.removeFirst();
        i++;
    }
    // For every element of this path left over, step up one level before descending into other.
    while (i < thisElements.length) {
        deque.addFirst("..");
        i++;
    }
    // NOTE(review): when other equals this, split("") leaves one empty string in the deque;
    // this presumably relies on getPath discarding empty elements to yield the empty path — confirm.
    return this.parentFileSystem.getPath("", deque.toArray(new String[0]));
}
/**
* No authority component is defined for the {@code URI} returned by this method. This implementation offers the
* same equivalence guarantee as the default provider.
*
* {@inheritDoc}
*/
@Override
public URI toUri() {
try {
return new URI(this.parentFileSystem.provider().getScheme(), null, "/" + this.toAbsolutePath().toString(),
null, null);
} catch (URISyntaxException e) {
throw Utility.logError(logger, new IllegalStateException("Unable to create valid URI from path", e));
}
}
/**
* {@inheritDoc}
*/
@Override
public Path toAbsolutePath() {
if (this.isAbsolute()) {
return this;
}
return this.parentFileSystem.getDefaultDirectory().resolve(this);
}
/**
* Unsupported.
* <p>
* {@inheritDoc}
*/
@Override
public Path toRealPath(LinkOption... linkOptions) throws IOException {
throw new UnsupportedOperationException("Symbolic links are not supported.");
}
/**
* {@inheritDoc}
*/
@Override
public File toFile() {
throw new UnsupportedOperationException();
}
/**
* Unsupported.
* <p>
* {@inheritDoc}
*/
@Override
public WatchKey register(WatchService watchService, WatchEvent.Kind<?>[] kinds, WatchEvent.Modifier... modifiers)
throws IOException {
throw new UnsupportedOperationException("WatchEvents are not supported.");
}
/**
* Unsupported.
* <p>
* {@inheritDoc}
*/
@Override
public WatchKey register(WatchService watchService, WatchEvent.Kind<?>... kinds) throws IOException {
throw new UnsupportedOperationException("WatchEvents are not supported.");
}
/**
 * Returns an iterator over the name elements of this path; the root component, if any, is not
 * included. An empty path iterates over itself as its single element, per the {@link Path}
 * contract. (The previous javadoc said "Unsupported" — a copy/paste error from the watch-service
 * methods above; this method is fully implemented.)
 *
 * {@inheritDoc}
 */
@Override
public Iterator<Path> iterator() {
    if (this.pathString.isEmpty()) {
        // The empty path has exactly one name element: itself.
        return Collections.singletonList((Path) this).iterator();
    }
    // Strip the root, then wrap each remaining element as a single-element Path.
    return Arrays.asList(Stream.of(this.splitToElements(this.withoutRoot()))
        .map(s -> this.parentFileSystem.getPath(s))
        .toArray(Path[]::new))
        .iterator();
}
/**
 * The result of this method is identical to a string comparison on the underlying path strings.
 *
 * {@inheritDoc}
 */
@Override
public int compareTo(Path path) {
    if (path instanceof AzurePath) {
        AzurePath other = (AzurePath) path;
        return this.pathString.compareTo(other.pathString);
    }
    throw Utility.logError(logger, new ClassCastException("Other path is not an instance of AzurePath."));
}
/**
* {@inheritDoc}
*/
@Override
public String toString() {
return this.pathString;
}
/**
* A path is considered equal to another path if it is associated with the same file system instance and if the
* path strings are equivalent.
*
* {@inheritDoc}
*/
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
}
if (o == null || getClass() != o.getClass()) {
return false;
}
AzurePath paths = (AzurePath) o;
return Objects.equals(parentFileSystem, paths.parentFileSystem)
&& Objects.equals(pathString, paths.pathString);
}
@Override
public int hashCode() {
return Objects.hash(parentFileSystem, pathString);
}
/*
We don't store the blob client because unlike other types in this package, a Path does not actually indicate the
existence or even validity of any remote resource. It is purely a representation of a path. Therefore, we do not
construct the client or perform any validation until it is requested.
*/
/**
* @return Whether this path consists of only a root component.
*/
boolean isRoot() {
return this.equals(this.getRoot());
}
/**
 * Strips the root component, and the separator immediately following it, from the path string
 * when a root is present.
 */
private String withoutRoot() {
    String remainder = this.pathString;
    Path root = this.getRoot();
    if (root != null) {
        remainder = remainder.substring(root.toString().length());
    }
    // Drop the separator left between the root and the first name element.
    // NOTE(review): substring(1) assumes a single-character separator — confirm with
    // AzureFileSystem.getSeparator().
    return remainder.startsWith(this.parentFileSystem.getSeparator())
        ? remainder.substring(1)
        : remainder;
}
private String[] splitToElements() {
return this.splitToElements(this.pathString);
}
private String[] splitToElements(String str) {
String[] arr = str.split(this.parentFileSystem.getSeparator());
/*
This is a special case where we split after removing the root from a path that is just the root. Or otherwise
have an empty path.
*/
if (arr.length == 1 && arr[0].isEmpty()) {
return new String[0];
}
return arr;
}
private String rootToFileStore(String root) {
return root.substring(0, root.length() - 1);
}
} |
Do we want to log and throw an exception if the exception is that the blob doesn't exist? I'm good with either, just a theoretical question on what delete on something that doesn't exist means. | public void delete(Path path) throws IOException {
AzurePath aPath = validatePathInstanceType(path);
validateNotRoot(path, "Delete");
BlobClient blobClient = aPath.toBlobClient();
DirectoryStatus dirStatus = checkDirStatus(blobClient);
if (dirStatus.equals(DirectoryStatus.DOES_NOT_EXIST)) {
throw Utility.logError(logger, new NoSuchFileException(path.toString()));
}
if (dirStatus.equals(DirectoryStatus.NOT_EMPTY)) {
throw Utility.logError(logger, new DirectoryNotEmptyException(path.toString()));
}
try {
blobClient.delete();
} catch (BlobStorageException e) {
throw Utility.logError(logger, new IOException(e));
}
} | throw Utility.logError(logger, new IOException(e)); | public void delete(Path path) throws IOException {
AzurePath aPath = validatePathInstanceType(path);
validateNotRoot(path, "Delete");
BlobClient blobClient = aPath.toBlobClient();
DirectoryStatus dirStatus = checkDirStatus(blobClient);
if (dirStatus.equals(DirectoryStatus.DOES_NOT_EXIST)) {
throw Utility.logError(logger, new NoSuchFileException(path.toString()));
}
if (dirStatus.equals(DirectoryStatus.NOT_EMPTY)) {
throw Utility.logError(logger, new DirectoryNotEmptyException(path.toString()));
}
try {
blobClient.delete();
} catch (BlobStorageException e) {
if (e.getErrorCode().equals(BlobErrorCode.BLOB_NOT_FOUND)) {
throw Utility.logError(logger, new NoSuchFileException(path.toString()));
}
throw Utility.logError(logger, new IOException(e));
}
} | class AzureFileSystemProvider extends FileSystemProvider {
private final ClientLogger logger = new ClientLogger(AzureFileSystemProvider.class);
public static final String CONTENT_TYPE = "Content-Type";
public static final String CONTENT_DISPOSITION = "Content-Disposition";
public static final String CONTENT_LANGUAGE = "Content-Language";
public static final String CONTENT_ENCODING = "Content-Encoding";
public static final String CONTENT_MD5 = "Content-MD5";
public static final String CACHE_CONTROL = "Cache-Control";
private static final String ACCOUNT_QUERY_KEY = "account";
private static final int COPY_TIMEOUT_SECONDS = 30;
static final String DIR_METADATA_MARKER = "is_hdi_folder";
private final ConcurrentMap<String, FileSystem> openFileSystems;
/**
* Creates an AzureFileSystemProvider.
*/
public AzureFileSystemProvider() {
this.openFileSystems = new ConcurrentHashMap<>();
}
/**
* Returns {@code "azb".}
*/
@Override
public String getScheme() {
return "azb";
}
/**
 * The format of a {@code URI} identifying a file system is {@code "azb://..."} with the account
 * carried in the {@code account} query parameter (see ACCOUNT_QUERY_KEY); extractAccountName
 * derives the map key from it. (Original javadoc was truncated — TODO restore full format spec.)
 * <p>
 * Once closed, a file system with the same identifier may be reopened.
 *
 * {@inheritDoc}
 */
@Override
public FileSystem newFileSystem(URI uri, Map<String, ?> config) throws IOException {
    String accountName = extractAccountName(uri);
    // Fast-path rejection before paying for construction.
    if (this.openFileSystems.containsKey(accountName)) {
        throw Utility.logError(this.logger, new FileSystemAlreadyExistsException("Name: " + accountName));
    }
    AzureFileSystem afs = new AzureFileSystem(this, accountName, config);
    /*
    BUGFIX: containsKey-then-put is not atomic, even on a ConcurrentMap; two concurrent callers
    could both pass the check and the second put would silently replace the first file system.
    putIfAbsent makes the insert atomic; a lost race surfaces as the same
    FileSystemAlreadyExistsException the check above throws.
    */
    if (this.openFileSystems.putIfAbsent(accountName, afs) != null) {
        throw Utility.logError(this.logger, new FileSystemAlreadyExistsException("Name: " + accountName));
    }
    return afs;
}
/**
 * The format of a {@code URI} identifying a file system is {@code "azb://..."} with the account
 * carried in the {@code account} query parameter. (Original javadoc was truncated — TODO restore
 * full format spec.)
 * <p>
 * Trying to retrieve a closed file system will throw a {@link FileSystemNotFoundException}. Once
 * closed, a file system with the same identifier may be reopened.
 *
 * {@inheritDoc}
 */
@Override
public FileSystem getFileSystem(URI uri) {
    String accountName = extractAccountName(uri);
    /*
    BUGFIX: a single get() replaces containsKey()+get(): it is atomic with respect to a
    concurrent removal (the old code could pass containsKey and then get() null) and avoids a
    double map lookup.
    */
    FileSystem fileSystem = this.openFileSystems.get(accountName);
    if (fileSystem == null) {
        throw Utility.logError(this.logger, new FileSystemNotFoundException("Name: " + accountName));
    }
    return fileSystem;
}
/**
* {@inheritDoc}
*/
@Override
public Path getPath(URI uri) {
return getFileSystem(uri).getPath(uri.getPath());
}
/**
 * Not yet implemented.
 * <p>
 * NOTE(review): returning {@code null} violates the {@link FileSystemProvider} contract and will
 * NPE in callers; this stub should throw {@link UnsupportedOperationException} until implemented.
 * Left unchanged here to avoid altering behavior.
 *
 * {@inheritDoc}
 */
@Override
public SeekableByteChannel newByteChannel(Path path, Set<? extends OpenOption> set,
    FileAttribute<?>... fileAttributes) throws IOException {
    return null; // TODO: implement.
}
/**
 * Not yet implemented.
 * <p>
 * NOTE(review): returning {@code null} violates the {@link FileSystemProvider} contract and will
 * NPE in callers; this stub should throw {@link UnsupportedOperationException} until implemented.
 * Left unchanged here to avoid altering behavior.
 *
 * {@inheritDoc}
 */
@Override
public DirectoryStream<Path> newDirectoryStream(Path path, DirectoryStream.Filter<? super Path> filter)
    throws IOException {
    return null; // TODO: implement.
}
/**
* The existence of a directory in the {@code AzureFileSystem} is defined on two levels. <i>Weak existence</i> is
* defined by the presence of a non-zero number of blobs prefixed with the directory's path. This concept is also
* known as a <i>virtual directory</i> and enables the file system to work with containers that were pre-loaded
* with data by another source but need to be accessed by this file system. <i>Strong existence</i> is defined as
* the presence of an actual storage resource at the given path, which in the case of directories, is a zero-length
* blob whose name is the directory path with a particular metadata field indicating the blob's status as a
* directory. This is also known as a <i>concrete directory</i>. Directories created by this file system will
* strongly exist. Operations targeting directories themselves as the object (e.g. setting properties) will target
* marker blobs underlying concrete directories. Other operations (e.g. listing) will operate on the blob-name
* prefix.
* <p>
* This method fulfills the nio contract of: "The check for the existence of the file and the creation of the
* directory if it does not exist are a single operation that is atomic with respect to all other filesystem
* activities that might affect the directory." More specifically, this method will atomically check for <i>strong
* existence</i> of another file or directory at the given path and fail if one is present. On the other hand, we
* only check for <i>weak existence</i> of the parent to determine if the given path is valid. Additionally, the
* action of checking whether the parent exists, is <i>not</i> atomic with the creation of the directory. Note that
* while it is possible that the parent may be deleted between when the parent is determined to exist and the
* creation of the child, the creation of the child will always ensure the existence of a virtual parent, so the
* child will never be left floating and unreachable. The different checks on parent and child is due to limitations
* in the Storage service API.
* <p>
* There may be some unintuitive behavior when working with directories in this file system, particularly virtual
* directories(usually those not created by this file system). A virtual directory will disappear as soon as all its
* children have been deleted. Furthermore, if a directory with the given path weakly exists at the time of calling
* this method, this method will still return success and create a concrete directory at the target location.
* In other words, it is possible to "double create" a directory if it first weakly exists and then is strongly
* created. This is both because it is impossible to atomically check if a virtual directory exists while creating a
* concrete directory and because such behavior will have minimal side effects--no files will be overwritten and the
* directory will still be available for writing as intended, though it may not be empty.
* <p>
* This method will attempt to extract standard HTTP content headers from the list of file attributes to set them
* as blob headers. All other attributes will be set as blob metadata. The value of every attribute will be
* converted to a {@code String} with the exception of the Content-MD5 attribute which expects a {@code byte[]}.
* When extracting the content headers, the following strings will be used for comparison (constants for these
* values can be found on this type):
* <ul>
* <li>{@code Content-Type}</li>
* <li>{@code Content-Disposition}</li>
* <li>{@code Content-Language}</li>
* <li>{@code Content-Encoding}</li>
* <li>{@code Content-MD5}</li>
* <li>{@code Cache-Control}</li>
* </ul>
* Note that these properties also have a particular semantic in that if one is specified, all are updated. In other
* words, if any of the above is set, all those that are not set will be cleared. See the
* <a href="https:
* information.
*
* {@inheritDoc}
*/
@Override
public void createDirectory(Path path, FileAttribute<?>... fileAttributes) throws IOException {
    AzurePath aPath = validatePathInstanceType(path);
    // Normalize a null varargs array so downstream code can iterate unconditionally.
    fileAttributes = fileAttributes == null ? new FileAttribute<?>[0] : fileAttributes;
    BlobClient client = aPath.toBlobClient();
    validateNotRoot(aPath, "Create");
    if (checkParentDirectoryExists(aPath)) {
        try {
            // Split the attributes into HTTP content headers and plain blob metadata.
            List<FileAttribute<?>> attributeList = new ArrayList<>(Arrays.asList(fileAttributes));
            BlobHttpHeaders headers = Utility.extractHttpHeaders(attributeList, logger);
            Map<String, String> metadata = Utility.convertAttributesToMetadata(attributeList);
            // ifNoneMatch("*") makes the existence check and the creation atomic on the service.
            putDirectoryBlob(client, headers, metadata, new BlobRequestConditions().setIfNoneMatch("*"));
        } catch (BlobStorageException e) {
            if (e.getStatusCode() == HttpURLConnection.HTTP_CONFLICT
                && e.getErrorCode().equals(BlobErrorCode.BLOB_ALREADY_EXISTS)) {
                throw Utility.logError(logger, new FileAlreadyExistsException(aPath.toString()));
            } else {
                // BUGFIX: corrected misspelled error message ("occured" -> "occurred").
                throw Utility.logError(logger, new IOException("An error occurred when creating the directory", e));
            }
        }
    } else {
        throw Utility.logError(logger, new IOException("Parent directory does not exist for path: "
            + aPath.toString()));
    }
}
/**
* Creates the actual directory marker. This method should only be used when any necessary checks for proper
* conditions of directory creation (e.g. parent existence) have already been performed.
*
* @param destinationClient A blobClient pointing to the location where the directory should be put.
* @param headers Any headers that should be associated with the directory.
* @param metadata Any metadata that should be associated with the directory. This method will add the necessary
* metadata to distinguish this blob as a directory.
* @param requestConditions Any necessary request conditions to pass when creating the directory blob.
*/
private void putDirectoryBlob(BlobClient destinationClient, BlobHttpHeaders headers, Map<String, String> metadata,
BlobRequestConditions requestConditions) {
metadata = prepareMetadataForDirectory(metadata);
destinationClient.getBlockBlobClient().commitBlockListWithResponse(Collections.emptyList(), headers,
metadata, null, requestConditions, null, null);
}
/**
 * Checks for the existence of the parent of the given path. We do not check for the actual marker
 * blob as parents need only weakly exist.
 *
 * If the parent is a root (container), it will be assumed to exist, so it must be validated
 * elsewhere that the container is a legitimate root within this file system.
 */
boolean checkParentDirectoryExists(Path path) throws IOException {
    /*
    If the parent is just the root (or null, which means the parent is implicitly the default directory which is a
    root), that means we are checking a container, which is always considered to exist. Otherwise, perform normal
    existence check.
    */
    Path parent = path.getParent();
    // Reuse the already-computed parent instead of re-deriving it with a second
    // path.getParent() call (redundant work; behavior is identical since getParent is pure).
    return (parent == null || parent.equals(path.getRoot()))
        || checkDirectoryExists(((AzurePath) parent).toBlobClient());
}
/**
* Checks whether a directory exists by either being empty or having children.
*/
boolean checkDirectoryExists(BlobClient dirBlobClient) throws IOException {
if (dirBlobClient == null) {
throw Utility.logError(logger, new IllegalArgumentException("One or both of the parameters was null."));
}
DirectoryStatus dirStatus = checkDirStatus(dirBlobClient);
return dirStatus.equals(DirectoryStatus.EMPTY) || dirStatus.equals(DirectoryStatus.NOT_EMPTY);
}
Map<String, String> prepareMetadataForDirectory(Map<String, String> metadata) {
if (metadata == null) {
metadata = new HashMap<>();
}
metadata.put(DIR_METADATA_MARKER, "true");
return metadata;
}
BlobContainerClient getContainerClient(BlobClient client) {
return new BlobContainerClientBuilder().endpoint(client.getBlobUrl())
.pipeline(client.getHttpPipeline())
.buildClient();
}
/**
* As noted by the NIO docs, this method is not atomic. It is possible to delete a file in use by another process,
* and doing so will not immediately invalidate any channels open to that file--they will simply start to fail.
* Root directories cannot be deleted even when empty.
*
* {@inheritDoc}
*/
@Override
/**
* As stated in the nio docs, this method is not atomic. More specifically, the checks necessary to validate the
* inputs and state of the file system are not atomic with the actual copying of data. If the copy is triggered,
* the copy itself is atomic and only a complete copy will ever be left at the destination.
*
* In addition to those in the nio javadocs, this method has the following requirements for successful completion.
* {@link StandardCopyOption
* if this option is not passed, an {@link UnsupportedOperationException} will be thrown. Neither the source nor the
* destination can be a root directory; if either is a root directory, an {@link IllegalArgumentException} will be
* thrown. The parent directory of the destination must at least weakly exist; if it does not, an
* {@link IOException} will be thrown. The only supported option other than
* {@link StandardCopyOption
* other option will result in an {@link UnsupportedOperationException}.
*
* This method supports both virtual and concrete directories as both the source and destination. Unlike when
* creating a directory, the existence of a virtual directory at the destination will cause this operation to fail.
* This is in order to prevent the possibility of overwriting a non-empty virtual directory with a file. Still, as
* mentioned above, this check is not atomic with the creation of the resultant directory.
*
* {@inheritDoc}
* @see
*/
@Override
public void copy(Path source, Path destination, CopyOption... copyOptions) throws IOException {
    // Both paths must belong to this provider.
    AzurePath aSource = validatePathInstanceType(source);
    AzurePath aDestination = validatePathInstanceType(destination);
    // Copying a resource onto itself is a no-op per the Files.copy contract.
    if (aSource.equals(aDestination)) {
        return;
    }
    // Validate options: COPY_ATTRIBUTES is mandatory (the service always copies attributes),
    // REPLACE_EXISTING is optional, anything else is unsupported.
    boolean replaceExisting = false;
    List<CopyOption> optionsList = new ArrayList<>(Arrays.asList(copyOptions));
    if (!optionsList.contains(StandardCopyOption.COPY_ATTRIBUTES)) {
        throw Utility.logError(logger, new UnsupportedOperationException("StandardCopyOption.COPY_ATTRIBUTES "
            + "must be specified as the service will always copy file attributes."));
    }
    optionsList.remove(StandardCopyOption.COPY_ATTRIBUTES);
    if (optionsList.contains(StandardCopyOption.REPLACE_EXISTING)) {
        replaceExisting = true;
        optionsList.remove(StandardCopyOption.REPLACE_EXISTING);
    }
    if (!optionsList.isEmpty()) {
        // Fixed typo in the error message: "StandareCopyOption" -> "StandardCopyOption".
        throw Utility.logError(logger, new UnsupportedOperationException("Unsupported copy option found. Only "
            + "StandardCopyOption.COPY_ATTRIBUTES and StandardCopyOption.REPLACE_EXISTING are supported."));
    }
    validateNotRoot(aSource, "Copy source");
    validateNotRoot(aDestination, "Copy destination");
    BlobClient sourceBlob = aSource.toBlobClient();
    BlobClient destinationBlob = aDestination.toBlobClient();
    // A non-empty directory (concrete or virtual) at the destination may never be overwritten.
    DirectoryStatus destinationStatus = checkDirStatus(destinationBlob);
    if (destinationStatus.equals(DirectoryStatus.NOT_EMPTY)) {
        throw Utility.logError(logger, new DirectoryNotEmptyException(aDestination.toString()));
    }
    /*
    Set request conditions if we should not overwrite. We can error out here if we know something already exists,
    but we will also create request conditions as a safeguard against overwriting something that was created
    between our check and put.
    */
    BlobRequestConditions requestConditions = null;
    if (!replaceExisting) {
        if (!destinationStatus.equals(DirectoryStatus.DOES_NOT_EXIST)) {
            throw Utility.logError(logger, new FileAlreadyExistsException(aDestination.toString()));
        }
        requestConditions = new BlobRequestConditions().setIfNoneMatch("*");
    }
    /*
    More path validation.
    Check that the parent for the destination exists. We only need to perform this check if there is nothing
    currently at the destination, for if the destination exists, its parent at least weakly exists and we
    can skip a service call.
    */
    if (destinationStatus.equals(DirectoryStatus.DOES_NOT_EXIST) && !checkParentDirectoryExists(aDestination)) {
        // Fixed missing space between the two sentences of the error message.
        throw Utility.logError(logger, new IOException("Parent directory of destination location does not exist. "
            + "The destination path is therefore invalid. Destination: " + aDestination.toString()));
    }
    /*
    Try to copy the resource at the source path.
    There is an optimization here where we try to do the copy first and only check for a virtual directory if
    there's a 404. In the cases of files and concrete directories, this only requires one request. For virtual
    directories, however, this requires three requests: failed copy, check status, create directory. Depending on
    customer scenarios and how many virtual directories they copy, it could be better to check the directory status
    first and then do a copy or createDir, which would always be two requests for all resource types.
    */
    try {
        SyncPoller<BlobCopyInfo, Void> pollResponse =
            destinationBlob.beginCopy(sourceBlob.getBlobUrl(), null, null, null, null, requestConditions, null);
        pollResponse.waitForCompletion(Duration.ofSeconds(COPY_TIMEOUT_SECONDS));
    } catch (BlobStorageException e) {
        // A 404 on the source may mean the source is a virtual directory, which has no backing
        // blob; in that case materialize a concrete directory at the destination instead.
        if (e.getErrorCode().equals(BlobErrorCode.BLOB_NOT_FOUND)
            && !checkDirStatus(sourceBlob).equals(DirectoryStatus.DOES_NOT_EXIST)) {
            /*
            We already checked that the parent exists and validated the paths above, so we can put the blob
            directly.
            */
            putDirectoryBlob(destinationBlob, null, null, requestConditions);
        } else {
            throw Utility.logError(logger, new IOException(e));
        }
    } catch (RuntimeException e) {
        // The poller surfaces timeouts and transport failures as RuntimeExceptions; wrap as IOException.
        throw Utility.logError(logger, new IOException(e));
    }
}
/**
 * Determines whether a directory does not exist, exists and is empty, exists and is non-empty,
 * or is actually a non-directory resource. Accommodates virtual directories (a blob-name prefix
 * with no backing marker blob). This method will not check the status of root directories.
 *
 * @param dirBlobClient client pointing at the prospective directory's blob location; must not be null.
 * @return the {@link DirectoryStatus} of the location.
 * @throws IOException if the service listing call fails.
 */
DirectoryStatus checkDirStatus(BlobClient dirBlobClient) throws IOException {
    if (dirBlobClient == null) {
        throw Utility.logError(logger, new IllegalArgumentException("The blob client was null."));
    }
    BlobContainerClient containerClient = getContainerClient(dirBlobClient);
    // Two results per page is enough: we only need to distinguish zero / exactly-one / more-than-one.
    ListBlobsOptions listOptions = new ListBlobsOptions().setMaxResultsPerPage(2)
        .setPrefix(dirBlobClient.getBlobName())
        .setDetails(new BlobListDetails().setRetrieveMetadata(true));
    /*
    Do a list on prefix.
    Zero elements means no virtual dir. Does not exist.
    One element that matches this dir means empty.
    One element that doesn't match this dir or more than one element. Not empty.
    One element that matches the name but does not have a directory marker means the resource is not a directory.
    Note that blob names that match the prefix exactly are returned in listing operations.
    */
    try {
        Iterator<BlobItem> blobIterator = containerClient.listBlobsByHierarchy(AzureFileSystem.PATH_SEPARATOR,
            listOptions, null).iterator();
        if (!blobIterator.hasNext()) {
            return DirectoryStatus.DOES_NOT_EXIST;
        } else {
            BlobItem item = blobIterator.next();
            if (blobIterator.hasNext()) {
                return DirectoryStatus.NOT_EMPTY;
            }
            if (!item.getName().equals(dirBlobClient.getBlobName())) {
                /*
                Names do not match. Must be a virtual dir with one item. e.g. blob with name "foo/bar" means dir "foo"
                exists.
                */
                return DirectoryStatus.NOT_EMPTY;
            }
            // Exact-name match: a marker blob means an empty concrete directory; otherwise it's a file.
            if (item.getMetadata() != null && item.getMetadata().containsKey(DIR_METADATA_MARKER)) {
                return DirectoryStatus.EMPTY;
            }
            return DirectoryStatus.NOT_A_DIRECTORY;
        }
    } catch (BlobStorageException e) {
        throw Utility.logError(logger, new IOException(e));
    }
}
/*int checkRootDirStatus(BlobContainerClient rootClient) {
}*/
/**
 * Moves the resource at the source path to the destination.
 * <p>
 * NOTE(review): unimplemented stub — currently a silent no-op. Callers relying on
 * {@code Files.move} semantics will see no effect. TODO: implement or throw
 * {@code UnsupportedOperationException}.
 *
 * {@inheritDoc}
 */
@Override
public void move(Path path, Path path1, CopyOption... copyOptions) throws IOException {
}
/**
 * Tells whether the two paths locate the same file.
 * <p>
 * NOTE(review): unimplemented stub — always returns {@code false}, even for identical paths.
 * TODO: implement.
 *
 * {@inheritDoc}
 */
@Override
public boolean isSameFile(Path path, Path path1) throws IOException {
    return false;
}
/**
 * Tells whether the file at the given path is considered hidden.
 * <p>
 * NOTE(review): unimplemented stub — always returns {@code false}. TODO: implement.
 *
 * {@inheritDoc}
 */
@Override
public boolean isHidden(Path path) throws IOException {
    return false;
}
/**
 * Returns the file store where the file at the given path is located.
 * <p>
 * NOTE(review): unimplemented stub — always returns {@code null}, which violates the
 * {@code FileSystemProvider} contract. TODO: implement.
 *
 * {@inheritDoc}
 */
@Override
public FileStore getFileStore(Path path) throws IOException {
    return null;
}
/**
 * Checks the existence and accessibility of the file at the given path.
 * <p>
 * NOTE(review): unimplemented stub — never throws, so every access check currently "succeeds".
 * TODO: implement.
 *
 * {@inheritDoc}
 */
@Override
public void checkAccess(Path path, AccessMode... accessModes) throws IOException {
}
/**
 * Returns a file attribute view of the given type.
 * <p>
 * NOTE(review): unimplemented stub — always returns {@code null}. TODO: implement.
 *
 * {@inheritDoc}
 */
@Override
public <V extends FileAttributeView> V getFileAttributeView(Path path, Class<V> aClass, LinkOption... linkOptions) {
    return null;
}
/**
 * Reads the file attributes of the file at the given path as a typed bulk operation.
 * <p>
 * NOTE(review): unimplemented stub — always returns {@code null}. TODO: implement.
 *
 * {@inheritDoc}
 */
@Override
public <A extends BasicFileAttributes> A readAttributes(Path path, Class<A> aClass, LinkOption... linkOptions)
    throws IOException {
    return null;
}
/**
 * Reads the selected file attributes of the file at the given path as a map.
 * <p>
 * NOTE(review): unimplemented stub — always returns {@code null}. TODO: implement.
 *
 * {@inheritDoc}
 */
@Override
public Map<String, Object> readAttributes(Path path, String s, LinkOption... linkOptions) throws IOException {
    return null;
}
/**
 * Sets the value of a file attribute on the file at the given path.
 * <p>
 * NOTE(review): unimplemented stub — currently a silent no-op. TODO: implement.
 *
 * {@inheritDoc}
 */
@Override
public void setAttribute(Path path, String s, Object o, LinkOption... linkOptions) throws IOException {
}
// Removes the named file system from the open set so the same account name can be reopened
// later; invoked by AzureFileSystem when it is closed.
void closeFileSystem(String fileSystemName) {
    this.openFileSystems.remove(fileSystemName);
}
/**
 * Extracts the storage account name from the {@code account=} query parameter of an azb URI.
 * Fails if the scheme is wrong, the query is missing, the parameter is absent, or its value is empty.
 *
 * NOTE(review): the string literals below appear truncated by extraction (they end mid-literal
 * at {@code "azb:}); verify against version control before relying on the exact message text.
 *
 * @param uri the file-system URI.
 * @return the account name (never null or empty).
 */
private String extractAccountName(URI uri) {
    if (!uri.getScheme().equals(this.getScheme())) {
        throw Utility.logError(this.logger, new IllegalArgumentException(
            "URI scheme does not match this provider"));
    }
    if (CoreUtils.isNullOrEmpty(uri.getQuery())) {
        throw Utility.logError(this.logger, new IllegalArgumentException("URI does not contain a query "
            + "component. FileSystems require a URI of the format \"azb:
    }
    // Scan the query parameters for "account=<name>"; blockLast() is safe here because the
    // Flux is built from an in-memory array.
    String accountName = Flux.fromArray(uri.getQuery().split("&"))
        .filter(s -> s.startsWith(ACCOUNT_QUERY_KEY + "="))
        .switchIfEmpty(Mono.error(Utility.logError(this.logger, new IllegalArgumentException(
            "URI does not contain an \"" + ACCOUNT_QUERY_KEY + "=\" parameter. FileSystems require a URI "
            + "of the format \"azb:
        .map(s -> s.substring(ACCOUNT_QUERY_KEY.length() + 1))
        .blockLast();
    if (CoreUtils.isNullOrEmpty(accountName)) {
        throw Utility.logError(logger, new IllegalArgumentException("No account name provided in URI query."));
    }
    return accountName;
}
/**
 * Ensures the given path was produced by this provider and narrows it to {@link AzurePath}.
 */
private AzurePath validatePathInstanceType(Path path) {
    if (path instanceof AzurePath) {
        return (AzurePath) path;
    }
    throw Utility.logError(logger, new IllegalArgumentException("This provider cannot operate on subtypes of "
        + "Path other than AzurePath"));
}
/**
 * Rejects root directories as targets of the named operation.
 */
private void validateNotRoot(Path path, String operation) {
    if (!((AzurePath) path).isRoot()) {
        return;
    }
    String message =
        String.format("%s is not supported on a root directory. Path: %s", operation, path.toString());
    throw Utility.logError(logger, new IllegalArgumentException(message));
}
} | class AzureFileSystemProvider extends FileSystemProvider {
private final ClientLogger logger = new ClientLogger(AzureFileSystemProvider.class);
// Attribute names recognized when mapping FileAttributes to HTTP content headers on blobs.
public static final String CONTENT_TYPE = "Content-Type";
public static final String CONTENT_DISPOSITION = "Content-Disposition";
public static final String CONTENT_LANGUAGE = "Content-Language";
public static final String CONTENT_ENCODING = "Content-Encoding";
public static final String CONTENT_MD5 = "Content-MD5";
public static final String CACHE_CONTROL = "Cache-Control";
// Query-parameter key identifying the storage account in a file-system URI.
private static final String ACCOUNT_QUERY_KEY = "account";
// Maximum time to wait for a server-side blob copy to complete.
private static final int COPY_TIMEOUT_SECONDS = 30;
// Metadata key marking a zero-length blob as a concrete directory.
static final String DIR_METADATA_MARKER = "is_hdi_folder";
// Open file systems keyed by account name; concurrent map since provider methods may race.
private final ConcurrentMap<String, FileSystem> openFileSystems;
/**
 * Creates an AzureFileSystemProvider.
 */
public AzureFileSystemProvider() {
    this.openFileSystems = new ConcurrentHashMap<>();
}
/**
 * Returns {@code "azb"}, the URI scheme handled by this provider.
 */
@Override
public String getScheme() {
    return "azb";
}
/**
 * Opens a new file system identified by the account name in the URI's {@code account=} query parameter.
 * <p>
 * Once closed, a file system with the same identifier may be reopened.
 * <p>
 * NOTE(review): the containsKey-then-put sequence is not atomic even on a ConcurrentMap; two
 * concurrent calls for the same account could both "succeed" and one mapping would be silently
 * replaced. Consider {@code putIfAbsent} — confirm whether concurrent opens are a supported scenario.
 *
 * {@inheritDoc}
 */
@Override
public FileSystem newFileSystem(URI uri, Map<String, ?> config) throws IOException {
    String accountName = extractAccountName(uri);
    if (this.openFileSystems.containsKey(accountName)) {
        throw Utility.logError(this.logger, new FileSystemAlreadyExistsException("Name: " + accountName));
    }
    AzureFileSystem afs = new AzureFileSystem(this, accountName, config);
    this.openFileSystems.put(accountName, afs);
    return afs;
}
/**
 * Retrieves a previously opened file system by the account name in the URI.
 * <p>
 * Trying to retrieve a closed file system will throw a {@link FileSystemNotFoundException}. Once closed, a
 * file system with the same identifier may be reopened.
 * <p>
 * NOTE(review): containsKey-then-get is not atomic; a concurrent close between the two calls
 * makes this return {@code null} instead of throwing. Consider a single {@code get} with a null check.
 *
 * {@inheritDoc}
 */
@Override
public FileSystem getFileSystem(URI uri) {
    String accountName = extractAccountName(uri);
    if (!this.openFileSystems.containsKey(accountName)) {
        throw Utility.logError(this.logger, new FileSystemNotFoundException("Name: " + accountName));
    }
    return this.openFileSystems.get(accountName);
}
/**
 * Resolves the URI against the already-open file system for its account.
 *
 * {@inheritDoc}
 */
@Override
public Path getPath(URI uri) {
    FileSystem fileSystem = getFileSystem(uri);
    return fileSystem.getPath(uri.getPath());
}
/**
 * Opens a seekable byte channel to the file at the given path.
 * <p>
 * NOTE(review): unimplemented stub — always returns {@code null}. TODO: implement.
 *
 * {@inheritDoc}
 */
@Override
public SeekableByteChannel newByteChannel(Path path, Set<? extends OpenOption> set,
    FileAttribute<?>... fileAttributes) throws IOException {
    return null;
}
/**
 * Opens a directory stream over the entries of the directory at the given path.
 * <p>
 * NOTE(review): unimplemented stub — always returns {@code null}. TODO: implement.
 *
 * {@inheritDoc}
 */
@Override
public DirectoryStream<Path> newDirectoryStream(Path path, DirectoryStream.Filter<? super Path> filter)
    throws IOException {
    return null;
}
/**
* Creates a new directory at the specified path.
* <p>
* The existence of a directory in the {@code AzureFileSystem} is defined on two levels. <i>Weak existence</i> is
* defined by the presence of a non-zero number of blobs prefixed with the directory's path. This concept is also
* known as a <i>virtual directory</i> and enables the file system to work with containers that were pre-loaded
* with data by another source but need to be accessed by this file system. <i>Strong existence</i> is defined as
* the presence of an actual storage resource at the given path, which in the case of directories, is a zero-length
* blob whose name is the directory path with a particular metadata field indicating the blob's status as a
* directory. This is also known as a <i>concrete directory</i>. Directories created by this file system will
* strongly exist. Operations targeting directories themselves as the object (e.g. setting properties) will target
* marker blobs underlying concrete directories. Other operations (e.g. listing) will operate on the blob-name
* prefix.
* <p>
* This method fulfills the nio contract of: "The check for the existence of the file and the creation of the
* directory if it does not exist are a single operation that is atomic with respect to all other filesystem
* activities that might affect the directory." More specifically, this method will atomically check for <i>strong
* existence</i> of another file or directory at the given path and fail if one is present. On the other hand, we
* only check for <i>weak existence</i> of the parent to determine if the given path is valid. Additionally, the
* action of checking whether the parent exists, is <i>not</i> atomic with the creation of the directory. Note that
* while it is possible that the parent may be deleted between when the parent is determined to exist and the
* creation of the child, the creation of the child will always ensure the existence of a virtual parent, so the
* child will never be left floating and unreachable. The different checks on parent and child is due to limitations
* in the Storage service API.
* <p>
* There may be some unintuitive behavior when working with directories in this file system, particularly virtual
* directories(usually those not created by this file system). A virtual directory will disappear as soon as all its
* children have been deleted. Furthermore, if a directory with the given path weakly exists at the time of calling
* this method, this method will still return success and create a concrete directory at the target location.
* In other words, it is possible to "double create" a directory if it first weakly exists and then is strongly
* created. This is both because it is impossible to atomically check if a virtual directory exists while creating a
* concrete directory and because such behavior will have minimal side effects--no files will be overwritten and the
* directory will still be available for writing as intended, though it may not be empty.
* <p>
* This method will attempt to extract standard HTTP content headers from the list of file attributes to set them
* as blob headers. All other attributes will be set as blob metadata. The value of every attribute will be
* converted to a {@code String} with the exception of the Content-MD5 attribute which expects a {@code byte[]}.
* When extracting the content headers, the following strings will be used for comparison (constants for these
* values can be found on this type):
* <ul>
* <li>{@code Content-Type}</li>
* <li>{@code Content-Disposition}</li>
* <li>{@code Content-Language}</li>
* <li>{@code Content-Encoding}</li>
* <li>{@code Content-MD5}</li>
* <li>{@code Cache-Control}</li>
* </ul>
* Note that these properties also have a particular semantic in that if one is specified, all are updated. In other
* words, if any of the above is set, all those that are not set will be cleared. See the
* <a href="https:
* information.
*
* {@inheritDoc}
*/
@Override
public void createDirectory(Path path, FileAttribute<?>... fileAttributes) throws IOException {
    AzurePath aPath = validatePathInstanceType(path);
    // Normalize an explicitly-null varargs array to empty for uniform handling below.
    fileAttributes = fileAttributes == null ? new FileAttribute<?>[0] : fileAttributes;
    BlobClient client = aPath.toBlobClient();
    validateNotRoot(aPath, "Create");
    if (checkParentDirectoryExists(aPath)) {
        try {
            // Split the attributes into HTTP content headers and plain blob metadata.
            List<FileAttribute<?>> attributeList = new ArrayList<>(Arrays.asList(fileAttributes));
            BlobHttpHeaders headers = Utility.extractHttpHeaders(attributeList, logger);
            Map<String, String> metadata = Utility.convertAttributesToMetadata(attributeList);
            // If-None-Match: * makes the existence check atomic with the blob creation.
            putDirectoryBlob(client, headers, metadata, new BlobRequestConditions().setIfNoneMatch("*"));
        } catch (BlobStorageException e) {
            if (e.getStatusCode() == HttpURLConnection.HTTP_CONFLICT
                && e.getErrorCode().equals(BlobErrorCode.BLOB_ALREADY_EXISTS)) {
                throw Utility.logError(logger, new FileAlreadyExistsException(aPath.toString()));
            } else {
                // Fixed typo in the error message: "occured" -> "occurred".
                throw Utility.logError(logger, new IOException("An error occurred when creating the directory", e));
            }
        }
    } else {
        throw Utility.logError(logger, new IOException("Parent directory does not exist for path: "
            + aPath.toString()));
    }
}
/**
 * Creates the actual directory marker: a zero-length block blob (committed with an empty block
 * list) carrying the directory metadata flag. This method should only be used when any necessary
 * checks for proper conditions of directory creation (e.g. parent existence) have already been
 * performed.
 *
 * @param destinationClient A blobClient pointing to the location where the directory should be put.
 * @param headers Any headers that should be associated with the directory.
 * @param metadata Any metadata that should be associated with the directory. This method will add the necessary
 * metadata to distinguish this blob as a directory.
 * @param requestConditions Any necessary request conditions to pass when creating the directory blob.
 */
private void putDirectoryBlob(BlobClient destinationClient, BlobHttpHeaders headers, Map<String, String> metadata,
    BlobRequestConditions requestConditions) {
    metadata = prepareMetadataForDirectory(metadata);
    // Committing an empty block list atomically creates the zero-length marker blob.
    destinationClient.getBlockBlobClient().commitBlockListWithResponse(Collections.emptyList(), headers,
        metadata, null, requestConditions, null, null);
}
/**
 * Checks for the existence of the parent of the given path. We do not check for the actual marker blob as parents
 * need only weakly exist.
 *
 * If the parent is a root (container), it will be assumed to exist, so it must be validated elsewhere that the
 * container is a legitimate root within this file system.
 *
 * @param path the path whose parent should be checked.
 * @return true if the parent weakly exists (or is a root); false otherwise.
 * @throws IOException if the existence check against the service fails.
 */
boolean checkParentDirectoryExists(Path path) throws IOException {
    /*
    If the parent is just the root (or null, which means the parent is implicitly the default directory which is a
    root), that means we are checking a container, which is always considered to exist. Otherwise, perform normal
    existence check.
    */
    Path parent = path.getParent();
    if (parent == null || parent.equals(path.getRoot())) {
        return true;
    }
    // Reuse the already-computed parent rather than calling path.getParent() a second time.
    return checkDirectoryExists(((AzurePath) parent).toBlobClient());
}
/**
 * Checks whether a directory exists, i.e. whether it is either an empty concrete directory or has
 * children (concrete or virtual).
 *
 * @param dirBlobClient client pointing at the prospective directory; must not be null.
 * @return true if the directory weakly or strongly exists.
 * @throws IOException if the status check against the service fails.
 */
boolean checkDirectoryExists(BlobClient dirBlobClient) throws IOException {
    if (dirBlobClient == null) {
        // Fixed misleading message: this method takes exactly one parameter, but the old text
        // said "One or both of the parameters was null."
        throw Utility.logError(logger, new IllegalArgumentException("The blob client was null."));
    }
    DirectoryStatus dirStatus = checkDirStatus(dirBlobClient);
    return dirStatus.equals(DirectoryStatus.EMPTY) || dirStatus.equals(DirectoryStatus.NOT_EMPTY);
}
/**
 * Returns the given metadata map (creating one if null) with the directory-marker entry added.
 */
Map<String, String> prepareMetadataForDirectory(Map<String, String> metadata) {
    Map<String, String> result = (metadata == null) ? new HashMap<>() : metadata;
    result.put(DIR_METADATA_MARKER, "true");
    return result;
}
/**
 * Builds a container client that shares the endpoint and HTTP pipeline of the given blob client.
 */
BlobContainerClient getContainerClient(BlobClient client) {
    BlobContainerClientBuilder builder = new BlobContainerClientBuilder()
        .endpoint(client.getBlobUrl())
        .pipeline(client.getHttpPipeline());
    return builder.buildClient();
}
/**
* Deletes the specified resource.
* <p>
* This method is not atomic. It is possible to delete a file in use by another process,
* and doing so will not immediately invalidate any channels open to that file--they will simply start to fail.
* Root directories cannot be deleted even when empty.
*
* {@inheritDoc}
*/
@Override
/**
* Copies the resource at the source location to the destination.
* <p>
* This method is not atomic. More specifically, the checks necessary to validate the
* inputs and state of the file system are not atomic with the actual copying of data. If the copy is triggered,
* the copy itself is atomic and only a complete copy will ever be left at the destination.
*
* In addition to those in the nio javadocs, this method has the following requirements for successful completion.
* {@link StandardCopyOption
* if this option is not passed, an {@link UnsupportedOperationException} will be thrown. Neither the source nor the
* destination can be a root directory; if either is a root directory, an {@link IllegalArgumentException} will be
* thrown. The parent directory of the destination must at least weakly exist; if it does not, an
* {@link IOException} will be thrown. The only supported option other than
* {@link StandardCopyOption
* other option will result in an {@link UnsupportedOperationException}.
*
* This method supports both virtual and concrete directories as both the source and destination. Unlike when
* creating a directory, the existence of a virtual directory at the destination will cause this operation to fail.
* This is in order to prevent the possibility of overwriting a non-empty virtual directory with a file. Still, as
* mentioned above, this check is not atomic with the creation of the resultant directory.
*
* {@inheritDoc}
* @see
*/
@Override
public void copy(Path source, Path destination, CopyOption... copyOptions) throws IOException {
    // Both paths must belong to this provider.
    AzurePath aSource = validatePathInstanceType(source);
    AzurePath aDestination = validatePathInstanceType(destination);
    // Copying a resource onto itself is a no-op per the Files.copy contract.
    if (aSource.equals(aDestination)) {
        return;
    }
    // Validate options: COPY_ATTRIBUTES is mandatory (the service always copies attributes),
    // REPLACE_EXISTING is optional, anything else is unsupported.
    boolean replaceExisting = false;
    List<CopyOption> optionsList = new ArrayList<>(Arrays.asList(copyOptions));
    if (!optionsList.contains(StandardCopyOption.COPY_ATTRIBUTES)) {
        throw Utility.logError(logger, new UnsupportedOperationException("StandardCopyOption.COPY_ATTRIBUTES "
            + "must be specified as the service will always copy file attributes."));
    }
    optionsList.remove(StandardCopyOption.COPY_ATTRIBUTES);
    if (optionsList.contains(StandardCopyOption.REPLACE_EXISTING)) {
        replaceExisting = true;
        optionsList.remove(StandardCopyOption.REPLACE_EXISTING);
    }
    if (!optionsList.isEmpty()) {
        // Fixed typo in the error message: "StandareCopyOption" -> "StandardCopyOption".
        throw Utility.logError(logger, new UnsupportedOperationException("Unsupported copy option found. Only "
            + "StandardCopyOption.COPY_ATTRIBUTES and StandardCopyOption.REPLACE_EXISTING are supported."));
    }
    validateNotRoot(aSource, "Copy source");
    validateNotRoot(aDestination, "Copy destination");
    BlobClient sourceBlob = aSource.toBlobClient();
    BlobClient destinationBlob = aDestination.toBlobClient();
    // A non-empty directory (concrete or virtual) at the destination may never be overwritten.
    DirectoryStatus destinationStatus = checkDirStatus(destinationBlob);
    if (destinationStatus.equals(DirectoryStatus.NOT_EMPTY)) {
        throw Utility.logError(logger, new DirectoryNotEmptyException(aDestination.toString()));
    }
    /*
    Set request conditions if we should not overwrite. We can error out here if we know something already exists,
    but we will also create request conditions as a safeguard against overwriting something that was created
    between our check and put.
    */
    BlobRequestConditions requestConditions = null;
    if (!replaceExisting) {
        if (!destinationStatus.equals(DirectoryStatus.DOES_NOT_EXIST)) {
            throw Utility.logError(logger, new FileAlreadyExistsException(aDestination.toString()));
        }
        requestConditions = new BlobRequestConditions().setIfNoneMatch("*");
    }
    /*
    More path validation.
    Check that the parent for the destination exists. We only need to perform this check if there is nothing
    currently at the destination, for if the destination exists, its parent at least weakly exists and we
    can skip a service call.
    */
    if (destinationStatus.equals(DirectoryStatus.DOES_NOT_EXIST) && !checkParentDirectoryExists(aDestination)) {
        // Fixed missing space between the two sentences of the error message.
        throw Utility.logError(logger, new IOException("Parent directory of destination location does not exist. "
            + "The destination path is therefore invalid. Destination: " + aDestination.toString()));
    }
    /*
    Try to copy the resource at the source path.
    There is an optimization here where we try to do the copy first and only check for a virtual directory if
    there's a 404. In the cases of files and concrete directories, this only requires one request. For virtual
    directories, however, this requires three requests: failed copy, check status, create directory. Depending on
    customer scenarios and how many virtual directories they copy, it could be better to check the directory status
    first and then do a copy or createDir, which would always be two requests for all resource types.
    */
    try {
        SyncPoller<BlobCopyInfo, Void> pollResponse =
            destinationBlob.beginCopy(sourceBlob.getBlobUrl(), null, null, null, null, requestConditions, null);
        pollResponse.waitForCompletion(Duration.ofSeconds(COPY_TIMEOUT_SECONDS));
    } catch (BlobStorageException e) {
        // A 404 on the source may mean the source is a virtual directory, which has no backing
        // blob; in that case materialize a concrete directory at the destination instead.
        if (e.getErrorCode().equals(BlobErrorCode.BLOB_NOT_FOUND)
            && !checkDirStatus(sourceBlob).equals(DirectoryStatus.DOES_NOT_EXIST)) {
            /*
            We already checked that the parent exists and validated the paths above, so we can put the blob
            directly.
            */
            putDirectoryBlob(destinationBlob, null, null, requestConditions);
        } else {
            throw Utility.logError(logger, new IOException(e));
        }
    } catch (RuntimeException e) {
        // The poller surfaces timeouts and transport failures as RuntimeExceptions; wrap as IOException.
        throw Utility.logError(logger, new IOException(e));
    }
}
/**
 * Determines whether a directory does not exist, exists and is empty, exists and is non-empty,
 * or is actually a non-directory resource. Accommodates virtual directories (a blob-name prefix
 * with no backing marker blob). This method will not check the status of root directories.
 *
 * @param dirBlobClient client pointing at the prospective directory's blob location; must not be null.
 * @return the {@link DirectoryStatus} of the location.
 * @throws IOException if the service listing call fails.
 */
DirectoryStatus checkDirStatus(BlobClient dirBlobClient) throws IOException {
    if (dirBlobClient == null) {
        throw Utility.logError(logger, new IllegalArgumentException("The blob client was null."));
    }
    BlobContainerClient containerClient = getContainerClient(dirBlobClient);
    // Two results per page is enough: we only need to distinguish zero / exactly-one / more-than-one.
    ListBlobsOptions listOptions = new ListBlobsOptions().setMaxResultsPerPage(2)
        .setPrefix(dirBlobClient.getBlobName())
        .setDetails(new BlobListDetails().setRetrieveMetadata(true));
    /*
    Do a list on prefix.
    Zero elements means no virtual dir. Does not exist.
    One element that matches this dir means empty.
    One element that doesn't match this dir or more than one element. Not empty.
    One element that matches the name but does not have a directory marker means the resource is not a directory.
    Note that blob names that match the prefix exactly are returned in listing operations.
    */
    try {
        Iterator<BlobItem> blobIterator = containerClient.listBlobsByHierarchy(AzureFileSystem.PATH_SEPARATOR,
            listOptions, null).iterator();
        if (!blobIterator.hasNext()) {
            return DirectoryStatus.DOES_NOT_EXIST;
        } else {
            BlobItem item = blobIterator.next();
            if (blobIterator.hasNext()) {
                return DirectoryStatus.NOT_EMPTY;
            }
            if (!item.getName().equals(dirBlobClient.getBlobName())) {
                /*
                Names do not match. Must be a virtual dir with one item. e.g. blob with name "foo/bar" means dir "foo"
                exists.
                */
                return DirectoryStatus.NOT_EMPTY;
            }
            // Exact-name match: a marker blob means an empty concrete directory; otherwise it's a file.
            if (item.getMetadata() != null && item.getMetadata().containsKey(DIR_METADATA_MARKER)) {
                return DirectoryStatus.EMPTY;
            }
            return DirectoryStatus.NOT_A_DIRECTORY;
        }
    } catch (BlobStorageException e) {
        throw Utility.logError(logger, new IOException(e));
    }
}
/*int checkRootDirStatus(BlobContainerClient rootClient) {
}*/
/**
 * Moves the resource at the source path to the destination.
 * <p>
 * NOTE(review): unimplemented stub — currently a silent no-op. TODO: implement or throw
 * {@code UnsupportedOperationException}.
 *
 * {@inheritDoc}
 */
@Override
public void move(Path path, Path path1, CopyOption... copyOptions) throws IOException {
}
/**
 * Tells whether the two paths locate the same file.
 * <p>
 * NOTE(review): unimplemented stub — always returns {@code false}, even for identical paths.
 * TODO: implement.
 *
 * {@inheritDoc}
 */
@Override
public boolean isSameFile(Path path, Path path1) throws IOException {
    return false;
}
/**
 * Tells whether the file at the given path is considered hidden.
 * <p>
 * NOTE(review): unimplemented stub — always returns {@code false}. TODO: implement.
 *
 * {@inheritDoc}
 */
@Override
public boolean isHidden(Path path) throws IOException {
    return false;
}
/**
 * Returns the file store where the file at the given path is located.
 * <p>
 * NOTE(review): unimplemented stub — always returns {@code null}, which violates the
 * {@code FileSystemProvider} contract. TODO: implement.
 *
 * {@inheritDoc}
 */
@Override
public FileStore getFileStore(Path path) throws IOException {
    return null;
}
/**
 * Checks the existence and accessibility of the file at the given path.
 * <p>
 * NOTE(review): unimplemented stub — never throws, so every access check currently "succeeds".
 * TODO: implement.
 *
 * {@inheritDoc}
 */
@Override
public void checkAccess(Path path, AccessMode... accessModes) throws IOException {
}
/**
 * Returns a file attribute view of the given type.
 * <p>
 * NOTE(review): unimplemented stub — always returns {@code null}. TODO: implement.
 *
 * {@inheritDoc}
 */
@Override
public <V extends FileAttributeView> V getFileAttributeView(Path path, Class<V> aClass, LinkOption... linkOptions) {
    return null;
}
/**
 * Reads the file attributes of the file at the given path as a typed bulk operation.
 * <p>
 * NOTE(review): unimplemented stub — always returns {@code null}. TODO: implement.
 *
 * {@inheritDoc}
 */
@Override
public <A extends BasicFileAttributes> A readAttributes(Path path, Class<A> aClass, LinkOption... linkOptions)
    throws IOException {
    return null;
}
/**
 * Reads the selected file attributes of the file at the given path as a map.
 * <p>
 * NOTE(review): unimplemented stub — always returns {@code null}. TODO: implement.
 *
 * {@inheritDoc}
 */
@Override
public Map<String, Object> readAttributes(Path path, String s, LinkOption... linkOptions) throws IOException {
    return null;
}
/**
 * Sets the value of a file attribute on the file at the given path.
 * <p>
 * NOTE(review): unimplemented stub — currently a silent no-op. TODO: implement.
 *
 * {@inheritDoc}
 */
@Override
public void setAttribute(Path path, String s, Object o, LinkOption... linkOptions) throws IOException {
}
// Removes the named file system from the open set so the same account name can be reopened
// later; invoked by AzureFileSystem when it is closed.
void closeFileSystem(String fileSystemName) {
    this.openFileSystems.remove(fileSystemName);
}
/**
 * Extracts the storage account name from the {@code account=} query parameter of an azb URI.
 * Fails if the scheme is wrong, the query is missing, the parameter is absent, or its value is empty.
 *
 * NOTE(review): the string literals below appear truncated by extraction (they end mid-literal
 * at {@code "azb:}); verify against version control before relying on the exact message text.
 *
 * @param uri the file-system URI.
 * @return the account name (never null or empty).
 */
private String extractAccountName(URI uri) {
    if (!uri.getScheme().equals(this.getScheme())) {
        throw Utility.logError(this.logger, new IllegalArgumentException(
            "URI scheme does not match this provider"));
    }
    if (CoreUtils.isNullOrEmpty(uri.getQuery())) {
        throw Utility.logError(this.logger, new IllegalArgumentException("URI does not contain a query "
            + "component. FileSystems require a URI of the format \"azb:
    }
    // Scan the query parameters for "account=<name>"; blockLast() is safe here because the
    // Flux is built from an in-memory array.
    String accountName = Flux.fromArray(uri.getQuery().split("&"))
        .filter(s -> s.startsWith(ACCOUNT_QUERY_KEY + "="))
        .switchIfEmpty(Mono.error(Utility.logError(this.logger, new IllegalArgumentException(
            "URI does not contain an \"" + ACCOUNT_QUERY_KEY + "=\" parameter. FileSystems require a URI "
            + "of the format \"azb:
        .map(s -> s.substring(ACCOUNT_QUERY_KEY.length() + 1))
        .blockLast();
    if (CoreUtils.isNullOrEmpty(accountName)) {
        throw Utility.logError(logger, new IllegalArgumentException("No account name provided in URI query."));
    }
    return accountName;
}
/**
 * Ensures the given path was produced by this provider and narrows it to {@link AzurePath}.
 */
private AzurePath validatePathInstanceType(Path path) {
    if (path instanceof AzurePath) {
        return (AzurePath) path;
    }
    throw Utility.logError(logger, new IllegalArgumentException("This provider cannot operate on subtypes of "
        + "Path other than AzurePath"));
}
/**
 * Rejects root directories as targets of the named operation.
 */
private void validateNotRoot(Path path, String operation) {
    if (!((AzurePath) path).isRoot()) {
        return;
    }
    String message =
        String.format("%s is not supported on a root directory. Path: %s", operation, path.toString());
    throw Utility.logError(logger, new IllegalArgumentException(message));
}
} |
It looks like the cached-page mechanism was removed. Are there any performance or other concerns compared to when the original cached page was introduced? Alternatively, we can revisit this later if complaints come in.
this.items.addAll(pagedResponseIterator.next().getValue());
} | this.items.addAll(pagedResponseIterator.next().getValue()); | protected void loadNextPage() {
this.items.addAll(pagedResponseIterator.next().getValue());
} | class PagedList<E> implements List<E> {
/** The items retrieved and materialized so far; grows as pages are loaded lazily. */
private final List<E> items;
/** The paged response iterator for not retrieved items. */
private Iterator<PagedResponse<E>> pagedResponseIterator;
/**
 * Creates an empty instance of PagedList with no backing pages.
 */
public PagedList() {
    items = new ArrayList<>();
    pagedResponseIterator = Collections.emptyIterator();
}
/**
 * Creates an instance of PagedList from a {@link PagedIterable}. Pages are not fetched eagerly;
 * they are pulled from the page iterator on demand.
 *
 * @param pagedIterable the {@link PagedIterable} object; must not be null.
 * @throws NullPointerException if {@code pagedIterable} is null.
 */
public PagedList(PagedIterable<E> pagedIterable) {
    items = new ArrayList<>();
    Objects.requireNonNull(pagedIterable, "'pagedIterable' cannot be null.");
    this.pagedResponseIterator = pagedIterable.iterableByPage().iterator();
}
/**
 * If there are more pages available.
 * NOTE(review): may trigger a service call, depending on the underlying page iterator — confirm.
 *
 * @return true if there are more pages to load. False otherwise.
 */
protected boolean hasNextPage() {
    return pagedResponseIterator.hasNext();
}
/**
 * Keep loading the next page from the next page link until all items are loaded.
 * Any failures from page retrieval are surfaced as runtime exceptions by the underlying iterator.
 */
public void loadAll() {
    while (hasNextPage()) {
        loadNextPage();
    }
}
// Size requires the full list, so all remaining pages are fetched first.
@Override
public int size() {
    loadAll();
    return items.size();
}
// Empty only when nothing is loaded AND no further pages remain.
// NOTE(review): if a remaining page exists but contains zero items, this reports non-empty
// without loading it — confirm whether empty pages can occur in practice.
@Override
public boolean isEmpty() {
    return items.isEmpty() && !hasNextPage();
}
// Delegates to indexOf, which searches loaded items first and then loads pages incrementally.
@Override
public boolean contains(Object o) {
    return indexOf(o) >= 0;
}
// Lazy iterator: pages are loaded as iteration advances past the loaded items.
@Override
public Iterator<E> iterator() {
    return new ListItr(0);
}
// Converting to an array requires the complete list, so all pages are loaded first.
@Override
public Object[] toArray() {
    loadAll();
    return items.toArray();
}
// Typed array conversion also requires the complete list; all pages are loaded first.
@Override
public <T> T[] toArray(T[] a) {
    loadAll();
    return items.toArray(a);
}
// Loads all pages first so the new element is appended at the true end of the list.
@Override
public boolean add(E e) {
    loadAll();
    return items.add(e);
}
// Removes the first occurrence of the element; indexOf loads pages incrementally until found.
@Override
public boolean remove(Object o) {
    int idx = indexOf(o);
    if (idx < 0) {
        return false;
    }
    items.remove(idx);
    return true;
}
// True iff every element of the collection is present; each lookup may load further pages.
@Override
public boolean containsAll(Collection<?> c) {
    for (Iterator<?> it = c.iterator(); it.hasNext();) {
        if (!contains(it.next())) {
            return false;
        }
    }
    return true;
}
// NOTE(review): unlike add(E), this does not call loadAll() first — the new elements land
// before any not-yet-fetched pages. Confirm this ordering is intended.
@Override
public boolean addAll(Collection<? extends E> c) {
    return items.addAll(c);
}
// NOTE(review): the index is interpreted against the items loaded so far; pages are not loaded
// to validate it. Confirm this is intended.
@Override
public boolean addAll(int index, Collection<? extends E> c) {
    return items.addAll(index, c);
}
// NOTE(review): operates only on items loaded so far; elements on unfetched pages are not removed.
@Override
public boolean removeAll(Collection<?> c) {
    return items.removeAll(c);
}
// NOTE(review): operates only on items loaded so far; elements on unfetched pages are unaffected.
@Override
public boolean retainAll(Collection<?> c) {
    return items.retainAll(c);
}
@Override
public void clear() {
    // Drop the cached items AND discard any pages not yet fetched.
    items.clear();
    pagedResponseIterator = Collections.emptyIterator();
}
@Override
public E get(int index) {
    // Page in elements up to 'index' on demand.
    tryLoadToIndex(index);
    return items.get(index);
}
@Override
public E set(int index, E element) {
    // Page in elements up to 'index' on demand.
    tryLoadToIndex(index);
    return items.set(index, element);
}
@Override
public void add(int index, E element) {
    // FIX(review): consistent with get/set/remove(int) — the index refers to the logical
    // list, so page in up to it before inserting.
    tryLoadToIndex(index);
    items.add(index, element);
}
@Override
public E remove(int index) {
    // Page in elements up to 'index' on demand.
    tryLoadToIndex(index);
    return items.remove(index);
}
@Override
public int indexOf(Object o) {
    // Check the already-cached prefix first.
    int index = items.indexOf(o);
    if (index != -1) {
        return index;
    }
    // Not cached: fetch page by page, checking only each newly arrived page.
    while (hasNextPage()) {
        int itemsSize = items.size();
        List<E> nextPageItems = pagedResponseIterator.next().getValue();
        this.items.addAll(nextPageItems);
        index = nextPageItems.indexOf(o);
        if (index != -1) {
            // Offset within the new page + count of elements that preceded the page.
            index = itemsSize + index;
            return index;
        }
    }
    return -1;
}
@Override
public int lastIndexOf(Object o) {
    // The last occurrence could be in an unfetched page, so everything must be loaded.
    loadAll();
    return items.lastIndexOf(o);
}
@Override
public ListIterator<E> listIterator() {
    // Lazily paging iterator positioned at the head.
    return new ListItr(0);
}
@Override
public ListIterator<E> listIterator(int index) {
    // Page in elements up to the starting position first.
    tryLoadToIndex(index);
    return new ListItr(index);
}
@Override
public List<E> subList(int fromIndex, int toIndex) {
    // 'toIndex' is exclusive, so the view is fully materialized once items.size() >= toIndex;
    // the previous '>=' comparison loaded one page more than necessary. The fromIndex check
    // was redundant because fromIndex <= toIndex is enforced by items.subList below.
    while (toIndex > items.size() && hasNextPage()) {
        loadNextPage();
    }
    return items.subList(fromIndex, toIndex);
}
/** Loads pages until {@code index} falls within the cached range or no pages remain. */
private void tryLoadToIndex(int index) {
    while (index >= items.size() && hasNextPage()) {
        loadNextPage();
    }
}
/**
 * The implementation of {@link ListIterator} for PagedList. Further pages are fetched lazily
 * as the iterator advances past the cached range.
 */
private class ListItr implements ListIterator<E> {
    /**
     * index of next element to return.
     */
    private int nextIndex;
    /**
     * index of last element returned; -1 if no such action happened.
     */
    private int lastRetIndex = -1;
    /**
     * Creates an instance of the ListIterator.
     *
     * @param index the position in the list to start.
     */
    ListItr(int index) {
        this.nextIndex = index;
    }
    @Override
    public boolean hasNext() {
        // Either inside the cached range, or more pages can still be fetched.
        return this.nextIndex != items.size() || hasNextPage();
    }
    @Override
    public E next() {
        if (this.nextIndex >= items.size()) {
            if (!hasNextPage()) {
                throw new NoSuchElementException();
            } else {
                loadNextPage();
            }
            // Retry after loading one more page (recursion terminates: each load grows items
            // or exhausts the pages).
            return next();
        } else {
            try {
                E nextItem = items.get(this.nextIndex);
                this.lastRetIndex = this.nextIndex;
                this.nextIndex = this.nextIndex + 1;
                return nextItem;
            } catch (IndexOutOfBoundsException ex) {
                // Backing list shrank underneath us.
                throw new ConcurrentModificationException();
            }
        }
    }
    @Override
    public void remove() {
        if (this.lastRetIndex < 0) {
            throw new IllegalStateException();
        } else {
            try {
                items.remove(this.lastRetIndex);
                this.nextIndex = this.lastRetIndex;
                this.lastRetIndex = -1;
            } catch (IndexOutOfBoundsException ex) {
                throw new ConcurrentModificationException();
            }
        }
    }
    @Override
    public boolean hasPrevious() {
        return this.nextIndex != 0;
    }
    @Override
    public E previous() {
        int i = this.nextIndex - 1;
        if (i < 0) {
            throw new NoSuchElementException();
        } else if (i >= items.size()) {
            throw new ConcurrentModificationException();
        } else {
            try {
                this.nextIndex = i;
                this.lastRetIndex = i;
                return items.get(this.lastRetIndex);
            } catch (IndexOutOfBoundsException ex) {
                throw new ConcurrentModificationException();
            }
        }
    }
    @Override
    public int nextIndex() {
        return this.nextIndex;
    }
    @Override
    public int previousIndex() {
        return this.nextIndex - 1;
    }
    @Override
    public void set(E e) {
        if (this.lastRetIndex < 0) {
            throw new IllegalStateException();
        } else {
            try {
                items.set(this.lastRetIndex, e);
            } catch (IndexOutOfBoundsException ex) {
                throw new ConcurrentModificationException();
            }
        }
    }
    @Override
    public void add(E e) {
        try {
            items.add(this.nextIndex, e);
            this.nextIndex = this.nextIndex + 1;
            this.lastRetIndex = -1;
        } catch (IndexOutOfBoundsException ex) {
            throw new ConcurrentModificationException();
        }
    }
}
} | class PagedList<E> implements List<E> {
/** The items retrieved. */
private final List<E> items;
/** The paged response iterator for not retrieved items. */
private Iterator<PagedResponse<E>> pagedResponseIterator;
/**
* Creates an instance of PagedList.
*/
public PagedList() {
items = new ArrayList<>();
pagedResponseIterator = Collections.emptyIterator();
}
/**
* Creates an instance of PagedList from a {@link PagedIterable}.
*
* @param pagedIterable the {@link PagedIterable} object.
*/
public PagedList(PagedIterable<E> pagedIterable) {
items = new ArrayList<>();
Objects.requireNonNull(pagedIterable, "'pagedIterable' cannot be null.");
this.pagedResponseIterator = pagedIterable.iterableByPage().iterator();
}
/**
* If there are more pages available.
*
* @return true if there are more pages to load. False otherwise.
*/
protected boolean hasNextPage() {
return pagedResponseIterator.hasNext();
}
/**
* Loads a page from next page link.
* The exceptions are wrapped into Java Runtime exceptions.
*/
/**
* Keep loading the next page from the next page link until all items are loaded.
*/
public void loadAll() {
while (hasNextPage()) {
loadNextPage();
}
}
@Override
public int size() {
loadAll();
return items.size();
}
@Override
public boolean isEmpty() {
return items.isEmpty() && !hasNextPage();
}
@Override
public boolean contains(Object o) {
return indexOf(o) >= 0;
}
@Override
public Iterator<E> iterator() {
return new ListItr(0);
}
@Override
public Object[] toArray() {
loadAll();
return items.toArray();
}
@Override
public <T> T[] toArray(T[] a) {
loadAll();
return items.toArray(a);
}
@Override
public boolean add(E e) {
loadAll();
return items.add(e);
}
@Override
public boolean remove(Object o) {
int index = indexOf(o);
if (index != -1) {
items.remove(index);
return true;
} else {
return false;
}
}
@Override
public boolean containsAll(Collection<?> c) {
for (Object e : c) {
if (!contains(e)) {
return false;
}
}
return true;
}
@Override
public boolean addAll(Collection<? extends E> c) {
return items.addAll(c);
}
@Override
public boolean addAll(int index, Collection<? extends E> c) {
return items.addAll(index, c);
}
@Override
public boolean removeAll(Collection<?> c) {
return items.removeAll(c);
}
@Override
public boolean retainAll(Collection<?> c) {
return items.retainAll(c);
}
@Override
public void clear() {
items.clear();
pagedResponseIterator = Collections.emptyIterator();
}
@Override
public E get(int index) {
tryLoadToIndex(index);
return items.get(index);
}
@Override
public E set(int index, E element) {
tryLoadToIndex(index);
return items.set(index, element);
}
@Override
public void add(int index, E element) {
items.add(index, element);
}
@Override
public E remove(int index) {
tryLoadToIndex(index);
return items.remove(index);
}
@Override
public int indexOf(Object o) {
int index = items.indexOf(o);
if (index != -1) {
return index;
}
while (hasNextPage()) {
int itemsSize = items.size();
List<E> nextPageItems = pagedResponseIterator.next().getValue();
this.items.addAll(nextPageItems);
index = nextPageItems.indexOf(o);
if (index != -1) {
index = itemsSize + index;
return index;
}
}
return -1;
}
@Override
public int lastIndexOf(Object o) {
loadAll();
return items.lastIndexOf(o);
}
@Override
public ListIterator<E> listIterator() {
return new ListItr(0);
}
@Override
public ListIterator<E> listIterator(int index) {
tryLoadToIndex(index);
return new ListItr(index);
}
@Override
public List<E> subList(int fromIndex, int toIndex) {
while ((fromIndex >= items.size() || toIndex >= items.size()) && hasNextPage()) {
loadNextPage();
}
return items.subList(fromIndex, toIndex);
}
private void tryLoadToIndex(int index) {
while (index >= items.size() && hasNextPage()) {
loadNextPage();
}
}
/**
* The implementation of {@link ListIterator} for PagedList.
*/
private class ListItr implements ListIterator<E> {
/**
* index of next element to return.
*/
private int nextIndex;
/**
* index of last element returned; -1 if no such action happened.
*/
private int lastRetIndex = -1;
/**
* Creates an instance of the ListIterator.
*
* @param index the position in the list to start.
*/
ListItr(int index) {
this.nextIndex = index;
}
@Override
public boolean hasNext() {
return this.nextIndex != items.size() || hasNextPage();
}
@Override
public E next() {
if (this.nextIndex >= items.size()) {
if (!hasNextPage()) {
throw new NoSuchElementException();
} else {
loadNextPage();
}
return next();
} else {
try {
E nextItem = items.get(this.nextIndex);
this.lastRetIndex = this.nextIndex;
this.nextIndex = this.nextIndex + 1;
return nextItem;
} catch (IndexOutOfBoundsException ex) {
throw new ConcurrentModificationException();
}
}
}
@Override
public void remove() {
if (this.lastRetIndex < 0) {
throw new IllegalStateException();
} else {
try {
items.remove(this.lastRetIndex);
this.nextIndex = this.lastRetIndex;
this.lastRetIndex = -1;
} catch (IndexOutOfBoundsException ex) {
throw new ConcurrentModificationException();
}
}
}
@Override
public boolean hasPrevious() {
return this.nextIndex != 0;
}
@Override
public E previous() {
int i = this.nextIndex - 1;
if (i < 0) {
throw new NoSuchElementException();
} else if (i >= items.size()) {
throw new ConcurrentModificationException();
} else {
try {
this.nextIndex = i;
this.lastRetIndex = i;
return items.get(this.lastRetIndex);
} catch (IndexOutOfBoundsException ex) {
throw new ConcurrentModificationException();
}
}
}
@Override
public int nextIndex() {
return this.nextIndex;
}
@Override
public int previousIndex() {
return this.nextIndex - 1;
}
@Override
public void set(E e) {
if (this.lastRetIndex < 0) {
throw new IllegalStateException();
} else {
try {
items.set(this.lastRetIndex, e);
} catch (IndexOutOfBoundsException ex) {
throw new ConcurrentModificationException();
}
}
}
@Override
public void add(E e) {
try {
items.add(this.nextIndex, e);
this.nextIndex = this.nextIndex + 1;
this.lastRetIndex = -1;
} catch (IndexOutOfBoundsException ex) {
throw new ConcurrentModificationException();
}
}
}
} |
// REVIEW: Won't checkDirStatus do the "blob does not exist" validation for us? It gets the
// container client, lists paths with path == the blob name, and returns different codes
// depending on the number of elements underneath it.
// FIX(review): this region contained two fused duplicate versions of delete(); deduplicated,
// keeping the version that maps a service-side 404 to NoSuchFileException.

/**
 * Deletes the file or empty directory at the given path.
 *
 * @param path the path to delete; must be an AzurePath and not a root directory.
 * @throws NoSuchFileException if nothing exists at the path (including when the blob vanishes
 * between the status check and the delete call — the two are not atomic).
 * @throws DirectoryNotEmptyException if the path is a non-empty directory.
 * @throws IOException if the service reports any other error.
 */
public void delete(Path path) throws IOException {
    AzurePath aPath = validatePathInstanceType(path);
    validateNotRoot(path, "Delete");
    BlobClient blobClient = aPath.toBlobClient();

    // Pre-validate: the path must exist and, if a directory, must be empty.
    DirectoryStatus dirStatus = checkDirStatus(blobClient);
    if (dirStatus.equals(DirectoryStatus.DOES_NOT_EXIST)) {
        throw Utility.logError(logger, new NoSuchFileException(path.toString()));
    }
    if (dirStatus.equals(DirectoryStatus.NOT_EMPTY)) {
        throw Utility.logError(logger, new DirectoryNotEmptyException(path.toString()));
    }

    try {
        blobClient.delete();
    } catch (BlobStorageException e) {
        // The blob may have been deleted between the status check and this call; surface
        // that race as the NIO-standard NoSuchFileException rather than a raw IOException.
        if (e.getErrorCode().equals(BlobErrorCode.BLOB_NOT_FOUND)) {
            throw Utility.logError(logger, new NoSuchFileException(path.toString()));
        }
        throw Utility.logError(logger, new IOException(e));
    }
}

class AzureFileSystemProvider extends FileSystemProvider {
private final ClientLogger logger = new ClientLogger(AzureFileSystemProvider.class);
// Standard HTTP content-header attribute names recognized when extracting file attributes.
public static final String CONTENT_TYPE = "Content-Type";
public static final String CONTENT_DISPOSITION = "Content-Disposition";
public static final String CONTENT_LANGUAGE = "Content-Language";
public static final String CONTENT_ENCODING = "Content-Encoding";
public static final String CONTENT_MD5 = "Content-MD5";
public static final String CACHE_CONTROL = "Cache-Control";
// Query parameter in "azb:" URIs that carries the storage account name.
private static final String ACCOUNT_QUERY_KEY = "account";
// Maximum time to wait for a server-side blob copy to complete.
private static final int COPY_TIMEOUT_SECONDS = 30;
// Metadata key whose presence marks a zero-length blob as a concrete directory.
static final String DIR_METADATA_MARKER = "is_hdi_folder";
// Open file systems keyed by account name; see newFileSystem/getFileSystem/closeFileSystem.
private final ConcurrentMap<String, FileSystem> openFileSystems;
/**
 * Creates an AzureFileSystemProvider.
 */
public AzureFileSystemProvider() {
    // No file systems are open initially; they are registered by newFileSystem.
    this.openFileSystems = new ConcurrentHashMap<>();
}
/**
 * Returns {@code "azb".}
 */
@Override
public String getScheme() {
    // URI scheme handled by this provider.
    return "azb";
}
/**
 * The format of a {@code URI} identifying a file system is {@code "azb://?account=<account_name>"}.
 * <p>
 * Once closed, a file system with the same identifier may be reopened.
 * {@inheritDoc}
 */
@Override
public FileSystem newFileSystem(URI uri, Map<String, ?> config) throws IOException {
    String accountName = extractAccountName(uri);
    // Fail fast on an obvious duplicate before constructing the file system.
    if (this.openFileSystems.containsKey(accountName)) {
        throw Utility.logError(this.logger, new FileSystemAlreadyExistsException("Name: " + accountName));
    }
    AzureFileSystem afs = new AzureFileSystem(this, accountName, config);
    // FIX(review): putIfAbsent makes the check-and-register atomic; the previous
    // containsKey-then-put pair could let two concurrent callers both "succeed".
    if (this.openFileSystems.putIfAbsent(accountName, afs) != null) {
        throw Utility.logError(this.logger, new FileSystemAlreadyExistsException("Name: " + accountName));
    }
    return afs;
}
/**
 * The format of a {@code URI} identifying a file system is {@code "azb://?account=<account_name>"}.
 * <p>
 * Trying to retrieve a closed file system will throw a {@link FileSystemNotFoundException}. Once closed, a
 * file system with the same identifier may be reopened.
 * {@inheritDoc}
 */
@Override
public FileSystem getFileSystem(URI uri) {
    String accountName = extractAccountName(uri);
    // FIX(review): single map read. The previous containsKey-then-get pair could return null
    // if the file system was closed (removed) between the two calls.
    FileSystem fileSystem = this.openFileSystems.get(accountName);
    if (fileSystem == null) {
        throw Utility.logError(this.logger, new FileSystemNotFoundException("Name: " + accountName));
    }
    return fileSystem;
}
/**
 * {@inheritDoc}
 */
@Override
public Path getPath(URI uri) {
    // Delegates to the file system identified by the URI's account query parameter.
    return getFileSystem(uri).getPath(uri.getPath());
}
/**
 * {@inheritDoc}
 */
@Override
public SeekableByteChannel newByteChannel(Path path, Set<? extends OpenOption> set,
    FileAttribute<?>... fileAttributes) throws IOException {
    // TODO(review): not yet implemented. Returning null violates the FileSystemProvider
    // contract — consider throwing UnsupportedOperationException until implemented.
    return null;
}
/**
 * {@inheritDoc}
 */
@Override
public DirectoryStream<Path> newDirectoryStream(Path path, DirectoryStream.Filter<? super Path> filter)
    throws IOException {
    // TODO(review): not yet implemented; returning null will NPE in callers — consider
    // throwing UnsupportedOperationException until implemented.
    return null;
}
/**
* The existence of a directory in the {@code AzureFileSystem} is defined on two levels. <i>Weak existence</i> is
* defined by the presence of a non-zero number of blobs prefixed with the directory's path. This concept is also
* known as a <i>virtual directory</i> and enables the file system to work with containers that were pre-loaded
* with data by another source but need to be accessed by this file system. <i>Strong existence</i> is defined as
* the presence of an actual storage resource at the given path, which in the case of directories, is a zero-length
* blob whose name is the directory path with a particular metadata field indicating the blob's status as a
* directory. This is also known as a <i>concrete directory</i>. Directories created by this file system will
* strongly exist. Operations targeting directories themselves as the object (e.g. setting properties) will target
* marker blobs underlying concrete directories. Other operations (e.g. listing) will operate on the blob-name
* prefix.
* <p>
* This method fulfills the nio contract of: "The check for the existence of the file and the creation of the
* directory if it does not exist are a single operation that is atomic with respect to all other filesystem
* activities that might affect the directory." More specifically, this method will atomically check for <i>strong
* existence</i> of another file or directory at the given path and fail if one is present. On the other hand, we
* only check for <i>weak existence</i> of the parent to determine if the given path is valid. Additionally, the
* action of checking whether the parent exists, is <i>not</i> atomic with the creation of the directory. Note that
* while it is possible that the parent may be deleted between when the parent is determined to exist and the
* creation of the child, the creation of the child will always ensure the existence of a virtual parent, so the
* child will never be left floating and unreachable. The different checks on parent and child is due to limitations
* in the Storage service API.
* <p>
* There may be some unintuitive behavior when working with directories in this file system, particularly virtual
* directories(usually those not created by this file system). A virtual directory will disappear as soon as all its
* children have been deleted. Furthermore, if a directory with the given path weakly exists at the time of calling
* this method, this method will still return success and create a concrete directory at the target location.
* In other words, it is possible to "double create" a directory if it first weakly exists and then is strongly
* created. This is both because it is impossible to atomically check if a virtual directory exists while creating a
* concrete directory and because such behavior will have minimal side effects--no files will be overwritten and the
* directory will still be available for writing as intended, though it may not be empty.
* <p>
* This method will attempt to extract standard HTTP content headers from the list of file attributes to set them
* as blob headers. All other attributes will be set as blob metadata. The value of every attribute will be
* converted to a {@code String} with the exception of the Content-MD5 attribute which expects a {@code byte[]}.
* When extracting the content headers, the following strings will be used for comparison (constants for these
* values can be found on this type):
* <ul>
* <li>{@code Content-Type}</li>
* <li>{@code Content-Disposition}</li>
* <li>{@code Content-Language}</li>
* <li>{@code Content-Encoding}</li>
* <li>{@code Content-MD5}</li>
* <li>{@code Cache-Control}</li>
* </ul>
* Note that these properties also have a particular semantic in that if one is specified, all are updated. In other
* words, if any of the above is set, all those that are not set will be cleared. See the
* <a href="https:
* information.
*
* {@inheritDoc}
*/
@Override
public void createDirectory(Path path, FileAttribute<?>... fileAttributes) throws IOException {
    AzurePath aPath = validatePathInstanceType(path);
    // Varargs may legally arrive as null; normalize to an empty array.
    fileAttributes = fileAttributes == null ? new FileAttribute<?>[0] : fileAttributes;
    BlobClient client = aPath.toBlobClient();
    validateNotRoot(aPath, "Create");
    // Guard clause: the parent need only weakly exist for the path to be valid.
    if (!checkParentDirectoryExists(aPath)) {
        throw Utility.logError(logger, new IOException("Parent directory does not exist for path: "
            + aPath.toString()));
    }
    try {
        List<FileAttribute<?>> attributeList = new ArrayList<>(Arrays.asList(fileAttributes));
        BlobHttpHeaders headers = Utility.extractHttpHeaders(attributeList, logger);
        Map<String, String> metadata = Utility.convertAttributesToMetadata(attributeList);
        // If-None-Match: * guarantees we never overwrite an existing concrete resource.
        putDirectoryBlob(client, headers, metadata, new BlobRequestConditions().setIfNoneMatch("*"));
    } catch (BlobStorageException e) {
        if (e.getStatusCode() == HttpURLConnection.HTTP_CONFLICT
            && e.getErrorCode().equals(BlobErrorCode.BLOB_ALREADY_EXISTS)) {
            throw Utility.logError(logger, new FileAlreadyExistsException(aPath.toString()));
        } else {
            // Typo fix in the message: "occured" -> "occurred".
            throw Utility.logError(logger, new IOException("An error occurred when creating the directory", e));
        }
    }
}
/**
 * Writes the zero-length marker blob that represents a concrete directory. Callers must have
 * already performed any checks required for directory creation (e.g. parent existence).
 *
 * @param destinationClient A blobClient pointing to the location where the directory should be put.
 * @param headers Any headers that should be associated with the directory.
 * @param metadata Any metadata that should be associated with the directory; the directory
 * marker entry is added automatically.
 * @param requestConditions Any necessary request conditions to pass when creating the directory blob.
 */
private void putDirectoryBlob(BlobClient destinationClient, BlobHttpHeaders headers, Map<String, String> metadata,
    BlobRequestConditions requestConditions) {
    // Committing an empty block list produces a zero-length blob carrying the directory marker.
    Map<String, String> directoryMetadata = prepareMetadataForDirectory(metadata);
    destinationClient.getBlockBlobClient().commitBlockListWithResponse(Collections.emptyList(), headers,
        directoryMetadata, null, requestConditions, null, null);
}
/**
 * Checks for the existence of the parent of the given path. We do not check for the actual marker blob as parents
 * need only weakly exist.
 *
 * If the parent is a root (container), it will be assumed to exist, so it must be validated elsewhere that the
 * container is a legitimate root within this file system.
 *
 * @param path the path whose parent is checked.
 * @return true if the parent exists (at least weakly).
 * @throws IOException if the existence check against the service fails.
 */
boolean checkParentDirectoryExists(Path path) throws IOException {
    /*
     * If the parent is just the root (or null, which means the parent is implicitly the default directory which
     * is a root), that means we are checking a container, which is always considered to exist. Otherwise, perform
     * a normal existence check.
     */
    Path parent = path.getParent();
    if (parent == null || parent.equals(path.getRoot())) {
        return true;
    }
    // Reuse the already-computed parent instead of calling path.getParent() a second time.
    return checkDirectoryExists(((AzurePath) parent).toBlobClient());
}
/**
 * Checks whether a directory exists, counting both concrete (marker blob) and virtual
 * (prefix with children) directories.
 *
 * @param dirBlobClient client pointing at the directory's blob path; must not be null.
 * @return true if the directory exists, empty or not.
 * @throws IOException if the status listing against the service fails.
 */
boolean checkDirectoryExists(BlobClient dirBlobClient) throws IOException {
    if (dirBlobClient == null) {
        // Message fix: the old text claimed "one or both of the parameters" but this method
        // takes exactly one parameter.
        throw Utility.logError(logger, new IllegalArgumentException("The parameter was null."));
    }
    DirectoryStatus dirStatus = checkDirStatus(dirBlobClient);
    return dirStatus.equals(DirectoryStatus.EMPTY) || dirStatus.equals(DirectoryStatus.NOT_EMPTY);
}
/**
 * Returns a copy of the given metadata with the directory marker entry added.
 * <p>
 * FIX(review): this previously mutated the caller's map in place, corrupting caller-owned
 * (or failing on immutable) maps. All internal callers already use the return value.
 *
 * @param metadata caller-supplied metadata; may be null.
 * @return a new map containing the caller's entries plus the directory marker.
 */
Map<String, String> prepareMetadataForDirectory(Map<String, String> metadata) {
    Map<String, String> dirMetadata = metadata == null ? new HashMap<>() : new HashMap<>(metadata);
    dirMetadata.put(DIR_METADATA_MARKER, "true");
    return dirMetadata;
}
// Builds a container client that shares the blob client's pipeline (credentials, policies).
// NOTE(review): the endpoint passed is the full blob URL; presumably the container builder
// derives the container endpoint from it — confirm against the builder's documentation.
BlobContainerClient getContainerClient(BlobClient client) {
    return new BlobContainerClientBuilder().endpoint(client.getBlobUrl())
        .pipeline(client.getHttpPipeline())
        .buildClient();
}
/**
* As noted by the NIO docs, this method is not atomic. It is possible to delete a file in use by another process,
* and doing so will not immediately invalidate any channels open to that file--they will simply start to fail.
* Root directories cannot be deleted even when empty.
*
* {@inheritDoc}
*/
@Override
/**
 * As stated in the nio docs, this method is not atomic. More specifically, the checks necessary to validate the
 * inputs and state of the file system are not atomic with the actual copying of data. If the copy is triggered,
 * the copy itself is atomic and only a complete copy will ever be left at the destination.
 *
 * In addition to those in the nio javadocs, this method has the following requirements for successful completion.
 * {@link StandardCopyOption#COPY_ATTRIBUTES} must be passed, as the service will always copy file attributes;
 * if this option is not passed, an {@link UnsupportedOperationException} will be thrown. Neither the source nor the
 * destination can be a root directory; if either is a root directory, an {@link IllegalArgumentException} will be
 * thrown. The parent directory of the destination must at least weakly exist; if it does not, an
 * {@link IOException} will be thrown. The only supported option other than
 * {@link StandardCopyOption#COPY_ATTRIBUTES} is {@link StandardCopyOption#REPLACE_EXISTING}; the presence of any
 * other option will result in an {@link UnsupportedOperationException}.
 *
 * This method supports both virtual and concrete directories as both the source and destination. Unlike when
 * creating a directory, the existence of a virtual directory at the destination will cause this operation to fail.
 * This is in order to prevent the possibility of overwriting a non-empty virtual directory with a file. Still, as
 * mentioned above, this check is not atomic with the creation of the resultant directory.
 *
 * {@inheritDoc}
 */
@Override
public void copy(Path source, Path destination, CopyOption... copyOptions) throws IOException {
    // Validate instance types; copying a path onto itself is a no-op.
    AzurePath aSource = validatePathInstanceType(source);
    AzurePath aDestination = validatePathInstanceType(destination);
    if (aSource.equals(aDestination)) {
        return;
    }
    // Read and validate options: COPY_ATTRIBUTES is mandatory, REPLACE_EXISTING optional.
    boolean replaceExisting = false;
    List<CopyOption> optionsList = new ArrayList<>(Arrays.asList(copyOptions));
    if (!optionsList.contains(StandardCopyOption.COPY_ATTRIBUTES)) {
        throw Utility.logError(logger, new UnsupportedOperationException("StandardCopyOption.COPY_ATTRIBUTES "
            + "must be specified as the service will always copy file attributes."));
    }
    optionsList.remove(StandardCopyOption.COPY_ATTRIBUTES);
    if (optionsList.contains(StandardCopyOption.REPLACE_EXISTING)) {
        replaceExisting = true;
        optionsList.remove(StandardCopyOption.REPLACE_EXISTING);
    }
    if (!optionsList.isEmpty()) {
        // Typo fix in user-facing message: "StandareCopyOption" -> "StandardCopyOption".
        throw Utility.logError(logger, new UnsupportedOperationException("Unsupported copy option found. Only "
            + "StandardCopyOption.COPY_ATTRIBUTES and StandardCopyOption.REPLACE_EXISTING are supported."));
    }
    validateNotRoot(aSource, "Copy source");
    validateNotRoot(aDestination, "Copy destination");
    BlobClient sourceBlob = aSource.toBlobClient();
    BlobClient destinationBlob = aDestination.toBlobClient();
    // A non-empty destination directory can never be overwritten, even with REPLACE_EXISTING.
    DirectoryStatus destinationStatus = checkDirStatus(destinationBlob);
    if (destinationStatus.equals(DirectoryStatus.NOT_EMPTY)) {
        throw Utility.logError(logger, new DirectoryNotEmptyException(aDestination.toString()));
    }
    /*
     * Set request conditions if we should not overwrite. We can error out here if we know something already
     * exists, but we will also create request conditions as a safeguard against overwriting something that was
     * created between our check and put.
     */
    BlobRequestConditions requestConditions = null;
    if (!replaceExisting) {
        if (!destinationStatus.equals(DirectoryStatus.DOES_NOT_EXIST)) {
            throw Utility.logError(logger, new FileAlreadyExistsException(aDestination.toString()));
        }
        requestConditions = new BlobRequestConditions().setIfNoneMatch("*");
    }
    /*
     * More path validation.
     * Check that the parent for the destination exists. We only need to perform this check if there is nothing
     * currently at the destination, for if the destination exists, its parent at least weakly exists and we
     * can skip a service call.
     */
    if (destinationStatus.equals(DirectoryStatus.DOES_NOT_EXIST) && !checkParentDirectoryExists(aDestination)) {
        throw Utility.logError(logger, new IOException("Parent directory of destination location does not exist."
            + "The destination path is therefore invalid. Destination: " + aDestination.toString()));
    }
    /*
     * Try to copy the resource at the source path.
     * There is an optimization here where we try to do the copy first and only check for a virtual directory if
     * there's a 404. In the cases of files and concrete directories, this only requires one request. For virtual
     * directories, however, this requires three requests: failed copy, check status, create directory. Depending
     * on customer scenarios and how many virtual directories they copy, it could be better to check the directory
     * status first and then do a copy or createDir, which would always be two requests for all resource types.
     */
    try {
        SyncPoller<BlobCopyInfo, Void> pollResponse =
            destinationBlob.beginCopy(sourceBlob.getBlobUrl(), null, null, null, null, requestConditions, null);
        pollResponse.waitForCompletion(Duration.ofSeconds(COPY_TIMEOUT_SECONDS));
    } catch (BlobStorageException e) {
        if (e.getErrorCode().equals(BlobErrorCode.BLOB_NOT_FOUND)
            && !checkDirStatus(sourceBlob).equals(DirectoryStatus.DOES_NOT_EXIST)) {
            /*
             * The source is a virtual directory (no blob at its exact path). We already checked that the parent
             * exists and validated the paths above, so we can put the directory blob directly.
             */
            putDirectoryBlob(destinationBlob, null, null, requestConditions);
        } else {
            throw Utility.logError(logger, new IOException(e));
        }
    } catch (RuntimeException e) {
        throw Utility.logError(logger, new IOException(e));
    }
}
/**
 * This method will check if a directory is extant and/or empty and accommodates virtual directories. This method
 * will not check the status of root directories.
 *
 * @param dirBlobClient client pointing at the directory path to classify; must not be null.
 * @return one of DOES_NOT_EXIST, EMPTY, NOT_EMPTY, or NOT_A_DIRECTORY.
 * @throws IOException if the listing call against the service fails.
 */
DirectoryStatus checkDirStatus(BlobClient dirBlobClient) throws IOException {
    if (dirBlobClient == null) {
        throw Utility.logError(logger, new IllegalArgumentException("The blob client was null."));
    }
    BlobContainerClient containerClient = getContainerClient(dirBlobClient);
    // Two results are enough to distinguish empty from non-empty.
    ListBlobsOptions listOptions = new ListBlobsOptions().setMaxResultsPerPage(2)
        .setPrefix(dirBlobClient.getBlobName())
        .setDetails(new BlobListDetails().setRetrieveMetadata(true));
    /*
     * Do a list on prefix.
     * Zero elements means no virtual dir. Does not exist.
     * One element that matches this dir means empty.
     * One element that doesn't match this dir or more than one element. Not empty.
     * One element that matches the name but does not have a directory marker means the resource is not a directory.
     * Note that blob names that match the prefix exactly are returned in listing operations.
     */
    try {
        Iterator<BlobItem> blobIterator = containerClient.listBlobsByHierarchy(AzureFileSystem.PATH_SEPARATOR,
            listOptions, null).iterator();
        if (!blobIterator.hasNext()) {
            return DirectoryStatus.DOES_NOT_EXIST;
        } else {
            BlobItem item = blobIterator.next();
            if (blobIterator.hasNext()) {
                return DirectoryStatus.NOT_EMPTY;
            }
            if (!item.getName().equals(dirBlobClient.getBlobName())) {
                /*
                 * Names do not match. Must be a virtual dir with one item. e.g. blob with name "foo/bar" means
                 * dir "foo" exists.
                 */
                return DirectoryStatus.NOT_EMPTY;
            }
            if (item.getMetadata() != null && item.getMetadata().containsKey(DIR_METADATA_MARKER)) {
                return DirectoryStatus.EMPTY;
            }
            return DirectoryStatus.NOT_A_DIRECTORY;
        }
    } catch (BlobStorageException e) {
        throw Utility.logError(logger, new IOException(e));
    }
}
/*int checkRootDirStatus(BlobContainerClient rootClient) {
}*/
/**
 * {@inheritDoc}
 */
@Override
public void move(Path path, Path path1, CopyOption... copyOptions) throws IOException {
    // TODO(review): not yet implemented; silently doing nothing hides errors — consider
    // throwing UnsupportedOperationException until implemented.
}
/**
 * {@inheritDoc}
 */
@Override
public boolean isSameFile(Path path, Path path1) throws IOException {
    // TODO(review): stub — always reports the paths as different.
    return false;
}
/**
 * {@inheritDoc}
 */
@Override
public boolean isHidden(Path path) throws IOException {
    // TODO(review): stub — no path is ever reported hidden.
    return false;
}
/**
 * {@inheritDoc}
 */
@Override
public FileStore getFileStore(Path path) throws IOException {
    // TODO(review): stub — returning null will NPE in callers; consider
    // UnsupportedOperationException until implemented.
    return null;
}
/**
 * {@inheritDoc}
 */
@Override
public void checkAccess(Path path, AccessMode... accessModes) throws IOException {
    // TODO(review): no-op — currently reports every access as permitted.
}
/**
 * {@inheritDoc}
 */
@Override
public <V extends FileAttributeView> V getFileAttributeView(Path path, Class<V> aClass, LinkOption... linkOptions) {
    // TODO(review): stub — returns null (interpreted by NIO as "view not available").
    return null;
}
/**
 * {@inheritDoc}
 */
@Override
public <A extends BasicFileAttributes> A readAttributes(Path path, Class<A> aClass, LinkOption... linkOptions)
    throws IOException {
    // TODO(review): stub — returning null will NPE in callers; consider
    // UnsupportedOperationException until implemented.
    return null;
}
/**
 * {@inheritDoc}
 */
@Override
public Map<String, Object> readAttributes(Path path, String s, LinkOption... linkOptions) throws IOException {
    // TODO(review): stub — returning null will NPE in callers; consider
    // UnsupportedOperationException until implemented.
    return null;
}
/**
 * {@inheritDoc}
 */
@Override
public void setAttribute(Path path, String s, Object o, LinkOption... linkOptions) throws IOException {
    // TODO(review): no-op — attribute writes are silently dropped.
}
// Deregisters a file system so the same account can be reopened later.
// NOTE(review): presumably invoked by AzureFileSystem when it closes — confirm.
void closeFileSystem(String fileSystemName) {
    this.openFileSystems.remove(fileSystemName);
}
private String extractAccountName(URI uri) {
if (!uri.getScheme().equals(this.getScheme())) {
throw Utility.logError(this.logger, new IllegalArgumentException(
"URI scheme does not match this provider"));
}
if (CoreUtils.isNullOrEmpty(uri.getQuery())) {
throw Utility.logError(this.logger, new IllegalArgumentException("URI does not contain a query "
+ "component. FileSystems require a URI of the format \"azb:
}
String accountName = Flux.fromArray(uri.getQuery().split("&"))
.filter(s -> s.startsWith(ACCOUNT_QUERY_KEY + "="))
.switchIfEmpty(Mono.error(Utility.logError(this.logger, new IllegalArgumentException(
"URI does not contain an \"" + ACCOUNT_QUERY_KEY + "=\" parameter. FileSystems require a URI "
+ "of the format \"azb:
.map(s -> s.substring(ACCOUNT_QUERY_KEY.length() + 1))
.blockLast();
if (CoreUtils.isNullOrEmpty(accountName)) {
throw Utility.logError(logger, new IllegalArgumentException("No account name provided in URI query."));
}
return accountName;
}
/**
 * Narrows a {@link Path} to an {@link AzurePath}, rejecting any other Path subtype.
 *
 * @param path the path to check.
 * @return the same path, typed as AzurePath.
 */
private AzurePath validatePathInstanceType(Path path) {
    if (path instanceof AzurePath) {
        return (AzurePath) path;
    }
    throw Utility.logError(logger, new IllegalArgumentException("This provider cannot operate on subtypes of "
        + "Path other than AzurePath"));
}
/**
 * Throws if the given path is a file-system root (container), which is not a valid target
 * for the named operation.
 *
 * @param path the path to check; assumed to be an AzurePath.
 * @param operation the operation name, used in the error message.
 */
private void validateNotRoot(Path path, String operation) {
    if (((AzurePath) path).isRoot()) {
        throw Utility.logError(logger, new IllegalArgumentException(
            String.format("%s is not supported on a root directory. Path: %s", operation, path.toString())));
    }
}
}

class AzureFileSystemProvider extends FileSystemProvider {
    // FIX(review): de-garbled a line that fused the closing brace of the previous class with
    // this duplicated class header (stray '|' separator removed).
    private final ClientLogger logger = new ClientLogger(AzureFileSystemProvider.class);
    // Standard HTTP content-header attribute names recognized when extracting file attributes.
    public static final String CONTENT_TYPE = "Content-Type";
    public static final String CONTENT_DISPOSITION = "Content-Disposition";
    public static final String CONTENT_LANGUAGE = "Content-Language";
    public static final String CONTENT_ENCODING = "Content-Encoding";
    public static final String CONTENT_MD5 = "Content-MD5";
    public static final String CACHE_CONTROL = "Cache-Control";
    // Query parameter in "azb:" URIs that carries the storage account name.
    private static final String ACCOUNT_QUERY_KEY = "account";
    // Maximum time to wait for a server-side blob copy to complete.
    private static final int COPY_TIMEOUT_SECONDS = 30;
    // Metadata key whose presence marks a zero-length blob as a concrete directory.
    static final String DIR_METADATA_MARKER = "is_hdi_folder";
    // Open file systems keyed by account name.
    private final ConcurrentMap<String, FileSystem> openFileSystems;
/**
* Creates an AzureFileSystemProvider.
*/
public AzureFileSystemProvider() {
this.openFileSystems = new ConcurrentHashMap<>();
}
/**
* Returns {@code "azb".}
*/
@Override
public String getScheme() {
return "azb";
}
/**
* The format of a {@code URI} identifying a file system is {@code "azb:
* <p>
* Once closed, a file system with the same identifier may be reopened.
* {@inheritDoc}
*/
@Override
public FileSystem newFileSystem(URI uri, Map<String, ?> config) throws IOException {
String accountName = extractAccountName(uri);
if (this.openFileSystems.containsKey(accountName)) {
throw Utility.logError(this.logger, new FileSystemAlreadyExistsException("Name: " + accountName));
}
AzureFileSystem afs = new AzureFileSystem(this, accountName, config);
this.openFileSystems.put(accountName, afs);
return afs;
}
/**
* The format of a {@code URI} identifying an file system is {@code "azb:
* <p>
* Trying to retrieve a closed file system will throw a {@link FileSystemNotFoundException}. Once closed, a
* file system with the same identifier may be reopened.
* {@inheritDoc}
*/
@Override
public FileSystem getFileSystem(URI uri) {
String accountName = extractAccountName(uri);
if (!this.openFileSystems.containsKey(accountName)) {
throw Utility.logError(this.logger, new FileSystemNotFoundException("Name: " + accountName));
}
return this.openFileSystems.get(accountName);
}
/**
* {@inheritDoc}
*/
@Override
public Path getPath(URI uri) {
return getFileSystem(uri).getPath(uri.getPath());
}
/**
* {@inheritDoc}
*/
@Override
public SeekableByteChannel newByteChannel(Path path, Set<? extends OpenOption> set,
FileAttribute<?>... fileAttributes) throws IOException {
return null;
}
/**
* {@inheritDoc}
*/
@Override
public DirectoryStream<Path> newDirectoryStream(Path path, DirectoryStream.Filter<? super Path> filter)
throws IOException {
return null;
}
/**
* Creates a new directory at the specified path.
* <p>
* The existence of a directory in the {@code AzureFileSystem} is defined on two levels. <i>Weak existence</i> is
* defined by the presence of a non-zero number of blobs prefixed with the directory's path. This concept is also
* known as a <i>virtual directory</i> and enables the file system to work with containers that were pre-loaded
* with data by another source but need to be accessed by this file system. <i>Strong existence</i> is defined as
* the presence of an actual storage resource at the given path, which in the case of directories, is a zero-length
* blob whose name is the directory path with a particular metadata field indicating the blob's status as a
* directory. This is also known as a <i>concrete directory</i>. Directories created by this file system will
* strongly exist. Operations targeting directories themselves as the object (e.g. setting properties) will target
* marker blobs underlying concrete directories. Other operations (e.g. listing) will operate on the blob-name
* prefix.
* <p>
* This method fulfills the nio contract of: "The check for the existence of the file and the creation of the
* directory if it does not exist are a single operation that is atomic with respect to all other filesystem
* activities that might affect the directory." More specifically, this method will atomically check for <i>strong
* existence</i> of another file or directory at the given path and fail if one is present. On the other hand, we
* only check for <i>weak existence</i> of the parent to determine if the given path is valid. Additionally, the
* action of checking whether the parent exists, is <i>not</i> atomic with the creation of the directory. Note that
* while it is possible that the parent may be deleted between when the parent is determined to exist and the
* creation of the child, the creation of the child will always ensure the existence of a virtual parent, so the
* child will never be left floating and unreachable. The different checks on parent and child is due to limitations
* in the Storage service API.
* <p>
* There may be some unintuitive behavior when working with directories in this file system, particularly virtual
* directories(usually those not created by this file system). A virtual directory will disappear as soon as all its
* children have been deleted. Furthermore, if a directory with the given path weakly exists at the time of calling
* this method, this method will still return success and create a concrete directory at the target location.
* In other words, it is possible to "double create" a directory if it first weakly exists and then is strongly
* created. This is both because it is impossible to atomically check if a virtual directory exists while creating a
* concrete directory and because such behavior will have minimal side effects--no files will be overwritten and the
* directory will still be available for writing as intended, though it may not be empty.
* <p>
* This method will attempt to extract standard HTTP content headers from the list of file attributes to set them
* as blob headers. All other attributes will be set as blob metadata. The value of every attribute will be
* converted to a {@code String} with the exception of the Content-MD5 attribute which expects a {@code byte[]}.
* When extracting the content headers, the following strings will be used for comparison (constants for these
* values can be found on this type):
* <ul>
* <li>{@code Content-Type}</li>
* <li>{@code Content-Disposition}</li>
* <li>{@code Content-Language}</li>
* <li>{@code Content-Encoding}</li>
* <li>{@code Content-MD5}</li>
* <li>{@code Cache-Control}</li>
* </ul>
* Note that these properties also have a particular semantic in that if one is specified, all are updated. In other
* words, if any of the above is set, all those that are not set will be cleared. See the
* <a href="https:
* information.
*
* {@inheritDoc}
*/
@Override
public void createDirectory(Path path, FileAttribute<?>... fileAttributes) throws IOException {
AzurePath aPath = validatePathInstanceType(path);
fileAttributes = fileAttributes == null ? new FileAttribute<?>[0] : fileAttributes;
BlobClient client = aPath.toBlobClient();
validateNotRoot(aPath, "Create");
if (checkParentDirectoryExists(aPath)) {
try {
List<FileAttribute<?>> attributeList = new ArrayList<>(Arrays.asList(fileAttributes));
BlobHttpHeaders headers = Utility.extractHttpHeaders(attributeList, logger);
Map<String, String> metadata = Utility.convertAttributesToMetadata(attributeList);
putDirectoryBlob(client, headers, metadata, new BlobRequestConditions().setIfNoneMatch("*"));
} catch (BlobStorageException e) {
if (e.getStatusCode() == HttpURLConnection.HTTP_CONFLICT
&& e.getErrorCode().equals(BlobErrorCode.BLOB_ALREADY_EXISTS)) {
throw Utility.logError(logger, new FileAlreadyExistsException(aPath.toString()));
} else {
throw Utility.logError(logger, new IOException("An error occured when creating the directory", e));
}
}
} else {
throw Utility.logError(logger, new IOException("Parent directory does not exist for path: "
+ aPath.toString()));
}
}
/**
* Creates the actual directory marker. This method should only be used when any necessary checks for proper
* conditions of directory creation (e.g. parent existence) have already been performed.
*
* @param destinationClient A blobClient pointing to the location where the directory should be put.
* @param headers Any headers that should be associated with the directory.
* @param metadata Any metadata that should be associated with the directory. This method will add the necessary
* metadata to distinguish this blob as a directory.
* @param requestConditions Any necessary request conditions to pass when creating the directory blob.
*/
private void putDirectoryBlob(BlobClient destinationClient, BlobHttpHeaders headers, Map<String, String> metadata,
BlobRequestConditions requestConditions) {
metadata = prepareMetadataForDirectory(metadata);
destinationClient.getBlockBlobClient().commitBlockListWithResponse(Collections.emptyList(), headers,
metadata, null, requestConditions, null, null);
}
/**
* Checks for the existence of the parent of the given path. We do not check for the actual marker blob as parents
* need only weakly exist.
*
* If the parent is a root (container), it will be assumed to exist, so it must be validated elsewhere that the
* container is a legitimate root within this file system.
*/
boolean checkParentDirectoryExists(Path path) throws IOException {
/*
If the parent is just the root (or null, which means the parent is implicitly the default directory which is a
root), that means we are checking a container, which is always considered to exist. Otherwise, perform normal
existence check.
*/
Path parent = path.getParent();
return (parent == null || parent.equals(path.getRoot()))
|| checkDirectoryExists(((AzurePath) path.getParent()).toBlobClient());
}
/**
* Checks whether a directory exists by either being empty or having children.
*/
boolean checkDirectoryExists(BlobClient dirBlobClient) throws IOException {
if (dirBlobClient == null) {
throw Utility.logError(logger, new IllegalArgumentException("One or both of the parameters was null."));
}
DirectoryStatus dirStatus = checkDirStatus(dirBlobClient);
return dirStatus.equals(DirectoryStatus.EMPTY) || dirStatus.equals(DirectoryStatus.NOT_EMPTY);
}
Map<String, String> prepareMetadataForDirectory(Map<String, String> metadata) {
if (metadata == null) {
metadata = new HashMap<>();
}
metadata.put(DIR_METADATA_MARKER, "true");
return metadata;
}
BlobContainerClient getContainerClient(BlobClient client) {
return new BlobContainerClientBuilder().endpoint(client.getBlobUrl())
.pipeline(client.getHttpPipeline())
.buildClient();
}
/**
* Deletes the specified resource.
* <p>
* This method is not atomic. It is possible to delete a file in use by another process,
* and doing so will not immediately invalidate any channels open to that file--they will simply start to fail.
* Root directories cannot be deleted even when empty.
*
* {@inheritDoc}
*/
@Override
/**
* Copies the resource at the source location to the destination.
* <p>
* This method is not atomic. More specifically, the checks necessary to validate the
* inputs and state of the file system are not atomic with the actual copying of data. If the copy is triggered,
* the copy itself is atomic and only a complete copy will ever be left at the destination.
*
* In addition to those in the nio javadocs, this method has the following requirements for successful completion.
* {@link StandardCopyOption
* if this option is not passed, an {@link UnsupportedOperationException} will be thrown. Neither the source nor the
* destination can be a root directory; if either is a root directory, an {@link IllegalArgumentException} will be
* thrown. The parent directory of the destination must at least weakly exist; if it does not, an
* {@link IOException} will be thrown. The only supported option other than
* {@link StandardCopyOption
* other option will result in an {@link UnsupportedOperationException}.
*
* This method supports both virtual and concrete directories as both the source and destination. Unlike when
* creating a directory, the existence of a virtual directory at the destination will cause this operation to fail.
* This is in order to prevent the possibility of overwriting a non-empty virtual directory with a file. Still, as
* mentioned above, this check is not atomic with the creation of the resultant directory.
*
* {@inheritDoc}
* @see
*/
@Override
public void copy(Path source, Path destination, CopyOption... copyOptions) throws IOException {
AzurePath aSource = validatePathInstanceType(source);
AzurePath aDestination = validatePathInstanceType(destination);
if (aSource.equals(aDestination)) {
return;
}
boolean replaceExisting = false;
List<CopyOption> optionsList = new ArrayList<>(Arrays.asList(copyOptions));
if (!optionsList.contains(StandardCopyOption.COPY_ATTRIBUTES)) {
throw Utility.logError(logger, new UnsupportedOperationException("StandardCopyOption.COPY_ATTRIBUTES "
+ "must be specified as the service will always copy file attributes."));
}
optionsList.remove(StandardCopyOption.COPY_ATTRIBUTES);
if (optionsList.contains(StandardCopyOption.REPLACE_EXISTING)) {
replaceExisting = true;
optionsList.remove(StandardCopyOption.REPLACE_EXISTING);
}
if (!optionsList.isEmpty()) {
throw Utility.logError(logger, new UnsupportedOperationException("Unsupported copy option found. Only "
+ "StandardCopyOption.COPY_ATTRIBUTES and StandareCopyOption.REPLACE_EXISTING are supported."));
}
validateNotRoot(aSource, "Copy source");
validateNotRoot(aDestination, "Copy destination");
BlobClient sourceBlob = aSource.toBlobClient();
BlobClient destinationBlob = aDestination.toBlobClient();
DirectoryStatus destinationStatus = checkDirStatus(destinationBlob);
if (destinationStatus.equals(DirectoryStatus.NOT_EMPTY)) {
throw Utility.logError(logger, new DirectoryNotEmptyException(aDestination.toString()));
}
/*
Set request conditions if we should not overwrite. We can error out here if we know something already exists,
but we will also create request conditions as a safeguard against overwriting something that was created
between our check and put.
*/
BlobRequestConditions requestConditions = null;
if (!replaceExisting) {
if (!destinationStatus.equals(DirectoryStatus.DOES_NOT_EXIST)) {
throw Utility.logError(logger, new FileAlreadyExistsException(aDestination.toString()));
}
requestConditions = new BlobRequestConditions().setIfNoneMatch("*");
}
/*
More path validation
Check that the parent for the destination exists. We only need to perform this check if there is nothing
currently at the destination, for if the destination exists, its parent at least weakly exists and we
can skip a service call.
*/
if (destinationStatus.equals(DirectoryStatus.DOES_NOT_EXIST) && !checkParentDirectoryExists(aDestination)) {
throw Utility.logError(logger, new IOException("Parent directory of destination location does not exist."
+ "The destination path is therefore invalid. Destination: " + aDestination.toString()));
}
/*
Try to copy the resource at the source path.
There is an optimization here where we try to do the copy first and only check for a virtual directory if
there's a 404. In the cases of files and concrete directories, this only requires one request. For virtual
directories, however, this requires three requests: failed copy, check status, create directory. Depending on
customer scenarios and how many virtual directories they copy, it could be better to check the directory status
first and then do a copy or createDir, which would always be two requests for all resource types.
*/
try {
SyncPoller<BlobCopyInfo, Void> pollResponse =
destinationBlob.beginCopy(sourceBlob.getBlobUrl(), null, null, null, null, requestConditions, null);
pollResponse.waitForCompletion(Duration.ofSeconds(COPY_TIMEOUT_SECONDS));
} catch (BlobStorageException e) {
if (e.getErrorCode().equals(BlobErrorCode.BLOB_NOT_FOUND)
&& !checkDirStatus(sourceBlob).equals(DirectoryStatus.DOES_NOT_EXIST)) {
/*
We already checked that the parent exists and validated the paths above, so we can put the blob
directly.
*/
putDirectoryBlob(destinationBlob, null, null, requestConditions);
} else {
throw Utility.logError(logger, new IOException(e));
}
} catch (RuntimeException e) {
throw Utility.logError(logger, new IOException(e));
}
}
/**
* This method will check if a directory is extant and/or empty and accommodates virtual directories. This method
* will not check the status of root directories.
*/
DirectoryStatus checkDirStatus(BlobClient dirBlobClient) throws IOException {
if (dirBlobClient == null) {
throw Utility.logError(logger, new IllegalArgumentException("The blob client was null."));
}
BlobContainerClient containerClient = getContainerClient(dirBlobClient);
ListBlobsOptions listOptions = new ListBlobsOptions().setMaxResultsPerPage(2)
.setPrefix(dirBlobClient.getBlobName())
.setDetails(new BlobListDetails().setRetrieveMetadata(true));
/*
Do a list on prefix.
Zero elements means no virtual dir. Does not exist.
One element that matches this dir means empty.
One element that doesn't match this dir or more than one element. Not empty.
One element that matches the name but does not have a directory marker means the resource is not a directory.
Note that blob names that match the prefix exactly are returned in listing operations.
*/
try {
Iterator<BlobItem> blobIterator = containerClient.listBlobsByHierarchy(AzureFileSystem.PATH_SEPARATOR,
listOptions, null).iterator();
if (!blobIterator.hasNext()) {
return DirectoryStatus.DOES_NOT_EXIST;
} else {
BlobItem item = blobIterator.next();
if (blobIterator.hasNext()) {
return DirectoryStatus.NOT_EMPTY;
}
if (!item.getName().equals(dirBlobClient.getBlobName())) {
/*
Names do not match. Must be a virtual dir with one item. e.g. blob with name "foo/bar" means dir "foo"
exists.
*/
return DirectoryStatus.NOT_EMPTY;
}
if (item.getMetadata() != null && item.getMetadata().containsKey(DIR_METADATA_MARKER)) {
return DirectoryStatus.EMPTY;
}
return DirectoryStatus.NOT_A_DIRECTORY;
}
} catch (BlobStorageException e) {
throw Utility.logError(logger, new IOException(e));
}
}
/*int checkRootDirStatus(BlobContainerClient rootClient) {
}*/
/**
* {@inheritDoc}
*/
@Override
public void move(Path path, Path path1, CopyOption... copyOptions) throws IOException {
}
/**
* {@inheritDoc}
*/
@Override
public boolean isSameFile(Path path, Path path1) throws IOException {
return false;
}
/**
* {@inheritDoc}
*/
@Override
public boolean isHidden(Path path) throws IOException {
return false;
}
/**
* {@inheritDoc}
*/
@Override
public FileStore getFileStore(Path path) throws IOException {
return null;
}
/**
* {@inheritDoc}
*/
@Override
public void checkAccess(Path path, AccessMode... accessModes) throws IOException {
}
/**
* {@inheritDoc}
*/
@Override
public <V extends FileAttributeView> V getFileAttributeView(Path path, Class<V> aClass, LinkOption... linkOptions) {
return null;
}
/**
* {@inheritDoc}
*/
@Override
public <A extends BasicFileAttributes> A readAttributes(Path path, Class<A> aClass, LinkOption... linkOptions)
throws IOException {
return null;
}
/**
* {@inheritDoc}
*/
@Override
public Map<String, Object> readAttributes(Path path, String s, LinkOption... linkOptions) throws IOException {
return null;
}
/**
* {@inheritDoc}
*/
@Override
public void setAttribute(Path path, String s, Object o, LinkOption... linkOptions) throws IOException {
}
void closeFileSystem(String fileSystemName) {
this.openFileSystems.remove(fileSystemName);
}
private String extractAccountName(URI uri) {
if (!uri.getScheme().equals(this.getScheme())) {
throw Utility.logError(this.logger, new IllegalArgumentException(
"URI scheme does not match this provider"));
}
if (CoreUtils.isNullOrEmpty(uri.getQuery())) {
throw Utility.logError(this.logger, new IllegalArgumentException("URI does not contain a query "
+ "component. FileSystems require a URI of the format \"azb:
}
String accountName = Flux.fromArray(uri.getQuery().split("&"))
.filter(s -> s.startsWith(ACCOUNT_QUERY_KEY + "="))
.switchIfEmpty(Mono.error(Utility.logError(this.logger, new IllegalArgumentException(
"URI does not contain an \"" + ACCOUNT_QUERY_KEY + "=\" parameter. FileSystems require a URI "
+ "of the format \"azb:
.map(s -> s.substring(ACCOUNT_QUERY_KEY.length() + 1))
.blockLast();
if (CoreUtils.isNullOrEmpty(accountName)) {
throw Utility.logError(logger, new IllegalArgumentException("No account name provided in URI query."));
}
return accountName;
}
private AzurePath validatePathInstanceType(Path path) {
if (!(path instanceof AzurePath)) {
throw Utility.logError(logger, new IllegalArgumentException("This provider cannot operate on subtypes of "
+ "Path other than AzurePath"));
}
return (AzurePath) path;
}
private void validateNotRoot(Path path, String operation) {
if (((AzurePath) path).isRoot()) {
throw Utility.logError(logger, new IllegalArgumentException(
String.format("%s is not supported on a root directory. Path: %s", operation, path.toString())));
}
}
} |
I suppose something could also happen between that check and the delete | public void delete(Path path) throws IOException {
AzurePath aPath = validatePathInstanceType(path);
validateNotRoot(path, "Delete");
BlobClient blobClient = aPath.toBlobClient();
DirectoryStatus dirStatus = checkDirStatus(blobClient);
if (dirStatus.equals(DirectoryStatus.DOES_NOT_EXIST)) {
throw Utility.logError(logger, new NoSuchFileException(path.toString()));
}
if (dirStatus.equals(DirectoryStatus.NOT_EMPTY)) {
throw Utility.logError(logger, new DirectoryNotEmptyException(path.toString()));
}
try {
blobClient.delete();
} catch (BlobStorageException e) {
throw Utility.logError(logger, new IOException(e));
}
} | throw Utility.logError(logger, new IOException(e)); | public void delete(Path path) throws IOException {
AzurePath aPath = validatePathInstanceType(path);
validateNotRoot(path, "Delete");
BlobClient blobClient = aPath.toBlobClient();
DirectoryStatus dirStatus = checkDirStatus(blobClient);
if (dirStatus.equals(DirectoryStatus.DOES_NOT_EXIST)) {
throw Utility.logError(logger, new NoSuchFileException(path.toString()));
}
if (dirStatus.equals(DirectoryStatus.NOT_EMPTY)) {
throw Utility.logError(logger, new DirectoryNotEmptyException(path.toString()));
}
try {
blobClient.delete();
} catch (BlobStorageException e) {
if (e.getErrorCode().equals(BlobErrorCode.BLOB_NOT_FOUND)) {
throw Utility.logError(logger, new NoSuchFileException(path.toString()));
}
throw Utility.logError(logger, new IOException(e));
}
} | class AzureFileSystemProvider extends FileSystemProvider {
private final ClientLogger logger = new ClientLogger(AzureFileSystemProvider.class);
public static final String CONTENT_TYPE = "Content-Type";
public static final String CONTENT_DISPOSITION = "Content-Disposition";
public static final String CONTENT_LANGUAGE = "Content-Language";
public static final String CONTENT_ENCODING = "Content-Encoding";
public static final String CONTENT_MD5 = "Content-MD5";
public static final String CACHE_CONTROL = "Cache-Control";
private static final String ACCOUNT_QUERY_KEY = "account";
private static final int COPY_TIMEOUT_SECONDS = 30;
static final String DIR_METADATA_MARKER = "is_hdi_folder";
private final ConcurrentMap<String, FileSystem> openFileSystems;
/**
* Creates an AzureFileSystemProvider.
*/
public AzureFileSystemProvider() {
this.openFileSystems = new ConcurrentHashMap<>();
}
/**
* Returns {@code "azb".}
*/
@Override
public String getScheme() {
return "azb";
}
/**
* The format of a {@code URI} identifying a file system is {@code "azb:
* <p>
* Once closed, a file system with the same identifier may be reopened.
* {@inheritDoc}
*/
@Override
public FileSystem newFileSystem(URI uri, Map<String, ?> config) throws IOException {
String accountName = extractAccountName(uri);
if (this.openFileSystems.containsKey(accountName)) {
throw Utility.logError(this.logger, new FileSystemAlreadyExistsException("Name: " + accountName));
}
AzureFileSystem afs = new AzureFileSystem(this, accountName, config);
this.openFileSystems.put(accountName, afs);
return afs;
}
/**
* The format of a {@code URI} identifying an file system is {@code "azb:
* <p>
* Trying to retrieve a closed file system will throw a {@link FileSystemNotFoundException}. Once closed, a
* file system with the same identifier may be reopened.
* {@inheritDoc}
*/
@Override
public FileSystem getFileSystem(URI uri) {
String accountName = extractAccountName(uri);
if (!this.openFileSystems.containsKey(accountName)) {
throw Utility.logError(this.logger, new FileSystemNotFoundException("Name: " + accountName));
}
return this.openFileSystems.get(accountName);
}
/**
* {@inheritDoc}
*/
@Override
public Path getPath(URI uri) {
return getFileSystem(uri).getPath(uri.getPath());
}
/**
* {@inheritDoc}
*/
@Override
public SeekableByteChannel newByteChannel(Path path, Set<? extends OpenOption> set,
FileAttribute<?>... fileAttributes) throws IOException {
return null;
}
/**
* {@inheritDoc}
*/
@Override
public DirectoryStream<Path> newDirectoryStream(Path path, DirectoryStream.Filter<? super Path> filter)
throws IOException {
return null;
}
/**
* The existence of a directory in the {@code AzureFileSystem} is defined on two levels. <i>Weak existence</i> is
* defined by the presence of a non-zero number of blobs prefixed with the directory's path. This concept is also
* known as a <i>virtual directory</i> and enables the file system to work with containers that were pre-loaded
* with data by another source but need to be accessed by this file system. <i>Strong existence</i> is defined as
* the presence of an actual storage resource at the given path, which in the case of directories, is a zero-length
* blob whose name is the directory path with a particular metadata field indicating the blob's status as a
* directory. This is also known as a <i>concrete directory</i>. Directories created by this file system will
* strongly exist. Operations targeting directories themselves as the object (e.g. setting properties) will target
* marker blobs underlying concrete directories. Other operations (e.g. listing) will operate on the blob-name
* prefix.
* <p>
* This method fulfills the nio contract of: "The check for the existence of the file and the creation of the
* directory if it does not exist are a single operation that is atomic with respect to all other filesystem
* activities that might affect the directory." More specifically, this method will atomically check for <i>strong
* existence</i> of another file or directory at the given path and fail if one is present. On the other hand, we
* only check for <i>weak existence</i> of the parent to determine if the given path is valid. Additionally, the
* action of checking whether the parent exists, is <i>not</i> atomic with the creation of the directory. Note that
* while it is possible that the parent may be deleted between when the parent is determined to exist and the
* creation of the child, the creation of the child will always ensure the existence of a virtual parent, so the
* child will never be left floating and unreachable. The different checks on parent and child is due to limitations
* in the Storage service API.
* <p>
* There may be some unintuitive behavior when working with directories in this file system, particularly virtual
* directories(usually those not created by this file system). A virtual directory will disappear as soon as all its
* children have been deleted. Furthermore, if a directory with the given path weakly exists at the time of calling
* this method, this method will still return success and create a concrete directory at the target location.
* In other words, it is possible to "double create" a directory if it first weakly exists and then is strongly
* created. This is both because it is impossible to atomically check if a virtual directory exists while creating a
* concrete directory and because such behavior will have minimal side effects--no files will be overwritten and the
* directory will still be available for writing as intended, though it may not be empty.
* <p>
* This method will attempt to extract standard HTTP content headers from the list of file attributes to set them
* as blob headers. All other attributes will be set as blob metadata. The value of every attribute will be
* converted to a {@code String} with the exception of the Content-MD5 attribute which expects a {@code byte[]}.
* When extracting the content headers, the following strings will be used for comparison (constants for these
* values can be found on this type):
* <ul>
* <li>{@code Content-Type}</li>
* <li>{@code Content-Disposition}</li>
* <li>{@code Content-Language}</li>
* <li>{@code Content-Encoding}</li>
* <li>{@code Content-MD5}</li>
* <li>{@code Cache-Control}</li>
* </ul>
* Note that these properties also have a particular semantic in that if one is specified, all are updated. In other
* words, if any of the above is set, all those that are not set will be cleared. See the
* <a href="https:
* information.
*
* {@inheritDoc}
*/
@Override
public void createDirectory(Path path, FileAttribute<?>... fileAttributes) throws IOException {
AzurePath aPath = validatePathInstanceType(path);
fileAttributes = fileAttributes == null ? new FileAttribute<?>[0] : fileAttributes;
BlobClient client = aPath.toBlobClient();
validateNotRoot(aPath, "Create");
if (checkParentDirectoryExists(aPath)) {
try {
List<FileAttribute<?>> attributeList = new ArrayList<>(Arrays.asList(fileAttributes));
BlobHttpHeaders headers = Utility.extractHttpHeaders(attributeList, logger);
Map<String, String> metadata = Utility.convertAttributesToMetadata(attributeList);
putDirectoryBlob(client, headers, metadata, new BlobRequestConditions().setIfNoneMatch("*"));
} catch (BlobStorageException e) {
if (e.getStatusCode() == HttpURLConnection.HTTP_CONFLICT
&& e.getErrorCode().equals(BlobErrorCode.BLOB_ALREADY_EXISTS)) {
throw Utility.logError(logger, new FileAlreadyExistsException(aPath.toString()));
} else {
throw Utility.logError(logger, new IOException("An error occured when creating the directory", e));
}
}
} else {
throw Utility.logError(logger, new IOException("Parent directory does not exist for path: "
+ aPath.toString()));
}
}
/**
* Creates the actual directory marker. This method should only be used when any necessary checks for proper
* conditions of directory creation (e.g. parent existence) have already been performed.
*
* @param destinationClient A blobClient pointing to the location where the directory should be put.
* @param headers Any headers that should be associated with the directory.
* @param metadata Any metadata that should be associated with the directory. This method will add the necessary
* metadata to distinguish this blob as a directory.
* @param requestConditions Any necessary request conditions to pass when creating the directory blob.
*/
private void putDirectoryBlob(BlobClient destinationClient, BlobHttpHeaders headers, Map<String, String> metadata,
BlobRequestConditions requestConditions) {
metadata = prepareMetadataForDirectory(metadata);
destinationClient.getBlockBlobClient().commitBlockListWithResponse(Collections.emptyList(), headers,
metadata, null, requestConditions, null, null);
}
/**
* Checks for the existence of the parent of the given path. We do not check for the actual marker blob as parents
* need only weakly exist.
*
* If the parent is a root (container), it will be assumed to exist, so it must be validated elsewhere that the
* container is a legitimate root within this file system.
*/
boolean checkParentDirectoryExists(Path path) throws IOException {
/*
If the parent is just the root (or null, which means the parent is implicitly the default directory which is a
root), that means we are checking a container, which is always considered to exist. Otherwise, perform normal
existence check.
*/
Path parent = path.getParent();
return (parent == null || parent.equals(path.getRoot()))
|| checkDirectoryExists(((AzurePath) path.getParent()).toBlobClient());
}
/**
* Checks whether a directory exists by either being empty or having children.
*/
boolean checkDirectoryExists(BlobClient dirBlobClient) throws IOException {
if (dirBlobClient == null) {
throw Utility.logError(logger, new IllegalArgumentException("One or both of the parameters was null."));
}
DirectoryStatus dirStatus = checkDirStatus(dirBlobClient);
return dirStatus.equals(DirectoryStatus.EMPTY) || dirStatus.equals(DirectoryStatus.NOT_EMPTY);
}
Map<String, String> prepareMetadataForDirectory(Map<String, String> metadata) {
if (metadata == null) {
metadata = new HashMap<>();
}
metadata.put(DIR_METADATA_MARKER, "true");
return metadata;
}
BlobContainerClient getContainerClient(BlobClient client) {
return new BlobContainerClientBuilder().endpoint(client.getBlobUrl())
.pipeline(client.getHttpPipeline())
.buildClient();
}
/**
* As noted by the NIO docs, this method is not atomic. It is possible to delete a file in use by another process,
* and doing so will not immediately invalidate any channels open to that file--they will simply start to fail.
* Root directories cannot be deleted even when empty.
*
* {@inheritDoc}
*/
@Override
/**
* As stated in the nio docs, this method is not atomic. More specifically, the checks necessary to validate the
* inputs and state of the file system are not atomic with the actual copying of data. If the copy is triggered,
* the copy itself is atomic and only a complete copy will ever be left at the destination.
*
* In addition to those in the nio javadocs, this method has the following requirements for successful completion.
* {@link StandardCopyOption
* if this option is not passed, an {@link UnsupportedOperationException} will be thrown. Neither the source nor the
* destination can be a root directory; if either is a root directory, an {@link IllegalArgumentException} will be
* thrown. The parent directory of the destination must at least weakly exist; if it does not, an
* {@link IOException} will be thrown. The only supported option other than
* {@link StandardCopyOption
* other option will result in an {@link UnsupportedOperationException}.
*
* This method supports both virtual and concrete directories as both the source and destination. Unlike when
* creating a directory, the existence of a virtual directory at the destination will cause this operation to fail.
* This is in order to prevent the possibility of overwriting a non-empty virtual directory with a file. Still, as
* mentioned above, this check is not atomic with the creation of the resultant directory.
*
* {@inheritDoc}
* @see
*/
@Override
public void copy(Path source, Path destination, CopyOption... copyOptions) throws IOException {
    AzurePath aSource = validatePathInstanceType(source);
    AzurePath aDestination = validatePathInstanceType(destination);
    // Copying a path onto itself is a no-op.
    if (aSource.equals(aDestination)) {
        return;
    }
    boolean replaceExisting = false;
    List<CopyOption> optionsList = new ArrayList<>(Arrays.asList(copyOptions));
    if (!optionsList.contains(StandardCopyOption.COPY_ATTRIBUTES)) {
        throw Utility.logError(logger, new UnsupportedOperationException("StandardCopyOption.COPY_ATTRIBUTES "
            + "must be specified as the service will always copy file attributes."));
    }
    optionsList.remove(StandardCopyOption.COPY_ATTRIBUTES);
    if (optionsList.contains(StandardCopyOption.REPLACE_EXISTING)) {
        replaceExisting = true;
        optionsList.remove(StandardCopyOption.REPLACE_EXISTING);
    }
    if (!optionsList.isEmpty()) {
        // Error-message typo fixed: "StandareCopyOption" -> "StandardCopyOption".
        throw Utility.logError(logger, new UnsupportedOperationException("Unsupported copy option found. Only "
            + "StandardCopyOption.COPY_ATTRIBUTES and StandardCopyOption.REPLACE_EXISTING are supported."));
    }
    validateNotRoot(aSource, "Copy source");
    validateNotRoot(aDestination, "Copy destination");
    BlobClient sourceBlob = aSource.toBlobClient();
    BlobClient destinationBlob = aDestination.toBlobClient();
    DirectoryStatus destinationStatus = checkDirStatus(destinationBlob);
    if (destinationStatus.equals(DirectoryStatus.NOT_EMPTY)) {
        throw Utility.logError(logger, new DirectoryNotEmptyException(aDestination.toString()));
    }
    /*
    Set request conditions if we should not overwrite. We can error out here if we know something already exists,
    but we will also create request conditions as a safeguard against overwriting something that was created
    between our check and put.
    */
    BlobRequestConditions requestConditions = null;
    if (!replaceExisting) {
        if (!destinationStatus.equals(DirectoryStatus.DOES_NOT_EXIST)) {
            throw Utility.logError(logger, new FileAlreadyExistsException(aDestination.toString()));
        }
        requestConditions = new BlobRequestConditions().setIfNoneMatch("*");
    }
    /*
    More path validation
    Check that the parent for the destination exists. We only need to perform this check if there is nothing
    currently at the destination, for if the destination exists, its parent at least weakly exists and we
    can skip a service call.
    */
    if (destinationStatus.equals(DirectoryStatus.DOES_NOT_EXIST) && !checkParentDirectoryExists(aDestination)) {
        throw Utility.logError(logger, new IOException("Parent directory of destination location does not exist."
            + "The destination path is therefore invalid. Destination: " + aDestination.toString()));
    }
    /*
    Try to copy the resource at the source path.
    There is an optimization here where we try to do the copy first and only check for a virtual directory if
    there's a 404. In the cases of files and concrete directories, this only requires one request. For virtual
    directories, however, this requires three requests: failed copy, check status, create directory. Depending on
    customer scenarios and how many virtual directories they copy, it could be better to check the directory status
    first and then do a copy or createDir, which would always be two requests for all resource types.
    */
    try {
        SyncPoller<BlobCopyInfo, Void> pollResponse =
            destinationBlob.beginCopy(sourceBlob.getBlobUrl(), null, null, null, null, requestConditions, null);
        pollResponse.waitForCompletion(Duration.ofSeconds(COPY_TIMEOUT_SECONDS));
    } catch (BlobStorageException e) {
        if (e.getErrorCode().equals(BlobErrorCode.BLOB_NOT_FOUND)
            && !checkDirStatus(sourceBlob).equals(DirectoryStatus.DOES_NOT_EXIST)) {
            /*
            We already checked that the parent exists and validated the paths above, so we can put the blob
            directly.
            */
            putDirectoryBlob(destinationBlob, null, null, requestConditions);
        } else {
            throw Utility.logError(logger, new IOException(e));
        }
    } catch (RuntimeException e) {
        throw Utility.logError(logger, new IOException(e));
    }
}
/**
 * This method will check if a directory is extant and/or empty and accommodates virtual directories. This method
 * will not check the status of root directories.
 *
 * @param dirBlobClient client pointing at the location whose directory status is queried; must not be null
 * @return DOES_NOT_EXIST, EMPTY, NOT_EMPTY, or NOT_A_DIRECTORY
 * @throws IOException if the service listing call fails
 */
DirectoryStatus checkDirStatus(BlobClient dirBlobClient) throws IOException {
    if (dirBlobClient == null) {
        throw Utility.logError(logger, new IllegalArgumentException("The blob client was null."));
    }
    BlobContainerClient containerClient = getContainerClient(dirBlobClient);
    // Two results suffice to distinguish "empty" from "not empty"; metadata is needed for the marker check below.
    ListBlobsOptions listOptions = new ListBlobsOptions().setMaxResultsPerPage(2)
        .setPrefix(dirBlobClient.getBlobName())
        .setDetails(new BlobListDetails().setRetrieveMetadata(true));
    /*
    Do a list on prefix.
    Zero elements means no virtual dir. Does not exist.
    One element that matches this dir means empty.
    One element that doesn't match this dir or more than one element. Not empty.
    One element that matches the name but does not have a directory marker means the resource is not a directory.
    Note that blob names that match the prefix exactly are returned in listing operations.
    */
    try {
        Iterator<BlobItem> blobIterator = containerClient.listBlobsByHierarchy(AzureFileSystem.PATH_SEPARATOR,
            listOptions, null).iterator();
        if (!blobIterator.hasNext()) {
            return DirectoryStatus.DOES_NOT_EXIST;
        } else {
            BlobItem item = blobIterator.next();
            if (blobIterator.hasNext()) {
                // At least two entries under the prefix: definitely a non-empty directory.
                return DirectoryStatus.NOT_EMPTY;
            }
            if (!item.getName().equals(dirBlobClient.getBlobName())) {
                /*
                Names do not match. Must be a virtual dir with one item. e.g. blob with name "foo/bar" means dir "foo"
                exists.
                */
                return DirectoryStatus.NOT_EMPTY;
            }
            // Exact-name match: the marker metadata decides directory vs. ordinary blob.
            if (item.getMetadata() != null && item.getMetadata().containsKey(DIR_METADATA_MARKER)) {
                return DirectoryStatus.EMPTY;
            }
            return DirectoryStatus.NOT_A_DIRECTORY;
        }
    } catch (BlobStorageException e) {
        throw Utility.logError(logger, new IOException(e));
    }
}
/*int checkRootDirStatus(BlobContainerClient rootClient) {
}*/
/**
* {@inheritDoc}
*/
@Override
public void move(Path path, Path path1, CopyOption... copyOptions) throws IOException {
    // Not yet implemented: currently a silent no-op. TODO(review): implement or throw UnsupportedOperationException.
}
/**
* {@inheritDoc}
*/
@Override
public boolean isSameFile(Path path, Path path1) throws IOException {
    // Not yet implemented: always reports false, even for equal paths. TODO(review): implement.
    return false;
}
/**
* {@inheritDoc}
*/
@Override
public boolean isHidden(Path path) throws IOException {
    // Not yet implemented: nothing is ever reported hidden. TODO(review): implement.
    return false;
}
/**
* {@inheritDoc}
*/
@Override
public FileStore getFileStore(Path path) throws IOException {
    // Not yet implemented: returns null, which violates the FileSystemProvider contract. TODO(review): implement.
    return null;
}
/**
* {@inheritDoc}
*/
@Override
public void checkAccess(Path path, AccessMode... accessModes) throws IOException {
    // Not yet implemented: access is never denied. TODO(review): implement.
}
/**
* {@inheritDoc}
*/
@Override
public <V extends FileAttributeView> V getFileAttributeView(Path path, Class<V> aClass, LinkOption... linkOptions) {
    // Not yet implemented: no attribute views are supported yet, so this always returns null. TODO(review): implement.
    return null;
}
/**
* {@inheritDoc}
*/
@Override
public <A extends BasicFileAttributes> A readAttributes(Path path, Class<A> aClass, LinkOption... linkOptions)
    throws IOException {
    // Not yet implemented: returns null. TODO(review): implement.
    return null;
}
/**
* {@inheritDoc}
*/
@Override
public Map<String, Object> readAttributes(Path path, String s, LinkOption... linkOptions) throws IOException {
    // Not yet implemented: returns null instead of an attribute map. TODO(review): implement.
    return null;
}
/**
* {@inheritDoc}
*/
@Override
public void setAttribute(Path path, String s, Object o, LinkOption... linkOptions) throws IOException {
    // Not yet implemented: attribute writes are silently ignored. TODO(review): implement.
}
void closeFileSystem(String fileSystemName) {
    // Remove from the open set so a file system with the same identifier may be reopened.
    this.openFileSystems.remove(fileSystemName);
}
/**
 * Extracts the storage account name from the "account=" parameter of the URI's query component.
 * NOTE(review): the two format-hint string literals below appear truncated ("\"azb:") in this copy of the
 * source; verify them against the canonical file before shipping.
 *
 * @param uri the file-system URI; must use this provider's scheme and carry an account query parameter
 * @return the non-empty account name
 */
private String extractAccountName(URI uri) {
    // The URI must use this provider's scheme ("azb").
    if (!uri.getScheme().equals(this.getScheme())) {
        throw Utility.logError(this.logger, new IllegalArgumentException(
            "URI scheme does not match this provider"));
    }
    if (CoreUtils.isNullOrEmpty(uri.getQuery())) {
        throw Utility.logError(this.logger, new IllegalArgumentException("URI does not contain a query "
            + "component. FileSystems require a URI of the format \"azb:
    }
    // Scan the query parameters for the first "account=" entry; error if none is present.
    String accountName = Flux.fromArray(uri.getQuery().split("&"))
        .filter(s -> s.startsWith(ACCOUNT_QUERY_KEY + "="))
        .switchIfEmpty(Mono.error(Utility.logError(this.logger, new IllegalArgumentException(
            "URI does not contain an \"" + ACCOUNT_QUERY_KEY + "=\" parameter. FileSystems require a URI "
            + "of the format \"azb:
        .map(s -> s.substring(ACCOUNT_QUERY_KEY.length() + 1))
        .blockLast();
    if (CoreUtils.isNullOrEmpty(accountName)) {
        throw Utility.logError(logger, new IllegalArgumentException("No account name provided in URI query."));
    }
    return accountName;
}
/**
 * Ensures the given path was produced by this provider and narrows it to AzurePath.
 */
private AzurePath validatePathInstanceType(Path path) {
    if (path instanceof AzurePath) {
        return (AzurePath) path;
    }
    throw Utility.logError(logger, new IllegalArgumentException("This provider cannot operate on subtypes of "
        + "Path other than AzurePath"));
}
/**
 * Rejects operations that target a root directory (container).
 */
private void validateNotRoot(Path path, String operation) {
    if (!((AzurePath) path).isRoot()) {
        return;
    }
    throw Utility.logError(logger, new IllegalArgumentException(
        String.format("%s is not supported on a root directory. Path: %s", operation, path.toString())));
}
} | class AzureFileSystemProvider extends FileSystemProvider {
private final ClientLogger logger = new ClientLogger(AzureFileSystemProvider.class);
public static final String CONTENT_TYPE = "Content-Type";
public static final String CONTENT_DISPOSITION = "Content-Disposition";
public static final String CONTENT_LANGUAGE = "Content-Language";
public static final String CONTENT_ENCODING = "Content-Encoding";
public static final String CONTENT_MD5 = "Content-MD5";
public static final String CACHE_CONTROL = "Cache-Control";
private static final String ACCOUNT_QUERY_KEY = "account";
private static final int COPY_TIMEOUT_SECONDS = 30;
static final String DIR_METADATA_MARKER = "is_hdi_folder";
private final ConcurrentMap<String, FileSystem> openFileSystems;
/**
* Creates an AzureFileSystemProvider.
*/
public AzureFileSystemProvider() {
    // ConcurrentHashMap: the open-file-system registry may be touched from multiple threads.
    this.openFileSystems = new ConcurrentHashMap<>();
}
/**
* Returns {@code "azb".}
*/
@Override
public String getScheme() {
    return "azb"; // the URI scheme identifying this provider
}
/**
* The format of a {@code URI} identifying a file system is {@code "azb:
* <p>
* Once closed, a file system with the same identifier may be reopened.
* {@inheritDoc}
*/
@Override
public FileSystem newFileSystem(URI uri, Map<String, ?> config) throws IOException {
    String accountName = extractAccountName(uri);
    // Fast-path rejection before constructing the (potentially expensive) file system.
    if (this.openFileSystems.containsKey(accountName)) {
        throw Utility.logError(this.logger, new FileSystemAlreadyExistsException("Name: " + accountName));
    }
    AzureFileSystem afs = new AzureFileSystem(this, accountName, config);
    // putIfAbsent closes the race where two threads pass the containsKey check concurrently;
    // exactly one registration wins and the loser surfaces the same "already exists" error.
    if (this.openFileSystems.putIfAbsent(accountName, afs) != null) {
        throw Utility.logError(this.logger, new FileSystemAlreadyExistsException("Name: " + accountName));
    }
    return afs;
}
/**
* The format of a {@code URI} identifying an file system is {@code "azb:
* <p>
* Trying to retrieve a closed file system will throw a {@link FileSystemNotFoundException}. Once closed, a
* file system with the same identifier may be reopened.
* {@inheritDoc}
*/
@Override
public FileSystem getFileSystem(URI uri) {
    String accountName = extractAccountName(uri);
    // Single atomic lookup: avoids the containsKey/get race where the file system is closed
    // (removed) between the membership check and the retrieval.
    FileSystem fileSystem = this.openFileSystems.get(accountName);
    if (fileSystem == null) {
        throw Utility.logError(this.logger, new FileSystemNotFoundException("Name: " + accountName));
    }
    return fileSystem;
}
/**
* {@inheritDoc}
*/
@Override
public Path getPath(URI uri) {
    // Resolve the owning file system first, then delegate path parsing to it.
    FileSystem fileSystem = getFileSystem(uri);
    return fileSystem.getPath(uri.getPath());
}
/**
* {@inheritDoc}
*/
@Override
public SeekableByteChannel newByteChannel(Path path, Set<? extends OpenOption> set,
    FileAttribute<?>... fileAttributes) throws IOException {
    // Not yet implemented: returns null. TODO(review): implement channel support.
    return null;
}
/**
* {@inheritDoc}
*/
@Override
public DirectoryStream<Path> newDirectoryStream(Path path, DirectoryStream.Filter<? super Path> filter)
    throws IOException {
    // Not yet implemented: returns null. TODO(review): implement listing support.
    return null;
}
/**
* Creates a new directory at the specified path.
* <p>
* The existence of a directory in the {@code AzureFileSystem} is defined on two levels. <i>Weak existence</i> is
* defined by the presence of a non-zero number of blobs prefixed with the directory's path. This concept is also
* known as a <i>virtual directory</i> and enables the file system to work with containers that were pre-loaded
* with data by another source but need to be accessed by this file system. <i>Strong existence</i> is defined as
* the presence of an actual storage resource at the given path, which in the case of directories, is a zero-length
* blob whose name is the directory path with a particular metadata field indicating the blob's status as a
* directory. This is also known as a <i>concrete directory</i>. Directories created by this file system will
* strongly exist. Operations targeting directories themselves as the object (e.g. setting properties) will target
* marker blobs underlying concrete directories. Other operations (e.g. listing) will operate on the blob-name
* prefix.
* <p>
* This method fulfills the nio contract of: "The check for the existence of the file and the creation of the
* directory if it does not exist are a single operation that is atomic with respect to all other filesystem
* activities that might affect the directory." More specifically, this method will atomically check for <i>strong
* existence</i> of another file or directory at the given path and fail if one is present. On the other hand, we
* only check for <i>weak existence</i> of the parent to determine if the given path is valid. Additionally, the
* action of checking whether the parent exists, is <i>not</i> atomic with the creation of the directory. Note that
* while it is possible that the parent may be deleted between when the parent is determined to exist and the
* creation of the child, the creation of the child will always ensure the existence of a virtual parent, so the
* child will never be left floating and unreachable. The different checks on parent and child is due to limitations
* in the Storage service API.
* <p>
* There may be some unintuitive behavior when working with directories in this file system, particularly virtual
* directories(usually those not created by this file system). A virtual directory will disappear as soon as all its
* children have been deleted. Furthermore, if a directory with the given path weakly exists at the time of calling
* this method, this method will still return success and create a concrete directory at the target location.
* In other words, it is possible to "double create" a directory if it first weakly exists and then is strongly
* created. This is both because it is impossible to atomically check if a virtual directory exists while creating a
* concrete directory and because such behavior will have minimal side effects--no files will be overwritten and the
* directory will still be available for writing as intended, though it may not be empty.
* <p>
* This method will attempt to extract standard HTTP content headers from the list of file attributes to set them
* as blob headers. All other attributes will be set as blob metadata. The value of every attribute will be
* converted to a {@code String} with the exception of the Content-MD5 attribute which expects a {@code byte[]}.
* When extracting the content headers, the following strings will be used for comparison (constants for these
* values can be found on this type):
* <ul>
* <li>{@code Content-Type}</li>
* <li>{@code Content-Disposition}</li>
* <li>{@code Content-Language}</li>
* <li>{@code Content-Encoding}</li>
* <li>{@code Content-MD5}</li>
* <li>{@code Cache-Control}</li>
* </ul>
* Note that these properties also have a particular semantic in that if one is specified, all are updated. In other
* words, if any of the above is set, all those that are not set will be cleared. See the
* <a href="https:
* information.
*
* {@inheritDoc}
*/
@Override
public void createDirectory(Path path, FileAttribute<?>... fileAttributes) throws IOException {
    AzurePath aPath = validatePathInstanceType(path);
    // Normalize a null varargs array so the attribute extraction below never NPEs.
    fileAttributes = fileAttributes == null ? new FileAttribute<?>[0] : fileAttributes;
    BlobClient client = aPath.toBlobClient();
    validateNotRoot(aPath, "Create");
    // Guard clause: the parent must at least weakly exist for the path to be valid.
    if (!checkParentDirectoryExists(aPath)) {
        throw Utility.logError(logger, new IOException("Parent directory does not exist for path: "
            + aPath.toString()));
    }
    try {
        List<FileAttribute<?>> attributeList = new ArrayList<>(Arrays.asList(fileAttributes));
        BlobHttpHeaders headers = Utility.extractHttpHeaders(attributeList, logger);
        Map<String, String> metadata = Utility.convertAttributesToMetadata(attributeList);
        // If-None-Match: * makes the existence check and the creation atomic on the service side.
        putDirectoryBlob(client, headers, metadata, new BlobRequestConditions().setIfNoneMatch("*"));
    } catch (BlobStorageException e) {
        if (e.getStatusCode() == HttpURLConnection.HTTP_CONFLICT
            && e.getErrorCode().equals(BlobErrorCode.BLOB_ALREADY_EXISTS)) {
            throw Utility.logError(logger, new FileAlreadyExistsException(aPath.toString()));
        } else {
            // Error-message typo fixed: "occured" -> "occurred".
            throw Utility.logError(logger, new IOException("An error occurred when creating the directory", e));
        }
    }
}
/**
* Creates the actual directory marker. This method should only be used when any necessary checks for proper
* conditions of directory creation (e.g. parent existence) have already been performed.
*
* @param destinationClient A blobClient pointing to the location where the directory should be put.
* @param headers Any headers that should be associated with the directory.
* @param metadata Any metadata that should be associated with the directory. This method will add the necessary
* metadata to distinguish this blob as a directory.
* @param requestConditions Any necessary request conditions to pass when creating the directory blob.
*/
private void putDirectoryBlob(BlobClient destinationClient, BlobHttpHeaders headers, Map<String, String> metadata,
    BlobRequestConditions requestConditions) {
    // An empty block list commits a zero-length blob; the marker metadata flags it as a directory.
    Map<String, String> dirMetadata = prepareMetadataForDirectory(metadata);
    destinationClient.getBlockBlobClient().commitBlockListWithResponse(Collections.emptyList(), headers,
        dirMetadata, null, requestConditions, null, null);
}
/**
 * Checks for the existence of the parent of the given path. We do not check for the actual marker blob as parents
 * need only weakly exist.
 *
 * If the parent is a root (container), it will be assumed to exist, so it must be validated elsewhere that the
 * container is a legitimate root within this file system.
 *
 * @param path the path whose parent should be checked
 * @return true if the parent is a root or (weakly) exists; false otherwise
 * @throws IOException if the underlying existence check against the service fails
 */
boolean checkParentDirectoryExists(Path path) throws IOException {
    /*
    If the parent is just the root (or null, which means the parent is implicitly the default directory which is a
    root), that means we are checking a container, which is always considered to exist. Otherwise, perform normal
    existence check.
    */
    Path parent = path.getParent();
    if (parent == null || parent.equals(path.getRoot())) {
        return true;
    }
    // Reuse the parent computed above instead of calling path.getParent() a second time.
    return checkDirectoryExists(((AzurePath) parent).toBlobClient());
}
/**
 * Reports whether a directory (concrete or virtual) exists at the given location. A directory exists exactly
 * when its status is either empty or non-empty.
 */
boolean checkDirectoryExists(BlobClient dirBlobClient) throws IOException {
    if (dirBlobClient == null) {
        throw Utility.logError(logger, new IllegalArgumentException("One or both of the parameters was null."));
    }
    DirectoryStatus status = checkDirStatus(dirBlobClient);
    return DirectoryStatus.EMPTY.equals(status) || DirectoryStatus.NOT_EMPTY.equals(status);
}
/**
 * Returns a metadata map (allocating one if the argument is null) with the directory marker entry set.
 */
Map<String, String> prepareMetadataForDirectory(Map<String, String> metadata) {
    Map<String, String> result = (metadata == null) ? new HashMap<>() : metadata;
    result.put(DIR_METADATA_MARKER, "true");
    return result;
}
/**
 * Builds a container client that shares the endpoint and HTTP pipeline of the given blob client.
 */
BlobContainerClient getContainerClient(BlobClient client) {
    BlobContainerClientBuilder builder = new BlobContainerClientBuilder()
        .pipeline(client.getHttpPipeline())
        .endpoint(client.getBlobUrl());
    return builder.buildClient();
}
/**
* Deletes the specified resource.
* <p>
* This method is not atomic. It is possible to delete a file in use by another process,
* and doing so will not immediately invalidate any channels open to that file--they will simply start to fail.
* Root directories cannot be deleted even when empty.
*
* {@inheritDoc}
*/
@Override
/**
* Copies the resource at the source location to the destination.
* <p>
* This method is not atomic. More specifically, the checks necessary to validate the
* inputs and state of the file system are not atomic with the actual copying of data. If the copy is triggered,
* the copy itself is atomic and only a complete copy will ever be left at the destination.
*
* In addition to those in the nio javadocs, this method has the following requirements for successful completion.
* {@link StandardCopyOption
* if this option is not passed, an {@link UnsupportedOperationException} will be thrown. Neither the source nor the
* destination can be a root directory; if either is a root directory, an {@link IllegalArgumentException} will be
* thrown. The parent directory of the destination must at least weakly exist; if it does not, an
* {@link IOException} will be thrown. The only supported option other than
* {@link StandardCopyOption
* other option will result in an {@link UnsupportedOperationException}.
*
* This method supports both virtual and concrete directories as both the source and destination. Unlike when
* creating a directory, the existence of a virtual directory at the destination will cause this operation to fail.
* This is in order to prevent the possibility of overwriting a non-empty virtual directory with a file. Still, as
* mentioned above, this check is not atomic with the creation of the resultant directory.
*
* {@inheritDoc}
* @see
*/
@Override
public void copy(Path source, Path destination, CopyOption... copyOptions) throws IOException {
    AzurePath aSource = validatePathInstanceType(source);
    AzurePath aDestination = validatePathInstanceType(destination);
    // Copying a path onto itself is a no-op.
    if (aSource.equals(aDestination)) {
        return;
    }
    boolean replaceExisting = false;
    List<CopyOption> optionsList = new ArrayList<>(Arrays.asList(copyOptions));
    if (!optionsList.contains(StandardCopyOption.COPY_ATTRIBUTES)) {
        throw Utility.logError(logger, new UnsupportedOperationException("StandardCopyOption.COPY_ATTRIBUTES "
            + "must be specified as the service will always copy file attributes."));
    }
    optionsList.remove(StandardCopyOption.COPY_ATTRIBUTES);
    if (optionsList.contains(StandardCopyOption.REPLACE_EXISTING)) {
        replaceExisting = true;
        optionsList.remove(StandardCopyOption.REPLACE_EXISTING);
    }
    if (!optionsList.isEmpty()) {
        // Error-message typo fixed: "StandareCopyOption" -> "StandardCopyOption".
        throw Utility.logError(logger, new UnsupportedOperationException("Unsupported copy option found. Only "
            + "StandardCopyOption.COPY_ATTRIBUTES and StandardCopyOption.REPLACE_EXISTING are supported."));
    }
    validateNotRoot(aSource, "Copy source");
    validateNotRoot(aDestination, "Copy destination");
    BlobClient sourceBlob = aSource.toBlobClient();
    BlobClient destinationBlob = aDestination.toBlobClient();
    DirectoryStatus destinationStatus = checkDirStatus(destinationBlob);
    if (destinationStatus.equals(DirectoryStatus.NOT_EMPTY)) {
        throw Utility.logError(logger, new DirectoryNotEmptyException(aDestination.toString()));
    }
    /*
    Set request conditions if we should not overwrite. We can error out here if we know something already exists,
    but we will also create request conditions as a safeguard against overwriting something that was created
    between our check and put.
    */
    BlobRequestConditions requestConditions = null;
    if (!replaceExisting) {
        if (!destinationStatus.equals(DirectoryStatus.DOES_NOT_EXIST)) {
            throw Utility.logError(logger, new FileAlreadyExistsException(aDestination.toString()));
        }
        requestConditions = new BlobRequestConditions().setIfNoneMatch("*");
    }
    /*
    More path validation
    Check that the parent for the destination exists. We only need to perform this check if there is nothing
    currently at the destination, for if the destination exists, its parent at least weakly exists and we
    can skip a service call.
    */
    if (destinationStatus.equals(DirectoryStatus.DOES_NOT_EXIST) && !checkParentDirectoryExists(aDestination)) {
        throw Utility.logError(logger, new IOException("Parent directory of destination location does not exist."
            + "The destination path is therefore invalid. Destination: " + aDestination.toString()));
    }
    /*
    Try to copy the resource at the source path.
    There is an optimization here where we try to do the copy first and only check for a virtual directory if
    there's a 404. In the cases of files and concrete directories, this only requires one request. For virtual
    directories, however, this requires three requests: failed copy, check status, create directory. Depending on
    customer scenarios and how many virtual directories they copy, it could be better to check the directory status
    first and then do a copy or createDir, which would always be two requests for all resource types.
    */
    try {
        SyncPoller<BlobCopyInfo, Void> pollResponse =
            destinationBlob.beginCopy(sourceBlob.getBlobUrl(), null, null, null, null, requestConditions, null);
        pollResponse.waitForCompletion(Duration.ofSeconds(COPY_TIMEOUT_SECONDS));
    } catch (BlobStorageException e) {
        if (e.getErrorCode().equals(BlobErrorCode.BLOB_NOT_FOUND)
            && !checkDirStatus(sourceBlob).equals(DirectoryStatus.DOES_NOT_EXIST)) {
            /*
            We already checked that the parent exists and validated the paths above, so we can put the blob
            directly.
            */
            putDirectoryBlob(destinationBlob, null, null, requestConditions);
        } else {
            throw Utility.logError(logger, new IOException(e));
        }
    } catch (RuntimeException e) {
        throw Utility.logError(logger, new IOException(e));
    }
}
/**
 * This method will check if a directory is extant and/or empty and accommodates virtual directories. This method
 * will not check the status of root directories.
 *
 * @param dirBlobClient client pointing at the location whose directory status is queried; must not be null
 * @return DOES_NOT_EXIST, EMPTY, NOT_EMPTY, or NOT_A_DIRECTORY
 * @throws IOException if the service listing call fails
 */
DirectoryStatus checkDirStatus(BlobClient dirBlobClient) throws IOException {
    if (dirBlobClient == null) {
        throw Utility.logError(logger, new IllegalArgumentException("The blob client was null."));
    }
    BlobContainerClient containerClient = getContainerClient(dirBlobClient);
    // Two results suffice to distinguish "empty" from "not empty"; metadata is needed for the marker check below.
    ListBlobsOptions listOptions = new ListBlobsOptions().setMaxResultsPerPage(2)
        .setPrefix(dirBlobClient.getBlobName())
        .setDetails(new BlobListDetails().setRetrieveMetadata(true));
    /*
    Do a list on prefix.
    Zero elements means no virtual dir. Does not exist.
    One element that matches this dir means empty.
    One element that doesn't match this dir or more than one element. Not empty.
    One element that matches the name but does not have a directory marker means the resource is not a directory.
    Note that blob names that match the prefix exactly are returned in listing operations.
    */
    try {
        Iterator<BlobItem> blobIterator = containerClient.listBlobsByHierarchy(AzureFileSystem.PATH_SEPARATOR,
            listOptions, null).iterator();
        if (!blobIterator.hasNext()) {
            return DirectoryStatus.DOES_NOT_EXIST;
        } else {
            BlobItem item = blobIterator.next();
            if (blobIterator.hasNext()) {
                // At least two entries under the prefix: definitely a non-empty directory.
                return DirectoryStatus.NOT_EMPTY;
            }
            if (!item.getName().equals(dirBlobClient.getBlobName())) {
                /*
                Names do not match. Must be a virtual dir with one item. e.g. blob with name "foo/bar" means dir "foo"
                exists.
                */
                return DirectoryStatus.NOT_EMPTY;
            }
            // Exact-name match: the marker metadata decides directory vs. ordinary blob.
            if (item.getMetadata() != null && item.getMetadata().containsKey(DIR_METADATA_MARKER)) {
                return DirectoryStatus.EMPTY;
            }
            return DirectoryStatus.NOT_A_DIRECTORY;
        }
    } catch (BlobStorageException e) {
        throw Utility.logError(logger, new IOException(e));
    }
}
/*int checkRootDirStatus(BlobContainerClient rootClient) {
}*/
/**
* {@inheritDoc}
*/
@Override
public void move(Path path, Path path1, CopyOption... copyOptions) throws IOException {
    // Not yet implemented: currently a silent no-op. TODO(review): implement or throw UnsupportedOperationException.
}
/**
* {@inheritDoc}
*/
@Override
public boolean isSameFile(Path path, Path path1) throws IOException {
    // Not yet implemented: always reports false, even for equal paths. TODO(review): implement.
    return false;
}
/**
* {@inheritDoc}
*/
@Override
public boolean isHidden(Path path) throws IOException {
    // Not yet implemented: nothing is ever reported hidden. TODO(review): implement.
    return false;
}
/**
* {@inheritDoc}
*/
@Override
public FileStore getFileStore(Path path) throws IOException {
    // Not yet implemented: returns null, which violates the FileSystemProvider contract. TODO(review): implement.
    return null;
}
/**
* {@inheritDoc}
*/
@Override
public void checkAccess(Path path, AccessMode... accessModes) throws IOException {
    // Not yet implemented: access is never denied. TODO(review): implement.
}
/**
* {@inheritDoc}
*/
@Override
public <V extends FileAttributeView> V getFileAttributeView(Path path, Class<V> aClass, LinkOption... linkOptions) {
    // Not yet implemented: no attribute views are supported yet, so this always returns null. TODO(review): implement.
    return null;
}
/**
* {@inheritDoc}
*/
@Override
public <A extends BasicFileAttributes> A readAttributes(Path path, Class<A> aClass, LinkOption... linkOptions)
    throws IOException {
    // Not yet implemented: returns null. TODO(review): implement.
    return null;
}
/**
* {@inheritDoc}
*/
@Override
public Map<String, Object> readAttributes(Path path, String s, LinkOption... linkOptions) throws IOException {
    // Not yet implemented: returns null instead of an attribute map. TODO(review): implement.
    return null;
}
/**
* {@inheritDoc}
*/
@Override
public void setAttribute(Path path, String s, Object o, LinkOption... linkOptions) throws IOException {
    // Not yet implemented: attribute writes are silently ignored. TODO(review): implement.
}
void closeFileSystem(String fileSystemName) {
    // Remove from the open set so a file system with the same identifier may be reopened.
    this.openFileSystems.remove(fileSystemName);
}
/**
 * Extracts the storage account name from the "account=" parameter of the URI's query component.
 * NOTE(review): the two format-hint string literals below appear truncated ("\"azb:") in this copy of the
 * source; verify them against the canonical file before shipping.
 *
 * @param uri the file-system URI; must use this provider's scheme and carry an account query parameter
 * @return the non-empty account name
 */
private String extractAccountName(URI uri) {
    // The URI must use this provider's scheme ("azb").
    if (!uri.getScheme().equals(this.getScheme())) {
        throw Utility.logError(this.logger, new IllegalArgumentException(
            "URI scheme does not match this provider"));
    }
    if (CoreUtils.isNullOrEmpty(uri.getQuery())) {
        throw Utility.logError(this.logger, new IllegalArgumentException("URI does not contain a query "
            + "component. FileSystems require a URI of the format \"azb:
    }
    // Scan the query parameters for the first "account=" entry; error if none is present.
    String accountName = Flux.fromArray(uri.getQuery().split("&"))
        .filter(s -> s.startsWith(ACCOUNT_QUERY_KEY + "="))
        .switchIfEmpty(Mono.error(Utility.logError(this.logger, new IllegalArgumentException(
            "URI does not contain an \"" + ACCOUNT_QUERY_KEY + "=\" parameter. FileSystems require a URI "
            + "of the format \"azb:
        .map(s -> s.substring(ACCOUNT_QUERY_KEY.length() + 1))
        .blockLast();
    if (CoreUtils.isNullOrEmpty(accountName)) {
        throw Utility.logError(logger, new IllegalArgumentException("No account name provided in URI query."));
    }
    return accountName;
}
/**
 * Ensures the given path was produced by this provider and narrows it to AzurePath.
 */
private AzurePath validatePathInstanceType(Path path) {
    if (path instanceof AzurePath) {
        return (AzurePath) path;
    }
    throw Utility.logError(logger, new IllegalArgumentException("This provider cannot operate on subtypes of "
        + "Path other than AzurePath"));
}
/**
 * Rejects operations that target a root directory (container).
 */
private void validateNotRoot(Path path, String operation) {
    if (!((AzurePath) path).isRoot()) {
        return;
    }
    throw Utility.logError(logger, new IllegalArgumentException(
        String.format("%s is not supported on a root directory. Path: %s", operation, path.toString())));
}
} |
It's true we do check for no such blob based on the dir status earlier and also true that something else could happen in between. I added a specific case for a BlobNotFound error code and throw a NoSuchFileException in that case as well. I would prefer to throw in the case of no such file because the docs say we can, so it's more informative and because there's a deleteIfExists api that customers can call if they don't want the exception behavior. | public void delete(Path path) throws IOException {
AzurePath aPath = validatePathInstanceType(path);
validateNotRoot(path, "Delete");
BlobClient blobClient = aPath.toBlobClient();
DirectoryStatus dirStatus = checkDirStatus(blobClient);
if (dirStatus.equals(DirectoryStatus.DOES_NOT_EXIST)) {
throw Utility.logError(logger, new NoSuchFileException(path.toString()));
}
if (dirStatus.equals(DirectoryStatus.NOT_EMPTY)) {
throw Utility.logError(logger, new DirectoryNotEmptyException(path.toString()));
}
try {
blobClient.delete();
} catch (BlobStorageException e) {
throw Utility.logError(logger, new IOException(e));
}
} | throw Utility.logError(logger, new IOException(e)); | public void delete(Path path) throws IOException {
AzurePath aPath = validatePathInstanceType(path);
validateNotRoot(path, "Delete");
BlobClient blobClient = aPath.toBlobClient();
DirectoryStatus dirStatus = checkDirStatus(blobClient);
if (dirStatus.equals(DirectoryStatus.DOES_NOT_EXIST)) {
throw Utility.logError(logger, new NoSuchFileException(path.toString()));
}
if (dirStatus.equals(DirectoryStatus.NOT_EMPTY)) {
throw Utility.logError(logger, new DirectoryNotEmptyException(path.toString()));
}
try {
blobClient.delete();
} catch (BlobStorageException e) {
if (e.getErrorCode().equals(BlobErrorCode.BLOB_NOT_FOUND)) {
throw Utility.logError(logger, new NoSuchFileException(path.toString()));
}
throw Utility.logError(logger, new IOException(e));
}
} | class AzureFileSystemProvider extends FileSystemProvider {
private final ClientLogger logger = new ClientLogger(AzureFileSystemProvider.class);
public static final String CONTENT_TYPE = "Content-Type";
public static final String CONTENT_DISPOSITION = "Content-Disposition";
public static final String CONTENT_LANGUAGE = "Content-Language";
public static final String CONTENT_ENCODING = "Content-Encoding";
public static final String CONTENT_MD5 = "Content-MD5";
public static final String CACHE_CONTROL = "Cache-Control";
private static final String ACCOUNT_QUERY_KEY = "account";
private static final int COPY_TIMEOUT_SECONDS = 30;
static final String DIR_METADATA_MARKER = "is_hdi_folder";
private final ConcurrentMap<String, FileSystem> openFileSystems;
/**
 * Creates an AzureFileSystemProvider with no file systems open.
 */
public AzureFileSystemProvider() {
    // Concurrent map: file systems may be opened and closed from multiple threads through this provider.
    this.openFileSystems = new ConcurrentHashMap<>();
}
/**
 * Returns {@code "azb"}, the URI scheme handled by this provider.
 */
@Override
public String getScheme() {
    return "azb";
}
/**
 * Opens a new file system for the storage account named by the URI's {@code account=} query parameter.
 * <p>
 * Once closed, a file system with the same identifier may be reopened.
 *
 * @param uri a URI whose scheme is {@code "azb"} and whose query carries the account name
 * @param config configuration passed through to the {@code AzureFileSystem} constructor
 * @return the newly opened file system
 * @throws FileSystemAlreadyExistsException if a file system for this account is already open
 * @throws IOException if the file system cannot be created
 */
@Override
public FileSystem newFileSystem(URI uri, Map<String, ?> config) throws IOException {
    String accountName = extractAccountName(uri);
    // Fail fast before constructing the (potentially expensive) file system.
    if (this.openFileSystems.containsKey(accountName)) {
        throw Utility.logError(this.logger, new FileSystemAlreadyExistsException("Name: " + accountName));
    }
    AzureFileSystem afs = new AzureFileSystem(this, accountName, config);
    /*
     * putIfAbsent closes the check-then-act race: if another thread registered the same account between
     * the check above and this point, honor the existing registration and report the conflict instead of
     * silently replacing it.
     */
    if (this.openFileSystems.putIfAbsent(accountName, afs) != null) {
        throw Utility.logError(this.logger, new FileSystemAlreadyExistsException("Name: " + accountName));
    }
    return afs;
}
/**
 * Retrieves the open file system for the storage account named by the URI's {@code account=} query parameter.
 * <p>
 * Trying to retrieve a closed file system will throw a {@link FileSystemNotFoundException}. Once closed, a
 * file system with the same identifier may be reopened.
 *
 * @param uri a URI whose scheme is {@code "azb"} and whose query carries the account name
 * @return the open file system
 * @throws FileSystemNotFoundException if no file system is open for this account
 */
@Override
public FileSystem getFileSystem(URI uri) {
    String accountName = extractAccountName(uri);
    /*
     * Single map read instead of containsKey + get: avoids the race where the file system is closed (and
     * removed) between the two calls, which would have returned null rather than throwing.
     */
    FileSystem fileSystem = this.openFileSystems.get(accountName);
    if (fileSystem == null) {
        throw Utility.logError(this.logger, new FileSystemNotFoundException("Name: " + accountName));
    }
    return fileSystem;
}
/**
 * Converts the given URI to a {@link Path} belonging to the already-open file system for that account.
 * <p>
 * {@inheritDoc}
 */
@Override
public Path getPath(URI uri) {
    FileSystem fileSystem = getFileSystem(uri);
    return fileSystem.getPath(uri.getPath());
}
/**
 * {@inheritDoc}
 * <p>
 * Not yet implemented: currently always returns {@code null}.
 */
@Override
public SeekableByteChannel newByteChannel(Path path, Set<? extends OpenOption> set,
    FileAttribute<?>... fileAttributes) throws IOException {
    return null; // TODO(review): implement byte-channel support.
}
/**
 * {@inheritDoc}
 * <p>
 * Not yet implemented: currently always returns {@code null}.
 */
@Override
public DirectoryStream<Path> newDirectoryStream(Path path, DirectoryStream.Filter<? super Path> filter)
    throws IOException {
    return null; // TODO(review): implement directory listing.
}
/**
* Creates a new directory at the specified path.
* <p>
* The existence of a directory in the {@code AzureFileSystem} is defined on two levels. <i>Weak existence</i> is
* defined by the presence of a non-zero number of blobs prefixed with the directory's path. This concept is also
* known as a <i>virtual directory</i> and enables the file system to work with containers that were pre-loaded
* with data by another source but need to be accessed by this file system. <i>Strong existence</i> is defined as
* the presence of an actual storage resource at the given path, which in the case of directories, is a zero-length
* blob whose name is the directory path with a particular metadata field indicating the blob's status as a
* directory. This is also known as a <i>concrete directory</i>. Directories created by this file system will
* strongly exist. Operations targeting directories themselves as the object (e.g. setting properties) will target
* marker blobs underlying concrete directories. Other operations (e.g. listing) will operate on the blob-name
* prefix.
* <p>
* This method fulfills the nio contract of: "The check for the existence of the file and the creation of the
* directory if it does not exist are a single operation that is atomic with respect to all other filesystem
* activities that might affect the directory." More specifically, this method will atomically check for <i>strong
* existence</i> of another file or directory at the given path and fail if one is present. On the other hand, we
* only check for <i>weak existence</i> of the parent to determine if the given path is valid. Additionally, the
* action of checking whether the parent exists, is <i>not</i> atomic with the creation of the directory. Note that
* while it is possible that the parent may be deleted between when the parent is determined to exist and the
* creation of the child, the creation of the child will always ensure the existence of a virtual parent, so the
* child will never be left floating and unreachable. The different checks on parent and child is due to limitations
* in the Storage service API.
* <p>
* There may be some unintuitive behavior when working with directories in this file system, particularly virtual
* directories(usually those not created by this file system). A virtual directory will disappear as soon as all its
* children have been deleted. Furthermore, if a directory with the given path weakly exists at the time of calling
* this method, this method will still return success and create a concrete directory at the target location.
* In other words, it is possible to "double create" a directory if it first weakly exists and then is strongly
* created. This is both because it is impossible to atomically check if a virtual directory exists while creating a
* concrete directory and because such behavior will have minimal side effects--no files will be overwritten and the
* directory will still be available for writing as intended, though it may not be empty.
* <p>
* This method will attempt to extract standard HTTP content headers from the list of file attributes to set them
* as blob headers. All other attributes will be set as blob metadata. The value of every attribute will be
* converted to a {@code String} with the exception of the Content-MD5 attribute which expects a {@code byte[]}.
* When extracting the content headers, the following strings will be used for comparison (constants for these
* values can be found on this type):
* <ul>
* <li>{@code Content-Type}</li>
* <li>{@code Content-Disposition}</li>
* <li>{@code Content-Language}</li>
* <li>{@code Content-Encoding}</li>
* <li>{@code Content-MD5}</li>
* <li>{@code Cache-Control}</li>
* </ul>
* Note that these properties also have a particular semantic in that if one is specified, all are updated. In other
* words, if any of the above is set, all those that are not set will be cleared. See the
* <a href="https:
* information.
*
* {@inheritDoc}
*/
@Override
public void createDirectory(Path path, FileAttribute<?>... fileAttributes) throws IOException {
    AzurePath aPath = validatePathInstanceType(path);
    fileAttributes = fileAttributes == null ? new FileAttribute<?>[0] : fileAttributes;
    BlobClient client = aPath.toBlobClient();
    validateNotRoot(aPath, "Create");

    // The parent need only weakly exist (virtual directory); see checkParentDirectoryExists.
    if (!checkParentDirectoryExists(aPath)) {
        throw Utility.logError(logger, new IOException("Parent directory does not exist for path: "
            + aPath.toString()));
    }

    try {
        List<FileAttribute<?>> attributeList = new ArrayList<>(Arrays.asList(fileAttributes));
        BlobHttpHeaders headers = Utility.extractHttpHeaders(attributeList, logger);
        Map<String, String> metadata = Utility.convertAttributesToMetadata(attributeList);
        // If-None-Match: * makes the existence check and the creation atomic on the service side.
        putDirectoryBlob(client, headers, metadata, new BlobRequestConditions().setIfNoneMatch("*"));
    } catch (BlobStorageException e) {
        if (e.getStatusCode() == HttpURLConnection.HTTP_CONFLICT
            && e.getErrorCode().equals(BlobErrorCode.BLOB_ALREADY_EXISTS)) {
            throw Utility.logError(logger, new FileAlreadyExistsException(aPath.toString()));
        } else {
            // Error-message typo fixed ("occured" -> "occurred"); cause is preserved.
            throw Utility.logError(logger, new IOException("An error occurred when creating the directory", e));
        }
    }
}
/**
 * Writes the zero-length marker blob that represents a concrete directory. Callers are responsible for any
 * precondition checks (e.g. parent existence) before invoking this.
 *
 * @param destinationClient blob client pointing at the location where the directory marker should be put
 * @param headers HTTP headers to associate with the directory marker, or null
 * @param metadata metadata to associate with the marker; the directory-marker key is added automatically
 * @param requestConditions request conditions to apply when creating the marker blob
 */
private void putDirectoryBlob(BlobClient destinationClient, BlobHttpHeaders headers, Map<String, String> metadata,
    BlobRequestConditions requestConditions) {
    Map<String, String> directoryMetadata = prepareMetadataForDirectory(metadata);
    // An empty block list commits a zero-length blob carrying the directory metadata.
    destinationClient.getBlockBlobClient()
        .commitBlockListWithResponse(Collections.emptyList(), headers, directoryMetadata, null,
            requestConditions, null, null);
}
/**
 * Checks for the existence of the parent of the given path. We do not check for the actual marker blob as
 * parents need only weakly exist.
 * <p>
 * If the parent is a root (container), it is assumed to exist, so it must be validated elsewhere that the
 * container is a legitimate root within this file system.
 *
 * @param path the child path whose parent is checked
 * @return true if the parent weakly exists (or is a root/container)
 * @throws IOException if the existence check against the service fails
 */
boolean checkParentDirectoryExists(Path path) throws IOException {
    /*
     * If the parent is just the root (or null, which means the parent is implicitly the default directory,
     * which is a root), we are checking a container, which is always considered to exist. Otherwise, perform
     * a normal existence check.
     */
    Path parent = path.getParent();
    if (parent == null || parent.equals(path.getRoot())) {
        return true;
    }
    // Reuse the already-computed parent instead of calling path.getParent() a second time.
    return checkDirectoryExists(((AzurePath) parent).toBlobClient());
}
/**
 * Determines whether a directory exists, i.e. its status is either an empty concrete directory or a
 * directory (virtual or concrete) with children.
 *
 * @param dirBlobClient blob client pointing at the directory location; must not be null
 * @return true if the directory exists in either sense
 * @throws IOException if the status check against the service fails
 */
boolean checkDirectoryExists(BlobClient dirBlobClient) throws IOException {
    if (dirBlobClient == null) {
        throw Utility.logError(logger, new IllegalArgumentException("One or both of the parameters was null."));
    }
    DirectoryStatus status = checkDirStatus(dirBlobClient);
    return DirectoryStatus.EMPTY.equals(status) || DirectoryStatus.NOT_EMPTY.equals(status);
}
/**
 * Stamps the directory-marker key onto the given metadata map (creating a new map when null is passed) so
 * the resulting blob is recognized as a concrete directory. Note: a non-null input map is mutated in place.
 */
Map<String, String> prepareMetadataForDirectory(Map<String, String> metadata) {
    Map<String, String> result = (metadata == null) ? new HashMap<>() : metadata;
    result.put(DIR_METADATA_MARKER, "true");
    return result;
}
/**
 * Builds a container client that shares the given blob client's endpoint and HTTP pipeline.
 */
BlobContainerClient getContainerClient(BlobClient client) {
    BlobContainerClientBuilder builder = new BlobContainerClientBuilder()
        .endpoint(client.getBlobUrl())
        .pipeline(client.getHttpPipeline());
    return builder.buildClient();
}
/**
* Deletes the resource at the specified path.
* <p>
* As noted by the NIO docs, this method is not atomic. It is possible to delete a file in use by another process,
* and doing so will not immediately invalidate any channels open to that file--they will simply start to fail.
* Root directories cannot be deleted even when empty.
*
* {@inheritDoc}
*/
@Override
/**
* Copies the resource at the source location to the destination.
* <p>
* As stated in the nio docs, this method is not atomic. More specifically, the checks necessary to validate the
* inputs and state of the file system are not atomic with the actual copying of data. If the copy is triggered,
* the copy itself is atomic and only a complete copy will ever be left at the destination.
*
* In addition to those in the nio javadocs, this method has the following requirements for successful completion.
* {@link StandardCopyOption
* if this option is not passed, an {@link UnsupportedOperationException} will be thrown. Neither the source nor the
* destination can be a root directory; if either is a root directory, an {@link IllegalArgumentException} will be
* thrown. The parent directory of the destination must at least weakly exist; if it does not, an
* {@link IOException} will be thrown. The only supported option other than
* {@link StandardCopyOption
* other option will result in an {@link UnsupportedOperationException}.
*
* This method supports both virtual and concrete directories as both the source and destination. Unlike when
* creating a directory, the existence of a virtual directory at the destination will cause this operation to fail.
* This is in order to prevent the possibility of overwriting a non-empty virtual directory with a file. Still, as
* mentioned above, this check is not atomic with the creation of the resultant directory.
*
* {@inheritDoc}
* @see
*/
@Override
public void copy(Path source, Path destination, CopyOption... copyOptions) throws IOException {
    AzurePath aSource = validatePathInstanceType(source);
    AzurePath aDestination = validatePathInstanceType(destination);
    // Copying a path onto itself is a no-op per the nio contract.
    if (aSource.equals(aDestination)) {
        return;
    }

    // Option validation: COPY_ATTRIBUTES is mandatory, REPLACE_EXISTING is optional, nothing else is allowed.
    boolean replaceExisting = false;
    List<CopyOption> optionsList = new ArrayList<>(Arrays.asList(copyOptions));
    if (!optionsList.contains(StandardCopyOption.COPY_ATTRIBUTES)) {
        throw Utility.logError(logger, new UnsupportedOperationException("StandardCopyOption.COPY_ATTRIBUTES "
            + "must be specified as the service will always copy file attributes."));
    }
    optionsList.remove(StandardCopyOption.COPY_ATTRIBUTES);
    if (optionsList.contains(StandardCopyOption.REPLACE_EXISTING)) {
        replaceExisting = true;
        optionsList.remove(StandardCopyOption.REPLACE_EXISTING);
    }
    if (!optionsList.isEmpty()) {
        // Error-message typo fixed ("StandareCopyOption" -> "StandardCopyOption").
        throw Utility.logError(logger, new UnsupportedOperationException("Unsupported copy option found. Only "
            + "StandardCopyOption.COPY_ATTRIBUTES and StandardCopyOption.REPLACE_EXISTING are supported."));
    }

    validateNotRoot(aSource, "Copy source");
    validateNotRoot(aDestination, "Copy destination");
    BlobClient sourceBlob = aSource.toBlobClient();
    BlobClient destinationBlob = aDestination.toBlobClient();

    // Overwriting a non-empty (possibly virtual) directory is never allowed.
    DirectoryStatus destinationStatus = checkDirStatus(destinationBlob);
    if (destinationStatus.equals(DirectoryStatus.NOT_EMPTY)) {
        throw Utility.logError(logger, new DirectoryNotEmptyException(aDestination.toString()));
    }

    /*
     * Set request conditions if we should not overwrite. We can error out here if we know something already
     * exists, but we also create request conditions as a safeguard against overwriting something that was
     * created between our check and the put.
     */
    BlobRequestConditions requestConditions = null;
    if (!replaceExisting) {
        if (!destinationStatus.equals(DirectoryStatus.DOES_NOT_EXIST)) {
            throw Utility.logError(logger, new FileAlreadyExistsException(aDestination.toString()));
        }
        requestConditions = new BlobRequestConditions().setIfNoneMatch("*");
    }

    /*
     * More path validation.
     * Check that the parent for the destination exists. We only need to perform this check if there is nothing
     * currently at the destination, for if the destination exists, its parent at least weakly exists and we
     * can skip a service call.
     */
    if (destinationStatus.equals(DirectoryStatus.DOES_NOT_EXIST) && !checkParentDirectoryExists(aDestination)) {
        throw Utility.logError(logger, new IOException("Parent directory of destination location does not exist."
            + "The destination path is therefore invalid. Destination: " + aDestination.toString()));
    }

    /*
     * Try to copy the resource at the source path.
     * There is an optimization here where we try the copy first and only check for a virtual directory on a
     * 404. For files and concrete directories this is one request; for virtual directories it is three
     * (failed copy, status check, create directory).
     */
    try {
        SyncPoller<BlobCopyInfo, Void> pollResponse =
            destinationBlob.beginCopy(sourceBlob.getBlobUrl(), null, null, null, null, requestConditions, null);
        pollResponse.waitForCompletion(Duration.ofSeconds(COPY_TIMEOUT_SECONDS));
    } catch (BlobStorageException e) {
        if (e.getErrorCode().equals(BlobErrorCode.BLOB_NOT_FOUND)
            && !checkDirStatus(sourceBlob).equals(DirectoryStatus.DOES_NOT_EXIST)) {
            /*
             * Source is a virtual directory. We already checked that the parent exists and validated the
             * paths above, so we can put the marker blob directly.
             */
            putDirectoryBlob(destinationBlob, null, null, requestConditions);
        } else {
            throw Utility.logError(logger, new IOException(e));
        }
    } catch (RuntimeException e) {
        throw Utility.logError(logger, new IOException(e));
    }
}
/**
 * Determines whether a directory is extant and/or empty, accommodating virtual directories. This method
 * does not check the status of root directories.
 *
 * @param dirBlobClient blob client pointing at the prospective directory; must not be null
 * @return the {@code DirectoryStatus} of the location
 * @throws IOException if the listing call against the service fails
 */
DirectoryStatus checkDirStatus(BlobClient dirBlobClient) throws IOException {
    if (dirBlobClient == null) {
        throw Utility.logError(logger, new IllegalArgumentException("The blob client was null."));
    }
    // Two results are enough to distinguish empty from non-empty; metadata is needed for the marker check.
    BlobContainerClient containerClient = getContainerClient(dirBlobClient);
    ListBlobsOptions listOptions = new ListBlobsOptions().setMaxResultsPerPage(2)
        .setPrefix(dirBlobClient.getBlobName())
        .setDetails(new BlobListDetails().setRetrieveMetadata(true));
    /*
     * Do a list on the prefix.
     * Zero elements means no virtual dir: does not exist.
     * One element that matches this dir exactly means empty.
     * One element that doesn't match this dir, or more than one element: not empty.
     * One element that matches the name but has no directory marker: the resource is not a directory.
     * Note that blob names matching the prefix exactly are returned by listing operations.
     */
    try {
        Iterator<BlobItem> blobIterator = containerClient.listBlobsByHierarchy(AzureFileSystem.PATH_SEPARATOR,
            listOptions, null).iterator();
        if (!blobIterator.hasNext()) {
            return DirectoryStatus.DOES_NOT_EXIST;
        } else {
            BlobItem item = blobIterator.next();
            if (blobIterator.hasNext()) {
                // Two or more results under the prefix: definitely a non-empty directory.
                return DirectoryStatus.NOT_EMPTY;
            }
            if (!item.getName().equals(dirBlobClient.getBlobName())) {
                /*
                 * Names do not match. Must be a virtual dir with one item, e.g. a blob named "foo/bar"
                 * means the directory "foo" exists.
                 */
                return DirectoryStatus.NOT_EMPTY;
            }
            if (item.getMetadata() != null && item.getMetadata().containsKey(DIR_METADATA_MARKER)) {
                // Exact-name match carrying the marker: an empty concrete directory.
                return DirectoryStatus.EMPTY;
            }
            // Exact-name match without the marker: an ordinary blob, not a directory.
            return DirectoryStatus.NOT_A_DIRECTORY;
        }
    } catch (BlobStorageException e) {
        throw Utility.logError(logger, new IOException(e));
    }
}
/*int checkRootDirStatus(BlobContainerClient rootClient) {
}*/
/**
 * {@inheritDoc}
 * <p>
 * Not yet implemented: this is currently a no-op.
 */
@Override
public void move(Path path, Path path1, CopyOption... copyOptions) throws IOException {
    // TODO(review): implement move; no operation is performed at present.
}
/**
 * {@inheritDoc}
 * <p>
 * Not yet implemented: currently always returns {@code false}.
 */
@Override
public boolean isSameFile(Path path, Path path1) throws IOException {
    return false; // TODO(review): implement path-identity comparison.
}
/**
 * {@inheritDoc}
 * <p>
 * Not yet implemented: currently always returns {@code false}.
 */
@Override
public boolean isHidden(Path path) throws IOException {
    return false; // TODO(review): blobs have no hidden attribute; confirm whether false is final behavior.
}
/**
 * {@inheritDoc}
 * <p>
 * Not yet implemented: currently always returns {@code null}.
 */
@Override
public FileStore getFileStore(Path path) throws IOException {
    return null; // TODO(review): return the container-backed FileStore for this path.
}
/**
 * {@inheritDoc}
 * <p>
 * Not yet implemented: no access checks are currently performed.
 */
@Override
public void checkAccess(Path path, AccessMode... accessModes) throws IOException {
    // TODO(review): implement access checking.
}
/**
 * {@inheritDoc}
 * <p>
 * Not yet implemented: currently always returns {@code null}.
 */
@Override
public <V extends FileAttributeView> V getFileAttributeView(Path path, Class<V> aClass, LinkOption... linkOptions) {
    return null; // TODO(review): implement attribute views.
}
/**
 * {@inheritDoc}
 * <p>
 * Not yet implemented: currently always returns {@code null}.
 */
@Override
public <A extends BasicFileAttributes> A readAttributes(Path path, Class<A> aClass, LinkOption... linkOptions)
    throws IOException {
    return null; // TODO(review): implement attribute reads.
}
/**
 * {@inheritDoc}
 * <p>
 * Not yet implemented: currently always returns {@code null}.
 */
@Override
public Map<String, Object> readAttributes(Path path, String s, LinkOption... linkOptions) throws IOException {
    return null; // TODO(review): implement string-keyed attribute reads.
}
/**
 * {@inheritDoc}
 * <p>
 * Not yet implemented: this is currently a no-op.
 */
@Override
public void setAttribute(Path path, String s, Object o, LinkOption... linkOptions) throws IOException {
    // TODO(review): implement attribute setting.
}
/**
 * Unregisters the file system with the given name so that an identically named file system may later be
 * reopened through this provider.
 */
void closeFileSystem(String fileSystemName) {
    openFileSystems.remove(fileSystemName);
}
/**
 * Extracts the storage account name from the {@code account=} query parameter of the given URI.
 * <p>
 * NOTE(review): some string literals below appear truncated by the extraction of this snippet ("azb:
 * fragments); they have been left exactly as found. Also, a reactive Flux/Mono pipeline is a heavyweight
 * way to parse a query string — consider a plain loop; confirm before changing.
 *
 * @param uri the file-system URI; its scheme must match this provider and its query must name an account
 * @return the account name
 * @throws IllegalArgumentException if the scheme mismatches, the query is missing, or no account is given
 */
private String extractAccountName(URI uri) {
    if (!uri.getScheme().equals(this.getScheme())) {
        throw Utility.logError(this.logger, new IllegalArgumentException(
            "URI scheme does not match this provider"));
    }
    if (CoreUtils.isNullOrEmpty(uri.getQuery())) {
        throw Utility.logError(this.logger, new IllegalArgumentException("URI does not contain a query "
            + "component. FileSystems require a URI of the format \"azb:
    }
    // Pick the first "account=<name>" pair out of the '&'-separated query string.
    String accountName = Flux.fromArray(uri.getQuery().split("&"))
        .filter(s -> s.startsWith(ACCOUNT_QUERY_KEY + "="))
        .switchIfEmpty(Mono.error(Utility.logError(this.logger, new IllegalArgumentException(
            "URI does not contain an \"" + ACCOUNT_QUERY_KEY + "=\" parameter. FileSystems require a URI "
            + "of the format \"azb:
        .map(s -> s.substring(ACCOUNT_QUERY_KEY.length() + 1))
        .blockLast();
    if (CoreUtils.isNullOrEmpty(accountName)) {
        throw Utility.logError(logger, new IllegalArgumentException("No account name provided in URI query."));
    }
    return accountName;
}
/**
 * Ensures the given path was produced by this provider.
 *
 * @param path the path to check
 * @return the path, downcast to {@code AzurePath}
 * @throws IllegalArgumentException if the path is not an {@code AzurePath}
 */
private AzurePath validatePathInstanceType(Path path) {
    if (path instanceof AzurePath) {
        return (AzurePath) path;
    }
    throw Utility.logError(logger, new IllegalArgumentException("This provider cannot operate on subtypes of "
        + "Path other than AzurePath"));
}
/**
 * Rejects root directories for operations that do not support them.
 *
 * @param path the path to check (must already be an {@code AzurePath})
 * @param operation human-readable operation name used in the error message
 * @throws IllegalArgumentException if the path is a root directory
 */
private void validateNotRoot(Path path, String operation) {
    if (!((AzurePath) path).isRoot()) {
        return;
    }
    throw Utility.logError(logger, new IllegalArgumentException(
        String.format("%s is not supported on a root directory. Path: %s", operation, path.toString())));
}
} | class AzureFileSystemProvider extends FileSystemProvider {
private final ClientLogger logger = new ClientLogger(AzureFileSystemProvider.class);
public static final String CONTENT_TYPE = "Content-Type";
public static final String CONTENT_DISPOSITION = "Content-Disposition";
public static final String CONTENT_LANGUAGE = "Content-Language";
public static final String CONTENT_ENCODING = "Content-Encoding";
public static final String CONTENT_MD5 = "Content-MD5";
public static final String CACHE_CONTROL = "Cache-Control";
private static final String ACCOUNT_QUERY_KEY = "account";
private static final int COPY_TIMEOUT_SECONDS = 30;
static final String DIR_METADATA_MARKER = "is_hdi_folder";
private final ConcurrentMap<String, FileSystem> openFileSystems;
/**
* Creates an AzureFileSystemProvider.
*/
public AzureFileSystemProvider() {
this.openFileSystems = new ConcurrentHashMap<>();
}
/**
* Returns {@code "azb".}
*/
@Override
public String getScheme() {
return "azb";
}
/**
* The format of a {@code URI} identifying a file system is {@code "azb:
* <p>
* Once closed, a file system with the same identifier may be reopened.
* {@inheritDoc}
*/
@Override
public FileSystem newFileSystem(URI uri, Map<String, ?> config) throws IOException {
String accountName = extractAccountName(uri);
if (this.openFileSystems.containsKey(accountName)) {
throw Utility.logError(this.logger, new FileSystemAlreadyExistsException("Name: " + accountName));
}
AzureFileSystem afs = new AzureFileSystem(this, accountName, config);
this.openFileSystems.put(accountName, afs);
return afs;
}
/**
* The format of a {@code URI} identifying an file system is {@code "azb:
* <p>
* Trying to retrieve a closed file system will throw a {@link FileSystemNotFoundException}. Once closed, a
* file system with the same identifier may be reopened.
* {@inheritDoc}
*/
@Override
public FileSystem getFileSystem(URI uri) {
String accountName = extractAccountName(uri);
if (!this.openFileSystems.containsKey(accountName)) {
throw Utility.logError(this.logger, new FileSystemNotFoundException("Name: " + accountName));
}
return this.openFileSystems.get(accountName);
}
/**
* {@inheritDoc}
*/
@Override
public Path getPath(URI uri) {
return getFileSystem(uri).getPath(uri.getPath());
}
/**
* {@inheritDoc}
*/
@Override
public SeekableByteChannel newByteChannel(Path path, Set<? extends OpenOption> set,
FileAttribute<?>... fileAttributes) throws IOException {
return null;
}
/**
* {@inheritDoc}
*/
@Override
public DirectoryStream<Path> newDirectoryStream(Path path, DirectoryStream.Filter<? super Path> filter)
throws IOException {
return null;
}
/**
* Creates a new directory at the specified path.
* <p>
* The existence of a directory in the {@code AzureFileSystem} is defined on two levels. <i>Weak existence</i> is
* defined by the presence of a non-zero number of blobs prefixed with the directory's path. This concept is also
* known as a <i>virtual directory</i> and enables the file system to work with containers that were pre-loaded
* with data by another source but need to be accessed by this file system. <i>Strong existence</i> is defined as
* the presence of an actual storage resource at the given path, which in the case of directories, is a zero-length
* blob whose name is the directory path with a particular metadata field indicating the blob's status as a
* directory. This is also known as a <i>concrete directory</i>. Directories created by this file system will
* strongly exist. Operations targeting directories themselves as the object (e.g. setting properties) will target
* marker blobs underlying concrete directories. Other operations (e.g. listing) will operate on the blob-name
* prefix.
* <p>
* This method fulfills the nio contract of: "The check for the existence of the file and the creation of the
* directory if it does not exist are a single operation that is atomic with respect to all other filesystem
* activities that might affect the directory." More specifically, this method will atomically check for <i>strong
* existence</i> of another file or directory at the given path and fail if one is present. On the other hand, we
* only check for <i>weak existence</i> of the parent to determine if the given path is valid. Additionally, the
* action of checking whether the parent exists, is <i>not</i> atomic with the creation of the directory. Note that
* while it is possible that the parent may be deleted between when the parent is determined to exist and the
* creation of the child, the creation of the child will always ensure the existence of a virtual parent, so the
* child will never be left floating and unreachable. The different checks on parent and child is due to limitations
* in the Storage service API.
* <p>
* There may be some unintuitive behavior when working with directories in this file system, particularly virtual
* directories(usually those not created by this file system). A virtual directory will disappear as soon as all its
* children have been deleted. Furthermore, if a directory with the given path weakly exists at the time of calling
* this method, this method will still return success and create a concrete directory at the target location.
* In other words, it is possible to "double create" a directory if it first weakly exists and then is strongly
* created. This is both because it is impossible to atomically check if a virtual directory exists while creating a
* concrete directory and because such behavior will have minimal side effects--no files will be overwritten and the
* directory will still be available for writing as intended, though it may not be empty.
* <p>
* This method will attempt to extract standard HTTP content headers from the list of file attributes to set them
* as blob headers. All other attributes will be set as blob metadata. The value of every attribute will be
* converted to a {@code String} with the exception of the Content-MD5 attribute which expects a {@code byte[]}.
* When extracting the content headers, the following strings will be used for comparison (constants for these
* values can be found on this type):
* <ul>
* <li>{@code Content-Type}</li>
* <li>{@code Content-Disposition}</li>
* <li>{@code Content-Language}</li>
* <li>{@code Content-Encoding}</li>
* <li>{@code Content-MD5}</li>
* <li>{@code Cache-Control}</li>
* </ul>
* Note that these properties also have a particular semantic in that if one is specified, all are updated. In other
* words, if any of the above is set, all those that are not set will be cleared. See the
* <a href="https:
* information.
*
* {@inheritDoc}
*/
@Override
public void createDirectory(Path path, FileAttribute<?>... fileAttributes) throws IOException {
AzurePath aPath = validatePathInstanceType(path);
fileAttributes = fileAttributes == null ? new FileAttribute<?>[0] : fileAttributes;
BlobClient client = aPath.toBlobClient();
validateNotRoot(aPath, "Create");
if (checkParentDirectoryExists(aPath)) {
try {
List<FileAttribute<?>> attributeList = new ArrayList<>(Arrays.asList(fileAttributes));
BlobHttpHeaders headers = Utility.extractHttpHeaders(attributeList, logger);
Map<String, String> metadata = Utility.convertAttributesToMetadata(attributeList);
putDirectoryBlob(client, headers, metadata, new BlobRequestConditions().setIfNoneMatch("*"));
} catch (BlobStorageException e) {
if (e.getStatusCode() == HttpURLConnection.HTTP_CONFLICT
&& e.getErrorCode().equals(BlobErrorCode.BLOB_ALREADY_EXISTS)) {
throw Utility.logError(logger, new FileAlreadyExistsException(aPath.toString()));
} else {
throw Utility.logError(logger, new IOException("An error occured when creating the directory", e));
}
}
} else {
throw Utility.logError(logger, new IOException("Parent directory does not exist for path: "
+ aPath.toString()));
}
}
/**
* Creates the actual directory marker. This method should only be used when any necessary checks for proper
* conditions of directory creation (e.g. parent existence) have already been performed.
*
* @param destinationClient A blobClient pointing to the location where the directory should be put.
* @param headers Any headers that should be associated with the directory.
* @param metadata Any metadata that should be associated with the directory. This method will add the necessary
* metadata to distinguish this blob as a directory.
* @param requestConditions Any necessary request conditions to pass when creating the directory blob.
*/
private void putDirectoryBlob(BlobClient destinationClient, BlobHttpHeaders headers, Map<String, String> metadata,
BlobRequestConditions requestConditions) {
metadata = prepareMetadataForDirectory(metadata);
destinationClient.getBlockBlobClient().commitBlockListWithResponse(Collections.emptyList(), headers,
metadata, null, requestConditions, null, null);
}
/**
* Checks for the existence of the parent of the given path. We do not check for the actual marker blob as parents
* need only weakly exist.
*
* If the parent is a root (container), it will be assumed to exist, so it must be validated elsewhere that the
* container is a legitimate root within this file system.
*/
boolean checkParentDirectoryExists(Path path) throws IOException {
/*
If the parent is just the root (or null, which means the parent is implicitly the default directory which is a
root), that means we are checking a container, which is always considered to exist. Otherwise, perform normal
existence check.
*/
Path parent = path.getParent();
return (parent == null || parent.equals(path.getRoot()))
|| checkDirectoryExists(((AzurePath) path.getParent()).toBlobClient());
}
/**
* Checks whether a directory exists by either being empty or having children.
*/
boolean checkDirectoryExists(BlobClient dirBlobClient) throws IOException {
if (dirBlobClient == null) {
throw Utility.logError(logger, new IllegalArgumentException("One or both of the parameters was null."));
}
DirectoryStatus dirStatus = checkDirStatus(dirBlobClient);
return dirStatus.equals(DirectoryStatus.EMPTY) || dirStatus.equals(DirectoryStatus.NOT_EMPTY);
}
Map<String, String> prepareMetadataForDirectory(Map<String, String> metadata) {
if (metadata == null) {
metadata = new HashMap<>();
}
metadata.put(DIR_METADATA_MARKER, "true");
return metadata;
}
BlobContainerClient getContainerClient(BlobClient client) {
return new BlobContainerClientBuilder().endpoint(client.getBlobUrl())
.pipeline(client.getHttpPipeline())
.buildClient();
}
/**
* Deletes the specified resource.
* <p>
* This method is not atomic. It is possible to delete a file in use by another process,
* and doing so will not immediately invalidate any channels open to that file--they will simply start to fail.
* Root directories cannot be deleted even when empty.
*
* {@inheritDoc}
*/
@Override
/**
 * Copies the resource at the source location to the destination.
 * <p>
 * This method is not atomic. More specifically, the checks necessary to validate the
 * inputs and state of the file system are not atomic with the actual copying of data. If the copy is triggered,
 * the copy itself is atomic and only a complete copy will ever be left at the destination.
 *
 * In addition to those in the nio javadocs, this method has the following requirements for successful completion.
 * {@link StandardCopyOption#COPY_ATTRIBUTES} must be passed as the service will always copy file attributes;
 * if this option is not passed, an {@link UnsupportedOperationException} will be thrown. Neither the source nor the
 * destination can be a root directory; if either is a root directory, an {@link IllegalArgumentException} will be
 * thrown. The parent directory of the destination must at least weakly exist; if it does not, an
 * {@link IOException} will be thrown. The only supported option other than
 * {@link StandardCopyOption#COPY_ATTRIBUTES} is {@link StandardCopyOption#REPLACE_EXISTING}; the presence of any
 * other option will result in an {@link UnsupportedOperationException}.
 *
 * This method supports both virtual and concrete directories as both the source and destination. Unlike when
 * creating a directory, the existence of a virtual directory at the destination will cause this operation to fail.
 * This is in order to prevent the possibility of overwriting a non-empty virtual directory with a file. Still, as
 * mentioned above, this check is not atomic with the creation of the resultant directory.
 *
 * {@inheritDoc}
 */
@Override
public void copy(Path source, Path destination, CopyOption... copyOptions) throws IOException {
    AzurePath aSource = validatePathInstanceType(source);
    AzurePath aDestination = validatePathInstanceType(destination);
    // Copying a resource onto itself is a no-op per the nio contract.
    if (aSource.equals(aDestination)) {
        return;
    }
    boolean replaceExisting = false;
    List<CopyOption> optionsList = new ArrayList<>(Arrays.asList(copyOptions));
    if (!optionsList.contains(StandardCopyOption.COPY_ATTRIBUTES)) {
        throw Utility.logError(logger, new UnsupportedOperationException("StandardCopyOption.COPY_ATTRIBUTES "
            + "must be specified as the service will always copy file attributes."));
    }
    optionsList.remove(StandardCopyOption.COPY_ATTRIBUTES);
    if (optionsList.contains(StandardCopyOption.REPLACE_EXISTING)) {
        replaceExisting = true;
        optionsList.remove(StandardCopyOption.REPLACE_EXISTING);
    }
    if (!optionsList.isEmpty()) {
        // Fixed typo in the message: "StandareCopyOption" -> "StandardCopyOption".
        throw Utility.logError(logger, new UnsupportedOperationException("Unsupported copy option found. Only "
            + "StandardCopyOption.COPY_ATTRIBUTES and StandardCopyOption.REPLACE_EXISTING are supported."));
    }
    validateNotRoot(aSource, "Copy source");
    validateNotRoot(aDestination, "Copy destination");
    BlobClient sourceBlob = aSource.toBlobClient();
    BlobClient destinationBlob = aDestination.toBlobClient();
    DirectoryStatus destinationStatus = checkDirStatus(destinationBlob);
    if (destinationStatus.equals(DirectoryStatus.NOT_EMPTY)) {
        throw Utility.logError(logger, new DirectoryNotEmptyException(aDestination.toString()));
    }
    /*
    Set request conditions if we should not overwrite. We can error out here if we know something already exists,
    but we will also create request conditions as a safeguard against overwriting something that was created
    between our check and put.
    */
    BlobRequestConditions requestConditions = null;
    if (!replaceExisting) {
        if (!destinationStatus.equals(DirectoryStatus.DOES_NOT_EXIST)) {
            throw Utility.logError(logger, new FileAlreadyExistsException(aDestination.toString()));
        }
        requestConditions = new BlobRequestConditions().setIfNoneMatch("*");
    }
    /*
    More path validation
    Check that the parent for the destination exists. We only need to perform this check if there is nothing
    currently at the destination, for if the destination exists, its parent at least weakly exists and we
    can skip a service call.
    */
    if (destinationStatus.equals(DirectoryStatus.DOES_NOT_EXIST) && !checkParentDirectoryExists(aDestination)) {
        throw Utility.logError(logger, new IOException("Parent directory of destination location does not exist."
            + "The destination path is therefore invalid. Destination: " + aDestination.toString()));
    }
    /*
    Try to copy the resource at the source path.
    There is an optimization here where we try to do the copy first and only check for a virtual directory if
    there's a 404. In the cases of files and concrete directories, this only requires one request. For virtual
    directories, however, this requires three requests: failed copy, check status, create directory. Depending on
    customer scenarios and how many virtual directories they copy, it could be better to check the directory status
    first and then do a copy or createDir, which would always be two requests for all resource types.
    */
    try {
        SyncPoller<BlobCopyInfo, Void> pollResponse =
            destinationBlob.beginCopy(sourceBlob.getBlobUrl(), null, null, null, null, requestConditions, null);
        pollResponse.waitForCompletion(Duration.ofSeconds(COPY_TIMEOUT_SECONDS));
    } catch (BlobStorageException e) {
        if (e.getErrorCode().equals(BlobErrorCode.BLOB_NOT_FOUND)
            && !checkDirStatus(sourceBlob).equals(DirectoryStatus.DOES_NOT_EXIST)) {
            /*
            We already checked that the parent exists and validated the paths above, so we can put the blob
            directly.
            */
            putDirectoryBlob(destinationBlob, null, null, requestConditions);
        } else {
            throw Utility.logError(logger, new IOException(e));
        }
    } catch (RuntimeException e) {
        throw Utility.logError(logger, new IOException(e));
    }
}
/**
 * This method will check if a directory is extant and/or empty and accommodates virtual directories. This method
 * will not check the status of root directories.
 *
 * @param dirBlobClient client pointing at the blob whose directory status should be determined; must not be null
 * @return one of DOES_NOT_EXIST, EMPTY, NOT_EMPTY, or NOT_A_DIRECTORY
 * @throws IOException if the underlying listing call fails
 */
DirectoryStatus checkDirStatus(BlobClient dirBlobClient) throws IOException {
if (dirBlobClient == null) {
throw Utility.logError(logger, new IllegalArgumentException("The blob client was null."));
}
// Listing at most two results under the blob-name prefix is enough to distinguish all four statuses
// without enumerating the whole directory.
BlobContainerClient containerClient = getContainerClient(dirBlobClient);
ListBlobsOptions listOptions = new ListBlobsOptions().setMaxResultsPerPage(2)
.setPrefix(dirBlobClient.getBlobName())
.setDetails(new BlobListDetails().setRetrieveMetadata(true));
/*
Do a list on prefix.
Zero elements means no virtual dir. Does not exist.
One element that matches this dir means empty.
One element that doesn't match this dir or more than one element. Not empty.
One element that matches the name but does not have a directory marker means the resource is not a directory.
Note that blob names that match the prefix exactly are returned in listing operations.
*/
try {
Iterator<BlobItem> blobIterator = containerClient.listBlobsByHierarchy(AzureFileSystem.PATH_SEPARATOR,
listOptions, null).iterator();
if (!blobIterator.hasNext()) {
return DirectoryStatus.DOES_NOT_EXIST;
} else {
BlobItem item = blobIterator.next();
if (blobIterator.hasNext()) {
return DirectoryStatus.NOT_EMPTY;
}
if (!item.getName().equals(dirBlobClient.getBlobName())) {
/*
Names do not match. Must be a virtual dir with one item. e.g. blob with name "foo/bar" means dir "foo"
exists.
*/
return DirectoryStatus.NOT_EMPTY;
}
// Exact name match: a marker-metadata entry means a concrete empty directory; otherwise it is a file.
if (item.getMetadata() != null && item.getMetadata().containsKey(DIR_METADATA_MARKER)) {
return DirectoryStatus.EMPTY;
}
return DirectoryStatus.NOT_A_DIRECTORY;
}
} catch (BlobStorageException e) {
throw Utility.logError(logger, new IOException(e));
}
}
/*int checkRootDirStatus(BlobContainerClient rootClient) {
}*/
/**
 * {@inheritDoc}
 */
@Override
public void move(Path path, Path path1, CopyOption... copyOptions) throws IOException {
// TODO(review): not yet implemented — currently a silent no-op; confirm this is intentional work-in-progress.
}
/**
 * {@inheritDoc}
 */
@Override
public boolean isSameFile(Path path, Path path1) throws IOException {
// TODO(review): stub — always reports false regardless of the inputs.
return false;
}
/**
 * {@inheritDoc}
 */
@Override
public boolean isHidden(Path path) throws IOException {
// TODO(review): stub — always reports false; no hidden-file semantics implemented yet.
return false;
}
/**
 * {@inheritDoc}
 */
@Override
public FileStore getFileStore(Path path) throws IOException {
// TODO(review): stub — returns null; callers expecting a FileStore will NPE until this is implemented.
return null;
}
/**
 * {@inheritDoc}
 */
@Override
public void checkAccess(Path path, AccessMode... accessModes) throws IOException {
// TODO(review): stub — performs no access validation; every access check currently succeeds silently.
}
/**
 * {@inheritDoc}
 */
@Override
public <V extends FileAttributeView> V getFileAttributeView(Path path, Class<V> aClass, LinkOption... linkOptions) {
// TODO(review): stub — returns null for every requested view type.
return null;
}
/**
 * {@inheritDoc}
 */
@Override
public <A extends BasicFileAttributes> A readAttributes(Path path, Class<A> aClass, LinkOption... linkOptions)
throws IOException {
// TODO(review): stub — returns null instead of the requested attributes object.
return null;
}
/**
 * {@inheritDoc}
 */
@Override
public Map<String, Object> readAttributes(Path path, String s, LinkOption... linkOptions) throws IOException {
// TODO(review): stub — returns null; the nio contract expects a (possibly empty) attribute map.
return null;
}
/**
 * {@inheritDoc}
 */
@Override
public void setAttribute(Path path, String s, Object o, LinkOption... linkOptions) throws IOException {
// TODO(review): stub — attribute writes are silently dropped.
}
/**
 * Removes the named file system from this provider's cache of open file systems.
 */
void closeFileSystem(String fileSystemName) {
this.openFileSystems.remove(fileSystemName);
}
/**
 * Extracts the storage account name from the query component of the given URI.
 *
 * Validates that the URI uses this provider's scheme, that a query component is present, and that the query
 * contains a non-empty account-name parameter.
 * NOTE(review): the string literals below appear truncated (unterminated) in this copy of the source —
 * verify against the original file before relying on the exact error messages.
 */
private String extractAccountName(URI uri) {
if (!uri.getScheme().equals(this.getScheme())) {
throw Utility.logError(this.logger, new IllegalArgumentException(
"URI scheme does not match this provider"));
}
if (CoreUtils.isNullOrEmpty(uri.getQuery())) {
throw Utility.logError(this.logger, new IllegalArgumentException("URI does not contain a query "
+ "component. FileSystems require a URI of the format \"azb:
}
// Scan the '&'-separated query parameters for the account key and take its value.
String accountName = Flux.fromArray(uri.getQuery().split("&"))
.filter(s -> s.startsWith(ACCOUNT_QUERY_KEY + "="))
.switchIfEmpty(Mono.error(Utility.logError(this.logger, new IllegalArgumentException(
"URI does not contain an \"" + ACCOUNT_QUERY_KEY + "=\" parameter. FileSystems require a URI "
+ "of the format \"azb:
.map(s -> s.substring(ACCOUNT_QUERY_KEY.length() + 1))
.blockLast();
if (CoreUtils.isNullOrEmpty(accountName)) {
throw Utility.logError(logger, new IllegalArgumentException("No account name provided in URI query."));
}
return accountName;
}
/**
 * Asserts the given path is an {@code AzurePath} and returns it as such.
 */
private AzurePath validatePathInstanceType(Path path) {
    if (path instanceof AzurePath) {
        return (AzurePath) path;
    }
    throw Utility.logError(logger, new IllegalArgumentException("This provider cannot operate on subtypes of "
        + "Path other than AzurePath"));
}
/**
 * Throws if the given path is a root directory, since the named operation is not supported on roots.
 * The operation name is interpolated into the exception message for context.
 */
private void validateNotRoot(Path path, String operation) {
if (((AzurePath) path).isRoot()) {
throw Utility.logError(logger, new IllegalArgumentException(
String.format("%s is not supported on a root directory. Path: %s", operation, path.toString())));
}
}
} |
use logger please | public void generateTestData() {
Random rand = new Random();
ObjectMapper mapper = new ObjectMapper();
for (int i = 0; i < 40; i++) {
Person person = getRandomPerson(rand);
try {
docs.add(new CosmosItemProperties(mapper.writeValueAsString(person)));
} catch (JsonProcessingException e) {
e.printStackTrace();
}
}
} | e.printStackTrace(); | public void generateTestData() {
Random rand = new Random();
ObjectMapper mapper = new ObjectMapper();
for (int i = 0; i < 40; i++) {
Person person = getRandomPerson(rand);
try {
docs.add(new CosmosItemProperties(mapper.writeValueAsString(person)));
} catch (JsonProcessingException e) {
logger.error(e.getMessage());
}
}
String resourceJson = String.format("{ " + "\"id\": \"%s\", \"intprop\": %d }", UUID.randomUUID().toString(),
5);
String resourceJson2 = String.format("{ " + "\"id\": \"%s\", \"intprop\": %f }", UUID.randomUUID().toString(),
5.0f);
docs.add(new CosmosItemProperties(resourceJson));
docs.add(new CosmosItemProperties(resourceJson2));
} | class DistinctQueryTests extends TestSuiteBase {
private final String FIELD = "name";
private CosmosAsyncContainer createdCollection;
private ArrayList<CosmosItemProperties> docs = new ArrayList<>();
private CosmosAsyncClient client;
@Factory(dataProvider = "clientBuilders")
public DistinctQueryTests(CosmosClientBuilder clientBuilder) {
super(clientBuilder);
}
private static String getRandomName(Random rand) {
StringBuilder stringBuilder = new StringBuilder();
stringBuilder.append("name_" + rand.nextInt(100));
return stringBuilder.toString();
}
private static City getRandomCity(Random rand) {
int index = rand.nextInt(3);
switch (index) {
case 0:
return City.LosAngeles;
case 1:
return City.NewYork;
case 2:
return City.Seattle;
}
return City.LosAngeles;
}
private static double getRandomIncome(Random rand) {
return rand.nextDouble() * Double.MAX_VALUE;
}
private static int getRandomAge(Random rand) {
return rand.nextInt(100);
}
@Test(groups = {"simple"}, timeOut = TIMEOUT, dataProvider = "queryMetricsArgProvider")
public void queryDocuments(boolean qmEnabled) {
String query = "SELECT DISTINCT c.name from c";
FeedOptions options = new FeedOptions();
options.setMaxItemCount(5);
options.setPopulateQueryMetrics(qmEnabled);
options.setMaxDegreeOfParallelism(2);
CosmosPagedFlux<CosmosItemProperties> queryObservable = createdCollection.queryItems(query, options,
CosmosItemProperties.class);
List<Object> nameList = docs.stream().map(d -> d.get(FIELD)).collect(Collectors.toList());
List<Object> distinctNameList = nameList.stream().distinct().collect(Collectors.toList());
FeedResponseListValidator<CosmosItemProperties> validator =
new FeedResponseListValidator.Builder<CosmosItemProperties>()
.totalSize(distinctNameList.size())
.allPagesSatisfy(new FeedResponseValidator.Builder<CosmosItemProperties>()
.requestChargeGreaterThanOrEqualTo(1.0)
.build())
.hasValidQueryMetrics(qmEnabled)
.build();
validateQuerySuccess(queryObservable.byPage(), validator, TIMEOUT);
}
@Test(groups = {"simple"}, timeOut = TIMEOUT)
public void queryDistinctDocuments() {
List<String> queries = Arrays.asList(
"SELECT %s VALUE null",
"SELECT %s VALUE false",
"SELECT %s VALUE true",
"SELECT %s VALUE 1",
"SELECT %s VALUE 'a'",
"SELECT %s VALUE [null, true, false, 1, 'a']",
"SELECT %s false AS p",
"SELECT %s 1 AS p",
"SELECT %s 'a' AS p",
"SELECT %s VALUE null FROM c",
"SELECT %s VALUE false FROM c",
"SELECT %s VALUE 1 FROM c",
"SELECT %s VALUE 'a' FROM c",
"SELECT %s null AS p FROM c",
"SELECT %s false AS p FROM c",
"SELECT %s 1 AS p FROM c",
"SELECT %s 'a' AS p FROM c",
"SELECT %s VALUE c.income from c",
"SELECT %s VALUE c.age from c",
"SELECT %s c.income, c.income AS income2 from c",
"SELECT %s c.income, c.age from c",
"SELECT %s c.name from c",
"SELECT %s VALUE c.city from c",
"SELECT %s VALUE c.partitionKey from c",
"SELECT %s c.name, c.name AS name2 from c",
"SELECT %s c.name, c.city from c",
"SELECT %s c.children from c",
"SELECT %s c.children, c.children AS children2 from c",
"SELECT %s VALUE c.pet from c",
"SELECT %s c.pet, c.pet AS pet2 from c",
"SELECT %s VALUE ABS(c.age) FROM c",
"SELECT %s VALUE LEFT(c.name, 1) FROM c",
"SELECT %s VALUE c.name || ', ' || (c.city ?? '') FROM c",
"SELECT %s VALUE ARRAY_LENGTH(c.children) FROM c",
"SELECT %s VALUE IS_DEFINED(c.city) FROM c",
"SELECT %s VALUE (c.children[0].age ?? 0) + (c.children[1].age ?? 0) FROM c",
"SELECT %s c.name FROM c ORDER BY c.name ASC",
"SELECT %s c.age FROM c ORDER BY c.age",
"SELECT %s c.city FROM c ORDER BY c.city",
"SELECT %s c.city FROM c ORDER BY c.age",
"SELECT %s LEFT(c.name, 1) FROM c ORDER BY c.name",
"SELECT %s TOP 2147483647 VALUE c.age FROM c",
"SELECT %s TOP 2147483647 c.age FROM c ORDER BY c.age",
"SELECT %s VALUE MAX(c.age) FROM c",
"SELECT %s VALUE c.age FROM p JOIN c IN p.children",
"SELECT %s p.age AS ParentAge, c.age ChildAge FROM p JOIN c IN p.children",
"SELECT %s VALUE c.name FROM p JOIN c IN p.children",
"SELECT %s p.name AS ParentName, c.name ChildName FROM p JOIN c IN p.children",
"SELECT %s r.age, s FROM r JOIN (SELECT DISTINCT VALUE c FROM (SELECT 1 a) c) s WHERE r.age > 25",
"SELECT %s p.name, p.age FROM (SELECT DISTINCT * FROM r) p WHERE p.age > 25",
"SELECT %s p.name, (SELECT DISTINCT VALUE p.age) AS Age FROM p",
"SELECT %s p.name, p.age FROM p WHERE (SELECT DISTINCT VALUE LEFT(p.name, 1)) > 'A' AND (SELECT " +
"DISTINCT VALUE p.age) > 21",
"SELECT %s p.name, (SELECT DISTINCT VALUE p.age) AS Age FROM p WHERE (SELECT DISTINCT VALUE p.name) >" +
" 'A' OR (SELECT DISTINCT VALUE p.age) > 21",
"SELECT %s * FROM c"
);
for (String query : queries) {
FeedOptions options = new FeedOptions();
options.setMaxItemCount(5);
options.setMaxDegreeOfParallelism(2);
List<CosmosItemProperties> documentsFromWithDistinct = new ArrayList<>();
List<CosmosItemProperties> documentsFromWithoutDistinct = new ArrayList<>();
final String queryWithDistinct = String.format(query, "DISTINCT");
final String queryWithoutDistinct = String.format(query, "");
CosmosPagedFlux<CosmosItemProperties> queryObservable = createdCollection.queryItems(queryWithDistinct, options,
CosmosItemProperties.class);
Iterator<FeedResponse<CosmosItemProperties>> iterator = queryObservable.byPage().toIterable().iterator();
Utils.ValueHolder<String> outHash = new Utils.ValueHolder<>();
UnorderedDistinctMap distinctMap = new UnorderedDistinctMap();
while (iterator.hasNext()) {
FeedResponse<CosmosItemProperties> next = iterator.next();
for (CosmosItemProperties document : next.getResults()) {
if (distinctMap.add(document, outHash)) {
documentsFromWithoutDistinct.add(document);
}
}
}
CosmosPagedFlux<CosmosItemProperties> queryObservableWithDistinct = createdCollection
.queryItems(queryWithoutDistinct, options,
CosmosItemProperties.class);
iterator = queryObservableWithDistinct.byPage().toIterable().iterator();
while (iterator.hasNext()) {
FeedResponse<CosmosItemProperties> next = iterator.next();
documentsFromWithDistinct.addAll(next.getResults());
}
assertThat(documentsFromWithDistinct.size()).isEqualTo(documentsFromWithoutDistinct.size());
}
}
public void bulkInsert() {
generateTestData();
voidBulkInsertBlocking(createdCollection, docs);
}
private Pet getRandomPet(Random rand) {
String name = getRandomName(rand);
int age = getRandomAge(rand);
return new Pet(name, age);
}
public Person getRandomPerson(Random rand) {
String name = getRandomName(rand);
City city = getRandomCity(rand);
double income = getRandomIncome(rand);
List<Person> people = new ArrayList<Person>();
if (rand.nextInt(10) % 10 == 0) {
for (int i = 0; i < rand.nextInt(5); i++) {
people.add(getRandomPerson(rand));
}
}
int age = getRandomAge(rand);
Pet pet = getRandomPet(rand);
UUID guid = UUID.randomUUID();
Person p = new Person(name, city, income, people, age, pet, guid);
return p;
}
@AfterClass(groups = {"simple"}, timeOut = SHUTDOWN_TIMEOUT, alwaysRun = true)
public void afterClass() {
safeClose(client);
}
@BeforeClass(groups = {"simple"}, timeOut = 3 * SETUP_TIMEOUT)
public void beforeClass() throws Exception {
client = this.clientBuilder().buildAsyncClient();
createdCollection = getSharedMultiPartitionCosmosContainer(client);
truncateCollection(createdCollection);
bulkInsert();
waitIfNeededForReplicasToCatchUp(clientBuilder());
}
public enum City {
NewYork,
LosAngeles,
Seattle
}
public final class Pet extends JsonSerializable {
@JsonProperty("name")
public String name;
@JsonProperty("age")
public int age;
public Pet(String name, int age) {
this.name = name;
this.age = age;
}
}
public final class Person extends JsonSerializable {
@JsonProperty("name")
public String name;
@JsonProperty("id")
public String id;
@JsonProperty("city")
public City city;
@JsonProperty("income")
public double income;
@JsonProperty("children")
public List<Person> children;
@JsonProperty("age")
public int age;
@JsonProperty("pet")
public Pet pet;
@JsonProperty("guid")
public UUID guid;
public Person(String name, City city, double income, List<Person> children, int age, Pet pet, UUID guid) {
this.name = name;
this.city = city;
this.income = income;
this.children = children;
this.age = age;
this.pet = pet;
this.guid = guid;
this.id = UUID.randomUUID().toString();
}
}
} | class DistinctQueryTests extends TestSuiteBase {
private final int TIMEOUT_120 = 120000;
private final String FIELD = "name";
private CosmosAsyncContainer createdCollection;
private ArrayList<CosmosItemProperties> docs = new ArrayList<>();
private CosmosAsyncClient client;
@Factory(dataProvider = "clientBuildersWithDirect")
public DistinctQueryTests(CosmosClientBuilder clientBuilder) {
super(clientBuilder);
}
private static String getRandomName(Random rand) {
StringBuilder stringBuilder = new StringBuilder();
stringBuilder.append("name_" + rand.nextInt(100));
return stringBuilder.toString();
}
private static City getRandomCity(Random rand) {
int index = rand.nextInt(3);
switch (index) {
case 0:
return City.LOS_ANGELES;
case 1:
return City.NEW_YORK;
case 2:
return City.SEATTLE;
}
return City.LOS_ANGELES;
}
private static double getRandomIncome(Random rand) {
return rand.nextDouble() * Double.MAX_VALUE;
}
private static int getRandomAge(Random rand) {
return rand.nextInt(100);
}
@Test(groups = {"simple"}, timeOut = TIMEOUT, dataProvider = "queryMetricsArgProvider")
public void queryDocuments(boolean qmEnabled) {
String query = "SELECT DISTINCT c.name from c";
FeedOptions options = new FeedOptions();
options.setMaxItemCount(5);
options.setPopulateQueryMetrics(qmEnabled);
options.setMaxDegreeOfParallelism(2);
CosmosPagedFlux<CosmosItemProperties> queryObservable =
createdCollection.queryItems(query,
options,
CosmosItemProperties.class);
List<Object> nameList = docs.stream()
.map(d -> ModelBridgeInternal.getObjectFromJsonSerializable(d, FIELD))
.collect(Collectors.toList());
List<Object> distinctNameList = nameList.stream().distinct().collect(Collectors.toList());
FeedResponseListValidator<CosmosItemProperties> validator =
new FeedResponseListValidator.Builder<CosmosItemProperties>()
.totalSize(distinctNameList.size())
.allPagesSatisfy(new FeedResponseValidator.Builder<CosmosItemProperties>()
.requestChargeGreaterThanOrEqualTo(1.0)
.build())
.hasValidQueryMetrics(qmEnabled)
.build();
validateQuerySuccess(queryObservable.byPage(), validator, TIMEOUT);
}
@Test(groups = {"simple"}, timeOut = TIMEOUT_120)
public void queryDistinctDocuments() {
List<String> queries = Arrays.asList(
"SELECT %s VALUE null",
"SELECT %s VALUE false",
"SELECT %s VALUE true",
"SELECT %s VALUE 1",
"SELECT %s VALUE 'a'",
"SELECT %s VALUE [null, true, false, 1, 'a']",
"SELECT %s false AS p",
"SELECT %s 1 AS p",
"SELECT %s 'a' AS p",
"SELECT %s VALUE null FROM c",
"SELECT %s VALUE false FROM c",
"SELECT %s VALUE 1 FROM c",
"SELECT %s VALUE 'a' FROM c",
"SELECT %s null AS p FROM c",
"SELECT %s false AS p FROM c",
"SELECT %s 1 AS p FROM c",
"SELECT %s 'a' AS p FROM c",
"SELECT %s VALUE c.income from c",
"SELECT %s VALUE c.age from c",
"SELECT %s c.income, c.income AS income2 from c",
"SELECT %s c.income, c.age from c",
"SELECT %s c.name from c",
"SELECT %s VALUE c.city from c",
"SELECT %s c.name, c.name AS name2 from c",
"SELECT %s c.name, c.city from c",
"SELECT %s c.children from c",
"SELECT %s c.children, c.children AS children2 from c",
"SELECT %s VALUE c.pet from c",
"SELECT %s c.pet, c.pet AS pet2 from c",
"SELECT %s VALUE ABS(c.age) FROM c",
"SELECT %s VALUE LEFT(c.name, 1) FROM c",
"SELECT %s VALUE c.name || ', ' || (c.city ?? '') FROM c",
"SELECT %s VALUE ARRAY_LENGTH(c.children) FROM c",
"SELECT %s VALUE IS_DEFINED(c.city) FROM c",
"SELECT %s VALUE (c.children[0].age ?? 0) + (c.children[1].age ?? 0) FROM c",
"SELECT %s c.name FROM c ORDER BY c.name ASC",
"SELECT %s c.age FROM c ORDER BY c.age",
"SELECT %s c.city FROM c ORDER BY c.city",
"SELECT %s c.city FROM c ORDER BY c.age",
"SELECT %s LEFT(c.name, 1) FROM c ORDER BY c.name",
"SELECT %s TOP 2147483647 VALUE c.age FROM c",
"SELECT %s TOP 2147483647 c.age FROM c ORDER BY c.age",
"SELECT %s VALUE MAX(c.age) FROM c",
"SELECT %s VALUE c.age FROM p JOIN c IN p.children",
"SELECT %s p.age AS ParentAge, c.age ChildAge FROM p JOIN c IN p.children",
"SELECT %s VALUE c.name FROM p JOIN c IN p.children",
"SELECT %s p.name AS ParentName, c.name ChildName FROM p JOIN c IN p.children",
"SELECT %s r.age, s FROM r JOIN (SELECT DISTINCT VALUE c FROM (SELECT 1 a) c) s WHERE r.age > 25",
"SELECT %s p.name, p.age FROM (SELECT DISTINCT * FROM r) p WHERE p.age > 25",
"SELECT %s p.name, (SELECT DISTINCT VALUE p.age) AS Age FROM p",
"SELECT %s p.name, p.age FROM p WHERE (SELECT DISTINCT VALUE LEFT(p.name, 1)) > 'A' AND (SELECT " +
"DISTINCT VALUE p.age) > 21",
"SELECT %s p.name, (SELECT DISTINCT VALUE p.age) AS Age FROM p WHERE (SELECT DISTINCT VALUE p.name) >" +
" 'A' OR (SELECT DISTINCT VALUE p.age) > 21",
"SELECT %s * FROM c"
);
for (String query : queries) {
FeedOptions options = new FeedOptions();
options.setMaxItemCount(5);
options.setMaxDegreeOfParallelism(2);
List<CosmosItemProperties> documentsFromWithDistinct = new ArrayList<>();
List<CosmosItemProperties> documentsFromWithoutDistinct = new ArrayList<>();
final String queryWithDistinct = String.format(query, "DISTINCT");
final String queryWithoutDistinct = String.format(query, "");
CosmosPagedFlux<CosmosItemProperties> queryObservable = createdCollection.queryItems(queryWithoutDistinct,
options,
CosmosItemProperties.class);
Iterator<FeedResponse<CosmosItemProperties>> iterator = queryObservable.byPage().toIterable().iterator();
Utils.ValueHolder<String> outHash = new Utils.ValueHolder<>();
UnorderedDistinctMap distinctMap = new UnorderedDistinctMap();
while (iterator.hasNext()) {
FeedResponse<CosmosItemProperties> next = iterator.next();
for (CosmosItemProperties document : next.getResults()) {
if (distinctMap.add(document, outHash)) {
documentsFromWithoutDistinct.add(document);
}
}
}
CosmosPagedFlux<CosmosItemProperties> queryObservableWithDistinct = createdCollection
.queryItems(queryWithDistinct, options,
CosmosItemProperties.class);
iterator = queryObservableWithDistinct.byPage().toIterable().iterator();
while (iterator.hasNext()) {
FeedResponse<CosmosItemProperties> next = iterator.next();
documentsFromWithDistinct.addAll(next.getResults());
}
assertThat(documentsFromWithDistinct.size()).isGreaterThanOrEqualTo(1);
assertThat(documentsFromWithDistinct.size()).isEqualTo(documentsFromWithoutDistinct.size());
}
}
@Test(groups = {"simple"}, timeOut = TIMEOUT, dataProvider = "queryMetricsArgProvider")
public void queryDocumentsForDistinctIntValues(boolean qmEnabled) {
String query = "SELECT DISTINCT c.intprop from c";
FeedOptions options = new FeedOptions();
options.setMaxItemCount(5);
options.setPopulateQueryMetrics(qmEnabled);
options.setMaxDegreeOfParallelism(2);
CosmosPagedFlux<CosmosItemProperties> queryObservable = createdCollection.queryItems(query, options,
CosmosItemProperties.class);
Iterator<FeedResponse<CosmosItemProperties>> iterator = queryObservable.byPage().collectList().single().block()
.iterator();
List<CosmosItemProperties> itemPropertiesList = new ArrayList<>();
while (iterator.hasNext()) {
FeedResponse<CosmosItemProperties> next = iterator.next();
itemPropertiesList.addAll(next.getResults());
}
assertThat(itemPropertiesList.size()).isEqualTo(2);
List<Object> intpropList = itemPropertiesList
.stream()
.map(cosmosItemProperties ->
ModelBridgeInternal.getObjectFromJsonSerializable(
cosmosItemProperties, "intprop"))
.collect(Collectors.toList());
assertThat(intpropList).containsExactlyInAnyOrder(null, 5);
}
public void bulkInsert() {
generateTestData();
voidBulkInsertBlocking(createdCollection, docs);
}
private Pet getRandomPet(Random rand) {
String name = getRandomName(rand);
int age = getRandomAge(rand);
return new Pet(name, age);
}
public Person getRandomPerson(Random rand) {
String name = getRandomName(rand);
City city = getRandomCity(rand);
double income = getRandomIncome(rand);
List<Person> people = new ArrayList<Person>();
if (rand.nextInt(10) % 10 == 0) {
for (int i = 0; i < rand.nextInt(5); i++) {
people.add(getRandomPerson(rand));
}
}
int age = getRandomAge(rand);
Pet pet = getRandomPet(rand);
UUID guid = UUID.randomUUID();
Person p = new Person(name, city, income, people, age, pet, guid);
return p;
}
@AfterClass(groups = {"simple"}, timeOut = SHUTDOWN_TIMEOUT, alwaysRun = true)
public void afterClass() {
safeClose(client);
}
@BeforeClass(groups = {"simple"}, timeOut = 3 * SETUP_TIMEOUT)
public void beforeClass() throws Exception {
client = this.getClientBuilder().buildAsyncClient();
createdCollection = getSharedMultiPartitionCosmosContainer(client);
truncateCollection(createdCollection);
bulkInsert();
waitIfNeededForReplicasToCatchUp(this.getClientBuilder());
}
public enum City {
NEW_YORK,
LOS_ANGELES,
SEATTLE
}
public final class Pet extends JsonSerializable {
@JsonProperty("name")
public String name;
@JsonProperty("age")
public int age;
public Pet(String name, int age) {
this.name = name;
this.age = age;
}
}
public final class Person extends JsonSerializable {
@JsonProperty("name")
public String name;
@JsonProperty("id")
public String id;
@JsonProperty("city")
public City city;
@JsonProperty("income")
public double income;
@JsonProperty("children")
public List<Person> children;
@JsonProperty("age")
public int age;
@JsonProperty("pet")
public Pet pet;
@JsonProperty("guid")
public UUID guid;
public Person(String name, City city, double income, List<Person> children, int age, Pet pet, UUID guid) {
this.name = name;
this.city = city;
this.income = income;
this.children = children;
this.age = age;
this.pet = pet;
this.guid = guid;
this.id = UUID.randomUUID().toString();
}
}
} |
ObjectMapper instantiation is expensive. you should have a static ObjectMapper instead: `OrderedDistinctMap.OBJECT_MAPPER` | public OrderedDistinctMap(String lastHash) {
mapper = new ObjectMapper().configure(SerializationFeature.ORDER_MAP_ENTRIES_BY_KEYS, true);
this.lastHash = lastHash;
} | mapper = new ObjectMapper().configure(SerializationFeature.ORDER_MAP_ENTRIES_BY_KEYS, true); | public OrderedDistinctMap(String lastHash) {
this.lastHash = lastHash;
} | class OrderedDistinctMap extends DistinctMap {
private final ObjectMapper mapper;
private String lastHash;
@Override
public boolean add(Object resource, Utils.ValueHolder<String> outHash) {
try {
String sortedJson = mapper.writeValueAsString(resource);
MessageDigest md = MessageDigest.getInstance("SHA-1");
byte[] digest = md.digest(sortedJson.getBytes(Charset.defaultCharset()));
outHash.v = Base64.getEncoder().encodeToString(digest);
boolean value = !StringUtils.equals(lastHash, outHash.v);
lastHash = outHash.v;
return value;
} catch (JsonProcessingException | NoSuchAlgorithmException e) {
throw new IllegalStateException(e);
}
}
} | class OrderedDistinctMap extends DistinctMap {
private volatile String lastHash;
@Override
public boolean add(Resource resource, Utils.ValueHolder<String> outHash) {
try {
outHash.v = getHash(resource);
final boolean value = !StringUtils.equals(lastHash, outHash.v);
lastHash = outHash.v;
return value;
} catch (JsonProcessingException | NoSuchAlgorithmException e) {
throw new IllegalStateException(e);
}
}
} |
ditto, ObjectMapper instantiation is expensive. we should use a static one. You can create here `UnorderedDistinctMap.OBJECT_MAMMER` | public UnorderedDistinctMap() {
resultSet = new HashSet<>();
mapper = new ObjectMapper().configure(SerializationFeature.ORDER_MAP_ENTRIES_BY_KEYS, true);
} | mapper = new ObjectMapper().configure(SerializationFeature.ORDER_MAP_ENTRIES_BY_KEYS, true); | public UnorderedDistinctMap() {
resultSet = Collections.newSetFromMap(new ConcurrentHashMap<>());
} | class UnorderedDistinctMap extends DistinctMap {
private final ObjectMapper mapper;
private final HashSet<String> resultSet;
@Override
public boolean add(Object resource, Utils.ValueHolder<String> outHash) {
try {
String sortedJson = mapper.writeValueAsString(resource);
MessageDigest md = MessageDigest.getInstance("SHA-1");
byte[] digest = md.digest(sortedJson.getBytes(Charset.defaultCharset()));
outHash.v = Base64.getEncoder().encodeToString(digest);
return resultSet.add(outHash.v);
} catch (JsonProcessingException e) {
throw new IllegalStateException(e);
} catch (NoSuchAlgorithmException e) {
throw new IllegalStateException(e);
}
}
} | class UnorderedDistinctMap extends DistinctMap {
private final Set<String> resultSet;
@Override
public boolean add(Resource resource, Utils.ValueHolder<String> outHash) {
try {
outHash.v = getHash(resource);
return resultSet.add(outHash.v);
} catch (JsonProcessingException | NoSuchAlgorithmException e) {
throw new IllegalStateException(e);
}
}
} |
This should be `false` right? since the ordering does not change the hash. | public void objectOrder(DistinctQueryType queryType) {
String resource1 = String.format("{ "
+ "\"id\": \"12345\", "
+ "\"mypk\": \"abcde\""
+ "} ");
String resource2 = String.format("{ "
+ "\"mypk\": \"abcde\","
+ "\"id\": \"12345\""
+ "} ");
DistinctMap distinctMap = DistinctMap.create(queryType, null);
Utils.ValueHolder<String> outHash = new Utils.ValueHolder<>();
boolean add = distinctMap.add(resource1, outHash);
assertThat(add).as("Value should be added first time").isTrue();
boolean add2 = distinctMap.add(resource2, outHash);
assertThat(add2).as("Order of objects in map should be treated same").isTrue();
} | assertThat(add2).as("Order of objects in map should be treated same").isTrue(); | public void objectOrder(DistinctQueryType queryType) {
String resource1 = String.format("{ "
+ "\"id\": \"12345\","
+ "\"mypk\": \"abcde\""
+ "} ");
String resource2 = String.format("{ "
+ "\"mypk\": \"abcde\","
+ "\"id\": \"12345\""
+ "} ");
Document resource = new Document(resource1);
DistinctMap distinctMap = DistinctMap.create(queryType, null);
Utils.ValueHolder<String> outHash = new Utils.ValueHolder<>();
boolean add = distinctMap.add(resource, outHash);
assertThat(add).as("Value should be added first time").isTrue();
resource = new Document(resource2);
boolean add2 = distinctMap.add(resource, outHash);
assertThat(add2).as("Order of objects in map should be treated same").isFalse();
} | class DistinctMapTest {
@DataProvider(name = "distinctMapArgProvider")
public Object[][] distinctMapArgProvider() {
return new Object[][] {
{DistinctQueryType.Ordered},
{DistinctQueryType.Unordered},
};
}
@Test(groups = "unit", dataProvider = "distinctMapArgProvider")
public void integerValue(DistinctQueryType queryType) {
String resource = String.format("{ " + "\"id\": %d + \"}", 5);
DistinctMap distinctMap = DistinctMap.create(queryType, null);
Utils.ValueHolder<String> outHash = new Utils.ValueHolder<>();
boolean add = distinctMap.add(resource, outHash);
System.out.println("outHash5 = " + outHash.v);
assertThat(add).as("Value should be added first time").isTrue();
boolean add2 = distinctMap.add(resource, outHash);
assertThat(add2).as("same value should not be added again").isFalse();
resource = String.format("{ " + "\"id\": %d + \"}", 3);
boolean add3 = distinctMap.add(resource, outHash);
assertThat(add3).as("different value should be added again").isTrue();
}
@Test(groups = "unit", dataProvider = "distinctMapArgProvider")
public void stringValue(DistinctQueryType queryType) {
String resource = String.format("{ " + "\"id\": \"5\" + \"}");
DistinctMap distinctMap = DistinctMap.create(queryType, null);
Utils.ValueHolder<String> outHash = new Utils.ValueHolder<>();
boolean add = distinctMap.add(resource, outHash);
assertThat(add).as("Value should be added first time").isTrue();
boolean add2 = distinctMap.add(resource, outHash);
assertThat(add2).as("same value should not be added again").isFalse();
resource = String.format("{ " + "\"id\": \"6\" + \"}");
boolean add3 = distinctMap.add(resource, outHash);
assertThat(add3).as("different value should be added again").isTrue();
}
@Test(groups = "unit", dataProvider = "distinctMapArgProvider")
public void objectValue(DistinctQueryType queryType) {
String resource = String.format("{ "
+ "\"id\": \"%s\", "
+ "\"mypk\": \"%s\", "
+ "\"sgmts\": [[6519456, 1471916863], [2498434, 1455671440]]"
+ "}", 117546, "xxyyzz-abc");
DistinctMap distinctMap = DistinctMap.create(queryType, null);
Utils.ValueHolder<String> outHash = new Utils.ValueHolder<>();
boolean add = distinctMap.add(resource, outHash);
assertThat(add).as("Value should be added first time").isTrue();
boolean add2 = distinctMap.add(resource, outHash);
assertThat(add2).as("same value should not be added again").isFalse();
resource = String.format("{ "
+ "\"id\": \"%s\", "
+ "\"mypk\": \"%s\", "
+ "\"sgmts\": [[6519456, 1471916863], [2498434, 1455671440]]"
+ "}", 117546, "xxy%zz-abc");
boolean add3 = distinctMap.add(resource, outHash);
assertThat(add3).as("different value should be added again").isTrue();
}
@Test(groups = "unit", dataProvider = "distinctMapArgProvider")
public void nullValue(DistinctQueryType queryType) {
DistinctMap distinctMap = DistinctMap.create(queryType, null);
Utils.ValueHolder<String> outHash = new Utils.ValueHolder<>();
boolean add = distinctMap.add(null, outHash);
assertThat(add).as("Value should be added first time").isTrue();
boolean add2 = distinctMap.add(null, outHash);
assertThat(add2).as("same value should not be added again").isFalse();
}
@Test(groups = "unit", dataProvider = "distinctMapArgProvider")
} | class DistinctMapTest {
@DataProvider(name = "distinctMapArgProvider")
public Object[][] distinctMapArgProvider() {
return new Object[][] {
{DistinctQueryType.ORDERED},
{DistinctQueryType.UNORDERED},
};
}
@Test(groups = "unit", dataProvider = "distinctMapArgProvider")
public void integerValue(DistinctQueryType queryType) {
String doc = String.format("{ " + "\"id\": \"%s\", \"prop\": %d }", UUID.randomUUID().toString(), 5);
Document resource = new Document(doc);
DistinctMap distinctMap = DistinctMap.create(queryType, null);
Utils.ValueHolder<String> outHash = new Utils.ValueHolder<>();
boolean add = distinctMap.add(resource, outHash);
assertThat(add).as("Value should be added first time").isTrue();
boolean add2 = distinctMap.add(resource, outHash);
assertThat(add2).as("same value should not be added again").isFalse();
resource = new Document(String.format("{ " + "\"id\": \"%s\", \"prop\": %d }", UUID.randomUUID().toString(),
3));
boolean add3 = distinctMap.add(resource, outHash);
assertThat(add3).as("different value should be added again").isTrue();
}
@Test(groups = "unit", dataProvider = "distinctMapArgProvider")
public void stringValue(DistinctQueryType queryType) {
String resourceString = String.format("{ " + "\"id\": \"a\" }"); Utils.ValueHolder<String> outHash = new Utils.ValueHolder<>();
Document resource = new Document(resourceString);
DistinctMap distinctMap = DistinctMap.create(queryType, null);
boolean add = distinctMap.add(resource, outHash);
assertThat(add).as("Value should be added first time").isTrue();
boolean add2 = distinctMap.add(resource, outHash);
assertThat(add2).as("same value should not be added again").isFalse();
resource = new Document(String.format("{ " + "\"id\": \"b\" }"));
boolean add3 = distinctMap.add(resource, outHash);
assertThat(add3).as("different value should be added again").isTrue();
}
@Test(groups = "unit", dataProvider = "distinctMapArgProvider")
public void objectValue(DistinctQueryType queryType) {
String resourceString = String.format("{ "
+ "\"id\": \"%s\", "
+ "\"mypk\": \"%s\", "
+ "\"sgmts\": [[6519456, 1471916863], [2498434, 1455671440]]"
+ "}", 117546, "xxyyzz-abc");
Document resource = new Document(resourceString);
DistinctMap distinctMap = DistinctMap.create(queryType, null);
Utils.ValueHolder<String> outHash = new Utils.ValueHolder<>();
boolean add = distinctMap.add(resource, outHash);
assertThat(add).as("Value should be added first time").isTrue();
boolean add2 = distinctMap.add(resource, outHash);
assertThat(add2).as("same value should not be added again").isFalse();
resourceString = String.format("{ "
+ "\"id\": \"%s\", "
+ "\"mypk\": \"%s\", "
+ "\"sgmts\": [[6519456, 1471916863], [2498434, 1455671440]]"
+ "}", 117546, "xxy%zz-abc");
resource = new Document(resourceString);
boolean add3 = distinctMap.add(resource, outHash);
assertThat(add3).as("different value should be added again").isTrue();
}
@Test(groups = "unit", dataProvider = "distinctMapArgProvider")
public void arrayValue(DistinctQueryType queryType) {
String resourceString = String.format("{ "
+ "\"sgmts\": [[6519456, 1471916863], [2498434, 1455671440]]"
+ "}");
Document resource = new Document(resourceString);
DistinctMap distinctMap = DistinctMap.create(queryType, null);
Utils.ValueHolder<String> outHash = new Utils.ValueHolder<>();
boolean add = distinctMap.add(resource, outHash);
assertThat(add).as("Value should be added first time").isTrue();
boolean add2 = distinctMap.add(resource, outHash);
assertThat(add2).as("same value should not be added again").isFalse();
resourceString = String.format("{ "
+ "\"sgmts\": [[6519456, 1471916863], [2498434, 1455671441]]"
+ "}");
resource = new Document(resourceString);
boolean add3 = distinctMap.add(resource, outHash);
assertThat(add3).as("different value should be added again").isTrue();
}
@Test(groups = "unit", dataProvider = "distinctMapArgProvider")
public void nullValue(DistinctQueryType queryType) {
DistinctMap distinctMap = DistinctMap.create(queryType, null);
Utils.ValueHolder<String> outHash = new Utils.ValueHolder<>();
boolean add = distinctMap.add(null, outHash);
assertThat(add).as("Value should be added first time").isTrue();
boolean add2 = distinctMap.add(null, outHash);
assertThat(add2).as("same value should not be added again").isFalse();
}
@Test(groups = "unit", dataProvider = "distinctMapArgProvider")
} |
Done | public OrderedDistinctMap(String lastHash) {
mapper = new ObjectMapper().configure(SerializationFeature.ORDER_MAP_ENTRIES_BY_KEYS, true);
this.lastHash = lastHash;
} | mapper = new ObjectMapper().configure(SerializationFeature.ORDER_MAP_ENTRIES_BY_KEYS, true); | public OrderedDistinctMap(String lastHash) {
this.lastHash = lastHash;
} | class OrderedDistinctMap extends DistinctMap {
private final ObjectMapper mapper;
private String lastHash;
@Override
public boolean add(Object resource, Utils.ValueHolder<String> outHash) {
try {
String sortedJson = mapper.writeValueAsString(resource);
MessageDigest md = MessageDigest.getInstance("SHA-1");
byte[] digest = md.digest(sortedJson.getBytes(Charset.defaultCharset()));
outHash.v = Base64.getEncoder().encodeToString(digest);
boolean value = !StringUtils.equals(lastHash, outHash.v);
lastHash = outHash.v;
return value;
} catch (JsonProcessingException | NoSuchAlgorithmException e) {
throw new IllegalStateException(e);
}
}
} | class OrderedDistinctMap extends DistinctMap {
private volatile String lastHash;
@Override
public boolean add(Resource resource, Utils.ValueHolder<String> outHash) {
try {
outHash.v = getHash(resource);
final boolean value = !StringUtils.equals(lastHash, outHash.v);
lastHash = outHash.v;
return value;
} catch (JsonProcessingException | NoSuchAlgorithmException e) {
throw new IllegalStateException(e);
}
}
} |
Done | public UnorderedDistinctMap() {
resultSet = new HashSet<>();
mapper = new ObjectMapper().configure(SerializationFeature.ORDER_MAP_ENTRIES_BY_KEYS, true);
} | mapper = new ObjectMapper().configure(SerializationFeature.ORDER_MAP_ENTRIES_BY_KEYS, true); | public UnorderedDistinctMap() {
resultSet = Collections.newSetFromMap(new ConcurrentHashMap<>());
} | class UnorderedDistinctMap extends DistinctMap {
private final ObjectMapper mapper;
private final HashSet<String> resultSet;
@Override
public boolean add(Object resource, Utils.ValueHolder<String> outHash) {
try {
String sortedJson = mapper.writeValueAsString(resource);
MessageDigest md = MessageDigest.getInstance("SHA-1");
byte[] digest = md.digest(sortedJson.getBytes(Charset.defaultCharset()));
outHash.v = Base64.getEncoder().encodeToString(digest);
return resultSet.add(outHash.v);
} catch (JsonProcessingException e) {
throw new IllegalStateException(e);
} catch (NoSuchAlgorithmException e) {
throw new IllegalStateException(e);
}
}
} | class UnorderedDistinctMap extends DistinctMap {
private final Set<String> resultSet;
@Override
public boolean add(Resource resource, Utils.ValueHolder<String> outHash) {
try {
outHash.v = getHash(resource);
return resultSet.add(outHash.v);
} catch (JsonProcessingException | NoSuchAlgorithmException e) {
throw new IllegalStateException(e);
}
}
} |
Done | public void generateTestData() {
Random rand = new Random();
ObjectMapper mapper = new ObjectMapper();
for (int i = 0; i < 40; i++) {
Person person = getRandomPerson(rand);
try {
docs.add(new CosmosItemProperties(mapper.writeValueAsString(person)));
} catch (JsonProcessingException e) {
e.printStackTrace();
}
}
} | e.printStackTrace(); | public void generateTestData() {
Random rand = new Random();
ObjectMapper mapper = new ObjectMapper();
for (int i = 0; i < 40; i++) {
Person person = getRandomPerson(rand);
try {
docs.add(new CosmosItemProperties(mapper.writeValueAsString(person)));
} catch (JsonProcessingException e) {
logger.error(e.getMessage());
}
}
String resourceJson = String.format("{ " + "\"id\": \"%s\", \"intprop\": %d }", UUID.randomUUID().toString(),
5);
String resourceJson2 = String.format("{ " + "\"id\": \"%s\", \"intprop\": %f }", UUID.randomUUID().toString(),
5.0f);
docs.add(new CosmosItemProperties(resourceJson));
docs.add(new CosmosItemProperties(resourceJson2));
} | class DistinctQueryTests extends TestSuiteBase {
private final String FIELD = "name";
private CosmosAsyncContainer createdCollection;
private ArrayList<CosmosItemProperties> docs = new ArrayList<>();
private CosmosAsyncClient client;
@Factory(dataProvider = "clientBuilders")
public DistinctQueryTests(CosmosClientBuilder clientBuilder) {
super(clientBuilder);
}
private static String getRandomName(Random rand) {
StringBuilder stringBuilder = new StringBuilder();
stringBuilder.append("name_" + rand.nextInt(100));
return stringBuilder.toString();
}
private static City getRandomCity(Random rand) {
int index = rand.nextInt(3);
switch (index) {
case 0:
return City.LosAngeles;
case 1:
return City.NewYork;
case 2:
return City.Seattle;
}
return City.LosAngeles;
}
private static double getRandomIncome(Random rand) {
return rand.nextDouble() * Double.MAX_VALUE;
}
private static int getRandomAge(Random rand) {
return rand.nextInt(100);
}
@Test(groups = {"simple"}, timeOut = TIMEOUT, dataProvider = "queryMetricsArgProvider")
public void queryDocuments(boolean qmEnabled) {
String query = "SELECT DISTINCT c.name from c";
FeedOptions options = new FeedOptions();
options.setMaxItemCount(5);
options.setPopulateQueryMetrics(qmEnabled);
options.setMaxDegreeOfParallelism(2);
CosmosPagedFlux<CosmosItemProperties> queryObservable = createdCollection.queryItems(query, options,
CosmosItemProperties.class);
List<Object> nameList = docs.stream().map(d -> d.get(FIELD)).collect(Collectors.toList());
List<Object> distinctNameList = nameList.stream().distinct().collect(Collectors.toList());
FeedResponseListValidator<CosmosItemProperties> validator =
new FeedResponseListValidator.Builder<CosmosItemProperties>()
.totalSize(distinctNameList.size())
.allPagesSatisfy(new FeedResponseValidator.Builder<CosmosItemProperties>()
.requestChargeGreaterThanOrEqualTo(1.0)
.build())
.hasValidQueryMetrics(qmEnabled)
.build();
validateQuerySuccess(queryObservable.byPage(), validator, TIMEOUT);
}
@Test(groups = {"simple"}, timeOut = TIMEOUT)
public void queryDistinctDocuments() {
List<String> queries = Arrays.asList(
"SELECT %s VALUE null",
"SELECT %s VALUE false",
"SELECT %s VALUE true",
"SELECT %s VALUE 1",
"SELECT %s VALUE 'a'",
"SELECT %s VALUE [null, true, false, 1, 'a']",
"SELECT %s false AS p",
"SELECT %s 1 AS p",
"SELECT %s 'a' AS p",
"SELECT %s VALUE null FROM c",
"SELECT %s VALUE false FROM c",
"SELECT %s VALUE 1 FROM c",
"SELECT %s VALUE 'a' FROM c",
"SELECT %s null AS p FROM c",
"SELECT %s false AS p FROM c",
"SELECT %s 1 AS p FROM c",
"SELECT %s 'a' AS p FROM c",
"SELECT %s VALUE c.income from c",
"SELECT %s VALUE c.age from c",
"SELECT %s c.income, c.income AS income2 from c",
"SELECT %s c.income, c.age from c",
"SELECT %s c.name from c",
"SELECT %s VALUE c.city from c",
"SELECT %s VALUE c.partitionKey from c",
"SELECT %s c.name, c.name AS name2 from c",
"SELECT %s c.name, c.city from c",
"SELECT %s c.children from c",
"SELECT %s c.children, c.children AS children2 from c",
"SELECT %s VALUE c.pet from c",
"SELECT %s c.pet, c.pet AS pet2 from c",
"SELECT %s VALUE ABS(c.age) FROM c",
"SELECT %s VALUE LEFT(c.name, 1) FROM c",
"SELECT %s VALUE c.name || ', ' || (c.city ?? '') FROM c",
"SELECT %s VALUE ARRAY_LENGTH(c.children) FROM c",
"SELECT %s VALUE IS_DEFINED(c.city) FROM c",
"SELECT %s VALUE (c.children[0].age ?? 0) + (c.children[1].age ?? 0) FROM c",
"SELECT %s c.name FROM c ORDER BY c.name ASC",
"SELECT %s c.age FROM c ORDER BY c.age",
"SELECT %s c.city FROM c ORDER BY c.city",
"SELECT %s c.city FROM c ORDER BY c.age",
"SELECT %s LEFT(c.name, 1) FROM c ORDER BY c.name",
"SELECT %s TOP 2147483647 VALUE c.age FROM c",
"SELECT %s TOP 2147483647 c.age FROM c ORDER BY c.age",
"SELECT %s VALUE MAX(c.age) FROM c",
"SELECT %s VALUE c.age FROM p JOIN c IN p.children",
"SELECT %s p.age AS ParentAge, c.age ChildAge FROM p JOIN c IN p.children",
"SELECT %s VALUE c.name FROM p JOIN c IN p.children",
"SELECT %s p.name AS ParentName, c.name ChildName FROM p JOIN c IN p.children",
"SELECT %s r.age, s FROM r JOIN (SELECT DISTINCT VALUE c FROM (SELECT 1 a) c) s WHERE r.age > 25",
"SELECT %s p.name, p.age FROM (SELECT DISTINCT * FROM r) p WHERE p.age > 25",
"SELECT %s p.name, (SELECT DISTINCT VALUE p.age) AS Age FROM p",
"SELECT %s p.name, p.age FROM p WHERE (SELECT DISTINCT VALUE LEFT(p.name, 1)) > 'A' AND (SELECT " +
"DISTINCT VALUE p.age) > 21",
"SELECT %s p.name, (SELECT DISTINCT VALUE p.age) AS Age FROM p WHERE (SELECT DISTINCT VALUE p.name) >" +
" 'A' OR (SELECT DISTINCT VALUE p.age) > 21",
"SELECT %s * FROM c"
);
for (String query : queries) {
FeedOptions options = new FeedOptions();
options.setMaxItemCount(5);
options.setMaxDegreeOfParallelism(2);
List<CosmosItemProperties> documentsFromWithDistinct = new ArrayList<>();
List<CosmosItemProperties> documentsFromWithoutDistinct = new ArrayList<>();
final String queryWithDistinct = String.format(query, "DISTINCT");
final String queryWithoutDistinct = String.format(query, "");
CosmosPagedFlux<CosmosItemProperties> queryObservable = createdCollection.queryItems(queryWithDistinct, options,
CosmosItemProperties.class);
Iterator<FeedResponse<CosmosItemProperties>> iterator = queryObservable.byPage().toIterable().iterator();
Utils.ValueHolder<String> outHash = new Utils.ValueHolder<>();
UnorderedDistinctMap distinctMap = new UnorderedDistinctMap();
while (iterator.hasNext()) {
FeedResponse<CosmosItemProperties> next = iterator.next();
for (CosmosItemProperties document : next.getResults()) {
if (distinctMap.add(document, outHash)) {
documentsFromWithoutDistinct.add(document);
}
}
}
CosmosPagedFlux<CosmosItemProperties> queryObservableWithDistinct = createdCollection
.queryItems(queryWithoutDistinct, options,
CosmosItemProperties.class);
iterator = queryObservableWithDistinct.byPage().toIterable().iterator();
while (iterator.hasNext()) {
FeedResponse<CosmosItemProperties> next = iterator.next();
documentsFromWithDistinct.addAll(next.getResults());
}
assertThat(documentsFromWithDistinct.size()).isEqualTo(documentsFromWithoutDistinct.size());
}
}
public void bulkInsert() {
generateTestData();
voidBulkInsertBlocking(createdCollection, docs);
}
private Pet getRandomPet(Random rand) {
String name = getRandomName(rand);
int age = getRandomAge(rand);
return new Pet(name, age);
}
public Person getRandomPerson(Random rand) {
String name = getRandomName(rand);
City city = getRandomCity(rand);
double income = getRandomIncome(rand);
List<Person> people = new ArrayList<Person>();
if (rand.nextInt(10) % 10 == 0) {
for (int i = 0; i < rand.nextInt(5); i++) {
people.add(getRandomPerson(rand));
}
}
int age = getRandomAge(rand);
Pet pet = getRandomPet(rand);
UUID guid = UUID.randomUUID();
Person p = new Person(name, city, income, people, age, pet, guid);
return p;
}
@AfterClass(groups = {"simple"}, timeOut = SHUTDOWN_TIMEOUT, alwaysRun = true)
public void afterClass() {
safeClose(client);
}
@BeforeClass(groups = {"simple"}, timeOut = 3 * SETUP_TIMEOUT)
public void beforeClass() throws Exception {
client = this.clientBuilder().buildAsyncClient();
createdCollection = getSharedMultiPartitionCosmosContainer(client);
truncateCollection(createdCollection);
bulkInsert();
waitIfNeededForReplicasToCatchUp(clientBuilder());
}
public enum City {
NewYork,
LosAngeles,
Seattle
}
public final class Pet extends JsonSerializable {
@JsonProperty("name")
public String name;
@JsonProperty("age")
public int age;
public Pet(String name, int age) {
this.name = name;
this.age = age;
}
}
public final class Person extends JsonSerializable {
@JsonProperty("name")
public String name;
@JsonProperty("id")
public String id;
@JsonProperty("city")
public City city;
@JsonProperty("income")
public double income;
@JsonProperty("children")
public List<Person> children;
@JsonProperty("age")
public int age;
@JsonProperty("pet")
public Pet pet;
@JsonProperty("guid")
public UUID guid;
public Person(String name, City city, double income, List<Person> children, int age, Pet pet, UUID guid) {
this.name = name;
this.city = city;
this.income = income;
this.children = children;
this.age = age;
this.pet = pet;
this.guid = guid;
this.id = UUID.randomUUID().toString();
}
}
} | class DistinctQueryTests extends TestSuiteBase {
private final int TIMEOUT_120 = 120000;
private final String FIELD = "name";
private CosmosAsyncContainer createdCollection;
private ArrayList<CosmosItemProperties> docs = new ArrayList<>();
private CosmosAsyncClient client;
@Factory(dataProvider = "clientBuildersWithDirect")
public DistinctQueryTests(CosmosClientBuilder clientBuilder) {
super(clientBuilder);
}
private static String getRandomName(Random rand) {
StringBuilder stringBuilder = new StringBuilder();
stringBuilder.append("name_" + rand.nextInt(100));
return stringBuilder.toString();
}
private static City getRandomCity(Random rand) {
int index = rand.nextInt(3);
switch (index) {
case 0:
return City.LOS_ANGELES;
case 1:
return City.NEW_YORK;
case 2:
return City.SEATTLE;
}
return City.LOS_ANGELES;
}
private static double getRandomIncome(Random rand) {
return rand.nextDouble() * Double.MAX_VALUE;
}
private static int getRandomAge(Random rand) {
return rand.nextInt(100);
}
@Test(groups = {"simple"}, timeOut = TIMEOUT, dataProvider = "queryMetricsArgProvider")
public void queryDocuments(boolean qmEnabled) {
String query = "SELECT DISTINCT c.name from c";
FeedOptions options = new FeedOptions();
options.setMaxItemCount(5);
options.setPopulateQueryMetrics(qmEnabled);
options.setMaxDegreeOfParallelism(2);
CosmosPagedFlux<CosmosItemProperties> queryObservable =
createdCollection.queryItems(query,
options,
CosmosItemProperties.class);
List<Object> nameList = docs.stream()
.map(d -> ModelBridgeInternal.getObjectFromJsonSerializable(d, FIELD))
.collect(Collectors.toList());
List<Object> distinctNameList = nameList.stream().distinct().collect(Collectors.toList());
FeedResponseListValidator<CosmosItemProperties> validator =
new FeedResponseListValidator.Builder<CosmosItemProperties>()
.totalSize(distinctNameList.size())
.allPagesSatisfy(new FeedResponseValidator.Builder<CosmosItemProperties>()
.requestChargeGreaterThanOrEqualTo(1.0)
.build())
.hasValidQueryMetrics(qmEnabled)
.build();
validateQuerySuccess(queryObservable.byPage(), validator, TIMEOUT);
}
@Test(groups = {"simple"}, timeOut = TIMEOUT_120)
public void queryDistinctDocuments() {
List<String> queries = Arrays.asList(
"SELECT %s VALUE null",
"SELECT %s VALUE false",
"SELECT %s VALUE true",
"SELECT %s VALUE 1",
"SELECT %s VALUE 'a'",
"SELECT %s VALUE [null, true, false, 1, 'a']",
"SELECT %s false AS p",
"SELECT %s 1 AS p",
"SELECT %s 'a' AS p",
"SELECT %s VALUE null FROM c",
"SELECT %s VALUE false FROM c",
"SELECT %s VALUE 1 FROM c",
"SELECT %s VALUE 'a' FROM c",
"SELECT %s null AS p FROM c",
"SELECT %s false AS p FROM c",
"SELECT %s 1 AS p FROM c",
"SELECT %s 'a' AS p FROM c",
"SELECT %s VALUE c.income from c",
"SELECT %s VALUE c.age from c",
"SELECT %s c.income, c.income AS income2 from c",
"SELECT %s c.income, c.age from c",
"SELECT %s c.name from c",
"SELECT %s VALUE c.city from c",
"SELECT %s c.name, c.name AS name2 from c",
"SELECT %s c.name, c.city from c",
"SELECT %s c.children from c",
"SELECT %s c.children, c.children AS children2 from c",
"SELECT %s VALUE c.pet from c",
"SELECT %s c.pet, c.pet AS pet2 from c",
"SELECT %s VALUE ABS(c.age) FROM c",
"SELECT %s VALUE LEFT(c.name, 1) FROM c",
"SELECT %s VALUE c.name || ', ' || (c.city ?? '') FROM c",
"SELECT %s VALUE ARRAY_LENGTH(c.children) FROM c",
"SELECT %s VALUE IS_DEFINED(c.city) FROM c",
"SELECT %s VALUE (c.children[0].age ?? 0) + (c.children[1].age ?? 0) FROM c",
"SELECT %s c.name FROM c ORDER BY c.name ASC",
"SELECT %s c.age FROM c ORDER BY c.age",
"SELECT %s c.city FROM c ORDER BY c.city",
"SELECT %s c.city FROM c ORDER BY c.age",
"SELECT %s LEFT(c.name, 1) FROM c ORDER BY c.name",
"SELECT %s TOP 2147483647 VALUE c.age FROM c",
"SELECT %s TOP 2147483647 c.age FROM c ORDER BY c.age",
"SELECT %s VALUE MAX(c.age) FROM c",
"SELECT %s VALUE c.age FROM p JOIN c IN p.children",
"SELECT %s p.age AS ParentAge, c.age ChildAge FROM p JOIN c IN p.children",
"SELECT %s VALUE c.name FROM p JOIN c IN p.children",
"SELECT %s p.name AS ParentName, c.name ChildName FROM p JOIN c IN p.children",
"SELECT %s r.age, s FROM r JOIN (SELECT DISTINCT VALUE c FROM (SELECT 1 a) c) s WHERE r.age > 25",
"SELECT %s p.name, p.age FROM (SELECT DISTINCT * FROM r) p WHERE p.age > 25",
"SELECT %s p.name, (SELECT DISTINCT VALUE p.age) AS Age FROM p",
"SELECT %s p.name, p.age FROM p WHERE (SELECT DISTINCT VALUE LEFT(p.name, 1)) > 'A' AND (SELECT " +
"DISTINCT VALUE p.age) > 21",
"SELECT %s p.name, (SELECT DISTINCT VALUE p.age) AS Age FROM p WHERE (SELECT DISTINCT VALUE p.name) >" +
" 'A' OR (SELECT DISTINCT VALUE p.age) > 21",
"SELECT %s * FROM c"
);
for (String query : queries) {
FeedOptions options = new FeedOptions();
options.setMaxItemCount(5);
options.setMaxDegreeOfParallelism(2);
List<CosmosItemProperties> documentsFromWithDistinct = new ArrayList<>();
List<CosmosItemProperties> documentsFromWithoutDistinct = new ArrayList<>();
final String queryWithDistinct = String.format(query, "DISTINCT");
final String queryWithoutDistinct = String.format(query, "");
CosmosPagedFlux<CosmosItemProperties> queryObservable = createdCollection.queryItems(queryWithoutDistinct,
options,
CosmosItemProperties.class);
Iterator<FeedResponse<CosmosItemProperties>> iterator = queryObservable.byPage().toIterable().iterator();
Utils.ValueHolder<String> outHash = new Utils.ValueHolder<>();
UnorderedDistinctMap distinctMap = new UnorderedDistinctMap();
while (iterator.hasNext()) {
FeedResponse<CosmosItemProperties> next = iterator.next();
for (CosmosItemProperties document : next.getResults()) {
if (distinctMap.add(document, outHash)) {
documentsFromWithoutDistinct.add(document);
}
}
}
CosmosPagedFlux<CosmosItemProperties> queryObservableWithDistinct = createdCollection
.queryItems(queryWithDistinct, options,
CosmosItemProperties.class);
iterator = queryObservableWithDistinct.byPage().toIterable().iterator();
while (iterator.hasNext()) {
FeedResponse<CosmosItemProperties> next = iterator.next();
documentsFromWithDistinct.addAll(next.getResults());
}
assertThat(documentsFromWithDistinct.size()).isGreaterThanOrEqualTo(1);
assertThat(documentsFromWithDistinct.size()).isEqualTo(documentsFromWithoutDistinct.size());
}
}
@Test(groups = {"simple"}, timeOut = TIMEOUT, dataProvider = "queryMetricsArgProvider")
public void queryDocumentsForDistinctIntValues(boolean qmEnabled) {
String query = "SELECT DISTINCT c.intprop from c";
FeedOptions options = new FeedOptions();
options.setMaxItemCount(5);
options.setPopulateQueryMetrics(qmEnabled);
options.setMaxDegreeOfParallelism(2);
CosmosPagedFlux<CosmosItemProperties> queryObservable = createdCollection.queryItems(query, options,
CosmosItemProperties.class);
Iterator<FeedResponse<CosmosItemProperties>> iterator = queryObservable.byPage().collectList().single().block()
.iterator();
List<CosmosItemProperties> itemPropertiesList = new ArrayList<>();
while (iterator.hasNext()) {
FeedResponse<CosmosItemProperties> next = iterator.next();
itemPropertiesList.addAll(next.getResults());
}
assertThat(itemPropertiesList.size()).isEqualTo(2);
List<Object> intpropList = itemPropertiesList
.stream()
.map(cosmosItemProperties ->
ModelBridgeInternal.getObjectFromJsonSerializable(
cosmosItemProperties, "intprop"))
.collect(Collectors.toList());
assertThat(intpropList).containsExactlyInAnyOrder(null, 5);
}
public void bulkInsert() {
generateTestData();
voidBulkInsertBlocking(createdCollection, docs);
}
private Pet getRandomPet(Random rand) {
String name = getRandomName(rand);
int age = getRandomAge(rand);
return new Pet(name, age);
}
public Person getRandomPerson(Random rand) {
String name = getRandomName(rand);
City city = getRandomCity(rand);
double income = getRandomIncome(rand);
List<Person> people = new ArrayList<Person>();
if (rand.nextInt(10) % 10 == 0) {
for (int i = 0; i < rand.nextInt(5); i++) {
people.add(getRandomPerson(rand));
}
}
int age = getRandomAge(rand);
Pet pet = getRandomPet(rand);
UUID guid = UUID.randomUUID();
Person p = new Person(name, city, income, people, age, pet, guid);
return p;
}
@AfterClass(groups = {"simple"}, timeOut = SHUTDOWN_TIMEOUT, alwaysRun = true)
public void afterClass() {
safeClose(client);
}
@BeforeClass(groups = {"simple"}, timeOut = 3 * SETUP_TIMEOUT)
public void beforeClass() throws Exception {
client = this.getClientBuilder().buildAsyncClient();
createdCollection = getSharedMultiPartitionCosmosContainer(client);
truncateCollection(createdCollection);
bulkInsert();
waitIfNeededForReplicasToCatchUp(this.getClientBuilder());
}
public enum City {
NEW_YORK,
LOS_ANGELES,
SEATTLE
}
public final class Pet extends JsonSerializable {
@JsonProperty("name")
public String name;
@JsonProperty("age")
public int age;
public Pet(String name, int age) {
this.name = name;
this.age = age;
}
}
public final class Person extends JsonSerializable {
@JsonProperty("name")
public String name;
@JsonProperty("id")
public String id;
@JsonProperty("city")
public City city;
@JsonProperty("income")
public double income;
@JsonProperty("children")
public List<Person> children;
@JsonProperty("age")
public int age;
@JsonProperty("pet")
public Pet pet;
@JsonProperty("guid")
public UUID guid;
public Person(String name, City city, double income, List<Person> children, int age, Pet pet, UUID guid) {
this.name = name;
this.city = city;
this.income = income;
this.children = children;
this.age = age;
this.pet = pet;
this.guid = guid;
this.id = UUID.randomUUID().toString();
}
}
} |
Yeah it should be, corrected the test | public void objectOrder(DistinctQueryType queryType) {
String resource1 = String.format("{ "
+ "\"id\": \"12345\", "
+ "\"mypk\": \"abcde\""
+ "} ");
String resource2 = String.format("{ "
+ "\"mypk\": \"abcde\","
+ "\"id\": \"12345\""
+ "} ");
DistinctMap distinctMap = DistinctMap.create(queryType, null);
Utils.ValueHolder<String> outHash = new Utils.ValueHolder<>();
boolean add = distinctMap.add(resource1, outHash);
assertThat(add).as("Value should be added first time").isTrue();
boolean add2 = distinctMap.add(resource2, outHash);
assertThat(add2).as("Order of objects in map should be treated same").isTrue();
} | assertThat(add2).as("Order of objects in map should be treated same").isTrue(); | public void objectOrder(DistinctQueryType queryType) {
String resource1 = String.format("{ "
+ "\"id\": \"12345\","
+ "\"mypk\": \"abcde\""
+ "} ");
String resource2 = String.format("{ "
+ "\"mypk\": \"abcde\","
+ "\"id\": \"12345\""
+ "} ");
Document resource = new Document(resource1);
DistinctMap distinctMap = DistinctMap.create(queryType, null);
Utils.ValueHolder<String> outHash = new Utils.ValueHolder<>();
boolean add = distinctMap.add(resource, outHash);
assertThat(add).as("Value should be added first time").isTrue();
resource = new Document(resource2);
boolean add2 = distinctMap.add(resource, outHash);
assertThat(add2).as("Order of objects in map should be treated same").isFalse();
} | class DistinctMapTest {
@DataProvider(name = "distinctMapArgProvider")
public Object[][] distinctMapArgProvider() {
return new Object[][] {
{DistinctQueryType.Ordered},
{DistinctQueryType.Unordered},
};
}
@Test(groups = "unit", dataProvider = "distinctMapArgProvider")
public void integerValue(DistinctQueryType queryType) {
String resource = String.format("{ " + "\"id\": %d + \"}", 5);
DistinctMap distinctMap = DistinctMap.create(queryType, null);
Utils.ValueHolder<String> outHash = new Utils.ValueHolder<>();
boolean add = distinctMap.add(resource, outHash);
System.out.println("outHash5 = " + outHash.v);
assertThat(add).as("Value should be added first time").isTrue();
boolean add2 = distinctMap.add(resource, outHash);
assertThat(add2).as("same value should not be added again").isFalse();
resource = String.format("{ " + "\"id\": %d + \"}", 3);
boolean add3 = distinctMap.add(resource, outHash);
assertThat(add3).as("different value should be added again").isTrue();
}
@Test(groups = "unit", dataProvider = "distinctMapArgProvider")
public void stringValue(DistinctQueryType queryType) {
String resource = String.format("{ " + "\"id\": \"5\" + \"}");
DistinctMap distinctMap = DistinctMap.create(queryType, null);
Utils.ValueHolder<String> outHash = new Utils.ValueHolder<>();
boolean add = distinctMap.add(resource, outHash);
assertThat(add).as("Value should be added first time").isTrue();
boolean add2 = distinctMap.add(resource, outHash);
assertThat(add2).as("same value should not be added again").isFalse();
resource = String.format("{ " + "\"id\": \"6\" + \"}");
boolean add3 = distinctMap.add(resource, outHash);
assertThat(add3).as("different value should be added again").isTrue();
}
@Test(groups = "unit", dataProvider = "distinctMapArgProvider")
public void objectValue(DistinctQueryType queryType) {
String resource = String.format("{ "
+ "\"id\": \"%s\", "
+ "\"mypk\": \"%s\", "
+ "\"sgmts\": [[6519456, 1471916863], [2498434, 1455671440]]"
+ "}", 117546, "xxyyzz-abc");
DistinctMap distinctMap = DistinctMap.create(queryType, null);
Utils.ValueHolder<String> outHash = new Utils.ValueHolder<>();
boolean add = distinctMap.add(resource, outHash);
assertThat(add).as("Value should be added first time").isTrue();
boolean add2 = distinctMap.add(resource, outHash);
assertThat(add2).as("same value should not be added again").isFalse();
resource = String.format("{ "
+ "\"id\": \"%s\", "
+ "\"mypk\": \"%s\", "
+ "\"sgmts\": [[6519456, 1471916863], [2498434, 1455671440]]"
+ "}", 117546, "xxy%zz-abc");
boolean add3 = distinctMap.add(resource, outHash);
assertThat(add3).as("different value should be added again").isTrue();
}
@Test(groups = "unit", dataProvider = "distinctMapArgProvider")
public void nullValue(DistinctQueryType queryType) {
DistinctMap distinctMap = DistinctMap.create(queryType, null);
Utils.ValueHolder<String> outHash = new Utils.ValueHolder<>();
boolean add = distinctMap.add(null, outHash);
assertThat(add).as("Value should be added first time").isTrue();
boolean add2 = distinctMap.add(null, outHash);
assertThat(add2).as("same value should not be added again").isFalse();
}
@Test(groups = "unit", dataProvider = "distinctMapArgProvider")
} | class DistinctMapTest {
@DataProvider(name = "distinctMapArgProvider")
public Object[][] distinctMapArgProvider() {
return new Object[][] {
{DistinctQueryType.ORDERED},
{DistinctQueryType.UNORDERED},
};
}
@Test(groups = "unit", dataProvider = "distinctMapArgProvider")
public void integerValue(DistinctQueryType queryType) {
String doc = String.format("{ " + "\"id\": \"%s\", \"prop\": %d }", UUID.randomUUID().toString(), 5);
Document resource = new Document(doc);
DistinctMap distinctMap = DistinctMap.create(queryType, null);
Utils.ValueHolder<String> outHash = new Utils.ValueHolder<>();
boolean add = distinctMap.add(resource, outHash);
assertThat(add).as("Value should be added first time").isTrue();
boolean add2 = distinctMap.add(resource, outHash);
assertThat(add2).as("same value should not be added again").isFalse();
resource = new Document(String.format("{ " + "\"id\": \"%s\", \"prop\": %d }", UUID.randomUUID().toString(),
3));
boolean add3 = distinctMap.add(resource, outHash);
assertThat(add3).as("different value should be added again").isTrue();
}
@Test(groups = "unit", dataProvider = "distinctMapArgProvider")
public void stringValue(DistinctQueryType queryType) {
String resourceString = String.format("{ " + "\"id\": \"a\" }"); Utils.ValueHolder<String> outHash = new Utils.ValueHolder<>();
Document resource = new Document(resourceString);
DistinctMap distinctMap = DistinctMap.create(queryType, null);
boolean add = distinctMap.add(resource, outHash);
assertThat(add).as("Value should be added first time").isTrue();
boolean add2 = distinctMap.add(resource, outHash);
assertThat(add2).as("same value should not be added again").isFalse();
resource = new Document(String.format("{ " + "\"id\": \"b\" }"));
boolean add3 = distinctMap.add(resource, outHash);
assertThat(add3).as("different value should be added again").isTrue();
}
@Test(groups = "unit", dataProvider = "distinctMapArgProvider")
public void objectValue(DistinctQueryType queryType) {
String resourceString = String.format("{ "
+ "\"id\": \"%s\", "
+ "\"mypk\": \"%s\", "
+ "\"sgmts\": [[6519456, 1471916863], [2498434, 1455671440]]"
+ "}", 117546, "xxyyzz-abc");
Document resource = new Document(resourceString);
DistinctMap distinctMap = DistinctMap.create(queryType, null);
Utils.ValueHolder<String> outHash = new Utils.ValueHolder<>();
boolean add = distinctMap.add(resource, outHash);
assertThat(add).as("Value should be added first time").isTrue();
boolean add2 = distinctMap.add(resource, outHash);
assertThat(add2).as("same value should not be added again").isFalse();
resourceString = String.format("{ "
+ "\"id\": \"%s\", "
+ "\"mypk\": \"%s\", "
+ "\"sgmts\": [[6519456, 1471916863], [2498434, 1455671440]]"
+ "}", 117546, "xxy%zz-abc");
resource = new Document(resourceString);
boolean add3 = distinctMap.add(resource, outHash);
assertThat(add3).as("different value should be added again").isTrue();
}
@Test(groups = "unit", dataProvider = "distinctMapArgProvider")
public void arrayValue(DistinctQueryType queryType) {
String resourceString = String.format("{ "
+ "\"sgmts\": [[6519456, 1471916863], [2498434, 1455671440]]"
+ "}");
Document resource = new Document(resourceString);
DistinctMap distinctMap = DistinctMap.create(queryType, null);
Utils.ValueHolder<String> outHash = new Utils.ValueHolder<>();
boolean add = distinctMap.add(resource, outHash);
assertThat(add).as("Value should be added first time").isTrue();
boolean add2 = distinctMap.add(resource, outHash);
assertThat(add2).as("same value should not be added again").isFalse();
resourceString = String.format("{ "
+ "\"sgmts\": [[6519456, 1471916863], [2498434, 1455671441]]"
+ "}");
resource = new Document(resourceString);
boolean add3 = distinctMap.add(resource, outHash);
assertThat(add3).as("different value should be added again").isTrue();
}
@Test(groups = "unit", dataProvider = "distinctMapArgProvider")
public void nullValue(DistinctQueryType queryType) {
DistinctMap distinctMap = DistinctMap.create(queryType, null);
Utils.ValueHolder<String> outHash = new Utils.ValueHolder<>();
boolean add = distinctMap.add(null, outHash);
assertThat(add).as("Value should be added first time").isTrue();
boolean add2 = distinctMap.add(null, outHash);
assertThat(add2).as("same value should not be added again").isFalse();
}
@Test(groups = "unit", dataProvider = "distinctMapArgProvider")
} |
Do you have a test that peeks the next sequence number? | void peekOneMessage() {
final int numberOfEvents = 1;
when(managementNode.peek())
.thenReturn(Mono.just(mock(ServiceBusReceivedMessage.class)));
StepVerifier.create(consumer.peek())
.expectNextCount(numberOfEvents)
.verifyComplete();
} | StepVerifier.create(consumer.peek()) | void peekOneMessage() {
final int numberOfEvents = 1;
when(managementNode.peek())
.thenReturn(just(mock(ServiceBusReceivedMessage.class)));
StepVerifier.create(consumer.peek())
.expectNextCount(numberOfEvents)
.verifyComplete();
} | class ServiceBusReceiverAsyncClientTest {
private static final String PAYLOAD = "hello";
private static final byte[] PAYLOAD_BYTES = PAYLOAD.getBytes(UTF_8);
private static final int PREFETCH = 5;
private static final String NAMESPACE = "my-namespace-foo";
private static final String ENTITY_NAME = "queue-name";
private final String messageTrackingUUID = UUID.randomUUID().toString();
private final DirectProcessor<AmqpEndpointState> endpointProcessor = DirectProcessor.create();
private final FluxSink<AmqpEndpointState> endpointSink = endpointProcessor.sink(FluxSink.OverflowStrategy.BUFFER);
private final DirectProcessor<Message> messageProcessor = DirectProcessor.create();
@Mock
private AmqpReceiveLink amqpReceiveLink;
@Mock
private ServiceBusAmqpConnection connection;
@Mock
private TokenCredential tokenCredential;
@Mock
private MessageSerializer messageSerializer;
@Mock
private TracerProvider tracerProvider;
@Mock
private ServiceBusManagementNode managementNode;
private ServiceBusReceiverAsyncClient consumer;
@BeforeAll
static void beforeAll() {
StepVerifier.setDefaultTimeout(Duration.ofSeconds(10));
}
@AfterAll
static void afterAll() {
StepVerifier.resetDefaultTimeout();
}
@BeforeEach
void setup() {
MockitoAnnotations.initMocks(this);
when(amqpReceiveLink.receive()).thenReturn(messageProcessor.publishOn(Schedulers.single()));
when(amqpReceiveLink.getEndpointStates()).thenReturn(endpointProcessor);
when(messageSerializer.deserialize(any(), argThat(ServiceBusReceivedMessage.class::equals)))
.thenAnswer(invocation -> {
return mock(ServiceBusReceivedMessage.class);
});
ConnectionOptions connectionOptions = new ConnectionOptions(NAMESPACE, tokenCredential,
CbsAuthorizationType.SHARED_ACCESS_SIGNATURE, AmqpTransportType.AMQP, new AmqpRetryOptions(),
ProxyOptions.SYSTEM_DEFAULTS, Schedulers.parallel());
when(connection.getEndpointStates()).thenReturn(endpointProcessor);
endpointSink.next(AmqpEndpointState.ACTIVE);
when(connection.createReceiveLink(anyString(), anyString(),
any(ReceiveMode.class))).thenReturn(Mono.just(amqpReceiveLink));
when(connection.getManagementNode(anyString())).thenReturn(Mono.just(managementNode));
ServiceBusConnectionProcessor connectionProcessor = Flux.<ServiceBusAmqpConnection>create(sink -> sink.next(connection))
.subscribeWith(new ServiceBusConnectionProcessor(connectionOptions.getFullyQualifiedNamespace(),
ENTITY_NAME, connectionOptions.getRetry()));
ReceiveMessageOptions receiveOptions = new ReceiveMessageOptions().setPrefetchCount(PREFETCH);
consumer = new ServiceBusReceiverAsyncClient(NAMESPACE, ENTITY_NAME, connectionProcessor, tracerProvider,
messageSerializer, receiveOptions);
}
@AfterEach
void teardown() {
Mockito.framework().clearInlineMocks();
consumer.close();
}
/**
* Verifies that this peek one messages.
*/
@Test
/**
* Verifies that this peek one messages from a sequence Number.
*/
@Test
void peekWithSequenceOneMessage() {
final int numberOfEvents = 1;
final int fromSequenceNumber = 10;
when(managementNode.peek(fromSequenceNumber))
.thenReturn(Mono.just(mock(ServiceBusReceivedMessage.class)));
StepVerifier.create(consumer.peek(fromSequenceNumber))
.expectNextCount(numberOfEvents)
.verifyComplete();
Mockito.framework().clearInlineMocks();
}
/**
* Verifies that this receives a number of messages. Verifies that the initial credits we add are equal to the
* prefetch value.
*/
@Test
void receivesNumberOfEvents() {
final int numberOfEvents = 1;
StepVerifier.create(consumer.receive().take(numberOfEvents))
.then(() -> sendMessages(messageProcessor.sink(), numberOfEvents))
.expectNextCount(numberOfEvents)
.verifyComplete();
verify(amqpReceiveLink, times(1)).addCredits(PREFETCH);
}
private void sendMessages(FluxSink<Message> sink, int numberOfEvents) {
Map<String, String> map = Collections.singletonMap("SAMPLE_HEADER", "foo");
for (int i = 0; i < numberOfEvents; i++) {
Message message = getMessage(PAYLOAD_BYTES, messageTrackingUUID, map);
sink.next(message);
}
}
} | class ServiceBusReceiverAsyncClientTest {
private static final String PAYLOAD = "hello";
private static final byte[] PAYLOAD_BYTES = PAYLOAD.getBytes(UTF_8);
private static final int PREFETCH = 5;
private static final String NAMESPACE = "my-namespace-foo";
private static final String ENTITY_NAME = "queue-name";
private final String messageTrackingUUID = UUID.randomUUID().toString();
private final DirectProcessor<AmqpEndpointState> endpointProcessor = DirectProcessor.create();
private final FluxSink<AmqpEndpointState> endpointSink = endpointProcessor.sink(FluxSink.OverflowStrategy.BUFFER);
private final DirectProcessor<Message> messageProcessor = DirectProcessor.create();
@Mock
private AmqpReceiveLink amqpReceiveLink;
@Mock
private ServiceBusAmqpConnection connection;
@Mock
private TokenCredential tokenCredential;
@Mock
private MessageSerializer messageSerializer;
@Mock
private TracerProvider tracerProvider;
@Mock
private ServiceBusManagementNode managementNode;
@Mock
private ServiceBusReceivedMessage message1;
@Mock
private ServiceBusReceivedMessage message2;
private ServiceBusReceiverAsyncClient consumer;
@BeforeAll
static void beforeAll() {
StepVerifier.setDefaultTimeout(Duration.ofSeconds(10));
}
@AfterAll
static void afterAll() {
StepVerifier.resetDefaultTimeout();
}
@BeforeEach
void setup() {
MockitoAnnotations.initMocks(this);
when(amqpReceiveLink.receive()).thenReturn(messageProcessor.publishOn(Schedulers.single()));
when(amqpReceiveLink.getEndpointStates()).thenReturn(endpointProcessor);
when(messageSerializer.deserialize(any(), argThat(ServiceBusReceivedMessage.class::equals)))
.thenAnswer(invocation -> {
return mock(ServiceBusReceivedMessage.class);
});
ConnectionOptions connectionOptions = new ConnectionOptions(NAMESPACE, tokenCredential,
CbsAuthorizationType.SHARED_ACCESS_SIGNATURE, AmqpTransportType.AMQP, new AmqpRetryOptions(),
ProxyOptions.SYSTEM_DEFAULTS, Schedulers.parallel());
when(connection.getEndpointStates()).thenReturn(endpointProcessor);
endpointSink.next(AmqpEndpointState.ACTIVE);
when(connection.createReceiveLink(anyString(), anyString(),
any(ReceiveMode.class))).thenReturn(just(amqpReceiveLink));
when(connection.getManagementNode(anyString())).thenReturn(just(managementNode));
ServiceBusConnectionProcessor connectionProcessor = Flux.<ServiceBusAmqpConnection>create(sink -> sink.next(connection))
.subscribeWith(new ServiceBusConnectionProcessor(connectionOptions.getFullyQualifiedNamespace(),
ENTITY_NAME, connectionOptions.getRetry()));
ReceiveMessageOptions receiveOptions = new ReceiveMessageOptions().setPrefetchCount(PREFETCH);
consumer = new ServiceBusReceiverAsyncClient(NAMESPACE, ENTITY_NAME, connectionProcessor, tracerProvider,
messageSerializer, receiveOptions);
}
@AfterEach
void teardown() {
Mockito.framework().clearInlineMocks();
consumer.close();
}
/**
* Verifies that when user calls peek more than one time, It returns different object.
*/
@SuppressWarnings("unchecked")
@Test
void peekTwoMessages() {
/* Arrange */
final int numberOfEvents = 1;
when(managementNode.peek())
.thenReturn(just(message1), just(message2));
StepVerifier.create(consumer.peek())
.expectNext(message1)
.verifyComplete();
StepVerifier.create(consumer.peek())
.expectNext(message2)
.verifyComplete();
}
/**
* Verifies that this peek one messages.
*/
@Test
/**
* Verifies that this peek one messages from a sequence Number.
*/
@Test
void peekWithSequenceOneMessage() {
final int numberOfEvents = 1;
final int fromSequenceNumber = 10;
when(managementNode.peek(fromSequenceNumber))
.thenReturn(just(mock(ServiceBusReceivedMessage.class)));
StepVerifier.create(consumer.peek(fromSequenceNumber))
.expectNextCount(numberOfEvents)
.verifyComplete();
}
/**
* Verifies that this receives a number of messages. Verifies that the initial credits we add are equal to the
* prefetch value.
*/
@Test
void receivesNumberOfEvents() {
final int numberOfEvents = 1;
StepVerifier.create(consumer.receive().take(numberOfEvents))
.then(() -> sendMessages(messageProcessor.sink(), numberOfEvents))
.expectNextCount(numberOfEvents)
.verifyComplete();
verify(amqpReceiveLink, times(1)).addCredits(PREFETCH);
}
private void sendMessages(FluxSink<Message> sink, int numberOfEvents) {
Map<String, String> map = Collections.singletonMap("SAMPLE_HEADER", "foo");
for (int i = 0; i < numberOfEvents; i++) {
Message message = getMessage(PAYLOAD_BYTES, messageTrackingUUID, map);
sink.next(message);
}
}
} |
Can you leave a comment about how this behavior is safer and resolves the issue in question so we have some context in case we ever have to investigate it later? | Mono<BlobBatchOperationInfo> prepareBlobBatchSubmission() {
if (batchOperationQueue.isEmpty()) {
return monoError(logger, new UnsupportedOperationException("Empty batch requests aren't allowed."));
}
BlobBatchOperationInfo operationInfo = new BlobBatchOperationInfo();
Deque<BlobBatchOperation<?>> operations = batchOperationQueue;
batchOperationQueue = new ConcurrentLinkedDeque<>();
List<Mono<? extends Response<?>>> batchOperationResponses = new ArrayList<>();
while (!operations.isEmpty()) {
BlobBatchOperation<?> batchOperation = operations.pop();
batchOperationResponses.add(batchOperation.getResponse()
.subscriberContext(Context.of(BATCH_REQUEST_URL_PATH, batchOperation.getRequestUrlPath(),
BATCH_OPERATION_RESPONSE, batchOperation.getBatchOperationResponse(),
BATCH_OPERATION_INFO, operationInfo)));
}
return Mono.when(batchOperationResponses)
.doOnSuccess(ignored -> operationInfo.finalizeBatchOperations())
.thenReturn(operationInfo);
} | List<Mono<? extends Response<?>>> batchOperationResponses = new ArrayList<>(); | Mono<BlobBatchOperationInfo> prepareBlobBatchSubmission() {
if (batchOperationQueue.isEmpty()) {
return monoError(logger, new UnsupportedOperationException("Empty batch requests aren't allowed."));
}
BlobBatchOperationInfo operationInfo = new BlobBatchOperationInfo();
Deque<BlobBatchOperation<?>> operations = batchOperationQueue;
batchOperationQueue = new ConcurrentLinkedDeque<>();
List<Mono<? extends Response<?>>> batchOperationResponses = new ArrayList<>();
while (!operations.isEmpty()) {
BlobBatchOperation<?> batchOperation = operations.pop();
batchOperationResponses.add(batchOperation.getResponse()
.subscriberContext(Context.of(BATCH_REQUEST_URL_PATH, batchOperation.getRequestUrlPath(),
BATCH_OPERATION_RESPONSE, batchOperation.getBatchOperationResponse(),
BATCH_OPERATION_INFO, operationInfo)));
}
/*
* Mono.when is more robust and safer to use than the previous implementation, using Flux.generate, as it is
* fulfilled/complete once all publishers comprising it are completed whereas Flux.generate will complete once
* the sink completes. Certain authorization methods, such as AAD, may have deferred processing where the sink
* would trigger completion before the request bodies are added into the batch, leading to a state where the
* request would believe it had a different size than it actually had, Mono.when bypasses this issue as it must
* wait until the deferred processing has completed to trigger the `thenReturn` operator.
*/
return Mono.when(batchOperationResponses)
.doOnSuccess(ignored -> operationInfo.finalizeBatchOperations())
.thenReturn(operationInfo);
} | class BlobBatch {
private static final String X_MS_VERSION = "x-ms-version";
private static final String BATCH_REQUEST_URL_PATH = "Batch-Request-Url-Path";
private static final String BATCH_OPERATION_RESPONSE = "Batch-Operation-Response";
private static final String BATCH_OPERATION_INFO = "Batch-Operation-Info";
private static final String PATH_TEMPLATE = "%s/%s";
/*
* Track the status codes expected for the batching operations here as the batch body does not get parsed in
* Azure Core where this information is maintained.
*/
private static final int[] EXPECTED_DELETE_STATUS_CODES = {202};
private static final int[] EXPECTED_SET_TIER_STATUS_CODES = {200, 202};
private final ClientLogger logger = new ClientLogger(BlobBatch.class);
private final BlobAsyncClient blobAsyncClient;
private Deque<BlobBatchOperation<?>> batchOperationQueue;
private BlobBatchType batchType;
BlobBatch(String accountUrl, HttpPipeline pipeline) {
boolean batchHeadersPolicySet = false;
HttpPipelineBuilder batchPipelineBuilder = new HttpPipelineBuilder();
for (int i = 0; i < pipeline.getPolicyCount(); i++) {
HttpPipelinePolicy policy = pipeline.getPolicy(i);
if (policy instanceof StorageSharedKeyCredentialPolicy) {
batchHeadersPolicySet = true;
batchPipelineBuilder.policies(this::cleanseHeaders, this::setRequestUrl);
}
batchPipelineBuilder.policies(policy);
}
if (!batchHeadersPolicySet) {
batchPipelineBuilder.policies(this::cleanseHeaders, this::setRequestUrl);
}
batchPipelineBuilder.policies(this::buildBatchOperation);
this.blobAsyncClient = new BlobClientBuilder()
.endpoint(accountUrl)
.blobName("")
.pipeline(batchPipelineBuilder.build())
.buildAsyncClient();
this.batchOperationQueue = new ConcurrentLinkedDeque<>();
}
/**
* Adds a delete blob operation to the batch.
*
* <p><strong>Code sample</strong></p>
*
* {@codesnippet com.azure.storage.blob.batch.BlobBatch.deleteBlob
*
* @param containerName The container of the blob.
* @param blobName The name of the blob.
* @return a {@link Response} that will be used to associate this operation to the response when the batch is
* submitted.
* @throws UnsupportedOperationException If this batch has already added an operation of another type.
*/
public Response<Void> deleteBlob(String containerName, String blobName) {
return deleteBlobHelper(String.format(PATH_TEMPLATE, containerName,
Utility.urlEncode(Utility.urlDecode(blobName))), null, null);
}
/**
* Adds a delete blob operation to the batch.
*
* <p><strong>Code sample</strong></p>
*
* {@codesnippet com.azure.storage.blob.batch.BlobBatch.deleteBlob
*
* @param containerName The container of the blob.
* @param blobName The name of the blob.
* @param deleteOptions Delete options for the blob and its snapshots.
* @param blobRequestConditions Additional access conditions that must be met to allow this operation.
* @return a {@link Response} that will be used to associate this operation to the response when the batch is
* submitted.
* @throws UnsupportedOperationException If this batch has already added an operation of another type.
*/
public Response<Void> deleteBlob(String containerName, String blobName,
DeleteSnapshotsOptionType deleteOptions, BlobRequestConditions blobRequestConditions) {
return deleteBlobHelper(String.format(PATH_TEMPLATE, containerName,
Utility.urlEncode(Utility.urlDecode(blobName))), deleteOptions, blobRequestConditions);
}
/**
* Adds a delete blob operation to the batch.
*
* <p><strong>Code sample</strong></p>
*
* {@codesnippet com.azure.storage.blob.batch.BlobBatch.deleteBlob
*
* @param blobUrl URL of the blob. Blob name must be encoded to UTF-8.
* @return a {@link Response} that will be used to associate this operation to the response when the batch is
* submitted.
* @throws UnsupportedOperationException If this batch has already added an operation of another type.
*/
public Response<Void> deleteBlob(String blobUrl) {
return deleteBlobHelper(getUrlPath(blobUrl), null, null);
}
/**
* Adds a delete blob operation to the batch.
*
* <p><strong>Code sample</strong></p>
*
* {@codesnippet com.azure.storage.blob.batch.BlobBatch.deleteBlob
*
* @param blobUrl URL of the blob. Blob name must be encoded to UTF-8.
* @param deleteOptions Delete options for the blob and its snapshots.
* @param blobRequestConditions Additional access conditions that must be met to allow this operation.
* @return a {@link Response} that will be used to associate this operation to the response when the batch is
* submitted.
* @throws UnsupportedOperationException If this batch has already added an operation of another type.
*/
public Response<Void> deleteBlob(String blobUrl, DeleteSnapshotsOptionType deleteOptions,
BlobRequestConditions blobRequestConditions) {
return deleteBlobHelper(getUrlPath(blobUrl), deleteOptions, blobRequestConditions);
}
private Response<Void> deleteBlobHelper(String urlPath, DeleteSnapshotsOptionType deleteOptions,
BlobRequestConditions blobRequestConditions) {
setBatchType(BlobBatchType.DELETE);
return createBatchOperation(blobAsyncClient.deleteWithResponse(deleteOptions, blobRequestConditions),
urlPath, EXPECTED_DELETE_STATUS_CODES);
}
/**
* Adds a set tier operation to the batch.
*
* <p><strong>Code sample</strong></p>
*
* {@codesnippet com.azure.storage.blob.batch.BlobBatch.setBlobAccessTier
*
* @param containerName The container of the blob.
* @param blobName The name of the blob.
* @param accessTier The tier to set on the blob.
* @return a {@link Response} that will be used to associate this operation to the response when the batch is
* submitted.
* @throws UnsupportedOperationException If this batch has already added an operation of another type.
*/
public Response<Void> setBlobAccessTier(String containerName, String blobName, AccessTier accessTier) {
return setBlobAccessTierHelper(String.format(PATH_TEMPLATE, containerName,
Utility.urlEncode(Utility.urlDecode(blobName))), accessTier, null);
}
/**
* Adds a set tier operation to the batch.
*
* <p><strong>Code sample</strong></p>
*
* {@codesnippet com.azure.storage.blob.batch.BlobBatch.setBlobAccessTier
*
* @param containerName The container of the blob.
* @param blobName The name of the blob.
* @param accessTier The tier to set on the blob.
* @param leaseId The lease ID the active lease on the blob must match.
* @return a {@link Response} that will be used to associate this operation to the response when the batch is
* submitted.
* @throws UnsupportedOperationException If this batch has already added an operation of another type.
*/
public Response<Void> setBlobAccessTier(String containerName, String blobName, AccessTier accessTier,
String leaseId) {
return setBlobAccessTierHelper(String.format(PATH_TEMPLATE, containerName,
Utility.urlEncode(Utility.urlDecode(blobName))), accessTier, leaseId);
}
/**
* Adds a set tier operation to the batch.
*
* <p><strong>Code sample</strong></p>
*
* {@codesnippet com.azure.storage.blob.batch.BlobBatch.setBlobAccessTier
*
* @param blobUrl URL of the blob. Blob name must be encoded to UTF-8.
* @param accessTier The tier to set on the blob.
* @return a {@link Response} that will be used to associate this operation to the response when the batch is
* submitted.
* @throws UnsupportedOperationException If this batch has already added an operation of another type.
*/
public Response<Void> setBlobAccessTier(String blobUrl, AccessTier accessTier) {
return setBlobAccessTierHelper(getUrlPath(blobUrl), accessTier, null);
}
/**
* Adds a set tier operation to the batch.
*
* <p><strong>Code sample</strong></p>
*
* {@codesnippet com.azure.storage.blob.batch.BlobBatch.setBlobAccessTier
*
* @param blobUrl URL of the blob. Blob name must be encoded to UTF-8.
* @param accessTier The tier to set on the blob.
* @param leaseId The lease ID the active lease on the blob must match.
* @return a {@link Response} that will be used to associate this operation to the response when the batch is
* submitted.
* @throws UnsupportedOperationException If this batch has already added an operation of another type.
*/
public Response<Void> setBlobAccessTier(String blobUrl, AccessTier accessTier, String leaseId) {
return setBlobAccessTierHelper(getUrlPath(blobUrl), accessTier, leaseId);
}
private Response<Void> setBlobAccessTierHelper(String urlPath, AccessTier accessTier, String leaseId) {
setBatchType(BlobBatchType.SET_TIER);
return createBatchOperation(blobAsyncClient.setAccessTierWithResponse(accessTier, null, leaseId),
urlPath, EXPECTED_SET_TIER_STATUS_CODES);
}
private <T> Response<T> createBatchOperation(Mono<Response<T>> response, String urlPath,
int... expectedStatusCodes) {
BlobBatchOperationResponse<T> batchOperationResponse = new BlobBatchOperationResponse<>(expectedStatusCodes);
batchOperationQueue.add(new BlobBatchOperation<>(batchOperationResponse, response, urlPath));
return batchOperationResponse;
}
private String getUrlPath(String url) {
return UrlBuilder.parse(url).getPath();
}
private void setBatchType(BlobBatchType batchType) {
if (this.batchType == null) {
this.batchType = batchType;
} else if (this.batchType != batchType) {
throw logger.logExceptionAsError(new UnsupportedOperationException(String.format(Locale.ROOT,
"'BlobBatch' only supports homogeneous operations and is a %s batch.", this.batchType)));
}
}
/*
* This performs a cleanup operation that would be handled when the request is sent through Netty or OkHttp.
* Additionally, it removes the "x-ms-version" header from the request as batch operation requests cannot have this
* and it adds the header "Content-Id" that allows the request to be mapped to the response.
*/
private Mono<HttpResponse> cleanseHeaders(HttpPipelineCallContext context, HttpPipelineNextPolicy next) {
context.getHttpRequest().getHeaders().remove(X_MS_VERSION);
Map<String, String> headers = context.getHttpRequest().getHeaders().toMap();
headers.entrySet().removeIf(header -> header.getValue() == null);
context.getHttpRequest().setHeaders(new HttpHeaders(headers));
return next.process();
}
/*
* This performs changing the request URL to the value passed through the pipeline context. This policy is used in
* place of constructing a new client for each batch request that is being sent.
*/
private Mono<HttpResponse> setRequestUrl(HttpPipelineCallContext context, HttpPipelineNextPolicy next) {
try {
UrlBuilder requestUrl = UrlBuilder.parse(context.getHttpRequest().getUrl());
requestUrl.setPath(context.getData(BATCH_REQUEST_URL_PATH).get().toString());
context.getHttpRequest().setUrl(requestUrl.toUrl());
} catch (MalformedURLException ex) {
throw logger.logExceptionAsError(Exceptions.propagate(new IllegalStateException(ex)));
}
return next.process();
}
/*
* This will "send" the batch operation request when triggered, it simply acts as a way to build and write the
* batch operation into the overall request and then returns nothing as the response.
*/
private Mono<HttpResponse> buildBatchOperation(HttpPipelineCallContext context, HttpPipelineNextPolicy next) {
BlobBatchOperationInfo operationInfo = (BlobBatchOperationInfo) context.getData(BATCH_OPERATION_INFO).get();
BlobBatchOperationResponse<?> batchOperationResponse =
(BlobBatchOperationResponse<?>) context.getData(BATCH_OPERATION_RESPONSE).get();
operationInfo.addBatchOperation(batchOperationResponse, context.getHttpRequest());
return Mono.empty();
}
} | class BlobBatch {
private static final String X_MS_VERSION = "x-ms-version";
private static final String BATCH_REQUEST_URL_PATH = "Batch-Request-Url-Path";
private static final String BATCH_OPERATION_RESPONSE = "Batch-Operation-Response";
private static final String BATCH_OPERATION_INFO = "Batch-Operation-Info";
private static final String PATH_TEMPLATE = "%s/%s";
/*
* Track the status codes expected for the batching operations here as the batch body does not get parsed in
* Azure Core where this information is maintained.
*/
private static final int[] EXPECTED_DELETE_STATUS_CODES = {202};
private static final int[] EXPECTED_SET_TIER_STATUS_CODES = {200, 202};
private final ClientLogger logger = new ClientLogger(BlobBatch.class);
private final BlobAsyncClient blobAsyncClient;
private Deque<BlobBatchOperation<?>> batchOperationQueue;
private BlobBatchType batchType;
BlobBatch(String accountUrl, HttpPipeline pipeline) {
boolean batchHeadersPolicySet = false;
HttpPipelineBuilder batchPipelineBuilder = new HttpPipelineBuilder();
for (int i = 0; i < pipeline.getPolicyCount(); i++) {
HttpPipelinePolicy policy = pipeline.getPolicy(i);
if (policy instanceof StorageSharedKeyCredentialPolicy) {
batchHeadersPolicySet = true;
batchPipelineBuilder.policies(this::cleanseHeaders, this::setRequestUrl);
}
batchPipelineBuilder.policies(policy);
}
if (!batchHeadersPolicySet) {
batchPipelineBuilder.policies(this::cleanseHeaders, this::setRequestUrl);
}
batchPipelineBuilder.policies(this::buildBatchOperation);
this.blobAsyncClient = new BlobClientBuilder()
.endpoint(accountUrl)
.blobName("")
.pipeline(batchPipelineBuilder.build())
.buildAsyncClient();
this.batchOperationQueue = new ConcurrentLinkedDeque<>();
}
/**
* Adds a delete blob operation to the batch.
*
* <p><strong>Code sample</strong></p>
*
* {@codesnippet com.azure.storage.blob.batch.BlobBatch.deleteBlob
*
* @param containerName The container of the blob.
* @param blobName The name of the blob.
* @return a {@link Response} that will be used to associate this operation to the response when the batch is
* submitted.
* @throws UnsupportedOperationException If this batch has already added an operation of another type.
*/
public Response<Void> deleteBlob(String containerName, String blobName) {
return deleteBlobHelper(String.format(PATH_TEMPLATE, containerName,
Utility.urlEncode(Utility.urlDecode(blobName))), null, null);
}
/**
* Adds a delete blob operation to the batch.
*
* <p><strong>Code sample</strong></p>
*
* {@codesnippet com.azure.storage.blob.batch.BlobBatch.deleteBlob
*
* @param containerName The container of the blob.
* @param blobName The name of the blob.
* @param deleteOptions Delete options for the blob and its snapshots.
* @param blobRequestConditions Additional access conditions that must be met to allow this operation.
* @return a {@link Response} that will be used to associate this operation to the response when the batch is
* submitted.
* @throws UnsupportedOperationException If this batch has already added an operation of another type.
*/
public Response<Void> deleteBlob(String containerName, String blobName,
DeleteSnapshotsOptionType deleteOptions, BlobRequestConditions blobRequestConditions) {
return deleteBlobHelper(String.format(PATH_TEMPLATE, containerName,
Utility.urlEncode(Utility.urlDecode(blobName))), deleteOptions, blobRequestConditions);
}
/**
* Adds a delete blob operation to the batch.
*
* <p><strong>Code sample</strong></p>
*
* {@codesnippet com.azure.storage.blob.batch.BlobBatch.deleteBlob
*
* @param blobUrl URL of the blob. Blob name must be encoded to UTF-8.
* @return a {@link Response} that will be used to associate this operation to the response when the batch is
* submitted.
* @throws UnsupportedOperationException If this batch has already added an operation of another type.
*/
public Response<Void> deleteBlob(String blobUrl) {
return deleteBlobHelper(getUrlPath(blobUrl), null, null);
}
/**
* Adds a delete blob operation to the batch.
*
* <p><strong>Code sample</strong></p>
*
* {@codesnippet com.azure.storage.blob.batch.BlobBatch.deleteBlob
*
* @param blobUrl URL of the blob. Blob name must be encoded to UTF-8.
* @param deleteOptions Delete options for the blob and its snapshots.
* @param blobRequestConditions Additional access conditions that must be met to allow this operation.
* @return a {@link Response} that will be used to associate this operation to the response when the batch is
* submitted.
* @throws UnsupportedOperationException If this batch has already added an operation of another type.
*/
public Response<Void> deleteBlob(String blobUrl, DeleteSnapshotsOptionType deleteOptions,
BlobRequestConditions blobRequestConditions) {
return deleteBlobHelper(getUrlPath(blobUrl), deleteOptions, blobRequestConditions);
}
private Response<Void> deleteBlobHelper(String urlPath, DeleteSnapshotsOptionType deleteOptions,
BlobRequestConditions blobRequestConditions) {
setBatchType(BlobBatchType.DELETE);
return createBatchOperation(blobAsyncClient.deleteWithResponse(deleteOptions, blobRequestConditions),
urlPath, EXPECTED_DELETE_STATUS_CODES);
}
/**
* Adds a set tier operation to the batch.
*
* <p><strong>Code sample</strong></p>
*
* {@codesnippet com.azure.storage.blob.batch.BlobBatch.setBlobAccessTier
*
* @param containerName The container of the blob.
* @param blobName The name of the blob.
* @param accessTier The tier to set on the blob.
* @return a {@link Response} that will be used to associate this operation to the response when the batch is
* submitted.
* @throws UnsupportedOperationException If this batch has already added an operation of another type.
*/
public Response<Void> setBlobAccessTier(String containerName, String blobName, AccessTier accessTier) {
return setBlobAccessTierHelper(String.format(PATH_TEMPLATE, containerName,
Utility.urlEncode(Utility.urlDecode(blobName))), accessTier, null);
}
/**
* Adds a set tier operation to the batch.
*
* <p><strong>Code sample</strong></p>
*
* {@codesnippet com.azure.storage.blob.batch.BlobBatch.setBlobAccessTier
*
* @param containerName The container of the blob.
* @param blobName The name of the blob.
* @param accessTier The tier to set on the blob.
* @param leaseId The lease ID the active lease on the blob must match.
* @return a {@link Response} that will be used to associate this operation to the response when the batch is
* submitted.
* @throws UnsupportedOperationException If this batch has already added an operation of another type.
*/
public Response<Void> setBlobAccessTier(String containerName, String blobName, AccessTier accessTier,
String leaseId) {
return setBlobAccessTierHelper(String.format(PATH_TEMPLATE, containerName,
Utility.urlEncode(Utility.urlDecode(blobName))), accessTier, leaseId);
}
/**
* Adds a set tier operation to the batch.
*
* <p><strong>Code sample</strong></p>
*
* {@codesnippet com.azure.storage.blob.batch.BlobBatch.setBlobAccessTier
*
* @param blobUrl URL of the blob. Blob name must be encoded to UTF-8.
* @param accessTier The tier to set on the blob.
* @return a {@link Response} that will be used to associate this operation to the response when the batch is
* submitted.
* @throws UnsupportedOperationException If this batch has already added an operation of another type.
*/
public Response<Void> setBlobAccessTier(String blobUrl, AccessTier accessTier) {
return setBlobAccessTierHelper(getUrlPath(blobUrl), accessTier, null);
}
/**
* Adds a set tier operation to the batch.
*
* <p><strong>Code sample</strong></p>
*
* {@codesnippet com.azure.storage.blob.batch.BlobBatch.setBlobAccessTier
*
* @param blobUrl URL of the blob. Blob name must be encoded to UTF-8.
* @param accessTier The tier to set on the blob.
* @param leaseId The lease ID the active lease on the blob must match.
* @return a {@link Response} that will be used to associate this operation to the response when the batch is
* submitted.
* @throws UnsupportedOperationException If this batch has already added an operation of another type.
*/
public Response<Void> setBlobAccessTier(String blobUrl, AccessTier accessTier, String leaseId) {
return setBlobAccessTierHelper(getUrlPath(blobUrl), accessTier, leaseId);
}
private Response<Void> setBlobAccessTierHelper(String urlPath, AccessTier accessTier, String leaseId) {
setBatchType(BlobBatchType.SET_TIER);
return createBatchOperation(blobAsyncClient.setAccessTierWithResponse(accessTier, null, leaseId),
urlPath, EXPECTED_SET_TIER_STATUS_CODES);
}
private <T> Response<T> createBatchOperation(Mono<Response<T>> response, String urlPath,
int... expectedStatusCodes) {
BlobBatchOperationResponse<T> batchOperationResponse = new BlobBatchOperationResponse<>(expectedStatusCodes);
batchOperationQueue.add(new BlobBatchOperation<>(batchOperationResponse, response, urlPath));
return batchOperationResponse;
}
private String getUrlPath(String url) {
return UrlBuilder.parse(url).getPath();
}
private void setBatchType(BlobBatchType batchType) {
if (this.batchType == null) {
this.batchType = batchType;
} else if (this.batchType != batchType) {
throw logger.logExceptionAsError(new UnsupportedOperationException(String.format(Locale.ROOT,
"'BlobBatch' only supports homogeneous operations and is a %s batch.", this.batchType)));
}
}
/*
* This performs a cleanup operation that would be handled when the request is sent through Netty or OkHttp.
* Additionally, it removes the "x-ms-version" header from the request as batch operation requests cannot have this
* and it adds the header "Content-Id" that allows the request to be mapped to the response.
*/
private Mono<HttpResponse> cleanseHeaders(HttpPipelineCallContext context, HttpPipelineNextPolicy next) {
context.getHttpRequest().getHeaders().remove(X_MS_VERSION);
Map<String, String> headers = context.getHttpRequest().getHeaders().toMap();
headers.entrySet().removeIf(header -> header.getValue() == null);
context.getHttpRequest().setHeaders(new HttpHeaders(headers));
return next.process();
}
/*
* This performs changing the request URL to the value passed through the pipeline context. This policy is used in
* place of constructing a new client for each batch request that is being sent.
*/
private Mono<HttpResponse> setRequestUrl(HttpPipelineCallContext context, HttpPipelineNextPolicy next) {
try {
UrlBuilder requestUrl = UrlBuilder.parse(context.getHttpRequest().getUrl());
requestUrl.setPath(context.getData(BATCH_REQUEST_URL_PATH).get().toString());
context.getHttpRequest().setUrl(requestUrl.toUrl());
} catch (MalformedURLException ex) {
throw logger.logExceptionAsError(Exceptions.propagate(new IllegalStateException(ex)));
}
return next.process();
}
/*
* This will "send" the batch operation request when triggered, it simply acts as a way to build and write the
* batch operation into the overall request and then returns nothing as the response.
*/
private Mono<HttpResponse> buildBatchOperation(HttpPipelineCallContext context, HttpPipelineNextPolicy next) {
BlobBatchOperationInfo operationInfo = (BlobBatchOperationInfo) context.getData(BATCH_OPERATION_INFO).get();
BlobBatchOperationResponse<?> batchOperationResponse =
(BlobBatchOperationResponse<?>) context.getData(BATCH_OPERATION_RESPONSE).get();
operationInfo.addBatchOperation(batchOperationResponse, context.getHttpRequest());
return Mono.empty();
}
} |
Added a comment explaining the change and why it is safer and resolves the issue. | Mono<BlobBatchOperationInfo> prepareBlobBatchSubmission() {
if (batchOperationQueue.isEmpty()) {
return monoError(logger, new UnsupportedOperationException("Empty batch requests aren't allowed."));
}
BlobBatchOperationInfo operationInfo = new BlobBatchOperationInfo();
Deque<BlobBatchOperation<?>> operations = batchOperationQueue;
batchOperationQueue = new ConcurrentLinkedDeque<>();
List<Mono<? extends Response<?>>> batchOperationResponses = new ArrayList<>();
while (!operations.isEmpty()) {
BlobBatchOperation<?> batchOperation = operations.pop();
batchOperationResponses.add(batchOperation.getResponse()
.subscriberContext(Context.of(BATCH_REQUEST_URL_PATH, batchOperation.getRequestUrlPath(),
BATCH_OPERATION_RESPONSE, batchOperation.getBatchOperationResponse(),
BATCH_OPERATION_INFO, operationInfo)));
}
return Mono.when(batchOperationResponses)
.doOnSuccess(ignored -> operationInfo.finalizeBatchOperations())
.thenReturn(operationInfo);
} | List<Mono<? extends Response<?>>> batchOperationResponses = new ArrayList<>(); | Mono<BlobBatchOperationInfo> prepareBlobBatchSubmission() {
if (batchOperationQueue.isEmpty()) {
return monoError(logger, new UnsupportedOperationException("Empty batch requests aren't allowed."));
}
BlobBatchOperationInfo operationInfo = new BlobBatchOperationInfo();
Deque<BlobBatchOperation<?>> operations = batchOperationQueue;
batchOperationQueue = new ConcurrentLinkedDeque<>();
List<Mono<? extends Response<?>>> batchOperationResponses = new ArrayList<>();
while (!operations.isEmpty()) {
BlobBatchOperation<?> batchOperation = operations.pop();
batchOperationResponses.add(batchOperation.getResponse()
.subscriberContext(Context.of(BATCH_REQUEST_URL_PATH, batchOperation.getRequestUrlPath(),
BATCH_OPERATION_RESPONSE, batchOperation.getBatchOperationResponse(),
BATCH_OPERATION_INFO, operationInfo)));
}
/*
* Mono.when is more robust and safer to use than the previous implementation, using Flux.generate, as it is
* fulfilled/complete once all publishers comprising it are completed whereas Flux.generate will complete once
* the sink completes. Certain authorization methods, such as AAD, may have deferred processing where the sink
* would trigger completion before the request bodies are added into the batch, leading to a state where the
* request would believe it had a different size than it actually had, Mono.when bypasses this issue as it must
* wait until the deferred processing has completed to trigger the `thenReturn` operator.
*/
return Mono.when(batchOperationResponses)
.doOnSuccess(ignored -> operationInfo.finalizeBatchOperations())
.thenReturn(operationInfo);
} | class BlobBatch {
private static final String X_MS_VERSION = "x-ms-version";
private static final String BATCH_REQUEST_URL_PATH = "Batch-Request-Url-Path";
private static final String BATCH_OPERATION_RESPONSE = "Batch-Operation-Response";
private static final String BATCH_OPERATION_INFO = "Batch-Operation-Info";
private static final String PATH_TEMPLATE = "%s/%s";
/*
* Track the status codes expected for the batching operations here as the batch body does not get parsed in
* Azure Core where this information is maintained.
*/
private static final int[] EXPECTED_DELETE_STATUS_CODES = {202};
private static final int[] EXPECTED_SET_TIER_STATUS_CODES = {200, 202};
private final ClientLogger logger = new ClientLogger(BlobBatch.class);
private final BlobAsyncClient blobAsyncClient;
private Deque<BlobBatchOperation<?>> batchOperationQueue;
private BlobBatchType batchType;
BlobBatch(String accountUrl, HttpPipeline pipeline) {
boolean batchHeadersPolicySet = false;
HttpPipelineBuilder batchPipelineBuilder = new HttpPipelineBuilder();
for (int i = 0; i < pipeline.getPolicyCount(); i++) {
HttpPipelinePolicy policy = pipeline.getPolicy(i);
if (policy instanceof StorageSharedKeyCredentialPolicy) {
batchHeadersPolicySet = true;
batchPipelineBuilder.policies(this::cleanseHeaders, this::setRequestUrl);
}
batchPipelineBuilder.policies(policy);
}
if (!batchHeadersPolicySet) {
batchPipelineBuilder.policies(this::cleanseHeaders, this::setRequestUrl);
}
batchPipelineBuilder.policies(this::buildBatchOperation);
this.blobAsyncClient = new BlobClientBuilder()
.endpoint(accountUrl)
.blobName("")
.pipeline(batchPipelineBuilder.build())
.buildAsyncClient();
this.batchOperationQueue = new ConcurrentLinkedDeque<>();
}
/**
* Adds a delete blob operation to the batch.
*
* <p><strong>Code sample</strong></p>
*
* {@codesnippet com.azure.storage.blob.batch.BlobBatch.deleteBlob
*
* @param containerName The container of the blob.
* @param blobName The name of the blob.
* @return a {@link Response} that will be used to associate this operation to the response when the batch is
* submitted.
* @throws UnsupportedOperationException If this batch has already added an operation of another type.
*/
public Response<Void> deleteBlob(String containerName, String blobName) {
return deleteBlobHelper(String.format(PATH_TEMPLATE, containerName,
Utility.urlEncode(Utility.urlDecode(blobName))), null, null);
}
/**
* Adds a delete blob operation to the batch.
*
* <p><strong>Code sample</strong></p>
*
* {@codesnippet com.azure.storage.blob.batch.BlobBatch.deleteBlob
*
* @param containerName The container of the blob.
* @param blobName The name of the blob.
* @param deleteOptions Delete options for the blob and its snapshots.
* @param blobRequestConditions Additional access conditions that must be met to allow this operation.
* @return a {@link Response} that will be used to associate this operation to the response when the batch is
* submitted.
* @throws UnsupportedOperationException If this batch has already added an operation of another type.
*/
public Response<Void> deleteBlob(String containerName, String blobName,
DeleteSnapshotsOptionType deleteOptions, BlobRequestConditions blobRequestConditions) {
return deleteBlobHelper(String.format(PATH_TEMPLATE, containerName,
Utility.urlEncode(Utility.urlDecode(blobName))), deleteOptions, blobRequestConditions);
}
/**
* Adds a delete blob operation to the batch.
*
* <p><strong>Code sample</strong></p>
*
* {@codesnippet com.azure.storage.blob.batch.BlobBatch.deleteBlob
*
* @param blobUrl URL of the blob. Blob name must be encoded to UTF-8.
* @return a {@link Response} that will be used to associate this operation to the response when the batch is
* submitted.
* @throws UnsupportedOperationException If this batch has already added an operation of another type.
*/
public Response<Void> deleteBlob(String blobUrl) {
return deleteBlobHelper(getUrlPath(blobUrl), null, null);
}
/**
* Adds a delete blob operation to the batch.
*
* <p><strong>Code sample</strong></p>
*
* {@codesnippet com.azure.storage.blob.batch.BlobBatch.deleteBlob
*
* @param blobUrl URL of the blob. Blob name must be encoded to UTF-8.
* @param deleteOptions Delete options for the blob and its snapshots.
* @param blobRequestConditions Additional access conditions that must be met to allow this operation.
* @return a {@link Response} that will be used to associate this operation to the response when the batch is
* submitted.
* @throws UnsupportedOperationException If this batch has already added an operation of another type.
*/
public Response<Void> deleteBlob(String blobUrl, DeleteSnapshotsOptionType deleteOptions,
BlobRequestConditions blobRequestConditions) {
return deleteBlobHelper(getUrlPath(blobUrl), deleteOptions, blobRequestConditions);
}
private Response<Void> deleteBlobHelper(String urlPath, DeleteSnapshotsOptionType deleteOptions,
BlobRequestConditions blobRequestConditions) {
setBatchType(BlobBatchType.DELETE);
return createBatchOperation(blobAsyncClient.deleteWithResponse(deleteOptions, blobRequestConditions),
urlPath, EXPECTED_DELETE_STATUS_CODES);
}
/**
* Adds a set tier operation to the batch.
*
* <p><strong>Code sample</strong></p>
*
* {@codesnippet com.azure.storage.blob.batch.BlobBatch.setBlobAccessTier
*
* @param containerName The container of the blob.
* @param blobName The name of the blob.
* @param accessTier The tier to set on the blob.
* @return a {@link Response} that will be used to associate this operation to the response when the batch is
* submitted.
* @throws UnsupportedOperationException If this batch has already added an operation of another type.
*/
public Response<Void> setBlobAccessTier(String containerName, String blobName, AccessTier accessTier) {
return setBlobAccessTierHelper(String.format(PATH_TEMPLATE, containerName,
Utility.urlEncode(Utility.urlDecode(blobName))), accessTier, null);
}
/**
* Adds a set tier operation to the batch.
*
* <p><strong>Code sample</strong></p>
*
* {@codesnippet com.azure.storage.blob.batch.BlobBatch.setBlobAccessTier
*
* @param containerName The container of the blob.
* @param blobName The name of the blob.
* @param accessTier The tier to set on the blob.
* @param leaseId The lease ID the active lease on the blob must match.
* @return a {@link Response} that will be used to associate this operation to the response when the batch is
* submitted.
* @throws UnsupportedOperationException If this batch has already added an operation of another type.
*/
public Response<Void> setBlobAccessTier(String containerName, String blobName, AccessTier accessTier,
String leaseId) {
return setBlobAccessTierHelper(String.format(PATH_TEMPLATE, containerName,
Utility.urlEncode(Utility.urlDecode(blobName))), accessTier, leaseId);
}
/**
* Adds a set tier operation to the batch.
*
* <p><strong>Code sample</strong></p>
*
* {@codesnippet com.azure.storage.blob.batch.BlobBatch.setBlobAccessTier
*
* @param blobUrl URL of the blob. Blob name must be encoded to UTF-8.
* @param accessTier The tier to set on the blob.
* @return a {@link Response} that will be used to associate this operation to the response when the batch is
* submitted.
* @throws UnsupportedOperationException If this batch has already added an operation of another type.
*/
public Response<Void> setBlobAccessTier(String blobUrl, AccessTier accessTier) {
return setBlobAccessTierHelper(getUrlPath(blobUrl), accessTier, null);
}
/**
* Adds a set tier operation to the batch.
*
* <p><strong>Code sample</strong></p>
*
* {@codesnippet com.azure.storage.blob.batch.BlobBatch.setBlobAccessTier
*
* @param blobUrl URL of the blob. Blob name must be encoded to UTF-8.
* @param accessTier The tier to set on the blob.
* @param leaseId The lease ID the active lease on the blob must match.
* @return a {@link Response} that will be used to associate this operation to the response when the batch is
* submitted.
* @throws UnsupportedOperationException If this batch has already added an operation of another type.
*/
public Response<Void> setBlobAccessTier(String blobUrl, AccessTier accessTier, String leaseId) {
return setBlobAccessTierHelper(getUrlPath(blobUrl), accessTier, leaseId);
}
private Response<Void> setBlobAccessTierHelper(String urlPath, AccessTier accessTier, String leaseId) {
setBatchType(BlobBatchType.SET_TIER);
return createBatchOperation(blobAsyncClient.setAccessTierWithResponse(accessTier, null, leaseId),
urlPath, EXPECTED_SET_TIER_STATUS_CODES);
}
private <T> Response<T> createBatchOperation(Mono<Response<T>> response, String urlPath,
int... expectedStatusCodes) {
BlobBatchOperationResponse<T> batchOperationResponse = new BlobBatchOperationResponse<>(expectedStatusCodes);
batchOperationQueue.add(new BlobBatchOperation<>(batchOperationResponse, response, urlPath));
return batchOperationResponse;
}
private String getUrlPath(String url) {
return UrlBuilder.parse(url).getPath();
}
private void setBatchType(BlobBatchType batchType) {
if (this.batchType == null) {
this.batchType = batchType;
} else if (this.batchType != batchType) {
throw logger.logExceptionAsError(new UnsupportedOperationException(String.format(Locale.ROOT,
"'BlobBatch' only supports homogeneous operations and is a %s batch.", this.batchType)));
}
}
/*
* This performs a cleanup operation that would be handled when the request is sent through Netty or OkHttp.
* Additionally, it removes the "x-ms-version" header from the request as batch operation requests cannot have this
* and it adds the header "Content-Id" that allows the request to be mapped to the response.
*/
private Mono<HttpResponse> cleanseHeaders(HttpPipelineCallContext context, HttpPipelineNextPolicy next) {
context.getHttpRequest().getHeaders().remove(X_MS_VERSION);
Map<String, String> headers = context.getHttpRequest().getHeaders().toMap();
headers.entrySet().removeIf(header -> header.getValue() == null);
context.getHttpRequest().setHeaders(new HttpHeaders(headers));
return next.process();
}
/*
* This performs changing the request URL to the value passed through the pipeline context. This policy is used in
* place of constructing a new client for each batch request that is being sent.
*/
private Mono<HttpResponse> setRequestUrl(HttpPipelineCallContext context, HttpPipelineNextPolicy next) {
try {
UrlBuilder requestUrl = UrlBuilder.parse(context.getHttpRequest().getUrl());
requestUrl.setPath(context.getData(BATCH_REQUEST_URL_PATH).get().toString());
context.getHttpRequest().setUrl(requestUrl.toUrl());
} catch (MalformedURLException ex) {
throw logger.logExceptionAsError(Exceptions.propagate(new IllegalStateException(ex)));
}
return next.process();
}
/*
* This will "send" the batch operation request when triggered, it simply acts as a way to build and write the
* batch operation into the overall request and then returns nothing as the response.
*/
private Mono<HttpResponse> buildBatchOperation(HttpPipelineCallContext context, HttpPipelineNextPolicy next) {
BlobBatchOperationInfo operationInfo = (BlobBatchOperationInfo) context.getData(BATCH_OPERATION_INFO).get();
BlobBatchOperationResponse<?> batchOperationResponse =
(BlobBatchOperationResponse<?>) context.getData(BATCH_OPERATION_RESPONSE).get();
operationInfo.addBatchOperation(batchOperationResponse, context.getHttpRequest());
return Mono.empty();
}
} | class BlobBatch {
private static final String X_MS_VERSION = "x-ms-version";
private static final String BATCH_REQUEST_URL_PATH = "Batch-Request-Url-Path";
private static final String BATCH_OPERATION_RESPONSE = "Batch-Operation-Response";
private static final String BATCH_OPERATION_INFO = "Batch-Operation-Info";
private static final String PATH_TEMPLATE = "%s/%s";
/*
* Track the status codes expected for the batching operations here as the batch body does not get parsed in
* Azure Core where this information is maintained.
*/
private static final int[] EXPECTED_DELETE_STATUS_CODES = {202};
private static final int[] EXPECTED_SET_TIER_STATUS_CODES = {200, 202};
private final ClientLogger logger = new ClientLogger(BlobBatch.class);
private final BlobAsyncClient blobAsyncClient;
private Deque<BlobBatchOperation<?>> batchOperationQueue;
private BlobBatchType batchType;
BlobBatch(String accountUrl, HttpPipeline pipeline) {
boolean batchHeadersPolicySet = false;
HttpPipelineBuilder batchPipelineBuilder = new HttpPipelineBuilder();
for (int i = 0; i < pipeline.getPolicyCount(); i++) {
HttpPipelinePolicy policy = pipeline.getPolicy(i);
if (policy instanceof StorageSharedKeyCredentialPolicy) {
batchHeadersPolicySet = true;
batchPipelineBuilder.policies(this::cleanseHeaders, this::setRequestUrl);
}
batchPipelineBuilder.policies(policy);
}
if (!batchHeadersPolicySet) {
batchPipelineBuilder.policies(this::cleanseHeaders, this::setRequestUrl);
}
batchPipelineBuilder.policies(this::buildBatchOperation);
this.blobAsyncClient = new BlobClientBuilder()
.endpoint(accountUrl)
.blobName("")
.pipeline(batchPipelineBuilder.build())
.buildAsyncClient();
this.batchOperationQueue = new ConcurrentLinkedDeque<>();
}
/**
* Adds a delete blob operation to the batch.
*
* <p><strong>Code sample</strong></p>
*
* {@codesnippet com.azure.storage.blob.batch.BlobBatch.deleteBlob
*
* @param containerName The container of the blob.
* @param blobName The name of the blob.
* @return a {@link Response} that will be used to associate this operation to the response when the batch is
* submitted.
* @throws UnsupportedOperationException If this batch has already added an operation of another type.
*/
public Response<Void> deleteBlob(String containerName, String blobName) {
return deleteBlobHelper(String.format(PATH_TEMPLATE, containerName,
Utility.urlEncode(Utility.urlDecode(blobName))), null, null);
}
/**
* Adds a delete blob operation to the batch.
*
* <p><strong>Code sample</strong></p>
*
* {@codesnippet com.azure.storage.blob.batch.BlobBatch.deleteBlob
*
* @param containerName The container of the blob.
* @param blobName The name of the blob.
* @param deleteOptions Delete options for the blob and its snapshots.
* @param blobRequestConditions Additional access conditions that must be met to allow this operation.
* @return a {@link Response} that will be used to associate this operation to the response when the batch is
* submitted.
* @throws UnsupportedOperationException If this batch has already added an operation of another type.
*/
public Response<Void> deleteBlob(String containerName, String blobName,
DeleteSnapshotsOptionType deleteOptions, BlobRequestConditions blobRequestConditions) {
return deleteBlobHelper(String.format(PATH_TEMPLATE, containerName,
Utility.urlEncode(Utility.urlDecode(blobName))), deleteOptions, blobRequestConditions);
}
/**
* Adds a delete blob operation to the batch.
*
* <p><strong>Code sample</strong></p>
*
* {@codesnippet com.azure.storage.blob.batch.BlobBatch.deleteBlob
*
* @param blobUrl URL of the blob. Blob name must be encoded to UTF-8.
* @return a {@link Response} that will be used to associate this operation to the response when the batch is
* submitted.
* @throws UnsupportedOperationException If this batch has already added an operation of another type.
*/
public Response<Void> deleteBlob(String blobUrl) {
return deleteBlobHelper(getUrlPath(blobUrl), null, null);
}
/**
* Adds a delete blob operation to the batch.
*
* <p><strong>Code sample</strong></p>
*
* {@codesnippet com.azure.storage.blob.batch.BlobBatch.deleteBlob
*
* @param blobUrl URL of the blob. Blob name must be encoded to UTF-8.
* @param deleteOptions Delete options for the blob and its snapshots.
* @param blobRequestConditions Additional access conditions that must be met to allow this operation.
* @return a {@link Response} that will be used to associate this operation to the response when the batch is
* submitted.
* @throws UnsupportedOperationException If this batch has already added an operation of another type.
*/
public Response<Void> deleteBlob(String blobUrl, DeleteSnapshotsOptionType deleteOptions,
BlobRequestConditions blobRequestConditions) {
return deleteBlobHelper(getUrlPath(blobUrl), deleteOptions, blobRequestConditions);
}
private Response<Void> deleteBlobHelper(String urlPath, DeleteSnapshotsOptionType deleteOptions,
BlobRequestConditions blobRequestConditions) {
setBatchType(BlobBatchType.DELETE);
return createBatchOperation(blobAsyncClient.deleteWithResponse(deleteOptions, blobRequestConditions),
urlPath, EXPECTED_DELETE_STATUS_CODES);
}
/**
* Adds a set tier operation to the batch.
*
* <p><strong>Code sample</strong></p>
*
* {@codesnippet com.azure.storage.blob.batch.BlobBatch.setBlobAccessTier
*
* @param containerName The container of the blob.
* @param blobName The name of the blob.
* @param accessTier The tier to set on the blob.
* @return a {@link Response} that will be used to associate this operation to the response when the batch is
* submitted.
* @throws UnsupportedOperationException If this batch has already added an operation of another type.
*/
public Response<Void> setBlobAccessTier(String containerName, String blobName, AccessTier accessTier) {
return setBlobAccessTierHelper(String.format(PATH_TEMPLATE, containerName,
Utility.urlEncode(Utility.urlDecode(blobName))), accessTier, null);
}
/**
* Adds a set tier operation to the batch.
*
* <p><strong>Code sample</strong></p>
*
* {@codesnippet com.azure.storage.blob.batch.BlobBatch.setBlobAccessTier
*
* @param containerName The container of the blob.
* @param blobName The name of the blob.
* @param accessTier The tier to set on the blob.
* @param leaseId The lease ID the active lease on the blob must match.
* @return a {@link Response} that will be used to associate this operation to the response when the batch is
* submitted.
* @throws UnsupportedOperationException If this batch has already added an operation of another type.
*/
public Response<Void> setBlobAccessTier(String containerName, String blobName, AccessTier accessTier,
String leaseId) {
return setBlobAccessTierHelper(String.format(PATH_TEMPLATE, containerName,
Utility.urlEncode(Utility.urlDecode(blobName))), accessTier, leaseId);
}
/**
* Adds a set tier operation to the batch.
*
* <p><strong>Code sample</strong></p>
*
* {@codesnippet com.azure.storage.blob.batch.BlobBatch.setBlobAccessTier
*
* @param blobUrl URL of the blob. Blob name must be encoded to UTF-8.
* @param accessTier The tier to set on the blob.
* @return a {@link Response} that will be used to associate this operation to the response when the batch is
* submitted.
* @throws UnsupportedOperationException If this batch has already added an operation of another type.
*/
public Response<Void> setBlobAccessTier(String blobUrl, AccessTier accessTier) {
return setBlobAccessTierHelper(getUrlPath(blobUrl), accessTier, null);
}
/**
 * Adds a set tier operation to the batch.
 *
 * <p><strong>Code sample</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.batch.BlobBatch.setBlobAccessTier}
 *
 * @param blobUrl URL of the blob. Blob name must be encoded to UTF-8.
 * @param accessTier The tier to set on the blob.
 * @param leaseId The lease ID the active lease on the blob must match.
 * @return a {@link Response} that will be used to associate this operation to the response when the batch is
 * submitted.
 * @throws UnsupportedOperationException If this batch has already added an operation of another type.
 */
public Response<Void> setBlobAccessTier(String blobUrl, AccessTier accessTier, String leaseId) {
return setBlobAccessTierHelper(getUrlPath(blobUrl), accessTier, leaseId);
}
// Shared implementation for all setBlobAccessTier overloads: locks the batch to set-tier
// operations, then queues the deferred set-tier request under the given URL path.
private Response<Void> setBlobAccessTierHelper(String urlPath, AccessTier accessTier, String leaseId) {
    setBatchType(BlobBatchType.SET_TIER);
    Mono<Response<Void>> operation = blobAsyncClient.setAccessTierWithResponse(accessTier, null, leaseId);
    return createBatchOperation(operation, urlPath, EXPECTED_SET_TIER_STATUS_CODES);
}
// Registers a deferred batch operation. The returned response object is a placeholder that is
// completed with the sub-request's result once the batch is submitted.
private <T> Response<T> createBatchOperation(Mono<Response<T>> response, String urlPath,
    int... expectedStatusCodes) {
    BlobBatchOperationResponse<T> deferredResponse = new BlobBatchOperationResponse<>(expectedStatusCodes);
    batchOperationQueue.add(new BlobBatchOperation<>(deferredResponse, response, urlPath));
    return deferredResponse;
}
// Only the path portion of a full blob URL is used to address a sub-request inside the batch.
private String getUrlPath(String url) {
    UrlBuilder parsedUrl = UrlBuilder.parse(url);
    return parsedUrl.getPath();
}
// The first queued operation fixes the batch's type; the service only accepts homogeneous
// batches, so any later operation of a different type is rejected.
private void setBatchType(BlobBatchType batchType) {
    if (this.batchType == null) {
        this.batchType = batchType;
        return;
    }
    if (this.batchType != batchType) {
        throw logger.logExceptionAsError(new UnsupportedOperationException(String.format(Locale.ROOT,
            "'BlobBatch' only supports homogeneous operations and is a %s batch.", this.batchType)));
    }
}
/*
 * This performs a cleanup operation that would be handled when the request is sent through Netty or OkHttp.
 * Additionally, it removes the "x-ms-version" header from the request as batch operation requests cannot have this
 * and it adds the header "Content-Id" that allows the request to be mapped to the response.
 */
private Mono<HttpResponse> cleanseHeaders(HttpPipelineCallContext context, HttpPipelineNextPolicy next) {
    // Sub-requests inside a batch must not carry the service version header.
    context.getHttpRequest().getHeaders().remove(X_MS_VERSION);
    // Strip header entries whose value was never populated before re-applying them.
    Map<String, String> scrubbedHeaders = context.getHttpRequest().getHeaders().toMap();
    scrubbedHeaders.entrySet().removeIf(entry -> entry.getValue() == null);
    context.getHttpRequest().setHeaders(new HttpHeaders(scrubbedHeaders));
    return next.process();
}
/*
 * This performs changing the request URL to the value passed through the pipeline context. This policy is used in
 * place of constructing a new client for each batch request that is being sent.
 */
private Mono<HttpResponse> setRequestUrl(HttpPipelineCallContext context, HttpPipelineNextPolicy next) {
try {
// Keep scheme/host/query from the current request; only the path is swapped out.
UrlBuilder requestUrl = UrlBuilder.parse(context.getHttpRequest().getUrl());
// NOTE(review): assumes BATCH_REQUEST_URL_PATH was always placed on the context by the
// batch submission path — Optional.get() would throw otherwise; confirm all callers set it.
requestUrl.setPath(context.getData(BATCH_REQUEST_URL_PATH).get().toString());
context.getHttpRequest().setUrl(requestUrl.toUrl());
} catch (MalformedURLException ex) {
// A malformed rewritten URL is a programming error, not a caller error.
throw logger.logExceptionAsError(Exceptions.propagate(new IllegalStateException(ex)));
}
return next.process();
}
/*
 * This will "send" the batch operation request when triggered, it simply acts as a way to build and write the
 * batch operation into the overall request and then returns nothing as the response.
 */
private Mono<HttpResponse> buildBatchOperation(HttpPipelineCallContext context, HttpPipelineNextPolicy next) {
// NOTE(review): assumes both context-data keys were set by the batch submission path —
// Optional.get() would throw otherwise; confirm callers always populate them.
BlobBatchOperationInfo operationInfo = (BlobBatchOperationInfo) context.getData(BATCH_OPERATION_INFO).get();
BlobBatchOperationResponse<?> batchOperationResponse =
(BlobBatchOperationResponse<?>) context.getData(BATCH_OPERATION_RESPONSE).get();
// Record the fully-built sub-request; the pipeline chain is intentionally short-circuited.
operationInfo.addBatchOperation(batchOperationResponse, context.getHttpRequest());
return Mono.empty();
}
} |
This isn't actually necessary, especially since we specify overwrite on downloadToFile. | public static void main(String[] args) throws IOException, NoSuchAlgorithmException {
/*
* From the Azure portal, get your Storage account's name and account key.
*/
String accountName = SampleHelper.getAccountName();
String accountKey = SampleHelper.getAccountKey();
/*
* Use your Storage account's name and key to create a credential object; this is used to access your account.
*/
StorageSharedKeyCredential credential = new StorageSharedKeyCredential(accountName, accountKey);
/*
* From the Azure portal, get your Storage account datalake service URL endpoint.
* The URL typically looks like this:
*/
String endPoint = String.format(Locale.ROOT, "https:
/*
* Create a DataLakeServiceClient object that wraps the service endpoint, credential and a request pipeline.
* Now you can use the storageClient to perform various file system and path operations.
*/
DataLakeServiceClient storageClient = new DataLakeServiceClientBuilder().endpoint(endPoint).credential(credential).buildClient();
/*
* This example shows several common operations just to get you started.
*/
/*
* Create a client that references a to-be-created file system in your Azure Storage account. This returns a
* FileSystemClient uses the same endpoint, credential and pipeline from storageClient.
* Note that file system names require lowercase.
*/
DataLakeFileSystemClient fileSystemClient = storageClient.getFileSystemClient("myjavafilesystemparallelupload" + System.currentTimeMillis());
/*
* Create a file system in Storage datalake account.
*/
fileSystemClient.create();
/*
* Create a FileClient object that wraps a file's endpoint and a default pipeline, the client give us access to upload the file.
*/
String filename = "BigFile.bin";
DataLakeFileClient fileClient = fileSystemClient.getFileClient(filename);
/*
* Create the empty uploadFile and downloadFile.
*/
File largeFile = createTempEmptyFile(filename);
File downloadFile = createTempEmptyFile("downloadFile.bin");
/*
* Generate random things to uploadFile, which makes the file with size of 100MB.
*/
long fileSize = 100 * 1024 * 1024L;
createTempFileWithFileSize(largeFile, fileSize);
/*
* Upload the large file to storage file.
*/
fileClient.uploadFromFile(largeFile.getPath());
/*
* Download the large file from storage file to the local downloadFile path.
*/
fileClient.readToFile(downloadFile.getPath(), true);
/*
* Check the files are same after the round-trip.
*/
if (Files.exists(downloadFile.toPath()) && Files.exists(largeFile.toPath())) {
checkTwoFilesAreTheSame(largeFile, downloadFile);
System.out.println("The file we upload is the same as the one we download.");
} else {
throw new RuntimeException("Did not find the upload or download file.");
}
/*
* Clean up the local files and storage file system.
*/
fileSystemClient.delete();
Files.deleteIfExists(largeFile.toPath());
Files.deleteIfExists(downloadFile.toPath());
} | File largeFile = createTempEmptyFile(filename); | public static void main(String[] args) throws IOException, NoSuchAlgorithmException {
/*
* From the Azure portal, get your Storage account's name and account key.
*/
String accountName = SampleHelper.getAccountName();
String accountKey = SampleHelper.getAccountKey();
/*
* Use your Storage account's name and key to create a credential object; this is used to access your account.
*/
StorageSharedKeyCredential credential = new StorageSharedKeyCredential(accountName, accountKey);
/*
* From the Azure portal, get your Storage account datalake service URL endpoint.
* The URL typically looks like this:
*/
String endPoint = String.format(Locale.ROOT, "https:
/*
* Create a DataLakeServiceClient object that wraps the service endpoint, credential and a request pipeline.
* Now you can use the storageClient to perform various file system and path operations.
*/
DataLakeServiceClient storageClient = new DataLakeServiceClientBuilder().endpoint(endPoint).credential(credential).buildClient();
/*
* This example shows several common operations just to get you started.
*/
/*
* Create a client that references a to-be-created file system in your Azure Storage account. This returns a
* FileSystemClient uses the same endpoint, credential and pipeline from storageClient.
* Note that file system names require lowercase.
*/
DataLakeFileSystemClient fileSystemClient = storageClient.getFileSystemClient("myjavafilesystemparallelupload" + System.currentTimeMillis());
/*
* Create a file system in Storage datalake account.
*/
fileSystemClient.create();
/*
* Create a FileClient object that wraps a file's endpoint and a default pipeline, the client give us access to upload the file.
*/
String filename = "BigFile.bin";
DataLakeFileClient fileClient = fileSystemClient.getFileClient(filename);
/*
* Create the empty uploadFile and downloadFile.
*/
File largeFile = createTempEmptyFile(filename);
File downloadFile = createTempEmptyFile("downloadFile.bin");
/*
* Generate random things to uploadFile, which makes the file with size of 100MB.
*/
long fileSize = 100 * 1024 * 1024L;
createTempFileWithFileSize(largeFile, fileSize);
/*
* Upload the large file to storage file.
*/
fileClient.uploadFromFile(largeFile.getPath());
/*
* Download the large file from storage file to the local downloadFile path.
*/
fileClient.readToFile(downloadFile.getPath(), true);
/*
* Check the files are same after the round-trip.
*/
if (Files.exists(downloadFile.toPath()) && Files.exists(largeFile.toPath())) {
checkTwoFilesAreTheSame(largeFile, downloadFile);
System.out.println("The file we upload is the same as the one we download.");
} else {
throw new RuntimeException("Did not find the upload or download file.");
}
/*
* Clean up the local files and storage file system.
*/
fileSystemClient.delete();
Files.deleteIfExists(largeFile.toPath());
Files.deleteIfExists(downloadFile.toPath());
} | class FileTransferExample {
private static final String LARGE_TEST_FOLDER = "test-large-files/";
/**
* Entry point into the file transfer examples for Storage datalake.
* @param args Unused. Arguments to the program.
* @throws IOException If an I/O error occurs
* @throws NoSuchAlgorithmException If {@code MD5} isn't supported
* @throws RuntimeException If the uploaded or downloaded file wasn't found
*/
/**
 * Ensures the sample's working directory exists and returns an (empty) file of the given name
 * inside it, creating the file if it does not already exist.
 */
private static File createTempEmptyFile(String fileName) throws IOException {
    String pathName = "./folderPath/" + LARGE_TEST_FOLDER;
    File directory = new File(pathName);
    if (!directory.exists() && !directory.mkdirs()) {
        throw new RuntimeException("Failed to create the large file dir.");
    }
    File file = new File(pathName + fileName);
    if (!file.exists() && !file.createNewFile()) {
        throw new RuntimeException("Failed to create the large file.");
    }
    return file;
}
/**
 * Pre-allocates {@code f} to {@code size} bytes (contents are zero/undefined filler).
 * try-with-resources guarantees the RandomAccessFile is closed even if setLength throws;
 * the previous version leaked the handle on that path.
 */
private static void createTempFileWithFileSize(File f, long size) throws IOException {
    try (RandomAccessFile raf = new RandomAccessFile(f, "rw")) {
        raf.setLength(size);
    }
}
/**
 * Verifies the two files have identical content by comparing their MD5 checksums.
 *
 * @param f1 First file (the uploaded source).
 * @param f2 Second file (the downloaded result).
 * @throws RuntimeException If the checksums differ.
 */
private static void checkTwoFilesAreTheSame(File f1, File f2) throws IOException, NoSuchAlgorithmException {
String checksumUpload = getFileChecksum(f1);
String checksumDownload = getFileChecksum(f2);
if (!checksumUpload.equals(checksumDownload)) {
throw new RuntimeException("The file upload does not match the file download.");
}
}
/**
 * Computes the MD5 checksum of {@code file} as a lowercase hex string.
 *
 * <p>The previous version decoded the raw digest bytes as UTF-8, which is lossy: invalid byte
 * sequences collapse to U+FFFD, so two different digests could produce equal strings. Hex
 * encoding is injective, making the equality comparison sound.</p>
 */
private static String getFileChecksum(File file) throws IOException, NoSuchAlgorithmException {
    MessageDigest md = MessageDigest.getInstance("MD5");
    try (FileInputStream fis = new FileInputStream(file); FileChannel ch = fis.getChannel()) {
        final ByteBuffer buf = ByteBuffer.allocateDirect(8192);
        int b = ch.read(buf);
        while (b != -1 && b != 0) {
            buf.flip();
            final byte[] bytes = new byte[b];
            buf.get(bytes);
            md.update(bytes, 0, b);
            buf.clear();
            b = ch.read(buf);
        }
        // digest() resets the MessageDigest, so capture the result exactly once.
        byte[] digest = md.digest();
        StringBuilder hex = new StringBuilder(digest.length * 2);
        for (byte digestByte : digest) {
            hex.append(String.format(Locale.ROOT, "%02x", digestByte));
        }
        return hex.toString();
    }
}
} | class FileTransferExample {
private static final String LARGE_TEST_FOLDER = "test-large-files/";
/**
* Entry point into the file transfer examples for Storage datalake.
* @param args Unused. Arguments to the program.
* @throws IOException If an I/O error occurs
* @throws NoSuchAlgorithmException If {@code MD5} isn't supported
* @throws RuntimeException If the uploaded or downloaded file wasn't found
*/
/**
 * Ensures the sample's working directory exists and returns an (empty) file of the given name
 * inside it, creating the file if it does not already exist.
 */
private static File createTempEmptyFile(String fileName) throws IOException {
    String pathName = "./folderPath/" + LARGE_TEST_FOLDER;
    File directory = new File(pathName);
    if (!directory.exists() && !directory.mkdirs()) {
        throw new RuntimeException("Failed to create the large file dir.");
    }
    File file = new File(pathName + fileName);
    if (!file.exists() && !file.createNewFile()) {
        throw new RuntimeException("Failed to create the large file.");
    }
    return file;
}
/**
 * Pre-allocates {@code f} to {@code size} bytes (contents are zero/undefined filler).
 * try-with-resources guarantees the RandomAccessFile is closed even if setLength throws;
 * the previous version leaked the handle on that path.
 */
private static void createTempFileWithFileSize(File f, long size) throws IOException {
    try (RandomAccessFile raf = new RandomAccessFile(f, "rw")) {
        raf.setLength(size);
    }
}
/**
 * Verifies the two files have identical content by comparing their MD5 checksums.
 *
 * @param f1 First file (the uploaded source).
 * @param f2 Second file (the downloaded result).
 * @throws RuntimeException If the checksums differ.
 */
private static void checkTwoFilesAreTheSame(File f1, File f2) throws IOException, NoSuchAlgorithmException {
String checksumUpload = getFileChecksum(f1);
String checksumDownload = getFileChecksum(f2);
if (!checksumUpload.equals(checksumDownload)) {
throw new RuntimeException("The file upload does not match the file download.");
}
}
/**
 * Computes the MD5 checksum of {@code file} as a lowercase hex string.
 *
 * <p>The previous version decoded the raw digest bytes as UTF-8, which is lossy: invalid byte
 * sequences collapse to U+FFFD, so two different digests could produce equal strings. Hex
 * encoding is injective, making the equality comparison sound.</p>
 */
private static String getFileChecksum(File file) throws IOException, NoSuchAlgorithmException {
    MessageDigest md = MessageDigest.getInstance("MD5");
    try (FileInputStream fis = new FileInputStream(file); FileChannel ch = fis.getChannel()) {
        final ByteBuffer buf = ByteBuffer.allocateDirect(8192);
        int b = ch.read(buf);
        while (b != -1 && b != 0) {
            buf.flip();
            final byte[] bytes = new byte[b];
            buf.get(bytes);
            md.update(bytes, 0, b);
            buf.clear();
            b = ch.read(buf);
        }
        // digest() resets the MessageDigest, so capture the result exactly once.
        byte[] digest = md.digest();
        StringBuilder hex = new StringBuilder(digest.length * 2);
        for (byte digestByte : digest) {
            hex.append(String.format(Locale.ROOT, "%02x", digestByte));
        }
        return hex.toString();
    }
}
} |
I think it's fine in the sample just because that helper will go through and make sure the upload/download file is in the same directory. | public static void main(String[] args) throws IOException, NoSuchAlgorithmException {
/*
* From the Azure portal, get your Storage account's name and account key.
*/
String accountName = SampleHelper.getAccountName();
String accountKey = SampleHelper.getAccountKey();
/*
* Use your Storage account's name and key to create a credential object; this is used to access your account.
*/
StorageSharedKeyCredential credential = new StorageSharedKeyCredential(accountName, accountKey);
/*
* From the Azure portal, get your Storage account datalake service URL endpoint.
* The URL typically looks like this:
*/
String endPoint = String.format(Locale.ROOT, "https:
/*
* Create a DataLakeServiceClient object that wraps the service endpoint, credential and a request pipeline.
* Now you can use the storageClient to perform various file system and path operations.
*/
DataLakeServiceClient storageClient = new DataLakeServiceClientBuilder().endpoint(endPoint).credential(credential).buildClient();
/*
* This example shows several common operations just to get you started.
*/
/*
* Create a client that references a to-be-created file system in your Azure Storage account. This returns a
* FileSystemClient uses the same endpoint, credential and pipeline from storageClient.
* Note that file system names require lowercase.
*/
DataLakeFileSystemClient fileSystemClient = storageClient.getFileSystemClient("myjavafilesystemparallelupload" + System.currentTimeMillis());
/*
* Create a file system in Storage datalake account.
*/
fileSystemClient.create();
/*
* Create a FileClient object that wraps a file's endpoint and a default pipeline, the client give us access to upload the file.
*/
String filename = "BigFile.bin";
DataLakeFileClient fileClient = fileSystemClient.getFileClient(filename);
/*
* Create the empty uploadFile and downloadFile.
*/
File largeFile = createTempEmptyFile(filename);
File downloadFile = createTempEmptyFile("downloadFile.bin");
/*
* Generate random things to uploadFile, which makes the file with size of 100MB.
*/
long fileSize = 100 * 1024 * 1024L;
createTempFileWithFileSize(largeFile, fileSize);
/*
* Upload the large file to storage file.
*/
fileClient.uploadFromFile(largeFile.getPath());
/*
* Download the large file from storage file to the local downloadFile path.
*/
fileClient.readToFile(downloadFile.getPath(), true);
/*
* Check the files are same after the round-trip.
*/
if (Files.exists(downloadFile.toPath()) && Files.exists(largeFile.toPath())) {
checkTwoFilesAreTheSame(largeFile, downloadFile);
System.out.println("The file we upload is the same as the one we download.");
} else {
throw new RuntimeException("Did not find the upload or download file.");
}
/*
* Clean up the local files and storage file system.
*/
fileSystemClient.delete();
Files.deleteIfExists(largeFile.toPath());
Files.deleteIfExists(downloadFile.toPath());
} | File largeFile = createTempEmptyFile(filename); | public static void main(String[] args) throws IOException, NoSuchAlgorithmException {
/*
* From the Azure portal, get your Storage account's name and account key.
*/
String accountName = SampleHelper.getAccountName();
String accountKey = SampleHelper.getAccountKey();
/*
* Use your Storage account's name and key to create a credential object; this is used to access your account.
*/
StorageSharedKeyCredential credential = new StorageSharedKeyCredential(accountName, accountKey);
/*
* From the Azure portal, get your Storage account datalake service URL endpoint.
* The URL typically looks like this:
*/
String endPoint = String.format(Locale.ROOT, "https:
/*
* Create a DataLakeServiceClient object that wraps the service endpoint, credential and a request pipeline.
* Now you can use the storageClient to perform various file system and path operations.
*/
DataLakeServiceClient storageClient = new DataLakeServiceClientBuilder().endpoint(endPoint).credential(credential).buildClient();
/*
* This example shows several common operations just to get you started.
*/
/*
* Create a client that references a to-be-created file system in your Azure Storage account. This returns a
* FileSystemClient uses the same endpoint, credential and pipeline from storageClient.
* Note that file system names require lowercase.
*/
DataLakeFileSystemClient fileSystemClient = storageClient.getFileSystemClient("myjavafilesystemparallelupload" + System.currentTimeMillis());
/*
* Create a file system in Storage datalake account.
*/
fileSystemClient.create();
/*
* Create a FileClient object that wraps a file's endpoint and a default pipeline, the client give us access to upload the file.
*/
String filename = "BigFile.bin";
DataLakeFileClient fileClient = fileSystemClient.getFileClient(filename);
/*
* Create the empty uploadFile and downloadFile.
*/
File largeFile = createTempEmptyFile(filename);
File downloadFile = createTempEmptyFile("downloadFile.bin");
/*
* Generate random things to uploadFile, which makes the file with size of 100MB.
*/
long fileSize = 100 * 1024 * 1024L;
createTempFileWithFileSize(largeFile, fileSize);
/*
* Upload the large file to storage file.
*/
fileClient.uploadFromFile(largeFile.getPath());
/*
* Download the large file from storage file to the local downloadFile path.
*/
fileClient.readToFile(downloadFile.getPath(), true);
/*
* Check the files are same after the round-trip.
*/
if (Files.exists(downloadFile.toPath()) && Files.exists(largeFile.toPath())) {
checkTwoFilesAreTheSame(largeFile, downloadFile);
System.out.println("The file we upload is the same as the one we download.");
} else {
throw new RuntimeException("Did not find the upload or download file.");
}
/*
* Clean up the local files and storage file system.
*/
fileSystemClient.delete();
Files.deleteIfExists(largeFile.toPath());
Files.deleteIfExists(downloadFile.toPath());
} | class FileTransferExample {
private static final String LARGE_TEST_FOLDER = "test-large-files/";
/**
* Entry point into the file transfer examples for Storage datalake.
* @param args Unused. Arguments to the program.
* @throws IOException If an I/O error occurs
* @throws NoSuchAlgorithmException If {@code MD5} isn't supported
* @throws RuntimeException If the uploaded or downloaded file wasn't found
*/
/**
 * Ensures the sample's working directory exists and returns an (empty) file of the given name
 * inside it, creating the file if it does not already exist.
 */
private static File createTempEmptyFile(String fileName) throws IOException {
    String pathName = "./folderPath/" + LARGE_TEST_FOLDER;
    File directory = new File(pathName);
    if (!directory.exists() && !directory.mkdirs()) {
        throw new RuntimeException("Failed to create the large file dir.");
    }
    File file = new File(pathName + fileName);
    if (!file.exists() && !file.createNewFile()) {
        throw new RuntimeException("Failed to create the large file.");
    }
    return file;
}
/**
 * Pre-allocates {@code f} to {@code size} bytes (contents are zero/undefined filler).
 * try-with-resources guarantees the RandomAccessFile is closed even if setLength throws;
 * the previous version leaked the handle on that path.
 */
private static void createTempFileWithFileSize(File f, long size) throws IOException {
    try (RandomAccessFile raf = new RandomAccessFile(f, "rw")) {
        raf.setLength(size);
    }
}
/**
 * Verifies the two files have identical content by comparing their MD5 checksums.
 *
 * @param f1 First file (the uploaded source).
 * @param f2 Second file (the downloaded result).
 * @throws RuntimeException If the checksums differ.
 */
private static void checkTwoFilesAreTheSame(File f1, File f2) throws IOException, NoSuchAlgorithmException {
String checksumUpload = getFileChecksum(f1);
String checksumDownload = getFileChecksum(f2);
if (!checksumUpload.equals(checksumDownload)) {
throw new RuntimeException("The file upload does not match the file download.");
}
}
/**
 * Computes the MD5 checksum of {@code file} as a lowercase hex string.
 *
 * <p>The previous version decoded the raw digest bytes as UTF-8, which is lossy: invalid byte
 * sequences collapse to U+FFFD, so two different digests could produce equal strings. Hex
 * encoding is injective, making the equality comparison sound.</p>
 */
private static String getFileChecksum(File file) throws IOException, NoSuchAlgorithmException {
    MessageDigest md = MessageDigest.getInstance("MD5");
    try (FileInputStream fis = new FileInputStream(file); FileChannel ch = fis.getChannel()) {
        final ByteBuffer buf = ByteBuffer.allocateDirect(8192);
        int b = ch.read(buf);
        while (b != -1 && b != 0) {
            buf.flip();
            final byte[] bytes = new byte[b];
            buf.get(bytes);
            md.update(bytes, 0, b);
            buf.clear();
            b = ch.read(buf);
        }
        // digest() resets the MessageDigest, so capture the result exactly once.
        byte[] digest = md.digest();
        StringBuilder hex = new StringBuilder(digest.length * 2);
        for (byte digestByte : digest) {
            hex.append(String.format(Locale.ROOT, "%02x", digestByte));
        }
        return hex.toString();
    }
}
} | class FileTransferExample {
private static final String LARGE_TEST_FOLDER = "test-large-files/";
/**
* Entry point into the file transfer examples for Storage datalake.
* @param args Unused. Arguments to the program.
* @throws IOException If an I/O error occurs
* @throws NoSuchAlgorithmException If {@code MD5} isn't supported
* @throws RuntimeException If the uploaded or downloaded file wasn't found
*/
/**
 * Ensures the sample's working directory exists and returns an (empty) file of the given name
 * inside it, creating the file if it does not already exist.
 */
private static File createTempEmptyFile(String fileName) throws IOException {
    String pathName = "./folderPath/" + LARGE_TEST_FOLDER;
    File directory = new File(pathName);
    if (!directory.exists() && !directory.mkdirs()) {
        throw new RuntimeException("Failed to create the large file dir.");
    }
    File file = new File(pathName + fileName);
    if (!file.exists() && !file.createNewFile()) {
        throw new RuntimeException("Failed to create the large file.");
    }
    return file;
}
/**
 * Pre-allocates {@code f} to {@code size} bytes (contents are zero/undefined filler).
 * try-with-resources guarantees the RandomAccessFile is closed even if setLength throws;
 * the previous version leaked the handle on that path.
 */
private static void createTempFileWithFileSize(File f, long size) throws IOException {
    try (RandomAccessFile raf = new RandomAccessFile(f, "rw")) {
        raf.setLength(size);
    }
}
/**
 * Verifies the two files have identical content by comparing their MD5 checksums.
 *
 * @param f1 First file (the uploaded source).
 * @param f2 Second file (the downloaded result).
 * @throws RuntimeException If the checksums differ.
 */
private static void checkTwoFilesAreTheSame(File f1, File f2) throws IOException, NoSuchAlgorithmException {
String checksumUpload = getFileChecksum(f1);
String checksumDownload = getFileChecksum(f2);
if (!checksumUpload.equals(checksumDownload)) {
throw new RuntimeException("The file upload does not match the file download.");
}
}
/**
 * Computes the MD5 checksum of {@code file} as a lowercase hex string.
 *
 * <p>The previous version decoded the raw digest bytes as UTF-8, which is lossy: invalid byte
 * sequences collapse to U+FFFD, so two different digests could produce equal strings. Hex
 * encoding is injective, making the equality comparison sound.</p>
 */
private static String getFileChecksum(File file) throws IOException, NoSuchAlgorithmException {
    MessageDigest md = MessageDigest.getInstance("MD5");
    try (FileInputStream fis = new FileInputStream(file); FileChannel ch = fis.getChannel()) {
        final ByteBuffer buf = ByteBuffer.allocateDirect(8192);
        int b = ch.read(buf);
        while (b != -1 && b != 0) {
            buf.flip();
            final byte[] bytes = new byte[b];
            buf.get(bytes);
            md.update(bytes, 0, b);
            buf.clear();
            b = ch.read(buf);
        }
        // digest() resets the MessageDigest, so capture the result exactly once.
        byte[] digest = md.digest();
        StringBuilder hex = new StringBuilder(digest.length * 2);
        for (byte digestByte : digest) {
            hex.append(String.format(Locale.ROOT, "%02x", digestByte));
        }
        return hex.toString();
    }
}
} |
nit: new line | private List<ServiceBusReceivedMessage> deserializeListOfMessages(Message amqpMessage) {
List<Message> listAmqpMessages = convertAMQPValueMessageToBrokeredMessage(amqpMessage);
List<ServiceBusReceivedMessage> receivedMessageList = new ArrayList<>();
for (Message oneAmqpMessage:listAmqpMessages
) {
ServiceBusReceivedMessage serviceBusReceivedMessage = deserializeMessage(oneAmqpMessage);
receivedMessageList.add(serviceBusReceivedMessage);
}
return receivedMessageList;
} | ) { | private List<ServiceBusReceivedMessage> deserializeListOfMessages(Message amqpMessage) {
List<Message> listAmqpMessages = convertAmqpValueMessageToBrokeredMessage(amqpMessage);
List<ServiceBusReceivedMessage> receivedMessageList = new ArrayList<>();
for (Message oneAmqpMessage:listAmqpMessages) {
ServiceBusReceivedMessage serviceBusReceivedMessage = deserializeMessage(oneAmqpMessage);
receivedMessageList.add(serviceBusReceivedMessage);
}
return receivedMessageList;
} | class ServiceBusMessageSerializer implements MessageSerializer {
// Shared empty payload used when an AMQP message carries no usable body.
private static final byte[] EMPTY_BYTE_ARRAY = new byte[0];
// AMQP message-annotation keys Service Bus uses for broker-managed metadata.
private static final String ENQUEUED_TIME_UTC_NAME = "x-opt-enqueued-time";
private static final String SCHEDULED_ENQUEUE_TIME_NAME = "x-opt-scheduled-enqueue-time";
private static final String SEQUENCE_NUMBER_NAME = "x-opt-sequence-number";
private static final String LOCKED_UNTIL_NAME = "x-opt-locked-until";
private static final String PARTITION_KEY_NAME = "x-opt-partition-key";
private static final String VIA_PARTITION_KEY_NAME = "x-opt-via-partition-key";
private static final String DEAD_LETTER_SOURCE_NAME = "x-opt-deadletter-source";
private final ClientLogger logger = new ClientLogger(ServiceBusMessageSerializer.class);
/**
 * Gets the serialized size of the AMQP message: the payload plus every key/value pair in the
 * message annotations and application properties.
 *
 * @param amqpMessage Message to measure; {@code null} yields 0.
 * @return Approximate serialized size in bytes.
 */
@Override
public int getSize(org.apache.qpid.proton.message.Message amqpMessage) {
    if (amqpMessage == null) {
        return 0;
    }
    int payloadSize = getPayloadSize(amqpMessage);
    final MessageAnnotations messageAnnotations = amqpMessage.getMessageAnnotations();
    final ApplicationProperties applicationProperties = amqpMessage.getApplicationProperties();
    // The two metadata sections are sized identically, so share one helper instead of the
    // previous duplicated summation loops.
    int annotationsSize = messageAnnotations == null ? 0 : sizeOfMapEntries(messageAnnotations.getValue());
    int applicationPropertiesSize =
        applicationProperties == null ? 0 : sizeOfMapEntries(applicationProperties.getValue());
    return annotationsSize + applicationPropertiesSize + payloadSize;
}

/**
 * Sums the serialized size of every key and value in the given map.
 */
private static int sizeOfMapEntries(Map<?, ?> map) {
    int total = 0;
    for (Map.Entry<?, ?> entry : map.entrySet()) {
        total += sizeof(entry.getKey()) + sizeof(entry.getValue());
    }
    return total;
}
/**
 * Creates the AMQP message represented by this {@code object}. Currently, only supports serializing
 * {@link ServiceBusMessage}.
 *
 * @param object Concrete object to serialize.
 *
 * @return A new AMQP message for this {@code object}.
 *
 * @throws IllegalArgumentException if {@code object} is not an instance of {@link ServiceBusMessage}.
 */
@Override
public <T> org.apache.qpid.proton.message.Message serialize(T object) {
Objects.requireNonNull(object, "'object' to serialize cannot be null.");
if (!(object instanceof ServiceBusMessage)) {
throw logger.logExceptionAsError(new IllegalArgumentException(
"Cannot serialize object that is not ServiceBusMessage. Clazz: " + object.getClass()));
}
final ServiceBusMessage brokeredMessage = (ServiceBusMessage) object;
final org.apache.qpid.proton.message.Message amqpMessage = Proton.message();
// Body is always carried as an AMQP Data section of raw bytes.
final byte[] body = brokeredMessage.getBody();
amqpMessage.setBody(new Data(new Binary(body)));
// User-defined properties map directly to AMQP application properties.
if (brokeredMessage.getProperties() != null) {
amqpMessage.setApplicationProperties(new ApplicationProperties(brokeredMessage.getProperties()));
}
if (brokeredMessage.getTimeToLive() != null) {
amqpMessage.setTtl(brokeredMessage.getTimeToLive().toMillis());
}
// Ensure the properties section exists before setting "to" on it below.
if (amqpMessage.getProperties() == null) {
amqpMessage.setProperties(new Properties());
}
// Standard AMQP properties carried over from the brokered message.
amqpMessage.setMessageId(brokeredMessage.getMessageId());
amqpMessage.setContentType(brokeredMessage.getContentType());
amqpMessage.setCorrelationId(brokeredMessage.getCorrelationId());
amqpMessage.setSubject(brokeredMessage.getLabel());
amqpMessage.getProperties().setTo(brokeredMessage.getTo());
amqpMessage.setReplyTo(brokeredMessage.getReplyTo());
amqpMessage.setReplyToGroupId(brokeredMessage.getReplyToSessionId());
amqpMessage.setGroupId(brokeredMessage.getSessionId());
// Service Bus broker metadata travels as message annotations; only populated values are set.
final Map<Symbol, Object> messageAnnotationsMap = new HashMap<>();
if (brokeredMessage.getScheduledEnqueueTime() != null) {
messageAnnotationsMap.put(Symbol.valueOf(SCHEDULED_ENQUEUE_TIME_NAME),
Date.from(brokeredMessage.getScheduledEnqueueTime()));
}
final String partitionKey = brokeredMessage.getPartitionKey();
if (partitionKey != null && !partitionKey.isEmpty()) {
messageAnnotationsMap.put(Symbol.valueOf(PARTITION_KEY_NAME), brokeredMessage.getPartitionKey());
}
final String viaPartitionKey = brokeredMessage.getViaPartitionKey();
if (viaPartitionKey != null && !viaPartitionKey.isEmpty()) {
messageAnnotationsMap.put(Symbol.valueOf(VIA_PARTITION_KEY_NAME), viaPartitionKey);
}
amqpMessage.setMessageAnnotations(new MessageAnnotations(messageAnnotationsMap));
return amqpMessage;
}
@SuppressWarnings("unchecked")
@Override
public <T> T deserialize(org.apache.qpid.proton.message.Message message, Class<T> clazz) {
    Objects.requireNonNull(message, "'message' cannot be null.");
    Objects.requireNonNull(clazz, "'clazz' cannot be null.");
    // A single received message or a batched (AMQP-value wrapped) list are the only targets.
    if (clazz == ServiceBusReceivedMessage.class) {
        return (T) deserializeMessage(message);
    }
    if (clazz == List.class) {
        return (T) deserializeListOfMessages(message);
    }
    throw logger.logExceptionAsError(new IllegalArgumentException(
        "Deserialization only supports ServiceBusReceivedMessage."));
}
/**
 * Converts a single proton-j AMQP message into a {@link ServiceBusReceivedMessage}, copying the
 * body, application properties, standard AMQP properties, and Service Bus broker annotations.
 * A missing or non-Data body is logged and replaced with an empty payload rather than failing.
 */
private ServiceBusReceivedMessage deserializeMessage(org.apache.qpid.proton.message.Message amqpMessage) {
final ServiceBusReceivedMessage brokeredMessage;
final Section body = amqpMessage.getBody();
if (body != null) {
if (body instanceof Data) {
final Binary messageData = ((Data) body).getValue();
final byte[] bytes = messageData.getArray();
brokeredMessage = new ServiceBusReceivedMessage(bytes);
} else {
// Non-Data bodies (AmqpValue/AmqpSequence) are not supported; degrade to an empty payload.
logger.warning(String.format(Messages.MESSAGE_NOT_OF_TYPE, body.getType()));
brokeredMessage = new ServiceBusReceivedMessage(EMPTY_BYTE_ARRAY);
}
} else {
logger.warning(String.format(Messages.MESSAGE_NOT_OF_TYPE, "null"));
brokeredMessage = new ServiceBusReceivedMessage(EMPTY_BYTE_ARRAY);
}
// User-defined application properties.
ApplicationProperties applicationProperties = amqpMessage.getApplicationProperties();
if (applicationProperties != null) {
brokeredMessage.getProperties().putAll(applicationProperties.getValue());
}
brokeredMessage.setTimeToLive(Duration.ofMillis(amqpMessage.getTtl()));
brokeredMessage.setDeliveryCount(amqpMessage.getDeliveryCount());
// Standard AMQP properties; ids may be non-String AMQP types, so stringify them.
final Object messageId = amqpMessage.getMessageId();
if (messageId != null) {
brokeredMessage.setMessageId(messageId.toString());
}
brokeredMessage.setContentType(amqpMessage.getContentType());
final Object correlationId = amqpMessage.getCorrelationId();
if (correlationId != null) {
brokeredMessage.setCorrelationId(correlationId.toString());
}
final Properties properties = amqpMessage.getProperties();
if (properties != null) {
brokeredMessage.setTo(properties.getTo());
}
brokeredMessage.setLabel(amqpMessage.getSubject());
brokeredMessage.setReplyTo(amqpMessage.getReplyTo());
brokeredMessage.setReplyToSessionId(amqpMessage.getReplyToGroupId());
brokeredMessage.setSessionId(amqpMessage.getGroupId());
// Broker-set metadata arrives as message annotations; unknown keys are logged and skipped.
final MessageAnnotations messageAnnotations = amqpMessage.getMessageAnnotations();
if (messageAnnotations != null) {
Map<Symbol, Object> messageAnnotationsMap = messageAnnotations.getValue();
if (messageAnnotationsMap != null) {
for (Map.Entry<Symbol, Object> entry : messageAnnotationsMap.entrySet()) {
final String key = entry.getKey().toString();
final Object value = entry.getValue();
switch (key) {
case ENQUEUED_TIME_UTC_NAME:
brokeredMessage.setEnqueuedTime(((Date) value).toInstant());
break;
case SCHEDULED_ENQUEUE_TIME_NAME:
brokeredMessage.setScheduledEnqueueTime(((Date) value).toInstant());
break;
case SEQUENCE_NUMBER_NAME:
brokeredMessage.setSequenceNumber((long) value);
break;
case LOCKED_UNTIL_NAME:
brokeredMessage.setLockedUntil(((Date) value).toInstant());
break;
case PARTITION_KEY_NAME:
brokeredMessage.setPartitionKey((String) value);
break;
case VIA_PARTITION_KEY_NAME:
brokeredMessage.setViaPartitionKey((String) value);
break;
case DEAD_LETTER_SOURCE_NAME:
brokeredMessage.setDeadLetterSource((String) value);
break;
default:
logger.info("Unrecognised key: {}, value: {}", key, value);
break;
}
}
}
}
return brokeredMessage;
}
// Size of the body section only; annotations and application properties are measured
// separately in getSize. Unknown or absent bodies count as zero bytes.
private static int getPayloadSize(org.apache.qpid.proton.message.Message msg) {
    if (msg == null || msg.getBody() == null) {
        return 0;
    }
    final Section bodySection = msg.getBody();
    if (bodySection instanceof AmqpValue) {
        return sizeof(((AmqpValue) bodySection).getValue());
    }
    if (bodySection instanceof AmqpSequence) {
        return sizeof(((AmqpSequence) bodySection).getValue());
    }
    if (bodySection instanceof Data) {
        return sizeof(((Data) bodySection).getValue());
    }
    return 0;
}
@SuppressWarnings("rawtypes")
// Best-effort estimate of the AMQP-encoded size, in bytes, of an arbitrary value.
// NOTE(review): these figures approximate the proton-j wire encoding (strings are
// counted as UTF-16 code units, container types add 8 bytes of framing overhead);
// the result is used for sizing, not for exact frame construction — confirm against
// the AMQP 1.0 type encodings if exact sizes are ever required.
private static int sizeof(Object obj) {
if (obj == null) {
return 0;
}
// Character data: two bytes per char (UTF-16 code units).
if (obj instanceof String) {
return obj.toString().length() << 1;
}
if (obj instanceof Symbol) {
return ((Symbol) obj).length() << 1;
}
// Fixed-width primitives and their unsigned proton-j wrappers.
if (obj instanceof Byte || obj instanceof UnsignedByte) {
return Byte.BYTES;
}
if (obj instanceof Integer || obj instanceof UnsignedInteger) {
return Integer.BYTES;
}
// Dates are encoded as 64-bit millisecond timestamps.
if (obj instanceof Long || obj instanceof UnsignedLong || obj instanceof Date) {
return Long.BYTES;
}
if (obj instanceof Short || obj instanceof UnsignedShort) {
return Short.BYTES;
}
if (obj instanceof Boolean) {
return 1;
}
if (obj instanceof Character) {
return 4;
}
if (obj instanceof Float) {
return Float.BYTES;
}
if (obj instanceof Double) {
return Double.BYTES;
}
if (obj instanceof UUID) {
return 16;
}
// AMQP decimal types: 32/64/128-bit.
if (obj instanceof Decimal32) {
return 4;
}
if (obj instanceof Decimal64) {
return 8;
}
if (obj instanceof Decimal128) {
return 16;
}
if (obj instanceof Binary) {
return ((Binary) obj).getLength();
}
// Transaction performatives carry small fixed framing plus the txn-id payload.
if (obj instanceof Declare) {
return 7;
}
if (obj instanceof Discharge) {
Discharge discharge = (Discharge) obj;
return 12 + discharge.getTxnId().getLength();
}
// Containers: 8 bytes of overhead plus the recursive size of every element.
if (obj instanceof Map) {
int size = 8;
Map map = (Map) obj;
for (Object value : map.keySet()) {
size += sizeof(value);
}
for (Object value : map.values()) {
size += sizeof(value);
}
return size;
}
if (obj instanceof Iterable) {
int size = 8;
for (Object innerObject : (Iterable) obj) {
size += sizeof(innerObject);
}
return size;
}
if (obj.getClass().isArray()) {
int size = 8;
int length = Array.getLength(obj);
for (int i = 0; i < length; i++) {
size += sizeof(Array.get(obj, i));
}
return size;
}
// Anything else has no known encoding — fail loudly rather than under-count.
throw new IllegalArgumentException(String.format(Locale.US,
"Encoding Type: %s is not supported", obj.getClass()));
}
/**
 * Extracts the individual AMQP messages embedded in a management (request/response)
 * reply whose body is an {@link AmqpValue} map. Returns an empty list when the
 * response status is not OK or the body does not have the expected shape.
 */
private List<Message> convertAMQPValueMessageToBrokeredMessage(Message amqpResponseMessage) {
    final List<Message> decoded = new ArrayList<>();
    final int statusCode = RequestResponseUtils.getResponseStatusCode(amqpResponseMessage);
    if (statusCode != ManagementConstants.REQUEST_RESPONSE_OK_STATUS_CODE) {
        return decoded;
    }
    final Object responseBody = ((AmqpValue) amqpResponseMessage.getBody()).getValue();
    if (!(responseBody instanceof Map)) {
        // instanceof is false for null, so a missing body falls through here too.
        return decoded;
    }
    final Object messages = ((Map) responseBody).get(ManagementConstants.REQUEST_RESPONSE_MESSAGES);
    if (!(messages instanceof Iterable)) {
        return decoded;
    }
    for (Object entry : (Iterable) messages) {
        if (!(entry instanceof Map)) {
            continue;
        }
        // Each entry wraps the serialized message bytes under the "message" key.
        final Binary payload = (Binary) ((Map) entry).get(ManagementConstants.REQUEST_RESPONSE_MESSAGE);
        final Message responseMessage = Message.Factory.create();
        responseMessage.decode(payload.getArray(), payload.getArrayOffset(), payload.getLength());
        decoded.add(responseMessage);
    }
    return decoded;
}
} | class ServiceBusMessageSerializer implements MessageSerializer {
private static final byte[] EMPTY_BYTE_ARRAY = new byte[0];
private static final String ENQUEUED_TIME_UTC_NAME = "x-opt-enqueued-time";
private static final String SCHEDULED_ENQUEUE_TIME_NAME = "x-opt-scheduled-enqueue-time";
private static final String SEQUENCE_NUMBER_NAME = "x-opt-sequence-number";
private static final String LOCKED_UNTIL_NAME = "x-opt-locked-until";
private static final String PARTITION_KEY_NAME = "x-opt-partition-key";
private static final String VIA_PARTITION_KEY_NAME = "x-opt-via-partition-key";
private static final String DEAD_LETTER_SOURCE_NAME = "x-opt-deadletter-source";
private static final String REQUEST_RESPONSE_MESSAGES = "messages";
private static final String REQUEST_RESPONSE_MESSAGE = "message";
private static final int REQUEST_RESPONSE_OK_STATUS_CODE = 200;
private final ClientLogger logger = new ClientLogger(ServiceBusMessageSerializer.class);
/**
 * Gets the estimated serialized size, in bytes, of the AMQP message: the body payload
 * plus the message annotations and application properties sections.
 *
 * @param amqpMessage Message to measure; a {@code null} message has size 0.
 * @return Approximate encoded size in bytes (see {@code sizeof} for the estimation rules).
 */
@Override
public int getSize(org.apache.qpid.proton.message.Message amqpMessage) {
if (amqpMessage == null) {
return 0;
}
int payloadSize = getPayloadSize(amqpMessage);
// Both sections are optional on a proton-j message; treat absent as empty.
final MessageAnnotations messageAnnotations = amqpMessage.getMessageAnnotations();
final ApplicationProperties applicationProperties = amqpMessage.getApplicationProperties();
int annotationsSize = 0;
int applicationPropertiesSize = 0;
if (messageAnnotations != null) {
final Map<Symbol, Object> map = messageAnnotations.getValue();
for (Map.Entry<Symbol, Object> entry : map.entrySet()) {
// Each entry contributes its key's and value's encoded sizes.
final int size = sizeof(entry.getKey()) + sizeof(entry.getValue());
annotationsSize += size;
}
}
if (applicationProperties != null) {
final Map<String, Object> map = applicationProperties.getValue();
for (Map.Entry<String, Object> entry : map.entrySet()) {
final int size = sizeof(entry.getKey()) + sizeof(entry.getValue());
applicationPropertiesSize += size;
}
}
return annotationsSize + applicationPropertiesSize + payloadSize;
}
/**
* Creates the AMQP message represented by this {@code object}. Currently, only supports serializing
* {@link ServiceBusMessage}.
*
* @param object Concrete object to deserialize.
*
* @return A new AMQP message for this {@code object}.
*
* @throws IllegalArgumentException if {@code object} is not an instance of {@link ServiceBusMessage}.
*/
@Override
public <T> org.apache.qpid.proton.message.Message serialize(T object) {
Objects.requireNonNull(object, "'object' to serialize cannot be null.");
// Only ServiceBusMessage instances are supported; anything else is a caller error.
if (!(object instanceof ServiceBusMessage)) {
throw logger.logExceptionAsError(new IllegalArgumentException(
"Cannot serialize object that is not ServiceBusMessage. Clazz: " + object.getClass()));
}
final ServiceBusMessage brokeredMessage = (ServiceBusMessage) object;
final org.apache.qpid.proton.message.Message amqpMessage = Proton.message();
// Body bytes are carried as a Data section.
final byte[] body = brokeredMessage.getBody();
amqpMessage.setBody(new Data(new Binary(body)));
if (brokeredMessage.getProperties() != null) {
amqpMessage.setApplicationProperties(new ApplicationProperties(brokeredMessage.getProperties()));
}
if (brokeredMessage.getTimeToLive() != null) {
amqpMessage.setTtl(brokeredMessage.getTimeToLive().toMillis());
}
// Ensure the properties section exists before setTo() below writes into it directly.
if (amqpMessage.getProperties() == null) {
amqpMessage.setProperties(new Properties());
}
// Copy the standard AMQP properties one-to-one from the brokered message.
amqpMessage.setMessageId(brokeredMessage.getMessageId());
amqpMessage.setContentType(brokeredMessage.getContentType());
amqpMessage.setCorrelationId(brokeredMessage.getCorrelationId());
amqpMessage.setSubject(brokeredMessage.getLabel());
amqpMessage.getProperties().setTo(brokeredMessage.getTo());
amqpMessage.setReplyTo(brokeredMessage.getReplyTo());
amqpMessage.setReplyToGroupId(brokeredMessage.getReplyToSessionId());
amqpMessage.setGroupId(brokeredMessage.getSessionId());
// Service Bus-specific metadata travels as message annotations; only set keys
// are added so the annotations map stays minimal.
final Map<Symbol, Object> messageAnnotationsMap = new HashMap<>();
if (brokeredMessage.getScheduledEnqueueTime() != null) {
messageAnnotationsMap.put(Symbol.valueOf(SCHEDULED_ENQUEUE_TIME_NAME),
Date.from(brokeredMessage.getScheduledEnqueueTime()));
}
final String partitionKey = brokeredMessage.getPartitionKey();
if (partitionKey != null && !partitionKey.isEmpty()) {
messageAnnotationsMap.put(Symbol.valueOf(PARTITION_KEY_NAME), brokeredMessage.getPartitionKey());
}
final String viaPartitionKey = brokeredMessage.getViaPartitionKey();
if (viaPartitionKey != null && !viaPartitionKey.isEmpty()) {
messageAnnotationsMap.put(Symbol.valueOf(VIA_PARTITION_KEY_NAME), viaPartitionKey);
}
amqpMessage.setMessageAnnotations(new MessageAnnotations(messageAnnotationsMap));
return amqpMessage;
}
/**
 * Deserializes the AMQP message into the requested type. Supported targets are
 * {@link ServiceBusReceivedMessage} (a single message) and {@link List} (a batch).
 *
 * @throws IllegalArgumentException when {@code clazz} is any other type.
 */
@SuppressWarnings("unchecked")
@Override
public <T> T deserialize(org.apache.qpid.proton.message.Message message, Class<T> clazz) {
    Objects.requireNonNull(message, "'message' cannot be null.");
    Objects.requireNonNull(clazz, "'clazz' cannot be null.");
    if (clazz == ServiceBusReceivedMessage.class) {
        return (T) deserializeMessage(message);
    }
    if (clazz == List.class) {
        return (T) deserializeListOfMessages(message);
    }
    throw logger.logExceptionAsError(new IllegalArgumentException(
        "Deserialization only supports ServiceBusReceivedMessage."));
}
// Converts a raw proton-j message into a ServiceBusReceivedMessage, copying the body,
// application properties, standard AMQP properties, and broker-set message annotations.
private ServiceBusReceivedMessage deserializeMessage(org.apache.qpid.proton.message.Message amqpMessage) {
final ServiceBusReceivedMessage brokeredMessage;
final Section body = amqpMessage.getBody();
// Only Data-typed bodies are supported; anything else (or a missing body) is logged
// and replaced with an empty payload rather than failing the receive.
if (body != null) {
if (body instanceof Data) {
final Binary messageData = ((Data) body).getValue();
final byte[] bytes = messageData.getArray();
brokeredMessage = new ServiceBusReceivedMessage(bytes);
} else {
logger.warning(String.format(Messages.MESSAGE_NOT_OF_TYPE, body.getType()));
brokeredMessage = new ServiceBusReceivedMessage(EMPTY_BYTE_ARRAY);
}
} else {
logger.warning(String.format(Messages.MESSAGE_NOT_OF_TYPE, "null"));
brokeredMessage = new ServiceBusReceivedMessage(EMPTY_BYTE_ARRAY);
}
// User-defined application properties are merged into the message's property map.
ApplicationProperties applicationProperties = amqpMessage.getApplicationProperties();
if (applicationProperties != null) {
brokeredMessage.getProperties().putAll(applicationProperties.getValue());
}
brokeredMessage.setTimeToLive(Duration.ofMillis(amqpMessage.getTtl()));
brokeredMessage.setDeliveryCount(amqpMessage.getDeliveryCount());
// Message/correlation ids are object-typed in proton-j; stringify when present.
final Object messageId = amqpMessage.getMessageId();
if (messageId != null) {
brokeredMessage.setMessageId(messageId.toString());
}
brokeredMessage.setContentType(amqpMessage.getContentType());
final Object correlationId = amqpMessage.getCorrelationId();
if (correlationId != null) {
brokeredMessage.setCorrelationId(correlationId.toString());
}
final Properties properties = amqpMessage.getProperties();
if (properties != null) {
brokeredMessage.setTo(properties.getTo());
}
brokeredMessage.setLabel(amqpMessage.getSubject());
brokeredMessage.setReplyTo(amqpMessage.getReplyTo());
brokeredMessage.setReplyToSessionId(amqpMessage.getReplyToGroupId());
brokeredMessage.setSessionId(amqpMessage.getGroupId());
// Broker-stamped metadata (enqueue time, sequence number, lock, partition keys, ...)
// arrives as message annotations keyed by the x-opt-* names; unknown keys are
// logged and ignored so new broker annotations don't break deserialization.
final MessageAnnotations messageAnnotations = amqpMessage.getMessageAnnotations();
if (messageAnnotations != null) {
Map<Symbol, Object> messageAnnotationsMap = messageAnnotations.getValue();
if (messageAnnotationsMap != null) {
for (Map.Entry<Symbol, Object> entry : messageAnnotationsMap.entrySet()) {
final String key = entry.getKey().toString();
final Object value = entry.getValue();
switch (key) {
case ENQUEUED_TIME_UTC_NAME:
brokeredMessage.setEnqueuedTime(((Date) value).toInstant());
break;
case SCHEDULED_ENQUEUE_TIME_NAME:
brokeredMessage.setScheduledEnqueueTime(((Date) value).toInstant());
break;
case SEQUENCE_NUMBER_NAME:
brokeredMessage.setSequenceNumber((long) value);
break;
case LOCKED_UNTIL_NAME:
brokeredMessage.setLockedUntil(((Date) value).toInstant());
break;
case PARTITION_KEY_NAME:
brokeredMessage.setPartitionKey((String) value);
break;
case VIA_PARTITION_KEY_NAME:
brokeredMessage.setViaPartitionKey((String) value);
break;
case DEAD_LETTER_SOURCE_NAME:
brokeredMessage.setDeadLetterSource((String) value);
break;
default:
logger.info("Unrecognised key: {}, value: {}", key, value);
break;
}
}
}
}
return brokeredMessage;
}
/**
 * Best-effort size, in bytes, of the message body. A null message, absent body,
 * or unrecognised section type counts as 0.
 */
private static int getPayloadSize(org.apache.qpid.proton.message.Message msg) {
    if (msg == null) {
        return 0;
    }
    // instanceof is false for null, so a missing body falls through to the final return.
    final Section section = msg.getBody();
    if (section instanceof AmqpValue) {
        return sizeof(((AmqpValue) section).getValue());
    }
    if (section instanceof AmqpSequence) {
        return sizeof(((AmqpSequence) section).getValue());
    }
    if (section instanceof Data) {
        return sizeof(((Data) section).getValue());
    }
    return 0;
}
@SuppressWarnings("rawtypes")
// Best-effort estimate of the AMQP-encoded size, in bytes, of an arbitrary value.
// NOTE(review): these figures approximate the proton-j wire encoding (strings are
// counted as UTF-16 code units, container types add 8 bytes of framing overhead);
// the result is used for sizing, not for exact frame construction.
private static int sizeof(Object obj) {
if (obj == null) {
return 0;
}
// Character data: two bytes per char (UTF-16 code units).
if (obj instanceof String) {
return obj.toString().length() << 1;
}
if (obj instanceof Symbol) {
return ((Symbol) obj).length() << 1;
}
// Fixed-width primitives and their unsigned proton-j wrappers.
if (obj instanceof Byte || obj instanceof UnsignedByte) {
return Byte.BYTES;
}
if (obj instanceof Integer || obj instanceof UnsignedInteger) {
return Integer.BYTES;
}
// Dates are encoded as 64-bit millisecond timestamps.
if (obj instanceof Long || obj instanceof UnsignedLong || obj instanceof Date) {
return Long.BYTES;
}
if (obj instanceof Short || obj instanceof UnsignedShort) {
return Short.BYTES;
}
if (obj instanceof Boolean) {
return 1;
}
if (obj instanceof Character) {
return 4;
}
if (obj instanceof Float) {
return Float.BYTES;
}
if (obj instanceof Double) {
return Double.BYTES;
}
if (obj instanceof UUID) {
return 16;
}
// AMQP decimal types: 32/64/128-bit.
if (obj instanceof Decimal32) {
return 4;
}
if (obj instanceof Decimal64) {
return 8;
}
if (obj instanceof Decimal128) {
return 16;
}
if (obj instanceof Binary) {
return ((Binary) obj).getLength();
}
// Transaction performatives carry small fixed framing plus the txn-id payload.
if (obj instanceof Declare) {
return 7;
}
if (obj instanceof Discharge) {
Discharge discharge = (Discharge) obj;
return 12 + discharge.getTxnId().getLength();
}
// Containers: 8 bytes of overhead plus the recursive size of every element.
if (obj instanceof Map) {
int size = 8;
Map map = (Map) obj;
for (Object value : map.keySet()) {
size += sizeof(value);
}
for (Object value : map.values()) {
size += sizeof(value);
}
return size;
}
if (obj instanceof Iterable) {
int size = 8;
for (Object innerObject : (Iterable) obj) {
size += sizeof(innerObject);
}
return size;
}
if (obj.getClass().isArray()) {
int size = 8;
int length = Array.getLength(obj);
for (int i = 0; i < length; i++) {
size += sizeof(Array.get(obj, i));
}
return size;
}
// Anything else has no known encoding — fail loudly rather than under-count.
throw new IllegalArgumentException(String.format(Locale.US,
"Encoding Type: %s is not supported", obj.getClass()));
}
// Extracts the individual AMQP messages embedded in a management (request/response)
// reply whose body is an AmqpValue map. Returns an empty list when the response
// status is not OK or the body does not have the expected shape.
private List<Message> convertAmqpValueMessageToBrokeredMessage(Message amqpResponseMessage) {
List<Message> messageList = new ArrayList<>();
int statusCode = RequestResponseUtils.getResponseStatusCode(amqpResponseMessage);
if (statusCode == REQUEST_RESPONSE_OK_STATUS_CODE) {
Object responseBodyMap = ((AmqpValue) amqpResponseMessage.getBody()).getValue();
if (responseBodyMap != null && responseBodyMap instanceof Map) {
// The "messages" entry holds one map per embedded message.
Object messages = ((Map) responseBodyMap).get(REQUEST_RESPONSE_MESSAGES);
if (messages != null && messages instanceof Iterable) {
for (Object message : (Iterable) messages) {
if (message instanceof Map) {
// Each entry wraps the serialized message bytes under the "message" key;
// decode them into a fresh proton-j message.
Message responseMessage = Message.Factory.create();
Binary messagePayLoad = (Binary) ((Map) message)
.get(REQUEST_RESPONSE_MESSAGE);
responseMessage.decode(messagePayLoad.getArray(), messagePayLoad.getArrayOffset(),
messagePayLoad.getLength());
messageList.add(responseMessage);
}
}
}
}
}
return messageList;
}
} |
It is already there in `@AfterEach` | void peekWithSequenceOneMessage() {
final int numberOfEvents = 1;
final int fromSequenceNumber = 10;
when(managementNode.peek(fromSequenceNumber))
.thenReturn(Mono.just(mock(ServiceBusReceivedMessage.class)));
StepVerifier.create(consumer.peek(fromSequenceNumber))
.expectNextCount(numberOfEvents)
.verifyComplete();
Mockito.framework().clearInlineMocks();
} | Mockito.framework().clearInlineMocks(); | void peekWithSequenceOneMessage() {
final int numberOfEvents = 1;
final int fromSequenceNumber = 10;
when(managementNode.peek(fromSequenceNumber))
.thenReturn(just(mock(ServiceBusReceivedMessage.class)));
StepVerifier.create(consumer.peek(fromSequenceNumber))
.expectNextCount(numberOfEvents)
.verifyComplete();
} | class ServiceBusReceiverAsyncClientTest {
private static final String PAYLOAD = "hello";
private static final byte[] PAYLOAD_BYTES = PAYLOAD.getBytes(UTF_8);
private static final int PREFETCH = 5;
private static final String NAMESPACE = "my-namespace-foo";
private static final String ENTITY_NAME = "queue-name";
private final String messageTrackingUUID = UUID.randomUUID().toString();
private final DirectProcessor<AmqpEndpointState> endpointProcessor = DirectProcessor.create();
private final FluxSink<AmqpEndpointState> endpointSink = endpointProcessor.sink(FluxSink.OverflowStrategy.BUFFER);
private final DirectProcessor<Message> messageProcessor = DirectProcessor.create();
@Mock
private AmqpReceiveLink amqpReceiveLink;
@Mock
private ServiceBusAmqpConnection connection;
@Mock
private TokenCredential tokenCredential;
@Mock
private MessageSerializer messageSerializer;
@Mock
private TracerProvider tracerProvider;
@Mock
private ServiceBusManagementNode managementNode;
private ServiceBusReceiverAsyncClient consumer;
@BeforeAll
static void beforeAll() {
StepVerifier.setDefaultTimeout(Duration.ofSeconds(10));
}
@AfterAll
static void afterAll() {
StepVerifier.resetDefaultTimeout();
}
@BeforeEach
void setup() {
MockitoAnnotations.initMocks(this);
// Receive link emits from the shared message processor; deserialization is stubbed
// to hand back a fresh mock per message.
when(amqpReceiveLink.receive()).thenReturn(messageProcessor.publishOn(Schedulers.single()));
when(amqpReceiveLink.getEndpointStates()).thenReturn(endpointProcessor);
when(messageSerializer.deserialize(any(), argThat(ServiceBusReceivedMessage.class::equals)))
.thenAnswer(invocation -> {
return mock(ServiceBusReceivedMessage.class);
});
ConnectionOptions connectionOptions = new ConnectionOptions(NAMESPACE, tokenCredential,
CbsAuthorizationType.SHARED_ACCESS_SIGNATURE, AmqpTransportType.AMQP, new AmqpRetryOptions(),
ProxyOptions.SYSTEM_DEFAULTS, Schedulers.parallel());
when(connection.getEndpointStates()).thenReturn(endpointProcessor);
// Mark the connection active so downstream operators can proceed.
endpointSink.next(AmqpEndpointState.ACTIVE);
when(connection.createReceiveLink(anyString(), anyString(),
any(ReceiveMode.class))).thenReturn(Mono.just(amqpReceiveLink));
when(connection.getManagementNode(anyString())).thenReturn(Mono.just(managementNode));
// Wire the mocked connection into a real connection processor and build the
// client under test with the configured prefetch.
ServiceBusConnectionProcessor connectionProcessor = Flux.<ServiceBusAmqpConnection>create(sink -> sink.next(connection))
.subscribeWith(new ServiceBusConnectionProcessor(connectionOptions.getFullyQualifiedNamespace(),
ENTITY_NAME, connectionOptions.getRetry()));
ReceiveMessageOptions receiveOptions = new ReceiveMessageOptions().setPrefetchCount(PREFETCH);
consumer = new ServiceBusReceiverAsyncClient(NAMESPACE, ENTITY_NAME, connectionProcessor, tracerProvider,
messageSerializer, receiveOptions);
}
@AfterEach
void teardown() {
// Release inline-mock state between tests, then close the client under test.
Mockito.framework().clearInlineMocks();
consumer.close();
}
/**
 * Verifies that peek returns a single message.
 */
@Test
void peekOneMessage() {
final int numberOfEvents = 1;
// Arrange: the management node returns one mocked message.
when(managementNode.peek())
.thenReturn(Mono.just(mock(ServiceBusReceivedMessage.class)));
// Act & Assert: exactly one message is emitted and the stream completes.
StepVerifier.create(consumer.peek())
.expectNextCount(numberOfEvents)
.verifyComplete();
}
/**
* Verifies that this peek one messages from a sequence Number.
*/
@Test
/**
* Verifies that this receives a number of messages. Verifies that the initial credits we add are equal to the
* prefetch value.
*/
@Test
void receivesNumberOfEvents() {
final int numberOfEvents = 1;
// Push events into the link only after subscription, then expect them downstream.
StepVerifier.create(consumer.receive().take(numberOfEvents))
.then(() -> sendMessages(messageProcessor.sink(), numberOfEvents))
.expectNextCount(numberOfEvents)
.verifyComplete();
// The initial credits placed on the link must equal the configured prefetch.
verify(amqpReceiveLink, times(1)).addCredits(PREFETCH);
}
// Pushes numberOfEvents synthetic AMQP messages into the sink, each carrying the
// shared payload and the tracking id so tests can correlate emissions.
private void sendMessages(FluxSink<Message> sink, int numberOfEvents) {
Map<String, String> map = Collections.singletonMap("SAMPLE_HEADER", "foo");
for (int i = 0; i < numberOfEvents; i++) {
Message message = getMessage(PAYLOAD_BYTES, messageTrackingUUID, map);
sink.next(message);
}
}
} | class ServiceBusReceiverAsyncClientTest {
private static final String PAYLOAD = "hello";
private static final byte[] PAYLOAD_BYTES = PAYLOAD.getBytes(UTF_8);
private static final int PREFETCH = 5;
private static final String NAMESPACE = "my-namespace-foo";
private static final String ENTITY_NAME = "queue-name";
private final String messageTrackingUUID = UUID.randomUUID().toString();
private final DirectProcessor<AmqpEndpointState> endpointProcessor = DirectProcessor.create();
private final FluxSink<AmqpEndpointState> endpointSink = endpointProcessor.sink(FluxSink.OverflowStrategy.BUFFER);
private final DirectProcessor<Message> messageProcessor = DirectProcessor.create();
@Mock
private AmqpReceiveLink amqpReceiveLink;
@Mock
private ServiceBusAmqpConnection connection;
@Mock
private TokenCredential tokenCredential;
@Mock
private MessageSerializer messageSerializer;
@Mock
private TracerProvider tracerProvider;
@Mock
private ServiceBusManagementNode managementNode;
@Mock
private ServiceBusReceivedMessage message1;
@Mock
private ServiceBusReceivedMessage message2;
private ServiceBusReceiverAsyncClient consumer;
@BeforeAll
static void beforeAll() {
StepVerifier.setDefaultTimeout(Duration.ofSeconds(10));
}
@AfterAll
static void afterAll() {
StepVerifier.resetDefaultTimeout();
}
@BeforeEach
void setup() {
MockitoAnnotations.initMocks(this);
when(amqpReceiveLink.receive()).thenReturn(messageProcessor.publishOn(Schedulers.single()));
when(amqpReceiveLink.getEndpointStates()).thenReturn(endpointProcessor);
when(messageSerializer.deserialize(any(), argThat(ServiceBusReceivedMessage.class::equals)))
.thenAnswer(invocation -> {
return mock(ServiceBusReceivedMessage.class);
});
ConnectionOptions connectionOptions = new ConnectionOptions(NAMESPACE, tokenCredential,
CbsAuthorizationType.SHARED_ACCESS_SIGNATURE, AmqpTransportType.AMQP, new AmqpRetryOptions(),
ProxyOptions.SYSTEM_DEFAULTS, Schedulers.parallel());
when(connection.getEndpointStates()).thenReturn(endpointProcessor);
endpointSink.next(AmqpEndpointState.ACTIVE);
when(connection.createReceiveLink(anyString(), anyString(),
any(ReceiveMode.class))).thenReturn(just(amqpReceiveLink));
when(connection.getManagementNode(anyString())).thenReturn(just(managementNode));
ServiceBusConnectionProcessor connectionProcessor = Flux.<ServiceBusAmqpConnection>create(sink -> sink.next(connection))
.subscribeWith(new ServiceBusConnectionProcessor(connectionOptions.getFullyQualifiedNamespace(),
ENTITY_NAME, connectionOptions.getRetry()));
ReceiveMessageOptions receiveOptions = new ReceiveMessageOptions().setPrefetchCount(PREFETCH);
consumer = new ServiceBusReceiverAsyncClient(NAMESPACE, ENTITY_NAME, connectionProcessor, tracerProvider,
messageSerializer, receiveOptions);
}
@AfterEach
void teardown() {
Mockito.framework().clearInlineMocks();
consumer.close();
}
/**
* Verifies that when the user calls peek more than once, each call returns a different message.
*/
@SuppressWarnings("unchecked")
@Test
void peekTwoMessages() {
    // Arrange: the management node hands back a different message on each peek call.
    // (Removed an unused local 'numberOfEvents' that was never read.)
    when(managementNode.peek())
        .thenReturn(just(message1), just(message2));
    // Act & Assert: consecutive peeks observe the two distinct messages in order.
    StepVerifier.create(consumer.peek())
        .expectNext(message1)
        .verifyComplete();
    StepVerifier.create(consumer.peek())
        .expectNext(message2)
        .verifyComplete();
}
/**
* Verifies that this peek one messages.
*/
@Test
void peekOneMessage() {
final int numberOfEvents = 1;
when(managementNode.peek())
.thenReturn(just(mock(ServiceBusReceivedMessage.class)));
StepVerifier.create(consumer.peek())
.expectNextCount(numberOfEvents)
.verifyComplete();
}
/**
* Verifies that this peek one messages from a sequence Number.
*/
@Test
/**
* Verifies that this receives a number of messages. Verifies that the initial credits we add are equal to the
* prefetch value.
*/
@Test
void receivesNumberOfEvents() {
final int numberOfEvents = 1;
StepVerifier.create(consumer.receive().take(numberOfEvents))
.then(() -> sendMessages(messageProcessor.sink(), numberOfEvents))
.expectNextCount(numberOfEvents)
.verifyComplete();
verify(amqpReceiveLink, times(1)).addCredits(PREFETCH);
}
// Pushes numberOfEvents synthetic AMQP messages into the sink, each carrying the
// shared payload and the tracking id so tests can correlate emissions.
private void sendMessages(FluxSink<Message> sink, int numberOfEvents) {
Map<String, String> map = Collections.singletonMap("SAMPLE_HEADER", "foo");
for (int i = 0; i < numberOfEvents; i++) {
Message message = getMessage(PAYLOAD_BYTES, messageTrackingUUID, map);
sink.next(message);
}
}
} |
Every time a peek operation is called, it'll create another management channel node. This is a bit wasteful if we keep getting the management node and have to recreate it. | public Mono<ServiceBusManagementNode> getManagementNode(String entityPath) {
if (isDisposed()) {
return Mono.error(logger.logExceptionAsError(new IllegalStateException(String.format(
"connectionId[%s]: Connection is disposed. Cannot get management instance for '%s'",
connectionId, entityPath))));
}
final ServiceBusManagementNode existing = managementNodes.get(entityPath);
if (existing != null) {
return Mono.just(existing);
}
return getReactorConnection().then(
Mono.fromCallable(() -> {
final ServiceBusManagementNode node = managementNodes.computeIfAbsent(entityPath, key -> {
final String sessionName = entityPath + "-" + MANAGEMENT_SESSION_NAME;
final String linkName = entityPath + "-" + MANAGEMENT_LINK_NAME;
final String address = entityPath + "/" + MANAGEMENT_ADDRESS;
logger.info("Creating management node. entityPath: [{}]. address: [{}]. linkName: [{}]",
entityPath, address, linkName);
TokenManager cbsBasedTokenManager = new AzureTokenManagerProvider(
CbsAuthorizationType.SHARED_ACCESS_SIGNATURE, fullyQualifiedNamespace, entityPath)
.getTokenManager(getClaimsBasedSecurityNode(), entityPath) ;
final Mono<RequestResponseChannel> requestResponseChannel =
createRequestResponseChannel(sessionName, linkName, address);
return new ManagementChannel(requestResponseChannel, entityPath, tokenCredential,
tokenManagerProvider, messageSerializer, scheduler, cbsBasedTokenManager);
});
return node;
}));
} | TokenManager cbsBasedTokenManager = new AzureTokenManagerProvider( | public Mono<ServiceBusManagementNode> getManagementNode(String entityPath) {
if (isDisposed()) {
return Mono.error(logger.logExceptionAsError(new IllegalStateException(String.format(
"connectionId[%s]: Connection is disposed. Cannot get management instance for '%s'",
connectionId, entityPath))));
}
final ServiceBusManagementNode existing = managementNodes.get(entityPath);
if (existing != null) {
return Mono.just(existing);
}
return getReactorConnection().then(
Mono.fromCallable(() -> {
final ServiceBusManagementNode node = managementNodes.computeIfAbsent(entityPath, key -> {
final String sessionName = entityPath + "-" + MANAGEMENT_SESSION_NAME;
final String linkName = entityPath + "-" + MANAGEMENT_LINK_NAME;
final String address = entityPath + "/" + MANAGEMENT_ADDRESS;
logger.info("Creating management node. entityPath: [{}]. address: [{}]. linkName: [{}]",
entityPath, address, linkName);
TokenManager cbsBasedTokenManager = new AzureTokenManagerProvider(
CbsAuthorizationType.SHARED_ACCESS_SIGNATURE, fullyQualifiedNamespace, entityPath)
.getTokenManager(getClaimsBasedSecurityNode(), entityPath);
final Mono<RequestResponseChannel> requestResponseChannel =
createRequestResponseChannel(sessionName, linkName, address);
return new ManagementChannel(requestResponseChannel, messageSerializer, scheduler,
cbsBasedTokenManager);
});
return node;
}));
} | class ServiceBusReactorAmqpConnection extends ReactorConnection implements ServiceBusAmqpConnection {
private static final String MANAGEMENT_SESSION_NAME = "mgmt-session";
private static final String MANAGEMENT_LINK_NAME = "mgmt";
private static final String MANAGEMENT_ADDRESS = "$management";
/** This is used in setting up the management channel and is always fixed.
* It is not used by SDK users trying to receive/send messages.*/
public static final SenderSettleMode MANAGEMENT_SEND_SETTLE_MODE = SenderSettleMode.SETTLED;
public static final ReceiverSettleMode MANAGEMENT_RECEIVE_SETTLE_MODE = ReceiverSettleMode.FIRST;
private final ClientLogger logger = new ClientLogger(ServiceBusReactorAmqpConnection.class);
/**
* Keeps track of the opened send links. Links are key'd by their entityPath. The send link for allowing the service
* load balance messages is the eventHubName.
*/
private final ConcurrentHashMap<String, AmqpSendLink> sendLinks = new ConcurrentHashMap<>();
private final ConcurrentHashMap<String, ServiceBusManagementNode> managementNodes = new ConcurrentHashMap<>();
private final String connectionId;
private final ReactorProvider reactorProvider;
private final ReactorHandlerProvider handlerProvider;
private final TokenManagerProvider tokenManagerProvider;
private final AmqpRetryOptions retryOptions;
private final MessageSerializer messageSerializer;
private final TokenCredential tokenCredential;
private final Scheduler scheduler;
private final String fullyQualifiedNamespace;
/**
* Creates a new AMQP connection that uses proton-j.
*
* @param connectionId Identifier for the connection.
* @param connectionOptions A set of options used to create the AMQP connection.
* @param reactorProvider Provides proton-j reactor instances.
* @param handlerProvider Provides {@link BaseHandler} to listen to proton-j reactor events.
* @param tokenManagerProvider Provides a token manager for authorizing with CBS node.
* @param messageSerializer Serializes and deserializes proton-j messages.
*/
public ServiceBusReactorAmqpConnection(String connectionId, ConnectionOptions connectionOptions,
ReactorProvider reactorProvider, ReactorHandlerProvider handlerProvider,
TokenManagerProvider tokenManagerProvider,
MessageSerializer messageSerializer, String product, String clientVersion) {
super(connectionId, connectionOptions, reactorProvider, handlerProvider, tokenManagerProvider,
messageSerializer, product, clientVersion,
MANAGEMENT_SEND_SETTLE_MODE, MANAGEMENT_RECEIVE_SETTLE_MODE);
this.connectionId = connectionId;
this.reactorProvider = reactorProvider;
this.handlerProvider = handlerProvider;
this.tokenManagerProvider = tokenManagerProvider;
this.retryOptions = connectionOptions.getRetry();
this.messageSerializer = messageSerializer;
this.tokenCredential = connectionOptions.getTokenCredential();
this.scheduler = connectionOptions.getScheduler();
this.fullyQualifiedNamespace = connectionOptions.getFullyQualifiedNamespace();
}
@Override
/**
* Creates or gets a send link. The same link is returned if there is an existing send link with the same {@code
* linkName}. Otherwise, a new link is created and returned.
*
* @param linkName The name of the link.
* @param entityPath The remote address to connect to for the message broker.
* @param retryOptions Options to use when creating the link.
* @return A new or existing send link that is connected to the given {@code entityPath}.
*/
@Override
public Mono<AmqpSendLink> createSendLink(String linkName, String entityPath, AmqpRetryOptions retryOptions) {
return createSession(entityPath).flatMap(session -> {
logger.verbose("Get or create producer for path: '{}'", entityPath);
final AmqpRetryPolicy retryPolicy = RetryUtil.getRetryPolicy(retryOptions);
return session.createProducer(linkName, entityPath, retryOptions.getTryTimeout(), retryPolicy)
.cast(AmqpSendLink.class);
});
}
/**
* Creates or gets an existing receive link. The same link is returned if there is an existing receive link with the
* same {@code linkName}. Otherwise, a new link is created and returned.
*
* @param linkName The name of the link.
* @param entityPath The remote address to connect to for the message broker.
* @param receiveMode Consumer options to use when creating the link.
* @return A new or existing receive link that is connected to the given {@code entityPath}.
*/
@Override
public Mono<AmqpReceiveLink> createReceiveLink(String linkName, String entityPath, ReceiveMode receiveMode) {
return createSession(entityPath).cast(ServiceBusSession.class)
.flatMap(session -> {
logger.verbose("Get or create consumer for path: '{}'", entityPath);
final AmqpRetryPolicy retryPolicy = RetryUtil.getRetryPolicy(retryOptions);
return session.createConsumer(linkName, entityPath, retryOptions.getTryTimeout(), retryPolicy,
receiveMode);
});
}
/**
 * Disposes of every cached send link before tearing down the underlying AMQP connection.
 */
@Override
public void dispose() {
    logger.info("Disposing of connection.");
    for (AmqpSendLink link : sendLinks.values()) {
        link.dispose();
    }
    sendLinks.clear();
    super.dispose();
}
@Override
// Builds a Service Bus-specific reactor session wired with this connection's CBS node,
// token manager provider, retry timeout, and serializer.
protected AmqpSession createSession(String sessionName, Session session, SessionHandler handler) {
return new ServiceBusReactorSession(session, handler, sessionName, reactorProvider, handlerProvider,
getClaimsBasedSecurityNode(), tokenManagerProvider, retryOptions.getTryTimeout(), messageSerializer);
}
} | class ServiceBusReactorAmqpConnection extends ReactorConnection implements ServiceBusAmqpConnection {
private static final String MANAGEMENT_SESSION_NAME = "mgmt-session";
private static final String MANAGEMENT_LINK_NAME = "mgmt";
private static final String MANAGEMENT_ADDRESS = "$management";
private final ClientLogger logger = new ClientLogger(ServiceBusReactorAmqpConnection.class);
/**
* Keeps track of the opened send links. Links are key'd by their entityPath. The send link for allowing the service
* load balance messages is the eventHubName.
*/
private final ConcurrentHashMap<String, AmqpSendLink> sendLinks = new ConcurrentHashMap<>();
private final ConcurrentHashMap<String, ServiceBusManagementNode> managementNodes = new ConcurrentHashMap<>();
private final String connectionId;
private final ReactorProvider reactorProvider;
private final ReactorHandlerProvider handlerProvider;
private final TokenManagerProvider tokenManagerProvider;
private final AmqpRetryOptions retryOptions;
private final MessageSerializer messageSerializer;
private final TokenCredential tokenCredential;
private final Scheduler scheduler;
private final String fullyQualifiedNamespace;
/**
* Creates a new AMQP connection that uses proton-j.
*
* @param connectionId Identifier for the connection.
* @param connectionOptions A set of options used to create the AMQP connection.
* @param reactorProvider Provides proton-j reactor instances.
* @param handlerProvider Provides {@link BaseHandler} to listen to proton-j reactor events.
* @param tokenManagerProvider Provides a token manager for authorizing with CBS node.
* @param messageSerializer Serializes and deserializes proton-j messages.
*/
public ServiceBusReactorAmqpConnection(String connectionId, ConnectionOptions connectionOptions,
ReactorProvider reactorProvider, ReactorHandlerProvider handlerProvider,
TokenManagerProvider tokenManagerProvider,
MessageSerializer messageSerializer, String product, String clientVersion) {
super(connectionId, connectionOptions, reactorProvider, handlerProvider, tokenManagerProvider,
messageSerializer, product, clientVersion,
SenderSettleMode.SETTLED, ReceiverSettleMode.FIRST);
this.connectionId = connectionId;
this.reactorProvider = reactorProvider;
this.handlerProvider = handlerProvider;
this.tokenManagerProvider = tokenManagerProvider;
this.retryOptions = connectionOptions.getRetry();
this.messageSerializer = messageSerializer;
this.tokenCredential = connectionOptions.getTokenCredential();
this.scheduler = connectionOptions.getScheduler();
this.fullyQualifiedNamespace = connectionOptions.getFullyQualifiedNamespace();
}
@Override
/**
* Creates or gets a send link. The same link is returned if there is an existing send link with the same {@code
* linkName}. Otherwise, a new link is created and returned.
*
* @param linkName The name of the link.
* @param entityPath The remote address to connect to for the message broker.
* @param retryOptions Options to use when creating the link.
* @return A new or existing send link that is connected to the given {@code entityPath}.
*/
@Override
public Mono<AmqpSendLink> createSendLink(String linkName, String entityPath, AmqpRetryOptions retryOptions) {
return createSession(entityPath).flatMap(session -> {
logger.verbose("Get or create producer for path: '{}'", entityPath);
final AmqpRetryPolicy retryPolicy = RetryUtil.getRetryPolicy(retryOptions);
return session.createProducer(linkName, entityPath, retryOptions.getTryTimeout(), retryPolicy)
.cast(AmqpSendLink.class);
});
}
/**
* Creates or gets an existing receive link. The same link is returned if there is an existing receive link with the
* same {@code linkName}. Otherwise, a new link is created and returned.
*
* @param linkName The name of the link.
* @param entityPath The remote address to connect to for the message broker.
* @param receiveMode Consumer options to use when creating the link.
* @return A new or existing receive link that is connected to the given {@code entityPath}.
*/
@Override
public Mono<AmqpReceiveLink> createReceiveLink(String linkName, String entityPath, ReceiveMode receiveMode) {
return createSession(entityPath).cast(ServiceBusSession.class)
.flatMap(session -> {
logger.verbose("Get or create consumer for path: '{}'", entityPath);
final AmqpRetryPolicy retryPolicy = RetryUtil.getRetryPolicy(retryOptions);
return session.createConsumer(linkName, entityPath, retryOptions.getTryTimeout(), retryPolicy,
receiveMode);
});
}
@Override
public void dispose() {
logger.info("Disposing of connection.");
sendLinks.forEach((key, value) -> value.dispose());
sendLinks.clear();
super.dispose();
}
@Override
protected AmqpSession createSession(String sessionName, Session session, SessionHandler handler) {
return new ServiceBusReactorSession(session, handler, sessionName, reactorProvider, handlerProvider,
getClaimsBasedSecurityNode(), tokenManagerProvider, retryOptions.getTryTimeout(), messageSerializer);
}
} |
Move these to tests and use stepverifier. | public void peekOneMessage() {
final int numberOfEvents = 1;
String connectionString = System.getenv("AZURE_SERVICEBUS_CONNECTION_STRING")
+ ";EntityPath=hemant-test1";
log(connectionString);
ServiceBusReceiverAsyncClient queueReceiverAsyncClient = new ServiceBusClientBuilder()
.connectionString(connectionString)
.scheduler(Schedulers.parallel())
.buildAsyncReceiverClient();
queueReceiverAsyncClient.peek()
.doOnNext(receivedMessage -> {
System.out.println("!!!!!! doOnNext Got message from queue: " + receivedMessage.getBodyAsString());
})
.subscribe(receivedMessage -> {
System.out.println("!!!!!! subscribe Got message from queue: " + receivedMessage.getBodyAsString());
},
error -> {
System.err.println("!!!!!! Error occurred while consuming messages: " + error);
});
try {
Thread.sleep(90000);
} catch (Exception ex) {
}
System.out.println("!!!!!! Completed.");
} | Thread.sleep(90000); | public void peekOneMessage() {
final int numberOfEvents = 1;
StepVerifier.create(consumer.peek())
.then(() -> sendMessages(numberOfEvents))
.expectNextCount(numberOfEvents)
.verifyComplete();
verify(amqpReceiveLink, times(1));
} | class ServiceBusReceiverAsyncClientPeek {
private static final String PAYLOAD = "hello";
private static final byte[] PAYLOAD_BYTES = PAYLOAD.getBytes(UTF_8);
private static final int PREFETCH = 1;
private final ClientLogger logger = new ClientLogger(ServiceBusReceiverAsyncClientPeek.class);
private final String messageTrackingUUID = UUID.randomUUID().toString();
private final DirectProcessor<org.apache.qpid.proton.message.Message> messageProcessor = DirectProcessor.create();
private final DirectProcessor<Throwable> errorProcessor = DirectProcessor.create();
private final DirectProcessor<AmqpEndpointState> endpointProcessor = DirectProcessor.create();
private final DirectProcessor<AmqpShutdownSignal> shutdownProcessor = DirectProcessor.create();
@Mock
private AmqpReceiveLink amqpReceiveLink;
@Captor
private ArgumentCaptor<Supplier<Integer>> creditSupplier;
private Mono<AmqpReceiveLink> receiveLinkMono;
private List<org.apache.qpid.proton.message.Message> messages = new ArrayList<>();
private ServiceBusReceiverAsyncClient consumer;
@BeforeEach
public void setup() {
MockitoAnnotations.initMocks(this);
receiveLinkMono = Mono.fromCallable(() -> amqpReceiveLink);
when(amqpReceiveLink.receive()).thenReturn(messageProcessor);
String connectionString = System.getenv("AZURE_SERVICEBUS_CONNECTION_STRING")
+ ";EntityPath=hemant-test1";
consumer = new ServiceBusClientBuilder()
.connectionString(connectionString)
.buildAsyncReceiverClient();
}
@AfterEach
public void teardown() {
messages.clear();
Mockito.framework().clearInlineMocks();
consumer.close();
}
/**
* Verifies that this receives a number of events. Verifies that the initial credits we add are equal to the
* prefetch value.
*/
@Test
public void receivesNumberOfEvents() {
final int numberOfEvents = 2;
StepVerifier.create(consumer.receive().take(numberOfEvents))
.then(() -> sendMessages(numberOfEvents))
.expectNextCount(numberOfEvents)
.verifyComplete();
verify(amqpReceiveLink, times(1)).addCredits(PREFETCH);
}
@Test
private void sendMessages(int numberOfEvents) {
FluxSink<org.apache.qpid.proton.message.Message> sink = messageProcessor.sink();
for (int i = 0; i < numberOfEvents; i++) {
sink.next(getMessage(PAYLOAD_BYTES, messageTrackingUUID));
}
}
static final Instant ENQUEUED_TIME = Instant.ofEpochSecond(1561344661);
static final Long OFFSET = 1534L;
static final String PARTITION_KEY = "a-partition-key";
static final Long SEQUENCE_NUMBER = 1025L;
static final String OTHER_SYSTEM_PROPERTY = "Some-other-system-property";
static final Boolean OTHER_SYSTEM_PROPERTY_VALUE = Boolean.TRUE;
static final Map<String, Object> APPLICATION_PROPERTIES = new HashMap<>();
static final String MESSAGE_TRACKING_ID = "message-tracking-id";
static Symbol getSymbol(AmqpMessageConstant messageConstant) {
return Symbol.getSymbol(messageConstant.getValue());
}
/**
* Creates a mock message with the contents provided.
*/
static org.apache.qpid.proton.message.Message getMessage(byte[] contents, String messageTrackingValue) {
final Map<Symbol, Object> systemProperties = new HashMap<>();
systemProperties.put(getSymbol(AmqpMessageConstant.OFFSET_ANNOTATION_NAME), String.valueOf(OFFSET));
systemProperties.put(getSymbol(AmqpMessageConstant.PARTITION_KEY_ANNOTATION_NAME), PARTITION_KEY);
systemProperties.put(getSymbol(AmqpMessageConstant.ENQUEUED_TIME_UTC_ANNOTATION_NAME),
Date.from(ENQUEUED_TIME));
systemProperties.put(getSymbol(AmqpMessageConstant.SEQUENCE_NUMBER_ANNOTATION_NAME), SEQUENCE_NUMBER);
systemProperties.put(Symbol.getSymbol(OTHER_SYSTEM_PROPERTY), OTHER_SYSTEM_PROPERTY_VALUE);
final Message message = Proton.message();
message.setMessageAnnotations(new MessageAnnotations(systemProperties));
Map<String, Object> applicationProperties = new HashMap<>();
APPLICATION_PROPERTIES.forEach(applicationProperties::put);
if (!CoreUtils.isNullOrEmpty(messageTrackingValue)) {
applicationProperties.put(MESSAGE_TRACKING_ID, messageTrackingValue);
}
message.setApplicationProperties(new ApplicationProperties(applicationProperties));
message.setBody(new Data(new Binary(contents)));
return message;
}
private void log(String message) {
System.out.println(message);
}
} | class ServiceBusReceiverAsyncClientPeek {
private static final String PAYLOAD = "hello";
private static final byte[] PAYLOAD_BYTES = PAYLOAD.getBytes(UTF_8);
private static final int PREFETCH = 1;
private final ClientLogger logger = new ClientLogger(ServiceBusReceiverAsyncClientPeek.class);
private final String messageTrackingUUID = UUID.randomUUID().toString();
private final DirectProcessor<org.apache.qpid.proton.message.Message> messageProcessor = DirectProcessor.create();
private final DirectProcessor<Throwable> errorProcessor = DirectProcessor.create();
private final DirectProcessor<AmqpEndpointState> endpointProcessor = DirectProcessor.create();
private final DirectProcessor<AmqpShutdownSignal> shutdownProcessor = DirectProcessor.create();
@Mock
private AmqpReceiveLink amqpReceiveLink;
@Captor
private ArgumentCaptor<Supplier<Integer>> creditSupplier;
private Mono<AmqpReceiveLink> receiveLinkMono;
private List<org.apache.qpid.proton.message.Message> messages = new ArrayList<>();
private ServiceBusReceiverAsyncClient consumer;
@BeforeEach
public void setup() {
MockitoAnnotations.initMocks(this);
receiveLinkMono = Mono.fromCallable(() -> amqpReceiveLink);
when(amqpReceiveLink.receive()).thenReturn(messageProcessor);
String connectionString = System.getenv("AZURE_SERVICEBUS_CONNECTION_STRING")
+ ";EntityPath=hemant-test1";
consumer = new ServiceBusClientBuilder()
.connectionString(connectionString)
.buildAsyncReceiverClient();
}
@AfterEach
public void teardown() {
messages.clear();
Mockito.framework().clearInlineMocks();
consumer.close();
}
/**
* Verifies that this receives a number of events. Verifies that the initial credits we add are equal to the
* prefetch value.
*/
@Test
public void receivesNumberOfEvents() {
final int numberOfEvents = 2;
StepVerifier.create(consumer.receive().take(numberOfEvents))
.then(() -> sendMessages(numberOfEvents))
.expectNextCount(numberOfEvents)
.verifyComplete();
verify(amqpReceiveLink, times(1)).addCredits(PREFETCH);
}
@Test
private void sendMessages(int numberOfEvents) {
FluxSink<org.apache.qpid.proton.message.Message> sink = messageProcessor.sink();
for (int i = 0; i < numberOfEvents; i++) {
sink.next(getMessage(PAYLOAD_BYTES, messageTrackingUUID));
}
}
static final Instant ENQUEUED_TIME = Instant.ofEpochSecond(1561344661);
static final Long OFFSET = 1534L;
static final String PARTITION_KEY = "a-partition-key";
static final Long SEQUENCE_NUMBER = 1025L;
static final String OTHER_SYSTEM_PROPERTY = "Some-other-system-property";
static final Boolean OTHER_SYSTEM_PROPERTY_VALUE = Boolean.TRUE;
static final Map<String, Object> APPLICATION_PROPERTIES = new HashMap<>();
static final String MESSAGE_TRACKING_ID = "message-tracking-id";
static Symbol getSymbol(AmqpMessageConstant messageConstant) {
return Symbol.getSymbol(messageConstant.getValue());
}
/**
* Creates a mock message with the contents provided.
*/
static org.apache.qpid.proton.message.Message getMessage(byte[] contents, String messageTrackingValue) {
final Map<Symbol, Object> systemProperties = new HashMap<>();
systemProperties.put(getSymbol(AmqpMessageConstant.OFFSET_ANNOTATION_NAME), String.valueOf(OFFSET));
systemProperties.put(getSymbol(AmqpMessageConstant.PARTITION_KEY_ANNOTATION_NAME), PARTITION_KEY);
systemProperties.put(getSymbol(AmqpMessageConstant.ENQUEUED_TIME_UTC_ANNOTATION_NAME),
Date.from(ENQUEUED_TIME));
systemProperties.put(getSymbol(AmqpMessageConstant.SEQUENCE_NUMBER_ANNOTATION_NAME), SEQUENCE_NUMBER);
systemProperties.put(Symbol.getSymbol(OTHER_SYSTEM_PROPERTY), OTHER_SYSTEM_PROPERTY_VALUE);
final Message message = Proton.message();
message.setMessageAnnotations(new MessageAnnotations(systemProperties));
Map<String, Object> applicationProperties = new HashMap<>();
APPLICATION_PROPERTIES.forEach(applicationProperties::put);
if (!CoreUtils.isNullOrEmpty(messageTrackingValue)) {
applicationProperties.put(MESSAGE_TRACKING_ID, messageTrackingValue);
}
message.setApplicationProperties(new ApplicationProperties(applicationProperties));
message.setBody(new Data(new Binary(contents)));
return message;
}
private void log(String message) {
System.out.println(message);
}
} |
sender.send(message).then(receiver.peek()) is probably what you want. thenMany suggests it returns a flux. | void peekMessage() {
final String messageId = UUID.randomUUID().toString();
final String contents = "Some-contents";
final ServiceBusMessage message = TestUtils.getServiceBusMessage(contents, messageId, 0);
final ReceiveMessageOptions options = new ReceiveMessageOptions().setAutoComplete(true);
receiver = createBuilder()
.receiveMessageOptions(options)
.buildAsyncReceiverClient();
StepVerifier.create(sender.send(message).thenMany(receiver.peek()))
.assertNext(receivedMessage -> {
Assertions.assertEquals(contents, receivedMessage.getBodyAsString());
Assertions.assertTrue(receivedMessage.getProperties().containsKey(MESSAGE_TRACKING_ID));
Assertions.assertEquals(messageId, receivedMessage.getProperties().get(MESSAGE_TRACKING_ID));
})
.verifyComplete();
} | StepVerifier.create(sender.send(message).thenMany(receiver.peek())) | void peekMessage() {
final String messageId = UUID.randomUUID().toString();
final String contents = "Some-contents";
final ServiceBusMessage message = TestUtils.getServiceBusMessage(contents, messageId, 0);
StepVerifier.create(sender.send(message).then(receiver.peek()))
.assertNext(receivedMessage -> {
Assertions.assertEquals(contents, receivedMessage.getBodyAsString());
Assertions.assertTrue(receivedMessage.getProperties().containsKey(MESSAGE_TRACKING_ID));
Assertions.assertEquals(messageId, receivedMessage.getProperties().get(MESSAGE_TRACKING_ID));
})
.verifyComplete();
} | class ServiceBusReceiverAsyncClientIntegrationTest extends IntegrationTestBase {
private ServiceBusReceiverAsyncClient receiver;
private ServiceBusSenderAsyncClient sender;
ServiceBusReceiverAsyncClientIntegrationTest() {
super(new ClientLogger(ServiceBusReceiverAsyncClientIntegrationTest.class));
}
@Override
protected void beforeTest() {
sender = createBuilder().buildAsyncSenderClient();
}
@Override
protected void afterTest() {
dispose(receiver, sender);
}
/**
* Verifies that we can send and receive a message.
*/
@Test
void receiveMessageAutoComplete() {
final String messageId = UUID.randomUUID().toString();
final String contents = "Some-contents";
final ServiceBusMessage message = TestUtils.getServiceBusMessage(contents, messageId, 0);
final ReceiveMessageOptions options = new ReceiveMessageOptions().setAutoComplete(true);
receiver = createBuilder()
.receiveMessageOptions(options)
.buildAsyncReceiverClient();
StepVerifier.create(sender.send(message).thenMany(receiver.receive().take(1)))
.assertNext(receivedMessage -> {
Assertions.assertEquals(contents, receivedMessage.getBodyAsString());
Assertions.assertTrue(receivedMessage.getProperties().containsKey(MESSAGE_TRACKING_ID));
Assertions.assertEquals(messageId, receivedMessage.getProperties().get(MESSAGE_TRACKING_ID));
})
.verifyComplete();
}
/**
* Verifies that we can send and peek a message.
*/
@Test
} | class ServiceBusReceiverAsyncClientIntegrationTest extends IntegrationTestBase {
private ServiceBusReceiverAsyncClient receiver;
private ServiceBusSenderAsyncClient sender;
private ReceiveMessageOptions receiveMessageOptions;
ServiceBusReceiverAsyncClientIntegrationTest() {
super(new ClientLogger(ServiceBusReceiverAsyncClientIntegrationTest.class));
receiveMessageOptions = new ReceiveMessageOptions().setAutoComplete(true);
}
@Override
protected void beforeTest() {
sender = createBuilder().buildAsyncSenderClient();
receiver = createBuilder()
.receiveMessageOptions(receiveMessageOptions)
.buildAsyncReceiverClient();
}
@Override
protected void afterTest() {
dispose(receiver, sender);
}
/**
* Verifies that we can send and receive a message.
*/
@Test
void receiveMessageAutoComplete() {
final String messageId = UUID.randomUUID().toString();
final String contents = "Some-contents";
final ServiceBusMessage message = TestUtils.getServiceBusMessage(contents, messageId, 0);
StepVerifier.create(sender.send(message).thenMany(receiver.receive().take(1)))
.assertNext(receivedMessage -> {
Assertions.assertEquals(contents, receivedMessage.getBodyAsString());
Assertions.assertTrue(receivedMessage.getProperties().containsKey(MESSAGE_TRACKING_ID));
Assertions.assertEquals(messageId, receivedMessage.getProperties().get(MESSAGE_TRACKING_ID));
})
.verifyComplete();
}
/**
* Verifies that we can send and peek a message.
*/
@Test
/**
* Verifies that we can send and peek a message.
*/
@Test
void peekFromSequencenumberMessage() {
final long fromSequenceNumber = 1;
final String messageId = UUID.randomUUID().toString();
final String contents = "Some-contents";
final ServiceBusMessage message = TestUtils.getServiceBusMessage(contents, messageId, 0);
StepVerifier.create(sender.send(message).then(receiver.peek(fromSequenceNumber)))
.assertNext(receivedMessage -> {
Assertions.assertEquals(contents, receivedMessage.getBodyAsString());
Assertions.assertTrue(receivedMessage.getProperties().containsKey(MESSAGE_TRACKING_ID));
Assertions.assertEquals(messageId, receivedMessage.getProperties().get(MESSAGE_TRACKING_ID));
})
.verifyComplete();
}
} |
When you return inline mocks like this, you need to add the Mockito.clearInlinMocks() or else it won't be garbage collected. | void peekWithSequenceOneMessage() {
final int numberOfEvents = 1;
final int fromSequenceNumber = 10;
when(managementNode.peek(fromSequenceNumber))
.thenReturn(Mono.just(mock(ServiceBusReceivedMessage.class)));
StepVerifier.create(consumer.peek(fromSequenceNumber))
.expectNextCount(numberOfEvents)
.verifyComplete();
} | .thenReturn(Mono.just(mock(ServiceBusReceivedMessage.class))); | void peekWithSequenceOneMessage() {
final int numberOfEvents = 1;
final int fromSequenceNumber = 10;
when(managementNode.peek(fromSequenceNumber))
.thenReturn(just(mock(ServiceBusReceivedMessage.class)));
StepVerifier.create(consumer.peek(fromSequenceNumber))
.expectNextCount(numberOfEvents)
.verifyComplete();
} | class ServiceBusReceiverAsyncClientTest {
private static final String PAYLOAD = "hello";
private static final byte[] PAYLOAD_BYTES = PAYLOAD.getBytes(UTF_8);
private static final int PREFETCH = 5;
private static final String NAMESPACE = "my-namespace-foo";
private static final String ENTITY_NAME = "queue-name";
private final String messageTrackingUUID = UUID.randomUUID().toString();
private final DirectProcessor<AmqpEndpointState> endpointProcessor = DirectProcessor.create();
private final FluxSink<AmqpEndpointState> endpointSink = endpointProcessor.sink(FluxSink.OverflowStrategy.BUFFER);
private final DirectProcessor<Message> messageProcessor = DirectProcessor.create();
@Mock
private AmqpReceiveLink amqpReceiveLink;
@Mock
private ServiceBusAmqpConnection connection;
@Mock
private TokenCredential tokenCredential;
@Mock
private MessageSerializer messageSerializer;
@Mock
private TracerProvider tracerProvider;
@Mock
private ServiceBusManagementNode managementNode;
private ServiceBusReceiverAsyncClient consumer;
@BeforeAll
static void beforeAll() {
StepVerifier.setDefaultTimeout(Duration.ofSeconds(10));
}
@AfterAll
static void afterAll() {
StepVerifier.resetDefaultTimeout();
}
@BeforeEach
void setup() {
MockitoAnnotations.initMocks(this);
when(amqpReceiveLink.receive()).thenReturn(messageProcessor.publishOn(Schedulers.single()));
when(amqpReceiveLink.getEndpointStates()).thenReturn(endpointProcessor);
when(messageSerializer.deserialize(any(), argThat(ServiceBusReceivedMessage.class::equals)))
.thenAnswer(invocation -> {
return mock(ServiceBusReceivedMessage.class);
});
ConnectionOptions connectionOptions = new ConnectionOptions(NAMESPACE, tokenCredential,
CbsAuthorizationType.SHARED_ACCESS_SIGNATURE, AmqpTransportType.AMQP, new AmqpRetryOptions(),
ProxyOptions.SYSTEM_DEFAULTS, Schedulers.parallel());
when(connection.getEndpointStates()).thenReturn(endpointProcessor);
endpointSink.next(AmqpEndpointState.ACTIVE);
when(connection.createReceiveLink(anyString(), anyString(),
any(ReceiveMode.class))).thenReturn(Mono.just(amqpReceiveLink));
when(connection.getManagementNode(anyString())).thenReturn(Mono.just(managementNode));
ServiceBusConnectionProcessor connectionProcessor = Flux.<ServiceBusAmqpConnection>create(sink -> sink.next(connection))
.subscribeWith(new ServiceBusConnectionProcessor(connectionOptions.getFullyQualifiedNamespace(),
ENTITY_NAME, connectionOptions.getRetry()));
ReceiveMessageOptions receiveOptions = new ReceiveMessageOptions().setPrefetchCount(PREFETCH);
consumer = new ServiceBusReceiverAsyncClient(NAMESPACE, ENTITY_NAME, connectionProcessor, tracerProvider,
messageSerializer, receiveOptions);
}
@AfterEach
void teardown() {
Mockito.framework().clearInlineMocks();
consumer.close();
}
/**
* Verifies that this peek one messages.
*/
@Test
void peekOneMessage() {
final int numberOfEvents = 1;
when(managementNode.peek())
.thenReturn(Mono.just(mock(ServiceBusReceivedMessage.class)));
StepVerifier.create(consumer.peek())
.expectNextCount(numberOfEvents)
.verifyComplete();
}
/**
* Verifies that this peek one messages from a sequence Number.
*/
@Test
/**
* Verifies that this receives a number of messages. Verifies that the initial credits we add are equal to the
* prefetch value.
*/
@Test
void receivesNumberOfEvents() {
final int numberOfEvents = 1;
StepVerifier.create(consumer.receive().take(numberOfEvents))
.then(() -> sendMessages(messageProcessor.sink(), numberOfEvents))
.expectNextCount(numberOfEvents)
.verifyComplete();
verify(amqpReceiveLink, times(1)).addCredits(PREFETCH);
}
private void sendMessages(FluxSink<Message> sink, int numberOfEvents) {
Map<String, String> map = Collections.singletonMap("SAMPLE_HEADER", "foo");
for (int i = 0; i < numberOfEvents; i++) {
Message message = getMessage(PAYLOAD_BYTES, messageTrackingUUID, map);
sink.next(message);
}
}
} | class ServiceBusReceiverAsyncClientTest {
private static final String PAYLOAD = "hello";
private static final byte[] PAYLOAD_BYTES = PAYLOAD.getBytes(UTF_8);
private static final int PREFETCH = 5;
private static final String NAMESPACE = "my-namespace-foo";
private static final String ENTITY_NAME = "queue-name";
private final String messageTrackingUUID = UUID.randomUUID().toString();
private final DirectProcessor<AmqpEndpointState> endpointProcessor = DirectProcessor.create();
private final FluxSink<AmqpEndpointState> endpointSink = endpointProcessor.sink(FluxSink.OverflowStrategy.BUFFER);
private final DirectProcessor<Message> messageProcessor = DirectProcessor.create();
@Mock
private AmqpReceiveLink amqpReceiveLink;
@Mock
private ServiceBusAmqpConnection connection;
@Mock
private TokenCredential tokenCredential;
@Mock
private MessageSerializer messageSerializer;
@Mock
private TracerProvider tracerProvider;
@Mock
private ServiceBusManagementNode managementNode;
@Mock
private ServiceBusReceivedMessage message1;
@Mock
private ServiceBusReceivedMessage message2;
private ServiceBusReceiverAsyncClient consumer;
@BeforeAll
static void beforeAll() {
StepVerifier.setDefaultTimeout(Duration.ofSeconds(10));
}
@AfterAll
static void afterAll() {
StepVerifier.resetDefaultTimeout();
}
@BeforeEach
void setup() {
MockitoAnnotations.initMocks(this);
when(amqpReceiveLink.receive()).thenReturn(messageProcessor.publishOn(Schedulers.single()));
when(amqpReceiveLink.getEndpointStates()).thenReturn(endpointProcessor);
when(messageSerializer.deserialize(any(), argThat(ServiceBusReceivedMessage.class::equals)))
.thenAnswer(invocation -> {
return mock(ServiceBusReceivedMessage.class);
});
ConnectionOptions connectionOptions = new ConnectionOptions(NAMESPACE, tokenCredential,
CbsAuthorizationType.SHARED_ACCESS_SIGNATURE, AmqpTransportType.AMQP, new AmqpRetryOptions(),
ProxyOptions.SYSTEM_DEFAULTS, Schedulers.parallel());
when(connection.getEndpointStates()).thenReturn(endpointProcessor);
endpointSink.next(AmqpEndpointState.ACTIVE);
when(connection.createReceiveLink(anyString(), anyString(),
any(ReceiveMode.class))).thenReturn(just(amqpReceiveLink));
when(connection.getManagementNode(anyString())).thenReturn(just(managementNode));
ServiceBusConnectionProcessor connectionProcessor = Flux.<ServiceBusAmqpConnection>create(sink -> sink.next(connection))
.subscribeWith(new ServiceBusConnectionProcessor(connectionOptions.getFullyQualifiedNamespace(),
ENTITY_NAME, connectionOptions.getRetry()));
ReceiveMessageOptions receiveOptions = new ReceiveMessageOptions().setPrefetchCount(PREFETCH);
consumer = new ServiceBusReceiverAsyncClient(NAMESPACE, ENTITY_NAME, connectionProcessor, tracerProvider,
messageSerializer, receiveOptions);
}
@AfterEach
void teardown() {
Mockito.framework().clearInlineMocks();
consumer.close();
}
/**
* Verifies that when user calls peek more than one time, It returns different object.
*/
@SuppressWarnings("unchecked")
@Test
void peekTwoMessages() {
/* Arrange */
final int numberOfEvents = 1;
when(managementNode.peek())
.thenReturn(just(message1), just(message2));
StepVerifier.create(consumer.peek())
.expectNext(message1)
.verifyComplete();
StepVerifier.create(consumer.peek())
.expectNext(message2)
.verifyComplete();
}
/**
* Verifies that this peek one messages.
*/
@Test
void peekOneMessage() {
final int numberOfEvents = 1;
when(managementNode.peek())
.thenReturn(just(mock(ServiceBusReceivedMessage.class)));
StepVerifier.create(consumer.peek())
.expectNextCount(numberOfEvents)
.verifyComplete();
}
/**
* Verifies that this peek one messages from a sequence Number.
*/
@Test
/**
* Verifies that this receives a number of messages. Verifies that the initial credits we add are equal to the
* prefetch value.
*/
@Test
void receivesNumberOfEvents() {
final int numberOfEvents = 1;
StepVerifier.create(consumer.receive().take(numberOfEvents))
.then(() -> sendMessages(messageProcessor.sink(), numberOfEvents))
.expectNextCount(numberOfEvents)
.verifyComplete();
verify(amqpReceiveLink, times(1)).addCredits(PREFETCH);
}
private void sendMessages(FluxSink<Message> sink, int numberOfEvents) {
Map<String, String> map = Collections.singletonMap("SAMPLE_HEADER", "foo");
for (int i = 0; i < numberOfEvents; i++) {
Message message = getMessage(PAYLOAD_BYTES, messageTrackingUUID, map);
sink.next(message);
}
}
} |
v2 should not use PagedList. This class is provided since return type in v2 is PagedIterable, which is not compatibly with v1 PagedList. | protected void loadNextPage() {
this.items.addAll(pagedResponseIterator.next().getValue());
} | this.items.addAll(pagedResponseIterator.next().getValue()); | protected void loadNextPage() {
this.items.addAll(pagedResponseIterator.next().getValue());
} | class PagedList<E> implements List<E> {
/** The items retrieved. */
private final List<E> items;
/** The paged response iterator for not retrieved items. */
private Iterator<PagedResponse<E>> pagedResponseIterator;
/**
* Creates an instance of PagedList.
*/
public PagedList() {
items = new ArrayList<>();
pagedResponseIterator = Collections.emptyIterator();
}
/**
* Creates an instance of PagedList from a {@link PagedIterable}.
*
* @param pagedIterable the {@link PagedIterable} object.
*/
public PagedList(PagedIterable<E> pagedIterable) {
items = new ArrayList<>();
Objects.requireNonNull(pagedIterable, "'pagedIterable' cannot be null.");
this.pagedResponseIterator = pagedIterable.iterableByPage().iterator();
}
/**
* If there are more pages available.
*
* @return true if there are more pages to load. False otherwise.
*/
protected boolean hasNextPage() {
return pagedResponseIterator.hasNext();
}
/**
* Loads a page from next page link.
* The exceptions are wrapped into Java Runtime exceptions.
*/
/**
* Keep loading the next page from the next page link until all items are loaded.
*/
public void loadAll() {
while (hasNextPage()) {
loadNextPage();
}
}
@Override
public int size() {
loadAll();
return items.size();
}
@Override
public boolean isEmpty() {
return items.isEmpty() && !hasNextPage();
}
@Override
public boolean contains(Object o) {
return indexOf(o) >= 0;
}
@Override
public Iterator<E> iterator() {
return new ListItr(0);
}
@Override
public Object[] toArray() {
loadAll();
return items.toArray();
}
@Override
public <T> T[] toArray(T[] a) {
loadAll();
return items.toArray(a);
}
@Override
public boolean add(E e) {
loadAll();
return items.add(e);
}
@Override
public boolean remove(Object o) {
int index = indexOf(o);
if (index != -1) {
items.remove(index);
return true;
} else {
return false;
}
}
@Override
public boolean containsAll(Collection<?> c) {
for (Object e : c) {
if (!contains(e)) {
return false;
}
}
return true;
}
@Override
public boolean addAll(Collection<? extends E> c) {
return items.addAll(c);
}
@Override
public boolean addAll(int index, Collection<? extends E> c) {
return items.addAll(index, c);
}
@Override
public boolean removeAll(Collection<?> c) {
return items.removeAll(c);
}
@Override
public boolean retainAll(Collection<?> c) {
return items.retainAll(c);
}
@Override
public void clear() {
items.clear();
pagedResponseIterator = Collections.emptyIterator();
}
@Override
public E get(int index) {
tryLoadToIndex(index);
return items.get(index);
}
@Override
public E set(int index, E element) {
tryLoadToIndex(index);
return items.set(index, element);
}
@Override
public void add(int index, E element) {
items.add(index, element);
}
@Override
public E remove(int index) {
tryLoadToIndex(index);
return items.remove(index);
}
@Override
public int indexOf(Object o) {
int index = items.indexOf(o);
if (index != -1) {
return index;
}
while (hasNextPage()) {
int itemsSize = items.size();
List<E> nextPageItems = pagedResponseIterator.next().getValue();
this.items.addAll(nextPageItems);
index = nextPageItems.indexOf(o);
if (index != -1) {
index = itemsSize + index;
return index;
}
}
return -1;
}
@Override
public int lastIndexOf(Object o) {
loadAll();
return items.lastIndexOf(o);
}
@Override
public ListIterator<E> listIterator() {
return new ListItr(0);
}
@Override
public ListIterator<E> listIterator(int index) {
tryLoadToIndex(index);
return new ListItr(index);
}
@Override
public List<E> subList(int fromIndex, int toIndex) {
while ((fromIndex >= items.size() || toIndex >= items.size()) && hasNextPage()) {
loadNextPage();
}
return items.subList(fromIndex, toIndex);
}
private void tryLoadToIndex(int index) {
while (index >= items.size() && hasNextPage()) {
loadNextPage();
}
}
/**
* The implementation of {@link ListIterator} for PagedList.
*/
private class ListItr implements ListIterator<E> {
    /**
     * index of next element to return.
     */
    private int nextIndex;
    /**
     * index of last element returned; -1 if no such action happened.
     */
    private int lastRetIndex = -1;
    /**
     * Creates an instance of the ListIterator.
     *
     * @param index the position in the list to start.
     */
    ListItr(int index) {
        this.nextIndex = index;
    }
    @Override
    public boolean hasNext() {
        // More elements exist either among loaded items or on unfetched pages.
        return this.nextIndex != items.size() || hasNextPage();
    }
    @Override
    public E next() {
        if (this.nextIndex >= items.size()) {
            if (!hasNextPage()) {
                throw new NoSuchElementException();
            } else {
                // Fetch one more page and retry; the recursion terminates
                // because items grows or hasNextPage() becomes false.
                loadNextPage();
            }
            return next();
        } else {
            try {
                E nextItem = items.get(this.nextIndex);
                this.lastRetIndex = this.nextIndex;
                this.nextIndex = this.nextIndex + 1;
                return nextItem;
            } catch (IndexOutOfBoundsException ex) {
                // The backing list shrank underneath us.
                throw new ConcurrentModificationException();
            }
        }
    }
    @Override
    public void remove() {
        if (this.lastRetIndex < 0) {
            // next()/previous() not called yet, or remove()/add() already was.
            throw new IllegalStateException();
        } else {
            try {
                items.remove(this.lastRetIndex);
                this.nextIndex = this.lastRetIndex;
                this.lastRetIndex = -1;
            } catch (IndexOutOfBoundsException ex) {
                throw new ConcurrentModificationException();
            }
        }
    }
    @Override
    public boolean hasPrevious() {
        return this.nextIndex != 0;
    }
    @Override
    public E previous() {
        int i = this.nextIndex - 1;
        if (i < 0) {
            throw new NoSuchElementException();
        } else if (i >= items.size()) {
            throw new ConcurrentModificationException();
        } else {
            try {
                this.nextIndex = i;
                this.lastRetIndex = i;
                return items.get(this.lastRetIndex);
            } catch (IndexOutOfBoundsException ex) {
                throw new ConcurrentModificationException();
            }
        }
    }
    @Override
    public int nextIndex() {
        return this.nextIndex;
    }
    @Override
    public int previousIndex() {
        return this.nextIndex - 1;
    }
    @Override
    public void set(E e) {
        if (this.lastRetIndex < 0) {
            throw new IllegalStateException();
        } else {
            try {
                items.set(this.lastRetIndex, e);
            } catch (IndexOutOfBoundsException ex) {
                throw new ConcurrentModificationException();
            }
        }
    }
    @Override
    public void add(E e) {
        try {
            items.add(this.nextIndex, e);
            this.nextIndex = this.nextIndex + 1;
            // Per the ListIterator contract, add() invalidates lastRetIndex.
            this.lastRetIndex = -1;
        } catch (IndexOutOfBoundsException ex) {
            throw new ConcurrentModificationException();
        }
    }
}
} | class PagedList<E> implements List<E> {
/** The items retrieved. */
private final List<E> items;
/** The paged response iterator for not retrieved items. */
private Iterator<PagedResponse<E>> pagedResponseIterator;
/**
* Creates an instance of PagedList.
*/
public PagedList() {
items = new ArrayList<>();
pagedResponseIterator = Collections.emptyIterator();
}
/**
* Creates an instance of PagedList from a {@link PagedIterable}.
*
* @param pagedIterable the {@link PagedIterable} object.
*/
public PagedList(PagedIterable<E> pagedIterable) {
items = new ArrayList<>();
Objects.requireNonNull(pagedIterable, "'pagedIterable' cannot be null.");
this.pagedResponseIterator = pagedIterable.iterableByPage().iterator();
}
/**
* If there are more pages available.
*
* @return true if there are more pages to load. False otherwise.
*/
protected boolean hasNextPage() {
return pagedResponseIterator.hasNext();
}
/**
* Loads a page from next page link.
* The exceptions are wrapped into Java Runtime exceptions.
*/
/**
* Keep loading the next page from the next page link until all items are loaded.
*/
public void loadAll() {
while (hasNextPage()) {
loadNextPage();
}
}
@Override
public int size() {
loadAll();
return items.size();
}
@Override
public boolean isEmpty() {
return items.isEmpty() && !hasNextPage();
}
@Override
public boolean contains(Object o) {
return indexOf(o) >= 0;
}
@Override
public Iterator<E> iterator() {
return new ListItr(0);
}
@Override
public Object[] toArray() {
loadAll();
return items.toArray();
}
@Override
public <T> T[] toArray(T[] a) {
loadAll();
return items.toArray(a);
}
@Override
public boolean add(E e) {
loadAll();
return items.add(e);
}
@Override
public boolean remove(Object o) {
int index = indexOf(o);
if (index != -1) {
items.remove(index);
return true;
} else {
return false;
}
}
@Override
public boolean containsAll(Collection<?> c) {
for (Object e : c) {
if (!contains(e)) {
return false;
}
}
return true;
}
@Override
public boolean addAll(Collection<? extends E> c) {
return items.addAll(c);
}
@Override
public boolean addAll(int index, Collection<? extends E> c) {
return items.addAll(index, c);
}
@Override
public boolean removeAll(Collection<?> c) {
return items.removeAll(c);
}
@Override
public boolean retainAll(Collection<?> c) {
return items.retainAll(c);
}
@Override
public void clear() {
items.clear();
pagedResponseIterator = Collections.emptyIterator();
}
@Override
public E get(int index) {
tryLoadToIndex(index);
return items.get(index);
}
@Override
public E set(int index, E element) {
tryLoadToIndex(index);
return items.set(index, element);
}
@Override
public void add(int index, E element) {
items.add(index, element);
}
@Override
public E remove(int index) {
tryLoadToIndex(index);
return items.remove(index);
}
@Override
public int indexOf(Object o) {
int index = items.indexOf(o);
if (index != -1) {
return index;
}
while (hasNextPage()) {
int itemsSize = items.size();
List<E> nextPageItems = pagedResponseIterator.next().getValue();
this.items.addAll(nextPageItems);
index = nextPageItems.indexOf(o);
if (index != -1) {
index = itemsSize + index;
return index;
}
}
return -1;
}
@Override
public int lastIndexOf(Object o) {
loadAll();
return items.lastIndexOf(o);
}
@Override
public ListIterator<E> listIterator() {
return new ListItr(0);
}
@Override
public ListIterator<E> listIterator(int index) {
tryLoadToIndex(index);
return new ListItr(index);
}
@Override
public List<E> subList(int fromIndex, int toIndex) {
while ((fromIndex >= items.size() || toIndex >= items.size()) && hasNextPage()) {
loadNextPage();
}
return items.subList(fromIndex, toIndex);
}
private void tryLoadToIndex(int index) {
while (index >= items.size() && hasNextPage()) {
loadNextPage();
}
}
/**
* The implementation of {@link ListIterator} for PagedList.
*/
private class ListItr implements ListIterator<E> {
/**
* index of next element to return.
*/
private int nextIndex;
/**
* index of last element returned; -1 if no such action happened.
*/
private int lastRetIndex = -1;
/**
* Creates an instance of the ListIterator.
*
* @param index the position in the list to start.
*/
ListItr(int index) {
this.nextIndex = index;
}
@Override
public boolean hasNext() {
return this.nextIndex != items.size() || hasNextPage();
}
@Override
public E next() {
if (this.nextIndex >= items.size()) {
if (!hasNextPage()) {
throw new NoSuchElementException();
} else {
loadNextPage();
}
return next();
} else {
try {
E nextItem = items.get(this.nextIndex);
this.lastRetIndex = this.nextIndex;
this.nextIndex = this.nextIndex + 1;
return nextItem;
} catch (IndexOutOfBoundsException ex) {
throw new ConcurrentModificationException();
}
}
}
@Override
public void remove() {
if (this.lastRetIndex < 0) {
throw new IllegalStateException();
} else {
try {
items.remove(this.lastRetIndex);
this.nextIndex = this.lastRetIndex;
this.lastRetIndex = -1;
} catch (IndexOutOfBoundsException ex) {
throw new ConcurrentModificationException();
}
}
}
@Override
public boolean hasPrevious() {
return this.nextIndex != 0;
}
@Override
public E previous() {
int i = this.nextIndex - 1;
if (i < 0) {
throw new NoSuchElementException();
} else if (i >= items.size()) {
throw new ConcurrentModificationException();
} else {
try {
this.nextIndex = i;
this.lastRetIndex = i;
return items.get(this.lastRetIndex);
} catch (IndexOutOfBoundsException ex) {
throw new ConcurrentModificationException();
}
}
}
@Override
public int nextIndex() {
return this.nextIndex;
}
@Override
public int previousIndex() {
return this.nextIndex - 1;
}
@Override
public void set(E e) {
if (this.lastRetIndex < 0) {
throw new IllegalStateException();
} else {
try {
items.set(this.lastRetIndex, e);
} catch (IndexOutOfBoundsException ex) {
throw new ConcurrentModificationException();
}
}
}
@Override
public void add(E e) {
try {
items.add(this.nextIndex, e);
this.nextIndex = this.nextIndex + 1;
this.lastRetIndex = -1;
} catch (IndexOutOfBoundsException ex) {
throw new ConcurrentModificationException();
}
}
}
} |
Good catch... | void peekMessage() {
final String messageId = UUID.randomUUID().toString();
final String contents = "Some-contents";
final ServiceBusMessage message = TestUtils.getServiceBusMessage(contents, messageId, 0);
final ReceiveMessageOptions options = new ReceiveMessageOptions().setAutoComplete(true);
receiver = createBuilder()
.receiveMessageOptions(options)
.buildAsyncReceiverClient();
StepVerifier.create(sender.send(message).thenMany(receiver.peek()))
.assertNext(receivedMessage -> {
Assertions.assertEquals(contents, receivedMessage.getBodyAsString());
Assertions.assertTrue(receivedMessage.getProperties().containsKey(MESSAGE_TRACKING_ID));
Assertions.assertEquals(messageId, receivedMessage.getProperties().get(MESSAGE_TRACKING_ID));
})
.verifyComplete();
} | StepVerifier.create(sender.send(message).thenMany(receiver.peek())) | void peekMessage() {
final String messageId = UUID.randomUUID().toString();
final String contents = "Some-contents";
final ServiceBusMessage message = TestUtils.getServiceBusMessage(contents, messageId, 0);
StepVerifier.create(sender.send(message).then(receiver.peek()))
.assertNext(receivedMessage -> {
Assertions.assertEquals(contents, receivedMessage.getBodyAsString());
Assertions.assertTrue(receivedMessage.getProperties().containsKey(MESSAGE_TRACKING_ID));
Assertions.assertEquals(messageId, receivedMessage.getProperties().get(MESSAGE_TRACKING_ID));
})
.verifyComplete();
} | class ServiceBusReceiverAsyncClientIntegrationTest extends IntegrationTestBase {
private ServiceBusReceiverAsyncClient receiver;
private ServiceBusSenderAsyncClient sender;
ServiceBusReceiverAsyncClientIntegrationTest() {
super(new ClientLogger(ServiceBusReceiverAsyncClientIntegrationTest.class));
}
@Override
protected void beforeTest() {
sender = createBuilder().buildAsyncSenderClient();
}
@Override
protected void afterTest() {
dispose(receiver, sender);
}
/**
* Verifies that we can send and receive a message.
*/
@Test
void receiveMessageAutoComplete() {
final String messageId = UUID.randomUUID().toString();
final String contents = "Some-contents";
final ServiceBusMessage message = TestUtils.getServiceBusMessage(contents, messageId, 0);
final ReceiveMessageOptions options = new ReceiveMessageOptions().setAutoComplete(true);
receiver = createBuilder()
.receiveMessageOptions(options)
.buildAsyncReceiverClient();
StepVerifier.create(sender.send(message).thenMany(receiver.receive().take(1)))
.assertNext(receivedMessage -> {
Assertions.assertEquals(contents, receivedMessage.getBodyAsString());
Assertions.assertTrue(receivedMessage.getProperties().containsKey(MESSAGE_TRACKING_ID));
Assertions.assertEquals(messageId, receivedMessage.getProperties().get(MESSAGE_TRACKING_ID));
})
.verifyComplete();
}
/**
* Verifies that we can send and peek a message.
*/
@Test
} | class ServiceBusReceiverAsyncClientIntegrationTest extends IntegrationTestBase {
private ServiceBusReceiverAsyncClient receiver;
private ServiceBusSenderAsyncClient sender;
private ReceiveMessageOptions receiveMessageOptions;
ServiceBusReceiverAsyncClientIntegrationTest() {
super(new ClientLogger(ServiceBusReceiverAsyncClientIntegrationTest.class));
receiveMessageOptions = new ReceiveMessageOptions().setAutoComplete(true);
}
@Override
protected void beforeTest() {
sender = createBuilder().buildAsyncSenderClient();
receiver = createBuilder()
.receiveMessageOptions(receiveMessageOptions)
.buildAsyncReceiverClient();
}
@Override
protected void afterTest() {
dispose(receiver, sender);
}
/**
* Verifies that we can send and receive a message.
*/
@Test
void receiveMessageAutoComplete() {
final String messageId = UUID.randomUUID().toString();
final String contents = "Some-contents";
final ServiceBusMessage message = TestUtils.getServiceBusMessage(contents, messageId, 0);
StepVerifier.create(sender.send(message).thenMany(receiver.receive().take(1)))
.assertNext(receivedMessage -> {
Assertions.assertEquals(contents, receivedMessage.getBodyAsString());
Assertions.assertTrue(receivedMessage.getProperties().containsKey(MESSAGE_TRACKING_ID));
Assertions.assertEquals(messageId, receivedMessage.getProperties().get(MESSAGE_TRACKING_ID));
})
.verifyComplete();
}
/**
* Verifies that we can send and peek a message.
*/
@Test
/**
* Verifies that we can send and peek a message.
*/
@Test
void peekFromSequencenumberMessage() {
final long fromSequenceNumber = 1;
final String messageId = UUID.randomUUID().toString();
final String contents = "Some-contents";
final ServiceBusMessage message = TestUtils.getServiceBusMessage(contents, messageId, 0);
StepVerifier.create(sender.send(message).then(receiver.peek(fromSequenceNumber)))
.assertNext(receivedMessage -> {
Assertions.assertEquals(contents, receivedMessage.getBodyAsString());
Assertions.assertTrue(receivedMessage.getProperties().containsKey(MESSAGE_TRACKING_ID));
Assertions.assertEquals(messageId, receivedMessage.getProperties().get(MESSAGE_TRACKING_ID));
})
.verifyComplete();
}
} |
I have tested with 4 peek() calls from one client , since we are storing `ServiceBusManagementNode` in `ConcurrentHashMap` and return cached managementNode after once created using `managementNodes.computeIfAbsent` . So it creates `ServiceBusManagementNode` only once. https://github.com/Azure/azure-sdk-for-java/pull/8983/files#diff-6460b86560d643d02d2e58af8b6213cfL96 | public Mono<ServiceBusManagementNode> getManagementNode(String entityPath) {
if (isDisposed()) {
return Mono.error(logger.logExceptionAsError(new IllegalStateException(String.format(
"connectionId[%s]: Connection is disposed. Cannot get management instance for '%s'",
connectionId, entityPath))));
}
final ServiceBusManagementNode existing = managementNodes.get(entityPath);
if (existing != null) {
return Mono.just(existing);
}
return getReactorConnection().then(
Mono.fromCallable(() -> {
final ServiceBusManagementNode node = managementNodes.computeIfAbsent(entityPath, key -> {
final String sessionName = entityPath + "-" + MANAGEMENT_SESSION_NAME;
final String linkName = entityPath + "-" + MANAGEMENT_LINK_NAME;
final String address = entityPath + "/" + MANAGEMENT_ADDRESS;
logger.info("Creating management node. entityPath: [{}]. address: [{}]. linkName: [{}]",
entityPath, address, linkName);
TokenManager cbsBasedTokenManager = new AzureTokenManagerProvider(
CbsAuthorizationType.SHARED_ACCESS_SIGNATURE, fullyQualifiedNamespace, entityPath)
.getTokenManager(getClaimsBasedSecurityNode(), entityPath) ;
final Mono<RequestResponseChannel> requestResponseChannel =
createRequestResponseChannel(sessionName, linkName, address);
return new ManagementChannel(requestResponseChannel, entityPath, tokenCredential,
tokenManagerProvider, messageSerializer, scheduler, cbsBasedTokenManager);
});
return node;
}));
} | TokenManager cbsBasedTokenManager = new AzureTokenManagerProvider( | public Mono<ServiceBusManagementNode> getManagementNode(String entityPath) {
if (isDisposed()) {
return Mono.error(logger.logExceptionAsError(new IllegalStateException(String.format(
"connectionId[%s]: Connection is disposed. Cannot get management instance for '%s'",
connectionId, entityPath))));
}
final ServiceBusManagementNode existing = managementNodes.get(entityPath);
if (existing != null) {
return Mono.just(existing);
}
return getReactorConnection().then(
Mono.fromCallable(() -> {
final ServiceBusManagementNode node = managementNodes.computeIfAbsent(entityPath, key -> {
final String sessionName = entityPath + "-" + MANAGEMENT_SESSION_NAME;
final String linkName = entityPath + "-" + MANAGEMENT_LINK_NAME;
final String address = entityPath + "/" + MANAGEMENT_ADDRESS;
logger.info("Creating management node. entityPath: [{}]. address: [{}]. linkName: [{}]",
entityPath, address, linkName);
TokenManager cbsBasedTokenManager = new AzureTokenManagerProvider(
CbsAuthorizationType.SHARED_ACCESS_SIGNATURE, fullyQualifiedNamespace, entityPath)
.getTokenManager(getClaimsBasedSecurityNode(), entityPath);
final Mono<RequestResponseChannel> requestResponseChannel =
createRequestResponseChannel(sessionName, linkName, address);
return new ManagementChannel(requestResponseChannel, messageSerializer, scheduler,
cbsBasedTokenManager);
});
return node;
}));
} | class ServiceBusReactorAmqpConnection extends ReactorConnection implements ServiceBusAmqpConnection {
private static final String MANAGEMENT_SESSION_NAME = "mgmt-session";
private static final String MANAGEMENT_LINK_NAME = "mgmt";
private static final String MANAGEMENT_ADDRESS = "$management";
/** This is used in setting up management chhannel and it is always fixed.
* This is not used by sdk user trying to receive/send messages.*/
public static final SenderSettleMode MANAGEMENT_SEND_SETTLE_MODE = SenderSettleMode.SETTLED;
public static final ReceiverSettleMode MANAGEMENT_RECEIVE_SETTLE_MODE = ReceiverSettleMode.FIRST;
private final ClientLogger logger = new ClientLogger(ServiceBusReactorAmqpConnection.class);
/**
* Keeps track of the opened send links. Links are key'd by their entityPath. The send link for allowing the service
* load balance messages is the eventHubName.
*/
private final ConcurrentHashMap<String, AmqpSendLink> sendLinks = new ConcurrentHashMap<>();
private final ConcurrentHashMap<String, ServiceBusManagementNode> managementNodes = new ConcurrentHashMap<>();
private final String connectionId;
private final ReactorProvider reactorProvider;
private final ReactorHandlerProvider handlerProvider;
private final TokenManagerProvider tokenManagerProvider;
private final AmqpRetryOptions retryOptions;
private final MessageSerializer messageSerializer;
private final TokenCredential tokenCredential;
private final Scheduler scheduler;
private final String fullyQualifiedNamespace;
/**
* Creates a new AMQP connection that uses proton-j.
*
* @param connectionId Identifier for the connection.
* @param connectionOptions A set of options used to create the AMQP connection.
* @param reactorProvider Provides proton-j reactor instances.
* @param handlerProvider Provides {@link BaseHandler} to listen to proton-j reactor events.
* @param tokenManagerProvider Provides a token manager for authorizing with CBS node.
* @param messageSerializer Serializes and deserializes proton-j messages.
*/
public ServiceBusReactorAmqpConnection(String connectionId, ConnectionOptions connectionOptions,
ReactorProvider reactorProvider, ReactorHandlerProvider handlerProvider,
TokenManagerProvider tokenManagerProvider,
MessageSerializer messageSerializer, String product, String clientVersion) {
super(connectionId, connectionOptions, reactorProvider, handlerProvider, tokenManagerProvider,
messageSerializer, product, clientVersion,
MANAGEMENT_SEND_SETTLE_MODE, MANAGEMENT_RECEIVE_SETTLE_MODE);
this.connectionId = connectionId;
this.reactorProvider = reactorProvider;
this.handlerProvider = handlerProvider;
this.tokenManagerProvider = tokenManagerProvider;
this.retryOptions = connectionOptions.getRetry();
this.messageSerializer = messageSerializer;
this.tokenCredential = connectionOptions.getTokenCredential();
this.scheduler = connectionOptions.getScheduler();
this.fullyQualifiedNamespace = connectionOptions.getFullyQualifiedNamespace();
}
@Override
/**
* Creates or gets a send link. The same link is returned if there is an existing send link with the same {@code
* linkName}. Otherwise, a new link is created and returned.
*
* @param linkName The name of the link.
* @param entityPath The remote address to connect to for the message broker.
* @param retryOptions Options to use when creating the link.
* @return A new or existing send link that is connected to the given {@code entityPath}.
*/
@Override
public Mono<AmqpSendLink> createSendLink(String linkName, String entityPath, AmqpRetryOptions retryOptions) {
    // The retry policy does not depend on the session, so compute it up front.
    final AmqpRetryPolicy retryPolicy = RetryUtil.getRetryPolicy(retryOptions);
    // Resolve (or lazily create) the session for this entity, then open a
    // producer on it and surface it as an AmqpSendLink.
    return createSession(entityPath).flatMap(session -> {
        logger.verbose("Get or create producer for path: '{}'", entityPath);
        return session
            .createProducer(linkName, entityPath, retryOptions.getTryTimeout(), retryPolicy)
            .cast(AmqpSendLink.class);
    });
}
/**
* Creates or gets an existing receive link. The same link is returned if there is an existing receive link with the
* same {@code linkName}. Otherwise, a new link is created and returned.
*
* @param linkName The name of the link.
* @param entityPath The remote address to connect to for the message broker.
* @param receiveMode Consumer options to use when creating the link.
* @return A new or existing receive link that is connected to the given {@code entityPath}.
*/
@Override
public Mono<AmqpReceiveLink> createReceiveLink(String linkName, String entityPath, ReceiveMode receiveMode) {
    // Sessions created by this connection are ServiceBusSession instances; the
    // cast exposes the consumer-creation API with the Service Bus receive mode.
    return createSession(entityPath)
        .cast(ServiceBusSession.class)
        .flatMap(session -> {
            logger.verbose("Get or create consumer for path: '{}'", entityPath);
            final AmqpRetryPolicy retryPolicy = RetryUtil.getRetryPolicy(retryOptions);
            return session.createConsumer(
                linkName, entityPath, retryOptions.getTryTimeout(), retryPolicy, receiveMode);
        });
}
@Override
public void dispose() {
    logger.info("Disposing of connection.");
    // Tear down every cached send link before releasing the connection itself.
    for (AmqpSendLink link : sendLinks.values()) {
        link.dispose();
    }
    sendLinks.clear();
    super.dispose();
}
@Override
protected AmqpSession createSession(String sessionName, Session session, SessionHandler handler) {
    // Service Bus uses its own reactor session type, wiring in the CBS node
    // and token manager provider for link authorization.
    return new ServiceBusReactorSession(session, handler, sessionName, reactorProvider, handlerProvider,
        getClaimsBasedSecurityNode(), tokenManagerProvider, retryOptions.getTryTimeout(), messageSerializer);
}
} | class ServiceBusReactorAmqpConnection extends ReactorConnection implements ServiceBusAmqpConnection {
private static final String MANAGEMENT_SESSION_NAME = "mgmt-session";
private static final String MANAGEMENT_LINK_NAME = "mgmt";
private static final String MANAGEMENT_ADDRESS = "$management";
private final ClientLogger logger = new ClientLogger(ServiceBusReactorAmqpConnection.class);
/**
* Keeps track of the opened send links. Links are key'd by their entityPath. The send link for allowing the service
* load balance messages is the eventHubName.
*/
private final ConcurrentHashMap<String, AmqpSendLink> sendLinks = new ConcurrentHashMap<>();
private final ConcurrentHashMap<String, ServiceBusManagementNode> managementNodes = new ConcurrentHashMap<>();
private final String connectionId;
private final ReactorProvider reactorProvider;
private final ReactorHandlerProvider handlerProvider;
private final TokenManagerProvider tokenManagerProvider;
private final AmqpRetryOptions retryOptions;
private final MessageSerializer messageSerializer;
private final TokenCredential tokenCredential;
private final Scheduler scheduler;
private final String fullyQualifiedNamespace;
/**
* Creates a new AMQP connection that uses proton-j.
*
* @param connectionId Identifier for the connection.
* @param connectionOptions A set of options used to create the AMQP connection.
* @param reactorProvider Provides proton-j reactor instances.
* @param handlerProvider Provides {@link BaseHandler} to listen to proton-j reactor events.
* @param tokenManagerProvider Provides a token manager for authorizing with CBS node.
* @param messageSerializer Serializes and deserializes proton-j messages.
*/
public ServiceBusReactorAmqpConnection(String connectionId, ConnectionOptions connectionOptions,
ReactorProvider reactorProvider, ReactorHandlerProvider handlerProvider,
TokenManagerProvider tokenManagerProvider,
MessageSerializer messageSerializer, String product, String clientVersion) {
super(connectionId, connectionOptions, reactorProvider, handlerProvider, tokenManagerProvider,
messageSerializer, product, clientVersion,
SenderSettleMode.SETTLED, ReceiverSettleMode.FIRST);
this.connectionId = connectionId;
this.reactorProvider = reactorProvider;
this.handlerProvider = handlerProvider;
this.tokenManagerProvider = tokenManagerProvider;
this.retryOptions = connectionOptions.getRetry();
this.messageSerializer = messageSerializer;
this.tokenCredential = connectionOptions.getTokenCredential();
this.scheduler = connectionOptions.getScheduler();
this.fullyQualifiedNamespace = connectionOptions.getFullyQualifiedNamespace();
}
@Override
/**
* Creates or gets a send link. The same link is returned if there is an existing send link with the same {@code
* linkName}. Otherwise, a new link is created and returned.
*
* @param linkName The name of the link.
* @param entityPath The remote address to connect to for the message broker.
* @param retryOptions Options to use when creating the link.
* @return A new or existing send link that is connected to the given {@code entityPath}.
*/
@Override
public Mono<AmqpSendLink> createSendLink(String linkName, String entityPath, AmqpRetryOptions retryOptions) {
return createSession(entityPath).flatMap(session -> {
logger.verbose("Get or create producer for path: '{}'", entityPath);
final AmqpRetryPolicy retryPolicy = RetryUtil.getRetryPolicy(retryOptions);
return session.createProducer(linkName, entityPath, retryOptions.getTryTimeout(), retryPolicy)
.cast(AmqpSendLink.class);
});
}
/**
* Creates or gets an existing receive link. The same link is returned if there is an existing receive link with the
* same {@code linkName}. Otherwise, a new link is created and returned.
*
* @param linkName The name of the link.
* @param entityPath The remote address to connect to for the message broker.
* @param receiveMode Consumer options to use when creating the link.
* @return A new or existing receive link that is connected to the given {@code entityPath}.
*/
@Override
public Mono<AmqpReceiveLink> createReceiveLink(String linkName, String entityPath, ReceiveMode receiveMode) {
return createSession(entityPath).cast(ServiceBusSession.class)
.flatMap(session -> {
logger.verbose("Get or create consumer for path: '{}'", entityPath);
final AmqpRetryPolicy retryPolicy = RetryUtil.getRetryPolicy(retryOptions);
return session.createConsumer(linkName, entityPath, retryOptions.getTryTimeout(), retryPolicy,
receiveMode);
});
}
@Override
public void dispose() {
logger.info("Disposing of connection.");
sendLinks.forEach((key, value) -> value.dispose());
sendLinks.clear();
super.dispose();
}
@Override
protected AmqpSession createSession(String sessionName, Session session, SessionHandler handler) {
return new ServiceBusReactorSession(session, handler, sessionName, reactorProvider, handlerProvider,
getClaimsBasedSecurityNode(), tokenManagerProvider, retryOptions.getTryTimeout(), messageSerializer);
}
} |
added this test | void peekOneMessage() {
final int numberOfEvents = 1;
when(managementNode.peek())
.thenReturn(Mono.just(mock(ServiceBusReceivedMessage.class)));
StepVerifier.create(consumer.peek())
.expectNextCount(numberOfEvents)
.verifyComplete();
} | StepVerifier.create(consumer.peek()) | void peekOneMessage() {
final int numberOfEvents = 1;
when(managementNode.peek())
.thenReturn(just(mock(ServiceBusReceivedMessage.class)));
StepVerifier.create(consumer.peek())
.expectNextCount(numberOfEvents)
.verifyComplete();
} | class ServiceBusReceiverAsyncClientTest {
private static final String PAYLOAD = "hello";
private static final byte[] PAYLOAD_BYTES = PAYLOAD.getBytes(UTF_8);
private static final int PREFETCH = 5;
private static final String NAMESPACE = "my-namespace-foo";
private static final String ENTITY_NAME = "queue-name";
private final String messageTrackingUUID = UUID.randomUUID().toString();
private final DirectProcessor<AmqpEndpointState> endpointProcessor = DirectProcessor.create();
private final FluxSink<AmqpEndpointState> endpointSink = endpointProcessor.sink(FluxSink.OverflowStrategy.BUFFER);
private final DirectProcessor<Message> messageProcessor = DirectProcessor.create();
@Mock
private AmqpReceiveLink amqpReceiveLink;
@Mock
private ServiceBusAmqpConnection connection;
@Mock
private TokenCredential tokenCredential;
@Mock
private MessageSerializer messageSerializer;
@Mock
private TracerProvider tracerProvider;
@Mock
private ServiceBusManagementNode managementNode;
private ServiceBusReceiverAsyncClient consumer;
@BeforeAll
static void beforeAll() {
    // Fail fast if any reactive pipeline in these tests hangs.
    StepVerifier.setDefaultTimeout(Duration.ofSeconds(10));
}
@AfterAll
static void afterAll() {
    // Restore StepVerifier's global default for subsequent test classes.
    StepVerifier.resetDefaultTimeout();
}
@BeforeEach
void setup() {
    MockitoAnnotations.initMocks(this);
    // The receive link feeds messages from the test-controlled processor on a
    // single-threaded scheduler.
    when(amqpReceiveLink.receive()).thenReturn(messageProcessor.publishOn(Schedulers.single()));
    when(amqpReceiveLink.getEndpointStates()).thenReturn(endpointProcessor);
    // Every deserialized AMQP message becomes a fresh mocked received message.
    when(messageSerializer.deserialize(any(), argThat(ServiceBusReceivedMessage.class::equals)))
        .thenAnswer(invocation -> {
            return mock(ServiceBusReceivedMessage.class);
        });
    ConnectionOptions connectionOptions = new ConnectionOptions(NAMESPACE, tokenCredential,
        CbsAuthorizationType.SHARED_ACCESS_SIGNATURE, AmqpTransportType.AMQP, new AmqpRetryOptions(),
        ProxyOptions.SYSTEM_DEFAULTS, Schedulers.parallel());
    when(connection.getEndpointStates()).thenReturn(endpointProcessor);
    // Mark the connection active before the client interacts with it.
    endpointSink.next(AmqpEndpointState.ACTIVE);
    when(connection.createReceiveLink(anyString(), anyString(),
        any(ReceiveMode.class))).thenReturn(Mono.just(amqpReceiveLink));
    when(connection.getManagementNode(anyString())).thenReturn(Mono.just(managementNode));
    ServiceBusConnectionProcessor connectionProcessor = Flux.<ServiceBusAmqpConnection>create(sink -> sink.next(connection))
        .subscribeWith(new ServiceBusConnectionProcessor(connectionOptions.getFullyQualifiedNamespace(),
            ENTITY_NAME, connectionOptions.getRetry()));
    ReceiveMessageOptions receiveOptions = new ReceiveMessageOptions().setPrefetchCount(PREFETCH);
    consumer = new ServiceBusReceiverAsyncClient(NAMESPACE, ENTITY_NAME, connectionProcessor, tracerProvider,
        messageSerializer, receiveOptions);
}
@AfterEach
void teardown() {
    // Clear inline mocks so mock state does not leak between tests, then
    // release the client's underlying resources.
    Mockito.framework().clearInlineMocks();
    consumer.close();
}
/**
* Verifies that this peek one messages.
*/
@Test
/**
* Verifies that this peek one messages from a sequence Number.
*/
@Test
void peekWithSequenceOneMessage() {
    final int numberOfEvents = 1;
    final int fromSequenceNumber = 10;
    // Peeking from a sequence number should surface exactly the message the
    // management node returns.
    when(managementNode.peek(fromSequenceNumber))
        .thenReturn(Mono.just(mock(ServiceBusReceivedMessage.class)));
    StepVerifier.create(consumer.peek(fromSequenceNumber))
        .expectNextCount(numberOfEvents)
        .verifyComplete();
    // Mock cleanup belongs in teardown(), which already calls
    // Mockito.framework().clearInlineMocks(); the duplicate call here was
    // redundant and has been removed.
}
/**
* Verifies that this receives a number of messages. Verifies that the initial credits we add are equal to the
* prefetch value.
*/
@Test
void receivesNumberOfEvents() {
    final int numberOfEvents = 1;
    // Subscribe first, then emit messages into the mocked link's processor.
    StepVerifier.create(consumer.receive().take(numberOfEvents))
        .then(() -> sendMessages(messageProcessor.sink(), numberOfEvents))
        .expectNextCount(numberOfEvents)
        .verifyComplete();
    // The initial flow-control credits must equal the configured prefetch.
    verify(amqpReceiveLink, times(1)).addCredits(PREFETCH);
}
/**
 * Emits {@code numberOfEvents} test messages, each carrying the sample header
 * and tracking id, into the given sink.
 */
private void sendMessages(FluxSink<Message> sink, int numberOfEvents) {
    final Map<String, String> properties = Collections.singletonMap("SAMPLE_HEADER", "foo");
    int remaining = numberOfEvents;
    while (remaining-- > 0) {
        sink.next(getMessage(PAYLOAD_BYTES, messageTrackingUUID, properties));
    }
}
} | class ServiceBusReceiverAsyncClientTest {
private static final String PAYLOAD = "hello";
private static final byte[] PAYLOAD_BYTES = PAYLOAD.getBytes(UTF_8);
private static final int PREFETCH = 5;
private static final String NAMESPACE = "my-namespace-foo";
private static final String ENTITY_NAME = "queue-name";
private final String messageTrackingUUID = UUID.randomUUID().toString();
private final DirectProcessor<AmqpEndpointState> endpointProcessor = DirectProcessor.create();
private final FluxSink<AmqpEndpointState> endpointSink = endpointProcessor.sink(FluxSink.OverflowStrategy.BUFFER);
private final DirectProcessor<Message> messageProcessor = DirectProcessor.create();
@Mock
private AmqpReceiveLink amqpReceiveLink;
@Mock
private ServiceBusAmqpConnection connection;
@Mock
private TokenCredential tokenCredential;
@Mock
private MessageSerializer messageSerializer;
@Mock
private TracerProvider tracerProvider;
@Mock
private ServiceBusManagementNode managementNode;
@Mock
private ServiceBusReceivedMessage message1;
@Mock
private ServiceBusReceivedMessage message2;
private ServiceBusReceiverAsyncClient consumer;
@BeforeAll
static void beforeAll() {
StepVerifier.setDefaultTimeout(Duration.ofSeconds(10));
}
@AfterAll
static void afterAll() {
StepVerifier.resetDefaultTimeout();
}
@BeforeEach
void setup() {
MockitoAnnotations.initMocks(this);
when(amqpReceiveLink.receive()).thenReturn(messageProcessor.publishOn(Schedulers.single()));
when(amqpReceiveLink.getEndpointStates()).thenReturn(endpointProcessor);
when(messageSerializer.deserialize(any(), argThat(ServiceBusReceivedMessage.class::equals)))
.thenAnswer(invocation -> {
return mock(ServiceBusReceivedMessage.class);
});
ConnectionOptions connectionOptions = new ConnectionOptions(NAMESPACE, tokenCredential,
CbsAuthorizationType.SHARED_ACCESS_SIGNATURE, AmqpTransportType.AMQP, new AmqpRetryOptions(),
ProxyOptions.SYSTEM_DEFAULTS, Schedulers.parallel());
when(connection.getEndpointStates()).thenReturn(endpointProcessor);
endpointSink.next(AmqpEndpointState.ACTIVE);
when(connection.createReceiveLink(anyString(), anyString(),
any(ReceiveMode.class))).thenReturn(just(amqpReceiveLink));
when(connection.getManagementNode(anyString())).thenReturn(just(managementNode));
ServiceBusConnectionProcessor connectionProcessor = Flux.<ServiceBusAmqpConnection>create(sink -> sink.next(connection))
.subscribeWith(new ServiceBusConnectionProcessor(connectionOptions.getFullyQualifiedNamespace(),
ENTITY_NAME, connectionOptions.getRetry()));
ReceiveMessageOptions receiveOptions = new ReceiveMessageOptions().setPrefetchCount(PREFETCH);
consumer = new ServiceBusReceiverAsyncClient(NAMESPACE, ENTITY_NAME, connectionProcessor, tracerProvider,
messageSerializer, receiveOptions);
}
@AfterEach
void teardown() {
Mockito.framework().clearInlineMocks();
consumer.close();
}
/**
* Verifies that when user calls peek more than one time, It returns different object.
*/
@SuppressWarnings("unchecked")
@Test
void peekTwoMessages() {
/* Arrange */
final int numberOfEvents = 1;
when(managementNode.peek())
.thenReturn(just(message1), just(message2));
StepVerifier.create(consumer.peek())
.expectNext(message1)
.verifyComplete();
StepVerifier.create(consumer.peek())
.expectNext(message2)
.verifyComplete();
}
/**
* Verifies that this peek one messages.
*/
@Test
/**
* Verifies that this peek one messages from a sequence Number.
*/
@Test
void peekWithSequenceOneMessage() {
final int numberOfEvents = 1;
final int fromSequenceNumber = 10;
when(managementNode.peek(fromSequenceNumber))
.thenReturn(just(mock(ServiceBusReceivedMessage.class)));
StepVerifier.create(consumer.peek(fromSequenceNumber))
.expectNextCount(numberOfEvents)
.verifyComplete();
}
/**
* Verifies that this receives a number of messages. Verifies that the initial credits we add are equal to the
* prefetch value.
*/
@Test
void receivesNumberOfEvents() {
final int numberOfEvents = 1;
StepVerifier.create(consumer.receive().take(numberOfEvents))
.then(() -> sendMessages(messageProcessor.sink(), numberOfEvents))
.expectNextCount(numberOfEvents)
.verifyComplete();
verify(amqpReceiveLink, times(1)).addCredits(PREFETCH);
}
private void sendMessages(FluxSink<Message> sink, int numberOfEvents) {
Map<String, String> map = Collections.singletonMap("SAMPLE_HEADER", "foo");
for (int i = 0; i < numberOfEvents; i++) {
Message message = getMessage(PAYLOAD_BYTES, messageTrackingUUID, map);
sink.next(message);
}
}
} |
You should dispose of the existing one (created in BeforeEach) if you are going to recreate it. It'll still consume resources. | void peekMessage() {
final String messageId = UUID.randomUUID().toString();
final String contents = "Some-contents";
final ServiceBusMessage message = TestUtils.getServiceBusMessage(contents, messageId, 0);
final ReceiveMessageOptions options = new ReceiveMessageOptions().setAutoComplete(true);
receiver = createBuilder()
.receiveMessageOptions(options)
.buildAsyncReceiverClient();
StepVerifier.create(sender.send(message).then(receiver.peek()))
.assertNext(receivedMessage -> {
Assertions.assertEquals(contents, receivedMessage.getBodyAsString());
Assertions.assertTrue(receivedMessage.getProperties().containsKey(MESSAGE_TRACKING_ID));
Assertions.assertEquals(messageId, receivedMessage.getProperties().get(MESSAGE_TRACKING_ID));
})
.verifyComplete();
} | receiver = createBuilder() | void peekMessage() {
final String messageId = UUID.randomUUID().toString();
final String contents = "Some-contents";
final ServiceBusMessage message = TestUtils.getServiceBusMessage(contents, messageId, 0);
StepVerifier.create(sender.send(message).then(receiver.peek()))
.assertNext(receivedMessage -> {
Assertions.assertEquals(contents, receivedMessage.getBodyAsString());
Assertions.assertTrue(receivedMessage.getProperties().containsKey(MESSAGE_TRACKING_ID));
Assertions.assertEquals(messageId, receivedMessage.getProperties().get(MESSAGE_TRACKING_ID));
})
.verifyComplete();
} | class ServiceBusReceiverAsyncClientIntegrationTest extends IntegrationTestBase {
private ServiceBusReceiverAsyncClient receiver;
private ServiceBusSenderAsyncClient sender;
ServiceBusReceiverAsyncClientIntegrationTest() {
super(new ClientLogger(ServiceBusReceiverAsyncClientIntegrationTest.class));
}
@Override
protected void beforeTest() {
sender = createBuilder().buildAsyncSenderClient();
}
@Override
protected void afterTest() {
dispose(receiver, sender);
}
/**
* Verifies that we can send and receive a message.
*/
@Test
void receiveMessageAutoComplete() {
final String messageId = UUID.randomUUID().toString();
final String contents = "Some-contents";
final ServiceBusMessage message = TestUtils.getServiceBusMessage(contents, messageId, 0);
final ReceiveMessageOptions options = new ReceiveMessageOptions().setAutoComplete(true);
receiver = createBuilder()
.receiveMessageOptions(options)
.buildAsyncReceiverClient();
StepVerifier.create(sender.send(message).thenMany(receiver.receive().take(1)))
.assertNext(receivedMessage -> {
Assertions.assertEquals(contents, receivedMessage.getBodyAsString());
Assertions.assertTrue(receivedMessage.getProperties().containsKey(MESSAGE_TRACKING_ID));
Assertions.assertEquals(messageId, receivedMessage.getProperties().get(MESSAGE_TRACKING_ID));
})
.verifyComplete();
}
/**
* Verifies that we can send and peek a message.
*/
@Test
/**
* Verifies that we can send and peek a message.
*/
@Test
void peekFromSequencenumberMessage() {
final String messageId = UUID.randomUUID().toString();
final String contents = "Some-contents";
final ServiceBusMessage message = TestUtils.getServiceBusMessage(contents, messageId, 0);
final ReceiveMessageOptions options = new ReceiveMessageOptions().setAutoComplete(true);
final long fromSequenceNumber = 1;
receiver = createBuilder()
.receiveMessageOptions(options)
.buildAsyncReceiverClient();
StepVerifier.create(sender.send(message).then(receiver.peek(fromSequenceNumber)))
.assertNext(receivedMessage -> {
Assertions.assertEquals(contents, receivedMessage.getBodyAsString());
Assertions.assertTrue(receivedMessage.getProperties().containsKey(MESSAGE_TRACKING_ID));
Assertions.assertEquals(messageId, receivedMessage.getProperties().get(MESSAGE_TRACKING_ID));
})
.verifyComplete();
}
} | class ServiceBusReceiverAsyncClientIntegrationTest extends IntegrationTestBase {
private ServiceBusReceiverAsyncClient receiver;
private ServiceBusSenderAsyncClient sender;
private ReceiveMessageOptions receiveMessageOptions;
ServiceBusReceiverAsyncClientIntegrationTest() {
super(new ClientLogger(ServiceBusReceiverAsyncClientIntegrationTest.class));
receiveMessageOptions = new ReceiveMessageOptions().setAutoComplete(true);
}
@Override
protected void beforeTest() {
sender = createBuilder().buildAsyncSenderClient();
receiver = createBuilder()
.receiveMessageOptions(receiveMessageOptions)
.buildAsyncReceiverClient();
}
@Override
protected void afterTest() {
dispose(receiver, sender);
}
/**
* Verifies that we can send and receive a message.
*/
@Test
void receiveMessageAutoComplete() {
final String messageId = UUID.randomUUID().toString();
final String contents = "Some-contents";
final ServiceBusMessage message = TestUtils.getServiceBusMessage(contents, messageId, 0);
StepVerifier.create(sender.send(message).thenMany(receiver.receive().take(1)))
.assertNext(receivedMessage -> {
Assertions.assertEquals(contents, receivedMessage.getBodyAsString());
Assertions.assertTrue(receivedMessage.getProperties().containsKey(MESSAGE_TRACKING_ID));
Assertions.assertEquals(messageId, receivedMessage.getProperties().get(MESSAGE_TRACKING_ID));
})
.verifyComplete();
}
/**
* Verifies that we can send and peek a message.
*/
@Test
/**
* Verifies that we can send and peek a message.
*/
@Test
void peekFromSequencenumberMessage() {
final long fromSequenceNumber = 1;
final String messageId = UUID.randomUUID().toString();
final String contents = "Some-contents";
final ServiceBusMessage message = TestUtils.getServiceBusMessage(contents, messageId, 0);
StepVerifier.create(sender.send(message).then(receiver.peek(fromSequenceNumber)))
.assertNext(receivedMessage -> {
Assertions.assertEquals(contents, receivedMessage.getBodyAsString());
Assertions.assertTrue(receivedMessage.getProperties().containsKey(MESSAGE_TRACKING_ID));
Assertions.assertEquals(messageId, receivedMessage.getProperties().get(MESSAGE_TRACKING_ID));
})
.verifyComplete();
}
} |
Should put this in AfterEach, if the test fails at line 163, it'll never be run. | void peekWithSequenceOneMessage() {
final int numberOfEvents = 1;
final int fromSequenceNumber = 10;
when(managementNode.peek(fromSequenceNumber))
.thenReturn(Mono.just(mock(ServiceBusReceivedMessage.class)));
StepVerifier.create(consumer.peek(fromSequenceNumber))
.expectNextCount(numberOfEvents)
.verifyComplete();
Mockito.framework().clearInlineMocks();
} | Mockito.framework().clearInlineMocks(); | void peekWithSequenceOneMessage() {
final int numberOfEvents = 1;
final int fromSequenceNumber = 10;
when(managementNode.peek(fromSequenceNumber))
.thenReturn(just(mock(ServiceBusReceivedMessage.class)));
StepVerifier.create(consumer.peek(fromSequenceNumber))
.expectNextCount(numberOfEvents)
.verifyComplete();
} | class ServiceBusReceiverAsyncClientTest {
private static final String PAYLOAD = "hello";
private static final byte[] PAYLOAD_BYTES = PAYLOAD.getBytes(UTF_8);
private static final int PREFETCH = 5;
private static final String NAMESPACE = "my-namespace-foo";
private static final String ENTITY_NAME = "queue-name";
private final String messageTrackingUUID = UUID.randomUUID().toString();
private final DirectProcessor<AmqpEndpointState> endpointProcessor = DirectProcessor.create();
private final FluxSink<AmqpEndpointState> endpointSink = endpointProcessor.sink(FluxSink.OverflowStrategy.BUFFER);
private final DirectProcessor<Message> messageProcessor = DirectProcessor.create();
@Mock
private AmqpReceiveLink amqpReceiveLink;
@Mock
private ServiceBusAmqpConnection connection;
@Mock
private TokenCredential tokenCredential;
@Mock
private MessageSerializer messageSerializer;
@Mock
private TracerProvider tracerProvider;
@Mock
private ServiceBusManagementNode managementNode;
private ServiceBusReceiverAsyncClient consumer;
@BeforeAll
static void beforeAll() {
StepVerifier.setDefaultTimeout(Duration.ofSeconds(10));
}
@AfterAll
static void afterAll() {
StepVerifier.resetDefaultTimeout();
}
@BeforeEach
void setup() {
MockitoAnnotations.initMocks(this);
when(amqpReceiveLink.receive()).thenReturn(messageProcessor.publishOn(Schedulers.single()));
when(amqpReceiveLink.getEndpointStates()).thenReturn(endpointProcessor);
when(messageSerializer.deserialize(any(), argThat(ServiceBusReceivedMessage.class::equals)))
.thenAnswer(invocation -> {
return mock(ServiceBusReceivedMessage.class);
});
ConnectionOptions connectionOptions = new ConnectionOptions(NAMESPACE, tokenCredential,
CbsAuthorizationType.SHARED_ACCESS_SIGNATURE, AmqpTransportType.AMQP, new AmqpRetryOptions(),
ProxyOptions.SYSTEM_DEFAULTS, Schedulers.parallel());
when(connection.getEndpointStates()).thenReturn(endpointProcessor);
endpointSink.next(AmqpEndpointState.ACTIVE);
when(connection.createReceiveLink(anyString(), anyString(),
any(ReceiveMode.class))).thenReturn(Mono.just(amqpReceiveLink));
when(connection.getManagementNode(anyString())).thenReturn(Mono.just(managementNode));
ServiceBusConnectionProcessor connectionProcessor = Flux.<ServiceBusAmqpConnection>create(sink -> sink.next(connection))
.subscribeWith(new ServiceBusConnectionProcessor(connectionOptions.getFullyQualifiedNamespace(),
ENTITY_NAME, connectionOptions.getRetry()));
ReceiveMessageOptions receiveOptions = new ReceiveMessageOptions().setPrefetchCount(PREFETCH);
consumer = new ServiceBusReceiverAsyncClient(NAMESPACE, ENTITY_NAME, connectionProcessor, tracerProvider,
messageSerializer, receiveOptions);
}
@AfterEach
void teardown() {
Mockito.framework().clearInlineMocks();
consumer.close();
}
/**
* Verifies that this peek one messages.
*/
@Test
void peekOneMessage() {
final int numberOfEvents = 1;
when(managementNode.peek())
.thenReturn(Mono.just(mock(ServiceBusReceivedMessage.class)));
StepVerifier.create(consumer.peek())
.expectNextCount(numberOfEvents)
.verifyComplete();
}
/**
* Verifies that this peek one messages from a sequence Number.
*/
@Test
/**
* Verifies that this receives a number of messages. Verifies that the initial credits we add are equal to the
* prefetch value.
*/
@Test
void receivesNumberOfEvents() {
final int numberOfEvents = 1;
StepVerifier.create(consumer.receive().take(numberOfEvents))
.then(() -> sendMessages(messageProcessor.sink(), numberOfEvents))
.expectNextCount(numberOfEvents)
.verifyComplete();
verify(amqpReceiveLink, times(1)).addCredits(PREFETCH);
}
private void sendMessages(FluxSink<Message> sink, int numberOfEvents) {
Map<String, String> map = Collections.singletonMap("SAMPLE_HEADER", "foo");
for (int i = 0; i < numberOfEvents; i++) {
Message message = getMessage(PAYLOAD_BYTES, messageTrackingUUID, map);
sink.next(message);
}
}
} | class ServiceBusReceiverAsyncClientTest {
private static final String PAYLOAD = "hello";
private static final byte[] PAYLOAD_BYTES = PAYLOAD.getBytes(UTF_8);
private static final int PREFETCH = 5;
private static final String NAMESPACE = "my-namespace-foo";
private static final String ENTITY_NAME = "queue-name";
private final String messageTrackingUUID = UUID.randomUUID().toString();
private final DirectProcessor<AmqpEndpointState> endpointProcessor = DirectProcessor.create();
private final FluxSink<AmqpEndpointState> endpointSink = endpointProcessor.sink(FluxSink.OverflowStrategy.BUFFER);
private final DirectProcessor<Message> messageProcessor = DirectProcessor.create();
@Mock
private AmqpReceiveLink amqpReceiveLink;
@Mock
private ServiceBusAmqpConnection connection;
@Mock
private TokenCredential tokenCredential;
@Mock
private MessageSerializer messageSerializer;
@Mock
private TracerProvider tracerProvider;
@Mock
private ServiceBusManagementNode managementNode;
@Mock
private ServiceBusReceivedMessage message1;
@Mock
private ServiceBusReceivedMessage message2;
private ServiceBusReceiverAsyncClient consumer;
@BeforeAll
static void beforeAll() {
StepVerifier.setDefaultTimeout(Duration.ofSeconds(10));
}
@AfterAll
static void afterAll() {
StepVerifier.resetDefaultTimeout();
}
@BeforeEach
void setup() {
MockitoAnnotations.initMocks(this);
when(amqpReceiveLink.receive()).thenReturn(messageProcessor.publishOn(Schedulers.single()));
when(amqpReceiveLink.getEndpointStates()).thenReturn(endpointProcessor);
when(messageSerializer.deserialize(any(), argThat(ServiceBusReceivedMessage.class::equals)))
.thenAnswer(invocation -> {
return mock(ServiceBusReceivedMessage.class);
});
ConnectionOptions connectionOptions = new ConnectionOptions(NAMESPACE, tokenCredential,
CbsAuthorizationType.SHARED_ACCESS_SIGNATURE, AmqpTransportType.AMQP, new AmqpRetryOptions(),
ProxyOptions.SYSTEM_DEFAULTS, Schedulers.parallel());
when(connection.getEndpointStates()).thenReturn(endpointProcessor);
endpointSink.next(AmqpEndpointState.ACTIVE);
when(connection.createReceiveLink(anyString(), anyString(),
any(ReceiveMode.class))).thenReturn(just(amqpReceiveLink));
when(connection.getManagementNode(anyString())).thenReturn(just(managementNode));
ServiceBusConnectionProcessor connectionProcessor = Flux.<ServiceBusAmqpConnection>create(sink -> sink.next(connection))
.subscribeWith(new ServiceBusConnectionProcessor(connectionOptions.getFullyQualifiedNamespace(),
ENTITY_NAME, connectionOptions.getRetry()));
ReceiveMessageOptions receiveOptions = new ReceiveMessageOptions().setPrefetchCount(PREFETCH);
consumer = new ServiceBusReceiverAsyncClient(NAMESPACE, ENTITY_NAME, connectionProcessor, tracerProvider,
messageSerializer, receiveOptions);
}
@AfterEach
void teardown() {
Mockito.framework().clearInlineMocks();
consumer.close();
}
/**
* Verifies that when user calls peek more than one time, It returns different object.
*/
@SuppressWarnings("unchecked")
@Test
void peekTwoMessages() {
/* Arrange */
final int numberOfEvents = 1;
when(managementNode.peek())
.thenReturn(just(message1), just(message2));
StepVerifier.create(consumer.peek())
.expectNext(message1)
.verifyComplete();
StepVerifier.create(consumer.peek())
.expectNext(message2)
.verifyComplete();
}
/**
* Verifies that this peek one messages.
*/
@Test
void peekOneMessage() {
final int numberOfEvents = 1;
when(managementNode.peek())
.thenReturn(just(mock(ServiceBusReceivedMessage.class)));
StepVerifier.create(consumer.peek())
.expectNextCount(numberOfEvents)
.verifyComplete();
}
/**
* Verifies that this peek one messages from a sequence Number.
*/
@Test
/**
* Verifies that this receives a number of messages. Verifies that the initial credits we add are equal to the
* prefetch value.
*/
@Test
void receivesNumberOfEvents() {
final int numberOfEvents = 1;
StepVerifier.create(consumer.receive().take(numberOfEvents))
.then(() -> sendMessages(messageProcessor.sink(), numberOfEvents))
.expectNextCount(numberOfEvents)
.verifyComplete();
verify(amqpReceiveLink, times(1)).addCredits(PREFETCH);
}
private void sendMessages(FluxSink<Message> sink, int numberOfEvents) {
Map<String, String> map = Collections.singletonMap("SAMPLE_HEADER", "foo");
for (int i = 0; i < numberOfEvents; i++) {
Message message = getMessage(PAYLOAD_BYTES, messageTrackingUUID, map);
sink.next(message);
}
}
} |
This can be the diamond operator on the right hand side | private EndpointCache getOrAddEndpoint(URI endpoint) {
EndpointCache endpointCache = this.addressCacheByEndpoint.computeIfAbsent(endpoint , key -> {
GatewayAddressCache gatewayAddressCache = new GatewayAddressCache(endpoint, protocol, this.tokenProvider, this.userAgentContainer, this.httpClient);
AddressResolver addressResolver = new AddressResolver();
addressResolver.initializeCaches(this.collectionCache, this.routingMapProvider, gatewayAddressCache);
EndpointCache cache = new EndpointCache();
cache.addressCache = gatewayAddressCache;
cache.addressResolver = addressResolver;
return cache;
});
if (this.addressCacheByEndpoint.size() > this.maxEndpoints) {
List<URI> allEndpoints = new ArrayList<URI>(this.endpointManager.getWriteEndpoints());
allEndpoints.addAll(this.endpointManager.getReadEndpoints());
Collections.reverse(allEndpoints);
LinkedList<URI> endpoints = new LinkedList<>(allEndpoints);
while (this.addressCacheByEndpoint.size() > this.maxEndpoints) {
if (endpoints.size() > 0) {
URI dequeueEnpoint = endpoints.pop();
if (this.addressCacheByEndpoint.get(dequeueEnpoint) != null) {
this.addressCacheByEndpoint.remove(dequeueEnpoint);
}
} else {
break;
}
}
}
return endpointCache;
} | List<URI> allEndpoints = new ArrayList<URI>(this.endpointManager.getWriteEndpoints()); | private EndpointCache getOrAddEndpoint(URI endpoint) {
EndpointCache endpointCache = this.addressCacheByEndpoint.computeIfAbsent(endpoint , key -> {
GatewayAddressCache gatewayAddressCache = new GatewayAddressCache(endpoint, protocol, this.tokenProvider, this.userAgentContainer, this.httpClient);
AddressResolver addressResolver = new AddressResolver();
addressResolver.initializeCaches(this.collectionCache, this.routingMapProvider, gatewayAddressCache);
EndpointCache cache = new EndpointCache();
cache.addressCache = gatewayAddressCache;
cache.addressResolver = addressResolver;
return cache;
});
if (this.addressCacheByEndpoint.size() > this.maxEndpoints) {
List<URI> allEndpoints = new ArrayList<>(this.endpointManager.getWriteEndpoints());
allEndpoints.addAll(this.endpointManager.getReadEndpoints());
Collections.reverse(allEndpoints);
LinkedList<URI> endpoints = new LinkedList<>(allEndpoints);
while (this.addressCacheByEndpoint.size() > this.maxEndpoints) {
if (endpoints.size() > 0) {
URI dequeueEnpoint = endpoints.pop();
if (this.addressCacheByEndpoint.get(dequeueEnpoint) != null) {
this.addressCacheByEndpoint.remove(dequeueEnpoint);
}
} else {
break;
}
}
}
return endpointCache;
} | class GlobalAddressResolver implements IAddressResolver {
private final static int MaxBackupReadRegions = 3;
private final GlobalEndpointManager endpointManager;
private final Protocol protocol;
private final IAuthorizationTokenProvider tokenProvider;
private final UserAgentContainer userAgentContainer;
private final RxCollectionCache collectionCache;
private final RxPartitionKeyRangeCache routingMapProvider;
private final int maxEndpoints;
private final GatewayServiceConfigurationReader serviceConfigReader;
final Map<URI, EndpointCache> addressCacheByEndpoint;
private GatewayAddressCache gatewayAddressCache;
private AddressResolver addressResolver;
private HttpClient httpClient;
public GlobalAddressResolver(
HttpClient httpClient,
GlobalEndpointManager endpointManager,
Protocol protocol,
IAuthorizationTokenProvider tokenProvider,
RxCollectionCache collectionCache,
RxPartitionKeyRangeCache routingMapProvider,
UserAgentContainer userAgentContainer,
GatewayServiceConfigurationReader serviceConfigReader,
ConnectionPolicy connectionPolicy) {
this.httpClient = httpClient;
this.endpointManager = endpointManager;
this.protocol = protocol;
this.tokenProvider = tokenProvider;
this.userAgentContainer = userAgentContainer;
this.collectionCache = collectionCache;
this.routingMapProvider = routingMapProvider;
this.serviceConfigReader = serviceConfigReader;
int maxBackupReadEndpoints = (connectionPolicy.isReadRequestsFallbackEnabled() == null || connectionPolicy.isReadRequestsFallbackEnabled()) ? GlobalAddressResolver.MaxBackupReadRegions : 0;
this.maxEndpoints = maxBackupReadEndpoints + 2;
this.addressCacheByEndpoint = new ConcurrentHashMap<>();
for (URI endpoint : endpointManager.getWriteEndpoints()) {
this.getOrAddEndpoint(endpoint);
}
for (URI endpoint : endpointManager.getReadEndpoints()) {
this.getOrAddEndpoint(endpoint);
}
}
Mono<Void> openAsync(DocumentCollection collection) {
Mono<Utils.ValueHolder<CollectionRoutingMap>> routingMap = this.routingMapProvider.tryLookupAsync(collection.getId(), null, null);
return routingMap.flatMap(collectionRoutingMap -> {
if ( collectionRoutingMap.v == null) {
return Mono.empty();
}
List<PartitionKeyRangeIdentity> ranges = collectionRoutingMap.v.getOrderedPartitionKeyRanges().stream().map(range ->
new PartitionKeyRangeIdentity(collection.getResourceId(), range.getId())).collect(Collectors.toList());
List<Mono<Void>> tasks = new ArrayList<>();
for (EndpointCache endpointCache : this.addressCacheByEndpoint.values()) {
tasks.add(endpointCache.addressCache.openAsync(collection, ranges));
}
@SuppressWarnings({"rawtypes", "unchecked"})
Mono<Void>[] array = new Mono[this.addressCacheByEndpoint.values().size()];
return Flux.mergeDelayError(Queues.SMALL_BUFFER_SIZE, tasks.toArray(array)).then();
});
}
@Override
public Mono<AddressInformation[]> resolveAsync(RxDocumentServiceRequest request, boolean forceRefresh) {
IAddressResolver resolver = this.getAddressResolver(request);
return resolver.resolveAsync(request, forceRefresh);
}
public void dispose() {
for (EndpointCache endpointCache : this.addressCacheByEndpoint.values()) {
endpointCache.addressCache.dispose();
}
}
private IAddressResolver getAddressResolver(RxDocumentServiceRequest rxDocumentServiceRequest) {
URI endpoint = this.endpointManager.resolveServiceEndpoint(rxDocumentServiceRequest);
return this.getOrAddEndpoint(endpoint).addressResolver;
}
static class EndpointCache {
GatewayAddressCache addressCache;
AddressResolver addressResolver;
}
} | class GlobalAddressResolver implements IAddressResolver {
private final static int MaxBackupReadRegions = 3;
private final GlobalEndpointManager endpointManager;
private final Protocol protocol;
private final IAuthorizationTokenProvider tokenProvider;
private final UserAgentContainer userAgentContainer;
private final RxCollectionCache collectionCache;
private final RxPartitionKeyRangeCache routingMapProvider;
private final int maxEndpoints;
private final GatewayServiceConfigurationReader serviceConfigReader;
final Map<URI, EndpointCache> addressCacheByEndpoint;
private GatewayAddressCache gatewayAddressCache;
private AddressResolver addressResolver;
private HttpClient httpClient;
public GlobalAddressResolver(
HttpClient httpClient,
GlobalEndpointManager endpointManager,
Protocol protocol,
IAuthorizationTokenProvider tokenProvider,
RxCollectionCache collectionCache,
RxPartitionKeyRangeCache routingMapProvider,
UserAgentContainer userAgentContainer,
GatewayServiceConfigurationReader serviceConfigReader,
ConnectionPolicy connectionPolicy) {
this.httpClient = httpClient;
this.endpointManager = endpointManager;
this.protocol = protocol;
this.tokenProvider = tokenProvider;
this.userAgentContainer = userAgentContainer;
this.collectionCache = collectionCache;
this.routingMapProvider = routingMapProvider;
this.serviceConfigReader = serviceConfigReader;
int maxBackupReadEndpoints = (connectionPolicy.isReadRequestsFallbackEnabled() == null || connectionPolicy.isReadRequestsFallbackEnabled()) ? GlobalAddressResolver.MaxBackupReadRegions : 0;
this.maxEndpoints = maxBackupReadEndpoints + 2;
this.addressCacheByEndpoint = new ConcurrentHashMap<>();
for (URI endpoint : endpointManager.getWriteEndpoints()) {
this.getOrAddEndpoint(endpoint);
}
for (URI endpoint : endpointManager.getReadEndpoints()) {
this.getOrAddEndpoint(endpoint);
}
}
Mono<Void> openAsync(DocumentCollection collection) {
Mono<Utils.ValueHolder<CollectionRoutingMap>> routingMap = this.routingMapProvider.tryLookupAsync(collection.getId(), null, null);
return routingMap.flatMap(collectionRoutingMap -> {
if ( collectionRoutingMap.v == null) {
return Mono.empty();
}
List<PartitionKeyRangeIdentity> ranges = collectionRoutingMap.v.getOrderedPartitionKeyRanges().stream().map(range ->
new PartitionKeyRangeIdentity(collection.getResourceId(), range.getId())).collect(Collectors.toList());
List<Mono<Void>> tasks = new ArrayList<>();
for (EndpointCache endpointCache : this.addressCacheByEndpoint.values()) {
tasks.add(endpointCache.addressCache.openAsync(collection, ranges));
}
@SuppressWarnings({"rawtypes", "unchecked"})
Mono<Void>[] array = new Mono[this.addressCacheByEndpoint.values().size()];
return Flux.mergeDelayError(Queues.SMALL_BUFFER_SIZE, tasks.toArray(array)).then();
});
}
@Override
public Mono<AddressInformation[]> resolveAsync(RxDocumentServiceRequest request, boolean forceRefresh) {
IAddressResolver resolver = this.getAddressResolver(request);
return resolver.resolveAsync(request, forceRefresh);
}
public void dispose() {
for (EndpointCache endpointCache : this.addressCacheByEndpoint.values()) {
endpointCache.addressCache.dispose();
}
}
private IAddressResolver getAddressResolver(RxDocumentServiceRequest rxDocumentServiceRequest) {
URI endpoint = this.endpointManager.resolveServiceEndpoint(rxDocumentServiceRequest);
return this.getOrAddEndpoint(endpoint).addressResolver;
}
static class EndpointCache {
GatewayAddressCache addressCache;
AddressResolver addressResolver;
}
} |
this can be simplified to `verify(asyncSender).createBatch();` | void createBatchDefault() {
ServiceBusMessageBatch batch = new ServiceBusMessageBatch(MAX_MESSAGE_LENGTH_BYTES, null, null,
null);
when(asyncSender.createBatch()).thenReturn(Mono.just(batch));
ServiceBusMessageBatch batchMessage = sender.createBatch();
Assertions.assertEquals(MAX_MESSAGE_LENGTH_BYTES, batchMessage.getMaxSizeInBytes());
Assertions.assertEquals(0, batchMessage.getCount());
verify(asyncSender, times(1)).createBatch();
} | verify(asyncSender, times(1)).createBatch(); | void createBatchDefault() {
ServiceBusMessageBatch batch = new ServiceBusMessageBatch(MAX_MESSAGE_LENGTH_BYTES, null, null,
null);
when(asyncSender.createBatch()).thenReturn(Mono.just(batch));
ServiceBusMessageBatch batchMessage = sender.createBatch();
Assertions.assertEquals(MAX_MESSAGE_LENGTH_BYTES, batchMessage.getMaxSizeInBytes());
Assertions.assertEquals(0, batchMessage.getCount());
verify(asyncSender).createBatch();
} | class ServiceBusSenderClientTest {
private static final String NAMESPACE = "my-namespace";
private static final String ENTITY_NAME = "my-servicebus-entity";
@Mock
private ErrorContextProvider errorContextProvider;
@Mock
private ServiceBusSenderAsyncClient asyncSender;
@Captor
private ArgumentCaptor<ServiceBusMessage> singleMessageCaptor;
@Captor
private ArgumentCaptor<ServiceBusMessageBatch> messageBatchCaptor;
private MessageSerializer serializer = new ServiceBusMessageSerializer();
private TracerProvider tracerProvider = new TracerProvider(Collections.emptyList());
private final MessageSerializer messageSerializer = new ServiceBusMessageSerializer();
private final AmqpRetryOptions retryOptions = new AmqpRetryOptions()
.setDelay(Duration.ofMillis(500))
.setMode(AmqpRetryMode.FIXED)
.setTryTimeout(Duration.ofSeconds(10));
private ServiceBusSenderClient sender;
private static final String TEST_CONTENTS = "My message for service bus queue!";
@BeforeAll
static void beforeAll() {
StepVerifier.setDefaultTimeout(Duration.ofSeconds(30));
}
@AfterAll
static void afterAll() {
StepVerifier.resetDefaultTimeout();
}
@BeforeEach
void setup() {
MockitoAnnotations.initMocks(this);
when(asyncSender.getEntityPath()).thenReturn(ENTITY_NAME);
when(asyncSender.getFullyQualifiedNamespace()).thenReturn(NAMESPACE);
sender = new ServiceBusSenderClient(asyncSender, retryOptions.getTryTimeout());
}
@AfterEach
void teardown() {
sender.close();
singleMessageCaptor = null;
messageBatchCaptor = null;
Mockito.framework().clearInlineMocks();
}
@Test
void verifyProperties() {
Assertions.assertEquals(ENTITY_NAME, sender.getEntityPath());
Assertions.assertEquals(NAMESPACE, sender.getFullyQualifiedNamespace());
}
/**
* Verifies that an exception is thrown when we create a batch with null options.
*/
@Test
void createBatchNull() {
Assertions.assertThrows(NullPointerException.class, () -> sender.createBatch(null));
}
/**
* Verifies that the default batch is the same size as the message link.
*/
@Test
/**
* Verifies we cannot create a batch if the options size is larger than the link.
*/
@Test
void createBatchWhenSizeTooBigThanOnSendLink() {
int maxLinkSize = 1024;
int batchSize = maxLinkSize + 10;
final CreateBatchOptions options = new CreateBatchOptions().setMaximumSizeInBytes(batchSize);
when(asyncSender.createBatch(options)).thenThrow(new IllegalArgumentException("too large size"));
try {
sender.createBatch(options);
Assertions.fail("Should not have created batch because batchSize is bigger than the size on SenderLink.");
} catch (Exception ex) {
Assertions.assertTrue(ex instanceof IllegalArgumentException);
}
verify(asyncSender, times(1)).createBatch(options);
}
/**
* Verifies that the producer can create a batch with a given {@link CreateBatchOptions
*/
@Test
void createsMessageBatchWithSize() {
int maxLinkSize = 10000;
int batchSize = 1024;
int eventOverhead = 46;
int maxEventPayload = batchSize - eventOverhead;
final ServiceBusMessage message = new ServiceBusMessage(new byte[maxEventPayload]);
final ServiceBusMessage tooLargeMessage = new ServiceBusMessage(new byte[maxEventPayload + 1]);
final CreateBatchOptions options = new CreateBatchOptions().setMaximumSizeInBytes(batchSize);
ServiceBusMessageBatch batch = new ServiceBusMessageBatch(batchSize, null, tracerProvider,
messageSerializer);
when(asyncSender.createBatch(options)).thenReturn(Mono.just(batch));
ServiceBusMessageBatch messageBatch = sender.createBatch(options);
Assertions.assertEquals(batchSize, messageBatch.getMaxSizeInBytes());
Assertions.assertTrue(messageBatch.tryAdd(message));
Assertions.assertFalse(messageBatch.tryAdd(tooLargeMessage));
}
/**
* Verifies that sending multiple message will result in calling sender.send(MessageBatch).
*/
@Test
void sendMultipleMessages() {
final int count = 4;
final byte[] contents = TEST_CONTENTS.getBytes(UTF_8);
final ServiceBusMessageBatch batch = new ServiceBusMessageBatch(256 * 1024,
errorContextProvider, tracerProvider, serializer);
IntStream.range(0, count).forEach(index -> {
final ServiceBusMessage message = new ServiceBusMessage(contents);
Assertions.assertTrue(batch.tryAdd(message));
});
when(asyncSender.send(batch)).thenReturn(Mono.empty());
sender.send(batch);
verify(asyncSender).send(messageBatchCaptor.capture());
final ServiceBusMessageBatch messagesSent = messageBatchCaptor.getValue();
Assertions.assertEquals(count, messagesSent.getCount());
messagesSent.getMessages().forEach(message -> Assertions.assertArrayEquals(contents, message.getBody()));
}
/**
* Verifies that sending a single message will result in calling sender.send(Message).
*/
@Test
void sendSingleMessage() {
final ServiceBusMessage testData =
new ServiceBusMessage(TEST_CONTENTS.getBytes(UTF_8));
when(asyncSender.send(testData)).thenReturn(Mono.empty());
sender.send(testData);
verify(asyncSender, times(1)).send(testData);
verify(asyncSender).send(singleMessageCaptor.capture());
final ServiceBusMessage message = singleMessageCaptor.getValue();
Assertions.assertArrayEquals(testData.getBody(), message.getBody());
}
} | class ServiceBusSenderClientTest {
private static final String NAMESPACE = "my-namespace";
private static final String ENTITY_NAME = "my-servicebus-entity";
@Mock
private ServiceBusSenderAsyncClient asyncSender;
@Captor
private ArgumentCaptor<ServiceBusMessage> singleMessageCaptor;
private ServiceBusSenderClient sender;
private static final Duration RETRY_TIMEOUT = Duration.ofSeconds(10);
private static final String TEST_CONTENTS = "My message for service bus queue!";
@BeforeAll
static void beforeAll() {
StepVerifier.setDefaultTimeout(Duration.ofSeconds(30));
}
@AfterAll
static void afterAll() {
StepVerifier.resetDefaultTimeout();
}
@BeforeEach
void setup() {
MockitoAnnotations.initMocks(this);
when(asyncSender.getEntityPath()).thenReturn(ENTITY_NAME);
when(asyncSender.getFullyQualifiedNamespace()).thenReturn(NAMESPACE);
sender = new ServiceBusSenderClient(asyncSender, RETRY_TIMEOUT);
}
@AfterEach
void teardown() {
sender.close();
singleMessageCaptor = null;
Mockito.framework().clearInlineMocks();
}
@Test
void verifyProperties() {
Assertions.assertEquals(ENTITY_NAME, sender.getEntityPath());
Assertions.assertEquals(NAMESPACE, sender.getFullyQualifiedNamespace());
}
/**
* Verifies that an exception is thrown when we create a batch with null options.
*/
@Test
void createBatchNull() {
Assertions.assertThrows(NullPointerException.class, () -> sender.createBatch(null));
}
/**
* Verifies that the default batch is the same size as the message link.
*/
@Test
/**
* Verifies we cannot create a batch if the options size is larger than the link.
*/
@Test
void createBatchWhenSizeTooBigThanOnSendLink() {
int maxLinkSize = 1024;
int batchSize = maxLinkSize + 10;
final CreateBatchOptions options = new CreateBatchOptions().setMaximumSizeInBytes(batchSize);
when(asyncSender.createBatch(options)).thenThrow(new IllegalArgumentException("too large size"));
Assertions.assertThrows(IllegalArgumentException.class, () -> sender.createBatch(options));
verify(asyncSender, times(1)).createBatch(options);
}
/**
* Verifies that the producer can create a batch with a given {@link CreateBatchOptions
*/
@Test
void createsMessageBatchWithSize() {
int batchSize = 1024;
final CreateBatchOptions options = new CreateBatchOptions().setMaximumSizeInBytes(batchSize);
final ServiceBusMessageBatch batch = new ServiceBusMessageBatch(batchSize, null, null,
null);
when(asyncSender.createBatch(options)).thenReturn(Mono.just(batch));
ServiceBusMessageBatch messageBatch = sender.createBatch(options);
Assertions.assertEquals(batch, messageBatch);
}
/**
* Verifies that sending a single message will result in calling sender.send(Message).
*/
@Test
void sendSingleMessage() {
final ServiceBusMessage testData =
new ServiceBusMessage(TEST_CONTENTS.getBytes(UTF_8));
when(asyncSender.send(testData)).thenReturn(Mono.empty());
sender.send(testData);
verify(asyncSender, times(1)).send(testData);
verify(asyncSender).send(singleMessageCaptor.capture());
final ServiceBusMessage message = singleMessageCaptor.getValue();
Assertions.assertArrayEquals(testData.getBody(), message.getBody());
}
} |
Don't introduce more guava dependencies! :-) Use the standard Java APIs for hashcode | public int hashCode() {
if (this.components == null || this.components.size() == 0) {
return 0;
}
int [] ordinals = new int[this.components.size()];
for (int i = 0; i < this.components.size(); i++) {
ordinals[i] = this.components.get(i).GetTypeOrdinal();
}
return Objects.hashCode(ordinals);
} | } | public int hashCode() {
return super.hashCode();
} | class PartitionKeyInternal implements Comparable<PartitionKeyInternal> {
private static final String TYPE = "type";
private static final String MIN_NUMBER = "MinNumber";
private static final String MAX_NUMBER = "MaxNumber";
private static final String MIN_STRING = "MinString";
private static final String MAX_STRING = "MaxString";
private static final String INFINITY = "Infinity";
public static final PartitionKeyInternal NonePartitionKey =
new PartitionKeyInternal();
public static final PartitionKeyInternal EmptyPartitionKey =
new PartitionKeyInternal(new ArrayList<>());
@SuppressWarnings("serial")
public static final PartitionKeyInternal InfinityPartitionKey =
new PartitionKeyInternal(new ArrayList<IPartitionKeyComponent>() {{
add(new InfinityPartitionKeyComponent());
}});
@SuppressWarnings("serial")
public static final PartitionKeyInternal UndefinedPartitionKey =
new PartitionKeyInternal(new ArrayList<IPartitionKeyComponent>() {{
add(new UndefinedPartitionKeyComponent());
}});
public static final PartitionKeyInternal InclusiveMinimum = PartitionKeyInternal.EmptyPartitionKey;
public static final PartitionKeyInternal ExclusiveMaximum = PartitionKeyInternal.InfinityPartitionKey;
public static final PartitionKeyInternal Empty = PartitionKeyInternal.EmptyPartitionKey;
public static final PartitionKeyInternal None = PartitionKeyInternal.NonePartitionKey;
final List<IPartitionKeyComponent> components;
public PartitionKeyInternal(List<IPartitionKeyComponent> values) {
if (values == null) {
throw new IllegalArgumentException("values");
}
this.components = values;
}
public PartitionKeyInternal() {
this.components = null;
}
public static PartitionKeyInternal fromJsonString(String partitionKey) {
if (Strings.isNullOrEmpty(partitionKey)) {
throw new IllegalArgumentException(String.format(RMResources.UnableToDeserializePartitionKeyValue, partitionKey));
}
try {
return Utils.getSimpleObjectMapper().readValue(partitionKey, PartitionKeyInternal.class);
} catch (IOException e) {
throw new IllegalArgumentException(e);
}
}
public static PartitionKeyInternal fromObjectArray(Object[] values, boolean strict) {
if (values == null) {
throw new IllegalArgumentException("values");
}
return PartitionKeyInternal.fromObjectArray(Arrays.asList(values), strict);
}
public static PartitionKeyInternal fromObjectArray(List<Object> values, boolean strict) {
if (values == null) {
throw new IllegalArgumentException("values");
}
List<IPartitionKeyComponent> components = new ArrayList<>();
for (Object value : values) {
if (value == NullNode.instance || value == null) {
components.add(NullPartitionKeyComponent.VALUE);
} else if (value instanceof Undefined) {
components.add(UndefinedPartitionKeyComponent.VALUE);
} else if (value instanceof Boolean) {
components.add(new BoolPartitionKeyComponent((boolean) value));
} else if (value instanceof String) {
components.add(new StringPartitionKeyComponent((String) value));
} else if (isNumeric(value)) {
components.add(new NumberPartitionKeyComponent(((Number) value).doubleValue()));
} else if (value instanceof ObjectNode && ((ObjectNode) value).get(TYPE) != null) {
switch (((ObjectNode) value).get(TYPE).asText()) {
case MIN_NUMBER:
components.add(MinNumberPartitionKeyComponent.VALUE);
break;
case MAX_NUMBER:
components.add(MaxNumberPartitionKeyComponent.VALUE);
break;
case MIN_STRING:
components.add(MinStringPartitionKeyComponent.VALUE);
break;
case MAX_STRING:
components.add(MaxStringPartitionKeyComponent.VALUE);
break;
}
} else {
if (strict) {
throw new IllegalArgumentException("Unable to construct PartitionKeyInternal from objects array");
} else {
components.add(UndefinedPartitionKeyComponent.VALUE);
}
}
}
return new PartitionKeyInternal(components);
}
private static boolean isNumeric(Object value) {
return value instanceof Number;
}
private static PartitionKeyInternal getExclusiveMaximum() {
return PartitionKeyInternal.InfinityPartitionKey;
}
public static PartitionKeyInternal getEmpty() {
return PartitionKeyInternal.EmptyPartitionKey;
}
@Override
public boolean equals(Object obj) {
PartitionKeyInternal pki = as(obj, PartitionKeyInternal.class);
if (pki == null) {
return false;
}
if (pki == this) {
return true;
}
return this.compareTo(pki) == 0;
}
@Override
public int compareTo(PartitionKeyInternal other) {
if (other == null) {
throw new IllegalArgumentException("other");
} else if (other.components == null || this.components == null) {
int otherComponentsCount = other.components == null ? 0 : other.components.size();
int thisComponentsCount = this.components == null ? 0 : this.components.size();
return (int) Math.signum(thisComponentsCount - otherComponentsCount);
}
for (int i = 0; i < Math.min(this.components.size(), other.components.size()); i++) {
int leftOrdinal = this.components.get(i).GetTypeOrdinal();
int rightOrdinal = other.components.get(i).GetTypeOrdinal();
if (leftOrdinal != rightOrdinal) {
return (int) Math.signum(leftOrdinal - rightOrdinal);
}
int result = this.components.get(i).CompareTo(other.components.get(i));
if (result != 0) {
return (int) Math.signum(result);
}
}
return (int) Math.signum(this.components.size() - other.components.size());
}
public String toJson() {
try {
return Utils.getSimpleObjectMapper().writeValueAsString(this);
} catch (IOException e) {
throw new IllegalArgumentException("Unable serialize the partition key internal into the JSON string", e);
}
}
public boolean contains(PartitionKeyInternal nestedPartitionKey) {
if (this.components.size() > nestedPartitionKey.components.size()) {
return false;
}
for (int i = 0; i < this.components.size(); i++) {
if (this.components.get(i).CompareTo(nestedPartitionKey.components.get(i)) != 0) {
return false;
}
}
return true;
}
public List<IPartitionKeyComponent> getComponents() {
return components;
}
public String getEffectivePartitionKeyString(PartitionKeyInternal internalPartitionKey, PartitionKeyDefinition partitionKey) {
return PartitionKeyInternalHelper.getEffectivePartitionKeyString(internalPartitionKey, partitionKey);
}
@SuppressWarnings("serial")
static final class PartitionKeyInternalJsonSerializer extends StdSerializer<PartitionKeyInternal> {
protected PartitionKeyInternalJsonSerializer() { this(null); }
protected PartitionKeyInternalJsonSerializer(Class<PartitionKeyInternal> t) {
super(t);
}
@Override
public void serialize(PartitionKeyInternal partitionKey, JsonGenerator writer, SerializerProvider serializerProvider) {
try {
if (partitionKey.equals(PartitionKeyInternal.getExclusiveMaximum())) {
writer.writeString(INFINITY);
return;
}
writer.writeStartArray();
for (IPartitionKeyComponent componentValue : partitionKey.getComponents()) {
componentValue.JsonEncode(writer);
}
writer.writeEndArray();
} catch (IOException e) {
throw new IllegalStateException(e);
}
}
static void jsonEncode(MinNumberPartitionKeyComponent component, JsonGenerator writer) {
jsonEncodeLimit(writer, MIN_NUMBER);
}
static void jsonEncode(MaxNumberPartitionKeyComponent component, JsonGenerator writer) {
jsonEncodeLimit(writer, MAX_NUMBER);
}
static void jsonEncode(MinStringPartitionKeyComponent component, JsonGenerator writer) {
jsonEncodeLimit(writer, MIN_STRING);
}
static void jsonEncode(MaxStringPartitionKeyComponent component, JsonGenerator writer) {
jsonEncodeLimit(writer, MAX_STRING);
}
private static void jsonEncodeLimit(JsonGenerator writer, String value) {
try {
writer.writeStartObject();
writer.writeFieldName(TYPE);
writer.writeString(value);
writer.writeEndObject();
} catch (IOException e) {
throw new IllegalStateException(e);
}
}
}
@SuppressWarnings("serial")
static final class PartitionKeyInternalJsonDeserializer extends StdDeserializer<PartitionKeyInternal> {
protected PartitionKeyInternalJsonDeserializer() { this(null); }
protected PartitionKeyInternalJsonDeserializer(Class<?> vc) {
super(vc);
}
@Override
public PartitionKeyInternal deserialize(JsonParser jsonParser, DeserializationContext deserializationContext) {
ObjectCodec objectCodec = jsonParser.getCodec();
JsonNode root;
try {
root = objectCodec.readTree(jsonParser);
} catch (IOException e) {
throw new IllegalArgumentException(e);
}
if (root.isTextual() && root.asText().equals(INFINITY)) {
return PartitionKeyInternal.getExclusiveMaximum();
}
List<Object> objects = new ArrayList<>();
if (root.isArray()) {
Iterator<JsonNode> iterator = root.iterator();
while (iterator.hasNext()) {
JsonNode node = iterator.next();
if (node.isNull()) {
objects.add(null);
} else if (node.isNumber()) {
objects.add(node.asDouble());
} else if (node.isBoolean()) {
objects.add(node.asBoolean());
} else if (node.isTextual()) {
objects.add(node.asText());
} else if (node.isArray() && node.size() == 0
|| node.isObject()
&& (node.fields() == null || !node.fields().hasNext())) {
objects.add(Undefined.Value());
} else {
objects.add(node);
}
}
return PartitionKeyInternal.fromObjectArray(objects, true);
}
throw new IllegalStateException(String.format(
"Unable to deserialize PartitionKeyInternal '%s'",
root.toString()));
}
}
} | class PartitionKeyInternal implements Comparable<PartitionKeyInternal> {
private static final String TYPE = "type";
private static final String MIN_NUMBER = "MinNumber";
private static final String MAX_NUMBER = "MaxNumber";
private static final String MIN_STRING = "MinString";
private static final String MAX_STRING = "MaxString";
private static final String INFINITY = "Infinity";
public static final PartitionKeyInternal NonePartitionKey =
new PartitionKeyInternal();
public static final PartitionKeyInternal EmptyPartitionKey =
new PartitionKeyInternal(new ArrayList<>());
@SuppressWarnings("serial")
public static final PartitionKeyInternal InfinityPartitionKey =
new PartitionKeyInternal(new ArrayList<IPartitionKeyComponent>() {{
add(new InfinityPartitionKeyComponent());
}});
@SuppressWarnings("serial")
public static final PartitionKeyInternal UndefinedPartitionKey =
new PartitionKeyInternal(new ArrayList<IPartitionKeyComponent>() {{
add(new UndefinedPartitionKeyComponent());
}});
public static final PartitionKeyInternal InclusiveMinimum = PartitionKeyInternal.EmptyPartitionKey;
public static final PartitionKeyInternal ExclusiveMaximum = PartitionKeyInternal.InfinityPartitionKey;
public static final PartitionKeyInternal Empty = PartitionKeyInternal.EmptyPartitionKey;
public static final PartitionKeyInternal None = PartitionKeyInternal.NonePartitionKey;
final List<IPartitionKeyComponent> components;
public PartitionKeyInternal(List<IPartitionKeyComponent> values) {
if (values == null) {
throw new IllegalArgumentException("values");
}
this.components = values;
}
public PartitionKeyInternal() {
this.components = null;
}
public static PartitionKeyInternal fromJsonString(String partitionKey) {
if (Strings.isNullOrEmpty(partitionKey)) {
throw new IllegalArgumentException(String.format(RMResources.UnableToDeserializePartitionKeyValue, partitionKey));
}
try {
return Utils.getSimpleObjectMapper().readValue(partitionKey, PartitionKeyInternal.class);
} catch (IOException e) {
throw new IllegalArgumentException(e);
}
}
public static PartitionKeyInternal fromObjectArray(Object[] values, boolean strict) {
if (values == null) {
throw new IllegalArgumentException("values");
}
return PartitionKeyInternal.fromObjectArray(Arrays.asList(values), strict);
}
public static PartitionKeyInternal fromObjectArray(List<Object> values, boolean strict) {
if (values == null) {
throw new IllegalArgumentException("values");
}
List<IPartitionKeyComponent> components = new ArrayList<>();
for (Object value : values) {
if (value == NullNode.instance || value == null) {
components.add(NullPartitionKeyComponent.VALUE);
} else if (value instanceof Undefined) {
components.add(UndefinedPartitionKeyComponent.VALUE);
} else if (value instanceof Boolean) {
components.add(new BoolPartitionKeyComponent((boolean) value));
} else if (value instanceof String) {
components.add(new StringPartitionKeyComponent((String) value));
} else if (isNumeric(value)) {
components.add(new NumberPartitionKeyComponent(((Number) value).doubleValue()));
} else if (value instanceof ObjectNode && ((ObjectNode) value).get(TYPE) != null) {
switch (((ObjectNode) value).get(TYPE).asText()) {
case MIN_NUMBER:
components.add(MinNumberPartitionKeyComponent.VALUE);
break;
case MAX_NUMBER:
components.add(MaxNumberPartitionKeyComponent.VALUE);
break;
case MIN_STRING:
components.add(MinStringPartitionKeyComponent.VALUE);
break;
case MAX_STRING:
components.add(MaxStringPartitionKeyComponent.VALUE);
break;
}
} else {
if (strict) {
throw new IllegalArgumentException("Unable to construct PartitionKeyInternal from objects array");
} else {
components.add(UndefinedPartitionKeyComponent.VALUE);
}
}
}
return new PartitionKeyInternal(components);
}
private static boolean isNumeric(Object value) {
return value instanceof Number;
}
private static PartitionKeyInternal getExclusiveMaximum() {
return PartitionKeyInternal.InfinityPartitionKey;
}
public static PartitionKeyInternal getEmpty() {
return PartitionKeyInternal.EmptyPartitionKey;
}
@Override
public boolean equals(Object obj) {
PartitionKeyInternal pki = as(obj, PartitionKeyInternal.class);
if (pki == null) {
return false;
}
if (pki == this) {
return true;
}
return this.compareTo(pki) == 0;
}
@Override
public int compareTo(PartitionKeyInternal other) {
if (other == null) {
throw new IllegalArgumentException("other");
} else if (other.components == null || this.components == null) {
int otherComponentsCount = other.components == null ? 0 : other.components.size();
int thisComponentsCount = this.components == null ? 0 : this.components.size();
return (int) Math.signum(thisComponentsCount - otherComponentsCount);
}
for (int i = 0; i < Math.min(this.components.size(), other.components.size()); i++) {
int leftOrdinal = this.components.get(i).GetTypeOrdinal();
int rightOrdinal = other.components.get(i).GetTypeOrdinal();
if (leftOrdinal != rightOrdinal) {
return (int) Math.signum(leftOrdinal - rightOrdinal);
}
int result = this.components.get(i).CompareTo(other.components.get(i));
if (result != 0) {
return (int) Math.signum(result);
}
}
return (int) Math.signum(this.components.size() - other.components.size());
}
public String toJson() {
try {
return Utils.getSimpleObjectMapper().writeValueAsString(this);
} catch (IOException e) {
throw new IllegalArgumentException("Unable serialize the partition key internal into the JSON string", e);
}
}
public boolean contains(PartitionKeyInternal nestedPartitionKey) {
if (this.components.size() > nestedPartitionKey.components.size()) {
return false;
}
for (int i = 0; i < this.components.size(); i++) {
if (this.components.get(i).CompareTo(nestedPartitionKey.components.get(i)) != 0) {
return false;
}
}
return true;
}
public List<IPartitionKeyComponent> getComponents() {
return components;
}
public String getEffectivePartitionKeyString(PartitionKeyInternal internalPartitionKey, PartitionKeyDefinition partitionKey) {
return PartitionKeyInternalHelper.getEffectivePartitionKeyString(internalPartitionKey, partitionKey);
}
@SuppressWarnings("serial")
static final class PartitionKeyInternalJsonSerializer extends StdSerializer<PartitionKeyInternal> {
protected PartitionKeyInternalJsonSerializer() { this(null); }
protected PartitionKeyInternalJsonSerializer(Class<PartitionKeyInternal> t) {
super(t);
}
@Override
public void serialize(PartitionKeyInternal partitionKey, JsonGenerator writer, SerializerProvider serializerProvider) {
try {
if (partitionKey.equals(PartitionKeyInternal.getExclusiveMaximum())) {
writer.writeString(INFINITY);
return;
}
writer.writeStartArray();
for (IPartitionKeyComponent componentValue : partitionKey.getComponents()) {
componentValue.JsonEncode(writer);
}
writer.writeEndArray();
} catch (IOException e) {
throw new IllegalStateException(e);
}
}
static void jsonEncode(MinNumberPartitionKeyComponent component, JsonGenerator writer) {
jsonEncodeLimit(writer, MIN_NUMBER);
}
static void jsonEncode(MaxNumberPartitionKeyComponent component, JsonGenerator writer) {
jsonEncodeLimit(writer, MAX_NUMBER);
}
static void jsonEncode(MinStringPartitionKeyComponent component, JsonGenerator writer) {
jsonEncodeLimit(writer, MIN_STRING);
}
static void jsonEncode(MaxStringPartitionKeyComponent component, JsonGenerator writer) {
jsonEncodeLimit(writer, MAX_STRING);
}
private static void jsonEncodeLimit(JsonGenerator writer, String value) {
try {
writer.writeStartObject();
writer.writeFieldName(TYPE);
writer.writeString(value);
writer.writeEndObject();
} catch (IOException e) {
throw new IllegalStateException(e);
}
}
}
@SuppressWarnings("serial")
static final class PartitionKeyInternalJsonDeserializer extends StdDeserializer<PartitionKeyInternal> {
protected PartitionKeyInternalJsonDeserializer() { this(null); }
protected PartitionKeyInternalJsonDeserializer(Class<?> vc) {
super(vc);
}
@Override
public PartitionKeyInternal deserialize(JsonParser jsonParser, DeserializationContext deserializationContext) {
ObjectCodec objectCodec = jsonParser.getCodec();
JsonNode root;
try {
root = objectCodec.readTree(jsonParser);
} catch (IOException e) {
throw new IllegalArgumentException(e);
}
if (root.isTextual() && root.asText().equals(INFINITY)) {
return PartitionKeyInternal.getExclusiveMaximum();
}
List<Object> objects = new ArrayList<>();
if (root.isArray()) {
Iterator<JsonNode> iterator = root.iterator();
while (iterator.hasNext()) {
JsonNode node = iterator.next();
if (node.isNull()) {
objects.add(null);
} else if (node.isNumber()) {
objects.add(node.asDouble());
} else if (node.isBoolean()) {
objects.add(node.asBoolean());
} else if (node.isTextual()) {
objects.add(node.asText());
} else if (node.isArray() && node.size() == 0
|| node.isObject()
&& (node.fields() == null || !node.fields().hasNext())) {
objects.add(Undefined.Value());
} else {
objects.add(node);
}
}
return PartitionKeyInternal.fromObjectArray(objects, true);
}
throw new IllegalStateException(String.format(
"Unable to deserialize PartitionKeyInternal '%s'",
root.toString()));
}
}
} |
I think this should have T and `Class<T>` instead of Object | public static CosmosAsyncItemResponse<Object> createCosmosAsyncItemResponseWithObjectType(ResourceResponse<Document> response) {
return new CosmosAsyncItemResponse<>(response, Object.class);
} | return new CosmosAsyncItemResponse<>(response, Object.class); | public static CosmosAsyncItemResponse<Object> createCosmosAsyncItemResponseWithObjectType(ResourceResponse<Document> response) {
return new CosmosAsyncItemResponse<>(response, Object.class);
} | class ModelBridgeInternal {
public static CosmosAsyncConflictResponse createCosmosAsyncConflictResponse(ResourceResponse<Conflict> response,
CosmosAsyncContainer container) {
return new CosmosAsyncConflictResponse(response, container);
}
public static CosmosAsyncContainerResponse createCosmosAsyncContainerResponse(ResourceResponse<DocumentCollection> response,
CosmosAsyncDatabase database) {
return new CosmosAsyncContainerResponse(response, database);
}
public static CosmosAsyncDatabaseResponse createCosmosAsyncDatabaseResponse(ResourceResponse<Database> response,
CosmosAsyncClient client) {
return new CosmosAsyncDatabaseResponse(response, client);
}
public static <T> CosmosAsyncItemResponse<T> createCosmosAsyncItemResponse(ResourceResponse<Document> response, Class<T> classType) {
return new CosmosAsyncItemResponse<>(response, classType);
}
public static CosmosAsyncPermissionResponse createCosmosAsyncPermissionResponse(ResourceResponse<Permission> response,
CosmosAsyncUser cosmosUser) {
return new CosmosAsyncPermissionResponse(response, cosmosUser);
}
public static CosmosAsyncStoredProcedureResponse createCosmosAsyncStoredProcedureResponse(ResourceResponse<StoredProcedure> response,
CosmosAsyncContainer cosmosContainer) {
return new CosmosAsyncStoredProcedureResponse(response, cosmosContainer);
}
public static CosmosAsyncStoredProcedureResponse createCosmosAsyncStoredProcedureResponse(StoredProcedureResponse response,
CosmosAsyncContainer cosmosContainer,
String storedProcedureId) {
return new CosmosAsyncStoredProcedureResponse(response, cosmosContainer, storedProcedureId);
}
public static CosmosStoredProcedureProperties createCosmosStoredProcedureProperties(String jsonString) {
return new CosmosStoredProcedureProperties(jsonString);
}
public static CosmosAsyncTriggerResponse createCosmosAsyncTriggerResponse(ResourceResponse<Trigger> response,
CosmosAsyncContainer container) {
return new CosmosAsyncTriggerResponse(response, container);
}
public static CosmosAsyncUserDefinedFunctionResponse createCosmosAsyncUserDefinedFunctionResponse(ResourceResponse<UserDefinedFunction> response,
CosmosAsyncContainer container) {
return new CosmosAsyncUserDefinedFunctionResponse(response, container);
}
public static CosmosAsyncUserResponse createCosmosAsyncUserResponse(ResourceResponse<User> response, CosmosAsyncDatabase database) {
return new CosmosAsyncUserResponse(response, database);
}
public static CosmosContainerResponse createCosmosContainerResponse(CosmosAsyncContainerResponse response,
CosmosDatabase database, CosmosClient client) {
return new CosmosContainerResponse(response, database, client);
}
public static CosmosUserResponse createCosmosUserResponse(CosmosAsyncUserResponse response, CosmosDatabase database) {
return new CosmosUserResponse(response, database);
}
public static <T> CosmosItemResponse<T> createCosmosItemResponse(CosmosAsyncItemResponse<T> response) {
return new CosmosItemResponse<>(response);
}
public static CosmosDatabaseResponse createCosmosDatabaseResponse(CosmosAsyncDatabaseResponse response, CosmosClient client) {
return new CosmosDatabaseResponse(response, client);
}
public static CosmosStoredProcedureResponse createCosmosStoredProcedureResponse(CosmosAsyncStoredProcedureResponse resourceResponse,
CosmosStoredProcedure storedProcedure) {
return new CosmosStoredProcedureResponse(resourceResponse, storedProcedure);
}
public static CosmosUserDefinedFunctionResponse createCosmosUserDefinedFunctionResponse(CosmosAsyncUserDefinedFunctionResponse resourceResponse,
CosmosUserDefinedFunction userDefinedFunction) {
return new CosmosUserDefinedFunctionResponse(resourceResponse, userDefinedFunction);
}
public static CosmosTriggerResponse createCosmosTriggerResponse(CosmosAsyncTriggerResponse asyncResponse,
CosmosTrigger syncTrigger) {
return new CosmosTriggerResponse(asyncResponse, syncTrigger);
}
public static List<CosmosConflictProperties> getCosmosConflictPropertiesFromV2Results(List<Conflict> results) {
return CosmosConflictProperties.getFromV2Results(results);
}
public static DocumentCollection getV2Collection(CosmosContainerProperties containerProperties) {
return containerProperties.getV2Collection();
}
public static List<CosmosContainerProperties> getCosmosContainerPropertiesFromV2Results(List<DocumentCollection> results) {
return CosmosContainerProperties.getFromV2Results(results);
}
public static List<CosmosDatabaseProperties> getCosmosDatabasePropertiesFromV2Results(List<Database> results) {
return CosmosDatabaseProperties.getFromV2Results(results);
}
public static <T> CosmosItemProperties getCosmosItemProperties(CosmosAsyncItemResponse<T> cosmosItemResponse) {
return cosmosItemResponse.getProperties();
}
public static <T> CosmosItemProperties getCosmosItemProperties(CosmosItemResponse<T> cosmosItemResponse) {
return cosmosItemResponse.getProperties();
}
public static Permission getV2Permissions(CosmosPermissionProperties permissionSettings) {
return permissionSettings.getV2Permissions();
}
public static List<CosmosPermissionProperties> getCosmosPermissionPropertiesFromV2Results(List<Permission> results) {
return CosmosPermissionProperties.getFromV2Results(results);
}
public static List<CosmosStoredProcedureProperties> getCosmosStoredProcedurePropertiesFromV2Results(List<StoredProcedure> results) {
return CosmosStoredProcedureProperties.getFromV2Results(results);
}
public static List<CosmosTriggerProperties> getCosmosTriggerPropertiesFromV2Results(List<Trigger> results) {
return CosmosTriggerProperties.getFromV2Results(results);
}
public static List<CosmosUserDefinedFunctionProperties> getCosmosUserDefinedFunctionPropertiesFromV2Results(List<UserDefinedFunction> results) {
return CosmosUserDefinedFunctionProperties.getFromV2Results(results);
}
public static User getV2User(CosmosUserProperties cosmosUserProperties) {
return cosmosUserProperties.getV2User();
}
public static List<CosmosUserProperties> getCosmosUserPropertiesFromV2Results(List<User> results) {
return CosmosUserProperties.getFromV2Results(results);
}
public static RequestOptions toRequestOptions(CosmosConflictRequestOptions cosmosConflictRequestOptions) {
return cosmosConflictRequestOptions.toRequestOptions();
}
public static RequestOptions toRequestOptions(CosmosContainerRequestOptions cosmosContainerRequestOptions) {
return cosmosContainerRequestOptions.toRequestOptions();
}
public static CosmosContainerRequestOptions setOfferThroughput(CosmosContainerRequestOptions cosmosContainerRequestOptions,
Integer offerThroughput) {
return cosmosContainerRequestOptions.setOfferThroughput(offerThroughput);
}
public static RequestOptions toRequestOptions(CosmosDatabaseRequestOptions cosmosDatabaseRequestOptions) {
return cosmosDatabaseRequestOptions.toRequestOptions();
}
public static CosmosDatabaseRequestOptions setOfferThroughput(CosmosDatabaseRequestOptions cosmosDatabaseRequestOptions,
Integer offerThroughput) {
return cosmosDatabaseRequestOptions.setOfferThroughput(offerThroughput);
}
public static CosmosItemRequestOptions setPartitionKey(CosmosItemRequestOptions cosmosItemRequestOptions,
PartitionKey partitionKey) {
return cosmosItemRequestOptions.setPartitionKey(partitionKey);
}
public static RequestOptions toRequestOptions(CosmosItemRequestOptions cosmosItemRequestOptions) {
return cosmosItemRequestOptions.toRequestOptions();
}
public static CosmosItemRequestOptions createCosmosItemRequestOptions(PartitionKey partitionKey) {
return new CosmosItemRequestOptions(partitionKey);
}
public static RequestOptions toRequestOptions(CosmosPermissionRequestOptions cosmosPermissionRequestOptions) {
return cosmosPermissionRequestOptions.toRequestOptions();
}
public static RequestOptions toRequestOptions(CosmosStoredProcedureRequestOptions cosmosStoredProcedureRequestOptions) {
return cosmosStoredProcedureRequestOptions.toRequestOptions();
}
public static String getAddressesLink(DatabaseAccount databaseAccount) {
return databaseAccount.getAddressesLink();
}
public static DatabaseAccount toDatabaseAccount(RxDocumentServiceResponse response) {
DatabaseAccount account = response.getResource(DatabaseAccount.class);
Map<String, String> responseHeader = response.getResponseHeaders();
account.setMaxMediaStorageUsageInMB(
Long.valueOf(responseHeader.get(HttpConstants.HttpHeaders.MAX_MEDIA_STORAGE_USAGE_IN_MB)));
account.setMediaStorageUsageInMB(
Long.valueOf(responseHeader.get(HttpConstants.HttpHeaders.CURRENT_MEDIA_STORAGE_USAGE_IN_MB)));
return account;
}
public static Map<String, Object> getQueryEngineConfiuration(DatabaseAccount databaseAccount) {
return databaseAccount.getQueryEngineConfiguration();
}
public static ReplicationPolicy getReplicationPolicy(DatabaseAccount databaseAccount) {
return databaseAccount.getReplicationPolicy();
}
public static ReplicationPolicy getSystemReplicationPolicy(DatabaseAccount databaseAccount) {
return databaseAccount.getSystemReplicationPolicy();
}
public static ConsistencyPolicy getConsistencyPolicy(DatabaseAccount databaseAccount) {
return databaseAccount.getConsistencyPolicy();
}
/**
* Gets the partitionKeyRangeId.
*
* @param options the feed options
* @return the partitionKeyRangeId.
*/
public static String partitionKeyRangeIdInternal(FeedOptions options) {
return options.getPartitionKeyRangeIdInternal();
}
/**
* Sets the PartitionKeyRangeId.
*
* @param options the feed options
* @param partitionKeyRangeId the partition key range id
* @return the partitionKeyRangeId.
*/
public static FeedOptions partitionKeyRangeIdInternal(FeedOptions options, String partitionKeyRangeId) {
return options.setPartitionKeyRangeIdInternal(partitionKeyRangeId);
}
public static <T extends Resource> FeedResponse<T> toFeedResponsePage(RxDocumentServiceResponse response,
Class<T> cls) {
return new FeedResponse<T>(response.getQueryResponse(cls), response.getResponseHeaders());
}
public static <T> FeedResponse<T> toFeedResponsePage(List<T> results, Map<String, String> headers, boolean noChanges) {
return new FeedResponse<>(results, headers, noChanges);
}
public static <T extends Resource> FeedResponse<T> toChaneFeedResponsePage(RxDocumentServiceResponse response,
Class<T> cls) {
return new FeedResponse<T>(noChanges(response) ? Collections.emptyList() : response.getQueryResponse(cls),
response.getResponseHeaders(), noChanges(response));
}
public static <T extends Resource> boolean noChanges(FeedResponse<T> page) {
return page.nochanges;
}
public static <T extends Resource> boolean noChanges(RxDocumentServiceResponse rsp) {
return rsp.getStatusCode() == HttpConstants.StatusCodes.NOT_MODIFIED;
}
public static <T> FeedResponse<T> createFeedResponse(List<T> results,
Map<String, String> headers) {
return new FeedResponse<>(results, headers);
}
public static <T> FeedResponse<T> createFeedResponseWithQueryMetrics(List<T> results,
Map<String, String> headers, ConcurrentMap<String, QueryMetrics> queryMetricsMap) {
return new FeedResponse<>(results, headers, queryMetricsMap);
}
public static <T> ConcurrentMap<String, QueryMetrics> queryMetricsMap(FeedResponse<T> feedResponse) {
return feedResponse.queryMetricsMap();
}
public static <T> ConcurrentMap<String, QueryMetrics> queryMetrics(FeedResponse<T> feedResponse) {
return feedResponse.queryMetrics();
}
public static String toLower(RequestVerb verb) {
return verb.toLowerCase();
}
public static boolean isV2(PartitionKeyDefinition pkd) {
return pkd.getVersion() != null && PartitionKeyDefinitionVersion.V2.val == pkd.getVersion().val;
}
public static PartitionKeyInternal getNonePartitionKey(PartitionKeyDefinition partitionKeyDefinition) {
return partitionKeyDefinition.getNonePartitionKeyValue();
}
public static PartitionKeyInternal getPartitionKeyInternal(PartitionKey partitionKey) {
return partitionKey.getInternalPartitionKey();
}
public static PartitionKey partitionKeyfromJsonString(String jsonString) {
return PartitionKey.fromJsonString(jsonString);
}
public static Object getPartitionKeyObject(PartitionKey right) {
return right.getKeyObject();
}
public static String getAltLink(Resource resource) {
return resource.getAltLink();
}
public static void setAltLink(Resource resource, String altLink) {
resource.setAltLink(altLink);
}
public static void setResourceSelfLink(Resource resource, String selfLink) {
resource.setSelfLink(selfLink);
}
public static void setTimestamp(Resource resource, OffsetDateTime date) {
resource.setTimestamp(date);
}
public static void validateResource(Resource resource) {
Resource.validateResource(resource);
}
public static <T> void setProperty(JsonSerializable jsonSerializable, String propertyName, T value) {
jsonSerializable.set(propertyName, value);
}
public static ObjectNode getObject(JsonSerializable jsonSerializable, String propertyName) {
return jsonSerializable.getObject(propertyName);
}
public static void remove(JsonSerializable jsonSerializable, String propertyName) {
jsonSerializable.remove(propertyName);
}
public static Object getValue(JsonNode value) {
return JsonSerializable.getValue(value);
}
public static CosmosError createCosmosError(ObjectNode objectNode) {
return new CosmosError(objectNode);
}
public static CosmosError createCosmosError(String jsonString) {
return new CosmosError(jsonString);
}
public static void populatePropertyBagJsonSerializable(JsonSerializable jsonSerializable) {
jsonSerializable.populatePropertyBag();
}
public static JsonSerializable instantiateJsonSerializable(ObjectNode objectNode, Class<?> klassType) {
try {
if (klassType.equals(Document.class) || klassType.equals(OrderByRowResult.class) || klassType.equals(CosmosItemProperties.class)
|| klassType.equals(PartitionKeyRange.class) || klassType.equals(Range.class)
|| klassType.equals(QueryInfo.class) || klassType.equals(PartitionedQueryExecutionInfoInternal.class)
|| klassType.equals(QueryItem.class)
|| klassType.equals(Address.class)
|| klassType.equals(DatabaseAccount.class) || klassType.equals(DatabaseAccountLocation.class)
|| klassType.equals(ReplicationPolicy.class) || klassType.equals(ConsistencyPolicy.class)
|| klassType.equals(DocumentCollection.class) || klassType.equals(Database.class)) {
return (JsonSerializable) klassType.getDeclaredConstructor(ObjectNode.class).newInstance(objectNode);
} else {
return (JsonSerializable) klassType.getDeclaredConstructor(String.class).newInstance(Utils.toJson(Utils.getSimpleObjectMapper(), objectNode));
}
} catch (InstantiationException | IllegalAccessException | InvocationTargetException | NoSuchMethodException | IllegalArgumentException e) {
throw new IllegalArgumentException(e);
}
}
} | class ModelBridgeInternal {
public static CosmosAsyncConflictResponse createCosmosAsyncConflictResponse(ResourceResponse<Conflict> response,
CosmosAsyncContainer container) {
return new CosmosAsyncConflictResponse(response, container);
}
public static CosmosAsyncContainerResponse createCosmosAsyncContainerResponse(ResourceResponse<DocumentCollection> response,
CosmosAsyncDatabase database) {
return new CosmosAsyncContainerResponse(response, database);
}
public static CosmosAsyncDatabaseResponse createCosmosAsyncDatabaseResponse(ResourceResponse<Database> response,
CosmosAsyncClient client) {
return new CosmosAsyncDatabaseResponse(response, client);
}
public static <T> CosmosAsyncItemResponse<T> createCosmosAsyncItemResponse(ResourceResponse<Document> response, Class<T> classType) {
return new CosmosAsyncItemResponse<>(response, classType);
}
public static CosmosAsyncPermissionResponse createCosmosAsyncPermissionResponse(ResourceResponse<Permission> response,
CosmosAsyncUser cosmosUser) {
return new CosmosAsyncPermissionResponse(response, cosmosUser);
}
public static CosmosAsyncStoredProcedureResponse createCosmosAsyncStoredProcedureResponse(ResourceResponse<StoredProcedure> response,
CosmosAsyncContainer cosmosContainer) {
return new CosmosAsyncStoredProcedureResponse(response, cosmosContainer);
}
public static CosmosAsyncStoredProcedureResponse createCosmosAsyncStoredProcedureResponse(StoredProcedureResponse response,
CosmosAsyncContainer cosmosContainer,
String storedProcedureId) {
return new CosmosAsyncStoredProcedureResponse(response, cosmosContainer, storedProcedureId);
}
public static CosmosStoredProcedureProperties createCosmosStoredProcedureProperties(String jsonString) {
return new CosmosStoredProcedureProperties(jsonString);
}
public static CosmosAsyncTriggerResponse createCosmosAsyncTriggerResponse(ResourceResponse<Trigger> response,
CosmosAsyncContainer container) {
return new CosmosAsyncTriggerResponse(response, container);
}
public static CosmosAsyncUserDefinedFunctionResponse createCosmosAsyncUserDefinedFunctionResponse(ResourceResponse<UserDefinedFunction> response,
CosmosAsyncContainer container) {
return new CosmosAsyncUserDefinedFunctionResponse(response, container);
}
public static CosmosAsyncUserResponse createCosmosAsyncUserResponse(ResourceResponse<User> response, CosmosAsyncDatabase database) {
return new CosmosAsyncUserResponse(response, database);
}
public static CosmosContainerResponse createCosmosContainerResponse(CosmosAsyncContainerResponse response,
CosmosDatabase database, CosmosClient client) {
return new CosmosContainerResponse(response, database, client);
}
public static CosmosUserResponse createCosmosUserResponse(CosmosAsyncUserResponse response, CosmosDatabase database) {
return new CosmosUserResponse(response, database);
}
public static <T> CosmosItemResponse<T> createCosmosItemResponse(CosmosAsyncItemResponse<T> response) {
return new CosmosItemResponse<>(response);
}
public static CosmosDatabaseResponse createCosmosDatabaseResponse(CosmosAsyncDatabaseResponse response, CosmosClient client) {
return new CosmosDatabaseResponse(response, client);
}
public static CosmosStoredProcedureResponse createCosmosStoredProcedureResponse(CosmosAsyncStoredProcedureResponse resourceResponse,
CosmosStoredProcedure storedProcedure) {
return new CosmosStoredProcedureResponse(resourceResponse, storedProcedure);
}
public static CosmosUserDefinedFunctionResponse createCosmosUserDefinedFunctionResponse(CosmosAsyncUserDefinedFunctionResponse resourceResponse,
CosmosUserDefinedFunction userDefinedFunction) {
return new CosmosUserDefinedFunctionResponse(resourceResponse, userDefinedFunction);
}
public static CosmosTriggerResponse createCosmosTriggerResponse(CosmosAsyncTriggerResponse asyncResponse,
CosmosTrigger syncTrigger) {
return new CosmosTriggerResponse(asyncResponse, syncTrigger);
}
public static List<CosmosConflictProperties> getCosmosConflictPropertiesFromV2Results(List<Conflict> results) {
return CosmosConflictProperties.getFromV2Results(results);
}
public static DocumentCollection getV2Collection(CosmosContainerProperties containerProperties) {
return containerProperties.getV2Collection();
}
public static List<CosmosContainerProperties> getCosmosContainerPropertiesFromV2Results(List<DocumentCollection> results) {
return CosmosContainerProperties.getFromV2Results(results);
}
public static List<CosmosDatabaseProperties> getCosmosDatabasePropertiesFromV2Results(List<Database> results) {
return CosmosDatabaseProperties.getFromV2Results(results);
}
public static <T> CosmosItemProperties getCosmosItemProperties(CosmosAsyncItemResponse<T> cosmosItemResponse) {
return cosmosItemResponse.getProperties();
}
public static <T> CosmosItemProperties getCosmosItemProperties(CosmosItemResponse<T> cosmosItemResponse) {
return cosmosItemResponse.getProperties();
}
public static Permission getV2Permissions(CosmosPermissionProperties permissionSettings) {
return permissionSettings.getV2Permissions();
}
public static List<CosmosPermissionProperties> getCosmosPermissionPropertiesFromV2Results(List<Permission> results) {
return CosmosPermissionProperties.getFromV2Results(results);
}
public static List<CosmosStoredProcedureProperties> getCosmosStoredProcedurePropertiesFromV2Results(List<StoredProcedure> results) {
return CosmosStoredProcedureProperties.getFromV2Results(results);
}
public static List<CosmosTriggerProperties> getCosmosTriggerPropertiesFromV2Results(List<Trigger> results) {
return CosmosTriggerProperties.getFromV2Results(results);
}
public static List<CosmosUserDefinedFunctionProperties> getCosmosUserDefinedFunctionPropertiesFromV2Results(List<UserDefinedFunction> results) {
return CosmosUserDefinedFunctionProperties.getFromV2Results(results);
}
public static User getV2User(CosmosUserProperties cosmosUserProperties) {
return cosmosUserProperties.getV2User();
}
public static List<CosmosUserProperties> getCosmosUserPropertiesFromV2Results(List<User> results) {
return CosmosUserProperties.getFromV2Results(results);
}
public static RequestOptions toRequestOptions(CosmosConflictRequestOptions cosmosConflictRequestOptions) {
return cosmosConflictRequestOptions.toRequestOptions();
}
public static RequestOptions toRequestOptions(CosmosContainerRequestOptions cosmosContainerRequestOptions) {
return cosmosContainerRequestOptions.toRequestOptions();
}
public static CosmosContainerRequestOptions setOfferThroughput(CosmosContainerRequestOptions cosmosContainerRequestOptions,
Integer offerThroughput) {
return cosmosContainerRequestOptions.setOfferThroughput(offerThroughput);
}
public static RequestOptions toRequestOptions(CosmosDatabaseRequestOptions cosmosDatabaseRequestOptions) {
return cosmosDatabaseRequestOptions.toRequestOptions();
}
public static CosmosDatabaseRequestOptions setOfferThroughput(CosmosDatabaseRequestOptions cosmosDatabaseRequestOptions,
Integer offerThroughput) {
return cosmosDatabaseRequestOptions.setOfferThroughput(offerThroughput);
}
public static CosmosItemRequestOptions setPartitionKey(CosmosItemRequestOptions cosmosItemRequestOptions,
PartitionKey partitionKey) {
return cosmosItemRequestOptions.setPartitionKey(partitionKey);
}
public static RequestOptions toRequestOptions(CosmosItemRequestOptions cosmosItemRequestOptions) {
return cosmosItemRequestOptions.toRequestOptions();
}
public static CosmosItemRequestOptions createCosmosItemRequestOptions(PartitionKey partitionKey) {
return new CosmosItemRequestOptions(partitionKey);
}
public static RequestOptions toRequestOptions(CosmosPermissionRequestOptions cosmosPermissionRequestOptions) {
return cosmosPermissionRequestOptions.toRequestOptions();
}
public static RequestOptions toRequestOptions(CosmosStoredProcedureRequestOptions cosmosStoredProcedureRequestOptions) {
return cosmosStoredProcedureRequestOptions.toRequestOptions();
}
public static String getAddressesLink(DatabaseAccount databaseAccount) {
return databaseAccount.getAddressesLink();
}
public static DatabaseAccount toDatabaseAccount(RxDocumentServiceResponse response) {
DatabaseAccount account = response.getResource(DatabaseAccount.class);
Map<String, String> responseHeader = response.getResponseHeaders();
account.setMaxMediaStorageUsageInMB(
Long.valueOf(responseHeader.get(HttpConstants.HttpHeaders.MAX_MEDIA_STORAGE_USAGE_IN_MB)));
account.setMediaStorageUsageInMB(
Long.valueOf(responseHeader.get(HttpConstants.HttpHeaders.CURRENT_MEDIA_STORAGE_USAGE_IN_MB)));
return account;
}
public static Map<String, Object> getQueryEngineConfiuration(DatabaseAccount databaseAccount) {
return databaseAccount.getQueryEngineConfiguration();
}
public static ReplicationPolicy getReplicationPolicy(DatabaseAccount databaseAccount) {
return databaseAccount.getReplicationPolicy();
}
public static ReplicationPolicy getSystemReplicationPolicy(DatabaseAccount databaseAccount) {
return databaseAccount.getSystemReplicationPolicy();
}
public static ConsistencyPolicy getConsistencyPolicy(DatabaseAccount databaseAccount) {
return databaseAccount.getConsistencyPolicy();
}
/**
* Gets the partitionKeyRangeId.
*
* @param options the feed options
* @return the partitionKeyRangeId.
*/
public static String partitionKeyRangeIdInternal(FeedOptions options) {
return options.getPartitionKeyRangeIdInternal();
}
/**
* Sets the PartitionKeyRangeId.
*
* @param options the feed options
* @param partitionKeyRangeId the partition key range id
* @return the partitionKeyRangeId.
*/
public static FeedOptions partitionKeyRangeIdInternal(FeedOptions options, String partitionKeyRangeId) {
return options.setPartitionKeyRangeIdInternal(partitionKeyRangeId);
}
public static <T extends Resource> FeedResponse<T> toFeedResponsePage(RxDocumentServiceResponse response,
Class<T> cls) {
return new FeedResponse<T>(response.getQueryResponse(cls), response.getResponseHeaders());
}
public static <T> FeedResponse<T> toFeedResponsePage(List<T> results, Map<String, String> headers, boolean noChanges) {
return new FeedResponse<>(results, headers, noChanges);
}
public static <T extends Resource> FeedResponse<T> toChaneFeedResponsePage(RxDocumentServiceResponse response,
Class<T> cls) {
return new FeedResponse<T>(noChanges(response) ? Collections.emptyList() : response.getQueryResponse(cls),
response.getResponseHeaders(), noChanges(response));
}
public static <T extends Resource> boolean noChanges(FeedResponse<T> page) {
return page.nochanges;
}
public static <T extends Resource> boolean noChanges(RxDocumentServiceResponse rsp) {
return rsp.getStatusCode() == HttpConstants.StatusCodes.NOT_MODIFIED;
}
public static <T> FeedResponse<T> createFeedResponse(List<T> results,
Map<String, String> headers) {
return new FeedResponse<>(results, headers);
}
public static <T> FeedResponse<T> createFeedResponseWithQueryMetrics(List<T> results,
Map<String, String> headers, ConcurrentMap<String, QueryMetrics> queryMetricsMap) {
return new FeedResponse<>(results, headers, queryMetricsMap);
}
public static <T> ConcurrentMap<String, QueryMetrics> queryMetricsMap(FeedResponse<T> feedResponse) {
return feedResponse.queryMetricsMap();
}
public static <T> ConcurrentMap<String, QueryMetrics> queryMetrics(FeedResponse<T> feedResponse) {
return feedResponse.queryMetrics();
}
public static String toLower(RequestVerb verb) {
return verb.toLowerCase();
}
public static boolean isV2(PartitionKeyDefinition pkd) {
return pkd.getVersion() != null && PartitionKeyDefinitionVersion.V2.val == pkd.getVersion().val;
}
public static PartitionKeyInternal getNonePartitionKey(PartitionKeyDefinition partitionKeyDefinition) {
return partitionKeyDefinition.getNonePartitionKeyValue();
}
public static PartitionKeyInternal getPartitionKeyInternal(PartitionKey partitionKey) {
return partitionKey.getInternalPartitionKey();
}
public static PartitionKey partitionKeyfromJsonString(String jsonString) {
return PartitionKey.fromJsonString(jsonString);
}
public static Object getPartitionKeyObject(PartitionKey right) {
return right.getKeyObject();
}
public static String getAltLink(Resource resource) {
return resource.getAltLink();
}
public static void setAltLink(Resource resource, String altLink) {
resource.setAltLink(altLink);
}
public static void setResourceSelfLink(Resource resource, String selfLink) {
resource.setSelfLink(selfLink);
}
public static void setTimestamp(Resource resource, OffsetDateTime date) {
resource.setTimestamp(date);
}
public static void validateResource(Resource resource) {
Resource.validateResource(resource);
}
public static <T> void setProperty(JsonSerializable jsonSerializable, String propertyName, T value) {
jsonSerializable.set(propertyName, value);
}
public static ObjectNode getObject(JsonSerializable jsonSerializable, String propertyName) {
return jsonSerializable.getObject(propertyName);
}
public static void remove(JsonSerializable jsonSerializable, String propertyName) {
jsonSerializable.remove(propertyName);
}
public static Object getValue(JsonNode value) {
return JsonSerializable.getValue(value);
}
public static CosmosError createCosmosError(ObjectNode objectNode) {
return new CosmosError(objectNode);
}
public static CosmosError createCosmosError(String jsonString) {
return new CosmosError(jsonString);
}
public static void populatePropertyBagJsonSerializable(JsonSerializable jsonSerializable) {
jsonSerializable.populatePropertyBag();
}
public static JsonSerializable instantiateJsonSerializable(ObjectNode objectNode, Class<?> klassType) {
try {
if (klassType.equals(Document.class) || klassType.equals(OrderByRowResult.class) || klassType.equals(CosmosItemProperties.class)
|| klassType.equals(PartitionKeyRange.class) || klassType.equals(Range.class)
|| klassType.equals(QueryInfo.class) || klassType.equals(PartitionedQueryExecutionInfoInternal.class)
|| klassType.equals(QueryItem.class)
|| klassType.equals(Address.class)
|| klassType.equals(DatabaseAccount.class) || klassType.equals(DatabaseAccountLocation.class)
|| klassType.equals(ReplicationPolicy.class) || klassType.equals(ConsistencyPolicy.class)
|| klassType.equals(DocumentCollection.class) || klassType.equals(Database.class)) {
return (JsonSerializable) klassType.getDeclaredConstructor(ObjectNode.class).newInstance(objectNode);
} else {
return (JsonSerializable) klassType.getDeclaredConstructor(String.class).newInstance(Utils.toJson(Utils.getSimpleObjectMapper(), objectNode));
}
} catch (InstantiationException | IllegalAccessException | InvocationTargetException | NoSuchMethodException | IllegalArgumentException e) {
throw new IllegalArgumentException(e);
}
}
} |
@moderakh - the method `createCosmosAsyncItemResponseWithObjectType` is called from only one place with second parameter as `Object.class`. [here](https://github.com/Azure/azure-sdk-for-java/blob/2c32ea66cefeec207d9c88d5bff79961fc6e5cef/sdk/cosmos/azure-cosmos/src/main/java/com/azure/cosmos/CosmosAsyncContainer.java#L524). Hence I thought of simplifying it since the method name itself convey that its Object Type. Do you see any issues? | public static CosmosAsyncItemResponse<Object> createCosmosAsyncItemResponseWithObjectType(ResourceResponse<Document> response) {
return new CosmosAsyncItemResponse<>(response, Object.class);
} | return new CosmosAsyncItemResponse<>(response, Object.class); | public static CosmosAsyncItemResponse<Object> createCosmosAsyncItemResponseWithObjectType(ResourceResponse<Document> response) {
return new CosmosAsyncItemResponse<>(response, Object.class);
} | class ModelBridgeInternal {
public static CosmosAsyncConflictResponse createCosmosAsyncConflictResponse(ResourceResponse<Conflict> response,
CosmosAsyncContainer container) {
return new CosmosAsyncConflictResponse(response, container);
}
public static CosmosAsyncContainerResponse createCosmosAsyncContainerResponse(ResourceResponse<DocumentCollection> response,
CosmosAsyncDatabase database) {
return new CosmosAsyncContainerResponse(response, database);
}
public static CosmosAsyncDatabaseResponse createCosmosAsyncDatabaseResponse(ResourceResponse<Database> response,
CosmosAsyncClient client) {
return new CosmosAsyncDatabaseResponse(response, client);
}
public static <T> CosmosAsyncItemResponse<T> createCosmosAsyncItemResponse(ResourceResponse<Document> response, Class<T> classType) {
return new CosmosAsyncItemResponse<>(response, classType);
}
public static CosmosAsyncPermissionResponse createCosmosAsyncPermissionResponse(ResourceResponse<Permission> response,
CosmosAsyncUser cosmosUser) {
return new CosmosAsyncPermissionResponse(response, cosmosUser);
}
public static CosmosAsyncStoredProcedureResponse createCosmosAsyncStoredProcedureResponse(ResourceResponse<StoredProcedure> response,
CosmosAsyncContainer cosmosContainer) {
return new CosmosAsyncStoredProcedureResponse(response, cosmosContainer);
}
public static CosmosAsyncStoredProcedureResponse createCosmosAsyncStoredProcedureResponse(StoredProcedureResponse response,
CosmosAsyncContainer cosmosContainer,
String storedProcedureId) {
return new CosmosAsyncStoredProcedureResponse(response, cosmosContainer, storedProcedureId);
}
public static CosmosStoredProcedureProperties createCosmosStoredProcedureProperties(String jsonString) {
return new CosmosStoredProcedureProperties(jsonString);
}
public static CosmosAsyncTriggerResponse createCosmosAsyncTriggerResponse(ResourceResponse<Trigger> response,
CosmosAsyncContainer container) {
return new CosmosAsyncTriggerResponse(response, container);
}
public static CosmosAsyncUserDefinedFunctionResponse createCosmosAsyncUserDefinedFunctionResponse(ResourceResponse<UserDefinedFunction> response,
CosmosAsyncContainer container) {
return new CosmosAsyncUserDefinedFunctionResponse(response, container);
}
public static CosmosAsyncUserResponse createCosmosAsyncUserResponse(ResourceResponse<User> response, CosmosAsyncDatabase database) {
return new CosmosAsyncUserResponse(response, database);
}
public static CosmosContainerResponse createCosmosContainerResponse(CosmosAsyncContainerResponse response,
CosmosDatabase database, CosmosClient client) {
return new CosmosContainerResponse(response, database, client);
}
public static CosmosUserResponse createCosmosUserResponse(CosmosAsyncUserResponse response, CosmosDatabase database) {
return new CosmosUserResponse(response, database);
}
public static <T> CosmosItemResponse<T> createCosmosItemResponse(CosmosAsyncItemResponse<T> response) {
return new CosmosItemResponse<>(response);
}
public static CosmosDatabaseResponse createCosmosDatabaseResponse(CosmosAsyncDatabaseResponse response, CosmosClient client) {
return new CosmosDatabaseResponse(response, client);
}
public static CosmosStoredProcedureResponse createCosmosStoredProcedureResponse(CosmosAsyncStoredProcedureResponse resourceResponse,
CosmosStoredProcedure storedProcedure) {
return new CosmosStoredProcedureResponse(resourceResponse, storedProcedure);
}
public static CosmosUserDefinedFunctionResponse createCosmosUserDefinedFunctionResponse(CosmosAsyncUserDefinedFunctionResponse resourceResponse,
CosmosUserDefinedFunction userDefinedFunction) {
return new CosmosUserDefinedFunctionResponse(resourceResponse, userDefinedFunction);
}
public static CosmosTriggerResponse createCosmosTriggerResponse(CosmosAsyncTriggerResponse asyncResponse,
CosmosTrigger syncTrigger) {
return new CosmosTriggerResponse(asyncResponse, syncTrigger);
}
public static List<CosmosConflictProperties> getCosmosConflictPropertiesFromV2Results(List<Conflict> results) {
return CosmosConflictProperties.getFromV2Results(results);
}
public static DocumentCollection getV2Collection(CosmosContainerProperties containerProperties) {
return containerProperties.getV2Collection();
}
public static List<CosmosContainerProperties> getCosmosContainerPropertiesFromV2Results(List<DocumentCollection> results) {
return CosmosContainerProperties.getFromV2Results(results);
}
public static List<CosmosDatabaseProperties> getCosmosDatabasePropertiesFromV2Results(List<Database> results) {
return CosmosDatabaseProperties.getFromV2Results(results);
}
public static <T> CosmosItemProperties getCosmosItemProperties(CosmosAsyncItemResponse<T> cosmosItemResponse) {
return cosmosItemResponse.getProperties();
}
public static <T> CosmosItemProperties getCosmosItemProperties(CosmosItemResponse<T> cosmosItemResponse) {
return cosmosItemResponse.getProperties();
}
public static Permission getV2Permissions(CosmosPermissionProperties permissionSettings) {
return permissionSettings.getV2Permissions();
}
public static List<CosmosPermissionProperties> getCosmosPermissionPropertiesFromV2Results(List<Permission> results) {
return CosmosPermissionProperties.getFromV2Results(results);
}
public static List<CosmosStoredProcedureProperties> getCosmosStoredProcedurePropertiesFromV2Results(List<StoredProcedure> results) {
return CosmosStoredProcedureProperties.getFromV2Results(results);
}
public static List<CosmosTriggerProperties> getCosmosTriggerPropertiesFromV2Results(List<Trigger> results) {
return CosmosTriggerProperties.getFromV2Results(results);
}
public static List<CosmosUserDefinedFunctionProperties> getCosmosUserDefinedFunctionPropertiesFromV2Results(List<UserDefinedFunction> results) {
return CosmosUserDefinedFunctionProperties.getFromV2Results(results);
}
public static User getV2User(CosmosUserProperties cosmosUserProperties) {
return cosmosUserProperties.getV2User();
}
public static List<CosmosUserProperties> getCosmosUserPropertiesFromV2Results(List<User> results) {
return CosmosUserProperties.getFromV2Results(results);
}
public static RequestOptions toRequestOptions(CosmosConflictRequestOptions cosmosConflictRequestOptions) {
return cosmosConflictRequestOptions.toRequestOptions();
}
public static RequestOptions toRequestOptions(CosmosContainerRequestOptions cosmosContainerRequestOptions) {
return cosmosContainerRequestOptions.toRequestOptions();
}
public static CosmosContainerRequestOptions setOfferThroughput(CosmosContainerRequestOptions cosmosContainerRequestOptions,
Integer offerThroughput) {
return cosmosContainerRequestOptions.setOfferThroughput(offerThroughput);
}
public static RequestOptions toRequestOptions(CosmosDatabaseRequestOptions cosmosDatabaseRequestOptions) {
return cosmosDatabaseRequestOptions.toRequestOptions();
}
public static CosmosDatabaseRequestOptions setOfferThroughput(CosmosDatabaseRequestOptions cosmosDatabaseRequestOptions,
Integer offerThroughput) {
return cosmosDatabaseRequestOptions.setOfferThroughput(offerThroughput);
}
public static CosmosItemRequestOptions setPartitionKey(CosmosItemRequestOptions cosmosItemRequestOptions,
PartitionKey partitionKey) {
return cosmosItemRequestOptions.setPartitionKey(partitionKey);
}
public static RequestOptions toRequestOptions(CosmosItemRequestOptions cosmosItemRequestOptions) {
return cosmosItemRequestOptions.toRequestOptions();
}
public static CosmosItemRequestOptions createCosmosItemRequestOptions(PartitionKey partitionKey) {
return new CosmosItemRequestOptions(partitionKey);
}
public static RequestOptions toRequestOptions(CosmosPermissionRequestOptions cosmosPermissionRequestOptions) {
return cosmosPermissionRequestOptions.toRequestOptions();
}
public static RequestOptions toRequestOptions(CosmosStoredProcedureRequestOptions cosmosStoredProcedureRequestOptions) {
return cosmosStoredProcedureRequestOptions.toRequestOptions();
}
public static String getAddressesLink(DatabaseAccount databaseAccount) {
return databaseAccount.getAddressesLink();
}
public static DatabaseAccount toDatabaseAccount(RxDocumentServiceResponse response) {
DatabaseAccount account = response.getResource(DatabaseAccount.class);
Map<String, String> responseHeader = response.getResponseHeaders();
account.setMaxMediaStorageUsageInMB(
Long.valueOf(responseHeader.get(HttpConstants.HttpHeaders.MAX_MEDIA_STORAGE_USAGE_IN_MB)));
account.setMediaStorageUsageInMB(
Long.valueOf(responseHeader.get(HttpConstants.HttpHeaders.CURRENT_MEDIA_STORAGE_USAGE_IN_MB)));
return account;
}
public static Map<String, Object> getQueryEngineConfiuration(DatabaseAccount databaseAccount) {
return databaseAccount.getQueryEngineConfiguration();
}
public static ReplicationPolicy getReplicationPolicy(DatabaseAccount databaseAccount) {
return databaseAccount.getReplicationPolicy();
}
public static ReplicationPolicy getSystemReplicationPolicy(DatabaseAccount databaseAccount) {
return databaseAccount.getSystemReplicationPolicy();
}
public static ConsistencyPolicy getConsistencyPolicy(DatabaseAccount databaseAccount) {
return databaseAccount.getConsistencyPolicy();
}
/**
* Gets the partitionKeyRangeId.
*
* @param options the feed options
* @return the partitionKeyRangeId.
*/
public static String partitionKeyRangeIdInternal(FeedOptions options) {
return options.getPartitionKeyRangeIdInternal();
}
/**
* Sets the PartitionKeyRangeId.
*
* @param options the feed options
* @param partitionKeyRangeId the partition key range id
* @return the partitionKeyRangeId.
*/
public static FeedOptions partitionKeyRangeIdInternal(FeedOptions options, String partitionKeyRangeId) {
return options.setPartitionKeyRangeIdInternal(partitionKeyRangeId);
}
public static <T extends Resource> FeedResponse<T> toFeedResponsePage(RxDocumentServiceResponse response,
Class<T> cls) {
return new FeedResponse<T>(response.getQueryResponse(cls), response.getResponseHeaders());
}
public static <T> FeedResponse<T> toFeedResponsePage(List<T> results, Map<String, String> headers, boolean noChanges) {
return new FeedResponse<>(results, headers, noChanges);
}
public static <T extends Resource> FeedResponse<T> toChaneFeedResponsePage(RxDocumentServiceResponse response,
Class<T> cls) {
return new FeedResponse<T>(noChanges(response) ? Collections.emptyList() : response.getQueryResponse(cls),
response.getResponseHeaders(), noChanges(response));
}
public static <T extends Resource> boolean noChanges(FeedResponse<T> page) {
return page.nochanges;
}
public static <T extends Resource> boolean noChanges(RxDocumentServiceResponse rsp) {
return rsp.getStatusCode() == HttpConstants.StatusCodes.NOT_MODIFIED;
}
public static <T> FeedResponse<T> createFeedResponse(List<T> results,
Map<String, String> headers) {
return new FeedResponse<>(results, headers);
}
public static <T> FeedResponse<T> createFeedResponseWithQueryMetrics(List<T> results,
Map<String, String> headers, ConcurrentMap<String, QueryMetrics> queryMetricsMap) {
return new FeedResponse<>(results, headers, queryMetricsMap);
}
public static <T> ConcurrentMap<String, QueryMetrics> queryMetricsMap(FeedResponse<T> feedResponse) {
return feedResponse.queryMetricsMap();
}
public static <T> ConcurrentMap<String, QueryMetrics> queryMetrics(FeedResponse<T> feedResponse) {
return feedResponse.queryMetrics();
}
public static String toLower(RequestVerb verb) {
return verb.toLowerCase();
}
public static boolean isV2(PartitionKeyDefinition pkd) {
return pkd.getVersion() != null && PartitionKeyDefinitionVersion.V2.val == pkd.getVersion().val;
}
public static PartitionKeyInternal getNonePartitionKey(PartitionKeyDefinition partitionKeyDefinition) {
return partitionKeyDefinition.getNonePartitionKeyValue();
}
public static PartitionKeyInternal getPartitionKeyInternal(PartitionKey partitionKey) {
return partitionKey.getInternalPartitionKey();
}
public static PartitionKey partitionKeyfromJsonString(String jsonString) {
return PartitionKey.fromJsonString(jsonString);
}
public static Object getPartitionKeyObject(PartitionKey right) {
return right.getKeyObject();
}
public static String getAltLink(Resource resource) {
return resource.getAltLink();
}
public static void setAltLink(Resource resource, String altLink) {
resource.setAltLink(altLink);
}
public static void setResourceSelfLink(Resource resource, String selfLink) {
resource.setSelfLink(selfLink);
}
public static void setTimestamp(Resource resource, OffsetDateTime date) {
resource.setTimestamp(date);
}
public static void validateResource(Resource resource) {
Resource.validateResource(resource);
}
public static <T> void setProperty(JsonSerializable jsonSerializable, String propertyName, T value) {
jsonSerializable.set(propertyName, value);
}
public static ObjectNode getObject(JsonSerializable jsonSerializable, String propertyName) {
return jsonSerializable.getObject(propertyName);
}
public static void remove(JsonSerializable jsonSerializable, String propertyName) {
jsonSerializable.remove(propertyName);
}
public static Object getValue(JsonNode value) {
return JsonSerializable.getValue(value);
}
public static CosmosError createCosmosError(ObjectNode objectNode) {
return new CosmosError(objectNode);
}
public static CosmosError createCosmosError(String jsonString) {
return new CosmosError(jsonString);
}
public static void populatePropertyBagJsonSerializable(JsonSerializable jsonSerializable) {
jsonSerializable.populatePropertyBag();
}
public static JsonSerializable instantiateJsonSerializable(ObjectNode objectNode, Class<?> klassType) {
try {
if (klassType.equals(Document.class) || klassType.equals(OrderByRowResult.class) || klassType.equals(CosmosItemProperties.class)
|| klassType.equals(PartitionKeyRange.class) || klassType.equals(Range.class)
|| klassType.equals(QueryInfo.class) || klassType.equals(PartitionedQueryExecutionInfoInternal.class)
|| klassType.equals(QueryItem.class)
|| klassType.equals(Address.class)
|| klassType.equals(DatabaseAccount.class) || klassType.equals(DatabaseAccountLocation.class)
|| klassType.equals(ReplicationPolicy.class) || klassType.equals(ConsistencyPolicy.class)
|| klassType.equals(DocumentCollection.class) || klassType.equals(Database.class)) {
return (JsonSerializable) klassType.getDeclaredConstructor(ObjectNode.class).newInstance(objectNode);
} else {
return (JsonSerializable) klassType.getDeclaredConstructor(String.class).newInstance(Utils.toJson(Utils.getSimpleObjectMapper(), objectNode));
}
} catch (InstantiationException | IllegalAccessException | InvocationTargetException | NoSuchMethodException | IllegalArgumentException e) {
throw new IllegalArgumentException(e);
}
}
} | class ModelBridgeInternal {
public static CosmosAsyncConflictResponse createCosmosAsyncConflictResponse(ResourceResponse<Conflict> response,
CosmosAsyncContainer container) {
return new CosmosAsyncConflictResponse(response, container);
}
public static CosmosAsyncContainerResponse createCosmosAsyncContainerResponse(ResourceResponse<DocumentCollection> response,
CosmosAsyncDatabase database) {
return new CosmosAsyncContainerResponse(response, database);
}
public static CosmosAsyncDatabaseResponse createCosmosAsyncDatabaseResponse(ResourceResponse<Database> response,
CosmosAsyncClient client) {
return new CosmosAsyncDatabaseResponse(response, client);
}
public static <T> CosmosAsyncItemResponse<T> createCosmosAsyncItemResponse(ResourceResponse<Document> response, Class<T> classType) {
return new CosmosAsyncItemResponse<>(response, classType);
}
public static CosmosAsyncPermissionResponse createCosmosAsyncPermissionResponse(ResourceResponse<Permission> response,
CosmosAsyncUser cosmosUser) {
return new CosmosAsyncPermissionResponse(response, cosmosUser);
}
public static CosmosAsyncStoredProcedureResponse createCosmosAsyncStoredProcedureResponse(ResourceResponse<StoredProcedure> response,
CosmosAsyncContainer cosmosContainer) {
return new CosmosAsyncStoredProcedureResponse(response, cosmosContainer);
}
public static CosmosAsyncStoredProcedureResponse createCosmosAsyncStoredProcedureResponse(StoredProcedureResponse response,
CosmosAsyncContainer cosmosContainer,
String storedProcedureId) {
return new CosmosAsyncStoredProcedureResponse(response, cosmosContainer, storedProcedureId);
}
public static CosmosStoredProcedureProperties createCosmosStoredProcedureProperties(String jsonString) {
return new CosmosStoredProcedureProperties(jsonString);
}
public static CosmosAsyncTriggerResponse createCosmosAsyncTriggerResponse(ResourceResponse<Trigger> response,
CosmosAsyncContainer container) {
return new CosmosAsyncTriggerResponse(response, container);
}
public static CosmosAsyncUserDefinedFunctionResponse createCosmosAsyncUserDefinedFunctionResponse(ResourceResponse<UserDefinedFunction> response,
CosmosAsyncContainer container) {
return new CosmosAsyncUserDefinedFunctionResponse(response, container);
}
public static CosmosAsyncUserResponse createCosmosAsyncUserResponse(ResourceResponse<User> response, CosmosAsyncDatabase database) {
return new CosmosAsyncUserResponse(response, database);
}
public static CosmosContainerResponse createCosmosContainerResponse(CosmosAsyncContainerResponse response,
CosmosDatabase database, CosmosClient client) {
return new CosmosContainerResponse(response, database, client);
}
public static CosmosUserResponse createCosmosUserResponse(CosmosAsyncUserResponse response, CosmosDatabase database) {
return new CosmosUserResponse(response, database);
}
public static <T> CosmosItemResponse<T> createCosmosItemResponse(CosmosAsyncItemResponse<T> response) {
return new CosmosItemResponse<>(response);
}
public static CosmosDatabaseResponse createCosmosDatabaseResponse(CosmosAsyncDatabaseResponse response, CosmosClient client) {
return new CosmosDatabaseResponse(response, client);
}
public static CosmosStoredProcedureResponse createCosmosStoredProcedureResponse(CosmosAsyncStoredProcedureResponse resourceResponse,
CosmosStoredProcedure storedProcedure) {
return new CosmosStoredProcedureResponse(resourceResponse, storedProcedure);
}
public static CosmosUserDefinedFunctionResponse createCosmosUserDefinedFunctionResponse(CosmosAsyncUserDefinedFunctionResponse resourceResponse,
CosmosUserDefinedFunction userDefinedFunction) {
return new CosmosUserDefinedFunctionResponse(resourceResponse, userDefinedFunction);
}
public static CosmosTriggerResponse createCosmosTriggerResponse(CosmosAsyncTriggerResponse asyncResponse,
CosmosTrigger syncTrigger) {
return new CosmosTriggerResponse(asyncResponse, syncTrigger);
}
public static List<CosmosConflictProperties> getCosmosConflictPropertiesFromV2Results(List<Conflict> results) {
return CosmosConflictProperties.getFromV2Results(results);
}
public static DocumentCollection getV2Collection(CosmosContainerProperties containerProperties) {
return containerProperties.getV2Collection();
}
public static List<CosmosContainerProperties> getCosmosContainerPropertiesFromV2Results(List<DocumentCollection> results) {
return CosmosContainerProperties.getFromV2Results(results);
}
public static List<CosmosDatabaseProperties> getCosmosDatabasePropertiesFromV2Results(List<Database> results) {
return CosmosDatabaseProperties.getFromV2Results(results);
}
public static <T> CosmosItemProperties getCosmosItemProperties(CosmosAsyncItemResponse<T> cosmosItemResponse) {
return cosmosItemResponse.getProperties();
}
public static <T> CosmosItemProperties getCosmosItemProperties(CosmosItemResponse<T> cosmosItemResponse) {
return cosmosItemResponse.getProperties();
}
public static Permission getV2Permissions(CosmosPermissionProperties permissionSettings) {
return permissionSettings.getV2Permissions();
}
public static List<CosmosPermissionProperties> getCosmosPermissionPropertiesFromV2Results(List<Permission> results) {
return CosmosPermissionProperties.getFromV2Results(results);
}
public static List<CosmosStoredProcedureProperties> getCosmosStoredProcedurePropertiesFromV2Results(List<StoredProcedure> results) {
return CosmosStoredProcedureProperties.getFromV2Results(results);
}
public static List<CosmosTriggerProperties> getCosmosTriggerPropertiesFromV2Results(List<Trigger> results) {
return CosmosTriggerProperties.getFromV2Results(results);
}
public static List<CosmosUserDefinedFunctionProperties> getCosmosUserDefinedFunctionPropertiesFromV2Results(List<UserDefinedFunction> results) {
return CosmosUserDefinedFunctionProperties.getFromV2Results(results);
}
public static User getV2User(CosmosUserProperties cosmosUserProperties) {
return cosmosUserProperties.getV2User();
}
public static List<CosmosUserProperties> getCosmosUserPropertiesFromV2Results(List<User> results) {
return CosmosUserProperties.getFromV2Results(results);
}
public static RequestOptions toRequestOptions(CosmosConflictRequestOptions cosmosConflictRequestOptions) {
return cosmosConflictRequestOptions.toRequestOptions();
}
public static RequestOptions toRequestOptions(CosmosContainerRequestOptions cosmosContainerRequestOptions) {
return cosmosContainerRequestOptions.toRequestOptions();
}
public static CosmosContainerRequestOptions setOfferThroughput(CosmosContainerRequestOptions cosmosContainerRequestOptions,
Integer offerThroughput) {
return cosmosContainerRequestOptions.setOfferThroughput(offerThroughput);
}
public static RequestOptions toRequestOptions(CosmosDatabaseRequestOptions cosmosDatabaseRequestOptions) {
return cosmosDatabaseRequestOptions.toRequestOptions();
}
public static CosmosDatabaseRequestOptions setOfferThroughput(CosmosDatabaseRequestOptions cosmosDatabaseRequestOptions,
Integer offerThroughput) {
return cosmosDatabaseRequestOptions.setOfferThroughput(offerThroughput);
}
public static CosmosItemRequestOptions setPartitionKey(CosmosItemRequestOptions cosmosItemRequestOptions,
PartitionKey partitionKey) {
return cosmosItemRequestOptions.setPartitionKey(partitionKey);
}
public static RequestOptions toRequestOptions(CosmosItemRequestOptions cosmosItemRequestOptions) {
return cosmosItemRequestOptions.toRequestOptions();
}
public static CosmosItemRequestOptions createCosmosItemRequestOptions(PartitionKey partitionKey) {
return new CosmosItemRequestOptions(partitionKey);
}
public static RequestOptions toRequestOptions(CosmosPermissionRequestOptions cosmosPermissionRequestOptions) {
return cosmosPermissionRequestOptions.toRequestOptions();
}
public static RequestOptions toRequestOptions(CosmosStoredProcedureRequestOptions cosmosStoredProcedureRequestOptions) {
return cosmosStoredProcedureRequestOptions.toRequestOptions();
}
public static String getAddressesLink(DatabaseAccount databaseAccount) {
return databaseAccount.getAddressesLink();
}
public static DatabaseAccount toDatabaseAccount(RxDocumentServiceResponse response) {
DatabaseAccount account = response.getResource(DatabaseAccount.class);
Map<String, String> responseHeader = response.getResponseHeaders();
account.setMaxMediaStorageUsageInMB(
Long.valueOf(responseHeader.get(HttpConstants.HttpHeaders.MAX_MEDIA_STORAGE_USAGE_IN_MB)));
account.setMediaStorageUsageInMB(
Long.valueOf(responseHeader.get(HttpConstants.HttpHeaders.CURRENT_MEDIA_STORAGE_USAGE_IN_MB)));
return account;
}
public static Map<String, Object> getQueryEngineConfiuration(DatabaseAccount databaseAccount) {
return databaseAccount.getQueryEngineConfiguration();
}
public static ReplicationPolicy getReplicationPolicy(DatabaseAccount databaseAccount) {
return databaseAccount.getReplicationPolicy();
}
public static ReplicationPolicy getSystemReplicationPolicy(DatabaseAccount databaseAccount) {
return databaseAccount.getSystemReplicationPolicy();
}
public static ConsistencyPolicy getConsistencyPolicy(DatabaseAccount databaseAccount) {
return databaseAccount.getConsistencyPolicy();
}
/**
* Gets the partitionKeyRangeId.
*
* @param options the feed options
* @return the partitionKeyRangeId.
*/
public static String partitionKeyRangeIdInternal(FeedOptions options) {
return options.getPartitionKeyRangeIdInternal();
}
/**
* Sets the PartitionKeyRangeId.
*
* @param options the feed options
* @param partitionKeyRangeId the partition key range id
* @return the partitionKeyRangeId.
*/
public static FeedOptions partitionKeyRangeIdInternal(FeedOptions options, String partitionKeyRangeId) {
return options.setPartitionKeyRangeIdInternal(partitionKeyRangeId);
}
public static <T extends Resource> FeedResponse<T> toFeedResponsePage(RxDocumentServiceResponse response,
Class<T> cls) {
return new FeedResponse<T>(response.getQueryResponse(cls), response.getResponseHeaders());
}
public static <T> FeedResponse<T> toFeedResponsePage(List<T> results, Map<String, String> headers, boolean noChanges) {
return new FeedResponse<>(results, headers, noChanges);
}
public static <T extends Resource> FeedResponse<T> toChaneFeedResponsePage(RxDocumentServiceResponse response,
Class<T> cls) {
return new FeedResponse<T>(noChanges(response) ? Collections.emptyList() : response.getQueryResponse(cls),
response.getResponseHeaders(), noChanges(response));
}
public static <T extends Resource> boolean noChanges(FeedResponse<T> page) {
return page.nochanges;
}
public static <T extends Resource> boolean noChanges(RxDocumentServiceResponse rsp) {
return rsp.getStatusCode() == HttpConstants.StatusCodes.NOT_MODIFIED;
}
public static <T> FeedResponse<T> createFeedResponse(List<T> results,
Map<String, String> headers) {
return new FeedResponse<>(results, headers);
}
public static <T> FeedResponse<T> createFeedResponseWithQueryMetrics(List<T> results,
Map<String, String> headers, ConcurrentMap<String, QueryMetrics> queryMetricsMap) {
return new FeedResponse<>(results, headers, queryMetricsMap);
}
public static <T> ConcurrentMap<String, QueryMetrics> queryMetricsMap(FeedResponse<T> feedResponse) {
return feedResponse.queryMetricsMap();
}
public static <T> ConcurrentMap<String, QueryMetrics> queryMetrics(FeedResponse<T> feedResponse) {
return feedResponse.queryMetrics();
}
public static String toLower(RequestVerb verb) {
return verb.toLowerCase();
}
public static boolean isV2(PartitionKeyDefinition pkd) {
return pkd.getVersion() != null && PartitionKeyDefinitionVersion.V2.val == pkd.getVersion().val;
}
public static PartitionKeyInternal getNonePartitionKey(PartitionKeyDefinition partitionKeyDefinition) {
return partitionKeyDefinition.getNonePartitionKeyValue();
}
public static PartitionKeyInternal getPartitionKeyInternal(PartitionKey partitionKey) {
return partitionKey.getInternalPartitionKey();
}
public static PartitionKey partitionKeyfromJsonString(String jsonString) {
return PartitionKey.fromJsonString(jsonString);
}
public static Object getPartitionKeyObject(PartitionKey right) {
return right.getKeyObject();
}
public static String getAltLink(Resource resource) {
return resource.getAltLink();
}
public static void setAltLink(Resource resource, String altLink) {
resource.setAltLink(altLink);
}
public static void setResourceSelfLink(Resource resource, String selfLink) {
resource.setSelfLink(selfLink);
}
public static void setTimestamp(Resource resource, OffsetDateTime date) {
resource.setTimestamp(date);
}
public static void validateResource(Resource resource) {
Resource.validateResource(resource);
}
public static <T> void setProperty(JsonSerializable jsonSerializable, String propertyName, T value) {
jsonSerializable.set(propertyName, value);
}
public static ObjectNode getObject(JsonSerializable jsonSerializable, String propertyName) {
return jsonSerializable.getObject(propertyName);
}
public static void remove(JsonSerializable jsonSerializable, String propertyName) {
jsonSerializable.remove(propertyName);
}
public static Object getValue(JsonNode value) {
return JsonSerializable.getValue(value);
}
public static CosmosError createCosmosError(ObjectNode objectNode) {
return new CosmosError(objectNode);
}
public static CosmosError createCosmosError(String jsonString) {
return new CosmosError(jsonString);
}
public static void populatePropertyBagJsonSerializable(JsonSerializable jsonSerializable) {
jsonSerializable.populatePropertyBag();
}
public static JsonSerializable instantiateJsonSerializable(ObjectNode objectNode, Class<?> klassType) {
try {
if (klassType.equals(Document.class) || klassType.equals(OrderByRowResult.class) || klassType.equals(CosmosItemProperties.class)
|| klassType.equals(PartitionKeyRange.class) || klassType.equals(Range.class)
|| klassType.equals(QueryInfo.class) || klassType.equals(PartitionedQueryExecutionInfoInternal.class)
|| klassType.equals(QueryItem.class)
|| klassType.equals(Address.class)
|| klassType.equals(DatabaseAccount.class) || klassType.equals(DatabaseAccountLocation.class)
|| klassType.equals(ReplicationPolicy.class) || klassType.equals(ConsistencyPolicy.class)
|| klassType.equals(DocumentCollection.class) || klassType.equals(Database.class)) {
return (JsonSerializable) klassType.getDeclaredConstructor(ObjectNode.class).newInstance(objectNode);
} else {
return (JsonSerializable) klassType.getDeclaredConstructor(String.class).newInstance(Utils.toJson(Utils.getSimpleObjectMapper(), objectNode));
}
} catch (InstantiationException | IllegalAccessException | InvocationTargetException | NoSuchMethodException | IllegalArgumentException e) {
throw new IllegalArgumentException(e);
}
}
} |
:) removed | public int hashCode() {
if (this.components == null || this.components.size() == 0) {
return 0;
}
int [] ordinals = new int[this.components.size()];
for (int i = 0; i < this.components.size(); i++) {
ordinals[i] = this.components.get(i).GetTypeOrdinal();
}
return Objects.hashCode(ordinals);
} | } | public int hashCode() {
return super.hashCode();
} | class PartitionKeyInternal implements Comparable<PartitionKeyInternal> {
private static final String TYPE = "type";
private static final String MIN_NUMBER = "MinNumber";
private static final String MAX_NUMBER = "MaxNumber";
private static final String MIN_STRING = "MinString";
private static final String MAX_STRING = "MaxString";
private static final String INFINITY = "Infinity";
public static final PartitionKeyInternal NonePartitionKey =
new PartitionKeyInternal();
public static final PartitionKeyInternal EmptyPartitionKey =
new PartitionKeyInternal(new ArrayList<>());
@SuppressWarnings("serial")
public static final PartitionKeyInternal InfinityPartitionKey =
new PartitionKeyInternal(new ArrayList<IPartitionKeyComponent>() {{
add(new InfinityPartitionKeyComponent());
}});
@SuppressWarnings("serial")
public static final PartitionKeyInternal UndefinedPartitionKey =
new PartitionKeyInternal(new ArrayList<IPartitionKeyComponent>() {{
add(new UndefinedPartitionKeyComponent());
}});
public static final PartitionKeyInternal InclusiveMinimum = PartitionKeyInternal.EmptyPartitionKey;
public static final PartitionKeyInternal ExclusiveMaximum = PartitionKeyInternal.InfinityPartitionKey;
public static final PartitionKeyInternal Empty = PartitionKeyInternal.EmptyPartitionKey;
public static final PartitionKeyInternal None = PartitionKeyInternal.NonePartitionKey;
final List<IPartitionKeyComponent> components;
public PartitionKeyInternal(List<IPartitionKeyComponent> values) {
if (values == null) {
throw new IllegalArgumentException("values");
}
this.components = values;
}
public PartitionKeyInternal() {
this.components = null;
}
public static PartitionKeyInternal fromJsonString(String partitionKey) {
if (Strings.isNullOrEmpty(partitionKey)) {
throw new IllegalArgumentException(String.format(RMResources.UnableToDeserializePartitionKeyValue, partitionKey));
}
try {
return Utils.getSimpleObjectMapper().readValue(partitionKey, PartitionKeyInternal.class);
} catch (IOException e) {
throw new IllegalArgumentException(e);
}
}
public static PartitionKeyInternal fromObjectArray(Object[] values, boolean strict) {
if (values == null) {
throw new IllegalArgumentException("values");
}
return PartitionKeyInternal.fromObjectArray(Arrays.asList(values), strict);
}
public static PartitionKeyInternal fromObjectArray(List<Object> values, boolean strict) {
if (values == null) {
throw new IllegalArgumentException("values");
}
List<IPartitionKeyComponent> components = new ArrayList<>();
for (Object value : values) {
if (value == NullNode.instance || value == null) {
components.add(NullPartitionKeyComponent.VALUE);
} else if (value instanceof Undefined) {
components.add(UndefinedPartitionKeyComponent.VALUE);
} else if (value instanceof Boolean) {
components.add(new BoolPartitionKeyComponent((boolean) value));
} else if (value instanceof String) {
components.add(new StringPartitionKeyComponent((String) value));
} else if (isNumeric(value)) {
components.add(new NumberPartitionKeyComponent(((Number) value).doubleValue()));
} else if (value instanceof ObjectNode && ((ObjectNode) value).get(TYPE) != null) {
switch (((ObjectNode) value).get(TYPE).asText()) {
case MIN_NUMBER:
components.add(MinNumberPartitionKeyComponent.VALUE);
break;
case MAX_NUMBER:
components.add(MaxNumberPartitionKeyComponent.VALUE);
break;
case MIN_STRING:
components.add(MinStringPartitionKeyComponent.VALUE);
break;
case MAX_STRING:
components.add(MaxStringPartitionKeyComponent.VALUE);
break;
}
} else {
if (strict) {
throw new IllegalArgumentException("Unable to construct PartitionKeyInternal from objects array");
} else {
components.add(UndefinedPartitionKeyComponent.VALUE);
}
}
}
return new PartitionKeyInternal(components);
}
private static boolean isNumeric(Object value) {
return value instanceof Number;
}
private static PartitionKeyInternal getExclusiveMaximum() {
return PartitionKeyInternal.InfinityPartitionKey;
}
public static PartitionKeyInternal getEmpty() {
return PartitionKeyInternal.EmptyPartitionKey;
}
@Override
public boolean equals(Object obj) {
PartitionKeyInternal pki = as(obj, PartitionKeyInternal.class);
if (pki == null) {
return false;
}
if (pki == this) {
return true;
}
return this.compareTo(pki) == 0;
}
@Override
public int compareTo(PartitionKeyInternal other) {
if (other == null) {
throw new IllegalArgumentException("other");
} else if (other.components == null || this.components == null) {
int otherComponentsCount = other.components == null ? 0 : other.components.size();
int thisComponentsCount = this.components == null ? 0 : this.components.size();
return (int) Math.signum(thisComponentsCount - otherComponentsCount);
}
for (int i = 0; i < Math.min(this.components.size(), other.components.size()); i++) {
int leftOrdinal = this.components.get(i).GetTypeOrdinal();
int rightOrdinal = other.components.get(i).GetTypeOrdinal();
if (leftOrdinal != rightOrdinal) {
return (int) Math.signum(leftOrdinal - rightOrdinal);
}
int result = this.components.get(i).CompareTo(other.components.get(i));
if (result != 0) {
return (int) Math.signum(result);
}
}
return (int) Math.signum(this.components.size() - other.components.size());
}
public String toJson() {
try {
return Utils.getSimpleObjectMapper().writeValueAsString(this);
} catch (IOException e) {
throw new IllegalArgumentException("Unable serialize the partition key internal into the JSON string", e);
}
}
public boolean contains(PartitionKeyInternal nestedPartitionKey) {
if (this.components.size() > nestedPartitionKey.components.size()) {
return false;
}
for (int i = 0; i < this.components.size(); i++) {
if (this.components.get(i).CompareTo(nestedPartitionKey.components.get(i)) != 0) {
return false;
}
}
return true;
}
public List<IPartitionKeyComponent> getComponents() {
return components;
}
public String getEffectivePartitionKeyString(PartitionKeyInternal internalPartitionKey, PartitionKeyDefinition partitionKey) {
return PartitionKeyInternalHelper.getEffectivePartitionKeyString(internalPartitionKey, partitionKey);
}
@SuppressWarnings("serial")
static final class PartitionKeyInternalJsonSerializer extends StdSerializer<PartitionKeyInternal> {
protected PartitionKeyInternalJsonSerializer() { this(null); }
protected PartitionKeyInternalJsonSerializer(Class<PartitionKeyInternal> t) {
super(t);
}
@Override
public void serialize(PartitionKeyInternal partitionKey, JsonGenerator writer, SerializerProvider serializerProvider) {
try {
if (partitionKey.equals(PartitionKeyInternal.getExclusiveMaximum())) {
writer.writeString(INFINITY);
return;
}
writer.writeStartArray();
for (IPartitionKeyComponent componentValue : partitionKey.getComponents()) {
componentValue.JsonEncode(writer);
}
writer.writeEndArray();
} catch (IOException e) {
throw new IllegalStateException(e);
}
}
static void jsonEncode(MinNumberPartitionKeyComponent component, JsonGenerator writer) {
jsonEncodeLimit(writer, MIN_NUMBER);
}
static void jsonEncode(MaxNumberPartitionKeyComponent component, JsonGenerator writer) {
jsonEncodeLimit(writer, MAX_NUMBER);
}
static void jsonEncode(MinStringPartitionKeyComponent component, JsonGenerator writer) {
jsonEncodeLimit(writer, MIN_STRING);
}
static void jsonEncode(MaxStringPartitionKeyComponent component, JsonGenerator writer) {
jsonEncodeLimit(writer, MAX_STRING);
}
private static void jsonEncodeLimit(JsonGenerator writer, String value) {
try {
writer.writeStartObject();
writer.writeFieldName(TYPE);
writer.writeString(value);
writer.writeEndObject();
} catch (IOException e) {
throw new IllegalStateException(e);
}
}
}
@SuppressWarnings("serial")
static final class PartitionKeyInternalJsonDeserializer extends StdDeserializer<PartitionKeyInternal> {
protected PartitionKeyInternalJsonDeserializer() { this(null); }
protected PartitionKeyInternalJsonDeserializer(Class<?> vc) {
super(vc);
}
@Override
public PartitionKeyInternal deserialize(JsonParser jsonParser, DeserializationContext deserializationContext) {
ObjectCodec objectCodec = jsonParser.getCodec();
JsonNode root;
try {
root = objectCodec.readTree(jsonParser);
} catch (IOException e) {
throw new IllegalArgumentException(e);
}
if (root.isTextual() && root.asText().equals(INFINITY)) {
return PartitionKeyInternal.getExclusiveMaximum();
}
List<Object> objects = new ArrayList<>();
if (root.isArray()) {
Iterator<JsonNode> iterator = root.iterator();
while (iterator.hasNext()) {
JsonNode node = iterator.next();
if (node.isNull()) {
objects.add(null);
} else if (node.isNumber()) {
objects.add(node.asDouble());
} else if (node.isBoolean()) {
objects.add(node.asBoolean());
} else if (node.isTextual()) {
objects.add(node.asText());
} else if (node.isArray() && node.size() == 0
|| node.isObject()
&& (node.fields() == null || !node.fields().hasNext())) {
objects.add(Undefined.Value());
} else {
objects.add(node);
}
}
return PartitionKeyInternal.fromObjectArray(objects, true);
}
throw new IllegalStateException(String.format(
"Unable to deserialize PartitionKeyInternal '%s'",
root.toString()));
}
}
} | class PartitionKeyInternal implements Comparable<PartitionKeyInternal> {
private static final String TYPE = "type";
private static final String MIN_NUMBER = "MinNumber";
private static final String MAX_NUMBER = "MaxNumber";
private static final String MIN_STRING = "MinString";
private static final String MAX_STRING = "MaxString";
private static final String INFINITY = "Infinity";
public static final PartitionKeyInternal NonePartitionKey =
new PartitionKeyInternal();
public static final PartitionKeyInternal EmptyPartitionKey =
new PartitionKeyInternal(new ArrayList<>());
@SuppressWarnings("serial")
public static final PartitionKeyInternal InfinityPartitionKey =
new PartitionKeyInternal(new ArrayList<IPartitionKeyComponent>() {{
add(new InfinityPartitionKeyComponent());
}});
@SuppressWarnings("serial")
public static final PartitionKeyInternal UndefinedPartitionKey =
new PartitionKeyInternal(new ArrayList<IPartitionKeyComponent>() {{
add(new UndefinedPartitionKeyComponent());
}});
public static final PartitionKeyInternal InclusiveMinimum = PartitionKeyInternal.EmptyPartitionKey;
public static final PartitionKeyInternal ExclusiveMaximum = PartitionKeyInternal.InfinityPartitionKey;
public static final PartitionKeyInternal Empty = PartitionKeyInternal.EmptyPartitionKey;
public static final PartitionKeyInternal None = PartitionKeyInternal.NonePartitionKey;
final List<IPartitionKeyComponent> components;
public PartitionKeyInternal(List<IPartitionKeyComponent> values) {
if (values == null) {
throw new IllegalArgumentException("values");
}
this.components = values;
}
public PartitionKeyInternal() {
this.components = null;
}
public static PartitionKeyInternal fromJsonString(String partitionKey) {
if (Strings.isNullOrEmpty(partitionKey)) {
throw new IllegalArgumentException(String.format(RMResources.UnableToDeserializePartitionKeyValue, partitionKey));
}
try {
return Utils.getSimpleObjectMapper().readValue(partitionKey, PartitionKeyInternal.class);
} catch (IOException e) {
throw new IllegalArgumentException(e);
}
}
public static PartitionKeyInternal fromObjectArray(Object[] values, boolean strict) {
if (values == null) {
throw new IllegalArgumentException("values");
}
return PartitionKeyInternal.fromObjectArray(Arrays.asList(values), strict);
}
public static PartitionKeyInternal fromObjectArray(List<Object> values, boolean strict) {
if (values == null) {
throw new IllegalArgumentException("values");
}
List<IPartitionKeyComponent> components = new ArrayList<>();
for (Object value : values) {
if (value == NullNode.instance || value == null) {
components.add(NullPartitionKeyComponent.VALUE);
} else if (value instanceof Undefined) {
components.add(UndefinedPartitionKeyComponent.VALUE);
} else if (value instanceof Boolean) {
components.add(new BoolPartitionKeyComponent((boolean) value));
} else if (value instanceof String) {
components.add(new StringPartitionKeyComponent((String) value));
} else if (isNumeric(value)) {
components.add(new NumberPartitionKeyComponent(((Number) value).doubleValue()));
} else if (value instanceof ObjectNode && ((ObjectNode) value).get(TYPE) != null) {
switch (((ObjectNode) value).get(TYPE).asText()) {
case MIN_NUMBER:
components.add(MinNumberPartitionKeyComponent.VALUE);
break;
case MAX_NUMBER:
components.add(MaxNumberPartitionKeyComponent.VALUE);
break;
case MIN_STRING:
components.add(MinStringPartitionKeyComponent.VALUE);
break;
case MAX_STRING:
components.add(MaxStringPartitionKeyComponent.VALUE);
break;
}
} else {
if (strict) {
throw new IllegalArgumentException("Unable to construct PartitionKeyInternal from objects array");
} else {
components.add(UndefinedPartitionKeyComponent.VALUE);
}
}
}
return new PartitionKeyInternal(components);
}
private static boolean isNumeric(Object value) {
return value instanceof Number;
}
private static PartitionKeyInternal getExclusiveMaximum() {
return PartitionKeyInternal.InfinityPartitionKey;
}
public static PartitionKeyInternal getEmpty() {
return PartitionKeyInternal.EmptyPartitionKey;
}
@Override
public boolean equals(Object obj) {
PartitionKeyInternal pki = as(obj, PartitionKeyInternal.class);
if (pki == null) {
return false;
}
if (pki == this) {
return true;
}
return this.compareTo(pki) == 0;
}
@Override
public int compareTo(PartitionKeyInternal other) {
if (other == null) {
throw new IllegalArgumentException("other");
} else if (other.components == null || this.components == null) {
int otherComponentsCount = other.components == null ? 0 : other.components.size();
int thisComponentsCount = this.components == null ? 0 : this.components.size();
return (int) Math.signum(thisComponentsCount - otherComponentsCount);
}
for (int i = 0; i < Math.min(this.components.size(), other.components.size()); i++) {
int leftOrdinal = this.components.get(i).GetTypeOrdinal();
int rightOrdinal = other.components.get(i).GetTypeOrdinal();
if (leftOrdinal != rightOrdinal) {
return (int) Math.signum(leftOrdinal - rightOrdinal);
}
int result = this.components.get(i).CompareTo(other.components.get(i));
if (result != 0) {
return (int) Math.signum(result);
}
}
return (int) Math.signum(this.components.size() - other.components.size());
}
public String toJson() {
try {
return Utils.getSimpleObjectMapper().writeValueAsString(this);
} catch (IOException e) {
throw new IllegalArgumentException("Unable serialize the partition key internal into the JSON string", e);
}
}
public boolean contains(PartitionKeyInternal nestedPartitionKey) {
if (this.components.size() > nestedPartitionKey.components.size()) {
return false;
}
for (int i = 0; i < this.components.size(); i++) {
if (this.components.get(i).CompareTo(nestedPartitionKey.components.get(i)) != 0) {
return false;
}
}
return true;
}
public List<IPartitionKeyComponent> getComponents() {
return components;
}
public String getEffectivePartitionKeyString(PartitionKeyInternal internalPartitionKey, PartitionKeyDefinition partitionKey) {
return PartitionKeyInternalHelper.getEffectivePartitionKeyString(internalPartitionKey, partitionKey);
}
@SuppressWarnings("serial")
static final class PartitionKeyInternalJsonSerializer extends StdSerializer<PartitionKeyInternal> {
protected PartitionKeyInternalJsonSerializer() { this(null); }
protected PartitionKeyInternalJsonSerializer(Class<PartitionKeyInternal> t) {
super(t);
}
@Override
public void serialize(PartitionKeyInternal partitionKey, JsonGenerator writer, SerializerProvider serializerProvider) {
try {
if (partitionKey.equals(PartitionKeyInternal.getExclusiveMaximum())) {
writer.writeString(INFINITY);
return;
}
writer.writeStartArray();
for (IPartitionKeyComponent componentValue : partitionKey.getComponents()) {
componentValue.JsonEncode(writer);
}
writer.writeEndArray();
} catch (IOException e) {
throw new IllegalStateException(e);
}
}
static void jsonEncode(MinNumberPartitionKeyComponent component, JsonGenerator writer) {
jsonEncodeLimit(writer, MIN_NUMBER);
}
static void jsonEncode(MaxNumberPartitionKeyComponent component, JsonGenerator writer) {
jsonEncodeLimit(writer, MAX_NUMBER);
}
static void jsonEncode(MinStringPartitionKeyComponent component, JsonGenerator writer) {
jsonEncodeLimit(writer, MIN_STRING);
}
static void jsonEncode(MaxStringPartitionKeyComponent component, JsonGenerator writer) {
jsonEncodeLimit(writer, MAX_STRING);
}
private static void jsonEncodeLimit(JsonGenerator writer, String value) {
try {
writer.writeStartObject();
writer.writeFieldName(TYPE);
writer.writeString(value);
writer.writeEndObject();
} catch (IOException e) {
throw new IllegalStateException(e);
}
}
}
@SuppressWarnings("serial")
static final class PartitionKeyInternalJsonDeserializer extends StdDeserializer<PartitionKeyInternal> {
protected PartitionKeyInternalJsonDeserializer() { this(null); }
protected PartitionKeyInternalJsonDeserializer(Class<?> vc) {
super(vc);
}
@Override
public PartitionKeyInternal deserialize(JsonParser jsonParser, DeserializationContext deserializationContext) {
ObjectCodec objectCodec = jsonParser.getCodec();
JsonNode root;
try {
root = objectCodec.readTree(jsonParser);
} catch (IOException e) {
throw new IllegalArgumentException(e);
}
if (root.isTextual() && root.asText().equals(INFINITY)) {
return PartitionKeyInternal.getExclusiveMaximum();
}
List<Object> objects = new ArrayList<>();
if (root.isArray()) {
Iterator<JsonNode> iterator = root.iterator();
while (iterator.hasNext()) {
JsonNode node = iterator.next();
if (node.isNull()) {
objects.add(null);
} else if (node.isNumber()) {
objects.add(node.asDouble());
} else if (node.isBoolean()) {
objects.add(node.asBoolean());
} else if (node.isTextual()) {
objects.add(node.asText());
} else if (node.isArray() && node.size() == 0
|| node.isObject()
&& (node.fields() == null || !node.fields().hasNext())) {
objects.add(Undefined.Value());
} else {
objects.add(node);
}
}
return PartitionKeyInternal.fromObjectArray(objects, true);
}
throw new IllegalStateException(String.format(
"Unable to deserialize PartitionKeyInternal '%s'",
root.toString()));
}
}
} |
@moderakh - yes this particular API is specifically for Object type -> as this is being called by deleteItem(String id) -> which doesn't take any class type information. So this is for generic object type. | public static CosmosAsyncItemResponse<Object> createCosmosAsyncItemResponseWithObjectType(ResourceResponse<Document> response) {
return new CosmosAsyncItemResponse<>(response, Object.class);
} | return new CosmosAsyncItemResponse<>(response, Object.class); | public static CosmosAsyncItemResponse<Object> createCosmosAsyncItemResponseWithObjectType(ResourceResponse<Document> response) {
return new CosmosAsyncItemResponse<>(response, Object.class);
} | class ModelBridgeInternal {
public static CosmosAsyncConflictResponse createCosmosAsyncConflictResponse(ResourceResponse<Conflict> response,
CosmosAsyncContainer container) {
return new CosmosAsyncConflictResponse(response, container);
}
public static CosmosAsyncContainerResponse createCosmosAsyncContainerResponse(ResourceResponse<DocumentCollection> response,
CosmosAsyncDatabase database) {
return new CosmosAsyncContainerResponse(response, database);
}
public static CosmosAsyncDatabaseResponse createCosmosAsyncDatabaseResponse(ResourceResponse<Database> response,
CosmosAsyncClient client) {
return new CosmosAsyncDatabaseResponse(response, client);
}
public static <T> CosmosAsyncItemResponse<T> createCosmosAsyncItemResponse(ResourceResponse<Document> response, Class<T> classType) {
return new CosmosAsyncItemResponse<>(response, classType);
}
public static CosmosAsyncPermissionResponse createCosmosAsyncPermissionResponse(ResourceResponse<Permission> response,
CosmosAsyncUser cosmosUser) {
return new CosmosAsyncPermissionResponse(response, cosmosUser);
}
public static CosmosAsyncStoredProcedureResponse createCosmosAsyncStoredProcedureResponse(ResourceResponse<StoredProcedure> response,
CosmosAsyncContainer cosmosContainer) {
return new CosmosAsyncStoredProcedureResponse(response, cosmosContainer);
}
public static CosmosAsyncStoredProcedureResponse createCosmosAsyncStoredProcedureResponse(StoredProcedureResponse response,
CosmosAsyncContainer cosmosContainer,
String storedProcedureId) {
return new CosmosAsyncStoredProcedureResponse(response, cosmosContainer, storedProcedureId);
}
public static CosmosStoredProcedureProperties createCosmosStoredProcedureProperties(String jsonString) {
return new CosmosStoredProcedureProperties(jsonString);
}
public static CosmosAsyncTriggerResponse createCosmosAsyncTriggerResponse(ResourceResponse<Trigger> response,
CosmosAsyncContainer container) {
return new CosmosAsyncTriggerResponse(response, container);
}
public static CosmosAsyncUserDefinedFunctionResponse createCosmosAsyncUserDefinedFunctionResponse(ResourceResponse<UserDefinedFunction> response,
CosmosAsyncContainer container) {
return new CosmosAsyncUserDefinedFunctionResponse(response, container);
}
public static CosmosAsyncUserResponse createCosmosAsyncUserResponse(ResourceResponse<User> response, CosmosAsyncDatabase database) {
return new CosmosAsyncUserResponse(response, database);
}
public static CosmosContainerResponse createCosmosContainerResponse(CosmosAsyncContainerResponse response,
CosmosDatabase database, CosmosClient client) {
return new CosmosContainerResponse(response, database, client);
}
public static CosmosUserResponse createCosmosUserResponse(CosmosAsyncUserResponse response, CosmosDatabase database) {
return new CosmosUserResponse(response, database);
}
public static <T> CosmosItemResponse<T> createCosmosItemResponse(CosmosAsyncItemResponse<T> response) {
return new CosmosItemResponse<>(response);
}
public static CosmosDatabaseResponse createCosmosDatabaseResponse(CosmosAsyncDatabaseResponse response, CosmosClient client) {
return new CosmosDatabaseResponse(response, client);
}
public static CosmosStoredProcedureResponse createCosmosStoredProcedureResponse(CosmosAsyncStoredProcedureResponse resourceResponse,
CosmosStoredProcedure storedProcedure) {
return new CosmosStoredProcedureResponse(resourceResponse, storedProcedure);
}
public static CosmosUserDefinedFunctionResponse createCosmosUserDefinedFunctionResponse(CosmosAsyncUserDefinedFunctionResponse resourceResponse,
CosmosUserDefinedFunction userDefinedFunction) {
return new CosmosUserDefinedFunctionResponse(resourceResponse, userDefinedFunction);
}
public static CosmosTriggerResponse createCosmosTriggerResponse(CosmosAsyncTriggerResponse asyncResponse,
CosmosTrigger syncTrigger) {
return new CosmosTriggerResponse(asyncResponse, syncTrigger);
}
public static List<CosmosConflictProperties> getCosmosConflictPropertiesFromV2Results(List<Conflict> results) {
return CosmosConflictProperties.getFromV2Results(results);
}
public static DocumentCollection getV2Collection(CosmosContainerProperties containerProperties) {
return containerProperties.getV2Collection();
}
public static List<CosmosContainerProperties> getCosmosContainerPropertiesFromV2Results(List<DocumentCollection> results) {
return CosmosContainerProperties.getFromV2Results(results);
}
public static List<CosmosDatabaseProperties> getCosmosDatabasePropertiesFromV2Results(List<Database> results) {
return CosmosDatabaseProperties.getFromV2Results(results);
}
public static <T> CosmosItemProperties getCosmosItemProperties(CosmosAsyncItemResponse<T> cosmosItemResponse) {
return cosmosItemResponse.getProperties();
}
public static <T> CosmosItemProperties getCosmosItemProperties(CosmosItemResponse<T> cosmosItemResponse) {
return cosmosItemResponse.getProperties();
}
public static Permission getV2Permissions(CosmosPermissionProperties permissionSettings) {
return permissionSettings.getV2Permissions();
}
public static List<CosmosPermissionProperties> getCosmosPermissionPropertiesFromV2Results(List<Permission> results) {
return CosmosPermissionProperties.getFromV2Results(results);
}
public static List<CosmosStoredProcedureProperties> getCosmosStoredProcedurePropertiesFromV2Results(List<StoredProcedure> results) {
return CosmosStoredProcedureProperties.getFromV2Results(results);
}
public static List<CosmosTriggerProperties> getCosmosTriggerPropertiesFromV2Results(List<Trigger> results) {
return CosmosTriggerProperties.getFromV2Results(results);
}
public static List<CosmosUserDefinedFunctionProperties> getCosmosUserDefinedFunctionPropertiesFromV2Results(List<UserDefinedFunction> results) {
return CosmosUserDefinedFunctionProperties.getFromV2Results(results);
}
public static User getV2User(CosmosUserProperties cosmosUserProperties) {
return cosmosUserProperties.getV2User();
}
public static List<CosmosUserProperties> getCosmosUserPropertiesFromV2Results(List<User> results) {
return CosmosUserProperties.getFromV2Results(results);
}
public static RequestOptions toRequestOptions(CosmosConflictRequestOptions cosmosConflictRequestOptions) {
return cosmosConflictRequestOptions.toRequestOptions();
}
public static RequestOptions toRequestOptions(CosmosContainerRequestOptions cosmosContainerRequestOptions) {
return cosmosContainerRequestOptions.toRequestOptions();
}
public static CosmosContainerRequestOptions setOfferThroughput(CosmosContainerRequestOptions cosmosContainerRequestOptions,
Integer offerThroughput) {
return cosmosContainerRequestOptions.setOfferThroughput(offerThroughput);
}
public static RequestOptions toRequestOptions(CosmosDatabaseRequestOptions cosmosDatabaseRequestOptions) {
return cosmosDatabaseRequestOptions.toRequestOptions();
}
public static CosmosDatabaseRequestOptions setOfferThroughput(CosmosDatabaseRequestOptions cosmosDatabaseRequestOptions,
Integer offerThroughput) {
return cosmosDatabaseRequestOptions.setOfferThroughput(offerThroughput);
}
public static CosmosItemRequestOptions setPartitionKey(CosmosItemRequestOptions cosmosItemRequestOptions,
PartitionKey partitionKey) {
return cosmosItemRequestOptions.setPartitionKey(partitionKey);
}
public static RequestOptions toRequestOptions(CosmosItemRequestOptions cosmosItemRequestOptions) {
return cosmosItemRequestOptions.toRequestOptions();
}
public static CosmosItemRequestOptions createCosmosItemRequestOptions(PartitionKey partitionKey) {
return new CosmosItemRequestOptions(partitionKey);
}
public static RequestOptions toRequestOptions(CosmosPermissionRequestOptions cosmosPermissionRequestOptions) {
return cosmosPermissionRequestOptions.toRequestOptions();
}
public static RequestOptions toRequestOptions(CosmosStoredProcedureRequestOptions cosmosStoredProcedureRequestOptions) {
return cosmosStoredProcedureRequestOptions.toRequestOptions();
}
public static String getAddressesLink(DatabaseAccount databaseAccount) {
return databaseAccount.getAddressesLink();
}
public static DatabaseAccount toDatabaseAccount(RxDocumentServiceResponse response) {
DatabaseAccount account = response.getResource(DatabaseAccount.class);
Map<String, String> responseHeader = response.getResponseHeaders();
account.setMaxMediaStorageUsageInMB(
Long.valueOf(responseHeader.get(HttpConstants.HttpHeaders.MAX_MEDIA_STORAGE_USAGE_IN_MB)));
account.setMediaStorageUsageInMB(
Long.valueOf(responseHeader.get(HttpConstants.HttpHeaders.CURRENT_MEDIA_STORAGE_USAGE_IN_MB)));
return account;
}
public static Map<String, Object> getQueryEngineConfiuration(DatabaseAccount databaseAccount) {
return databaseAccount.getQueryEngineConfiguration();
}
public static ReplicationPolicy getReplicationPolicy(DatabaseAccount databaseAccount) {
return databaseAccount.getReplicationPolicy();
}
public static ReplicationPolicy getSystemReplicationPolicy(DatabaseAccount databaseAccount) {
return databaseAccount.getSystemReplicationPolicy();
}
public static ConsistencyPolicy getConsistencyPolicy(DatabaseAccount databaseAccount) {
return databaseAccount.getConsistencyPolicy();
}
/**
* Gets the partitionKeyRangeId.
*
* @param options the feed options
* @return the partitionKeyRangeId.
*/
public static String partitionKeyRangeIdInternal(FeedOptions options) {
return options.getPartitionKeyRangeIdInternal();
}
/**
* Sets the PartitionKeyRangeId.
*
* @param options the feed options
* @param partitionKeyRangeId the partition key range id
* @return the partitionKeyRangeId.
*/
public static FeedOptions partitionKeyRangeIdInternal(FeedOptions options, String partitionKeyRangeId) {
return options.setPartitionKeyRangeIdInternal(partitionKeyRangeId);
}
public static <T extends Resource> FeedResponse<T> toFeedResponsePage(RxDocumentServiceResponse response,
Class<T> cls) {
return new FeedResponse<T>(response.getQueryResponse(cls), response.getResponseHeaders());
}
public static <T> FeedResponse<T> toFeedResponsePage(List<T> results, Map<String, String> headers, boolean noChanges) {
return new FeedResponse<>(results, headers, noChanges);
}
public static <T extends Resource> FeedResponse<T> toChaneFeedResponsePage(RxDocumentServiceResponse response,
Class<T> cls) {
return new FeedResponse<T>(noChanges(response) ? Collections.emptyList() : response.getQueryResponse(cls),
response.getResponseHeaders(), noChanges(response));
}
public static <T extends Resource> boolean noChanges(FeedResponse<T> page) {
return page.nochanges;
}
public static <T extends Resource> boolean noChanges(RxDocumentServiceResponse rsp) {
return rsp.getStatusCode() == HttpConstants.StatusCodes.NOT_MODIFIED;
}
public static <T> FeedResponse<T> createFeedResponse(List<T> results,
Map<String, String> headers) {
return new FeedResponse<>(results, headers);
}
public static <T> FeedResponse<T> createFeedResponseWithQueryMetrics(List<T> results,
Map<String, String> headers, ConcurrentMap<String, QueryMetrics> queryMetricsMap) {
return new FeedResponse<>(results, headers, queryMetricsMap);
}
public static <T> ConcurrentMap<String, QueryMetrics> queryMetricsMap(FeedResponse<T> feedResponse) {
return feedResponse.queryMetricsMap();
}
public static <T> ConcurrentMap<String, QueryMetrics> queryMetrics(FeedResponse<T> feedResponse) {
return feedResponse.queryMetrics();
}
public static String toLower(RequestVerb verb) {
return verb.toLowerCase();
}
public static boolean isV2(PartitionKeyDefinition pkd) {
return pkd.getVersion() != null && PartitionKeyDefinitionVersion.V2.val == pkd.getVersion().val;
}
public static PartitionKeyInternal getNonePartitionKey(PartitionKeyDefinition partitionKeyDefinition) {
return partitionKeyDefinition.getNonePartitionKeyValue();
}
public static PartitionKeyInternal getPartitionKeyInternal(PartitionKey partitionKey) {
return partitionKey.getInternalPartitionKey();
}
public static PartitionKey partitionKeyfromJsonString(String jsonString) {
return PartitionKey.fromJsonString(jsonString);
}
public static Object getPartitionKeyObject(PartitionKey right) {
return right.getKeyObject();
}
public static String getAltLink(Resource resource) {
return resource.getAltLink();
}
public static void setAltLink(Resource resource, String altLink) {
resource.setAltLink(altLink);
}
public static void setResourceSelfLink(Resource resource, String selfLink) {
resource.setSelfLink(selfLink);
}
public static void setTimestamp(Resource resource, OffsetDateTime date) {
resource.setTimestamp(date);
}
public static void validateResource(Resource resource) {
Resource.validateResource(resource);
}
public static <T> void setProperty(JsonSerializable jsonSerializable, String propertyName, T value) {
jsonSerializable.set(propertyName, value);
}
public static ObjectNode getObject(JsonSerializable jsonSerializable, String propertyName) {
return jsonSerializable.getObject(propertyName);
}
public static void remove(JsonSerializable jsonSerializable, String propertyName) {
jsonSerializable.remove(propertyName);
}
public static Object getValue(JsonNode value) {
return JsonSerializable.getValue(value);
}
public static CosmosError createCosmosError(ObjectNode objectNode) {
return new CosmosError(objectNode);
}
public static CosmosError createCosmosError(String jsonString) {
return new CosmosError(jsonString);
}
public static void populatePropertyBagJsonSerializable(JsonSerializable jsonSerializable) {
jsonSerializable.populatePropertyBag();
}
public static JsonSerializable instantiateJsonSerializable(ObjectNode objectNode, Class<?> klassType) {
try {
if (klassType.equals(Document.class) || klassType.equals(OrderByRowResult.class) || klassType.equals(CosmosItemProperties.class)
|| klassType.equals(PartitionKeyRange.class) || klassType.equals(Range.class)
|| klassType.equals(QueryInfo.class) || klassType.equals(PartitionedQueryExecutionInfoInternal.class)
|| klassType.equals(QueryItem.class)
|| klassType.equals(Address.class)
|| klassType.equals(DatabaseAccount.class) || klassType.equals(DatabaseAccountLocation.class)
|| klassType.equals(ReplicationPolicy.class) || klassType.equals(ConsistencyPolicy.class)
|| klassType.equals(DocumentCollection.class) || klassType.equals(Database.class)) {
return (JsonSerializable) klassType.getDeclaredConstructor(ObjectNode.class).newInstance(objectNode);
} else {
return (JsonSerializable) klassType.getDeclaredConstructor(String.class).newInstance(Utils.toJson(Utils.getSimpleObjectMapper(), objectNode));
}
} catch (InstantiationException | IllegalAccessException | InvocationTargetException | NoSuchMethodException | IllegalArgumentException e) {
throw new IllegalArgumentException(e);
}
}
} | class ModelBridgeInternal {
public static CosmosAsyncConflictResponse createCosmosAsyncConflictResponse(ResourceResponse<Conflict> response,
CosmosAsyncContainer container) {
return new CosmosAsyncConflictResponse(response, container);
}
public static CosmosAsyncContainerResponse createCosmosAsyncContainerResponse(ResourceResponse<DocumentCollection> response,
CosmosAsyncDatabase database) {
return new CosmosAsyncContainerResponse(response, database);
}
public static CosmosAsyncDatabaseResponse createCosmosAsyncDatabaseResponse(ResourceResponse<Database> response,
CosmosAsyncClient client) {
return new CosmosAsyncDatabaseResponse(response, client);
}
public static <T> CosmosAsyncItemResponse<T> createCosmosAsyncItemResponse(ResourceResponse<Document> response, Class<T> classType) {
return new CosmosAsyncItemResponse<>(response, classType);
}
public static CosmosAsyncPermissionResponse createCosmosAsyncPermissionResponse(ResourceResponse<Permission> response,
CosmosAsyncUser cosmosUser) {
return new CosmosAsyncPermissionResponse(response, cosmosUser);
}
public static CosmosAsyncStoredProcedureResponse createCosmosAsyncStoredProcedureResponse(ResourceResponse<StoredProcedure> response,
CosmosAsyncContainer cosmosContainer) {
return new CosmosAsyncStoredProcedureResponse(response, cosmosContainer);
}
public static CosmosAsyncStoredProcedureResponse createCosmosAsyncStoredProcedureResponse(StoredProcedureResponse response,
CosmosAsyncContainer cosmosContainer,
String storedProcedureId) {
return new CosmosAsyncStoredProcedureResponse(response, cosmosContainer, storedProcedureId);
}
public static CosmosStoredProcedureProperties createCosmosStoredProcedureProperties(String jsonString) {
return new CosmosStoredProcedureProperties(jsonString);
}
public static CosmosAsyncTriggerResponse createCosmosAsyncTriggerResponse(ResourceResponse<Trigger> response,
CosmosAsyncContainer container) {
return new CosmosAsyncTriggerResponse(response, container);
}
public static CosmosAsyncUserDefinedFunctionResponse createCosmosAsyncUserDefinedFunctionResponse(ResourceResponse<UserDefinedFunction> response,
CosmosAsyncContainer container) {
return new CosmosAsyncUserDefinedFunctionResponse(response, container);
}
public static CosmosAsyncUserResponse createCosmosAsyncUserResponse(ResourceResponse<User> response, CosmosAsyncDatabase database) {
return new CosmosAsyncUserResponse(response, database);
}
public static CosmosContainerResponse createCosmosContainerResponse(CosmosAsyncContainerResponse response,
CosmosDatabase database, CosmosClient client) {
return new CosmosContainerResponse(response, database, client);
}
public static CosmosUserResponse createCosmosUserResponse(CosmosAsyncUserResponse response, CosmosDatabase database) {
return new CosmosUserResponse(response, database);
}
public static <T> CosmosItemResponse<T> createCosmosItemResponse(CosmosAsyncItemResponse<T> response) {
return new CosmosItemResponse<>(response);
}
public static CosmosDatabaseResponse createCosmosDatabaseResponse(CosmosAsyncDatabaseResponse response, CosmosClient client) {
return new CosmosDatabaseResponse(response, client);
}
public static CosmosStoredProcedureResponse createCosmosStoredProcedureResponse(CosmosAsyncStoredProcedureResponse resourceResponse,
CosmosStoredProcedure storedProcedure) {
return new CosmosStoredProcedureResponse(resourceResponse, storedProcedure);
}
public static CosmosUserDefinedFunctionResponse createCosmosUserDefinedFunctionResponse(CosmosAsyncUserDefinedFunctionResponse resourceResponse,
CosmosUserDefinedFunction userDefinedFunction) {
return new CosmosUserDefinedFunctionResponse(resourceResponse, userDefinedFunction);
}
public static CosmosTriggerResponse createCosmosTriggerResponse(CosmosAsyncTriggerResponse asyncResponse,
CosmosTrigger syncTrigger) {
return new CosmosTriggerResponse(asyncResponse, syncTrigger);
}
public static List<CosmosConflictProperties> getCosmosConflictPropertiesFromV2Results(List<Conflict> results) {
return CosmosConflictProperties.getFromV2Results(results);
}
public static DocumentCollection getV2Collection(CosmosContainerProperties containerProperties) {
return containerProperties.getV2Collection();
}
public static List<CosmosContainerProperties> getCosmosContainerPropertiesFromV2Results(List<DocumentCollection> results) {
return CosmosContainerProperties.getFromV2Results(results);
}
public static List<CosmosDatabaseProperties> getCosmosDatabasePropertiesFromV2Results(List<Database> results) {
return CosmosDatabaseProperties.getFromV2Results(results);
}
public static <T> CosmosItemProperties getCosmosItemProperties(CosmosAsyncItemResponse<T> cosmosItemResponse) {
return cosmosItemResponse.getProperties();
}
public static <T> CosmosItemProperties getCosmosItemProperties(CosmosItemResponse<T> cosmosItemResponse) {
return cosmosItemResponse.getProperties();
}
public static Permission getV2Permissions(CosmosPermissionProperties permissionSettings) {
return permissionSettings.getV2Permissions();
}
public static List<CosmosPermissionProperties> getCosmosPermissionPropertiesFromV2Results(List<Permission> results) {
return CosmosPermissionProperties.getFromV2Results(results);
}
public static List<CosmosStoredProcedureProperties> getCosmosStoredProcedurePropertiesFromV2Results(List<StoredProcedure> results) {
return CosmosStoredProcedureProperties.getFromV2Results(results);
}
public static List<CosmosTriggerProperties> getCosmosTriggerPropertiesFromV2Results(List<Trigger> results) {
return CosmosTriggerProperties.getFromV2Results(results);
}
public static List<CosmosUserDefinedFunctionProperties> getCosmosUserDefinedFunctionPropertiesFromV2Results(List<UserDefinedFunction> results) {
return CosmosUserDefinedFunctionProperties.getFromV2Results(results);
}
public static User getV2User(CosmosUserProperties cosmosUserProperties) {
return cosmosUserProperties.getV2User();
}
public static List<CosmosUserProperties> getCosmosUserPropertiesFromV2Results(List<User> results) {
return CosmosUserProperties.getFromV2Results(results);
}
public static RequestOptions toRequestOptions(CosmosConflictRequestOptions cosmosConflictRequestOptions) {
return cosmosConflictRequestOptions.toRequestOptions();
}
public static RequestOptions toRequestOptions(CosmosContainerRequestOptions cosmosContainerRequestOptions) {
return cosmosContainerRequestOptions.toRequestOptions();
}
public static CosmosContainerRequestOptions setOfferThroughput(CosmosContainerRequestOptions cosmosContainerRequestOptions,
Integer offerThroughput) {
return cosmosContainerRequestOptions.setOfferThroughput(offerThroughput);
}
public static RequestOptions toRequestOptions(CosmosDatabaseRequestOptions cosmosDatabaseRequestOptions) {
return cosmosDatabaseRequestOptions.toRequestOptions();
}
public static CosmosDatabaseRequestOptions setOfferThroughput(CosmosDatabaseRequestOptions cosmosDatabaseRequestOptions,
Integer offerThroughput) {
return cosmosDatabaseRequestOptions.setOfferThroughput(offerThroughput);
}
public static CosmosItemRequestOptions setPartitionKey(CosmosItemRequestOptions cosmosItemRequestOptions,
PartitionKey partitionKey) {
return cosmosItemRequestOptions.setPartitionKey(partitionKey);
}
public static RequestOptions toRequestOptions(CosmosItemRequestOptions cosmosItemRequestOptions) {
return cosmosItemRequestOptions.toRequestOptions();
}
public static CosmosItemRequestOptions createCosmosItemRequestOptions(PartitionKey partitionKey) {
return new CosmosItemRequestOptions(partitionKey);
}
public static RequestOptions toRequestOptions(CosmosPermissionRequestOptions cosmosPermissionRequestOptions) {
return cosmosPermissionRequestOptions.toRequestOptions();
}
public static RequestOptions toRequestOptions(CosmosStoredProcedureRequestOptions cosmosStoredProcedureRequestOptions) {
return cosmosStoredProcedureRequestOptions.toRequestOptions();
}
public static String getAddressesLink(DatabaseAccount databaseAccount) {
return databaseAccount.getAddressesLink();
}
public static DatabaseAccount toDatabaseAccount(RxDocumentServiceResponse response) {
DatabaseAccount account = response.getResource(DatabaseAccount.class);
Map<String, String> responseHeader = response.getResponseHeaders();
account.setMaxMediaStorageUsageInMB(
Long.valueOf(responseHeader.get(HttpConstants.HttpHeaders.MAX_MEDIA_STORAGE_USAGE_IN_MB)));
account.setMediaStorageUsageInMB(
Long.valueOf(responseHeader.get(HttpConstants.HttpHeaders.CURRENT_MEDIA_STORAGE_USAGE_IN_MB)));
return account;
}
public static Map<String, Object> getQueryEngineConfiuration(DatabaseAccount databaseAccount) {
return databaseAccount.getQueryEngineConfiguration();
}
public static ReplicationPolicy getReplicationPolicy(DatabaseAccount databaseAccount) {
return databaseAccount.getReplicationPolicy();
}
public static ReplicationPolicy getSystemReplicationPolicy(DatabaseAccount databaseAccount) {
return databaseAccount.getSystemReplicationPolicy();
}
public static ConsistencyPolicy getConsistencyPolicy(DatabaseAccount databaseAccount) {
return databaseAccount.getConsistencyPolicy();
}
/**
* Gets the partitionKeyRangeId.
*
* @param options the feed options
* @return the partitionKeyRangeId.
*/
public static String partitionKeyRangeIdInternal(FeedOptions options) {
return options.getPartitionKeyRangeIdInternal();
}
/**
* Sets the PartitionKeyRangeId.
*
* @param options the feed options
* @param partitionKeyRangeId the partition key range id
* @return the partitionKeyRangeId.
*/
public static FeedOptions partitionKeyRangeIdInternal(FeedOptions options, String partitionKeyRangeId) {
return options.setPartitionKeyRangeIdInternal(partitionKeyRangeId);
}
public static <T extends Resource> FeedResponse<T> toFeedResponsePage(RxDocumentServiceResponse response,
Class<T> cls) {
return new FeedResponse<T>(response.getQueryResponse(cls), response.getResponseHeaders());
}
public static <T> FeedResponse<T> toFeedResponsePage(List<T> results, Map<String, String> headers, boolean noChanges) {
return new FeedResponse<>(results, headers, noChanges);
}
public static <T extends Resource> FeedResponse<T> toChaneFeedResponsePage(RxDocumentServiceResponse response,
Class<T> cls) {
return new FeedResponse<T>(noChanges(response) ? Collections.emptyList() : response.getQueryResponse(cls),
response.getResponseHeaders(), noChanges(response));
}
public static <T extends Resource> boolean noChanges(FeedResponse<T> page) {
return page.nochanges;
}
public static <T extends Resource> boolean noChanges(RxDocumentServiceResponse rsp) {
return rsp.getStatusCode() == HttpConstants.StatusCodes.NOT_MODIFIED;
}
public static <T> FeedResponse<T> createFeedResponse(List<T> results,
Map<String, String> headers) {
return new FeedResponse<>(results, headers);
}
public static <T> FeedResponse<T> createFeedResponseWithQueryMetrics(List<T> results,
Map<String, String> headers, ConcurrentMap<String, QueryMetrics> queryMetricsMap) {
return new FeedResponse<>(results, headers, queryMetricsMap);
}
public static <T> ConcurrentMap<String, QueryMetrics> queryMetricsMap(FeedResponse<T> feedResponse) {
return feedResponse.queryMetricsMap();
}
public static <T> ConcurrentMap<String, QueryMetrics> queryMetrics(FeedResponse<T> feedResponse) {
return feedResponse.queryMetrics();
}
public static String toLower(RequestVerb verb) {
return verb.toLowerCase();
}
public static boolean isV2(PartitionKeyDefinition pkd) {
return pkd.getVersion() != null && PartitionKeyDefinitionVersion.V2.val == pkd.getVersion().val;
}
public static PartitionKeyInternal getNonePartitionKey(PartitionKeyDefinition partitionKeyDefinition) {
return partitionKeyDefinition.getNonePartitionKeyValue();
}
public static PartitionKeyInternal getPartitionKeyInternal(PartitionKey partitionKey) {
return partitionKey.getInternalPartitionKey();
}
public static PartitionKey partitionKeyfromJsonString(String jsonString) {
return PartitionKey.fromJsonString(jsonString);
}
public static Object getPartitionKeyObject(PartitionKey right) {
return right.getKeyObject();
}
public static String getAltLink(Resource resource) {
return resource.getAltLink();
}
public static void setAltLink(Resource resource, String altLink) {
resource.setAltLink(altLink);
}
public static void setResourceSelfLink(Resource resource, String selfLink) {
resource.setSelfLink(selfLink);
}
public static void setTimestamp(Resource resource, OffsetDateTime date) {
resource.setTimestamp(date);
}
public static void validateResource(Resource resource) {
Resource.validateResource(resource);
}
public static <T> void setProperty(JsonSerializable jsonSerializable, String propertyName, T value) {
jsonSerializable.set(propertyName, value);
}
public static ObjectNode getObject(JsonSerializable jsonSerializable, String propertyName) {
return jsonSerializable.getObject(propertyName);
}
public static void remove(JsonSerializable jsonSerializable, String propertyName) {
jsonSerializable.remove(propertyName);
}
public static Object getValue(JsonNode value) {
return JsonSerializable.getValue(value);
}
public static CosmosError createCosmosError(ObjectNode objectNode) {
return new CosmosError(objectNode);
}
public static CosmosError createCosmosError(String jsonString) {
return new CosmosError(jsonString);
}
public static void populatePropertyBagJsonSerializable(JsonSerializable jsonSerializable) {
jsonSerializable.populatePropertyBag();
}
public static JsonSerializable instantiateJsonSerializable(ObjectNode objectNode, Class<?> klassType) {
try {
if (klassType.equals(Document.class) || klassType.equals(OrderByRowResult.class) || klassType.equals(CosmosItemProperties.class)
|| klassType.equals(PartitionKeyRange.class) || klassType.equals(Range.class)
|| klassType.equals(QueryInfo.class) || klassType.equals(PartitionedQueryExecutionInfoInternal.class)
|| klassType.equals(QueryItem.class)
|| klassType.equals(Address.class)
|| klassType.equals(DatabaseAccount.class) || klassType.equals(DatabaseAccountLocation.class)
|| klassType.equals(ReplicationPolicy.class) || klassType.equals(ConsistencyPolicy.class)
|| klassType.equals(DocumentCollection.class) || klassType.equals(Database.class)) {
return (JsonSerializable) klassType.getDeclaredConstructor(ObjectNode.class).newInstance(objectNode);
} else {
return (JsonSerializable) klassType.getDeclaredConstructor(String.class).newInstance(Utils.toJson(Utils.getSimpleObjectMapper(), objectNode));
}
} catch (InstantiationException | IllegalAccessException | InvocationTargetException | NoSuchMethodException | IllegalArgumentException e) {
throw new IllegalArgumentException(e);
}
}
} |
I am not sure about this one. @milismsft - can you please look at this `break` statement introduction change ? | public Mono<Void> run(CancellationToken cancellationToken) {
this.lastContinuation = this.settings.getStartContinuation();
this.isFirstQueryForChangeFeeds = true;
this.options.setRequestContinuation(this.lastContinuation);
return Flux.just(this)
.flatMap( value -> {
if (cancellationToken.isCancellationRequested()) {
return Flux.empty();
}
if(this.isFirstQueryForChangeFeeds) {
this.isFirstQueryForChangeFeeds = false;
return Flux.just(value);
}
ZonedDateTime stopTimer = ZonedDateTime.now().plus(this.settings.getFeedPollDelay());
return Mono.just(value)
.delayElement(Duration.ofMillis(100))
.repeat( () -> {
ZonedDateTime currentTime = ZonedDateTime.now();
return !cancellationToken.isCancellationRequested() && currentTime.isBefore(stopTimer);
}).last();
})
.flatMap(value -> this.documentClient.createDocumentChangeFeedQuery(this.settings.getCollectionSelfLink(),
this.options)
.limitRequest(1)
)
.flatMap(documentFeedResponse -> {
if (cancellationToken.isCancellationRequested()) return Flux.error(new TaskCancelledException());
this.lastContinuation = documentFeedResponse.getContinuationToken();
if (documentFeedResponse.getResults() != null && documentFeedResponse.getResults().size() > 0) {
return this.dispatchChanges(documentFeedResponse)
.doOnError(throwable -> {
logger.debug("Exception was thrown from thread {}", Thread.currentThread().getId(), throwable);
})
.doOnSuccess((Void) -> {
this.options.setRequestContinuation(this.lastContinuation);
if (cancellationToken.isCancellationRequested()) throw new TaskCancelledException();
});
}
this.options.setRequestContinuation(this.lastContinuation);
if (cancellationToken.isCancellationRequested()) {
return Flux.error(new TaskCancelledException());
}
return Flux.empty();
})
.doOnComplete(() -> {
if (this.options.getMaxItemCount().compareTo(this.settings.getMaxItemCount()) != 0) {
this.options.setMaxItemCount(this.settings.getMaxItemCount());
}
})
.onErrorResume(throwable -> {
if (throwable instanceof CosmosClientException) {
CosmosClientException clientException = (CosmosClientException) throwable;
logger.warn("CosmosClientException: partition {} from thread {}",
BridgeInternal.getPartitionKeyInternal(this.options.getPartitionKey()), Thread.currentThread().getId(), clientException);
StatusCodeErrorType docDbError = ExceptionClassifier.classifyClientException(clientException);
switch (docDbError) {
case PARTITION_NOT_FOUND: {
this.resultException = new PartitionNotFoundException("Partition not found.", this.lastContinuation);
}
break;
case PARTITION_SPLIT: {
this.resultException = new PartitionSplitException("Partition split.", this.lastContinuation);
}
break;
case UNDEFINED: {
this.resultException = new RuntimeException(clientException);
}
break;
case MAX_ITEM_COUNT_TOO_LARGE: {
if (this.options.getMaxItemCount() == null) {
this.options.setMaxItemCount(DefaultMaxItemCount);
} else if (this.options.getMaxItemCount() <= 1) {
logger.error("Cannot reduce maxItemCount further as it's already at {}", this.options.getMaxItemCount(), clientException);
this.resultException = new RuntimeException(clientException);
}
this.options.setMaxItemCount(this.options.getMaxItemCount() / 2);
logger.warn("Reducing maxItemCount, new value: {}", this.options.getMaxItemCount());
return Flux.empty();
}
case TRANSIENT_ERROR: {
if (clientException.getRetryAfterDuration().toMillis() > 0) {
ZonedDateTime stopTimer = ZonedDateTime.now().plus(clientException.getRetryAfterDuration().toMillis(), MILLIS);
return Mono.just(clientException.getRetryAfterDuration().toMillis())
.delayElement(Duration.ofMillis(100))
.repeat(() -> {
ZonedDateTime currentTime = ZonedDateTime.now();
return !cancellationToken.isCancellationRequested() && currentTime.isBefore(stopTimer);
}).flatMap(values -> Flux.empty());
}
}
break;
default: {
logger.error("Unrecognized Cosmos exception returned error code {}", docDbError, clientException);
this.resultException = new RuntimeException(clientException);
}
}
} else if (throwable instanceof LeaseLostException) {
logger.info("LeaseLoseException with partition {} from thread {}",
BridgeInternal.getPartitionKeyInternal(this.options.getPartitionKey()), Thread.currentThread().getId());
this.resultException = (LeaseLostException) throwable;
} else if (throwable instanceof TaskCancelledException) {
logger.debug("Task cancelled exception: partition {} from {}",
this.settings.getPartitionKeyRangeId(), Thread.currentThread().getId(), throwable);
this.resultException = (TaskCancelledException) throwable;
} else {
logger.warn("Unexpected exception from thread {}", Thread.currentThread().getId(), throwable);
this.resultException = new RuntimeException(throwable);
}
return Flux.error(throwable);
})
.repeat(() -> {
if (cancellationToken.isCancellationRequested()) {
this.resultException = new TaskCancelledException();
return false;
}
return true;
})
.onErrorResume(throwable -> {
if (this.resultException == null) {
this.resultException = new RuntimeException(throwable);
}
return Flux.empty();
}).then();
} | public Mono<Void> run(CancellationToken cancellationToken) {
this.lastContinuation = this.settings.getStartContinuation();
this.isFirstQueryForChangeFeeds = true;
this.options.setRequestContinuation(this.lastContinuation);
return Flux.just(this)
.flatMap( value -> {
if (cancellationToken.isCancellationRequested()) {
return Flux.empty();
}
if(this.isFirstQueryForChangeFeeds) {
this.isFirstQueryForChangeFeeds = false;
return Flux.just(value);
}
ZonedDateTime stopTimer = ZonedDateTime.now().plus(this.settings.getFeedPollDelay());
return Mono.just(value)
.delayElement(Duration.ofMillis(100))
.repeat( () -> {
ZonedDateTime currentTime = ZonedDateTime.now();
return !cancellationToken.isCancellationRequested() && currentTime.isBefore(stopTimer);
}).last();
})
.flatMap(value -> this.documentClient.createDocumentChangeFeedQuery(this.settings.getCollectionSelfLink(),
this.options)
.limitRequest(1)
)
.flatMap(documentFeedResponse -> {
if (cancellationToken.isCancellationRequested()) return Flux.error(new TaskCancelledException());
this.lastContinuation = documentFeedResponse.getContinuationToken();
if (documentFeedResponse.getResults() != null && documentFeedResponse.getResults().size() > 0) {
return this.dispatchChanges(documentFeedResponse)
.doOnError(throwable -> {
logger.debug("Exception was thrown from thread {}", Thread.currentThread().getId(), throwable);
})
.doOnSuccess((Void) -> {
this.options.setRequestContinuation(this.lastContinuation);
if (cancellationToken.isCancellationRequested()) throw new TaskCancelledException();
});
}
this.options.setRequestContinuation(this.lastContinuation);
if (cancellationToken.isCancellationRequested()) {
return Flux.error(new TaskCancelledException());
}
return Flux.empty();
})
.doOnComplete(() -> {
if (this.options.getMaxItemCount().compareTo(this.settings.getMaxItemCount()) != 0) {
this.options.setMaxItemCount(this.settings.getMaxItemCount());
}
})
.onErrorResume(throwable -> {
if (throwable instanceof CosmosClientException) {
CosmosClientException clientException = (CosmosClientException) throwable;
logger.warn("CosmosClientException: partition {} from thread {}",
BridgeInternal.getPartitionKeyInternal(this.options.getPartitionKey()), Thread.currentThread().getId(), clientException);
StatusCodeErrorType docDbError = ExceptionClassifier.classifyClientException(clientException);
switch (docDbError) {
case PARTITION_NOT_FOUND: {
this.resultException = new PartitionNotFoundException("Partition not found.", this.lastContinuation);
}
break;
case PARTITION_SPLIT: {
this.resultException = new PartitionSplitException("Partition split.", this.lastContinuation);
}
break;
case UNDEFINED: {
this.resultException = new RuntimeException(clientException);
}
break;
case MAX_ITEM_COUNT_TOO_LARGE: {
if (this.options.getMaxItemCount() == null) {
this.options.setMaxItemCount(DefaultMaxItemCount);
} else if (this.options.getMaxItemCount() <= 1) {
logger.error("Cannot reduce maxItemCount further as it's already at {}", this.options.getMaxItemCount(), clientException);
this.resultException = new RuntimeException(clientException);
}
this.options.setMaxItemCount(this.options.getMaxItemCount() / 2);
logger.warn("Reducing maxItemCount, new value: {}", this.options.getMaxItemCount());
return Flux.empty();
}
case TRANSIENT_ERROR: {
if (clientException.getRetryAfterDuration().toMillis() > 0) {
ZonedDateTime stopTimer = ZonedDateTime.now().plus(clientException.getRetryAfterDuration().toMillis(), MILLIS);
return Mono.just(clientException.getRetryAfterDuration().toMillis())
.delayElement(Duration.ofMillis(100))
.repeat(() -> {
ZonedDateTime currentTime = ZonedDateTime.now();
return !cancellationToken.isCancellationRequested() && currentTime.isBefore(stopTimer);
}).flatMap(values -> Flux.empty());
}
}
break;
default: {
logger.error("Unrecognized Cosmos exception returned error code {}", docDbError, clientException);
this.resultException = new RuntimeException(clientException);
}
}
} else if (throwable instanceof LeaseLostException) {
logger.info("LeaseLoseException with partition {} from thread {}",
BridgeInternal.getPartitionKeyInternal(this.options.getPartitionKey()), Thread.currentThread().getId());
this.resultException = (LeaseLostException) throwable;
} else if (throwable instanceof TaskCancelledException) {
logger.debug("Task cancelled exception: partition {} from {}",
this.settings.getPartitionKeyRangeId(), Thread.currentThread().getId(), throwable);
this.resultException = (TaskCancelledException) throwable;
} else {
logger.warn("Unexpected exception from thread {}", Thread.currentThread().getId(), throwable);
this.resultException = new RuntimeException(throwable);
}
return Flux.error(throwable);
})
.repeat(() -> {
if (cancellationToken.isCancellationRequested()) {
this.resultException = new TaskCancelledException();
return false;
}
return true;
})
.onErrorResume(throwable -> {
if (this.resultException == null) {
this.resultException = new RuntimeException(throwable);
}
return Flux.empty();
}).then();
} | class PartitionProcessorImpl implements PartitionProcessor {
private static final Logger logger = LoggerFactory.getLogger(PartitionProcessorImpl.class);
private static final int DefaultMaxItemCount = 100;
private final ProcessorSettings settings;
private final PartitionCheckpointer checkpointer;
private final ChangeFeedObserver observer;
private final ChangeFeedOptions options;
private final ChangeFeedContextClient documentClient;
private volatile RuntimeException resultException;
private volatile String lastContinuation;
private volatile boolean isFirstQueryForChangeFeeds;
public PartitionProcessorImpl(ChangeFeedObserver observer, ChangeFeedContextClient documentClient, ProcessorSettings settings, PartitionCheckpointer checkpointer) {
this.observer = observer;
this.documentClient = documentClient;
this.settings = settings;
this.checkpointer = checkpointer;
this.options = new ChangeFeedOptions();
this.options.setMaxItemCount(settings.getMaxItemCount());
this.options.setPartitionKeyRangeId(settings.getPartitionKeyRangeId());
this.options.setStartFromBeginning(settings.isStartFromBeginning());
this.options.setRequestContinuation(settings.getStartContinuation());
this.options.setStartDateTime(settings.getStartTime());
}
@Override
@Override
public RuntimeException getResultException() {
return this.resultException;
}
private Mono<Void> dispatchChanges(FeedResponse<JsonNode> response) {
ChangeFeedObserverContext context = new ChangeFeedObserverContextImpl(this.settings.getPartitionKeyRangeId(), response, this.checkpointer);
return this.observer.processChanges(context, response.getResults());
}
} | class PartitionProcessorImpl implements PartitionProcessor {
private static final Logger logger = LoggerFactory.getLogger(PartitionProcessorImpl.class);
private static final int DefaultMaxItemCount = 100;
private final ProcessorSettings settings;
private final PartitionCheckpointer checkpointer;
private final ChangeFeedObserver observer;
private final ChangeFeedOptions options;
private final ChangeFeedContextClient documentClient;
private volatile RuntimeException resultException;
private volatile String lastContinuation;
private volatile boolean isFirstQueryForChangeFeeds;
public PartitionProcessorImpl(ChangeFeedObserver observer, ChangeFeedContextClient documentClient, ProcessorSettings settings, PartitionCheckpointer checkpointer) {
this.observer = observer;
this.documentClient = documentClient;
this.settings = settings;
this.checkpointer = checkpointer;
this.options = new ChangeFeedOptions();
this.options.setMaxItemCount(settings.getMaxItemCount());
this.options.setPartitionKeyRangeId(settings.getPartitionKeyRangeId());
this.options.setStartFromBeginning(settings.isStartFromBeginning());
this.options.setRequestContinuation(settings.getStartContinuation());
this.options.setStartDateTime(settings.getStartTime());
}
@Override
@Override
public RuntimeException getResultException() {
return this.resultException;
}
private Mono<Void> dispatchChanges(FeedResponse<JsonNode> response) {
ChangeFeedObserverContext context = new ChangeFeedObserverContextImpl(this.settings.getPartitionKeyRangeId(), response, this.checkpointer);
return this.observer.processChanges(context, response.getResults());
}
} | |
Is it possible to suppress this warning and create a work item for this? | public int hashCode() {
if (this.components == null || this.components.size() == 0) {
return 0;
}
int [] ordinals = new int[this.components.size()];
for (int i = 0; i < this.components.size(); i++) {
ordinals[i] = this.components.get(i).GetTypeOrdinal();
}
return Objects.hashCode(ordinals);
} | } | public int hashCode() {
return super.hashCode();
} | class PartitionKeyInternal implements Comparable<PartitionKeyInternal> {
private static final String TYPE = "type";
private static final String MIN_NUMBER = "MinNumber";
private static final String MAX_NUMBER = "MaxNumber";
private static final String MIN_STRING = "MinString";
private static final String MAX_STRING = "MaxString";
private static final String INFINITY = "Infinity";
public static final PartitionKeyInternal NonePartitionKey =
new PartitionKeyInternal();
public static final PartitionKeyInternal EmptyPartitionKey =
new PartitionKeyInternal(new ArrayList<>());
@SuppressWarnings("serial")
public static final PartitionKeyInternal InfinityPartitionKey =
new PartitionKeyInternal(new ArrayList<IPartitionKeyComponent>() {{
add(new InfinityPartitionKeyComponent());
}});
@SuppressWarnings("serial")
public static final PartitionKeyInternal UndefinedPartitionKey =
new PartitionKeyInternal(new ArrayList<IPartitionKeyComponent>() {{
add(new UndefinedPartitionKeyComponent());
}});
public static final PartitionKeyInternal InclusiveMinimum = PartitionKeyInternal.EmptyPartitionKey;
public static final PartitionKeyInternal ExclusiveMaximum = PartitionKeyInternal.InfinityPartitionKey;
public static final PartitionKeyInternal Empty = PartitionKeyInternal.EmptyPartitionKey;
public static final PartitionKeyInternal None = PartitionKeyInternal.NonePartitionKey;
final List<IPartitionKeyComponent> components;
public PartitionKeyInternal(List<IPartitionKeyComponent> values) {
if (values == null) {
throw new IllegalArgumentException("values");
}
this.components = values;
}
public PartitionKeyInternal() {
this.components = null;
}
public static PartitionKeyInternal fromJsonString(String partitionKey) {
if (Strings.isNullOrEmpty(partitionKey)) {
throw new IllegalArgumentException(String.format(RMResources.UnableToDeserializePartitionKeyValue, partitionKey));
}
try {
return Utils.getSimpleObjectMapper().readValue(partitionKey, PartitionKeyInternal.class);
} catch (IOException e) {
throw new IllegalArgumentException(e);
}
}
public static PartitionKeyInternal fromObjectArray(Object[] values, boolean strict) {
if (values == null) {
throw new IllegalArgumentException("values");
}
return PartitionKeyInternal.fromObjectArray(Arrays.asList(values), strict);
}
public static PartitionKeyInternal fromObjectArray(List<Object> values, boolean strict) {
if (values == null) {
throw new IllegalArgumentException("values");
}
List<IPartitionKeyComponent> components = new ArrayList<>();
for (Object value : values) {
if (value == NullNode.instance || value == null) {
components.add(NullPartitionKeyComponent.VALUE);
} else if (value instanceof Undefined) {
components.add(UndefinedPartitionKeyComponent.VALUE);
} else if (value instanceof Boolean) {
components.add(new BoolPartitionKeyComponent((boolean) value));
} else if (value instanceof String) {
components.add(new StringPartitionKeyComponent((String) value));
} else if (isNumeric(value)) {
components.add(new NumberPartitionKeyComponent(((Number) value).doubleValue()));
} else if (value instanceof ObjectNode && ((ObjectNode) value).get(TYPE) != null) {
switch (((ObjectNode) value).get(TYPE).asText()) {
case MIN_NUMBER:
components.add(MinNumberPartitionKeyComponent.VALUE);
break;
case MAX_NUMBER:
components.add(MaxNumberPartitionKeyComponent.VALUE);
break;
case MIN_STRING:
components.add(MinStringPartitionKeyComponent.VALUE);
break;
case MAX_STRING:
components.add(MaxStringPartitionKeyComponent.VALUE);
break;
}
} else {
if (strict) {
throw new IllegalArgumentException("Unable to construct PartitionKeyInternal from objects array");
} else {
components.add(UndefinedPartitionKeyComponent.VALUE);
}
}
}
return new PartitionKeyInternal(components);
}
private static boolean isNumeric(Object value) {
return value instanceof Number;
}
private static PartitionKeyInternal getExclusiveMaximum() {
return PartitionKeyInternal.InfinityPartitionKey;
}
public static PartitionKeyInternal getEmpty() {
return PartitionKeyInternal.EmptyPartitionKey;
}
@Override
public boolean equals(Object obj) {
PartitionKeyInternal pki = as(obj, PartitionKeyInternal.class);
if (pki == null) {
return false;
}
if (pki == this) {
return true;
}
return this.compareTo(pki) == 0;
}
@Override
public int compareTo(PartitionKeyInternal other) {
if (other == null) {
throw new IllegalArgumentException("other");
} else if (other.components == null || this.components == null) {
int otherComponentsCount = other.components == null ? 0 : other.components.size();
int thisComponentsCount = this.components == null ? 0 : this.components.size();
return (int) Math.signum(thisComponentsCount - otherComponentsCount);
}
for (int i = 0; i < Math.min(this.components.size(), other.components.size()); i++) {
int leftOrdinal = this.components.get(i).GetTypeOrdinal();
int rightOrdinal = other.components.get(i).GetTypeOrdinal();
if (leftOrdinal != rightOrdinal) {
return (int) Math.signum(leftOrdinal - rightOrdinal);
}
int result = this.components.get(i).CompareTo(other.components.get(i));
if (result != 0) {
return (int) Math.signum(result);
}
}
return (int) Math.signum(this.components.size() - other.components.size());
}
public String toJson() {
try {
return Utils.getSimpleObjectMapper().writeValueAsString(this);
} catch (IOException e) {
throw new IllegalArgumentException("Unable serialize the partition key internal into the JSON string", e);
}
}
public boolean contains(PartitionKeyInternal nestedPartitionKey) {
if (this.components.size() > nestedPartitionKey.components.size()) {
return false;
}
for (int i = 0; i < this.components.size(); i++) {
if (this.components.get(i).CompareTo(nestedPartitionKey.components.get(i)) != 0) {
return false;
}
}
return true;
}
public List<IPartitionKeyComponent> getComponents() {
return components;
}
public String getEffectivePartitionKeyString(PartitionKeyInternal internalPartitionKey, PartitionKeyDefinition partitionKey) {
return PartitionKeyInternalHelper.getEffectivePartitionKeyString(internalPartitionKey, partitionKey);
}
@SuppressWarnings("serial")
static final class PartitionKeyInternalJsonSerializer extends StdSerializer<PartitionKeyInternal> {
protected PartitionKeyInternalJsonSerializer() { this(null); }
protected PartitionKeyInternalJsonSerializer(Class<PartitionKeyInternal> t) {
super(t);
}
@Override
public void serialize(PartitionKeyInternal partitionKey, JsonGenerator writer, SerializerProvider serializerProvider) {
try {
if (partitionKey.equals(PartitionKeyInternal.getExclusiveMaximum())) {
writer.writeString(INFINITY);
return;
}
writer.writeStartArray();
for (IPartitionKeyComponent componentValue : partitionKey.getComponents()) {
componentValue.JsonEncode(writer);
}
writer.writeEndArray();
} catch (IOException e) {
throw new IllegalStateException(e);
}
}
static void jsonEncode(MinNumberPartitionKeyComponent component, JsonGenerator writer) {
jsonEncodeLimit(writer, MIN_NUMBER);
}
static void jsonEncode(MaxNumberPartitionKeyComponent component, JsonGenerator writer) {
jsonEncodeLimit(writer, MAX_NUMBER);
}
static void jsonEncode(MinStringPartitionKeyComponent component, JsonGenerator writer) {
jsonEncodeLimit(writer, MIN_STRING);
}
static void jsonEncode(MaxStringPartitionKeyComponent component, JsonGenerator writer) {
jsonEncodeLimit(writer, MAX_STRING);
}
private static void jsonEncodeLimit(JsonGenerator writer, String value) {
try {
writer.writeStartObject();
writer.writeFieldName(TYPE);
writer.writeString(value);
writer.writeEndObject();
} catch (IOException e) {
throw new IllegalStateException(e);
}
}
}
@SuppressWarnings("serial")
static final class PartitionKeyInternalJsonDeserializer extends StdDeserializer<PartitionKeyInternal> {
protected PartitionKeyInternalJsonDeserializer() { this(null); }
protected PartitionKeyInternalJsonDeserializer(Class<?> vc) {
super(vc);
}
@Override
public PartitionKeyInternal deserialize(JsonParser jsonParser, DeserializationContext deserializationContext) {
ObjectCodec objectCodec = jsonParser.getCodec();
JsonNode root;
try {
root = objectCodec.readTree(jsonParser);
} catch (IOException e) {
throw new IllegalArgumentException(e);
}
if (root.isTextual() && root.asText().equals(INFINITY)) {
return PartitionKeyInternal.getExclusiveMaximum();
}
List<Object> objects = new ArrayList<>();
if (root.isArray()) {
Iterator<JsonNode> iterator = root.iterator();
while (iterator.hasNext()) {
JsonNode node = iterator.next();
if (node.isNull()) {
objects.add(null);
} else if (node.isNumber()) {
objects.add(node.asDouble());
} else if (node.isBoolean()) {
objects.add(node.asBoolean());
} else if (node.isTextual()) {
objects.add(node.asText());
} else if (node.isArray() && node.size() == 0
|| node.isObject()
&& (node.fields() == null || !node.fields().hasNext())) {
objects.add(Undefined.Value());
} else {
objects.add(node);
}
}
return PartitionKeyInternal.fromObjectArray(objects, true);
}
throw new IllegalStateException(String.format(
"Unable to deserialize PartitionKeyInternal '%s'",
root.toString()));
}
}
} | class PartitionKeyInternal implements Comparable<PartitionKeyInternal> {
private static final String TYPE = "type";
private static final String MIN_NUMBER = "MinNumber";
private static final String MAX_NUMBER = "MaxNumber";
private static final String MIN_STRING = "MinString";
private static final String MAX_STRING = "MaxString";
private static final String INFINITY = "Infinity";
public static final PartitionKeyInternal NonePartitionKey =
new PartitionKeyInternal();
public static final PartitionKeyInternal EmptyPartitionKey =
new PartitionKeyInternal(new ArrayList<>());
@SuppressWarnings("serial")
public static final PartitionKeyInternal InfinityPartitionKey =
new PartitionKeyInternal(new ArrayList<IPartitionKeyComponent>() {{
add(new InfinityPartitionKeyComponent());
}});
@SuppressWarnings("serial")
public static final PartitionKeyInternal UndefinedPartitionKey =
new PartitionKeyInternal(new ArrayList<IPartitionKeyComponent>() {{
add(new UndefinedPartitionKeyComponent());
}});
public static final PartitionKeyInternal InclusiveMinimum = PartitionKeyInternal.EmptyPartitionKey;
public static final PartitionKeyInternal ExclusiveMaximum = PartitionKeyInternal.InfinityPartitionKey;
public static final PartitionKeyInternal Empty = PartitionKeyInternal.EmptyPartitionKey;
public static final PartitionKeyInternal None = PartitionKeyInternal.NonePartitionKey;
final List<IPartitionKeyComponent> components;
public PartitionKeyInternal(List<IPartitionKeyComponent> values) {
if (values == null) {
throw new IllegalArgumentException("values");
}
this.components = values;
}
public PartitionKeyInternal() {
this.components = null;
}
public static PartitionKeyInternal fromJsonString(String partitionKey) {
if (Strings.isNullOrEmpty(partitionKey)) {
throw new IllegalArgumentException(String.format(RMResources.UnableToDeserializePartitionKeyValue, partitionKey));
}
try {
return Utils.getSimpleObjectMapper().readValue(partitionKey, PartitionKeyInternal.class);
} catch (IOException e) {
throw new IllegalArgumentException(e);
}
}
public static PartitionKeyInternal fromObjectArray(Object[] values, boolean strict) {
if (values == null) {
throw new IllegalArgumentException("values");
}
return PartitionKeyInternal.fromObjectArray(Arrays.asList(values), strict);
}
public static PartitionKeyInternal fromObjectArray(List<Object> values, boolean strict) {
if (values == null) {
throw new IllegalArgumentException("values");
}
List<IPartitionKeyComponent> components = new ArrayList<>();
for (Object value : values) {
if (value == NullNode.instance || value == null) {
components.add(NullPartitionKeyComponent.VALUE);
} else if (value instanceof Undefined) {
components.add(UndefinedPartitionKeyComponent.VALUE);
} else if (value instanceof Boolean) {
components.add(new BoolPartitionKeyComponent((boolean) value));
} else if (value instanceof String) {
components.add(new StringPartitionKeyComponent((String) value));
} else if (isNumeric(value)) {
components.add(new NumberPartitionKeyComponent(((Number) value).doubleValue()));
} else if (value instanceof ObjectNode && ((ObjectNode) value).get(TYPE) != null) {
switch (((ObjectNode) value).get(TYPE).asText()) {
case MIN_NUMBER:
components.add(MinNumberPartitionKeyComponent.VALUE);
break;
case MAX_NUMBER:
components.add(MaxNumberPartitionKeyComponent.VALUE);
break;
case MIN_STRING:
components.add(MinStringPartitionKeyComponent.VALUE);
break;
case MAX_STRING:
components.add(MaxStringPartitionKeyComponent.VALUE);
break;
}
} else {
if (strict) {
throw new IllegalArgumentException("Unable to construct PartitionKeyInternal from objects array");
} else {
components.add(UndefinedPartitionKeyComponent.VALUE);
}
}
}
return new PartitionKeyInternal(components);
}
private static boolean isNumeric(Object value) {
return value instanceof Number;
}
private static PartitionKeyInternal getExclusiveMaximum() {
return PartitionKeyInternal.InfinityPartitionKey;
}
public static PartitionKeyInternal getEmpty() {
return PartitionKeyInternal.EmptyPartitionKey;
}
@Override
public boolean equals(Object obj) {
PartitionKeyInternal pki = as(obj, PartitionKeyInternal.class);
if (pki == null) {
return false;
}
if (pki == this) {
return true;
}
return this.compareTo(pki) == 0;
}
@Override
public int compareTo(PartitionKeyInternal other) {
if (other == null) {
throw new IllegalArgumentException("other");
} else if (other.components == null || this.components == null) {
int otherComponentsCount = other.components == null ? 0 : other.components.size();
int thisComponentsCount = this.components == null ? 0 : this.components.size();
return (int) Math.signum(thisComponentsCount - otherComponentsCount);
}
for (int i = 0; i < Math.min(this.components.size(), other.components.size()); i++) {
int leftOrdinal = this.components.get(i).GetTypeOrdinal();
int rightOrdinal = other.components.get(i).GetTypeOrdinal();
if (leftOrdinal != rightOrdinal) {
return (int) Math.signum(leftOrdinal - rightOrdinal);
}
int result = this.components.get(i).CompareTo(other.components.get(i));
if (result != 0) {
return (int) Math.signum(result);
}
}
return (int) Math.signum(this.components.size() - other.components.size());
}
public String toJson() {
try {
return Utils.getSimpleObjectMapper().writeValueAsString(this);
} catch (IOException e) {
throw new IllegalArgumentException("Unable serialize the partition key internal into the JSON string", e);
}
}
public boolean contains(PartitionKeyInternal nestedPartitionKey) {
if (this.components.size() > nestedPartitionKey.components.size()) {
return false;
}
for (int i = 0; i < this.components.size(); i++) {
if (this.components.get(i).CompareTo(nestedPartitionKey.components.get(i)) != 0) {
return false;
}
}
return true;
}
public List<IPartitionKeyComponent> getComponents() {
return components;
}
public String getEffectivePartitionKeyString(PartitionKeyInternal internalPartitionKey, PartitionKeyDefinition partitionKey) {
return PartitionKeyInternalHelper.getEffectivePartitionKeyString(internalPartitionKey, partitionKey);
}
/**
 * Jackson serializer for {@link PartitionKeyInternal}: the exclusive-maximum
 * key serializes to the string "Infinity"; any other key serializes to a JSON
 * array of its components.
 */
@SuppressWarnings("serial")
static final class PartitionKeyInternalJsonSerializer extends StdSerializer<PartitionKeyInternal> {
    protected PartitionKeyInternalJsonSerializer() { this(null); }
    protected PartitionKeyInternalJsonSerializer(Class<PartitionKeyInternal> t) {
        super(t);
    }
    @Override
    public void serialize(PartitionKeyInternal partitionKey, JsonGenerator writer, SerializerProvider serializerProvider) {
        try {
            if (partitionKey.equals(PartitionKeyInternal.getExclusiveMaximum())) {
                writer.writeString(INFINITY);
                return;
            }
            // Each component writes its own JSON encoding into the array.
            writer.writeStartArray();
            for (IPartitionKeyComponent componentValue : partitionKey.getComponents()) {
                componentValue.JsonEncode(writer);
            }
            writer.writeEndArray();
        } catch (IOException e) {
            // Jackson generators throw IOException; surface it unchecked.
            throw new IllegalStateException(e);
        }
    }
    // The four limit components all encode as a {"type": "<limit name>"} object.
    static void jsonEncode(MinNumberPartitionKeyComponent component, JsonGenerator writer) {
        jsonEncodeLimit(writer, MIN_NUMBER);
    }
    static void jsonEncode(MaxNumberPartitionKeyComponent component, JsonGenerator writer) {
        jsonEncodeLimit(writer, MAX_NUMBER);
    }
    static void jsonEncode(MinStringPartitionKeyComponent component, JsonGenerator writer) {
        jsonEncodeLimit(writer, MIN_STRING);
    }
    static void jsonEncode(MaxStringPartitionKeyComponent component, JsonGenerator writer) {
        jsonEncodeLimit(writer, MAX_STRING);
    }
    // Shared encoder for the limit markers above.
    private static void jsonEncodeLimit(JsonGenerator writer, String value) {
        try {
            writer.writeStartObject();
            writer.writeFieldName(TYPE);
            writer.writeString(value);
            writer.writeEndObject();
        } catch (IOException e) {
            throw new IllegalStateException(e);
        }
    }
}
/**
 * Jackson deserializer for {@link PartitionKeyInternal}: the string "Infinity"
 * maps to the exclusive-maximum key; a JSON array maps element-by-element to
 * partition key components.
 */
@SuppressWarnings("serial")
static final class PartitionKeyInternalJsonDeserializer extends StdDeserializer<PartitionKeyInternal> {
    protected PartitionKeyInternalJsonDeserializer() { this(null); }
    protected PartitionKeyInternalJsonDeserializer(Class<?> vc) {
        super(vc);
    }
    @Override
    public PartitionKeyInternal deserialize(JsonParser jsonParser, DeserializationContext deserializationContext) {
        ObjectCodec objectCodec = jsonParser.getCodec();
        JsonNode root;
        try {
            root = objectCodec.readTree(jsonParser);
        } catch (IOException e) {
            throw new IllegalArgumentException(e);
        }
        // The bare string "Infinity" encodes the exclusive-maximum key.
        if (root.isTextual() && root.asText().equals(INFINITY)) {
            return PartitionKeyInternal.getExclusiveMaximum();
        }
        List<Object> objects = new ArrayList<>();
        if (root.isArray()) {
            Iterator<JsonNode> iterator = root.iterator();
            while (iterator.hasNext()) {
                JsonNode node = iterator.next();
                if (node.isNull()) {
                    objects.add(null);
                } else if (node.isNumber()) {
                    // All JSON numbers are widened to double.
                    objects.add(node.asDouble());
                } else if (node.isBoolean()) {
                    objects.add(node.asBoolean());
                } else if (node.isTextual()) {
                    objects.add(node.asText());
                } else if (node.isArray() && node.size() == 0
                    || node.isObject()
                    && (node.fields() == null || !node.fields().hasNext())) {
                    // An empty array or empty object stands for "undefined".
                    objects.add(Undefined.Value());
                } else {
                    // Anything else is kept as the raw JsonNode.
                    objects.add(node);
                }
            }
            return PartitionKeyInternal.fromObjectArray(objects, true);
        }
        throw new IllegalStateException(String.format(
            "Unable to deserialize PartitionKeyInternal '%s'",
            root.toString()));
    }
}
} |
Is it possible to suppress this warning and create a work item for this? | public int hashCode() {
return Objects.hash(this.version, this.globalLsn, this.localLsnByRegion);
} | return Objects.hash(this.version, this.globalLsn, this.localLsnByRegion); | public int hashCode() {
return super.hashCode();
} | class VectorSessionToken implements ISessionToken {
private final static Logger logger = LoggerFactory.getLogger(VectorSessionToken.class);
private final static char SegmentSeparator = '
private final static char RegionProgressSeparator = '=';
private final long version;
private final long globalLsn;
private final UnmodifiableMap<Integer, Long> localLsnByRegion;
private final String sessionToken;
// Convenience constructor: passing null makes the four-argument constructor
// build the canonical string form of the token.
private VectorSessionToken(long version, long globalLsn, UnmodifiableMap<Integer, Long> localLsnByRegion) {
    this(version, globalLsn, localLsnByRegion, null);
}
/**
 * Primary constructor. When {@code sessionToken} is null the canonical string
 * form "version{SEP}globalLsn[{SEP}regionId=localLsn...]" is rebuilt from the
 * numeric fields; otherwise the supplied string is stored as-is.
 */
private VectorSessionToken(long version, long globalLsn, UnmodifiableMap<Integer, Long> localLsnByRegion, String sessionToken) {
    this.version = version;
    this.globalLsn = globalLsn;
    this.localLsnByRegion = localLsnByRegion;
    if (sessionToken == null) {
        // One "regionId=localLsn" element per region, joined by the segment separator.
        String regionProgress = String.join(
            Character.toString(VectorSessionToken.SegmentSeparator),
            localLsnByRegion.
            entrySet()
            .stream()
            .map(kvp -> new StringBuilder().append(kvp.getKey()).append(VectorSessionToken.RegionProgressSeparator).append(kvp.getValue()))
            .collect(Collectors.toList()));
        if (Strings.isNullOrEmpty(regionProgress)) {
            // No per-region progress: token is just "version{SEP}globalLsn".
            StringBuilder sb = new StringBuilder();
            sb.append(this.version)
                .append(VectorSessionToken.SegmentSeparator)
                .append(this.globalLsn);
            this.sessionToken = sb.toString();
        } else {
            StringBuilder sb = new StringBuilder();
            sb.append(this.version)
                .append(VectorSessionToken.SegmentSeparator)
                .append(this.globalLsn)
                .append(VectorSessionToken.SegmentSeparator)
                .append(regionProgress);
            this.sessionToken = sb.toString();
        }
    } else {
        this.sessionToken = sessionToken;
    }
}
/**
 * Parses {@code sessionToken} and, on success, stores the resulting token in
 * {@code parsedSessionToken}.
 *
 * @return true when the string was a well-formed vector session token
 */
public static boolean tryCreate(String sessionToken, ValueHolder<ISessionToken> parsedSessionToken) {
    ValueHolder<Long> versionHolder = ValueHolder.initialize(-1l);
    ValueHolder<Long> globalLsnHolder = ValueHolder.initialize(-1l);
    ValueHolder<UnmodifiableMap<Integer, Long>> localLsnByRegion = ValueHolder.initialize(null);
    if (VectorSessionToken.tryParseSessionToken(
        sessionToken,
        versionHolder,
        globalLsnHolder,
        localLsnByRegion)) {
        // Reuse the original string so the round-trip form is preserved exactly.
        parsedSessionToken.v = new VectorSessionToken(versionHolder.v, globalLsnHolder.v, localLsnByRegion.v, sessionToken);
        return true;
    } else {
        return false;
    }
}
/** @return the global LSN carried by this session token. */
public long getLSN() {
    return this.globalLsn;
}
@Override
// Equality: same version, same global LSN, and equivalent region progress.
// NOTE(review): no hashCode() override is visible alongside this equals() in
// this class — verify the equals/hashCode contract is upheld.
public boolean equals(Object obj) {
    VectorSessionToken other = Utils.as(obj, VectorSessionToken.class);
    if (other == null) {
        // Also covers obj == null and non-VectorSessionToken arguments.
        return false;
    }
    return this.version == other.version
        && this.globalLsn == other.globalLsn
        && this.areRegionProgressEqual(other.localLsnByRegion);
}
@Override
// A token is "valid" with respect to another when the other token has
// progressed at least as far: same-or-higher version, same-or-higher global
// LSN, and no region in which the other token lags behind this one.
public boolean isValid(ISessionToken otherSessionToken) throws CosmosClientException {
    VectorSessionToken other = Utils.as(otherSessionToken, VectorSessionToken.class);
    if (other == null) {
        throw new IllegalArgumentException("otherSessionToken");
    }
    if (other.version < this.version || other.globalLsn < this.globalLsn) {
        return false;
    }
    // Tokens of the same version must describe the same number of regions.
    if (other.version == this.version && other.localLsnByRegion.size() != this.localLsnByRegion.size()) {
        throw new InternalServerErrorException(
            String.format(RMResources.InvalidRegionsInSessionToken, this.sessionToken, other.sessionToken));
    }
    for (Map.Entry<Integer, Long> kvp : other.localLsnByRegion.entrySet()) {
        Integer regionId = kvp.getKey();
        long otherLocalLsn = kvp.getValue();
        ValueHolder<Long> localLsn = ValueHolder.initialize(-1l);
        if (!Utils.tryGetValue(this.localLsnByRegion, regionId, localLsn)) {
            if (this.version == other.version) {
                // Same version but differing region sets: malformed input.
                throw new InternalServerErrorException(
                    String.format(RMResources.InvalidRegionsInSessionToken, this.sessionToken, other.sessionToken));
            } else {
                // A newer-version token may know regions this one does not;
                // there is nothing to compare for those regions.
            }
        } else {
            if (otherLocalLsn < localLsn.v) {
                // The other token is behind this one in this region.
                return false;
            }
        }
    }
    return true;
}
/**
 * Merges two session tokens into one that dominates both: the higher version,
 * the higher global LSN, and per region the higher local LSN.
 *
 * @throws CosmosClientException if the tokens carry the same version but
 *         disagree on the set of regions (malformed input)
 */
public ISessionToken merge(ISessionToken obj) throws CosmosClientException {
    VectorSessionToken other = Utils.as(obj, VectorSessionToken.class);
    if (other == null) {
        throw new IllegalArgumentException("obj");
    }
    if (this.version == other.version && this.localLsnByRegion.size() != other.localLsnByRegion.size()) {
        throw new InternalServerErrorException(
            String.format(RMResources.InvalidRegionsInSessionToken, this.sessionToken, other.sessionToken));
    }
    VectorSessionToken sessionTokenWithHigherVersion;
    VectorSessionToken sessionTokenWithLowerVersion;
    if (this.version < other.version) {
        sessionTokenWithLowerVersion = this;
        sessionTokenWithHigherVersion = other;
    } else {
        sessionTokenWithLowerVersion = other;
        sessionTokenWithHigherVersion = this;
    }
    // Iterate the higher-version token's regions: that token's region set wins.
    Map<Integer, Long> highestLocalLsnByRegion = new HashMap<>();
    for (Map.Entry<Integer, Long> kvp : sessionTokenWithHigherVersion.localLsnByRegion.entrySet()) {
        Integer regionId = kvp.getKey();
        long localLsn1 = kvp.getValue();
        ValueHolder<Long> localLsn2 = ValueHolder.initialize(-1l);
        if (Utils.tryGetValue(sessionTokenWithLowerVersion.localLsnByRegion, regionId, localLsn2)) {
            highestLocalLsnByRegion.put(regionId, Math.max(localLsn1, localLsn2.v));
        } else if (this.version == other.version) {
            // Same version implies identical region sets; a miss is malformed input.
            throw new InternalServerErrorException(
                String.format(RMResources.InvalidRegionsInSessionToken, this.sessionToken, other.sessionToken));
        } else {
            // Region known only to the newer token: take its progress as-is.
            highestLocalLsnByRegion.put(regionId, localLsn1);
        }
    }
    return new VectorSessionToken(
        Math.max(this.version, other.version),
        Math.max(this.globalLsn, other.globalLsn),
        (UnmodifiableMap<Integer, Long>) UnmodifiableMap.unmodifiableMap(highestLocalLsnByRegion));
}
/** @return the canonical string form of this token, built at construction time. */
public String convertToString() {
    return this.sessionToken;
}
// Compares per-region progress maps of equal size.
// NOTE(review): a region id present in this map but absent from `other` is
// silently skipped, so two same-size maps with different key sets can compare
// equal — confirm this is intended.
private boolean areRegionProgressEqual(UnmodifiableMap<Integer, Long> other) {
    if (this.localLsnByRegion.size() != other.size()) {
        return false;
    }
    for (Map.Entry<Integer, Long> kvp : this.localLsnByRegion.entrySet()) {
        Integer regionId = kvp.getKey();
        ValueHolder<Long> localLsn1 = ValueHolder.initialize(kvp.getValue());
        ValueHolder<Long> localLsn2 = ValueHolder.initialize(-1l);
        if (Utils.tryGetValue(other, regionId, localLsn2)) {
            if (ObjectUtils.notEqual(localLsn1.v, localLsn2.v)) {
                return false;
            }
        }
    }
    return true;
}
/**
 * Parses the string form "version{SEP}globalLsn[{SEP}regionId=localLsn...]".
 * On malformed input the method logs and returns false; note the holders may
 * be partially populated when a later segment fails to parse.
 */
private static boolean tryParseSessionToken(
    String sessionToken,
    ValueHolder<Long> version,
    ValueHolder<Long> globalLsn,
    ValueHolder<UnmodifiableMap<Integer, Long>> localLsnByRegion) {
    version.v = 0L;
    localLsnByRegion.v = null;
    globalLsn.v = -1L;
    if (Strings.isNullOrEmpty(sessionToken)) {
        logger.warn("SESSION token is empty");
        return false;
    }
    String[] segments = StringUtils.split(sessionToken, VectorSessionToken.SegmentSeparator);
    if (segments.length < 2) {
        // At minimum a version and a global LSN are required.
        return false;
    }
    if (!tryParseLong(segments[0], version)
        || !tryParseLong(segments[1], globalLsn)) {
        logger.warn("Unexpected session token version number '{}' OR global lsn '{}'.", segments[0], segments[1]);
        return false;
    }
    // Remaining segments are per-region "regionId=localLsn" pairs.
    Map<Integer, Long> lsnByRegion = new HashMap<>();
    for (int i = 2; i < segments.length; i++) {
        String regionSegment = segments[i];
        String[] regionIdWithLsn = StringUtils.split(regionSegment, VectorSessionToken.RegionProgressSeparator);
        if (regionIdWithLsn.length != 2) {
            logger.warn("Unexpected region progress segment length '{}' in session token.", regionIdWithLsn.length);
            return false;
        }
        ValueHolder<Integer> regionId = ValueHolder.initialize(0);
        ValueHolder<Long> localLsn = ValueHolder.initialize(-1l);
        if (!tryParseInt(regionIdWithLsn[0], regionId)
            || !tryParseLong(regionIdWithLsn[1], localLsn)) {
            logger.warn("Unexpected region progress '{}' for region '{}' in session token.", regionIdWithLsn[0], regionIdWithLsn[1]);
            return false;
        }
        lsnByRegion.put(regionId.v, localLsn.v);
    }
    localLsnByRegion.v = (UnmodifiableMap<Integer, Long>) UnmodifiableMap.unmodifiableMap(lsnByRegion);
    return true;
}
/**
 * Attempts to parse {@code str} as a signed long.
 *
 * @param str   the text to parse (may be null)
 * @param value receives the parsed value on success; untouched on failure
 * @return true when parsing succeeded
 */
private static boolean tryParseLong(String str, ValueHolder<Long> value) {
    try {
        value.v = Long.parseLong(str);
        return true;
    } catch (NumberFormatException e) {
        // Long.parseLong throws NumberFormatException for null, empty and
        // malformed input alike, so narrowing from Exception is behavior-preserving.
        return false;
    }
}
/**
 * Attempts to parse {@code str} as a signed int.
 *
 * @param str   the text to parse (may be null)
 * @param value receives the parsed value on success; untouched on failure
 * @return true when parsing succeeded
 */
private static boolean tryParseInt(String str, ValueHolder<Integer> value) {
    try {
        value.v = Integer.parseInt(str);
        return true;
    } catch (NumberFormatException e) {
        // Integer.parseInt throws NumberFormatException for null, empty and
        // malformed input alike, so narrowing from Exception is behavior-preserving.
        return false;
    }
}
} | class VectorSessionToken implements ISessionToken {
private final static Logger logger = LoggerFactory.getLogger(VectorSessionToken.class);
private final static char SegmentSeparator = '
private final static char RegionProgressSeparator = '=';
private final long version;
private final long globalLsn;
private final UnmodifiableMap<Integer, Long> localLsnByRegion;
private final String sessionToken;
// Convenience constructor: passing null makes the four-argument constructor
// build the canonical string form of the token.
private VectorSessionToken(long version, long globalLsn, UnmodifiableMap<Integer, Long> localLsnByRegion) {
    this(version, globalLsn, localLsnByRegion, null);
}
/**
 * Primary constructor. When {@code sessionToken} is null the canonical string
 * form "version{SEP}globalLsn[{SEP}regionId=localLsn...]" is rebuilt from the
 * numeric fields; otherwise the supplied string is stored as-is.
 */
private VectorSessionToken(long version, long globalLsn, UnmodifiableMap<Integer, Long> localLsnByRegion, String sessionToken) {
    this.version = version;
    this.globalLsn = globalLsn;
    this.localLsnByRegion = localLsnByRegion;
    if (sessionToken == null) {
        // One "regionId=localLsn" element per region, joined by the segment separator.
        String regionProgress = String.join(
            Character.toString(VectorSessionToken.SegmentSeparator),
            localLsnByRegion.
            entrySet()
            .stream()
            .map(kvp -> new StringBuilder().append(kvp.getKey()).append(VectorSessionToken.RegionProgressSeparator).append(kvp.getValue()))
            .collect(Collectors.toList()));
        if (Strings.isNullOrEmpty(regionProgress)) {
            // No per-region progress: token is just "version{SEP}globalLsn".
            StringBuilder sb = new StringBuilder();
            sb.append(this.version)
                .append(VectorSessionToken.SegmentSeparator)
                .append(this.globalLsn);
            this.sessionToken = sb.toString();
        } else {
            StringBuilder sb = new StringBuilder();
            sb.append(this.version)
                .append(VectorSessionToken.SegmentSeparator)
                .append(this.globalLsn)
                .append(VectorSessionToken.SegmentSeparator)
                .append(regionProgress);
            this.sessionToken = sb.toString();
        }
    } else {
        this.sessionToken = sessionToken;
    }
}
/**
 * Parses {@code sessionToken} and, on success, stores the resulting token in
 * {@code parsedSessionToken}.
 *
 * @return true when the string was a well-formed vector session token
 */
public static boolean tryCreate(String sessionToken, ValueHolder<ISessionToken> parsedSessionToken) {
    ValueHolder<Long> versionHolder = ValueHolder.initialize(-1l);
    ValueHolder<Long> globalLsnHolder = ValueHolder.initialize(-1l);
    ValueHolder<UnmodifiableMap<Integer, Long>> localLsnByRegion = ValueHolder.initialize(null);
    if (VectorSessionToken.tryParseSessionToken(
        sessionToken,
        versionHolder,
        globalLsnHolder,
        localLsnByRegion)) {
        // Reuse the original string so the round-trip form is preserved exactly.
        parsedSessionToken.v = new VectorSessionToken(versionHolder.v, globalLsnHolder.v, localLsnByRegion.v, sessionToken);
        return true;
    } else {
        return false;
    }
}
/** @return the global LSN carried by this session token. */
public long getLSN() {
    return this.globalLsn;
}
@Override
// Equality: same version, same global LSN, and equivalent region progress.
// NOTE(review): no hashCode() override is visible alongside this equals() in
// this class — verify the equals/hashCode contract is upheld.
public boolean equals(Object obj) {
    VectorSessionToken other = Utils.as(obj, VectorSessionToken.class);
    if (other == null) {
        // Also covers obj == null and non-VectorSessionToken arguments.
        return false;
    }
    return this.version == other.version
        && this.globalLsn == other.globalLsn
        && this.areRegionProgressEqual(other.localLsnByRegion);
}
@Override
// A token is "valid" with respect to another when the other token has
// progressed at least as far: same-or-higher version, same-or-higher global
// LSN, and no region in which the other token lags behind this one.
public boolean isValid(ISessionToken otherSessionToken) throws CosmosClientException {
    VectorSessionToken other = Utils.as(otherSessionToken, VectorSessionToken.class);
    if (other == null) {
        throw new IllegalArgumentException("otherSessionToken");
    }
    if (other.version < this.version || other.globalLsn < this.globalLsn) {
        return false;
    }
    // Tokens of the same version must describe the same number of regions.
    if (other.version == this.version && other.localLsnByRegion.size() != this.localLsnByRegion.size()) {
        throw new InternalServerErrorException(
            String.format(RMResources.InvalidRegionsInSessionToken, this.sessionToken, other.sessionToken));
    }
    for (Map.Entry<Integer, Long> kvp : other.localLsnByRegion.entrySet()) {
        Integer regionId = kvp.getKey();
        long otherLocalLsn = kvp.getValue();
        ValueHolder<Long> localLsn = ValueHolder.initialize(-1l);
        if (!Utils.tryGetValue(this.localLsnByRegion, regionId, localLsn)) {
            if (this.version == other.version) {
                // Same version but differing region sets: malformed input.
                throw new InternalServerErrorException(
                    String.format(RMResources.InvalidRegionsInSessionToken, this.sessionToken, other.sessionToken));
            } else {
                // A newer-version token may know regions this one does not;
                // there is nothing to compare for those regions.
            }
        } else {
            if (otherLocalLsn < localLsn.v) {
                // The other token is behind this one in this region.
                return false;
            }
        }
    }
    return true;
}
/**
 * Merges two session tokens into one that dominates both: the higher version,
 * the higher global LSN, and per region the higher local LSN.
 *
 * @throws CosmosClientException if the tokens carry the same version but
 *         disagree on the set of regions (malformed input)
 */
public ISessionToken merge(ISessionToken obj) throws CosmosClientException {
    VectorSessionToken other = Utils.as(obj, VectorSessionToken.class);
    if (other == null) {
        throw new IllegalArgumentException("obj");
    }
    if (this.version == other.version && this.localLsnByRegion.size() != other.localLsnByRegion.size()) {
        throw new InternalServerErrorException(
            String.format(RMResources.InvalidRegionsInSessionToken, this.sessionToken, other.sessionToken));
    }
    VectorSessionToken sessionTokenWithHigherVersion;
    VectorSessionToken sessionTokenWithLowerVersion;
    if (this.version < other.version) {
        sessionTokenWithLowerVersion = this;
        sessionTokenWithHigherVersion = other;
    } else {
        sessionTokenWithLowerVersion = other;
        sessionTokenWithHigherVersion = this;
    }
    // Iterate the higher-version token's regions: that token's region set wins.
    Map<Integer, Long> highestLocalLsnByRegion = new HashMap<>();
    for (Map.Entry<Integer, Long> kvp : sessionTokenWithHigherVersion.localLsnByRegion.entrySet()) {
        Integer regionId = kvp.getKey();
        long localLsn1 = kvp.getValue();
        ValueHolder<Long> localLsn2 = ValueHolder.initialize(-1l);
        if (Utils.tryGetValue(sessionTokenWithLowerVersion.localLsnByRegion, regionId, localLsn2)) {
            highestLocalLsnByRegion.put(regionId, Math.max(localLsn1, localLsn2.v));
        } else if (this.version == other.version) {
            // Same version implies identical region sets; a miss is malformed input.
            throw new InternalServerErrorException(
                String.format(RMResources.InvalidRegionsInSessionToken, this.sessionToken, other.sessionToken));
        } else {
            // Region known only to the newer token: take its progress as-is.
            highestLocalLsnByRegion.put(regionId, localLsn1);
        }
    }
    return new VectorSessionToken(
        Math.max(this.version, other.version),
        Math.max(this.globalLsn, other.globalLsn),
        (UnmodifiableMap<Integer, Long>) UnmodifiableMap.unmodifiableMap(highestLocalLsnByRegion));
}
/** @return the canonical string form of this token, built at construction time. */
public String convertToString() {
    return this.sessionToken;
}
// Compares per-region progress maps of equal size.
// NOTE(review): a region id present in this map but absent from `other` is
// silently skipped, so two same-size maps with different key sets can compare
// equal — confirm this is intended.
private boolean areRegionProgressEqual(UnmodifiableMap<Integer, Long> other) {
    if (this.localLsnByRegion.size() != other.size()) {
        return false;
    }
    for (Map.Entry<Integer, Long> kvp : this.localLsnByRegion.entrySet()) {
        Integer regionId = kvp.getKey();
        ValueHolder<Long> localLsn1 = ValueHolder.initialize(kvp.getValue());
        ValueHolder<Long> localLsn2 = ValueHolder.initialize(-1l);
        if (Utils.tryGetValue(other, regionId, localLsn2)) {
            if (ObjectUtils.notEqual(localLsn1.v, localLsn2.v)) {
                return false;
            }
        }
    }
    return true;
}
/**
 * Parses the string form "version{SEP}globalLsn[{SEP}regionId=localLsn...]".
 * On malformed input the method logs and returns false; note the holders may
 * be partially populated when a later segment fails to parse.
 */
private static boolean tryParseSessionToken(
    String sessionToken,
    ValueHolder<Long> version,
    ValueHolder<Long> globalLsn,
    ValueHolder<UnmodifiableMap<Integer, Long>> localLsnByRegion) {
    version.v = 0L;
    localLsnByRegion.v = null;
    globalLsn.v = -1L;
    if (Strings.isNullOrEmpty(sessionToken)) {
        logger.warn("SESSION token is empty");
        return false;
    }
    String[] segments = StringUtils.split(sessionToken, VectorSessionToken.SegmentSeparator);
    if (segments.length < 2) {
        // At minimum a version and a global LSN are required.
        return false;
    }
    if (!tryParseLong(segments[0], version)
        || !tryParseLong(segments[1], globalLsn)) {
        logger.warn("Unexpected session token version number '{}' OR global lsn '{}'.", segments[0], segments[1]);
        return false;
    }
    // Remaining segments are per-region "regionId=localLsn" pairs.
    Map<Integer, Long> lsnByRegion = new HashMap<>();
    for (int i = 2; i < segments.length; i++) {
        String regionSegment = segments[i];
        String[] regionIdWithLsn = StringUtils.split(regionSegment, VectorSessionToken.RegionProgressSeparator);
        if (regionIdWithLsn.length != 2) {
            logger.warn("Unexpected region progress segment length '{}' in session token.", regionIdWithLsn.length);
            return false;
        }
        ValueHolder<Integer> regionId = ValueHolder.initialize(0);
        ValueHolder<Long> localLsn = ValueHolder.initialize(-1l);
        if (!tryParseInt(regionIdWithLsn[0], regionId)
            || !tryParseLong(regionIdWithLsn[1], localLsn)) {
            logger.warn("Unexpected region progress '{}' for region '{}' in session token.", regionIdWithLsn[0], regionIdWithLsn[1]);
            return false;
        }
        lsnByRegion.put(regionId.v, localLsn.v);
    }
    localLsnByRegion.v = (UnmodifiableMap<Integer, Long>) UnmodifiableMap.unmodifiableMap(lsnByRegion);
    return true;
}
/**
 * Attempts to parse {@code str} as a signed long.
 *
 * @param str   the text to parse (may be null)
 * @param value receives the parsed value on success; untouched on failure
 * @return true when parsing succeeded
 */
private static boolean tryParseLong(String str, ValueHolder<Long> value) {
    try {
        value.v = Long.parseLong(str);
        return true;
    } catch (NumberFormatException e) {
        // Long.parseLong throws NumberFormatException for null, empty and
        // malformed input alike, so narrowing from Exception is behavior-preserving.
        return false;
    }
}
/**
 * Attempts to parse {@code str} as a signed int.
 *
 * @param str   the text to parse (may be null)
 * @param value receives the parsed value on success; untouched on failure
 * @return true when parsing succeeded
 */
private static boolean tryParseInt(String str, ValueHolder<Integer> value) {
    try {
        value.v = Integer.parseInt(str);
        return true;
    } catch (NumberFormatException e) {
        // Integer.parseInt throws NumberFormatException for null, empty and
        // malformed input alike, so narrowing from Exception is behavior-preserving.
        return false;
    }
}
} |
Looks good. | public Mono<Void> run(CancellationToken cancellationToken) {
this.lastContinuation = this.settings.getStartContinuation();
this.isFirstQueryForChangeFeeds = true;
this.options.setRequestContinuation(this.lastContinuation);
return Flux.just(this)
.flatMap( value -> {
if (cancellationToken.isCancellationRequested()) {
return Flux.empty();
}
if(this.isFirstQueryForChangeFeeds) {
this.isFirstQueryForChangeFeeds = false;
return Flux.just(value);
}
ZonedDateTime stopTimer = ZonedDateTime.now().plus(this.settings.getFeedPollDelay());
return Mono.just(value)
.delayElement(Duration.ofMillis(100))
.repeat( () -> {
ZonedDateTime currentTime = ZonedDateTime.now();
return !cancellationToken.isCancellationRequested() && currentTime.isBefore(stopTimer);
}).last();
})
.flatMap(value -> this.documentClient.createDocumentChangeFeedQuery(this.settings.getCollectionSelfLink(),
this.options)
.limitRequest(1)
)
.flatMap(documentFeedResponse -> {
if (cancellationToken.isCancellationRequested()) return Flux.error(new TaskCancelledException());
this.lastContinuation = documentFeedResponse.getContinuationToken();
if (documentFeedResponse.getResults() != null && documentFeedResponse.getResults().size() > 0) {
return this.dispatchChanges(documentFeedResponse)
.doOnError(throwable -> {
logger.debug("Exception was thrown from thread {}", Thread.currentThread().getId(), throwable);
})
.doOnSuccess((Void) -> {
this.options.setRequestContinuation(this.lastContinuation);
if (cancellationToken.isCancellationRequested()) throw new TaskCancelledException();
});
}
this.options.setRequestContinuation(this.lastContinuation);
if (cancellationToken.isCancellationRequested()) {
return Flux.error(new TaskCancelledException());
}
return Flux.empty();
})
.doOnComplete(() -> {
if (this.options.getMaxItemCount().compareTo(this.settings.getMaxItemCount()) != 0) {
this.options.setMaxItemCount(this.settings.getMaxItemCount());
}
})
.onErrorResume(throwable -> {
if (throwable instanceof CosmosClientException) {
CosmosClientException clientException = (CosmosClientException) throwable;
logger.warn("CosmosClientException: partition {} from thread {}",
BridgeInternal.getPartitionKeyInternal(this.options.getPartitionKey()), Thread.currentThread().getId(), clientException);
StatusCodeErrorType docDbError = ExceptionClassifier.classifyClientException(clientException);
switch (docDbError) {
case PARTITION_NOT_FOUND: {
this.resultException = new PartitionNotFoundException("Partition not found.", this.lastContinuation);
}
break;
case PARTITION_SPLIT: {
this.resultException = new PartitionSplitException("Partition split.", this.lastContinuation);
}
break;
case UNDEFINED: {
this.resultException = new RuntimeException(clientException);
}
break;
case MAX_ITEM_COUNT_TOO_LARGE: {
if (this.options.getMaxItemCount() == null) {
this.options.setMaxItemCount(DefaultMaxItemCount);
} else if (this.options.getMaxItemCount() <= 1) {
logger.error("Cannot reduce maxItemCount further as it's already at {}", this.options.getMaxItemCount(), clientException);
this.resultException = new RuntimeException(clientException);
}
this.options.setMaxItemCount(this.options.getMaxItemCount() / 2);
logger.warn("Reducing maxItemCount, new value: {}", this.options.getMaxItemCount());
return Flux.empty();
}
case TRANSIENT_ERROR: {
if (clientException.getRetryAfterDuration().toMillis() > 0) {
ZonedDateTime stopTimer = ZonedDateTime.now().plus(clientException.getRetryAfterDuration().toMillis(), MILLIS);
return Mono.just(clientException.getRetryAfterDuration().toMillis())
.delayElement(Duration.ofMillis(100))
.repeat(() -> {
ZonedDateTime currentTime = ZonedDateTime.now();
return !cancellationToken.isCancellationRequested() && currentTime.isBefore(stopTimer);
}).flatMap(values -> Flux.empty());
}
}
break;
default: {
logger.error("Unrecognized Cosmos exception returned error code {}", docDbError, clientException);
this.resultException = new RuntimeException(clientException);
}
}
} else if (throwable instanceof LeaseLostException) {
logger.info("LeaseLoseException with partition {} from thread {}",
BridgeInternal.getPartitionKeyInternal(this.options.getPartitionKey()), Thread.currentThread().getId());
this.resultException = (LeaseLostException) throwable;
} else if (throwable instanceof TaskCancelledException) {
logger.debug("Task cancelled exception: partition {} from {}",
this.settings.getPartitionKeyRangeId(), Thread.currentThread().getId(), throwable);
this.resultException = (TaskCancelledException) throwable;
} else {
logger.warn("Unexpected exception from thread {}", Thread.currentThread().getId(), throwable);
this.resultException = new RuntimeException(throwable);
}
return Flux.error(throwable);
})
.repeat(() -> {
if (cancellationToken.isCancellationRequested()) {
this.resultException = new TaskCancelledException();
return false;
}
return true;
})
.onErrorResume(throwable -> {
if (this.resultException == null) {
this.resultException = new RuntimeException(throwable);
}
return Flux.empty();
}).then();
} | public Mono<Void> run(CancellationToken cancellationToken) {
this.lastContinuation = this.settings.getStartContinuation();
this.isFirstQueryForChangeFeeds = true;
this.options.setRequestContinuation(this.lastContinuation);
return Flux.just(this)
.flatMap( value -> {
if (cancellationToken.isCancellationRequested()) {
return Flux.empty();
}
if(this.isFirstQueryForChangeFeeds) {
this.isFirstQueryForChangeFeeds = false;
return Flux.just(value);
}
ZonedDateTime stopTimer = ZonedDateTime.now().plus(this.settings.getFeedPollDelay());
return Mono.just(value)
.delayElement(Duration.ofMillis(100))
.repeat( () -> {
ZonedDateTime currentTime = ZonedDateTime.now();
return !cancellationToken.isCancellationRequested() && currentTime.isBefore(stopTimer);
}).last();
})
.flatMap(value -> this.documentClient.createDocumentChangeFeedQuery(this.settings.getCollectionSelfLink(),
this.options)
.limitRequest(1)
)
.flatMap(documentFeedResponse -> {
if (cancellationToken.isCancellationRequested()) return Flux.error(new TaskCancelledException());
this.lastContinuation = documentFeedResponse.getContinuationToken();
if (documentFeedResponse.getResults() != null && documentFeedResponse.getResults().size() > 0) {
return this.dispatchChanges(documentFeedResponse)
.doOnError(throwable -> {
logger.debug("Exception was thrown from thread {}", Thread.currentThread().getId(), throwable);
})
.doOnSuccess((Void) -> {
this.options.setRequestContinuation(this.lastContinuation);
if (cancellationToken.isCancellationRequested()) throw new TaskCancelledException();
});
}
this.options.setRequestContinuation(this.lastContinuation);
if (cancellationToken.isCancellationRequested()) {
return Flux.error(new TaskCancelledException());
}
return Flux.empty();
})
.doOnComplete(() -> {
if (this.options.getMaxItemCount().compareTo(this.settings.getMaxItemCount()) != 0) {
this.options.setMaxItemCount(this.settings.getMaxItemCount());
}
})
.onErrorResume(throwable -> {
if (throwable instanceof CosmosClientException) {
CosmosClientException clientException = (CosmosClientException) throwable;
logger.warn("CosmosClientException: partition {} from thread {}",
BridgeInternal.getPartitionKeyInternal(this.options.getPartitionKey()), Thread.currentThread().getId(), clientException);
StatusCodeErrorType docDbError = ExceptionClassifier.classifyClientException(clientException);
switch (docDbError) {
case PARTITION_NOT_FOUND: {
this.resultException = new PartitionNotFoundException("Partition not found.", this.lastContinuation);
}
break;
case PARTITION_SPLIT: {
this.resultException = new PartitionSplitException("Partition split.", this.lastContinuation);
}
break;
case UNDEFINED: {
this.resultException = new RuntimeException(clientException);
}
break;
case MAX_ITEM_COUNT_TOO_LARGE: {
if (this.options.getMaxItemCount() == null) {
this.options.setMaxItemCount(DefaultMaxItemCount);
} else if (this.options.getMaxItemCount() <= 1) {
logger.error("Cannot reduce maxItemCount further as it's already at {}", this.options.getMaxItemCount(), clientException);
this.resultException = new RuntimeException(clientException);
}
this.options.setMaxItemCount(this.options.getMaxItemCount() / 2);
logger.warn("Reducing maxItemCount, new value: {}", this.options.getMaxItemCount());
return Flux.empty();
}
case TRANSIENT_ERROR: {
if (clientException.getRetryAfterDuration().toMillis() > 0) {
ZonedDateTime stopTimer = ZonedDateTime.now().plus(clientException.getRetryAfterDuration().toMillis(), MILLIS);
return Mono.just(clientException.getRetryAfterDuration().toMillis())
.delayElement(Duration.ofMillis(100))
.repeat(() -> {
ZonedDateTime currentTime = ZonedDateTime.now();
return !cancellationToken.isCancellationRequested() && currentTime.isBefore(stopTimer);
}).flatMap(values -> Flux.empty());
}
}
break;
default: {
logger.error("Unrecognized Cosmos exception returned error code {}", docDbError, clientException);
this.resultException = new RuntimeException(clientException);
}
}
} else if (throwable instanceof LeaseLostException) {
logger.info("LeaseLoseException with partition {} from thread {}",
BridgeInternal.getPartitionKeyInternal(this.options.getPartitionKey()), Thread.currentThread().getId());
this.resultException = (LeaseLostException) throwable;
} else if (throwable instanceof TaskCancelledException) {
logger.debug("Task cancelled exception: partition {} from {}",
this.settings.getPartitionKeyRangeId(), Thread.currentThread().getId(), throwable);
this.resultException = (TaskCancelledException) throwable;
} else {
logger.warn("Unexpected exception from thread {}", Thread.currentThread().getId(), throwable);
this.resultException = new RuntimeException(throwable);
}
return Flux.error(throwable);
})
.repeat(() -> {
if (cancellationToken.isCancellationRequested()) {
this.resultException = new TaskCancelledException();
return false;
}
return true;
})
.onErrorResume(throwable -> {
if (this.resultException == null) {
this.resultException = new RuntimeException(throwable);
}
return Flux.empty();
}).then();
} | class PartitionProcessorImpl implements PartitionProcessor {
private static final Logger logger = LoggerFactory.getLogger(PartitionProcessorImpl.class);
private static final int DefaultMaxItemCount = 100;
private final ProcessorSettings settings;
private final PartitionCheckpointer checkpointer;
private final ChangeFeedObserver observer;
private final ChangeFeedOptions options;
private final ChangeFeedContextClient documentClient;
private volatile RuntimeException resultException;
private volatile String lastContinuation;
private volatile boolean isFirstQueryForChangeFeeds;
public PartitionProcessorImpl(ChangeFeedObserver observer, ChangeFeedContextClient documentClient, ProcessorSettings settings, PartitionCheckpointer checkpointer) {
this.observer = observer;
this.documentClient = documentClient;
this.settings = settings;
this.checkpointer = checkpointer;
this.options = new ChangeFeedOptions();
this.options.setMaxItemCount(settings.getMaxItemCount());
this.options.setPartitionKeyRangeId(settings.getPartitionKeyRangeId());
this.options.setStartFromBeginning(settings.isStartFromBeginning());
this.options.setRequestContinuation(settings.getStartContinuation());
this.options.setStartDateTime(settings.getStartTime());
}
// NOTE(review): a duplicated @Override annotation preceded this method;
// @Override is not a repeatable annotation, so stacking it twice does not
// compile. The duplicate was removed.
@Override
public RuntimeException getResultException() {
    // Terminal exception recorded by the processing pipeline, if any.
    return this.resultException;
}
// Builds the observer context for this change-feed page and forwards the
// documents to the observer.
private Mono<Void> dispatchChanges(FeedResponse<JsonNode> response) {
    ChangeFeedObserverContext observerContext =
        new ChangeFeedObserverContextImpl(this.settings.getPartitionKeyRangeId(), response, this.checkpointer);
    return this.observer.processChanges(observerContext, response.getResults());
}
} | class PartitionProcessorImpl implements PartitionProcessor {
private static final Logger logger = LoggerFactory.getLogger(PartitionProcessorImpl.class);
private static final int DefaultMaxItemCount = 100;
private final ProcessorSettings settings;
private final PartitionCheckpointer checkpointer;
private final ChangeFeedObserver observer;
private final ChangeFeedOptions options;
private final ChangeFeedContextClient documentClient;
private volatile RuntimeException resultException;
private volatile String lastContinuation;
private volatile boolean isFirstQueryForChangeFeeds;
public PartitionProcessorImpl(ChangeFeedObserver observer, ChangeFeedContextClient documentClient, ProcessorSettings settings, PartitionCheckpointer checkpointer) {
this.observer = observer;
this.documentClient = documentClient;
this.settings = settings;
this.checkpointer = checkpointer;
this.options = new ChangeFeedOptions();
this.options.setMaxItemCount(settings.getMaxItemCount());
this.options.setPartitionKeyRangeId(settings.getPartitionKeyRangeId());
this.options.setStartFromBeginning(settings.isStartFromBeginning());
this.options.setRequestContinuation(settings.getStartContinuation());
this.options.setStartDateTime(settings.getStartTime());
}
// NOTE(review): a duplicated @Override annotation preceded this method;
// @Override is not a repeatable annotation, so stacking it twice does not
// compile. The duplicate was removed.
@Override
public RuntimeException getResultException() {
    // Terminal exception recorded by the processing pipeline, if any.
    return this.resultException;
}
private Mono<Void> dispatchChanges(FeedResponse<JsonNode> response) {
ChangeFeedObserverContext context = new ChangeFeedObserverContextImpl(this.settings.getPartitionKeyRangeId(), response, this.checkpointer);
return this.observer.processChanges(context, response.getResults());
}
} | |
Ok, now uses the default hashCode from the base class. | public int hashCode() {
if (this.components == null || this.components.size() == 0) {
return 0;
}
int [] ordinals = new int[this.components.size()];
for (int i = 0; i < this.components.size(); i++) {
ordinals[i] = this.components.get(i).GetTypeOrdinal();
}
return Objects.hashCode(ordinals);
} | } | public int hashCode() {
return super.hashCode();
} | class PartitionKeyInternal implements Comparable<PartitionKeyInternal> {
private static final String TYPE = "type";
private static final String MIN_NUMBER = "MinNumber";
private static final String MAX_NUMBER = "MaxNumber";
private static final String MIN_STRING = "MinString";
private static final String MAX_STRING = "MaxString";
private static final String INFINITY = "Infinity";
public static final PartitionKeyInternal NonePartitionKey =
new PartitionKeyInternal();
public static final PartitionKeyInternal EmptyPartitionKey =
new PartitionKeyInternal(new ArrayList<>());
@SuppressWarnings("serial")
public static final PartitionKeyInternal InfinityPartitionKey =
new PartitionKeyInternal(new ArrayList<IPartitionKeyComponent>() {{
add(new InfinityPartitionKeyComponent());
}});
@SuppressWarnings("serial")
public static final PartitionKeyInternal UndefinedPartitionKey =
new PartitionKeyInternal(new ArrayList<IPartitionKeyComponent>() {{
add(new UndefinedPartitionKeyComponent());
}});
public static final PartitionKeyInternal InclusiveMinimum = PartitionKeyInternal.EmptyPartitionKey;
public static final PartitionKeyInternal ExclusiveMaximum = PartitionKeyInternal.InfinityPartitionKey;
public static final PartitionKeyInternal Empty = PartitionKeyInternal.EmptyPartitionKey;
public static final PartitionKeyInternal None = PartitionKeyInternal.NonePartitionKey;
final List<IPartitionKeyComponent> components;
public PartitionKeyInternal(List<IPartitionKeyComponent> values) {
if (values == null) {
throw new IllegalArgumentException("values");
}
this.components = values;
}
public PartitionKeyInternal() {
this.components = null;
}
public static PartitionKeyInternal fromJsonString(String partitionKey) {
if (Strings.isNullOrEmpty(partitionKey)) {
throw new IllegalArgumentException(String.format(RMResources.UnableToDeserializePartitionKeyValue, partitionKey));
}
try {
return Utils.getSimpleObjectMapper().readValue(partitionKey, PartitionKeyInternal.class);
} catch (IOException e) {
throw new IllegalArgumentException(e);
}
}
public static PartitionKeyInternal fromObjectArray(Object[] values, boolean strict) {
if (values == null) {
throw new IllegalArgumentException("values");
}
return PartitionKeyInternal.fromObjectArray(Arrays.asList(values), strict);
}
public static PartitionKeyInternal fromObjectArray(List<Object> values, boolean strict) {
if (values == null) {
throw new IllegalArgumentException("values");
}
List<IPartitionKeyComponent> components = new ArrayList<>();
for (Object value : values) {
if (value == NullNode.instance || value == null) {
components.add(NullPartitionKeyComponent.VALUE);
} else if (value instanceof Undefined) {
components.add(UndefinedPartitionKeyComponent.VALUE);
} else if (value instanceof Boolean) {
components.add(new BoolPartitionKeyComponent((boolean) value));
} else if (value instanceof String) {
components.add(new StringPartitionKeyComponent((String) value));
} else if (isNumeric(value)) {
components.add(new NumberPartitionKeyComponent(((Number) value).doubleValue()));
} else if (value instanceof ObjectNode && ((ObjectNode) value).get(TYPE) != null) {
switch (((ObjectNode) value).get(TYPE).asText()) {
case MIN_NUMBER:
components.add(MinNumberPartitionKeyComponent.VALUE);
break;
case MAX_NUMBER:
components.add(MaxNumberPartitionKeyComponent.VALUE);
break;
case MIN_STRING:
components.add(MinStringPartitionKeyComponent.VALUE);
break;
case MAX_STRING:
components.add(MaxStringPartitionKeyComponent.VALUE);
break;
}
} else {
if (strict) {
throw new IllegalArgumentException("Unable to construct PartitionKeyInternal from objects array");
} else {
components.add(UndefinedPartitionKeyComponent.VALUE);
}
}
}
return new PartitionKeyInternal(components);
}
private static boolean isNumeric(Object value) {
return value instanceof Number;
}
private static PartitionKeyInternal getExclusiveMaximum() {
return PartitionKeyInternal.InfinityPartitionKey;
}
public static PartitionKeyInternal getEmpty() {
return PartitionKeyInternal.EmptyPartitionKey;
}
@Override
public boolean equals(Object obj) {
PartitionKeyInternal pki = as(obj, PartitionKeyInternal.class);
if (pki == null) {
return false;
}
if (pki == this) {
return true;
}
return this.compareTo(pki) == 0;
}
@Override
public int compareTo(PartitionKeyInternal other) {
if (other == null) {
throw new IllegalArgumentException("other");
} else if (other.components == null || this.components == null) {
int otherComponentsCount = other.components == null ? 0 : other.components.size();
int thisComponentsCount = this.components == null ? 0 : this.components.size();
return (int) Math.signum(thisComponentsCount - otherComponentsCount);
}
for (int i = 0; i < Math.min(this.components.size(), other.components.size()); i++) {
int leftOrdinal = this.components.get(i).GetTypeOrdinal();
int rightOrdinal = other.components.get(i).GetTypeOrdinal();
if (leftOrdinal != rightOrdinal) {
return (int) Math.signum(leftOrdinal - rightOrdinal);
}
int result = this.components.get(i).CompareTo(other.components.get(i));
if (result != 0) {
return (int) Math.signum(result);
}
}
return (int) Math.signum(this.components.size() - other.components.size());
}
public String toJson() {
try {
return Utils.getSimpleObjectMapper().writeValueAsString(this);
} catch (IOException e) {
throw new IllegalArgumentException("Unable serialize the partition key internal into the JSON string", e);
}
}
public boolean contains(PartitionKeyInternal nestedPartitionKey) {
if (this.components.size() > nestedPartitionKey.components.size()) {
return false;
}
for (int i = 0; i < this.components.size(); i++) {
if (this.components.get(i).CompareTo(nestedPartitionKey.components.get(i)) != 0) {
return false;
}
}
return true;
}
public List<IPartitionKeyComponent> getComponents() {
return components;
}
public String getEffectivePartitionKeyString(PartitionKeyInternal internalPartitionKey, PartitionKeyDefinition partitionKey) {
return PartitionKeyInternalHelper.getEffectivePartitionKeyString(internalPartitionKey, partitionKey);
}
@SuppressWarnings("serial")
static final class PartitionKeyInternalJsonSerializer extends StdSerializer<PartitionKeyInternal> {
protected PartitionKeyInternalJsonSerializer() { this(null); }
protected PartitionKeyInternalJsonSerializer(Class<PartitionKeyInternal> t) {
super(t);
}
@Override
public void serialize(PartitionKeyInternal partitionKey, JsonGenerator writer, SerializerProvider serializerProvider) {
try {
if (partitionKey.equals(PartitionKeyInternal.getExclusiveMaximum())) {
writer.writeString(INFINITY);
return;
}
writer.writeStartArray();
for (IPartitionKeyComponent componentValue : partitionKey.getComponents()) {
componentValue.JsonEncode(writer);
}
writer.writeEndArray();
} catch (IOException e) {
throw new IllegalStateException(e);
}
}
static void jsonEncode(MinNumberPartitionKeyComponent component, JsonGenerator writer) {
jsonEncodeLimit(writer, MIN_NUMBER);
}
static void jsonEncode(MaxNumberPartitionKeyComponent component, JsonGenerator writer) {
jsonEncodeLimit(writer, MAX_NUMBER);
}
static void jsonEncode(MinStringPartitionKeyComponent component, JsonGenerator writer) {
jsonEncodeLimit(writer, MIN_STRING);
}
static void jsonEncode(MaxStringPartitionKeyComponent component, JsonGenerator writer) {
jsonEncodeLimit(writer, MAX_STRING);
}
private static void jsonEncodeLimit(JsonGenerator writer, String value) {
try {
writer.writeStartObject();
writer.writeFieldName(TYPE);
writer.writeString(value);
writer.writeEndObject();
} catch (IOException e) {
throw new IllegalStateException(e);
}
}
}
@SuppressWarnings("serial")
static final class PartitionKeyInternalJsonDeserializer extends StdDeserializer<PartitionKeyInternal> {
protected PartitionKeyInternalJsonDeserializer() { this(null); }
protected PartitionKeyInternalJsonDeserializer(Class<?> vc) {
super(vc);
}
@Override
public PartitionKeyInternal deserialize(JsonParser jsonParser, DeserializationContext deserializationContext) {
ObjectCodec objectCodec = jsonParser.getCodec();
JsonNode root;
try {
root = objectCodec.readTree(jsonParser);
} catch (IOException e) {
throw new IllegalArgumentException(e);
}
if (root.isTextual() && root.asText().equals(INFINITY)) {
return PartitionKeyInternal.getExclusiveMaximum();
}
List<Object> objects = new ArrayList<>();
if (root.isArray()) {
Iterator<JsonNode> iterator = root.iterator();
while (iterator.hasNext()) {
JsonNode node = iterator.next();
if (node.isNull()) {
objects.add(null);
} else if (node.isNumber()) {
objects.add(node.asDouble());
} else if (node.isBoolean()) {
objects.add(node.asBoolean());
} else if (node.isTextual()) {
objects.add(node.asText());
} else if (node.isArray() && node.size() == 0
|| node.isObject()
&& (node.fields() == null || !node.fields().hasNext())) {
objects.add(Undefined.Value());
} else {
objects.add(node);
}
}
return PartitionKeyInternal.fromObjectArray(objects, true);
}
throw new IllegalStateException(String.format(
"Unable to deserialize PartitionKeyInternal '%s'",
root.toString()));
}
}
} | class PartitionKeyInternal implements Comparable<PartitionKeyInternal> {
private static final String TYPE = "type";
private static final String MIN_NUMBER = "MinNumber";
private static final String MAX_NUMBER = "MaxNumber";
private static final String MIN_STRING = "MinString";
private static final String MAX_STRING = "MaxString";
private static final String INFINITY = "Infinity";
public static final PartitionKeyInternal NonePartitionKey =
new PartitionKeyInternal();
public static final PartitionKeyInternal EmptyPartitionKey =
new PartitionKeyInternal(new ArrayList<>());
@SuppressWarnings("serial")
public static final PartitionKeyInternal InfinityPartitionKey =
new PartitionKeyInternal(new ArrayList<IPartitionKeyComponent>() {{
add(new InfinityPartitionKeyComponent());
}});
@SuppressWarnings("serial")
public static final PartitionKeyInternal UndefinedPartitionKey =
new PartitionKeyInternal(new ArrayList<IPartitionKeyComponent>() {{
add(new UndefinedPartitionKeyComponent());
}});
public static final PartitionKeyInternal InclusiveMinimum = PartitionKeyInternal.EmptyPartitionKey;
public static final PartitionKeyInternal ExclusiveMaximum = PartitionKeyInternal.InfinityPartitionKey;
public static final PartitionKeyInternal Empty = PartitionKeyInternal.EmptyPartitionKey;
public static final PartitionKeyInternal None = PartitionKeyInternal.NonePartitionKey;
final List<IPartitionKeyComponent> components;
public PartitionKeyInternal(List<IPartitionKeyComponent> values) {
if (values == null) {
throw new IllegalArgumentException("values");
}
this.components = values;
}
public PartitionKeyInternal() {
this.components = null;
}
public static PartitionKeyInternal fromJsonString(String partitionKey) {
if (Strings.isNullOrEmpty(partitionKey)) {
throw new IllegalArgumentException(String.format(RMResources.UnableToDeserializePartitionKeyValue, partitionKey));
}
try {
return Utils.getSimpleObjectMapper().readValue(partitionKey, PartitionKeyInternal.class);
} catch (IOException e) {
throw new IllegalArgumentException(e);
}
}
public static PartitionKeyInternal fromObjectArray(Object[] values, boolean strict) {
if (values == null) {
throw new IllegalArgumentException("values");
}
return PartitionKeyInternal.fromObjectArray(Arrays.asList(values), strict);
}
public static PartitionKeyInternal fromObjectArray(List<Object> values, boolean strict) {
if (values == null) {
throw new IllegalArgumentException("values");
}
List<IPartitionKeyComponent> components = new ArrayList<>();
for (Object value : values) {
if (value == NullNode.instance || value == null) {
components.add(NullPartitionKeyComponent.VALUE);
} else if (value instanceof Undefined) {
components.add(UndefinedPartitionKeyComponent.VALUE);
} else if (value instanceof Boolean) {
components.add(new BoolPartitionKeyComponent((boolean) value));
} else if (value instanceof String) {
components.add(new StringPartitionKeyComponent((String) value));
} else if (isNumeric(value)) {
components.add(new NumberPartitionKeyComponent(((Number) value).doubleValue()));
} else if (value instanceof ObjectNode && ((ObjectNode) value).get(TYPE) != null) {
switch (((ObjectNode) value).get(TYPE).asText()) {
case MIN_NUMBER:
components.add(MinNumberPartitionKeyComponent.VALUE);
break;
case MAX_NUMBER:
components.add(MaxNumberPartitionKeyComponent.VALUE);
break;
case MIN_STRING:
components.add(MinStringPartitionKeyComponent.VALUE);
break;
case MAX_STRING:
components.add(MaxStringPartitionKeyComponent.VALUE);
break;
}
} else {
if (strict) {
throw new IllegalArgumentException("Unable to construct PartitionKeyInternal from objects array");
} else {
components.add(UndefinedPartitionKeyComponent.VALUE);
}
}
}
return new PartitionKeyInternal(components);
}
private static boolean isNumeric(Object value) {
return value instanceof Number;
}
private static PartitionKeyInternal getExclusiveMaximum() {
return PartitionKeyInternal.InfinityPartitionKey;
}
public static PartitionKeyInternal getEmpty() {
return PartitionKeyInternal.EmptyPartitionKey;
}
@Override
public boolean equals(Object obj) {
PartitionKeyInternal pki = as(obj, PartitionKeyInternal.class);
if (pki == null) {
return false;
}
if (pki == this) {
return true;
}
return this.compareTo(pki) == 0;
}
@Override
public int compareTo(PartitionKeyInternal other) {
if (other == null) {
throw new IllegalArgumentException("other");
} else if (other.components == null || this.components == null) {
int otherComponentsCount = other.components == null ? 0 : other.components.size();
int thisComponentsCount = this.components == null ? 0 : this.components.size();
return (int) Math.signum(thisComponentsCount - otherComponentsCount);
}
for (int i = 0; i < Math.min(this.components.size(), other.components.size()); i++) {
int leftOrdinal = this.components.get(i).GetTypeOrdinal();
int rightOrdinal = other.components.get(i).GetTypeOrdinal();
if (leftOrdinal != rightOrdinal) {
return (int) Math.signum(leftOrdinal - rightOrdinal);
}
int result = this.components.get(i).CompareTo(other.components.get(i));
if (result != 0) {
return (int) Math.signum(result);
}
}
return (int) Math.signum(this.components.size() - other.components.size());
}
public String toJson() {
try {
return Utils.getSimpleObjectMapper().writeValueAsString(this);
} catch (IOException e) {
throw new IllegalArgumentException("Unable serialize the partition key internal into the JSON string", e);
}
}
public boolean contains(PartitionKeyInternal nestedPartitionKey) {
if (this.components.size() > nestedPartitionKey.components.size()) {
return false;
}
for (int i = 0; i < this.components.size(); i++) {
if (this.components.get(i).CompareTo(nestedPartitionKey.components.get(i)) != 0) {
return false;
}
}
return true;
}
public List<IPartitionKeyComponent> getComponents() {
return components;
}
public String getEffectivePartitionKeyString(PartitionKeyInternal internalPartitionKey, PartitionKeyDefinition partitionKey) {
return PartitionKeyInternalHelper.getEffectivePartitionKeyString(internalPartitionKey, partitionKey);
}
@SuppressWarnings("serial")
static final class PartitionKeyInternalJsonSerializer extends StdSerializer<PartitionKeyInternal> {
protected PartitionKeyInternalJsonSerializer() { this(null); }
protected PartitionKeyInternalJsonSerializer(Class<PartitionKeyInternal> t) {
super(t);
}
@Override
public void serialize(PartitionKeyInternal partitionKey, JsonGenerator writer, SerializerProvider serializerProvider) {
try {
if (partitionKey.equals(PartitionKeyInternal.getExclusiveMaximum())) {
writer.writeString(INFINITY);
return;
}
writer.writeStartArray();
for (IPartitionKeyComponent componentValue : partitionKey.getComponents()) {
componentValue.JsonEncode(writer);
}
writer.writeEndArray();
} catch (IOException e) {
throw new IllegalStateException(e);
}
}
static void jsonEncode(MinNumberPartitionKeyComponent component, JsonGenerator writer) {
jsonEncodeLimit(writer, MIN_NUMBER);
}
static void jsonEncode(MaxNumberPartitionKeyComponent component, JsonGenerator writer) {
jsonEncodeLimit(writer, MAX_NUMBER);
}
static void jsonEncode(MinStringPartitionKeyComponent component, JsonGenerator writer) {
jsonEncodeLimit(writer, MIN_STRING);
}
static void jsonEncode(MaxStringPartitionKeyComponent component, JsonGenerator writer) {
jsonEncodeLimit(writer, MAX_STRING);
}
private static void jsonEncodeLimit(JsonGenerator writer, String value) {
try {
writer.writeStartObject();
writer.writeFieldName(TYPE);
writer.writeString(value);
writer.writeEndObject();
} catch (IOException e) {
throw new IllegalStateException(e);
}
}
}
@SuppressWarnings("serial")
static final class PartitionKeyInternalJsonDeserializer extends StdDeserializer<PartitionKeyInternal> {
protected PartitionKeyInternalJsonDeserializer() { this(null); }
protected PartitionKeyInternalJsonDeserializer(Class<?> vc) {
super(vc);
}
@Override
public PartitionKeyInternal deserialize(JsonParser jsonParser, DeserializationContext deserializationContext) {
ObjectCodec objectCodec = jsonParser.getCodec();
JsonNode root;
try {
root = objectCodec.readTree(jsonParser);
} catch (IOException e) {
throw new IllegalArgumentException(e);
}
if (root.isTextual() && root.asText().equals(INFINITY)) {
return PartitionKeyInternal.getExclusiveMaximum();
}
List<Object> objects = new ArrayList<>();
if (root.isArray()) {
Iterator<JsonNode> iterator = root.iterator();
while (iterator.hasNext()) {
JsonNode node = iterator.next();
if (node.isNull()) {
objects.add(null);
} else if (node.isNumber()) {
objects.add(node.asDouble());
} else if (node.isBoolean()) {
objects.add(node.asBoolean());
} else if (node.isTextual()) {
objects.add(node.asText());
} else if (node.isArray() && node.size() == 0
|| node.isObject()
&& (node.fields() == null || !node.fields().hasNext())) {
objects.add(Undefined.Value());
} else {
objects.add(node);
}
}
return PartitionKeyInternal.fromObjectArray(objects, true);
}
throw new IllegalStateException(String.format(
"Unable to deserialize PartitionKeyInternal '%s'",
root.toString()));
}
}
} |
and I see there is an issue link to complete the implementation. | public int hashCode() {
if (this.components == null || this.components.size() == 0) {
return 0;
}
int [] ordinals = new int[this.components.size()];
for (int i = 0; i < this.components.size(); i++) {
ordinals[i] = this.components.get(i).GetTypeOrdinal();
}
return Objects.hashCode(ordinals);
} | } | public int hashCode() {
return super.hashCode();
} | class PartitionKeyInternal implements Comparable<PartitionKeyInternal> {
private static final String TYPE = "type";
private static final String MIN_NUMBER = "MinNumber";
private static final String MAX_NUMBER = "MaxNumber";
private static final String MIN_STRING = "MinString";
private static final String MAX_STRING = "MaxString";
private static final String INFINITY = "Infinity";
public static final PartitionKeyInternal NonePartitionKey =
new PartitionKeyInternal();
public static final PartitionKeyInternal EmptyPartitionKey =
new PartitionKeyInternal(new ArrayList<>());
@SuppressWarnings("serial")
public static final PartitionKeyInternal InfinityPartitionKey =
new PartitionKeyInternal(new ArrayList<IPartitionKeyComponent>() {{
add(new InfinityPartitionKeyComponent());
}});
@SuppressWarnings("serial")
public static final PartitionKeyInternal UndefinedPartitionKey =
new PartitionKeyInternal(new ArrayList<IPartitionKeyComponent>() {{
add(new UndefinedPartitionKeyComponent());
}});
public static final PartitionKeyInternal InclusiveMinimum = PartitionKeyInternal.EmptyPartitionKey;
public static final PartitionKeyInternal ExclusiveMaximum = PartitionKeyInternal.InfinityPartitionKey;
public static final PartitionKeyInternal Empty = PartitionKeyInternal.EmptyPartitionKey;
public static final PartitionKeyInternal None = PartitionKeyInternal.NonePartitionKey;
final List<IPartitionKeyComponent> components;
public PartitionKeyInternal(List<IPartitionKeyComponent> values) {
if (values == null) {
throw new IllegalArgumentException("values");
}
this.components = values;
}
public PartitionKeyInternal() {
this.components = null;
}
public static PartitionKeyInternal fromJsonString(String partitionKey) {
if (Strings.isNullOrEmpty(partitionKey)) {
throw new IllegalArgumentException(String.format(RMResources.UnableToDeserializePartitionKeyValue, partitionKey));
}
try {
return Utils.getSimpleObjectMapper().readValue(partitionKey, PartitionKeyInternal.class);
} catch (IOException e) {
throw new IllegalArgumentException(e);
}
}
public static PartitionKeyInternal fromObjectArray(Object[] values, boolean strict) {
if (values == null) {
throw new IllegalArgumentException("values");
}
return PartitionKeyInternal.fromObjectArray(Arrays.asList(values), strict);
}
public static PartitionKeyInternal fromObjectArray(List<Object> values, boolean strict) {
if (values == null) {
throw new IllegalArgumentException("values");
}
List<IPartitionKeyComponent> components = new ArrayList<>();
for (Object value : values) {
if (value == NullNode.instance || value == null) {
components.add(NullPartitionKeyComponent.VALUE);
} else if (value instanceof Undefined) {
components.add(UndefinedPartitionKeyComponent.VALUE);
} else if (value instanceof Boolean) {
components.add(new BoolPartitionKeyComponent((boolean) value));
} else if (value instanceof String) {
components.add(new StringPartitionKeyComponent((String) value));
} else if (isNumeric(value)) {
components.add(new NumberPartitionKeyComponent(((Number) value).doubleValue()));
} else if (value instanceof ObjectNode && ((ObjectNode) value).get(TYPE) != null) {
switch (((ObjectNode) value).get(TYPE).asText()) {
case MIN_NUMBER:
components.add(MinNumberPartitionKeyComponent.VALUE);
break;
case MAX_NUMBER:
components.add(MaxNumberPartitionKeyComponent.VALUE);
break;
case MIN_STRING:
components.add(MinStringPartitionKeyComponent.VALUE);
break;
case MAX_STRING:
components.add(MaxStringPartitionKeyComponent.VALUE);
break;
}
} else {
if (strict) {
throw new IllegalArgumentException("Unable to construct PartitionKeyInternal from objects array");
} else {
components.add(UndefinedPartitionKeyComponent.VALUE);
}
}
}
return new PartitionKeyInternal(components);
}
private static boolean isNumeric(Object value) {
return value instanceof Number;
}
private static PartitionKeyInternal getExclusiveMaximum() {
return PartitionKeyInternal.InfinityPartitionKey;
}
public static PartitionKeyInternal getEmpty() {
return PartitionKeyInternal.EmptyPartitionKey;
}
@Override
public boolean equals(Object obj) {
PartitionKeyInternal pki = as(obj, PartitionKeyInternal.class);
if (pki == null) {
return false;
}
if (pki == this) {
return true;
}
return this.compareTo(pki) == 0;
}
@Override
public int compareTo(PartitionKeyInternal other) {
if (other == null) {
throw new IllegalArgumentException("other");
} else if (other.components == null || this.components == null) {
int otherComponentsCount = other.components == null ? 0 : other.components.size();
int thisComponentsCount = this.components == null ? 0 : this.components.size();
return (int) Math.signum(thisComponentsCount - otherComponentsCount);
}
for (int i = 0; i < Math.min(this.components.size(), other.components.size()); i++) {
int leftOrdinal = this.components.get(i).GetTypeOrdinal();
int rightOrdinal = other.components.get(i).GetTypeOrdinal();
if (leftOrdinal != rightOrdinal) {
return (int) Math.signum(leftOrdinal - rightOrdinal);
}
int result = this.components.get(i).CompareTo(other.components.get(i));
if (result != 0) {
return (int) Math.signum(result);
}
}
return (int) Math.signum(this.components.size() - other.components.size());
}
public String toJson() {
try {
return Utils.getSimpleObjectMapper().writeValueAsString(this);
} catch (IOException e) {
throw new IllegalArgumentException("Unable serialize the partition key internal into the JSON string", e);
}
}
public boolean contains(PartitionKeyInternal nestedPartitionKey) {
if (this.components.size() > nestedPartitionKey.components.size()) {
return false;
}
for (int i = 0; i < this.components.size(); i++) {
if (this.components.get(i).CompareTo(nestedPartitionKey.components.get(i)) != 0) {
return false;
}
}
return true;
}
public List<IPartitionKeyComponent> getComponents() {
return components;
}
public String getEffectivePartitionKeyString(PartitionKeyInternal internalPartitionKey, PartitionKeyDefinition partitionKey) {
return PartitionKeyInternalHelper.getEffectivePartitionKeyString(internalPartitionKey, partitionKey);
}
@SuppressWarnings("serial")
static final class PartitionKeyInternalJsonSerializer extends StdSerializer<PartitionKeyInternal> {
protected PartitionKeyInternalJsonSerializer() { this(null); }
protected PartitionKeyInternalJsonSerializer(Class<PartitionKeyInternal> t) {
super(t);
}
@Override
public void serialize(PartitionKeyInternal partitionKey, JsonGenerator writer, SerializerProvider serializerProvider) {
try {
if (partitionKey.equals(PartitionKeyInternal.getExclusiveMaximum())) {
writer.writeString(INFINITY);
return;
}
writer.writeStartArray();
for (IPartitionKeyComponent componentValue : partitionKey.getComponents()) {
componentValue.JsonEncode(writer);
}
writer.writeEndArray();
} catch (IOException e) {
throw new IllegalStateException(e);
}
}
static void jsonEncode(MinNumberPartitionKeyComponent component, JsonGenerator writer) {
jsonEncodeLimit(writer, MIN_NUMBER);
}
static void jsonEncode(MaxNumberPartitionKeyComponent component, JsonGenerator writer) {
jsonEncodeLimit(writer, MAX_NUMBER);
}
static void jsonEncode(MinStringPartitionKeyComponent component, JsonGenerator writer) {
jsonEncodeLimit(writer, MIN_STRING);
}
static void jsonEncode(MaxStringPartitionKeyComponent component, JsonGenerator writer) {
jsonEncodeLimit(writer, MAX_STRING);
}
private static void jsonEncodeLimit(JsonGenerator writer, String value) {
try {
writer.writeStartObject();
writer.writeFieldName(TYPE);
writer.writeString(value);
writer.writeEndObject();
} catch (IOException e) {
throw new IllegalStateException(e);
}
}
}
@SuppressWarnings("serial")
static final class PartitionKeyInternalJsonDeserializer extends StdDeserializer<PartitionKeyInternal> {
protected PartitionKeyInternalJsonDeserializer() { this(null); }
protected PartitionKeyInternalJsonDeserializer(Class<?> vc) {
super(vc);
}
@Override
public PartitionKeyInternal deserialize(JsonParser jsonParser, DeserializationContext deserializationContext) {
ObjectCodec objectCodec = jsonParser.getCodec();
JsonNode root;
try {
root = objectCodec.readTree(jsonParser);
} catch (IOException e) {
throw new IllegalArgumentException(e);
}
if (root.isTextual() && root.asText().equals(INFINITY)) {
return PartitionKeyInternal.getExclusiveMaximum();
}
List<Object> objects = new ArrayList<>();
if (root.isArray()) {
Iterator<JsonNode> iterator = root.iterator();
while (iterator.hasNext()) {
JsonNode node = iterator.next();
if (node.isNull()) {
objects.add(null);
} else if (node.isNumber()) {
objects.add(node.asDouble());
} else if (node.isBoolean()) {
objects.add(node.asBoolean());
} else if (node.isTextual()) {
objects.add(node.asText());
} else if (node.isArray() && node.size() == 0
|| node.isObject()
&& (node.fields() == null || !node.fields().hasNext())) {
objects.add(Undefined.Value());
} else {
objects.add(node);
}
}
return PartitionKeyInternal.fromObjectArray(objects, true);
}
throw new IllegalStateException(String.format(
"Unable to deserialize PartitionKeyInternal '%s'",
root.toString()));
}
}
} | class PartitionKeyInternal implements Comparable<PartitionKeyInternal> {
private static final String TYPE = "type";
private static final String MIN_NUMBER = "MinNumber";
private static final String MAX_NUMBER = "MaxNumber";
private static final String MIN_STRING = "MinString";
private static final String MAX_STRING = "MaxString";
private static final String INFINITY = "Infinity";
public static final PartitionKeyInternal NonePartitionKey =
new PartitionKeyInternal();
public static final PartitionKeyInternal EmptyPartitionKey =
new PartitionKeyInternal(new ArrayList<>());
@SuppressWarnings("serial")
public static final PartitionKeyInternal InfinityPartitionKey =
new PartitionKeyInternal(new ArrayList<IPartitionKeyComponent>() {{
add(new InfinityPartitionKeyComponent());
}});
@SuppressWarnings("serial")
public static final PartitionKeyInternal UndefinedPartitionKey =
new PartitionKeyInternal(new ArrayList<IPartitionKeyComponent>() {{
add(new UndefinedPartitionKeyComponent());
}});
public static final PartitionKeyInternal InclusiveMinimum = PartitionKeyInternal.EmptyPartitionKey;
public static final PartitionKeyInternal ExclusiveMaximum = PartitionKeyInternal.InfinityPartitionKey;
public static final PartitionKeyInternal Empty = PartitionKeyInternal.EmptyPartitionKey;
public static final PartitionKeyInternal None = PartitionKeyInternal.NonePartitionKey;
final List<IPartitionKeyComponent> components;
public PartitionKeyInternal(List<IPartitionKeyComponent> values) {
if (values == null) {
throw new IllegalArgumentException("values");
}
this.components = values;
}
public PartitionKeyInternal() {
this.components = null;
}
public static PartitionKeyInternal fromJsonString(String partitionKey) {
if (Strings.isNullOrEmpty(partitionKey)) {
throw new IllegalArgumentException(String.format(RMResources.UnableToDeserializePartitionKeyValue, partitionKey));
}
try {
return Utils.getSimpleObjectMapper().readValue(partitionKey, PartitionKeyInternal.class);
} catch (IOException e) {
throw new IllegalArgumentException(e);
}
}
public static PartitionKeyInternal fromObjectArray(Object[] values, boolean strict) {
if (values == null) {
throw new IllegalArgumentException("values");
}
return PartitionKeyInternal.fromObjectArray(Arrays.asList(values), strict);
}
public static PartitionKeyInternal fromObjectArray(List<Object> values, boolean strict) {
if (values == null) {
throw new IllegalArgumentException("values");
}
List<IPartitionKeyComponent> components = new ArrayList<>();
for (Object value : values) {
if (value == NullNode.instance || value == null) {
components.add(NullPartitionKeyComponent.VALUE);
} else if (value instanceof Undefined) {
components.add(UndefinedPartitionKeyComponent.VALUE);
} else if (value instanceof Boolean) {
components.add(new BoolPartitionKeyComponent((boolean) value));
} else if (value instanceof String) {
components.add(new StringPartitionKeyComponent((String) value));
} else if (isNumeric(value)) {
components.add(new NumberPartitionKeyComponent(((Number) value).doubleValue()));
} else if (value instanceof ObjectNode && ((ObjectNode) value).get(TYPE) != null) {
switch (((ObjectNode) value).get(TYPE).asText()) {
case MIN_NUMBER:
components.add(MinNumberPartitionKeyComponent.VALUE);
break;
case MAX_NUMBER:
components.add(MaxNumberPartitionKeyComponent.VALUE);
break;
case MIN_STRING:
components.add(MinStringPartitionKeyComponent.VALUE);
break;
case MAX_STRING:
components.add(MaxStringPartitionKeyComponent.VALUE);
break;
}
} else {
if (strict) {
throw new IllegalArgumentException("Unable to construct PartitionKeyInternal from objects array");
} else {
components.add(UndefinedPartitionKeyComponent.VALUE);
}
}
}
return new PartitionKeyInternal(components);
}
private static boolean isNumeric(Object value) {
return value instanceof Number;
}
private static PartitionKeyInternal getExclusiveMaximum() {
return PartitionKeyInternal.InfinityPartitionKey;
}
// Returns the shared empty partition key (a key with no components).
public static PartitionKeyInternal getEmpty() {
    return PartitionKeyInternal.EmptyPartitionKey;
}
/**
 * Equality is defined via {@link #compareTo}: two partition keys are equal when they
 * compare as ordinally identical, rather than by component-list reference equality.
 */
@Override
public boolean equals(Object obj) {
    if (obj == this) {
        return true;
    }
    PartitionKeyInternal that = as(obj, PartitionKeyInternal.class);
    return that != null && this.compareTo(that) == 0;
}
/**
 * Ordinal comparison of partition keys: components are compared pairwise, first by
 * their fixed type ordinal and then by their typed comparison; when all shared prefix
 * components are equal, the shorter key sorts first. A null component list is treated
 * as an empty key.
 *
 * @param other the partition key to compare against; must not be null
 * @return a negative value, zero, or a positive value per the {@link Comparable} contract
 * @throws IllegalArgumentException if {@code other} is null
 */
@Override
public int compareTo(PartitionKeyInternal other) {
    if (other == null) {
        throw new IllegalArgumentException("other");
    } else if (other.components == null || this.components == null) {
        // A null component list sorts as empty: compare by (possibly zero) lengths only.
        int otherComponentsCount = other.components == null ? 0 : other.components.size();
        int thisComponentsCount = this.components == null ? 0 : this.components.size();
        // Integer.compare is overflow-safe, unlike (int) Math.signum(a - b).
        return Integer.compare(thisComponentsCount, otherComponentsCount);
    }
    for (int i = 0; i < Math.min(this.components.size(), other.components.size()); i++) {
        // Components of different kinds order by their fixed type ordinal first.
        int leftOrdinal = this.components.get(i).GetTypeOrdinal();
        int rightOrdinal = other.components.get(i).GetTypeOrdinal();
        if (leftOrdinal != rightOrdinal) {
            return Integer.compare(leftOrdinal, rightOrdinal);
        }
        int result = this.components.get(i).CompareTo(other.components.get(i));
        if (result != 0) {
            // Integer.signum normalizes without the int -> float -> int round trip.
            return Integer.signum(result);
        }
    }
    // All shared prefix components equal: the shorter key sorts first.
    return Integer.compare(this.components.size(), other.components.size());
}
/**
 * Serializes this partition key to its JSON string representation using the shared
 * object mapper (which routes through {@link PartitionKeyInternalJsonSerializer}).
 *
 * @return the JSON string form of this key
 * @throws IllegalArgumentException if serialization fails; kept as
 *         IllegalArgumentException (not IllegalStateException) for compatibility
 *         with existing callers
 */
public String toJson() {
    try {
        return Utils.getSimpleObjectMapper().writeValueAsString(this);
    } catch (IOException e) {
        // Message fixed: was the ungrammatical "Unable serialize ...".
        throw new IllegalArgumentException("Unable to serialize the partition key internal into the JSON string", e);
    }
}
/**
 * Tests whether this key is a prefix of {@code nestedPartitionKey}: every component of
 * this key must compare equal to the corresponding component of the nested key.
 *
 * @param nestedPartitionKey the (potentially longer) key to test against
 * @return {@code true} when this key's components form a prefix of the nested key's
 */
public boolean contains(PartitionKeyInternal nestedPartitionKey) {
    int prefixLength = this.components.size();
    if (prefixLength > nestedPartitionKey.components.size()) {
        return false;
    }
    for (int idx = 0; idx < prefixLength; idx++) {
        if (this.components.get(idx).CompareTo(nestedPartitionKey.components.get(idx)) != 0) {
            return false;
        }
    }
    return true;
}
// Returns the backing component list.
// NOTE(review): this exposes the internal mutable list directly; a caller could mutate
// it and corrupt this key — consider returning an unmodifiable view. TODO confirm no
// caller relies on mutating the returned list.
public List<IPartitionKeyComponent> getComponents() {
    return components;
}
// Delegates to PartitionKeyInternalHelper to compute the effective partition key string.
// Note: the result is derived from the arguments only; instance state ("this") is unused,
// so this method is effectively static.
public String getEffectivePartitionKeyString(PartitionKeyInternal internalPartitionKey, PartitionKeyDefinition partitionKey) {
    return PartitionKeyInternalHelper.getEffectivePartitionKeyString(internalPartitionKey, partitionKey);
}
/**
 * Jackson serializer for {@link PartitionKeyInternal}. The exclusive-maximum key is
 * written as the bare string marker {@code INFINITY}; every other key is written as a
 * JSON array of its components, with the min/max number and string sentinel components
 * encoded as {@code {"type": <marker>}} objects via the static helpers below.
 */
@SuppressWarnings("serial")
static final class PartitionKeyInternalJsonSerializer extends StdSerializer<PartitionKeyInternal> {
    protected PartitionKeyInternalJsonSerializer() { this(null); }
    protected PartitionKeyInternalJsonSerializer(Class<PartitionKeyInternal> t) {
        super(t);
    }
    @Override
    public void serialize(PartitionKeyInternal partitionKey, JsonGenerator writer, SerializerProvider serializerProvider) {
        try {
            // The exclusive maximum has a dedicated scalar representation.
            if (partitionKey.equals(PartitionKeyInternal.getExclusiveMaximum())) {
                writer.writeString(INFINITY);
                return;
            }
            writer.writeStartArray();
            for (IPartitionKeyComponent component : partitionKey.getComponents()) {
                component.JsonEncode(writer);
            }
            writer.writeEndArray();
        } catch (IOException e) {
            // Wrap as unchecked; serialization failure here is a programming error.
            throw new IllegalStateException(e);
        }
    }
    static void jsonEncode(MinNumberPartitionKeyComponent component, JsonGenerator writer) {
        jsonEncodeLimit(writer, MIN_NUMBER);
    }
    static void jsonEncode(MaxNumberPartitionKeyComponent component, JsonGenerator writer) {
        jsonEncodeLimit(writer, MAX_NUMBER);
    }
    static void jsonEncode(MinStringPartitionKeyComponent component, JsonGenerator writer) {
        jsonEncodeLimit(writer, MIN_STRING);
    }
    static void jsonEncode(MaxStringPartitionKeyComponent component, JsonGenerator writer) {
        jsonEncodeLimit(writer, MAX_STRING);
    }
    // Writes the {"type": <value>} sentinel object shared by all four limit components.
    private static void jsonEncodeLimit(JsonGenerator writer, String value) {
        try {
            writer.writeStartObject();
            // writeStringField == writeFieldName + writeString in a single call.
            writer.writeStringField(TYPE, value);
            writer.writeEndObject();
        } catch (IOException e) {
            throw new IllegalStateException(e);
        }
    }
}
@SuppressWarnings("serial")
static final class PartitionKeyInternalJsonDeserializer extends StdDeserializer<PartitionKeyInternal> {
protected PartitionKeyInternalJsonDeserializer() { this(null); }
protected PartitionKeyInternalJsonDeserializer(Class<?> vc) {
super(vc);
}
@Override
public PartitionKeyInternal deserialize(JsonParser jsonParser, DeserializationContext deserializationContext) {
ObjectCodec objectCodec = jsonParser.getCodec();
JsonNode root;
try {
root = objectCodec.readTree(jsonParser);
} catch (IOException e) {
throw new IllegalArgumentException(e);
}
if (root.isTextual() && root.asText().equals(INFINITY)) {
return PartitionKeyInternal.getExclusiveMaximum();
}
List<Object> objects = new ArrayList<>();
if (root.isArray()) {
Iterator<JsonNode> iterator = root.iterator();
while (iterator.hasNext()) {
JsonNode node = iterator.next();
if (node.isNull()) {
objects.add(null);
} else if (node.isNumber()) {
objects.add(node.asDouble());
} else if (node.isBoolean()) {
objects.add(node.asBoolean());
} else if (node.isTextual()) {
objects.add(node.asText());
} else if (node.isArray() && node.size() == 0
|| node.isObject()
&& (node.fields() == null || !node.fields().hasNext())) {
objects.add(Undefined.Value());
} else {
objects.add(node);
}
}
return PartitionKeyInternal.fromObjectArray(objects, true);
}
throw new IllegalStateException(String.format(
"Unable to deserialize PartitionKeyInternal '%s'",
root.toString()));
}
}
} |
code indentation needs to be fixed. | public Mono<AccessToken> getToken(TokenRequestContext request) {
final StringBuilder errorMsg = new StringBuilder();
return Flux.fromIterable(credentials).flatMap(p -> p.getToken(request).onErrorResume(t -> {
if (t.getMessage() != null && !t.getMessage().contains("authentication unavailable")) {
throw new RuntimeException(UnavailableError+p.getClass().getSimpleName()+"authentication failed.",t);
}
errorMsg.append(" ").append(t.getMessage());
return Mono.empty();
}), 1)
.next()
.switchIfEmpty(Mono.defer(() -> Mono.error(new RuntimeException(FailedError+errorMsg.toString()+" )"))));
} | return Flux.fromIterable(credentials).flatMap(p -> p.getToken(request).onErrorResume(t -> { | public Mono<AccessToken> getToken(TokenRequestContext request) {
List<CredentialUnavailableException> exceptions = new ArrayList<>(4);
return Flux.fromIterable(credentials)
.flatMap(p -> p.getToken(request).onErrorResume(Exception.class, t -> {
if (!t.getClass().getSimpleName().equals("CredentialUnavailableException")) {
return Mono.error(new ClientAuthenticationException(
unavailableError + p.getClass().getSimpleName()
+ " authentication failed. Error Details: " + t.getMessage(),
null, t));
}
exceptions.add((CredentialUnavailableException) t);
return Mono.empty();
}), 1)
.next()
.switchIfEmpty(Mono.defer(() -> {
CredentialUnavailableException last = exceptions.get(exceptions.size() - 1);
for (int z = exceptions.size() - 2; z >= 0; z--) {
CredentialUnavailableException current = exceptions.get(z);
last = new CredentialUnavailableException(current.getMessage() + "\r\n" + last.getMessage(),
last.getCause());
}
return Mono.error(last);
}));
} | class ChainedTokenCredential implements TokenCredential {
private final Deque<TokenCredential> credentials;
private final String UnavailableError=this.getClass().getSimpleName()+" authentication failed. -> ";
private final String FailedError=this.getClass().getSimpleName()+" failed to retrieve a token from the included credentials.(";
/**
* Create an instance of chained token credential that aggregates a list of token
* credentials.
*/
ChainedTokenCredential(Deque<TokenCredential> credentials) {
this.credentials = credentials;
}
@Override
} | class ChainedTokenCredential implements TokenCredential {
private final Deque<TokenCredential> credentials;
private final String unavailableError = this.getClass().getSimpleName() + " authentication failed. ---> ";
/**
* Create an instance of chained token credential that aggregates a list of token
* credentials.
*/
ChainedTokenCredential(Deque<TokenCredential> credentials) {
this.credentials = credentials;
}
@Override
} |
This error message is not properly formatted, spacing issues will show up before 'authentication failed' text. | public Mono<AccessToken> getToken(TokenRequestContext request) {
final StringBuilder errorMsg = new StringBuilder();
return Flux.fromIterable(credentials).flatMap(p -> p.getToken(request).onErrorResume(t -> {
if (t.getMessage() != null && !t.getMessage().contains("authentication unavailable")) {
throw new RuntimeException(UnavailableError+p.getClass().getSimpleName()+"authentication failed.",t);
}
errorMsg.append(" ").append(t.getMessage());
return Mono.empty();
}), 1)
.next()
.switchIfEmpty(Mono.defer(() -> Mono.error(new RuntimeException(FailedError+errorMsg.toString()+" )"))));
} | throw new RuntimeException(UnavailableError+p.getClass().getSimpleName()+"authentication failed.",t); | public Mono<AccessToken> getToken(TokenRequestContext request) {
List<CredentialUnavailableException> exceptions = new ArrayList<>(4);
return Flux.fromIterable(credentials)
.flatMap(p -> p.getToken(request).onErrorResume(Exception.class, t -> {
if (!t.getClass().getSimpleName().equals("CredentialUnavailableException")) {
return Mono.error(new ClientAuthenticationException(
unavailableError + p.getClass().getSimpleName()
+ " authentication failed. Error Details: " + t.getMessage(),
null, t));
}
exceptions.add((CredentialUnavailableException) t);
return Mono.empty();
}), 1)
.next()
.switchIfEmpty(Mono.defer(() -> {
CredentialUnavailableException last = exceptions.get(exceptions.size() - 1);
for (int z = exceptions.size() - 2; z >= 0; z--) {
CredentialUnavailableException current = exceptions.get(z);
last = new CredentialUnavailableException(current.getMessage() + "\r\n" + last.getMessage(),
last.getCause());
}
return Mono.error(last);
}));
} | class ChainedTokenCredential implements TokenCredential {
private final Deque<TokenCredential> credentials;
private final String UnavailableError=this.getClass().getSimpleName()+" authentication failed. -> ";
private final String FailedError=this.getClass().getSimpleName()+" failed to retrieve a token from the included credentials.(";
/**
* Create an instance of chained token credential that aggregates a list of token
* credentials.
*/
ChainedTokenCredential(Deque<TokenCredential> credentials) {
this.credentials = credentials;
}
@Override
} | class ChainedTokenCredential implements TokenCredential {
private final Deque<TokenCredential> credentials;
private final String unavailableError = this.getClass().getSimpleName() + " authentication failed. ---> ";
/**
* Create an instance of chained token credential that aggregates a list of token
* credentials.
*/
ChainedTokenCredential(Deque<TokenCredential> credentials) {
this.credentials = credentials;
}
@Override
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.