comment
stringlengths
1
45k
method_body
stringlengths
23
281k
target_code
stringlengths
0
5.16k
method_body_after
stringlengths
12
281k
context_before
stringlengths
8
543k
context_after
stringlengths
8
543k
`recognizeEntitiesActionResults.get(taskIndex)` `recognizePiiEntitiesActionResults.get(taskIndex)` or `extractKeyPhrasesActionResults.get(taskIndex)` get the object from the sorted output. So there is no need to reorder the errors.
private AnalyzeBatchActionsResult toAnalyzeTasks(AnalyzeJobState analyzeJobState) { TasksStateTasks tasksStateTasks = analyzeJobState.getTasks(); final List<TasksStateTasksEntityRecognitionPiiTasksItem> piiTasksItems = tasksStateTasks.getEntityRecognitionPiiTasks(); final List<TasksStateTasksEntityRecognitionTasksItem> entityRecognitionTasksItems = tasksStateTasks.getEntityRecognitionTasks(); final List<TasksStateTasksKeyPhraseExtractionTasksItem> keyPhraseExtractionTasks = tasksStateTasks.getKeyPhraseExtractionTasks(); List<RecognizeEntitiesActionResult> recognizeEntitiesActionResults = new ArrayList<>(); List<RecognizePiiEntitiesActionResult> recognizePiiEntitiesActionResults = new ArrayList<>(); List<ExtractKeyPhrasesActionResult> extractKeyPhrasesActionResults = new ArrayList<>(); if (!CoreUtils.isNullOrEmpty(entityRecognitionTasksItems)) { for (int i = 0; i < entityRecognitionTasksItems.size(); i++) { final TasksStateTasksEntityRecognitionTasksItem taskItem = entityRecognitionTasksItems.get(i); final RecognizeEntitiesActionResult actionResult = new RecognizeEntitiesActionResult(); RecognizeEntitiesActionResultPropertiesHelper.setResult(actionResult, toRecognizeEntitiesResultCollectionResponse(taskItem.getResults())); TextAnalyticsActionResultPropertiesHelper.setCompletedAt(actionResult, taskItem.getLastUpdateDateTime()); recognizeEntitiesActionResults.add(actionResult); } } if (!CoreUtils.isNullOrEmpty(piiTasksItems)) { for (int i = 0; i < piiTasksItems.size(); i++) { final TasksStateTasksEntityRecognitionPiiTasksItem taskItem = piiTasksItems.get(i); final RecognizePiiEntitiesActionResult actionResult = new RecognizePiiEntitiesActionResult(); RecognizePiiEntitiesActionResultPropertiesHelper.setResult(actionResult, toRecognizePiiEntitiesResultCollection(taskItem.getResults())); TextAnalyticsActionResultPropertiesHelper.setCompletedAt(actionResult, taskItem.getLastUpdateDateTime()); recognizePiiEntitiesActionResults.add(actionResult); } } if 
(!CoreUtils.isNullOrEmpty(keyPhraseExtractionTasks)) { for (int i = 0; i < keyPhraseExtractionTasks.size(); i++) { final TasksStateTasksKeyPhraseExtractionTasksItem taskItem = keyPhraseExtractionTasks.get(i); final ExtractKeyPhrasesActionResult actionResult = new ExtractKeyPhrasesActionResult(); ExtractKeyPhrasesActionResultPropertiesHelper.setResult(actionResult, toExtractKeyPhrasesResultCollection(taskItem.getResults())); TextAnalyticsActionResultPropertiesHelper.setCompletedAt(actionResult, taskItem.getLastUpdateDateTime()); extractKeyPhrasesActionResults.add(actionResult); } } final List<TextAnalyticsError> errors = analyzeJobState.getErrors(); if (!CoreUtils.isNullOrEmpty(errors)) { for (TextAnalyticsError error : errors) { final String[] targetPair = parseActionErrorTarget(error.getTarget()); final String taskName = targetPair[0]; final Integer taskIndex = Integer.valueOf(targetPair[1]); final TextAnalyticsActionResult actionResult; if ("entityRecognitionTasks".equals(taskName)) { actionResult = recognizeEntitiesActionResults.get(taskIndex); } else if ("entityRecognitionPiiTasks".equals(taskName)) { actionResult = recognizePiiEntitiesActionResults.get(taskIndex); } else if ("keyPhraseExtractionTasks".equals(taskName)) { actionResult = extractKeyPhrasesActionResults.get(taskIndex); } else { throw logger.logExceptionAsError(new RuntimeException( "Invalid task name in target reference, " + taskName)); } TextAnalyticsActionResultPropertiesHelper.setIsError(actionResult, true); TextAnalyticsActionResultPropertiesHelper.setError(actionResult, new com.azure.ai.textanalytics.models.TextAnalyticsError( TextAnalyticsErrorCode.fromString( error.getCode() == null ? 
null : error.getCode().toString()), error.getMessage(), null)); } } final AnalyzeBatchActionsResult analyzeBatchActionsResult = new AnalyzeBatchActionsResult(); final RequestStatistics requestStatistics = analyzeJobState.getStatistics(); TextDocumentBatchStatistics batchStatistics = null; if (requestStatistics != null) { batchStatistics = new TextDocumentBatchStatistics( requestStatistics.getDocumentsCount(), requestStatistics.getErroneousDocumentsCount(), requestStatistics.getValidDocumentsCount(), requestStatistics.getTransactionsCount() ); } AnalyzeBatchActionsResultPropertiesHelper.setStatistics(analyzeBatchActionsResult, batchStatistics); AnalyzeBatchActionsResultPropertiesHelper.setRecognizeEntitiesActionResults(analyzeBatchActionsResult, IterableStream.of(recognizeEntitiesActionResults)); AnalyzeBatchActionsResultPropertiesHelper.setRecognizePiiEntitiesActionResults(analyzeBatchActionsResult, IterableStream.of(recognizePiiEntitiesActionResults)); AnalyzeBatchActionsResultPropertiesHelper.setExtractKeyPhrasesActionResults(analyzeBatchActionsResult, IterableStream.of(extractKeyPhrasesActionResults)); return analyzeBatchActionsResult; }
TextAnalyticsActionResultPropertiesHelper.setIsError(actionResult, true);
private AnalyzeBatchActionsResult toAnalyzeTasks(AnalyzeJobState analyzeJobState) { TasksStateTasks tasksStateTasks = analyzeJobState.getTasks(); final List<TasksStateTasksEntityRecognitionPiiTasksItem> piiTasksItems = tasksStateTasks.getEntityRecognitionPiiTasks(); final List<TasksStateTasksEntityRecognitionTasksItem> entityRecognitionTasksItems = tasksStateTasks.getEntityRecognitionTasks(); final List<TasksStateTasksKeyPhraseExtractionTasksItem> keyPhraseExtractionTasks = tasksStateTasks.getKeyPhraseExtractionTasks(); List<RecognizeEntitiesActionResult> recognizeEntitiesActionResults = new ArrayList<>(); List<RecognizePiiEntitiesActionResult> recognizePiiEntitiesActionResults = new ArrayList<>(); List<ExtractKeyPhrasesActionResult> extractKeyPhrasesActionResults = new ArrayList<>(); if (!CoreUtils.isNullOrEmpty(entityRecognitionTasksItems)) { for (int i = 0; i < entityRecognitionTasksItems.size(); i++) { final TasksStateTasksEntityRecognitionTasksItem taskItem = entityRecognitionTasksItems.get(i); final RecognizeEntitiesActionResult actionResult = new RecognizeEntitiesActionResult(); RecognizeEntitiesActionResultPropertiesHelper.setResult(actionResult, toRecognizeEntitiesResultCollectionResponse(taskItem.getResults())); TextAnalyticsActionResultPropertiesHelper.setCompletedAt(actionResult, taskItem.getLastUpdateDateTime()); recognizeEntitiesActionResults.add(actionResult); } } if (!CoreUtils.isNullOrEmpty(piiTasksItems)) { for (int i = 0; i < piiTasksItems.size(); i++) { final TasksStateTasksEntityRecognitionPiiTasksItem taskItem = piiTasksItems.get(i); final RecognizePiiEntitiesActionResult actionResult = new RecognizePiiEntitiesActionResult(); RecognizePiiEntitiesActionResultPropertiesHelper.setResult(actionResult, toRecognizePiiEntitiesResultCollection(taskItem.getResults())); TextAnalyticsActionResultPropertiesHelper.setCompletedAt(actionResult, taskItem.getLastUpdateDateTime()); recognizePiiEntitiesActionResults.add(actionResult); } } if 
(!CoreUtils.isNullOrEmpty(keyPhraseExtractionTasks)) { for (int i = 0; i < keyPhraseExtractionTasks.size(); i++) { final TasksStateTasksKeyPhraseExtractionTasksItem taskItem = keyPhraseExtractionTasks.get(i); final ExtractKeyPhrasesActionResult actionResult = new ExtractKeyPhrasesActionResult(); ExtractKeyPhrasesActionResultPropertiesHelper.setResult(actionResult, toExtractKeyPhrasesResultCollection(taskItem.getResults())); TextAnalyticsActionResultPropertiesHelper.setCompletedAt(actionResult, taskItem.getLastUpdateDateTime()); extractKeyPhrasesActionResults.add(actionResult); } } final List<TextAnalyticsError> errors = analyzeJobState.getErrors(); if (!CoreUtils.isNullOrEmpty(errors)) { for (TextAnalyticsError error : errors) { final String[] targetPair = parseActionErrorTarget(error.getTarget()); final String taskName = targetPair[0]; final Integer taskIndex = Integer.valueOf(targetPair[1]); final TextAnalyticsActionResult actionResult; if ("entityRecognitionTasks".equals(taskName)) { actionResult = recognizeEntitiesActionResults.get(taskIndex); } else if ("entityRecognitionPiiTasks".equals(taskName)) { actionResult = recognizePiiEntitiesActionResults.get(taskIndex); } else if ("keyPhraseExtractionTasks".equals(taskName)) { actionResult = extractKeyPhrasesActionResults.get(taskIndex); } else { throw logger.logExceptionAsError(new RuntimeException( "Invalid task name in target reference, " + taskName)); } TextAnalyticsActionResultPropertiesHelper.setIsError(actionResult, true); TextAnalyticsActionResultPropertiesHelper.setError(actionResult, new com.azure.ai.textanalytics.models.TextAnalyticsError( TextAnalyticsErrorCode.fromString( error.getCode() == null ? 
null : error.getCode().toString()), error.getMessage(), null)); } } final AnalyzeBatchActionsResult analyzeBatchActionsResult = new AnalyzeBatchActionsResult(); final RequestStatistics requestStatistics = analyzeJobState.getStatistics(); TextDocumentBatchStatistics batchStatistics = null; if (requestStatistics != null) { batchStatistics = new TextDocumentBatchStatistics( requestStatistics.getDocumentsCount(), requestStatistics.getErroneousDocumentsCount(), requestStatistics.getValidDocumentsCount(), requestStatistics.getTransactionsCount() ); } AnalyzeBatchActionsResultPropertiesHelper.setStatistics(analyzeBatchActionsResult, batchStatistics); AnalyzeBatchActionsResultPropertiesHelper.setRecognizeEntitiesActionResults(analyzeBatchActionsResult, IterableStream.of(recognizeEntitiesActionResults)); AnalyzeBatchActionsResultPropertiesHelper.setRecognizePiiEntitiesActionResults(analyzeBatchActionsResult, IterableStream.of(recognizePiiEntitiesActionResults)); AnalyzeBatchActionsResultPropertiesHelper.setExtractKeyPhrasesActionResults(analyzeBatchActionsResult, IterableStream.of(extractKeyPhrasesActionResults)); return analyzeBatchActionsResult; }
class AnalyzeBatchActionsAsyncClient { private static final String REGEX_ACTION_ERROR_TARGET = " private final ClientLogger logger = new ClientLogger(AnalyzeBatchActionsAsyncClient.class); private final TextAnalyticsClientImpl service; AnalyzeBatchActionsAsyncClient(TextAnalyticsClientImpl service) { this.service = service; } PollerFlux<AnalyzeBatchActionsOperationDetail, PagedFlux<AnalyzeBatchActionsResult>> beginAnalyzeBatchActions( Iterable<TextDocumentInput> documents, TextAnalyticsActions actions, AnalyzeBatchActionsOptions options, Context context) { try { inputDocumentsValidation(documents); options = getNotNullAnalyzeBatchActionsOptions(options); final Context finalContext = getNotNullContext(context); final AnalyzeBatchInput analyzeBatchInput = new AnalyzeBatchInput() .setAnalysisInput(new MultiLanguageBatchInput().setDocuments(toMultiLanguageInput(documents))) .setTasks(getJobManifestTasks(actions)); analyzeBatchInput.setDisplayName(actions.getDisplayName()); final boolean finalIncludeStatistics = options.isIncludeStatistics(); return new PollerFlux<>( DEFAULT_POLL_INTERVAL, activationOperation( service.analyzeWithResponseAsync(analyzeBatchInput, finalContext.addData(AZ_TRACING_NAMESPACE_KEY, COGNITIVE_TRACING_NAMESPACE_VALUE)) .map(analyzeResponse -> { final AnalyzeBatchActionsOperationDetail textAnalyticsOperationResult = new AnalyzeBatchActionsOperationDetail(); AnalyzeBatchActionsOperationDetailPropertiesHelper .setOperationId(textAnalyticsOperationResult, parseOperationId(analyzeResponse.getDeserializedHeaders().getOperationLocation())); return textAnalyticsOperationResult; })), pollingOperation(operationId -> service.analyzeStatusWithResponseAsync(operationId, finalIncludeStatistics, null, null, finalContext)), (activationResponse, pollingContext) -> Mono.error(new RuntimeException("Cancellation is not supported.")), fetchingOperation(operationId -> Mono.just(getAnalyzeOperationFluxPage( operationId, null, null, finalIncludeStatistics, 
finalContext))) ); } catch (RuntimeException ex) { return PollerFlux.error(ex); } } PollerFlux<AnalyzeBatchActionsOperationDetail, PagedIterable<AnalyzeBatchActionsResult>> beginAnalyzeBatchActionsIterable(Iterable<TextDocumentInput> documents, TextAnalyticsActions actions, AnalyzeBatchActionsOptions options, Context context) { try { inputDocumentsValidation(documents); options = getNotNullAnalyzeBatchActionsOptions(options); final Context finalContext = getNotNullContext(context); final AnalyzeBatchInput analyzeBatchInput = new AnalyzeBatchInput() .setAnalysisInput(new MultiLanguageBatchInput().setDocuments(toMultiLanguageInput(documents))) .setTasks(getJobManifestTasks(actions)); analyzeBatchInput.setDisplayName(actions.getDisplayName()); final boolean finalIncludeStatistics = options.isIncludeStatistics(); return new PollerFlux<>( DEFAULT_POLL_INTERVAL, activationOperation( service.analyzeWithResponseAsync(analyzeBatchInput, finalContext.addData(AZ_TRACING_NAMESPACE_KEY, COGNITIVE_TRACING_NAMESPACE_VALUE)) .map(analyzeResponse -> { final AnalyzeBatchActionsOperationDetail operationDetail = new AnalyzeBatchActionsOperationDetail(); AnalyzeBatchActionsOperationDetailPropertiesHelper.setOperationId(operationDetail, parseOperationId(analyzeResponse.getDeserializedHeaders().getOperationLocation())); return operationDetail; })), pollingOperation(operationId -> service.analyzeStatusWithResponseAsync(operationId, finalIncludeStatistics, null, null, finalContext)), (activationResponse, pollingContext) -> Mono.error(new RuntimeException("Cancellation is not supported.")), fetchingOperationIterable(operationId -> Mono.just(new PagedIterable<>(getAnalyzeOperationFluxPage( operationId, null, null, finalIncludeStatistics, finalContext)))) ); } catch (RuntimeException ex) { return PollerFlux.error(ex); } } private JobManifestTasks getJobManifestTasks(TextAnalyticsActions actions) { return new JobManifestTasks() .setEntityRecognitionTasks(actions.getRecognizeEntitiesOptions() 
== null ? null : StreamSupport.stream(actions.getRecognizeEntitiesOptions().spliterator(), false).map( action -> { if (action == null) { return null; } final EntitiesTask entitiesTask = new EntitiesTask(); entitiesTask.setParameters( new EntitiesTaskParameters() .setModelVersion(getNotNullModelVersion(action.getModelVersion()))); return entitiesTask; }).collect(Collectors.toList())) .setEntityRecognitionPiiTasks(actions.getRecognizePiiEntitiesOptions() == null ? null : StreamSupport.stream(actions.getRecognizePiiEntitiesOptions().spliterator(), false).map( action -> { if (action == null) { return null; } final PiiTask piiTask = new PiiTask(); piiTask.setParameters( new PiiTaskParameters() .setModelVersion(getNotNullModelVersion(action.getModelVersion())) .setDomain(PiiTaskParametersDomain.fromString( action.getDomainFilter() == null ? null : action.getDomainFilter().toString()))); return piiTask; }).collect(Collectors.toList())) .setKeyPhraseExtractionTasks(actions.getExtractKeyPhrasesOptions() == null ? 
null : StreamSupport.stream(actions.getExtractKeyPhrasesOptions().spliterator(), false).map( action -> { if (action == null) { return null; } final KeyPhrasesTask keyPhrasesTask = new KeyPhrasesTask(); keyPhrasesTask.setParameters( new KeyPhrasesTaskParameters() .setModelVersion(getNotNullModelVersion(action.getModelVersion()))); return keyPhrasesTask; }).collect(Collectors.toList())); } private Function<PollingContext<AnalyzeBatchActionsOperationDetail>, Mono<AnalyzeBatchActionsOperationDetail>> activationOperation(Mono<AnalyzeBatchActionsOperationDetail> operationResult) { return pollingContext -> { try { return operationResult.onErrorMap(Utility::mapToHttpResponseExceptionIfExist); } catch (RuntimeException ex) { return monoError(logger, ex); } }; } private Function<PollingContext<AnalyzeBatchActionsOperationDetail>, Mono<PollResponse<AnalyzeBatchActionsOperationDetail>>> pollingOperation(Function<String, Mono<Response<AnalyzeJobState>>> pollingFunction) { return pollingContext -> { try { final PollResponse<AnalyzeBatchActionsOperationDetail> operationResultPollResponse = pollingContext.getLatestResponse(); final String operationId = operationResultPollResponse.getValue().getOperationId(); return pollingFunction.apply(operationId) .flatMap(modelResponse -> processAnalyzedModelResponse(modelResponse, operationResultPollResponse)) .onErrorMap(Utility::mapToHttpResponseExceptionIfExist); } catch (RuntimeException ex) { return monoError(logger, ex); } }; } private Function<PollingContext<AnalyzeBatchActionsOperationDetail>, Mono<PagedFlux<AnalyzeBatchActionsResult>>> fetchingOperation(Function<String, Mono<PagedFlux<AnalyzeBatchActionsResult>>> fetchingFunction) { return pollingContext -> { try { final String operationId = pollingContext.getLatestResponse().getValue().getOperationId(); return fetchingFunction.apply(operationId); } catch (RuntimeException ex) { return monoError(logger, ex); } }; } private Function<PollingContext<AnalyzeBatchActionsOperationDetail>, 
Mono<PagedIterable<AnalyzeBatchActionsResult>>> fetchingOperationIterable(Function<String, Mono<PagedIterable<AnalyzeBatchActionsResult>>> fetchingFunction) { return pollingContext -> { try { final String operationId = pollingContext.getLatestResponse().getValue().getOperationId(); return fetchingFunction.apply(operationId); } catch (RuntimeException ex) { return monoError(logger, ex); } }; } PagedFlux<AnalyzeBatchActionsResult> getAnalyzeOperationFluxPage(String operationId, Integer top, Integer skip, boolean showStats, Context context) { return new PagedFlux<>( () -> getPage(null, operationId, top, skip, showStats, context), continuationToken -> getPage(continuationToken, operationId, top, skip, showStats, context)); } Mono<PagedResponse<AnalyzeBatchActionsResult>> getPage(String continuationToken, String operationId, Integer top, Integer skip, boolean showStats, Context context) { if (continuationToken != null) { final Map<String, Integer> continuationTokenMap = parseNextLink(continuationToken); final Integer topValue = continuationTokenMap.getOrDefault("$top", null); final Integer skipValue = continuationTokenMap.getOrDefault("$skip", null); return service.analyzeStatusWithResponseAsync(operationId, showStats, topValue, skipValue, context) .map(this::toAnalyzeTasksPagedResponse) .onErrorMap(Utility::mapToHttpResponseExceptionIfExist); } else { return service.analyzeStatusWithResponseAsync(operationId, showStats, top, skip, context) .map(this::toAnalyzeTasksPagedResponse) .onErrorMap(Utility::mapToHttpResponseExceptionIfExist); } } private PagedResponse<AnalyzeBatchActionsResult> toAnalyzeTasksPagedResponse(Response<AnalyzeJobState> response) { final AnalyzeJobState analyzeJobState = response.getValue(); return new PagedResponseBase<Void, AnalyzeBatchActionsResult>( response.getRequest(), response.getStatusCode(), response.getHeaders(), Arrays.asList(toAnalyzeTasks(analyzeJobState)), analyzeJobState.getNextLink(), null); } private 
Mono<PollResponse<AnalyzeBatchActionsOperationDetail>> processAnalyzedModelResponse( Response<AnalyzeJobState> analyzeJobStateResponse, PollResponse<AnalyzeBatchActionsOperationDetail> operationResultPollResponse) { LongRunningOperationStatus status = LongRunningOperationStatus.SUCCESSFULLY_COMPLETED; if (analyzeJobStateResponse.getValue() != null && analyzeJobStateResponse.getValue().getStatus() != null) { switch (analyzeJobStateResponse.getValue().getStatus()) { case NOT_STARTED: case RUNNING: status = LongRunningOperationStatus.IN_PROGRESS; break; case SUCCEEDED: status = LongRunningOperationStatus.SUCCESSFULLY_COMPLETED; break; case CANCELLED: status = LongRunningOperationStatus.USER_CANCELLED; break; default: status = LongRunningOperationStatus.fromString( analyzeJobStateResponse.getValue().getStatus().toString(), true); break; } } AnalyzeBatchActionsOperationDetailPropertiesHelper.setDisplayName(operationResultPollResponse.getValue(), analyzeJobStateResponse.getValue().getDisplayName()); AnalyzeBatchActionsOperationDetailPropertiesHelper.setCreatedAt(operationResultPollResponse.getValue(), analyzeJobStateResponse.getValue().getCreatedDateTime()); AnalyzeBatchActionsOperationDetailPropertiesHelper.setExpiresAt(operationResultPollResponse.getValue(), analyzeJobStateResponse.getValue().getExpirationDateTime()); AnalyzeBatchActionsOperationDetailPropertiesHelper.setLastModifiedAt(operationResultPollResponse.getValue(), analyzeJobStateResponse.getValue().getLastUpdateDateTime()); final TasksStateTasks tasksResult = analyzeJobStateResponse.getValue().getTasks(); AnalyzeBatchActionsOperationDetailPropertiesHelper.setActionsFailed(operationResultPollResponse.getValue(), tasksResult.getFailed()); AnalyzeBatchActionsOperationDetailPropertiesHelper.setActionsInProgress(operationResultPollResponse.getValue(), tasksResult.getInProgress()); AnalyzeBatchActionsOperationDetailPropertiesHelper.setActionsSucceeded( operationResultPollResponse.getValue(), 
tasksResult.getCompleted()); AnalyzeBatchActionsOperationDetailPropertiesHelper.setActionsInTotal(operationResultPollResponse.getValue(), tasksResult.getTotal()); return Mono.just(new PollResponse<>(status, operationResultPollResponse.getValue())); } private Context getNotNullContext(Context context) { return context == null ? Context.NONE : context; } private AnalyzeBatchActionsOptions getNotNullAnalyzeBatchActionsOptions(AnalyzeBatchActionsOptions options) { return options == null ? new AnalyzeBatchActionsOptions() : options; } private String getNotNullModelVersion(String modelVersion) { return modelVersion == null ? "latest" : modelVersion; } private String[] parseActionErrorTarget(String targetReference) { if (CoreUtils.isNullOrEmpty(targetReference)) { throw logger.logExceptionAsError(new RuntimeException( "Expected an error with a target field referencing an action but did not get one")); } final Pattern pattern = Pattern.compile(REGEX_ACTION_ERROR_TARGET, Pattern.MULTILINE); final Matcher matcher = pattern.matcher(targetReference); String[] taskNameIdPair = new String[2]; while (matcher.find()) { taskNameIdPair[0] = matcher.group(1); taskNameIdPair[1] = matcher.group(2); } return taskNameIdPair; } }
class AnalyzeBatchActionsAsyncClient { private static final String REGEX_ACTION_ERROR_TARGET = " private final ClientLogger logger = new ClientLogger(AnalyzeBatchActionsAsyncClient.class); private final TextAnalyticsClientImpl service; AnalyzeBatchActionsAsyncClient(TextAnalyticsClientImpl service) { this.service = service; } PollerFlux<AnalyzeBatchActionsOperationDetail, PagedFlux<AnalyzeBatchActionsResult>> beginAnalyzeBatchActions( Iterable<TextDocumentInput> documents, TextAnalyticsActions actions, AnalyzeBatchActionsOptions options, Context context) { try { inputDocumentsValidation(documents); options = getNotNullAnalyzeBatchActionsOptions(options); final Context finalContext = getNotNullContext(context); final AnalyzeBatchInput analyzeBatchInput = new AnalyzeBatchInput() .setAnalysisInput(new MultiLanguageBatchInput().setDocuments(toMultiLanguageInput(documents))) .setTasks(getJobManifestTasks(actions)); analyzeBatchInput.setDisplayName(actions.getDisplayName()); final boolean finalIncludeStatistics = options.isIncludeStatistics(); return new PollerFlux<>( DEFAULT_POLL_INTERVAL, activationOperation( service.analyzeWithResponseAsync(analyzeBatchInput, finalContext.addData(AZ_TRACING_NAMESPACE_KEY, COGNITIVE_TRACING_NAMESPACE_VALUE)) .map(analyzeResponse -> { final AnalyzeBatchActionsOperationDetail textAnalyticsOperationResult = new AnalyzeBatchActionsOperationDetail(); AnalyzeBatchActionsOperationDetailPropertiesHelper .setOperationId(textAnalyticsOperationResult, parseOperationId(analyzeResponse.getDeserializedHeaders().getOperationLocation())); return textAnalyticsOperationResult; })), pollingOperation(operationId -> service.analyzeStatusWithResponseAsync(operationId, finalIncludeStatistics, null, null, finalContext)), (activationResponse, pollingContext) -> Mono.error(new RuntimeException("Cancellation is not supported.")), fetchingOperation(operationId -> Mono.just(getAnalyzeOperationFluxPage( operationId, null, null, finalIncludeStatistics, 
finalContext))) ); } catch (RuntimeException ex) { return PollerFlux.error(ex); } } PollerFlux<AnalyzeBatchActionsOperationDetail, PagedIterable<AnalyzeBatchActionsResult>> beginAnalyzeBatchActionsIterable(Iterable<TextDocumentInput> documents, TextAnalyticsActions actions, AnalyzeBatchActionsOptions options, Context context) { try { inputDocumentsValidation(documents); options = getNotNullAnalyzeBatchActionsOptions(options); final Context finalContext = getNotNullContext(context); final AnalyzeBatchInput analyzeBatchInput = new AnalyzeBatchInput() .setAnalysisInput(new MultiLanguageBatchInput().setDocuments(toMultiLanguageInput(documents))) .setTasks(getJobManifestTasks(actions)); analyzeBatchInput.setDisplayName(actions.getDisplayName()); final boolean finalIncludeStatistics = options.isIncludeStatistics(); return new PollerFlux<>( DEFAULT_POLL_INTERVAL, activationOperation( service.analyzeWithResponseAsync(analyzeBatchInput, finalContext.addData(AZ_TRACING_NAMESPACE_KEY, COGNITIVE_TRACING_NAMESPACE_VALUE)) .map(analyzeResponse -> { final AnalyzeBatchActionsOperationDetail operationDetail = new AnalyzeBatchActionsOperationDetail(); AnalyzeBatchActionsOperationDetailPropertiesHelper.setOperationId(operationDetail, parseOperationId(analyzeResponse.getDeserializedHeaders().getOperationLocation())); return operationDetail; })), pollingOperation(operationId -> service.analyzeStatusWithResponseAsync(operationId, finalIncludeStatistics, null, null, finalContext)), (activationResponse, pollingContext) -> Mono.error(new RuntimeException("Cancellation is not supported.")), fetchingOperationIterable(operationId -> Mono.just(new PagedIterable<>(getAnalyzeOperationFluxPage( operationId, null, null, finalIncludeStatistics, finalContext)))) ); } catch (RuntimeException ex) { return PollerFlux.error(ex); } } private JobManifestTasks getJobManifestTasks(TextAnalyticsActions actions) { return new JobManifestTasks() .setEntityRecognitionTasks(actions.getRecognizeEntitiesOptions() 
== null ? null : StreamSupport.stream(actions.getRecognizeEntitiesOptions().spliterator(), false).map( action -> { if (action == null) { return null; } final EntitiesTask entitiesTask = new EntitiesTask(); entitiesTask.setParameters( new EntitiesTaskParameters() .setModelVersion(getNotNullModelVersion(action.getModelVersion()))); return entitiesTask; }).collect(Collectors.toList())) .setEntityRecognitionPiiTasks(actions.getRecognizePiiEntitiesOptions() == null ? null : StreamSupport.stream(actions.getRecognizePiiEntitiesOptions().spliterator(), false).map( action -> { if (action == null) { return null; } final PiiTask piiTask = new PiiTask(); piiTask.setParameters( new PiiTaskParameters() .setModelVersion(getNotNullModelVersion(action.getModelVersion())) .setDomain(PiiTaskParametersDomain.fromString( action.getDomainFilter() == null ? null : action.getDomainFilter().toString()))); return piiTask; }).collect(Collectors.toList())) .setKeyPhraseExtractionTasks(actions.getExtractKeyPhrasesOptions() == null ? 
null : StreamSupport.stream(actions.getExtractKeyPhrasesOptions().spliterator(), false).map( action -> { if (action == null) { return null; } final KeyPhrasesTask keyPhrasesTask = new KeyPhrasesTask(); keyPhrasesTask.setParameters( new KeyPhrasesTaskParameters() .setModelVersion(getNotNullModelVersion(action.getModelVersion()))); return keyPhrasesTask; }).collect(Collectors.toList())); } private Function<PollingContext<AnalyzeBatchActionsOperationDetail>, Mono<AnalyzeBatchActionsOperationDetail>> activationOperation(Mono<AnalyzeBatchActionsOperationDetail> operationResult) { return pollingContext -> { try { return operationResult.onErrorMap(Utility::mapToHttpResponseExceptionIfExist); } catch (RuntimeException ex) { return monoError(logger, ex); } }; } private Function<PollingContext<AnalyzeBatchActionsOperationDetail>, Mono<PollResponse<AnalyzeBatchActionsOperationDetail>>> pollingOperation(Function<String, Mono<Response<AnalyzeJobState>>> pollingFunction) { return pollingContext -> { try { final PollResponse<AnalyzeBatchActionsOperationDetail> operationResultPollResponse = pollingContext.getLatestResponse(); final String operationId = operationResultPollResponse.getValue().getOperationId(); return pollingFunction.apply(operationId) .flatMap(modelResponse -> processAnalyzedModelResponse(modelResponse, operationResultPollResponse)) .onErrorMap(Utility::mapToHttpResponseExceptionIfExist); } catch (RuntimeException ex) { return monoError(logger, ex); } }; } private Function<PollingContext<AnalyzeBatchActionsOperationDetail>, Mono<PagedFlux<AnalyzeBatchActionsResult>>> fetchingOperation(Function<String, Mono<PagedFlux<AnalyzeBatchActionsResult>>> fetchingFunction) { return pollingContext -> { try { final String operationId = pollingContext.getLatestResponse().getValue().getOperationId(); return fetchingFunction.apply(operationId); } catch (RuntimeException ex) { return monoError(logger, ex); } }; } private Function<PollingContext<AnalyzeBatchActionsOperationDetail>, 
Mono<PagedIterable<AnalyzeBatchActionsResult>>> fetchingOperationIterable(Function<String, Mono<PagedIterable<AnalyzeBatchActionsResult>>> fetchingFunction) { return pollingContext -> { try { final String operationId = pollingContext.getLatestResponse().getValue().getOperationId(); return fetchingFunction.apply(operationId); } catch (RuntimeException ex) { return monoError(logger, ex); } }; } PagedFlux<AnalyzeBatchActionsResult> getAnalyzeOperationFluxPage(String operationId, Integer top, Integer skip, boolean showStats, Context context) { return new PagedFlux<>( () -> getPage(null, operationId, top, skip, showStats, context), continuationToken -> getPage(continuationToken, operationId, top, skip, showStats, context)); } Mono<PagedResponse<AnalyzeBatchActionsResult>> getPage(String continuationToken, String operationId, Integer top, Integer skip, boolean showStats, Context context) { if (continuationToken != null) { final Map<String, Integer> continuationTokenMap = parseNextLink(continuationToken); final Integer topValue = continuationTokenMap.getOrDefault("$top", null); final Integer skipValue = continuationTokenMap.getOrDefault("$skip", null); return service.analyzeStatusWithResponseAsync(operationId, showStats, topValue, skipValue, context) .map(this::toAnalyzeTasksPagedResponse) .onErrorMap(Utility::mapToHttpResponseExceptionIfExist); } else { return service.analyzeStatusWithResponseAsync(operationId, showStats, top, skip, context) .map(this::toAnalyzeTasksPagedResponse) .onErrorMap(Utility::mapToHttpResponseExceptionIfExist); } } private PagedResponse<AnalyzeBatchActionsResult> toAnalyzeTasksPagedResponse(Response<AnalyzeJobState> response) { final AnalyzeJobState analyzeJobState = response.getValue(); return new PagedResponseBase<Void, AnalyzeBatchActionsResult>( response.getRequest(), response.getStatusCode(), response.getHeaders(), Arrays.asList(toAnalyzeTasks(analyzeJobState)), analyzeJobState.getNextLink(), null); } private 
Mono<PollResponse<AnalyzeBatchActionsOperationDetail>> processAnalyzedModelResponse( Response<AnalyzeJobState> analyzeJobStateResponse, PollResponse<AnalyzeBatchActionsOperationDetail> operationResultPollResponse) { LongRunningOperationStatus status = LongRunningOperationStatus.SUCCESSFULLY_COMPLETED; if (analyzeJobStateResponse.getValue() != null && analyzeJobStateResponse.getValue().getStatus() != null) { switch (analyzeJobStateResponse.getValue().getStatus()) { case NOT_STARTED: case RUNNING: status = LongRunningOperationStatus.IN_PROGRESS; break; case SUCCEEDED: status = LongRunningOperationStatus.SUCCESSFULLY_COMPLETED; break; case CANCELLED: status = LongRunningOperationStatus.USER_CANCELLED; break; default: status = LongRunningOperationStatus.fromString( analyzeJobStateResponse.getValue().getStatus().toString(), true); break; } } AnalyzeBatchActionsOperationDetailPropertiesHelper.setDisplayName(operationResultPollResponse.getValue(), analyzeJobStateResponse.getValue().getDisplayName()); AnalyzeBatchActionsOperationDetailPropertiesHelper.setCreatedAt(operationResultPollResponse.getValue(), analyzeJobStateResponse.getValue().getCreatedDateTime()); AnalyzeBatchActionsOperationDetailPropertiesHelper.setExpiresAt(operationResultPollResponse.getValue(), analyzeJobStateResponse.getValue().getExpirationDateTime()); AnalyzeBatchActionsOperationDetailPropertiesHelper.setLastModifiedAt(operationResultPollResponse.getValue(), analyzeJobStateResponse.getValue().getLastUpdateDateTime()); final TasksStateTasks tasksResult = analyzeJobStateResponse.getValue().getTasks(); AnalyzeBatchActionsOperationDetailPropertiesHelper.setActionsFailed(operationResultPollResponse.getValue(), tasksResult.getFailed()); AnalyzeBatchActionsOperationDetailPropertiesHelper.setActionsInProgress(operationResultPollResponse.getValue(), tasksResult.getInProgress()); AnalyzeBatchActionsOperationDetailPropertiesHelper.setActionsSucceeded( operationResultPollResponse.getValue(), 
tasksResult.getCompleted()); AnalyzeBatchActionsOperationDetailPropertiesHelper.setActionsInTotal(operationResultPollResponse.getValue(), tasksResult.getTotal()); return Mono.just(new PollResponse<>(status, operationResultPollResponse.getValue())); } private Context getNotNullContext(Context context) { return context == null ? Context.NONE : context; } private AnalyzeBatchActionsOptions getNotNullAnalyzeBatchActionsOptions(AnalyzeBatchActionsOptions options) { return options == null ? new AnalyzeBatchActionsOptions() : options; } private String getNotNullModelVersion(String modelVersion) { return modelVersion == null ? "latest" : modelVersion; } private String[] parseActionErrorTarget(String targetReference) { if (CoreUtils.isNullOrEmpty(targetReference)) { throw logger.logExceptionAsError(new RuntimeException( "Expected an error with a target field referencing an action but did not get one")); } final Pattern pattern = Pattern.compile(REGEX_ACTION_ERROR_TARGET, Pattern.MULTILINE); final Matcher matcher = pattern.matcher(targetReference); String[] taskNameIdPair = new String[2]; while (matcher.find()) { taskNameIdPair[0] = matcher.group(1); taskNameIdPair[1] = matcher.group(2); } return taskNameIdPair; } }
This should not be reset to `null`. Instead, in `buildAsyncClient` method, check if both `tokenCredential` and `keyCredential` are set and throw an exception. The other option is to prioritize one type of credential over the other in the build method. In this implementation, the last call to `credential()` wins and it may not be obvious to the user.
public MixedRealityStsClientBuilder credential(TokenCredential tokenCredential) { this.tokenCredential = Objects.requireNonNull(tokenCredential, "'tokenCredential' cannot be null."); this.keyCredential = null; return this; }
this.keyCredential = null;
public MixedRealityStsClientBuilder credential(TokenCredential tokenCredential) { this.tokenCredential = Objects.requireNonNull(tokenCredential, "'tokenCredential' cannot be null."); return this; }
/**
 * Builder for {@link MixedRealityStsClient} and {@link MixedRealityStsAsyncClient} instances.
 * Exactly one credential type should be configured; setting one credential type clears the other,
 * so the last {@code credential(...)} call wins.
 */
class MixedRealityStsClientBuilder {
    private static final String MIXED_REALITY_STS_PROPERTIES = "azure-mixedreality-authentication.properties";
    private static final String SDK_NAME = "name";
    private static final String SDK_VERSION = "version";

    private final List<HttpPipelinePolicy> customPolicies = new ArrayList<HttpPipelinePolicy>();
    private final ClientLogger logger = new ClientLogger(MixedRealityStsClientBuilder.class);

    private String accountDomain;
    private String accountId;
    private MixedRealityStsServiceVersion apiVersion;
    private ClientOptions clientOptions;
    private Configuration configuration;
    private String endpoint;
    private HttpClient httpClient;
    private AzureKeyCredential keyCredential;
    private HttpLogOptions logOptions = new HttpLogOptions();
    private HttpPipeline pipeline;
    private RetryPolicy retryPolicy;
    private TokenCredential tokenCredential;

    /**
     * Constructs a new builder used to configure and build {@link MixedRealityStsClient MixedRealityStsClients} and
     * {@link MixedRealityStsAsyncClient MixedRealityStsAsyncClients}.
     */
    public MixedRealityStsClientBuilder() {
    }

    /**
     * Sets the Mixed Reality service account domain.
     *
     * @param accountDomain The Mixed Reality service account domain.
     * @return The updated {@link MixedRealityStsClientBuilder} object.
     * @throws IllegalArgumentException If {@code accountDomain} is null or empty.
     */
    public MixedRealityStsClientBuilder accountDomain(String accountDomain) {
        Objects.requireNonNull(accountDomain, "'accountDomain' cannot be null.");
        if (accountDomain.isEmpty()) {
            throw logger.logExceptionAsError(new IllegalArgumentException("'accountDomain' cannot be an empty string."));
        }
        this.accountDomain = accountDomain;
        return this;
    }

    /**
     * Sets the Mixed Reality service account identifier.
     *
     * @param accountId The Mixed Reality service account identifier. The value is expected to be in UUID format.
     * @return The updated {@link MixedRealityStsClientBuilder} object.
     * @throws IllegalArgumentException If {@code accountId} is null or empty.
     */
    public MixedRealityStsClientBuilder accountId(String accountId) {
        Objects.requireNonNull(accountId, "'accountId' cannot be null.");
        if (accountId.isEmpty()) {
            throw logger.logExceptionAsError(new IllegalArgumentException("'accountId' cannot be an empty string."));
        }
        this.accountId = accountId;
        return this;
    }

    /**
     * Apply additional {@link HttpPipelinePolicy policies}.
     *
     * @param customPolicy An HttpPipelinePolicy object to be applied after the defaults.
     * @return The updated {@link MixedRealityStsClientBuilder} object.
     */
    public MixedRealityStsClientBuilder addPolicy(HttpPipelinePolicy customPolicy) {
        this.customPolicies.add(Objects.requireNonNull(customPolicy, "'customPolicy' cannot be null."));
        return this;
    }

    /**
     * Create a {@link MixedRealityStsClient} based on options set in the builder. Every time {@code buildClient()}
     * is called a new instance of {@link MixedRealityStsClient} is created.
     *
     * @return A {@link MixedRealityStsClient} with the options set from the builder.
     */
    public MixedRealityStsClient buildClient() {
        return new MixedRealityStsClient(this.buildAsyncClient());
    }

    /**
     * Create a {@link MixedRealityStsAsyncClient} based on options set in the builder. Every time
     * {@code buildAsyncClient()} is called a new instance of {@link MixedRealityStsAsyncClient} is created.
     *
     * @return A {@link MixedRealityStsAsyncClient} with the options set from the builder.
     * @throws NullPointerException If any required values are null.
     * @throws IllegalArgumentException If the accountId or endpoint are not properly formatted.
     */
    public MixedRealityStsAsyncClient buildAsyncClient() {
        Objects.requireNonNull(this.accountId, "The 'accountId' has not been set and is required.");
        Objects.requireNonNull(this.accountDomain, "The 'accountDomain' has not been set and is required.");

        UUID accountId;
        try {
            accountId = UUID.fromString(this.accountId);
        } catch (IllegalArgumentException ex) {
            throw logger.logExceptionAsWarning(new IllegalArgumentException("The 'accountId' must be a UUID formatted value."));
        }

        // Prefer an explicit endpoint when supplied (must parse as a URL); otherwise derive it from the domain.
        String endpoint;
        if (this.endpoint != null) {
            try {
                new URL(this.endpoint);
                endpoint = this.endpoint;
            } catch (MalformedURLException ex) {
                throw logger.logExceptionAsWarning(new IllegalArgumentException("The 'endpoint' must be a valid URL."));
            }
        } else {
            endpoint = AuthenticationEndpoint.constructFromDomain(this.accountDomain);
        }

        // A user-supplied pipeline takes precedence over any credential configuration.
        if (this.pipeline == null) {
            // Adapt an account key credential into a token credential when no token credential was set.
            if (this.tokenCredential == null && this.keyCredential != null) {
                this.tokenCredential = new MixedRealityAccountKeyCredential(accountId, this.keyCredential);
            }

            Objects.requireNonNull(this.tokenCredential, "The 'credential' has not been set and is required.");

            String scope = AuthenticationEndpoint.constructScope(endpoint);
            HttpPipelinePolicy authPolicy = new BearerTokenAuthenticationPolicy(this.tokenCredential, scope);
            this.pipeline = createHttpPipeline(this.httpClient, authPolicy, this.customPolicies);
        }

        MixedRealityStsServiceVersion version;
        if (this.apiVersion != null) {
            version = this.apiVersion;
        } else {
            version = MixedRealityStsServiceVersion.getLatest();
        }

        MixedRealityStsRestClientImpl serviceClient = new MixedRealityStsRestClientImplBuilder()
            .apiVersion(version.getVersion())
            .pipeline(this.pipeline)
            .host(endpoint)
            .buildClient();

        return new MixedRealityStsAsyncClient(accountId, serviceClient);
    }

    /**
     * Sets the {@link ClientOptions} which enables various options to be set on the client, for example setting an
     * {@code applicationId} that is applied to the {@link UserAgentPolicy} for telemetry/monitoring purposes.
     *
     * @param clientOptions the {@link ClientOptions} to be set on the client.
     * @return The updated {@link MixedRealityStsClientBuilder} object.
     */
    public MixedRealityStsClientBuilder clientOptions(ClientOptions clientOptions) {
        this.clientOptions = clientOptions;
        return this;
    }

    /**
     * Sets the {@link TokenCredential} used to authenticate HTTP requests, clearing any previously configured
     * {@link AzureKeyCredential}.
     *
     * <p>Note: this method was documented but missing from the class body; restored so that the Javadoc is no
     * longer orphaned and {@code tokenCredential} can be set directly.</p>
     *
     * @param tokenCredential The {@link TokenCredential} used to authenticate HTTP requests.
     * @return The updated {@link MixedRealityStsClientBuilder} object.
     * @throws NullPointerException If {@code tokenCredential} is null.
     */
    public MixedRealityStsClientBuilder credential(TokenCredential tokenCredential) {
        this.tokenCredential = Objects.requireNonNull(tokenCredential, "'tokenCredential' cannot be null.");
        this.keyCredential = null;
        return this;
    }

    /**
     * Sets the {@link AzureKeyCredential} used to authenticate HTTP requests, clearing any previously configured
     * {@link TokenCredential}.
     *
     * <p><b>Note:</b> Not recommended for production applications.</p>
     *
     * @param keyCredential The {@link AzureKeyCredential} used to authenticate HTTP requests.
     * @return The updated {@link MixedRealityStsClientBuilder} object.
     * @throws NullPointerException If {@code keyCredential} is null.
     */
    public MixedRealityStsClientBuilder credential(AzureKeyCredential keyCredential) {
        this.keyCredential = Objects.requireNonNull(keyCredential, "'keyCredential' cannot be null.");
        this.tokenCredential = null;
        return this;
    }

    /**
     * Sets the configuration store that is used during construction of the service client.
     *
     * @param configuration The configuration store used by the client.
     * @return The updated MixedRealityStsClientBuilder object.
     */
    public MixedRealityStsClientBuilder configuration(Configuration configuration) {
        this.configuration = configuration;
        return this;
    }

    /**
     * Sets the Mixed Reality STS service endpoint.
     *
     * @param endpoint The Mixed Reality STS service endpoint.
     * @return The updated MixedRealityStsClientBuilder object.
     * @throws IllegalArgumentException If {@code endpoint} is null or it cannot be parsed into a valid URL.
     */
    public MixedRealityStsClientBuilder endpoint(String endpoint) {
        this.endpoint = endpoint;
        return this;
    }

    /**
     * Sets the HTTP client to use for sending and receiving requests to and from the service.
     *
     * @param client The HTTP client to use for requests.
     * @return The updated ConfigurationClientBuilder object.
     */
    public MixedRealityStsClientBuilder httpClient(HttpClient client) {
        if (this.httpClient != null && client == null) {
            logger.info("HttpClient is being set to 'null' when it was previously configured.");
        }
        this.httpClient = client;
        return this;
    }

    /**
     * Sets the {@link HttpLogOptions} for service requests.
     *
     * @param logOptions The logging configuration to use when sending and receiving HTTP requests/responses.
     * @return The updated {@link MixedRealityStsClientBuilder} object.
     */
    public MixedRealityStsClientBuilder httpLogOptions(HttpLogOptions logOptions) {
        this.logOptions = Objects.requireNonNull(logOptions, "'logOptions' cannot be null.");
        return this;
    }

    /**
     * Sets the HTTP pipeline to use for the service client. If {@code pipeline} is set, most other settings are
     * ignored when building the client.
     *
     * @param pipeline The HTTP pipeline to use for sending service requests and receiving responses.
     * @return The updated {@link MixedRealityStsClientBuilder} object.
     */
    public MixedRealityStsClientBuilder pipeline(HttpPipeline pipeline) {
        if (this.pipeline != null && pipeline == null) {
            logger.info("HttpPipeline is being set to 'null' when it was previously configured.");
        }
        this.pipeline = pipeline;
        return this;
    }

    /**
     * Sets the {@link RetryPolicy} that is used to retry requests. The default retry policy is used if not provided.
     *
     * @param retryPolicy The {@link RetryPolicy} that will be used to retry requests.
     * @return The updated MixedRealityStsClientBuilder object.
     */
    public MixedRealityStsClientBuilder retryPolicy(RetryPolicy retryPolicy) {
        this.retryPolicy = retryPolicy;
        return this;
    }

    /**
     * Sets the {@link MixedRealityStsServiceVersion} that is used when making API requests. If a service version is
     * not provided, the latest known service version for this client library is used.
     *
     * @param version {@link MixedRealityStsServiceVersion} of the service to be used when making requests.
     * @return The updated ConfigurationClientBuilder object.
     */
    public MixedRealityStsClientBuilder serviceVersion(MixedRealityStsServiceVersion version) {
        this.apiVersion = version;
        return this;
    }

    /** Appends the standard policy set (user agent, optional headers, retry, cookie, logging) to {@code policies}. */
    private void applyRequiredPolicies(List<HttpPipelinePolicy> policies) {
        RetryPolicy retryPolicy = this.retryPolicy != null ? this.retryPolicy : new RetryPolicy();

        policies.add(getUserAgentPolicy());

        if (this.clientOptions != null) {
            List<HttpHeader> httpHeaderList = new ArrayList<>();
            this.clientOptions.getHeaders().forEach(header ->
                httpHeaderList.add(new HttpHeader(header.getName(), header.getValue())));
            policies.add(new AddHeadersPolicy(new HttpHeaders(httpHeaderList)));
        }

        policies.add(retryPolicy);
        policies.add(new CookiePolicy());
        policies.add(new HttpLoggingPolicy(this.logOptions));
    }

    /** Builds the HTTP pipeline: auth policy first, then the required policies, then any custom policies. */
    private HttpPipeline createHttpPipeline(HttpClient httpClient, HttpPipelinePolicy authorizationPolicy,
        List<HttpPipelinePolicy> additionalPolicies) {
        List<HttpPipelinePolicy> policies = new ArrayList<HttpPipelinePolicy>();
        policies.add(authorizationPolicy);
        applyRequiredPolicies(policies);

        if (additionalPolicies != null && additionalPolicies.size() > 0) {
            policies.addAll(additionalPolicies);
        }

        return new HttpPipelineBuilder()
            .policies(policies.toArray(new HttpPipelinePolicy[0]))
            .httpClient(httpClient)
            .build();
    }

    /*
     * Creates a {@link UserAgentPolicy} using the default service module name and version.
     *
     * @return The default {@link UserAgentPolicy} for the module.
     */
    private UserAgentPolicy getUserAgentPolicy() {
        Map<String, String> properties = CoreUtils.getProperties(MIXED_REALITY_STS_PROPERTIES);
        String clientName = properties.getOrDefault(SDK_NAME, "UnknownName");
        String clientVersion = properties.getOrDefault(SDK_VERSION, "UnknownVersion");
        String applicationId = this.clientOptions == null
            ? this.logOptions.getApplicationId()
            : this.clientOptions.getApplicationId();

        return new UserAgentPolicy(applicationId, clientName, clientVersion, this.configuration);
    }
}
/**
 * Builder for {@link MixedRealityStsClient} and {@link MixedRealityStsAsyncClient} instances.
 * Exactly one credential type may be configured; {@code buildAsyncClient()} throws when both a
 * {@link TokenCredential} and an {@link AzureKeyCredential} have been set.
 */
class MixedRealityStsClientBuilder {
    private static final String MIXED_REALITY_STS_PROPERTIES = "azure-mixedreality-authentication.properties";
    private static final String SDK_NAME = "name";
    private static final String SDK_VERSION = "version";

    private final List<HttpPipelinePolicy> customPolicies = new ArrayList<HttpPipelinePolicy>();
    private final ClientLogger logger = new ClientLogger(MixedRealityStsClientBuilder.class);

    private String accountDomain;
    private String accountId;
    private MixedRealityStsServiceVersion apiVersion;
    private ClientOptions clientOptions;
    private Configuration configuration;
    private String endpoint;
    private HttpClient httpClient;
    private AzureKeyCredential keyCredential;
    private HttpLogOptions logOptions = new HttpLogOptions();
    private HttpPipeline pipeline;
    private RetryPolicy retryPolicy;
    private TokenCredential tokenCredential;

    /**
     * Constructs a new builder used to configure and build {@link MixedRealityStsClient MixedRealityStsClients} and
     * {@link MixedRealityStsAsyncClient MixedRealityStsAsyncClients}.
     */
    public MixedRealityStsClientBuilder() {
    }

    /**
     * Sets the Mixed Reality service account domain.
     *
     * @param accountDomain The Mixed Reality service account domain.
     * @return The updated {@link MixedRealityStsClientBuilder} object.
     * @throws IllegalArgumentException If {@code accountDomain} is null or empty.
     */
    public MixedRealityStsClientBuilder accountDomain(String accountDomain) {
        Objects.requireNonNull(accountDomain, "'accountDomain' cannot be null.");
        if (accountDomain.isEmpty()) {
            throw logger.logExceptionAsError(new IllegalArgumentException("'accountDomain' cannot be an empty string."));
        }
        this.accountDomain = accountDomain;
        return this;
    }

    /**
     * Sets the Mixed Reality service account identifier.
     *
     * @param accountId The Mixed Reality service account identifier. The value is expected to be in UUID format.
     * @return The updated {@link MixedRealityStsClientBuilder} object.
     * @throws IllegalArgumentException If {@code accountId} is null or empty.
     */
    public MixedRealityStsClientBuilder accountId(String accountId) {
        Objects.requireNonNull(accountId, "'accountId' cannot be null.");
        if (accountId.isEmpty()) {
            throw logger.logExceptionAsError(new IllegalArgumentException("'accountId' cannot be an empty string."));
        }
        this.accountId = accountId;
        return this;
    }

    /**
     * Apply additional {@link HttpPipelinePolicy policies}.
     *
     * @param customPolicy An HttpPipelinePolicy object to be applied after the defaults.
     * @return The updated {@link MixedRealityStsClientBuilder} object.
     */
    public MixedRealityStsClientBuilder addPolicy(HttpPipelinePolicy customPolicy) {
        this.customPolicies.add(Objects.requireNonNull(customPolicy, "'customPolicy' cannot be null."));
        return this;
    }

    /**
     * Create a {@link MixedRealityStsClient} based on options set in the builder. Every time {@code buildClient()}
     * is called a new instance of {@link MixedRealityStsClient} is created.
     *
     * @return A {@link MixedRealityStsClient} with the options set from the builder.
     */
    public MixedRealityStsClient buildClient() {
        return new MixedRealityStsClient(this.buildAsyncClient());
    }

    /**
     * Create a {@link MixedRealityStsAsyncClient} based on options set in the builder. Every time
     * {@code buildAsyncClient()} is called a new instance of {@link MixedRealityStsAsyncClient} is created.
     *
     * @return A {@link MixedRealityStsAsyncClient} with the options set from the builder.
     * @throws NullPointerException If any required values are null.
     * @throws IllegalArgumentException If the accountId or endpoint are not properly formatted, or if both a
     * token credential and a key credential have been configured.
     */
    public MixedRealityStsAsyncClient buildAsyncClient() {
        Objects.requireNonNull(this.accountId, "The 'accountId' has not been set and is required.");
        Objects.requireNonNull(this.accountDomain, "The 'accountDomain' has not been set and is required.");

        UUID accountId;
        try {
            accountId = UUID.fromString(this.accountId);
        } catch (IllegalArgumentException ex) {
            throw logger.logExceptionAsWarning(new IllegalArgumentException("The 'accountId' must be a UUID formatted value."));
        }

        // Prefer an explicit endpoint when supplied (must parse as a URL); otherwise derive it from the domain.
        String endpoint;
        if (this.endpoint != null) {
            try {
                new URL(this.endpoint);
                endpoint = this.endpoint;
            } catch (MalformedURLException ex) {
                throw logger.logExceptionAsWarning(new IllegalArgumentException("The 'endpoint' must be a valid URL."));
            }
        } else {
            endpoint = AuthenticationEndpoint.constructFromDomain(this.accountDomain);
        }

        // A user-supplied pipeline takes precedence over any credential configuration.
        if (this.pipeline == null) {
            // Reject ambiguous configuration: both credential types set.
            if (this.tokenCredential != null && this.keyCredential != null) {
                throw logger.logExceptionAsWarning(
                    new IllegalArgumentException("Only a single type of credential may be specified."));
            }

            // Adapt an account key credential into a token credential when no token credential was set.
            if (this.tokenCredential == null && this.keyCredential != null) {
                this.tokenCredential = new MixedRealityAccountKeyCredential(accountId, this.keyCredential);
            }

            Objects.requireNonNull(this.tokenCredential, "The 'credential' has not been set and is required.");

            String scope = AuthenticationEndpoint.constructScope(endpoint);
            HttpPipelinePolicy authPolicy = new BearerTokenAuthenticationPolicy(this.tokenCredential, scope);
            this.pipeline = createHttpPipeline(this.httpClient, authPolicy, this.customPolicies);
        }

        MixedRealityStsServiceVersion version;
        if (this.apiVersion != null) {
            version = this.apiVersion;
        } else {
            version = MixedRealityStsServiceVersion.getLatest();
        }

        MixedRealityStsRestClientImpl serviceClient = new MixedRealityStsRestClientImplBuilder()
            .apiVersion(version.getVersion())
            .pipeline(this.pipeline)
            .host(endpoint)
            .buildClient();

        return new MixedRealityStsAsyncClient(accountId, serviceClient);
    }

    /**
     * Sets the {@link ClientOptions} which enables various options to be set on the client, for example setting an
     * {@code applicationId} that is applied to the {@link UserAgentPolicy} for telemetry/monitoring purposes.
     *
     * @param clientOptions the {@link ClientOptions} to be set on the client.
     * @return The updated {@link MixedRealityStsClientBuilder} object.
     */
    public MixedRealityStsClientBuilder clientOptions(ClientOptions clientOptions) {
        this.clientOptions = clientOptions;
        return this;
    }

    /**
     * Sets the {@link TokenCredential} used to authenticate HTTP requests.
     *
     * <p>Note: this method was documented but missing from the class body; restored so that the Javadoc is no
     * longer orphaned and {@code tokenCredential} can be set directly. It does not clear {@code keyCredential};
     * {@code buildAsyncClient()} rejects configurations where both credential types are set.</p>
     *
     * @param tokenCredential The {@link TokenCredential} used to authenticate HTTP requests.
     * @return The updated {@link MixedRealityStsClientBuilder} object.
     * @throws NullPointerException If {@code tokenCredential} is null.
     */
    public MixedRealityStsClientBuilder credential(TokenCredential tokenCredential) {
        this.tokenCredential = Objects.requireNonNull(tokenCredential, "'tokenCredential' cannot be null.");
        return this;
    }

    /**
     * Sets the {@link AzureKeyCredential} used to authenticate HTTP requests.
     *
     * <p><b>Note:</b> Not recommended for production applications.</p>
     *
     * @param keyCredential The {@link AzureKeyCredential} used to authenticate HTTP requests.
     * @return The updated {@link MixedRealityStsClientBuilder} object.
     * @throws NullPointerException If {@code keyCredential} is null.
     */
    public MixedRealityStsClientBuilder credential(AzureKeyCredential keyCredential) {
        this.keyCredential = Objects.requireNonNull(keyCredential, "'keyCredential' cannot be null.");
        return this;
    }

    /**
     * Sets the configuration store that is used during construction of the service client.
     *
     * @param configuration The configuration store used by the client.
     * @return The updated MixedRealityStsClientBuilder object.
     */
    public MixedRealityStsClientBuilder configuration(Configuration configuration) {
        this.configuration = configuration;
        return this;
    }

    /**
     * Sets the Mixed Reality STS service endpoint.
     *
     * @param endpoint The Mixed Reality STS service endpoint.
     * @return The updated MixedRealityStsClientBuilder object.
     * @throws IllegalArgumentException If {@code endpoint} is null or it cannot be parsed into a valid URL.
     */
    public MixedRealityStsClientBuilder endpoint(String endpoint) {
        this.endpoint = endpoint;
        return this;
    }

    /**
     * Sets the HTTP client to use for sending and receiving requests to and from the service.
     *
     * @param client The HTTP client to use for requests.
     * @return The updated ConfigurationClientBuilder object.
     */
    public MixedRealityStsClientBuilder httpClient(HttpClient client) {
        if (this.httpClient != null && client == null) {
            logger.info("HttpClient is being set to 'null' when it was previously configured.");
        }
        this.httpClient = client;
        return this;
    }

    /**
     * Sets the {@link HttpLogOptions} for service requests.
     *
     * @param logOptions The logging configuration to use when sending and receiving HTTP requests/responses.
     * @return The updated {@link MixedRealityStsClientBuilder} object.
     */
    public MixedRealityStsClientBuilder httpLogOptions(HttpLogOptions logOptions) {
        this.logOptions = Objects.requireNonNull(logOptions, "'logOptions' cannot be null.");
        return this;
    }

    /**
     * Sets the HTTP pipeline to use for the service client. If {@code pipeline} is set, most other settings are
     * ignored when building the client.
     *
     * @param pipeline The HTTP pipeline to use for sending service requests and receiving responses.
     * @return The updated {@link MixedRealityStsClientBuilder} object.
     */
    public MixedRealityStsClientBuilder pipeline(HttpPipeline pipeline) {
        if (this.pipeline != null && pipeline == null) {
            logger.info("HttpPipeline is being set to 'null' when it was previously configured.");
        }
        this.pipeline = pipeline;
        return this;
    }

    /**
     * Sets the {@link RetryPolicy} that is used to retry requests. The default retry policy is used if not provided.
     *
     * @param retryPolicy The {@link RetryPolicy} that will be used to retry requests.
     * @return The updated MixedRealityStsClientBuilder object.
     */
    public MixedRealityStsClientBuilder retryPolicy(RetryPolicy retryPolicy) {
        this.retryPolicy = retryPolicy;
        return this;
    }

    /**
     * Sets the {@link MixedRealityStsServiceVersion} that is used when making API requests. If a service version is
     * not provided, the latest known service version for this client library is used.
     *
     * @param version {@link MixedRealityStsServiceVersion} of the service to be used when making requests.
     * @return The updated ConfigurationClientBuilder object.
     */
    public MixedRealityStsClientBuilder serviceVersion(MixedRealityStsServiceVersion version) {
        this.apiVersion = version;
        return this;
    }

    /** Appends the standard policy set (user agent, optional headers, retry, cookie, logging) to {@code policies}. */
    private void applyRequiredPolicies(List<HttpPipelinePolicy> policies) {
        RetryPolicy retryPolicy = this.retryPolicy != null ? this.retryPolicy : new RetryPolicy();

        policies.add(getUserAgentPolicy());

        if (this.clientOptions != null) {
            List<HttpHeader> httpHeaderList = new ArrayList<>();
            this.clientOptions.getHeaders().forEach(header ->
                httpHeaderList.add(new HttpHeader(header.getName(), header.getValue())));
            policies.add(new AddHeadersPolicy(new HttpHeaders(httpHeaderList)));
        }

        policies.add(retryPolicy);
        policies.add(new CookiePolicy());
        policies.add(new HttpLoggingPolicy(this.logOptions));
    }

    /** Builds the HTTP pipeline: auth policy first, then the required policies, then any custom policies. */
    private HttpPipeline createHttpPipeline(HttpClient httpClient, HttpPipelinePolicy authorizationPolicy,
        List<HttpPipelinePolicy> additionalPolicies) {
        List<HttpPipelinePolicy> policies = new ArrayList<HttpPipelinePolicy>();
        policies.add(authorizationPolicy);
        applyRequiredPolicies(policies);

        if (additionalPolicies != null && additionalPolicies.size() > 0) {
            policies.addAll(additionalPolicies);
        }

        return new HttpPipelineBuilder()
            .policies(policies.toArray(new HttpPipelinePolicy[0]))
            .httpClient(httpClient)
            .build();
    }

    /*
     * Creates a {@link UserAgentPolicy} using the default service module name and version.
     *
     * @return The default {@link UserAgentPolicy} for the module.
     */
    private UserAgentPolicy getUserAgentPolicy() {
        Map<String, String> properties = CoreUtils.getProperties(MIXED_REALITY_STS_PROPERTIES);
        String clientName = properties.getOrDefault(SDK_NAME, "UnknownName");
        String clientVersion = properties.getOrDefault(SDK_VERSION, "UnknownVersion");
        String applicationId = this.clientOptions == null
            ? this.logOptions.getApplicationId()
            : this.clientOptions.getApplicationId();

        return new UserAgentPolicy(applicationId, clientName, clientVersion, this.configuration);
    }
}
Ok. I'll throw an exception in the case that both are set.
public MixedRealityStsClientBuilder credential(TokenCredential tokenCredential) { this.tokenCredential = Objects.requireNonNull(tokenCredential, "'tokenCredential' cannot be null."); this.keyCredential = null; return this; }
this.keyCredential = null;
public MixedRealityStsClientBuilder credential(TokenCredential tokenCredential) { this.tokenCredential = Objects.requireNonNull(tokenCredential, "'tokenCredential' cannot be null."); return this; }
class MixedRealityStsClientBuilder { private static final String MIXED_REALITY_STS_PROPERTIES = "azure-mixedreality-authentication.properties"; private static final String SDK_NAME = "name"; private static final String SDK_VERSION = "version"; private final List<HttpPipelinePolicy> customPolicies = new ArrayList<HttpPipelinePolicy>(); private final ClientLogger logger = new ClientLogger(MixedRealityStsClientBuilder.class); private String accountDomain; private String accountId; private MixedRealityStsServiceVersion apiVersion; private ClientOptions clientOptions; private Configuration configuration; private String endpoint; private HttpClient httpClient; private AzureKeyCredential keyCredential; private HttpLogOptions logOptions = new HttpLogOptions(); private HttpPipeline pipeline; private RetryPolicy retryPolicy; private TokenCredential tokenCredential; /** * Constructs a new builder used to configure and build {@link MixedRealityStsClient MixedRealityStsClients} and * {@link MixedRealityStsAsyncClient MixedRealityStsAsyncClients}. */ public MixedRealityStsClientBuilder() { } /** * Sets the Mixed Reality service account domain. * * @param accountDomain The Mixed Reality service account domain. * @return The updated {@link MixedRealityStsClientBuilder} object. * @throws IllegalArgumentException If {@code accountDomain} is null or empty. */ public MixedRealityStsClientBuilder accountDomain(String accountDomain) { Objects.requireNonNull(accountDomain, "'accountDomain' cannot be null."); if (accountDomain.isEmpty()) { throw logger.logExceptionAsError(new IllegalArgumentException("'accountDomain' cannot be an empty string.")); } this.accountDomain = accountDomain; return this; } /** * Sets the Mixed Reality service account identifier. * * @param accountId The Mixed Reality service account identifier. The value is expected to be in UUID format. * @return The updated {@link MixedRealityStsClientBuilder} object. 
* @throws IllegalArgumentException If {@code accountId} is null or empty. */ public MixedRealityStsClientBuilder accountId(String accountId) { Objects.requireNonNull(accountId, "'accountId' cannot be null."); if (accountId.isEmpty()) { throw logger.logExceptionAsError(new IllegalArgumentException("'accountId' cannot be an empty string.")); } this.accountId = accountId; return this; } /** * Apply additional {@link HttpPipelinePolicy policies}. * * @param customPolicy An HttpPipelinePolicy object to be applied after the defaults. * @return The updated {@link MixedRealityStsClientBuilder} object. */ public MixedRealityStsClientBuilder addPolicy(HttpPipelinePolicy customPolicy) { this.customPolicies.add(Objects.requireNonNull(customPolicy, "'customPolicy' cannot be null.")); return this; } /** * Create a {@link MixedRealityStsClient} based on options set in the builder. Every time {@code buildClient()} is * called a new instance of {@link MixedRealityStsClient} is created. * * @return A {@link MixedRealityStsClient} with the options set from the builder. */ public MixedRealityStsClient buildClient() { return new MixedRealityStsClient(this.buildAsyncClient()); } /** * Create a {@link MixedRealityStsAsyncClient} based on options set in the builder. Every time {@code buildAsyncClient()} is * called a new instance of {@link MixedRealityStsAsyncClient} is created. * * @return A {@link MixedRealityStsAsyncClient} with the options set from the builder. * @throws NullPointerException If any required values are null. * @throws IllegalArgumentException If the accountId or endpoint are not properly formatted. 
*/ public MixedRealityStsAsyncClient buildAsyncClient() { Objects.requireNonNull(this.accountId, "The 'accountId' has not been set and is required."); Objects.requireNonNull(this.accountDomain, "The 'accountDomain' has not been set and is required."); UUID accountId; try { accountId = UUID.fromString(this.accountId); } catch (IllegalArgumentException ex) { throw logger.logExceptionAsWarning(new IllegalArgumentException("The 'accountId' must be a UUID formatted value.")); } String endpoint; if (this.endpoint != null) { try { new URL(this.endpoint); endpoint = this.endpoint; } catch (MalformedURLException ex) { throw logger.logExceptionAsWarning(new IllegalArgumentException("The 'endpoint' must be a valid URL.")); } } else { endpoint = AuthenticationEndpoint.constructFromDomain(this.accountDomain); } if (this.pipeline == null) { if (this.tokenCredential == null && this.keyCredential != null) { this.tokenCredential = new MixedRealityAccountKeyCredential(accountId, this.keyCredential); } Objects.requireNonNull(this.tokenCredential, "The 'credential' has not been set and is required."); String scope = AuthenticationEndpoint.constructScope(endpoint); HttpPipelinePolicy authPolicy = new BearerTokenAuthenticationPolicy(this.tokenCredential, scope); this.pipeline = createHttpPipeline(this.httpClient, authPolicy, this.customPolicies); } MixedRealityStsServiceVersion version; if (this.apiVersion != null) { version = this.apiVersion; } else { version = MixedRealityStsServiceVersion.getLatest(); } MixedRealityStsRestClientImpl serviceClient = new MixedRealityStsRestClientImplBuilder() .apiVersion(version.getVersion()) .pipeline(this.pipeline) .host(endpoint) .buildClient(); return new MixedRealityStsAsyncClient(accountId, serviceClient); } /** * Sets the {@link ClientOptions} which enables various options to be set on the client. For example setting an * {@code applicationId} using {@link ClientOptions * the {@link UserAgentPolicy} for telemetry/monitoring purposes. 
* * <p>More About <a href="https: * * @param clientOptions the {@link ClientOptions} to be set on the client. * @return The updated {@link MixedRealityStsClientBuilder} object. */ public MixedRealityStsClientBuilder clientOptions(ClientOptions clientOptions) { this.clientOptions = clientOptions; return this; } /** * Sets the {@link TokenCredential} used to authenticate HTTP requests. * * @param tokenCredential The {@link TokenCredential} used to authenticate HTTP requests. * @return The updated {@link MixedRealityStsClientBuilder} object. * @throws NullPointerException If {@code tokenCredential} is null. */ /** * Sets the {@link AzureKeyCredential} used to authenticate HTTP requests. * * <p> * <b>Note:</b> Not recommended for production applications. * * @param keyCredential The {@link AzureKeyCredential} used to authenticate HTTP requests. * @return The updated {@link MixedRealityStsClientBuilder} object. * @throws NullPointerException If {@code keyCredential} is null. */ public MixedRealityStsClientBuilder credential(AzureKeyCredential keyCredential) { this.keyCredential = Objects.requireNonNull(keyCredential, "'keyCredential' cannot be null."); this.tokenCredential = null; return this; } /** * Sets the configuration store that is used during construction of the service client. * * The default configuration store is a clone of the {@link Configuration * configuration store}, use {@link Configuration * * @param configuration The configuration store used to * @return The updated MixedRealityStsClientBuilder object. */ public MixedRealityStsClientBuilder configuration(Configuration configuration) { this.configuration = configuration; return this; } /** * Sets the Mixed Reality STS service endpoint. * * @param endpoint The Mixed Reality STS service endpoint. * @return The updated MixedRealityStsClientBuilder object. * @throws IllegalArgumentException If {@code endpoint} is null or it cannot be parsed into a valid URL. 
*/ public MixedRealityStsClientBuilder endpoint(String endpoint) { this.endpoint = endpoint; return this; } /** * Sets the HTTP client to use for sending and receiving requests to and from the service. * * @param client The HTTP client to use for requests. * @return The updated ConfigurationClientBuilder object. */ public MixedRealityStsClientBuilder httpClient(HttpClient client) { if (this.httpClient != null && client == null) { logger.info("HttpClient is being set to 'null' when it was previously configured."); } this.httpClient = client; return this; } /** * Sets the {@link HttpLogOptions} for service requests. * * @param logOptions The logging configuration to use when sending and receiving HTTP requests/responses. * @return The updated {@link MixedRealityStsClientBuilder} object. */ public MixedRealityStsClientBuilder httpLogOptions(HttpLogOptions logOptions) { this.logOptions = Objects.requireNonNull(logOptions, "'logOptions' cannot be null."); return this; } /** * Sets the HTTP pipeline to use for the service client. * <p> * If {@code pipeline} is set, all other settings are ignored, aside from {@link * MixedRealityStsClientBuilder * MixedRealityStsClient}. * * @param pipeline The HTTP pipeline to use for sending service requests and receiving responses. * @return The updated {@link MixedRealityStsClientBuilder} object. */ public MixedRealityStsClientBuilder pipeline(HttpPipeline pipeline) { if (this.pipeline != null && pipeline == null) { logger.info("HttpPipeline is being set to 'null' when it was previously configured."); } this.pipeline = pipeline; return this; } /** * Sets the {@link RetryPolicy} that is used to retry requests. * <p> * The default retry policy will be used if not provided {@link MixedRealityStsClientBuilder * build {@link MixedRealityStsAsyncClient} or {@link MixedRealityStsClient}. * * @param retryPolicy The {@link RetryPolicy} that will be used to retry requests. * @return The updated MixedRealityStsClientBuilder object. 
*/ public MixedRealityStsClientBuilder retryPolicy(RetryPolicy retryPolicy) { this.retryPolicy = retryPolicy; return this; } /** * Sets the {@link MixedRealityStsServiceVersion} that is used when making API requests. * <p> * If a service version is not provided, the service version that will be used will be the latest known service * version based on the version of the client library being used. If no service version is specified, updating to a * newer version the client library will have the result of potentially moving to a newer service version. * * @param version {@link MixedRealityStsServiceVersion} of the service to be used when making requests. * @return The updated ConfigurationClientBuilder object. */ public MixedRealityStsClientBuilder serviceVersion(MixedRealityStsServiceVersion version) { this.apiVersion = version; return this; } private void applyRequiredPolicies(List<HttpPipelinePolicy> policies) { RetryPolicy retryPolicy = this.retryPolicy != null ? this.retryPolicy : new RetryPolicy(); policies.add(getUserAgentPolicy()); if (this.clientOptions != null) { List<HttpHeader> httpHeaderList = new ArrayList<>(); this.clientOptions.getHeaders().forEach(header -> httpHeaderList.add(new HttpHeader(header.getName(), header.getValue()))); policies.add(new AddHeadersPolicy(new HttpHeaders(httpHeaderList))); } policies.add(retryPolicy); policies.add(new CookiePolicy()); policies.add(new HttpLoggingPolicy(this.logOptions)); } private HttpPipeline createHttpPipeline(HttpClient httpClient, HttpPipelinePolicy authorizationPolicy, List<HttpPipelinePolicy> additionalPolicies) { List<HttpPipelinePolicy> policies = new ArrayList<HttpPipelinePolicy>(); policies.add(authorizationPolicy); applyRequiredPolicies(policies); if (additionalPolicies != null && additionalPolicies.size() > 0) { policies.addAll(additionalPolicies); } return new HttpPipelineBuilder() .policies(policies.toArray(new HttpPipelinePolicy[0])) .httpClient(httpClient) .build(); } /* * Creates a {@link 
UserAgentPolicy} using the default service module name and version. * * @return The default {@link UserAgentPolicy} for the module. */ private UserAgentPolicy getUserAgentPolicy() { Map<String, String> properties = CoreUtils.getProperties(MIXED_REALITY_STS_PROPERTIES); String clientName = properties.getOrDefault(SDK_NAME, "UnknownName"); String clientVersion = properties.getOrDefault(SDK_VERSION, "UnknownVersion"); String applicationId = this.clientOptions == null ? this.logOptions.getApplicationId() : this.clientOptions.getApplicationId(); return new UserAgentPolicy( applicationId, clientName, clientVersion, this.configuration); } }
/**
 * Builder used to configure and build {@link MixedRealityStsClient MixedRealityStsClients} and
 * {@link MixedRealityStsAsyncClient MixedRealityStsAsyncClients}.
 */
class MixedRealityStsClientBuilder {
    private static final String MIXED_REALITY_STS_PROPERTIES = "azure-mixedreality-authentication.properties";
    private static final String SDK_NAME = "name";
    private static final String SDK_VERSION = "version";

    private final List<HttpPipelinePolicy> customPolicies = new ArrayList<HttpPipelinePolicy>();
    private final ClientLogger logger = new ClientLogger(MixedRealityStsClientBuilder.class);

    private String accountDomain;
    private String accountId;
    private MixedRealityStsServiceVersion apiVersion;
    private ClientOptions clientOptions;
    private Configuration configuration;
    private String endpoint;
    private HttpClient httpClient;
    private AzureKeyCredential keyCredential;
    private HttpLogOptions logOptions = new HttpLogOptions();
    private HttpPipeline pipeline;
    private RetryPolicy retryPolicy;
    private TokenCredential tokenCredential;

    /**
     * Constructs a new builder used to configure and build {@link MixedRealityStsClient MixedRealityStsClients} and
     * {@link MixedRealityStsAsyncClient MixedRealityStsAsyncClients}.
     */
    public MixedRealityStsClientBuilder() {
    }

    /**
     * Sets the Mixed Reality service account domain.
     *
     * @param accountDomain The Mixed Reality service account domain.
     * @return The updated {@link MixedRealityStsClientBuilder} object.
     * @throws IllegalArgumentException If {@code accountDomain} is null or empty.
     */
    public MixedRealityStsClientBuilder accountDomain(String accountDomain) {
        Objects.requireNonNull(accountDomain, "'accountDomain' cannot be null.");

        if (accountDomain.isEmpty()) {
            throw logger.logExceptionAsError(new IllegalArgumentException("'accountDomain' cannot be an empty string."));
        }

        this.accountDomain = accountDomain;
        return this;
    }

    /**
     * Sets the Mixed Reality service account identifier.
     *
     * @param accountId The Mixed Reality service account identifier. The value is expected to be in UUID format.
     * @return The updated {@link MixedRealityStsClientBuilder} object.
     * @throws IllegalArgumentException If {@code accountId} is null or empty.
     */
    public MixedRealityStsClientBuilder accountId(String accountId) {
        Objects.requireNonNull(accountId, "'accountId' cannot be null.");

        if (accountId.isEmpty()) {
            throw logger.logExceptionAsError(new IllegalArgumentException("'accountId' cannot be an empty string."));
        }

        this.accountId = accountId;
        return this;
    }

    /**
     * Apply additional {@link HttpPipelinePolicy policies}.
     *
     * @param customPolicy An HttpPipelinePolicy object to be applied after the defaults.
     * @return The updated {@link MixedRealityStsClientBuilder} object.
     */
    public MixedRealityStsClientBuilder addPolicy(HttpPipelinePolicy customPolicy) {
        this.customPolicies.add(Objects.requireNonNull(customPolicy, "'customPolicy' cannot be null."));
        return this;
    }

    /**
     * Create a {@link MixedRealityStsClient} based on options set in the builder. Every time {@code buildClient()} is
     * called a new instance of {@link MixedRealityStsClient} is created.
     *
     * @return A {@link MixedRealityStsClient} with the options set from the builder.
     */
    public MixedRealityStsClient buildClient() {
        return new MixedRealityStsClient(this.buildAsyncClient());
    }

    /**
     * Create a {@link MixedRealityStsAsyncClient} based on options set in the builder. Every time
     * {@code buildAsyncClient()} is called a new instance of {@link MixedRealityStsAsyncClient} is created.
     *
     * @return A {@link MixedRealityStsAsyncClient} with the options set from the builder.
     * @throws NullPointerException If any required values are null.
     * @throws IllegalArgumentException If the accountId or endpoint are not properly formatted.
     */
    public MixedRealityStsAsyncClient buildAsyncClient() {
        Objects.requireNonNull(this.accountId, "The 'accountId' has not been set and is required.");
        Objects.requireNonNull(this.accountDomain, "The 'accountDomain' has not been set and is required.");

        UUID accountId;

        try {
            accountId = UUID.fromString(this.accountId);
        } catch (IllegalArgumentException ex) {
            // FIX: preserve the original parse failure as the cause so the invalid value can be diagnosed.
            throw logger.logExceptionAsWarning(
                new IllegalArgumentException("The 'accountId' must be a UUID formatted value.", ex));
        }

        String endpoint;

        if (this.endpoint != null) {
            try {
                // Validation only; the URL instance itself is not used.
                new URL(this.endpoint);
                endpoint = this.endpoint;
            } catch (MalformedURLException ex) {
                // FIX: preserve the original parse failure as the cause.
                throw logger.logExceptionAsWarning(
                    new IllegalArgumentException("The 'endpoint' must be a valid URL.", ex));
            }
        } else {
            endpoint = AuthenticationEndpoint.constructFromDomain(this.accountDomain);
        }

        if (this.pipeline == null) {
            if (this.tokenCredential != null && this.keyCredential != null) {
                throw logger.logExceptionAsWarning(
                    new IllegalArgumentException("Only a single type of credential may be specified."));
            }

            if (this.tokenCredential == null && this.keyCredential != null) {
                // Wrap the account key into a TokenCredential so a single auth policy handles both cases.
                this.tokenCredential = new MixedRealityAccountKeyCredential(accountId, this.keyCredential);
            }

            Objects.requireNonNull(this.tokenCredential, "The 'credential' has not been set and is required.");

            String scope = AuthenticationEndpoint.constructScope(endpoint);
            HttpPipelinePolicy authPolicy = new BearerTokenAuthenticationPolicy(this.tokenCredential, scope);
            this.pipeline = createHttpPipeline(this.httpClient, authPolicy, this.customPolicies);
        }

        MixedRealityStsServiceVersion version;

        if (this.apiVersion != null) {
            version = this.apiVersion;
        } else {
            version = MixedRealityStsServiceVersion.getLatest();
        }

        MixedRealityStsRestClientImpl serviceClient = new MixedRealityStsRestClientImplBuilder()
            .apiVersion(version.getVersion())
            .pipeline(this.pipeline)
            .host(endpoint)
            .buildClient();

        return new MixedRealityStsAsyncClient(accountId, serviceClient);
    }

    /**
     * Sets the {@link ClientOptions} which enables various options to be set on the client. For example setting an
     * {@code applicationId} using {@link ClientOptions#setApplicationId(String)} to configure
     * the {@link UserAgentPolicy} for telemetry/monitoring purposes.
     *
     * @param clientOptions the {@link ClientOptions} to be set on the client.
     * @return The updated {@link MixedRealityStsClientBuilder} object.
     */
    public MixedRealityStsClientBuilder clientOptions(ClientOptions clientOptions) {
        this.clientOptions = clientOptions;
        return this;
    }

    /**
     * Sets the {@link TokenCredential} used to authenticate HTTP requests.
     *
     * @param tokenCredential The {@link TokenCredential} used to authenticate HTTP requests.
     * @return The updated {@link MixedRealityStsClientBuilder} object.
     * @throws NullPointerException If {@code tokenCredential} is null.
     */
    // FIX: this setter was missing although its javadoc was present and the builder both consumes and
    // validates 'tokenCredential'; without it callers could never supply a TokenCredential directly.
    public MixedRealityStsClientBuilder credential(TokenCredential tokenCredential) {
        this.tokenCredential = Objects.requireNonNull(tokenCredential, "'tokenCredential' cannot be null.");
        return this;
    }

    /**
     * Sets the {@link AzureKeyCredential} used to authenticate HTTP requests.
     *
     * <p>
     * <b>Note:</b> Not recommended for production applications.
     *
     * @param keyCredential The {@link AzureKeyCredential} used to authenticate HTTP requests.
     * @return The updated {@link MixedRealityStsClientBuilder} object.
     * @throws NullPointerException If {@code keyCredential} is null.
     */
    public MixedRealityStsClientBuilder credential(AzureKeyCredential keyCredential) {
        // Intentionally does not clear 'tokenCredential': buildAsyncClient rejects the case where both are set.
        this.keyCredential = Objects.requireNonNull(keyCredential, "'keyCredential' cannot be null.");
        return this;
    }

    /**
     * Sets the configuration store that is used during construction of the service client.
     *
     * @param configuration The configuration store used by the client.
     * @return The updated MixedRealityStsClientBuilder object.
     */
    public MixedRealityStsClientBuilder configuration(Configuration configuration) {
        this.configuration = configuration;
        return this;
    }

    /**
     * Sets the Mixed Reality STS service endpoint.
     *
     * @param endpoint The Mixed Reality STS service endpoint.
     * @return The updated MixedRealityStsClientBuilder object.
     * @throws IllegalArgumentException If {@code endpoint} is null or it cannot be parsed into a valid URL.
     */
    public MixedRealityStsClientBuilder endpoint(String endpoint) {
        this.endpoint = endpoint;
        return this;
    }

    /**
     * Sets the HTTP client to use for sending and receiving requests to and from the service.
     *
     * @param client The HTTP client to use for requests.
     * @return The updated MixedRealityStsClientBuilder object.
     */
    public MixedRealityStsClientBuilder httpClient(HttpClient client) {
        if (this.httpClient != null && client == null) {
            logger.info("HttpClient is being set to 'null' when it was previously configured.");
        }

        this.httpClient = client;
        return this;
    }

    /**
     * Sets the {@link HttpLogOptions} for service requests.
     *
     * @param logOptions The logging configuration to use when sending and receiving HTTP requests/responses.
     * @return The updated {@link MixedRealityStsClientBuilder} object.
     */
    public MixedRealityStsClientBuilder httpLogOptions(HttpLogOptions logOptions) {
        this.logOptions = Objects.requireNonNull(logOptions, "'logOptions' cannot be null.");
        return this;
    }

    /**
     * Sets the HTTP pipeline to use for the service client.
     * <p>
     * If {@code pipeline} is set, all other pipeline-related settings are ignored when building the client.
     *
     * @param pipeline The HTTP pipeline to use for sending service requests and receiving responses.
     * @return The updated {@link MixedRealityStsClientBuilder} object.
     */
    public MixedRealityStsClientBuilder pipeline(HttpPipeline pipeline) {
        if (this.pipeline != null && pipeline == null) {
            logger.info("HttpPipeline is being set to 'null' when it was previously configured.");
        }

        this.pipeline = pipeline;
        return this;
    }

    /**
     * Sets the {@link RetryPolicy} that is used to retry requests.
     * <p>
     * The default retry policy will be used if not provided when building the client.
     *
     * @param retryPolicy The {@link RetryPolicy} that will be used to retry requests.
     * @return The updated MixedRealityStsClientBuilder object.
     */
    public MixedRealityStsClientBuilder retryPolicy(RetryPolicy retryPolicy) {
        this.retryPolicy = retryPolicy;
        return this;
    }

    /**
     * Sets the {@link MixedRealityStsServiceVersion} that is used when making API requests.
     * <p>
     * If a service version is not provided, the latest known service version for this client library is used.
     *
     * @param version {@link MixedRealityStsServiceVersion} of the service to be used when making requests.
     * @return The updated MixedRealityStsClientBuilder object.
     */
    public MixedRealityStsClientBuilder serviceVersion(MixedRealityStsServiceVersion version) {
        this.apiVersion = version;
        return this;
    }

    // Adds the policies every pipeline needs: user agent, optional custom headers, retry, cookies, logging.
    private void applyRequiredPolicies(List<HttpPipelinePolicy> policies) {
        RetryPolicy retryPolicy = this.retryPolicy != null ? this.retryPolicy : new RetryPolicy();

        policies.add(getUserAgentPolicy());

        if (this.clientOptions != null) {
            List<HttpHeader> httpHeaderList = new ArrayList<>();
            this.clientOptions.getHeaders().forEach(header ->
                httpHeaderList.add(new HttpHeader(header.getName(), header.getValue())));
            policies.add(new AddHeadersPolicy(new HttpHeaders(httpHeaderList)));
        }

        policies.add(retryPolicy);
        policies.add(new CookiePolicy());
        policies.add(new HttpLoggingPolicy(this.logOptions));
    }

    // Builds the pipeline: auth policy first, then required policies, then any caller-supplied policies.
    private HttpPipeline createHttpPipeline(HttpClient httpClient, HttpPipelinePolicy authorizationPolicy,
        List<HttpPipelinePolicy> additionalPolicies) {
        List<HttpPipelinePolicy> policies = new ArrayList<HttpPipelinePolicy>();
        policies.add(authorizationPolicy);
        applyRequiredPolicies(policies);

        if (additionalPolicies != null && additionalPolicies.size() > 0) {
            policies.addAll(additionalPolicies);
        }

        return new HttpPipelineBuilder()
            .policies(policies.toArray(new HttpPipelinePolicy[0]))
            .httpClient(httpClient)
            .build();
    }

    /*
     * Creates a {@link UserAgentPolicy} using the default service module name and version.
     *
     * @return The default {@link UserAgentPolicy} for the module.
     */
    private UserAgentPolicy getUserAgentPolicy() {
        Map<String, String> properties = CoreUtils.getProperties(MIXED_REALITY_STS_PROPERTIES);
        String clientName = properties.getOrDefault(SDK_NAME, "UnknownName");
        String clientVersion = properties.getOrDefault(SDK_VERSION, "UnknownVersion");
        String applicationId = this.clientOptions == null
            ? this.logOptions.getApplicationId() : this.clientOptions.getApplicationId();

        return new UserAgentPolicy(applicationId, clientName, clientVersion, this.configuration);
    }
}
No assertions here: a failed assertion would throw during initialization and prevent the application from starting.
/**
 * Sends a telemetry event describing this auto-configuration, when telemetry is allowed.
 *
 * <p>No assertions are made here: a failing {@code Assert} would throw during bean
 * initialization and prevent the application from starting. Telemetry must be best-effort
 * and never block startup.
 */
private void sendTelemetry() {
    if (properties.isAllowTelemetry()) {
        final Map<String, String> events = new HashMap<>();
        final TelemetrySender sender = new TelemetrySender();
        // Read the tenant name as-is; do NOT validate it with Assert.hasText here (see javadoc).
        String tenantName = properties.getTenantName();
        events.put(SERVICE_NAME, getClassPackageSimpleName(AADB2CAutoConfiguration.class));
        events.put(TENANT_NAME, tenantName);
        sender.send(ClassUtils.getUserClass(getClass()).getSimpleName(), events);
    }
}
Assert.hasText(tenantName, "tenant name should contains text.");
/**
 * Sends a telemetry event describing this auto-configuration, but only when telemetry
 * collection is allowed by the configured properties.
 */
private void sendTelemetry() {
    // Guard clause: bail out early when telemetry is disabled.
    if (!properties.isAllowTelemetry()) {
        return;
    }
    final Map<String, String> telemetryEvents = new HashMap<>();
    telemetryEvents.put(SERVICE_NAME, getClassPackageSimpleName(AADB2CAutoConfiguration.class));
    telemetryEvents.put(TENANT_NAME, properties.getTenant());
    // Report under the user-facing class name (unwraps any proxy/CGLIB subclass).
    new TelemetrySender().send(ClassUtils.getUserClass(getClass()).getSimpleName(), telemetryEvents);
}
/**
 * Auto-configuration for Azure AD B2C: wires the authorization request resolver, the logout
 * success handler, and the OIDC login configurer, plus (in a nested configuration) the B2C
 * user-flow {@link ClientRegistration}s.
 */
class AADB2CAutoConfiguration {
    private final ClientRegistrationRepository repository;
    private final AADB2CProperties properties;

    public AADB2CAutoConfiguration(@NonNull ClientRegistrationRepository repository,
                                   @NonNull AADB2CProperties properties) {
        this.repository = repository;
        this.properties = properties;
    }

    @Bean
    @ConditionalOnMissingBean
    public AADB2CAuthorizationRequestResolver b2cOAuth2AuthorizationRequestResolver() {
        return new AADB2CAuthorizationRequestResolver(repository, properties);
    }

    @Bean
    @ConditionalOnMissingBean
    public AADB2CLogoutSuccessHandler b2cLogoutSuccessHandler() {
        return new AADB2CLogoutSuccessHandler(properties);
    }

    @Bean
    @ConditionalOnMissingBean
    public AADB2COidcLoginConfigurer b2cLoginConfigurer(AADB2CLogoutSuccessHandler handler,
                                                        AADB2CAuthorizationRequestResolver resolver) {
        return new AADB2COidcLoginConfigurer(handler, resolver);
    }

    // FIX: removed a dangling @PostConstruct annotation that immediately preceded the nested class
    // below. @PostConstruct targets methods only, so annotating a class declaration does not compile;
    // it appears to be a leftover from a removed initialization method.

    /**
     * Automatic configuration class of AADB2COidc.
     */
    @Configuration
    @ConditionalOnResource(resources = "classpath:aadb2c.enable.config")
    @ConditionalOnProperty(prefix = AADB2CProperties.PREFIX, value = "oidc-enabled", havingValue = "true",
        matchIfMissing = true)
    public static class AADB2COidcAutoConfiguration {
        private final AADB2CProperties properties;

        public AADB2COidcAutoConfiguration(@NonNull AADB2CProperties properties) {
            this.properties = properties;
        }

        // Registers a client registration for the user flow only when the flow name is configured.
        private void addB2CClientRegistration(@NonNull List<ClientRegistration> registrations, String userFlow) {
            if (StringUtils.hasText(userFlow)) {
                registrations.add(b2cClientRegistration(userFlow));
            }
        }

        /**
         * Builds the repository of B2C client registrations, separating sign-up/sign-in flows
         * from the other flows (profile edit, password reset).
         */
        @Bean
        @ConditionalOnMissingBean
        public ClientRegistrationRepository clientRegistrationRepository() {
            final List<ClientRegistration> signUpOrSignInRegistrations = new ArrayList<>(3);
            final List<ClientRegistration> otherRegistrations = new ArrayList<>();

            addB2CClientRegistration(signUpOrSignInRegistrations, properties.getUserFlows().getSignUpOrSignIn());
            addB2CClientRegistration(signUpOrSignInRegistrations, properties.getUserFlows().getSignIn());
            addB2CClientRegistration(signUpOrSignInRegistrations, properties.getUserFlows().getSignUp());
            addB2CClientRegistration(otherRegistrations, properties.getUserFlows().getProfileEdit());
            addB2CClientRegistration(otherRegistrations, properties.getUserFlows().getPasswordReset());

            return new AADB2CClientRegistrationRepository(signUpOrSignInRegistrations, otherRegistrations);
        }

        // Builds one OIDC client registration per B2C user flow; URLs are derived from the base URI.
        private ClientRegistration b2cClientRegistration(String userFlow) {
            Assert.hasText(userFlow, "User flow should contains text.");

            return ClientRegistration.withRegistrationId(userFlow)
                .clientId(properties.getClientId())
                .clientSecret(properties.getClientSecret())
                .clientAuthenticationMethod(ClientAuthenticationMethod.POST)
                .authorizationGrantType(AuthorizationGrantType.AUTHORIZATION_CODE)
                .redirectUriTemplate(properties.getReplyUrl())
                .scope(properties.getClientId(), "openid")
                .authorizationUri(AADB2CURL.getAuthorizationUrl(properties.getBaseUri()))
                .tokenUri(AADB2CURL.getTokenUrl(properties.getBaseUri(), userFlow))
                .jwkSetUri(AADB2CURL.getJwkSetUrl(properties.getBaseUri(), userFlow))
                .userNameAttributeName(properties.getUserNameAttributeName())
                .clientName(userFlow)
                .build();
        }
    }
}
/**
 * Auto-configuration for Azure AD B2C. Exposes the authorization request resolver, logout
 * success handler, and OIDC login configurer beans, and — via the nested configuration —
 * the per-user-flow {@link ClientRegistration} repository.
 */
class AADB2CAutoConfiguration {
    private final ClientRegistrationRepository repository;
    private final AADB2CProperties properties;

    public AADB2CAutoConfiguration(@NonNull ClientRegistrationRepository repository,
                                   @NonNull AADB2CProperties properties) {
        this.repository = repository;
        this.properties = properties;
    }

    @Bean
    @ConditionalOnMissingBean
    public AADB2CAuthorizationRequestResolver b2cOAuth2AuthorizationRequestResolver() {
        return new AADB2CAuthorizationRequestResolver(repository, properties);
    }

    @Bean
    @ConditionalOnMissingBean
    public AADB2CLogoutSuccessHandler b2cLogoutSuccessHandler() {
        return new AADB2CLogoutSuccessHandler(properties);
    }

    @Bean
    @ConditionalOnMissingBean
    public AADB2COidcLoginConfigurer b2cLoginConfigurer(AADB2CLogoutSuccessHandler handler,
                                                        AADB2CAuthorizationRequestResolver resolver) {
        return new AADB2COidcLoginConfigurer(handler, resolver);
    }

    // FIX: dropped a stray @PostConstruct that sat directly before the nested class declaration.
    // @PostConstruct is only valid on methods, so the annotation would not compile on a class and
    // looks like a remnant of a deleted initialization method.

    /**
     * Automatic configuration class of AADB2COidc.
     */
    @Configuration
    @ConditionalOnResource(resources = "classpath:aadb2c.enable.config")
    @ConditionalOnProperty(prefix = AADB2CProperties.PREFIX, value = "oidc-enabled", havingValue = "true",
        matchIfMissing = true)
    public static class AADB2COidcAutoConfiguration {
        private final AADB2CProperties properties;

        public AADB2COidcAutoConfiguration(@NonNull AADB2CProperties properties) {
            this.properties = properties;
        }

        // Adds a registration only for user flows that are actually configured (non-blank).
        private void addB2CClientRegistration(@NonNull List<ClientRegistration> registrations, String userFlow) {
            if (StringUtils.hasText(userFlow)) {
                registrations.add(b2cClientRegistration(userFlow));
            }
        }

        /**
         * Builds the client registration repository, keeping sign-up/sign-in flows separate
         * from profile-edit and password-reset flows.
         */
        @Bean
        @ConditionalOnMissingBean
        public ClientRegistrationRepository clientRegistrationRepository() {
            final List<ClientRegistration> signUpOrSignInRegistrations = new ArrayList<>(3);
            final List<ClientRegistration> otherRegistrations = new ArrayList<>();

            addB2CClientRegistration(signUpOrSignInRegistrations, properties.getUserFlows().getSignUpOrSignIn());
            addB2CClientRegistration(signUpOrSignInRegistrations, properties.getUserFlows().getSignIn());
            addB2CClientRegistration(signUpOrSignInRegistrations, properties.getUserFlows().getSignUp());
            addB2CClientRegistration(otherRegistrations, properties.getUserFlows().getProfileEdit());
            addB2CClientRegistration(otherRegistrations, properties.getUserFlows().getPasswordReset());

            return new AADB2CClientRegistrationRepository(signUpOrSignInRegistrations, otherRegistrations);
        }

        // Creates the OIDC client registration for a single B2C user flow.
        private ClientRegistration b2cClientRegistration(String userFlow) {
            Assert.hasText(userFlow, "User flow should contains text.");

            return ClientRegistration.withRegistrationId(userFlow)
                .clientId(properties.getClientId())
                .clientSecret(properties.getClientSecret())
                .clientAuthenticationMethod(ClientAuthenticationMethod.POST)
                .authorizationGrantType(AuthorizationGrantType.AUTHORIZATION_CODE)
                .redirectUriTemplate(properties.getReplyUrl())
                .scope(properties.getClientId(), "openid")
                .authorizationUri(AADB2CURL.getAuthorizationUrl(properties.getBaseUri()))
                .tokenUri(AADB2CURL.getTokenUrl(properties.getBaseUri(), userFlow))
                .jwkSetUri(AADB2CURL.getJwkSetUrl(properties.getBaseUri(), userFlow))
                .userNameAttributeName(properties.getUserNameAttributeName())
                .clientName(userFlow)
                .build();
        }
    }
}
:disappointed:
/**
 * Creates a new JacksonAdapter instance with default mapper settings.
 *
 * <p>Four mappers are configured: a plain JSON mapper, a case-insensitive mapper for HTTP
 * headers, an XML mapper, and the main mapper which layers flattening and
 * additional-properties support on top of the plain mapper.
 */
public JacksonAdapter() {
    // Plain JSON mapper with the shared default configuration and no flattening.
    simpleMapper = initializeMapperBuilder(JsonMapper.builder()).build();

    // Header mapper matches properties case-insensitively, since HTTP header names are case-insensitive.
    headerMapper = initializeMapperBuilder(JsonMapper.builder())
        .enable(MapperFeature.ACCEPT_CASE_INSENSITIVE_PROPERTIES)
        .build();

    // XML mapper: no wrapper elements, emit the XML declaration, and treat empty elements as null.
    // The empty-element feature default flipped from true to false in Jackson 2.12, so it is
    // enabled explicitly to keep the pre-2.12 behavior.
    xmlMapper = initializeMapperBuilder(XmlMapper.builder())
        .defaultUseWrapper(false)
        .enable(ToXmlGenerator.Feature.WRITE_XML_DECLARATION)
        .enable(FromXmlParser.Feature.EMPTY_ELEMENT_AS_NULL)
        .build();

    // Intermediate mapper that only understands @JsonFlatten handling.
    ObjectMapper flattening = initializeMapperBuilder(JsonMapper.builder())
        .addModule(FlatteningSerializer.getModule(simpleMapper()))
        .addModule(FlatteningDeserializer.getModule(simpleMapper()))
        .build();

    // Main mapper: additional-properties support delegates to the flattening mapper, and
    // flattening itself is registered as well, in the same order as before.
    mapper = initializeMapperBuilder(JsonMapper.builder())
        .addModule(AdditionalPropertiesSerializer.getModule(flattening))
        .addModule(AdditionalPropertiesDeserializer.getModule(flattening))
        .addModule(FlatteningSerializer.getModule(simpleMapper()))
        .addModule(FlatteningDeserializer.getModule(simpleMapper()))
        .build();
}
* https:
/**
 * Creates a new JacksonAdapter instance with default mapper settings.
 *
 * <p>Initializes the simple JSON mapper, a case-insensitive mapper for HTTP headers, an XML
 * mapper, and the main mapper that adds flattening and additional-properties handling.
 */
public JacksonAdapter() {
    // Plain JSON mapper: shared defaults only, no flattening modules.
    this.simpleMapper = initializeMapperBuilder(JsonMapper.builder())
        .build();
    // HTTP header names are case-insensitive, so this mapper accepts case-insensitive properties.
    this.headerMapper = initializeMapperBuilder(JsonMapper.builder())
        .enable(MapperFeature.ACCEPT_CASE_INSENSITIVE_PROPERTIES)
        .build();
    this.xmlMapper = initializeMapperBuilder(XmlMapper.builder())
        .defaultUseWrapper(false)
        .enable(ToXmlGenerator.Feature.WRITE_XML_DECLARATION)
        /*
         * In Jackson 2.12 the default value of this feature changed from true to false, so it is
         * enabled explicitly to keep the earlier behavior of reading empty XML elements as null.
         */
        .enable(FromXmlParser.Feature.EMPTY_ELEMENT_AS_NULL)
        .build();
    // Intermediate mapper that only registers the @JsonFlatten serializer/deserializer modules.
    ObjectMapper flatteningMapper = initializeMapperBuilder(JsonMapper.builder())
        .addModule(FlatteningSerializer.getModule(simpleMapper()))
        .addModule(FlatteningDeserializer.getModule(simpleMapper()))
        .build();
    // Main mapper: additional-properties modules delegate to the flattening mapper above.
    this.mapper = initializeMapperBuilder(JsonMapper.builder())
        .addModule(AdditionalPropertiesSerializer.getModule(flatteningMapper))
        .addModule(AdditionalPropertiesDeserializer.getModule(flatteningMapper))
        .addModule(FlatteningSerializer.getModule(simpleMapper()))
        .addModule(FlatteningDeserializer.getModule(simpleMapper()))
        .build();
}
class JacksonAdapter implements SerializerAdapter { private static final Pattern PATTERN = Pattern.compile("^\"*|\"*$"); private final ClientLogger logger = new ClientLogger(JacksonAdapter.class); /** * An instance of {@link ObjectMapper} to serialize/deserialize objects. */ private final ObjectMapper mapper; /** * An instance of {@link ObjectMapper} that does not do flattening. */ private final ObjectMapper simpleMapper; private final ObjectMapper xmlMapper; private final ObjectMapper headerMapper; /* * The lazily-created serializer for this ServiceClient. */ private static SerializerAdapter serializerAdapter; private final Map<Type, JavaType> typeToJavaTypeCache = new ConcurrentHashMap<>(); /** * Creates a new JacksonAdapter instance with default mapper settings. */ /** * Gets a static instance of {@link ObjectMapper} that doesn't handle flattening. * * @return an instance of {@link ObjectMapper}. */ protected ObjectMapper simpleMapper() { return simpleMapper; } /** * maintain singleton instance of the default serializer adapter. 
* * @return the default serializer */ public static synchronized SerializerAdapter createDefaultSerializerAdapter() { if (serializerAdapter == null) { serializerAdapter = new JacksonAdapter(); } return serializerAdapter; } /** * @return the original serializer type */ public ObjectMapper serializer() { return mapper; } @Override public String serialize(Object object, SerializerEncoding encoding) throws IOException { if (object == null) { return null; } ByteArrayOutputStream stream = new AccessibleByteArrayOutputStream(); serialize(object, encoding, stream); return new String(stream.toByteArray(), 0, stream.size(), StandardCharsets.UTF_8); } @Override public void serialize(Object object, SerializerEncoding encoding, OutputStream outputStream) throws IOException { if (object == null) { return; } if ((encoding == SerializerEncoding.XML)) { xmlMapper.writeValue(outputStream, object); } else { serializer().writeValue(outputStream, object); } } @Override public String serializeRaw(Object object) { if (object == null) { return null; } try { return PATTERN.matcher(serialize(object, SerializerEncoding.JSON)).replaceAll(""); } catch (IOException ex) { logger.warning("Failed to serialize {} to JSON.", object.getClass(), ex); return null; } } @Override public String serializeList(List<?> list, CollectionFormat format) { if (list == null) { return null; } List<String> serialized = new ArrayList<>(); for (Object element : list) { String raw = serializeRaw(element); serialized.add(raw != null ? 
raw : ""); } return String.join(format.getDelimiter(), serialized); } @Override public <T> T deserialize(String value, Type type, SerializerEncoding encoding) throws IOException { if (CoreUtils.isNullOrEmpty(value)) { return null; } return deserialize(new ByteArrayInputStream(value.getBytes(StandardCharsets.UTF_8)), type, encoding); } @Override public <T> T deserialize(InputStream inputStream, final Type type, SerializerEncoding encoding) throws IOException { if (inputStream == null) { return null; } final JavaType javaType = createJavaType(type); try { if (encoding == SerializerEncoding.XML) { return xmlMapper.readValue(inputStream, javaType); } else { return serializer().readValue(inputStream, javaType); } } catch (JsonParseException jpe) { throw logger.logExceptionAsError(new MalformedValueException(jpe.getMessage(), jpe)); } } @Override public <T> T deserialize(HttpHeaders headers, Type deserializedHeadersType) throws IOException { if (deserializedHeadersType == null) { return null; } T deserializedHeaders = headerMapper.convertValue(headers, createJavaType(deserializedHeadersType)); final Class<?> deserializedHeadersClass = TypeUtil.getRawClass(deserializedHeadersType); final Field[] declaredFields = deserializedHeadersClass.getDeclaredFields(); /* * A list containing all handlers for header collections of the header type. */ final List<HeaderCollectionHandler> headerCollectionHandlers = new ArrayList<>(); /* * This set is an optimization where we track the first character of all HeaderCollections defined on the * deserialized headers type. This allows us to optimize away startWiths checks which are much more costly than * getting the first character. */ final Set<Character> headerCollectionsFirstCharacters = new HashSet<>(); /* * Begin by looping over all declared fields and initializing all header collection information. 
*/ for (final Field declaredField : declaredFields) { if (!declaredField.isAnnotationPresent(HeaderCollection.class)) { continue; } final Type declaredFieldType = declaredField.getGenericType(); if (!TypeUtil.isTypeOrSubTypeOf(declaredField.getType(), Map.class)) { continue; } final Type[] mapTypeArguments = TypeUtil.getTypeArguments(declaredFieldType); if (mapTypeArguments.length != 2 || mapTypeArguments[0] != String.class || mapTypeArguments[1] != String.class) { continue; } final HeaderCollection headerCollectionAnnotation = declaredField.getAnnotation(HeaderCollection.class); final String headerCollectionPrefix = headerCollectionAnnotation.value().toLowerCase(Locale.ROOT); final int headerCollectionPrefixLength = headerCollectionPrefix.length(); if (headerCollectionPrefixLength == 0) { continue; } headerCollectionHandlers.add(new HeaderCollectionHandler(headerCollectionPrefix, declaredField)); headerCollectionsFirstCharacters.add(headerCollectionPrefix.charAt(0)); } /* * Then loop over all headers and check if they begin with any of the prefixes found. */ for (final HttpHeader header : headers) { String headerNameLower = header.getName().toLowerCase(Locale.ROOT); for (HeaderCollectionHandler headerCollectionHandler : headerCollectionHandlers) { if (!headerCollectionsFirstCharacters.contains(headerNameLower.charAt(0))) { continue; } if (headerCollectionHandler.headerStartsWithPrefix(headerNameLower)) { headerCollectionHandler.addHeader(header.getName(), header.getValue()); } } } /* * Finally inject all found header collection values into the deserialized headers. 
*/ headerCollectionHandlers.forEach(h -> h.injectValuesIntoDeclaringField(deserializedHeaders, logger)); return deserializedHeaders; } @SuppressWarnings("deprecation") private static <S extends MapperBuilder<?, ?>> S initializeMapperBuilder(S mapper) { mapper.enable(SerializationFeature.WRITE_EMPTY_JSON_ARRAYS) .enable(DeserializationFeature.ACCEPT_EMPTY_STRING_AS_NULL_OBJECT) .enable(DeserializationFeature.ACCEPT_SINGLE_VALUE_AS_ARRAY) .disable(SerializationFeature.WRITE_DATES_AS_TIMESTAMPS) .disable(SerializationFeature.FAIL_ON_EMPTY_BEANS) .disable(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES) .serializationInclusion(JsonInclude.Include.NON_NULL) .addModule(new JavaTimeModule()) .addModule(ByteArraySerializer.getModule()) .addModule(Base64UrlSerializer.getModule()) .addModule(DateTimeSerializer.getModule()) .addModule(DateTimeDeserializer.getModule()) .addModule(DateTimeRfc1123Serializer.getModule()) .addModule(DurationSerializer.getModule()) .addModule(HttpHeadersSerializer.getModule()) .addModule(UnixTimeSerializer.getModule()) .visibility(PropertyAccessor.FIELD, JsonAutoDetect.Visibility.ANY) .visibility(PropertyAccessor.SETTER, JsonAutoDetect.Visibility.NONE) .visibility(PropertyAccessor.GETTER, JsonAutoDetect.Visibility.NONE) .visibility(PropertyAccessor.IS_GETTER, JsonAutoDetect.Visibility.NONE); return mapper; } private JavaType createJavaType(Type type) { if (type == null) { return null; } else if (type instanceof JavaType) { return (JavaType) type; } else if (type instanceof ParameterizedType) { final ParameterizedType parameterizedType = (ParameterizedType) type; final Type[] actualTypeArguments = parameterizedType.getActualTypeArguments(); JavaType[] javaTypeArguments = new JavaType[actualTypeArguments.length]; for (int i = 0; i != actualTypeArguments.length; i++) { javaTypeArguments[i] = createJavaType(actualTypeArguments[i]); } return typeToJavaTypeCache.computeIfAbsent(type, t -> mapper.getTypeFactory() .constructParametricType((Class<?>) 
parameterizedType.getRawType(), javaTypeArguments)); } else { return typeToJavaTypeCache.computeIfAbsent(type, t -> mapper.getTypeFactory().constructType(t)); } } /* * Internal helper class that helps manage converting headers into their header collection. */ private static final class HeaderCollectionHandler { private final String prefix; private final int prefixLength; private final Map<String, String> values; private final Field declaringField; HeaderCollectionHandler(String prefix, Field declaringField) { this.prefix = prefix; this.prefixLength = prefix.length(); this.values = new HashMap<>(); this.declaringField = declaringField; } boolean headerStartsWithPrefix(String headerName) { return headerName.startsWith(prefix); } void addHeader(String headerName, String headerValue) { values.put(headerName.substring(prefixLength), headerValue); } @SuppressWarnings("deprecation") void injectValuesIntoDeclaringField(Object deserializedHeaders, ClientLogger logger) { /* * First check if the deserialized headers type has a public setter. */ if (usePublicSetter(deserializedHeaders, logger)) { return; } logger.verbose("Failed to find or use public setter to set header collection."); /* * Otherwise fallback to setting the field directly. 
*/ final boolean declaredFieldAccessibleBackup = declaringField.isAccessible(); try { if (!declaredFieldAccessibleBackup) { AccessController.doPrivileged((PrivilegedAction<Object>) () -> { declaringField.setAccessible(true); return null; }); } declaringField.set(deserializedHeaders, values); logger.verbose("Set header collection by accessing the field directly."); } catch (IllegalAccessException ex) { logger.warning("Failed to inject header collection values into deserialized headers.", ex); } finally { if (!declaredFieldAccessibleBackup) { AccessController.doPrivileged((PrivilegedAction<Object>) () -> { declaringField.setAccessible(false); return null; }); } } } private boolean usePublicSetter(Object deserializedHeaders, ClientLogger logger) { try { String potentialSetterName = getPotentialSetterName(); Method setterMethod = deserializedHeaders.getClass().getDeclaredMethod(potentialSetterName, Map.class); if (Modifier.isPublic(setterMethod.getModifiers())) { setterMethod.invoke(deserializedHeaders, values); logger.verbose("User setter %s on class %s to set header collection.", potentialSetterName, deserializedHeaders.getClass().getSimpleName()); return true; } return false; } catch (IllegalAccessException | InvocationTargetException | NoSuchMethodException ignored) { return false; } } private String getPotentialSetterName() { String fieldName = declaringField.getName(); return "set" + fieldName.substring(0, 1).toUpperCase(Locale.ROOT) + fieldName.substring(1); } } }
class JacksonAdapter implements SerializerAdapter { private static final Pattern PATTERN = Pattern.compile("^\"*|\"*$"); private final ClientLogger logger = new ClientLogger(JacksonAdapter.class); /** * An instance of {@link ObjectMapper} to serialize/deserialize objects. */ private final ObjectMapper mapper; /** * An instance of {@link ObjectMapper} that does not do flattening. */ private final ObjectMapper simpleMapper; private final ObjectMapper xmlMapper; private final ObjectMapper headerMapper; /* * The lazily-created serializer for this ServiceClient. */ private static SerializerAdapter serializerAdapter; private final Map<Type, JavaType> typeToJavaTypeCache = new ConcurrentHashMap<>(); /** * Creates a new JacksonAdapter instance with default mapper settings. */ /** * Gets a static instance of {@link ObjectMapper} that doesn't handle flattening. * * @return an instance of {@link ObjectMapper}. */ protected ObjectMapper simpleMapper() { return simpleMapper; } /** * maintain singleton instance of the default serializer adapter. 
* * @return the default serializer */ public static synchronized SerializerAdapter createDefaultSerializerAdapter() { if (serializerAdapter == null) { serializerAdapter = new JacksonAdapter(); } return serializerAdapter; } /** * @return the original serializer type */ public ObjectMapper serializer() { return mapper; } @Override public String serialize(Object object, SerializerEncoding encoding) throws IOException { if (object == null) { return null; } ByteArrayOutputStream stream = new AccessibleByteArrayOutputStream(); serialize(object, encoding, stream); return new String(stream.toByteArray(), 0, stream.size(), StandardCharsets.UTF_8); } @Override public void serialize(Object object, SerializerEncoding encoding, OutputStream outputStream) throws IOException { if (object == null) { return; } if ((encoding == SerializerEncoding.XML)) { xmlMapper.writeValue(outputStream, object); } else { serializer().writeValue(outputStream, object); } } @Override public String serializeRaw(Object object) { if (object == null) { return null; } try { return PATTERN.matcher(serialize(object, SerializerEncoding.JSON)).replaceAll(""); } catch (IOException ex) { logger.warning("Failed to serialize {} to JSON.", object.getClass(), ex); return null; } } @Override public String serializeList(List<?> list, CollectionFormat format) { if (list == null) { return null; } List<String> serialized = new ArrayList<>(); for (Object element : list) { String raw = serializeRaw(element); serialized.add(raw != null ? 
raw : ""); } return String.join(format.getDelimiter(), serialized); } @Override public <T> T deserialize(String value, Type type, SerializerEncoding encoding) throws IOException { if (CoreUtils.isNullOrEmpty(value)) { return null; } return deserialize(new ByteArrayInputStream(value.getBytes(StandardCharsets.UTF_8)), type, encoding); } @Override public <T> T deserialize(InputStream inputStream, final Type type, SerializerEncoding encoding) throws IOException { if (inputStream == null) { return null; } final JavaType javaType = createJavaType(type); try { if (encoding == SerializerEncoding.XML) { return xmlMapper.readValue(inputStream, javaType); } else { return serializer().readValue(inputStream, javaType); } } catch (JsonParseException jpe) { throw logger.logExceptionAsError(new MalformedValueException(jpe.getMessage(), jpe)); } } @Override public <T> T deserialize(HttpHeaders headers, Type deserializedHeadersType) throws IOException { if (deserializedHeadersType == null) { return null; } T deserializedHeaders = headerMapper.convertValue(headers, createJavaType(deserializedHeadersType)); final Class<?> deserializedHeadersClass = TypeUtil.getRawClass(deserializedHeadersType); final Field[] declaredFields = deserializedHeadersClass.getDeclaredFields(); /* * A list containing all handlers for header collections of the header type. */ final List<HeaderCollectionHandler> headerCollectionHandlers = new ArrayList<>(); /* * This set is an optimization where we track the first character of all HeaderCollections defined on the * deserialized headers type. This allows us to optimize away startWiths checks which are much more costly than * getting the first character. */ final Set<Character> headerCollectionsFirstCharacters = new HashSet<>(); /* * Begin by looping over all declared fields and initializing all header collection information. 
*/ for (final Field declaredField : declaredFields) { if (!declaredField.isAnnotationPresent(HeaderCollection.class)) { continue; } final Type declaredFieldType = declaredField.getGenericType(); if (!TypeUtil.isTypeOrSubTypeOf(declaredField.getType(), Map.class)) { continue; } final Type[] mapTypeArguments = TypeUtil.getTypeArguments(declaredFieldType); if (mapTypeArguments.length != 2 || mapTypeArguments[0] != String.class || mapTypeArguments[1] != String.class) { continue; } final HeaderCollection headerCollectionAnnotation = declaredField.getAnnotation(HeaderCollection.class); final String headerCollectionPrefix = headerCollectionAnnotation.value().toLowerCase(Locale.ROOT); final int headerCollectionPrefixLength = headerCollectionPrefix.length(); if (headerCollectionPrefixLength == 0) { continue; } headerCollectionHandlers.add(new HeaderCollectionHandler(headerCollectionPrefix, declaredField)); headerCollectionsFirstCharacters.add(headerCollectionPrefix.charAt(0)); } /* * Then loop over all headers and check if they begin with any of the prefixes found. */ for (final HttpHeader header : headers) { String headerNameLower = header.getName().toLowerCase(Locale.ROOT); /* * Optimization to skip this header as it doesn't begin with any character starting header collections in * the deserialized headers type. */ if (!headerCollectionsFirstCharacters.contains(headerNameLower.charAt(0))) { continue; } for (HeaderCollectionHandler headerCollectionHandler : headerCollectionHandlers) { if (headerCollectionHandler.headerStartsWithPrefix(headerNameLower)) { headerCollectionHandler.addHeader(header.getName(), header.getValue()); } } } /* * Finally inject all found header collection values into the deserialized headers. 
*/ headerCollectionHandlers.forEach(h -> h.injectValuesIntoDeclaringField(deserializedHeaders, logger)); return deserializedHeaders; } @SuppressWarnings("deprecation") private static <S extends MapperBuilder<?, ?>> S initializeMapperBuilder(S mapper) { mapper.enable(SerializationFeature.WRITE_EMPTY_JSON_ARRAYS) .enable(DeserializationFeature.ACCEPT_EMPTY_STRING_AS_NULL_OBJECT) .enable(DeserializationFeature.ACCEPT_SINGLE_VALUE_AS_ARRAY) .disable(SerializationFeature.WRITE_DATES_AS_TIMESTAMPS) .disable(SerializationFeature.FAIL_ON_EMPTY_BEANS) .disable(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES) .serializationInclusion(JsonInclude.Include.NON_NULL) .addModule(new JavaTimeModule()) .addModule(ByteArraySerializer.getModule()) .addModule(Base64UrlSerializer.getModule()) .addModule(DateTimeSerializer.getModule()) .addModule(DateTimeDeserializer.getModule()) .addModule(DateTimeRfc1123Serializer.getModule()) .addModule(DurationSerializer.getModule()) .addModule(HttpHeadersSerializer.getModule()) .addModule(UnixTimeSerializer.getModule()) .visibility(PropertyAccessor.FIELD, JsonAutoDetect.Visibility.ANY) .visibility(PropertyAccessor.SETTER, JsonAutoDetect.Visibility.NONE) .visibility(PropertyAccessor.GETTER, JsonAutoDetect.Visibility.NONE) .visibility(PropertyAccessor.IS_GETTER, JsonAutoDetect.Visibility.NONE); return mapper; } private JavaType createJavaType(Type type) { if (type == null) { return null; } else if (type instanceof JavaType) { return (JavaType) type; } else if (type instanceof ParameterizedType) { final ParameterizedType parameterizedType = (ParameterizedType) type; final Type[] actualTypeArguments = parameterizedType.getActualTypeArguments(); JavaType[] javaTypeArguments = new JavaType[actualTypeArguments.length]; for (int i = 0; i != actualTypeArguments.length; i++) { javaTypeArguments[i] = createJavaType(actualTypeArguments[i]); } return typeToJavaTypeCache.computeIfAbsent(type, t -> mapper.getTypeFactory() .constructParametricType((Class<?>) 
parameterizedType.getRawType(), javaTypeArguments)); } else { return typeToJavaTypeCache.computeIfAbsent(type, t -> mapper.getTypeFactory().constructType(t)); } } /* * Internal helper class that helps manage converting headers into their header collection. */ private static final class HeaderCollectionHandler { private final String prefix; private final int prefixLength; private final Map<String, String> values; private final Field declaringField; HeaderCollectionHandler(String prefix, Field declaringField) { this.prefix = prefix; this.prefixLength = prefix.length(); this.values = new HashMap<>(); this.declaringField = declaringField; } boolean headerStartsWithPrefix(String headerName) { return headerName.startsWith(prefix); } void addHeader(String headerName, String headerValue) { values.put(headerName.substring(prefixLength), headerValue); } @SuppressWarnings("deprecation") void injectValuesIntoDeclaringField(Object deserializedHeaders, ClientLogger logger) { /* * First check if the deserialized headers type has a public setter. */ if (usePublicSetter(deserializedHeaders, logger)) { return; } logger.verbose("Failed to find or use public setter to set header collection."); /* * Otherwise fallback to setting the field directly. 
*/ final boolean declaredFieldAccessibleBackup = declaringField.isAccessible(); try { if (!declaredFieldAccessibleBackup) { AccessController.doPrivileged((PrivilegedAction<Object>) () -> { declaringField.setAccessible(true); return null; }); } declaringField.set(deserializedHeaders, values); logger.verbose("Set header collection by accessing the field directly."); } catch (IllegalAccessException ex) { logger.warning("Failed to inject header collection values into deserialized headers.", ex); } finally { if (!declaredFieldAccessibleBackup) { AccessController.doPrivileged((PrivilegedAction<Object>) () -> { declaringField.setAccessible(false); return null; }); } } } private boolean usePublicSetter(Object deserializedHeaders, ClientLogger logger) { try { String potentialSetterName = getPotentialSetterName(); Method setterMethod = deserializedHeaders.getClass().getDeclaredMethod(potentialSetterName, Map.class); if (Modifier.isPublic(setterMethod.getModifiers())) { setterMethod.invoke(deserializedHeaders, values); logger.verbose("User setter %s on class %s to set header collection.", potentialSetterName, deserializedHeaders.getClass().getSimpleName()); return true; } return false; } catch (IllegalAccessException | InvocationTargetException | NoSuchMethodException ignored) { return false; } } private String getPotentialSetterName() { String fieldName = declaringField.getName(); return "set" + fieldName.substring(0, 1).toUpperCase(Locale.ROOT) + fieldName.substring(1); } } }
If we are going to check the validity of the properties, we should add it to the property class.
private void sendTelemetry() { if (properties.isAllowTelemetry()) { final Map<String, String> events = new HashMap<>(); final TelemetrySender sender = new TelemetrySender(); String tenantName = properties.getTenantName(); Assert.hasText(tenantName, "tenant name should contains text."); events.put(SERVICE_NAME, getClassPackageSimpleName(AADB2CAutoConfiguration.class)); events.put(TENANT_NAME, tenantName); sender.send(ClassUtils.getUserClass(getClass()).getSimpleName(), events); } }
Assert.hasText(tenantName, "tenant name should contains text.");
private void sendTelemetry() { if (properties.isAllowTelemetry()) { final Map<String, String> events = new HashMap<>(); final TelemetrySender sender = new TelemetrySender(); events.put(SERVICE_NAME, getClassPackageSimpleName(AADB2CAutoConfiguration.class)); events.put(TENANT_NAME, properties.getTenant()); sender.send(ClassUtils.getUserClass(getClass()).getSimpleName(), events); } }
class AADB2CAutoConfiguration { private final ClientRegistrationRepository repository; private final AADB2CProperties properties; public AADB2CAutoConfiguration(@NonNull ClientRegistrationRepository repository, @NonNull AADB2CProperties properties) { this.repository = repository; this.properties = properties; } @Bean @ConditionalOnMissingBean public AADB2CAuthorizationRequestResolver b2cOAuth2AuthorizationRequestResolver() { return new AADB2CAuthorizationRequestResolver(repository, properties); } @Bean @ConditionalOnMissingBean public AADB2CLogoutSuccessHandler b2cLogoutSuccessHandler() { return new AADB2CLogoutSuccessHandler(properties); } @Bean @ConditionalOnMissingBean public AADB2COidcLoginConfigurer b2cLoginConfigurer(AADB2CLogoutSuccessHandler handler, AADB2CAuthorizationRequestResolver resolver) { return new AADB2COidcLoginConfigurer(handler, resolver); } @PostConstruct /** * Automatic configuration class of AADB2COidc */ @Configuration @ConditionalOnResource(resources = "classpath:aadb2c.enable.config") @ConditionalOnProperty(prefix = AADB2CProperties.PREFIX, value = "oidc-enabled", havingValue = "true", matchIfMissing = true) public static class AADB2COidcAutoConfiguration { private final AADB2CProperties properties; public AADB2COidcAutoConfiguration(@NonNull AADB2CProperties properties) { this.properties = properties; } private void addB2CClientRegistration(@NonNull List<ClientRegistration> registrations, String userFlow) { if (StringUtils.hasText(userFlow)) { registrations.add(b2cClientRegistration(userFlow)); } } @Bean @ConditionalOnMissingBean public ClientRegistrationRepository clientRegistrationRepository() { final List<ClientRegistration> signUpOrSignInRegistrations = new ArrayList<>(3); final List<ClientRegistration> otherRegistrations = new ArrayList<>(); addB2CClientRegistration(signUpOrSignInRegistrations, properties.getUserFlows().getSignUpOrSignIn()); addB2CClientRegistration(signUpOrSignInRegistrations, 
properties.getUserFlows().getSignIn()); addB2CClientRegistration(signUpOrSignInRegistrations, properties.getUserFlows().getSignUp()); addB2CClientRegistration(otherRegistrations, properties.getUserFlows().getProfileEdit()); addB2CClientRegistration(otherRegistrations, properties.getUserFlows().getPasswordReset()); return new AADB2CClientRegistrationRepository(signUpOrSignInRegistrations, otherRegistrations); } private ClientRegistration b2cClientRegistration(String userFlow) { Assert.hasText(userFlow, "User flow should contains text."); return ClientRegistration.withRegistrationId(userFlow) .clientId(properties.getClientId()) .clientSecret(properties.getClientSecret()) .clientAuthenticationMethod(ClientAuthenticationMethod.POST) .authorizationGrantType(AuthorizationGrantType.AUTHORIZATION_CODE) .redirectUriTemplate(properties.getReplyUrl()) .scope(properties.getClientId(), "openid") .authorizationUri(AADB2CURL.getAuthorizationUrl(properties.getBaseUri())) .tokenUri(AADB2CURL.getTokenUrl(properties.getBaseUri(), userFlow)) .jwkSetUri(AADB2CURL.getJwkSetUrl(properties.getBaseUri(), userFlow)) .userNameAttributeName(properties.getUserNameAttributeName()) .clientName(userFlow) .build(); } } }
class AADB2CAutoConfiguration { private final ClientRegistrationRepository repository; private final AADB2CProperties properties; public AADB2CAutoConfiguration(@NonNull ClientRegistrationRepository repository, @NonNull AADB2CProperties properties) { this.repository = repository; this.properties = properties; } @Bean @ConditionalOnMissingBean public AADB2CAuthorizationRequestResolver b2cOAuth2AuthorizationRequestResolver() { return new AADB2CAuthorizationRequestResolver(repository, properties); } @Bean @ConditionalOnMissingBean public AADB2CLogoutSuccessHandler b2cLogoutSuccessHandler() { return new AADB2CLogoutSuccessHandler(properties); } @Bean @ConditionalOnMissingBean public AADB2COidcLoginConfigurer b2cLoginConfigurer(AADB2CLogoutSuccessHandler handler, AADB2CAuthorizationRequestResolver resolver) { return new AADB2COidcLoginConfigurer(handler, resolver); } @PostConstruct /** * Automatic configuration class of AADB2COidc */ @Configuration @ConditionalOnResource(resources = "classpath:aadb2c.enable.config") @ConditionalOnProperty(prefix = AADB2CProperties.PREFIX, value = "oidc-enabled", havingValue = "true", matchIfMissing = true) public static class AADB2COidcAutoConfiguration { private final AADB2CProperties properties; public AADB2COidcAutoConfiguration(@NonNull AADB2CProperties properties) { this.properties = properties; } private void addB2CClientRegistration(@NonNull List<ClientRegistration> registrations, String userFlow) { if (StringUtils.hasText(userFlow)) { registrations.add(b2cClientRegistration(userFlow)); } } @Bean @ConditionalOnMissingBean public ClientRegistrationRepository clientRegistrationRepository() { final List<ClientRegistration> signUpOrSignInRegistrations = new ArrayList<>(3); final List<ClientRegistration> otherRegistrations = new ArrayList<>(); addB2CClientRegistration(signUpOrSignInRegistrations, properties.getUserFlows().getSignUpOrSignIn()); addB2CClientRegistration(signUpOrSignInRegistrations, 
properties.getUserFlows().getSignIn()); addB2CClientRegistration(signUpOrSignInRegistrations, properties.getUserFlows().getSignUp()); addB2CClientRegistration(otherRegistrations, properties.getUserFlows().getProfileEdit()); addB2CClientRegistration(otherRegistrations, properties.getUserFlows().getPasswordReset()); return new AADB2CClientRegistrationRepository(signUpOrSignInRegistrations, otherRegistrations); } private ClientRegistration b2cClientRegistration(String userFlow) { Assert.hasText(userFlow, "User flow should contains text."); return ClientRegistration.withRegistrationId(userFlow) .clientId(properties.getClientId()) .clientSecret(properties.getClientSecret()) .clientAuthenticationMethod(ClientAuthenticationMethod.POST) .authorizationGrantType(AuthorizationGrantType.AUTHORIZATION_CODE) .redirectUriTemplate(properties.getReplyUrl()) .scope(properties.getClientId(), "openid") .authorizationUri(AADB2CURL.getAuthorizationUrl(properties.getBaseUri())) .tokenUri(AADB2CURL.getTokenUrl(properties.getBaseUri(), userFlow)) .jwkSetUri(AADB2CURL.getJwkSetUrl(properties.getBaseUri(), userFlow)) .userNameAttributeName(properties.getUserNameAttributeName()) .clientName(userFlow) .build(); } } }
the logic still is not consistent with our description
public void afterPropertiesSet() { if (StringUtils.isEmpty(tenant) && StringUtils.isEmpty(baseUri)) { throw new AADB2CConfigurationException("'tenant' and 'baseUri' at least configure one item."); } if (StringUtils.hasText(baseUri) && StringUtils.isEmpty(tenant)) { baseUri = AADB2CURL.addSlash(baseUri); if (!Pattern.compile(BASE_URI_REGEX).matcher(baseUri).matches()) { throw new AADB2CConfigurationException("'baseUri' is invalid."); } Matcher matcher = Pattern.compile(TENANT_NAME_PART_REGEX).matcher(baseUri); if (matcher.find()) { String matched = matcher.group(); tenant = matched.substring(0, matched.length() - 2); } } else { baseUri = String.format("https: } }
if (StringUtils.hasText(baseUri) && StringUtils.isEmpty(tenant)) {
public void afterPropertiesSet() { if (StringUtils.isEmpty(tenant) && StringUtils.isEmpty(baseUri)) { throw new AADB2CConfigurationException("'tenant' and 'baseUri' at least configure one item."); } }
class AADB2CProperties implements InitializingBean { private static final String USER_FLOWS = "user-flows"; /** * We do not use ${@link String * annotation. */ public static final String USER_FLOW_PASSWORD_RESET = USER_FLOWS + ".password-reset"; public static final String USER_FLOW_PROFILE_EDIT = USER_FLOWS + ".profile-edit"; public static final String USER_FLOW_SIGN_UP_OR_SIGN_IN = USER_FLOWS + ".sign-up-or-sign-in"; public static final String USER_FLOW_SIGN_UP = USER_FLOWS + ".sign-up"; public static final String USER_FLOW_SIGN_IN = USER_FLOWS + ".sign-in"; public static final String DEFAULT_LOGOUT_SUCCESS_URL = "http: public static final String PREFIX = "azure.activedirectory.b2c"; private static final String BASE_URI_REGEX = "(https: private static final String TENANT_NAME_PART_REGEX = "([A-Za-z0-9]{2,}\\.)"; /** * The name of the b2c tenant. * @deprecated It's recommended to use 'baseUri' instead. */ @Deprecated private String tenant; /** * Use OIDC ${@link OidcAuthorizationCodeAuthenticationProvider} by default. If set to false, will use Oauth2 * ${@link OAuth2AuthorizationCodeAuthenticationProvider}. */ private Boolean oidcEnabled = true; /** * The application ID that registered under b2c tenant. */ @NotBlank(message = "client ID should not be blank") private String clientId; /** * The application secret that registered under b2c tenant. */ @NotBlank(message = "client secret should not be blank") private String clientSecret; @URL(message = "logout success should be valid URL") private String logoutSuccessUrl = DEFAULT_LOGOUT_SUCCESS_URL; private Map<String, Object> authenticateAdditionalParameters; /** * User name attribute name */ private String userNameAttributeName; /** * The all user flows which is created under b2c tenant. */ private UserFlows userFlows = new UserFlows(); /** * Telemetry data will be collected if true, or disable data collection. 
*/ private boolean allowTelemetry = true; private String replyUrl = "{baseUrl}/login/oauth2/code/"; /** * AAD B2C endpoint base uri. */ @URL(message = "baseUri should be valid URL") private String baseUri; @Override /** * UserFlows */ @Validated public static class UserFlows { protected UserFlows() { } /** * The sign-up-or-sign-in user flow which is created under b2c tenant. */ @NotBlank(message = "sign-up-or-in value should not be blank") private String signUpOrSignIn; /** * The profile-edit user flow which is created under b2c tenant. */ private String profileEdit; /** * The password-reset user flow which is created under b2c tenant. */ private String passwordReset; /** * The sign-up user flow which is created under b2c tenant. */ private String signUp; /** * The sign-in user flow which is created under b2c tenant. */ private String signIn; public String getSignUp() { return signUp; } public void setSignUp(String signUp) { this.signUp = signUp; } public String getSignIn() { return signIn; } public void setSignIn(String signIn) { this.signIn = signIn; } public String getSignUpOrSignIn() { return signUpOrSignIn; } public void setSignUpOrSignIn(String signUpOrSignIn) { this.signUpOrSignIn = signUpOrSignIn; } public String getProfileEdit() { return profileEdit; } public void setProfileEdit(String profileEdit) { this.profileEdit = profileEdit; } public String getPasswordReset() { return passwordReset; } public void setPasswordReset(String passwordReset) { this.passwordReset = passwordReset; } } public String getBaseUri() { return baseUri; } public void setBaseUri(String baseUri) { this.baseUri = baseUri; } public void setTenant(String tenant) { this.tenant = tenant; } @DeprecatedConfigurationProperty( reason = "Configuration updated to baseUri", replacement = "azure.activedirectory.b2c.base-uri") public String getTenant() { return tenant; } public Boolean getOidcEnabled() { return oidcEnabled; } public void setOidcEnabled(Boolean oidcEnabled) { this.oidcEnabled = 
oidcEnabled; } public String getClientId() { return clientId; } public void setClientId(String clientId) { this.clientId = clientId; } public String getClientSecret() { return clientSecret; } public void setClientSecret(String clientSecret) { this.clientSecret = clientSecret; } public String getLogoutSuccessUrl() { return logoutSuccessUrl; } public void setLogoutSuccessUrl(String logoutSuccessUrl) { this.logoutSuccessUrl = logoutSuccessUrl; } public Map<String, Object> getAuthenticateAdditionalParameters() { return authenticateAdditionalParameters; } public void setAuthenticateAdditionalParameters(Map<String, Object> authenticateAdditionalParameters) { this.authenticateAdditionalParameters = authenticateAdditionalParameters; } public UserFlows getUserFlows() { return userFlows; } public void setUserFlows(UserFlows userFlows) { this.userFlows = userFlows; } public boolean isAllowTelemetry() { return allowTelemetry; } public void setAllowTelemetry(boolean allowTelemetry) { this.allowTelemetry = allowTelemetry; } public String getUserNameAttributeName() { return userNameAttributeName; } public void setUserNameAttributeName(String userNameAttributeName) { this.userNameAttributeName = userNameAttributeName; } public String getReplyUrl() { return replyUrl; } public void setReplyUrl(String replyUrl) { this.replyUrl = replyUrl; } }
class AADB2CProperties implements InitializingBean { private static final String USER_FLOWS = "user-flows"; /** * We do not use ${@link String * annotation. */ public static final String USER_FLOW_PASSWORD_RESET = USER_FLOWS + ".password-reset"; public static final String USER_FLOW_PROFILE_EDIT = USER_FLOWS + ".profile-edit"; public static final String USER_FLOW_SIGN_UP_OR_SIGN_IN = USER_FLOWS + ".sign-up-or-sign-in"; public static final String USER_FLOW_SIGN_UP = USER_FLOWS + ".sign-up"; public static final String USER_FLOW_SIGN_IN = USER_FLOWS + ".sign-in"; public static final String DEFAULT_LOGOUT_SUCCESS_URL = "http: public static final String PREFIX = "azure.activedirectory.b2c"; private static final String TENANT_NAME_PART_REGEX = "([A-Za-z0-9]+\\.)"; /** * The name of the b2c tenant. * @deprecated It's recommended to use 'baseUri' instead. */ @Deprecated private String tenant; /** * Use OIDC ${@link OidcAuthorizationCodeAuthenticationProvider} by default. If set to false, will use Oauth2 * ${@link OAuth2AuthorizationCodeAuthenticationProvider}. */ private Boolean oidcEnabled = true; /** * The application ID that registered under b2c tenant. */ @NotBlank(message = "client ID should not be blank") private String clientId; /** * The application secret that registered under b2c tenant. */ @NotBlank(message = "client secret should not be blank") private String clientSecret; @URL(message = "logout success should be valid URL") private String logoutSuccessUrl = DEFAULT_LOGOUT_SUCCESS_URL; private Map<String, Object> authenticateAdditionalParameters; /** * User name attribute name */ private String userNameAttributeName; /** * The all user flows which is created under b2c tenant. */ private UserFlows userFlows = new UserFlows(); /** * Telemetry data will be collected if true, or disable data collection. */ private boolean allowTelemetry = true; private String replyUrl = "{baseUrl}/login/oauth2/code/"; /** * AAD B2C endpoint base uri. 
*/ @URL(message = "baseUri should be valid URL") private String baseUri; @Override /** * UserFlows */ @Validated public static class UserFlows { protected UserFlows() { } /** * The sign-up-or-sign-in user flow which is created under b2c tenant. */ @NotBlank(message = "sign-up-or-in value should not be blank") private String signUpOrSignIn; /** * The profile-edit user flow which is created under b2c tenant. */ private String profileEdit; /** * The password-reset user flow which is created under b2c tenant. */ private String passwordReset; /** * The sign-up user flow which is created under b2c tenant. */ private String signUp; /** * The sign-in user flow which is created under b2c tenant. */ private String signIn; public String getSignUp() { return signUp; } public void setSignUp(String signUp) { this.signUp = signUp; } public String getSignIn() { return signIn; } public void setSignIn(String signIn) { this.signIn = signIn; } public String getSignUpOrSignIn() { return signUpOrSignIn; } public void setSignUpOrSignIn(String signUpOrSignIn) { this.signUpOrSignIn = signUpOrSignIn; } public String getProfileEdit() { return profileEdit; } public void setProfileEdit(String profileEdit) { this.profileEdit = profileEdit; } public String getPasswordReset() { return passwordReset; } public void setPasswordReset(String passwordReset) { this.passwordReset = passwordReset; } } public String getBaseUri() { if (StringUtils.hasText(tenant) && StringUtils.isEmpty(baseUri)) { return String.format("https: } return baseUri; } public void setBaseUri(String baseUri) { this.baseUri = baseUri; } public void setTenant(String tenant) { this.tenant = tenant; } /** * Get tenant name for Telemetry * @return tenant name * @throws AADB2CConfigurationException resolve tenant name failed */ @DeprecatedConfigurationProperty( reason = "Configuration updated to baseUri", replacement = "azure.activedirectory.b2c.base-uri") public String getTenant() { if (StringUtils.hasText(baseUri)) { Matcher matcher = 
Pattern.compile(TENANT_NAME_PART_REGEX).matcher(baseUri); if (matcher.find()) { String matched = matcher.group(); return matched.substring(0, matched.length() - 1); } throw new AADB2CConfigurationException("Unable to resolve the 'tenant' name."); } return tenant; } public Boolean getOidcEnabled() { return oidcEnabled; } public void setOidcEnabled(Boolean oidcEnabled) { this.oidcEnabled = oidcEnabled; } public String getClientId() { return clientId; } public void setClientId(String clientId) { this.clientId = clientId; } public String getClientSecret() { return clientSecret; } public void setClientSecret(String clientSecret) { this.clientSecret = clientSecret; } public String getLogoutSuccessUrl() { return logoutSuccessUrl; } public void setLogoutSuccessUrl(String logoutSuccessUrl) { this.logoutSuccessUrl = logoutSuccessUrl; } public Map<String, Object> getAuthenticateAdditionalParameters() { return authenticateAdditionalParameters; } public void setAuthenticateAdditionalParameters(Map<String, Object> authenticateAdditionalParameters) { this.authenticateAdditionalParameters = authenticateAdditionalParameters; } public UserFlows getUserFlows() { return userFlows; } public void setUserFlows(UserFlows userFlows) { this.userFlows = userFlows; } public boolean isAllowTelemetry() { return allowTelemetry; } public void setAllowTelemetry(boolean allowTelemetry) { this.allowTelemetry = allowTelemetry; } public String getUserNameAttributeName() { return userNameAttributeName; } public void setUserNameAttributeName(String userNameAttributeName) { this.userNameAttributeName = userNameAttributeName; } public String getReplyUrl() { return replyUrl; } public void setReplyUrl(String replyUrl) { this.replyUrl = replyUrl; } }
What I meant is this add slash part should be done when we construct the endpoints, not here.
public void afterPropertiesSet() { if (StringUtils.isEmpty(tenant) && StringUtils.isEmpty(baseUri)) { throw new AADB2CConfigurationException("'tenant' and 'baseUri' at least configure one item."); } if (StringUtils.hasText(baseUri) && StringUtils.isEmpty(tenant)) { baseUri = AADB2CURL.addSlash(baseUri); if (!Pattern.compile(BASE_URI_REGEX).matcher(baseUri).matches()) { throw new AADB2CConfigurationException("'baseUri' is invalid."); } Matcher matcher = Pattern.compile(TENANT_NAME_PART_REGEX).matcher(baseUri); if (matcher.find()) { String matched = matcher.group(); tenant = matched.substring(0, matched.length() - 2); } } else { baseUri = String.format("https: } }
baseUri = AADB2CURL.addSlash(baseUri);
public void afterPropertiesSet() { if (StringUtils.isEmpty(tenant) && StringUtils.isEmpty(baseUri)) { throw new AADB2CConfigurationException("'tenant' and 'baseUri' at least configure one item."); } }
class AADB2CProperties implements InitializingBean { private static final String USER_FLOWS = "user-flows"; /** * We do not use ${@link String * annotation. */ public static final String USER_FLOW_PASSWORD_RESET = USER_FLOWS + ".password-reset"; public static final String USER_FLOW_PROFILE_EDIT = USER_FLOWS + ".profile-edit"; public static final String USER_FLOW_SIGN_UP_OR_SIGN_IN = USER_FLOWS + ".sign-up-or-sign-in"; public static final String USER_FLOW_SIGN_UP = USER_FLOWS + ".sign-up"; public static final String USER_FLOW_SIGN_IN = USER_FLOWS + ".sign-in"; public static final String DEFAULT_LOGOUT_SUCCESS_URL = "http: public static final String PREFIX = "azure.activedirectory.b2c"; private static final String BASE_URI_REGEX = "(https: private static final String TENANT_NAME_PART_REGEX = "([A-Za-z0-9]{2,}\\.)"; /** * The name of the b2c tenant. * @deprecated It's recommended to use 'baseUri' instead. */ @Deprecated private String tenant; /** * Use OIDC ${@link OidcAuthorizationCodeAuthenticationProvider} by default. If set to false, will use Oauth2 * ${@link OAuth2AuthorizationCodeAuthenticationProvider}. */ private Boolean oidcEnabled = true; /** * The application ID that registered under b2c tenant. */ @NotBlank(message = "client ID should not be blank") private String clientId; /** * The application secret that registered under b2c tenant. */ @NotBlank(message = "client secret should not be blank") private String clientSecret; @URL(message = "logout success should be valid URL") private String logoutSuccessUrl = DEFAULT_LOGOUT_SUCCESS_URL; private Map<String, Object> authenticateAdditionalParameters; /** * User name attribute name */ private String userNameAttributeName; /** * The all user flows which is created under b2c tenant. */ private UserFlows userFlows = new UserFlows(); /** * Telemetry data will be collected if true, or disable data collection. 
*/ private boolean allowTelemetry = true; private String replyUrl = "{baseUrl}/login/oauth2/code/"; /** * AAD B2C endpoint base uri. */ @URL(message = "baseUri should be valid URL") private String baseUri; @Override /** * UserFlows */ @Validated public static class UserFlows { protected UserFlows() { } /** * The sign-up-or-sign-in user flow which is created under b2c tenant. */ @NotBlank(message = "sign-up-or-in value should not be blank") private String signUpOrSignIn; /** * The profile-edit user flow which is created under b2c tenant. */ private String profileEdit; /** * The password-reset user flow which is created under b2c tenant. */ private String passwordReset; /** * The sign-up user flow which is created under b2c tenant. */ private String signUp; /** * The sign-in user flow which is created under b2c tenant. */ private String signIn; public String getSignUp() { return signUp; } public void setSignUp(String signUp) { this.signUp = signUp; } public String getSignIn() { return signIn; } public void setSignIn(String signIn) { this.signIn = signIn; } public String getSignUpOrSignIn() { return signUpOrSignIn; } public void setSignUpOrSignIn(String signUpOrSignIn) { this.signUpOrSignIn = signUpOrSignIn; } public String getProfileEdit() { return profileEdit; } public void setProfileEdit(String profileEdit) { this.profileEdit = profileEdit; } public String getPasswordReset() { return passwordReset; } public void setPasswordReset(String passwordReset) { this.passwordReset = passwordReset; } } public String getBaseUri() { return baseUri; } public void setBaseUri(String baseUri) { this.baseUri = baseUri; } public void setTenant(String tenant) { this.tenant = tenant; } @DeprecatedConfigurationProperty( reason = "Configuration updated to baseUri", replacement = "azure.activedirectory.b2c.base-uri") public String getTenant() { return tenant; } public Boolean getOidcEnabled() { return oidcEnabled; } public void setOidcEnabled(Boolean oidcEnabled) { this.oidcEnabled = 
oidcEnabled; } public String getClientId() { return clientId; } public void setClientId(String clientId) { this.clientId = clientId; } public String getClientSecret() { return clientSecret; } public void setClientSecret(String clientSecret) { this.clientSecret = clientSecret; } public String getLogoutSuccessUrl() { return logoutSuccessUrl; } public void setLogoutSuccessUrl(String logoutSuccessUrl) { this.logoutSuccessUrl = logoutSuccessUrl; } public Map<String, Object> getAuthenticateAdditionalParameters() { return authenticateAdditionalParameters; } public void setAuthenticateAdditionalParameters(Map<String, Object> authenticateAdditionalParameters) { this.authenticateAdditionalParameters = authenticateAdditionalParameters; } public UserFlows getUserFlows() { return userFlows; } public void setUserFlows(UserFlows userFlows) { this.userFlows = userFlows; } public boolean isAllowTelemetry() { return allowTelemetry; } public void setAllowTelemetry(boolean allowTelemetry) { this.allowTelemetry = allowTelemetry; } public String getUserNameAttributeName() { return userNameAttributeName; } public void setUserNameAttributeName(String userNameAttributeName) { this.userNameAttributeName = userNameAttributeName; } public String getReplyUrl() { return replyUrl; } public void setReplyUrl(String replyUrl) { this.replyUrl = replyUrl; } }
class AADB2CProperties implements InitializingBean { private static final String USER_FLOWS = "user-flows"; /** * We do not use ${@link String * annotation. */ public static final String USER_FLOW_PASSWORD_RESET = USER_FLOWS + ".password-reset"; public static final String USER_FLOW_PROFILE_EDIT = USER_FLOWS + ".profile-edit"; public static final String USER_FLOW_SIGN_UP_OR_SIGN_IN = USER_FLOWS + ".sign-up-or-sign-in"; public static final String USER_FLOW_SIGN_UP = USER_FLOWS + ".sign-up"; public static final String USER_FLOW_SIGN_IN = USER_FLOWS + ".sign-in"; public static final String DEFAULT_LOGOUT_SUCCESS_URL = "http: public static final String PREFIX = "azure.activedirectory.b2c"; private static final String TENANT_NAME_PART_REGEX = "([A-Za-z0-9]+\\.)"; /** * The name of the b2c tenant. * @deprecated It's recommended to use 'baseUri' instead. */ @Deprecated private String tenant; /** * Use OIDC ${@link OidcAuthorizationCodeAuthenticationProvider} by default. If set to false, will use Oauth2 * ${@link OAuth2AuthorizationCodeAuthenticationProvider}. */ private Boolean oidcEnabled = true; /** * The application ID that registered under b2c tenant. */ @NotBlank(message = "client ID should not be blank") private String clientId; /** * The application secret that registered under b2c tenant. */ @NotBlank(message = "client secret should not be blank") private String clientSecret; @URL(message = "logout success should be valid URL") private String logoutSuccessUrl = DEFAULT_LOGOUT_SUCCESS_URL; private Map<String, Object> authenticateAdditionalParameters; /** * User name attribute name */ private String userNameAttributeName; /** * The all user flows which is created under b2c tenant. */ private UserFlows userFlows = new UserFlows(); /** * Telemetry data will be collected if true, or disable data collection. */ private boolean allowTelemetry = true; private String replyUrl = "{baseUrl}/login/oauth2/code/"; /** * AAD B2C endpoint base uri. 
*/ @URL(message = "baseUri should be valid URL") private String baseUri; @Override /** * UserFlows */ @Validated public static class UserFlows { protected UserFlows() { } /** * The sign-up-or-sign-in user flow which is created under b2c tenant. */ @NotBlank(message = "sign-up-or-in value should not be blank") private String signUpOrSignIn; /** * The profile-edit user flow which is created under b2c tenant. */ private String profileEdit; /** * The password-reset user flow which is created under b2c tenant. */ private String passwordReset; /** * The sign-up user flow which is created under b2c tenant. */ private String signUp; /** * The sign-in user flow which is created under b2c tenant. */ private String signIn; public String getSignUp() { return signUp; } public void setSignUp(String signUp) { this.signUp = signUp; } public String getSignIn() { return signIn; } public void setSignIn(String signIn) { this.signIn = signIn; } public String getSignUpOrSignIn() { return signUpOrSignIn; } public void setSignUpOrSignIn(String signUpOrSignIn) { this.signUpOrSignIn = signUpOrSignIn; } public String getProfileEdit() { return profileEdit; } public void setProfileEdit(String profileEdit) { this.profileEdit = profileEdit; } public String getPasswordReset() { return passwordReset; } public void setPasswordReset(String passwordReset) { this.passwordReset = passwordReset; } } public String getBaseUri() { if (StringUtils.hasText(tenant) && StringUtils.isEmpty(baseUri)) { return String.format("https: } return baseUri; } public void setBaseUri(String baseUri) { this.baseUri = baseUri; } public void setTenant(String tenant) { this.tenant = tenant; } /** * Get tenant name for Telemetry * @return tenant name * @throws AADB2CConfigurationException resolve tenant name failed */ @DeprecatedConfigurationProperty( reason = "Configuration updated to baseUri", replacement = "azure.activedirectory.b2c.base-uri") public String getTenant() { if (StringUtils.hasText(baseUri)) { Matcher matcher = 
Pattern.compile(TENANT_NAME_PART_REGEX).matcher(baseUri); if (matcher.find()) { String matched = matcher.group(); return matched.substring(0, matched.length() - 1); } throw new AADB2CConfigurationException("Unable to resolve the 'tenant' name."); } return tenant; } public Boolean getOidcEnabled() { return oidcEnabled; } public void setOidcEnabled(Boolean oidcEnabled) { this.oidcEnabled = oidcEnabled; } public String getClientId() { return clientId; } public void setClientId(String clientId) { this.clientId = clientId; } public String getClientSecret() { return clientSecret; } public void setClientSecret(String clientSecret) { this.clientSecret = clientSecret; } public String getLogoutSuccessUrl() { return logoutSuccessUrl; } public void setLogoutSuccessUrl(String logoutSuccessUrl) { this.logoutSuccessUrl = logoutSuccessUrl; } public Map<String, Object> getAuthenticateAdditionalParameters() { return authenticateAdditionalParameters; } public void setAuthenticateAdditionalParameters(Map<String, Object> authenticateAdditionalParameters) { this.authenticateAdditionalParameters = authenticateAdditionalParameters; } public UserFlows getUserFlows() { return userFlows; } public void setUserFlows(UserFlows userFlows) { this.userFlows = userFlows; } public boolean isAllowTelemetry() { return allowTelemetry; } public void setAllowTelemetry(boolean allowTelemetry) { this.allowTelemetry = allowTelemetry; } public String getUserNameAttributeName() { return userNameAttributeName; } public void setUserNameAttributeName(String userNameAttributeName) { this.userNameAttributeName = userNameAttributeName; } public String getReplyUrl() { return replyUrl; } public void setReplyUrl(String replyUrl) { this.replyUrl = replyUrl; } }
😞
public JacksonAdapter() { this.simpleMapper = initializeMapperBuilder(JsonMapper.builder()) .build(); this.headerMapper = initializeMapperBuilder(JsonMapper.builder()) .enable(MapperFeature.ACCEPT_CASE_INSENSITIVE_PROPERTIES) .build(); this.xmlMapper = initializeMapperBuilder(XmlMapper.builder()) .defaultUseWrapper(false) .enable(ToXmlGenerator.Feature.WRITE_XML_DECLARATION) /* * In Jackson 2.12 the default value of this feature changed from true to false. * https: */ .enable(FromXmlParser.Feature.EMPTY_ELEMENT_AS_NULL) .build(); ObjectMapper flatteningMapper = initializeMapperBuilder(JsonMapper.builder()) .addModule(FlatteningSerializer.getModule(simpleMapper())) .addModule(FlatteningDeserializer.getModule(simpleMapper())) .build(); this.mapper = initializeMapperBuilder(JsonMapper.builder()) .addModule(AdditionalPropertiesSerializer.getModule(flatteningMapper)) .addModule(AdditionalPropertiesDeserializer.getModule(flatteningMapper)) .addModule(FlatteningSerializer.getModule(simpleMapper())) .addModule(FlatteningDeserializer.getModule(simpleMapper())) .build(); }
* https:
public JacksonAdapter() { this.simpleMapper = initializeMapperBuilder(JsonMapper.builder()) .build(); this.headerMapper = initializeMapperBuilder(JsonMapper.builder()) .enable(MapperFeature.ACCEPT_CASE_INSENSITIVE_PROPERTIES) .build(); this.xmlMapper = initializeMapperBuilder(XmlMapper.builder()) .defaultUseWrapper(false) .enable(ToXmlGenerator.Feature.WRITE_XML_DECLARATION) /* * In Jackson 2.12 the default value of this feature changed from true to false. * https: */ .enable(FromXmlParser.Feature.EMPTY_ELEMENT_AS_NULL) .build(); ObjectMapper flatteningMapper = initializeMapperBuilder(JsonMapper.builder()) .addModule(FlatteningSerializer.getModule(simpleMapper())) .addModule(FlatteningDeserializer.getModule(simpleMapper())) .build(); this.mapper = initializeMapperBuilder(JsonMapper.builder()) .addModule(AdditionalPropertiesSerializer.getModule(flatteningMapper)) .addModule(AdditionalPropertiesDeserializer.getModule(flatteningMapper)) .addModule(FlatteningSerializer.getModule(simpleMapper())) .addModule(FlatteningDeserializer.getModule(simpleMapper())) .build(); }
class JacksonAdapter implements SerializerAdapter { private static final Pattern PATTERN = Pattern.compile("^\"*|\"*$"); private final ClientLogger logger = new ClientLogger(JacksonAdapter.class); /** * An instance of {@link ObjectMapper} to serialize/deserialize objects. */ private final ObjectMapper mapper; /** * An instance of {@link ObjectMapper} that does not do flattening. */ private final ObjectMapper simpleMapper; private final ObjectMapper xmlMapper; private final ObjectMapper headerMapper; /* * The lazily-created serializer for this ServiceClient. */ private static SerializerAdapter serializerAdapter; private final Map<Type, JavaType> typeToJavaTypeCache = new ConcurrentHashMap<>(); /** * Creates a new JacksonAdapter instance with default mapper settings. */ /** * Gets a static instance of {@link ObjectMapper} that doesn't handle flattening. * * @return an instance of {@link ObjectMapper}. */ protected ObjectMapper simpleMapper() { return simpleMapper; } /** * maintain singleton instance of the default serializer adapter. 
* * @return the default serializer */ public static synchronized SerializerAdapter createDefaultSerializerAdapter() { if (serializerAdapter == null) { serializerAdapter = new JacksonAdapter(); } return serializerAdapter; } /** * @return the original serializer type */ public ObjectMapper serializer() { return mapper; } @Override public String serialize(Object object, SerializerEncoding encoding) throws IOException { if (object == null) { return null; } ByteArrayOutputStream stream = new AccessibleByteArrayOutputStream(); serialize(object, encoding, stream); return new String(stream.toByteArray(), 0, stream.size(), StandardCharsets.UTF_8); } @Override public void serialize(Object object, SerializerEncoding encoding, OutputStream outputStream) throws IOException { if (object == null) { return; } if ((encoding == SerializerEncoding.XML)) { xmlMapper.writeValue(outputStream, object); } else { serializer().writeValue(outputStream, object); } } @Override public String serializeRaw(Object object) { if (object == null) { return null; } try { return PATTERN.matcher(serialize(object, SerializerEncoding.JSON)).replaceAll(""); } catch (IOException ex) { logger.warning("Failed to serialize {} to JSON.", object.getClass(), ex); return null; } } @Override public String serializeList(List<?> list, CollectionFormat format) { if (list == null) { return null; } List<String> serialized = new ArrayList<>(); for (Object element : list) { String raw = serializeRaw(element); serialized.add(raw != null ? 
raw : ""); } return String.join(format.getDelimiter(), serialized); } @Override public <T> T deserialize(String value, Type type, SerializerEncoding encoding) throws IOException { if (CoreUtils.isNullOrEmpty(value)) { return null; } return deserialize(new ByteArrayInputStream(value.getBytes(StandardCharsets.UTF_8)), type, encoding); } @Override public <T> T deserialize(InputStream inputStream, final Type type, SerializerEncoding encoding) throws IOException { if (inputStream == null) { return null; } final JavaType javaType = createJavaType(type); try { if (encoding == SerializerEncoding.XML) { return xmlMapper.readValue(inputStream, javaType); } else { return serializer().readValue(inputStream, javaType); } } catch (JsonParseException jpe) { throw logger.logExceptionAsError(new MalformedValueException(jpe.getMessage(), jpe)); } } @Override public <T> T deserialize(HttpHeaders headers, Type deserializedHeadersType) throws IOException { if (deserializedHeadersType == null) { return null; } T deserializedHeaders = headerMapper.convertValue(headers, createJavaType(deserializedHeadersType)); final Class<?> deserializedHeadersClass = TypeUtil.getRawClass(deserializedHeadersType); final Field[] declaredFields = deserializedHeadersClass.getDeclaredFields(); /* * A list containing all handlers for header collections of the header type. */ final List<HeaderCollectionHandler> headerCollectionHandlers = new ArrayList<>(); /* * This set is an optimization where we track the first character of all HeaderCollections defined on the * deserialized headers type. This allows us to optimize away startWiths checks which are much more costly than * getting the first character. */ final Set<Character> headerCollectionsFirstCharacters = new HashSet<>(); /* * Begin by looping over all declared fields and initializing all header collection information. 
*/ for (final Field declaredField : declaredFields) { if (!declaredField.isAnnotationPresent(HeaderCollection.class)) { continue; } final Type declaredFieldType = declaredField.getGenericType(); if (!TypeUtil.isTypeOrSubTypeOf(declaredField.getType(), Map.class)) { continue; } final Type[] mapTypeArguments = TypeUtil.getTypeArguments(declaredFieldType); if (mapTypeArguments.length != 2 || mapTypeArguments[0] != String.class || mapTypeArguments[1] != String.class) { continue; } final HeaderCollection headerCollectionAnnotation = declaredField.getAnnotation(HeaderCollection.class); final String headerCollectionPrefix = headerCollectionAnnotation.value().toLowerCase(Locale.ROOT); final int headerCollectionPrefixLength = headerCollectionPrefix.length(); if (headerCollectionPrefixLength == 0) { continue; } headerCollectionHandlers.add(new HeaderCollectionHandler(headerCollectionPrefix, declaredField)); headerCollectionsFirstCharacters.add(headerCollectionPrefix.charAt(0)); } /* * Then loop over all headers and check if they begin with any of the prefixes found. */ for (final HttpHeader header : headers) { String headerNameLower = header.getName().toLowerCase(Locale.ROOT); for (HeaderCollectionHandler headerCollectionHandler : headerCollectionHandlers) { if (!headerCollectionsFirstCharacters.contains(headerNameLower.charAt(0))) { continue; } if (headerCollectionHandler.headerStartsWithPrefix(headerNameLower)) { headerCollectionHandler.addHeader(header.getName(), header.getValue()); } } } /* * Finally inject all found header collection values into the deserialized headers. 
*/ headerCollectionHandlers.forEach(h -> h.injectValuesIntoDeclaringField(deserializedHeaders, logger)); return deserializedHeaders; } @SuppressWarnings("deprecation") private static <S extends MapperBuilder<?, ?>> S initializeMapperBuilder(S mapper) { mapper.enable(SerializationFeature.WRITE_EMPTY_JSON_ARRAYS) .enable(DeserializationFeature.ACCEPT_EMPTY_STRING_AS_NULL_OBJECT) .enable(DeserializationFeature.ACCEPT_SINGLE_VALUE_AS_ARRAY) .disable(SerializationFeature.WRITE_DATES_AS_TIMESTAMPS) .disable(SerializationFeature.FAIL_ON_EMPTY_BEANS) .disable(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES) .serializationInclusion(JsonInclude.Include.NON_NULL) .addModule(new JavaTimeModule()) .addModule(ByteArraySerializer.getModule()) .addModule(Base64UrlSerializer.getModule()) .addModule(DateTimeSerializer.getModule()) .addModule(DateTimeDeserializer.getModule()) .addModule(DateTimeRfc1123Serializer.getModule()) .addModule(DurationSerializer.getModule()) .addModule(HttpHeadersSerializer.getModule()) .addModule(UnixTimeSerializer.getModule()) .visibility(PropertyAccessor.FIELD, JsonAutoDetect.Visibility.ANY) .visibility(PropertyAccessor.SETTER, JsonAutoDetect.Visibility.NONE) .visibility(PropertyAccessor.GETTER, JsonAutoDetect.Visibility.NONE) .visibility(PropertyAccessor.IS_GETTER, JsonAutoDetect.Visibility.NONE); return mapper; } private JavaType createJavaType(Type type) { if (type == null) { return null; } else if (type instanceof JavaType) { return (JavaType) type; } else if (type instanceof ParameterizedType) { final ParameterizedType parameterizedType = (ParameterizedType) type; final Type[] actualTypeArguments = parameterizedType.getActualTypeArguments(); JavaType[] javaTypeArguments = new JavaType[actualTypeArguments.length]; for (int i = 0; i != actualTypeArguments.length; i++) { javaTypeArguments[i] = createJavaType(actualTypeArguments[i]); } return typeToJavaTypeCache.computeIfAbsent(type, t -> mapper.getTypeFactory() .constructParametricType((Class<?>) 
parameterizedType.getRawType(), javaTypeArguments)); } else { return typeToJavaTypeCache.computeIfAbsent(type, t -> mapper.getTypeFactory().constructType(t)); } } /* * Internal helper class that helps manage converting headers into their header collection. */ private static final class HeaderCollectionHandler { private final String prefix; private final int prefixLength; private final Map<String, String> values; private final Field declaringField; HeaderCollectionHandler(String prefix, Field declaringField) { this.prefix = prefix; this.prefixLength = prefix.length(); this.values = new HashMap<>(); this.declaringField = declaringField; } boolean headerStartsWithPrefix(String headerName) { return headerName.startsWith(prefix); } void addHeader(String headerName, String headerValue) { values.put(headerName.substring(prefixLength), headerValue); } @SuppressWarnings("deprecation") void injectValuesIntoDeclaringField(Object deserializedHeaders, ClientLogger logger) { /* * First check if the deserialized headers type has a public setter. */ if (usePublicSetter(deserializedHeaders, logger)) { return; } logger.verbose("Failed to find or use public setter to set header collection."); /* * Otherwise fallback to setting the field directly. 
*/ final boolean declaredFieldAccessibleBackup = declaringField.isAccessible(); try { if (!declaredFieldAccessibleBackup) { AccessController.doPrivileged((PrivilegedAction<Object>) () -> { declaringField.setAccessible(true); return null; }); } declaringField.set(deserializedHeaders, values); logger.verbose("Set header collection by accessing the field directly."); } catch (IllegalAccessException ex) { logger.warning("Failed to inject header collection values into deserialized headers.", ex); } finally { if (!declaredFieldAccessibleBackup) { AccessController.doPrivileged((PrivilegedAction<Object>) () -> { declaringField.setAccessible(false); return null; }); } } } private boolean usePublicSetter(Object deserializedHeaders, ClientLogger logger) { try { String potentialSetterName = getPotentialSetterName(); Method setterMethod = deserializedHeaders.getClass().getDeclaredMethod(potentialSetterName, Map.class); if (Modifier.isPublic(setterMethod.getModifiers())) { setterMethod.invoke(deserializedHeaders, values); logger.verbose("User setter %s on class %s to set header collection.", potentialSetterName, deserializedHeaders.getClass().getSimpleName()); return true; } return false; } catch (IllegalAccessException | InvocationTargetException | NoSuchMethodException ignored) { return false; } } private String getPotentialSetterName() { String fieldName = declaringField.getName(); return "set" + fieldName.substring(0, 1).toUpperCase(Locale.ROOT) + fieldName.substring(1); } } }
class JacksonAdapter implements SerializerAdapter { private static final Pattern PATTERN = Pattern.compile("^\"*|\"*$"); private final ClientLogger logger = new ClientLogger(JacksonAdapter.class); /** * An instance of {@link ObjectMapper} to serialize/deserialize objects. */ private final ObjectMapper mapper; /** * An instance of {@link ObjectMapper} that does not do flattening. */ private final ObjectMapper simpleMapper; private final ObjectMapper xmlMapper; private final ObjectMapper headerMapper; /* * The lazily-created serializer for this ServiceClient. */ private static SerializerAdapter serializerAdapter; private final Map<Type, JavaType> typeToJavaTypeCache = new ConcurrentHashMap<>(); /** * Creates a new JacksonAdapter instance with default mapper settings. */ /** * Gets a static instance of {@link ObjectMapper} that doesn't handle flattening. * * @return an instance of {@link ObjectMapper}. */ protected ObjectMapper simpleMapper() { return simpleMapper; } /** * maintain singleton instance of the default serializer adapter. 
* * @return the default serializer */ public static synchronized SerializerAdapter createDefaultSerializerAdapter() { if (serializerAdapter == null) { serializerAdapter = new JacksonAdapter(); } return serializerAdapter; } /** * @return the original serializer type */ public ObjectMapper serializer() { return mapper; } @Override public String serialize(Object object, SerializerEncoding encoding) throws IOException { if (object == null) { return null; } ByteArrayOutputStream stream = new AccessibleByteArrayOutputStream(); serialize(object, encoding, stream); return new String(stream.toByteArray(), 0, stream.size(), StandardCharsets.UTF_8); } @Override public void serialize(Object object, SerializerEncoding encoding, OutputStream outputStream) throws IOException { if (object == null) { return; } if ((encoding == SerializerEncoding.XML)) { xmlMapper.writeValue(outputStream, object); } else { serializer().writeValue(outputStream, object); } } @Override public String serializeRaw(Object object) { if (object == null) { return null; } try { return PATTERN.matcher(serialize(object, SerializerEncoding.JSON)).replaceAll(""); } catch (IOException ex) { logger.warning("Failed to serialize {} to JSON.", object.getClass(), ex); return null; } } @Override public String serializeList(List<?> list, CollectionFormat format) { if (list == null) { return null; } List<String> serialized = new ArrayList<>(); for (Object element : list) { String raw = serializeRaw(element); serialized.add(raw != null ? 
raw : ""); } return String.join(format.getDelimiter(), serialized); } @Override public <T> T deserialize(String value, Type type, SerializerEncoding encoding) throws IOException { if (CoreUtils.isNullOrEmpty(value)) { return null; } return deserialize(new ByteArrayInputStream(value.getBytes(StandardCharsets.UTF_8)), type, encoding); } @Override public <T> T deserialize(InputStream inputStream, final Type type, SerializerEncoding encoding) throws IOException { if (inputStream == null) { return null; } final JavaType javaType = createJavaType(type); try { if (encoding == SerializerEncoding.XML) { return xmlMapper.readValue(inputStream, javaType); } else { return serializer().readValue(inputStream, javaType); } } catch (JsonParseException jpe) { throw logger.logExceptionAsError(new MalformedValueException(jpe.getMessage(), jpe)); } } @Override public <T> T deserialize(HttpHeaders headers, Type deserializedHeadersType) throws IOException { if (deserializedHeadersType == null) { return null; } T deserializedHeaders = headerMapper.convertValue(headers, createJavaType(deserializedHeadersType)); final Class<?> deserializedHeadersClass = TypeUtil.getRawClass(deserializedHeadersType); final Field[] declaredFields = deserializedHeadersClass.getDeclaredFields(); /* * A list containing all handlers for header collections of the header type. */ final List<HeaderCollectionHandler> headerCollectionHandlers = new ArrayList<>(); /* * This set is an optimization where we track the first character of all HeaderCollections defined on the * deserialized headers type. This allows us to optimize away startWiths checks which are much more costly than * getting the first character. */ final Set<Character> headerCollectionsFirstCharacters = new HashSet<>(); /* * Begin by looping over all declared fields and initializing all header collection information. 
*/ for (final Field declaredField : declaredFields) { if (!declaredField.isAnnotationPresent(HeaderCollection.class)) { continue; } final Type declaredFieldType = declaredField.getGenericType(); if (!TypeUtil.isTypeOrSubTypeOf(declaredField.getType(), Map.class)) { continue; } final Type[] mapTypeArguments = TypeUtil.getTypeArguments(declaredFieldType); if (mapTypeArguments.length != 2 || mapTypeArguments[0] != String.class || mapTypeArguments[1] != String.class) { continue; } final HeaderCollection headerCollectionAnnotation = declaredField.getAnnotation(HeaderCollection.class); final String headerCollectionPrefix = headerCollectionAnnotation.value().toLowerCase(Locale.ROOT); final int headerCollectionPrefixLength = headerCollectionPrefix.length(); if (headerCollectionPrefixLength == 0) { continue; } headerCollectionHandlers.add(new HeaderCollectionHandler(headerCollectionPrefix, declaredField)); headerCollectionsFirstCharacters.add(headerCollectionPrefix.charAt(0)); } /* * Then loop over all headers and check if they begin with any of the prefixes found. */ for (final HttpHeader header : headers) { String headerNameLower = header.getName().toLowerCase(Locale.ROOT); /* * Optimization to skip this header as it doesn't begin with any character starting header collections in * the deserialized headers type. */ if (!headerCollectionsFirstCharacters.contains(headerNameLower.charAt(0))) { continue; } for (HeaderCollectionHandler headerCollectionHandler : headerCollectionHandlers) { if (headerCollectionHandler.headerStartsWithPrefix(headerNameLower)) { headerCollectionHandler.addHeader(header.getName(), header.getValue()); } } } /* * Finally inject all found header collection values into the deserialized headers. 
*/ headerCollectionHandlers.forEach(h -> h.injectValuesIntoDeclaringField(deserializedHeaders, logger)); return deserializedHeaders; } @SuppressWarnings("deprecation") private static <S extends MapperBuilder<?, ?>> S initializeMapperBuilder(S mapper) { mapper.enable(SerializationFeature.WRITE_EMPTY_JSON_ARRAYS) .enable(DeserializationFeature.ACCEPT_EMPTY_STRING_AS_NULL_OBJECT) .enable(DeserializationFeature.ACCEPT_SINGLE_VALUE_AS_ARRAY) .disable(SerializationFeature.WRITE_DATES_AS_TIMESTAMPS) .disable(SerializationFeature.FAIL_ON_EMPTY_BEANS) .disable(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES) .serializationInclusion(JsonInclude.Include.NON_NULL) .addModule(new JavaTimeModule()) .addModule(ByteArraySerializer.getModule()) .addModule(Base64UrlSerializer.getModule()) .addModule(DateTimeSerializer.getModule()) .addModule(DateTimeDeserializer.getModule()) .addModule(DateTimeRfc1123Serializer.getModule()) .addModule(DurationSerializer.getModule()) .addModule(HttpHeadersSerializer.getModule()) .addModule(UnixTimeSerializer.getModule()) .visibility(PropertyAccessor.FIELD, JsonAutoDetect.Visibility.ANY) .visibility(PropertyAccessor.SETTER, JsonAutoDetect.Visibility.NONE) .visibility(PropertyAccessor.GETTER, JsonAutoDetect.Visibility.NONE) .visibility(PropertyAccessor.IS_GETTER, JsonAutoDetect.Visibility.NONE); return mapper; } private JavaType createJavaType(Type type) { if (type == null) { return null; } else if (type instanceof JavaType) { return (JavaType) type; } else if (type instanceof ParameterizedType) { final ParameterizedType parameterizedType = (ParameterizedType) type; final Type[] actualTypeArguments = parameterizedType.getActualTypeArguments(); JavaType[] javaTypeArguments = new JavaType[actualTypeArguments.length]; for (int i = 0; i != actualTypeArguments.length; i++) { javaTypeArguments[i] = createJavaType(actualTypeArguments[i]); } return typeToJavaTypeCache.computeIfAbsent(type, t -> mapper.getTypeFactory() .constructParametricType((Class<?>) 
parameterizedType.getRawType(), javaTypeArguments)); } else { return typeToJavaTypeCache.computeIfAbsent(type, t -> mapper.getTypeFactory().constructType(t)); } } /* * Internal helper class that helps manage converting headers into their header collection. */ private static final class HeaderCollectionHandler { private final String prefix; private final int prefixLength; private final Map<String, String> values; private final Field declaringField; HeaderCollectionHandler(String prefix, Field declaringField) { this.prefix = prefix; this.prefixLength = prefix.length(); this.values = new HashMap<>(); this.declaringField = declaringField; } boolean headerStartsWithPrefix(String headerName) { return headerName.startsWith(prefix); } void addHeader(String headerName, String headerValue) { values.put(headerName.substring(prefixLength), headerValue); } @SuppressWarnings("deprecation") void injectValuesIntoDeclaringField(Object deserializedHeaders, ClientLogger logger) { /* * First check if the deserialized headers type has a public setter. */ if (usePublicSetter(deserializedHeaders, logger)) { return; } logger.verbose("Failed to find or use public setter to set header collection."); /* * Otherwise fallback to setting the field directly. 
*/ final boolean declaredFieldAccessibleBackup = declaringField.isAccessible(); try { if (!declaredFieldAccessibleBackup) { AccessController.doPrivileged((PrivilegedAction<Object>) () -> { declaringField.setAccessible(true); return null; }); } declaringField.set(deserializedHeaders, values); logger.verbose("Set header collection by accessing the field directly."); } catch (IllegalAccessException ex) { logger.warning("Failed to inject header collection values into deserialized headers.", ex); } finally { if (!declaredFieldAccessibleBackup) { AccessController.doPrivileged((PrivilegedAction<Object>) () -> { declaringField.setAccessible(false); return null; }); } } } private boolean usePublicSetter(Object deserializedHeaders, ClientLogger logger) { try { String potentialSetterName = getPotentialSetterName(); Method setterMethod = deserializedHeaders.getClass().getDeclaredMethod(potentialSetterName, Map.class); if (Modifier.isPublic(setterMethod.getModifiers())) { setterMethod.invoke(deserializedHeaders, values); logger.verbose("User setter %s on class %s to set header collection.", potentialSetterName, deserializedHeaders.getClass().getSimpleName()); return true; } return false; } catch (IllegalAccessException | InvocationTargetException | NoSuchMethodException ignored) { return false; } } private String getPotentialSetterName() { String fieldName = declaringField.getName(); return "set" + fieldName.substring(0, 1).toUpperCase(Locale.ROOT) + fieldName.substring(1); } } }
`responseClass`: Does this need to be checked for null?
/*
 * Locates the MethodHandle for the most specific applicable constructor (3-5 parameters) of the
 * given response class, or returns null if no such constructor exists.
 */
private MethodHandle locateResponseConstructor(Class<?> responseClass) {
    /*
     * First check if the response class's module is exported to all unnamed modules. If it is, use
     * MethodHandles.publicLookup() which is meant for creating MethodHandle instances for publicly
     * accessible classes. Otherwise, use the MethodHandles.Lookup which is associated to this
     * (com.azure.core) module and, more specifically, is tied to this class
     * (ResponseConstructorsCache). In order to use that lookup we need to ensure that the
     * com.azure.core module reads the response class's module, as the lookup won't have the
     * permissions necessary to create the MethodHandle instance without it.
     */
    MethodHandles.Lookup lookupToUse;
    try {
        if (responseClass.getModule().isExported("")) {
            lookupToUse = MethodHandles.publicLookup();
        } else {
            lookupToUse = LOOKUP;
            if (!CORE_MODULE.canRead(responseClass.getModule())) {
                CORE_MODULE.addReads(responseClass.getModule());
            }
        }
    } catch (Throwable t) {
        // isExported, canRead, and addReads can all potentially throw; fall back to the public
        // lookup instead of failing the entire constructor search.
        lookupToUse = MethodHandles.publicLookup();
    }

    /*
     * Now that we have the MethodHandles.Lookup, search the declared constructors from most to
     * least parameters and return a handle for the first one taking 3-5 parameters (the most
     * specific handle we can use to create the response class, as mentioned in the method
     * Javadocs).
     */
    Constructor<?>[] constructors = responseClass.getDeclaredConstructors();
    // comparingInt(...).reversed() produces the same descending order as the previous
    // (a, b) -> b - a comparator without relying on int subtraction.
    Arrays.sort(constructors, Comparator.<Constructor<?>>comparingInt(Constructor::getParameterCount).reversed());
    for (Constructor<?> constructor : constructors) {
        final int paramCount = constructor.getParameterCount();
        if (paramCount >= 3 && paramCount <= 5) {
            try {
                return lookupToUse.unreflectConstructor(constructor);
            } catch (Throwable t) {
                throw logger.logExceptionAsError(new RuntimeException(t));
            }
        }
    }

    // No 3-5 parameter constructor was found.
    return null;
}
if (responseClass.getModule().isExported("")) {
/*
 * Locates the MethodHandle for the most specific applicable constructor (3-5 parameters) of the
 * given response class, or returns null if no such constructor exists.
 * NOTE(review): assumes ReflectionUtils.getLookupToUse performs whatever module-access setup is
 * required before the constructor can be unreflected - confirm against ReflectionUtils.
 */
private MethodHandle locateResponseConstructor(Class<?> responseClass) {
    MethodHandles.Lookup lookupToUse;
    try {
        lookupToUse = ReflectionUtils.getLookupToUse(responseClass);
    } catch (Throwable t) {
        // Surface lookup-resolution failures through the client logger before rethrowing.
        throw logger.logExceptionAsError(new RuntimeException(t));
    }

    /*
     * Now that the MethodHandles.Lookup has been found to create the MethodHandle instance begin searching for
     * the most specific MethodHandle that can be used to create the response class (as mentioned in the method
     * Javadocs).
     */
    Constructor<?>[] constructors = responseClass.getDeclaredConstructors();
    // Descending sort by parameter count so the most specific constructor is tried first.
    Arrays.sort(constructors, Comparator.comparing(Constructor::getParameterCount, (a, b) -> b - a));

    for (Constructor<?> constructor : constructors) {
        final int paramCount = constructor.getParameterCount();
        if (paramCount >= 3 && paramCount <= 5) {
            try {
                /*
                 * From here we have three, possibly more options, to resolve this.
                 *
                 * 1) setAccessible to true in the response class (requires doPrivilege).
                 * 2) Use Java 9+ Module class to add reads in com.azure.core and the SDK library exports to
                 * com.azure.core for implementation.
                 * 3) SDK libraries create an accessible MethodHandles.Lookup which com.azure.core can use to spoof
                 * as the SDK library.
                 */
                return lookupToUse.unreflectConstructor(constructor);
            } catch (Throwable t) {
                throw logger.logExceptionAsError(new RuntimeException(t));
            }
        }
    }

    // No 3-5 parameter constructor was found.
    return null;
}
class, or null if no handle is found. */
class, or null if no handle is found. */
Calls to `isModuleExported()`, `canReadModule()` and `addModuleRead()` can all potentially throw. We should fall back to `MethodHandles.publicLookup()` if there's an exception.
/*
 * Locates the MethodHandle for the most specific applicable constructor (3-5 parameters) of the
 * given response class, or returns null if no such constructor exists.
 */
private MethodHandle locateResponseConstructor(Class<?> responseClass) {
    /*
     * If we were able to write this using Java 9+ code.
     *
     * First check if the response class's module is exported to all unnamed modules. If it is we will use
     * MethodHandles.publicLookup() which is meant for creating MethodHandle instances for publicly accessible
     * classes.
     */
    MethodHandles.Lookup lookupToUse;
    if (GET_MODULE == null) {
        // Module APIs are unavailable (presumably a pre-Java 9 runtime - TODO confirm how
        // GET_MODULE is resolved), so only the public lookup can be used.
        lookupToUse = MethodHandles.publicLookup();
    } else {
        Object responseModule = getModule(responseClass, logger);
        if (isModuleExported(responseModule, logger)) {
            lookupToUse = MethodHandles.publicLookup();
        } else {
            /*
             * Otherwise we use the MethodHandles.Lookup which is associated to this (com.azure.core) module, and more
             * specifically, is tied to this class (ResponseConstructorsCache). But, in order to use this lookup we
             * need to ensure that the com.azure.core module reads the response class's module as the lookup won't
             * have permissions necessary to create the MethodHandle instance without it.
             */
            lookupToUse = LOOKUP;
            if (!canReadModule(responseModule, logger)) {
                addModuleRead(responseModule, logger);
            }
        }
    }

    /*
     * Now that we have the MethodHandles.Lookup to create our method handle instance we will begin searching for
     * the most specific handle we can use the create the response class (as mentioned in the method Javadocs).
     */
    Constructor<?>[] constructors = responseClass.getDeclaredConstructors();
    // Descending sort by parameter count so the most specific constructor is tried first.
    Arrays.sort(constructors, Comparator.comparing(Constructor::getParameterCount, (a, b) -> b - a));
    for (Constructor<?> constructor : constructors) {
        final int paramCount = constructor.getParameterCount();
        if (paramCount >= 3 && paramCount <= 5) {
            try {
                /*
                 * From here we have three, possibly more options, to resolve this.
                 *
                 * 1) setAccessible to true in the response class (requires doPrivilege).
                 * 2) Use Java 9+ Module class to add reads in com.azure.core and the SDK library exports to
                 * com.azure.core for implementation.
                 * 3) SDK libraries create an accessible MethodHandles.Lookup which com.azure.core can use to spoof
                 * as the SDK library.
                 */
                return lookupToUse.unreflectConstructor(constructor);
            } catch (Throwable t) {
                throw logger.logExceptionAsError(new RuntimeException(t));
            }
        }
    }

    // No 3-5 parameter constructor was found.
    return null;
}
}
/*
 * Finds a MethodHandle for the most parameter-rich constructor of the response class that takes
 * between 3 and 5 arguments, returning null when no such constructor exists.
 */
private MethodHandle locateResponseConstructor(Class<?> responseClass) {
    final MethodHandles.Lookup lookup;
    try {
        lookup = ReflectionUtils.getLookupToUse(responseClass);
    } catch (Throwable t) {
        throw logger.logExceptionAsError(new RuntimeException(t));
    }

    /*
     * With the lookup resolved, walk the declared constructors from most to least parameters and
     * return a handle for the first one in the 3-5 parameter range - the most specific MethodHandle
     * that can be used to create the response class, as described in the method Javadocs.
     */
    Constructor<?>[] candidates = responseClass.getDeclaredConstructors();
    Arrays.sort(candidates, Comparator.<Constructor<?>>comparingInt(Constructor::getParameterCount).reversed());

    for (Constructor<?> candidate : candidates) {
        int parameterCount = candidate.getParameterCount();
        if (parameterCount < 3 || parameterCount > 5) {
            continue;
        }

        try {
            /*
             * Possible ways to resolve accessibility here include:
             * 1) setAccessible(true) on the response class (requires doPrivilege).
             * 2) Using the Java 9+ Module class so com.azure.core adds reads and the SDK library
             *    exports its implementation to com.azure.core.
             * 3) SDK libraries creating an accessible MethodHandles.Lookup which com.azure.core can
             *    use to spoof as the SDK library.
             */
            return lookup.unreflectConstructor(candidate);
        } catch (Throwable t) {
            throw logger.logExceptionAsError(new RuntimeException(t));
        }
    }

    return null;
}
class, or null if no handle is found. */
class, or null if no handle is found. */
If we wanted this to be fully reactive: ```suggestion Mono<Void> deleteAllSettingsMono = client.listConfigurationSettings(null) .flatMap(setting -> client.deleteConfigurationSettingWithResponse(setting, false)) .then(); StepVerifier.create(deleteAllSettingsMono) .verifyComplete(); ``` This code doesn't need to worry about asserting values returned by the delete.
/*
 * Verifies that listing configuration settings with a null selector returns all existing settings.
 */
public void listConfigurationSettingsWithNullSelector(HttpClient httpClient, ConfigurationServiceVersion serviceVersion) {
    client = getConfigurationAsyncClient(httpClient, serviceVersion);
    final String key = getKey();
    final String key2 = getKey();

    // Clean out existing settings fully reactively: delete every current setting and only verify
    // overall completion, instead of blocking with toIterable() and asserting each delete response.
    StepVerifier.create(
        client.listConfigurationSettings(null)
            .flatMap(setting -> client.deleteConfigurationSettingWithResponse(setting, false))
            .then())
        .verifyComplete();

    listWithMultipleKeysRunner(key, key2, (setting, setting2) -> {
        List<ConfigurationSetting> selected = new ArrayList<>();
        // Add two settings under distinct keys.
        StepVerifier.create(client.addConfigurationSettingWithResponse(setting))
            .assertNext(response -> assertConfigurationEquals(setting, response))
            .verifyComplete();
        StepVerifier.create(client.addConfigurationSettingWithResponse(setting2))
            .assertNext(response -> assertConfigurationEquals(setting2, response))
            .verifyComplete();

        // A null selector should list everything; collect the two emissions and verify the count.
        StepVerifier.create(client.listConfigurationSettings(null))
            .consumeNextWith(selected::add)
            .consumeNextWith(selected::add)
            .verifyComplete();

        assertEquals(2, selected.size());
        return selected;
    });
}
}
/*
 * Verifies that listing configuration settings with a null selector returns all existing settings.
 */
public void listConfigurationSettingsWithNullSelector(HttpClient httpClient, ConfigurationServiceVersion serviceVersion) {
    client = getConfigurationAsyncClient(httpClient, serviceVersion);
    final String key = getKey();
    final String key2 = getKey();

    // Reactively delete every existing setting first; only overall completion is verified here.
    StepVerifier.create(
        client.listConfigurationSettings(null)
            .flatMap(setting -> client.deleteConfigurationSettingWithResponse(setting, false))
            .then())
        .verifyComplete();

    listWithMultipleKeysRunner(key, key2, (setting, setting2) -> {
        List<ConfigurationSetting> selected = new ArrayList<>();
        // Add two settings under distinct keys.
        StepVerifier.create(client.addConfigurationSettingWithResponse(setting))
            .assertNext(response -> assertConfigurationEquals(setting, response))
            .verifyComplete();
        StepVerifier.create(client.addConfigurationSettingWithResponse(setting2))
            .assertNext(response -> assertConfigurationEquals(setting2, response))
            .verifyComplete();

        // A null selector should list everything; collect the two emissions and verify the count.
        StepVerifier.create(client.listConfigurationSettings(null))
            .consumeNextWith(selected::add)
            .consumeNextWith(selected::add)
            .verifyComplete();

        assertEquals(2, selected.size());
        return selected;
    });
}
class ConfigurationAsyncClientTest extends ConfigurationClientTestBase { private final ClientLogger logger = new ClientLogger(ConfigurationAsyncClientTest.class); private static final String NO_LABEL = null; private ConfigurationAsyncClient client; @Override protected String getTestName() { return ""; } @Override protected void beforeTest() { beforeTestSetup(); } @Override protected void afterTest() { logger.info("Cleaning up created key values."); client.listConfigurationSettings(new SettingSelector().setKeyFilter(keyPrefix + "*")) .flatMap(configurationSetting -> { logger.info("Deleting key:label [{}:{}]. isReadOnly? {}", configurationSetting.getKey(), configurationSetting.getLabel(), configurationSetting.isReadOnly()); Mono<Response<ConfigurationSetting>> unlock = configurationSetting.isReadOnly() ? client.setReadOnlyWithResponse(configurationSetting, false) : Mono.empty(); return unlock.then(client.deleteConfigurationSettingWithResponse(configurationSetting, false)); }) .blockLast(); logger.info("Finished cleaning up values."); } private ConfigurationAsyncClient getConfigurationAsyncClient(HttpClient httpClient, ConfigurationServiceVersion serviceVersion) { return clientSetup(credentials -> { ConfigurationClientBuilder builder = new ConfigurationClientBuilder() .connectionString(connectionString) .httpClient(httpClient == null ? interceptorManager.getPlaybackClient() : httpClient) .serviceVersion(serviceVersion) .httpLogOptions(new HttpLogOptions().setLogLevel(HttpLogDetailLevel.BODY_AND_HEADERS)); if (getTestMode() != TestMode.PLAYBACK) { builder .addPolicy(interceptorManager.getRecordPolicy()) .addPolicy(new RetryPolicy()); } return builder.buildAsyncClient(); }); } /** * Tests that a configuration is able to be added, these are differentiate from each other using a key or key-label * identifier. 
*/ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.data.appconfiguration.TestHelper public void addConfigurationSetting(HttpClient httpClient, ConfigurationServiceVersion serviceVersion) { client = getConfigurationAsyncClient(httpClient, serviceVersion); addConfigurationSettingRunner((expected) -> StepVerifier.create(client.addConfigurationSettingWithResponse(expected)) .assertNext(response -> assertConfigurationEquals(expected, response)) .verifyComplete()); } /** * Tests that we cannot add a configuration setting when the key is an empty string. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.data.appconfiguration.TestHelper public void addConfigurationSettingEmptyKey(HttpClient httpClient, ConfigurationServiceVersion serviceVersion) { client = getConfigurationAsyncClient(httpClient, serviceVersion); StepVerifier.create(client.addConfigurationSetting("", null, "A value")) .verifyErrorSatisfies(ex -> assertRestException(ex, HttpURLConnection.HTTP_BAD_METHOD)); } /** * Tests that we can add configuration settings when value is not null or an empty string. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.data.appconfiguration.TestHelper public void addConfigurationSettingEmptyValue(HttpClient httpClient, ConfigurationServiceVersion serviceVersion) { client = getConfigurationAsyncClient(httpClient, serviceVersion); addConfigurationSettingEmptyValueRunner((setting) -> { StepVerifier.create(client.addConfigurationSetting(setting.getKey(), setting.getLabel(), setting.getValue())) .assertNext(response -> assertConfigurationEquals(setting, response)) .verifyComplete(); StepVerifier.create(client.getConfigurationSetting(setting.getKey(), setting.getLabel(), null)) .assertNext(response -> assertConfigurationEquals(setting, response)) .verifyComplete(); }); } /** * Verifies that an exception is thrown when null key is passed. 
*/ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.data.appconfiguration.TestHelper public void addConfigurationSettingNullKey(HttpClient httpClient, ConfigurationServiceVersion serviceVersion) { client = getConfigurationAsyncClient(httpClient, serviceVersion); StepVerifier.create(client.addConfigurationSetting(null, null, "A Value")) .expectError(IllegalArgumentException.class) .verify(); StepVerifier.create(client.addConfigurationSettingWithResponse(null)) .expectError(NullPointerException.class) .verify(); } /** * Tests that a configuration cannot be added twice with the same key. This should return a 412 error. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.data.appconfiguration.TestHelper public void addExistingSetting(HttpClient httpClient, ConfigurationServiceVersion serviceVersion) { client = getConfigurationAsyncClient(httpClient, serviceVersion); addExistingSettingRunner((expected) -> StepVerifier.create(client.addConfigurationSettingWithResponse(expected).then(client.addConfigurationSettingWithResponse(expected))) .verifyErrorSatisfies(ex -> assertRestException(ex, ResourceExistsException.class, HttpURLConnection.HTTP_PRECON_FAILED))); } /** * Tests that a configuration is able to be added or updated with set. * When the configuration is read-only updates cannot happen, this will result in a 409. 
*/ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.data.appconfiguration.TestHelper public void setConfigurationSetting(HttpClient httpClient, ConfigurationServiceVersion serviceVersion) { client = getConfigurationAsyncClient(httpClient, serviceVersion); setConfigurationSettingRunner((expected, update) -> StepVerifier.create(client.setConfigurationSettingWithResponse(expected, false)) .assertNext(response -> assertConfigurationEquals(expected, response)) .verifyComplete()); } /** * Tests that when an ETag is passed to set it will only set if the current representation of the setting has the * ETag. If the set ETag doesn't match anything the update won't happen, this will result in a 412. This will * prevent set from doing an add as well. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.data.appconfiguration.TestHelper public void setConfigurationSettingIfETag(HttpClient httpClient, ConfigurationServiceVersion serviceVersion) { client = getConfigurationAsyncClient(httpClient, serviceVersion); setConfigurationSettingIfETagRunner((initial, update) -> { StepVerifier.create(client.setConfigurationSettingWithResponse(initial.setETag("badEtag"), true)) .verifyErrorSatisfies(ex -> assertRestException(ex, HttpResponseException.class, HttpURLConnection.HTTP_PRECON_FAILED)); final String etag = client.addConfigurationSettingWithResponse(initial).block().getValue().getETag(); StepVerifier.create(client.setConfigurationSettingWithResponse(update.setETag(etag), true)) .assertNext(response -> assertConfigurationEquals(update, response)) .verifyComplete(); StepVerifier.create(client.setConfigurationSettingWithResponse(initial, true)) .verifyErrorSatisfies(ex -> assertRestException(ex, HttpResponseException.class, HttpURLConnection.HTTP_PRECON_FAILED)); StepVerifier.create(client.getConfigurationSettingWithResponse(update, null, false)) .assertNext(response -> assertConfigurationEquals(update, response)) 
.verifyComplete(); }); } /** * Tests that we cannot set a configuration setting when the key is an empty string. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.data.appconfiguration.TestHelper public void setConfigurationSettingEmptyKey(HttpClient httpClient, ConfigurationServiceVersion serviceVersion) { client = getConfigurationAsyncClient(httpClient, serviceVersion); StepVerifier.create(client.setConfigurationSetting("", NO_LABEL, "A value")) .verifyErrorSatisfies(ex -> assertRestException(ex, HttpURLConnection.HTTP_BAD_METHOD)); } /** * Tests that we can set configuration settings when value is not null or an empty string. * Value is not a required property. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.data.appconfiguration.TestHelper public void setConfigurationSettingEmptyValue(HttpClient httpClient, ConfigurationServiceVersion serviceVersion) { client = getConfigurationAsyncClient(httpClient, serviceVersion); setConfigurationSettingEmptyValueRunner((setting) -> { StepVerifier.create(client.setConfigurationSetting(setting.getKey(), NO_LABEL, setting.getValue())) .assertNext(response -> assertConfigurationEquals(setting, response)) .verifyComplete(); StepVerifier.create(client.getConfigurationSetting(setting.getKey(), setting.getLabel(), null)) .assertNext(response -> assertConfigurationEquals(setting, response)) .verifyComplete(); }); } /** * Verifies that an exception is thrown when null key is passed. 
*/ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.data.appconfiguration.TestHelper public void setConfigurationSettingNullKey(HttpClient httpClient, ConfigurationServiceVersion serviceVersion) { client = getConfigurationAsyncClient(httpClient, serviceVersion); StepVerifier.create(client.setConfigurationSetting(null, NO_LABEL, "A Value")) .verifyError(IllegalArgumentException.class); StepVerifier.create(client.setConfigurationSettingWithResponse(null, false)) .verifyError(NullPointerException.class); } /** * Tests that a configuration is able to be retrieved when it exists, whether or not it is read-only. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.data.appconfiguration.TestHelper public void getConfigurationSetting(HttpClient httpClient, ConfigurationServiceVersion serviceVersion) { client = getConfigurationAsyncClient(httpClient, serviceVersion); getConfigurationSettingRunner((expected) -> StepVerifier.create(client.addConfigurationSettingWithResponse(expected).then(client.getConfigurationSettingWithResponse(expected, null, false))) .assertNext(response -> assertConfigurationEquals(expected, response)) .verifyComplete()); } /** * Tests that attempting to retrieve a non-existent configuration doesn't work, this will result in a 404. 
*/ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.data.appconfiguration.TestHelper public void getConfigurationSettingNotFound(HttpClient httpClient, ConfigurationServiceVersion serviceVersion) { client = getConfigurationAsyncClient(httpClient, serviceVersion); final String key = getKey(); final ConfigurationSetting neverRetrievedConfiguration = new ConfigurationSetting().setKey(key).setValue("myNeverRetreivedValue"); final ConfigurationSetting nonExistentLabel = new ConfigurationSetting().setKey(key).setLabel("myNonExistentLabel"); StepVerifier.create(client.addConfigurationSettingWithResponse(neverRetrievedConfiguration)) .assertNext(response -> assertConfigurationEquals(neverRetrievedConfiguration, response)) .verifyComplete(); StepVerifier.create(client.getConfigurationSetting("myNonExistentKey", null, null)) .verifyErrorSatisfies(ex -> assertRestException(ex, ResourceNotFoundException.class, HttpURLConnection.HTTP_NOT_FOUND)); StepVerifier.create(client.getConfigurationSettingWithResponse(nonExistentLabel, null, false)) .verifyErrorSatisfies(ex -> assertRestException(ex, ResourceNotFoundException.class, HttpURLConnection.HTTP_NOT_FOUND)); } /** * Tests that configurations are able to be deleted when they exist. * After the configuration has been deleted attempting to get it will result in a 404, the same as if the * configuration never existed. 
*/ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.data.appconfiguration.TestHelper public void deleteConfigurationSetting(HttpClient httpClient, ConfigurationServiceVersion serviceVersion) { client = getConfigurationAsyncClient(httpClient, serviceVersion); deleteConfigurationSettingRunner((expected) -> { StepVerifier.create(client.addConfigurationSettingWithResponse(expected).then(client.getConfigurationSettingWithResponse(expected, null, false))) .assertNext(response -> assertConfigurationEquals(expected, response)) .verifyComplete(); StepVerifier.create(client.deleteConfigurationSettingWithResponse(expected, false)) .assertNext(response -> assertConfigurationEquals(expected, response)) .verifyComplete(); StepVerifier.create(client.getConfigurationSettingWithResponse(expected, null, false)) .verifyErrorSatisfies(ex -> assertRestException(ex, ResourceNotFoundException.class, HttpURLConnection.HTTP_NOT_FOUND)); }); } /** * Tests that attempting to delete a non-existent configuration will return a 204. 
*/ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.data.appconfiguration.TestHelper public void deleteConfigurationSettingNotFound(HttpClient httpClient, ConfigurationServiceVersion serviceVersion) { client = getConfigurationAsyncClient(httpClient, serviceVersion); final String key = getKey(); final ConfigurationSetting neverDeletedConfiguration = new ConfigurationSetting().setKey(key).setValue("myNeverDeletedValue"); StepVerifier.create(client.addConfigurationSettingWithResponse(neverDeletedConfiguration)) .assertNext(response -> assertConfigurationEquals(neverDeletedConfiguration, response)) .verifyComplete(); StepVerifier.create(client.deleteConfigurationSettingWithResponse(new ConfigurationSetting().setKey("myNonExistentKey"), false)) .assertNext(response -> assertConfigurationEquals(null, response, HttpURLConnection.HTTP_NO_CONTENT)) .verifyComplete(); StepVerifier.create(client.deleteConfigurationSettingWithResponse(new ConfigurationSetting().setKey(neverDeletedConfiguration.getKey()).setLabel("myNonExistentLabel"), false)) .assertNext(response -> assertConfigurationEquals(null, response, HttpURLConnection.HTTP_NO_CONTENT)) .verifyComplete(); StepVerifier.create(client.getConfigurationSetting(neverDeletedConfiguration.getKey(), neverDeletedConfiguration.getLabel(), null)) .assertNext(response -> assertConfigurationEquals(neverDeletedConfiguration, response)) .verifyComplete(); } /** * Tests that when an ETag is passed to delete it will only delete if the current representation of the setting has the ETag. * If the delete ETag doesn't match anything the delete won't happen, this will result in a 412. 
*/ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.data.appconfiguration.TestHelper public void deleteConfigurationSettingWithETag(HttpClient httpClient, ConfigurationServiceVersion serviceVersion) { client = getConfigurationAsyncClient(httpClient, serviceVersion); deleteConfigurationSettingWithETagRunner((initial, update) -> { final ConfigurationSetting initiallyAddedConfig = client.addConfigurationSettingWithResponse(initial).block().getValue(); final ConfigurationSetting updatedConfig = client.setConfigurationSettingWithResponse(update, true).block().getValue(); StepVerifier.create(client.getConfigurationSettingWithResponse(initial, null, false)) .assertNext(response -> assertConfigurationEquals(update, response)) .verifyComplete(); StepVerifier.create(client.deleteConfigurationSettingWithResponse(initiallyAddedConfig, true)) .verifyErrorSatisfies(ex -> assertRestException(ex, HttpResponseException.class, HttpURLConnection.HTTP_PRECON_FAILED)); StepVerifier.create(client.deleteConfigurationSettingWithResponse(updatedConfig, true)) .assertNext(response -> assertConfigurationEquals(update, response)) .verifyComplete(); StepVerifier.create(client.getConfigurationSettingWithResponse(initial, null, false)) .verifyErrorSatisfies(ex -> assertRestException(ex, ResourceNotFoundException.class, HttpURLConnection.HTTP_NOT_FOUND)); }); } /** * Test the API will not make a delete call without having a key passed, an IllegalArgumentException should be thrown. 
*/ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.data.appconfiguration.TestHelper public void deleteConfigurationSettingNullKey(HttpClient httpClient, ConfigurationServiceVersion serviceVersion) { client = getConfigurationAsyncClient(httpClient, serviceVersion); StepVerifier.create(client.deleteConfigurationSetting(null, null)) .verifyError(IllegalArgumentException.class); StepVerifier.create(client.deleteConfigurationSettingWithResponse(null, false)) .verifyError(NullPointerException.class); } /** * Tests assert that the setting can not be deleted after set the setting to read-only. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.data.appconfiguration.TestHelper public void setReadOnly(HttpClient httpClient, ConfigurationServiceVersion serviceVersion) { client = getConfigurationAsyncClient(httpClient, serviceVersion); lockUnlockRunner((expected) -> { StepVerifier.create(client.addConfigurationSettingWithResponse(expected)) .assertNext(response -> assertConfigurationEquals(expected, response)) .verifyComplete(); StepVerifier.create(client.setReadOnly(expected.getKey(), expected.getLabel(), true)) .assertNext(response -> assertConfigurationEquals(expected, response)) .verifyComplete(); StepVerifier.create(client.deleteConfigurationSettingWithResponse(expected, false)) .verifyErrorSatisfies(ex -> assertRestException(ex, HttpResponseException.class, 409)); }); } /** * Tests assert that the setting can be deleted after clear read-only of the setting. 
*/ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.data.appconfiguration.TestHelper public void clearReadOnly(HttpClient httpClient, ConfigurationServiceVersion serviceVersion) { client = getConfigurationAsyncClient(httpClient, serviceVersion); lockUnlockRunner((expected) -> { StepVerifier.create(client.addConfigurationSettingWithResponse(expected)) .assertNext(response -> assertConfigurationEquals(expected, response)) .verifyComplete(); StepVerifier.create(client.setReadOnly(expected.getKey(), expected.getLabel(), true)) .assertNext(response -> assertConfigurationEquals(expected, response)) .verifyComplete(); StepVerifier.create(client.deleteConfigurationSettingWithResponse(expected, false)) .verifyErrorSatisfies(ex -> assertRestException(ex, HttpResponseException.class, 409)); StepVerifier.create(client.setReadOnly(expected.getKey(), expected.getLabel(), false)) .assertNext(response -> assertConfigurationEquals(expected, response)) .verifyComplete(); StepVerifier.create(client.deleteConfigurationSettingWithResponse(expected, false)) .assertNext(response -> assertConfigurationEquals(expected, response)) .verifyComplete(); }); } /** * Tests assert that the setting can not be deleted after set the setting to read-only. 
*/ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.data.appconfiguration.TestHelper public void setReadOnlyWithConfigurationSetting(HttpClient httpClient, ConfigurationServiceVersion serviceVersion) { client = getConfigurationAsyncClient(httpClient, serviceVersion); lockUnlockRunner((expected) -> { StepVerifier.create(client.addConfigurationSettingWithResponse(expected)) .assertNext(response -> assertConfigurationEquals(expected, response)) .verifyComplete(); StepVerifier.create(client.setReadOnlyWithResponse(expected, true)) .assertNext(response -> assertConfigurationEquals(expected, response)) .verifyComplete(); StepVerifier.create(client.deleteConfigurationSettingWithResponse(expected, false)) .verifyErrorSatisfies(ex -> assertRestException(ex, HttpResponseException.class, 409)); }); } /** * Tests assert that the setting can be deleted after clear read-only of the setting. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.data.appconfiguration.TestHelper public void clearReadOnlyWithConfigurationSetting(HttpClient httpClient, ConfigurationServiceVersion serviceVersion) { client = getConfigurationAsyncClient(httpClient, serviceVersion); lockUnlockRunner((expected) -> { StepVerifier.create(client.addConfigurationSettingWithResponse(expected)) .assertNext(response -> assertConfigurationEquals(expected, response)) .verifyComplete(); StepVerifier.create(client.setReadOnly(expected.getKey(), expected.getLabel(), true)) .assertNext(response -> assertConfigurationEquals(expected, response)) .verifyComplete(); StepVerifier.create(client.deleteConfigurationSettingWithResponse(expected, false)) .verifyErrorSatisfies(ex -> assertRestException(ex, HttpResponseException.class, 409)); StepVerifier.create(client.setReadOnlyWithResponse(expected, false)) .assertNext(response -> assertConfigurationEquals(expected, response)) .verifyComplete(); 
StepVerifier.create(client.deleteConfigurationSettingWithResponse(expected, false)) .assertNext(response -> assertConfigurationEquals(expected, response)) .verifyComplete(); }); } /** * Verifies that a ConfigurationSetting can be added with a label, and that we can fetch that ConfigurationSetting * from the service when filtering by either its label or just its key. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.data.appconfiguration.TestHelper public void listWithKeyAndLabel(HttpClient httpClient, ConfigurationServiceVersion serviceVersion) { client = getConfigurationAsyncClient(httpClient, serviceVersion); final String value = "myValue"; final String key = testResourceNamer.randomName(keyPrefix, 16); final String label = testResourceNamer.randomName("lbl", 8); final ConfigurationSetting expected = new ConfigurationSetting().setKey(key).setValue(value).setLabel(label); StepVerifier.create(client.setConfigurationSettingWithResponse(expected, false)) .assertNext(response -> assertConfigurationEquals(expected, response)) .verifyComplete(); StepVerifier.create(client.listConfigurationSettings(new SettingSelector().setKeyFilter(key).setLabelFilter(label))) .assertNext(configurationSetting -> assertConfigurationEquals(expected, configurationSetting)) .verifyComplete(); StepVerifier.create(client.listConfigurationSettings(new SettingSelector().setKeyFilter(key))) .assertNext(configurationSetting -> assertConfigurationEquals(expected, configurationSetting)) .verifyComplete(); } /** * Verifies that ConfigurationSettings can be added and that we can fetch those ConfigurationSettings from the * service when filtering by their keys. 
*/ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.data.appconfiguration.TestHelper public void listWithMultipleKeys(HttpClient httpClient, ConfigurationServiceVersion serviceVersion) { client = getConfigurationAsyncClient(httpClient, serviceVersion); String key = getKey(); String key2 = getKey(); listWithMultipleKeysRunner(key, key2, (setting, setting2) -> { List<ConfigurationSetting> selected = new ArrayList<>(); StepVerifier.create(client.addConfigurationSettingWithResponse(setting)) .assertNext(response -> assertConfigurationEquals(setting, response)) .verifyComplete(); StepVerifier.create(client.addConfigurationSettingWithResponse(setting2)) .assertNext(response -> assertConfigurationEquals(setting2, response)) .verifyComplete(); StepVerifier.create(client.listConfigurationSettings(new SettingSelector().setKeyFilter(key + "," + key2))) .consumeNextWith(selected::add) .consumeNextWith(selected::add) .verifyComplete(); return selected; }); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.data.appconfiguration.TestHelper /** * Verifies that ConfigurationSettings can be added with different labels and that we can fetch those ConfigurationSettings * from the service when filtering by their labels. 
*/ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.data.appconfiguration.TestHelper public void listWithMultipleLabels(HttpClient httpClient, ConfigurationServiceVersion serviceVersion) { client = getConfigurationAsyncClient(httpClient, serviceVersion); String key = getKey(); String label = getLabel(); String label2 = getLabel(); listWithMultipleLabelsRunner(key, label, label2, (setting, setting2) -> { List<ConfigurationSetting> selected = new ArrayList<>(); StepVerifier.create(client.addConfigurationSettingWithResponse(setting)) .assertNext(response -> assertConfigurationEquals(setting, response)) .verifyComplete(); StepVerifier.create(client.addConfigurationSettingWithResponse(setting2)) .assertNext(response -> assertConfigurationEquals(setting2, response)) .verifyComplete(); StepVerifier.create(client.listConfigurationSettings(new SettingSelector().setKeyFilter(key).setLabelFilter(label + "," + label2))) .consumeNextWith(selected::add) .consumeNextWith(selected::add) .verifyComplete(); return selected; }); } /** * Verifies that we can select filter results by key, label, and select fields using SettingSelector. 
*/ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.data.appconfiguration.TestHelper public void listConfigurationSettingsSelectFields(HttpClient httpClient, ConfigurationServiceVersion serviceVersion) { client = getConfigurationAsyncClient(httpClient, serviceVersion); listConfigurationSettingsSelectFieldsRunner((settings, selector) -> { final List<Mono<Response<ConfigurationSetting>>> settingsBeingAdded = new ArrayList<>(); for (ConfigurationSetting setting : settings) { settingsBeingAdded.add(client.setConfigurationSettingWithResponse(setting, false)); } Flux.merge(settingsBeingAdded).blockLast(); List<ConfigurationSetting> settingsReturned = new ArrayList<>(); StepVerifier.create(client.listConfigurationSettings(selector)) .assertNext(settingsReturned::add) .assertNext(settingsReturned::add) .verifyComplete(); return settingsReturned; }); } /** * Verifies that throws exception when using SettingSelector with not supported *a key filter. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.data.appconfiguration.TestHelper public void listConfigurationSettingsSelectFieldsWithPrefixStarKeyFilter(HttpClient httpClient, ConfigurationServiceVersion serviceVersion) { client = getConfigurationAsyncClient(httpClient, serviceVersion); filterValueTest("*" + getKey(), getLabel()); } /** * Verifies that throws exception when using SettingSelector with not supported *a* key filter. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.data.appconfiguration.TestHelper public void listConfigurationSettingsSelectFieldsWithSubstringKeyFilter(HttpClient httpClient, ConfigurationServiceVersion serviceVersion) { client = getConfigurationAsyncClient(httpClient, serviceVersion); filterValueTest("*" + getKey() + "*", getLabel()); } /** * Verifies that throws exception when using SettingSelector with not supported *a label filter. 
*/ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.data.appconfiguration.TestHelper public void listConfigurationSettingsSelectFieldsWithPrefixStarLabelFilter(HttpClient httpClient, ConfigurationServiceVersion serviceVersion) { client = getConfigurationAsyncClient(httpClient, serviceVersion); filterValueTest(getKey(), "*" + getLabel()); } /** * Verifies that throws exception when using SettingSelector with not supported *a* label filter. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.data.appconfiguration.TestHelper public void listConfigurationSettingsSelectFieldsWithSubstringLabelFilter(HttpClient httpClient, ConfigurationServiceVersion serviceVersion) { client = getConfigurationAsyncClient(httpClient, serviceVersion); filterValueTest(getKey(), "*" + getLabel() + "*"); } /** * Verifies that we can get a ConfigurationSetting at the provided accept datetime */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.data.appconfiguration.TestHelper public void listConfigurationSettingsAcceptDateTime(HttpClient httpClient, ConfigurationServiceVersion serviceVersion) { client = getConfigurationAsyncClient(httpClient, serviceVersion); final String keyName = testResourceNamer.randomName(keyPrefix, 16); final ConfigurationSetting original = new ConfigurationSetting().setKey(keyName).setValue("myValue"); final ConfigurationSetting updated = new ConfigurationSetting().setKey(original.getKey()).setValue("anotherValue"); final ConfigurationSetting updated2 = new ConfigurationSetting().setKey(original.getKey()).setValue("anotherValue2"); StepVerifier.create(client.setConfigurationSettingWithResponse(original, false)) .assertNext(response -> assertConfigurationEquals(original, response)) .verifyComplete(); StepVerifier.create(client.setConfigurationSettingWithResponse(updated, false).delayElement(Duration.ofSeconds(2))) .assertNext(response -> assertConfigurationEquals(updated, 
response)) .verifyComplete(); StepVerifier.create(client.setConfigurationSettingWithResponse(updated2, false)) .assertNext(response -> assertConfigurationEquals(updated2, response)) .verifyComplete(); List<ConfigurationSetting> revisions = client.listRevisions(new SettingSelector().setKeyFilter(keyName)).collectList().block(); assertNotNull(revisions); assertEquals(3, revisions.size()); SettingSelector options = new SettingSelector().setKeyFilter(keyName).setAcceptDatetime(revisions.get(1).getLastModified()); StepVerifier.create(client.listConfigurationSettings(options)) .assertNext(response -> assertConfigurationEquals(updated, response)) .verifyComplete(); } /** * Verifies that we can get all of the revisions for this ConfigurationSetting. Then verifies that we can select * specific fields. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.data.appconfiguration.TestHelper public void listRevisions(HttpClient httpClient, ConfigurationServiceVersion serviceVersion) { client = getConfigurationAsyncClient(httpClient, serviceVersion); final String keyName = testResourceNamer.randomName(keyPrefix, 16); final ConfigurationSetting original = new ConfigurationSetting().setKey(keyName).setValue("myValue"); final ConfigurationSetting updated = new ConfigurationSetting().setKey(original.getKey()).setValue("anotherValue"); final ConfigurationSetting updated2 = new ConfigurationSetting().setKey(original.getKey()).setValue("anotherValue2"); StepVerifier.create(client.setConfigurationSettingWithResponse(original, false)) .assertNext(response -> assertConfigurationEquals(original, response)) .verifyComplete(); StepVerifier.create(client.setConfigurationSettingWithResponse(updated, false)) .assertNext(response -> assertConfigurationEquals(updated, response)) .verifyComplete(); StepVerifier.create(client.setConfigurationSettingWithResponse(updated2, false)) .assertNext(response -> assertConfigurationEquals(updated2, response)) .verifyComplete(); 
StepVerifier.create(client.listRevisions(new SettingSelector().setKeyFilter(keyName))) .assertNext(response -> assertConfigurationEquals(updated2, response)) .assertNext(response -> assertConfigurationEquals(updated, response)) .assertNext(response -> assertConfigurationEquals(original, response)) .verifyComplete(); StepVerifier.create(client.listRevisions(new SettingSelector().setKeyFilter(keyName).setFields(SettingFields.KEY, SettingFields.ETAG))) .assertNext(response -> validateListRevisions(updated2, response)) .assertNext(response -> validateListRevisions(updated, response)) .assertNext(response -> validateListRevisions(original, response)) .verifyComplete(); assertTrue(client.listRevisions(null).toStream().collect(Collectors.toList()).size() > 0); } /** * Verifies that we can get all the revisions for all settings with the specified keys. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.data.appconfiguration.TestHelper public void listRevisionsWithMultipleKeys(HttpClient httpClient, ConfigurationServiceVersion serviceVersion) { client = getConfigurationAsyncClient(httpClient, serviceVersion); String key = getKey(); String key2 = getKey(); listRevisionsWithMultipleKeysRunner(key, key2, (testInput) -> { List<ConfigurationSetting> selected = new ArrayList<>(); StepVerifier.create(client.addConfigurationSettingWithResponse(testInput.get(0))) .assertNext(response -> assertConfigurationEquals(testInput.get(0), response)) .verifyComplete(); StepVerifier.create(client.setConfigurationSettingWithResponse(testInput.get(1), false)) .assertNext(response -> assertConfigurationEquals(testInput.get(1), response)) .verifyComplete(); StepVerifier.create(client.addConfigurationSettingWithResponse(testInput.get(2))) .assertNext(response -> assertConfigurationEquals(testInput.get(2), response)) .verifyComplete(); StepVerifier.create(client.setConfigurationSettingWithResponse(testInput.get(3), false)) .assertNext(response -> 
assertConfigurationEquals(testInput.get(3), response)) .verifyComplete(); StepVerifier.create(client.listRevisions(new SettingSelector().setKeyFilter(key + "," + key2))) .consumeNextWith(selected::add) .consumeNextWith(selected::add) .consumeNextWith(selected::add) .consumeNextWith(selected::add) .verifyComplete(); return selected; }); } /** * Verifies that we can get all revisions for all settings with the specified labels. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.data.appconfiguration.TestHelper public void listRevisionsWithMultipleLabels(HttpClient httpClient, ConfigurationServiceVersion serviceVersion) { client = getConfigurationAsyncClient(httpClient, serviceVersion); String key = getKey(); String label = getLabel(); String label2 = getLabel(); listRevisionsWithMultipleLabelsRunner(key, label, label2, (testInput) -> { List<ConfigurationSetting> selected = new ArrayList<>(); StepVerifier.create(client.addConfigurationSettingWithResponse(testInput.get(0))) .assertNext(response -> assertConfigurationEquals(testInput.get(0), response)) .verifyComplete(); StepVerifier.create(client.setConfigurationSettingWithResponse(testInput.get(1), false)) .assertNext(response -> assertConfigurationEquals(testInput.get(1), response)) .verifyComplete(); StepVerifier.create(client.addConfigurationSettingWithResponse(testInput.get(2))) .assertNext(response -> assertConfigurationEquals(testInput.get(2), response)) .verifyComplete(); StepVerifier.create(client.setConfigurationSettingWithResponse(testInput.get(3), false)) .assertNext(response -> assertConfigurationEquals(testInput.get(3), response)) .verifyComplete(); StepVerifier.create(client.listRevisions(new SettingSelector().setKeyFilter(key).setLabelFilter(label + "," + label2))) .consumeNextWith(selected::add) .consumeNextWith(selected::add) .consumeNextWith(selected::add) .consumeNextWith(selected::add) .verifyComplete(); return selected; }); } /** * Verifies that we can get a subset 
of revisions based on the "acceptDateTime" */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.data.appconfiguration.TestHelper public void listRevisionsAcceptDateTime(HttpClient httpClient, ConfigurationServiceVersion serviceVersion) { client = getConfigurationAsyncClient(httpClient, serviceVersion); final String keyName = testResourceNamer.randomName(keyPrefix, 16); final ConfigurationSetting original = new ConfigurationSetting().setKey(keyName).setValue("myValue"); final ConfigurationSetting updated = new ConfigurationSetting().setKey(original.getKey()).setValue("anotherValue"); final ConfigurationSetting updated2 = new ConfigurationSetting().setKey(original.getKey()).setValue("anotherValue2"); StepVerifier.create(client.setConfigurationSettingWithResponse(original, false)) .assertNext(response -> assertConfigurationEquals(original, response)) .verifyComplete(); StepVerifier.create(client.setConfigurationSettingWithResponse(updated, false).delayElement(Duration.ofSeconds(2))) .assertNext(response -> assertConfigurationEquals(updated, response)) .verifyComplete(); StepVerifier.create(client.setConfigurationSettingWithResponse(updated2, false)) .assertNext(response -> assertConfigurationEquals(updated2, response)) .verifyComplete(); List<ConfigurationSetting> revisions = client.listRevisions(new SettingSelector().setKeyFilter(keyName)).collectList().block(); assertNotNull(revisions); assertEquals(3, revisions.size()); SettingSelector options = new SettingSelector().setKeyFilter(keyName).setAcceptDatetime(revisions.get(1).getLastModified()); StepVerifier.create(client.listRevisions(options)) .assertNext(response -> assertConfigurationEquals(updated, response)) .assertNext(response -> assertConfigurationEquals(original, response)) .verifyComplete(); } /** * Verifies that, given a ton of revisions, we can list the revisions ConfigurationSettings using pagination * (ie. where 'nextLink' has a URL pointing to the next page of results.) 
*/ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.data.appconfiguration.TestHelper public void listRevisionsWithPagination(HttpClient httpClient, ConfigurationServiceVersion serviceVersion) { client = getConfigurationAsyncClient(httpClient, serviceVersion); final int numberExpected = 50; List<ConfigurationSetting> settings = new ArrayList<>(numberExpected); for (int value = 0; value < numberExpected; value++) { settings.add(new ConfigurationSetting().setKey(keyPrefix).setValue("myValue" + value).setLabel(labelPrefix)); } List<Mono<Response<ConfigurationSetting>>> results = new ArrayList<>(); for (ConfigurationSetting setting : settings) { results.add(client.setConfigurationSettingWithResponse(setting, false)); } SettingSelector filter = new SettingSelector().setKeyFilter(keyPrefix).setLabelFilter(labelPrefix); Flux.merge(results).blockLast(); StepVerifier.create(client.listRevisions(filter)) .expectNextCount(numberExpected) .verifyComplete(); } /** * Verifies that, given a ton of revisions, we can list the revisions ConfigurationSettings using pagination and stream is invoked multiple times. * (ie. where 'nextLink' has a URL pointing to the next page of results.) 
*/ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.data.appconfiguration.TestHelper public void listRevisionsWithPaginationAndRepeatStream(HttpClient httpClient, ConfigurationServiceVersion serviceVersion) { client = getConfigurationAsyncClient(httpClient, serviceVersion); final int numberExpected = 50; List<ConfigurationSetting> settings = new ArrayList<>(numberExpected); List<Mono<Response<ConfigurationSetting>>> results = new ArrayList<>(); for (int value = 0; value < numberExpected; value++) { ConfigurationSetting setting = new ConfigurationSetting().setKey(keyPrefix).setValue("myValue" + value).setLabel(labelPrefix); settings.add(setting); results.add(client.setConfigurationSettingWithResponse(setting, false)); } SettingSelector filter = new SettingSelector().setKeyFilter(keyPrefix).setLabelFilter(labelPrefix); Flux.merge(results).blockLast(); List<ConfigurationSetting> configurationSettingList1 = new ArrayList<>(); List<ConfigurationSetting> configurationSettingList2 = new ArrayList<>(); PagedFlux<ConfigurationSetting> configurationSettingPagedFlux = client.listRevisions(filter); configurationSettingPagedFlux.toStream().forEach(configurationSetting -> configurationSettingList1.add(configurationSetting)); assertEquals(numberExpected, configurationSettingList1.size()); configurationSettingPagedFlux.toStream().forEach(configurationSetting -> configurationSettingList2.add(configurationSetting)); assertEquals(numberExpected, configurationSettingList2.size()); } /** * Verifies that, given a ton of revisions, we can list the revisions ConfigurationSettings using pagination and stream is invoked multiple times. * (ie. where 'nextLink' has a URL pointing to the next page of results.) 
*/ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.data.appconfiguration.TestHelper public void listRevisionsWithPaginationAndRepeatIterator(HttpClient httpClient, ConfigurationServiceVersion serviceVersion) { client = getConfigurationAsyncClient(httpClient, serviceVersion); final int numberExpected = 50; List<ConfigurationSetting> settings = new ArrayList<>(numberExpected); List<Mono<Response<ConfigurationSetting>>> results = new ArrayList<>(); for (int value = 0; value < numberExpected; value++) { ConfigurationSetting setting = new ConfigurationSetting().setKey(keyPrefix).setValue("myValue" + value).setLabel(labelPrefix); settings.add(setting); results.add(client.setConfigurationSettingWithResponse(setting, false)); } SettingSelector filter = new SettingSelector().setKeyFilter(keyPrefix).setLabelFilter(labelPrefix); Flux.merge(results).blockLast(); List<ConfigurationSetting> configurationSettingList1 = new ArrayList<>(); List<ConfigurationSetting> configurationSettingList2 = new ArrayList<>(); PagedFlux<ConfigurationSetting> configurationSettingPagedFlux = client.listRevisions(filter); configurationSettingPagedFlux.toIterable().forEach(configurationSetting -> configurationSettingList1.add(configurationSetting)); assertEquals(numberExpected, configurationSettingList1.size()); configurationSettingPagedFlux.toIterable().forEach(configurationSetting -> configurationSettingList2.add(configurationSetting)); assertEquals(numberExpected, configurationSettingList2.size()); } /** * Verifies that, given a ton of existing settings, we can list the ConfigurationSettings using pagination * (ie. where 'nextLink' has a URL pointing to the next page of results. 
*/ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.data.appconfiguration.TestHelper public void listConfigurationSettingsWithPagination(HttpClient httpClient, ConfigurationServiceVersion serviceVersion) { client = getConfigurationAsyncClient(httpClient, serviceVersion); final int numberExpected = 50; List<ConfigurationSetting> settings = new ArrayList<>(numberExpected); for (int value = 0; value < numberExpected; value++) { settings.add(new ConfigurationSetting().setKey(keyPrefix + "-" + value).setValue("myValue").setLabel(labelPrefix)); } List<Mono<Response<ConfigurationSetting>>> results = new ArrayList<>(); for (ConfigurationSetting setting : settings) { results.add(client.setConfigurationSettingWithResponse(setting, false)); } SettingSelector filter = new SettingSelector().setKeyFilter(keyPrefix + "-*").setLabelFilter(labelPrefix); Flux.merge(results).blockLast(); StepVerifier.create(client.listConfigurationSettings(filter)) .expectNextCount(numberExpected) .verifyComplete(); } /** * Verifies the conditional "GET" scenario where the setting has yet to be updated, resulting in a 304. This GET * scenario will return a setting when the ETag provided does not match the one of the current setting. 
*/ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.data.appconfiguration.TestHelper public void getConfigurationSettingWhenValueNotUpdated(HttpClient httpClient, ConfigurationServiceVersion serviceVersion) { client = getConfigurationAsyncClient(httpClient, serviceVersion); final String key = getKey(); final ConfigurationSetting expected = new ConfigurationSetting().setKey(key).setValue("myValue"); final ConfigurationSetting newExpected = new ConfigurationSetting().setKey(key).setValue("myNewValue"); final ConfigurationSetting block = client.addConfigurationSettingWithResponse(expected).block().getValue(); assertNotNull(block); StepVerifier.create(client.getConfigurationSettingWithResponse(block, null, true)) .assertNext(response -> assertConfigurationEquals(null, response, 304)) .verifyComplete(); StepVerifier.create(client.setConfigurationSettingWithResponse(newExpected, false)) .assertNext(response -> assertConfigurationEquals(newExpected, response)) .verifyComplete(); StepVerifier.create(client.getConfigurationSettingWithResponse(block, null, true)) .assertNext(response -> assertConfigurationEquals(newExpected, response)) .verifyComplete(); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.data.appconfiguration.TestHelper @Disabled public void deleteAllSettings(HttpClient httpClient, ConfigurationServiceVersion serviceVersion) { client = getConfigurationAsyncClient(httpClient, serviceVersion); client.listConfigurationSettings(new SettingSelector().setKeyFilter("*")) .flatMap(configurationSetting -> { logger.info("Deleting key:label [{}:{}]. isReadOnly? 
{}", configurationSetting.getKey(), configurationSetting.getLabel(), configurationSetting.isReadOnly()); return client.deleteConfigurationSettingWithResponse(configurationSetting, false); }).blockLast(); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.data.appconfiguration.TestHelper public void addHeadersFromContextPolicyTest(HttpClient httpClient, ConfigurationServiceVersion serviceVersion) { client = getConfigurationAsyncClient(httpClient, serviceVersion); final HttpHeaders headers = getCustomizedHeaders(); addHeadersFromContextPolicyRunner(expected -> StepVerifier.create(client.addConfigurationSettingWithResponse(expected) .subscriberContext(Context.of(AddHeadersFromContextPolicy.AZURE_REQUEST_HTTP_HEADERS_KEY, headers))) .assertNext(response -> { final HttpHeaders requestHeaders = response.getRequest().getHeaders(); assertContainsHeaders(headers, requestHeaders); }) .verifyComplete()); } /** * Test helper that calling list configuration setting with given key and label input * * @param keyFilter key filter expression * @param labelFilter label filter expression */ private void filterValueTest(String keyFilter, String labelFilter) { listConfigurationSettingsSelectFieldsWithNotSupportedFilterRunner(keyFilter, labelFilter, selector -> StepVerifier.create(client.listConfigurationSettings(selector)) .verifyError(HttpResponseException.class)); } }
class ConfigurationAsyncClientTest extends ConfigurationClientTestBase { private final ClientLogger logger = new ClientLogger(ConfigurationAsyncClientTest.class); private static final String NO_LABEL = null; private ConfigurationAsyncClient client; @Override protected String getTestName() { return ""; } @Override protected void beforeTest() { beforeTestSetup(); } @Override protected void afterTest() { logger.info("Cleaning up created key values."); client.listConfigurationSettings(new SettingSelector().setKeyFilter(keyPrefix + "*")) .flatMap(configurationSetting -> { logger.info("Deleting key:label [{}:{}]. isReadOnly? {}", configurationSetting.getKey(), configurationSetting.getLabel(), configurationSetting.isReadOnly()); Mono<Response<ConfigurationSetting>> unlock = configurationSetting.isReadOnly() ? client.setReadOnlyWithResponse(configurationSetting, false) : Mono.empty(); return unlock.then(client.deleteConfigurationSettingWithResponse(configurationSetting, false)); }) .blockLast(); logger.info("Finished cleaning up values."); } private ConfigurationAsyncClient getConfigurationAsyncClient(HttpClient httpClient, ConfigurationServiceVersion serviceVersion) { return clientSetup(credentials -> { ConfigurationClientBuilder builder = new ConfigurationClientBuilder() .connectionString(connectionString) .httpClient(httpClient == null ? interceptorManager.getPlaybackClient() : httpClient) .serviceVersion(serviceVersion) .httpLogOptions(new HttpLogOptions().setLogLevel(HttpLogDetailLevel.BODY_AND_HEADERS)); if (getTestMode() != TestMode.PLAYBACK) { builder .addPolicy(interceptorManager.getRecordPolicy()) .addPolicy(new RetryPolicy()); } return builder.buildAsyncClient(); }); } /** * Tests that a configuration is able to be added, these are differentiate from each other using a key or key-label * identifier. 
*/ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.data.appconfiguration.TestHelper public void addConfigurationSetting(HttpClient httpClient, ConfigurationServiceVersion serviceVersion) { client = getConfigurationAsyncClient(httpClient, serviceVersion); addConfigurationSettingRunner((expected) -> StepVerifier.create(client.addConfigurationSettingWithResponse(expected)) .assertNext(response -> assertConfigurationEquals(expected, response)) .verifyComplete()); } /** * Tests that we cannot add a configuration setting when the key is an empty string. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.data.appconfiguration.TestHelper public void addConfigurationSettingEmptyKey(HttpClient httpClient, ConfigurationServiceVersion serviceVersion) { client = getConfigurationAsyncClient(httpClient, serviceVersion); StepVerifier.create(client.addConfigurationSetting("", null, "A value")) .verifyErrorSatisfies(ex -> assertRestException(ex, HttpURLConnection.HTTP_BAD_METHOD)); } /** * Tests that we can add configuration settings when value is not null or an empty string. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.data.appconfiguration.TestHelper public void addConfigurationSettingEmptyValue(HttpClient httpClient, ConfigurationServiceVersion serviceVersion) { client = getConfigurationAsyncClient(httpClient, serviceVersion); addConfigurationSettingEmptyValueRunner((setting) -> { StepVerifier.create(client.addConfigurationSetting(setting.getKey(), setting.getLabel(), setting.getValue())) .assertNext(response -> assertConfigurationEquals(setting, response)) .verifyComplete(); StepVerifier.create(client.getConfigurationSetting(setting.getKey(), setting.getLabel(), null)) .assertNext(response -> assertConfigurationEquals(setting, response)) .verifyComplete(); }); } /** * Verifies that an exception is thrown when null key is passed. 
*/ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.data.appconfiguration.TestHelper public void addConfigurationSettingNullKey(HttpClient httpClient, ConfigurationServiceVersion serviceVersion) { client = getConfigurationAsyncClient(httpClient, serviceVersion); StepVerifier.create(client.addConfigurationSetting(null, null, "A Value")) .expectError(IllegalArgumentException.class) .verify(); StepVerifier.create(client.addConfigurationSettingWithResponse(null)) .expectError(NullPointerException.class) .verify(); } /** * Tests that a configuration cannot be added twice with the same key. This should return a 412 error. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.data.appconfiguration.TestHelper public void addExistingSetting(HttpClient httpClient, ConfigurationServiceVersion serviceVersion) { client = getConfigurationAsyncClient(httpClient, serviceVersion); addExistingSettingRunner((expected) -> StepVerifier.create(client.addConfigurationSettingWithResponse(expected).then(client.addConfigurationSettingWithResponse(expected))) .verifyErrorSatisfies(ex -> assertRestException(ex, ResourceExistsException.class, HttpURLConnection.HTTP_PRECON_FAILED))); } /** * Tests that a configuration is able to be added or updated with set. * When the configuration is read-only updates cannot happen, this will result in a 409. 
*/ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.data.appconfiguration.TestHelper public void setConfigurationSetting(HttpClient httpClient, ConfigurationServiceVersion serviceVersion) { client = getConfigurationAsyncClient(httpClient, serviceVersion); setConfigurationSettingRunner((expected, update) -> StepVerifier.create(client.setConfigurationSettingWithResponse(expected, false)) .assertNext(response -> assertConfigurationEquals(expected, response)) .verifyComplete()); } /** * Tests that when an ETag is passed to set it will only set if the current representation of the setting has the * ETag. If the set ETag doesn't match anything the update won't happen, this will result in a 412. This will * prevent set from doing an add as well. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.data.appconfiguration.TestHelper public void setConfigurationSettingIfETag(HttpClient httpClient, ConfigurationServiceVersion serviceVersion) { client = getConfigurationAsyncClient(httpClient, serviceVersion); setConfigurationSettingIfETagRunner((initial, update) -> { StepVerifier.create(client.setConfigurationSettingWithResponse(initial.setETag("badEtag"), true)) .verifyErrorSatisfies(ex -> assertRestException(ex, HttpResponseException.class, HttpURLConnection.HTTP_PRECON_FAILED)); final String etag = client.addConfigurationSettingWithResponse(initial).block().getValue().getETag(); StepVerifier.create(client.setConfigurationSettingWithResponse(update.setETag(etag), true)) .assertNext(response -> assertConfigurationEquals(update, response)) .verifyComplete(); StepVerifier.create(client.setConfigurationSettingWithResponse(initial, true)) .verifyErrorSatisfies(ex -> assertRestException(ex, HttpResponseException.class, HttpURLConnection.HTTP_PRECON_FAILED)); StepVerifier.create(client.getConfigurationSettingWithResponse(update, null, false)) .assertNext(response -> assertConfigurationEquals(update, response)) 
.verifyComplete(); }); } /** * Tests that we cannot set a configuration setting when the key is an empty string. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.data.appconfiguration.TestHelper public void setConfigurationSettingEmptyKey(HttpClient httpClient, ConfigurationServiceVersion serviceVersion) { client = getConfigurationAsyncClient(httpClient, serviceVersion); StepVerifier.create(client.setConfigurationSetting("", NO_LABEL, "A value")) .verifyErrorSatisfies(ex -> assertRestException(ex, HttpURLConnection.HTTP_BAD_METHOD)); } /** * Tests that we can set configuration settings when value is not null or an empty string. * Value is not a required property. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.data.appconfiguration.TestHelper public void setConfigurationSettingEmptyValue(HttpClient httpClient, ConfigurationServiceVersion serviceVersion) { client = getConfigurationAsyncClient(httpClient, serviceVersion); setConfigurationSettingEmptyValueRunner((setting) -> { StepVerifier.create(client.setConfigurationSetting(setting.getKey(), NO_LABEL, setting.getValue())) .assertNext(response -> assertConfigurationEquals(setting, response)) .verifyComplete(); StepVerifier.create(client.getConfigurationSetting(setting.getKey(), setting.getLabel(), null)) .assertNext(response -> assertConfigurationEquals(setting, response)) .verifyComplete(); }); } /** * Verifies that an exception is thrown when null key is passed. 
*/ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.data.appconfiguration.TestHelper public void setConfigurationSettingNullKey(HttpClient httpClient, ConfigurationServiceVersion serviceVersion) { client = getConfigurationAsyncClient(httpClient, serviceVersion); StepVerifier.create(client.setConfigurationSetting(null, NO_LABEL, "A Value")) .verifyError(IllegalArgumentException.class); StepVerifier.create(client.setConfigurationSettingWithResponse(null, false)) .verifyError(NullPointerException.class); } /** * Tests that a configuration is able to be retrieved when it exists, whether or not it is read-only. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.data.appconfiguration.TestHelper public void getConfigurationSetting(HttpClient httpClient, ConfigurationServiceVersion serviceVersion) { client = getConfigurationAsyncClient(httpClient, serviceVersion); getConfigurationSettingRunner((expected) -> StepVerifier.create(client.addConfigurationSettingWithResponse(expected).then(client.getConfigurationSettingWithResponse(expected, null, false))) .assertNext(response -> assertConfigurationEquals(expected, response)) .verifyComplete()); } /** * Tests that attempting to retrieve a non-existent configuration doesn't work, this will result in a 404. 
*/ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.data.appconfiguration.TestHelper public void getConfigurationSettingNotFound(HttpClient httpClient, ConfigurationServiceVersion serviceVersion) { client = getConfigurationAsyncClient(httpClient, serviceVersion); final String key = getKey(); final ConfigurationSetting neverRetrievedConfiguration = new ConfigurationSetting().setKey(key).setValue("myNeverRetreivedValue"); final ConfigurationSetting nonExistentLabel = new ConfigurationSetting().setKey(key).setLabel("myNonExistentLabel"); StepVerifier.create(client.addConfigurationSettingWithResponse(neverRetrievedConfiguration)) .assertNext(response -> assertConfigurationEquals(neverRetrievedConfiguration, response)) .verifyComplete(); StepVerifier.create(client.getConfigurationSetting("myNonExistentKey", null, null)) .verifyErrorSatisfies(ex -> assertRestException(ex, ResourceNotFoundException.class, HttpURLConnection.HTTP_NOT_FOUND)); StepVerifier.create(client.getConfigurationSettingWithResponse(nonExistentLabel, null, false)) .verifyErrorSatisfies(ex -> assertRestException(ex, ResourceNotFoundException.class, HttpURLConnection.HTTP_NOT_FOUND)); } /** * Tests that configurations are able to be deleted when they exist. * After the configuration has been deleted attempting to get it will result in a 404, the same as if the * configuration never existed. 
*/ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.data.appconfiguration.TestHelper public void deleteConfigurationSetting(HttpClient httpClient, ConfigurationServiceVersion serviceVersion) { client = getConfigurationAsyncClient(httpClient, serviceVersion); deleteConfigurationSettingRunner((expected) -> { StepVerifier.create(client.addConfigurationSettingWithResponse(expected).then(client.getConfigurationSettingWithResponse(expected, null, false))) .assertNext(response -> assertConfigurationEquals(expected, response)) .verifyComplete(); StepVerifier.create(client.deleteConfigurationSettingWithResponse(expected, false)) .assertNext(response -> assertConfigurationEquals(expected, response)) .verifyComplete(); StepVerifier.create(client.getConfigurationSettingWithResponse(expected, null, false)) .verifyErrorSatisfies(ex -> assertRestException(ex, ResourceNotFoundException.class, HttpURLConnection.HTTP_NOT_FOUND)); }); } /** * Tests that attempting to delete a non-existent configuration will return a 204. 
*/ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.data.appconfiguration.TestHelper public void deleteConfigurationSettingNotFound(HttpClient httpClient, ConfigurationServiceVersion serviceVersion) { client = getConfigurationAsyncClient(httpClient, serviceVersion); final String key = getKey(); final ConfigurationSetting neverDeletedConfiguration = new ConfigurationSetting().setKey(key).setValue("myNeverDeletedValue"); StepVerifier.create(client.addConfigurationSettingWithResponse(neverDeletedConfiguration)) .assertNext(response -> assertConfigurationEquals(neverDeletedConfiguration, response)) .verifyComplete(); StepVerifier.create(client.deleteConfigurationSettingWithResponse(new ConfigurationSetting().setKey("myNonExistentKey"), false)) .assertNext(response -> assertConfigurationEquals(null, response, HttpURLConnection.HTTP_NO_CONTENT)) .verifyComplete(); StepVerifier.create(client.deleteConfigurationSettingWithResponse(new ConfigurationSetting().setKey(neverDeletedConfiguration.getKey()).setLabel("myNonExistentLabel"), false)) .assertNext(response -> assertConfigurationEquals(null, response, HttpURLConnection.HTTP_NO_CONTENT)) .verifyComplete(); StepVerifier.create(client.getConfigurationSetting(neverDeletedConfiguration.getKey(), neverDeletedConfiguration.getLabel(), null)) .assertNext(response -> assertConfigurationEquals(neverDeletedConfiguration, response)) .verifyComplete(); } /** * Tests that when an ETag is passed to delete it will only delete if the current representation of the setting has the ETag. * If the delete ETag doesn't match anything the delete won't happen, this will result in a 412. 
*/ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.data.appconfiguration.TestHelper public void deleteConfigurationSettingWithETag(HttpClient httpClient, ConfigurationServiceVersion serviceVersion) { client = getConfigurationAsyncClient(httpClient, serviceVersion); deleteConfigurationSettingWithETagRunner((initial, update) -> { final ConfigurationSetting initiallyAddedConfig = client.addConfigurationSettingWithResponse(initial).block().getValue(); final ConfigurationSetting updatedConfig = client.setConfigurationSettingWithResponse(update, true).block().getValue(); StepVerifier.create(client.getConfigurationSettingWithResponse(initial, null, false)) .assertNext(response -> assertConfigurationEquals(update, response)) .verifyComplete(); StepVerifier.create(client.deleteConfigurationSettingWithResponse(initiallyAddedConfig, true)) .verifyErrorSatisfies(ex -> assertRestException(ex, HttpResponseException.class, HttpURLConnection.HTTP_PRECON_FAILED)); StepVerifier.create(client.deleteConfigurationSettingWithResponse(updatedConfig, true)) .assertNext(response -> assertConfigurationEquals(update, response)) .verifyComplete(); StepVerifier.create(client.getConfigurationSettingWithResponse(initial, null, false)) .verifyErrorSatisfies(ex -> assertRestException(ex, ResourceNotFoundException.class, HttpURLConnection.HTTP_NOT_FOUND)); }); } /** * Test the API will not make a delete call without having a key passed, an IllegalArgumentException should be thrown. 
*/ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.data.appconfiguration.TestHelper public void deleteConfigurationSettingNullKey(HttpClient httpClient, ConfigurationServiceVersion serviceVersion) { client = getConfigurationAsyncClient(httpClient, serviceVersion); StepVerifier.create(client.deleteConfigurationSetting(null, null)) .verifyError(IllegalArgumentException.class); StepVerifier.create(client.deleteConfigurationSettingWithResponse(null, false)) .verifyError(NullPointerException.class); } /** * Tests assert that the setting can not be deleted after set the setting to read-only. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.data.appconfiguration.TestHelper public void setReadOnly(HttpClient httpClient, ConfigurationServiceVersion serviceVersion) { client = getConfigurationAsyncClient(httpClient, serviceVersion); lockUnlockRunner((expected) -> { StepVerifier.create(client.addConfigurationSettingWithResponse(expected)) .assertNext(response -> assertConfigurationEquals(expected, response)) .verifyComplete(); StepVerifier.create(client.setReadOnly(expected.getKey(), expected.getLabel(), true)) .assertNext(response -> assertConfigurationEquals(expected, response)) .verifyComplete(); StepVerifier.create(client.deleteConfigurationSettingWithResponse(expected, false)) .verifyErrorSatisfies(ex -> assertRestException(ex, HttpResponseException.class, 409)); }); } /** * Tests assert that the setting can be deleted after clear read-only of the setting. 
*/ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.data.appconfiguration.TestHelper public void clearReadOnly(HttpClient httpClient, ConfigurationServiceVersion serviceVersion) { client = getConfigurationAsyncClient(httpClient, serviceVersion); lockUnlockRunner((expected) -> { StepVerifier.create(client.addConfigurationSettingWithResponse(expected)) .assertNext(response -> assertConfigurationEquals(expected, response)) .verifyComplete(); StepVerifier.create(client.setReadOnly(expected.getKey(), expected.getLabel(), true)) .assertNext(response -> assertConfigurationEquals(expected, response)) .verifyComplete(); StepVerifier.create(client.deleteConfigurationSettingWithResponse(expected, false)) .verifyErrorSatisfies(ex -> assertRestException(ex, HttpResponseException.class, 409)); StepVerifier.create(client.setReadOnly(expected.getKey(), expected.getLabel(), false)) .assertNext(response -> assertConfigurationEquals(expected, response)) .verifyComplete(); StepVerifier.create(client.deleteConfigurationSettingWithResponse(expected, false)) .assertNext(response -> assertConfigurationEquals(expected, response)) .verifyComplete(); }); } /** * Tests assert that the setting can not be deleted after set the setting to read-only. 
*/ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.data.appconfiguration.TestHelper public void setReadOnlyWithConfigurationSetting(HttpClient httpClient, ConfigurationServiceVersion serviceVersion) { client = getConfigurationAsyncClient(httpClient, serviceVersion); lockUnlockRunner((expected) -> { StepVerifier.create(client.addConfigurationSettingWithResponse(expected)) .assertNext(response -> assertConfigurationEquals(expected, response)) .verifyComplete(); StepVerifier.create(client.setReadOnlyWithResponse(expected, true)) .assertNext(response -> assertConfigurationEquals(expected, response)) .verifyComplete(); StepVerifier.create(client.deleteConfigurationSettingWithResponse(expected, false)) .verifyErrorSatisfies(ex -> assertRestException(ex, HttpResponseException.class, 409)); }); } /** * Tests assert that the setting can be deleted after clear read-only of the setting. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.data.appconfiguration.TestHelper public void clearReadOnlyWithConfigurationSetting(HttpClient httpClient, ConfigurationServiceVersion serviceVersion) { client = getConfigurationAsyncClient(httpClient, serviceVersion); lockUnlockRunner((expected) -> { StepVerifier.create(client.addConfigurationSettingWithResponse(expected)) .assertNext(response -> assertConfigurationEquals(expected, response)) .verifyComplete(); StepVerifier.create(client.setReadOnly(expected.getKey(), expected.getLabel(), true)) .assertNext(response -> assertConfigurationEquals(expected, response)) .verifyComplete(); StepVerifier.create(client.deleteConfigurationSettingWithResponse(expected, false)) .verifyErrorSatisfies(ex -> assertRestException(ex, HttpResponseException.class, 409)); StepVerifier.create(client.setReadOnlyWithResponse(expected, false)) .assertNext(response -> assertConfigurationEquals(expected, response)) .verifyComplete(); 
StepVerifier.create(client.deleteConfigurationSettingWithResponse(expected, false)) .assertNext(response -> assertConfigurationEquals(expected, response)) .verifyComplete(); }); } /** * Verifies that a ConfigurationSetting can be added with a label, and that we can fetch that ConfigurationSetting * from the service when filtering by either its label or just its key. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.data.appconfiguration.TestHelper public void listWithKeyAndLabel(HttpClient httpClient, ConfigurationServiceVersion serviceVersion) { client = getConfigurationAsyncClient(httpClient, serviceVersion); final String value = "myValue"; final String key = testResourceNamer.randomName(keyPrefix, 16); final String label = testResourceNamer.randomName("lbl", 8); final ConfigurationSetting expected = new ConfigurationSetting().setKey(key).setValue(value).setLabel(label); StepVerifier.create(client.setConfigurationSettingWithResponse(expected, false)) .assertNext(response -> assertConfigurationEquals(expected, response)) .verifyComplete(); StepVerifier.create(client.listConfigurationSettings(new SettingSelector().setKeyFilter(key).setLabelFilter(label))) .assertNext(configurationSetting -> assertConfigurationEquals(expected, configurationSetting)) .verifyComplete(); StepVerifier.create(client.listConfigurationSettings(new SettingSelector().setKeyFilter(key))) .assertNext(configurationSetting -> assertConfigurationEquals(expected, configurationSetting)) .verifyComplete(); } /** * Verifies that ConfigurationSettings can be added and that we can fetch those ConfigurationSettings from the * service when filtering by their keys. 
*/ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.data.appconfiguration.TestHelper public void listWithMultipleKeys(HttpClient httpClient, ConfigurationServiceVersion serviceVersion) { client = getConfigurationAsyncClient(httpClient, serviceVersion); String key = getKey(); String key2 = getKey(); listWithMultipleKeysRunner(key, key2, (setting, setting2) -> { List<ConfigurationSetting> selected = new ArrayList<>(); StepVerifier.create(client.addConfigurationSettingWithResponse(setting)) .assertNext(response -> assertConfigurationEquals(setting, response)) .verifyComplete(); StepVerifier.create(client.addConfigurationSettingWithResponse(setting2)) .assertNext(response -> assertConfigurationEquals(setting2, response)) .verifyComplete(); StepVerifier.create(client.listConfigurationSettings(new SettingSelector().setKeyFilter(key + "," + key2))) .consumeNextWith(selected::add) .consumeNextWith(selected::add) .verifyComplete(); return selected; }); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.data.appconfiguration.TestHelper /** * Verifies that ConfigurationSettings can be added with different labels and that we can fetch those ConfigurationSettings * from the service when filtering by their labels. 
*/ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.data.appconfiguration.TestHelper public void listWithMultipleLabels(HttpClient httpClient, ConfigurationServiceVersion serviceVersion) { client = getConfigurationAsyncClient(httpClient, serviceVersion); String key = getKey(); String label = getLabel(); String label2 = getLabel(); listWithMultipleLabelsRunner(key, label, label2, (setting, setting2) -> { List<ConfigurationSetting> selected = new ArrayList<>(); StepVerifier.create(client.addConfigurationSettingWithResponse(setting)) .assertNext(response -> assertConfigurationEquals(setting, response)) .verifyComplete(); StepVerifier.create(client.addConfigurationSettingWithResponse(setting2)) .assertNext(response -> assertConfigurationEquals(setting2, response)) .verifyComplete(); StepVerifier.create(client.listConfigurationSettings(new SettingSelector().setKeyFilter(key).setLabelFilter(label + "," + label2))) .consumeNextWith(selected::add) .consumeNextWith(selected::add) .verifyComplete(); return selected; }); } /** * Verifies that we can select filter results by key, label, and select fields using SettingSelector. 
*/ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.data.appconfiguration.TestHelper public void listConfigurationSettingsSelectFields(HttpClient httpClient, ConfigurationServiceVersion serviceVersion) { client = getConfigurationAsyncClient(httpClient, serviceVersion); listConfigurationSettingsSelectFieldsRunner((settings, selector) -> { final List<Mono<Response<ConfigurationSetting>>> settingsBeingAdded = new ArrayList<>(); for (ConfigurationSetting setting : settings) { settingsBeingAdded.add(client.setConfigurationSettingWithResponse(setting, false)); } Flux.merge(settingsBeingAdded).blockLast(); List<ConfigurationSetting> settingsReturned = new ArrayList<>(); StepVerifier.create(client.listConfigurationSettings(selector)) .assertNext(settingsReturned::add) .assertNext(settingsReturned::add) .verifyComplete(); return settingsReturned; }); } /** * Verifies that throws exception when using SettingSelector with not supported *a key filter. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.data.appconfiguration.TestHelper public void listConfigurationSettingsSelectFieldsWithPrefixStarKeyFilter(HttpClient httpClient, ConfigurationServiceVersion serviceVersion) { client = getConfigurationAsyncClient(httpClient, serviceVersion); filterValueTest("*" + getKey(), getLabel()); } /** * Verifies that throws exception when using SettingSelector with not supported *a* key filter. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.data.appconfiguration.TestHelper public void listConfigurationSettingsSelectFieldsWithSubstringKeyFilter(HttpClient httpClient, ConfigurationServiceVersion serviceVersion) { client = getConfigurationAsyncClient(httpClient, serviceVersion); filterValueTest("*" + getKey() + "*", getLabel()); } /** * Verifies that throws exception when using SettingSelector with not supported *a label filter. 
*/ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.data.appconfiguration.TestHelper public void listConfigurationSettingsSelectFieldsWithPrefixStarLabelFilter(HttpClient httpClient, ConfigurationServiceVersion serviceVersion) { client = getConfigurationAsyncClient(httpClient, serviceVersion); filterValueTest(getKey(), "*" + getLabel()); } /** * Verifies that throws exception when using SettingSelector with not supported *a* label filter. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.data.appconfiguration.TestHelper public void listConfigurationSettingsSelectFieldsWithSubstringLabelFilter(HttpClient httpClient, ConfigurationServiceVersion serviceVersion) { client = getConfigurationAsyncClient(httpClient, serviceVersion); filterValueTest(getKey(), "*" + getLabel() + "*"); } /** * Verifies that we can get a ConfigurationSetting at the provided accept datetime */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.data.appconfiguration.TestHelper public void listConfigurationSettingsAcceptDateTime(HttpClient httpClient, ConfigurationServiceVersion serviceVersion) { client = getConfigurationAsyncClient(httpClient, serviceVersion); final String keyName = testResourceNamer.randomName(keyPrefix, 16); final ConfigurationSetting original = new ConfigurationSetting().setKey(keyName).setValue("myValue"); final ConfigurationSetting updated = new ConfigurationSetting().setKey(original.getKey()).setValue("anotherValue"); final ConfigurationSetting updated2 = new ConfigurationSetting().setKey(original.getKey()).setValue("anotherValue2"); StepVerifier.create(client.setConfigurationSettingWithResponse(original, false)) .assertNext(response -> assertConfigurationEquals(original, response)) .verifyComplete(); StepVerifier.create(client.setConfigurationSettingWithResponse(updated, false).delayElement(Duration.ofSeconds(2))) .assertNext(response -> assertConfigurationEquals(updated, 
response)) .verifyComplete(); StepVerifier.create(client.setConfigurationSettingWithResponse(updated2, false)) .assertNext(response -> assertConfigurationEquals(updated2, response)) .verifyComplete(); List<ConfigurationSetting> revisions = client.listRevisions(new SettingSelector().setKeyFilter(keyName)).collectList().block(); assertNotNull(revisions); assertEquals(3, revisions.size()); SettingSelector options = new SettingSelector().setKeyFilter(keyName).setAcceptDatetime(revisions.get(1).getLastModified()); StepVerifier.create(client.listConfigurationSettings(options)) .assertNext(response -> assertConfigurationEquals(updated, response)) .verifyComplete(); } /** * Verifies that we can get all of the revisions for this ConfigurationSetting. Then verifies that we can select * specific fields. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.data.appconfiguration.TestHelper public void listRevisions(HttpClient httpClient, ConfigurationServiceVersion serviceVersion) { client = getConfigurationAsyncClient(httpClient, serviceVersion); final String keyName = testResourceNamer.randomName(keyPrefix, 16); final ConfigurationSetting original = new ConfigurationSetting().setKey(keyName).setValue("myValue"); final ConfigurationSetting updated = new ConfigurationSetting().setKey(original.getKey()).setValue("anotherValue"); final ConfigurationSetting updated2 = new ConfigurationSetting().setKey(original.getKey()).setValue("anotherValue2"); StepVerifier.create(client.setConfigurationSettingWithResponse(original, false)) .assertNext(response -> assertConfigurationEquals(original, response)) .verifyComplete(); StepVerifier.create(client.setConfigurationSettingWithResponse(updated, false)) .assertNext(response -> assertConfigurationEquals(updated, response)) .verifyComplete(); StepVerifier.create(client.setConfigurationSettingWithResponse(updated2, false)) .assertNext(response -> assertConfigurationEquals(updated2, response)) .verifyComplete(); 
StepVerifier.create(client.listRevisions(new SettingSelector().setKeyFilter(keyName))) .assertNext(response -> assertConfigurationEquals(updated2, response)) .assertNext(response -> assertConfigurationEquals(updated, response)) .assertNext(response -> assertConfigurationEquals(original, response)) .verifyComplete(); StepVerifier.create(client.listRevisions(new SettingSelector().setKeyFilter(keyName).setFields(SettingFields.KEY, SettingFields.ETAG))) .assertNext(response -> validateListRevisions(updated2, response)) .assertNext(response -> validateListRevisions(updated, response)) .assertNext(response -> validateListRevisions(original, response)) .verifyComplete(); assertTrue(client.listRevisions(null).toStream().collect(Collectors.toList()).size() > 0); } /** * Verifies that we can get all the revisions for all settings with the specified keys. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.data.appconfiguration.TestHelper public void listRevisionsWithMultipleKeys(HttpClient httpClient, ConfigurationServiceVersion serviceVersion) { client = getConfigurationAsyncClient(httpClient, serviceVersion); String key = getKey(); String key2 = getKey(); listRevisionsWithMultipleKeysRunner(key, key2, (testInput) -> { List<ConfigurationSetting> selected = new ArrayList<>(); StepVerifier.create(client.addConfigurationSettingWithResponse(testInput.get(0))) .assertNext(response -> assertConfigurationEquals(testInput.get(0), response)) .verifyComplete(); StepVerifier.create(client.setConfigurationSettingWithResponse(testInput.get(1), false)) .assertNext(response -> assertConfigurationEquals(testInput.get(1), response)) .verifyComplete(); StepVerifier.create(client.addConfigurationSettingWithResponse(testInput.get(2))) .assertNext(response -> assertConfigurationEquals(testInput.get(2), response)) .verifyComplete(); StepVerifier.create(client.setConfigurationSettingWithResponse(testInput.get(3), false)) .assertNext(response -> 
assertConfigurationEquals(testInput.get(3), response)) .verifyComplete(); StepVerifier.create(client.listRevisions(new SettingSelector().setKeyFilter(key + "," + key2))) .consumeNextWith(selected::add) .consumeNextWith(selected::add) .consumeNextWith(selected::add) .consumeNextWith(selected::add) .verifyComplete(); return selected; }); } /** * Verifies that we can get all revisions for all settings with the specified labels. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.data.appconfiguration.TestHelper public void listRevisionsWithMultipleLabels(HttpClient httpClient, ConfigurationServiceVersion serviceVersion) { client = getConfigurationAsyncClient(httpClient, serviceVersion); String key = getKey(); String label = getLabel(); String label2 = getLabel(); listRevisionsWithMultipleLabelsRunner(key, label, label2, (testInput) -> { List<ConfigurationSetting> selected = new ArrayList<>(); StepVerifier.create(client.addConfigurationSettingWithResponse(testInput.get(0))) .assertNext(response -> assertConfigurationEquals(testInput.get(0), response)) .verifyComplete(); StepVerifier.create(client.setConfigurationSettingWithResponse(testInput.get(1), false)) .assertNext(response -> assertConfigurationEquals(testInput.get(1), response)) .verifyComplete(); StepVerifier.create(client.addConfigurationSettingWithResponse(testInput.get(2))) .assertNext(response -> assertConfigurationEquals(testInput.get(2), response)) .verifyComplete(); StepVerifier.create(client.setConfigurationSettingWithResponse(testInput.get(3), false)) .assertNext(response -> assertConfigurationEquals(testInput.get(3), response)) .verifyComplete(); StepVerifier.create(client.listRevisions(new SettingSelector().setKeyFilter(key).setLabelFilter(label + "," + label2))) .consumeNextWith(selected::add) .consumeNextWith(selected::add) .consumeNextWith(selected::add) .consumeNextWith(selected::add) .verifyComplete(); return selected; }); } /** * Verifies that we can get a subset 
of revisions based on the "acceptDateTime" */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.data.appconfiguration.TestHelper public void listRevisionsAcceptDateTime(HttpClient httpClient, ConfigurationServiceVersion serviceVersion) { client = getConfigurationAsyncClient(httpClient, serviceVersion); final String keyName = testResourceNamer.randomName(keyPrefix, 16); final ConfigurationSetting original = new ConfigurationSetting().setKey(keyName).setValue("myValue"); final ConfigurationSetting updated = new ConfigurationSetting().setKey(original.getKey()).setValue("anotherValue"); final ConfigurationSetting updated2 = new ConfigurationSetting().setKey(original.getKey()).setValue("anotherValue2"); StepVerifier.create(client.setConfigurationSettingWithResponse(original, false)) .assertNext(response -> assertConfigurationEquals(original, response)) .verifyComplete(); StepVerifier.create(client.setConfigurationSettingWithResponse(updated, false).delayElement(Duration.ofSeconds(2))) .assertNext(response -> assertConfigurationEquals(updated, response)) .verifyComplete(); StepVerifier.create(client.setConfigurationSettingWithResponse(updated2, false)) .assertNext(response -> assertConfigurationEquals(updated2, response)) .verifyComplete(); List<ConfigurationSetting> revisions = client.listRevisions(new SettingSelector().setKeyFilter(keyName)).collectList().block(); assertNotNull(revisions); assertEquals(3, revisions.size()); SettingSelector options = new SettingSelector().setKeyFilter(keyName).setAcceptDatetime(revisions.get(1).getLastModified()); StepVerifier.create(client.listRevisions(options)) .assertNext(response -> assertConfigurationEquals(updated, response)) .assertNext(response -> assertConfigurationEquals(original, response)) .verifyComplete(); } /** * Verifies that, given a ton of revisions, we can list the revisions ConfigurationSettings using pagination * (ie. where 'nextLink' has a URL pointing to the next page of results.) 
*/ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.data.appconfiguration.TestHelper public void listRevisionsWithPagination(HttpClient httpClient, ConfigurationServiceVersion serviceVersion) { client = getConfigurationAsyncClient(httpClient, serviceVersion); final int numberExpected = 50; List<ConfigurationSetting> settings = new ArrayList<>(numberExpected); for (int value = 0; value < numberExpected; value++) { settings.add(new ConfigurationSetting().setKey(keyPrefix).setValue("myValue" + value).setLabel(labelPrefix)); } List<Mono<Response<ConfigurationSetting>>> results = new ArrayList<>(); for (ConfigurationSetting setting : settings) { results.add(client.setConfigurationSettingWithResponse(setting, false)); } SettingSelector filter = new SettingSelector().setKeyFilter(keyPrefix).setLabelFilter(labelPrefix); Flux.merge(results).blockLast(); StepVerifier.create(client.listRevisions(filter)) .expectNextCount(numberExpected) .verifyComplete(); } /** * Verifies that, given a ton of revisions, we can list the revisions ConfigurationSettings using pagination and stream is invoked multiple times. * (ie. where 'nextLink' has a URL pointing to the next page of results.) 
*/ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.data.appconfiguration.TestHelper public void listRevisionsWithPaginationAndRepeatStream(HttpClient httpClient, ConfigurationServiceVersion serviceVersion) { client = getConfigurationAsyncClient(httpClient, serviceVersion); final int numberExpected = 50; List<ConfigurationSetting> settings = new ArrayList<>(numberExpected); List<Mono<Response<ConfigurationSetting>>> results = new ArrayList<>(); for (int value = 0; value < numberExpected; value++) { ConfigurationSetting setting = new ConfigurationSetting().setKey(keyPrefix).setValue("myValue" + value).setLabel(labelPrefix); settings.add(setting); results.add(client.setConfigurationSettingWithResponse(setting, false)); } SettingSelector filter = new SettingSelector().setKeyFilter(keyPrefix).setLabelFilter(labelPrefix); Flux.merge(results).blockLast(); List<ConfigurationSetting> configurationSettingList1 = new ArrayList<>(); List<ConfigurationSetting> configurationSettingList2 = new ArrayList<>(); PagedFlux<ConfigurationSetting> configurationSettingPagedFlux = client.listRevisions(filter); configurationSettingPagedFlux.toStream().forEach(configurationSetting -> configurationSettingList1.add(configurationSetting)); assertEquals(numberExpected, configurationSettingList1.size()); configurationSettingPagedFlux.toStream().forEach(configurationSetting -> configurationSettingList2.add(configurationSetting)); assertEquals(numberExpected, configurationSettingList2.size()); } /** * Verifies that, given a ton of revisions, we can list the revisions ConfigurationSettings using pagination and stream is invoked multiple times. * (ie. where 'nextLink' has a URL pointing to the next page of results.) 
*/ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.data.appconfiguration.TestHelper public void listRevisionsWithPaginationAndRepeatIterator(HttpClient httpClient, ConfigurationServiceVersion serviceVersion) { client = getConfigurationAsyncClient(httpClient, serviceVersion); final int numberExpected = 50; List<ConfigurationSetting> settings = new ArrayList<>(numberExpected); List<Mono<Response<ConfigurationSetting>>> results = new ArrayList<>(); for (int value = 0; value < numberExpected; value++) { ConfigurationSetting setting = new ConfigurationSetting().setKey(keyPrefix).setValue("myValue" + value).setLabel(labelPrefix); settings.add(setting); results.add(client.setConfigurationSettingWithResponse(setting, false)); } SettingSelector filter = new SettingSelector().setKeyFilter(keyPrefix).setLabelFilter(labelPrefix); Flux.merge(results).blockLast(); List<ConfigurationSetting> configurationSettingList1 = new ArrayList<>(); List<ConfigurationSetting> configurationSettingList2 = new ArrayList<>(); PagedFlux<ConfigurationSetting> configurationSettingPagedFlux = client.listRevisions(filter); configurationSettingPagedFlux.toIterable().forEach(configurationSetting -> configurationSettingList1.add(configurationSetting)); assertEquals(numberExpected, configurationSettingList1.size()); configurationSettingPagedFlux.toIterable().forEach(configurationSetting -> configurationSettingList2.add(configurationSetting)); assertEquals(numberExpected, configurationSettingList2.size()); } /** * Verifies that, given a ton of existing settings, we can list the ConfigurationSettings using pagination * (ie. where 'nextLink' has a URL pointing to the next page of results. 
*/ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.data.appconfiguration.TestHelper public void listConfigurationSettingsWithPagination(HttpClient httpClient, ConfigurationServiceVersion serviceVersion) { client = getConfigurationAsyncClient(httpClient, serviceVersion); final int numberExpected = 50; List<ConfigurationSetting> settings = new ArrayList<>(numberExpected); for (int value = 0; value < numberExpected; value++) { settings.add(new ConfigurationSetting().setKey(keyPrefix + "-" + value).setValue("myValue").setLabel(labelPrefix)); } List<Mono<Response<ConfigurationSetting>>> results = new ArrayList<>(); for (ConfigurationSetting setting : settings) { results.add(client.setConfigurationSettingWithResponse(setting, false)); } SettingSelector filter = new SettingSelector().setKeyFilter(keyPrefix + "-*").setLabelFilter(labelPrefix); Flux.merge(results).blockLast(); StepVerifier.create(client.listConfigurationSettings(filter)) .expectNextCount(numberExpected) .verifyComplete(); } /** * Verifies the conditional "GET" scenario where the setting has yet to be updated, resulting in a 304. This GET * scenario will return a setting when the ETag provided does not match the one of the current setting. 
*/ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.data.appconfiguration.TestHelper public void getConfigurationSettingWhenValueNotUpdated(HttpClient httpClient, ConfigurationServiceVersion serviceVersion) { client = getConfigurationAsyncClient(httpClient, serviceVersion); final String key = getKey(); final ConfigurationSetting expected = new ConfigurationSetting().setKey(key).setValue("myValue"); final ConfigurationSetting newExpected = new ConfigurationSetting().setKey(key).setValue("myNewValue"); final ConfigurationSetting block = client.addConfigurationSettingWithResponse(expected).block().getValue(); assertNotNull(block); StepVerifier.create(client.getConfigurationSettingWithResponse(block, null, true)) .assertNext(response -> assertConfigurationEquals(null, response, 304)) .verifyComplete(); StepVerifier.create(client.setConfigurationSettingWithResponse(newExpected, false)) .assertNext(response -> assertConfigurationEquals(newExpected, response)) .verifyComplete(); StepVerifier.create(client.getConfigurationSettingWithResponse(block, null, true)) .assertNext(response -> assertConfigurationEquals(newExpected, response)) .verifyComplete(); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.data.appconfiguration.TestHelper @Disabled public void deleteAllSettings(HttpClient httpClient, ConfigurationServiceVersion serviceVersion) { client = getConfigurationAsyncClient(httpClient, serviceVersion); client.listConfigurationSettings(new SettingSelector().setKeyFilter("*")) .flatMap(configurationSetting -> { logger.info("Deleting key:label [{}:{}]. isReadOnly? 
{}", configurationSetting.getKey(), configurationSetting.getLabel(), configurationSetting.isReadOnly()); return client.deleteConfigurationSettingWithResponse(configurationSetting, false); }).blockLast(); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.data.appconfiguration.TestHelper public void addHeadersFromContextPolicyTest(HttpClient httpClient, ConfigurationServiceVersion serviceVersion) { client = getConfigurationAsyncClient(httpClient, serviceVersion); final HttpHeaders headers = getCustomizedHeaders(); addHeadersFromContextPolicyRunner(expected -> StepVerifier.create(client.addConfigurationSettingWithResponse(expected) .subscriberContext(Context.of(AddHeadersFromContextPolicy.AZURE_REQUEST_HTTP_HEADERS_KEY, headers))) .assertNext(response -> { final HttpHeaders requestHeaders = response.getRequest().getHeaders(); assertContainsHeaders(headers, requestHeaders); }) .verifyComplete()); } /** * Test helper that calling list configuration setting with given key and label input * * @param keyFilter key filter expression * @param labelFilter label filter expression */ private void filterValueTest(String keyFilter, String labelFilter) { listConfigurationSettingsSelectFieldsWithNotSupportedFilterRunner(keyFilter, labelFilter, selector -> StepVerifier.create(client.listConfigurationSettings(selector)) .verifyError(HttpResponseException.class)); } }
Could these variables be declared final?
public void listConfigurationSettingsWithNullSelector(HttpClient httpClient, ConfigurationServiceVersion serviceVersion) {
    client = getConfigurationAsyncClient(httpClient, serviceVersion);
    // Review fix: these locals are never reassigned, so declare them final.
    final String key = getKey();
    final String key2 = getKey();

    // Wipe every existing setting first so the null selector below observes
    // exactly the two settings added by the runner.
    final Mono<Void> deleteAllSettingsMono = client.listConfigurationSettings(null)
        .flatMap(setting -> client.deleteConfigurationSettingWithResponse(setting, false))
        .then();
    StepVerifier.create(deleteAllSettingsMono)
        .verifyComplete();

    listWithMultipleKeysRunner(key, key2, (setting, setting2) -> {
        final List<ConfigurationSetting> selected = new ArrayList<>();

        StepVerifier.create(client.addConfigurationSettingWithResponse(setting))
            .assertNext(response -> assertConfigurationEquals(setting, response))
            .verifyComplete();
        StepVerifier.create(client.addConfigurationSettingWithResponse(setting2))
            .assertNext(response -> assertConfigurationEquals(setting2, response))
            .verifyComplete();

        // A null selector lists everything in the store — here, exactly the two
        // settings just added.
        StepVerifier.create(client.listConfigurationSettings(null))
            .consumeNextWith(selected::add)
            .consumeNextWith(selected::add)
            .verifyComplete();
        assertEquals(2, selected.size());
        return selected;
    });
}
// Never reassigned, so declared final (matches the review note above this snippet).
final String key2 = getKey();
public void listConfigurationSettingsWithNullSelector(HttpClient httpClient, ConfigurationServiceVersion serviceVersion) {
    client = getConfigurationAsyncClient(httpClient, serviceVersion);
    final String key = getKey();
    final String key2 = getKey();

    // Clear out the store so a null selector can only observe the settings added below.
    final Mono<Void> purgeAll = client.listConfigurationSettings(null)
        .flatMap(setting -> client.deleteConfigurationSettingWithResponse(setting, false))
        .then();
    StepVerifier.create(purgeAll).verifyComplete();

    listWithMultipleKeysRunner(key, key2, (first, second) -> {
        final List<ConfigurationSetting> observed = new ArrayList<>();

        StepVerifier.create(client.addConfigurationSettingWithResponse(first))
            .assertNext(response -> assertConfigurationEquals(first, response))
            .verifyComplete();
        StepVerifier.create(client.addConfigurationSettingWithResponse(second))
            .assertNext(response -> assertConfigurationEquals(second, response))
            .verifyComplete();

        // Listing with a null selector returns every setting; both additions must appear.
        StepVerifier.create(client.listConfigurationSettings(null))
            .consumeNextWith(observed::add)
            .consumeNextWith(observed::add)
            .verifyComplete();
        assertEquals(2, observed.size());
        return observed;
    });
}
class ConfigurationAsyncClientTest extends ConfigurationClientTestBase { private final ClientLogger logger = new ClientLogger(ConfigurationAsyncClientTest.class); private static final String NO_LABEL = null; private ConfigurationAsyncClient client; @Override protected String getTestName() { return ""; } @Override protected void beforeTest() { beforeTestSetup(); } @Override protected void afterTest() { logger.info("Cleaning up created key values."); client.listConfigurationSettings(new SettingSelector().setKeyFilter(keyPrefix + "*")) .flatMap(configurationSetting -> { logger.info("Deleting key:label [{}:{}]. isReadOnly? {}", configurationSetting.getKey(), configurationSetting.getLabel(), configurationSetting.isReadOnly()); Mono<Response<ConfigurationSetting>> unlock = configurationSetting.isReadOnly() ? client.setReadOnlyWithResponse(configurationSetting, false) : Mono.empty(); return unlock.then(client.deleteConfigurationSettingWithResponse(configurationSetting, false)); }) .blockLast(); logger.info("Finished cleaning up values."); } private ConfigurationAsyncClient getConfigurationAsyncClient(HttpClient httpClient, ConfigurationServiceVersion serviceVersion) { return clientSetup(credentials -> { ConfigurationClientBuilder builder = new ConfigurationClientBuilder() .connectionString(connectionString) .httpClient(httpClient == null ? interceptorManager.getPlaybackClient() : httpClient) .serviceVersion(serviceVersion) .httpLogOptions(new HttpLogOptions().setLogLevel(HttpLogDetailLevel.BODY_AND_HEADERS)); if (getTestMode() != TestMode.PLAYBACK) { builder .addPolicy(interceptorManager.getRecordPolicy()) .addPolicy(new RetryPolicy()); } return builder.buildAsyncClient(); }); } /** * Tests that a configuration is able to be added, these are differentiate from each other using a key or key-label * identifier. 
*/ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.data.appconfiguration.TestHelper public void addConfigurationSetting(HttpClient httpClient, ConfigurationServiceVersion serviceVersion) { client = getConfigurationAsyncClient(httpClient, serviceVersion); addConfigurationSettingRunner((expected) -> StepVerifier.create(client.addConfigurationSettingWithResponse(expected)) .assertNext(response -> assertConfigurationEquals(expected, response)) .verifyComplete()); } /** * Tests that we cannot add a configuration setting when the key is an empty string. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.data.appconfiguration.TestHelper public void addConfigurationSettingEmptyKey(HttpClient httpClient, ConfigurationServiceVersion serviceVersion) { client = getConfigurationAsyncClient(httpClient, serviceVersion); StepVerifier.create(client.addConfigurationSetting("", null, "A value")) .verifyErrorSatisfies(ex -> assertRestException(ex, HttpURLConnection.HTTP_BAD_METHOD)); } /** * Tests that we can add configuration settings when value is not null or an empty string. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.data.appconfiguration.TestHelper public void addConfigurationSettingEmptyValue(HttpClient httpClient, ConfigurationServiceVersion serviceVersion) { client = getConfigurationAsyncClient(httpClient, serviceVersion); addConfigurationSettingEmptyValueRunner((setting) -> { StepVerifier.create(client.addConfigurationSetting(setting.getKey(), setting.getLabel(), setting.getValue())) .assertNext(response -> assertConfigurationEquals(setting, response)) .verifyComplete(); StepVerifier.create(client.getConfigurationSetting(setting.getKey(), setting.getLabel(), null)) .assertNext(response -> assertConfigurationEquals(setting, response)) .verifyComplete(); }); } /** * Verifies that an exception is thrown when null key is passed. 
*/ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.data.appconfiguration.TestHelper public void addConfigurationSettingNullKey(HttpClient httpClient, ConfigurationServiceVersion serviceVersion) { client = getConfigurationAsyncClient(httpClient, serviceVersion); StepVerifier.create(client.addConfigurationSetting(null, null, "A Value")) .expectError(IllegalArgumentException.class) .verify(); StepVerifier.create(client.addConfigurationSettingWithResponse(null)) .expectError(NullPointerException.class) .verify(); } /** * Tests that a configuration cannot be added twice with the same key. This should return a 412 error. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.data.appconfiguration.TestHelper public void addExistingSetting(HttpClient httpClient, ConfigurationServiceVersion serviceVersion) { client = getConfigurationAsyncClient(httpClient, serviceVersion); addExistingSettingRunner((expected) -> StepVerifier.create(client.addConfigurationSettingWithResponse(expected).then(client.addConfigurationSettingWithResponse(expected))) .verifyErrorSatisfies(ex -> assertRestException(ex, ResourceExistsException.class, HttpURLConnection.HTTP_PRECON_FAILED))); } /** * Tests that a configuration is able to be added or updated with set. * When the configuration is read-only updates cannot happen, this will result in a 409. 
*/ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.data.appconfiguration.TestHelper public void setConfigurationSetting(HttpClient httpClient, ConfigurationServiceVersion serviceVersion) { client = getConfigurationAsyncClient(httpClient, serviceVersion); setConfigurationSettingRunner((expected, update) -> StepVerifier.create(client.setConfigurationSettingWithResponse(expected, false)) .assertNext(response -> assertConfigurationEquals(expected, response)) .verifyComplete()); } /** * Tests that when an ETag is passed to set it will only set if the current representation of the setting has the * ETag. If the set ETag doesn't match anything the update won't happen, this will result in a 412. This will * prevent set from doing an add as well. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.data.appconfiguration.TestHelper public void setConfigurationSettingIfETag(HttpClient httpClient, ConfigurationServiceVersion serviceVersion) { client = getConfigurationAsyncClient(httpClient, serviceVersion); setConfigurationSettingIfETagRunner((initial, update) -> { StepVerifier.create(client.setConfigurationSettingWithResponse(initial.setETag("badEtag"), true)) .verifyErrorSatisfies(ex -> assertRestException(ex, HttpResponseException.class, HttpURLConnection.HTTP_PRECON_FAILED)); final String etag = client.addConfigurationSettingWithResponse(initial).block().getValue().getETag(); StepVerifier.create(client.setConfigurationSettingWithResponse(update.setETag(etag), true)) .assertNext(response -> assertConfigurationEquals(update, response)) .verifyComplete(); StepVerifier.create(client.setConfigurationSettingWithResponse(initial, true)) .verifyErrorSatisfies(ex -> assertRestException(ex, HttpResponseException.class, HttpURLConnection.HTTP_PRECON_FAILED)); StepVerifier.create(client.getConfigurationSettingWithResponse(update, null, false)) .assertNext(response -> assertConfigurationEquals(update, response)) 
.verifyComplete(); }); } /** * Tests that we cannot set a configuration setting when the key is an empty string. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.data.appconfiguration.TestHelper public void setConfigurationSettingEmptyKey(HttpClient httpClient, ConfigurationServiceVersion serviceVersion) { client = getConfigurationAsyncClient(httpClient, serviceVersion); StepVerifier.create(client.setConfigurationSetting("", NO_LABEL, "A value")) .verifyErrorSatisfies(ex -> assertRestException(ex, HttpURLConnection.HTTP_BAD_METHOD)); } /** * Tests that we can set configuration settings when value is not null or an empty string. * Value is not a required property. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.data.appconfiguration.TestHelper public void setConfigurationSettingEmptyValue(HttpClient httpClient, ConfigurationServiceVersion serviceVersion) { client = getConfigurationAsyncClient(httpClient, serviceVersion); setConfigurationSettingEmptyValueRunner((setting) -> { StepVerifier.create(client.setConfigurationSetting(setting.getKey(), NO_LABEL, setting.getValue())) .assertNext(response -> assertConfigurationEquals(setting, response)) .verifyComplete(); StepVerifier.create(client.getConfigurationSetting(setting.getKey(), setting.getLabel(), null)) .assertNext(response -> assertConfigurationEquals(setting, response)) .verifyComplete(); }); } /** * Verifies that an exception is thrown when null key is passed. 
*/ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.data.appconfiguration.TestHelper public void setConfigurationSettingNullKey(HttpClient httpClient, ConfigurationServiceVersion serviceVersion) { client = getConfigurationAsyncClient(httpClient, serviceVersion); StepVerifier.create(client.setConfigurationSetting(null, NO_LABEL, "A Value")) .verifyError(IllegalArgumentException.class); StepVerifier.create(client.setConfigurationSettingWithResponse(null, false)) .verifyError(NullPointerException.class); } /** * Tests that a configuration is able to be retrieved when it exists, whether or not it is read-only. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.data.appconfiguration.TestHelper public void getConfigurationSetting(HttpClient httpClient, ConfigurationServiceVersion serviceVersion) { client = getConfigurationAsyncClient(httpClient, serviceVersion); getConfigurationSettingRunner((expected) -> StepVerifier.create(client.addConfigurationSettingWithResponse(expected).then(client.getConfigurationSettingWithResponse(expected, null, false))) .assertNext(response -> assertConfigurationEquals(expected, response)) .verifyComplete()); } /** * Tests that attempting to retrieve a non-existent configuration doesn't work, this will result in a 404. 
*/ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.data.appconfiguration.TestHelper public void getConfigurationSettingNotFound(HttpClient httpClient, ConfigurationServiceVersion serviceVersion) { client = getConfigurationAsyncClient(httpClient, serviceVersion); final String key = getKey(); final ConfigurationSetting neverRetrievedConfiguration = new ConfigurationSetting().setKey(key).setValue("myNeverRetreivedValue"); final ConfigurationSetting nonExistentLabel = new ConfigurationSetting().setKey(key).setLabel("myNonExistentLabel"); StepVerifier.create(client.addConfigurationSettingWithResponse(neverRetrievedConfiguration)) .assertNext(response -> assertConfigurationEquals(neverRetrievedConfiguration, response)) .verifyComplete(); StepVerifier.create(client.getConfigurationSetting("myNonExistentKey", null, null)) .verifyErrorSatisfies(ex -> assertRestException(ex, ResourceNotFoundException.class, HttpURLConnection.HTTP_NOT_FOUND)); StepVerifier.create(client.getConfigurationSettingWithResponse(nonExistentLabel, null, false)) .verifyErrorSatisfies(ex -> assertRestException(ex, ResourceNotFoundException.class, HttpURLConnection.HTTP_NOT_FOUND)); } /** * Tests that configurations are able to be deleted when they exist. * After the configuration has been deleted attempting to get it will result in a 404, the same as if the * configuration never existed. 
*/ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.data.appconfiguration.TestHelper public void deleteConfigurationSetting(HttpClient httpClient, ConfigurationServiceVersion serviceVersion) { client = getConfigurationAsyncClient(httpClient, serviceVersion); deleteConfigurationSettingRunner((expected) -> { StepVerifier.create(client.addConfigurationSettingWithResponse(expected).then(client.getConfigurationSettingWithResponse(expected, null, false))) .assertNext(response -> assertConfigurationEquals(expected, response)) .verifyComplete(); StepVerifier.create(client.deleteConfigurationSettingWithResponse(expected, false)) .assertNext(response -> assertConfigurationEquals(expected, response)) .verifyComplete(); StepVerifier.create(client.getConfigurationSettingWithResponse(expected, null, false)) .verifyErrorSatisfies(ex -> assertRestException(ex, ResourceNotFoundException.class, HttpURLConnection.HTTP_NOT_FOUND)); }); } /** * Tests that attempting to delete a non-existent configuration will return a 204. 
*/ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.data.appconfiguration.TestHelper public void deleteConfigurationSettingNotFound(HttpClient httpClient, ConfigurationServiceVersion serviceVersion) { client = getConfigurationAsyncClient(httpClient, serviceVersion); final String key = getKey(); final ConfigurationSetting neverDeletedConfiguration = new ConfigurationSetting().setKey(key).setValue("myNeverDeletedValue"); StepVerifier.create(client.addConfigurationSettingWithResponse(neverDeletedConfiguration)) .assertNext(response -> assertConfigurationEquals(neverDeletedConfiguration, response)) .verifyComplete(); StepVerifier.create(client.deleteConfigurationSettingWithResponse(new ConfigurationSetting().setKey("myNonExistentKey"), false)) .assertNext(response -> assertConfigurationEquals(null, response, HttpURLConnection.HTTP_NO_CONTENT)) .verifyComplete(); StepVerifier.create(client.deleteConfigurationSettingWithResponse(new ConfigurationSetting().setKey(neverDeletedConfiguration.getKey()).setLabel("myNonExistentLabel"), false)) .assertNext(response -> assertConfigurationEquals(null, response, HttpURLConnection.HTTP_NO_CONTENT)) .verifyComplete(); StepVerifier.create(client.getConfigurationSetting(neverDeletedConfiguration.getKey(), neverDeletedConfiguration.getLabel(), null)) .assertNext(response -> assertConfigurationEquals(neverDeletedConfiguration, response)) .verifyComplete(); } /** * Tests that when an ETag is passed to delete it will only delete if the current representation of the setting has the ETag. * If the delete ETag doesn't match anything the delete won't happen, this will result in a 412. 
*/ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.data.appconfiguration.TestHelper public void deleteConfigurationSettingWithETag(HttpClient httpClient, ConfigurationServiceVersion serviceVersion) { client = getConfigurationAsyncClient(httpClient, serviceVersion); deleteConfigurationSettingWithETagRunner((initial, update) -> { final ConfigurationSetting initiallyAddedConfig = client.addConfigurationSettingWithResponse(initial).block().getValue(); final ConfigurationSetting updatedConfig = client.setConfigurationSettingWithResponse(update, true).block().getValue(); StepVerifier.create(client.getConfigurationSettingWithResponse(initial, null, false)) .assertNext(response -> assertConfigurationEquals(update, response)) .verifyComplete(); StepVerifier.create(client.deleteConfigurationSettingWithResponse(initiallyAddedConfig, true)) .verifyErrorSatisfies(ex -> assertRestException(ex, HttpResponseException.class, HttpURLConnection.HTTP_PRECON_FAILED)); StepVerifier.create(client.deleteConfigurationSettingWithResponse(updatedConfig, true)) .assertNext(response -> assertConfigurationEquals(update, response)) .verifyComplete(); StepVerifier.create(client.getConfigurationSettingWithResponse(initial, null, false)) .verifyErrorSatisfies(ex -> assertRestException(ex, ResourceNotFoundException.class, HttpURLConnection.HTTP_NOT_FOUND)); }); } /** * Test the API will not make a delete call without having a key passed, an IllegalArgumentException should be thrown. 
*/ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.data.appconfiguration.TestHelper public void deleteConfigurationSettingNullKey(HttpClient httpClient, ConfigurationServiceVersion serviceVersion) { client = getConfigurationAsyncClient(httpClient, serviceVersion); StepVerifier.create(client.deleteConfigurationSetting(null, null)) .verifyError(IllegalArgumentException.class); StepVerifier.create(client.deleteConfigurationSettingWithResponse(null, false)) .verifyError(NullPointerException.class); } /** * Tests assert that the setting can not be deleted after set the setting to read-only. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.data.appconfiguration.TestHelper public void setReadOnly(HttpClient httpClient, ConfigurationServiceVersion serviceVersion) { client = getConfigurationAsyncClient(httpClient, serviceVersion); lockUnlockRunner((expected) -> { StepVerifier.create(client.addConfigurationSettingWithResponse(expected)) .assertNext(response -> assertConfigurationEquals(expected, response)) .verifyComplete(); StepVerifier.create(client.setReadOnly(expected.getKey(), expected.getLabel(), true)) .assertNext(response -> assertConfigurationEquals(expected, response)) .verifyComplete(); StepVerifier.create(client.deleteConfigurationSettingWithResponse(expected, false)) .verifyErrorSatisfies(ex -> assertRestException(ex, HttpResponseException.class, 409)); }); } /** * Tests assert that the setting can be deleted after clear read-only of the setting. 
*/ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.data.appconfiguration.TestHelper public void clearReadOnly(HttpClient httpClient, ConfigurationServiceVersion serviceVersion) { client = getConfigurationAsyncClient(httpClient, serviceVersion); lockUnlockRunner((expected) -> { StepVerifier.create(client.addConfigurationSettingWithResponse(expected)) .assertNext(response -> assertConfigurationEquals(expected, response)) .verifyComplete(); StepVerifier.create(client.setReadOnly(expected.getKey(), expected.getLabel(), true)) .assertNext(response -> assertConfigurationEquals(expected, response)) .verifyComplete(); StepVerifier.create(client.deleteConfigurationSettingWithResponse(expected, false)) .verifyErrorSatisfies(ex -> assertRestException(ex, HttpResponseException.class, 409)); StepVerifier.create(client.setReadOnly(expected.getKey(), expected.getLabel(), false)) .assertNext(response -> assertConfigurationEquals(expected, response)) .verifyComplete(); StepVerifier.create(client.deleteConfigurationSettingWithResponse(expected, false)) .assertNext(response -> assertConfigurationEquals(expected, response)) .verifyComplete(); }); } /** * Tests assert that the setting can not be deleted after set the setting to read-only. 
*/ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.data.appconfiguration.TestHelper public void setReadOnlyWithConfigurationSetting(HttpClient httpClient, ConfigurationServiceVersion serviceVersion) { client = getConfigurationAsyncClient(httpClient, serviceVersion); lockUnlockRunner((expected) -> { StepVerifier.create(client.addConfigurationSettingWithResponse(expected)) .assertNext(response -> assertConfigurationEquals(expected, response)) .verifyComplete(); StepVerifier.create(client.setReadOnlyWithResponse(expected, true)) .assertNext(response -> assertConfigurationEquals(expected, response)) .verifyComplete(); StepVerifier.create(client.deleteConfigurationSettingWithResponse(expected, false)) .verifyErrorSatisfies(ex -> assertRestException(ex, HttpResponseException.class, 409)); }); } /** * Tests assert that the setting can be deleted after clear read-only of the setting. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.data.appconfiguration.TestHelper public void clearReadOnlyWithConfigurationSetting(HttpClient httpClient, ConfigurationServiceVersion serviceVersion) { client = getConfigurationAsyncClient(httpClient, serviceVersion); lockUnlockRunner((expected) -> { StepVerifier.create(client.addConfigurationSettingWithResponse(expected)) .assertNext(response -> assertConfigurationEquals(expected, response)) .verifyComplete(); StepVerifier.create(client.setReadOnly(expected.getKey(), expected.getLabel(), true)) .assertNext(response -> assertConfigurationEquals(expected, response)) .verifyComplete(); StepVerifier.create(client.deleteConfigurationSettingWithResponse(expected, false)) .verifyErrorSatisfies(ex -> assertRestException(ex, HttpResponseException.class, 409)); StepVerifier.create(client.setReadOnlyWithResponse(expected, false)) .assertNext(response -> assertConfigurationEquals(expected, response)) .verifyComplete(); 
StepVerifier.create(client.deleteConfigurationSettingWithResponse(expected, false)) .assertNext(response -> assertConfigurationEquals(expected, response)) .verifyComplete(); }); } /** * Verifies that a ConfigurationSetting can be added with a label, and that we can fetch that ConfigurationSetting * from the service when filtering by either its label or just its key. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.data.appconfiguration.TestHelper public void listWithKeyAndLabel(HttpClient httpClient, ConfigurationServiceVersion serviceVersion) { client = getConfigurationAsyncClient(httpClient, serviceVersion); final String value = "myValue"; final String key = testResourceNamer.randomName(keyPrefix, 16); final String label = testResourceNamer.randomName("lbl", 8); final ConfigurationSetting expected = new ConfigurationSetting().setKey(key).setValue(value).setLabel(label); StepVerifier.create(client.setConfigurationSettingWithResponse(expected, false)) .assertNext(response -> assertConfigurationEquals(expected, response)) .verifyComplete(); StepVerifier.create(client.listConfigurationSettings(new SettingSelector().setKeyFilter(key).setLabelFilter(label))) .assertNext(configurationSetting -> assertConfigurationEquals(expected, configurationSetting)) .verifyComplete(); StepVerifier.create(client.listConfigurationSettings(new SettingSelector().setKeyFilter(key))) .assertNext(configurationSetting -> assertConfigurationEquals(expected, configurationSetting)) .verifyComplete(); } /** * Verifies that ConfigurationSettings can be added and that we can fetch those ConfigurationSettings from the * service when filtering by their keys. 
*/ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.data.appconfiguration.TestHelper public void listWithMultipleKeys(HttpClient httpClient, ConfigurationServiceVersion serviceVersion) { client = getConfigurationAsyncClient(httpClient, serviceVersion); String key = getKey(); String key2 = getKey(); listWithMultipleKeysRunner(key, key2, (setting, setting2) -> { List<ConfigurationSetting> selected = new ArrayList<>(); StepVerifier.create(client.addConfigurationSettingWithResponse(setting)) .assertNext(response -> assertConfigurationEquals(setting, response)) .verifyComplete(); StepVerifier.create(client.addConfigurationSettingWithResponse(setting2)) .assertNext(response -> assertConfigurationEquals(setting2, response)) .verifyComplete(); StepVerifier.create(client.listConfigurationSettings(new SettingSelector().setKeyFilter(key + "," + key2))) .consumeNextWith(selected::add) .consumeNextWith(selected::add) .verifyComplete(); return selected; }); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.data.appconfiguration.TestHelper /** * Verifies that ConfigurationSettings can be added with different labels and that we can fetch those ConfigurationSettings * from the service when filtering by their labels. 
*/ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.data.appconfiguration.TestHelper public void listWithMultipleLabels(HttpClient httpClient, ConfigurationServiceVersion serviceVersion) { client = getConfigurationAsyncClient(httpClient, serviceVersion); String key = getKey(); String label = getLabel(); String label2 = getLabel(); listWithMultipleLabelsRunner(key, label, label2, (setting, setting2) -> { List<ConfigurationSetting> selected = new ArrayList<>(); StepVerifier.create(client.addConfigurationSettingWithResponse(setting)) .assertNext(response -> assertConfigurationEquals(setting, response)) .verifyComplete(); StepVerifier.create(client.addConfigurationSettingWithResponse(setting2)) .assertNext(response -> assertConfigurationEquals(setting2, response)) .verifyComplete(); StepVerifier.create(client.listConfigurationSettings(new SettingSelector().setKeyFilter(key).setLabelFilter(label + "," + label2))) .consumeNextWith(selected::add) .consumeNextWith(selected::add) .verifyComplete(); return selected; }); } /** * Verifies that we can select filter results by key, label, and select fields using SettingSelector. 
*/ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.data.appconfiguration.TestHelper public void listConfigurationSettingsSelectFields(HttpClient httpClient, ConfigurationServiceVersion serviceVersion) { client = getConfigurationAsyncClient(httpClient, serviceVersion); listConfigurationSettingsSelectFieldsRunner((settings, selector) -> { final List<Mono<Response<ConfigurationSetting>>> settingsBeingAdded = new ArrayList<>(); for (ConfigurationSetting setting : settings) { settingsBeingAdded.add(client.setConfigurationSettingWithResponse(setting, false)); } Flux.merge(settingsBeingAdded).blockLast(); List<ConfigurationSetting> settingsReturned = new ArrayList<>(); StepVerifier.create(client.listConfigurationSettings(selector)) .assertNext(settingsReturned::add) .assertNext(settingsReturned::add) .verifyComplete(); return settingsReturned; }); } /** * Verifies that throws exception when using SettingSelector with not supported *a key filter. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.data.appconfiguration.TestHelper public void listConfigurationSettingsSelectFieldsWithPrefixStarKeyFilter(HttpClient httpClient, ConfigurationServiceVersion serviceVersion) { client = getConfigurationAsyncClient(httpClient, serviceVersion); filterValueTest("*" + getKey(), getLabel()); } /** * Verifies that throws exception when using SettingSelector with not supported *a* key filter. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.data.appconfiguration.TestHelper public void listConfigurationSettingsSelectFieldsWithSubstringKeyFilter(HttpClient httpClient, ConfigurationServiceVersion serviceVersion) { client = getConfigurationAsyncClient(httpClient, serviceVersion); filterValueTest("*" + getKey() + "*", getLabel()); } /** * Verifies that throws exception when using SettingSelector with not supported *a label filter. 
*/ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.data.appconfiguration.TestHelper public void listConfigurationSettingsSelectFieldsWithPrefixStarLabelFilter(HttpClient httpClient, ConfigurationServiceVersion serviceVersion) { client = getConfigurationAsyncClient(httpClient, serviceVersion); filterValueTest(getKey(), "*" + getLabel()); } /** * Verifies that throws exception when using SettingSelector with not supported *a* label filter. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.data.appconfiguration.TestHelper public void listConfigurationSettingsSelectFieldsWithSubstringLabelFilter(HttpClient httpClient, ConfigurationServiceVersion serviceVersion) { client = getConfigurationAsyncClient(httpClient, serviceVersion); filterValueTest(getKey(), "*" + getLabel() + "*"); } /** * Verifies that we can get a ConfigurationSetting at the provided accept datetime */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.data.appconfiguration.TestHelper public void listConfigurationSettingsAcceptDateTime(HttpClient httpClient, ConfigurationServiceVersion serviceVersion) { client = getConfigurationAsyncClient(httpClient, serviceVersion); final String keyName = testResourceNamer.randomName(keyPrefix, 16); final ConfigurationSetting original = new ConfigurationSetting().setKey(keyName).setValue("myValue"); final ConfigurationSetting updated = new ConfigurationSetting().setKey(original.getKey()).setValue("anotherValue"); final ConfigurationSetting updated2 = new ConfigurationSetting().setKey(original.getKey()).setValue("anotherValue2"); StepVerifier.create(client.setConfigurationSettingWithResponse(original, false)) .assertNext(response -> assertConfigurationEquals(original, response)) .verifyComplete(); StepVerifier.create(client.setConfigurationSettingWithResponse(updated, false).delayElement(Duration.ofSeconds(2))) .assertNext(response -> assertConfigurationEquals(updated, 
response)) .verifyComplete(); StepVerifier.create(client.setConfigurationSettingWithResponse(updated2, false)) .assertNext(response -> assertConfigurationEquals(updated2, response)) .verifyComplete(); List<ConfigurationSetting> revisions = client.listRevisions(new SettingSelector().setKeyFilter(keyName)).collectList().block(); assertNotNull(revisions); assertEquals(3, revisions.size()); SettingSelector options = new SettingSelector().setKeyFilter(keyName).setAcceptDatetime(revisions.get(1).getLastModified()); StepVerifier.create(client.listConfigurationSettings(options)) .assertNext(response -> assertConfigurationEquals(updated, response)) .verifyComplete(); } /** * Verifies that we can get all of the revisions for this ConfigurationSetting. Then verifies that we can select * specific fields. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.data.appconfiguration.TestHelper public void listRevisions(HttpClient httpClient, ConfigurationServiceVersion serviceVersion) { client = getConfigurationAsyncClient(httpClient, serviceVersion); final String keyName = testResourceNamer.randomName(keyPrefix, 16); final ConfigurationSetting original = new ConfigurationSetting().setKey(keyName).setValue("myValue"); final ConfigurationSetting updated = new ConfigurationSetting().setKey(original.getKey()).setValue("anotherValue"); final ConfigurationSetting updated2 = new ConfigurationSetting().setKey(original.getKey()).setValue("anotherValue2"); StepVerifier.create(client.setConfigurationSettingWithResponse(original, false)) .assertNext(response -> assertConfigurationEquals(original, response)) .verifyComplete(); StepVerifier.create(client.setConfigurationSettingWithResponse(updated, false)) .assertNext(response -> assertConfigurationEquals(updated, response)) .verifyComplete(); StepVerifier.create(client.setConfigurationSettingWithResponse(updated2, false)) .assertNext(response -> assertConfigurationEquals(updated2, response)) .verifyComplete(); 
StepVerifier.create(client.listRevisions(new SettingSelector().setKeyFilter(keyName))) .assertNext(response -> assertConfigurationEquals(updated2, response)) .assertNext(response -> assertConfigurationEquals(updated, response)) .assertNext(response -> assertConfigurationEquals(original, response)) .verifyComplete(); StepVerifier.create(client.listRevisions(new SettingSelector().setKeyFilter(keyName).setFields(SettingFields.KEY, SettingFields.ETAG))) .assertNext(response -> validateListRevisions(updated2, response)) .assertNext(response -> validateListRevisions(updated, response)) .assertNext(response -> validateListRevisions(original, response)) .verifyComplete(); assertTrue(client.listRevisions(null).toStream().collect(Collectors.toList()).size() > 0); } /** * Verifies that we can get all the revisions for all settings with the specified keys. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.data.appconfiguration.TestHelper public void listRevisionsWithMultipleKeys(HttpClient httpClient, ConfigurationServiceVersion serviceVersion) { client = getConfigurationAsyncClient(httpClient, serviceVersion); String key = getKey(); String key2 = getKey(); listRevisionsWithMultipleKeysRunner(key, key2, (testInput) -> { List<ConfigurationSetting> selected = new ArrayList<>(); StepVerifier.create(client.addConfigurationSettingWithResponse(testInput.get(0))) .assertNext(response -> assertConfigurationEquals(testInput.get(0), response)) .verifyComplete(); StepVerifier.create(client.setConfigurationSettingWithResponse(testInput.get(1), false)) .assertNext(response -> assertConfigurationEquals(testInput.get(1), response)) .verifyComplete(); StepVerifier.create(client.addConfigurationSettingWithResponse(testInput.get(2))) .assertNext(response -> assertConfigurationEquals(testInput.get(2), response)) .verifyComplete(); StepVerifier.create(client.setConfigurationSettingWithResponse(testInput.get(3), false)) .assertNext(response -> 
assertConfigurationEquals(testInput.get(3), response)) .verifyComplete(); StepVerifier.create(client.listRevisions(new SettingSelector().setKeyFilter(key + "," + key2))) .consumeNextWith(selected::add) .consumeNextWith(selected::add) .consumeNextWith(selected::add) .consumeNextWith(selected::add) .verifyComplete(); return selected; }); } /** * Verifies that we can get all revisions for all settings with the specified labels. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.data.appconfiguration.TestHelper public void listRevisionsWithMultipleLabels(HttpClient httpClient, ConfigurationServiceVersion serviceVersion) { client = getConfigurationAsyncClient(httpClient, serviceVersion); String key = getKey(); String label = getLabel(); String label2 = getLabel(); listRevisionsWithMultipleLabelsRunner(key, label, label2, (testInput) -> { List<ConfigurationSetting> selected = new ArrayList<>(); StepVerifier.create(client.addConfigurationSettingWithResponse(testInput.get(0))) .assertNext(response -> assertConfigurationEquals(testInput.get(0), response)) .verifyComplete(); StepVerifier.create(client.setConfigurationSettingWithResponse(testInput.get(1), false)) .assertNext(response -> assertConfigurationEquals(testInput.get(1), response)) .verifyComplete(); StepVerifier.create(client.addConfigurationSettingWithResponse(testInput.get(2))) .assertNext(response -> assertConfigurationEquals(testInput.get(2), response)) .verifyComplete(); StepVerifier.create(client.setConfigurationSettingWithResponse(testInput.get(3), false)) .assertNext(response -> assertConfigurationEquals(testInput.get(3), response)) .verifyComplete(); StepVerifier.create(client.listRevisions(new SettingSelector().setKeyFilter(key).setLabelFilter(label + "," + label2))) .consumeNextWith(selected::add) .consumeNextWith(selected::add) .consumeNextWith(selected::add) .consumeNextWith(selected::add) .verifyComplete(); return selected; }); } /** * Verifies that we can get a subset 
of revisions based on the "acceptDateTime" */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.data.appconfiguration.TestHelper public void listRevisionsAcceptDateTime(HttpClient httpClient, ConfigurationServiceVersion serviceVersion) { client = getConfigurationAsyncClient(httpClient, serviceVersion); final String keyName = testResourceNamer.randomName(keyPrefix, 16); final ConfigurationSetting original = new ConfigurationSetting().setKey(keyName).setValue("myValue"); final ConfigurationSetting updated = new ConfigurationSetting().setKey(original.getKey()).setValue("anotherValue"); final ConfigurationSetting updated2 = new ConfigurationSetting().setKey(original.getKey()).setValue("anotherValue2"); StepVerifier.create(client.setConfigurationSettingWithResponse(original, false)) .assertNext(response -> assertConfigurationEquals(original, response)) .verifyComplete(); StepVerifier.create(client.setConfigurationSettingWithResponse(updated, false).delayElement(Duration.ofSeconds(2))) .assertNext(response -> assertConfigurationEquals(updated, response)) .verifyComplete(); StepVerifier.create(client.setConfigurationSettingWithResponse(updated2, false)) .assertNext(response -> assertConfigurationEquals(updated2, response)) .verifyComplete(); List<ConfigurationSetting> revisions = client.listRevisions(new SettingSelector().setKeyFilter(keyName)).collectList().block(); assertNotNull(revisions); assertEquals(3, revisions.size()); SettingSelector options = new SettingSelector().setKeyFilter(keyName).setAcceptDatetime(revisions.get(1).getLastModified()); StepVerifier.create(client.listRevisions(options)) .assertNext(response -> assertConfigurationEquals(updated, response)) .assertNext(response -> assertConfigurationEquals(original, response)) .verifyComplete(); } /** * Verifies that, given a ton of revisions, we can list the revisions ConfigurationSettings using pagination * (ie. where 'nextLink' has a URL pointing to the next page of results.) 
*/ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.data.appconfiguration.TestHelper public void listRevisionsWithPagination(HttpClient httpClient, ConfigurationServiceVersion serviceVersion) { client = getConfigurationAsyncClient(httpClient, serviceVersion); final int numberExpected = 50; List<ConfigurationSetting> settings = new ArrayList<>(numberExpected); for (int value = 0; value < numberExpected; value++) { settings.add(new ConfigurationSetting().setKey(keyPrefix).setValue("myValue" + value).setLabel(labelPrefix)); } List<Mono<Response<ConfigurationSetting>>> results = new ArrayList<>(); for (ConfigurationSetting setting : settings) { results.add(client.setConfigurationSettingWithResponse(setting, false)); } SettingSelector filter = new SettingSelector().setKeyFilter(keyPrefix).setLabelFilter(labelPrefix); Flux.merge(results).blockLast(); StepVerifier.create(client.listRevisions(filter)) .expectNextCount(numberExpected) .verifyComplete(); } /** * Verifies that, given a ton of revisions, we can list the revisions ConfigurationSettings using pagination and stream is invoked multiple times. * (ie. where 'nextLink' has a URL pointing to the next page of results.) 
*/ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.data.appconfiguration.TestHelper public void listRevisionsWithPaginationAndRepeatStream(HttpClient httpClient, ConfigurationServiceVersion serviceVersion) { client = getConfigurationAsyncClient(httpClient, serviceVersion); final int numberExpected = 50; List<ConfigurationSetting> settings = new ArrayList<>(numberExpected); List<Mono<Response<ConfigurationSetting>>> results = new ArrayList<>(); for (int value = 0; value < numberExpected; value++) { ConfigurationSetting setting = new ConfigurationSetting().setKey(keyPrefix).setValue("myValue" + value).setLabel(labelPrefix); settings.add(setting); results.add(client.setConfigurationSettingWithResponse(setting, false)); } SettingSelector filter = new SettingSelector().setKeyFilter(keyPrefix).setLabelFilter(labelPrefix); Flux.merge(results).blockLast(); List<ConfigurationSetting> configurationSettingList1 = new ArrayList<>(); List<ConfigurationSetting> configurationSettingList2 = new ArrayList<>(); PagedFlux<ConfigurationSetting> configurationSettingPagedFlux = client.listRevisions(filter); configurationSettingPagedFlux.toStream().forEach(configurationSetting -> configurationSettingList1.add(configurationSetting)); assertEquals(numberExpected, configurationSettingList1.size()); configurationSettingPagedFlux.toStream().forEach(configurationSetting -> configurationSettingList2.add(configurationSetting)); assertEquals(numberExpected, configurationSettingList2.size()); } /** * Verifies that, given a ton of revisions, we can list the revisions ConfigurationSettings using pagination and stream is invoked multiple times. * (ie. where 'nextLink' has a URL pointing to the next page of results.) 
*/ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.data.appconfiguration.TestHelper public void listRevisionsWithPaginationAndRepeatIterator(HttpClient httpClient, ConfigurationServiceVersion serviceVersion) { client = getConfigurationAsyncClient(httpClient, serviceVersion); final int numberExpected = 50; List<ConfigurationSetting> settings = new ArrayList<>(numberExpected); List<Mono<Response<ConfigurationSetting>>> results = new ArrayList<>(); for (int value = 0; value < numberExpected; value++) { ConfigurationSetting setting = new ConfigurationSetting().setKey(keyPrefix).setValue("myValue" + value).setLabel(labelPrefix); settings.add(setting); results.add(client.setConfigurationSettingWithResponse(setting, false)); } SettingSelector filter = new SettingSelector().setKeyFilter(keyPrefix).setLabelFilter(labelPrefix); Flux.merge(results).blockLast(); List<ConfigurationSetting> configurationSettingList1 = new ArrayList<>(); List<ConfigurationSetting> configurationSettingList2 = new ArrayList<>(); PagedFlux<ConfigurationSetting> configurationSettingPagedFlux = client.listRevisions(filter); configurationSettingPagedFlux.toIterable().forEach(configurationSetting -> configurationSettingList1.add(configurationSetting)); assertEquals(numberExpected, configurationSettingList1.size()); configurationSettingPagedFlux.toIterable().forEach(configurationSetting -> configurationSettingList2.add(configurationSetting)); assertEquals(numberExpected, configurationSettingList2.size()); } /** * Verifies that, given a ton of existing settings, we can list the ConfigurationSettings using pagination * (ie. where 'nextLink' has a URL pointing to the next page of results. 
*/ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.data.appconfiguration.TestHelper public void listConfigurationSettingsWithPagination(HttpClient httpClient, ConfigurationServiceVersion serviceVersion) { client = getConfigurationAsyncClient(httpClient, serviceVersion); final int numberExpected = 50; List<ConfigurationSetting> settings = new ArrayList<>(numberExpected); for (int value = 0; value < numberExpected; value++) { settings.add(new ConfigurationSetting().setKey(keyPrefix + "-" + value).setValue("myValue").setLabel(labelPrefix)); } List<Mono<Response<ConfigurationSetting>>> results = new ArrayList<>(); for (ConfigurationSetting setting : settings) { results.add(client.setConfigurationSettingWithResponse(setting, false)); } SettingSelector filter = new SettingSelector().setKeyFilter(keyPrefix + "-*").setLabelFilter(labelPrefix); Flux.merge(results).blockLast(); StepVerifier.create(client.listConfigurationSettings(filter)) .expectNextCount(numberExpected) .verifyComplete(); } /** * Verifies the conditional "GET" scenario where the setting has yet to be updated, resulting in a 304. This GET * scenario will return a setting when the ETag provided does not match the one of the current setting. 
*/ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.data.appconfiguration.TestHelper public void getConfigurationSettingWhenValueNotUpdated(HttpClient httpClient, ConfigurationServiceVersion serviceVersion) { client = getConfigurationAsyncClient(httpClient, serviceVersion); final String key = getKey(); final ConfigurationSetting expected = new ConfigurationSetting().setKey(key).setValue("myValue"); final ConfigurationSetting newExpected = new ConfigurationSetting().setKey(key).setValue("myNewValue"); final ConfigurationSetting block = client.addConfigurationSettingWithResponse(expected).block().getValue(); assertNotNull(block); StepVerifier.create(client.getConfigurationSettingWithResponse(block, null, true)) .assertNext(response -> assertConfigurationEquals(null, response, 304)) .verifyComplete(); StepVerifier.create(client.setConfigurationSettingWithResponse(newExpected, false)) .assertNext(response -> assertConfigurationEquals(newExpected, response)) .verifyComplete(); StepVerifier.create(client.getConfigurationSettingWithResponse(block, null, true)) .assertNext(response -> assertConfigurationEquals(newExpected, response)) .verifyComplete(); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.data.appconfiguration.TestHelper @Disabled public void deleteAllSettings(HttpClient httpClient, ConfigurationServiceVersion serviceVersion) { client = getConfigurationAsyncClient(httpClient, serviceVersion); client.listConfigurationSettings(new SettingSelector().setKeyFilter("*")) .flatMap(configurationSetting -> { logger.info("Deleting key:label [{}:{}]. isReadOnly? 
{}", configurationSetting.getKey(), configurationSetting.getLabel(), configurationSetting.isReadOnly()); return client.deleteConfigurationSettingWithResponse(configurationSetting, false); }).blockLast(); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.data.appconfiguration.TestHelper public void addHeadersFromContextPolicyTest(HttpClient httpClient, ConfigurationServiceVersion serviceVersion) { client = getConfigurationAsyncClient(httpClient, serviceVersion); final HttpHeaders headers = getCustomizedHeaders(); addHeadersFromContextPolicyRunner(expected -> StepVerifier.create(client.addConfigurationSettingWithResponse(expected) .subscriberContext(Context.of(AddHeadersFromContextPolicy.AZURE_REQUEST_HTTP_HEADERS_KEY, headers))) .assertNext(response -> { final HttpHeaders requestHeaders = response.getRequest().getHeaders(); assertContainsHeaders(headers, requestHeaders); }) .verifyComplete()); } /** * Test helper that calling list configuration setting with given key and label input * * @param keyFilter key filter expression * @param labelFilter label filter expression */ private void filterValueTest(String keyFilter, String labelFilter) { listConfigurationSettingsSelectFieldsWithNotSupportedFilterRunner(keyFilter, labelFilter, selector -> StepVerifier.create(client.listConfigurationSettings(selector)) .verifyError(HttpResponseException.class)); } }
class ConfigurationAsyncClientTest extends ConfigurationClientTestBase { private final ClientLogger logger = new ClientLogger(ConfigurationAsyncClientTest.class); private static final String NO_LABEL = null; private ConfigurationAsyncClient client; @Override protected String getTestName() { return ""; } @Override protected void beforeTest() { beforeTestSetup(); } @Override protected void afterTest() { logger.info("Cleaning up created key values."); client.listConfigurationSettings(new SettingSelector().setKeyFilter(keyPrefix + "*")) .flatMap(configurationSetting -> { logger.info("Deleting key:label [{}:{}]. isReadOnly? {}", configurationSetting.getKey(), configurationSetting.getLabel(), configurationSetting.isReadOnly()); Mono<Response<ConfigurationSetting>> unlock = configurationSetting.isReadOnly() ? client.setReadOnlyWithResponse(configurationSetting, false) : Mono.empty(); return unlock.then(client.deleteConfigurationSettingWithResponse(configurationSetting, false)); }) .blockLast(); logger.info("Finished cleaning up values."); } private ConfigurationAsyncClient getConfigurationAsyncClient(HttpClient httpClient, ConfigurationServiceVersion serviceVersion) { return clientSetup(credentials -> { ConfigurationClientBuilder builder = new ConfigurationClientBuilder() .connectionString(connectionString) .httpClient(httpClient == null ? interceptorManager.getPlaybackClient() : httpClient) .serviceVersion(serviceVersion) .httpLogOptions(new HttpLogOptions().setLogLevel(HttpLogDetailLevel.BODY_AND_HEADERS)); if (getTestMode() != TestMode.PLAYBACK) { builder .addPolicy(interceptorManager.getRecordPolicy()) .addPolicy(new RetryPolicy()); } return builder.buildAsyncClient(); }); } /** * Tests that a configuration is able to be added, these are differentiate from each other using a key or key-label * identifier. 
*/ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.data.appconfiguration.TestHelper public void addConfigurationSetting(HttpClient httpClient, ConfigurationServiceVersion serviceVersion) { client = getConfigurationAsyncClient(httpClient, serviceVersion); addConfigurationSettingRunner((expected) -> StepVerifier.create(client.addConfigurationSettingWithResponse(expected)) .assertNext(response -> assertConfigurationEquals(expected, response)) .verifyComplete()); } /** * Tests that we cannot add a configuration setting when the key is an empty string. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.data.appconfiguration.TestHelper public void addConfigurationSettingEmptyKey(HttpClient httpClient, ConfigurationServiceVersion serviceVersion) { client = getConfigurationAsyncClient(httpClient, serviceVersion); StepVerifier.create(client.addConfigurationSetting("", null, "A value")) .verifyErrorSatisfies(ex -> assertRestException(ex, HttpURLConnection.HTTP_BAD_METHOD)); } /** * Tests that we can add configuration settings when value is not null or an empty string. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.data.appconfiguration.TestHelper public void addConfigurationSettingEmptyValue(HttpClient httpClient, ConfigurationServiceVersion serviceVersion) { client = getConfigurationAsyncClient(httpClient, serviceVersion); addConfigurationSettingEmptyValueRunner((setting) -> { StepVerifier.create(client.addConfigurationSetting(setting.getKey(), setting.getLabel(), setting.getValue())) .assertNext(response -> assertConfigurationEquals(setting, response)) .verifyComplete(); StepVerifier.create(client.getConfigurationSetting(setting.getKey(), setting.getLabel(), null)) .assertNext(response -> assertConfigurationEquals(setting, response)) .verifyComplete(); }); } /** * Verifies that an exception is thrown when null key is passed. 
*/ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.data.appconfiguration.TestHelper public void addConfigurationSettingNullKey(HttpClient httpClient, ConfigurationServiceVersion serviceVersion) { client = getConfigurationAsyncClient(httpClient, serviceVersion); StepVerifier.create(client.addConfigurationSetting(null, null, "A Value")) .expectError(IllegalArgumentException.class) .verify(); StepVerifier.create(client.addConfigurationSettingWithResponse(null)) .expectError(NullPointerException.class) .verify(); } /** * Tests that a configuration cannot be added twice with the same key. This should return a 412 error. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.data.appconfiguration.TestHelper public void addExistingSetting(HttpClient httpClient, ConfigurationServiceVersion serviceVersion) { client = getConfigurationAsyncClient(httpClient, serviceVersion); addExistingSettingRunner((expected) -> StepVerifier.create(client.addConfigurationSettingWithResponse(expected).then(client.addConfigurationSettingWithResponse(expected))) .verifyErrorSatisfies(ex -> assertRestException(ex, ResourceExistsException.class, HttpURLConnection.HTTP_PRECON_FAILED))); } /** * Tests that a configuration is able to be added or updated with set. * When the configuration is read-only updates cannot happen, this will result in a 409. 
*/ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.data.appconfiguration.TestHelper public void setConfigurationSetting(HttpClient httpClient, ConfigurationServiceVersion serviceVersion) { client = getConfigurationAsyncClient(httpClient, serviceVersion); setConfigurationSettingRunner((expected, update) -> StepVerifier.create(client.setConfigurationSettingWithResponse(expected, false)) .assertNext(response -> assertConfigurationEquals(expected, response)) .verifyComplete()); } /** * Tests that when an ETag is passed to set it will only set if the current representation of the setting has the * ETag. If the set ETag doesn't match anything the update won't happen, this will result in a 412. This will * prevent set from doing an add as well. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.data.appconfiguration.TestHelper public void setConfigurationSettingIfETag(HttpClient httpClient, ConfigurationServiceVersion serviceVersion) { client = getConfigurationAsyncClient(httpClient, serviceVersion); setConfigurationSettingIfETagRunner((initial, update) -> { StepVerifier.create(client.setConfigurationSettingWithResponse(initial.setETag("badEtag"), true)) .verifyErrorSatisfies(ex -> assertRestException(ex, HttpResponseException.class, HttpURLConnection.HTTP_PRECON_FAILED)); final String etag = client.addConfigurationSettingWithResponse(initial).block().getValue().getETag(); StepVerifier.create(client.setConfigurationSettingWithResponse(update.setETag(etag), true)) .assertNext(response -> assertConfigurationEquals(update, response)) .verifyComplete(); StepVerifier.create(client.setConfigurationSettingWithResponse(initial, true)) .verifyErrorSatisfies(ex -> assertRestException(ex, HttpResponseException.class, HttpURLConnection.HTTP_PRECON_FAILED)); StepVerifier.create(client.getConfigurationSettingWithResponse(update, null, false)) .assertNext(response -> assertConfigurationEquals(update, response)) 
.verifyComplete(); }); } /** * Tests that we cannot set a configuration setting when the key is an empty string. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.data.appconfiguration.TestHelper public void setConfigurationSettingEmptyKey(HttpClient httpClient, ConfigurationServiceVersion serviceVersion) { client = getConfigurationAsyncClient(httpClient, serviceVersion); StepVerifier.create(client.setConfigurationSetting("", NO_LABEL, "A value")) .verifyErrorSatisfies(ex -> assertRestException(ex, HttpURLConnection.HTTP_BAD_METHOD)); } /** * Tests that we can set configuration settings when value is not null or an empty string. * Value is not a required property. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.data.appconfiguration.TestHelper public void setConfigurationSettingEmptyValue(HttpClient httpClient, ConfigurationServiceVersion serviceVersion) { client = getConfigurationAsyncClient(httpClient, serviceVersion); setConfigurationSettingEmptyValueRunner((setting) -> { StepVerifier.create(client.setConfigurationSetting(setting.getKey(), NO_LABEL, setting.getValue())) .assertNext(response -> assertConfigurationEquals(setting, response)) .verifyComplete(); StepVerifier.create(client.getConfigurationSetting(setting.getKey(), setting.getLabel(), null)) .assertNext(response -> assertConfigurationEquals(setting, response)) .verifyComplete(); }); } /** * Verifies that an exception is thrown when null key is passed. 
*/ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.data.appconfiguration.TestHelper public void setConfigurationSettingNullKey(HttpClient httpClient, ConfigurationServiceVersion serviceVersion) { client = getConfigurationAsyncClient(httpClient, serviceVersion); StepVerifier.create(client.setConfigurationSetting(null, NO_LABEL, "A Value")) .verifyError(IllegalArgumentException.class); StepVerifier.create(client.setConfigurationSettingWithResponse(null, false)) .verifyError(NullPointerException.class); } /** * Tests that a configuration is able to be retrieved when it exists, whether or not it is read-only. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.data.appconfiguration.TestHelper public void getConfigurationSetting(HttpClient httpClient, ConfigurationServiceVersion serviceVersion) { client = getConfigurationAsyncClient(httpClient, serviceVersion); getConfigurationSettingRunner((expected) -> StepVerifier.create(client.addConfigurationSettingWithResponse(expected).then(client.getConfigurationSettingWithResponse(expected, null, false))) .assertNext(response -> assertConfigurationEquals(expected, response)) .verifyComplete()); } /** * Tests that attempting to retrieve a non-existent configuration doesn't work, this will result in a 404. 
*/ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.data.appconfiguration.TestHelper public void getConfigurationSettingNotFound(HttpClient httpClient, ConfigurationServiceVersion serviceVersion) { client = getConfigurationAsyncClient(httpClient, serviceVersion); final String key = getKey(); final ConfigurationSetting neverRetrievedConfiguration = new ConfigurationSetting().setKey(key).setValue("myNeverRetreivedValue"); final ConfigurationSetting nonExistentLabel = new ConfigurationSetting().setKey(key).setLabel("myNonExistentLabel"); StepVerifier.create(client.addConfigurationSettingWithResponse(neverRetrievedConfiguration)) .assertNext(response -> assertConfigurationEquals(neverRetrievedConfiguration, response)) .verifyComplete(); StepVerifier.create(client.getConfigurationSetting("myNonExistentKey", null, null)) .verifyErrorSatisfies(ex -> assertRestException(ex, ResourceNotFoundException.class, HttpURLConnection.HTTP_NOT_FOUND)); StepVerifier.create(client.getConfigurationSettingWithResponse(nonExistentLabel, null, false)) .verifyErrorSatisfies(ex -> assertRestException(ex, ResourceNotFoundException.class, HttpURLConnection.HTTP_NOT_FOUND)); } /** * Tests that configurations are able to be deleted when they exist. * After the configuration has been deleted attempting to get it will result in a 404, the same as if the * configuration never existed. 
*/ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.data.appconfiguration.TestHelper public void deleteConfigurationSetting(HttpClient httpClient, ConfigurationServiceVersion serviceVersion) { client = getConfigurationAsyncClient(httpClient, serviceVersion); deleteConfigurationSettingRunner((expected) -> { StepVerifier.create(client.addConfigurationSettingWithResponse(expected).then(client.getConfigurationSettingWithResponse(expected, null, false))) .assertNext(response -> assertConfigurationEquals(expected, response)) .verifyComplete(); StepVerifier.create(client.deleteConfigurationSettingWithResponse(expected, false)) .assertNext(response -> assertConfigurationEquals(expected, response)) .verifyComplete(); StepVerifier.create(client.getConfigurationSettingWithResponse(expected, null, false)) .verifyErrorSatisfies(ex -> assertRestException(ex, ResourceNotFoundException.class, HttpURLConnection.HTTP_NOT_FOUND)); }); } /** * Tests that attempting to delete a non-existent configuration will return a 204. 
*/ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.data.appconfiguration.TestHelper public void deleteConfigurationSettingNotFound(HttpClient httpClient, ConfigurationServiceVersion serviceVersion) { client = getConfigurationAsyncClient(httpClient, serviceVersion); final String key = getKey(); final ConfigurationSetting neverDeletedConfiguration = new ConfigurationSetting().setKey(key).setValue("myNeverDeletedValue"); StepVerifier.create(client.addConfigurationSettingWithResponse(neverDeletedConfiguration)) .assertNext(response -> assertConfigurationEquals(neverDeletedConfiguration, response)) .verifyComplete(); StepVerifier.create(client.deleteConfigurationSettingWithResponse(new ConfigurationSetting().setKey("myNonExistentKey"), false)) .assertNext(response -> assertConfigurationEquals(null, response, HttpURLConnection.HTTP_NO_CONTENT)) .verifyComplete(); StepVerifier.create(client.deleteConfigurationSettingWithResponse(new ConfigurationSetting().setKey(neverDeletedConfiguration.getKey()).setLabel("myNonExistentLabel"), false)) .assertNext(response -> assertConfigurationEquals(null, response, HttpURLConnection.HTTP_NO_CONTENT)) .verifyComplete(); StepVerifier.create(client.getConfigurationSetting(neverDeletedConfiguration.getKey(), neverDeletedConfiguration.getLabel(), null)) .assertNext(response -> assertConfigurationEquals(neverDeletedConfiguration, response)) .verifyComplete(); } /** * Tests that when an ETag is passed to delete it will only delete if the current representation of the setting has the ETag. * If the delete ETag doesn't match anything the delete won't happen, this will result in a 412. 
*/ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.data.appconfiguration.TestHelper public void deleteConfigurationSettingWithETag(HttpClient httpClient, ConfigurationServiceVersion serviceVersion) { client = getConfigurationAsyncClient(httpClient, serviceVersion); deleteConfigurationSettingWithETagRunner((initial, update) -> { final ConfigurationSetting initiallyAddedConfig = client.addConfigurationSettingWithResponse(initial).block().getValue(); final ConfigurationSetting updatedConfig = client.setConfigurationSettingWithResponse(update, true).block().getValue(); StepVerifier.create(client.getConfigurationSettingWithResponse(initial, null, false)) .assertNext(response -> assertConfigurationEquals(update, response)) .verifyComplete(); StepVerifier.create(client.deleteConfigurationSettingWithResponse(initiallyAddedConfig, true)) .verifyErrorSatisfies(ex -> assertRestException(ex, HttpResponseException.class, HttpURLConnection.HTTP_PRECON_FAILED)); StepVerifier.create(client.deleteConfigurationSettingWithResponse(updatedConfig, true)) .assertNext(response -> assertConfigurationEquals(update, response)) .verifyComplete(); StepVerifier.create(client.getConfigurationSettingWithResponse(initial, null, false)) .verifyErrorSatisfies(ex -> assertRestException(ex, ResourceNotFoundException.class, HttpURLConnection.HTTP_NOT_FOUND)); }); } /** * Test the API will not make a delete call without having a key passed, an IllegalArgumentException should be thrown. 
*/ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.data.appconfiguration.TestHelper public void deleteConfigurationSettingNullKey(HttpClient httpClient, ConfigurationServiceVersion serviceVersion) { client = getConfigurationAsyncClient(httpClient, serviceVersion); StepVerifier.create(client.deleteConfigurationSetting(null, null)) .verifyError(IllegalArgumentException.class); StepVerifier.create(client.deleteConfigurationSettingWithResponse(null, false)) .verifyError(NullPointerException.class); } /** * Tests assert that the setting can not be deleted after set the setting to read-only. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.data.appconfiguration.TestHelper public void setReadOnly(HttpClient httpClient, ConfigurationServiceVersion serviceVersion) { client = getConfigurationAsyncClient(httpClient, serviceVersion); lockUnlockRunner((expected) -> { StepVerifier.create(client.addConfigurationSettingWithResponse(expected)) .assertNext(response -> assertConfigurationEquals(expected, response)) .verifyComplete(); StepVerifier.create(client.setReadOnly(expected.getKey(), expected.getLabel(), true)) .assertNext(response -> assertConfigurationEquals(expected, response)) .verifyComplete(); StepVerifier.create(client.deleteConfigurationSettingWithResponse(expected, false)) .verifyErrorSatisfies(ex -> assertRestException(ex, HttpResponseException.class, 409)); }); } /** * Tests assert that the setting can be deleted after clear read-only of the setting. 
*/ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.data.appconfiguration.TestHelper public void clearReadOnly(HttpClient httpClient, ConfigurationServiceVersion serviceVersion) { client = getConfigurationAsyncClient(httpClient, serviceVersion); lockUnlockRunner((expected) -> { StepVerifier.create(client.addConfigurationSettingWithResponse(expected)) .assertNext(response -> assertConfigurationEquals(expected, response)) .verifyComplete(); StepVerifier.create(client.setReadOnly(expected.getKey(), expected.getLabel(), true)) .assertNext(response -> assertConfigurationEquals(expected, response)) .verifyComplete(); StepVerifier.create(client.deleteConfigurationSettingWithResponse(expected, false)) .verifyErrorSatisfies(ex -> assertRestException(ex, HttpResponseException.class, 409)); StepVerifier.create(client.setReadOnly(expected.getKey(), expected.getLabel(), false)) .assertNext(response -> assertConfigurationEquals(expected, response)) .verifyComplete(); StepVerifier.create(client.deleteConfigurationSettingWithResponse(expected, false)) .assertNext(response -> assertConfigurationEquals(expected, response)) .verifyComplete(); }); } /** * Tests assert that the setting can not be deleted after set the setting to read-only. 
*/ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.data.appconfiguration.TestHelper public void setReadOnlyWithConfigurationSetting(HttpClient httpClient, ConfigurationServiceVersion serviceVersion) { client = getConfigurationAsyncClient(httpClient, serviceVersion); lockUnlockRunner((expected) -> { StepVerifier.create(client.addConfigurationSettingWithResponse(expected)) .assertNext(response -> assertConfigurationEquals(expected, response)) .verifyComplete(); StepVerifier.create(client.setReadOnlyWithResponse(expected, true)) .assertNext(response -> assertConfigurationEquals(expected, response)) .verifyComplete(); StepVerifier.create(client.deleteConfigurationSettingWithResponse(expected, false)) .verifyErrorSatisfies(ex -> assertRestException(ex, HttpResponseException.class, 409)); }); } /** * Tests assert that the setting can be deleted after clear read-only of the setting. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.data.appconfiguration.TestHelper public void clearReadOnlyWithConfigurationSetting(HttpClient httpClient, ConfigurationServiceVersion serviceVersion) { client = getConfigurationAsyncClient(httpClient, serviceVersion); lockUnlockRunner((expected) -> { StepVerifier.create(client.addConfigurationSettingWithResponse(expected)) .assertNext(response -> assertConfigurationEquals(expected, response)) .verifyComplete(); StepVerifier.create(client.setReadOnly(expected.getKey(), expected.getLabel(), true)) .assertNext(response -> assertConfigurationEquals(expected, response)) .verifyComplete(); StepVerifier.create(client.deleteConfigurationSettingWithResponse(expected, false)) .verifyErrorSatisfies(ex -> assertRestException(ex, HttpResponseException.class, 409)); StepVerifier.create(client.setReadOnlyWithResponse(expected, false)) .assertNext(response -> assertConfigurationEquals(expected, response)) .verifyComplete(); 
StepVerifier.create(client.deleteConfigurationSettingWithResponse(expected, false)) .assertNext(response -> assertConfigurationEquals(expected, response)) .verifyComplete(); }); } /** * Verifies that a ConfigurationSetting can be added with a label, and that we can fetch that ConfigurationSetting * from the service when filtering by either its label or just its key. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.data.appconfiguration.TestHelper public void listWithKeyAndLabel(HttpClient httpClient, ConfigurationServiceVersion serviceVersion) { client = getConfigurationAsyncClient(httpClient, serviceVersion); final String value = "myValue"; final String key = testResourceNamer.randomName(keyPrefix, 16); final String label = testResourceNamer.randomName("lbl", 8); final ConfigurationSetting expected = new ConfigurationSetting().setKey(key).setValue(value).setLabel(label); StepVerifier.create(client.setConfigurationSettingWithResponse(expected, false)) .assertNext(response -> assertConfigurationEquals(expected, response)) .verifyComplete(); StepVerifier.create(client.listConfigurationSettings(new SettingSelector().setKeyFilter(key).setLabelFilter(label))) .assertNext(configurationSetting -> assertConfigurationEquals(expected, configurationSetting)) .verifyComplete(); StepVerifier.create(client.listConfigurationSettings(new SettingSelector().setKeyFilter(key))) .assertNext(configurationSetting -> assertConfigurationEquals(expected, configurationSetting)) .verifyComplete(); } /** * Verifies that ConfigurationSettings can be added and that we can fetch those ConfigurationSettings from the * service when filtering by their keys. 
*/ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.data.appconfiguration.TestHelper public void listWithMultipleKeys(HttpClient httpClient, ConfigurationServiceVersion serviceVersion) { client = getConfigurationAsyncClient(httpClient, serviceVersion); String key = getKey(); String key2 = getKey(); listWithMultipleKeysRunner(key, key2, (setting, setting2) -> { List<ConfigurationSetting> selected = new ArrayList<>(); StepVerifier.create(client.addConfigurationSettingWithResponse(setting)) .assertNext(response -> assertConfigurationEquals(setting, response)) .verifyComplete(); StepVerifier.create(client.addConfigurationSettingWithResponse(setting2)) .assertNext(response -> assertConfigurationEquals(setting2, response)) .verifyComplete(); StepVerifier.create(client.listConfigurationSettings(new SettingSelector().setKeyFilter(key + "," + key2))) .consumeNextWith(selected::add) .consumeNextWith(selected::add) .verifyComplete(); return selected; }); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.data.appconfiguration.TestHelper /** * Verifies that ConfigurationSettings can be added with different labels and that we can fetch those ConfigurationSettings * from the service when filtering by their labels. 
*/ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.data.appconfiguration.TestHelper public void listWithMultipleLabels(HttpClient httpClient, ConfigurationServiceVersion serviceVersion) { client = getConfigurationAsyncClient(httpClient, serviceVersion); String key = getKey(); String label = getLabel(); String label2 = getLabel(); listWithMultipleLabelsRunner(key, label, label2, (setting, setting2) -> { List<ConfigurationSetting> selected = new ArrayList<>(); StepVerifier.create(client.addConfigurationSettingWithResponse(setting)) .assertNext(response -> assertConfigurationEquals(setting, response)) .verifyComplete(); StepVerifier.create(client.addConfigurationSettingWithResponse(setting2)) .assertNext(response -> assertConfigurationEquals(setting2, response)) .verifyComplete(); StepVerifier.create(client.listConfigurationSettings(new SettingSelector().setKeyFilter(key).setLabelFilter(label + "," + label2))) .consumeNextWith(selected::add) .consumeNextWith(selected::add) .verifyComplete(); return selected; }); } /** * Verifies that we can select filter results by key, label, and select fields using SettingSelector. 
*/ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.data.appconfiguration.TestHelper public void listConfigurationSettingsSelectFields(HttpClient httpClient, ConfigurationServiceVersion serviceVersion) { client = getConfigurationAsyncClient(httpClient, serviceVersion); listConfigurationSettingsSelectFieldsRunner((settings, selector) -> { final List<Mono<Response<ConfigurationSetting>>> settingsBeingAdded = new ArrayList<>(); for (ConfigurationSetting setting : settings) { settingsBeingAdded.add(client.setConfigurationSettingWithResponse(setting, false)); } Flux.merge(settingsBeingAdded).blockLast(); List<ConfigurationSetting> settingsReturned = new ArrayList<>(); StepVerifier.create(client.listConfigurationSettings(selector)) .assertNext(settingsReturned::add) .assertNext(settingsReturned::add) .verifyComplete(); return settingsReturned; }); } /** * Verifies that throws exception when using SettingSelector with not supported *a key filter. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.data.appconfiguration.TestHelper public void listConfigurationSettingsSelectFieldsWithPrefixStarKeyFilter(HttpClient httpClient, ConfigurationServiceVersion serviceVersion) { client = getConfigurationAsyncClient(httpClient, serviceVersion); filterValueTest("*" + getKey(), getLabel()); } /** * Verifies that throws exception when using SettingSelector with not supported *a* key filter. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.data.appconfiguration.TestHelper public void listConfigurationSettingsSelectFieldsWithSubstringKeyFilter(HttpClient httpClient, ConfigurationServiceVersion serviceVersion) { client = getConfigurationAsyncClient(httpClient, serviceVersion); filterValueTest("*" + getKey() + "*", getLabel()); } /** * Verifies that throws exception when using SettingSelector with not supported *a label filter. 
*/ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.data.appconfiguration.TestHelper public void listConfigurationSettingsSelectFieldsWithPrefixStarLabelFilter(HttpClient httpClient, ConfigurationServiceVersion serviceVersion) { client = getConfigurationAsyncClient(httpClient, serviceVersion); filterValueTest(getKey(), "*" + getLabel()); } /** * Verifies that throws exception when using SettingSelector with not supported *a* label filter. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.data.appconfiguration.TestHelper public void listConfigurationSettingsSelectFieldsWithSubstringLabelFilter(HttpClient httpClient, ConfigurationServiceVersion serviceVersion) { client = getConfigurationAsyncClient(httpClient, serviceVersion); filterValueTest(getKey(), "*" + getLabel() + "*"); } /** * Verifies that we can get a ConfigurationSetting at the provided accept datetime */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.data.appconfiguration.TestHelper public void listConfigurationSettingsAcceptDateTime(HttpClient httpClient, ConfigurationServiceVersion serviceVersion) { client = getConfigurationAsyncClient(httpClient, serviceVersion); final String keyName = testResourceNamer.randomName(keyPrefix, 16); final ConfigurationSetting original = new ConfigurationSetting().setKey(keyName).setValue("myValue"); final ConfigurationSetting updated = new ConfigurationSetting().setKey(original.getKey()).setValue("anotherValue"); final ConfigurationSetting updated2 = new ConfigurationSetting().setKey(original.getKey()).setValue("anotherValue2"); StepVerifier.create(client.setConfigurationSettingWithResponse(original, false)) .assertNext(response -> assertConfigurationEquals(original, response)) .verifyComplete(); StepVerifier.create(client.setConfigurationSettingWithResponse(updated, false).delayElement(Duration.ofSeconds(2))) .assertNext(response -> assertConfigurationEquals(updated, 
response)) .verifyComplete(); StepVerifier.create(client.setConfigurationSettingWithResponse(updated2, false)) .assertNext(response -> assertConfigurationEquals(updated2, response)) .verifyComplete(); List<ConfigurationSetting> revisions = client.listRevisions(new SettingSelector().setKeyFilter(keyName)).collectList().block(); assertNotNull(revisions); assertEquals(3, revisions.size()); SettingSelector options = new SettingSelector().setKeyFilter(keyName).setAcceptDatetime(revisions.get(1).getLastModified()); StepVerifier.create(client.listConfigurationSettings(options)) .assertNext(response -> assertConfigurationEquals(updated, response)) .verifyComplete(); } /** * Verifies that we can get all of the revisions for this ConfigurationSetting. Then verifies that we can select * specific fields. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.data.appconfiguration.TestHelper public void listRevisions(HttpClient httpClient, ConfigurationServiceVersion serviceVersion) { client = getConfigurationAsyncClient(httpClient, serviceVersion); final String keyName = testResourceNamer.randomName(keyPrefix, 16); final ConfigurationSetting original = new ConfigurationSetting().setKey(keyName).setValue("myValue"); final ConfigurationSetting updated = new ConfigurationSetting().setKey(original.getKey()).setValue("anotherValue"); final ConfigurationSetting updated2 = new ConfigurationSetting().setKey(original.getKey()).setValue("anotherValue2"); StepVerifier.create(client.setConfigurationSettingWithResponse(original, false)) .assertNext(response -> assertConfigurationEquals(original, response)) .verifyComplete(); StepVerifier.create(client.setConfigurationSettingWithResponse(updated, false)) .assertNext(response -> assertConfigurationEquals(updated, response)) .verifyComplete(); StepVerifier.create(client.setConfigurationSettingWithResponse(updated2, false)) .assertNext(response -> assertConfigurationEquals(updated2, response)) .verifyComplete(); 
StepVerifier.create(client.listRevisions(new SettingSelector().setKeyFilter(keyName))) .assertNext(response -> assertConfigurationEquals(updated2, response)) .assertNext(response -> assertConfigurationEquals(updated, response)) .assertNext(response -> assertConfigurationEquals(original, response)) .verifyComplete(); StepVerifier.create(client.listRevisions(new SettingSelector().setKeyFilter(keyName).setFields(SettingFields.KEY, SettingFields.ETAG))) .assertNext(response -> validateListRevisions(updated2, response)) .assertNext(response -> validateListRevisions(updated, response)) .assertNext(response -> validateListRevisions(original, response)) .verifyComplete(); assertTrue(client.listRevisions(null).toStream().collect(Collectors.toList()).size() > 0); } /** * Verifies that we can get all the revisions for all settings with the specified keys. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.data.appconfiguration.TestHelper public void listRevisionsWithMultipleKeys(HttpClient httpClient, ConfigurationServiceVersion serviceVersion) { client = getConfigurationAsyncClient(httpClient, serviceVersion); String key = getKey(); String key2 = getKey(); listRevisionsWithMultipleKeysRunner(key, key2, (testInput) -> { List<ConfigurationSetting> selected = new ArrayList<>(); StepVerifier.create(client.addConfigurationSettingWithResponse(testInput.get(0))) .assertNext(response -> assertConfigurationEquals(testInput.get(0), response)) .verifyComplete(); StepVerifier.create(client.setConfigurationSettingWithResponse(testInput.get(1), false)) .assertNext(response -> assertConfigurationEquals(testInput.get(1), response)) .verifyComplete(); StepVerifier.create(client.addConfigurationSettingWithResponse(testInput.get(2))) .assertNext(response -> assertConfigurationEquals(testInput.get(2), response)) .verifyComplete(); StepVerifier.create(client.setConfigurationSettingWithResponse(testInput.get(3), false)) .assertNext(response -> 
assertConfigurationEquals(testInput.get(3), response)) .verifyComplete(); StepVerifier.create(client.listRevisions(new SettingSelector().setKeyFilter(key + "," + key2))) .consumeNextWith(selected::add) .consumeNextWith(selected::add) .consumeNextWith(selected::add) .consumeNextWith(selected::add) .verifyComplete(); return selected; }); } /** * Verifies that we can get all revisions for all settings with the specified labels. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.data.appconfiguration.TestHelper public void listRevisionsWithMultipleLabels(HttpClient httpClient, ConfigurationServiceVersion serviceVersion) { client = getConfigurationAsyncClient(httpClient, serviceVersion); String key = getKey(); String label = getLabel(); String label2 = getLabel(); listRevisionsWithMultipleLabelsRunner(key, label, label2, (testInput) -> { List<ConfigurationSetting> selected = new ArrayList<>(); StepVerifier.create(client.addConfigurationSettingWithResponse(testInput.get(0))) .assertNext(response -> assertConfigurationEquals(testInput.get(0), response)) .verifyComplete(); StepVerifier.create(client.setConfigurationSettingWithResponse(testInput.get(1), false)) .assertNext(response -> assertConfigurationEquals(testInput.get(1), response)) .verifyComplete(); StepVerifier.create(client.addConfigurationSettingWithResponse(testInput.get(2))) .assertNext(response -> assertConfigurationEquals(testInput.get(2), response)) .verifyComplete(); StepVerifier.create(client.setConfigurationSettingWithResponse(testInput.get(3), false)) .assertNext(response -> assertConfigurationEquals(testInput.get(3), response)) .verifyComplete(); StepVerifier.create(client.listRevisions(new SettingSelector().setKeyFilter(key).setLabelFilter(label + "," + label2))) .consumeNextWith(selected::add) .consumeNextWith(selected::add) .consumeNextWith(selected::add) .consumeNextWith(selected::add) .verifyComplete(); return selected; }); } /** * Verifies that we can get a subset 
of revisions based on the "acceptDateTime" */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.data.appconfiguration.TestHelper public void listRevisionsAcceptDateTime(HttpClient httpClient, ConfigurationServiceVersion serviceVersion) { client = getConfigurationAsyncClient(httpClient, serviceVersion); final String keyName = testResourceNamer.randomName(keyPrefix, 16); final ConfigurationSetting original = new ConfigurationSetting().setKey(keyName).setValue("myValue"); final ConfigurationSetting updated = new ConfigurationSetting().setKey(original.getKey()).setValue("anotherValue"); final ConfigurationSetting updated2 = new ConfigurationSetting().setKey(original.getKey()).setValue("anotherValue2"); StepVerifier.create(client.setConfigurationSettingWithResponse(original, false)) .assertNext(response -> assertConfigurationEquals(original, response)) .verifyComplete(); StepVerifier.create(client.setConfigurationSettingWithResponse(updated, false).delayElement(Duration.ofSeconds(2))) .assertNext(response -> assertConfigurationEquals(updated, response)) .verifyComplete(); StepVerifier.create(client.setConfigurationSettingWithResponse(updated2, false)) .assertNext(response -> assertConfigurationEquals(updated2, response)) .verifyComplete(); List<ConfigurationSetting> revisions = client.listRevisions(new SettingSelector().setKeyFilter(keyName)).collectList().block(); assertNotNull(revisions); assertEquals(3, revisions.size()); SettingSelector options = new SettingSelector().setKeyFilter(keyName).setAcceptDatetime(revisions.get(1).getLastModified()); StepVerifier.create(client.listRevisions(options)) .assertNext(response -> assertConfigurationEquals(updated, response)) .assertNext(response -> assertConfigurationEquals(original, response)) .verifyComplete(); } /** * Verifies that, given a ton of revisions, we can list the revisions ConfigurationSettings using pagination * (ie. where 'nextLink' has a URL pointing to the next page of results.) 
*/ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.data.appconfiguration.TestHelper public void listRevisionsWithPagination(HttpClient httpClient, ConfigurationServiceVersion serviceVersion) { client = getConfigurationAsyncClient(httpClient, serviceVersion); final int numberExpected = 50; List<ConfigurationSetting> settings = new ArrayList<>(numberExpected); for (int value = 0; value < numberExpected; value++) { settings.add(new ConfigurationSetting().setKey(keyPrefix).setValue("myValue" + value).setLabel(labelPrefix)); } List<Mono<Response<ConfigurationSetting>>> results = new ArrayList<>(); for (ConfigurationSetting setting : settings) { results.add(client.setConfigurationSettingWithResponse(setting, false)); } SettingSelector filter = new SettingSelector().setKeyFilter(keyPrefix).setLabelFilter(labelPrefix); Flux.merge(results).blockLast(); StepVerifier.create(client.listRevisions(filter)) .expectNextCount(numberExpected) .verifyComplete(); } /** * Verifies that, given a ton of revisions, we can list the revisions ConfigurationSettings using pagination and stream is invoked multiple times. * (ie. where 'nextLink' has a URL pointing to the next page of results.) 
*/ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.data.appconfiguration.TestHelper public void listRevisionsWithPaginationAndRepeatStream(HttpClient httpClient, ConfigurationServiceVersion serviceVersion) { client = getConfigurationAsyncClient(httpClient, serviceVersion); final int numberExpected = 50; List<ConfigurationSetting> settings = new ArrayList<>(numberExpected); List<Mono<Response<ConfigurationSetting>>> results = new ArrayList<>(); for (int value = 0; value < numberExpected; value++) { ConfigurationSetting setting = new ConfigurationSetting().setKey(keyPrefix).setValue("myValue" + value).setLabel(labelPrefix); settings.add(setting); results.add(client.setConfigurationSettingWithResponse(setting, false)); } SettingSelector filter = new SettingSelector().setKeyFilter(keyPrefix).setLabelFilter(labelPrefix); Flux.merge(results).blockLast(); List<ConfigurationSetting> configurationSettingList1 = new ArrayList<>(); List<ConfigurationSetting> configurationSettingList2 = new ArrayList<>(); PagedFlux<ConfigurationSetting> configurationSettingPagedFlux = client.listRevisions(filter); configurationSettingPagedFlux.toStream().forEach(configurationSetting -> configurationSettingList1.add(configurationSetting)); assertEquals(numberExpected, configurationSettingList1.size()); configurationSettingPagedFlux.toStream().forEach(configurationSetting -> configurationSettingList2.add(configurationSetting)); assertEquals(numberExpected, configurationSettingList2.size()); } /** * Verifies that, given a ton of revisions, we can list the revisions ConfigurationSettings using pagination and stream is invoked multiple times. * (ie. where 'nextLink' has a URL pointing to the next page of results.) 
*/ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.data.appconfiguration.TestHelper public void listRevisionsWithPaginationAndRepeatIterator(HttpClient httpClient, ConfigurationServiceVersion serviceVersion) { client = getConfigurationAsyncClient(httpClient, serviceVersion); final int numberExpected = 50; List<ConfigurationSetting> settings = new ArrayList<>(numberExpected); List<Mono<Response<ConfigurationSetting>>> results = new ArrayList<>(); for (int value = 0; value < numberExpected; value++) { ConfigurationSetting setting = new ConfigurationSetting().setKey(keyPrefix).setValue("myValue" + value).setLabel(labelPrefix); settings.add(setting); results.add(client.setConfigurationSettingWithResponse(setting, false)); } SettingSelector filter = new SettingSelector().setKeyFilter(keyPrefix).setLabelFilter(labelPrefix); Flux.merge(results).blockLast(); List<ConfigurationSetting> configurationSettingList1 = new ArrayList<>(); List<ConfigurationSetting> configurationSettingList2 = new ArrayList<>(); PagedFlux<ConfigurationSetting> configurationSettingPagedFlux = client.listRevisions(filter); configurationSettingPagedFlux.toIterable().forEach(configurationSetting -> configurationSettingList1.add(configurationSetting)); assertEquals(numberExpected, configurationSettingList1.size()); configurationSettingPagedFlux.toIterable().forEach(configurationSetting -> configurationSettingList2.add(configurationSetting)); assertEquals(numberExpected, configurationSettingList2.size()); } /** * Verifies that, given a ton of existing settings, we can list the ConfigurationSettings using pagination * (ie. where 'nextLink' has a URL pointing to the next page of results. 
*/ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.data.appconfiguration.TestHelper public void listConfigurationSettingsWithPagination(HttpClient httpClient, ConfigurationServiceVersion serviceVersion) { client = getConfigurationAsyncClient(httpClient, serviceVersion); final int numberExpected = 50; List<ConfigurationSetting> settings = new ArrayList<>(numberExpected); for (int value = 0; value < numberExpected; value++) { settings.add(new ConfigurationSetting().setKey(keyPrefix + "-" + value).setValue("myValue").setLabel(labelPrefix)); } List<Mono<Response<ConfigurationSetting>>> results = new ArrayList<>(); for (ConfigurationSetting setting : settings) { results.add(client.setConfigurationSettingWithResponse(setting, false)); } SettingSelector filter = new SettingSelector().setKeyFilter(keyPrefix + "-*").setLabelFilter(labelPrefix); Flux.merge(results).blockLast(); StepVerifier.create(client.listConfigurationSettings(filter)) .expectNextCount(numberExpected) .verifyComplete(); } /** * Verifies the conditional "GET" scenario where the setting has yet to be updated, resulting in a 304. This GET * scenario will return a setting when the ETag provided does not match the one of the current setting. 
*/ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.data.appconfiguration.TestHelper public void getConfigurationSettingWhenValueNotUpdated(HttpClient httpClient, ConfigurationServiceVersion serviceVersion) { client = getConfigurationAsyncClient(httpClient, serviceVersion); final String key = getKey(); final ConfigurationSetting expected = new ConfigurationSetting().setKey(key).setValue("myValue"); final ConfigurationSetting newExpected = new ConfigurationSetting().setKey(key).setValue("myNewValue"); final ConfigurationSetting block = client.addConfigurationSettingWithResponse(expected).block().getValue(); assertNotNull(block); StepVerifier.create(client.getConfigurationSettingWithResponse(block, null, true)) .assertNext(response -> assertConfigurationEquals(null, response, 304)) .verifyComplete(); StepVerifier.create(client.setConfigurationSettingWithResponse(newExpected, false)) .assertNext(response -> assertConfigurationEquals(newExpected, response)) .verifyComplete(); StepVerifier.create(client.getConfigurationSettingWithResponse(block, null, true)) .assertNext(response -> assertConfigurationEquals(newExpected, response)) .verifyComplete(); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.data.appconfiguration.TestHelper @Disabled public void deleteAllSettings(HttpClient httpClient, ConfigurationServiceVersion serviceVersion) { client = getConfigurationAsyncClient(httpClient, serviceVersion); client.listConfigurationSettings(new SettingSelector().setKeyFilter("*")) .flatMap(configurationSetting -> { logger.info("Deleting key:label [{}:{}]. isReadOnly? 
{}", configurationSetting.getKey(), configurationSetting.getLabel(), configurationSetting.isReadOnly()); return client.deleteConfigurationSettingWithResponse(configurationSetting, false); }).blockLast(); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.data.appconfiguration.TestHelper public void addHeadersFromContextPolicyTest(HttpClient httpClient, ConfigurationServiceVersion serviceVersion) { client = getConfigurationAsyncClient(httpClient, serviceVersion); final HttpHeaders headers = getCustomizedHeaders(); addHeadersFromContextPolicyRunner(expected -> StepVerifier.create(client.addConfigurationSettingWithResponse(expected) .subscriberContext(Context.of(AddHeadersFromContextPolicy.AZURE_REQUEST_HTTP_HEADERS_KEY, headers))) .assertNext(response -> { final HttpHeaders requestHeaders = response.getRequest().getHeaders(); assertContainsHeaders(headers, requestHeaders); }) .verifyComplete()); } /** * Test helper that calling list configuration setting with given key and label input * * @param keyFilter key filter expression * @param labelFilter label filter expression */ private void filterValueTest(String keyFilter, String labelFilter) { listConfigurationSettingsSelectFieldsWithNotSupportedFilterRunner(keyFilter, labelFilter, selector -> StepVerifier.create(client.listConfigurationSettings(selector)) .verifyError(HttpResponseException.class)); } }
nit: the `Mono<Void>` declaration does not start at the correct indentation. Is it just git rendering it this way, or should this be fixed?
/**
 * Verifies that listing configuration settings with a {@code null} selector returns every setting
 * in the store. All pre-existing settings are deleted first so the expected count is deterministic.
 */
public void listConfigurationSettingsWithNullSelector(HttpClient httpClient, ConfigurationServiceVersion serviceVersion) {
    client = getConfigurationAsyncClient(httpClient, serviceVersion);
    final String firstKey = getKey();
    final String secondKey = getKey();

    // Purge any pre-existing settings so that only the two settings added below remain.
    StepVerifier.create(
            client.listConfigurationSettings(null)
                .flatMap(setting -> client.deleteConfigurationSettingWithResponse(setting, false))
                .then())
        .verifyComplete();

    listWithMultipleKeysRunner(firstKey, secondKey, (first, second) -> {
        final List<ConfigurationSetting> collected = new ArrayList<>();

        StepVerifier.create(client.addConfigurationSettingWithResponse(first))
            .assertNext(response -> assertConfigurationEquals(first, response))
            .verifyComplete();
        StepVerifier.create(client.addConfigurationSettingWithResponse(second))
            .assertNext(response -> assertConfigurationEquals(second, response))
            .verifyComplete();

        // A null selector must list everything, i.e. exactly the two settings just added.
        StepVerifier.create(client.listConfigurationSettings(null))
            .consumeNextWith(collected::add)
            .consumeNextWith(collected::add)
            .verifyComplete();
        assertEquals(2, collected.size());
        return collected;
    });
}
Mono<Void> deleteAllSettingsMono = client.listConfigurationSettings(null)
/**
 * Verifies that listing configuration settings with a {@code null} selector returns every setting
 * in the store. All pre-existing settings are deleted first so the expected count is deterministic.
 */
public void listConfigurationSettingsWithNullSelector(HttpClient httpClient, ConfigurationServiceVersion serviceVersion) {
    client = getConfigurationAsyncClient(httpClient, serviceVersion);
    final String keyOne = getKey();
    final String keyTwo = getKey();

    // Delete every existing setting so the null-selector listing below sees a known state.
    final Mono<Void> purgeAllSettings = client.listConfigurationSettings(null)
        .flatMap(existing -> client.deleteConfigurationSettingWithResponse(existing, false))
        .then();
    StepVerifier.create(purgeAllSettings)
        .verifyComplete();

    listWithMultipleKeysRunner(keyOne, keyTwo, (settingA, settingB) -> {
        final List<ConfigurationSetting> observed = new ArrayList<>();

        StepVerifier.create(client.addConfigurationSettingWithResponse(settingA))
            .assertNext(response -> assertConfigurationEquals(settingA, response))
            .verifyComplete();
        StepVerifier.create(client.addConfigurationSettingWithResponse(settingB))
            .assertNext(response -> assertConfigurationEquals(settingB, response))
            .verifyComplete();

        // A null selector lists all settings; exactly the two just added should appear.
        StepVerifier.create(client.listConfigurationSettings(null))
            .consumeNextWith(observed::add)
            .consumeNextWith(observed::add)
            .verifyComplete();
        assertEquals(2, observed.size());
        return observed;
    });
}
class ConfigurationAsyncClientTest extends ConfigurationClientTestBase { private final ClientLogger logger = new ClientLogger(ConfigurationAsyncClientTest.class); private static final String NO_LABEL = null; private ConfigurationAsyncClient client; @Override protected String getTestName() { return ""; } @Override protected void beforeTest() { beforeTestSetup(); } @Override protected void afterTest() { logger.info("Cleaning up created key values."); client.listConfigurationSettings(new SettingSelector().setKeyFilter(keyPrefix + "*")) .flatMap(configurationSetting -> { logger.info("Deleting key:label [{}:{}]. isReadOnly? {}", configurationSetting.getKey(), configurationSetting.getLabel(), configurationSetting.isReadOnly()); Mono<Response<ConfigurationSetting>> unlock = configurationSetting.isReadOnly() ? client.setReadOnlyWithResponse(configurationSetting, false) : Mono.empty(); return unlock.then(client.deleteConfigurationSettingWithResponse(configurationSetting, false)); }) .blockLast(); logger.info("Finished cleaning up values."); } private ConfigurationAsyncClient getConfigurationAsyncClient(HttpClient httpClient, ConfigurationServiceVersion serviceVersion) { return clientSetup(credentials -> { ConfigurationClientBuilder builder = new ConfigurationClientBuilder() .connectionString(connectionString) .httpClient(httpClient == null ? interceptorManager.getPlaybackClient() : httpClient) .serviceVersion(serviceVersion) .httpLogOptions(new HttpLogOptions().setLogLevel(HttpLogDetailLevel.BODY_AND_HEADERS)); if (getTestMode() != TestMode.PLAYBACK) { builder .addPolicy(interceptorManager.getRecordPolicy()) .addPolicy(new RetryPolicy()); } return builder.buildAsyncClient(); }); } /** * Tests that a configuration is able to be added, these are differentiate from each other using a key or key-label * identifier. 
*/ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.data.appconfiguration.TestHelper public void addConfigurationSetting(HttpClient httpClient, ConfigurationServiceVersion serviceVersion) { client = getConfigurationAsyncClient(httpClient, serviceVersion); addConfigurationSettingRunner((expected) -> StepVerifier.create(client.addConfigurationSettingWithResponse(expected)) .assertNext(response -> assertConfigurationEquals(expected, response)) .verifyComplete()); } /** * Tests that we cannot add a configuration setting when the key is an empty string. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.data.appconfiguration.TestHelper public void addConfigurationSettingEmptyKey(HttpClient httpClient, ConfigurationServiceVersion serviceVersion) { client = getConfigurationAsyncClient(httpClient, serviceVersion); StepVerifier.create(client.addConfigurationSetting("", null, "A value")) .verifyErrorSatisfies(ex -> assertRestException(ex, HttpURLConnection.HTTP_BAD_METHOD)); } /** * Tests that we can add configuration settings when value is not null or an empty string. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.data.appconfiguration.TestHelper public void addConfigurationSettingEmptyValue(HttpClient httpClient, ConfigurationServiceVersion serviceVersion) { client = getConfigurationAsyncClient(httpClient, serviceVersion); addConfigurationSettingEmptyValueRunner((setting) -> { StepVerifier.create(client.addConfigurationSetting(setting.getKey(), setting.getLabel(), setting.getValue())) .assertNext(response -> assertConfigurationEquals(setting, response)) .verifyComplete(); StepVerifier.create(client.getConfigurationSetting(setting.getKey(), setting.getLabel(), null)) .assertNext(response -> assertConfigurationEquals(setting, response)) .verifyComplete(); }); } /** * Verifies that an exception is thrown when null key is passed. 
*/ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.data.appconfiguration.TestHelper public void addConfigurationSettingNullKey(HttpClient httpClient, ConfigurationServiceVersion serviceVersion) { client = getConfigurationAsyncClient(httpClient, serviceVersion); StepVerifier.create(client.addConfigurationSetting(null, null, "A Value")) .expectError(IllegalArgumentException.class) .verify(); StepVerifier.create(client.addConfigurationSettingWithResponse(null)) .expectError(NullPointerException.class) .verify(); } /** * Tests that a configuration cannot be added twice with the same key. This should return a 412 error. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.data.appconfiguration.TestHelper public void addExistingSetting(HttpClient httpClient, ConfigurationServiceVersion serviceVersion) { client = getConfigurationAsyncClient(httpClient, serviceVersion); addExistingSettingRunner((expected) -> StepVerifier.create(client.addConfigurationSettingWithResponse(expected).then(client.addConfigurationSettingWithResponse(expected))) .verifyErrorSatisfies(ex -> assertRestException(ex, ResourceExistsException.class, HttpURLConnection.HTTP_PRECON_FAILED))); } /** * Tests that a configuration is able to be added or updated with set. * When the configuration is read-only updates cannot happen, this will result in a 409. 
*/ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.data.appconfiguration.TestHelper public void setConfigurationSetting(HttpClient httpClient, ConfigurationServiceVersion serviceVersion) { client = getConfigurationAsyncClient(httpClient, serviceVersion); setConfigurationSettingRunner((expected, update) -> StepVerifier.create(client.setConfigurationSettingWithResponse(expected, false)) .assertNext(response -> assertConfigurationEquals(expected, response)) .verifyComplete()); } /** * Tests that when an ETag is passed to set it will only set if the current representation of the setting has the * ETag. If the set ETag doesn't match anything the update won't happen, this will result in a 412. This will * prevent set from doing an add as well. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.data.appconfiguration.TestHelper public void setConfigurationSettingIfETag(HttpClient httpClient, ConfigurationServiceVersion serviceVersion) { client = getConfigurationAsyncClient(httpClient, serviceVersion); setConfigurationSettingIfETagRunner((initial, update) -> { StepVerifier.create(client.setConfigurationSettingWithResponse(initial.setETag("badEtag"), true)) .verifyErrorSatisfies(ex -> assertRestException(ex, HttpResponseException.class, HttpURLConnection.HTTP_PRECON_FAILED)); final String etag = client.addConfigurationSettingWithResponse(initial).block().getValue().getETag(); StepVerifier.create(client.setConfigurationSettingWithResponse(update.setETag(etag), true)) .assertNext(response -> assertConfigurationEquals(update, response)) .verifyComplete(); StepVerifier.create(client.setConfigurationSettingWithResponse(initial, true)) .verifyErrorSatisfies(ex -> assertRestException(ex, HttpResponseException.class, HttpURLConnection.HTTP_PRECON_FAILED)); StepVerifier.create(client.getConfigurationSettingWithResponse(update, null, false)) .assertNext(response -> assertConfigurationEquals(update, response)) 
.verifyComplete(); }); } /** * Tests that we cannot set a configuration setting when the key is an empty string. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.data.appconfiguration.TestHelper public void setConfigurationSettingEmptyKey(HttpClient httpClient, ConfigurationServiceVersion serviceVersion) { client = getConfigurationAsyncClient(httpClient, serviceVersion); StepVerifier.create(client.setConfigurationSetting("", NO_LABEL, "A value")) .verifyErrorSatisfies(ex -> assertRestException(ex, HttpURLConnection.HTTP_BAD_METHOD)); } /** * Tests that we can set configuration settings when value is not null or an empty string. * Value is not a required property. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.data.appconfiguration.TestHelper public void setConfigurationSettingEmptyValue(HttpClient httpClient, ConfigurationServiceVersion serviceVersion) { client = getConfigurationAsyncClient(httpClient, serviceVersion); setConfigurationSettingEmptyValueRunner((setting) -> { StepVerifier.create(client.setConfigurationSetting(setting.getKey(), NO_LABEL, setting.getValue())) .assertNext(response -> assertConfigurationEquals(setting, response)) .verifyComplete(); StepVerifier.create(client.getConfigurationSetting(setting.getKey(), setting.getLabel(), null)) .assertNext(response -> assertConfigurationEquals(setting, response)) .verifyComplete(); }); } /** * Verifies that an exception is thrown when null key is passed. 
*/ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.data.appconfiguration.TestHelper public void setConfigurationSettingNullKey(HttpClient httpClient, ConfigurationServiceVersion serviceVersion) { client = getConfigurationAsyncClient(httpClient, serviceVersion); StepVerifier.create(client.setConfigurationSetting(null, NO_LABEL, "A Value")) .verifyError(IllegalArgumentException.class); StepVerifier.create(client.setConfigurationSettingWithResponse(null, false)) .verifyError(NullPointerException.class); } /** * Tests that a configuration is able to be retrieved when it exists, whether or not it is read-only. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.data.appconfiguration.TestHelper public void getConfigurationSetting(HttpClient httpClient, ConfigurationServiceVersion serviceVersion) { client = getConfigurationAsyncClient(httpClient, serviceVersion); getConfigurationSettingRunner((expected) -> StepVerifier.create(client.addConfigurationSettingWithResponse(expected).then(client.getConfigurationSettingWithResponse(expected, null, false))) .assertNext(response -> assertConfigurationEquals(expected, response)) .verifyComplete()); } /** * Tests that attempting to retrieve a non-existent configuration doesn't work, this will result in a 404. 
*/ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.data.appconfiguration.TestHelper public void getConfigurationSettingNotFound(HttpClient httpClient, ConfigurationServiceVersion serviceVersion) { client = getConfigurationAsyncClient(httpClient, serviceVersion); final String key = getKey(); final ConfigurationSetting neverRetrievedConfiguration = new ConfigurationSetting().setKey(key).setValue("myNeverRetreivedValue"); final ConfigurationSetting nonExistentLabel = new ConfigurationSetting().setKey(key).setLabel("myNonExistentLabel"); StepVerifier.create(client.addConfigurationSettingWithResponse(neverRetrievedConfiguration)) .assertNext(response -> assertConfigurationEquals(neverRetrievedConfiguration, response)) .verifyComplete(); StepVerifier.create(client.getConfigurationSetting("myNonExistentKey", null, null)) .verifyErrorSatisfies(ex -> assertRestException(ex, ResourceNotFoundException.class, HttpURLConnection.HTTP_NOT_FOUND)); StepVerifier.create(client.getConfigurationSettingWithResponse(nonExistentLabel, null, false)) .verifyErrorSatisfies(ex -> assertRestException(ex, ResourceNotFoundException.class, HttpURLConnection.HTTP_NOT_FOUND)); } /** * Tests that configurations are able to be deleted when they exist. * After the configuration has been deleted attempting to get it will result in a 404, the same as if the * configuration never existed. 
*/ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.data.appconfiguration.TestHelper public void deleteConfigurationSetting(HttpClient httpClient, ConfigurationServiceVersion serviceVersion) { client = getConfigurationAsyncClient(httpClient, serviceVersion); deleteConfigurationSettingRunner((expected) -> { StepVerifier.create(client.addConfigurationSettingWithResponse(expected).then(client.getConfigurationSettingWithResponse(expected, null, false))) .assertNext(response -> assertConfigurationEquals(expected, response)) .verifyComplete(); StepVerifier.create(client.deleteConfigurationSettingWithResponse(expected, false)) .assertNext(response -> assertConfigurationEquals(expected, response)) .verifyComplete(); StepVerifier.create(client.getConfigurationSettingWithResponse(expected, null, false)) .verifyErrorSatisfies(ex -> assertRestException(ex, ResourceNotFoundException.class, HttpURLConnection.HTTP_NOT_FOUND)); }); } /** * Tests that attempting to delete a non-existent configuration will return a 204. 
*/ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.data.appconfiguration.TestHelper public void deleteConfigurationSettingNotFound(HttpClient httpClient, ConfigurationServiceVersion serviceVersion) { client = getConfigurationAsyncClient(httpClient, serviceVersion); final String key = getKey(); final ConfigurationSetting neverDeletedConfiguration = new ConfigurationSetting().setKey(key).setValue("myNeverDeletedValue"); StepVerifier.create(client.addConfigurationSettingWithResponse(neverDeletedConfiguration)) .assertNext(response -> assertConfigurationEquals(neverDeletedConfiguration, response)) .verifyComplete(); StepVerifier.create(client.deleteConfigurationSettingWithResponse(new ConfigurationSetting().setKey("myNonExistentKey"), false)) .assertNext(response -> assertConfigurationEquals(null, response, HttpURLConnection.HTTP_NO_CONTENT)) .verifyComplete(); StepVerifier.create(client.deleteConfigurationSettingWithResponse(new ConfigurationSetting().setKey(neverDeletedConfiguration.getKey()).setLabel("myNonExistentLabel"), false)) .assertNext(response -> assertConfigurationEquals(null, response, HttpURLConnection.HTTP_NO_CONTENT)) .verifyComplete(); StepVerifier.create(client.getConfigurationSetting(neverDeletedConfiguration.getKey(), neverDeletedConfiguration.getLabel(), null)) .assertNext(response -> assertConfigurationEquals(neverDeletedConfiguration, response)) .verifyComplete(); } /** * Tests that when an ETag is passed to delete it will only delete if the current representation of the setting has the ETag. * If the delete ETag doesn't match anything the delete won't happen, this will result in a 412. 
*/ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.data.appconfiguration.TestHelper public void deleteConfigurationSettingWithETag(HttpClient httpClient, ConfigurationServiceVersion serviceVersion) { client = getConfigurationAsyncClient(httpClient, serviceVersion); deleteConfigurationSettingWithETagRunner((initial, update) -> { final ConfigurationSetting initiallyAddedConfig = client.addConfigurationSettingWithResponse(initial).block().getValue(); final ConfigurationSetting updatedConfig = client.setConfigurationSettingWithResponse(update, true).block().getValue(); StepVerifier.create(client.getConfigurationSettingWithResponse(initial, null, false)) .assertNext(response -> assertConfigurationEquals(update, response)) .verifyComplete(); StepVerifier.create(client.deleteConfigurationSettingWithResponse(initiallyAddedConfig, true)) .verifyErrorSatisfies(ex -> assertRestException(ex, HttpResponseException.class, HttpURLConnection.HTTP_PRECON_FAILED)); StepVerifier.create(client.deleteConfigurationSettingWithResponse(updatedConfig, true)) .assertNext(response -> assertConfigurationEquals(update, response)) .verifyComplete(); StepVerifier.create(client.getConfigurationSettingWithResponse(initial, null, false)) .verifyErrorSatisfies(ex -> assertRestException(ex, ResourceNotFoundException.class, HttpURLConnection.HTTP_NOT_FOUND)); }); } /** * Test the API will not make a delete call without having a key passed, an IllegalArgumentException should be thrown. 
*/ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.data.appconfiguration.TestHelper public void deleteConfigurationSettingNullKey(HttpClient httpClient, ConfigurationServiceVersion serviceVersion) { client = getConfigurationAsyncClient(httpClient, serviceVersion); StepVerifier.create(client.deleteConfigurationSetting(null, null)) .verifyError(IllegalArgumentException.class); StepVerifier.create(client.deleteConfigurationSettingWithResponse(null, false)) .verifyError(NullPointerException.class); } /** * Tests assert that the setting can not be deleted after set the setting to read-only. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.data.appconfiguration.TestHelper public void setReadOnly(HttpClient httpClient, ConfigurationServiceVersion serviceVersion) { client = getConfigurationAsyncClient(httpClient, serviceVersion); lockUnlockRunner((expected) -> { StepVerifier.create(client.addConfigurationSettingWithResponse(expected)) .assertNext(response -> assertConfigurationEquals(expected, response)) .verifyComplete(); StepVerifier.create(client.setReadOnly(expected.getKey(), expected.getLabel(), true)) .assertNext(response -> assertConfigurationEquals(expected, response)) .verifyComplete(); StepVerifier.create(client.deleteConfigurationSettingWithResponse(expected, false)) .verifyErrorSatisfies(ex -> assertRestException(ex, HttpResponseException.class, 409)); }); } /** * Tests assert that the setting can be deleted after clear read-only of the setting. 
*/ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.data.appconfiguration.TestHelper public void clearReadOnly(HttpClient httpClient, ConfigurationServiceVersion serviceVersion) { client = getConfigurationAsyncClient(httpClient, serviceVersion); lockUnlockRunner((expected) -> { StepVerifier.create(client.addConfigurationSettingWithResponse(expected)) .assertNext(response -> assertConfigurationEquals(expected, response)) .verifyComplete(); StepVerifier.create(client.setReadOnly(expected.getKey(), expected.getLabel(), true)) .assertNext(response -> assertConfigurationEquals(expected, response)) .verifyComplete(); StepVerifier.create(client.deleteConfigurationSettingWithResponse(expected, false)) .verifyErrorSatisfies(ex -> assertRestException(ex, HttpResponseException.class, 409)); StepVerifier.create(client.setReadOnly(expected.getKey(), expected.getLabel(), false)) .assertNext(response -> assertConfigurationEquals(expected, response)) .verifyComplete(); StepVerifier.create(client.deleteConfigurationSettingWithResponse(expected, false)) .assertNext(response -> assertConfigurationEquals(expected, response)) .verifyComplete(); }); } /** * Tests assert that the setting can not be deleted after set the setting to read-only. 
*/ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.data.appconfiguration.TestHelper public void setReadOnlyWithConfigurationSetting(HttpClient httpClient, ConfigurationServiceVersion serviceVersion) { client = getConfigurationAsyncClient(httpClient, serviceVersion); lockUnlockRunner((expected) -> { StepVerifier.create(client.addConfigurationSettingWithResponse(expected)) .assertNext(response -> assertConfigurationEquals(expected, response)) .verifyComplete(); StepVerifier.create(client.setReadOnlyWithResponse(expected, true)) .assertNext(response -> assertConfigurationEquals(expected, response)) .verifyComplete(); StepVerifier.create(client.deleteConfigurationSettingWithResponse(expected, false)) .verifyErrorSatisfies(ex -> assertRestException(ex, HttpResponseException.class, 409)); }); } /** * Tests assert that the setting can be deleted after clear read-only of the setting. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.data.appconfiguration.TestHelper public void clearReadOnlyWithConfigurationSetting(HttpClient httpClient, ConfigurationServiceVersion serviceVersion) { client = getConfigurationAsyncClient(httpClient, serviceVersion); lockUnlockRunner((expected) -> { StepVerifier.create(client.addConfigurationSettingWithResponse(expected)) .assertNext(response -> assertConfigurationEquals(expected, response)) .verifyComplete(); StepVerifier.create(client.setReadOnly(expected.getKey(), expected.getLabel(), true)) .assertNext(response -> assertConfigurationEquals(expected, response)) .verifyComplete(); StepVerifier.create(client.deleteConfigurationSettingWithResponse(expected, false)) .verifyErrorSatisfies(ex -> assertRestException(ex, HttpResponseException.class, 409)); StepVerifier.create(client.setReadOnlyWithResponse(expected, false)) .assertNext(response -> assertConfigurationEquals(expected, response)) .verifyComplete(); 
StepVerifier.create(client.deleteConfigurationSettingWithResponse(expected, false)) .assertNext(response -> assertConfigurationEquals(expected, response)) .verifyComplete(); }); } /** * Verifies that a ConfigurationSetting can be added with a label, and that we can fetch that ConfigurationSetting * from the service when filtering by either its label or just its key. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.data.appconfiguration.TestHelper public void listWithKeyAndLabel(HttpClient httpClient, ConfigurationServiceVersion serviceVersion) { client = getConfigurationAsyncClient(httpClient, serviceVersion); final String value = "myValue"; final String key = testResourceNamer.randomName(keyPrefix, 16); final String label = testResourceNamer.randomName("lbl", 8); final ConfigurationSetting expected = new ConfigurationSetting().setKey(key).setValue(value).setLabel(label); StepVerifier.create(client.setConfigurationSettingWithResponse(expected, false)) .assertNext(response -> assertConfigurationEquals(expected, response)) .verifyComplete(); StepVerifier.create(client.listConfigurationSettings(new SettingSelector().setKeyFilter(key).setLabelFilter(label))) .assertNext(configurationSetting -> assertConfigurationEquals(expected, configurationSetting)) .verifyComplete(); StepVerifier.create(client.listConfigurationSettings(new SettingSelector().setKeyFilter(key))) .assertNext(configurationSetting -> assertConfigurationEquals(expected, configurationSetting)) .verifyComplete(); } /** * Verifies that ConfigurationSettings can be added and that we can fetch those ConfigurationSettings from the * service when filtering by their keys. 
*/ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.data.appconfiguration.TestHelper public void listWithMultipleKeys(HttpClient httpClient, ConfigurationServiceVersion serviceVersion) { client = getConfigurationAsyncClient(httpClient, serviceVersion); String key = getKey(); String key2 = getKey(); listWithMultipleKeysRunner(key, key2, (setting, setting2) -> { List<ConfigurationSetting> selected = new ArrayList<>(); StepVerifier.create(client.addConfigurationSettingWithResponse(setting)) .assertNext(response -> assertConfigurationEquals(setting, response)) .verifyComplete(); StepVerifier.create(client.addConfigurationSettingWithResponse(setting2)) .assertNext(response -> assertConfigurationEquals(setting2, response)) .verifyComplete(); StepVerifier.create(client.listConfigurationSettings(new SettingSelector().setKeyFilter(key + "," + key2))) .consumeNextWith(selected::add) .consumeNextWith(selected::add) .verifyComplete(); return selected; }); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.data.appconfiguration.TestHelper /** * Verifies that ConfigurationSettings can be added with different labels and that we can fetch those ConfigurationSettings * from the service when filtering by their labels. 
*/ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.data.appconfiguration.TestHelper public void listWithMultipleLabels(HttpClient httpClient, ConfigurationServiceVersion serviceVersion) { client = getConfigurationAsyncClient(httpClient, serviceVersion); String key = getKey(); String label = getLabel(); String label2 = getLabel(); listWithMultipleLabelsRunner(key, label, label2, (setting, setting2) -> { List<ConfigurationSetting> selected = new ArrayList<>(); StepVerifier.create(client.addConfigurationSettingWithResponse(setting)) .assertNext(response -> assertConfigurationEquals(setting, response)) .verifyComplete(); StepVerifier.create(client.addConfigurationSettingWithResponse(setting2)) .assertNext(response -> assertConfigurationEquals(setting2, response)) .verifyComplete(); StepVerifier.create(client.listConfigurationSettings(new SettingSelector().setKeyFilter(key).setLabelFilter(label + "," + label2))) .consumeNextWith(selected::add) .consumeNextWith(selected::add) .verifyComplete(); return selected; }); } /** * Verifies that we can select filter results by key, label, and select fields using SettingSelector. 
*/ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.data.appconfiguration.TestHelper public void listConfigurationSettingsSelectFields(HttpClient httpClient, ConfigurationServiceVersion serviceVersion) { client = getConfigurationAsyncClient(httpClient, serviceVersion); listConfigurationSettingsSelectFieldsRunner((settings, selector) -> { final List<Mono<Response<ConfigurationSetting>>> settingsBeingAdded = new ArrayList<>(); for (ConfigurationSetting setting : settings) { settingsBeingAdded.add(client.setConfigurationSettingWithResponse(setting, false)); } Flux.merge(settingsBeingAdded).blockLast(); List<ConfigurationSetting> settingsReturned = new ArrayList<>(); StepVerifier.create(client.listConfigurationSettings(selector)) .assertNext(settingsReturned::add) .assertNext(settingsReturned::add) .verifyComplete(); return settingsReturned; }); } /** * Verifies that throws exception when using SettingSelector with not supported *a key filter. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.data.appconfiguration.TestHelper public void listConfigurationSettingsSelectFieldsWithPrefixStarKeyFilter(HttpClient httpClient, ConfigurationServiceVersion serviceVersion) { client = getConfigurationAsyncClient(httpClient, serviceVersion); filterValueTest("*" + getKey(), getLabel()); } /** * Verifies that throws exception when using SettingSelector with not supported *a* key filter. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.data.appconfiguration.TestHelper public void listConfigurationSettingsSelectFieldsWithSubstringKeyFilter(HttpClient httpClient, ConfigurationServiceVersion serviceVersion) { client = getConfigurationAsyncClient(httpClient, serviceVersion); filterValueTest("*" + getKey() + "*", getLabel()); } /** * Verifies that throws exception when using SettingSelector with not supported *a label filter. 
*/ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.data.appconfiguration.TestHelper public void listConfigurationSettingsSelectFieldsWithPrefixStarLabelFilter(HttpClient httpClient, ConfigurationServiceVersion serviceVersion) { client = getConfigurationAsyncClient(httpClient, serviceVersion); filterValueTest(getKey(), "*" + getLabel()); } /** * Verifies that throws exception when using SettingSelector with not supported *a* label filter. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.data.appconfiguration.TestHelper public void listConfigurationSettingsSelectFieldsWithSubstringLabelFilter(HttpClient httpClient, ConfigurationServiceVersion serviceVersion) { client = getConfigurationAsyncClient(httpClient, serviceVersion); filterValueTest(getKey(), "*" + getLabel() + "*"); } /** * Verifies that we can get a ConfigurationSetting at the provided accept datetime */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.data.appconfiguration.TestHelper public void listConfigurationSettingsAcceptDateTime(HttpClient httpClient, ConfigurationServiceVersion serviceVersion) { client = getConfigurationAsyncClient(httpClient, serviceVersion); final String keyName = testResourceNamer.randomName(keyPrefix, 16); final ConfigurationSetting original = new ConfigurationSetting().setKey(keyName).setValue("myValue"); final ConfigurationSetting updated = new ConfigurationSetting().setKey(original.getKey()).setValue("anotherValue"); final ConfigurationSetting updated2 = new ConfigurationSetting().setKey(original.getKey()).setValue("anotherValue2"); StepVerifier.create(client.setConfigurationSettingWithResponse(original, false)) .assertNext(response -> assertConfigurationEquals(original, response)) .verifyComplete(); StepVerifier.create(client.setConfigurationSettingWithResponse(updated, false).delayElement(Duration.ofSeconds(2))) .assertNext(response -> assertConfigurationEquals(updated, 
response)) .verifyComplete(); StepVerifier.create(client.setConfigurationSettingWithResponse(updated2, false)) .assertNext(response -> assertConfigurationEquals(updated2, response)) .verifyComplete(); List<ConfigurationSetting> revisions = client.listRevisions(new SettingSelector().setKeyFilter(keyName)).collectList().block(); assertNotNull(revisions); assertEquals(3, revisions.size()); SettingSelector options = new SettingSelector().setKeyFilter(keyName).setAcceptDatetime(revisions.get(1).getLastModified()); StepVerifier.create(client.listConfigurationSettings(options)) .assertNext(response -> assertConfigurationEquals(updated, response)) .verifyComplete(); } /** * Verifies that we can get all of the revisions for this ConfigurationSetting. Then verifies that we can select * specific fields. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.data.appconfiguration.TestHelper public void listRevisions(HttpClient httpClient, ConfigurationServiceVersion serviceVersion) { client = getConfigurationAsyncClient(httpClient, serviceVersion); final String keyName = testResourceNamer.randomName(keyPrefix, 16); final ConfigurationSetting original = new ConfigurationSetting().setKey(keyName).setValue("myValue"); final ConfigurationSetting updated = new ConfigurationSetting().setKey(original.getKey()).setValue("anotherValue"); final ConfigurationSetting updated2 = new ConfigurationSetting().setKey(original.getKey()).setValue("anotherValue2"); StepVerifier.create(client.setConfigurationSettingWithResponse(original, false)) .assertNext(response -> assertConfigurationEquals(original, response)) .verifyComplete(); StepVerifier.create(client.setConfigurationSettingWithResponse(updated, false)) .assertNext(response -> assertConfigurationEquals(updated, response)) .verifyComplete(); StepVerifier.create(client.setConfigurationSettingWithResponse(updated2, false)) .assertNext(response -> assertConfigurationEquals(updated2, response)) .verifyComplete(); 
StepVerifier.create(client.listRevisions(new SettingSelector().setKeyFilter(keyName))) .assertNext(response -> assertConfigurationEquals(updated2, response)) .assertNext(response -> assertConfigurationEquals(updated, response)) .assertNext(response -> assertConfigurationEquals(original, response)) .verifyComplete(); StepVerifier.create(client.listRevisions(new SettingSelector().setKeyFilter(keyName).setFields(SettingFields.KEY, SettingFields.ETAG))) .assertNext(response -> validateListRevisions(updated2, response)) .assertNext(response -> validateListRevisions(updated, response)) .assertNext(response -> validateListRevisions(original, response)) .verifyComplete(); assertTrue(client.listRevisions(null).toStream().collect(Collectors.toList()).size() > 0); } /** * Verifies that we can get all the revisions for all settings with the specified keys. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.data.appconfiguration.TestHelper public void listRevisionsWithMultipleKeys(HttpClient httpClient, ConfigurationServiceVersion serviceVersion) { client = getConfigurationAsyncClient(httpClient, serviceVersion); String key = getKey(); String key2 = getKey(); listRevisionsWithMultipleKeysRunner(key, key2, (testInput) -> { List<ConfigurationSetting> selected = new ArrayList<>(); StepVerifier.create(client.addConfigurationSettingWithResponse(testInput.get(0))) .assertNext(response -> assertConfigurationEquals(testInput.get(0), response)) .verifyComplete(); StepVerifier.create(client.setConfigurationSettingWithResponse(testInput.get(1), false)) .assertNext(response -> assertConfigurationEquals(testInput.get(1), response)) .verifyComplete(); StepVerifier.create(client.addConfigurationSettingWithResponse(testInput.get(2))) .assertNext(response -> assertConfigurationEquals(testInput.get(2), response)) .verifyComplete(); StepVerifier.create(client.setConfigurationSettingWithResponse(testInput.get(3), false)) .assertNext(response -> 
assertConfigurationEquals(testInput.get(3), response)) .verifyComplete(); StepVerifier.create(client.listRevisions(new SettingSelector().setKeyFilter(key + "," + key2))) .consumeNextWith(selected::add) .consumeNextWith(selected::add) .consumeNextWith(selected::add) .consumeNextWith(selected::add) .verifyComplete(); return selected; }); } /** * Verifies that we can get all revisions for all settings with the specified labels. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.data.appconfiguration.TestHelper public void listRevisionsWithMultipleLabels(HttpClient httpClient, ConfigurationServiceVersion serviceVersion) { client = getConfigurationAsyncClient(httpClient, serviceVersion); String key = getKey(); String label = getLabel(); String label2 = getLabel(); listRevisionsWithMultipleLabelsRunner(key, label, label2, (testInput) -> { List<ConfigurationSetting> selected = new ArrayList<>(); StepVerifier.create(client.addConfigurationSettingWithResponse(testInput.get(0))) .assertNext(response -> assertConfigurationEquals(testInput.get(0), response)) .verifyComplete(); StepVerifier.create(client.setConfigurationSettingWithResponse(testInput.get(1), false)) .assertNext(response -> assertConfigurationEquals(testInput.get(1), response)) .verifyComplete(); StepVerifier.create(client.addConfigurationSettingWithResponse(testInput.get(2))) .assertNext(response -> assertConfigurationEquals(testInput.get(2), response)) .verifyComplete(); StepVerifier.create(client.setConfigurationSettingWithResponse(testInput.get(3), false)) .assertNext(response -> assertConfigurationEquals(testInput.get(3), response)) .verifyComplete(); StepVerifier.create(client.listRevisions(new SettingSelector().setKeyFilter(key).setLabelFilter(label + "," + label2))) .consumeNextWith(selected::add) .consumeNextWith(selected::add) .consumeNextWith(selected::add) .consumeNextWith(selected::add) .verifyComplete(); return selected; }); } /** * Verifies that we can get a subset 
of revisions based on the "acceptDateTime" */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.data.appconfiguration.TestHelper public void listRevisionsAcceptDateTime(HttpClient httpClient, ConfigurationServiceVersion serviceVersion) { client = getConfigurationAsyncClient(httpClient, serviceVersion); final String keyName = testResourceNamer.randomName(keyPrefix, 16); final ConfigurationSetting original = new ConfigurationSetting().setKey(keyName).setValue("myValue"); final ConfigurationSetting updated = new ConfigurationSetting().setKey(original.getKey()).setValue("anotherValue"); final ConfigurationSetting updated2 = new ConfigurationSetting().setKey(original.getKey()).setValue("anotherValue2"); StepVerifier.create(client.setConfigurationSettingWithResponse(original, false)) .assertNext(response -> assertConfigurationEquals(original, response)) .verifyComplete(); StepVerifier.create(client.setConfigurationSettingWithResponse(updated, false).delayElement(Duration.ofSeconds(2))) .assertNext(response -> assertConfigurationEquals(updated, response)) .verifyComplete(); StepVerifier.create(client.setConfigurationSettingWithResponse(updated2, false)) .assertNext(response -> assertConfigurationEquals(updated2, response)) .verifyComplete(); List<ConfigurationSetting> revisions = client.listRevisions(new SettingSelector().setKeyFilter(keyName)).collectList().block(); assertNotNull(revisions); assertEquals(3, revisions.size()); SettingSelector options = new SettingSelector().setKeyFilter(keyName).setAcceptDatetime(revisions.get(1).getLastModified()); StepVerifier.create(client.listRevisions(options)) .assertNext(response -> assertConfigurationEquals(updated, response)) .assertNext(response -> assertConfigurationEquals(original, response)) .verifyComplete(); } /** * Verifies that, given a ton of revisions, we can list the revisions ConfigurationSettings using pagination * (ie. where 'nextLink' has a URL pointing to the next page of results.) 
*/ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.data.appconfiguration.TestHelper public void listRevisionsWithPagination(HttpClient httpClient, ConfigurationServiceVersion serviceVersion) { client = getConfigurationAsyncClient(httpClient, serviceVersion); final int numberExpected = 50; List<ConfigurationSetting> settings = new ArrayList<>(numberExpected); for (int value = 0; value < numberExpected; value++) { settings.add(new ConfigurationSetting().setKey(keyPrefix).setValue("myValue" + value).setLabel(labelPrefix)); } List<Mono<Response<ConfigurationSetting>>> results = new ArrayList<>(); for (ConfigurationSetting setting : settings) { results.add(client.setConfigurationSettingWithResponse(setting, false)); } SettingSelector filter = new SettingSelector().setKeyFilter(keyPrefix).setLabelFilter(labelPrefix); Flux.merge(results).blockLast(); StepVerifier.create(client.listRevisions(filter)) .expectNextCount(numberExpected) .verifyComplete(); } /** * Verifies that, given a ton of revisions, we can list the revisions ConfigurationSettings using pagination and stream is invoked multiple times. * (ie. where 'nextLink' has a URL pointing to the next page of results.) 
*/ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.data.appconfiguration.TestHelper public void listRevisionsWithPaginationAndRepeatStream(HttpClient httpClient, ConfigurationServiceVersion serviceVersion) { client = getConfigurationAsyncClient(httpClient, serviceVersion); final int numberExpected = 50; List<ConfigurationSetting> settings = new ArrayList<>(numberExpected); List<Mono<Response<ConfigurationSetting>>> results = new ArrayList<>(); for (int value = 0; value < numberExpected; value++) { ConfigurationSetting setting = new ConfigurationSetting().setKey(keyPrefix).setValue("myValue" + value).setLabel(labelPrefix); settings.add(setting); results.add(client.setConfigurationSettingWithResponse(setting, false)); } SettingSelector filter = new SettingSelector().setKeyFilter(keyPrefix).setLabelFilter(labelPrefix); Flux.merge(results).blockLast(); List<ConfigurationSetting> configurationSettingList1 = new ArrayList<>(); List<ConfigurationSetting> configurationSettingList2 = new ArrayList<>(); PagedFlux<ConfigurationSetting> configurationSettingPagedFlux = client.listRevisions(filter); configurationSettingPagedFlux.toStream().forEach(configurationSetting -> configurationSettingList1.add(configurationSetting)); assertEquals(numberExpected, configurationSettingList1.size()); configurationSettingPagedFlux.toStream().forEach(configurationSetting -> configurationSettingList2.add(configurationSetting)); assertEquals(numberExpected, configurationSettingList2.size()); } /** * Verifies that, given a ton of revisions, we can list the revisions ConfigurationSettings using pagination and stream is invoked multiple times. * (ie. where 'nextLink' has a URL pointing to the next page of results.) 
*/ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.data.appconfiguration.TestHelper public void listRevisionsWithPaginationAndRepeatIterator(HttpClient httpClient, ConfigurationServiceVersion serviceVersion) { client = getConfigurationAsyncClient(httpClient, serviceVersion); final int numberExpected = 50; List<ConfigurationSetting> settings = new ArrayList<>(numberExpected); List<Mono<Response<ConfigurationSetting>>> results = new ArrayList<>(); for (int value = 0; value < numberExpected; value++) { ConfigurationSetting setting = new ConfigurationSetting().setKey(keyPrefix).setValue("myValue" + value).setLabel(labelPrefix); settings.add(setting); results.add(client.setConfigurationSettingWithResponse(setting, false)); } SettingSelector filter = new SettingSelector().setKeyFilter(keyPrefix).setLabelFilter(labelPrefix); Flux.merge(results).blockLast(); List<ConfigurationSetting> configurationSettingList1 = new ArrayList<>(); List<ConfigurationSetting> configurationSettingList2 = new ArrayList<>(); PagedFlux<ConfigurationSetting> configurationSettingPagedFlux = client.listRevisions(filter); configurationSettingPagedFlux.toIterable().forEach(configurationSetting -> configurationSettingList1.add(configurationSetting)); assertEquals(numberExpected, configurationSettingList1.size()); configurationSettingPagedFlux.toIterable().forEach(configurationSetting -> configurationSettingList2.add(configurationSetting)); assertEquals(numberExpected, configurationSettingList2.size()); } /** * Verifies that, given a ton of existing settings, we can list the ConfigurationSettings using pagination * (ie. where 'nextLink' has a URL pointing to the next page of results. 
*/ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.data.appconfiguration.TestHelper public void listConfigurationSettingsWithPagination(HttpClient httpClient, ConfigurationServiceVersion serviceVersion) { client = getConfigurationAsyncClient(httpClient, serviceVersion); final int numberExpected = 50; List<ConfigurationSetting> settings = new ArrayList<>(numberExpected); for (int value = 0; value < numberExpected; value++) { settings.add(new ConfigurationSetting().setKey(keyPrefix + "-" + value).setValue("myValue").setLabel(labelPrefix)); } List<Mono<Response<ConfigurationSetting>>> results = new ArrayList<>(); for (ConfigurationSetting setting : settings) { results.add(client.setConfigurationSettingWithResponse(setting, false)); } SettingSelector filter = new SettingSelector().setKeyFilter(keyPrefix + "-*").setLabelFilter(labelPrefix); Flux.merge(results).blockLast(); StepVerifier.create(client.listConfigurationSettings(filter)) .expectNextCount(numberExpected) .verifyComplete(); } /** * Verifies the conditional "GET" scenario where the setting has yet to be updated, resulting in a 304. This GET * scenario will return a setting when the ETag provided does not match the one of the current setting. 
*/ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.data.appconfiguration.TestHelper public void getConfigurationSettingWhenValueNotUpdated(HttpClient httpClient, ConfigurationServiceVersion serviceVersion) { client = getConfigurationAsyncClient(httpClient, serviceVersion); final String key = getKey(); final ConfigurationSetting expected = new ConfigurationSetting().setKey(key).setValue("myValue"); final ConfigurationSetting newExpected = new ConfigurationSetting().setKey(key).setValue("myNewValue"); final ConfigurationSetting block = client.addConfigurationSettingWithResponse(expected).block().getValue(); assertNotNull(block); StepVerifier.create(client.getConfigurationSettingWithResponse(block, null, true)) .assertNext(response -> assertConfigurationEquals(null, response, 304)) .verifyComplete(); StepVerifier.create(client.setConfigurationSettingWithResponse(newExpected, false)) .assertNext(response -> assertConfigurationEquals(newExpected, response)) .verifyComplete(); StepVerifier.create(client.getConfigurationSettingWithResponse(block, null, true)) .assertNext(response -> assertConfigurationEquals(newExpected, response)) .verifyComplete(); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.data.appconfiguration.TestHelper @Disabled public void deleteAllSettings(HttpClient httpClient, ConfigurationServiceVersion serviceVersion) { client = getConfigurationAsyncClient(httpClient, serviceVersion); client.listConfigurationSettings(new SettingSelector().setKeyFilter("*")) .flatMap(configurationSetting -> { logger.info("Deleting key:label [{}:{}]. isReadOnly? 
{}", configurationSetting.getKey(), configurationSetting.getLabel(), configurationSetting.isReadOnly()); return client.deleteConfigurationSettingWithResponse(configurationSetting, false); }).blockLast(); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.data.appconfiguration.TestHelper public void addHeadersFromContextPolicyTest(HttpClient httpClient, ConfigurationServiceVersion serviceVersion) { client = getConfigurationAsyncClient(httpClient, serviceVersion); final HttpHeaders headers = getCustomizedHeaders(); addHeadersFromContextPolicyRunner(expected -> StepVerifier.create(client.addConfigurationSettingWithResponse(expected) .subscriberContext(Context.of(AddHeadersFromContextPolicy.AZURE_REQUEST_HTTP_HEADERS_KEY, headers))) .assertNext(response -> { final HttpHeaders requestHeaders = response.getRequest().getHeaders(); assertContainsHeaders(headers, requestHeaders); }) .verifyComplete()); } /** * Test helper that calling list configuration setting with given key and label input * * @param keyFilter key filter expression * @param labelFilter label filter expression */ private void filterValueTest(String keyFilter, String labelFilter) { listConfigurationSettingsSelectFieldsWithNotSupportedFilterRunner(keyFilter, labelFilter, selector -> StepVerifier.create(client.listConfigurationSettings(selector)) .verifyError(HttpResponseException.class)); } }
class ConfigurationAsyncClientTest extends ConfigurationClientTestBase { private final ClientLogger logger = new ClientLogger(ConfigurationAsyncClientTest.class); private static final String NO_LABEL = null; private ConfigurationAsyncClient client; @Override protected String getTestName() { return ""; } @Override protected void beforeTest() { beforeTestSetup(); } @Override protected void afterTest() { logger.info("Cleaning up created key values."); client.listConfigurationSettings(new SettingSelector().setKeyFilter(keyPrefix + "*")) .flatMap(configurationSetting -> { logger.info("Deleting key:label [{}:{}]. isReadOnly? {}", configurationSetting.getKey(), configurationSetting.getLabel(), configurationSetting.isReadOnly()); Mono<Response<ConfigurationSetting>> unlock = configurationSetting.isReadOnly() ? client.setReadOnlyWithResponse(configurationSetting, false) : Mono.empty(); return unlock.then(client.deleteConfigurationSettingWithResponse(configurationSetting, false)); }) .blockLast(); logger.info("Finished cleaning up values."); } private ConfigurationAsyncClient getConfigurationAsyncClient(HttpClient httpClient, ConfigurationServiceVersion serviceVersion) { return clientSetup(credentials -> { ConfigurationClientBuilder builder = new ConfigurationClientBuilder() .connectionString(connectionString) .httpClient(httpClient == null ? interceptorManager.getPlaybackClient() : httpClient) .serviceVersion(serviceVersion) .httpLogOptions(new HttpLogOptions().setLogLevel(HttpLogDetailLevel.BODY_AND_HEADERS)); if (getTestMode() != TestMode.PLAYBACK) { builder .addPolicy(interceptorManager.getRecordPolicy()) .addPolicy(new RetryPolicy()); } return builder.buildAsyncClient(); }); } /** * Tests that a configuration is able to be added, these are differentiate from each other using a key or key-label * identifier. 
*/ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.data.appconfiguration.TestHelper public void addConfigurationSetting(HttpClient httpClient, ConfigurationServiceVersion serviceVersion) { client = getConfigurationAsyncClient(httpClient, serviceVersion); addConfigurationSettingRunner((expected) -> StepVerifier.create(client.addConfigurationSettingWithResponse(expected)) .assertNext(response -> assertConfigurationEquals(expected, response)) .verifyComplete()); } /** * Tests that we cannot add a configuration setting when the key is an empty string. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.data.appconfiguration.TestHelper public void addConfigurationSettingEmptyKey(HttpClient httpClient, ConfigurationServiceVersion serviceVersion) { client = getConfigurationAsyncClient(httpClient, serviceVersion); StepVerifier.create(client.addConfigurationSetting("", null, "A value")) .verifyErrorSatisfies(ex -> assertRestException(ex, HttpURLConnection.HTTP_BAD_METHOD)); } /** * Tests that we can add configuration settings when value is not null or an empty string. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.data.appconfiguration.TestHelper public void addConfigurationSettingEmptyValue(HttpClient httpClient, ConfigurationServiceVersion serviceVersion) { client = getConfigurationAsyncClient(httpClient, serviceVersion); addConfigurationSettingEmptyValueRunner((setting) -> { StepVerifier.create(client.addConfigurationSetting(setting.getKey(), setting.getLabel(), setting.getValue())) .assertNext(response -> assertConfigurationEquals(setting, response)) .verifyComplete(); StepVerifier.create(client.getConfigurationSetting(setting.getKey(), setting.getLabel(), null)) .assertNext(response -> assertConfigurationEquals(setting, response)) .verifyComplete(); }); } /** * Verifies that an exception is thrown when null key is passed. 
*/ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.data.appconfiguration.TestHelper public void addConfigurationSettingNullKey(HttpClient httpClient, ConfigurationServiceVersion serviceVersion) { client = getConfigurationAsyncClient(httpClient, serviceVersion); StepVerifier.create(client.addConfigurationSetting(null, null, "A Value")) .expectError(IllegalArgumentException.class) .verify(); StepVerifier.create(client.addConfigurationSettingWithResponse(null)) .expectError(NullPointerException.class) .verify(); } /** * Tests that a configuration cannot be added twice with the same key. This should return a 412 error. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.data.appconfiguration.TestHelper public void addExistingSetting(HttpClient httpClient, ConfigurationServiceVersion serviceVersion) { client = getConfigurationAsyncClient(httpClient, serviceVersion); addExistingSettingRunner((expected) -> StepVerifier.create(client.addConfigurationSettingWithResponse(expected).then(client.addConfigurationSettingWithResponse(expected))) .verifyErrorSatisfies(ex -> assertRestException(ex, ResourceExistsException.class, HttpURLConnection.HTTP_PRECON_FAILED))); } /** * Tests that a configuration is able to be added or updated with set. * When the configuration is read-only updates cannot happen, this will result in a 409. 
*/ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.data.appconfiguration.TestHelper public void setConfigurationSetting(HttpClient httpClient, ConfigurationServiceVersion serviceVersion) { client = getConfigurationAsyncClient(httpClient, serviceVersion); setConfigurationSettingRunner((expected, update) -> StepVerifier.create(client.setConfigurationSettingWithResponse(expected, false)) .assertNext(response -> assertConfigurationEquals(expected, response)) .verifyComplete()); } /** * Tests that when an ETag is passed to set it will only set if the current representation of the setting has the * ETag. If the set ETag doesn't match anything the update won't happen, this will result in a 412. This will * prevent set from doing an add as well. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.data.appconfiguration.TestHelper public void setConfigurationSettingIfETag(HttpClient httpClient, ConfigurationServiceVersion serviceVersion) { client = getConfigurationAsyncClient(httpClient, serviceVersion); setConfigurationSettingIfETagRunner((initial, update) -> { StepVerifier.create(client.setConfigurationSettingWithResponse(initial.setETag("badEtag"), true)) .verifyErrorSatisfies(ex -> assertRestException(ex, HttpResponseException.class, HttpURLConnection.HTTP_PRECON_FAILED)); final String etag = client.addConfigurationSettingWithResponse(initial).block().getValue().getETag(); StepVerifier.create(client.setConfigurationSettingWithResponse(update.setETag(etag), true)) .assertNext(response -> assertConfigurationEquals(update, response)) .verifyComplete(); StepVerifier.create(client.setConfigurationSettingWithResponse(initial, true)) .verifyErrorSatisfies(ex -> assertRestException(ex, HttpResponseException.class, HttpURLConnection.HTTP_PRECON_FAILED)); StepVerifier.create(client.getConfigurationSettingWithResponse(update, null, false)) .assertNext(response -> assertConfigurationEquals(update, response)) 
.verifyComplete(); }); } /** * Tests that we cannot set a configuration setting when the key is an empty string. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.data.appconfiguration.TestHelper public void setConfigurationSettingEmptyKey(HttpClient httpClient, ConfigurationServiceVersion serviceVersion) { client = getConfigurationAsyncClient(httpClient, serviceVersion); StepVerifier.create(client.setConfigurationSetting("", NO_LABEL, "A value")) .verifyErrorSatisfies(ex -> assertRestException(ex, HttpURLConnection.HTTP_BAD_METHOD)); } /** * Tests that we can set configuration settings when value is not null or an empty string. * Value is not a required property. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.data.appconfiguration.TestHelper public void setConfigurationSettingEmptyValue(HttpClient httpClient, ConfigurationServiceVersion serviceVersion) { client = getConfigurationAsyncClient(httpClient, serviceVersion); setConfigurationSettingEmptyValueRunner((setting) -> { StepVerifier.create(client.setConfigurationSetting(setting.getKey(), NO_LABEL, setting.getValue())) .assertNext(response -> assertConfigurationEquals(setting, response)) .verifyComplete(); StepVerifier.create(client.getConfigurationSetting(setting.getKey(), setting.getLabel(), null)) .assertNext(response -> assertConfigurationEquals(setting, response)) .verifyComplete(); }); } /** * Verifies that an exception is thrown when null key is passed. 
*/ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.data.appconfiguration.TestHelper public void setConfigurationSettingNullKey(HttpClient httpClient, ConfigurationServiceVersion serviceVersion) { client = getConfigurationAsyncClient(httpClient, serviceVersion); StepVerifier.create(client.setConfigurationSetting(null, NO_LABEL, "A Value")) .verifyError(IllegalArgumentException.class); StepVerifier.create(client.setConfigurationSettingWithResponse(null, false)) .verifyError(NullPointerException.class); } /** * Tests that a configuration is able to be retrieved when it exists, whether or not it is read-only. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.data.appconfiguration.TestHelper public void getConfigurationSetting(HttpClient httpClient, ConfigurationServiceVersion serviceVersion) { client = getConfigurationAsyncClient(httpClient, serviceVersion); getConfigurationSettingRunner((expected) -> StepVerifier.create(client.addConfigurationSettingWithResponse(expected).then(client.getConfigurationSettingWithResponse(expected, null, false))) .assertNext(response -> assertConfigurationEquals(expected, response)) .verifyComplete()); } /** * Tests that attempting to retrieve a non-existent configuration doesn't work, this will result in a 404. 
*/ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.data.appconfiguration.TestHelper public void getConfigurationSettingNotFound(HttpClient httpClient, ConfigurationServiceVersion serviceVersion) { client = getConfigurationAsyncClient(httpClient, serviceVersion); final String key = getKey(); final ConfigurationSetting neverRetrievedConfiguration = new ConfigurationSetting().setKey(key).setValue("myNeverRetreivedValue"); final ConfigurationSetting nonExistentLabel = new ConfigurationSetting().setKey(key).setLabel("myNonExistentLabel"); StepVerifier.create(client.addConfigurationSettingWithResponse(neverRetrievedConfiguration)) .assertNext(response -> assertConfigurationEquals(neverRetrievedConfiguration, response)) .verifyComplete(); StepVerifier.create(client.getConfigurationSetting("myNonExistentKey", null, null)) .verifyErrorSatisfies(ex -> assertRestException(ex, ResourceNotFoundException.class, HttpURLConnection.HTTP_NOT_FOUND)); StepVerifier.create(client.getConfigurationSettingWithResponse(nonExistentLabel, null, false)) .verifyErrorSatisfies(ex -> assertRestException(ex, ResourceNotFoundException.class, HttpURLConnection.HTTP_NOT_FOUND)); } /** * Tests that configurations are able to be deleted when they exist. * After the configuration has been deleted attempting to get it will result in a 404, the same as if the * configuration never existed. 
*/ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.data.appconfiguration.TestHelper public void deleteConfigurationSetting(HttpClient httpClient, ConfigurationServiceVersion serviceVersion) { client = getConfigurationAsyncClient(httpClient, serviceVersion); deleteConfigurationSettingRunner((expected) -> { StepVerifier.create(client.addConfigurationSettingWithResponse(expected).then(client.getConfigurationSettingWithResponse(expected, null, false))) .assertNext(response -> assertConfigurationEquals(expected, response)) .verifyComplete(); StepVerifier.create(client.deleteConfigurationSettingWithResponse(expected, false)) .assertNext(response -> assertConfigurationEquals(expected, response)) .verifyComplete(); StepVerifier.create(client.getConfigurationSettingWithResponse(expected, null, false)) .verifyErrorSatisfies(ex -> assertRestException(ex, ResourceNotFoundException.class, HttpURLConnection.HTTP_NOT_FOUND)); }); } /** * Tests that attempting to delete a non-existent configuration will return a 204. 
*/ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.data.appconfiguration.TestHelper public void deleteConfigurationSettingNotFound(HttpClient httpClient, ConfigurationServiceVersion serviceVersion) { client = getConfigurationAsyncClient(httpClient, serviceVersion); final String key = getKey(); final ConfigurationSetting neverDeletedConfiguration = new ConfigurationSetting().setKey(key).setValue("myNeverDeletedValue"); StepVerifier.create(client.addConfigurationSettingWithResponse(neverDeletedConfiguration)) .assertNext(response -> assertConfigurationEquals(neverDeletedConfiguration, response)) .verifyComplete(); StepVerifier.create(client.deleteConfigurationSettingWithResponse(new ConfigurationSetting().setKey("myNonExistentKey"), false)) .assertNext(response -> assertConfigurationEquals(null, response, HttpURLConnection.HTTP_NO_CONTENT)) .verifyComplete(); StepVerifier.create(client.deleteConfigurationSettingWithResponse(new ConfigurationSetting().setKey(neverDeletedConfiguration.getKey()).setLabel("myNonExistentLabel"), false)) .assertNext(response -> assertConfigurationEquals(null, response, HttpURLConnection.HTTP_NO_CONTENT)) .verifyComplete(); StepVerifier.create(client.getConfigurationSetting(neverDeletedConfiguration.getKey(), neverDeletedConfiguration.getLabel(), null)) .assertNext(response -> assertConfigurationEquals(neverDeletedConfiguration, response)) .verifyComplete(); } /** * Tests that when an ETag is passed to delete it will only delete if the current representation of the setting has the ETag. * If the delete ETag doesn't match anything the delete won't happen, this will result in a 412. 
*/ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.data.appconfiguration.TestHelper public void deleteConfigurationSettingWithETag(HttpClient httpClient, ConfigurationServiceVersion serviceVersion) { client = getConfigurationAsyncClient(httpClient, serviceVersion); deleteConfigurationSettingWithETagRunner((initial, update) -> { final ConfigurationSetting initiallyAddedConfig = client.addConfigurationSettingWithResponse(initial).block().getValue(); final ConfigurationSetting updatedConfig = client.setConfigurationSettingWithResponse(update, true).block().getValue(); StepVerifier.create(client.getConfigurationSettingWithResponse(initial, null, false)) .assertNext(response -> assertConfigurationEquals(update, response)) .verifyComplete(); StepVerifier.create(client.deleteConfigurationSettingWithResponse(initiallyAddedConfig, true)) .verifyErrorSatisfies(ex -> assertRestException(ex, HttpResponseException.class, HttpURLConnection.HTTP_PRECON_FAILED)); StepVerifier.create(client.deleteConfigurationSettingWithResponse(updatedConfig, true)) .assertNext(response -> assertConfigurationEquals(update, response)) .verifyComplete(); StepVerifier.create(client.getConfigurationSettingWithResponse(initial, null, false)) .verifyErrorSatisfies(ex -> assertRestException(ex, ResourceNotFoundException.class, HttpURLConnection.HTTP_NOT_FOUND)); }); } /** * Test the API will not make a delete call without having a key passed, an IllegalArgumentException should be thrown. 
*/ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.data.appconfiguration.TestHelper public void deleteConfigurationSettingNullKey(HttpClient httpClient, ConfigurationServiceVersion serviceVersion) { client = getConfigurationAsyncClient(httpClient, serviceVersion); StepVerifier.create(client.deleteConfigurationSetting(null, null)) .verifyError(IllegalArgumentException.class); StepVerifier.create(client.deleteConfigurationSettingWithResponse(null, false)) .verifyError(NullPointerException.class); } /** * Tests assert that the setting can not be deleted after set the setting to read-only. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.data.appconfiguration.TestHelper public void setReadOnly(HttpClient httpClient, ConfigurationServiceVersion serviceVersion) { client = getConfigurationAsyncClient(httpClient, serviceVersion); lockUnlockRunner((expected) -> { StepVerifier.create(client.addConfigurationSettingWithResponse(expected)) .assertNext(response -> assertConfigurationEquals(expected, response)) .verifyComplete(); StepVerifier.create(client.setReadOnly(expected.getKey(), expected.getLabel(), true)) .assertNext(response -> assertConfigurationEquals(expected, response)) .verifyComplete(); StepVerifier.create(client.deleteConfigurationSettingWithResponse(expected, false)) .verifyErrorSatisfies(ex -> assertRestException(ex, HttpResponseException.class, 409)); }); } /** * Tests assert that the setting can be deleted after clear read-only of the setting. 
*/ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.data.appconfiguration.TestHelper public void clearReadOnly(HttpClient httpClient, ConfigurationServiceVersion serviceVersion) { client = getConfigurationAsyncClient(httpClient, serviceVersion); lockUnlockRunner((expected) -> { StepVerifier.create(client.addConfigurationSettingWithResponse(expected)) .assertNext(response -> assertConfigurationEquals(expected, response)) .verifyComplete(); StepVerifier.create(client.setReadOnly(expected.getKey(), expected.getLabel(), true)) .assertNext(response -> assertConfigurationEquals(expected, response)) .verifyComplete(); StepVerifier.create(client.deleteConfigurationSettingWithResponse(expected, false)) .verifyErrorSatisfies(ex -> assertRestException(ex, HttpResponseException.class, 409)); StepVerifier.create(client.setReadOnly(expected.getKey(), expected.getLabel(), false)) .assertNext(response -> assertConfigurationEquals(expected, response)) .verifyComplete(); StepVerifier.create(client.deleteConfigurationSettingWithResponse(expected, false)) .assertNext(response -> assertConfigurationEquals(expected, response)) .verifyComplete(); }); } /** * Tests assert that the setting can not be deleted after set the setting to read-only. 
*/ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.data.appconfiguration.TestHelper public void setReadOnlyWithConfigurationSetting(HttpClient httpClient, ConfigurationServiceVersion serviceVersion) { client = getConfigurationAsyncClient(httpClient, serviceVersion); lockUnlockRunner((expected) -> { StepVerifier.create(client.addConfigurationSettingWithResponse(expected)) .assertNext(response -> assertConfigurationEquals(expected, response)) .verifyComplete(); StepVerifier.create(client.setReadOnlyWithResponse(expected, true)) .assertNext(response -> assertConfigurationEquals(expected, response)) .verifyComplete(); StepVerifier.create(client.deleteConfigurationSettingWithResponse(expected, false)) .verifyErrorSatisfies(ex -> assertRestException(ex, HttpResponseException.class, 409)); }); } /** * Tests assert that the setting can be deleted after clear read-only of the setting. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.data.appconfiguration.TestHelper public void clearReadOnlyWithConfigurationSetting(HttpClient httpClient, ConfigurationServiceVersion serviceVersion) { client = getConfigurationAsyncClient(httpClient, serviceVersion); lockUnlockRunner((expected) -> { StepVerifier.create(client.addConfigurationSettingWithResponse(expected)) .assertNext(response -> assertConfigurationEquals(expected, response)) .verifyComplete(); StepVerifier.create(client.setReadOnly(expected.getKey(), expected.getLabel(), true)) .assertNext(response -> assertConfigurationEquals(expected, response)) .verifyComplete(); StepVerifier.create(client.deleteConfigurationSettingWithResponse(expected, false)) .verifyErrorSatisfies(ex -> assertRestException(ex, HttpResponseException.class, 409)); StepVerifier.create(client.setReadOnlyWithResponse(expected, false)) .assertNext(response -> assertConfigurationEquals(expected, response)) .verifyComplete(); 
StepVerifier.create(client.deleteConfigurationSettingWithResponse(expected, false)) .assertNext(response -> assertConfigurationEquals(expected, response)) .verifyComplete(); }); } /** * Verifies that a ConfigurationSetting can be added with a label, and that we can fetch that ConfigurationSetting * from the service when filtering by either its label or just its key. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.data.appconfiguration.TestHelper public void listWithKeyAndLabel(HttpClient httpClient, ConfigurationServiceVersion serviceVersion) { client = getConfigurationAsyncClient(httpClient, serviceVersion); final String value = "myValue"; final String key = testResourceNamer.randomName(keyPrefix, 16); final String label = testResourceNamer.randomName("lbl", 8); final ConfigurationSetting expected = new ConfigurationSetting().setKey(key).setValue(value).setLabel(label); StepVerifier.create(client.setConfigurationSettingWithResponse(expected, false)) .assertNext(response -> assertConfigurationEquals(expected, response)) .verifyComplete(); StepVerifier.create(client.listConfigurationSettings(new SettingSelector().setKeyFilter(key).setLabelFilter(label))) .assertNext(configurationSetting -> assertConfigurationEquals(expected, configurationSetting)) .verifyComplete(); StepVerifier.create(client.listConfigurationSettings(new SettingSelector().setKeyFilter(key))) .assertNext(configurationSetting -> assertConfigurationEquals(expected, configurationSetting)) .verifyComplete(); } /** * Verifies that ConfigurationSettings can be added and that we can fetch those ConfigurationSettings from the * service when filtering by their keys. 
*/ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.data.appconfiguration.TestHelper public void listWithMultipleKeys(HttpClient httpClient, ConfigurationServiceVersion serviceVersion) { client = getConfigurationAsyncClient(httpClient, serviceVersion); String key = getKey(); String key2 = getKey(); listWithMultipleKeysRunner(key, key2, (setting, setting2) -> { List<ConfigurationSetting> selected = new ArrayList<>(); StepVerifier.create(client.addConfigurationSettingWithResponse(setting)) .assertNext(response -> assertConfigurationEquals(setting, response)) .verifyComplete(); StepVerifier.create(client.addConfigurationSettingWithResponse(setting2)) .assertNext(response -> assertConfigurationEquals(setting2, response)) .verifyComplete(); StepVerifier.create(client.listConfigurationSettings(new SettingSelector().setKeyFilter(key + "," + key2))) .consumeNextWith(selected::add) .consumeNextWith(selected::add) .verifyComplete(); return selected; }); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.data.appconfiguration.TestHelper /** * Verifies that ConfigurationSettings can be added with different labels and that we can fetch those ConfigurationSettings * from the service when filtering by their labels. 
*/ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.data.appconfiguration.TestHelper public void listWithMultipleLabels(HttpClient httpClient, ConfigurationServiceVersion serviceVersion) { client = getConfigurationAsyncClient(httpClient, serviceVersion); String key = getKey(); String label = getLabel(); String label2 = getLabel(); listWithMultipleLabelsRunner(key, label, label2, (setting, setting2) -> { List<ConfigurationSetting> selected = new ArrayList<>(); StepVerifier.create(client.addConfigurationSettingWithResponse(setting)) .assertNext(response -> assertConfigurationEquals(setting, response)) .verifyComplete(); StepVerifier.create(client.addConfigurationSettingWithResponse(setting2)) .assertNext(response -> assertConfigurationEquals(setting2, response)) .verifyComplete(); StepVerifier.create(client.listConfigurationSettings(new SettingSelector().setKeyFilter(key).setLabelFilter(label + "," + label2))) .consumeNextWith(selected::add) .consumeNextWith(selected::add) .verifyComplete(); return selected; }); } /** * Verifies that we can select filter results by key, label, and select fields using SettingSelector. 
*/ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.data.appconfiguration.TestHelper public void listConfigurationSettingsSelectFields(HttpClient httpClient, ConfigurationServiceVersion serviceVersion) { client = getConfigurationAsyncClient(httpClient, serviceVersion); listConfigurationSettingsSelectFieldsRunner((settings, selector) -> { final List<Mono<Response<ConfigurationSetting>>> settingsBeingAdded = new ArrayList<>(); for (ConfigurationSetting setting : settings) { settingsBeingAdded.add(client.setConfigurationSettingWithResponse(setting, false)); } Flux.merge(settingsBeingAdded).blockLast(); List<ConfigurationSetting> settingsReturned = new ArrayList<>(); StepVerifier.create(client.listConfigurationSettings(selector)) .assertNext(settingsReturned::add) .assertNext(settingsReturned::add) .verifyComplete(); return settingsReturned; }); } /** * Verifies that throws exception when using SettingSelector with not supported *a key filter. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.data.appconfiguration.TestHelper public void listConfigurationSettingsSelectFieldsWithPrefixStarKeyFilter(HttpClient httpClient, ConfigurationServiceVersion serviceVersion) { client = getConfigurationAsyncClient(httpClient, serviceVersion); filterValueTest("*" + getKey(), getLabel()); } /** * Verifies that throws exception when using SettingSelector with not supported *a* key filter. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.data.appconfiguration.TestHelper public void listConfigurationSettingsSelectFieldsWithSubstringKeyFilter(HttpClient httpClient, ConfigurationServiceVersion serviceVersion) { client = getConfigurationAsyncClient(httpClient, serviceVersion); filterValueTest("*" + getKey() + "*", getLabel()); } /** * Verifies that throws exception when using SettingSelector with not supported *a label filter. 
*/ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.data.appconfiguration.TestHelper public void listConfigurationSettingsSelectFieldsWithPrefixStarLabelFilter(HttpClient httpClient, ConfigurationServiceVersion serviceVersion) { client = getConfigurationAsyncClient(httpClient, serviceVersion); filterValueTest(getKey(), "*" + getLabel()); } /** * Verifies that throws exception when using SettingSelector with not supported *a* label filter. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.data.appconfiguration.TestHelper public void listConfigurationSettingsSelectFieldsWithSubstringLabelFilter(HttpClient httpClient, ConfigurationServiceVersion serviceVersion) { client = getConfigurationAsyncClient(httpClient, serviceVersion); filterValueTest(getKey(), "*" + getLabel() + "*"); } /** * Verifies that we can get a ConfigurationSetting at the provided accept datetime */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.data.appconfiguration.TestHelper public void listConfigurationSettingsAcceptDateTime(HttpClient httpClient, ConfigurationServiceVersion serviceVersion) { client = getConfigurationAsyncClient(httpClient, serviceVersion); final String keyName = testResourceNamer.randomName(keyPrefix, 16); final ConfigurationSetting original = new ConfigurationSetting().setKey(keyName).setValue("myValue"); final ConfigurationSetting updated = new ConfigurationSetting().setKey(original.getKey()).setValue("anotherValue"); final ConfigurationSetting updated2 = new ConfigurationSetting().setKey(original.getKey()).setValue("anotherValue2"); StepVerifier.create(client.setConfigurationSettingWithResponse(original, false)) .assertNext(response -> assertConfigurationEquals(original, response)) .verifyComplete(); StepVerifier.create(client.setConfigurationSettingWithResponse(updated, false).delayElement(Duration.ofSeconds(2))) .assertNext(response -> assertConfigurationEquals(updated, 
response)) .verifyComplete(); StepVerifier.create(client.setConfigurationSettingWithResponse(updated2, false)) .assertNext(response -> assertConfigurationEquals(updated2, response)) .verifyComplete(); List<ConfigurationSetting> revisions = client.listRevisions(new SettingSelector().setKeyFilter(keyName)).collectList().block(); assertNotNull(revisions); assertEquals(3, revisions.size()); SettingSelector options = new SettingSelector().setKeyFilter(keyName).setAcceptDatetime(revisions.get(1).getLastModified()); StepVerifier.create(client.listConfigurationSettings(options)) .assertNext(response -> assertConfigurationEquals(updated, response)) .verifyComplete(); } /** * Verifies that we can get all of the revisions for this ConfigurationSetting. Then verifies that we can select * specific fields. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.data.appconfiguration.TestHelper public void listRevisions(HttpClient httpClient, ConfigurationServiceVersion serviceVersion) { client = getConfigurationAsyncClient(httpClient, serviceVersion); final String keyName = testResourceNamer.randomName(keyPrefix, 16); final ConfigurationSetting original = new ConfigurationSetting().setKey(keyName).setValue("myValue"); final ConfigurationSetting updated = new ConfigurationSetting().setKey(original.getKey()).setValue("anotherValue"); final ConfigurationSetting updated2 = new ConfigurationSetting().setKey(original.getKey()).setValue("anotherValue2"); StepVerifier.create(client.setConfigurationSettingWithResponse(original, false)) .assertNext(response -> assertConfigurationEquals(original, response)) .verifyComplete(); StepVerifier.create(client.setConfigurationSettingWithResponse(updated, false)) .assertNext(response -> assertConfigurationEquals(updated, response)) .verifyComplete(); StepVerifier.create(client.setConfigurationSettingWithResponse(updated2, false)) .assertNext(response -> assertConfigurationEquals(updated2, response)) .verifyComplete(); 
StepVerifier.create(client.listRevisions(new SettingSelector().setKeyFilter(keyName))) .assertNext(response -> assertConfigurationEquals(updated2, response)) .assertNext(response -> assertConfigurationEquals(updated, response)) .assertNext(response -> assertConfigurationEquals(original, response)) .verifyComplete(); StepVerifier.create(client.listRevisions(new SettingSelector().setKeyFilter(keyName).setFields(SettingFields.KEY, SettingFields.ETAG))) .assertNext(response -> validateListRevisions(updated2, response)) .assertNext(response -> validateListRevisions(updated, response)) .assertNext(response -> validateListRevisions(original, response)) .verifyComplete(); assertTrue(client.listRevisions(null).toStream().collect(Collectors.toList()).size() > 0); } /** * Verifies that we can get all the revisions for all settings with the specified keys. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.data.appconfiguration.TestHelper public void listRevisionsWithMultipleKeys(HttpClient httpClient, ConfigurationServiceVersion serviceVersion) { client = getConfigurationAsyncClient(httpClient, serviceVersion); String key = getKey(); String key2 = getKey(); listRevisionsWithMultipleKeysRunner(key, key2, (testInput) -> { List<ConfigurationSetting> selected = new ArrayList<>(); StepVerifier.create(client.addConfigurationSettingWithResponse(testInput.get(0))) .assertNext(response -> assertConfigurationEquals(testInput.get(0), response)) .verifyComplete(); StepVerifier.create(client.setConfigurationSettingWithResponse(testInput.get(1), false)) .assertNext(response -> assertConfigurationEquals(testInput.get(1), response)) .verifyComplete(); StepVerifier.create(client.addConfigurationSettingWithResponse(testInput.get(2))) .assertNext(response -> assertConfigurationEquals(testInput.get(2), response)) .verifyComplete(); StepVerifier.create(client.setConfigurationSettingWithResponse(testInput.get(3), false)) .assertNext(response -> 
assertConfigurationEquals(testInput.get(3), response)) .verifyComplete(); StepVerifier.create(client.listRevisions(new SettingSelector().setKeyFilter(key + "," + key2))) .consumeNextWith(selected::add) .consumeNextWith(selected::add) .consumeNextWith(selected::add) .consumeNextWith(selected::add) .verifyComplete(); return selected; }); } /** * Verifies that we can get all revisions for all settings with the specified labels. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.data.appconfiguration.TestHelper public void listRevisionsWithMultipleLabels(HttpClient httpClient, ConfigurationServiceVersion serviceVersion) { client = getConfigurationAsyncClient(httpClient, serviceVersion); String key = getKey(); String label = getLabel(); String label2 = getLabel(); listRevisionsWithMultipleLabelsRunner(key, label, label2, (testInput) -> { List<ConfigurationSetting> selected = new ArrayList<>(); StepVerifier.create(client.addConfigurationSettingWithResponse(testInput.get(0))) .assertNext(response -> assertConfigurationEquals(testInput.get(0), response)) .verifyComplete(); StepVerifier.create(client.setConfigurationSettingWithResponse(testInput.get(1), false)) .assertNext(response -> assertConfigurationEquals(testInput.get(1), response)) .verifyComplete(); StepVerifier.create(client.addConfigurationSettingWithResponse(testInput.get(2))) .assertNext(response -> assertConfigurationEquals(testInput.get(2), response)) .verifyComplete(); StepVerifier.create(client.setConfigurationSettingWithResponse(testInput.get(3), false)) .assertNext(response -> assertConfigurationEquals(testInput.get(3), response)) .verifyComplete(); StepVerifier.create(client.listRevisions(new SettingSelector().setKeyFilter(key).setLabelFilter(label + "," + label2))) .consumeNextWith(selected::add) .consumeNextWith(selected::add) .consumeNextWith(selected::add) .consumeNextWith(selected::add) .verifyComplete(); return selected; }); } /** * Verifies that we can get a subset 
of revisions based on the "acceptDateTime" */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.data.appconfiguration.TestHelper public void listRevisionsAcceptDateTime(HttpClient httpClient, ConfigurationServiceVersion serviceVersion) { client = getConfigurationAsyncClient(httpClient, serviceVersion); final String keyName = testResourceNamer.randomName(keyPrefix, 16); final ConfigurationSetting original = new ConfigurationSetting().setKey(keyName).setValue("myValue"); final ConfigurationSetting updated = new ConfigurationSetting().setKey(original.getKey()).setValue("anotherValue"); final ConfigurationSetting updated2 = new ConfigurationSetting().setKey(original.getKey()).setValue("anotherValue2"); StepVerifier.create(client.setConfigurationSettingWithResponse(original, false)) .assertNext(response -> assertConfigurationEquals(original, response)) .verifyComplete(); StepVerifier.create(client.setConfigurationSettingWithResponse(updated, false).delayElement(Duration.ofSeconds(2))) .assertNext(response -> assertConfigurationEquals(updated, response)) .verifyComplete(); StepVerifier.create(client.setConfigurationSettingWithResponse(updated2, false)) .assertNext(response -> assertConfigurationEquals(updated2, response)) .verifyComplete(); List<ConfigurationSetting> revisions = client.listRevisions(new SettingSelector().setKeyFilter(keyName)).collectList().block(); assertNotNull(revisions); assertEquals(3, revisions.size()); SettingSelector options = new SettingSelector().setKeyFilter(keyName).setAcceptDatetime(revisions.get(1).getLastModified()); StepVerifier.create(client.listRevisions(options)) .assertNext(response -> assertConfigurationEquals(updated, response)) .assertNext(response -> assertConfigurationEquals(original, response)) .verifyComplete(); } /** * Verifies that, given a ton of revisions, we can list the revisions ConfigurationSettings using pagination * (ie. where 'nextLink' has a URL pointing to the next page of results.) 
*/ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.data.appconfiguration.TestHelper public void listRevisionsWithPagination(HttpClient httpClient, ConfigurationServiceVersion serviceVersion) { client = getConfigurationAsyncClient(httpClient, serviceVersion); final int numberExpected = 50; List<ConfigurationSetting> settings = new ArrayList<>(numberExpected); for (int value = 0; value < numberExpected; value++) { settings.add(new ConfigurationSetting().setKey(keyPrefix).setValue("myValue" + value).setLabel(labelPrefix)); } List<Mono<Response<ConfigurationSetting>>> results = new ArrayList<>(); for (ConfigurationSetting setting : settings) { results.add(client.setConfigurationSettingWithResponse(setting, false)); } SettingSelector filter = new SettingSelector().setKeyFilter(keyPrefix).setLabelFilter(labelPrefix); Flux.merge(results).blockLast(); StepVerifier.create(client.listRevisions(filter)) .expectNextCount(numberExpected) .verifyComplete(); } /** * Verifies that, given a ton of revisions, we can list the revisions ConfigurationSettings using pagination and stream is invoked multiple times. * (ie. where 'nextLink' has a URL pointing to the next page of results.) 
*/ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.data.appconfiguration.TestHelper public void listRevisionsWithPaginationAndRepeatStream(HttpClient httpClient, ConfigurationServiceVersion serviceVersion) { client = getConfigurationAsyncClient(httpClient, serviceVersion); final int numberExpected = 50; List<ConfigurationSetting> settings = new ArrayList<>(numberExpected); List<Mono<Response<ConfigurationSetting>>> results = new ArrayList<>(); for (int value = 0; value < numberExpected; value++) { ConfigurationSetting setting = new ConfigurationSetting().setKey(keyPrefix).setValue("myValue" + value).setLabel(labelPrefix); settings.add(setting); results.add(client.setConfigurationSettingWithResponse(setting, false)); } SettingSelector filter = new SettingSelector().setKeyFilter(keyPrefix).setLabelFilter(labelPrefix); Flux.merge(results).blockLast(); List<ConfigurationSetting> configurationSettingList1 = new ArrayList<>(); List<ConfigurationSetting> configurationSettingList2 = new ArrayList<>(); PagedFlux<ConfigurationSetting> configurationSettingPagedFlux = client.listRevisions(filter); configurationSettingPagedFlux.toStream().forEach(configurationSetting -> configurationSettingList1.add(configurationSetting)); assertEquals(numberExpected, configurationSettingList1.size()); configurationSettingPagedFlux.toStream().forEach(configurationSetting -> configurationSettingList2.add(configurationSetting)); assertEquals(numberExpected, configurationSettingList2.size()); } /** * Verifies that, given a ton of revisions, we can list the revisions ConfigurationSettings using pagination and stream is invoked multiple times. * (ie. where 'nextLink' has a URL pointing to the next page of results.) 
*/ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.data.appconfiguration.TestHelper public void listRevisionsWithPaginationAndRepeatIterator(HttpClient httpClient, ConfigurationServiceVersion serviceVersion) { client = getConfigurationAsyncClient(httpClient, serviceVersion); final int numberExpected = 50; List<ConfigurationSetting> settings = new ArrayList<>(numberExpected); List<Mono<Response<ConfigurationSetting>>> results = new ArrayList<>(); for (int value = 0; value < numberExpected; value++) { ConfigurationSetting setting = new ConfigurationSetting().setKey(keyPrefix).setValue("myValue" + value).setLabel(labelPrefix); settings.add(setting); results.add(client.setConfigurationSettingWithResponse(setting, false)); } SettingSelector filter = new SettingSelector().setKeyFilter(keyPrefix).setLabelFilter(labelPrefix); Flux.merge(results).blockLast(); List<ConfigurationSetting> configurationSettingList1 = new ArrayList<>(); List<ConfigurationSetting> configurationSettingList2 = new ArrayList<>(); PagedFlux<ConfigurationSetting> configurationSettingPagedFlux = client.listRevisions(filter); configurationSettingPagedFlux.toIterable().forEach(configurationSetting -> configurationSettingList1.add(configurationSetting)); assertEquals(numberExpected, configurationSettingList1.size()); configurationSettingPagedFlux.toIterable().forEach(configurationSetting -> configurationSettingList2.add(configurationSetting)); assertEquals(numberExpected, configurationSettingList2.size()); } /** * Verifies that, given a ton of existing settings, we can list the ConfigurationSettings using pagination * (ie. where 'nextLink' has a URL pointing to the next page of results. 
*/ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.data.appconfiguration.TestHelper public void listConfigurationSettingsWithPagination(HttpClient httpClient, ConfigurationServiceVersion serviceVersion) { client = getConfigurationAsyncClient(httpClient, serviceVersion); final int numberExpected = 50; List<ConfigurationSetting> settings = new ArrayList<>(numberExpected); for (int value = 0; value < numberExpected; value++) { settings.add(new ConfigurationSetting().setKey(keyPrefix + "-" + value).setValue("myValue").setLabel(labelPrefix)); } List<Mono<Response<ConfigurationSetting>>> results = new ArrayList<>(); for (ConfigurationSetting setting : settings) { results.add(client.setConfigurationSettingWithResponse(setting, false)); } SettingSelector filter = new SettingSelector().setKeyFilter(keyPrefix + "-*").setLabelFilter(labelPrefix); Flux.merge(results).blockLast(); StepVerifier.create(client.listConfigurationSettings(filter)) .expectNextCount(numberExpected) .verifyComplete(); } /** * Verifies the conditional "GET" scenario where the setting has yet to be updated, resulting in a 304. This GET * scenario will return a setting when the ETag provided does not match the one of the current setting. 
*/ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.data.appconfiguration.TestHelper public void getConfigurationSettingWhenValueNotUpdated(HttpClient httpClient, ConfigurationServiceVersion serviceVersion) { client = getConfigurationAsyncClient(httpClient, serviceVersion); final String key = getKey(); final ConfigurationSetting expected = new ConfigurationSetting().setKey(key).setValue("myValue"); final ConfigurationSetting newExpected = new ConfigurationSetting().setKey(key).setValue("myNewValue"); final ConfigurationSetting block = client.addConfigurationSettingWithResponse(expected).block().getValue(); assertNotNull(block); StepVerifier.create(client.getConfigurationSettingWithResponse(block, null, true)) .assertNext(response -> assertConfigurationEquals(null, response, 304)) .verifyComplete(); StepVerifier.create(client.setConfigurationSettingWithResponse(newExpected, false)) .assertNext(response -> assertConfigurationEquals(newExpected, response)) .verifyComplete(); StepVerifier.create(client.getConfigurationSettingWithResponse(block, null, true)) .assertNext(response -> assertConfigurationEquals(newExpected, response)) .verifyComplete(); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.data.appconfiguration.TestHelper @Disabled public void deleteAllSettings(HttpClient httpClient, ConfigurationServiceVersion serviceVersion) { client = getConfigurationAsyncClient(httpClient, serviceVersion); client.listConfigurationSettings(new SettingSelector().setKeyFilter("*")) .flatMap(configurationSetting -> { logger.info("Deleting key:label [{}:{}]. isReadOnly? 
{}", configurationSetting.getKey(), configurationSetting.getLabel(), configurationSetting.isReadOnly()); return client.deleteConfigurationSettingWithResponse(configurationSetting, false); }).blockLast(); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.data.appconfiguration.TestHelper public void addHeadersFromContextPolicyTest(HttpClient httpClient, ConfigurationServiceVersion serviceVersion) { client = getConfigurationAsyncClient(httpClient, serviceVersion); final HttpHeaders headers = getCustomizedHeaders(); addHeadersFromContextPolicyRunner(expected -> StepVerifier.create(client.addConfigurationSettingWithResponse(expected) .subscriberContext(Context.of(AddHeadersFromContextPolicy.AZURE_REQUEST_HTTP_HEADERS_KEY, headers))) .assertNext(response -> { final HttpHeaders requestHeaders = response.getRequest().getHeaders(); assertContainsHeaders(headers, requestHeaders); }) .verifyComplete()); } /** * Test helper that calling list configuration setting with given key and label input * * @param keyFilter key filter expression * @param labelFilter label filter expression */ private void filterValueTest(String keyFilter, String labelFilter) { listConfigurationSettingsSelectFieldsWithNotSupportedFilterRunner(keyFilter, labelFilter, selector -> StepVerifier.create(client.listConfigurationSettings(selector)) .verifyError(HttpResponseException.class)); } }
This would only make it in the next release now?
public Mono<HttpResponse> process(HttpPipelineCallContext context, HttpPipelineNextPolicy next) { if ((boolean) context.getData(DISABLE_TRACING_KEY).orElse(false)) { return next.process(); } Span parentSpan = (Span) context.getData(PARENT_SPAN_KEY).orElse(Span.current()); HttpRequest request = context.getHttpRequest(); final UrlBuilder urlBuilder = UrlBuilder.parse(context.getHttpRequest().getUrl()); SpanBuilder spanBuilder = TRACER.spanBuilder(urlBuilder.getPath()) .setParent(io.opentelemetry.context.Context.current().with(parentSpan)); spanBuilder.setSpanKind(Span.Kind.CLIENT); Span span = spanBuilder.startSpan(); if (span.isRecording()) { addSpanRequestAttributes(span, request, context); } SpanContext spanContext = span.getSpanContext(); if (spanContext.isValid()) { traceContextFormat.inject(io.opentelemetry.context.Context.current(), request, contextSetter); } return next.process() .doOnEach(OpenTelemetryHttpPolicy::handleResponse) .subscriberContext(Context.of("TRACING_SPAN", span, "REQUEST", request)); }
if ((boolean) context.getData(DISABLE_TRACING_KEY).orElse(false)) {
public Mono<HttpResponse> process(HttpPipelineCallContext context, HttpPipelineNextPolicy next) { if ((boolean) context.getData(DISABLE_TRACING_KEY).orElse(false)) { return next.process(); } Span parentSpan = (Span) context.getData(PARENT_SPAN_KEY).orElse(Span.current()); HttpRequest request = context.getHttpRequest(); final UrlBuilder urlBuilder = UrlBuilder.parse(context.getHttpRequest().getUrl()); SpanBuilder spanBuilder = TRACER.spanBuilder(urlBuilder.getPath()) .setParent(io.opentelemetry.context.Context.current().with(parentSpan)); spanBuilder.setSpanKind(Span.Kind.CLIENT); Span span = spanBuilder.startSpan(); if (span.isRecording()) { addSpanRequestAttributes(span, request, context); } SpanContext spanContext = span.getSpanContext(); if (spanContext.isValid()) { traceContextFormat.inject(io.opentelemetry.context.Context.current(), request, contextSetter); } return next.process() .doOnEach(OpenTelemetryHttpPolicy::handleResponse) .subscriberContext(Context.of("TRACING_SPAN", span, "REQUEST", request)); }
class implements W3C distributed tracing protocol and injects SpanContext into the outgoing http private final TextMapPropagator traceContextFormat = W3CTraceContextPropagator.getInstance(); @Override
class implements W3C distributed tracing protocol and injects SpanContext into the outgoing http private final TextMapPropagator traceContextFormat = W3CTraceContextPropagator.getInstance(); @Override
Yes, this is for next release only.
public Mono<HttpResponse> process(HttpPipelineCallContext context, HttpPipelineNextPolicy next) { if ((boolean) context.getData(DISABLE_TRACING_KEY).orElse(false)) { return next.process(); } Span parentSpan = (Span) context.getData(PARENT_SPAN_KEY).orElse(Span.current()); HttpRequest request = context.getHttpRequest(); final UrlBuilder urlBuilder = UrlBuilder.parse(context.getHttpRequest().getUrl()); SpanBuilder spanBuilder = TRACER.spanBuilder(urlBuilder.getPath()) .setParent(io.opentelemetry.context.Context.current().with(parentSpan)); spanBuilder.setSpanKind(Span.Kind.CLIENT); Span span = spanBuilder.startSpan(); if (span.isRecording()) { addSpanRequestAttributes(span, request, context); } SpanContext spanContext = span.getSpanContext(); if (spanContext.isValid()) { traceContextFormat.inject(io.opentelemetry.context.Context.current(), request, contextSetter); } return next.process() .doOnEach(OpenTelemetryHttpPolicy::handleResponse) .subscriberContext(Context.of("TRACING_SPAN", span, "REQUEST", request)); }
if ((boolean) context.getData(DISABLE_TRACING_KEY).orElse(false)) {
public Mono<HttpResponse> process(HttpPipelineCallContext context, HttpPipelineNextPolicy next) { if ((boolean) context.getData(DISABLE_TRACING_KEY).orElse(false)) { return next.process(); } Span parentSpan = (Span) context.getData(PARENT_SPAN_KEY).orElse(Span.current()); HttpRequest request = context.getHttpRequest(); final UrlBuilder urlBuilder = UrlBuilder.parse(context.getHttpRequest().getUrl()); SpanBuilder spanBuilder = TRACER.spanBuilder(urlBuilder.getPath()) .setParent(io.opentelemetry.context.Context.current().with(parentSpan)); spanBuilder.setSpanKind(Span.Kind.CLIENT); Span span = spanBuilder.startSpan(); if (span.isRecording()) { addSpanRequestAttributes(span, request, context); } SpanContext spanContext = span.getSpanContext(); if (spanContext.isValid()) { traceContextFormat.inject(io.opentelemetry.context.Context.current(), request, contextSetter); } return next.process() .doOnEach(OpenTelemetryHttpPolicy::handleResponse) .subscriberContext(Context.of("TRACING_SPAN", span, "REQUEST", request)); }
class implements W3C distributed tracing protocol and injects SpanContext into the outgoing http private final TextMapPropagator traceContextFormat = W3CTraceContextPropagator.getInstance(); @Override
class implements W3C distributed tracing protocol and injects SpanContext into the outgoing http private final TextMapPropagator traceContextFormat = W3CTraceContextPropagator.getInstance(); @Override
nit: remove comments
public void throughputLocalControl(OperationType operationType) { ThroughputControlGroup group = container.enableThroughputLocalControlGroup("group-" + UUID.randomUUID(), 10); CosmosItemRequestOptions requestOptions = new CosmosItemRequestOptions(); requestOptions.setContentResponseOnWriteEnabled(true); requestOptions.setThroughputControlGroupName(group.getGroupName()); CosmosItemResponse<TestItem> createItemResponse = container.createItem(getDocumentDefinition(), requestOptions).block(); TestItem createdItem = createItemResponse.getItem(); this.validateRequestNotThrottled( createItemResponse.getDiagnostics().toString(), BridgeInternal.getContextClient(client).getConnectionPolicy().getConnectionMode()); CosmosDiagnostics cosmosDiagnostics = performDocumentOperation(operationType, createdItem, group.getGroupName()); this.validateRequestThrottled( cosmosDiagnostics.toString(), BridgeInternal.getContextClient(client).getConnectionPolicy().getConnectionMode()); }
requestOptions.setThroughputControlGroupName(group.getGroupName());
public void throughputLocalControl(OperationType operationType) { ThroughputControlGroupConfig groupConfig = new ThroughputControlGroupConfigBuilder() .setGroupName("group-" + UUID.randomUUID()) .setTargetThroughput(6) .build(); container.enableLocalThroughputControlGroup(groupConfig); CosmosItemRequestOptions requestOptions = new CosmosItemRequestOptions(); requestOptions.setContentResponseOnWriteEnabled(true); requestOptions.setThroughputControlGroupName(groupConfig.getGroupName()); CosmosItemResponse<TestItem> createItemResponse = container.createItem(getDocumentDefinition(), requestOptions).block(); TestItem createdItem = createItemResponse.getItem(); this.validateRequestNotThrottled( createItemResponse.getDiagnostics().toString(), BridgeInternal.getContextClient(client).getConnectionPolicy().getConnectionMode()); CosmosDiagnostics cosmosDiagnostics = performDocumentOperation(this.container, operationType, createdItem, groupConfig.getGroupName()); this.validateRequestThrottled( cosmosDiagnostics.toString(), BridgeInternal.getContextClient(client).getConnectionPolicy().getConnectionMode()); }
class ThroughputControlTests extends TestSuiteBase { private CosmosAsyncClient client; private CosmosAsyncDatabase database; private CosmosAsyncContainer container; @Factory(dataProvider = "simpleClientBuildersForDirectTcpWithoutRetryOnThrottledRequests") public ThroughputControlTests(CosmosClientBuilder clientBuilder) { super(clientBuilder); this.subscriberValidationTimeout = TIMEOUT; } @DataProvider public static Object[][] operationTypeProvider() { return new Object[][]{ { OperationType.Read }, { OperationType.Replace }, { OperationType.Create }, { OperationType.Delete }, }; } @Test(groups = {"emulator"}, dataProvider = "operationTypeProvider", timeOut = TIMEOUT) @Test(groups = {"emulator"}, dataProvider = "operationTypeProvider", timeOut = TIMEOUT) public void throughputGlobalControl(OperationType operationType) throws InterruptedException { CosmosAsyncContainer controlContainer = database.getContainer("throughputControlContainer"); database.createContainerIfNotExists(controlContainer.getId(), "/group").block(); ThroughputControlGroup group = container.enableThroughputGlobalControlGroup( "group-" + UUID.randomUUID(), 10, controlContainer, Duration.ofSeconds(5), Duration.ofSeconds(10)); CosmosItemRequestOptions requestOptions = new CosmosItemRequestOptions(); requestOptions.setContentResponseOnWriteEnabled(true); requestOptions.setThroughputControlGroupName(group.getGroupName()); CosmosItemResponse<TestItem> createItemResponse = container.createItem(getDocumentDefinition(), requestOptions).block(); TestItem createdItem = createItemResponse.getItem(); this.validateRequestNotThrottled( createItemResponse.getDiagnostics().toString(), BridgeInternal.getContextClient(client).getConnectionPolicy().getConnectionMode()); CosmosDiagnostics cosmosDiagnostics = performDocumentOperation(operationType, createdItem, group.getGroupName()); this.validateRequestThrottled( cosmosDiagnostics.toString(), 
BridgeInternal.getContextClient(client).getConnectionPolicy().getConnectionMode()); } @BeforeClass(groups = { "emulator" }, timeOut = 4 * SETUP_TIMEOUT) public void before_ThroughputBudgetControllerTest() { client = getClientBuilder().buildAsyncClient(); database = getSharedCosmosDatabase(client); container = getSharedMultiPartitionCosmosContainer(client); } private static TestItem getDocumentDefinition() { return getDocumentDefinition(null); } private static TestItem getDocumentDefinition(String partitionKey) { return new TestItem( UUID.randomUUID().toString(), StringUtils.isEmpty(partitionKey) ? UUID.randomUUID().toString() : partitionKey, UUID.randomUUID().toString() ); } private void validateRequestThrottled(String cosmosDiagnostics, ConnectionMode connectionMode) { assertThat(cosmosDiagnostics).isNotEmpty(); if (connectionMode == ConnectionMode.DIRECT) { assertThat(cosmosDiagnostics).contains("\"statusCode\":429"); assertThat(cosmosDiagnostics).contains("\"subStatusCode\":10003"); } else if (connectionMode == ConnectionMode.GATEWAY) { assertThat(cosmosDiagnostics).contains("\"statusAndSubStatusCodes\":[[429,10003]"); } } private void validateRequestNotThrottled(String cosmosDiagnostics, ConnectionMode connectionMode) { assertThat(cosmosDiagnostics).isNotEmpty(); if (connectionMode == ConnectionMode.DIRECT) { assertThat(cosmosDiagnostics).doesNotContain("\"statusCode\":429"); assertThat(cosmosDiagnostics).doesNotContain("\"subStatusCode\":10003"); } else if (connectionMode == ConnectionMode.GATEWAY) { assertThat(cosmosDiagnostics).doesNotContain("\"statusAndSubStatusCodes\":[[429,10003]"); } } private CosmosDiagnostics performDocumentOperation(OperationType operationType, TestItem createdItem, String throughputControlGroup) { if (operationType == OperationType.Query) { CosmosQueryRequestOptions queryRequestOptions = new CosmosQueryRequestOptions(); if (!StringUtils.isEmpty(throughputControlGroup)) { 
queryRequestOptions.setThroughputControlGroupName(throughputControlGroup); } String query = String.format("SELECT * from c where c.mypk = '%s'", createdItem.getMypk()); FeedResponse<TestItem> itemFeedResponse = container.queryItems(query, queryRequestOptions, TestItem.class).byPage().blockFirst(); return itemFeedResponse.getCosmosDiagnostics(); } if (operationType == OperationType.Read || operationType == OperationType.Delete || operationType == OperationType.Replace || operationType == OperationType.Create) { try { CosmosItemRequestOptions itemRequestOptions = new CosmosItemRequestOptions(); if (!StringUtils.isEmpty((throughputControlGroup))) { itemRequestOptions.setThroughputControlGroupName(throughputControlGroup); } if (operationType == OperationType.Read) { return container.readItem( createdItem.getId(), new PartitionKey(createdItem.getMypk()), itemRequestOptions, TestItem.class).block().getDiagnostics(); } if (operationType == OperationType.Replace) { return container.replaceItem( createdItem, createdItem.getId(), new PartitionKey(createdItem.getMypk()), itemRequestOptions).block().getDiagnostics(); } if (operationType == OperationType.Delete) { return container.deleteItem(createdItem, itemRequestOptions).block().getDiagnostics(); } if (operationType == OperationType.Create) { TestItem newItem = getDocumentDefinition(createdItem.getMypk()); return container.createItem(newItem, itemRequestOptions).block().getDiagnostics(); } } catch (CosmosException cosmosException) { return cosmosException.getDiagnostics(); } } throw new IllegalArgumentException("The operation type is not supported"); } }
class ThroughputControlTests extends TestSuiteBase { private final static int COLLECTION_RECREATION_TIME_DELAY = 5000; private CosmosAsyncClient client; private CosmosAsyncDatabase database; private CosmosAsyncContainer container; @Factory(dataProvider = "simpleClientBuildersForDirectTcpWithoutRetryOnThrottledRequests") public ThroughputControlTests(CosmosClientBuilder clientBuilder) { super(clientBuilder); this.subscriberValidationTimeout = TIMEOUT; } @DataProvider public static Object[][] operationTypeProvider() { return new Object[][]{ { OperationType.Read }, { OperationType.Replace }, { OperationType.Create }, { OperationType.Delete }, { OperationType.Query }, }; } @Test(groups = {"emulator"}, dataProvider = "operationTypeProvider", timeOut = TIMEOUT) @Test(groups = {"emulator"}, dataProvider = "operationTypeProvider", timeOut = TIMEOUT) public void throughputGlobalControl(OperationType operationType) { String controlContainerId = "throughputControlContainer"; CosmosAsyncContainer controlContainer = database.getContainer(controlContainerId); database.createContainerIfNotExists(controlContainer.getId(), "/groupId").block(); ThroughputControlGroupConfig groupConfig = new ThroughputControlGroupConfigBuilder() .setGroupName("group-" + UUID.randomUUID()) .setTargetThroughput(6) .build(); GlobalThroughputControlConfig globalControlConfig = this.client.createGlobalThroughputControlConfigBuilder(this.database.getId(), controlContainerId) .setControlItemRenewInterval(Duration.ofSeconds(5)) .setControlItemExpireInterval(Duration.ofSeconds(20)) .build(); container.enableGlobalThroughputControlGroup(groupConfig, globalControlConfig); CosmosItemRequestOptions requestOptions = new CosmosItemRequestOptions(); requestOptions.setContentResponseOnWriteEnabled(true); requestOptions.setThroughputControlGroupName(groupConfig.getGroupName()); CosmosItemResponse<TestItem> createItemResponse = container.createItem(getDocumentDefinition(), requestOptions).block(); TestItem createdItem 
= createItemResponse.getItem(); this.validateRequestNotThrottled( createItemResponse.getDiagnostics().toString(), BridgeInternal.getContextClient(client).getConnectionPolicy().getConnectionMode()); CosmosDiagnostics cosmosDiagnostics = performDocumentOperation(this.container, operationType, createdItem, groupConfig.getGroupName()); this.validateRequestThrottled( cosmosDiagnostics.toString(), BridgeInternal.getContextClient(client).getConnectionPolicy().getConnectionMode()); } @Test(groups = {"emulator"}, dataProvider = "operationTypeProvider", timeOut = TIMEOUT) public void throughputLocalControlForContainerCreateDeleteWithSameName(OperationType operationType) throws InterruptedException { ConnectionMode connectionMode = BridgeInternal.getContextClient(client).getConnectionPolicy().getConnectionMode(); if (connectionMode == ConnectionMode.GATEWAY) { return; } String testContainerId = UUID.randomUUID().toString(); CosmosContainerProperties containerProperties = getCollectionDefinition(testContainerId); CosmosAsyncContainer createdContainer = createCollection(this.database, containerProperties, new CosmosContainerRequestOptions()); ThroughputControlGroupConfig groupConfig = new ThroughputControlGroupConfigBuilder() .setGroupName("group-" + UUID.randomUUID()) .setTargetThroughput(1) .build(); container.enableLocalThroughputControlGroup(groupConfig); createdContainer.enableLocalThroughputControlGroup(groupConfig); CosmosItemRequestOptions requestOptions = new CosmosItemRequestOptions(); requestOptions.setContentResponseOnWriteEnabled(true); requestOptions.setThroughputControlGroupName(groupConfig.getGroupName()); CosmosItemResponse<TestItem> createItemResponse = createdContainer.createItem(getDocumentDefinition(), requestOptions).block(); TestItem createdItem = createItemResponse.getItem(); this.validateRequestNotThrottled( createItemResponse.getDiagnostics().toString(), BridgeInternal.getContextClient(client).getConnectionPolicy().getConnectionMode()); 
safeDeleteCollection(createdContainer); Thread.sleep(COLLECTION_RECREATION_TIME_DELAY); createdContainer = createCollection(this.database, containerProperties, new CosmosContainerRequestOptions()); createdItem = createdContainer.createItem(getDocumentDefinition()).block().getItem(); CosmosDiagnostics cosmosDiagnostics = performDocumentOperation(createdContainer, operationType, createdItem, groupConfig.getGroupName()); this.validateRequestNotThrottled( cosmosDiagnostics.toString(), BridgeInternal.getContextClient(client).getConnectionPolicy().getConnectionMode()); cosmosDiagnostics = performDocumentOperation(createdContainer, operationType, createdItem, groupConfig.getGroupName()); this.validateRequestThrottled( cosmosDiagnostics.toString(), BridgeInternal.getContextClient(client).getConnectionPolicy().getConnectionMode()); } @BeforeClass(groups = { "emulator" }, timeOut = 4 * SETUP_TIMEOUT) public void before_ThroughputBudgetControllerTest() { client = getClientBuilder().buildAsyncClient(); database = getSharedCosmosDatabase(client); container = getSharedMultiPartitionCosmosContainer(client); } private static TestItem getDocumentDefinition() { return getDocumentDefinition(null); } private static TestItem getDocumentDefinition(String partitionKey) { return new TestItem( UUID.randomUUID().toString(), StringUtils.isEmpty(partitionKey) ? 
UUID.randomUUID().toString() : partitionKey, UUID.randomUUID().toString() ); } private void validateRequestThrottled(String cosmosDiagnostics, ConnectionMode connectionMode) { assertThat(cosmosDiagnostics).isNotEmpty(); assertThat(cosmosDiagnostics).contains("\"statusCode\":429"); assertThat(cosmosDiagnostics).contains("\"subStatusCode\":10003"); } private void validateRequestNotThrottled(String cosmosDiagnostics, ConnectionMode connectionMode) { assertThat(cosmosDiagnostics).isNotEmpty(); assertThat(cosmosDiagnostics).doesNotContain("\"statusCode\":429"); assertThat(cosmosDiagnostics).doesNotContain("\"subStatusCode\":10003"); } private CosmosDiagnostics performDocumentOperation( CosmosAsyncContainer cosmosAsyncContainer, OperationType operationType, TestItem createdItem, String throughputControlGroup) { try { if (operationType == OperationType.Query) { CosmosQueryRequestOptions queryRequestOptions = new CosmosQueryRequestOptions(); if (!StringUtils.isEmpty(throughputControlGroup)) { queryRequestOptions.setThroughputControlGroupName(throughputControlGroup); } String query = String.format("SELECT * from c where c.mypk = '%s'", createdItem.getMypk()); FeedResponse<TestItem> itemFeedResponse = cosmosAsyncContainer.queryItems(query, queryRequestOptions, TestItem.class).byPage().blockFirst(); return itemFeedResponse.getCosmosDiagnostics(); } if (operationType == OperationType.ReadFeed) { CosmosChangeFeedRequestOptions changeFeedRequestOptions = CosmosChangeFeedRequestOptions .createForProcessingFromBeginning(FeedRange.forFullRange()); if (!StringUtils.isEmpty(throughputControlGroup)) { changeFeedRequestOptions.setThroughputControlGroupName(throughputControlGroup); } FeedResponse<TestItem> itemFeedResponse = cosmosAsyncContainer.queryChangeFeed(changeFeedRequestOptions, TestItem.class).byPage().blockFirst(); return itemFeedResponse.getCosmosDiagnostics(); } if (operationType == OperationType.Read || operationType == OperationType.Delete || operationType == 
OperationType.Replace || operationType == OperationType.Create) { CosmosItemRequestOptions itemRequestOptions = new CosmosItemRequestOptions(); if (!StringUtils.isEmpty((throughputControlGroup))) { itemRequestOptions.setThroughputControlGroupName(throughputControlGroup); } if (operationType == OperationType.Read) { return cosmosAsyncContainer.readItem( createdItem.getId(), new PartitionKey(createdItem.getMypk()), itemRequestOptions, TestItem.class).block().getDiagnostics(); } if (operationType == OperationType.Replace) { return cosmosAsyncContainer.replaceItem( createdItem, createdItem.getId(), new PartitionKey(createdItem.getMypk()), itemRequestOptions).block().getDiagnostics(); } if (operationType == OperationType.Delete) { return cosmosAsyncContainer.deleteItem(createdItem, itemRequestOptions).block().getDiagnostics(); } if (operationType == OperationType.Create) { TestItem newItem = getDocumentDefinition(createdItem.getMypk()); return cosmosAsyncContainer.createItem(newItem, itemRequestOptions).block().getDiagnostics(); } } throw new IllegalArgumentException("The operation type is not supported"); } catch (CosmosException cosmosException) { return cosmosException.getDiagnostics(); } } }
non synchronized access to `childTokenSourceList` may cause concurrency access failure.
/**
 * Cancels this token at most once: closes every registered child token source and
 * clears the registration list. The compareAndSet guard makes repeated calls no-ops.
 *
 * Synchronized on the instance monitor so that iterating and clearing
 * childTokenSourceList cannot race with a concurrent register(), which guards the
 * list with the same lock — an unsynchronized iteration here could throw
 * ConcurrentModificationException or miss a newly registered child.
 */
public void cancel() {
    synchronized (this) {
        if (this.cancellationRequested.compareAndSet(false, true)) {
            for (LinkedCancellationTokenSource childTokenSource : this.childTokenSourceList) {
                childTokenSource.close();
            }
            this.childTokenSourceList.clear();
        }
    }
}
childTokenSourceList.clear();
/**
 * Cancels this token exactly once, closing all registered child token sources and
 * emptying the registration list. Declared {@code synchronized}, which locks the
 * same instance monitor as a {@code synchronized (this)} block, so registration
 * cannot interleave with the close-and-clear sequence.
 */
public synchronized void cancel() {
    // Only the first caller flips the flag; later calls are no-ops.
    if (!this.cancellationRequested.compareAndSet(false, true)) {
        return;
    }
    for (LinkedCancellationTokenSource child : this.childTokenSourceList) {
        child.close();
    }
    this.childTokenSourceList.clear();
}
// A cancellation token linked to a parent LinkedCancellationTokenSource.
// Child token sources are registered here so they can be closed when the
// token is cancelled; cancellation is also implied by the parent source closing.
class LinkedCancellationToken {
    // Child sources to close on cancellation; mutated under the instance monitor
    // (see register()).
    private final List<LinkedCancellationTokenSource> childTokenSourceList;
    // Parent source; its closure counts as cancellation.
    private final LinkedCancellationTokenSource tokenSource;
    // Set once cancellation has been requested.
    private final AtomicBoolean cancellationRequested;

    public LinkedCancellationToken(LinkedCancellationTokenSource tokenSource) {
        this.childTokenSourceList = new ArrayList<>();
        this.tokenSource = tokenSource;
        cancellationRequested = new AtomicBoolean(false);
    }

    // NOTE(review): register() holds the instance monitor, but any other method
    // that iterates or clears childTokenSourceList must hold the same monitor
    // to stay safe — confirm cancel() does.
    public synchronized void register(LinkedCancellationTokenSource childTokenSource) {
        this.childTokenSourceList.add(childTokenSource);
    }

    // True once cancel was requested here or the parent source has been closed.
    public boolean isCancellationRequested() {
        return this.cancellationRequested.get() || this.tokenSource.isClosed();
    }
}
// A cancellation token linked to a parent LinkedCancellationTokenSource.
// Child sources register themselves to be closed on cancellation; closure of
// the parent source also counts as cancellation.
class LinkedCancellationToken {
    // Guarded by the instance monitor.
    private final List<LinkedCancellationTokenSource> childTokenSourceList = new ArrayList<>();
    private final LinkedCancellationTokenSource tokenSource;
    private final AtomicBoolean cancellationRequested = new AtomicBoolean(false);

    public LinkedCancellationToken(LinkedCancellationTokenSource tokenSource) {
        this.tokenSource = tokenSource;
    }

    /**
     * Registers a child token source to be closed when this token is cancelled.
     * Holds the instance monitor; rejects registration after cancellation so a
     * late-registered child can never be left unclosed.
     */
    public synchronized void register(LinkedCancellationTokenSource childTokenSource) {
        if (this.cancellationRequested.get()) {
            throw new IllegalStateException("The cancellation token has been cancelled");
        }
        this.childTokenSourceList.add(childTokenSource);
    }

    /** Whether cancellation was requested here or the parent source has closed. */
    public boolean isCancellationRequested() {
        return this.cancellationRequested.get() || this.tokenSource.isClosed();
    }
}
@xinlian12 thoughts?
// Cancels at most once: closes all registered child token sources, then clears
// the registration list.
// NOTE(review): the iteration and clear of childTokenSourceList are not
// synchronized here, while register() elsewhere guards the list with the
// instance monitor — a concurrent register() could cause a
// ConcurrentModificationException or a missed close; confirm.
public void cancel() {
    if (this.cancellationRequested.compareAndSet(false, true)) {
        for (LinkedCancellationTokenSource childTokenSource : this.childTokenSourceList) {
            childTokenSource.close();
        }
        childTokenSourceList.clear();
    }
}
childTokenSourceList.clear();
// Cancels at most once (compareAndSet guards re-entry): closes every registered
// child token source and clears the list. The whole operation holds the instance
// monitor, so it cannot interleave with register(), which is expected to
// synchronize on the same lock.
public void cancel() {
    synchronized (this) {
        if (this.cancellationRequested.compareAndSet(false, true)) {
            for (LinkedCancellationTokenSource childTokenSource : this.childTokenSourceList) {
                childTokenSource.close();
            }
            childTokenSourceList.clear();
        }
    }
}
// Cancellation token linked to a parent LinkedCancellationTokenSource; child
// sources registered here are closed when the token is cancelled.
class LinkedCancellationToken {
    // Child sources to close on cancellation; guarded by the instance monitor
    // in register().
    private final List<LinkedCancellationTokenSource> childTokenSourceList;
    // Parent source; its closure also implies cancellation.
    private final LinkedCancellationTokenSource tokenSource;
    // Flag set once cancellation has been requested.
    private final AtomicBoolean cancellationRequested;

    public LinkedCancellationToken(LinkedCancellationTokenSource tokenSource) {
        this.childTokenSourceList = new ArrayList<>();
        this.tokenSource = tokenSource;
        cancellationRequested = new AtomicBoolean(false);
    }

    // NOTE(review): any other method touching childTokenSourceList must hold
    // this same monitor — verify cancel() is synchronized too.
    public synchronized void register(LinkedCancellationTokenSource childTokenSource) {
        this.childTokenSourceList.add(childTokenSource);
    }

    // True once cancelled locally or the parent source has been closed.
    public boolean isCancellationRequested() {
        return this.cancellationRequested.get() || this.tokenSource.isClosed();
    }
}
// Cancellation token linked to a parent LinkedCancellationTokenSource; child
// sources registered here are closed when the token is cancelled.
class LinkedCancellationToken {
    // Child sources to close on cancellation; guarded by the instance monitor.
    private final List<LinkedCancellationTokenSource> childTokenSourceList;
    // Parent source; its closure also implies cancellation.
    private final LinkedCancellationTokenSource tokenSource;
    // Flag set once cancellation has been requested.
    private final AtomicBoolean cancellationRequested;

    public LinkedCancellationToken(LinkedCancellationTokenSource tokenSource) {
        this.childTokenSourceList = new ArrayList<>();
        this.tokenSource = tokenSource;
        this.cancellationRequested = new AtomicBoolean();
    }

    // Registers a child under the instance monitor; refuses registration after
    // cancellation so a late child can never be left unclosed.
    public void register(LinkedCancellationTokenSource childTokenSource) {
        synchronized (this) {
            if (this.cancellationRequested.get()) {
                throw new IllegalStateException("The cancellation token has been cancelled");
            }
            this.childTokenSourceList.add(childTokenSource);
        }
    }

    // True once cancelled locally or the parent source has been closed.
    public boolean isCancellationRequested() {
        return this.cancellationRequested.get() || this.tokenSource.isClosed();
    }
}
@moderakh Changed to use synchronized
// Cancels at most once: closes all registered child token sources, then clears
// the registration list.
// NOTE(review): childTokenSourceList is iterated and cleared without holding the
// monitor that register() uses elsewhere — possible race with a concurrent
// register(); confirm synchronization.
public void cancel() {
    if (this.cancellationRequested.compareAndSet(false, true)) {
        for (LinkedCancellationTokenSource childTokenSource : this.childTokenSourceList) {
            childTokenSource.close();
        }
        childTokenSourceList.clear();
    }
}
childTokenSourceList.clear();
// Cancels at most once under the instance monitor: closes every registered child
// token source and clears the list. Holding the monitor for the whole sequence
// prevents interleaving with register(), which is expected to lock the same monitor.
public void cancel() {
    synchronized (this) {
        if (this.cancellationRequested.compareAndSet(false, true)) {
            for (LinkedCancellationTokenSource childTokenSource : this.childTokenSourceList) {
                childTokenSource.close();
            }
            childTokenSourceList.clear();
        }
    }
}
// Cancellation token linked to a parent LinkedCancellationTokenSource. Children
// register so they can be closed on cancellation; parent closure also cancels.
class LinkedCancellationToken {
    // Child sources to close on cancellation; register() guards the list with
    // the instance monitor.
    private final List<LinkedCancellationTokenSource> childTokenSourceList;
    // Parent source; its closure counts as cancellation.
    private final LinkedCancellationTokenSource tokenSource;
    // Set once cancellation has been requested.
    private final AtomicBoolean cancellationRequested;

    public LinkedCancellationToken(LinkedCancellationTokenSource tokenSource) {
        this.childTokenSourceList = new ArrayList<>();
        this.tokenSource = tokenSource;
        cancellationRequested = new AtomicBoolean(false);
    }

    // NOTE(review): other methods mutating childTokenSourceList must hold this
    // same monitor — verify cancel() does.
    public synchronized void register(LinkedCancellationTokenSource childTokenSource) {
        this.childTokenSourceList.add(childTokenSource);
    }

    // True once cancelled locally or the parent source has been closed.
    public boolean isCancellationRequested() {
        return this.cancellationRequested.get() || this.tokenSource.isClosed();
    }
}
// Cancellation token linked to a parent LinkedCancellationTokenSource. Children
// register so they can be closed on cancellation; parent closure also cancels.
class LinkedCancellationToken {
    // Child sources to close on cancellation; guarded by the instance monitor.
    private final List<LinkedCancellationTokenSource> childTokenSourceList;
    // Parent source; its closure counts as cancellation.
    private final LinkedCancellationTokenSource tokenSource;
    // Set once cancellation has been requested.
    private final AtomicBoolean cancellationRequested;

    public LinkedCancellationToken(LinkedCancellationTokenSource tokenSource) {
        this.childTokenSourceList = new ArrayList<>();
        this.tokenSource = tokenSource;
        this.cancellationRequested = new AtomicBoolean();
    }

    // Registers a child under the instance monitor; refuses registration after
    // cancellation so a late child cannot be left unclosed.
    public void register(LinkedCancellationTokenSource childTokenSource) {
        synchronized (this) {
            if (this.cancellationRequested.get()) {
                throw new IllegalStateException("The cancellation token has been cancelled");
            }
            this.childTokenSourceList.add(childTokenSource);
        }
    }

    // True once cancelled locally or the parent source has been closed.
    public boolean isCancellationRequested() {
        return this.cancellationRequested.get() || this.tokenSource.isClosed();
    }
}
The 20-character limit is stated in the spec as a SHOULD NOT rather than a MUST NOT, meaning longer names are discouraged but still allowed.
/**
 * Validates an extension attribute name against the CloudEvents spec: the name must not
 * collide with a reserved context attribute name and must contain only lower-case ASCII
 * letters or digits.
 *
 * The spec's 20-character limit is a SHOULD NOT (discouraged, not forbidden), so names
 * longer than 20 characters are intentionally accepted rather than rejected.
 *
 * @param name the candidate extension attribute name.
 * @return true if the name is a legal extension attribute name.
 */
private static boolean validateAttributeName(String name) {
    if (RESERVED_ATTRIBUTE_NAMES.contains(name)) {
        return false;
    }
    for (int i = 0; i < name.length(); i++) {
        char c = name.charAt(i);
        if (!((c >= 'a' && c <= 'z') || (c >= '0' && c <= '9'))) {
            return false;
        }
    }
    return true;
}
if (name.length() > 20) {
/**
 * Validates an extension attribute name: it must not be a reserved CloudEvent context
 * attribute name and may contain only lower-case ASCII letters or digits.
 *
 * @param name the candidate extension attribute name.
 * @return true if the name is a legal extension attribute name.
 */
private static boolean validateAttributeName(String name) {
    if (RESERVED_ATTRIBUTE_NAMES.contains(name)) {
        return false;
    }
    // Every code point must be a lower-case ASCII letter or a digit.
    return name.chars().allMatch(c -> (c >= 'a' && c <= 'z') || (c >= '0' && c <= '9'));
}
class CloudEvent { private static final String SPEC_VERSION = "1.0"; private static final JsonSerializer DESERIALIZER = JsonSerializerProviders.createInstance(); private static final ClientLogger LOGGER = new ClientLogger(CloudEvent.class); private static final Set<String> RESERVED_ATTRIBUTE_NAMES = new HashSet<>(Arrays.asList( "specversion", "id", "source", "type", "datacontenttype", "dataschema", "subject", "time", "data" )); /* * An identifier for the event. The combination of id and source must be * unique for each distinct event. */ @JsonProperty(value = "id", required = true) private String id; /* * Identifies the context in which an event happened. The combination of id * and source must be unique for each distinct event. */ @JsonProperty(value = "source", required = true) private String source; /* * Event data specific to the event type. */ @JsonProperty(value = "data") private Object data; /* * Event data specific to the event type, encoded as a base64 string. */ @JsonProperty(value = "data_base64") private byte[] dataBase64; /* * Type of event related to the originating occurrence. */ @JsonProperty(value = "type", required = true) private String type; /* * The time (in UTC) the event was generated, in RFC3339 format. */ @JsonProperty(value = "time") private OffsetDateTime time; /* * The version of the CloudEvents specification which the event uses. */ @JsonProperty(value = "specversion", required = true) private String specVersion; /* * Identifies the schema that data adheres to. */ @JsonProperty(value = "dataschema") private String dataSchema; /* * Content type of data value. */ @JsonProperty(value = "datacontenttype") private String dataContentType; /* * This describes the subject of the event in the context of the event * producer (identified by source). 
*/ @JsonProperty(value = "subject") private String subject; @JsonIgnore private Map<String, Object> extensionAttributes; /* * Cache serialized data for getData() */ @JsonIgnore private BinaryData binaryData; private CloudEvent() { } public CloudEvent(String source, String type, Object data) { this(source, type, data, null); } public CloudEvent(String source, String type, Object data, String dataContentType) { this.source = source; this.type = type; if (data instanceof byte[]) { this.dataBase64 = Base64.getEncoder().encode((byte[]) data); } else { this.data = data; } this.dataContentType = dataContentType; this.id = UUID.randomUUID().toString(); this.specVersion = CloudEvent.SPEC_VERSION; } /** * Deserialize the {@link CloudEvent} from a JSON string. * @param cloudEventsJson the JSON payload containing one or more events. * * @return all of the events in the payload deserialized as {@link CloudEvent CloudEvents}. * @throws IllegalArgumentException if the input parameter isn't a JSON string for a cloud event or an array of it. */ public static List<CloudEvent> fromString(String cloudEventsJson) { try { List<CloudEvent> events = Arrays.asList(DESERIALIZER.deserialize( new ByteArrayInputStream(cloudEventsJson.getBytes(StandardCharsets.UTF_8)), TypeReference.createInstance(CloudEvent[].class))); for (CloudEvent event : events) { if (event.getSource() == null || event.getType() == null) { throw LOGGER.logExceptionAsError(new IllegalArgumentException( "'source' and 'type' are mandatory attributes for a CloudEvent. " + "Check if the input param is a JSON string for a CloudEvent or an array of it.")); } } return events; } catch (UncheckedIOException uncheckedIOException) { throw LOGGER.logExceptionAsError(new IllegalArgumentException("The input parameter isn't a JSON string.", uncheckedIOException.getCause())); } } /** * Get the id of the cloud event. * @return the id. */ public String getId() { return this.id; } /** * Set a custom id. 
Note that a random id is already set by default. * @param id the id to set. * * @return the cloud event itself. */ public CloudEvent setId(String id) { if (CoreUtils.isNullOrEmpty(id)) { throw new IllegalArgumentException("id cannot be null or empty"); } this.id = id; return this; } /** * Get the URI source of the event. * @return the source. */ public String getSource() { return this.source; } /** * Get the data associated with this event as a {@link BinaryData}, which has API to deserialize the data into * a String, an Object, or a byte[]. * @return A {@link BinaryData} that wraps the this event's data payload. */ public BinaryData getData() { if (this.binaryData == null) { if (this.data != null) { if (this.data instanceof String) { this.binaryData = BinaryData.fromString((String) this.data); } else if (this.data instanceof byte[]) { this.binaryData = BinaryData.fromBytes((byte[]) this.data); } else { this.binaryData = BinaryData.fromObject(this.data); } } else if (this.dataBase64 != null) { this.binaryData = BinaryData.fromBytes(this.dataBase64); } } return this.binaryData; } /** * Set the data content type with this event. * @param dataContentType the data content type to set. * @return the cloud event itself. */ public CloudEvent setDataContentType(String dataContentType) { this.dataContentType = dataContentType; return this; } /** * Get the type of event, e.g. "Contoso.Items.ItemReceived". * @return the type of the event. */ public String getType() { return this.type; } /** * Get the time associated with the occurrence of the event. * @return the event time, or null if the time is not set. */ public OffsetDateTime getTime() { return this.time; } /** * Set the time associated with the occurrence of the event. * @param time the time to set. * * @return the cloud event itself. */ public CloudEvent setTime(OffsetDateTime time) { this.time = time; return this; } /** * Get the content MIME type that the data is in. 
A null value indicates that the data is either nonexistent or in the * "application/json" type. Note that "application/json" is still a possible value for this field. * @return the content type the data is in, or null if the data is nonexistent or in "application/json" format. */ public String getDataContentType() { return this.dataContentType; } /** * Get the schema that the data adheres to. * @return a URI of the data schema, or null if it is not set. */ public String getDataSchema() { return this.dataSchema; } /** * Set the schema that the data adheres to. * @param dataSchema a URI identifying the schema of the data. * * @return the cloud event itself. */ public CloudEvent setDataSchema(String dataSchema) { this.dataSchema = dataSchema; return this; } /** * Get the subject associated with this event. * @return the subject, or null if the subject was not set. */ public String getSubject() { return this.subject; } /** * Set the subject of the event. * @param subject the subject to set. * * @return the cloud event itself. */ public CloudEvent setSubject(String subject) { this.subject = subject; return this; } /** * Get the spec version of this CloudEvent * @return the spec version, or null if the spec version was not set. */ public String getSpecVersion() { return specVersion; } /** * Get a map of the additional user-defined attributes associated with this event. * @return the extension attributes as an unmodifiable map. */ @JsonAnyGetter public Map<String, Object> getExtensionAttributes() { return this.extensionAttributes; } /** * Add/Overwrite a single extension attribute to the cloud event. The property name will be transformed * to lowercase and must not share a name with any reserved cloud event properties. * @param name the name of the attribute. * @param value the value to associate with the name. * * @return the cloud event itself. 
*/ @JsonAnySetter public CloudEvent addExtensionAttribute(String name, Object value) { if (!validateAttributeName(name)) { throw LOGGER.logExceptionAsError(new IllegalArgumentException( "'name' must have at most 20 alphanumeric characters and not be one of the CloudEvent attribute names")); } if (this.extensionAttributes == null) { this.extensionAttributes = new HashMap<>(); } this.extensionAttributes.put(name.toLowerCase(Locale.ENGLISH), value); return this; } }
class accepts any String for compatibility with legacy systems. * @param type Type of event related to the originating occurrence. * @param data A {@link BinaryData}
The EG service doesn't require `source` (not sure about `type`), so customers may not be able to use this type with data they are publishing through the REST endpoint. I'm not saying we definitely shouldn't have the validation, but we probably need to decide what the right behavior is here across languages.
/**
 * Deserializes {@link CloudEvent CloudEvents} from a JSON string, validating that every
 * event carries the mandatory 'source' and 'type' context attributes.
 *
 * @param cloudEventsJson the JSON payload containing one or more events.
 * @return all of the events in the payload deserialized as {@link CloudEvent CloudEvents}.
 * @throws IllegalArgumentException if the input parameter isn't a JSON string for a cloud
 *     event or an array of it, or if any event is missing 'source' or 'type'.
 */
public static List<CloudEvent> fromString(String cloudEventsJson) {
    return fromString(cloudEventsJson, false);
}

/**
 * Deserializes {@link CloudEvent CloudEvents} from a JSON string, optionally skipping the
 * 'source'/'type' validation. Skipping is useful for events produced by legacy systems or
 * published directly through a service REST endpoint that does not populate these attributes.
 *
 * @param cloudEventsJson the JSON payload containing one or more events.
 * @param skipValidation whether to skip validating the mandatory 'source' and 'type' attributes.
 * @return all of the events in the payload deserialized as {@link CloudEvent CloudEvents}.
 * @throws IllegalArgumentException if the input parameter isn't a JSON string for a cloud
 *     event or an array of it, or (when validating) if any event is missing 'source' or 'type'.
 */
public static List<CloudEvent> fromString(String cloudEventsJson, boolean skipValidation) {
    try {
        List<CloudEvent> events = Arrays.asList(DESERIALIZER.deserialize(
            new ByteArrayInputStream(cloudEventsJson.getBytes(StandardCharsets.UTF_8)),
            TypeReference.createInstance(CloudEvent[].class)));
        if (!skipValidation) {
            for (CloudEvent event : events) {
                if (event.getSource() == null || event.getType() == null) {
                    throw LOGGER.logExceptionAsError(new IllegalArgumentException(
                        "'source' and 'type' are mandatory attributes for a CloudEvent. "
                            + "Check if the input param is a JSON string for a CloudEvent or an array of it."));
                }
            }
        }
        return events;
    } catch (UncheckedIOException uncheckedIOException) {
        // Surface malformed JSON as IllegalArgumentException, preserving the IO cause.
        throw LOGGER.logExceptionAsError(new IllegalArgumentException("The input parameter isn't a JSON string.",
            uncheckedIOException.getCause()));
    }
}
"'source' and 'type' are mandatory attributes for a CloudEvent. " +
/**
 * Deserializes {@link CloudEvent CloudEvents} from a JSON string.
 * Delegates to the two-argument overload with the second argument {@code false} —
 * presumably a 'skipValidation' flag, so validation is enabled here; confirm against
 * the overload's declaration.
 *
 * @param cloudEventsJson the JSON payload containing one or more events.
 * @return all of the events in the payload deserialized as {@link CloudEvent CloudEvents}.
 */
public static List<CloudEvent> fromString(String cloudEventsJson) {
    return fromString(cloudEventsJson, false);
}
class CloudEvent { private static final String SPEC_VERSION = "1.0"; private static final JsonSerializer DESERIALIZER = JsonSerializerProviders.createInstance(); private static final ClientLogger LOGGER = new ClientLogger(CloudEvent.class); private static final Set<String> RESERVED_ATTRIBUTE_NAMES = new HashSet<>(Arrays.asList( "specversion", "id", "source", "type", "datacontenttype", "dataschema", "subject", "time", "data" )); /* * An identifier for the event. The combination of id and source must be * unique for each distinct event. */ @JsonProperty(value = "id", required = true) private String id; /* * Identifies the context in which an event happened. The combination of id * and source must be unique for each distinct event. */ @JsonProperty(value = "source", required = true) private String source; /* * Event data specific to the event type. */ @JsonProperty(value = "data") private Object data; /* * Event data specific to the event type, encoded as a base64 string. */ @JsonProperty(value = "data_base64") private byte[] dataBase64; /* * Type of event related to the originating occurrence. */ @JsonProperty(value = "type", required = true) private String type; /* * The time (in UTC) the event was generated, in RFC3339 format. */ @JsonProperty(value = "time") private OffsetDateTime time; /* * The version of the CloudEvents specification which the event uses. */ @JsonProperty(value = "specversion", required = true) private String specVersion; /* * Identifies the schema that data adheres to. */ @JsonProperty(value = "dataschema") private String dataSchema; /* * Content type of data value. */ @JsonProperty(value = "datacontenttype") private String dataContentType; /* * This describes the subject of the event in the context of the event * producer (identified by source). 
*/ @JsonProperty(value = "subject") private String subject; @JsonIgnore private Map<String, Object> extensionAttributes; /* * Cache serialized data for getData() */ @JsonIgnore private BinaryData binaryData; private CloudEvent() { } public CloudEvent(String source, String type, Object data) { this(source, type, data, null); } public CloudEvent(String source, String type, Object data, String dataContentType) { this.source = source; this.type = type; if (data instanceof byte[]) { this.dataBase64 = Base64.getEncoder().encode((byte[]) data); } else { this.data = data; } this.dataContentType = dataContentType; this.id = UUID.randomUUID().toString(); this.specVersion = CloudEvent.SPEC_VERSION; } /** * Deserialize the {@link CloudEvent} from a JSON string. * @param cloudEventsJson the JSON payload containing one or more events. * * @return all of the events in the payload deserialized as {@link CloudEvent CloudEvents}. * @throws IllegalArgumentException if the input parameter isn't a JSON string for a cloud event or an array of it. */ /** * Get the id of the cloud event. * @return the id. */ public String getId() { return this.id; } /** * Set a custom id. Note that a random id is already set by default. * @param id the id to set. * * @return the cloud event itself. */ public CloudEvent setId(String id) { if (CoreUtils.isNullOrEmpty(id)) { throw new IllegalArgumentException("id cannot be null or empty"); } this.id = id; return this; } /** * Get the URI source of the event. * @return the source. */ public String getSource() { return this.source; } /** * Get the data associated with this event as a {@link BinaryData}, which has API to deserialize the data into * a String, an Object, or a byte[]. * @return A {@link BinaryData} that wraps the this event's data payload. 
*/ public BinaryData getData() { if (this.binaryData == null) { if (this.data != null) { if (this.data instanceof String) { this.binaryData = BinaryData.fromString((String) this.data); } else if (this.data instanceof byte[]) { this.binaryData = BinaryData.fromBytes((byte[]) this.data); } else { this.binaryData = BinaryData.fromObject(this.data); } } else if (this.dataBase64 != null) { this.binaryData = BinaryData.fromBytes(this.dataBase64); } } return this.binaryData; } /** * Set the data content type with this event. * @param dataContentType the data content type to set. * @return the cloud event itself. */ public CloudEvent setDataContentType(String dataContentType) { this.dataContentType = dataContentType; return this; } /** * Get the type of event, e.g. "Contoso.Items.ItemReceived". * @return the type of the event. */ public String getType() { return this.type; } /** * Get the time associated with the occurrence of the event. * @return the event time, or null if the time is not set. */ public OffsetDateTime getTime() { return this.time; } /** * Set the time associated with the occurrence of the event. * @param time the time to set. * * @return the cloud event itself. */ public CloudEvent setTime(OffsetDateTime time) { this.time = time; return this; } /** * Get the content MIME type that the data is in. A null value indicates that the data is either nonexistent or in the * "application/json" type. Note that "application/json" is still a possible value for this field. * @return the content type the data is in, or null if the data is nonexistent or in "application/json" format. */ public String getDataContentType() { return this.dataContentType; } /** * Get the schema that the data adheres to. * @return a URI of the data schema, or null if it is not set. */ public String getDataSchema() { return this.dataSchema; } /** * Set the schema that the data adheres to. * @param dataSchema a URI identifying the schema of the data. * * @return the cloud event itself. 
*/ public CloudEvent setDataSchema(String dataSchema) { this.dataSchema = dataSchema; return this; } /** * Get the subject associated with this event. * @return the subject, or null if the subject was not set. */ public String getSubject() { return this.subject; } /** * Set the subject of the event. * @param subject the subject to set. * * @return the cloud event itself. */ public CloudEvent setSubject(String subject) { this.subject = subject; return this; } /** * Get the spec version of this CloudEvent * @return the spec version, or null if the spec version was not set. */ public String getSpecVersion() { return specVersion; } /** * Get a map of the additional user-defined attributes associated with this event. * @return the extension attributes as an unmodifiable map. */ @JsonAnyGetter public Map<String, Object> getExtensionAttributes() { return this.extensionAttributes; } /** * Add/Overwrite a single extension attribute to the cloud event. The property name will be transformed * to lowercase and must not share a name with any reserved cloud event properties. * @param name the name of the attribute. * @param value the value to associate with the name. * * @return the cloud event itself. */ @JsonAnySetter public CloudEvent addExtensionAttribute(String name, Object value) { if (!validateAttributeName(name)) { throw LOGGER.logExceptionAsError(new IllegalArgumentException( "'name' must have at most 20 alphanumeric characters and not be one of the CloudEvent attribute names")); } if (this.extensionAttributes == null) { this.extensionAttributes = new HashMap<>(); } this.extensionAttributes.put(name.toLowerCase(Locale.ENGLISH), value); return this; } private static boolean validateAttributeName(String name) { if (name.length() > 20) { return false; } if (RESERVED_ATTRIBUTE_NAMES.contains(name)) { return false; } for (int i = 0; i < name.length(); i++) { char c = name.charAt(i); if (!((c >= 'a' && c <= 'z') || (c >= '0' && c <= '9'))) { return false; } } return true; } }
class accepts any String for compatibility with legacy systems. * @param type Type of event related to the originating occurrence. * @param data A {@link BinaryData}
Is this check not specific to EventGrid ? Since we are putting in core, it should be common ?
// Validates an extension attribute name: rejects names longer than 20 characters,
// reserved CloudEvent context attribute names, and any character outside lower-case
// ASCII letters/digits.
// NOTE(review): the CloudEvents spec states the 20-character limit as SHOULD NOT
// rather than MUST NOT, so failing hard on length may be stricter than required —
// confirm the intended policy.
private static boolean validateAttributeName(String name) {
    if (name.length() > 20) {
        return false;
    }
    if (RESERVED_ATTRIBUTE_NAMES.contains(name)) {
        return false;
    }
    for (int i = 0; i < name.length(); i++) {
        char c = name.charAt(i);
        if (!((c >= 'a' && c <= 'z') || (c >= '0' && c <= '9'))) {
            return false;
        }
    }
    return true;
}
return false;
// Validates an extension attribute name: it must not be a reserved CloudEvent
// context attribute name and may contain only lower-case ASCII letters or digits.
// No length limit is enforced (the spec's 20-character limit is only a SHOULD NOT).
private static boolean validateAttributeName(String name) {
    if (RESERVED_ATTRIBUTE_NAMES.contains(name)) {
        return false;
    }
    for (int i = 0; i < name.length(); i++) {
        char c = name.charAt(i);
        if (!((c >= 'a' && c <= 'z') || (c >= '0' && c <= '9'))) {
            return false;
        }
    }
    return true;
}
class CloudEvent { private static final String SPEC_VERSION = "1.0"; private static final JsonSerializer DESERIALIZER = JsonSerializerProviders.createInstance(); private static final ClientLogger LOGGER = new ClientLogger(CloudEvent.class); private static final Set<String> RESERVED_ATTRIBUTE_NAMES = new HashSet<>(Arrays.asList( "specversion", "id", "source", "type", "datacontenttype", "dataschema", "subject", "time", "data" )); /* * An identifier for the event. The combination of id and source must be * unique for each distinct event. */ @JsonProperty(value = "id", required = true) private String id; /* * Identifies the context in which an event happened. The combination of id * and source must be unique for each distinct event. */ @JsonProperty(value = "source", required = true) private String source; /* * Event data specific to the event type. */ @JsonProperty(value = "data") private Object data; /* * Event data specific to the event type, encoded as a base64 string. */ @JsonProperty(value = "data_base64") private byte[] dataBase64; /* * Type of event related to the originating occurrence. */ @JsonProperty(value = "type", required = true) private String type; /* * The time (in UTC) the event was generated, in RFC3339 format. */ @JsonProperty(value = "time") private OffsetDateTime time; /* * The version of the CloudEvents specification which the event uses. */ @JsonProperty(value = "specversion", required = true) private String specVersion; /* * Identifies the schema that data adheres to. */ @JsonProperty(value = "dataschema") private String dataSchema; /* * Content type of data value. */ @JsonProperty(value = "datacontenttype") private String dataContentType; /* * This describes the subject of the event in the context of the event * producer (identified by source). 
*/ @JsonProperty(value = "subject") private String subject; @JsonIgnore private Map<String, Object> extensionAttributes; /* * Cache serialized data for getData() */ @JsonIgnore private BinaryData binaryData; private CloudEvent() { } public CloudEvent(String source, String type, Object data) { this(source, type, data, null); } public CloudEvent(String source, String type, Object data, String dataContentType) { this.source = source; this.type = type; if (data instanceof byte[]) { this.dataBase64 = Base64.getEncoder().encode((byte[]) data); } else { this.data = data; } this.dataContentType = dataContentType; this.id = UUID.randomUUID().toString(); this.specVersion = CloudEvent.SPEC_VERSION; } /** * Deserialize the {@link CloudEvent} from a JSON string. * @param cloudEventsJson the JSON payload containing one or more events. * * @return all of the events in the payload deserialized as {@link CloudEvent CloudEvents}. * @throws IllegalArgumentException if the input parameter isn't a JSON string for a cloud event or an array of it. */ public static List<CloudEvent> fromString(String cloudEventsJson) { try { List<CloudEvent> events = Arrays.asList(DESERIALIZER.deserialize( new ByteArrayInputStream(cloudEventsJson.getBytes(StandardCharsets.UTF_8)), TypeReference.createInstance(CloudEvent[].class))); for (CloudEvent event : events) { if (event.getSource() == null || event.getType() == null) { throw LOGGER.logExceptionAsError(new IllegalArgumentException( "'source' and 'type' are mandatory attributes for a CloudEvent. " + "Check if the input param is a JSON string for a CloudEvent or an array of it.")); } } return events; } catch (UncheckedIOException uncheckedIOException) { throw LOGGER.logExceptionAsError(new IllegalArgumentException("The input parameter isn't a JSON string.", uncheckedIOException.getCause())); } } /** * Get the id of the cloud event. * @return the id. */ public String getId() { return this.id; } /** * Set a custom id. 
Note that a random id is already set by default. * @param id the id to set. * * @return the cloud event itself. */ public CloudEvent setId(String id) { if (CoreUtils.isNullOrEmpty(id)) { throw new IllegalArgumentException("id cannot be null or empty"); } this.id = id; return this; } /** * Get the URI source of the event. * @return the source. */ public String getSource() { return this.source; } /** * Get the data associated with this event as a {@link BinaryData}, which has API to deserialize the data into * a String, an Object, or a byte[]. * @return A {@link BinaryData} that wraps the this event's data payload. */ public BinaryData getData() { if (this.binaryData == null) { if (this.data != null) { if (this.data instanceof String) { this.binaryData = BinaryData.fromString((String) this.data); } else if (this.data instanceof byte[]) { this.binaryData = BinaryData.fromBytes((byte[]) this.data); } else { this.binaryData = BinaryData.fromObject(this.data); } } else if (this.dataBase64 != null) { this.binaryData = BinaryData.fromBytes(this.dataBase64); } } return this.binaryData; } /** * Set the data content type with this event. * @param dataContentType the data content type to set. * @return the cloud event itself. */ public CloudEvent setDataContentType(String dataContentType) { this.dataContentType = dataContentType; return this; } /** * Get the type of event, e.g. "Contoso.Items.ItemReceived". * @return the type of the event. */ public String getType() { return this.type; } /** * Get the time associated with the occurrence of the event. * @return the event time, or null if the time is not set. */ public OffsetDateTime getTime() { return this.time; } /** * Set the time associated with the occurrence of the event. * @param time the time to set. * * @return the cloud event itself. */ public CloudEvent setTime(OffsetDateTime time) { this.time = time; return this; } /** * Get the content MIME type that the data is in. 
A null value indicates that the data is either nonexistent or in the * "application/json" type. Note that "application/json" is still a possible value for this field. * @return the content type the data is in, or null if the data is nonexistent or in "application/json" format. */ public String getDataContentType() { return this.dataContentType; } /** * Get the schema that the data adheres to. * @return a URI of the data schema, or null if it is not set. */ public String getDataSchema() { return this.dataSchema; } /** * Set the schema that the data adheres to. * @param dataSchema a URI identifying the schema of the data. * * @return the cloud event itself. */ public CloudEvent setDataSchema(String dataSchema) { this.dataSchema = dataSchema; return this; } /** * Get the subject associated with this event. * @return the subject, or null if the subject was not set. */ public String getSubject() { return this.subject; } /** * Set the subject of the event. * @param subject the subject to set. * * @return the cloud event itself. */ public CloudEvent setSubject(String subject) { this.subject = subject; return this; } /** * Get the spec version of this CloudEvent * @return the spec version, or null if the spec version was not set. */ public String getSpecVersion() { return specVersion; } /** * Get a map of the additional user-defined attributes associated with this event. * @return the extension attributes as an unmodifiable map. */ @JsonAnyGetter public Map<String, Object> getExtensionAttributes() { return this.extensionAttributes; } /** * Add/Overwrite a single extension attribute to the cloud event. The property name will be transformed * to lowercase and must not share a name with any reserved cloud event properties. * @param name the name of the attribute. * @param value the value to associate with the name. * * @return the cloud event itself. 
*/ @JsonAnySetter public CloudEvent addExtensionAttribute(String name, Object value) { if (!validateAttributeName(name)) { throw LOGGER.logExceptionAsError(new IllegalArgumentException( "'name' must have at most 20 alphanumeric characters and not be one of the CloudEvent attribute names")); } if (this.extensionAttributes == null) { this.extensionAttributes = new HashMap<>(); } this.extensionAttributes.put(name.toLowerCase(Locale.ENGLISH), value); return this; } }
class accepts any String for compatibility with legacy systems. * @param type Type of event related to the originating occurrence. * @param data A {@link BinaryData}
If `name` has upper-case letters, this throws an Exception, but the Javadoc says the name will be converted to lowercase. What is the expected behavior?
/**
 * Add/Overwrite a single extension attribute on the cloud event. As documented, the
 * attribute name is transformed to lowercase; validation runs on the normalized name,
 * so mixed-case input such as "MyAttr" is accepted and stored as "myattr".
 *
 * @param name the name of the attribute; after lowercasing it must have at most 20
 *             alphanumeric characters and must not be a reserved CloudEvent attribute name.
 * @param value the value to associate with the name.
 *
 * @return the cloud event itself.
 * @throws NullPointerException if {@code name} or {@code value} is null.
 * @throws IllegalArgumentException if the normalized name is not a valid extension attribute name.
 */
@JsonAnySetter
public CloudEvent addExtensionAttribute(String name, Object value) {
    if (name == null) {
        throw LOGGER.logExceptionAsError(new NullPointerException("'name' cannot be null."));
    }
    if (value == null) {
        throw LOGGER.logExceptionAsError(new NullPointerException("'value' cannot be null."));
    }
    // Normalize BEFORE validating so behavior matches the documented lowercasing contract;
    // previously an upper-case name was rejected even though the Javadoc promised lowercasing.
    final String normalizedName = name.toLowerCase(Locale.ENGLISH);
    if (!validateAttributeName(normalizedName)) {
        throw LOGGER.logExceptionAsError(new IllegalArgumentException(
            "'name' must have at most 20 alphanumeric characters and not be one of the CloudEvent attribute names"));
    }
    if (this.extensionAttributes == null) {
        this.extensionAttributes = new HashMap<>();
    }
    this.extensionAttributes.put(normalizedName, value);
    return this;
}
throw LOGGER.logExceptionAsError(new IllegalArgumentException(
/**
 * Add/Overwrite a single extension attribute on the cloud event.
 *
 * @param name the attribute name; must contain only lower-case alphanumeric characters
 *             and must not collide with a reserved CloudEvent attribute name.
 * @param value the value to associate with the name.
 *
 * @return the cloud event itself.
 * @throws NullPointerException if {@code name} or {@code value} is null.
 * @throws IllegalArgumentException if {@code name} is not a valid extension attribute name.
 */
public CloudEvent addExtensionAttribute(String name, Object value) {
    Objects.requireNonNull(name, "'name' cannot be null.");
    Objects.requireNonNull(value, "'value' cannot be null.");
    final boolean nameIsValid = validateAttributeName(name);
    if (!nameIsValid) {
        final String message = "Extension attribute 'name' must have only lower-case alphanumeric characters "
            + "and not be one of the CloudEvent reserved attribute names: "
            + String.join(",", RESERVED_ATTRIBUTE_NAMES);
        throw LOGGER.logExceptionAsError(new IllegalArgumentException(message));
    }
    // Lazily create the backing map on first use.
    Map<String, Object> attributes = this.extensionAttributes;
    if (attributes == null) {
        attributes = new HashMap<>();
        this.extensionAttributes = attributes;
    }
    attributes.put(name, value);
    return this;
}
class CloudEvent { private static final String SPEC_VERSION = "1.0"; private static final JsonSerializer DESERIALIZER = JsonSerializerProviders.createInstance(); private static final ClientLogger LOGGER = new ClientLogger(CloudEvent.class); private static final Set<String> RESERVED_ATTRIBUTE_NAMES = new HashSet<>(Arrays.asList( "specversion", "id", "source", "type", "datacontenttype", "dataschema", "subject", "time", "data" )); /* * An identifier for the event. The combination of id and source must be * unique for each distinct event. */ @JsonProperty(value = "id", required = true) private String id; /* * Identifies the context in which an event happened. The combination of id * and source must be unique for each distinct event. */ @JsonProperty(value = "source", required = true) private String source; /* * Event data specific to the event type. */ @JsonProperty(value = "data") private Object data; /* * Event data specific to the event type, encoded as a base64 string. */ @JsonProperty(value = "data_base64") private byte[] dataBase64; /* * Type of event related to the originating occurrence. */ @JsonProperty(value = "type", required = true) private String type; /* * The time (in UTC) the event was generated, in RFC3339 format. */ @JsonProperty(value = "time") private OffsetDateTime time; /* * The version of the CloudEvents specification which the event uses. */ @JsonProperty(value = "specversion", required = true) private String specVersion; /* * Identifies the schema that data adheres to. */ @JsonProperty(value = "dataschema") private String dataSchema; /* * Content type of data value. */ @JsonProperty(value = "datacontenttype") private String dataContentType; /* * This describes the subject of the event in the context of the event * producer (identified by source). 
*/ @JsonProperty(value = "subject") private String subject; @JsonIgnore private Map<String, Object> extensionAttributes; /* * Cache serialized data for getData() */ @JsonIgnore private BinaryData binaryData; private CloudEvent() { } public CloudEvent(String source, String type, Object data) { this(source, type, data, null); } public CloudEvent(String source, String type, Object data, String dataContentType) { this.source = source; this.type = type; if (data instanceof byte[]) { this.dataBase64 = Base64.getEncoder().encode((byte[]) data); } else { this.data = data; } this.dataContentType = dataContentType; this.id = UUID.randomUUID().toString(); this.specVersion = CloudEvent.SPEC_VERSION; } /** * Deserialize the {@link CloudEvent} from a JSON string. * @param cloudEventsJson the JSON payload containing one or more events. * * @return all of the events in the payload deserialized as {@link CloudEvent CloudEvents}. * @throws IllegalArgumentException if the input parameter isn't a JSON string for a cloud event or an array of it. */ public static List<CloudEvent> fromString(String cloudEventsJson) { try { List<CloudEvent> events = Arrays.asList(DESERIALIZER.deserialize( new ByteArrayInputStream(cloudEventsJson.getBytes(StandardCharsets.UTF_8)), TypeReference.createInstance(CloudEvent[].class))); for (CloudEvent event : events) { if (event.getSource() == null || event.getType() == null) { throw LOGGER.logExceptionAsError(new IllegalArgumentException( "'source' and 'type' are mandatory attributes for a CloudEvent. " + "Check if the input param is a JSON string for a CloudEvent or an array of it.")); } } return events; } catch (UncheckedIOException uncheckedIOException) { throw LOGGER.logExceptionAsError(new IllegalArgumentException("The input parameter isn't a JSON string.", uncheckedIOException.getCause())); } } /** * Get the id of the cloud event. * @return the id. */ public String getId() { return this.id; } /** * Set a custom id. 
Note that a random id is already set by default. * @param id the id to set. * * @return the cloud event itself. */ public CloudEvent setId(String id) { if (CoreUtils.isNullOrEmpty(id)) { throw new IllegalArgumentException("id cannot be null or empty"); } this.id = id; return this; } /** * Get the URI source of the event. * @return the source. */ public String getSource() { return this.source; } /** * Get the data associated with this event as a {@link BinaryData}, which has API to deserialize the data into * a String, an Object, or a byte[]. * @return A {@link BinaryData} that wraps the this event's data payload. */ public BinaryData getData() { if (this.binaryData == null) { if (this.data != null) { if (this.data instanceof String) { this.binaryData = BinaryData.fromString((String) this.data); } else if (this.data instanceof byte[]) { this.binaryData = BinaryData.fromBytes((byte[]) this.data); } else { this.binaryData = BinaryData.fromObject(this.data); } } else if (this.dataBase64 != null) { this.binaryData = BinaryData.fromBytes(this.dataBase64); } } return this.binaryData; } /** * Set the data content type with this event. * @param dataContentType the data content type to set. * @return the cloud event itself. */ public CloudEvent setDataContentType(String dataContentType) { this.dataContentType = dataContentType; return this; } /** * Get the type of event, e.g. "Contoso.Items.ItemReceived". * @return the type of the event. */ public String getType() { return this.type; } /** * Get the time associated with the occurrence of the event. * @return the event time, or null if the time is not set. */ public OffsetDateTime getTime() { return this.time; } /** * Set the time associated with the occurrence of the event. * @param time the time to set. * * @return the cloud event itself. */ public CloudEvent setTime(OffsetDateTime time) { this.time = time; return this; } /** * Get the content MIME type that the data is in. 
A null value indicates that the data is either nonexistent or in the * "application/json" type. Note that "application/json" is still a possible value for this field. * @return the content type the data is in, or null if the data is nonexistent or in "application/json" format. */ public String getDataContentType() { return this.dataContentType; } /** * Get the schema that the data adheres to. * @return a URI of the data schema, or null if it is not set. */ public String getDataSchema() { return this.dataSchema; } /** * Set the schema that the data adheres to. * @param dataSchema a URI identifying the schema of the data. * * @return the cloud event itself. */ public CloudEvent setDataSchema(String dataSchema) { this.dataSchema = dataSchema; return this; } /** * Get the subject associated with this event. * @return the subject, or null if the subject was not set. */ public String getSubject() { return this.subject; } /** * Set the subject of the event. * @param subject the subject to set. * * @return the cloud event itself. */ public CloudEvent setSubject(String subject) { this.subject = subject; return this; } /** * Get the spec version of this CloudEvent * @return the spec version, or null if the spec version was not set. */ public String getSpecVersion() { return specVersion; } /** * Get a map of the additional user-defined attributes associated with this event. * @return the extension attributes as an unmodifiable map. */ @JsonAnyGetter public Map<String, Object> getExtensionAttributes() { return this.extensionAttributes; } /** * Add/Overwrite a single extension attribute to the cloud event. The property name will be transformed * to lowercase and must not share a name with any reserved cloud event properties. * @param name the name of the attribute. * @param value the value to associate with the name. * * @return the cloud event itself. 
*/ @JsonAnySetter private static boolean validateAttributeName(String name) { if (name.length() > 20) { return false; } if (RESERVED_ATTRIBUTE_NAMES.contains(name)) { return false; } for (int i = 0; i < name.length(); i++) { char c = name.charAt(i); if (!((c >= 'a' && c <= 'z') || (c >= '0' && c <= '9'))) { return false; } } return true; } }
class accepts any String for compatibility with legacy systems. * @param type Type of event related to the originating occurrence. * @param data A {@link BinaryData}
"source" is required in the CloudEvent spec. We need to sync among all languages. https://github.com/cloudevents/spec/blob/v1.0.1/spec.md#required-attributes
/**
 * Deserialize one or more {@link CloudEvent CloudEvents} from a JSON string, rejecting any
 * event that lacks the mandatory 'source' or 'type' attribute.
 *
 * @param cloudEventsJson the JSON payload containing one or more events.
 * @return all of the events in the payload deserialized as {@link CloudEvent CloudEvents}.
 * @throws IllegalArgumentException if the input isn't a JSON string for a cloud event
 *         (or an array of them), or any event is missing 'source' or 'type'.
 */
public static List<CloudEvent> fromString(String cloudEventsJson) {
    try {
        final byte[] jsonBytes = cloudEventsJson.getBytes(StandardCharsets.UTF_8);
        final CloudEvent[] deserialized = DESERIALIZER.deserialize(
            new ByteArrayInputStream(jsonBytes), TypeReference.createInstance(CloudEvent[].class));
        final List<CloudEvent> events = Arrays.asList(deserialized);
        for (int i = 0; i < events.size(); i++) {
            final CloudEvent event = events.get(i);
            if (event.getSource() == null || event.getType() == null) {
                throw LOGGER.logExceptionAsError(new IllegalArgumentException(
                    "'source' and 'type' are mandatory attributes for a CloudEvent. "
                        + "Check if the input param is a JSON string for a CloudEvent or an array of it."));
            }
        }
        return events;
    } catch (UncheckedIOException deserializationError) {
        throw LOGGER.logExceptionAsError(new IllegalArgumentException("The input parameter isn't a JSON string.",
            deserializationError.getCause()));
    }
}
"'source' and 'type' are mandatory attributes for a CloudEvent. " +
/**
 * Deserialize one or more {@link CloudEvent CloudEvents} from a JSON string with validation
 * enabled: delegates to the two-argument overload with {@code skipValidation = false}, so every
 * deserialized event must have non-null 'id', 'source' and 'type' attributes.
 *
 * @param cloudEventsJson the JSON payload containing one or more events.
 * @return all of the events in the payload deserialized as {@link CloudEvent CloudEvents}.
 */
public static List<CloudEvent> fromString(String cloudEventsJson) { return fromString(cloudEventsJson, false); }
class CloudEvent { private static final String SPEC_VERSION = "1.0"; private static final JsonSerializer DESERIALIZER = JsonSerializerProviders.createInstance(); private static final ClientLogger LOGGER = new ClientLogger(CloudEvent.class); private static final Set<String> RESERVED_ATTRIBUTE_NAMES = new HashSet<>(Arrays.asList( "specversion", "id", "source", "type", "datacontenttype", "dataschema", "subject", "time", "data" )); /* * An identifier for the event. The combination of id and source must be * unique for each distinct event. */ @JsonProperty(value = "id", required = true) private String id; /* * Identifies the context in which an event happened. The combination of id * and source must be unique for each distinct event. */ @JsonProperty(value = "source", required = true) private String source; /* * Event data specific to the event type. */ @JsonProperty(value = "data") private Object data; /* * Event data specific to the event type, encoded as a base64 string. */ @JsonProperty(value = "data_base64") private byte[] dataBase64; /* * Type of event related to the originating occurrence. */ @JsonProperty(value = "type", required = true) private String type; /* * The time (in UTC) the event was generated, in RFC3339 format. */ @JsonProperty(value = "time") private OffsetDateTime time; /* * The version of the CloudEvents specification which the event uses. */ @JsonProperty(value = "specversion", required = true) private String specVersion; /* * Identifies the schema that data adheres to. */ @JsonProperty(value = "dataschema") private String dataSchema; /* * Content type of data value. */ @JsonProperty(value = "datacontenttype") private String dataContentType; /* * This describes the subject of the event in the context of the event * producer (identified by source). 
*/ @JsonProperty(value = "subject") private String subject; @JsonIgnore private Map<String, Object> extensionAttributes; /* * Cache serialized data for getData() */ @JsonIgnore private BinaryData binaryData; private CloudEvent() { } public CloudEvent(String source, String type, Object data) { this(source, type, data, null); } public CloudEvent(String source, String type, Object data, String dataContentType) { this.source = source; this.type = type; if (data instanceof byte[]) { this.dataBase64 = Base64.getEncoder().encode((byte[]) data); } else { this.data = data; } this.dataContentType = dataContentType; this.id = UUID.randomUUID().toString(); this.specVersion = CloudEvent.SPEC_VERSION; } /** * Deserialize the {@link CloudEvent} from a JSON string. * @param cloudEventsJson the JSON payload containing one or more events. * * @return all of the events in the payload deserialized as {@link CloudEvent CloudEvents}. * @throws IllegalArgumentException if the input parameter isn't a JSON string for a cloud event or an array of it. */ /** * Get the id of the cloud event. * @return the id. */ public String getId() { return this.id; } /** * Set a custom id. Note that a random id is already set by default. * @param id the id to set. * * @return the cloud event itself. */ public CloudEvent setId(String id) { if (CoreUtils.isNullOrEmpty(id)) { throw new IllegalArgumentException("id cannot be null or empty"); } this.id = id; return this; } /** * Get the URI source of the event. * @return the source. */ public String getSource() { return this.source; } /** * Get the data associated with this event as a {@link BinaryData}, which has API to deserialize the data into * a String, an Object, or a byte[]. * @return A {@link BinaryData} that wraps the this event's data payload. 
*/ public BinaryData getData() { if (this.binaryData == null) { if (this.data != null) { if (this.data instanceof String) { this.binaryData = BinaryData.fromString((String) this.data); } else if (this.data instanceof byte[]) { this.binaryData = BinaryData.fromBytes((byte[]) this.data); } else { this.binaryData = BinaryData.fromObject(this.data); } } else if (this.dataBase64 != null) { this.binaryData = BinaryData.fromBytes(this.dataBase64); } } return this.binaryData; } /** * Set the data content type with this event. * @param dataContentType the data content type to set. * @return the cloud event itself. */ public CloudEvent setDataContentType(String dataContentType) { this.dataContentType = dataContentType; return this; } /** * Get the type of event, e.g. "Contoso.Items.ItemReceived". * @return the type of the event. */ public String getType() { return this.type; } /** * Get the time associated with the occurrence of the event. * @return the event time, or null if the time is not set. */ public OffsetDateTime getTime() { return this.time; } /** * Set the time associated with the occurrence of the event. * @param time the time to set. * * @return the cloud event itself. */ public CloudEvent setTime(OffsetDateTime time) { this.time = time; return this; } /** * Get the content MIME type that the data is in. A null value indicates that the data is either nonexistent or in the * "application/json" type. Note that "application/json" is still a possible value for this field. * @return the content type the data is in, or null if the data is nonexistent or in "application/json" format. */ public String getDataContentType() { return this.dataContentType; } /** * Get the schema that the data adheres to. * @return a URI of the data schema, or null if it is not set. */ public String getDataSchema() { return this.dataSchema; } /** * Set the schema that the data adheres to. * @param dataSchema a URI identifying the schema of the data. * * @return the cloud event itself. 
*/ public CloudEvent setDataSchema(String dataSchema) { this.dataSchema = dataSchema; return this; } /** * Get the subject associated with this event. * @return the subject, or null if the subject was not set. */ public String getSubject() { return this.subject; } /** * Set the subject of the event. * @param subject the subject to set. * * @return the cloud event itself. */ public CloudEvent setSubject(String subject) { this.subject = subject; return this; } /** * Get the spec version of this CloudEvent * @return the spec version, or null if the spec version was not set. */ public String getSpecVersion() { return specVersion; } /** * Get a map of the additional user-defined attributes associated with this event. * @return the extension attributes as an unmodifiable map. */ @JsonAnyGetter public Map<String, Object> getExtensionAttributes() { return this.extensionAttributes; } /** * Add/Overwrite a single extension attribute to the cloud event. The property name will be transformed * to lowercase and must not share a name with any reserved cloud event properties. * @param name the name of the attribute. * @param value the value to associate with the name. * * @return the cloud event itself. */ @JsonAnySetter public CloudEvent addExtensionAttribute(String name, Object value) { if (!validateAttributeName(name)) { throw LOGGER.logExceptionAsError(new IllegalArgumentException( "'name' must have at most 20 alphanumeric characters and not be one of the CloudEvent attribute names")); } if (this.extensionAttributes == null) { this.extensionAttributes = new HashMap<>(); } this.extensionAttributes.put(name.toLowerCase(Locale.ENGLISH), value); return this; } private static boolean validateAttributeName(String name) { if (name.length() > 20) { return false; } if (RESERVED_ATTRIBUTE_NAMES.contains(name)) { return false; } for (int i = 0; i < name.length(); i++) { char c = name.charAt(i); if (!((c >= 'a' && c <= 'z') || (c >= '0' && c <= '9'))) { return false; } } return true; } }
class accepts any String for compatibility with legacy systems. * @param type Type of event related to the originating occurrence. * @param data A {@link BinaryData}
I think we should default the deserializer to JacksonAdapter if there's no JsonSerializer implementation found in the classpath.
/**
 * Deserialize one or more {@link CloudEvent CloudEvents} from a JSON string.
 *
 * @param cloudEventsJson the JSON payload containing one or more events.
 * @param skipValidation set to true to skip checking that each event has non-null
 *        'id', 'source' and 'type' attributes.
 * @return all of the events in the payload deserialized as {@link CloudEvent CloudEvents}.
 * @throws NullPointerException if {@code cloudEventsJson} is null.
 * @throws IllegalArgumentException if the input isn't a JSON string for a cloud event or an
 *         array of it, or validation is enabled and a mandatory attribute is missing.
 */
public static List<CloudEvent> fromString(String cloudEventsJson, boolean skipValidation) {
    if (cloudEventsJson == null) {
        throw LOGGER.logExceptionAsError(new NullPointerException("'cloudEventsJson' cannot be null"));
    }
    try {
        final ByteArrayInputStream payload =
            new ByteArrayInputStream(cloudEventsJson.getBytes(StandardCharsets.UTF_8));
        final List<CloudEvent> events = Arrays.asList(
            DESERIALIZER.deserialize(payload, TypeReference.createInstance(CloudEvent[].class)));
        if (skipValidation) {
            return events;
        }
        for (CloudEvent event : events) {
            final boolean missingMandatoryAttribute =
                event.getId() == null || event.getSource() == null || event.getType() == null;
            if (missingMandatoryAttribute) {
                throw LOGGER.logExceptionAsError(new IllegalArgumentException(
                    "'id', 'source' and 'type' are mandatory attributes for a CloudEvent. "
                        + "Check if the input param is a JSON string for a CloudEvent or an array of it."));
            }
        }
        return events;
    } catch (UncheckedIOException ioError) {
        throw LOGGER.logExceptionAsError(new IllegalArgumentException("The input parameter isn't a JSON string.",
            ioError.getCause()));
    }
}
List<CloudEvent> events = Arrays.asList(DESERIALIZER.deserialize(
/**
 * Deserialize one or more {@link CloudEvent CloudEvents} from a JSON string.
 *
 * @param cloudEventsJson the JSON payload containing one or more events.
 * @param skipValidation set to true to skip checking that each event has non-null
 *        'id', 'source' and 'type' attributes.
 * @return all of the events in the payload deserialized as {@link CloudEvent CloudEvents}.
 * @throws NullPointerException if {@code cloudEventsJson} is null.
 * @throws IllegalArgumentException if the input isn't a JSON string for a cloud event or an
 *         array of it, or validation is enabled and a mandatory attribute is missing.
 */
public static List<CloudEvent> fromString(String cloudEventsJson, boolean skipValidation) {
    Objects.requireNonNull(cloudEventsJson, "'cloudEventsJson' cannot be null");
    try {
        final List<CloudEvent> deserializedEvents = SERIALIZER.deserialize(
            new ByteArrayInputStream(cloudEventsJson.getBytes(StandardCharsets.UTF_8)),
            DESERIALIZER_TYPE_REFERENCE);
        if (!skipValidation) {
            for (CloudEvent deserializedEvent : deserializedEvents) {
                if (deserializedEvent.getId() == null || deserializedEvent.getSource() == null
                    || deserializedEvent.getType() == null) {
                    throw LOGGER.logExceptionAsError(new IllegalArgumentException(
                        "'id', 'source' and 'type' are mandatory attributes for a CloudEvent. "
                            + "Check if the input param is a JSON string for a CloudEvent or an array of it."));
                }
            }
        }
        return deserializedEvents;
    } catch (UncheckedIOException uncheckedIOException) {
        throw LOGGER.logExceptionAsError(new IllegalArgumentException("The input parameter isn't a JSON string.",
            uncheckedIOException.getCause()));
    }
}
class CloudEvent { private static final String SPEC_VERSION = "1.0"; private static final JsonSerializer DESERIALIZER = JsonSerializerProviders.createInstance(); private static final ClientLogger LOGGER = new ClientLogger(CloudEvent.class); private static final Set<String> RESERVED_ATTRIBUTE_NAMES = new HashSet<>(Arrays.asList( "specversion", "id", "source", "type", "datacontenttype", "dataschema", "subject", "time", "data" )); /* * An identifier for the event. The combination of id and source must be * unique for each distinct event. */ @JsonProperty(value = "id", required = true) private String id; /* * Identifies the context in which an event happened. The combination of id * and source must be unique for each distinct event. */ @JsonProperty(value = "source", required = true) private String source; /* * Event data specific to the event type. */ @JsonProperty(value = "data") private Object data; /* * Event data specific to the event type, encoded as a base64 string. */ @JsonProperty(value = "data_base64") private String dataBase64; /* * Type of event related to the originating occurrence. */ @JsonProperty(value = "type", required = true) private String type; /* * The time (in UTC) the event was generated, in RFC3339 format. */ @JsonProperty(value = "time") private OffsetDateTime time; /* * The version of the CloudEvents specification which the event uses. */ @JsonProperty(value = "specversion", required = true) private String specVersion; /* * Identifies the schema that data adheres to. */ @JsonProperty(value = "dataschema") private String dataSchema; /* * Content type of data value. */ @JsonProperty(value = "datacontenttype") private String dataContentType; /* * This describes the subject of the event in the context of the event * producer (identified by source). 
*/ @JsonProperty(value = "subject") private String subject; @JsonIgnore private Map<String, Object> extensionAttributes; /* * Cache serialized data for getData() */ @JsonIgnore private BinaryData binaryData; /** * * @param source Identifies the context in which an event happened. The combination of id and source must be unique * for each distinct event. * @param type Type of event related to the originating occurrence. * @param data A {@link BinaryData} that wraps the original data, which can be a String, byte[], or model class. * @param format Set to {@link CloudEventDataFormat * {@link CloudEventDataFormat * @param dataContentType The content type of the data. It has no impact on how the data is serialized but tells * the event subscriber how to use the data. Typically the value is of MIME types such as * "application/json", "text/plain", "text/xml", "application/+avro", etc. It can be null. * @throws NullPointerException if source, type, data, or format is null. * @throws IllegalStateException if source isn't in a URI-formatted string. 
*/ public CloudEvent(String source, String type, BinaryData data, CloudEventDataFormat format, String dataContentType) { if (Objects.isNull(source)) { throw LOGGER.logExceptionAsError(new NullPointerException("'source' cannot be null.")); } if (Objects.isNull(type)) { throw LOGGER.logExceptionAsError(new NullPointerException("'type' cannot be null.")); } if (Objects.isNull(data)) { throw LOGGER.logExceptionAsError(new NullPointerException("'data' cannot be null.")); } if (Objects.isNull(format)) { throw LOGGER.logExceptionAsError(new NullPointerException("'format' cannot be null.")); } this.source = source; this.type = type; if (CloudEventDataFormat.BYTES == format) { this.dataBase64 = Base64.getEncoder().encodeToString(data.toBytes()); } else { this.data = data.toString(); } this.dataContentType = dataContentType; this.id = UUID.randomUUID().toString(); this.specVersion = CloudEvent.SPEC_VERSION; } private CloudEvent() { } /** * Deserialize the {@link CloudEvent CloudEvents} from a JSON string and validate whether any CloudEvents have * null id', 'source', or 'type'. If you want to skip this validation, use {@link * @param cloudEventsJson the JSON payload containing one or more events. * * @return all of the events in the payload deserialized as {@link CloudEvent CloudEvents}. * @throws NullPointerException if cloudEventsJson is null. * @throws IllegalArgumentException if the input parameter isn't a correct JSON string for a cloud event * or an array of it, or any deserialized CloudEvents have null 'id', 'source', or 'type'. */ public static List<CloudEvent> fromString(String cloudEventsJson) { return fromString(cloudEventsJson, false); } /** * Deserialize the {@link CloudEvent CloudEvents} from a JSON string. * @param cloudEventsJson the JSON payload containing one or more events. * @param skipValidation set to true if you'd like to skip the validation for the deserialized CloudEvents. A valid * CloudEvent should have 'id', 'source' and 'type' not null. 
* * @return all of the events in the payload deserialized as {@link CloudEvent CloudEvents}. * @throws NullPointerException if cloudEventsJson is null. * @throws IllegalArgumentException if the input parameter isn't a JSON string for a cloud event or an array of it, * or skipValidation is false and any CloudEvents have null id', 'source', or 'type'. */ /** * Get the id of the cloud event. * @return the id. */ public String getId() { return this.id; } /** * Set a custom id. Note that a random id is already set by default. * @param id the id to set. * * @return the cloud event itself. * @throws NullPointerException if id is null. * @throws IllegalArgumentException if id is empty. */ public CloudEvent setId(String id) { if (Objects.isNull(id)) { throw LOGGER.logExceptionAsError(new NullPointerException("id cannot be null")); } if (id.isEmpty()) { throw LOGGER.logExceptionAsError(new IllegalArgumentException("id cannot be empty")); } this.id = id; return this; } /** * Get the URI source of the event. * @return the source. */ public String getSource() { return this.source; } /** * Get the data associated with this event as a {@link BinaryData}, which has API to deserialize the data into * a String, an Object, or a byte[]. * @return A {@link BinaryData} that wraps the this event's data payload. */ public BinaryData getData() { if (this.binaryData == null) { if (this.data != null) { if (this.data instanceof String) { this.binaryData = BinaryData.fromString((String) this.data); } else if (this.data instanceof byte[]) { this.binaryData = BinaryData.fromBytes((byte[]) this.data); } else { this.binaryData = BinaryData.fromObject(this.data); } } else if (this.dataBase64 != null) { this.binaryData = BinaryData.fromString(this.dataBase64); } } return this.binaryData; } /** * Get the type of event, e.g. "Contoso.Items.ItemReceived". * @return the type of the event. */ public String getType() { return this.type; } /** * Get the time associated with the occurrence of the event. 
* @return the event time, or null if the time is not set. */ public OffsetDateTime getTime() { return this.time; } /** * Set the time associated with the occurrence of the event. * @param time the time to set. * * @return the cloud event itself. */ public CloudEvent setTime(OffsetDateTime time) { this.time = time; return this; } /** * Get the content MIME type that the data is in. A null value indicates that the data is either nonexistent or in the * "application/json" type. Note that "application/json" is still a possible value for this field. * @return the content type the data is in, or null if the data is nonexistent or in "application/json" format. */ public String getDataContentType() { return this.dataContentType; } /** * Get the schema that the data adheres to. * @return a URI of the data schema, or null if it is not set. */ public String getDataSchema() { return this.dataSchema; } /** * Set the schema that the data adheres to. * @param dataSchema a URI identifying the schema of the data. * * @return the cloud event itself. */ public CloudEvent setDataSchema(String dataSchema) { this.dataSchema = dataSchema; return this; } /** * Get the subject associated with this event. * @return the subject, or null if the subject was not set. */ public String getSubject() { return this.subject; } /** * Set the subject of the event. * @param subject the subject to set. * * @return the cloud event itself. */ public CloudEvent setSubject(String subject) { this.subject = subject; return this; } /** * Get a map of the additional user-defined attributes associated with this event. * @return the extension attributes as an unmodifiable map. */ @JsonAnyGetter public Map<String, Object> getExtensionAttributes() { return this.extensionAttributes; } /** * Add/Overwrite a single extension attribute to the cloud event. * @param name the name of the attribute. It must contains only alphanumeric characters and not be be any * CloudEvent reserved attribute names. 
* @param value the value to associate with the name. * * @return the cloud event itself. * @throws IllegalArgumentException if name format isn't correct. */ @JsonAnySetter public CloudEvent addExtensionAttribute(String name, Object value) { if (Objects.isNull(name)) { throw LOGGER.logExceptionAsError(new IllegalArgumentException("'name' cannot be null.")); } if (Objects.isNull(value)) { throw LOGGER.logExceptionAsError(new IllegalArgumentException("'value' cannot be null.")); } if (!validateAttributeName(name)) { throw LOGGER.logExceptionAsError(new IllegalArgumentException( "'name' must have only small-case alphanumeric characters and not be one of the CloudEvent reserved " + "attribute names")); } if (this.extensionAttributes == null) { this.extensionAttributes = new HashMap<>(); } this.extensionAttributes.put(name.toLowerCase(Locale.ENGLISH), value); return this; } /** * Get the spec version. Users don't need to access it because it's always 1.0. * Make it package level to test deserialization. * @return The spec version. */ String getSpecVersion() { return this.specVersion; } /** * Set the spec version. Users don't need to access it because it's always 1.0. * Make it package level to test serialization. * @return the cloud event itself. */ CloudEvent setSpecVersion(String specVersion) { this.specVersion = specVersion; return this; } private static boolean validateAttributeName(String name) { if (RESERVED_ATTRIBUTE_NAMES.contains(name)) { return false; } for (int i = 0; i < name.length(); i++) { char c = name.charAt(i); if (!((c >= 'a' && c <= 'z') || (c >= '0' && c <= '9'))) { return false; } } return true; } }
class accepts any String for compatibility with legacy systems. * @param type Type of event related to the originating occurrence. * @param data A {@link BinaryData}
CNCF CloudEvent Java SDK has this validation. Anyway, to make it consistent across all languages of Azure EG SDKs, I've removed it from Java.
/**
 * Returns true when the attribute name is at most 20 characters, consists solely of
 * lower-case ASCII letters and digits, and is not a reserved CloudEvent attribute name.
 */
private static boolean validateAttributeName(String name) {
    if (name.length() > 20 || RESERVED_ATTRIBUTE_NAMES.contains(name)) {
        return false;
    }
    for (char ch : name.toCharArray()) {
        final boolean isLowerAlpha = ch >= 'a' && ch <= 'z';
        final boolean isDigit = ch >= '0' && ch <= '9';
        if (!isLowerAlpha && !isDigit) {
            return false;
        }
    }
    return true;
}
if (name.length() > 20) {
/**
 * Returns true when the attribute name consists solely of lower-case ASCII letters and
 * digits and is not a reserved CloudEvent attribute name.
 */
private static boolean validateAttributeName(String name) {
    if (RESERVED_ATTRIBUTE_NAMES.contains(name)) {
        return false;
    }
    int index = 0;
    while (index < name.length()) {
        final char ch = name.charAt(index);
        if (!(('a' <= ch && ch <= 'z') || ('0' <= ch && ch <= '9'))) {
            return false;
        }
        index++;
    }
    return true;
}
class CloudEvent { private static final String SPEC_VERSION = "1.0"; private static final JsonSerializer DESERIALIZER = JsonSerializerProviders.createInstance(); private static final ClientLogger LOGGER = new ClientLogger(CloudEvent.class); private static final Set<String> RESERVED_ATTRIBUTE_NAMES = new HashSet<>(Arrays.asList( "specversion", "id", "source", "type", "datacontenttype", "dataschema", "subject", "time", "data" )); /* * An identifier for the event. The combination of id and source must be * unique for each distinct event. */ @JsonProperty(value = "id", required = true) private String id; /* * Identifies the context in which an event happened. The combination of id * and source must be unique for each distinct event. */ @JsonProperty(value = "source", required = true) private String source; /* * Event data specific to the event type. */ @JsonProperty(value = "data") private Object data; /* * Event data specific to the event type, encoded as a base64 string. */ @JsonProperty(value = "data_base64") private byte[] dataBase64; /* * Type of event related to the originating occurrence. */ @JsonProperty(value = "type", required = true) private String type; /* * The time (in UTC) the event was generated, in RFC3339 format. */ @JsonProperty(value = "time") private OffsetDateTime time; /* * The version of the CloudEvents specification which the event uses. */ @JsonProperty(value = "specversion", required = true) private String specVersion; /* * Identifies the schema that data adheres to. */ @JsonProperty(value = "dataschema") private String dataSchema; /* * Content type of data value. */ @JsonProperty(value = "datacontenttype") private String dataContentType; /* * This describes the subject of the event in the context of the event * producer (identified by source). 
*/ @JsonProperty(value = "subject") private String subject; @JsonIgnore private Map<String, Object> extensionAttributes; /* * Cache serialized data for getData() */ @JsonIgnore private BinaryData binaryData; private CloudEvent() { } public CloudEvent(String source, String type, Object data) { this(source, type, data, null); } public CloudEvent(String source, String type, Object data, String dataContentType) { this.source = source; this.type = type; if (data instanceof byte[]) { this.dataBase64 = Base64.getEncoder().encode((byte[]) data); } else { this.data = data; } this.dataContentType = dataContentType; this.id = UUID.randomUUID().toString(); this.specVersion = CloudEvent.SPEC_VERSION; } /** * Deserialize the {@link CloudEvent} from a JSON string. * @param cloudEventsJson the JSON payload containing one or more events. * * @return all of the events in the payload deserialized as {@link CloudEvent CloudEvents}. * @throws IllegalArgumentException if the input parameter isn't a JSON string for a cloud event or an array of it. */ public static List<CloudEvent> fromString(String cloudEventsJson) { try { List<CloudEvent> events = Arrays.asList(DESERIALIZER.deserialize( new ByteArrayInputStream(cloudEventsJson.getBytes(StandardCharsets.UTF_8)), TypeReference.createInstance(CloudEvent[].class))); for (CloudEvent event : events) { if (event.getSource() == null || event.getType() == null) { throw LOGGER.logExceptionAsError(new IllegalArgumentException( "'source' and 'type' are mandatory attributes for a CloudEvent. " + "Check if the input param is a JSON string for a CloudEvent or an array of it.")); } } return events; } catch (UncheckedIOException uncheckedIOException) { throw LOGGER.logExceptionAsError(new IllegalArgumentException("The input parameter isn't a JSON string.", uncheckedIOException.getCause())); } } /** * Get the id of the cloud event. * @return the id. */ public String getId() { return this.id; } /** * Set a custom id. 
Note that a random id is already set by default. * @param id the id to set. * * @return the cloud event itself. */ public CloudEvent setId(String id) { if (CoreUtils.isNullOrEmpty(id)) { throw new IllegalArgumentException("id cannot be null or empty"); } this.id = id; return this; } /** * Get the URI source of the event. * @return the source. */ public String getSource() { return this.source; } /** * Get the data associated with this event as a {@link BinaryData}, which has API to deserialize the data into * a String, an Object, or a byte[]. * @return A {@link BinaryData} that wraps the this event's data payload. */ public BinaryData getData() { if (this.binaryData == null) { if (this.data != null) { if (this.data instanceof String) { this.binaryData = BinaryData.fromString((String) this.data); } else if (this.data instanceof byte[]) { this.binaryData = BinaryData.fromBytes((byte[]) this.data); } else { this.binaryData = BinaryData.fromObject(this.data); } } else if (this.dataBase64 != null) { this.binaryData = BinaryData.fromBytes(this.dataBase64); } } return this.binaryData; } /** * Set the data content type with this event. * @param dataContentType the data content type to set. * @return the cloud event itself. */ public CloudEvent setDataContentType(String dataContentType) { this.dataContentType = dataContentType; return this; } /** * Get the type of event, e.g. "Contoso.Items.ItemReceived". * @return the type of the event. */ public String getType() { return this.type; } /** * Get the time associated with the occurrence of the event. * @return the event time, or null if the time is not set. */ public OffsetDateTime getTime() { return this.time; } /** * Set the time associated with the occurrence of the event. * @param time the time to set. * * @return the cloud event itself. */ public CloudEvent setTime(OffsetDateTime time) { this.time = time; return this; } /** * Get the content MIME type that the data is in. 
A null value indicates that the data is either nonexistent or in the * "application/json" type. Note that "application/json" is still a possible value for this field. * @return the content type the data is in, or null if the data is nonexistent or in "application/json" format. */ public String getDataContentType() { return this.dataContentType; } /** * Get the schema that the data adheres to. * @return a URI of the data schema, or null if it is not set. */ public String getDataSchema() { return this.dataSchema; } /** * Set the schema that the data adheres to. * @param dataSchema a URI identifying the schema of the data. * * @return the cloud event itself. */ public CloudEvent setDataSchema(String dataSchema) { this.dataSchema = dataSchema; return this; } /** * Get the subject associated with this event. * @return the subject, or null if the subject was not set. */ public String getSubject() { return this.subject; } /** * Set the subject of the event. * @param subject the subject to set. * * @return the cloud event itself. */ public CloudEvent setSubject(String subject) { this.subject = subject; return this; } /** * Get the spec version of this CloudEvent * @return the spec version, or null if the spec version was not set. */ public String getSpecVersion() { return specVersion; } /** * Get a map of the additional user-defined attributes associated with this event. * @return the extension attributes as an unmodifiable map. */ @JsonAnyGetter public Map<String, Object> getExtensionAttributes() { return this.extensionAttributes; } /** * Add/Overwrite a single extension attribute to the cloud event. The property name will be transformed * to lowercase and must not share a name with any reserved cloud event properties. * @param name the name of the attribute. * @param value the value to associate with the name. * * @return the cloud event itself. 
*/ @JsonAnySetter public CloudEvent addExtensionAttribute(String name, Object value) { if (!validateAttributeName(name)) { throw LOGGER.logExceptionAsError(new IllegalArgumentException( "'name' must have at most 20 alphanumeric characters and not be one of the CloudEvent attribute names")); } if (this.extensionAttributes == null) { this.extensionAttributes = new HashMap<>(); } this.extensionAttributes.put(name.toLowerCase(Locale.ENGLISH), value); return this; } }
class accepts any String for compatibility with legacy systems. * @param type Type of event related to the originating occurrence. * @param data A {@link BinaryData}
Now users have two options. `fromString(String cloudEventsJson)` and `fromString(String cloudEventsJson, bool skipValidation)`
public static List<CloudEvent> fromString(String cloudEventsJson) { try { List<CloudEvent> events = Arrays.asList(DESERIALIZER.deserialize( new ByteArrayInputStream(cloudEventsJson.getBytes(StandardCharsets.UTF_8)), TypeReference.createInstance(CloudEvent[].class))); for (CloudEvent event : events) { if (event.getSource() == null || event.getType() == null) { throw LOGGER.logExceptionAsError(new IllegalArgumentException( "'source' and 'type' are mandatory attributes for a CloudEvent. " + "Check if the input param is a JSON string for a CloudEvent or an array of it.")); } } return events; } catch (UncheckedIOException uncheckedIOException) { throw LOGGER.logExceptionAsError(new IllegalArgumentException("The input parameter isn't a JSON string.", uncheckedIOException.getCause())); } }
"'source' and 'type' are mandatory attributes for a CloudEvent. " +
public static List<CloudEvent> fromString(String cloudEventsJson) { return fromString(cloudEventsJson, false); }
class CloudEvent { private static final String SPEC_VERSION = "1.0"; private static final JsonSerializer DESERIALIZER = JsonSerializerProviders.createInstance(); private static final ClientLogger LOGGER = new ClientLogger(CloudEvent.class); private static final Set<String> RESERVED_ATTRIBUTE_NAMES = new HashSet<>(Arrays.asList( "specversion", "id", "source", "type", "datacontenttype", "dataschema", "subject", "time", "data" )); /* * An identifier for the event. The combination of id and source must be * unique for each distinct event. */ @JsonProperty(value = "id", required = true) private String id; /* * Identifies the context in which an event happened. The combination of id * and source must be unique for each distinct event. */ @JsonProperty(value = "source", required = true) private String source; /* * Event data specific to the event type. */ @JsonProperty(value = "data") private Object data; /* * Event data specific to the event type, encoded as a base64 string. */ @JsonProperty(value = "data_base64") private byte[] dataBase64; /* * Type of event related to the originating occurrence. */ @JsonProperty(value = "type", required = true) private String type; /* * The time (in UTC) the event was generated, in RFC3339 format. */ @JsonProperty(value = "time") private OffsetDateTime time; /* * The version of the CloudEvents specification which the event uses. */ @JsonProperty(value = "specversion", required = true) private String specVersion; /* * Identifies the schema that data adheres to. */ @JsonProperty(value = "dataschema") private String dataSchema; /* * Content type of data value. */ @JsonProperty(value = "datacontenttype") private String dataContentType; /* * This describes the subject of the event in the context of the event * producer (identified by source). 
*/ @JsonProperty(value = "subject") private String subject; @JsonIgnore private Map<String, Object> extensionAttributes; /* * Cache serialized data for getData() */ @JsonIgnore private BinaryData binaryData; private CloudEvent() { } public CloudEvent(String source, String type, Object data) { this(source, type, data, null); } public CloudEvent(String source, String type, Object data, String dataContentType) { this.source = source; this.type = type; if (data instanceof byte[]) { this.dataBase64 = Base64.getEncoder().encode((byte[]) data); } else { this.data = data; } this.dataContentType = dataContentType; this.id = UUID.randomUUID().toString(); this.specVersion = CloudEvent.SPEC_VERSION; } /** * Deserialize the {@link CloudEvent} from a JSON string. * @param cloudEventsJson the JSON payload containing one or more events. * * @return all of the events in the payload deserialized as {@link CloudEvent CloudEvents}. * @throws IllegalArgumentException if the input parameter isn't a JSON string for a cloud event or an array of it. */ /** * Get the id of the cloud event. * @return the id. */ public String getId() { return this.id; } /** * Set a custom id. Note that a random id is already set by default. * @param id the id to set. * * @return the cloud event itself. */ public CloudEvent setId(String id) { if (CoreUtils.isNullOrEmpty(id)) { throw new IllegalArgumentException("id cannot be null or empty"); } this.id = id; return this; } /** * Get the URI source of the event. * @return the source. */ public String getSource() { return this.source; } /** * Get the data associated with this event as a {@link BinaryData}, which has API to deserialize the data into * a String, an Object, or a byte[]. * @return A {@link BinaryData} that wraps the this event's data payload. 
*/ public BinaryData getData() { if (this.binaryData == null) { if (this.data != null) { if (this.data instanceof String) { this.binaryData = BinaryData.fromString((String) this.data); } else if (this.data instanceof byte[]) { this.binaryData = BinaryData.fromBytes((byte[]) this.data); } else { this.binaryData = BinaryData.fromObject(this.data); } } else if (this.dataBase64 != null) { this.binaryData = BinaryData.fromBytes(this.dataBase64); } } return this.binaryData; } /** * Set the data content type with this event. * @param dataContentType the data content type to set. * @return the cloud event itself. */ public CloudEvent setDataContentType(String dataContentType) { this.dataContentType = dataContentType; return this; } /** * Get the type of event, e.g. "Contoso.Items.ItemReceived". * @return the type of the event. */ public String getType() { return this.type; } /** * Get the time associated with the occurrence of the event. * @return the event time, or null if the time is not set. */ public OffsetDateTime getTime() { return this.time; } /** * Set the time associated with the occurrence of the event. * @param time the time to set. * * @return the cloud event itself. */ public CloudEvent setTime(OffsetDateTime time) { this.time = time; return this; } /** * Get the content MIME type that the data is in. A null value indicates that the data is either nonexistent or in the * "application/json" type. Note that "application/json" is still a possible value for this field. * @return the content type the data is in, or null if the data is nonexistent or in "application/json" format. */ public String getDataContentType() { return this.dataContentType; } /** * Get the schema that the data adheres to. * @return a URI of the data schema, or null if it is not set. */ public String getDataSchema() { return this.dataSchema; } /** * Set the schema that the data adheres to. * @param dataSchema a URI identifying the schema of the data. * * @return the cloud event itself. 
*/ public CloudEvent setDataSchema(String dataSchema) { this.dataSchema = dataSchema; return this; } /** * Get the subject associated with this event. * @return the subject, or null if the subject was not set. */ public String getSubject() { return this.subject; } /** * Set the subject of the event. * @param subject the subject to set. * * @return the cloud event itself. */ public CloudEvent setSubject(String subject) { this.subject = subject; return this; } /** * Get the spec version of this CloudEvent * @return the spec version, or null if the spec version was not set. */ public String getSpecVersion() { return specVersion; } /** * Get a map of the additional user-defined attributes associated with this event. * @return the extension attributes as an unmodifiable map. */ @JsonAnyGetter public Map<String, Object> getExtensionAttributes() { return this.extensionAttributes; } /** * Add/Overwrite a single extension attribute to the cloud event. The property name will be transformed * to lowercase and must not share a name with any reserved cloud event properties. * @param name the name of the attribute. * @param value the value to associate with the name. * * @return the cloud event itself. */ @JsonAnySetter public CloudEvent addExtensionAttribute(String name, Object value) { if (!validateAttributeName(name)) { throw LOGGER.logExceptionAsError(new IllegalArgumentException( "'name' must have at most 20 alphanumeric characters and not be one of the CloudEvent attribute names")); } if (this.extensionAttributes == null) { this.extensionAttributes = new HashMap<>(); } this.extensionAttributes.put(name.toLowerCase(Locale.ENGLISH), value); return this; } private static boolean validateAttributeName(String name) { if (name.length() > 20) { return false; } if (RESERVED_ATTRIBUTE_NAMES.contains(name)) { return false; } for (int i = 0; i < name.length(); i++) { char c = name.charAt(i); if (!((c >= 'a' && c <= 'z') || (c >= '0' && c <= '9'))) { return false; } } return true; } }
class accepts any String for compatibility with legacy systems. * @param type Type of event related to the originating occurrence. * @param data A {@link BinaryData}
This is from CloudEvent specification. So it's not just EventGrid service.
private static boolean validateAttributeName(String name) { if (name.length() > 20) { return false; } if (RESERVED_ATTRIBUTE_NAMES.contains(name)) { return false; } for (int i = 0; i < name.length(); i++) { char c = name.charAt(i); if (!((c >= 'a' && c <= 'z') || (c >= '0' && c <= '9'))) { return false; } } return true; }
return false;
private static boolean validateAttributeName(String name) { if (RESERVED_ATTRIBUTE_NAMES.contains(name)) { return false; } for (int i = 0; i < name.length(); i++) { char c = name.charAt(i); if (!((c >= 'a' && c <= 'z') || (c >= '0' && c <= '9'))) { return false; } } return true; }
class CloudEvent { private static final String SPEC_VERSION = "1.0"; private static final JsonSerializer DESERIALIZER = JsonSerializerProviders.createInstance(); private static final ClientLogger LOGGER = new ClientLogger(CloudEvent.class); private static final Set<String> RESERVED_ATTRIBUTE_NAMES = new HashSet<>(Arrays.asList( "specversion", "id", "source", "type", "datacontenttype", "dataschema", "subject", "time", "data" )); /* * An identifier for the event. The combination of id and source must be * unique for each distinct event. */ @JsonProperty(value = "id", required = true) private String id; /* * Identifies the context in which an event happened. The combination of id * and source must be unique for each distinct event. */ @JsonProperty(value = "source", required = true) private String source; /* * Event data specific to the event type. */ @JsonProperty(value = "data") private Object data; /* * Event data specific to the event type, encoded as a base64 string. */ @JsonProperty(value = "data_base64") private byte[] dataBase64; /* * Type of event related to the originating occurrence. */ @JsonProperty(value = "type", required = true) private String type; /* * The time (in UTC) the event was generated, in RFC3339 format. */ @JsonProperty(value = "time") private OffsetDateTime time; /* * The version of the CloudEvents specification which the event uses. */ @JsonProperty(value = "specversion", required = true) private String specVersion; /* * Identifies the schema that data adheres to. */ @JsonProperty(value = "dataschema") private String dataSchema; /* * Content type of data value. */ @JsonProperty(value = "datacontenttype") private String dataContentType; /* * This describes the subject of the event in the context of the event * producer (identified by source). 
*/ @JsonProperty(value = "subject") private String subject; @JsonIgnore private Map<String, Object> extensionAttributes; /* * Cache serialized data for getData() */ @JsonIgnore private BinaryData binaryData; private CloudEvent() { } public CloudEvent(String source, String type, Object data) { this(source, type, data, null); } public CloudEvent(String source, String type, Object data, String dataContentType) { this.source = source; this.type = type; if (data instanceof byte[]) { this.dataBase64 = Base64.getEncoder().encode((byte[]) data); } else { this.data = data; } this.dataContentType = dataContentType; this.id = UUID.randomUUID().toString(); this.specVersion = CloudEvent.SPEC_VERSION; } /** * Deserialize the {@link CloudEvent} from a JSON string. * @param cloudEventsJson the JSON payload containing one or more events. * * @return all of the events in the payload deserialized as {@link CloudEvent CloudEvents}. * @throws IllegalArgumentException if the input parameter isn't a JSON string for a cloud event or an array of it. */ public static List<CloudEvent> fromString(String cloudEventsJson) { try { List<CloudEvent> events = Arrays.asList(DESERIALIZER.deserialize( new ByteArrayInputStream(cloudEventsJson.getBytes(StandardCharsets.UTF_8)), TypeReference.createInstance(CloudEvent[].class))); for (CloudEvent event : events) { if (event.getSource() == null || event.getType() == null) { throw LOGGER.logExceptionAsError(new IllegalArgumentException( "'source' and 'type' are mandatory attributes for a CloudEvent. " + "Check if the input param is a JSON string for a CloudEvent or an array of it.")); } } return events; } catch (UncheckedIOException uncheckedIOException) { throw LOGGER.logExceptionAsError(new IllegalArgumentException("The input parameter isn't a JSON string.", uncheckedIOException.getCause())); } } /** * Get the id of the cloud event. * @return the id. */ public String getId() { return this.id; } /** * Set a custom id. 
Note that a random id is already set by default. * @param id the id to set. * * @return the cloud event itself. */ public CloudEvent setId(String id) { if (CoreUtils.isNullOrEmpty(id)) { throw new IllegalArgumentException("id cannot be null or empty"); } this.id = id; return this; } /** * Get the URI source of the event. * @return the source. */ public String getSource() { return this.source; } /** * Get the data associated with this event as a {@link BinaryData}, which has API to deserialize the data into * a String, an Object, or a byte[]. * @return A {@link BinaryData} that wraps the this event's data payload. */ public BinaryData getData() { if (this.binaryData == null) { if (this.data != null) { if (this.data instanceof String) { this.binaryData = BinaryData.fromString((String) this.data); } else if (this.data instanceof byte[]) { this.binaryData = BinaryData.fromBytes((byte[]) this.data); } else { this.binaryData = BinaryData.fromObject(this.data); } } else if (this.dataBase64 != null) { this.binaryData = BinaryData.fromBytes(this.dataBase64); } } return this.binaryData; } /** * Set the data content type with this event. * @param dataContentType the data content type to set. * @return the cloud event itself. */ public CloudEvent setDataContentType(String dataContentType) { this.dataContentType = dataContentType; return this; } /** * Get the type of event, e.g. "Contoso.Items.ItemReceived". * @return the type of the event. */ public String getType() { return this.type; } /** * Get the time associated with the occurrence of the event. * @return the event time, or null if the time is not set. */ public OffsetDateTime getTime() { return this.time; } /** * Set the time associated with the occurrence of the event. * @param time the time to set. * * @return the cloud event itself. */ public CloudEvent setTime(OffsetDateTime time) { this.time = time; return this; } /** * Get the content MIME type that the data is in. 
A null value indicates that the data is either nonexistent or in the * "application/json" type. Note that "application/json" is still a possible value for this field. * @return the content type the data is in, or null if the data is nonexistent or in "application/json" format. */ public String getDataContentType() { return this.dataContentType; } /** * Get the schema that the data adheres to. * @return a URI of the data schema, or null if it is not set. */ public String getDataSchema() { return this.dataSchema; } /** * Set the schema that the data adheres to. * @param dataSchema a URI identifying the schema of the data. * * @return the cloud event itself. */ public CloudEvent setDataSchema(String dataSchema) { this.dataSchema = dataSchema; return this; } /** * Get the subject associated with this event. * @return the subject, or null if the subject was not set. */ public String getSubject() { return this.subject; } /** * Set the subject of the event. * @param subject the subject to set. * * @return the cloud event itself. */ public CloudEvent setSubject(String subject) { this.subject = subject; return this; } /** * Get the spec version of this CloudEvent * @return the spec version, or null if the spec version was not set. */ public String getSpecVersion() { return specVersion; } /** * Get a map of the additional user-defined attributes associated with this event. * @return the extension attributes as an unmodifiable map. */ @JsonAnyGetter public Map<String, Object> getExtensionAttributes() { return this.extensionAttributes; } /** * Add/Overwrite a single extension attribute to the cloud event. The property name will be transformed * to lowercase and must not share a name with any reserved cloud event properties. * @param name the name of the attribute. * @param value the value to associate with the name. * * @return the cloud event itself. 
*/ @JsonAnySetter public CloudEvent addExtensionAttribute(String name, Object value) { if (!validateAttributeName(name)) { throw LOGGER.logExceptionAsError(new IllegalArgumentException( "'name' must have at most 20 alphanumeric characters and not be one of the CloudEvent attribute names")); } if (this.extensionAttributes == null) { this.extensionAttributes = new HashMap<>(); } this.extensionAttributes.put(name.toLowerCase(Locale.ENGLISH), value); return this; } }
class accepts any String for compatibility with legacy systems. * @param type Type of event related to the originating occurrence. * @param data A {@link BinaryData}
Good catch. Updated the java doc
public CloudEvent addExtensionAttribute(String name, Object value) { if (!validateAttributeName(name)) { throw LOGGER.logExceptionAsError(new IllegalArgumentException( "'name' must have at most 20 alphanumeric characters and not be one of the CloudEvent attribute names")); } if (this.extensionAttributes == null) { this.extensionAttributes = new HashMap<>(); } this.extensionAttributes.put(name.toLowerCase(Locale.ENGLISH), value); return this; }
throw LOGGER.logExceptionAsError(new IllegalArgumentException(
public CloudEvent addExtensionAttribute(String name, Object value) { Objects.requireNonNull(name, "'name' cannot be null."); Objects.requireNonNull(value, "'value' cannot be null."); if (!validateAttributeName(name)) { throw LOGGER.logExceptionAsError(new IllegalArgumentException( "Extension attribute 'name' must have only lower-case alphanumeric characters and not be one of the " + "CloudEvent reserved attribute names: " + String.join(",", RESERVED_ATTRIBUTE_NAMES))); } if (this.extensionAttributes == null) { this.extensionAttributes = new HashMap<>(); } this.extensionAttributes.put(name, value); return this; }
class CloudEvent { private static final String SPEC_VERSION = "1.0"; private static final JsonSerializer DESERIALIZER = JsonSerializerProviders.createInstance(); private static final ClientLogger LOGGER = new ClientLogger(CloudEvent.class); private static final Set<String> RESERVED_ATTRIBUTE_NAMES = new HashSet<>(Arrays.asList( "specversion", "id", "source", "type", "datacontenttype", "dataschema", "subject", "time", "data" )); /* * An identifier for the event. The combination of id and source must be * unique for each distinct event. */ @JsonProperty(value = "id", required = true) private String id; /* * Identifies the context in which an event happened. The combination of id * and source must be unique for each distinct event. */ @JsonProperty(value = "source", required = true) private String source; /* * Event data specific to the event type. */ @JsonProperty(value = "data") private Object data; /* * Event data specific to the event type, encoded as a base64 string. */ @JsonProperty(value = "data_base64") private byte[] dataBase64; /* * Type of event related to the originating occurrence. */ @JsonProperty(value = "type", required = true) private String type; /* * The time (in UTC) the event was generated, in RFC3339 format. */ @JsonProperty(value = "time") private OffsetDateTime time; /* * The version of the CloudEvents specification which the event uses. */ @JsonProperty(value = "specversion", required = true) private String specVersion; /* * Identifies the schema that data adheres to. */ @JsonProperty(value = "dataschema") private String dataSchema; /* * Content type of data value. */ @JsonProperty(value = "datacontenttype") private String dataContentType; /* * This describes the subject of the event in the context of the event * producer (identified by source). 
*/ @JsonProperty(value = "subject") private String subject; @JsonIgnore private Map<String, Object> extensionAttributes; /* * Cache serialized data for getData() */ @JsonIgnore private BinaryData binaryData; private CloudEvent() { } public CloudEvent(String source, String type, Object data) { this(source, type, data, null); } public CloudEvent(String source, String type, Object data, String dataContentType) { this.source = source; this.type = type; if (data instanceof byte[]) { this.dataBase64 = Base64.getEncoder().encode((byte[]) data); } else { this.data = data; } this.dataContentType = dataContentType; this.id = UUID.randomUUID().toString(); this.specVersion = CloudEvent.SPEC_VERSION; } /** * Deserialize the {@link CloudEvent} from a JSON string. * @param cloudEventsJson the JSON payload containing one or more events. * * @return all of the events in the payload deserialized as {@link CloudEvent CloudEvents}. * @throws IllegalArgumentException if the input parameter isn't a JSON string for a cloud event or an array of it. */ public static List<CloudEvent> fromString(String cloudEventsJson) { try { List<CloudEvent> events = Arrays.asList(DESERIALIZER.deserialize( new ByteArrayInputStream(cloudEventsJson.getBytes(StandardCharsets.UTF_8)), TypeReference.createInstance(CloudEvent[].class))); for (CloudEvent event : events) { if (event.getSource() == null || event.getType() == null) { throw LOGGER.logExceptionAsError(new IllegalArgumentException( "'source' and 'type' are mandatory attributes for a CloudEvent. " + "Check if the input param is a JSON string for a CloudEvent or an array of it.")); } } return events; } catch (UncheckedIOException uncheckedIOException) { throw LOGGER.logExceptionAsError(new IllegalArgumentException("The input parameter isn't a JSON string.", uncheckedIOException.getCause())); } } /** * Get the id of the cloud event. * @return the id. */ public String getId() { return this.id; } /** * Set a custom id. 
Note that a random id is already set by default. * @param id the id to set. * * @return the cloud event itself. */ public CloudEvent setId(String id) { if (CoreUtils.isNullOrEmpty(id)) { throw new IllegalArgumentException("id cannot be null or empty"); } this.id = id; return this; } /** * Get the URI source of the event. * @return the source. */ public String getSource() { return this.source; } /** * Get the data associated with this event as a {@link BinaryData}, which has API to deserialize the data into * a String, an Object, or a byte[]. * @return A {@link BinaryData} that wraps the this event's data payload. */ public BinaryData getData() { if (this.binaryData == null) { if (this.data != null) { if (this.data instanceof String) { this.binaryData = BinaryData.fromString((String) this.data); } else if (this.data instanceof byte[]) { this.binaryData = BinaryData.fromBytes((byte[]) this.data); } else { this.binaryData = BinaryData.fromObject(this.data); } } else if (this.dataBase64 != null) { this.binaryData = BinaryData.fromBytes(this.dataBase64); } } return this.binaryData; } /** * Set the data content type with this event. * @param dataContentType the data content type to set. * @return the cloud event itself. */ public CloudEvent setDataContentType(String dataContentType) { this.dataContentType = dataContentType; return this; } /** * Get the type of event, e.g. "Contoso.Items.ItemReceived". * @return the type of the event. */ public String getType() { return this.type; } /** * Get the time associated with the occurrence of the event. * @return the event time, or null if the time is not set. */ public OffsetDateTime getTime() { return this.time; } /** * Set the time associated with the occurrence of the event. * @param time the time to set. * * @return the cloud event itself. */ public CloudEvent setTime(OffsetDateTime time) { this.time = time; return this; } /** * Get the content MIME type that the data is in. 
A null value indicates that the data is either nonexistent or in the * "application/json" type. Note that "application/json" is still a possible value for this field. * @return the content type the data is in, or null if the data is nonexistent or in "application/json" format. */ public String getDataContentType() { return this.dataContentType; } /** * Get the schema that the data adheres to. * @return a URI of the data schema, or null if it is not set. */ public String getDataSchema() { return this.dataSchema; } /** * Set the schema that the data adheres to. * @param dataSchema a URI identifying the schema of the data. * * @return the cloud event itself. */ public CloudEvent setDataSchema(String dataSchema) { this.dataSchema = dataSchema; return this; } /** * Get the subject associated with this event. * @return the subject, or null if the subject was not set. */ public String getSubject() { return this.subject; } /** * Set the subject of the event. * @param subject the subject to set. * * @return the cloud event itself. */ public CloudEvent setSubject(String subject) { this.subject = subject; return this; } /** * Get the spec version of this CloudEvent * @return the spec version, or null if the spec version was not set. */ public String getSpecVersion() { return specVersion; } /** * Get a map of the additional user-defined attributes associated with this event. * @return the extension attributes as an unmodifiable map. */ @JsonAnyGetter public Map<String, Object> getExtensionAttributes() { return this.extensionAttributes; } /** * Add/Overwrite a single extension attribute to the cloud event. The property name will be transformed * to lowercase and must not share a name with any reserved cloud event properties. * @param name the name of the attribute. * @param value the value to associate with the name. * * @return the cloud event itself. 
*/ @JsonAnySetter private static boolean validateAttributeName(String name) { if (name.length() > 20) { return false; } if (RESERVED_ATTRIBUTE_NAMES.contains(name)) { return false; } for (int i = 0; i < name.length(); i++) { char c = name.charAt(i); if (!((c >= 'a' && c <= 'z') || (c >= '0' && c <= '9'))) { return false; } } return true; } }
class accepts any String for compatibility with legacy systems. * @param type Type of event related to the originating occurrence. * @param data A {@link BinaryData}
Good catch. This has no use since we checked whether it's lower case.
public CloudEvent addExtensionAttribute(String name, Object value) { if (Objects.isNull(name)) { throw LOGGER.logExceptionAsError(new IllegalArgumentException("'name' cannot be null.")); } if (Objects.isNull(value)) { throw LOGGER.logExceptionAsError(new IllegalArgumentException("'value' cannot be null.")); } if (!validateAttributeName(name)) { throw LOGGER.logExceptionAsError(new IllegalArgumentException( "'name' must have only small-case alphanumeric characters and not be one of the CloudEvent reserved " + "attribute names")); } if (this.extensionAttributes == null) { this.extensionAttributes = new HashMap<>(); } this.extensionAttributes.put(name.toLowerCase(Locale.ENGLISH), value); return this; }
this.extensionAttributes.put(name.toLowerCase(Locale.ENGLISH), value);
public CloudEvent addExtensionAttribute(String name, Object value) { Objects.requireNonNull(name, "'name' cannot be null."); Objects.requireNonNull(value, "'value' cannot be null."); if (!validateAttributeName(name)) { throw LOGGER.logExceptionAsError(new IllegalArgumentException( "Extension attribute 'name' must have only lower-case alphanumeric characters and not be one of the " + "CloudEvent reserved attribute names: " + String.join(",", RESERVED_ATTRIBUTE_NAMES))); } if (this.extensionAttributes == null) { this.extensionAttributes = new HashMap<>(); } this.extensionAttributes.put(name, value); return this; }
class CloudEvent { private static final String SPEC_VERSION = "1.0"; private static final JsonSerializer SERIALIZER; static { JsonSerializer tmp; try { tmp = JsonSerializerProviders.createInstance(); } catch (IllegalStateException e) { tmp = new JacksonSerializer(); } SERIALIZER = tmp; } private static final ClientLogger LOGGER = new ClientLogger(CloudEvent.class); private static final Set<String> RESERVED_ATTRIBUTE_NAMES = new HashSet<>(Arrays.asList( "specversion", "id", "source", "type", "datacontenttype", "dataschema", "subject", "time", "data" )); /* * An identifier for the event. The combination of id and source must be * unique for each distinct event. */ @JsonProperty(value = "id", required = true) private String id; /* * Identifies the context in which an event happened. The combination of id * and source must be unique for each distinct event. */ @JsonProperty(value = "source", required = true) private String source; /* * Event data specific to the event type. */ @JsonProperty(value = "data") private Object data; /* * Event data specific to the event type, encoded as a base64 string. */ @JsonProperty(value = "data_base64") private String dataBase64; /* * Type of event related to the originating occurrence. */ @JsonProperty(value = "type", required = true) private String type; /* * The time (in UTC) the event was generated, in RFC3339 format. */ @JsonProperty(value = "time") private OffsetDateTime time; /* * The version of the CloudEvents specification which the event uses. */ @JsonProperty(value = "specversion", required = true) private String specVersion; /* * Identifies the schema that data adheres to. */ @JsonProperty(value = "dataschema") private String dataSchema; /* * Content type of data value. */ @JsonProperty(value = "datacontenttype") private String dataContentType; /* * This describes the subject of the event in the context of the event * producer (identified by source). 
*/ @JsonProperty(value = "subject") private String subject; @JsonIgnore private Map<String, Object> extensionAttributes; /* * Cache serialized data for getData() */ @JsonIgnore private BinaryData binaryData; /** * * @param source Identifies the context in which an event happened. The combination of id and source must be unique * for each distinct event. * @param type Type of event related to the originating occurrence. * @param data A {@link BinaryData} that wraps the original data, which can be a String, byte[], or model class. * @param format Set to {@link CloudEventDataFormat * {@link CloudEventDataFormat * @param dataContentType The content type of the data. It has no impact on how the data is serialized but tells * the event subscriber how to use the data. Typically the value is of MIME types such as * "application/json", "text/plain", "text/xml", "application/+avro", etc. It can be null. * @throws NullPointerException if source, type, data, or format is null. */ public CloudEvent(String source, String type, BinaryData data, CloudEventDataFormat format, String dataContentType) { if (Objects.isNull(source)) { throw LOGGER.logExceptionAsError(new NullPointerException("'source' cannot be null.")); } if (Objects.isNull(type)) { throw LOGGER.logExceptionAsError(new NullPointerException("'type' cannot be null.")); } if (Objects.isNull(data)) { throw LOGGER.logExceptionAsError(new NullPointerException("'data' cannot be null.")); } if (Objects.isNull(format)) { throw LOGGER.logExceptionAsError(new NullPointerException("'format' cannot be null.")); } this.source = source; this.type = type; if (CloudEventDataFormat.BYTES == format) { this.dataBase64 = Base64.getEncoder().encodeToString(data.toBytes()); } else { this.data = data.toString(); } this.dataContentType = dataContentType; this.id = UUID.randomUUID().toString(); this.specVersion = CloudEvent.SPEC_VERSION; } private CloudEvent() { } /** * Deserialize a list of {@link CloudEvent CloudEvents} from a JSON string and 
validate whether any CloudEvents have * null id', 'source', or 'type'. If you want to skip this validation, use {@link * @param cloudEventsJson the JSON payload containing one or more events. * * @return all of the events in the payload deserialized as {@link CloudEvent CloudEvents}. * @throws NullPointerException if cloudEventsJson is null. * @throws IllegalArgumentException if the input parameter isn't a correct JSON string for a cloud event * or an array of it, or any deserialized CloudEvents have null 'id', 'source', or 'type'. */ public static List<CloudEvent> fromString(String cloudEventsJson) { return fromString(cloudEventsJson, false); } /** * Deserialize a list of {@link CloudEvent CloudEvents} from a JSON string. * @param cloudEventsJson the JSON payload containing one or more events. * @param skipValidation set to true if you'd like to skip the validation for the deserialized CloudEvents. A valid * CloudEvent should have 'id', 'source' and 'type' not null. * * @return all of the events in the payload deserialized as {@link CloudEvent CloudEvents}. * @throws NullPointerException if cloudEventsJson is null. * @throws IllegalArgumentException if the input parameter isn't a JSON string for a cloud event or an array of it, * or skipValidation is false and any CloudEvents have null id', 'source', or 'type'. 
*/ public static List<CloudEvent> fromString(String cloudEventsJson, boolean skipValidation) { if (cloudEventsJson == null) { throw LOGGER.logExceptionAsError(new NullPointerException("'cloudEventsJson' cannot be null")); } try { List<CloudEvent> events = Arrays.asList(SERIALIZER.deserialize( new ByteArrayInputStream(cloudEventsJson.getBytes(StandardCharsets.UTF_8)), TypeReference.createInstance(CloudEvent[].class))); if (!skipValidation) { for (CloudEvent event : events) { if (event.getId() == null || event.getSource() == null || event.getType() == null) { throw LOGGER.logExceptionAsError(new IllegalArgumentException( "'id', 'source' and 'type' are mandatory attributes for a CloudEvent. " + "Check if the input param is a JSON string for a CloudEvent or an array of it.")); } } } return events; } catch (UncheckedIOException uncheckedIOException) { throw LOGGER.logExceptionAsError(new IllegalArgumentException("The input parameter isn't a JSON string.", uncheckedIOException.getCause())); } } /** * Get the id of the cloud event. * @return the id. */ public String getId() { return this.id; } /** * Set a custom id. Note that a random id is already set by default. * @param id the id to set. * * @return the cloud event itself. * @throws NullPointerException if id is null. * @throws IllegalArgumentException if id is empty. */ public CloudEvent setId(String id) { if (Objects.isNull(id)) { throw LOGGER.logExceptionAsError(new NullPointerException("id cannot be null")); } if (id.isEmpty()) { throw LOGGER.logExceptionAsError(new IllegalArgumentException("id cannot be empty")); } this.id = id; return this; } /** * Get the URI source of the event. * @return the source. */ public String getSource() { return this.source; } /** * Get the data associated with this event as a {@link BinaryData}, which has API to deserialize the data into * a String, an Object, or a byte[]. * @return A {@link BinaryData} that wraps the this event's data payload. 
*/ public BinaryData getData() { if (this.binaryData == null) { if (this.data != null) { if (this.data instanceof String) { this.binaryData = BinaryData.fromString((String) this.data); } else if (this.data instanceof byte[]) { this.binaryData = BinaryData.fromBytes((byte[]) this.data); } else { this.binaryData = BinaryData.fromObject(this.data, SERIALIZER); } } else if (this.dataBase64 != null) { this.binaryData = BinaryData.fromString(this.dataBase64); } } return this.binaryData; } /** * Get the type of event, e.g. "Contoso.Items.ItemReceived". * @return the type of the event. */ public String getType() { return this.type; } /** * Get the time associated with the occurrence of the event. * @return the event time, or null if the time is not set. */ public OffsetDateTime getTime() { return this.time; } /** * Set the time associated with the occurrence of the event. * @param time the time to set. * * @return the cloud event itself. */ public CloudEvent setTime(OffsetDateTime time) { this.time = time; return this; } /** * Get the content MIME type that the data is in. A null value indicates that the data is either nonexistent or in the * "application/json" type. Note that "application/json" is still a possible value for this field. * @return the content type the data is in, or null if the data is nonexistent or in "application/json" format. */ public String getDataContentType() { return this.dataContentType; } /** * Get the schema that the data adheres to. * @return a URI of the data schema, or null if it is not set. */ public String getDataSchema() { return this.dataSchema; } /** * Set the schema that the data adheres to. * @param dataSchema a URI identifying the schema of the data. * * @return the cloud event itself. */ public CloudEvent setDataSchema(String dataSchema) { this.dataSchema = dataSchema; return this; } /** * Get the subject associated with this event. * @return the subject, or null if the subject was not set. 
*/ public String getSubject() { return this.subject; } /** * Set the subject of the event. * @param subject the subject to set. * * @return the cloud event itself. */ public CloudEvent setSubject(String subject) { this.subject = subject; return this; } /** * Get a map of the additional user-defined attributes associated with this event. * @return the extension attributes as an unmodifiable map. */ @JsonAnyGetter public Map<String, Object> getExtensionAttributes() { return this.extensionAttributes; } /** * Add/Overwrite a single extension attribute to the cloud event. * @param name the name of the attribute. It must contains only alphanumeric characters and not be be any * CloudEvent reserved attribute names. * @param value the value to associate with the name. * * @return the cloud event itself. * @throws IllegalArgumentException if name format isn't correct. */ @JsonAnySetter /** * Get the spec version. Users don't need to access it because it's always 1.0. * Make it package level to test deserialization. * @return The spec version. */ String getSpecVersion() { return this.specVersion; } /** * Set the spec version. Users don't need to access it because it's always 1.0. * Make it package level to test serialization. * @return the cloud event itself. 
*/ CloudEvent setSpecVersion(String specVersion) { this.specVersion = specVersion; return this; } private static boolean validateAttributeName(String name) { if (RESERVED_ATTRIBUTE_NAMES.contains(name)) { return false; } for (int i = 0; i < name.length(); i++) { char c = name.charAt(i); if (!((c >= 'a' && c <= 'z') || (c >= '0' && c <= '9'))) { return false; } } return true; } static class JacksonSerializer implements JsonSerializer { private final JacksonAdapter jacksonAdapter = new JacksonAdapter(); @Override public <T> T deserialize(InputStream stream, TypeReference<T> typeReference) { try { return jacksonAdapter.deserialize(stream, typeReference.getJavaType(), SerializerEncoding.JSON); } catch (IOException e) { throw LOGGER.logExceptionAsError(new RuntimeException(e)); } } @Override public <T> Mono<T> deserializeAsync(InputStream stream, TypeReference<T> typeReference) { return Mono.defer(() -> Mono.just(deserialize(stream, typeReference))); } @Override public void serialize(OutputStream stream, Object value) { try { jacksonAdapter.serialize(value, SerializerEncoding.JSON, stream); } catch (IOException e) { throw LOGGER.logExceptionAsError(new RuntimeException(e)); } } @Override public Mono<Void> serializeAsync(OutputStream stream, Object value) { return Mono.fromRunnable(() -> serialize(stream, value)); } JacksonAdapter getJacksonAdapter() { return jacksonAdapter; } } }
class accepts any String for compatibility with legacy systems. * @param type Type of event related to the originating occurrence. * @param data A {@link BinaryData}
Updated CloudEvent to use JacksonAdapter if no JsonSerializer implementation is found.
public static List<CloudEvent> fromString(String cloudEventsJson, boolean skipValidation) { if (cloudEventsJson == null) { throw LOGGER.logExceptionAsError(new NullPointerException("'cloudEventsJson' cannot be null")); } try { List<CloudEvent> events = Arrays.asList(DESERIALIZER.deserialize( new ByteArrayInputStream(cloudEventsJson.getBytes(StandardCharsets.UTF_8)), TypeReference.createInstance(CloudEvent[].class))); if (!skipValidation) { for (CloudEvent event : events) { if (event.getId() == null || event.getSource() == null || event.getType() == null) { throw LOGGER.logExceptionAsError(new IllegalArgumentException( "'id', 'source' and 'type' are mandatory attributes for a CloudEvent. " + "Check if the input param is a JSON string for a CloudEvent or an array of it.")); } } } return events; } catch (UncheckedIOException uncheckedIOException) { throw LOGGER.logExceptionAsError(new IllegalArgumentException("The input parameter isn't a JSON string.", uncheckedIOException.getCause())); } }
List<CloudEvent> events = Arrays.asList(DESERIALIZER.deserialize(
public static List<CloudEvent> fromString(String cloudEventsJson, boolean skipValidation) { Objects.requireNonNull(cloudEventsJson, "'cloudEventsJson' cannot be null"); try { List<CloudEvent> events = SERIALIZER.deserialize( new ByteArrayInputStream(cloudEventsJson.getBytes(StandardCharsets.UTF_8)), DESERIALIZER_TYPE_REFERENCE); if (skipValidation) { return events; } for (CloudEvent event : events) { if (event.getId() == null || event.getSource() == null || event.getType() == null) { throw LOGGER.logExceptionAsError(new IllegalArgumentException( "'id', 'source' and 'type' are mandatory attributes for a CloudEvent. " + "Check if the input param is a JSON string for a CloudEvent or an array of it.")); } } return events; } catch (UncheckedIOException uncheckedIOException) { throw LOGGER.logExceptionAsError(new IllegalArgumentException("The input parameter isn't a JSON string.", uncheckedIOException.getCause())); } }
class CloudEvent { private static final String SPEC_VERSION = "1.0"; private static final JsonSerializer DESERIALIZER = JsonSerializerProviders.createInstance(); private static final ClientLogger LOGGER = new ClientLogger(CloudEvent.class); private static final Set<String> RESERVED_ATTRIBUTE_NAMES = new HashSet<>(Arrays.asList( "specversion", "id", "source", "type", "datacontenttype", "dataschema", "subject", "time", "data" )); /* * An identifier for the event. The combination of id and source must be * unique for each distinct event. */ @JsonProperty(value = "id", required = true) private String id; /* * Identifies the context in which an event happened. The combination of id * and source must be unique for each distinct event. */ @JsonProperty(value = "source", required = true) private String source; /* * Event data specific to the event type. */ @JsonProperty(value = "data") private Object data; /* * Event data specific to the event type, encoded as a base64 string. */ @JsonProperty(value = "data_base64") private String dataBase64; /* * Type of event related to the originating occurrence. */ @JsonProperty(value = "type", required = true) private String type; /* * The time (in UTC) the event was generated, in RFC3339 format. */ @JsonProperty(value = "time") private OffsetDateTime time; /* * The version of the CloudEvents specification which the event uses. */ @JsonProperty(value = "specversion", required = true) private String specVersion; /* * Identifies the schema that data adheres to. */ @JsonProperty(value = "dataschema") private String dataSchema; /* * Content type of data value. */ @JsonProperty(value = "datacontenttype") private String dataContentType; /* * This describes the subject of the event in the context of the event * producer (identified by source). 
*/ @JsonProperty(value = "subject") private String subject; @JsonIgnore private Map<String, Object> extensionAttributes; /* * Cache serialized data for getData() */ @JsonIgnore private BinaryData binaryData; /** * * @param source Identifies the context in which an event happened. The combination of id and source must be unique * for each distinct event. * @param type Type of event related to the originating occurrence. * @param data A {@link BinaryData} that wraps the original data, which can be a String, byte[], or model class. * @param format Set to {@link CloudEventDataFormat * {@link CloudEventDataFormat * @param dataContentType The content type of the data. It has no impact on how the data is serialized but tells * the event subscriber how to use the data. Typically the value is of MIME types such as * "application/json", "text/plain", "text/xml", "application/+avro", etc. It can be null. * @throws NullPointerException if source, type, data, or format is null. * @throws IllegalStateException if source isn't in a URI-formatted string. 
*/ public CloudEvent(String source, String type, BinaryData data, CloudEventDataFormat format, String dataContentType) { if (Objects.isNull(source)) { throw LOGGER.logExceptionAsError(new NullPointerException("'source' cannot be null.")); } if (Objects.isNull(type)) { throw LOGGER.logExceptionAsError(new NullPointerException("'type' cannot be null.")); } if (Objects.isNull(data)) { throw LOGGER.logExceptionAsError(new NullPointerException("'data' cannot be null.")); } if (Objects.isNull(format)) { throw LOGGER.logExceptionAsError(new NullPointerException("'format' cannot be null.")); } this.source = source; this.type = type; if (CloudEventDataFormat.BYTES == format) { this.dataBase64 = Base64.getEncoder().encodeToString(data.toBytes()); } else { this.data = data.toString(); } this.dataContentType = dataContentType; this.id = UUID.randomUUID().toString(); this.specVersion = CloudEvent.SPEC_VERSION; } private CloudEvent() { } /** * Deserialize the {@link CloudEvent CloudEvents} from a JSON string and validate whether any CloudEvents have * null id', 'source', or 'type'. If you want to skip this validation, use {@link * @param cloudEventsJson the JSON payload containing one or more events. * * @return all of the events in the payload deserialized as {@link CloudEvent CloudEvents}. * @throws NullPointerException if cloudEventsJson is null. * @throws IllegalArgumentException if the input parameter isn't a correct JSON string for a cloud event * or an array of it, or any deserialized CloudEvents have null 'id', 'source', or 'type'. */ public static List<CloudEvent> fromString(String cloudEventsJson) { return fromString(cloudEventsJson, false); } /** * Deserialize the {@link CloudEvent CloudEvents} from a JSON string. * @param cloudEventsJson the JSON payload containing one or more events. * @param skipValidation set to true if you'd like to skip the validation for the deserialized CloudEvents. A valid * CloudEvent should have 'id', 'source' and 'type' not null. 
* * @return all of the events in the payload deserialized as {@link CloudEvent CloudEvents}. * @throws NullPointerException if cloudEventsJson is null. * @throws IllegalArgumentException if the input parameter isn't a JSON string for a cloud event or an array of it, * or skipValidation is false and any CloudEvents have null id', 'source', or 'type'. */ /** * Get the id of the cloud event. * @return the id. */ public String getId() { return this.id; } /** * Set a custom id. Note that a random id is already set by default. * @param id the id to set. * * @return the cloud event itself. * @throws NullPointerException if id is null. * @throws IllegalArgumentException if id is empty. */ public CloudEvent setId(String id) { if (Objects.isNull(id)) { throw LOGGER.logExceptionAsError(new NullPointerException("id cannot be null")); } if (id.isEmpty()) { throw LOGGER.logExceptionAsError(new IllegalArgumentException("id cannot be empty")); } this.id = id; return this; } /** * Get the URI source of the event. * @return the source. */ public String getSource() { return this.source; } /** * Get the data associated with this event as a {@link BinaryData}, which has API to deserialize the data into * a String, an Object, or a byte[]. * @return A {@link BinaryData} that wraps the this event's data payload. */ public BinaryData getData() { if (this.binaryData == null) { if (this.data != null) { if (this.data instanceof String) { this.binaryData = BinaryData.fromString((String) this.data); } else if (this.data instanceof byte[]) { this.binaryData = BinaryData.fromBytes((byte[]) this.data); } else { this.binaryData = BinaryData.fromObject(this.data); } } else if (this.dataBase64 != null) { this.binaryData = BinaryData.fromString(this.dataBase64); } } return this.binaryData; } /** * Get the type of event, e.g. "Contoso.Items.ItemReceived". * @return the type of the event. */ public String getType() { return this.type; } /** * Get the time associated with the occurrence of the event. 
* @return the event time, or null if the time is not set. */ public OffsetDateTime getTime() { return this.time; } /** * Set the time associated with the occurrence of the event. * @param time the time to set. * * @return the cloud event itself. */ public CloudEvent setTime(OffsetDateTime time) { this.time = time; return this; } /** * Get the content MIME type that the data is in. A null value indicates that the data is either nonexistent or in the * "application/json" type. Note that "application/json" is still a possible value for this field. * @return the content type the data is in, or null if the data is nonexistent or in "application/json" format. */ public String getDataContentType() { return this.dataContentType; } /** * Get the schema that the data adheres to. * @return a URI of the data schema, or null if it is not set. */ public String getDataSchema() { return this.dataSchema; } /** * Set the schema that the data adheres to. * @param dataSchema a URI identifying the schema of the data. * * @return the cloud event itself. */ public CloudEvent setDataSchema(String dataSchema) { this.dataSchema = dataSchema; return this; } /** * Get the subject associated with this event. * @return the subject, or null if the subject was not set. */ public String getSubject() { return this.subject; } /** * Set the subject of the event. * @param subject the subject to set. * * @return the cloud event itself. */ public CloudEvent setSubject(String subject) { this.subject = subject; return this; } /** * Get a map of the additional user-defined attributes associated with this event. * @return the extension attributes as an unmodifiable map. */ @JsonAnyGetter public Map<String, Object> getExtensionAttributes() { return this.extensionAttributes; } /** * Add/Overwrite a single extension attribute to the cloud event. * @param name the name of the attribute. It must contains only alphanumeric characters and not be be any * CloudEvent reserved attribute names. 
* @param value the value to associate with the name. * * @return the cloud event itself. * @throws IllegalArgumentException if name format isn't correct. */ @JsonAnySetter public CloudEvent addExtensionAttribute(String name, Object value) { if (Objects.isNull(name)) { throw LOGGER.logExceptionAsError(new IllegalArgumentException("'name' cannot be null.")); } if (Objects.isNull(value)) { throw LOGGER.logExceptionAsError(new IllegalArgumentException("'value' cannot be null.")); } if (!validateAttributeName(name)) { throw LOGGER.logExceptionAsError(new IllegalArgumentException( "'name' must have only small-case alphanumeric characters and not be one of the CloudEvent reserved " + "attribute names")); } if (this.extensionAttributes == null) { this.extensionAttributes = new HashMap<>(); } this.extensionAttributes.put(name.toLowerCase(Locale.ENGLISH), value); return this; } /** * Get the spec version. Users don't need to access it because it's always 1.0. * Make it package level to test deserialization. * @return The spec version. */ String getSpecVersion() { return this.specVersion; } /** * Set the spec version. Users don't need to access it because it's always 1.0. * Make it package level to test serialization. * @return the cloud event itself. */ CloudEvent setSpecVersion(String specVersion) { this.specVersion = specVersion; return this; } private static boolean validateAttributeName(String name) { if (RESERVED_ATTRIBUTE_NAMES.contains(name)) { return false; } for (int i = 0; i < name.length(); i++) { char c = name.charAt(i); if (!((c >= 'a' && c <= 'z') || (c >= '0' && c <= '9'))) { return false; } } return true; } }
class accepts any String for compatibility with legacy systems. * @param type Type of event related to the originating occurrence. * @param data A {@link BinaryData}
why do we store into the separate data field rather than directly into binaryData which is what is used in the public property?
public CloudEvent(String source, String type, BinaryData data, CloudEventDataFormat format, String dataContentType) { if (Objects.isNull(source)) { throw LOGGER.logExceptionAsError(new NullPointerException("'source' cannot be null.")); } if (Objects.isNull(type)) { throw LOGGER.logExceptionAsError(new NullPointerException("'type' cannot be null.")); } if (Objects.isNull(data)) { throw LOGGER.logExceptionAsError(new NullPointerException("'data' cannot be null.")); } if (Objects.isNull(format)) { throw LOGGER.logExceptionAsError(new NullPointerException("'format' cannot be null.")); } this.source = source; this.type = type; if (CloudEventDataFormat.BYTES == format) { this.dataBase64 = Base64.getEncoder().encodeToString(data.toBytes()); } else { this.data = data.toString(); } this.dataContentType = dataContentType; this.id = UUID.randomUUID().toString(); this.specVersion = CloudEvent.SPEC_VERSION; }
this.data = data.toString();
public CloudEvent(String source, String type, BinaryData data, CloudEventDataFormat format, String dataContentType) { Objects.requireNonNull(source, "'source' cannot be null."); Objects.requireNonNull(type, "'type' cannot be null."); this.source = source; this.type = type; if (data != null) { Objects.requireNonNull(format, "'format' cannot be null when 'data' isn't null."); if (CloudEventDataFormat.BYTES == format) { this.dataBase64 = Base64.getEncoder().encodeToString(data.toBytes()); } else { try { this.data = BINARY_DATA_OBJECT_MAPPER.readTree(data.toBytes()); } catch (IOException e) { throw LOGGER.logExceptionAsError(new IllegalArgumentException("'data' isn't in valid Json format", e)); } } } this.dataContentType = dataContentType; this.id = UUID.randomUUID().toString(); this.specVersion = CloudEvent.SPEC_VERSION; this.binaryData = data; }
class CloudEvent { private static final String SPEC_VERSION = "1.0"; private static final JsonSerializer SERIALIZER; static { JsonSerializer tmp; try { tmp = JsonSerializerProviders.createInstance(); } catch (IllegalStateException e) { tmp = new JacksonSerializer(); } SERIALIZER = tmp; } private static final ClientLogger LOGGER = new ClientLogger(CloudEvent.class); private static final Set<String> RESERVED_ATTRIBUTE_NAMES = new HashSet<>(Arrays.asList( "specversion", "id", "source", "type", "datacontenttype", "dataschema", "subject", "time", "data" )); /* * An identifier for the event. The combination of id and source must be * unique for each distinct event. */ @JsonProperty(value = "id", required = true) private String id; /* * Identifies the context in which an event happened. The combination of id * and source must be unique for each distinct event. */ @JsonProperty(value = "source", required = true) private String source; /* * Event data specific to the event type. */ @JsonProperty(value = "data") private Object data; /* * Event data specific to the event type, encoded as a base64 string. */ @JsonProperty(value = "data_base64") private String dataBase64; /* * Type of event related to the originating occurrence. */ @JsonProperty(value = "type", required = true) private String type; /* * The time (in UTC) the event was generated, in RFC3339 format. */ @JsonProperty(value = "time") private OffsetDateTime time; /* * The version of the CloudEvents specification which the event uses. */ @JsonProperty(value = "specversion", required = true) private String specVersion; /* * Identifies the schema that data adheres to. */ @JsonProperty(value = "dataschema") private String dataSchema; /* * Content type of data value. */ @JsonProperty(value = "datacontenttype") private String dataContentType; /* * This describes the subject of the event in the context of the event * producer (identified by source). 
*/ @JsonProperty(value = "subject") private String subject; @JsonIgnore private Map<String, Object> extensionAttributes; /* * Cache serialized data for getData() */ @JsonIgnore private BinaryData binaryData; /** * * @param source Identifies the context in which an event happened. The combination of id and source must be unique * for each distinct event. * @param type Type of event related to the originating occurrence. * @param data A {@link BinaryData} that wraps the original data, which can be a String, byte[], or model class. * @param format Set to {@link CloudEventDataFormat * {@link CloudEventDataFormat * @param dataContentType The content type of the data. It has no impact on how the data is serialized but tells * the event subscriber how to use the data. Typically the value is of MIME types such as * "application/json", "text/plain", "text/xml", "application/+avro", etc. It can be null. * @throws NullPointerException if source, type, data, or format is null. */ private CloudEvent() { } /** * Deserialize a list of {@link CloudEvent CloudEvents} from a JSON string and validate whether any CloudEvents have * null id', 'source', or 'type'. If you want to skip this validation, use {@link * @param cloudEventsJson the JSON payload containing one or more events. * * @return all of the events in the payload deserialized as {@link CloudEvent CloudEvents}. * @throws NullPointerException if cloudEventsJson is null. * @throws IllegalArgumentException if the input parameter isn't a correct JSON string for a cloud event * or an array of it, or any deserialized CloudEvents have null 'id', 'source', or 'type'. */ public static List<CloudEvent> fromString(String cloudEventsJson) { return fromString(cloudEventsJson, false); } /** * Deserialize a list of {@link CloudEvent CloudEvents} from a JSON string. * @param cloudEventsJson the JSON payload containing one or more events. 
* @param skipValidation set to true if you'd like to skip the validation for the deserialized CloudEvents. A valid * CloudEvent should have 'id', 'source' and 'type' not null. * * @return all of the events in the payload deserialized as {@link CloudEvent CloudEvents}. * @throws NullPointerException if cloudEventsJson is null. * @throws IllegalArgumentException if the input parameter isn't a JSON string for a cloud event or an array of it, * or skipValidation is false and any CloudEvents have null id', 'source', or 'type'. */ public static List<CloudEvent> fromString(String cloudEventsJson, boolean skipValidation) { if (cloudEventsJson == null) { throw LOGGER.logExceptionAsError(new NullPointerException("'cloudEventsJson' cannot be null")); } try { List<CloudEvent> events = Arrays.asList(SERIALIZER.deserialize( new ByteArrayInputStream(cloudEventsJson.getBytes(StandardCharsets.UTF_8)), TypeReference.createInstance(CloudEvent[].class))); if (!skipValidation) { for (CloudEvent event : events) { if (event.getId() == null || event.getSource() == null || event.getType() == null) { throw LOGGER.logExceptionAsError(new IllegalArgumentException( "'id', 'source' and 'type' are mandatory attributes for a CloudEvent. " + "Check if the input param is a JSON string for a CloudEvent or an array of it.")); } } } return events; } catch (UncheckedIOException uncheckedIOException) { throw LOGGER.logExceptionAsError(new IllegalArgumentException("The input parameter isn't a JSON string.", uncheckedIOException.getCause())); } } /** * Get the id of the cloud event. * @return the id. */ public String getId() { return this.id; } /** * Set a custom id. Note that a random id is already set by default. * @param id the id to set. * * @return the cloud event itself. * @throws NullPointerException if id is null. * @throws IllegalArgumentException if id is empty. 
*/ public CloudEvent setId(String id) { if (Objects.isNull(id)) { throw LOGGER.logExceptionAsError(new NullPointerException("id cannot be null")); } if (id.isEmpty()) { throw LOGGER.logExceptionAsError(new IllegalArgumentException("id cannot be empty")); } this.id = id; return this; } /** * Get the URI source of the event. * @return the source. */ public String getSource() { return this.source; } /** * Get the data associated with this event as a {@link BinaryData}, which has API to deserialize the data into * a String, an Object, or a byte[]. * @return A {@link BinaryData} that wraps the this event's data payload. */ public BinaryData getData() { if (this.binaryData == null) { if (this.data != null) { if (this.data instanceof String) { this.binaryData = BinaryData.fromString((String) this.data); } else if (this.data instanceof byte[]) { this.binaryData = BinaryData.fromBytes((byte[]) this.data); } else { this.binaryData = BinaryData.fromObject(this.data, SERIALIZER); } } else if (this.dataBase64 != null) { this.binaryData = BinaryData.fromString(this.dataBase64); } } return this.binaryData; } /** * Get the type of event, e.g. "Contoso.Items.ItemReceived". * @return the type of the event. */ public String getType() { return this.type; } /** * Get the time associated with the occurrence of the event. * @return the event time, or null if the time is not set. */ public OffsetDateTime getTime() { return this.time; } /** * Set the time associated with the occurrence of the event. * @param time the time to set. * * @return the cloud event itself. */ public CloudEvent setTime(OffsetDateTime time) { this.time = time; return this; } /** * Get the content MIME type that the data is in. A null value indicates that the data is either nonexistent or in the * "application/json" type. Note that "application/json" is still a possible value for this field. * @return the content type the data is in, or null if the data is nonexistent or in "application/json" format. 
*/ public String getDataContentType() { return this.dataContentType; } /** * Get the schema that the data adheres to. * @return a URI of the data schema, or null if it is not set. */ public String getDataSchema() { return this.dataSchema; } /** * Set the schema that the data adheres to. * @param dataSchema a URI identifying the schema of the data. * * @return the cloud event itself. */ public CloudEvent setDataSchema(String dataSchema) { this.dataSchema = dataSchema; return this; } /** * Get the subject associated with this event. * @return the subject, or null if the subject was not set. */ public String getSubject() { return this.subject; } /** * Set the subject of the event. * @param subject the subject to set. * * @return the cloud event itself. */ public CloudEvent setSubject(String subject) { this.subject = subject; return this; } /** * Get a map of the additional user-defined attributes associated with this event. * @return the extension attributes as an unmodifiable map. */ @JsonAnyGetter public Map<String, Object> getExtensionAttributes() { return this.extensionAttributes; } /** * Add/Overwrite a single extension attribute to the cloud event. * @param name the name of the attribute. It must contains only alphanumeric characters and not be be any * CloudEvent reserved attribute names. * @param value the value to associate with the name. * * @return the cloud event itself. * @throws IllegalArgumentException if name format isn't correct. 
*/ @JsonAnySetter public CloudEvent addExtensionAttribute(String name, Object value) { if (Objects.isNull(name)) { throw LOGGER.logExceptionAsError(new IllegalArgumentException("'name' cannot be null.")); } if (Objects.isNull(value)) { throw LOGGER.logExceptionAsError(new IllegalArgumentException("'value' cannot be null.")); } if (!validateAttributeName(name)) { throw LOGGER.logExceptionAsError(new IllegalArgumentException( "'name' must have only small-case alphanumeric characters and not be one of the CloudEvent reserved " + "attribute names")); } if (this.extensionAttributes == null) { this.extensionAttributes = new HashMap<>(); } this.extensionAttributes.put(name.toLowerCase(Locale.ENGLISH), value); return this; } /** * Get the spec version. Users don't need to access it because it's always 1.0. * Make it package level to test deserialization. * @return The spec version. */ String getSpecVersion() { return this.specVersion; } /** * Set the spec version. Users don't need to access it because it's always 1.0. * Make it package level to test serialization. * @return the cloud event itself. 
*/ CloudEvent setSpecVersion(String specVersion) { this.specVersion = specVersion; return this; } private static boolean validateAttributeName(String name) { if (RESERVED_ATTRIBUTE_NAMES.contains(name)) { return false; } for (int i = 0; i < name.length(); i++) { char c = name.charAt(i); if (!((c >= 'a' && c <= 'z') || (c >= '0' && c <= '9'))) { return false; } } return true; } static class JacksonSerializer implements JsonSerializer { private final JacksonAdapter jacksonAdapter = new JacksonAdapter(); @Override public <T> T deserialize(InputStream stream, TypeReference<T> typeReference) { try { return jacksonAdapter.deserialize(stream, typeReference.getJavaType(), SerializerEncoding.JSON); } catch (IOException e) { throw LOGGER.logExceptionAsError(new RuntimeException(e)); } } @Override public <T> Mono<T> deserializeAsync(InputStream stream, TypeReference<T> typeReference) { return Mono.defer(() -> Mono.just(deserialize(stream, typeReference))); } @Override public void serialize(OutputStream stream, Object value) { try { jacksonAdapter.serialize(value, SerializerEncoding.JSON, stream); } catch (IOException e) { throw LOGGER.logExceptionAsError(new RuntimeException(e)); } } @Override public Mono<Void> serializeAsync(OutputStream stream, Object value) { return Mono.fromRunnable(() -> serialize(stream, value)); } JacksonAdapter getJacksonAdapter() { return jacksonAdapter; } } }
class accepts any String for compatibility with legacy systems. * @param type Type of event related to the originating occurrence. * @param data A {@link BinaryData}
nit: shouldn't need the toLowerCase here.
public CloudEvent addExtensionAttribute(String name, Object value) { if (Objects.isNull(name)) { throw LOGGER.logExceptionAsError(new IllegalArgumentException("'name' cannot be null.")); } if (Objects.isNull(value)) { throw LOGGER.logExceptionAsError(new IllegalArgumentException("'value' cannot be null.")); } if (!validateAttributeName(name)) { throw LOGGER.logExceptionAsError(new IllegalArgumentException( "'name' must have only small-case alphanumeric characters and not be one of the CloudEvent reserved " + "attribute names")); } if (this.extensionAttributes == null) { this.extensionAttributes = new HashMap<>(); } this.extensionAttributes.put(name.toLowerCase(Locale.ENGLISH), value); return this; }
this.extensionAttributes.put(name.toLowerCase(Locale.ENGLISH), value);
public CloudEvent addExtensionAttribute(String name, Object value) { Objects.requireNonNull(name, "'name' cannot be null."); Objects.requireNonNull(value, "'value' cannot be null."); if (!validateAttributeName(name)) { throw LOGGER.logExceptionAsError(new IllegalArgumentException( "Extension attribute 'name' must have only lower-case alphanumeric characters and not be one of the " + "CloudEvent reserved attribute names: " + String.join(",", RESERVED_ATTRIBUTE_NAMES))); } if (this.extensionAttributes == null) { this.extensionAttributes = new HashMap<>(); } this.extensionAttributes.put(name, value); return this; }
class CloudEvent { private static final String SPEC_VERSION = "1.0"; private static final JsonSerializer SERIALIZER; static { JsonSerializer tmp; try { tmp = JsonSerializerProviders.createInstance(); } catch (IllegalStateException e) { tmp = new JacksonSerializer(); } SERIALIZER = tmp; } private static final ClientLogger LOGGER = new ClientLogger(CloudEvent.class); private static final Set<String> RESERVED_ATTRIBUTE_NAMES = new HashSet<>(Arrays.asList( "specversion", "id", "source", "type", "datacontenttype", "dataschema", "subject", "time", "data" )); /* * An identifier for the event. The combination of id and source must be * unique for each distinct event. */ @JsonProperty(value = "id", required = true) private String id; /* * Identifies the context in which an event happened. The combination of id * and source must be unique for each distinct event. */ @JsonProperty(value = "source", required = true) private String source; /* * Event data specific to the event type. */ @JsonProperty(value = "data") private Object data; /* * Event data specific to the event type, encoded as a base64 string. */ @JsonProperty(value = "data_base64") private String dataBase64; /* * Type of event related to the originating occurrence. */ @JsonProperty(value = "type", required = true) private String type; /* * The time (in UTC) the event was generated, in RFC3339 format. */ @JsonProperty(value = "time") private OffsetDateTime time; /* * The version of the CloudEvents specification which the event uses. */ @JsonProperty(value = "specversion", required = true) private String specVersion; /* * Identifies the schema that data adheres to. */ @JsonProperty(value = "dataschema") private String dataSchema; /* * Content type of data value. */ @JsonProperty(value = "datacontenttype") private String dataContentType; /* * This describes the subject of the event in the context of the event * producer (identified by source). 
*/ @JsonProperty(value = "subject") private String subject; @JsonIgnore private Map<String, Object> extensionAttributes; /* * Cache serialized data for getData() */ @JsonIgnore private BinaryData binaryData; /** * * @param source Identifies the context in which an event happened. The combination of id and source must be unique * for each distinct event. * @param type Type of event related to the originating occurrence. * @param data A {@link BinaryData} that wraps the original data, which can be a String, byte[], or model class. * @param format Set to {@link CloudEventDataFormat * {@link CloudEventDataFormat * @param dataContentType The content type of the data. It has no impact on how the data is serialized but tells * the event subscriber how to use the data. Typically the value is of MIME types such as * "application/json", "text/plain", "text/xml", "application/+avro", etc. It can be null. * @throws NullPointerException if source, type, data, or format is null. */ public CloudEvent(String source, String type, BinaryData data, CloudEventDataFormat format, String dataContentType) { if (Objects.isNull(source)) { throw LOGGER.logExceptionAsError(new NullPointerException("'source' cannot be null.")); } if (Objects.isNull(type)) { throw LOGGER.logExceptionAsError(new NullPointerException("'type' cannot be null.")); } if (Objects.isNull(data)) { throw LOGGER.logExceptionAsError(new NullPointerException("'data' cannot be null.")); } if (Objects.isNull(format)) { throw LOGGER.logExceptionAsError(new NullPointerException("'format' cannot be null.")); } this.source = source; this.type = type; if (CloudEventDataFormat.BYTES == format) { this.dataBase64 = Base64.getEncoder().encodeToString(data.toBytes()); } else { this.data = data.toString(); } this.dataContentType = dataContentType; this.id = UUID.randomUUID().toString(); this.specVersion = CloudEvent.SPEC_VERSION; } private CloudEvent() { } /** * Deserialize a list of {@link CloudEvent CloudEvents} from a JSON string and 
validate whether any CloudEvents have * null id', 'source', or 'type'. If you want to skip this validation, use {@link * @param cloudEventsJson the JSON payload containing one or more events. * * @return all of the events in the payload deserialized as {@link CloudEvent CloudEvents}. * @throws NullPointerException if cloudEventsJson is null. * @throws IllegalArgumentException if the input parameter isn't a correct JSON string for a cloud event * or an array of it, or any deserialized CloudEvents have null 'id', 'source', or 'type'. */ public static List<CloudEvent> fromString(String cloudEventsJson) { return fromString(cloudEventsJson, false); } /** * Deserialize a list of {@link CloudEvent CloudEvents} from a JSON string. * @param cloudEventsJson the JSON payload containing one or more events. * @param skipValidation set to true if you'd like to skip the validation for the deserialized CloudEvents. A valid * CloudEvent should have 'id', 'source' and 'type' not null. * * @return all of the events in the payload deserialized as {@link CloudEvent CloudEvents}. * @throws NullPointerException if cloudEventsJson is null. * @throws IllegalArgumentException if the input parameter isn't a JSON string for a cloud event or an array of it, * or skipValidation is false and any CloudEvents have null id', 'source', or 'type'. 
*/ public static List<CloudEvent> fromString(String cloudEventsJson, boolean skipValidation) { if (cloudEventsJson == null) { throw LOGGER.logExceptionAsError(new NullPointerException("'cloudEventsJson' cannot be null")); } try { List<CloudEvent> events = Arrays.asList(SERIALIZER.deserialize( new ByteArrayInputStream(cloudEventsJson.getBytes(StandardCharsets.UTF_8)), TypeReference.createInstance(CloudEvent[].class))); if (!skipValidation) { for (CloudEvent event : events) { if (event.getId() == null || event.getSource() == null || event.getType() == null) { throw LOGGER.logExceptionAsError(new IllegalArgumentException( "'id', 'source' and 'type' are mandatory attributes for a CloudEvent. " + "Check if the input param is a JSON string for a CloudEvent or an array of it.")); } } } return events; } catch (UncheckedIOException uncheckedIOException) { throw LOGGER.logExceptionAsError(new IllegalArgumentException("The input parameter isn't a JSON string.", uncheckedIOException.getCause())); } } /** * Get the id of the cloud event. * @return the id. */ public String getId() { return this.id; } /** * Set a custom id. Note that a random id is already set by default. * @param id the id to set. * * @return the cloud event itself. * @throws NullPointerException if id is null. * @throws IllegalArgumentException if id is empty. */ public CloudEvent setId(String id) { if (Objects.isNull(id)) { throw LOGGER.logExceptionAsError(new NullPointerException("id cannot be null")); } if (id.isEmpty()) { throw LOGGER.logExceptionAsError(new IllegalArgumentException("id cannot be empty")); } this.id = id; return this; } /** * Get the URI source of the event. * @return the source. */ public String getSource() { return this.source; } /** * Get the data associated with this event as a {@link BinaryData}, which has API to deserialize the data into * a String, an Object, or a byte[]. * @return A {@link BinaryData} that wraps the this event's data payload. 
*/ public BinaryData getData() { if (this.binaryData == null) { if (this.data != null) { if (this.data instanceof String) { this.binaryData = BinaryData.fromString((String) this.data); } else if (this.data instanceof byte[]) { this.binaryData = BinaryData.fromBytes((byte[]) this.data); } else { this.binaryData = BinaryData.fromObject(this.data, SERIALIZER); } } else if (this.dataBase64 != null) { this.binaryData = BinaryData.fromString(this.dataBase64); } } return this.binaryData; } /** * Get the type of event, e.g. "Contoso.Items.ItemReceived". * @return the type of the event. */ public String getType() { return this.type; } /** * Get the time associated with the occurrence of the event. * @return the event time, or null if the time is not set. */ public OffsetDateTime getTime() { return this.time; } /** * Set the time associated with the occurrence of the event. * @param time the time to set. * * @return the cloud event itself. */ public CloudEvent setTime(OffsetDateTime time) { this.time = time; return this; } /** * Get the content MIME type that the data is in. A null value indicates that the data is either nonexistent or in the * "application/json" type. Note that "application/json" is still a possible value for this field. * @return the content type the data is in, or null if the data is nonexistent or in "application/json" format. */ public String getDataContentType() { return this.dataContentType; } /** * Get the schema that the data adheres to. * @return a URI of the data schema, or null if it is not set. */ public String getDataSchema() { return this.dataSchema; } /** * Set the schema that the data adheres to. * @param dataSchema a URI identifying the schema of the data. * * @return the cloud event itself. */ public CloudEvent setDataSchema(String dataSchema) { this.dataSchema = dataSchema; return this; } /** * Get the subject associated with this event. * @return the subject, or null if the subject was not set. 
*/ public String getSubject() { return this.subject; } /** * Set the subject of the event. * @param subject the subject to set. * * @return the cloud event itself. */ public CloudEvent setSubject(String subject) { this.subject = subject; return this; } /** * Get a map of the additional user-defined attributes associated with this event. * @return the extension attributes as an unmodifiable map. */ @JsonAnyGetter public Map<String, Object> getExtensionAttributes() { return this.extensionAttributes; } /** * Add/Overwrite a single extension attribute to the cloud event. * @param name the name of the attribute. It must contains only alphanumeric characters and not be be any * CloudEvent reserved attribute names. * @param value the value to associate with the name. * * @return the cloud event itself. * @throws IllegalArgumentException if name format isn't correct. */ @JsonAnySetter /** * Get the spec version. Users don't need to access it because it's always 1.0. * Make it package level to test deserialization. * @return The spec version. */ String getSpecVersion() { return this.specVersion; } /** * Set the spec version. Users don't need to access it because it's always 1.0. * Make it package level to test serialization. * @return the cloud event itself. 
*/ CloudEvent setSpecVersion(String specVersion) { this.specVersion = specVersion; return this; } private static boolean validateAttributeName(String name) { if (RESERVED_ATTRIBUTE_NAMES.contains(name)) { return false; } for (int i = 0; i < name.length(); i++) { char c = name.charAt(i); if (!((c >= 'a' && c <= 'z') || (c >= '0' && c <= '9'))) { return false; } } return true; } static class JacksonSerializer implements JsonSerializer { private final JacksonAdapter jacksonAdapter = new JacksonAdapter(); @Override public <T> T deserialize(InputStream stream, TypeReference<T> typeReference) { try { return jacksonAdapter.deserialize(stream, typeReference.getJavaType(), SerializerEncoding.JSON); } catch (IOException e) { throw LOGGER.logExceptionAsError(new RuntimeException(e)); } } @Override public <T> Mono<T> deserializeAsync(InputStream stream, TypeReference<T> typeReference) { return Mono.defer(() -> Mono.just(deserialize(stream, typeReference))); } @Override public void serialize(OutputStream stream, Object value) { try { jacksonAdapter.serialize(value, SerializerEncoding.JSON, stream); } catch (IOException e) { throw LOGGER.logExceptionAsError(new RuntimeException(e)); } } @Override public Mono<Void> serializeAsync(OutputStream stream, Object value) { return Mono.fromRunnable(() -> serialize(stream, value)); } JacksonAdapter getJacksonAdapter() { return jacksonAdapter; } } }
class accepts any String for compatibility with legacy systems. * @param type Type of event related to the originating occurrence. * @param data A {@link BinaryData}
Should we allow format to be null and just fallback to `BYTES`?
public CloudEvent(String source, String type, BinaryData data, CloudEventDataFormat format, String dataContentType) { if (Objects.isNull(source)) { throw LOGGER.logExceptionAsError(new NullPointerException("'source' cannot be null.")); } if (Objects.isNull(type)) { throw LOGGER.logExceptionAsError(new NullPointerException("'type' cannot be null.")); } if (Objects.isNull(data)) { throw LOGGER.logExceptionAsError(new NullPointerException("'data' cannot be null.")); } if (Objects.isNull(format)) { throw LOGGER.logExceptionAsError(new NullPointerException("'format' cannot be null.")); } this.source = source; this.type = type; if (CloudEventDataFormat.BYTES == format) { this.dataBase64 = Base64.getEncoder().encodeToString(data.toBytes()); } else { this.data = data.toString(); } this.dataContentType = dataContentType; this.id = UUID.randomUUID().toString(); this.specVersion = CloudEvent.SPEC_VERSION; }
if (Objects.isNull(format)) {
public CloudEvent(String source, String type, BinaryData data, CloudEventDataFormat format, String dataContentType) { Objects.requireNonNull(source, "'source' cannot be null."); Objects.requireNonNull(type, "'type' cannot be null."); this.source = source; this.type = type; if (data != null) { Objects.requireNonNull(format, "'format' cannot be null when 'data' isn't null."); if (CloudEventDataFormat.BYTES == format) { this.dataBase64 = Base64.getEncoder().encodeToString(data.toBytes()); } else { try { this.data = BINARY_DATA_OBJECT_MAPPER.readTree(data.toBytes()); } catch (IOException e) { throw LOGGER.logExceptionAsError(new IllegalArgumentException("'data' isn't in valid Json format", e)); } } } this.dataContentType = dataContentType; this.id = UUID.randomUUID().toString(); this.specVersion = CloudEvent.SPEC_VERSION; this.binaryData = data; }
class CloudEvent { private static final String SPEC_VERSION = "1.0"; private static final JsonSerializer SERIALIZER; static { JsonSerializer tmp; try { tmp = JsonSerializerProviders.createInstance(); } catch (IllegalStateException e) { tmp = new JacksonSerializer(); } SERIALIZER = tmp; } private static final ClientLogger LOGGER = new ClientLogger(CloudEvent.class); private static final Set<String> RESERVED_ATTRIBUTE_NAMES = new HashSet<>(Arrays.asList( "specversion", "id", "source", "type", "datacontenttype", "dataschema", "subject", "time", "data" )); /* * An identifier for the event. The combination of id and source must be * unique for each distinct event. */ @JsonProperty(value = "id", required = true) private String id; /* * Identifies the context in which an event happened. The combination of id * and source must be unique for each distinct event. */ @JsonProperty(value = "source", required = true) private String source; /* * Event data specific to the event type. */ @JsonProperty(value = "data") private Object data; /* * Event data specific to the event type, encoded as a base64 string. */ @JsonProperty(value = "data_base64") private String dataBase64; /* * Type of event related to the originating occurrence. */ @JsonProperty(value = "type", required = true) private String type; /* * The time (in UTC) the event was generated, in RFC3339 format. */ @JsonProperty(value = "time") private OffsetDateTime time; /* * The version of the CloudEvents specification which the event uses. */ @JsonProperty(value = "specversion", required = true) private String specVersion; /* * Identifies the schema that data adheres to. */ @JsonProperty(value = "dataschema") private String dataSchema; /* * Content type of data value. */ @JsonProperty(value = "datacontenttype") private String dataContentType; /* * This describes the subject of the event in the context of the event * producer (identified by source). 
*/ @JsonProperty(value = "subject") private String subject; @JsonIgnore private Map<String, Object> extensionAttributes; /* * Cache serialized data for getData() */ @JsonIgnore private BinaryData binaryData; /** * * @param source Identifies the context in which an event happened. The combination of id and source must be unique * for each distinct event. * @param type Type of event related to the originating occurrence. * @param data A {@link BinaryData} that wraps the original data, which can be a String, byte[], or model class. * @param format Set to {@link CloudEventDataFormat * {@link CloudEventDataFormat * @param dataContentType The content type of the data. It has no impact on how the data is serialized but tells * the event subscriber how to use the data. Typically the value is of MIME types such as * "application/json", "text/plain", "text/xml", "application/+avro", etc. It can be null. * @throws NullPointerException if source, type, data, or format is null. */ private CloudEvent() { } /** * Deserialize a list of {@link CloudEvent CloudEvents} from a JSON string and validate whether any CloudEvents have * null id', 'source', or 'type'. If you want to skip this validation, use {@link * @param cloudEventsJson the JSON payload containing one or more events. * * @return all of the events in the payload deserialized as {@link CloudEvent CloudEvents}. * @throws NullPointerException if cloudEventsJson is null. * @throws IllegalArgumentException if the input parameter isn't a correct JSON string for a cloud event * or an array of it, or any deserialized CloudEvents have null 'id', 'source', or 'type'. */ public static List<CloudEvent> fromString(String cloudEventsJson) { return fromString(cloudEventsJson, false); } /** * Deserialize a list of {@link CloudEvent CloudEvents} from a JSON string. * @param cloudEventsJson the JSON payload containing one or more events. 
* @param skipValidation set to true if you'd like to skip the validation for the deserialized CloudEvents. A valid * CloudEvent should have 'id', 'source' and 'type' not null. * * @return all of the events in the payload deserialized as {@link CloudEvent CloudEvents}. * @throws NullPointerException if cloudEventsJson is null. * @throws IllegalArgumentException if the input parameter isn't a JSON string for a cloud event or an array of it, * or skipValidation is false and any CloudEvents have null id', 'source', or 'type'. */ public static List<CloudEvent> fromString(String cloudEventsJson, boolean skipValidation) { if (cloudEventsJson == null) { throw LOGGER.logExceptionAsError(new NullPointerException("'cloudEventsJson' cannot be null")); } try { List<CloudEvent> events = Arrays.asList(SERIALIZER.deserialize( new ByteArrayInputStream(cloudEventsJson.getBytes(StandardCharsets.UTF_8)), TypeReference.createInstance(CloudEvent[].class))); if (!skipValidation) { for (CloudEvent event : events) { if (event.getId() == null || event.getSource() == null || event.getType() == null) { throw LOGGER.logExceptionAsError(new IllegalArgumentException( "'id', 'source' and 'type' are mandatory attributes for a CloudEvent. " + "Check if the input param is a JSON string for a CloudEvent or an array of it.")); } } } return events; } catch (UncheckedIOException uncheckedIOException) { throw LOGGER.logExceptionAsError(new IllegalArgumentException("The input parameter isn't a JSON string.", uncheckedIOException.getCause())); } } /** * Get the id of the cloud event. * @return the id. */ public String getId() { return this.id; } /** * Set a custom id. Note that a random id is already set by default. * @param id the id to set. * * @return the cloud event itself. * @throws NullPointerException if id is null. * @throws IllegalArgumentException if id is empty. 
*/ public CloudEvent setId(String id) { if (Objects.isNull(id)) { throw LOGGER.logExceptionAsError(new NullPointerException("id cannot be null")); } if (id.isEmpty()) { throw LOGGER.logExceptionAsError(new IllegalArgumentException("id cannot be empty")); } this.id = id; return this; } /** * Get the URI source of the event. * @return the source. */ public String getSource() { return this.source; } /** * Get the data associated with this event as a {@link BinaryData}, which has API to deserialize the data into * a String, an Object, or a byte[]. * @return A {@link BinaryData} that wraps the this event's data payload. */ public BinaryData getData() { if (this.binaryData == null) { if (this.data != null) { if (this.data instanceof String) { this.binaryData = BinaryData.fromString((String) this.data); } else if (this.data instanceof byte[]) { this.binaryData = BinaryData.fromBytes((byte[]) this.data); } else { this.binaryData = BinaryData.fromObject(this.data, SERIALIZER); } } else if (this.dataBase64 != null) { this.binaryData = BinaryData.fromString(this.dataBase64); } } return this.binaryData; } /** * Get the type of event, e.g. "Contoso.Items.ItemReceived". * @return the type of the event. */ public String getType() { return this.type; } /** * Get the time associated with the occurrence of the event. * @return the event time, or null if the time is not set. */ public OffsetDateTime getTime() { return this.time; } /** * Set the time associated with the occurrence of the event. * @param time the time to set. * * @return the cloud event itself. */ public CloudEvent setTime(OffsetDateTime time) { this.time = time; return this; } /** * Get the content MIME type that the data is in. A null value indicates that the data is either nonexistent or in the * "application/json" type. Note that "application/json" is still a possible value for this field. * @return the content type the data is in, or null if the data is nonexistent or in "application/json" format. 
*/ public String getDataContentType() { return this.dataContentType; } /** * Get the schema that the data adheres to. * @return a URI of the data schema, or null if it is not set. */ public String getDataSchema() { return this.dataSchema; } /** * Set the schema that the data adheres to. * @param dataSchema a URI identifying the schema of the data. * * @return the cloud event itself. */ public CloudEvent setDataSchema(String dataSchema) { this.dataSchema = dataSchema; return this; } /** * Get the subject associated with this event. * @return the subject, or null if the subject was not set. */ public String getSubject() { return this.subject; } /** * Set the subject of the event. * @param subject the subject to set. * * @return the cloud event itself. */ public CloudEvent setSubject(String subject) { this.subject = subject; return this; } /** * Get a map of the additional user-defined attributes associated with this event. * @return the extension attributes as an unmodifiable map. */ @JsonAnyGetter public Map<String, Object> getExtensionAttributes() { return this.extensionAttributes; } /** * Add/Overwrite a single extension attribute to the cloud event. * @param name the name of the attribute. It must contains only alphanumeric characters and not be be any * CloudEvent reserved attribute names. * @param value the value to associate with the name. * * @return the cloud event itself. * @throws IllegalArgumentException if name format isn't correct. 
*/ @JsonAnySetter public CloudEvent addExtensionAttribute(String name, Object value) { if (Objects.isNull(name)) { throw LOGGER.logExceptionAsError(new IllegalArgumentException("'name' cannot be null.")); } if (Objects.isNull(value)) { throw LOGGER.logExceptionAsError(new IllegalArgumentException("'value' cannot be null.")); } if (!validateAttributeName(name)) { throw LOGGER.logExceptionAsError(new IllegalArgumentException( "'name' must have only small-case alphanumeric characters and not be one of the CloudEvent reserved " + "attribute names")); } if (this.extensionAttributes == null) { this.extensionAttributes = new HashMap<>(); } this.extensionAttributes.put(name.toLowerCase(Locale.ENGLISH), value); return this; } /** * Get the spec version. Users don't need to access it because it's always 1.0. * Make it package level to test deserialization. * @return The spec version. */ String getSpecVersion() { return this.specVersion; } /** * Set the spec version. Users don't need to access it because it's always 1.0. * Make it package level to test serialization. * @return the cloud event itself. 
*/ CloudEvent setSpecVersion(String specVersion) { this.specVersion = specVersion; return this; } private static boolean validateAttributeName(String name) { if (RESERVED_ATTRIBUTE_NAMES.contains(name)) { return false; } for (int i = 0; i < name.length(); i++) { char c = name.charAt(i); if (!((c >= 'a' && c <= 'z') || (c >= '0' && c <= '9'))) { return false; } } return true; } static class JacksonSerializer implements JsonSerializer { private final JacksonAdapter jacksonAdapter = new JacksonAdapter(); @Override public <T> T deserialize(InputStream stream, TypeReference<T> typeReference) { try { return jacksonAdapter.deserialize(stream, typeReference.getJavaType(), SerializerEncoding.JSON); } catch (IOException e) { throw LOGGER.logExceptionAsError(new RuntimeException(e)); } } @Override public <T> Mono<T> deserializeAsync(InputStream stream, TypeReference<T> typeReference) { return Mono.defer(() -> Mono.just(deserialize(stream, typeReference))); } @Override public void serialize(OutputStream stream, Object value) { try { jacksonAdapter.serialize(value, SerializerEncoding.JSON, stream); } catch (IOException e) { throw LOGGER.logExceptionAsError(new RuntimeException(e)); } } @Override public Mono<Void> serializeAsync(OutputStream stream, Object value) { return Mono.fromRunnable(() -> serialize(stream, value)); } JacksonAdapter getJacksonAdapter() { return jacksonAdapter; } } }
class accepts any String for compatibility with legacy systems. * @param type Type of event related to the originating occurrence. * @param data A {@link BinaryData}
Generally, we don't log non-nullable properties being null; instead we just use `Objects.requireNonNull(propertyX, "'propertyX' cannot be null.")`. This logging creates a lot of noise in a customer's log store, which may be unwanted (especially as this isn't a complex error).
/**
 * Creates a CloudEvent.
 *
 * @param source Identifies the context in which an event happened. The combination of id and source must be
 *               unique for each distinct event. Must not be null.
 * @param type Type of event related to the originating occurrence. Must not be null.
 * @param data A {@link BinaryData} that wraps the original data payload; may be null when the event carries
 *             no data.
 * @param format Controls how the data is carried: {@link CloudEventDataFormat#BYTES} stores it base64-encoded
 *               in "data_base64", any other value stores it in "data". Must not be null when {@code data}
 *               is non-null.
 * @param dataContentType The content type of the data. It has no impact on how the data is serialized but
 *                        tells the event subscriber how to use the data. It can be null.
 * @throws NullPointerException if {@code source} or {@code type} is null, or if {@code format} is null while
 *         {@code data} is non-null.
 */
public CloudEvent(String source, String type, BinaryData data, CloudEventDataFormat format, String dataContentType) {
    // Plain requireNonNull: argument validation is a programming error, not an operational
    // failure worth pushing into a customer's log store via logExceptionAsError.
    Objects.requireNonNull(source, "'source' cannot be null.");
    Objects.requireNonNull(type, "'type' cannot be null.");
    this.source = source;
    this.type = type;
    if (data != null) {
        // 'format' only matters when there is data to encode, so only require it then.
        Objects.requireNonNull(format, "'format' cannot be null when 'data' isn't null.");
        if (CloudEventDataFormat.BYTES == format) {
            this.dataBase64 = Base64.getEncoder().encodeToString(data.toBytes());
        } else {
            // NOTE(review): this stores the payload's string form; a JSON payload ends up embedded
            // as a quoted string rather than as a JSON tree — confirm this matches the intended
            // wire format before relying on it.
            this.data = data.toString();
        }
    }
    this.dataContentType = dataContentType;
    this.id = UUID.randomUUID().toString();
    this.specVersion = CloudEvent.SPEC_VERSION;
}
throw LOGGER.logExceptionAsError(new NullPointerException("'source' cannot be null."));
/**
 * Creates a CloudEvent.
 *
 * @param source Identifies the context in which an event happened; combined with id it must be unique
 *               per distinct event. Must not be null.
 * @param type Type of event related to the originating occurrence. Must not be null.
 * @param data A {@link BinaryData} wrapping the original data; may be null.
 * @param format {@link CloudEventDataFormat#BYTES} base64-encodes the payload into "data_base64";
 *               otherwise the payload is parsed as JSON into "data". Must not be null when
 *               {@code data} is non-null.
 * @param dataContentType The MIME content type of the data; informational only, may be null.
 * @throws NullPointerException if {@code source} or {@code type} is null, or {@code format} is null
 *         while {@code data} is non-null.
 * @throws IllegalArgumentException if {@code data} isn't valid JSON while a JSON format is requested.
 */
public CloudEvent(String source, String type, BinaryData data, CloudEventDataFormat format, String dataContentType) {
    this.source = Objects.requireNonNull(source, "'source' cannot be null.");
    this.type = Objects.requireNonNull(type, "'type' cannot be null.");
    this.dataContentType = dataContentType;
    this.id = UUID.randomUUID().toString();
    this.specVersion = CloudEvent.SPEC_VERSION;
    // Cache the caller's BinaryData so getData() can return it without re-serializing.
    this.binaryData = data;
    if (data == null) {
        // No payload: neither "data" nor "data_base64" is populated.
        return;
    }
    Objects.requireNonNull(format, "'format' cannot be null when 'data' isn't null.");
    if (CloudEventDataFormat.BYTES == format) {
        this.dataBase64 = Base64.getEncoder().encodeToString(data.toBytes());
        return;
    }
    try {
        // JSON format: embed the payload as a parsed JSON tree, not a quoted string.
        this.data = BINARY_DATA_OBJECT_MAPPER.readTree(data.toBytes());
    } catch (IOException e) {
        throw LOGGER.logExceptionAsError(new IllegalArgumentException("'data' isn't in valid Json format", e));
    }
}
class CloudEvent { private static final String SPEC_VERSION = "1.0"; private static final JsonSerializer SERIALIZER; static { JsonSerializer tmp; try { tmp = JsonSerializerProviders.createInstance(); } catch (IllegalStateException e) { tmp = new JacksonSerializer(); } SERIALIZER = tmp; } private static final ClientLogger LOGGER = new ClientLogger(CloudEvent.class); private static final Set<String> RESERVED_ATTRIBUTE_NAMES = new HashSet<>(Arrays.asList( "specversion", "id", "source", "type", "datacontenttype", "dataschema", "subject", "time", "data" )); /* * An identifier for the event. The combination of id and source must be * unique for each distinct event. */ @JsonProperty(value = "id", required = true) private String id; /* * Identifies the context in which an event happened. The combination of id * and source must be unique for each distinct event. */ @JsonProperty(value = "source", required = true) private String source; /* * Event data specific to the event type. */ @JsonProperty(value = "data") private Object data; /* * Event data specific to the event type, encoded as a base64 string. */ @JsonProperty(value = "data_base64") private String dataBase64; /* * Type of event related to the originating occurrence. */ @JsonProperty(value = "type", required = true) private String type; /* * The time (in UTC) the event was generated, in RFC3339 format. */ @JsonProperty(value = "time") private OffsetDateTime time; /* * The version of the CloudEvents specification which the event uses. */ @JsonProperty(value = "specversion", required = true) private String specVersion; /* * Identifies the schema that data adheres to. */ @JsonProperty(value = "dataschema") private String dataSchema; /* * Content type of data value. */ @JsonProperty(value = "datacontenttype") private String dataContentType; /* * This describes the subject of the event in the context of the event * producer (identified by source). 
*/ @JsonProperty(value = "subject") private String subject; @JsonIgnore private Map<String, Object> extensionAttributes; /* * Cache serialized data for getData() */ @JsonIgnore private BinaryData binaryData; /** * * @param source Identifies the context in which an event happened. The combination of id and source must be unique * for each distinct event. * @param type Type of event related to the originating occurrence. * @param data A {@link BinaryData} that wraps the original data, which can be a String, byte[], or model class. * @param format Set to {@link CloudEventDataFormat * {@link CloudEventDataFormat * @param dataContentType The content type of the data. It has no impact on how the data is serialized but tells * the event subscriber how to use the data. Typically the value is of MIME types such as * "application/json", "text/plain", "text/xml", "application/+avro", etc. It can be null. * @throws NullPointerException if source, type, data, or format is null. */ private CloudEvent() { } /** * Deserialize a list of {@link CloudEvent CloudEvents} from a JSON string and validate whether any CloudEvents have * null id', 'source', or 'type'. If you want to skip this validation, use {@link * @param cloudEventsJson the JSON payload containing one or more events. * * @return all of the events in the payload deserialized as {@link CloudEvent CloudEvents}. * @throws NullPointerException if cloudEventsJson is null. * @throws IllegalArgumentException if the input parameter isn't a correct JSON string for a cloud event * or an array of it, or any deserialized CloudEvents have null 'id', 'source', or 'type'. */ public static List<CloudEvent> fromString(String cloudEventsJson) { return fromString(cloudEventsJson, false); } /** * Deserialize a list of {@link CloudEvent CloudEvents} from a JSON string. * @param cloudEventsJson the JSON payload containing one or more events. 
* @param skipValidation set to true if you'd like to skip the validation for the deserialized CloudEvents. A valid * CloudEvent should have 'id', 'source' and 'type' not null. * * @return all of the events in the payload deserialized as {@link CloudEvent CloudEvents}. * @throws NullPointerException if cloudEventsJson is null. * @throws IllegalArgumentException if the input parameter isn't a JSON string for a cloud event or an array of it, * or skipValidation is false and any CloudEvents have null id', 'source', or 'type'. */ public static List<CloudEvent> fromString(String cloudEventsJson, boolean skipValidation) { if (cloudEventsJson == null) { throw LOGGER.logExceptionAsError(new NullPointerException("'cloudEventsJson' cannot be null")); } try { List<CloudEvent> events = Arrays.asList(SERIALIZER.deserialize( new ByteArrayInputStream(cloudEventsJson.getBytes(StandardCharsets.UTF_8)), TypeReference.createInstance(CloudEvent[].class))); if (!skipValidation) { for (CloudEvent event : events) { if (event.getId() == null || event.getSource() == null || event.getType() == null) { throw LOGGER.logExceptionAsError(new IllegalArgumentException( "'id', 'source' and 'type' are mandatory attributes for a CloudEvent. " + "Check if the input param is a JSON string for a CloudEvent or an array of it.")); } } } return events; } catch (UncheckedIOException uncheckedIOException) { throw LOGGER.logExceptionAsError(new IllegalArgumentException("The input parameter isn't a JSON string.", uncheckedIOException.getCause())); } } /** * Get the id of the cloud event. * @return the id. */ public String getId() { return this.id; } /** * Set a custom id. Note that a random id is already set by default. * @param id the id to set. * * @return the cloud event itself. * @throws NullPointerException if id is null. * @throws IllegalArgumentException if id is empty. 
*/ public CloudEvent setId(String id) { if (Objects.isNull(id)) { throw LOGGER.logExceptionAsError(new NullPointerException("id cannot be null")); } if (id.isEmpty()) { throw LOGGER.logExceptionAsError(new IllegalArgumentException("id cannot be empty")); } this.id = id; return this; } /** * Get the URI source of the event. * @return the source. */ public String getSource() { return this.source; } /** * Get the data associated with this event as a {@link BinaryData}, which has API to deserialize the data into * a String, an Object, or a byte[]. * @return A {@link BinaryData} that wraps the this event's data payload. */ public BinaryData getData() { if (this.binaryData == null) { if (this.data != null) { if (this.data instanceof String) { this.binaryData = BinaryData.fromString((String) this.data); } else if (this.data instanceof byte[]) { this.binaryData = BinaryData.fromBytes((byte[]) this.data); } else { this.binaryData = BinaryData.fromObject(this.data, SERIALIZER); } } else if (this.dataBase64 != null) { this.binaryData = BinaryData.fromString(this.dataBase64); } } return this.binaryData; } /** * Get the type of event, e.g. "Contoso.Items.ItemReceived". * @return the type of the event. */ public String getType() { return this.type; } /** * Get the time associated with the occurrence of the event. * @return the event time, or null if the time is not set. */ public OffsetDateTime getTime() { return this.time; } /** * Set the time associated with the occurrence of the event. * @param time the time to set. * * @return the cloud event itself. */ public CloudEvent setTime(OffsetDateTime time) { this.time = time; return this; } /** * Get the content MIME type that the data is in. A null value indicates that the data is either nonexistent or in the * "application/json" type. Note that "application/json" is still a possible value for this field. * @return the content type the data is in, or null if the data is nonexistent or in "application/json" format. 
*/ public String getDataContentType() { return this.dataContentType; } /** * Get the schema that the data adheres to. * @return a URI of the data schema, or null if it is not set. */ public String getDataSchema() { return this.dataSchema; } /** * Set the schema that the data adheres to. * @param dataSchema a URI identifying the schema of the data. * * @return the cloud event itself. */ public CloudEvent setDataSchema(String dataSchema) { this.dataSchema = dataSchema; return this; } /** * Get the subject associated with this event. * @return the subject, or null if the subject was not set. */ public String getSubject() { return this.subject; } /** * Set the subject of the event. * @param subject the subject to set. * * @return the cloud event itself. */ public CloudEvent setSubject(String subject) { this.subject = subject; return this; } /** * Get a map of the additional user-defined attributes associated with this event. * @return the extension attributes as an unmodifiable map. */ @JsonAnyGetter public Map<String, Object> getExtensionAttributes() { return this.extensionAttributes; } /** * Add/Overwrite a single extension attribute to the cloud event. * @param name the name of the attribute. It must contains only alphanumeric characters and not be be any * CloudEvent reserved attribute names. * @param value the value to associate with the name. * * @return the cloud event itself. * @throws IllegalArgumentException if name format isn't correct. 
*/ @JsonAnySetter public CloudEvent addExtensionAttribute(String name, Object value) { if (Objects.isNull(name)) { throw LOGGER.logExceptionAsError(new IllegalArgumentException("'name' cannot be null.")); } if (Objects.isNull(value)) { throw LOGGER.logExceptionAsError(new IllegalArgumentException("'value' cannot be null.")); } if (!validateAttributeName(name)) { throw LOGGER.logExceptionAsError(new IllegalArgumentException( "'name' must have only small-case alphanumeric characters and not be one of the CloudEvent reserved " + "attribute names")); } if (this.extensionAttributes == null) { this.extensionAttributes = new HashMap<>(); } this.extensionAttributes.put(name.toLowerCase(Locale.ENGLISH), value); return this; } /** * Get the spec version. Users don't need to access it because it's always 1.0. * Make it package level to test deserialization. * @return The spec version. */ String getSpecVersion() { return this.specVersion; } /** * Set the spec version. Users don't need to access it because it's always 1.0. * Make it package level to test serialization. * @return the cloud event itself. 
*/ CloudEvent setSpecVersion(String specVersion) { this.specVersion = specVersion; return this; } private static boolean validateAttributeName(String name) { if (RESERVED_ATTRIBUTE_NAMES.contains(name)) { return false; } for (int i = 0; i < name.length(); i++) { char c = name.charAt(i); if (!((c >= 'a' && c <= 'z') || (c >= '0' && c <= '9'))) { return false; } } return true; } static class JacksonSerializer implements JsonSerializer { private final JacksonAdapter jacksonAdapter = new JacksonAdapter(); @Override public <T> T deserialize(InputStream stream, TypeReference<T> typeReference) { try { return jacksonAdapter.deserialize(stream, typeReference.getJavaType(), SerializerEncoding.JSON); } catch (IOException e) { throw LOGGER.logExceptionAsError(new RuntimeException(e)); } } @Override public <T> Mono<T> deserializeAsync(InputStream stream, TypeReference<T> typeReference) { return Mono.defer(() -> Mono.just(deserialize(stream, typeReference))); } @Override public void serialize(OutputStream stream, Object value) { try { jacksonAdapter.serialize(value, SerializerEncoding.JSON, stream); } catch (IOException e) { throw LOGGER.logExceptionAsError(new RuntimeException(e)); } } @Override public Mono<Void> serializeAsync(OutputStream stream, Object value) { return Mono.fromRunnable(() -> serialize(stream, value)); } JacksonAdapter getJacksonAdapter() { return jacksonAdapter; } } }
class accepts any String for compatibility with legacy systems. * @param type Type of event related to the originating occurrence. * @param data A {@link BinaryData}
Should use the generics-supported type reference `new TypeReference<List<CloudEvent>>() { }`, and make it a constant on `CloudEvent`.
/**
 * Deserializes one or more {@link CloudEvent CloudEvents} from a JSON string.
 *
 * @param cloudEventsJson the JSON payload containing one or more events.
 * @param skipValidation set to true to skip validating that each deserialized CloudEvent has non-null
 *        'id', 'source' and 'type' attributes.
 * @return all of the events in the payload deserialized as {@link CloudEvent CloudEvents}.
 * @throws NullPointerException if {@code cloudEventsJson} is null.
 * @throws IllegalArgumentException if the input isn't a JSON string for a cloud event or an array of
 *         them, or (when validation is enabled) an event is missing a mandatory attribute.
 */
public static List<CloudEvent> fromString(String cloudEventsJson, boolean skipValidation) {
    // requireNonNull rather than a logged NPE: an invalid argument is a caller bug, not an
    // operational error that should show up in log stores.
    Objects.requireNonNull(cloudEventsJson, "'cloudEventsJson' cannot be null");
    try {
        // TODO(review): consider a shared static TypeReference<List<CloudEvent>> constant instead of
        // deserializing to an array and wrapping it with Arrays.asList.
        List<CloudEvent> events = Arrays.asList(SERIALIZER.deserialize(
            new ByteArrayInputStream(cloudEventsJson.getBytes(StandardCharsets.UTF_8)),
            TypeReference.createInstance(CloudEvent[].class)));
        if (skipValidation) {
            return events;
        }
        for (CloudEvent event : events) {
            if (event.getId() == null || event.getSource() == null || event.getType() == null) {
                throw LOGGER.logExceptionAsError(new IllegalArgumentException(
                    "'id', 'source' and 'type' are mandatory attributes for a CloudEvent. "
                        + "Check if the input param is a JSON string for a CloudEvent or an array of it."));
            }
        }
        return events;
    } catch (UncheckedIOException uncheckedIOException) {
        // The serializer wraps parse failures in UncheckedIOException; surface the root cause.
        throw LOGGER.logExceptionAsError(new IllegalArgumentException("The input parameter isn't a JSON string.",
            uncheckedIOException.getCause()));
    }
}
TypeReference.createInstance(CloudEvent[].class)));
/**
 * Deserializes a list of {@link CloudEvent CloudEvents} from a JSON string, optionally validating
 * that every event carries the mandatory 'id', 'source' and 'type' attributes.
 *
 * @param cloudEventsJson the JSON payload containing one or more events.
 * @param skipValidation set to true to skip the mandatory-attribute validation.
 * @return all of the events in the payload deserialized as {@link CloudEvent CloudEvents}.
 * @throws NullPointerException if {@code cloudEventsJson} is null.
 * @throws IllegalArgumentException if the input isn't a JSON string for a cloud event or an array
 *         of them, or validation is enabled and an event is missing a mandatory attribute.
 */
public static List<CloudEvent> fromString(String cloudEventsJson, boolean skipValidation) {
    Objects.requireNonNull(cloudEventsJson, "'cloudEventsJson' cannot be null");
    try {
        byte[] jsonBytes = cloudEventsJson.getBytes(StandardCharsets.UTF_8);
        List<CloudEvent> deserializedEvents = SERIALIZER.deserialize(
            new ByteArrayInputStream(jsonBytes), DESERIALIZER_TYPE_REFERENCE);
        if (!skipValidation) {
            for (CloudEvent deserializedEvent : deserializedEvents) {
                boolean missingMandatoryAttribute = deserializedEvent.getId() == null
                    || deserializedEvent.getSource() == null
                    || deserializedEvent.getType() == null;
                if (missingMandatoryAttribute) {
                    throw LOGGER.logExceptionAsError(new IllegalArgumentException(
                        "'id', 'source' and 'type' are mandatory attributes for a CloudEvent. "
                            + "Check if the input param is a JSON string for a CloudEvent or an array of it."));
                }
            }
        }
        return deserializedEvents;
    } catch (UncheckedIOException uncheckedIOException) {
        // Parse failures surface as UncheckedIOException; rethrow with the underlying cause.
        throw LOGGER.logExceptionAsError(new IllegalArgumentException("The input parameter isn't a JSON string.",
            uncheckedIOException.getCause()));
    }
}
class CloudEvent { private static final String SPEC_VERSION = "1.0"; private static final JsonSerializer SERIALIZER; static { JsonSerializer tmp; try { tmp = JsonSerializerProviders.createInstance(); } catch (IllegalStateException e) { tmp = new JacksonSerializer(); } SERIALIZER = tmp; } private static final ClientLogger LOGGER = new ClientLogger(CloudEvent.class); private static final Set<String> RESERVED_ATTRIBUTE_NAMES = new HashSet<>(Arrays.asList( "specversion", "id", "source", "type", "datacontenttype", "dataschema", "subject", "time", "data" )); /* * An identifier for the event. The combination of id and source must be * unique for each distinct event. */ @JsonProperty(value = "id", required = true) private String id; /* * Identifies the context in which an event happened. The combination of id * and source must be unique for each distinct event. */ @JsonProperty(value = "source", required = true) private String source; /* * Event data specific to the event type. */ @JsonProperty(value = "data") private Object data; /* * Event data specific to the event type, encoded as a base64 string. */ @JsonProperty(value = "data_base64") private String dataBase64; /* * Type of event related to the originating occurrence. */ @JsonProperty(value = "type", required = true) private String type; /* * The time (in UTC) the event was generated, in RFC3339 format. */ @JsonProperty(value = "time") private OffsetDateTime time; /* * The version of the CloudEvents specification which the event uses. */ @JsonProperty(value = "specversion", required = true) private String specVersion; /* * Identifies the schema that data adheres to. */ @JsonProperty(value = "dataschema") private String dataSchema; /* * Content type of data value. */ @JsonProperty(value = "datacontenttype") private String dataContentType; /* * This describes the subject of the event in the context of the event * producer (identified by source). 
*/ @JsonProperty(value = "subject") private String subject; @JsonIgnore private Map<String, Object> extensionAttributes; /* * Cache serialized data for getData() */ @JsonIgnore private BinaryData binaryData; /** * * @param source Identifies the context in which an event happened. The combination of id and source must be unique * for each distinct event. * @param type Type of event related to the originating occurrence. * @param data A {@link BinaryData} that wraps the original data, which can be a String, byte[], or model class. * @param format Set to {@link CloudEventDataFormat * {@link CloudEventDataFormat * @param dataContentType The content type of the data. It has no impact on how the data is serialized but tells * the event subscriber how to use the data. Typically the value is of MIME types such as * "application/json", "text/plain", "text/xml", "application/+avro", etc. It can be null. * @throws NullPointerException if source, type, data, or format is null. */ public CloudEvent(String source, String type, BinaryData data, CloudEventDataFormat format, String dataContentType) { if (Objects.isNull(source)) { throw LOGGER.logExceptionAsError(new NullPointerException("'source' cannot be null.")); } if (Objects.isNull(type)) { throw LOGGER.logExceptionAsError(new NullPointerException("'type' cannot be null.")); } if (Objects.isNull(data)) { throw LOGGER.logExceptionAsError(new NullPointerException("'data' cannot be null.")); } if (Objects.isNull(format)) { throw LOGGER.logExceptionAsError(new NullPointerException("'format' cannot be null.")); } this.source = source; this.type = type; if (CloudEventDataFormat.BYTES == format) { this.dataBase64 = Base64.getEncoder().encodeToString(data.toBytes()); } else { this.data = data.toString(); } this.dataContentType = dataContentType; this.id = UUID.randomUUID().toString(); this.specVersion = CloudEvent.SPEC_VERSION; } private CloudEvent() { } /** * Deserialize a list of {@link CloudEvent CloudEvents} from a JSON string and 
validate whether any CloudEvents have * null id', 'source', or 'type'. If you want to skip this validation, use {@link * @param cloudEventsJson the JSON payload containing one or more events. * * @return all of the events in the payload deserialized as {@link CloudEvent CloudEvents}. * @throws NullPointerException if cloudEventsJson is null. * @throws IllegalArgumentException if the input parameter isn't a correct JSON string for a cloud event * or an array of it, or any deserialized CloudEvents have null 'id', 'source', or 'type'. */ public static List<CloudEvent> fromString(String cloudEventsJson) { return fromString(cloudEventsJson, false); } /** * Deserialize a list of {@link CloudEvent CloudEvents} from a JSON string. * @param cloudEventsJson the JSON payload containing one or more events. * @param skipValidation set to true if you'd like to skip the validation for the deserialized CloudEvents. A valid * CloudEvent should have 'id', 'source' and 'type' not null. * * @return all of the events in the payload deserialized as {@link CloudEvent CloudEvents}. * @throws NullPointerException if cloudEventsJson is null. * @throws IllegalArgumentException if the input parameter isn't a JSON string for a cloud event or an array of it, * or skipValidation is false and any CloudEvents have null id', 'source', or 'type'. */ /** * Get the id of the cloud event. * @return the id. */ public String getId() { return this.id; } /** * Set a custom id. Note that a random id is already set by default. * @param id the id to set. * * @return the cloud event itself. * @throws NullPointerException if id is null. * @throws IllegalArgumentException if id is empty. */ public CloudEvent setId(String id) { if (Objects.isNull(id)) { throw LOGGER.logExceptionAsError(new NullPointerException("id cannot be null")); } if (id.isEmpty()) { throw LOGGER.logExceptionAsError(new IllegalArgumentException("id cannot be empty")); } this.id = id; return this; } /** * Get the URI source of the event. 
* @return the source. */ public String getSource() { return this.source; } /** * Get the data associated with this event as a {@link BinaryData}, which has API to deserialize the data into * a String, an Object, or a byte[]. * @return A {@link BinaryData} that wraps the this event's data payload. */ public BinaryData getData() { if (this.binaryData == null) { if (this.data != null) { if (this.data instanceof String) { this.binaryData = BinaryData.fromString((String) this.data); } else if (this.data instanceof byte[]) { this.binaryData = BinaryData.fromBytes((byte[]) this.data); } else { this.binaryData = BinaryData.fromObject(this.data, SERIALIZER); } } else if (this.dataBase64 != null) { this.binaryData = BinaryData.fromString(this.dataBase64); } } return this.binaryData; } /** * Get the type of event, e.g. "Contoso.Items.ItemReceived". * @return the type of the event. */ public String getType() { return this.type; } /** * Get the time associated with the occurrence of the event. * @return the event time, or null if the time is not set. */ public OffsetDateTime getTime() { return this.time; } /** * Set the time associated with the occurrence of the event. * @param time the time to set. * * @return the cloud event itself. */ public CloudEvent setTime(OffsetDateTime time) { this.time = time; return this; } /** * Get the content MIME type that the data is in. A null value indicates that the data is either nonexistent or in the * "application/json" type. Note that "application/json" is still a possible value for this field. * @return the content type the data is in, or null if the data is nonexistent or in "application/json" format. */ public String getDataContentType() { return this.dataContentType; } /** * Get the schema that the data adheres to. * @return a URI of the data schema, or null if it is not set. */ public String getDataSchema() { return this.dataSchema; } /** * Set the schema that the data adheres to. 
* @param dataSchema a URI identifying the schema of the data. * * @return the cloud event itself. */ public CloudEvent setDataSchema(String dataSchema) { this.dataSchema = dataSchema; return this; } /** * Get the subject associated with this event. * @return the subject, or null if the subject was not set. */ public String getSubject() { return this.subject; } /** * Set the subject of the event. * @param subject the subject to set. * * @return the cloud event itself. */ public CloudEvent setSubject(String subject) { this.subject = subject; return this; } /** * Get a map of the additional user-defined attributes associated with this event. * @return the extension attributes as an unmodifiable map. */ @JsonAnyGetter public Map<String, Object> getExtensionAttributes() { return this.extensionAttributes; } /** * Add/Overwrite a single extension attribute to the cloud event. * @param name the name of the attribute. It must contains only alphanumeric characters and not be be any * CloudEvent reserved attribute names. * @param value the value to associate with the name. * * @return the cloud event itself. * @throws IllegalArgumentException if name format isn't correct. */ @JsonAnySetter public CloudEvent addExtensionAttribute(String name, Object value) { if (Objects.isNull(name)) { throw LOGGER.logExceptionAsError(new IllegalArgumentException("'name' cannot be null.")); } if (Objects.isNull(value)) { throw LOGGER.logExceptionAsError(new IllegalArgumentException("'value' cannot be null.")); } if (!validateAttributeName(name)) { throw LOGGER.logExceptionAsError(new IllegalArgumentException( "'name' must have only small-case alphanumeric characters and not be one of the CloudEvent reserved " + "attribute names")); } if (this.extensionAttributes == null) { this.extensionAttributes = new HashMap<>(); } this.extensionAttributes.put(name.toLowerCase(Locale.ENGLISH), value); return this; } /** * Get the spec version. Users don't need to access it because it's always 1.0. 
* Make it package level to test deserialization. * @return The spec version. */ String getSpecVersion() { return this.specVersion; } /** * Set the spec version. Users don't need to access it because it's always 1.0. * Make it package level to test serialization. * @return the cloud event itself. */ CloudEvent setSpecVersion(String specVersion) { this.specVersion = specVersion; return this; } private static boolean validateAttributeName(String name) { if (RESERVED_ATTRIBUTE_NAMES.contains(name)) { return false; } for (int i = 0; i < name.length(); i++) { char c = name.charAt(i); if (!((c >= 'a' && c <= 'z') || (c >= '0' && c <= '9'))) { return false; } } return true; } static class JacksonSerializer implements JsonSerializer { private final JacksonAdapter jacksonAdapter = new JacksonAdapter(); @Override public <T> T deserialize(InputStream stream, TypeReference<T> typeReference) { try { return jacksonAdapter.deserialize(stream, typeReference.getJavaType(), SerializerEncoding.JSON); } catch (IOException e) { throw LOGGER.logExceptionAsError(new RuntimeException(e)); } } @Override public <T> Mono<T> deserializeAsync(InputStream stream, TypeReference<T> typeReference) { return Mono.defer(() -> Mono.just(deserialize(stream, typeReference))); } @Override public void serialize(OutputStream stream, Object value) { try { jacksonAdapter.serialize(value, SerializerEncoding.JSON, stream); } catch (IOException e) { throw LOGGER.logExceptionAsError(new RuntimeException(e)); } } @Override public Mono<Void> serializeAsync(OutputStream stream, Object value) { return Mono.fromRunnable(() -> serialize(stream, value)); } JacksonAdapter getJacksonAdapter() { return jacksonAdapter; } } }
class accepts any String for compatibility with legacy systems. * @param type Type of event related to the originating occurrence. * @param data A {@link BinaryData}
Let's invert this to make `skipValidation` an early out if it is true
public static List<CloudEvent> fromString(String cloudEventsJson, boolean skipValidation) { if (cloudEventsJson == null) { throw LOGGER.logExceptionAsError(new NullPointerException("'cloudEventsJson' cannot be null")); } try { List<CloudEvent> events = Arrays.asList(SERIALIZER.deserialize( new ByteArrayInputStream(cloudEventsJson.getBytes(StandardCharsets.UTF_8)), TypeReference.createInstance(CloudEvent[].class))); if (!skipValidation) { for (CloudEvent event : events) { if (event.getId() == null || event.getSource() == null || event.getType() == null) { throw LOGGER.logExceptionAsError(new IllegalArgumentException( "'id', 'source' and 'type' are mandatory attributes for a CloudEvent. " + "Check if the input param is a JSON string for a CloudEvent or an array of it.")); } } } return events; } catch (UncheckedIOException uncheckedIOException) { throw LOGGER.logExceptionAsError(new IllegalArgumentException("The input parameter isn't a JSON string.", uncheckedIOException.getCause())); } }
if (!skipValidation) {
public static List<CloudEvent> fromString(String cloudEventsJson, boolean skipValidation) { Objects.requireNonNull(cloudEventsJson, "'cloudEventsJson' cannot be null"); try { List<CloudEvent> events = SERIALIZER.deserialize( new ByteArrayInputStream(cloudEventsJson.getBytes(StandardCharsets.UTF_8)), DESERIALIZER_TYPE_REFERENCE); if (skipValidation) { return events; } for (CloudEvent event : events) { if (event.getId() == null || event.getSource() == null || event.getType() == null) { throw LOGGER.logExceptionAsError(new IllegalArgumentException( "'id', 'source' and 'type' are mandatory attributes for a CloudEvent. " + "Check if the input param is a JSON string for a CloudEvent or an array of it.")); } } return events; } catch (UncheckedIOException uncheckedIOException) { throw LOGGER.logExceptionAsError(new IllegalArgumentException("The input parameter isn't a JSON string.", uncheckedIOException.getCause())); } }
class CloudEvent { private static final String SPEC_VERSION = "1.0"; private static final JsonSerializer SERIALIZER; static { JsonSerializer tmp; try { tmp = JsonSerializerProviders.createInstance(); } catch (IllegalStateException e) { tmp = new JacksonSerializer(); } SERIALIZER = tmp; } private static final ClientLogger LOGGER = new ClientLogger(CloudEvent.class); private static final Set<String> RESERVED_ATTRIBUTE_NAMES = new HashSet<>(Arrays.asList( "specversion", "id", "source", "type", "datacontenttype", "dataschema", "subject", "time", "data" )); /* * An identifier for the event. The combination of id and source must be * unique for each distinct event. */ @JsonProperty(value = "id", required = true) private String id; /* * Identifies the context in which an event happened. The combination of id * and source must be unique for each distinct event. */ @JsonProperty(value = "source", required = true) private String source; /* * Event data specific to the event type. */ @JsonProperty(value = "data") private Object data; /* * Event data specific to the event type, encoded as a base64 string. */ @JsonProperty(value = "data_base64") private String dataBase64; /* * Type of event related to the originating occurrence. */ @JsonProperty(value = "type", required = true) private String type; /* * The time (in UTC) the event was generated, in RFC3339 format. */ @JsonProperty(value = "time") private OffsetDateTime time; /* * The version of the CloudEvents specification which the event uses. */ @JsonProperty(value = "specversion", required = true) private String specVersion; /* * Identifies the schema that data adheres to. */ @JsonProperty(value = "dataschema") private String dataSchema; /* * Content type of data value. */ @JsonProperty(value = "datacontenttype") private String dataContentType; /* * This describes the subject of the event in the context of the event * producer (identified by source). 
*/ @JsonProperty(value = "subject") private String subject; @JsonIgnore private Map<String, Object> extensionAttributes; /* * Cache serialized data for getData() */ @JsonIgnore private BinaryData binaryData; /** * * @param source Identifies the context in which an event happened. The combination of id and source must be unique * for each distinct event. * @param type Type of event related to the originating occurrence. * @param data A {@link BinaryData} that wraps the original data, which can be a String, byte[], or model class. * @param format Set to {@link CloudEventDataFormat * {@link CloudEventDataFormat * @param dataContentType The content type of the data. It has no impact on how the data is serialized but tells * the event subscriber how to use the data. Typically the value is of MIME types such as * "application/json", "text/plain", "text/xml", "application/+avro", etc. It can be null. * @throws NullPointerException if source, type, data, or format is null. */ public CloudEvent(String source, String type, BinaryData data, CloudEventDataFormat format, String dataContentType) { if (Objects.isNull(source)) { throw LOGGER.logExceptionAsError(new NullPointerException("'source' cannot be null.")); } if (Objects.isNull(type)) { throw LOGGER.logExceptionAsError(new NullPointerException("'type' cannot be null.")); } if (Objects.isNull(data)) { throw LOGGER.logExceptionAsError(new NullPointerException("'data' cannot be null.")); } if (Objects.isNull(format)) { throw LOGGER.logExceptionAsError(new NullPointerException("'format' cannot be null.")); } this.source = source; this.type = type; if (CloudEventDataFormat.BYTES == format) { this.dataBase64 = Base64.getEncoder().encodeToString(data.toBytes()); } else { this.data = data.toString(); } this.dataContentType = dataContentType; this.id = UUID.randomUUID().toString(); this.specVersion = CloudEvent.SPEC_VERSION; } private CloudEvent() { } /** * Deserialize a list of {@link CloudEvent CloudEvents} from a JSON string and 
validate whether any CloudEvents have * null id', 'source', or 'type'. If you want to skip this validation, use {@link * @param cloudEventsJson the JSON payload containing one or more events. * * @return all of the events in the payload deserialized as {@link CloudEvent CloudEvents}. * @throws NullPointerException if cloudEventsJson is null. * @throws IllegalArgumentException if the input parameter isn't a correct JSON string for a cloud event * or an array of it, or any deserialized CloudEvents have null 'id', 'source', or 'type'. */ public static List<CloudEvent> fromString(String cloudEventsJson) { return fromString(cloudEventsJson, false); } /** * Deserialize a list of {@link CloudEvent CloudEvents} from a JSON string. * @param cloudEventsJson the JSON payload containing one or more events. * @param skipValidation set to true if you'd like to skip the validation for the deserialized CloudEvents. A valid * CloudEvent should have 'id', 'source' and 'type' not null. * * @return all of the events in the payload deserialized as {@link CloudEvent CloudEvents}. * @throws NullPointerException if cloudEventsJson is null. * @throws IllegalArgumentException if the input parameter isn't a JSON string for a cloud event or an array of it, * or skipValidation is false and any CloudEvents have null id', 'source', or 'type'. */ /** * Get the id of the cloud event. * @return the id. */ public String getId() { return this.id; } /** * Set a custom id. Note that a random id is already set by default. * @param id the id to set. * * @return the cloud event itself. * @throws NullPointerException if id is null. * @throws IllegalArgumentException if id is empty. */ public CloudEvent setId(String id) { if (Objects.isNull(id)) { throw LOGGER.logExceptionAsError(new NullPointerException("id cannot be null")); } if (id.isEmpty()) { throw LOGGER.logExceptionAsError(new IllegalArgumentException("id cannot be empty")); } this.id = id; return this; } /** * Get the URI source of the event. 
* @return the source. */ public String getSource() { return this.source; } /** * Get the data associated with this event as a {@link BinaryData}, which has API to deserialize the data into * a String, an Object, or a byte[]. * @return A {@link BinaryData} that wraps the this event's data payload. */ public BinaryData getData() { if (this.binaryData == null) { if (this.data != null) { if (this.data instanceof String) { this.binaryData = BinaryData.fromString((String) this.data); } else if (this.data instanceof byte[]) { this.binaryData = BinaryData.fromBytes((byte[]) this.data); } else { this.binaryData = BinaryData.fromObject(this.data, SERIALIZER); } } else if (this.dataBase64 != null) { this.binaryData = BinaryData.fromString(this.dataBase64); } } return this.binaryData; } /** * Get the type of event, e.g. "Contoso.Items.ItemReceived". * @return the type of the event. */ public String getType() { return this.type; } /** * Get the time associated with the occurrence of the event. * @return the event time, or null if the time is not set. */ public OffsetDateTime getTime() { return this.time; } /** * Set the time associated with the occurrence of the event. * @param time the time to set. * * @return the cloud event itself. */ public CloudEvent setTime(OffsetDateTime time) { this.time = time; return this; } /** * Get the content MIME type that the data is in. A null value indicates that the data is either nonexistent or in the * "application/json" type. Note that "application/json" is still a possible value for this field. * @return the content type the data is in, or null if the data is nonexistent or in "application/json" format. */ public String getDataContentType() { return this.dataContentType; } /** * Get the schema that the data adheres to. * @return a URI of the data schema, or null if it is not set. */ public String getDataSchema() { return this.dataSchema; } /** * Set the schema that the data adheres to. 
* @param dataSchema a URI identifying the schema of the data. * * @return the cloud event itself. */ public CloudEvent setDataSchema(String dataSchema) { this.dataSchema = dataSchema; return this; } /** * Get the subject associated with this event. * @return the subject, or null if the subject was not set. */ public String getSubject() { return this.subject; } /** * Set the subject of the event. * @param subject the subject to set. * * @return the cloud event itself. */ public CloudEvent setSubject(String subject) { this.subject = subject; return this; } /** * Get a map of the additional user-defined attributes associated with this event. * @return the extension attributes as an unmodifiable map. */ @JsonAnyGetter public Map<String, Object> getExtensionAttributes() { return this.extensionAttributes; } /** * Add/Overwrite a single extension attribute to the cloud event. * @param name the name of the attribute. It must contains only alphanumeric characters and not be be any * CloudEvent reserved attribute names. * @param value the value to associate with the name. * * @return the cloud event itself. * @throws IllegalArgumentException if name format isn't correct. */ @JsonAnySetter public CloudEvent addExtensionAttribute(String name, Object value) { if (Objects.isNull(name)) { throw LOGGER.logExceptionAsError(new IllegalArgumentException("'name' cannot be null.")); } if (Objects.isNull(value)) { throw LOGGER.logExceptionAsError(new IllegalArgumentException("'value' cannot be null.")); } if (!validateAttributeName(name)) { throw LOGGER.logExceptionAsError(new IllegalArgumentException( "'name' must have only small-case alphanumeric characters and not be one of the CloudEvent reserved " + "attribute names")); } if (this.extensionAttributes == null) { this.extensionAttributes = new HashMap<>(); } this.extensionAttributes.put(name.toLowerCase(Locale.ENGLISH), value); return this; } /** * Get the spec version. Users don't need to access it because it's always 1.0. 
* Make it package level to test deserialization. * @return The spec version. */ String getSpecVersion() { return this.specVersion; } /** * Set the spec version. Users don't need to access it because it's always 1.0. * Make it package level to test serialization. * @return the cloud event itself. */ CloudEvent setSpecVersion(String specVersion) { this.specVersion = specVersion; return this; } private static boolean validateAttributeName(String name) { if (RESERVED_ATTRIBUTE_NAMES.contains(name)) { return false; } for (int i = 0; i < name.length(); i++) { char c = name.charAt(i); if (!((c >= 'a' && c <= 'z') || (c >= '0' && c <= '9'))) { return false; } } return true; } static class JacksonSerializer implements JsonSerializer { private final JacksonAdapter jacksonAdapter = new JacksonAdapter(); @Override public <T> T deserialize(InputStream stream, TypeReference<T> typeReference) { try { return jacksonAdapter.deserialize(stream, typeReference.getJavaType(), SerializerEncoding.JSON); } catch (IOException e) { throw LOGGER.logExceptionAsError(new RuntimeException(e)); } } @Override public <T> Mono<T> deserializeAsync(InputStream stream, TypeReference<T> typeReference) { return Mono.defer(() -> Mono.just(deserialize(stream, typeReference))); } @Override public void serialize(OutputStream stream, Object value) { try { jacksonAdapter.serialize(value, SerializerEncoding.JSON, stream); } catch (IOException e) { throw LOGGER.logExceptionAsError(new RuntimeException(e)); } } @Override public Mono<Void> serializeAsync(OutputStream stream, Object value) { return Mono.fromRunnable(() -> serialize(stream, value)); } JacksonAdapter getJacksonAdapter() { return jacksonAdapter; } } }
class accepts any String for compatibility with legacy systems. * @param type Type of event related to the originating occurrence. * @param data A {@link BinaryData}
Should we include anything else in this log error? If I saw this in my system with little to no context, there may not be an easy way to understand how to fix it.
public static List<CloudEvent> fromString(String cloudEventsJson, boolean skipValidation) { if (cloudEventsJson == null) { throw LOGGER.logExceptionAsError(new NullPointerException("'cloudEventsJson' cannot be null")); } try { List<CloudEvent> events = Arrays.asList(SERIALIZER.deserialize( new ByteArrayInputStream(cloudEventsJson.getBytes(StandardCharsets.UTF_8)), TypeReference.createInstance(CloudEvent[].class))); if (!skipValidation) { for (CloudEvent event : events) { if (event.getId() == null || event.getSource() == null || event.getType() == null) { throw LOGGER.logExceptionAsError(new IllegalArgumentException( "'id', 'source' and 'type' are mandatory attributes for a CloudEvent. " + "Check if the input param is a JSON string for a CloudEvent or an array of it.")); } } } return events; } catch (UncheckedIOException uncheckedIOException) { throw LOGGER.logExceptionAsError(new IllegalArgumentException("The input parameter isn't a JSON string.", uncheckedIOException.getCause())); } }
throw LOGGER.logExceptionAsError(new IllegalArgumentException(
public static List<CloudEvent> fromString(String cloudEventsJson, boolean skipValidation) { Objects.requireNonNull(cloudEventsJson, "'cloudEventsJson' cannot be null"); try { List<CloudEvent> events = SERIALIZER.deserialize( new ByteArrayInputStream(cloudEventsJson.getBytes(StandardCharsets.UTF_8)), DESERIALIZER_TYPE_REFERENCE); if (skipValidation) { return events; } for (CloudEvent event : events) { if (event.getId() == null || event.getSource() == null || event.getType() == null) { throw LOGGER.logExceptionAsError(new IllegalArgumentException( "'id', 'source' and 'type' are mandatory attributes for a CloudEvent. " + "Check if the input param is a JSON string for a CloudEvent or an array of it.")); } } return events; } catch (UncheckedIOException uncheckedIOException) { throw LOGGER.logExceptionAsError(new IllegalArgumentException("The input parameter isn't a JSON string.", uncheckedIOException.getCause())); } }
class CloudEvent { private static final String SPEC_VERSION = "1.0"; private static final JsonSerializer SERIALIZER; static { JsonSerializer tmp; try { tmp = JsonSerializerProviders.createInstance(); } catch (IllegalStateException e) { tmp = new JacksonSerializer(); } SERIALIZER = tmp; } private static final ClientLogger LOGGER = new ClientLogger(CloudEvent.class); private static final Set<String> RESERVED_ATTRIBUTE_NAMES = new HashSet<>(Arrays.asList( "specversion", "id", "source", "type", "datacontenttype", "dataschema", "subject", "time", "data" )); /* * An identifier for the event. The combination of id and source must be * unique for each distinct event. */ @JsonProperty(value = "id", required = true) private String id; /* * Identifies the context in which an event happened. The combination of id * and source must be unique for each distinct event. */ @JsonProperty(value = "source", required = true) private String source; /* * Event data specific to the event type. */ @JsonProperty(value = "data") private Object data; /* * Event data specific to the event type, encoded as a base64 string. */ @JsonProperty(value = "data_base64") private String dataBase64; /* * Type of event related to the originating occurrence. */ @JsonProperty(value = "type", required = true) private String type; /* * The time (in UTC) the event was generated, in RFC3339 format. */ @JsonProperty(value = "time") private OffsetDateTime time; /* * The version of the CloudEvents specification which the event uses. */ @JsonProperty(value = "specversion", required = true) private String specVersion; /* * Identifies the schema that data adheres to. */ @JsonProperty(value = "dataschema") private String dataSchema; /* * Content type of data value. */ @JsonProperty(value = "datacontenttype") private String dataContentType; /* * This describes the subject of the event in the context of the event * producer (identified by source). 
*/ @JsonProperty(value = "subject") private String subject; @JsonIgnore private Map<String, Object> extensionAttributes; /* * Cache serialized data for getData() */ @JsonIgnore private BinaryData binaryData; /** * * @param source Identifies the context in which an event happened. The combination of id and source must be unique * for each distinct event. * @param type Type of event related to the originating occurrence. * @param data A {@link BinaryData} that wraps the original data, which can be a String, byte[], or model class. * @param format Set to {@link CloudEventDataFormat * {@link CloudEventDataFormat * @param dataContentType The content type of the data. It has no impact on how the data is serialized but tells * the event subscriber how to use the data. Typically the value is of MIME types such as * "application/json", "text/plain", "text/xml", "application/+avro", etc. It can be null. * @throws NullPointerException if source, type, data, or format is null. */ public CloudEvent(String source, String type, BinaryData data, CloudEventDataFormat format, String dataContentType) { if (Objects.isNull(source)) { throw LOGGER.logExceptionAsError(new NullPointerException("'source' cannot be null.")); } if (Objects.isNull(type)) { throw LOGGER.logExceptionAsError(new NullPointerException("'type' cannot be null.")); } if (Objects.isNull(data)) { throw LOGGER.logExceptionAsError(new NullPointerException("'data' cannot be null.")); } if (Objects.isNull(format)) { throw LOGGER.logExceptionAsError(new NullPointerException("'format' cannot be null.")); } this.source = source; this.type = type; if (CloudEventDataFormat.BYTES == format) { this.dataBase64 = Base64.getEncoder().encodeToString(data.toBytes()); } else { this.data = data.toString(); } this.dataContentType = dataContentType; this.id = UUID.randomUUID().toString(); this.specVersion = CloudEvent.SPEC_VERSION; } private CloudEvent() { } /** * Deserialize a list of {@link CloudEvent CloudEvents} from a JSON string and 
validate whether any CloudEvents have * null id', 'source', or 'type'. If you want to skip this validation, use {@link * @param cloudEventsJson the JSON payload containing one or more events. * * @return all of the events in the payload deserialized as {@link CloudEvent CloudEvents}. * @throws NullPointerException if cloudEventsJson is null. * @throws IllegalArgumentException if the input parameter isn't a correct JSON string for a cloud event * or an array of it, or any deserialized CloudEvents have null 'id', 'source', or 'type'. */ public static List<CloudEvent> fromString(String cloudEventsJson) { return fromString(cloudEventsJson, false); } /** * Deserialize a list of {@link CloudEvent CloudEvents} from a JSON string. * @param cloudEventsJson the JSON payload containing one or more events. * @param skipValidation set to true if you'd like to skip the validation for the deserialized CloudEvents. A valid * CloudEvent should have 'id', 'source' and 'type' not null. * * @return all of the events in the payload deserialized as {@link CloudEvent CloudEvents}. * @throws NullPointerException if cloudEventsJson is null. * @throws IllegalArgumentException if the input parameter isn't a JSON string for a cloud event or an array of it, * or skipValidation is false and any CloudEvents have null id', 'source', or 'type'. */ /** * Get the id of the cloud event. * @return the id. */ public String getId() { return this.id; } /** * Set a custom id. Note that a random id is already set by default. * @param id the id to set. * * @return the cloud event itself. * @throws NullPointerException if id is null. * @throws IllegalArgumentException if id is empty. */ public CloudEvent setId(String id) { if (Objects.isNull(id)) { throw LOGGER.logExceptionAsError(new NullPointerException("id cannot be null")); } if (id.isEmpty()) { throw LOGGER.logExceptionAsError(new IllegalArgumentException("id cannot be empty")); } this.id = id; return this; } /** * Get the URI source of the event. 
* @return the source. */ public String getSource() { return this.source; } /** * Get the data associated with this event as a {@link BinaryData}, which has API to deserialize the data into * a String, an Object, or a byte[]. * @return A {@link BinaryData} that wraps the this event's data payload. */ public BinaryData getData() { if (this.binaryData == null) { if (this.data != null) { if (this.data instanceof String) { this.binaryData = BinaryData.fromString((String) this.data); } else if (this.data instanceof byte[]) { this.binaryData = BinaryData.fromBytes((byte[]) this.data); } else { this.binaryData = BinaryData.fromObject(this.data, SERIALIZER); } } else if (this.dataBase64 != null) { this.binaryData = BinaryData.fromString(this.dataBase64); } } return this.binaryData; } /** * Get the type of event, e.g. "Contoso.Items.ItemReceived". * @return the type of the event. */ public String getType() { return this.type; } /** * Get the time associated with the occurrence of the event. * @return the event time, or null if the time is not set. */ public OffsetDateTime getTime() { return this.time; } /** * Set the time associated with the occurrence of the event. * @param time the time to set. * * @return the cloud event itself. */ public CloudEvent setTime(OffsetDateTime time) { this.time = time; return this; } /** * Get the content MIME type that the data is in. A null value indicates that the data is either nonexistent or in the * "application/json" type. Note that "application/json" is still a possible value for this field. * @return the content type the data is in, or null if the data is nonexistent or in "application/json" format. */ public String getDataContentType() { return this.dataContentType; } /** * Get the schema that the data adheres to. * @return a URI of the data schema, or null if it is not set. */ public String getDataSchema() { return this.dataSchema; } /** * Set the schema that the data adheres to. 
* @param dataSchema a URI identifying the schema of the data. * * @return the cloud event itself. */ public CloudEvent setDataSchema(String dataSchema) { this.dataSchema = dataSchema; return this; } /** * Get the subject associated with this event. * @return the subject, or null if the subject was not set. */ public String getSubject() { return this.subject; } /** * Set the subject of the event. * @param subject the subject to set. * * @return the cloud event itself. */ public CloudEvent setSubject(String subject) { this.subject = subject; return this; } /** * Get a map of the additional user-defined attributes associated with this event. * @return the extension attributes as an unmodifiable map. */ @JsonAnyGetter public Map<String, Object> getExtensionAttributes() { return this.extensionAttributes; } /** * Add/Overwrite a single extension attribute to the cloud event. * @param name the name of the attribute. It must contains only alphanumeric characters and not be be any * CloudEvent reserved attribute names. * @param value the value to associate with the name. * * @return the cloud event itself. * @throws IllegalArgumentException if name format isn't correct. */ @JsonAnySetter public CloudEvent addExtensionAttribute(String name, Object value) { if (Objects.isNull(name)) { throw LOGGER.logExceptionAsError(new IllegalArgumentException("'name' cannot be null.")); } if (Objects.isNull(value)) { throw LOGGER.logExceptionAsError(new IllegalArgumentException("'value' cannot be null.")); } if (!validateAttributeName(name)) { throw LOGGER.logExceptionAsError(new IllegalArgumentException( "'name' must have only small-case alphanumeric characters and not be one of the CloudEvent reserved " + "attribute names")); } if (this.extensionAttributes == null) { this.extensionAttributes = new HashMap<>(); } this.extensionAttributes.put(name.toLowerCase(Locale.ENGLISH), value); return this; } /** * Get the spec version. Users don't need to access it because it's always 1.0. 
* Make it package level to test deserialization. * @return The spec version. */ String getSpecVersion() { return this.specVersion; } /** * Set the spec version. Users don't need to access it because it's always 1.0. * Make it package level to test serialization. * @return the cloud event itself. */ CloudEvent setSpecVersion(String specVersion) { this.specVersion = specVersion; return this; } private static boolean validateAttributeName(String name) { if (RESERVED_ATTRIBUTE_NAMES.contains(name)) { return false; } for (int i = 0; i < name.length(); i++) { char c = name.charAt(i); if (!((c >= 'a' && c <= 'z') || (c >= '0' && c <= '9'))) { return false; } } return true; } static class JacksonSerializer implements JsonSerializer { private final JacksonAdapter jacksonAdapter = new JacksonAdapter(); @Override public <T> T deserialize(InputStream stream, TypeReference<T> typeReference) { try { return jacksonAdapter.deserialize(stream, typeReference.getJavaType(), SerializerEncoding.JSON); } catch (IOException e) { throw LOGGER.logExceptionAsError(new RuntimeException(e)); } } @Override public <T> Mono<T> deserializeAsync(InputStream stream, TypeReference<T> typeReference) { return Mono.defer(() -> Mono.just(deserialize(stream, typeReference))); } @Override public void serialize(OutputStream stream, Object value) { try { jacksonAdapter.serialize(value, SerializerEncoding.JSON, stream); } catch (IOException e) { throw LOGGER.logExceptionAsError(new RuntimeException(e)); } } @Override public Mono<Void> serializeAsync(OutputStream stream, Object value) { return Mono.fromRunnable(() -> serialize(stream, value)); } JacksonAdapter getJacksonAdapter() { return jacksonAdapter; } } }
class accepts any String for compatibility with legacy systems. * @param type Type of event related to the originating occurrence. * @param data A {@link BinaryData}
nit ```suggestion "'name' must have only lower-case alphanumeric characters and not be one of the CloudEvent reserved " ```
/**
 * Adds or overwrites a single extension attribute on this CloudEvent.
 *
 * @param name the attribute name; must contain only lower-case alphanumeric characters and must not be
 * one of the CloudEvent reserved attribute names.
 * @param value the value to associate with the name.
 * @return the cloud event itself.
 * @throws IllegalArgumentException if name or value is null, or the name format isn't correct.
 */
public CloudEvent addExtensionAttribute(String name, Object value) {
    if (Objects.isNull(name)) {
        throw LOGGER.logExceptionAsError(new IllegalArgumentException("'name' cannot be null."));
    }
    if (Objects.isNull(value)) {
        throw LOGGER.logExceptionAsError(new IllegalArgumentException("'value' cannot be null."));
    }
    if (!validateAttributeName(name)) {
        // "lower-case", not "small-case": matches the a-z/0-9 check in validateAttributeName.
        throw LOGGER.logExceptionAsError(new IllegalArgumentException(
            "'name' must have only lower-case alphanumeric characters and not be one of the CloudEvent reserved "
                + "attribute names"));
    }
    if (this.extensionAttributes == null) {
        // Lazily created: most events carry no extension attributes.
        this.extensionAttributes = new HashMap<>();
    }
    // validateAttributeName guarantees 'name' is already lower-case (a-z, 0-9 only),
    // so the previous toLowerCase(Locale.ENGLISH) call was a no-op and is dropped.
    this.extensionAttributes.put(name, value);
    return this;
}
"'name' must have only small-case alphanumeric characters and not be one of the CloudEvent reserved "
/**
 * Adds or overwrites a single extension attribute on this CloudEvent.
 *
 * @param name the attribute name; only lower-case alphanumeric characters are accepted and reserved
 * CloudEvent attribute names are rejected.
 * @param value the value to associate with the name.
 * @return the cloud event itself.
 * @throws NullPointerException if name or value is null.
 * @throws IllegalArgumentException if the name format isn't correct.
 */
public CloudEvent addExtensionAttribute(String name, Object value) {
    Objects.requireNonNull(name, "'name' cannot be null.");
    Objects.requireNonNull(value, "'value' cannot be null.");
    if (!validateAttributeName(name)) {
        final String message =
            "Extension attribute 'name' must have only lower-case alphanumeric characters and not be one of the "
                + "CloudEvent reserved attribute names: " + String.join(",", RESERVED_ATTRIBUTE_NAMES);
        throw LOGGER.logExceptionAsError(new IllegalArgumentException(message));
    }
    // The attribute map is created on first use.
    Map<String, Object> attributes = this.extensionAttributes;
    if (attributes == null) {
        attributes = new HashMap<>();
        this.extensionAttributes = attributes;
    }
    attributes.put(name, value);
    return this;
}
class CloudEvent { private static final String SPEC_VERSION = "1.0"; private static final JsonSerializer SERIALIZER; static { JsonSerializer tmp; try { tmp = JsonSerializerProviders.createInstance(); } catch (IllegalStateException e) { tmp = new JacksonSerializer(); } SERIALIZER = tmp; } private static final ClientLogger LOGGER = new ClientLogger(CloudEvent.class); private static final Set<String> RESERVED_ATTRIBUTE_NAMES = new HashSet<>(Arrays.asList( "specversion", "id", "source", "type", "datacontenttype", "dataschema", "subject", "time", "data" )); /* * An identifier for the event. The combination of id and source must be * unique for each distinct event. */ @JsonProperty(value = "id", required = true) private String id; /* * Identifies the context in which an event happened. The combination of id * and source must be unique for each distinct event. */ @JsonProperty(value = "source", required = true) private String source; /* * Event data specific to the event type. */ @JsonProperty(value = "data") private Object data; /* * Event data specific to the event type, encoded as a base64 string. */ @JsonProperty(value = "data_base64") private String dataBase64; /* * Type of event related to the originating occurrence. */ @JsonProperty(value = "type", required = true) private String type; /* * The time (in UTC) the event was generated, in RFC3339 format. */ @JsonProperty(value = "time") private OffsetDateTime time; /* * The version of the CloudEvents specification which the event uses. */ @JsonProperty(value = "specversion", required = true) private String specVersion; /* * Identifies the schema that data adheres to. */ @JsonProperty(value = "dataschema") private String dataSchema; /* * Content type of data value. */ @JsonProperty(value = "datacontenttype") private String dataContentType; /* * This describes the subject of the event in the context of the event * producer (identified by source). 
*/ @JsonProperty(value = "subject") private String subject; @JsonIgnore private Map<String, Object> extensionAttributes; /* * Cache serialized data for getData() */ @JsonIgnore private BinaryData binaryData; /** * * @param source Identifies the context in which an event happened. The combination of id and source must be unique * for each distinct event. * @param type Type of event related to the originating occurrence. * @param data A {@link BinaryData} that wraps the original data, which can be a String, byte[], or model class. * @param format Set to {@link CloudEventDataFormat * {@link CloudEventDataFormat * @param dataContentType The content type of the data. It has no impact on how the data is serialized but tells * the event subscriber how to use the data. Typically the value is of MIME types such as * "application/json", "text/plain", "text/xml", "application/+avro", etc. It can be null. * @throws NullPointerException if source, type, data, or format is null. */ public CloudEvent(String source, String type, BinaryData data, CloudEventDataFormat format, String dataContentType) { if (Objects.isNull(source)) { throw LOGGER.logExceptionAsError(new NullPointerException("'source' cannot be null.")); } if (Objects.isNull(type)) { throw LOGGER.logExceptionAsError(new NullPointerException("'type' cannot be null.")); } if (Objects.isNull(data)) { throw LOGGER.logExceptionAsError(new NullPointerException("'data' cannot be null.")); } if (Objects.isNull(format)) { throw LOGGER.logExceptionAsError(new NullPointerException("'format' cannot be null.")); } this.source = source; this.type = type; if (CloudEventDataFormat.BYTES == format) { this.dataBase64 = Base64.getEncoder().encodeToString(data.toBytes()); } else { this.data = data.toString(); } this.dataContentType = dataContentType; this.id = UUID.randomUUID().toString(); this.specVersion = CloudEvent.SPEC_VERSION; } private CloudEvent() { } /** * Deserialize a list of {@link CloudEvent CloudEvents} from a JSON string and 
validate whether any CloudEvents have * null id', 'source', or 'type'. If you want to skip this validation, use {@link * @param cloudEventsJson the JSON payload containing one or more events. * * @return all of the events in the payload deserialized as {@link CloudEvent CloudEvents}. * @throws NullPointerException if cloudEventsJson is null. * @throws IllegalArgumentException if the input parameter isn't a correct JSON string for a cloud event * or an array of it, or any deserialized CloudEvents have null 'id', 'source', or 'type'. */ public static List<CloudEvent> fromString(String cloudEventsJson) { return fromString(cloudEventsJson, false); } /** * Deserialize a list of {@link CloudEvent CloudEvents} from a JSON string. * @param cloudEventsJson the JSON payload containing one or more events. * @param skipValidation set to true if you'd like to skip the validation for the deserialized CloudEvents. A valid * CloudEvent should have 'id', 'source' and 'type' not null. * * @return all of the events in the payload deserialized as {@link CloudEvent CloudEvents}. * @throws NullPointerException if cloudEventsJson is null. * @throws IllegalArgumentException if the input parameter isn't a JSON string for a cloud event or an array of it, * or skipValidation is false and any CloudEvents have null id', 'source', or 'type'. 
*/ public static List<CloudEvent> fromString(String cloudEventsJson, boolean skipValidation) { if (cloudEventsJson == null) { throw LOGGER.logExceptionAsError(new NullPointerException("'cloudEventsJson' cannot be null")); } try { List<CloudEvent> events = Arrays.asList(SERIALIZER.deserialize( new ByteArrayInputStream(cloudEventsJson.getBytes(StandardCharsets.UTF_8)), TypeReference.createInstance(CloudEvent[].class))); if (!skipValidation) { for (CloudEvent event : events) { if (event.getId() == null || event.getSource() == null || event.getType() == null) { throw LOGGER.logExceptionAsError(new IllegalArgumentException( "'id', 'source' and 'type' are mandatory attributes for a CloudEvent. " + "Check if the input param is a JSON string for a CloudEvent or an array of it.")); } } } return events; } catch (UncheckedIOException uncheckedIOException) { throw LOGGER.logExceptionAsError(new IllegalArgumentException("The input parameter isn't a JSON string.", uncheckedIOException.getCause())); } } /** * Get the id of the cloud event. * @return the id. */ public String getId() { return this.id; } /** * Set a custom id. Note that a random id is already set by default. * @param id the id to set. * * @return the cloud event itself. * @throws NullPointerException if id is null. * @throws IllegalArgumentException if id is empty. */ public CloudEvent setId(String id) { if (Objects.isNull(id)) { throw LOGGER.logExceptionAsError(new NullPointerException("id cannot be null")); } if (id.isEmpty()) { throw LOGGER.logExceptionAsError(new IllegalArgumentException("id cannot be empty")); } this.id = id; return this; } /** * Get the URI source of the event. * @return the source. */ public String getSource() { return this.source; } /** * Get the data associated with this event as a {@link BinaryData}, which has API to deserialize the data into * a String, an Object, or a byte[]. * @return A {@link BinaryData} that wraps the this event's data payload. 
*/ public BinaryData getData() { if (this.binaryData == null) { if (this.data != null) { if (this.data instanceof String) { this.binaryData = BinaryData.fromString((String) this.data); } else if (this.data instanceof byte[]) { this.binaryData = BinaryData.fromBytes((byte[]) this.data); } else { this.binaryData = BinaryData.fromObject(this.data, SERIALIZER); } } else if (this.dataBase64 != null) { this.binaryData = BinaryData.fromString(this.dataBase64); } } return this.binaryData; } /** * Get the type of event, e.g. "Contoso.Items.ItemReceived". * @return the type of the event. */ public String getType() { return this.type; } /** * Get the time associated with the occurrence of the event. * @return the event time, or null if the time is not set. */ public OffsetDateTime getTime() { return this.time; } /** * Set the time associated with the occurrence of the event. * @param time the time to set. * * @return the cloud event itself. */ public CloudEvent setTime(OffsetDateTime time) { this.time = time; return this; } /** * Get the content MIME type that the data is in. A null value indicates that the data is either nonexistent or in the * "application/json" type. Note that "application/json" is still a possible value for this field. * @return the content type the data is in, or null if the data is nonexistent or in "application/json" format. */ public String getDataContentType() { return this.dataContentType; } /** * Get the schema that the data adheres to. * @return a URI of the data schema, or null if it is not set. */ public String getDataSchema() { return this.dataSchema; } /** * Set the schema that the data adheres to. * @param dataSchema a URI identifying the schema of the data. * * @return the cloud event itself. */ public CloudEvent setDataSchema(String dataSchema) { this.dataSchema = dataSchema; return this; } /** * Get the subject associated with this event. * @return the subject, or null if the subject was not set. 
*/ public String getSubject() { return this.subject; } /** * Set the subject of the event. * @param subject the subject to set. * * @return the cloud event itself. */ public CloudEvent setSubject(String subject) { this.subject = subject; return this; } /** * Get a map of the additional user-defined attributes associated with this event. * @return the extension attributes as an unmodifiable map. */ @JsonAnyGetter public Map<String, Object> getExtensionAttributes() { return this.extensionAttributes; } /** * Add/Overwrite a single extension attribute to the cloud event. * @param name the name of the attribute. It must contains only alphanumeric characters and not be be any * CloudEvent reserved attribute names. * @param value the value to associate with the name. * * @return the cloud event itself. * @throws IllegalArgumentException if name format isn't correct. */ @JsonAnySetter /** * Get the spec version. Users don't need to access it because it's always 1.0. * Make it package level to test deserialization. * @return The spec version. */ String getSpecVersion() { return this.specVersion; } /** * Set the spec version. Users don't need to access it because it's always 1.0. * Make it package level to test serialization. * @return the cloud event itself. 
*/ CloudEvent setSpecVersion(String specVersion) { this.specVersion = specVersion; return this; } private static boolean validateAttributeName(String name) { if (RESERVED_ATTRIBUTE_NAMES.contains(name)) { return false; } for (int i = 0; i < name.length(); i++) { char c = name.charAt(i); if (!((c >= 'a' && c <= 'z') || (c >= '0' && c <= '9'))) { return false; } } return true; } static class JacksonSerializer implements JsonSerializer { private final JacksonAdapter jacksonAdapter = new JacksonAdapter(); @Override public <T> T deserialize(InputStream stream, TypeReference<T> typeReference) { try { return jacksonAdapter.deserialize(stream, typeReference.getJavaType(), SerializerEncoding.JSON); } catch (IOException e) { throw LOGGER.logExceptionAsError(new RuntimeException(e)); } } @Override public <T> Mono<T> deserializeAsync(InputStream stream, TypeReference<T> typeReference) { return Mono.defer(() -> Mono.just(deserialize(stream, typeReference))); } @Override public void serialize(OutputStream stream, Object value) { try { jacksonAdapter.serialize(value, SerializerEncoding.JSON, stream); } catch (IOException e) { throw LOGGER.logExceptionAsError(new RuntimeException(e)); } } @Override public Mono<Void> serializeAsync(OutputStream stream, Object value) { return Mono.fromRunnable(() -> serialize(stream, value)); } JacksonAdapter getJacksonAdapter() { return jacksonAdapter; } } }
class accepts any String for compatibility with legacy systems. * @param type Type of event related to the originating occurrence. * @param data A {@link BinaryData}
You can use `Objects.requireNonNull(cloudEventsJson, "'cloudEventsJson' cannot be null.")` here instead of the manual null check (note: the class is `Objects`, not `Object`).
/**
 * Deserializes one or more {@link CloudEvent CloudEvents} from a JSON string.
 *
 * @param cloudEventsJson the JSON payload containing one or more events.
 * @param skipValidation set to true to skip validating that every deserialized event has non-null
 * 'id', 'source' and 'type' attributes.
 * @return all of the events in the payload deserialized as {@link CloudEvent CloudEvents}.
 * @throws NullPointerException if cloudEventsJson is null.
 * @throws IllegalArgumentException if the input parameter isn't a JSON string for a CloudEvent or an array
 * of it, or skipValidation is false and any event is missing a mandatory attribute.
 */
public static List<CloudEvent> fromString(String cloudEventsJson, boolean skipValidation) {
    if (cloudEventsJson == null) {
        throw LOGGER.logExceptionAsError(new NullPointerException("'cloudEventsJson' cannot be null"));
    }
    try {
        // Deserialize the UTF-8 bytes into a CloudEvent[]; Arrays.asList yields a fixed-size view of it.
        List<CloudEvent> events = Arrays.asList(SERIALIZER.deserialize(
            new ByteArrayInputStream(cloudEventsJson.getBytes(StandardCharsets.UTF_8)),
            TypeReference.createInstance(CloudEvent[].class)));
        if (!skipValidation) {
            for (CloudEvent event : events) {
                // 'id', 'source' and 'type' are the mandatory attributes enforced by this class.
                if (event.getId() == null || event.getSource() == null || event.getType() == null) {
                    throw LOGGER.logExceptionAsError(new IllegalArgumentException(
                        "'id', 'source' and 'type' are mandatory attributes for a CloudEvent. "
                            + "Check if the input param is a JSON string for a CloudEvent or an array of it."));
                }
            }
        }
        return events;
    } catch (UncheckedIOException uncheckedIOException) {
        // The serializer reports malformed JSON as UncheckedIOException; rethrow with the root cause attached.
        throw LOGGER.logExceptionAsError(new IllegalArgumentException("The input parameter isn't a JSON string.",
            uncheckedIOException.getCause()));
    }
}
}
/**
 * Deserializes one or more {@link CloudEvent CloudEvents} from a JSON string.
 *
 * @param cloudEventsJson the JSON payload containing one or more events.
 * @param skipValidation set to true to skip validating that every deserialized event has non-null
 * 'id', 'source' and 'type' attributes.
 * @return all of the events in the payload deserialized as {@link CloudEvent CloudEvents}.
 * @throws NullPointerException if cloudEventsJson is null.
 * @throws IllegalArgumentException if the input parameter isn't a JSON string for a CloudEvent or an array
 * of it, or skipValidation is false and any event is missing a mandatory attribute.
 */
public static List<CloudEvent> fromString(String cloudEventsJson, boolean skipValidation) {
    Objects.requireNonNull(cloudEventsJson, "'cloudEventsJson' cannot be null");
    try {
        // Deserialize the UTF-8 bytes directly into a List<CloudEvent> via the cached type reference.
        List<CloudEvent> events = SERIALIZER.deserialize(
            new ByteArrayInputStream(cloudEventsJson.getBytes(StandardCharsets.UTF_8)),
            DESERIALIZER_TYPE_REFERENCE);
        if (skipValidation) {
            // Caller opted out of attribute validation; return the raw deserialization result.
            return events;
        }
        for (CloudEvent event : events) {
            // 'id', 'source' and 'type' are the mandatory attributes enforced by this class.
            if (event.getId() == null || event.getSource() == null || event.getType() == null) {
                throw LOGGER.logExceptionAsError(new IllegalArgumentException(
                    "'id', 'source' and 'type' are mandatory attributes for a CloudEvent. "
                        + "Check if the input param is a JSON string for a CloudEvent or an array of it."));
            }
        }
        return events;
    } catch (UncheckedIOException uncheckedIOException) {
        // The serializer reports malformed JSON as UncheckedIOException; rethrow with the root cause attached.
        throw LOGGER.logExceptionAsError(new IllegalArgumentException("The input parameter isn't a JSON string.",
            uncheckedIOException.getCause()));
    }
}
// NOTE(review): collapsed single-line rendering of the CloudEvent model class (CloudEvents 1.0 envelope).
// It stores the spec-mandated attributes (id, source, type, specversion) plus optional data/data_base64,
// time, dataschema, datacontenttype, subject, and free-form extension attributes (@JsonAnyGetter/Setter).
// SERIALIZER falls back to the inner JacksonSerializer when no JsonSerializerProvider is discoverable.
// getData() lazily caches the payload as BinaryData; validateAttributeName() enforces lowercase
// alphanumeric, non-reserved extension names. Formatting left untouched; only this header was added.
class CloudEvent { private static final String SPEC_VERSION = "1.0"; private static final JsonSerializer SERIALIZER; static { JsonSerializer tmp; try { tmp = JsonSerializerProviders.createInstance(); } catch (IllegalStateException e) { tmp = new JacksonSerializer(); } SERIALIZER = tmp; } private static final ClientLogger LOGGER = new ClientLogger(CloudEvent.class); private static final Set<String> RESERVED_ATTRIBUTE_NAMES = new HashSet<>(Arrays.asList( "specversion", "id", "source", "type", "datacontenttype", "dataschema", "subject", "time", "data" )); /* * An identifier for the event. The combination of id and source must be * unique for each distinct event. */ @JsonProperty(value = "id", required = true) private String id; /* * Identifies the context in which an event happened. The combination of id * and source must be unique for each distinct event. */ @JsonProperty(value = "source", required = true) private String source; /* * Event data specific to the event type. */ @JsonProperty(value = "data") private Object data; /* * Event data specific to the event type, encoded as a base64 string. */ @JsonProperty(value = "data_base64") private String dataBase64; /* * Type of event related to the originating occurrence. */ @JsonProperty(value = "type", required = true) private String type; /* * The time (in UTC) the event was generated, in RFC3339 format. */ @JsonProperty(value = "time") private OffsetDateTime time; /* * The version of the CloudEvents specification which the event uses. */ @JsonProperty(value = "specversion", required = true) private String specVersion; /* * Identifies the schema that data adheres to. */ @JsonProperty(value = "dataschema") private String dataSchema; /* * Content type of data value. */ @JsonProperty(value = "datacontenttype") private String dataContentType; /* * This describes the subject of the event in the context of the event * producer (identified by source). 
*/ @JsonProperty(value = "subject") private String subject; @JsonIgnore private Map<String, Object> extensionAttributes; /* * Cache serialized data for getData() */ @JsonIgnore private BinaryData binaryData; /** * * @param source Identifies the context in which an event happened. The combination of id and source must be unique * for each distinct event. * @param type Type of event related to the originating occurrence. * @param data A {@link BinaryData} that wraps the original data, which can be a String, byte[], or model class. * @param format Set to {@link CloudEventDataFormat * {@link CloudEventDataFormat * @param dataContentType The content type of the data. It has no impact on how the data is serialized but tells * the event subscriber how to use the data. Typically the value is of MIME types such as * "application/json", "text/plain", "text/xml", "application/+avro", etc. It can be null. * @throws NullPointerException if source, type, data, or format is null. */ public CloudEvent(String source, String type, BinaryData data, CloudEventDataFormat format, String dataContentType) { if (Objects.isNull(source)) { throw LOGGER.logExceptionAsError(new NullPointerException("'source' cannot be null.")); } if (Objects.isNull(type)) { throw LOGGER.logExceptionAsError(new NullPointerException("'type' cannot be null.")); } if (Objects.isNull(data)) { throw LOGGER.logExceptionAsError(new NullPointerException("'data' cannot be null.")); } if (Objects.isNull(format)) { throw LOGGER.logExceptionAsError(new NullPointerException("'format' cannot be null.")); } this.source = source; this.type = type; if (CloudEventDataFormat.BYTES == format) { this.dataBase64 = Base64.getEncoder().encodeToString(data.toBytes()); } else { this.data = data.toString(); } this.dataContentType = dataContentType; this.id = UUID.randomUUID().toString(); this.specVersion = CloudEvent.SPEC_VERSION; } private CloudEvent() { } /** * Deserialize a list of {@link CloudEvent CloudEvents} from a JSON string and 
validate whether any CloudEvents have * null id', 'source', or 'type'. If you want to skip this validation, use {@link * @param cloudEventsJson the JSON payload containing one or more events. * * @return all of the events in the payload deserialized as {@link CloudEvent CloudEvents}. * @throws NullPointerException if cloudEventsJson is null. * @throws IllegalArgumentException if the input parameter isn't a correct JSON string for a cloud event * or an array of it, or any deserialized CloudEvents have null 'id', 'source', or 'type'. */ public static List<CloudEvent> fromString(String cloudEventsJson) { return fromString(cloudEventsJson, false); } /** * Deserialize a list of {@link CloudEvent CloudEvents} from a JSON string. * @param cloudEventsJson the JSON payload containing one or more events. * @param skipValidation set to true if you'd like to skip the validation for the deserialized CloudEvents. A valid * CloudEvent should have 'id', 'source' and 'type' not null. * * @return all of the events in the payload deserialized as {@link CloudEvent CloudEvents}. * @throws NullPointerException if cloudEventsJson is null. * @throws IllegalArgumentException if the input parameter isn't a JSON string for a cloud event or an array of it, * or skipValidation is false and any CloudEvents have null id', 'source', or 'type'. */ /** * Get the id of the cloud event. * @return the id. */ public String getId() { return this.id; } /** * Set a custom id. Note that a random id is already set by default. * @param id the id to set. * * @return the cloud event itself. * @throws NullPointerException if id is null. * @throws IllegalArgumentException if id is empty. */ public CloudEvent setId(String id) { if (Objects.isNull(id)) { throw LOGGER.logExceptionAsError(new NullPointerException("id cannot be null")); } if (id.isEmpty()) { throw LOGGER.logExceptionAsError(new IllegalArgumentException("id cannot be empty")); } this.id = id; return this; } /** * Get the URI source of the event. 
* @return the source. */ public String getSource() { return this.source; } /** * Get the data associated with this event as a {@link BinaryData}, which has API to deserialize the data into * a String, an Object, or a byte[]. * @return A {@link BinaryData} that wraps the this event's data payload. */ public BinaryData getData() { if (this.binaryData == null) { if (this.data != null) { if (this.data instanceof String) { this.binaryData = BinaryData.fromString((String) this.data); } else if (this.data instanceof byte[]) { this.binaryData = BinaryData.fromBytes((byte[]) this.data); } else { this.binaryData = BinaryData.fromObject(this.data, SERIALIZER); } } else if (this.dataBase64 != null) { this.binaryData = BinaryData.fromString(this.dataBase64); } } return this.binaryData; } /** * Get the type of event, e.g. "Contoso.Items.ItemReceived". * @return the type of the event. */ public String getType() { return this.type; } /** * Get the time associated with the occurrence of the event. * @return the event time, or null if the time is not set. */ public OffsetDateTime getTime() { return this.time; } /** * Set the time associated with the occurrence of the event. * @param time the time to set. * * @return the cloud event itself. */ public CloudEvent setTime(OffsetDateTime time) { this.time = time; return this; } /** * Get the content MIME type that the data is in. A null value indicates that the data is either nonexistent or in the * "application/json" type. Note that "application/json" is still a possible value for this field. * @return the content type the data is in, or null if the data is nonexistent or in "application/json" format. */ public String getDataContentType() { return this.dataContentType; } /** * Get the schema that the data adheres to. * @return a URI of the data schema, or null if it is not set. */ public String getDataSchema() { return this.dataSchema; } /** * Set the schema that the data adheres to. 
* @param dataSchema a URI identifying the schema of the data. * * @return the cloud event itself. */ public CloudEvent setDataSchema(String dataSchema) { this.dataSchema = dataSchema; return this; } /** * Get the subject associated with this event. * @return the subject, or null if the subject was not set. */ public String getSubject() { return this.subject; } /** * Set the subject of the event. * @param subject the subject to set. * * @return the cloud event itself. */ public CloudEvent setSubject(String subject) { this.subject = subject; return this; } /** * Get a map of the additional user-defined attributes associated with this event. * @return the extension attributes as an unmodifiable map. */ @JsonAnyGetter public Map<String, Object> getExtensionAttributes() { return this.extensionAttributes; } /** * Add/Overwrite a single extension attribute to the cloud event. * @param name the name of the attribute. It must contains only alphanumeric characters and not be be any * CloudEvent reserved attribute names. * @param value the value to associate with the name. * * @return the cloud event itself. * @throws IllegalArgumentException if name format isn't correct. */ @JsonAnySetter public CloudEvent addExtensionAttribute(String name, Object value) { if (Objects.isNull(name)) { throw LOGGER.logExceptionAsError(new IllegalArgumentException("'name' cannot be null.")); } if (Objects.isNull(value)) { throw LOGGER.logExceptionAsError(new IllegalArgumentException("'value' cannot be null.")); } if (!validateAttributeName(name)) { throw LOGGER.logExceptionAsError(new IllegalArgumentException( "'name' must have only small-case alphanumeric characters and not be one of the CloudEvent reserved " + "attribute names")); } if (this.extensionAttributes == null) { this.extensionAttributes = new HashMap<>(); } this.extensionAttributes.put(name.toLowerCase(Locale.ENGLISH), value); return this; } /** * Get the spec version. Users don't need to access it because it's always 1.0. 
* Make it package level to test deserialization. * @return The spec version. */ String getSpecVersion() { return this.specVersion; } /** * Set the spec version. Users don't need to access it because it's always 1.0. * Make it package level to test serialization. * @return the cloud event itself. */ CloudEvent setSpecVersion(String specVersion) { this.specVersion = specVersion; return this; } private static boolean validateAttributeName(String name) { if (RESERVED_ATTRIBUTE_NAMES.contains(name)) { return false; } for (int i = 0; i < name.length(); i++) { char c = name.charAt(i); if (!((c >= 'a' && c <= 'z') || (c >= '0' && c <= '9'))) { return false; } } return true; } static class JacksonSerializer implements JsonSerializer { private final JacksonAdapter jacksonAdapter = new JacksonAdapter(); @Override public <T> T deserialize(InputStream stream, TypeReference<T> typeReference) { try { return jacksonAdapter.deserialize(stream, typeReference.getJavaType(), SerializerEncoding.JSON); } catch (IOException e) { throw LOGGER.logExceptionAsError(new RuntimeException(e)); } } @Override public <T> Mono<T> deserializeAsync(InputStream stream, TypeReference<T> typeReference) { return Mono.defer(() -> Mono.just(deserialize(stream, typeReference))); } @Override public void serialize(OutputStream stream, Object value) { try { jacksonAdapter.serialize(value, SerializerEncoding.JSON, stream); } catch (IOException e) { throw LOGGER.logExceptionAsError(new RuntimeException(e)); } } @Override public Mono<Void> serializeAsync(OutputStream stream, Object value) { return Mono.fromRunnable(() -> serialize(stream, value)); } JacksonAdapter getJacksonAdapter() { return jacksonAdapter; } } }
class accepts any String for compatibility with legacy systems. * @param type Type of event related to the originating occurrence. * @param data A {@link BinaryData}
Also, shouldn't a null input throw `NullPointerException` here?
/**
 * Deserialize a list of {@link CloudEvent CloudEvents} from a JSON string and, unless
 * {@code skipValidation} is set, verify each event carries the mandatory 'id', 'source'
 * and 'type' attributes.
 *
 * @param cloudEventsJson the JSON payload containing one or more events.
 * @param skipValidation set to true to skip the mandatory-attribute validation.
 * @return all of the events in the payload deserialized as {@link CloudEvent CloudEvents}.
 * @throws NullPointerException if cloudEventsJson is null.
 * @throws IllegalArgumentException if the input parameter isn't a JSON string for a CloudEvent or
 * an array of it, or skipValidation is false and any event has a null 'id', 'source', or 'type'.
 */
public static List<CloudEvent> fromString(String cloudEventsJson, boolean skipValidation) {
    // Review fix: replace the manual null check + logged NPE with the standard-library guard.
    Objects.requireNonNull(cloudEventsJson, "'cloudEventsJson' cannot be null.");
    try {
        List<CloudEvent> events = Arrays.asList(SERIALIZER.deserialize(
            new ByteArrayInputStream(cloudEventsJson.getBytes(StandardCharsets.UTF_8)),
            TypeReference.createInstance(CloudEvent[].class)));
        if (!skipValidation) {
            for (CloudEvent event : events) {
                // 'id', 'source' and 'type' are mandatory per the CloudEvents 1.0 spec.
                if (event.getId() == null || event.getSource() == null || event.getType() == null) {
                    throw LOGGER.logExceptionAsError(new IllegalArgumentException(
                        "'id', 'source' and 'type' are mandatory attributes for a CloudEvent. "
                            + "Check if the input param is a JSON string for a CloudEvent or an array of it."));
                }
            }
        }
        return events;
    } catch (UncheckedIOException uncheckedIOException) {
        // Surface deserialization failures as IllegalArgumentException, preserving the root cause.
        throw LOGGER.logExceptionAsError(new IllegalArgumentException("The input parameter isn't a JSON string.",
            uncheckedIOException.getCause()));
    }
}
throw LOGGER.logExceptionAsError(new IllegalArgumentException(
/**
 * Deserialize a list of {@link CloudEvent CloudEvents} from a JSON string.
 *
 * @param cloudEventsJson the JSON payload containing one or more events.
 * @param skipValidation set to true to skip validating that each deserialized CloudEvent has
 * non-null 'id', 'source' and 'type'.
 * @return all of the events in the payload deserialized as {@link CloudEvent CloudEvents}.
 * @throws NullPointerException if cloudEventsJson is null.
 * @throws IllegalArgumentException if the input parameter isn't a JSON string for a CloudEvent or
 * an array of it, or skipValidation is false and any event has a null 'id', 'source', or 'type'.
 */
public static List<CloudEvent> fromString(String cloudEventsJson, boolean skipValidation) {
    Objects.requireNonNull(cloudEventsJson, "'cloudEventsJson' cannot be null");
    try {
        byte[] payloadBytes = cloudEventsJson.getBytes(StandardCharsets.UTF_8);
        List<CloudEvent> parsed = SERIALIZER.deserialize(
            new ByteArrayInputStream(payloadBytes), DESERIALIZER_TYPE_REFERENCE);
        if (!skipValidation) {
            for (CloudEvent candidate : parsed) {
                // Mandatory CloudEvents 1.0 attributes must all be present.
                if (candidate.getId() == null || candidate.getSource() == null
                    || candidate.getType() == null) {
                    throw LOGGER.logExceptionAsError(new IllegalArgumentException(
                        "'id', 'source' and 'type' are mandatory attributes for a CloudEvent. "
                            + "Check if the input param is a JSON string for a CloudEvent or an array of it."));
                }
            }
        }
        return parsed;
    } catch (UncheckedIOException uncheckedIOException) {
        // Re-throw malformed-JSON failures with the underlying IOException as the cause.
        throw LOGGER.logExceptionAsError(new IllegalArgumentException("The input parameter isn't a JSON string.",
            uncheckedIOException.getCause()));
    }
}
// NOTE(review): collapsed single-line rendering of the CloudEvent model class (CloudEvents 1.0 envelope).
// It stores the spec-mandated attributes (id, source, type, specversion) plus optional data/data_base64,
// time, dataschema, datacontenttype, subject, and free-form extension attributes (@JsonAnyGetter/Setter).
// SERIALIZER falls back to the inner JacksonSerializer when no JsonSerializerProvider is discoverable.
// getData() lazily caches the payload as BinaryData; validateAttributeName() enforces lowercase
// alphanumeric, non-reserved extension names. Formatting left untouched; only this header was added.
class CloudEvent { private static final String SPEC_VERSION = "1.0"; private static final JsonSerializer SERIALIZER; static { JsonSerializer tmp; try { tmp = JsonSerializerProviders.createInstance(); } catch (IllegalStateException e) { tmp = new JacksonSerializer(); } SERIALIZER = tmp; } private static final ClientLogger LOGGER = new ClientLogger(CloudEvent.class); private static final Set<String> RESERVED_ATTRIBUTE_NAMES = new HashSet<>(Arrays.asList( "specversion", "id", "source", "type", "datacontenttype", "dataschema", "subject", "time", "data" )); /* * An identifier for the event. The combination of id and source must be * unique for each distinct event. */ @JsonProperty(value = "id", required = true) private String id; /* * Identifies the context in which an event happened. The combination of id * and source must be unique for each distinct event. */ @JsonProperty(value = "source", required = true) private String source; /* * Event data specific to the event type. */ @JsonProperty(value = "data") private Object data; /* * Event data specific to the event type, encoded as a base64 string. */ @JsonProperty(value = "data_base64") private String dataBase64; /* * Type of event related to the originating occurrence. */ @JsonProperty(value = "type", required = true) private String type; /* * The time (in UTC) the event was generated, in RFC3339 format. */ @JsonProperty(value = "time") private OffsetDateTime time; /* * The version of the CloudEvents specification which the event uses. */ @JsonProperty(value = "specversion", required = true) private String specVersion; /* * Identifies the schema that data adheres to. */ @JsonProperty(value = "dataschema") private String dataSchema; /* * Content type of data value. */ @JsonProperty(value = "datacontenttype") private String dataContentType; /* * This describes the subject of the event in the context of the event * producer (identified by source). 
*/ @JsonProperty(value = "subject") private String subject; @JsonIgnore private Map<String, Object> extensionAttributes; /* * Cache serialized data for getData() */ @JsonIgnore private BinaryData binaryData; /** * * @param source Identifies the context in which an event happened. The combination of id and source must be unique * for each distinct event. * @param type Type of event related to the originating occurrence. * @param data A {@link BinaryData} that wraps the original data, which can be a String, byte[], or model class. * @param format Set to {@link CloudEventDataFormat * {@link CloudEventDataFormat * @param dataContentType The content type of the data. It has no impact on how the data is serialized but tells * the event subscriber how to use the data. Typically the value is of MIME types such as * "application/json", "text/plain", "text/xml", "application/+avro", etc. It can be null. * @throws NullPointerException if source, type, data, or format is null. */ public CloudEvent(String source, String type, BinaryData data, CloudEventDataFormat format, String dataContentType) { if (Objects.isNull(source)) { throw LOGGER.logExceptionAsError(new NullPointerException("'source' cannot be null.")); } if (Objects.isNull(type)) { throw LOGGER.logExceptionAsError(new NullPointerException("'type' cannot be null.")); } if (Objects.isNull(data)) { throw LOGGER.logExceptionAsError(new NullPointerException("'data' cannot be null.")); } if (Objects.isNull(format)) { throw LOGGER.logExceptionAsError(new NullPointerException("'format' cannot be null.")); } this.source = source; this.type = type; if (CloudEventDataFormat.BYTES == format) { this.dataBase64 = Base64.getEncoder().encodeToString(data.toBytes()); } else { this.data = data.toString(); } this.dataContentType = dataContentType; this.id = UUID.randomUUID().toString(); this.specVersion = CloudEvent.SPEC_VERSION; } private CloudEvent() { } /** * Deserialize a list of {@link CloudEvent CloudEvents} from a JSON string and 
validate whether any CloudEvents have * null id', 'source', or 'type'. If you want to skip this validation, use {@link * @param cloudEventsJson the JSON payload containing one or more events. * * @return all of the events in the payload deserialized as {@link CloudEvent CloudEvents}. * @throws NullPointerException if cloudEventsJson is null. * @throws IllegalArgumentException if the input parameter isn't a correct JSON string for a cloud event * or an array of it, or any deserialized CloudEvents have null 'id', 'source', or 'type'. */ public static List<CloudEvent> fromString(String cloudEventsJson) { return fromString(cloudEventsJson, false); } /** * Deserialize a list of {@link CloudEvent CloudEvents} from a JSON string. * @param cloudEventsJson the JSON payload containing one or more events. * @param skipValidation set to true if you'd like to skip the validation for the deserialized CloudEvents. A valid * CloudEvent should have 'id', 'source' and 'type' not null. * * @return all of the events in the payload deserialized as {@link CloudEvent CloudEvents}. * @throws NullPointerException if cloudEventsJson is null. * @throws IllegalArgumentException if the input parameter isn't a JSON string for a cloud event or an array of it, * or skipValidation is false and any CloudEvents have null id', 'source', or 'type'. */ /** * Get the id of the cloud event. * @return the id. */ public String getId() { return this.id; } /** * Set a custom id. Note that a random id is already set by default. * @param id the id to set. * * @return the cloud event itself. * @throws NullPointerException if id is null. * @throws IllegalArgumentException if id is empty. */ public CloudEvent setId(String id) { if (Objects.isNull(id)) { throw LOGGER.logExceptionAsError(new NullPointerException("id cannot be null")); } if (id.isEmpty()) { throw LOGGER.logExceptionAsError(new IllegalArgumentException("id cannot be empty")); } this.id = id; return this; } /** * Get the URI source of the event. 
* @return the source. */ public String getSource() { return this.source; } /** * Get the data associated with this event as a {@link BinaryData}, which has API to deserialize the data into * a String, an Object, or a byte[]. * @return A {@link BinaryData} that wraps the this event's data payload. */ public BinaryData getData() { if (this.binaryData == null) { if (this.data != null) { if (this.data instanceof String) { this.binaryData = BinaryData.fromString((String) this.data); } else if (this.data instanceof byte[]) { this.binaryData = BinaryData.fromBytes((byte[]) this.data); } else { this.binaryData = BinaryData.fromObject(this.data, SERIALIZER); } } else if (this.dataBase64 != null) { this.binaryData = BinaryData.fromString(this.dataBase64); } } return this.binaryData; } /** * Get the type of event, e.g. "Contoso.Items.ItemReceived". * @return the type of the event. */ public String getType() { return this.type; } /** * Get the time associated with the occurrence of the event. * @return the event time, or null if the time is not set. */ public OffsetDateTime getTime() { return this.time; } /** * Set the time associated with the occurrence of the event. * @param time the time to set. * * @return the cloud event itself. */ public CloudEvent setTime(OffsetDateTime time) { this.time = time; return this; } /** * Get the content MIME type that the data is in. A null value indicates that the data is either nonexistent or in the * "application/json" type. Note that "application/json" is still a possible value for this field. * @return the content type the data is in, or null if the data is nonexistent or in "application/json" format. */ public String getDataContentType() { return this.dataContentType; } /** * Get the schema that the data adheres to. * @return a URI of the data schema, or null if it is not set. */ public String getDataSchema() { return this.dataSchema; } /** * Set the schema that the data adheres to. 
* @param dataSchema a URI identifying the schema of the data. * * @return the cloud event itself. */ public CloudEvent setDataSchema(String dataSchema) { this.dataSchema = dataSchema; return this; } /** * Get the subject associated with this event. * @return the subject, or null if the subject was not set. */ public String getSubject() { return this.subject; } /** * Set the subject of the event. * @param subject the subject to set. * * @return the cloud event itself. */ public CloudEvent setSubject(String subject) { this.subject = subject; return this; } /** * Get a map of the additional user-defined attributes associated with this event. * @return the extension attributes as an unmodifiable map. */ @JsonAnyGetter public Map<String, Object> getExtensionAttributes() { return this.extensionAttributes; } /** * Add/Overwrite a single extension attribute to the cloud event. * @param name the name of the attribute. It must contains only alphanumeric characters and not be be any * CloudEvent reserved attribute names. * @param value the value to associate with the name. * * @return the cloud event itself. * @throws IllegalArgumentException if name format isn't correct. */ @JsonAnySetter public CloudEvent addExtensionAttribute(String name, Object value) { if (Objects.isNull(name)) { throw LOGGER.logExceptionAsError(new IllegalArgumentException("'name' cannot be null.")); } if (Objects.isNull(value)) { throw LOGGER.logExceptionAsError(new IllegalArgumentException("'value' cannot be null.")); } if (!validateAttributeName(name)) { throw LOGGER.logExceptionAsError(new IllegalArgumentException( "'name' must have only small-case alphanumeric characters and not be one of the CloudEvent reserved " + "attribute names")); } if (this.extensionAttributes == null) { this.extensionAttributes = new HashMap<>(); } this.extensionAttributes.put(name.toLowerCase(Locale.ENGLISH), value); return this; } /** * Get the spec version. Users don't need to access it because it's always 1.0. 
* Make it package level to test deserialization. * @return The spec version. */ String getSpecVersion() { return this.specVersion; } /** * Set the spec version. Users don't need to access it because it's always 1.0. * Make it package level to test serialization. * @return the cloud event itself. */ CloudEvent setSpecVersion(String specVersion) { this.specVersion = specVersion; return this; } private static boolean validateAttributeName(String name) { if (RESERVED_ATTRIBUTE_NAMES.contains(name)) { return false; } for (int i = 0; i < name.length(); i++) { char c = name.charAt(i); if (!((c >= 'a' && c <= 'z') || (c >= '0' && c <= '9'))) { return false; } } return true; } static class JacksonSerializer implements JsonSerializer { private final JacksonAdapter jacksonAdapter = new JacksonAdapter(); @Override public <T> T deserialize(InputStream stream, TypeReference<T> typeReference) { try { return jacksonAdapter.deserialize(stream, typeReference.getJavaType(), SerializerEncoding.JSON); } catch (IOException e) { throw LOGGER.logExceptionAsError(new RuntimeException(e)); } } @Override public <T> Mono<T> deserializeAsync(InputStream stream, TypeReference<T> typeReference) { return Mono.defer(() -> Mono.just(deserialize(stream, typeReference))); } @Override public void serialize(OutputStream stream, Object value) { try { jacksonAdapter.serialize(value, SerializerEncoding.JSON, stream); } catch (IOException e) { throw LOGGER.logExceptionAsError(new RuntimeException(e)); } } @Override public Mono<Void> serializeAsync(OutputStream stream, Object value) { return Mono.fromRunnable(() -> serialize(stream, value)); } JacksonAdapter getJacksonAdapter() { return jacksonAdapter; } } }
class accepts any String for compatibility with legacy systems. * @param type Type of event related to the originating occurrence. * @param data A {@link BinaryData}
can use this -> `Objects.requireNonNull(id, "'id' cannot be null.")` (note: it's `Objects`, not `Object`)
/**
 * Set a custom id. Note that a random id is already set by default.
 *
 * @param id the id to set.
 * @return the cloud event itself.
 * @throws NullPointerException if id is null.
 * @throws IllegalArgumentException if id is empty.
 */
public CloudEvent setId(String id) {
    // Review fix: standard-library null guard instead of manual Objects.isNull + logged NPE.
    Objects.requireNonNull(id, "id cannot be null");
    if (id.isEmpty()) {
        throw LOGGER.logExceptionAsError(new IllegalArgumentException("id cannot be empty"));
    }
    this.id = id;
    return this;
}
}
/**
 * Set a custom id. Note that a random id is already set by default.
 *
 * @param id the id to set.
 * @return the cloud event itself.
 * @throws NullPointerException if id is null.
 * @throws IllegalArgumentException if id is empty.
 */
public CloudEvent setId(String id) {
    Objects.requireNonNull(id, "'id' cannot be null");
    if (!id.isEmpty()) {
        this.id = id;
        return this;
    }
    // An empty id is rejected: id must uniquely identify the event together with source.
    throw LOGGER.logExceptionAsError(new IllegalArgumentException("'id' cannot be empty"));
}
class CloudEvent { private static final String SPEC_VERSION = "1.0"; private static final JsonSerializer SERIALIZER; static { JsonSerializer tmp; try { tmp = JsonSerializerProviders.createInstance(); } catch (IllegalStateException e) { tmp = new JacksonSerializer(); } SERIALIZER = tmp; } private static final ClientLogger LOGGER = new ClientLogger(CloudEvent.class); private static final Set<String> RESERVED_ATTRIBUTE_NAMES = new HashSet<>(Arrays.asList( "specversion", "id", "source", "type", "datacontenttype", "dataschema", "subject", "time", "data" )); /* * An identifier for the event. The combination of id and source must be * unique for each distinct event. */ @JsonProperty(value = "id", required = true) private String id; /* * Identifies the context in which an event happened. The combination of id * and source must be unique for each distinct event. */ @JsonProperty(value = "source", required = true) private String source; /* * Event data specific to the event type. */ @JsonProperty(value = "data") private Object data; /* * Event data specific to the event type, encoded as a base64 string. */ @JsonProperty(value = "data_base64") private String dataBase64; /* * Type of event related to the originating occurrence. */ @JsonProperty(value = "type", required = true) private String type; /* * The time (in UTC) the event was generated, in RFC3339 format. */ @JsonProperty(value = "time") private OffsetDateTime time; /* * The version of the CloudEvents specification which the event uses. */ @JsonProperty(value = "specversion", required = true) private String specVersion; /* * Identifies the schema that data adheres to. */ @JsonProperty(value = "dataschema") private String dataSchema; /* * Content type of data value. */ @JsonProperty(value = "datacontenttype") private String dataContentType; /* * This describes the subject of the event in the context of the event * producer (identified by source). 
*/ @JsonProperty(value = "subject") private String subject; @JsonIgnore private Map<String, Object> extensionAttributes; /* * Cache serialized data for getData() */ @JsonIgnore private BinaryData binaryData; /** * * @param source Identifies the context in which an event happened. The combination of id and source must be unique * for each distinct event. * @param type Type of event related to the originating occurrence. * @param data A {@link BinaryData} that wraps the original data, which can be a String, byte[], or model class. * @param format Set to {@link CloudEventDataFormat * {@link CloudEventDataFormat * @param dataContentType The content type of the data. It has no impact on how the data is serialized but tells * the event subscriber how to use the data. Typically the value is of MIME types such as * "application/json", "text/plain", "text/xml", "application/+avro", etc. It can be null. * @throws NullPointerException if source, type, data, or format is null. */ public CloudEvent(String source, String type, BinaryData data, CloudEventDataFormat format, String dataContentType) { if (Objects.isNull(source)) { throw LOGGER.logExceptionAsError(new NullPointerException("'source' cannot be null.")); } if (Objects.isNull(type)) { throw LOGGER.logExceptionAsError(new NullPointerException("'type' cannot be null.")); } if (Objects.isNull(data)) { throw LOGGER.logExceptionAsError(new NullPointerException("'data' cannot be null.")); } if (Objects.isNull(format)) { throw LOGGER.logExceptionAsError(new NullPointerException("'format' cannot be null.")); } this.source = source; this.type = type; if (CloudEventDataFormat.BYTES == format) { this.dataBase64 = Base64.getEncoder().encodeToString(data.toBytes()); } else { this.data = data.toString(); } this.dataContentType = dataContentType; this.id = UUID.randomUUID().toString(); this.specVersion = CloudEvent.SPEC_VERSION; } private CloudEvent() { } /** * Deserialize a list of {@link CloudEvent CloudEvents} from a JSON string and 
validate whether any CloudEvents have * null id', 'source', or 'type'. If you want to skip this validation, use {@link * @param cloudEventsJson the JSON payload containing one or more events. * * @return all of the events in the payload deserialized as {@link CloudEvent CloudEvents}. * @throws NullPointerException if cloudEventsJson is null. * @throws IllegalArgumentException if the input parameter isn't a correct JSON string for a cloud event * or an array of it, or any deserialized CloudEvents have null 'id', 'source', or 'type'. */ public static List<CloudEvent> fromString(String cloudEventsJson) { return fromString(cloudEventsJson, false); } /** * Deserialize a list of {@link CloudEvent CloudEvents} from a JSON string. * @param cloudEventsJson the JSON payload containing one or more events. * @param skipValidation set to true if you'd like to skip the validation for the deserialized CloudEvents. A valid * CloudEvent should have 'id', 'source' and 'type' not null. * * @return all of the events in the payload deserialized as {@link CloudEvent CloudEvents}. * @throws NullPointerException if cloudEventsJson is null. * @throws IllegalArgumentException if the input parameter isn't a JSON string for a cloud event or an array of it, * or skipValidation is false and any CloudEvents have null id', 'source', or 'type'. 
*/ public static List<CloudEvent> fromString(String cloudEventsJson, boolean skipValidation) { if (cloudEventsJson == null) { throw LOGGER.logExceptionAsError(new NullPointerException("'cloudEventsJson' cannot be null")); } try { List<CloudEvent> events = Arrays.asList(SERIALIZER.deserialize( new ByteArrayInputStream(cloudEventsJson.getBytes(StandardCharsets.UTF_8)), TypeReference.createInstance(CloudEvent[].class))); if (!skipValidation) { for (CloudEvent event : events) { if (event.getId() == null || event.getSource() == null || event.getType() == null) { throw LOGGER.logExceptionAsError(new IllegalArgumentException( "'id', 'source' and 'type' are mandatory attributes for a CloudEvent. " + "Check if the input param is a JSON string for a CloudEvent or an array of it.")); } } } return events; } catch (UncheckedIOException uncheckedIOException) { throw LOGGER.logExceptionAsError(new IllegalArgumentException("The input parameter isn't a JSON string.", uncheckedIOException.getCause())); } } /** * Get the id of the cloud event. * @return the id. */ public String getId() { return this.id; } /** * Set a custom id. Note that a random id is already set by default. * @param id the id to set. * * @return the cloud event itself. * @throws NullPointerException if id is null. * @throws IllegalArgumentException if id is empty. */ /** * Get the URI source of the event. * @return the source. */ public String getSource() { return this.source; } /** * Get the data associated with this event as a {@link BinaryData}, which has API to deserialize the data into * a String, an Object, or a byte[]. * @return A {@link BinaryData} that wraps the this event's data payload. 
*/ public BinaryData getData() { if (this.binaryData == null) { if (this.data != null) { if (this.data instanceof String) { this.binaryData = BinaryData.fromString((String) this.data); } else if (this.data instanceof byte[]) { this.binaryData = BinaryData.fromBytes((byte[]) this.data); } else { this.binaryData = BinaryData.fromObject(this.data, SERIALIZER); } } else if (this.dataBase64 != null) { this.binaryData = BinaryData.fromString(this.dataBase64); } } return this.binaryData; } /** * Get the type of event, e.g. "Contoso.Items.ItemReceived". * @return the type of the event. */ public String getType() { return this.type; } /** * Get the time associated with the occurrence of the event. * @return the event time, or null if the time is not set. */ public OffsetDateTime getTime() { return this.time; } /** * Set the time associated with the occurrence of the event. * @param time the time to set. * * @return the cloud event itself. */ public CloudEvent setTime(OffsetDateTime time) { this.time = time; return this; } /** * Get the content MIME type that the data is in. A null value indicates that the data is either nonexistent or in the * "application/json" type. Note that "application/json" is still a possible value for this field. * @return the content type the data is in, or null if the data is nonexistent or in "application/json" format. */ public String getDataContentType() { return this.dataContentType; } /** * Get the schema that the data adheres to. * @return a URI of the data schema, or null if it is not set. */ public String getDataSchema() { return this.dataSchema; } /** * Set the schema that the data adheres to. * @param dataSchema a URI identifying the schema of the data. * * @return the cloud event itself. */ public CloudEvent setDataSchema(String dataSchema) { this.dataSchema = dataSchema; return this; } /** * Get the subject associated with this event. * @return the subject, or null if the subject was not set. 
*/ public String getSubject() { return this.subject; } /** * Set the subject of the event. * @param subject the subject to set. * * @return the cloud event itself. */ public CloudEvent setSubject(String subject) { this.subject = subject; return this; } /** * Get a map of the additional user-defined attributes associated with this event. * @return the extension attributes as an unmodifiable map. */ @JsonAnyGetter public Map<String, Object> getExtensionAttributes() { return this.extensionAttributes; } /** * Add/Overwrite a single extension attribute to the cloud event. * @param name the name of the attribute. It must contains only alphanumeric characters and not be be any * CloudEvent reserved attribute names. * @param value the value to associate with the name. * * @return the cloud event itself. * @throws IllegalArgumentException if name format isn't correct. */ @JsonAnySetter public CloudEvent addExtensionAttribute(String name, Object value) { if (Objects.isNull(name)) { throw LOGGER.logExceptionAsError(new IllegalArgumentException("'name' cannot be null.")); } if (Objects.isNull(value)) { throw LOGGER.logExceptionAsError(new IllegalArgumentException("'value' cannot be null.")); } if (!validateAttributeName(name)) { throw LOGGER.logExceptionAsError(new IllegalArgumentException( "'name' must have only small-case alphanumeric characters and not be one of the CloudEvent reserved " + "attribute names")); } if (this.extensionAttributes == null) { this.extensionAttributes = new HashMap<>(); } this.extensionAttributes.put(name.toLowerCase(Locale.ENGLISH), value); return this; } /** * Get the spec version. Users don't need to access it because it's always 1.0. * Make it package level to test deserialization. * @return The spec version. */ String getSpecVersion() { return this.specVersion; } /** * Set the spec version. Users don't need to access it because it's always 1.0. * Make it package level to test serialization. * @return the cloud event itself. 
*/ CloudEvent setSpecVersion(String specVersion) { this.specVersion = specVersion; return this; } private static boolean validateAttributeName(String name) { if (RESERVED_ATTRIBUTE_NAMES.contains(name)) { return false; } for (int i = 0; i < name.length(); i++) { char c = name.charAt(i); if (!((c >= 'a' && c <= 'z') || (c >= '0' && c <= '9'))) { return false; } } return true; } static class JacksonSerializer implements JsonSerializer { private final JacksonAdapter jacksonAdapter = new JacksonAdapter(); @Override public <T> T deserialize(InputStream stream, TypeReference<T> typeReference) { try { return jacksonAdapter.deserialize(stream, typeReference.getJavaType(), SerializerEncoding.JSON); } catch (IOException e) { throw LOGGER.logExceptionAsError(new RuntimeException(e)); } } @Override public <T> Mono<T> deserializeAsync(InputStream stream, TypeReference<T> typeReference) { return Mono.defer(() -> Mono.just(deserialize(stream, typeReference))); } @Override public void serialize(OutputStream stream, Object value) { try { jacksonAdapter.serialize(value, SerializerEncoding.JSON, stream); } catch (IOException e) { throw LOGGER.logExceptionAsError(new RuntimeException(e)); } } @Override public Mono<Void> serializeAsync(OutputStream stream, Object value) { return Mono.fromRunnable(() -> serialize(stream, value)); } JacksonAdapter getJacksonAdapter() { return jacksonAdapter; } } }
class accepts any String for compatibility with legacy systems. * @param type Type of event related to the originating occurrence. * @param data A {@link BinaryData}
This `data` field is for the serializer to serialize the object even though it's private. `binaryData` is ignored when a cloudEvent is serialized.
public CloudEvent(String source, String type, BinaryData data, CloudEventDataFormat format, String dataContentType) { if (Objects.isNull(source)) { throw LOGGER.logExceptionAsError(new NullPointerException("'source' cannot be null.")); } if (Objects.isNull(type)) { throw LOGGER.logExceptionAsError(new NullPointerException("'type' cannot be null.")); } if (Objects.isNull(data)) { throw LOGGER.logExceptionAsError(new NullPointerException("'data' cannot be null.")); } if (Objects.isNull(format)) { throw LOGGER.logExceptionAsError(new NullPointerException("'format' cannot be null.")); } this.source = source; this.type = type; if (CloudEventDataFormat.BYTES == format) { this.dataBase64 = Base64.getEncoder().encodeToString(data.toBytes()); } else { this.data = data.toString(); } this.dataContentType = dataContentType; this.id = UUID.randomUUID().toString(); this.specVersion = CloudEvent.SPEC_VERSION; }
this.data = data.toString();
public CloudEvent(String source, String type, BinaryData data, CloudEventDataFormat format, String dataContentType) { Objects.requireNonNull(source, "'source' cannot be null."); Objects.requireNonNull(type, "'type' cannot be null."); this.source = source; this.type = type; if (data != null) { Objects.requireNonNull(format, "'format' cannot be null when 'data' isn't null."); if (CloudEventDataFormat.BYTES == format) { this.dataBase64 = Base64.getEncoder().encodeToString(data.toBytes()); } else { try { this.data = BINARY_DATA_OBJECT_MAPPER.readTree(data.toBytes()); } catch (IOException e) { throw LOGGER.logExceptionAsError(new IllegalArgumentException("'data' isn't in valid Json format", e)); } } } this.dataContentType = dataContentType; this.id = UUID.randomUUID().toString(); this.specVersion = CloudEvent.SPEC_VERSION; this.binaryData = data; }
class CloudEvent { private static final String SPEC_VERSION = "1.0"; private static final JsonSerializer SERIALIZER; static { JsonSerializer tmp; try { tmp = JsonSerializerProviders.createInstance(); } catch (IllegalStateException e) { tmp = new JacksonSerializer(); } SERIALIZER = tmp; } private static final ClientLogger LOGGER = new ClientLogger(CloudEvent.class); private static final Set<String> RESERVED_ATTRIBUTE_NAMES = new HashSet<>(Arrays.asList( "specversion", "id", "source", "type", "datacontenttype", "dataschema", "subject", "time", "data" )); /* * An identifier for the event. The combination of id and source must be * unique for each distinct event. */ @JsonProperty(value = "id", required = true) private String id; /* * Identifies the context in which an event happened. The combination of id * and source must be unique for each distinct event. */ @JsonProperty(value = "source", required = true) private String source; /* * Event data specific to the event type. */ @JsonProperty(value = "data") private Object data; /* * Event data specific to the event type, encoded as a base64 string. */ @JsonProperty(value = "data_base64") private String dataBase64; /* * Type of event related to the originating occurrence. */ @JsonProperty(value = "type", required = true) private String type; /* * The time (in UTC) the event was generated, in RFC3339 format. */ @JsonProperty(value = "time") private OffsetDateTime time; /* * The version of the CloudEvents specification which the event uses. */ @JsonProperty(value = "specversion", required = true) private String specVersion; /* * Identifies the schema that data adheres to. */ @JsonProperty(value = "dataschema") private String dataSchema; /* * Content type of data value. */ @JsonProperty(value = "datacontenttype") private String dataContentType; /* * This describes the subject of the event in the context of the event * producer (identified by source). 
*/ @JsonProperty(value = "subject") private String subject; @JsonIgnore private Map<String, Object> extensionAttributes; /* * Cache serialized data for getData() */ @JsonIgnore private BinaryData binaryData; /** * * @param source Identifies the context in which an event happened. The combination of id and source must be unique * for each distinct event. * @param type Type of event related to the originating occurrence. * @param data A {@link BinaryData} that wraps the original data, which can be a String, byte[], or model class. * @param format Set to {@link CloudEventDataFormat * {@link CloudEventDataFormat * @param dataContentType The content type of the data. It has no impact on how the data is serialized but tells * the event subscriber how to use the data. Typically the value is of MIME types such as * "application/json", "text/plain", "text/xml", "application/+avro", etc. It can be null. * @throws NullPointerException if source, type, data, or format is null. */ private CloudEvent() { } /** * Deserialize a list of {@link CloudEvent CloudEvents} from a JSON string and validate whether any CloudEvents have * null id', 'source', or 'type'. If you want to skip this validation, use {@link * @param cloudEventsJson the JSON payload containing one or more events. * * @return all of the events in the payload deserialized as {@link CloudEvent CloudEvents}. * @throws NullPointerException if cloudEventsJson is null. * @throws IllegalArgumentException if the input parameter isn't a correct JSON string for a cloud event * or an array of it, or any deserialized CloudEvents have null 'id', 'source', or 'type'. */ public static List<CloudEvent> fromString(String cloudEventsJson) { return fromString(cloudEventsJson, false); } /** * Deserialize a list of {@link CloudEvent CloudEvents} from a JSON string. * @param cloudEventsJson the JSON payload containing one or more events. 
* @param skipValidation set to true if you'd like to skip the validation for the deserialized CloudEvents. A valid * CloudEvent should have 'id', 'source' and 'type' not null. * * @return all of the events in the payload deserialized as {@link CloudEvent CloudEvents}. * @throws NullPointerException if cloudEventsJson is null. * @throws IllegalArgumentException if the input parameter isn't a JSON string for a cloud event or an array of it, * or skipValidation is false and any CloudEvents have null id', 'source', or 'type'. */ public static List<CloudEvent> fromString(String cloudEventsJson, boolean skipValidation) { if (cloudEventsJson == null) { throw LOGGER.logExceptionAsError(new NullPointerException("'cloudEventsJson' cannot be null")); } try { List<CloudEvent> events = Arrays.asList(SERIALIZER.deserialize( new ByteArrayInputStream(cloudEventsJson.getBytes(StandardCharsets.UTF_8)), TypeReference.createInstance(CloudEvent[].class))); if (!skipValidation) { for (CloudEvent event : events) { if (event.getId() == null || event.getSource() == null || event.getType() == null) { throw LOGGER.logExceptionAsError(new IllegalArgumentException( "'id', 'source' and 'type' are mandatory attributes for a CloudEvent. " + "Check if the input param is a JSON string for a CloudEvent or an array of it.")); } } } return events; } catch (UncheckedIOException uncheckedIOException) { throw LOGGER.logExceptionAsError(new IllegalArgumentException("The input parameter isn't a JSON string.", uncheckedIOException.getCause())); } } /** * Get the id of the cloud event. * @return the id. */ public String getId() { return this.id; } /** * Set a custom id. Note that a random id is already set by default. * @param id the id to set. * * @return the cloud event itself. * @throws NullPointerException if id is null. * @throws IllegalArgumentException if id is empty. 
*/ public CloudEvent setId(String id) { if (Objects.isNull(id)) { throw LOGGER.logExceptionAsError(new NullPointerException("id cannot be null")); } if (id.isEmpty()) { throw LOGGER.logExceptionAsError(new IllegalArgumentException("id cannot be empty")); } this.id = id; return this; } /** * Get the URI source of the event. * @return the source. */ public String getSource() { return this.source; } /** * Get the data associated with this event as a {@link BinaryData}, which has API to deserialize the data into * a String, an Object, or a byte[]. * @return A {@link BinaryData} that wraps the this event's data payload. */ public BinaryData getData() { if (this.binaryData == null) { if (this.data != null) { if (this.data instanceof String) { this.binaryData = BinaryData.fromString((String) this.data); } else if (this.data instanceof byte[]) { this.binaryData = BinaryData.fromBytes((byte[]) this.data); } else { this.binaryData = BinaryData.fromObject(this.data, SERIALIZER); } } else if (this.dataBase64 != null) { this.binaryData = BinaryData.fromString(this.dataBase64); } } return this.binaryData; } /** * Get the type of event, e.g. "Contoso.Items.ItemReceived". * @return the type of the event. */ public String getType() { return this.type; } /** * Get the time associated with the occurrence of the event. * @return the event time, or null if the time is not set. */ public OffsetDateTime getTime() { return this.time; } /** * Set the time associated with the occurrence of the event. * @param time the time to set. * * @return the cloud event itself. */ public CloudEvent setTime(OffsetDateTime time) { this.time = time; return this; } /** * Get the content MIME type that the data is in. A null value indicates that the data is either nonexistent or in the * "application/json" type. Note that "application/json" is still a possible value for this field. * @return the content type the data is in, or null if the data is nonexistent or in "application/json" format. 
*/ public String getDataContentType() { return this.dataContentType; } /** * Get the schema that the data adheres to. * @return a URI of the data schema, or null if it is not set. */ public String getDataSchema() { return this.dataSchema; } /** * Set the schema that the data adheres to. * @param dataSchema a URI identifying the schema of the data. * * @return the cloud event itself. */ public CloudEvent setDataSchema(String dataSchema) { this.dataSchema = dataSchema; return this; } /** * Get the subject associated with this event. * @return the subject, or null if the subject was not set. */ public String getSubject() { return this.subject; } /** * Set the subject of the event. * @param subject the subject to set. * * @return the cloud event itself. */ public CloudEvent setSubject(String subject) { this.subject = subject; return this; } /** * Get a map of the additional user-defined attributes associated with this event. * @return the extension attributes as an unmodifiable map. */ @JsonAnyGetter public Map<String, Object> getExtensionAttributes() { return this.extensionAttributes; } /** * Add/Overwrite a single extension attribute to the cloud event. * @param name the name of the attribute. It must contains only alphanumeric characters and not be be any * CloudEvent reserved attribute names. * @param value the value to associate with the name. * * @return the cloud event itself. * @throws IllegalArgumentException if name format isn't correct. 
*/ @JsonAnySetter public CloudEvent addExtensionAttribute(String name, Object value) { if (Objects.isNull(name)) { throw LOGGER.logExceptionAsError(new IllegalArgumentException("'name' cannot be null.")); } if (Objects.isNull(value)) { throw LOGGER.logExceptionAsError(new IllegalArgumentException("'value' cannot be null.")); } if (!validateAttributeName(name)) { throw LOGGER.logExceptionAsError(new IllegalArgumentException( "'name' must have only small-case alphanumeric characters and not be one of the CloudEvent reserved " + "attribute names")); } if (this.extensionAttributes == null) { this.extensionAttributes = new HashMap<>(); } this.extensionAttributes.put(name.toLowerCase(Locale.ENGLISH), value); return this; } /** * Get the spec version. Users don't need to access it because it's always 1.0. * Make it package level to test deserialization. * @return The spec version. */ String getSpecVersion() { return this.specVersion; } /** * Set the spec version. Users don't need to access it because it's always 1.0. * Make it package level to test serialization. * @return the cloud event itself. 
*/ CloudEvent setSpecVersion(String specVersion) { this.specVersion = specVersion; return this; } private static boolean validateAttributeName(String name) { if (RESERVED_ATTRIBUTE_NAMES.contains(name)) { return false; } for (int i = 0; i < name.length(); i++) { char c = name.charAt(i); if (!((c >= 'a' && c <= 'z') || (c >= '0' && c <= '9'))) { return false; } } return true; } static class JacksonSerializer implements JsonSerializer { private final JacksonAdapter jacksonAdapter = new JacksonAdapter(); @Override public <T> T deserialize(InputStream stream, TypeReference<T> typeReference) { try { return jacksonAdapter.deserialize(stream, typeReference.getJavaType(), SerializerEncoding.JSON); } catch (IOException e) { throw LOGGER.logExceptionAsError(new RuntimeException(e)); } } @Override public <T> Mono<T> deserializeAsync(InputStream stream, TypeReference<T> typeReference) { return Mono.defer(() -> Mono.just(deserialize(stream, typeReference))); } @Override public void serialize(OutputStream stream, Object value) { try { jacksonAdapter.serialize(value, SerializerEncoding.JSON, stream); } catch (IOException e) { throw LOGGER.logExceptionAsError(new RuntimeException(e)); } } @Override public Mono<Void> serializeAsync(OutputStream stream, Object value) { return Mono.fromRunnable(() -> serialize(stream, value)); } JacksonAdapter getJacksonAdapter() { return jacksonAdapter; } } }
class accepts any String for compatibility with legacy systems. * @param type Type of event related to the originating occurrence. * @param data A {@link BinaryData}
but the user can only pass binaryData, right? so it is already serialized.
public CloudEvent(String source, String type, BinaryData data, CloudEventDataFormat format, String dataContentType) { if (Objects.isNull(source)) { throw LOGGER.logExceptionAsError(new NullPointerException("'source' cannot be null.")); } if (Objects.isNull(type)) { throw LOGGER.logExceptionAsError(new NullPointerException("'type' cannot be null.")); } if (Objects.isNull(data)) { throw LOGGER.logExceptionAsError(new NullPointerException("'data' cannot be null.")); } if (Objects.isNull(format)) { throw LOGGER.logExceptionAsError(new NullPointerException("'format' cannot be null.")); } this.source = source; this.type = type; if (CloudEventDataFormat.BYTES == format) { this.dataBase64 = Base64.getEncoder().encodeToString(data.toBytes()); } else { this.data = data.toString(); } this.dataContentType = dataContentType; this.id = UUID.randomUUID().toString(); this.specVersion = CloudEvent.SPEC_VERSION; }
this.data = data.toString();
public CloudEvent(String source, String type, BinaryData data, CloudEventDataFormat format, String dataContentType) { Objects.requireNonNull(source, "'source' cannot be null."); Objects.requireNonNull(type, "'type' cannot be null."); this.source = source; this.type = type; if (data != null) { Objects.requireNonNull(format, "'format' cannot be null when 'data' isn't null."); if (CloudEventDataFormat.BYTES == format) { this.dataBase64 = Base64.getEncoder().encodeToString(data.toBytes()); } else { try { this.data = BINARY_DATA_OBJECT_MAPPER.readTree(data.toBytes()); } catch (IOException e) { throw LOGGER.logExceptionAsError(new IllegalArgumentException("'data' isn't in valid Json format", e)); } } } this.dataContentType = dataContentType; this.id = UUID.randomUUID().toString(); this.specVersion = CloudEvent.SPEC_VERSION; this.binaryData = data; }
class CloudEvent { private static final String SPEC_VERSION = "1.0"; private static final JsonSerializer SERIALIZER; static { JsonSerializer tmp; try { tmp = JsonSerializerProviders.createInstance(); } catch (IllegalStateException e) { tmp = new JacksonSerializer(); } SERIALIZER = tmp; } private static final ClientLogger LOGGER = new ClientLogger(CloudEvent.class); private static final Set<String> RESERVED_ATTRIBUTE_NAMES = new HashSet<>(Arrays.asList( "specversion", "id", "source", "type", "datacontenttype", "dataschema", "subject", "time", "data" )); /* * An identifier for the event. The combination of id and source must be * unique for each distinct event. */ @JsonProperty(value = "id", required = true) private String id; /* * Identifies the context in which an event happened. The combination of id * and source must be unique for each distinct event. */ @JsonProperty(value = "source", required = true) private String source; /* * Event data specific to the event type. */ @JsonProperty(value = "data") private Object data; /* * Event data specific to the event type, encoded as a base64 string. */ @JsonProperty(value = "data_base64") private String dataBase64; /* * Type of event related to the originating occurrence. */ @JsonProperty(value = "type", required = true) private String type; /* * The time (in UTC) the event was generated, in RFC3339 format. */ @JsonProperty(value = "time") private OffsetDateTime time; /* * The version of the CloudEvents specification which the event uses. */ @JsonProperty(value = "specversion", required = true) private String specVersion; /* * Identifies the schema that data adheres to. */ @JsonProperty(value = "dataschema") private String dataSchema; /* * Content type of data value. */ @JsonProperty(value = "datacontenttype") private String dataContentType; /* * This describes the subject of the event in the context of the event * producer (identified by source). 
*/ @JsonProperty(value = "subject") private String subject; @JsonIgnore private Map<String, Object> extensionAttributes; /* * Cache serialized data for getData() */ @JsonIgnore private BinaryData binaryData; /** * * @param source Identifies the context in which an event happened. The combination of id and source must be unique * for each distinct event. * @param type Type of event related to the originating occurrence. * @param data A {@link BinaryData} that wraps the original data, which can be a String, byte[], or model class. * @param format Set to {@link CloudEventDataFormat * {@link CloudEventDataFormat * @param dataContentType The content type of the data. It has no impact on how the data is serialized but tells * the event subscriber how to use the data. Typically the value is of MIME types such as * "application/json", "text/plain", "text/xml", "application/+avro", etc. It can be null. * @throws NullPointerException if source, type, data, or format is null. */ private CloudEvent() { } /** * Deserialize a list of {@link CloudEvent CloudEvents} from a JSON string and validate whether any CloudEvents have * null id', 'source', or 'type'. If you want to skip this validation, use {@link * @param cloudEventsJson the JSON payload containing one or more events. * * @return all of the events in the payload deserialized as {@link CloudEvent CloudEvents}. * @throws NullPointerException if cloudEventsJson is null. * @throws IllegalArgumentException if the input parameter isn't a correct JSON string for a cloud event * or an array of it, or any deserialized CloudEvents have null 'id', 'source', or 'type'. */ public static List<CloudEvent> fromString(String cloudEventsJson) { return fromString(cloudEventsJson, false); } /** * Deserialize a list of {@link CloudEvent CloudEvents} from a JSON string. * @param cloudEventsJson the JSON payload containing one or more events. 
* @param skipValidation set to true if you'd like to skip the validation for the deserialized CloudEvents. A valid * CloudEvent should have 'id', 'source' and 'type' not null. * * @return all of the events in the payload deserialized as {@link CloudEvent CloudEvents}. * @throws NullPointerException if cloudEventsJson is null. * @throws IllegalArgumentException if the input parameter isn't a JSON string for a cloud event or an array of it, * or skipValidation is false and any CloudEvents have null id', 'source', or 'type'. */ public static List<CloudEvent> fromString(String cloudEventsJson, boolean skipValidation) { if (cloudEventsJson == null) { throw LOGGER.logExceptionAsError(new NullPointerException("'cloudEventsJson' cannot be null")); } try { List<CloudEvent> events = Arrays.asList(SERIALIZER.deserialize( new ByteArrayInputStream(cloudEventsJson.getBytes(StandardCharsets.UTF_8)), TypeReference.createInstance(CloudEvent[].class))); if (!skipValidation) { for (CloudEvent event : events) { if (event.getId() == null || event.getSource() == null || event.getType() == null) { throw LOGGER.logExceptionAsError(new IllegalArgumentException( "'id', 'source' and 'type' are mandatory attributes for a CloudEvent. " + "Check if the input param is a JSON string for a CloudEvent or an array of it.")); } } } return events; } catch (UncheckedIOException uncheckedIOException) { throw LOGGER.logExceptionAsError(new IllegalArgumentException("The input parameter isn't a JSON string.", uncheckedIOException.getCause())); } } /** * Get the id of the cloud event. * @return the id. */ public String getId() { return this.id; } /** * Set a custom id. Note that a random id is already set by default. * @param id the id to set. * * @return the cloud event itself. * @throws NullPointerException if id is null. * @throws IllegalArgumentException if id is empty. 
*/ public CloudEvent setId(String id) { if (Objects.isNull(id)) { throw LOGGER.logExceptionAsError(new NullPointerException("id cannot be null")); } if (id.isEmpty()) { throw LOGGER.logExceptionAsError(new IllegalArgumentException("id cannot be empty")); } this.id = id; return this; } /** * Get the URI source of the event. * @return the source. */ public String getSource() { return this.source; } /** * Get the data associated with this event as a {@link BinaryData}, which has API to deserialize the data into * a String, an Object, or a byte[]. * @return A {@link BinaryData} that wraps the this event's data payload. */ public BinaryData getData() { if (this.binaryData == null) { if (this.data != null) { if (this.data instanceof String) { this.binaryData = BinaryData.fromString((String) this.data); } else if (this.data instanceof byte[]) { this.binaryData = BinaryData.fromBytes((byte[]) this.data); } else { this.binaryData = BinaryData.fromObject(this.data, SERIALIZER); } } else if (this.dataBase64 != null) { this.binaryData = BinaryData.fromString(this.dataBase64); } } return this.binaryData; } /** * Get the type of event, e.g. "Contoso.Items.ItemReceived". * @return the type of the event. */ public String getType() { return this.type; } /** * Get the time associated with the occurrence of the event. * @return the event time, or null if the time is not set. */ public OffsetDateTime getTime() { return this.time; } /** * Set the time associated with the occurrence of the event. * @param time the time to set. * * @return the cloud event itself. */ public CloudEvent setTime(OffsetDateTime time) { this.time = time; return this; } /** * Get the content MIME type that the data is in. A null value indicates that the data is either nonexistent or in the * "application/json" type. Note that "application/json" is still a possible value for this field. * @return the content type the data is in, or null if the data is nonexistent or in "application/json" format. 
*/ public String getDataContentType() { return this.dataContentType; } /** * Get the schema that the data adheres to. * @return a URI of the data schema, or null if it is not set. */ public String getDataSchema() { return this.dataSchema; } /** * Set the schema that the data adheres to. * @param dataSchema a URI identifying the schema of the data. * * @return the cloud event itself. */ public CloudEvent setDataSchema(String dataSchema) { this.dataSchema = dataSchema; return this; } /** * Get the subject associated with this event. * @return the subject, or null if the subject was not set. */ public String getSubject() { return this.subject; } /** * Set the subject of the event. * @param subject the subject to set. * * @return the cloud event itself. */ public CloudEvent setSubject(String subject) { this.subject = subject; return this; } /** * Get a map of the additional user-defined attributes associated with this event. * @return the extension attributes as an unmodifiable map. */ @JsonAnyGetter public Map<String, Object> getExtensionAttributes() { return this.extensionAttributes; } /** * Add/Overwrite a single extension attribute to the cloud event. * @param name the name of the attribute. It must contains only alphanumeric characters and not be be any * CloudEvent reserved attribute names. * @param value the value to associate with the name. * * @return the cloud event itself. * @throws IllegalArgumentException if name format isn't correct. 
*/ @JsonAnySetter public CloudEvent addExtensionAttribute(String name, Object value) { if (Objects.isNull(name)) { throw LOGGER.logExceptionAsError(new IllegalArgumentException("'name' cannot be null.")); } if (Objects.isNull(value)) { throw LOGGER.logExceptionAsError(new IllegalArgumentException("'value' cannot be null.")); } if (!validateAttributeName(name)) { throw LOGGER.logExceptionAsError(new IllegalArgumentException( "'name' must have only small-case alphanumeric characters and not be one of the CloudEvent reserved " + "attribute names")); } if (this.extensionAttributes == null) { this.extensionAttributes = new HashMap<>(); } this.extensionAttributes.put(name.toLowerCase(Locale.ENGLISH), value); return this; } /** * Get the spec version. Users don't need to access it because it's always 1.0. * Make it package level to test deserialization. * @return The spec version. */ String getSpecVersion() { return this.specVersion; } /** * Set the spec version. Users don't need to access it because it's always 1.0. * Make it package level to test serialization. * @return the cloud event itself. 
*/ CloudEvent setSpecVersion(String specVersion) { this.specVersion = specVersion; return this; } private static boolean validateAttributeName(String name) { if (RESERVED_ATTRIBUTE_NAMES.contains(name)) { return false; } for (int i = 0; i < name.length(); i++) { char c = name.charAt(i); if (!((c >= 'a' && c <= 'z') || (c >= '0' && c <= '9'))) { return false; } } return true; } static class JacksonSerializer implements JsonSerializer { private final JacksonAdapter jacksonAdapter = new JacksonAdapter(); @Override public <T> T deserialize(InputStream stream, TypeReference<T> typeReference) { try { return jacksonAdapter.deserialize(stream, typeReference.getJavaType(), SerializerEncoding.JSON); } catch (IOException e) { throw LOGGER.logExceptionAsError(new RuntimeException(e)); } } @Override public <T> Mono<T> deserializeAsync(InputStream stream, TypeReference<T> typeReference) { return Mono.defer(() -> Mono.just(deserialize(stream, typeReference))); } @Override public void serialize(OutputStream stream, Object value) { try { jacksonAdapter.serialize(value, SerializerEncoding.JSON, stream); } catch (IOException e) { throw LOGGER.logExceptionAsError(new RuntimeException(e)); } } @Override public Mono<Void> serializeAsync(OutputStream stream, Object value) { return Mono.fromRunnable(() -> serialize(stream, value)); } JacksonAdapter getJacksonAdapter() { return jacksonAdapter; } } }
class accepts any String for compatibility with legacy systems. * @param type Type of event related to the originating occurrence. * @param data A {@link BinaryData}
I mean, the default JsonSerializer will serialize CloudEvent's binaryData to unexpected output when the CloudEvent is serialized and sent out to the service. A customized JSON serializer will work, but it's better that CloudEvent can be serialized with the default serializer.
public CloudEvent(String source, String type, BinaryData data, CloudEventDataFormat format, String dataContentType) { if (Objects.isNull(source)) { throw LOGGER.logExceptionAsError(new NullPointerException("'source' cannot be null.")); } if (Objects.isNull(type)) { throw LOGGER.logExceptionAsError(new NullPointerException("'type' cannot be null.")); } if (Objects.isNull(data)) { throw LOGGER.logExceptionAsError(new NullPointerException("'data' cannot be null.")); } if (Objects.isNull(format)) { throw LOGGER.logExceptionAsError(new NullPointerException("'format' cannot be null.")); } this.source = source; this.type = type; if (CloudEventDataFormat.BYTES == format) { this.dataBase64 = Base64.getEncoder().encodeToString(data.toBytes()); } else { this.data = data.toString(); } this.dataContentType = dataContentType; this.id = UUID.randomUUID().toString(); this.specVersion = CloudEvent.SPEC_VERSION; }
this.data = data.toString();
public CloudEvent(String source, String type, BinaryData data, CloudEventDataFormat format, String dataContentType) { Objects.requireNonNull(source, "'source' cannot be null."); Objects.requireNonNull(type, "'type' cannot be null."); this.source = source; this.type = type; if (data != null) { Objects.requireNonNull(format, "'format' cannot be null when 'data' isn't null."); if (CloudEventDataFormat.BYTES == format) { this.dataBase64 = Base64.getEncoder().encodeToString(data.toBytes()); } else { try { this.data = BINARY_DATA_OBJECT_MAPPER.readTree(data.toBytes()); } catch (IOException e) { throw LOGGER.logExceptionAsError(new IllegalArgumentException("'data' isn't in valid Json format", e)); } } } this.dataContentType = dataContentType; this.id = UUID.randomUUID().toString(); this.specVersion = CloudEvent.SPEC_VERSION; this.binaryData = data; }
class CloudEvent { private static final String SPEC_VERSION = "1.0"; private static final JsonSerializer SERIALIZER; static { JsonSerializer tmp; try { tmp = JsonSerializerProviders.createInstance(); } catch (IllegalStateException e) { tmp = new JacksonSerializer(); } SERIALIZER = tmp; } private static final ClientLogger LOGGER = new ClientLogger(CloudEvent.class); private static final Set<String> RESERVED_ATTRIBUTE_NAMES = new HashSet<>(Arrays.asList( "specversion", "id", "source", "type", "datacontenttype", "dataschema", "subject", "time", "data" )); /* * An identifier for the event. The combination of id and source must be * unique for each distinct event. */ @JsonProperty(value = "id", required = true) private String id; /* * Identifies the context in which an event happened. The combination of id * and source must be unique for each distinct event. */ @JsonProperty(value = "source", required = true) private String source; /* * Event data specific to the event type. */ @JsonProperty(value = "data") private Object data; /* * Event data specific to the event type, encoded as a base64 string. */ @JsonProperty(value = "data_base64") private String dataBase64; /* * Type of event related to the originating occurrence. */ @JsonProperty(value = "type", required = true) private String type; /* * The time (in UTC) the event was generated, in RFC3339 format. */ @JsonProperty(value = "time") private OffsetDateTime time; /* * The version of the CloudEvents specification which the event uses. */ @JsonProperty(value = "specversion", required = true) private String specVersion; /* * Identifies the schema that data adheres to. */ @JsonProperty(value = "dataschema") private String dataSchema; /* * Content type of data value. */ @JsonProperty(value = "datacontenttype") private String dataContentType; /* * This describes the subject of the event in the context of the event * producer (identified by source). 
*/ @JsonProperty(value = "subject") private String subject; @JsonIgnore private Map<String, Object> extensionAttributes; /* * Cache serialized data for getData() */ @JsonIgnore private BinaryData binaryData; /** * * @param source Identifies the context in which an event happened. The combination of id and source must be unique * for each distinct event. * @param type Type of event related to the originating occurrence. * @param data A {@link BinaryData} that wraps the original data, which can be a String, byte[], or model class. * @param format Set to {@link CloudEventDataFormat * {@link CloudEventDataFormat * @param dataContentType The content type of the data. It has no impact on how the data is serialized but tells * the event subscriber how to use the data. Typically the value is of MIME types such as * "application/json", "text/plain", "text/xml", "application/+avro", etc. It can be null. * @throws NullPointerException if source, type, data, or format is null. */ private CloudEvent() { } /** * Deserialize a list of {@link CloudEvent CloudEvents} from a JSON string and validate whether any CloudEvents have * null id', 'source', or 'type'. If you want to skip this validation, use {@link * @param cloudEventsJson the JSON payload containing one or more events. * * @return all of the events in the payload deserialized as {@link CloudEvent CloudEvents}. * @throws NullPointerException if cloudEventsJson is null. * @throws IllegalArgumentException if the input parameter isn't a correct JSON string for a cloud event * or an array of it, or any deserialized CloudEvents have null 'id', 'source', or 'type'. */ public static List<CloudEvent> fromString(String cloudEventsJson) { return fromString(cloudEventsJson, false); } /** * Deserialize a list of {@link CloudEvent CloudEvents} from a JSON string. * @param cloudEventsJson the JSON payload containing one or more events. 
* @param skipValidation set to true if you'd like to skip the validation for the deserialized CloudEvents. A valid * CloudEvent should have 'id', 'source' and 'type' not null. * * @return all of the events in the payload deserialized as {@link CloudEvent CloudEvents}. * @throws NullPointerException if cloudEventsJson is null. * @throws IllegalArgumentException if the input parameter isn't a JSON string for a cloud event or an array of it, * or skipValidation is false and any CloudEvents have null id', 'source', or 'type'. */ public static List<CloudEvent> fromString(String cloudEventsJson, boolean skipValidation) { if (cloudEventsJson == null) { throw LOGGER.logExceptionAsError(new NullPointerException("'cloudEventsJson' cannot be null")); } try { List<CloudEvent> events = Arrays.asList(SERIALIZER.deserialize( new ByteArrayInputStream(cloudEventsJson.getBytes(StandardCharsets.UTF_8)), TypeReference.createInstance(CloudEvent[].class))); if (!skipValidation) { for (CloudEvent event : events) { if (event.getId() == null || event.getSource() == null || event.getType() == null) { throw LOGGER.logExceptionAsError(new IllegalArgumentException( "'id', 'source' and 'type' are mandatory attributes for a CloudEvent. " + "Check if the input param is a JSON string for a CloudEvent or an array of it.")); } } } return events; } catch (UncheckedIOException uncheckedIOException) { throw LOGGER.logExceptionAsError(new IllegalArgumentException("The input parameter isn't a JSON string.", uncheckedIOException.getCause())); } } /** * Get the id of the cloud event. * @return the id. */ public String getId() { return this.id; } /** * Set a custom id. Note that a random id is already set by default. * @param id the id to set. * * @return the cloud event itself. * @throws NullPointerException if id is null. * @throws IllegalArgumentException if id is empty. 
*/ public CloudEvent setId(String id) { if (Objects.isNull(id)) { throw LOGGER.logExceptionAsError(new NullPointerException("id cannot be null")); } if (id.isEmpty()) { throw LOGGER.logExceptionAsError(new IllegalArgumentException("id cannot be empty")); } this.id = id; return this; } /** * Get the URI source of the event. * @return the source. */ public String getSource() { return this.source; } /** * Get the data associated with this event as a {@link BinaryData}, which has API to deserialize the data into * a String, an Object, or a byte[]. * @return A {@link BinaryData} that wraps the this event's data payload. */ public BinaryData getData() { if (this.binaryData == null) { if (this.data != null) { if (this.data instanceof String) { this.binaryData = BinaryData.fromString((String) this.data); } else if (this.data instanceof byte[]) { this.binaryData = BinaryData.fromBytes((byte[]) this.data); } else { this.binaryData = BinaryData.fromObject(this.data, SERIALIZER); } } else if (this.dataBase64 != null) { this.binaryData = BinaryData.fromString(this.dataBase64); } } return this.binaryData; } /** * Get the type of event, e.g. "Contoso.Items.ItemReceived". * @return the type of the event. */ public String getType() { return this.type; } /** * Get the time associated with the occurrence of the event. * @return the event time, or null if the time is not set. */ public OffsetDateTime getTime() { return this.time; } /** * Set the time associated with the occurrence of the event. * @param time the time to set. * * @return the cloud event itself. */ public CloudEvent setTime(OffsetDateTime time) { this.time = time; return this; } /** * Get the content MIME type that the data is in. A null value indicates that the data is either nonexistent or in the * "application/json" type. Note that "application/json" is still a possible value for this field. * @return the content type the data is in, or null if the data is nonexistent or in "application/json" format. 
*/ public String getDataContentType() { return this.dataContentType; } /** * Get the schema that the data adheres to. * @return a URI of the data schema, or null if it is not set. */ public String getDataSchema() { return this.dataSchema; } /** * Set the schema that the data adheres to. * @param dataSchema a URI identifying the schema of the data. * * @return the cloud event itself. */ public CloudEvent setDataSchema(String dataSchema) { this.dataSchema = dataSchema; return this; } /** * Get the subject associated with this event. * @return the subject, or null if the subject was not set. */ public String getSubject() { return this.subject; } /** * Set the subject of the event. * @param subject the subject to set. * * @return the cloud event itself. */ public CloudEvent setSubject(String subject) { this.subject = subject; return this; } /** * Get a map of the additional user-defined attributes associated with this event. * @return the extension attributes as an unmodifiable map. */ @JsonAnyGetter public Map<String, Object> getExtensionAttributes() { return this.extensionAttributes; } /** * Add/Overwrite a single extension attribute to the cloud event. * @param name the name of the attribute. It must contains only alphanumeric characters and not be be any * CloudEvent reserved attribute names. * @param value the value to associate with the name. * * @return the cloud event itself. * @throws IllegalArgumentException if name format isn't correct. 
*/ @JsonAnySetter public CloudEvent addExtensionAttribute(String name, Object value) { if (Objects.isNull(name)) { throw LOGGER.logExceptionAsError(new IllegalArgumentException("'name' cannot be null.")); } if (Objects.isNull(value)) { throw LOGGER.logExceptionAsError(new IllegalArgumentException("'value' cannot be null.")); } if (!validateAttributeName(name)) { throw LOGGER.logExceptionAsError(new IllegalArgumentException( "'name' must have only small-case alphanumeric characters and not be one of the CloudEvent reserved " + "attribute names")); } if (this.extensionAttributes == null) { this.extensionAttributes = new HashMap<>(); } this.extensionAttributes.put(name.toLowerCase(Locale.ENGLISH), value); return this; } /** * Get the spec version. Users don't need to access it because it's always 1.0. * Make it package level to test deserialization. * @return The spec version. */ String getSpecVersion() { return this.specVersion; } /** * Set the spec version. Users don't need to access it because it's always 1.0. * Make it package level to test serialization. * @return the cloud event itself. 
*/ CloudEvent setSpecVersion(String specVersion) { this.specVersion = specVersion; return this; } private static boolean validateAttributeName(String name) { if (RESERVED_ATTRIBUTE_NAMES.contains(name)) { return false; } for (int i = 0; i < name.length(); i++) { char c = name.charAt(i); if (!((c >= 'a' && c <= 'z') || (c >= '0' && c <= '9'))) { return false; } } return true; } static class JacksonSerializer implements JsonSerializer { private final JacksonAdapter jacksonAdapter = new JacksonAdapter(); @Override public <T> T deserialize(InputStream stream, TypeReference<T> typeReference) { try { return jacksonAdapter.deserialize(stream, typeReference.getJavaType(), SerializerEncoding.JSON); } catch (IOException e) { throw LOGGER.logExceptionAsError(new RuntimeException(e)); } } @Override public <T> Mono<T> deserializeAsync(InputStream stream, TypeReference<T> typeReference) { return Mono.defer(() -> Mono.just(deserialize(stream, typeReference))); } @Override public void serialize(OutputStream stream, Object value) { try { jacksonAdapter.serialize(value, SerializerEncoding.JSON, stream); } catch (IOException e) { throw LOGGER.logExceptionAsError(new RuntimeException(e)); } } @Override public Mono<Void> serializeAsync(OutputStream stream, Object value) { return Mono.fromRunnable(() -> serialize(stream, value)); } JacksonAdapter getJacksonAdapter() { return jacksonAdapter; } } }
class accepts any String for compatibility with legacy systems. * @param type Type of event related to the originating occurrence. * @param data A {@link BinaryData}
I think this should be `IllegalArgumentException` because the input JSON string isn't null but is invalid.
public static List<CloudEvent> fromString(String cloudEventsJson, boolean skipValidation) { if (cloudEventsJson == null) { throw LOGGER.logExceptionAsError(new NullPointerException("'cloudEventsJson' cannot be null")); } try { List<CloudEvent> events = Arrays.asList(SERIALIZER.deserialize( new ByteArrayInputStream(cloudEventsJson.getBytes(StandardCharsets.UTF_8)), TypeReference.createInstance(CloudEvent[].class))); if (!skipValidation) { for (CloudEvent event : events) { if (event.getId() == null || event.getSource() == null || event.getType() == null) { throw LOGGER.logExceptionAsError(new IllegalArgumentException( "'id', 'source' and 'type' are mandatory attributes for a CloudEvent. " + "Check if the input param is a JSON string for a CloudEvent or an array of it.")); } } } return events; } catch (UncheckedIOException uncheckedIOException) { throw LOGGER.logExceptionAsError(new IllegalArgumentException("The input parameter isn't a JSON string.", uncheckedIOException.getCause())); } }
throw LOGGER.logExceptionAsError(new IllegalArgumentException(
public static List<CloudEvent> fromString(String cloudEventsJson, boolean skipValidation) { Objects.requireNonNull(cloudEventsJson, "'cloudEventsJson' cannot be null"); try { List<CloudEvent> events = SERIALIZER.deserialize( new ByteArrayInputStream(cloudEventsJson.getBytes(StandardCharsets.UTF_8)), DESERIALIZER_TYPE_REFERENCE); if (skipValidation) { return events; } for (CloudEvent event : events) { if (event.getId() == null || event.getSource() == null || event.getType() == null) { throw LOGGER.logExceptionAsError(new IllegalArgumentException( "'id', 'source' and 'type' are mandatory attributes for a CloudEvent. " + "Check if the input param is a JSON string for a CloudEvent or an array of it.")); } } return events; } catch (UncheckedIOException uncheckedIOException) { throw LOGGER.logExceptionAsError(new IllegalArgumentException("The input parameter isn't a JSON string.", uncheckedIOException.getCause())); } }
class CloudEvent { private static final String SPEC_VERSION = "1.0"; private static final JsonSerializer SERIALIZER; static { JsonSerializer tmp; try { tmp = JsonSerializerProviders.createInstance(); } catch (IllegalStateException e) { tmp = new JacksonSerializer(); } SERIALIZER = tmp; } private static final ClientLogger LOGGER = new ClientLogger(CloudEvent.class); private static final Set<String> RESERVED_ATTRIBUTE_NAMES = new HashSet<>(Arrays.asList( "specversion", "id", "source", "type", "datacontenttype", "dataschema", "subject", "time", "data" )); /* * An identifier for the event. The combination of id and source must be * unique for each distinct event. */ @JsonProperty(value = "id", required = true) private String id; /* * Identifies the context in which an event happened. The combination of id * and source must be unique for each distinct event. */ @JsonProperty(value = "source", required = true) private String source; /* * Event data specific to the event type. */ @JsonProperty(value = "data") private Object data; /* * Event data specific to the event type, encoded as a base64 string. */ @JsonProperty(value = "data_base64") private String dataBase64; /* * Type of event related to the originating occurrence. */ @JsonProperty(value = "type", required = true) private String type; /* * The time (in UTC) the event was generated, in RFC3339 format. */ @JsonProperty(value = "time") private OffsetDateTime time; /* * The version of the CloudEvents specification which the event uses. */ @JsonProperty(value = "specversion", required = true) private String specVersion; /* * Identifies the schema that data adheres to. */ @JsonProperty(value = "dataschema") private String dataSchema; /* * Content type of data value. */ @JsonProperty(value = "datacontenttype") private String dataContentType; /* * This describes the subject of the event in the context of the event * producer (identified by source). 
*/ @JsonProperty(value = "subject") private String subject; @JsonIgnore private Map<String, Object> extensionAttributes; /* * Cache serialized data for getData() */ @JsonIgnore private BinaryData binaryData; /** * * @param source Identifies the context in which an event happened. The combination of id and source must be unique * for each distinct event. * @param type Type of event related to the originating occurrence. * @param data A {@link BinaryData} that wraps the original data, which can be a String, byte[], or model class. * @param format Set to {@link CloudEventDataFormat * {@link CloudEventDataFormat * @param dataContentType The content type of the data. It has no impact on how the data is serialized but tells * the event subscriber how to use the data. Typically the value is of MIME types such as * "application/json", "text/plain", "text/xml", "application/+avro", etc. It can be null. * @throws NullPointerException if source, type, data, or format is null. */ public CloudEvent(String source, String type, BinaryData data, CloudEventDataFormat format, String dataContentType) { if (Objects.isNull(source)) { throw LOGGER.logExceptionAsError(new NullPointerException("'source' cannot be null.")); } if (Objects.isNull(type)) { throw LOGGER.logExceptionAsError(new NullPointerException("'type' cannot be null.")); } if (Objects.isNull(data)) { throw LOGGER.logExceptionAsError(new NullPointerException("'data' cannot be null.")); } if (Objects.isNull(format)) { throw LOGGER.logExceptionAsError(new NullPointerException("'format' cannot be null.")); } this.source = source; this.type = type; if (CloudEventDataFormat.BYTES == format) { this.dataBase64 = Base64.getEncoder().encodeToString(data.toBytes()); } else { this.data = data.toString(); } this.dataContentType = dataContentType; this.id = UUID.randomUUID().toString(); this.specVersion = CloudEvent.SPEC_VERSION; } private CloudEvent() { } /** * Deserialize a list of {@link CloudEvent CloudEvents} from a JSON string and 
validate whether any CloudEvents have * null id', 'source', or 'type'. If you want to skip this validation, use {@link * @param cloudEventsJson the JSON payload containing one or more events. * * @return all of the events in the payload deserialized as {@link CloudEvent CloudEvents}. * @throws NullPointerException if cloudEventsJson is null. * @throws IllegalArgumentException if the input parameter isn't a correct JSON string for a cloud event * or an array of it, or any deserialized CloudEvents have null 'id', 'source', or 'type'. */ public static List<CloudEvent> fromString(String cloudEventsJson) { return fromString(cloudEventsJson, false); } /** * Deserialize a list of {@link CloudEvent CloudEvents} from a JSON string. * @param cloudEventsJson the JSON payload containing one or more events. * @param skipValidation set to true if you'd like to skip the validation for the deserialized CloudEvents. A valid * CloudEvent should have 'id', 'source' and 'type' not null. * * @return all of the events in the payload deserialized as {@link CloudEvent CloudEvents}. * @throws NullPointerException if cloudEventsJson is null. * @throws IllegalArgumentException if the input parameter isn't a JSON string for a cloud event or an array of it, * or skipValidation is false and any CloudEvents have null id', 'source', or 'type'. */ /** * Get the id of the cloud event. * @return the id. */ public String getId() { return this.id; } /** * Set a custom id. Note that a random id is already set by default. * @param id the id to set. * * @return the cloud event itself. * @throws NullPointerException if id is null. * @throws IllegalArgumentException if id is empty. */ public CloudEvent setId(String id) { if (Objects.isNull(id)) { throw LOGGER.logExceptionAsError(new NullPointerException("id cannot be null")); } if (id.isEmpty()) { throw LOGGER.logExceptionAsError(new IllegalArgumentException("id cannot be empty")); } this.id = id; return this; } /** * Get the URI source of the event. 
* @return the source. */ public String getSource() { return this.source; } /** * Get the data associated with this event as a {@link BinaryData}, which has API to deserialize the data into * a String, an Object, or a byte[]. * @return A {@link BinaryData} that wraps the this event's data payload. */ public BinaryData getData() { if (this.binaryData == null) { if (this.data != null) { if (this.data instanceof String) { this.binaryData = BinaryData.fromString((String) this.data); } else if (this.data instanceof byte[]) { this.binaryData = BinaryData.fromBytes((byte[]) this.data); } else { this.binaryData = BinaryData.fromObject(this.data, SERIALIZER); } } else if (this.dataBase64 != null) { this.binaryData = BinaryData.fromString(this.dataBase64); } } return this.binaryData; } /** * Get the type of event, e.g. "Contoso.Items.ItemReceived". * @return the type of the event. */ public String getType() { return this.type; } /** * Get the time associated with the occurrence of the event. * @return the event time, or null if the time is not set. */ public OffsetDateTime getTime() { return this.time; } /** * Set the time associated with the occurrence of the event. * @param time the time to set. * * @return the cloud event itself. */ public CloudEvent setTime(OffsetDateTime time) { this.time = time; return this; } /** * Get the content MIME type that the data is in. A null value indicates that the data is either nonexistent or in the * "application/json" type. Note that "application/json" is still a possible value for this field. * @return the content type the data is in, or null if the data is nonexistent or in "application/json" format. */ public String getDataContentType() { return this.dataContentType; } /** * Get the schema that the data adheres to. * @return a URI of the data schema, or null if it is not set. */ public String getDataSchema() { return this.dataSchema; } /** * Set the schema that the data adheres to. 
* @param dataSchema a URI identifying the schema of the data. * * @return the cloud event itself. */ public CloudEvent setDataSchema(String dataSchema) { this.dataSchema = dataSchema; return this; } /** * Get the subject associated with this event. * @return the subject, or null if the subject was not set. */ public String getSubject() { return this.subject; } /** * Set the subject of the event. * @param subject the subject to set. * * @return the cloud event itself. */ public CloudEvent setSubject(String subject) { this.subject = subject; return this; } /** * Get a map of the additional user-defined attributes associated with this event. * @return the extension attributes as an unmodifiable map. */ @JsonAnyGetter public Map<String, Object> getExtensionAttributes() { return this.extensionAttributes; } /** * Add/Overwrite a single extension attribute to the cloud event. * @param name the name of the attribute. It must contains only alphanumeric characters and not be be any * CloudEvent reserved attribute names. * @param value the value to associate with the name. * * @return the cloud event itself. * @throws IllegalArgumentException if name format isn't correct. */ @JsonAnySetter public CloudEvent addExtensionAttribute(String name, Object value) { if (Objects.isNull(name)) { throw LOGGER.logExceptionAsError(new IllegalArgumentException("'name' cannot be null.")); } if (Objects.isNull(value)) { throw LOGGER.logExceptionAsError(new IllegalArgumentException("'value' cannot be null.")); } if (!validateAttributeName(name)) { throw LOGGER.logExceptionAsError(new IllegalArgumentException( "'name' must have only small-case alphanumeric characters and not be one of the CloudEvent reserved " + "attribute names")); } if (this.extensionAttributes == null) { this.extensionAttributes = new HashMap<>(); } this.extensionAttributes.put(name.toLowerCase(Locale.ENGLISH), value); return this; } /** * Get the spec version. Users don't need to access it because it's always 1.0. 
* Make it package level to test deserialization. * @return The spec version. */ String getSpecVersion() { return this.specVersion; } /** * Set the spec version. Users don't need to access it because it's always 1.0. * Make it package level to test serialization. * @return the cloud event itself. */ CloudEvent setSpecVersion(String specVersion) { this.specVersion = specVersion; return this; } private static boolean validateAttributeName(String name) { if (RESERVED_ATTRIBUTE_NAMES.contains(name)) { return false; } for (int i = 0; i < name.length(); i++) { char c = name.charAt(i); if (!((c >= 'a' && c <= 'z') || (c >= '0' && c <= '9'))) { return false; } } return true; } static class JacksonSerializer implements JsonSerializer { private final JacksonAdapter jacksonAdapter = new JacksonAdapter(); @Override public <T> T deserialize(InputStream stream, TypeReference<T> typeReference) { try { return jacksonAdapter.deserialize(stream, typeReference.getJavaType(), SerializerEncoding.JSON); } catch (IOException e) { throw LOGGER.logExceptionAsError(new RuntimeException(e)); } } @Override public <T> Mono<T> deserializeAsync(InputStream stream, TypeReference<T> typeReference) { return Mono.defer(() -> Mono.just(deserialize(stream, typeReference))); } @Override public void serialize(OutputStream stream, Object value) { try { jacksonAdapter.serialize(value, SerializerEncoding.JSON, stream); } catch (IOException e) { throw LOGGER.logExceptionAsError(new RuntimeException(e)); } } @Override public Mono<Void> serializeAsync(OutputStream stream, Object value) { return Mono.fromRunnable(() -> serialize(stream, value)); } JacksonAdapter getJacksonAdapter() { return jacksonAdapter; } } }
class accepts any String for compatibility with legacy systems. * @param type Type of event related to the originating occurrence. * @param data A {@link BinaryData}
I trust your English
public CloudEvent addExtensionAttribute(String name, Object value) { if (Objects.isNull(name)) { throw LOGGER.logExceptionAsError(new IllegalArgumentException("'name' cannot be null.")); } if (Objects.isNull(value)) { throw LOGGER.logExceptionAsError(new IllegalArgumentException("'value' cannot be null.")); } if (!validateAttributeName(name)) { throw LOGGER.logExceptionAsError(new IllegalArgumentException( "'name' must have only small-case alphanumeric characters and not be one of the CloudEvent reserved " + "attribute names")); } if (this.extensionAttributes == null) { this.extensionAttributes = new HashMap<>(); } this.extensionAttributes.put(name.toLowerCase(Locale.ENGLISH), value); return this; }
"'name' must have only small-case alphanumeric characters and not be one of the CloudEvent reserved "
public CloudEvent addExtensionAttribute(String name, Object value) { Objects.requireNonNull(name, "'name' cannot be null."); Objects.requireNonNull(value, "'value' cannot be null."); if (!validateAttributeName(name)) { throw LOGGER.logExceptionAsError(new IllegalArgumentException( "Extension attribute 'name' must have only lower-case alphanumeric characters and not be one of the " + "CloudEvent reserved attribute names: " + String.join(",", RESERVED_ATTRIBUTE_NAMES))); } if (this.extensionAttributes == null) { this.extensionAttributes = new HashMap<>(); } this.extensionAttributes.put(name, value); return this; }
class CloudEvent { private static final String SPEC_VERSION = "1.0"; private static final JsonSerializer SERIALIZER; static { JsonSerializer tmp; try { tmp = JsonSerializerProviders.createInstance(); } catch (IllegalStateException e) { tmp = new JacksonSerializer(); } SERIALIZER = tmp; } private static final ClientLogger LOGGER = new ClientLogger(CloudEvent.class); private static final Set<String> RESERVED_ATTRIBUTE_NAMES = new HashSet<>(Arrays.asList( "specversion", "id", "source", "type", "datacontenttype", "dataschema", "subject", "time", "data" )); /* * An identifier for the event. The combination of id and source must be * unique for each distinct event. */ @JsonProperty(value = "id", required = true) private String id; /* * Identifies the context in which an event happened. The combination of id * and source must be unique for each distinct event. */ @JsonProperty(value = "source", required = true) private String source; /* * Event data specific to the event type. */ @JsonProperty(value = "data") private Object data; /* * Event data specific to the event type, encoded as a base64 string. */ @JsonProperty(value = "data_base64") private String dataBase64; /* * Type of event related to the originating occurrence. */ @JsonProperty(value = "type", required = true) private String type; /* * The time (in UTC) the event was generated, in RFC3339 format. */ @JsonProperty(value = "time") private OffsetDateTime time; /* * The version of the CloudEvents specification which the event uses. */ @JsonProperty(value = "specversion", required = true) private String specVersion; /* * Identifies the schema that data adheres to. */ @JsonProperty(value = "dataschema") private String dataSchema; /* * Content type of data value. */ @JsonProperty(value = "datacontenttype") private String dataContentType; /* * This describes the subject of the event in the context of the event * producer (identified by source). 
*/ @JsonProperty(value = "subject") private String subject; @JsonIgnore private Map<String, Object> extensionAttributes; /* * Cache serialized data for getData() */ @JsonIgnore private BinaryData binaryData; /** * * @param source Identifies the context in which an event happened. The combination of id and source must be unique * for each distinct event. * @param type Type of event related to the originating occurrence. * @param data A {@link BinaryData} that wraps the original data, which can be a String, byte[], or model class. * @param format Set to {@link CloudEventDataFormat * {@link CloudEventDataFormat * @param dataContentType The content type of the data. It has no impact on how the data is serialized but tells * the event subscriber how to use the data. Typically the value is of MIME types such as * "application/json", "text/plain", "text/xml", "application/+avro", etc. It can be null. * @throws NullPointerException if source, type, data, or format is null. */ public CloudEvent(String source, String type, BinaryData data, CloudEventDataFormat format, String dataContentType) { if (Objects.isNull(source)) { throw LOGGER.logExceptionAsError(new NullPointerException("'source' cannot be null.")); } if (Objects.isNull(type)) { throw LOGGER.logExceptionAsError(new NullPointerException("'type' cannot be null.")); } if (Objects.isNull(data)) { throw LOGGER.logExceptionAsError(new NullPointerException("'data' cannot be null.")); } if (Objects.isNull(format)) { throw LOGGER.logExceptionAsError(new NullPointerException("'format' cannot be null.")); } this.source = source; this.type = type; if (CloudEventDataFormat.BYTES == format) { this.dataBase64 = Base64.getEncoder().encodeToString(data.toBytes()); } else { this.data = data.toString(); } this.dataContentType = dataContentType; this.id = UUID.randomUUID().toString(); this.specVersion = CloudEvent.SPEC_VERSION; } private CloudEvent() { } /** * Deserialize a list of {@link CloudEvent CloudEvents} from a JSON string and 
validate whether any CloudEvents have * null id', 'source', or 'type'. If you want to skip this validation, use {@link * @param cloudEventsJson the JSON payload containing one or more events. * * @return all of the events in the payload deserialized as {@link CloudEvent CloudEvents}. * @throws NullPointerException if cloudEventsJson is null. * @throws IllegalArgumentException if the input parameter isn't a correct JSON string for a cloud event * or an array of it, or any deserialized CloudEvents have null 'id', 'source', or 'type'. */ public static List<CloudEvent> fromString(String cloudEventsJson) { return fromString(cloudEventsJson, false); } /** * Deserialize a list of {@link CloudEvent CloudEvents} from a JSON string. * @param cloudEventsJson the JSON payload containing one or more events. * @param skipValidation set to true if you'd like to skip the validation for the deserialized CloudEvents. A valid * CloudEvent should have 'id', 'source' and 'type' not null. * * @return all of the events in the payload deserialized as {@link CloudEvent CloudEvents}. * @throws NullPointerException if cloudEventsJson is null. * @throws IllegalArgumentException if the input parameter isn't a JSON string for a cloud event or an array of it, * or skipValidation is false and any CloudEvents have null id', 'source', or 'type'. 
*/ public static List<CloudEvent> fromString(String cloudEventsJson, boolean skipValidation) { if (cloudEventsJson == null) { throw LOGGER.logExceptionAsError(new NullPointerException("'cloudEventsJson' cannot be null")); } try { List<CloudEvent> events = Arrays.asList(SERIALIZER.deserialize( new ByteArrayInputStream(cloudEventsJson.getBytes(StandardCharsets.UTF_8)), TypeReference.createInstance(CloudEvent[].class))); if (!skipValidation) { for (CloudEvent event : events) { if (event.getId() == null || event.getSource() == null || event.getType() == null) { throw LOGGER.logExceptionAsError(new IllegalArgumentException( "'id', 'source' and 'type' are mandatory attributes for a CloudEvent. " + "Check if the input param is a JSON string for a CloudEvent or an array of it.")); } } } return events; } catch (UncheckedIOException uncheckedIOException) { throw LOGGER.logExceptionAsError(new IllegalArgumentException("The input parameter isn't a JSON string.", uncheckedIOException.getCause())); } } /** * Get the id of the cloud event. * @return the id. */ public String getId() { return this.id; } /** * Set a custom id. Note that a random id is already set by default. * @param id the id to set. * * @return the cloud event itself. * @throws NullPointerException if id is null. * @throws IllegalArgumentException if id is empty. */ public CloudEvent setId(String id) { if (Objects.isNull(id)) { throw LOGGER.logExceptionAsError(new NullPointerException("id cannot be null")); } if (id.isEmpty()) { throw LOGGER.logExceptionAsError(new IllegalArgumentException("id cannot be empty")); } this.id = id; return this; } /** * Get the URI source of the event. * @return the source. */ public String getSource() { return this.source; } /** * Get the data associated with this event as a {@link BinaryData}, which has API to deserialize the data into * a String, an Object, or a byte[]. * @return A {@link BinaryData} that wraps the this event's data payload. 
*/ public BinaryData getData() { if (this.binaryData == null) { if (this.data != null) { if (this.data instanceof String) { this.binaryData = BinaryData.fromString((String) this.data); } else if (this.data instanceof byte[]) { this.binaryData = BinaryData.fromBytes((byte[]) this.data); } else { this.binaryData = BinaryData.fromObject(this.data, SERIALIZER); } } else if (this.dataBase64 != null) { this.binaryData = BinaryData.fromString(this.dataBase64); } } return this.binaryData; } /** * Get the type of event, e.g. "Contoso.Items.ItemReceived". * @return the type of the event. */ public String getType() { return this.type; } /** * Get the time associated with the occurrence of the event. * @return the event time, or null if the time is not set. */ public OffsetDateTime getTime() { return this.time; } /** * Set the time associated with the occurrence of the event. * @param time the time to set. * * @return the cloud event itself. */ public CloudEvent setTime(OffsetDateTime time) { this.time = time; return this; } /** * Get the content MIME type that the data is in. A null value indicates that the data is either nonexistent or in the * "application/json" type. Note that "application/json" is still a possible value for this field. * @return the content type the data is in, or null if the data is nonexistent or in "application/json" format. */ public String getDataContentType() { return this.dataContentType; } /** * Get the schema that the data adheres to. * @return a URI of the data schema, or null if it is not set. */ public String getDataSchema() { return this.dataSchema; } /** * Set the schema that the data adheres to. * @param dataSchema a URI identifying the schema of the data. * * @return the cloud event itself. */ public CloudEvent setDataSchema(String dataSchema) { this.dataSchema = dataSchema; return this; } /** * Get the subject associated with this event. * @return the subject, or null if the subject was not set. 
*/ public String getSubject() { return this.subject; } /** * Set the subject of the event. * @param subject the subject to set. * * @return the cloud event itself. */ public CloudEvent setSubject(String subject) { this.subject = subject; return this; } /** * Get a map of the additional user-defined attributes associated with this event. * @return the extension attributes as an unmodifiable map. */ @JsonAnyGetter public Map<String, Object> getExtensionAttributes() { return this.extensionAttributes; } /** * Add/Overwrite a single extension attribute to the cloud event. * @param name the name of the attribute. It must contains only alphanumeric characters and not be be any * CloudEvent reserved attribute names. * @param value the value to associate with the name. * * @return the cloud event itself. * @throws IllegalArgumentException if name format isn't correct. */ @JsonAnySetter /** * Get the spec version. Users don't need to access it because it's always 1.0. * Make it package level to test deserialization. * @return The spec version. */ String getSpecVersion() { return this.specVersion; } /** * Set the spec version. Users don't need to access it because it's always 1.0. * Make it package level to test serialization. * @return the cloud event itself. 
*/ CloudEvent setSpecVersion(String specVersion) { this.specVersion = specVersion; return this; } private static boolean validateAttributeName(String name) { if (RESERVED_ATTRIBUTE_NAMES.contains(name)) { return false; } for (int i = 0; i < name.length(); i++) { char c = name.charAt(i); if (!((c >= 'a' && c <= 'z') || (c >= '0' && c <= '9'))) { return false; } } return true; } static class JacksonSerializer implements JsonSerializer { private final JacksonAdapter jacksonAdapter = new JacksonAdapter(); @Override public <T> T deserialize(InputStream stream, TypeReference<T> typeReference) { try { return jacksonAdapter.deserialize(stream, typeReference.getJavaType(), SerializerEncoding.JSON); } catch (IOException e) { throw LOGGER.logExceptionAsError(new RuntimeException(e)); } } @Override public <T> Mono<T> deserializeAsync(InputStream stream, TypeReference<T> typeReference) { return Mono.defer(() -> Mono.just(deserialize(stream, typeReference))); } @Override public void serialize(OutputStream stream, Object value) { try { jacksonAdapter.serialize(value, SerializerEncoding.JSON, stream); } catch (IOException e) { throw LOGGER.logExceptionAsError(new RuntimeException(e)); } } @Override public Mono<Void> serializeAsync(OutputStream stream, Object value) { return Mono.fromRunnable(() -> serialize(stream, value)); } JacksonAdapter getJacksonAdapter() { return jacksonAdapter; } } }
class accepts any String for compatibility with legacy systems. * @param type Type of event related to the originating occurrence. * @param data A {@link BinaryData}
I'd like this to be explicit for users; otherwise there might be a hidden mistake that they don't realize. @srnagar, what do you think?
/**
 * Creates a CloudEvent from the mandatory attributes. A random UUID is assigned as the event id
 * and the spec version is fixed to 1.0.
 *
 * @param source identifies the context in which the event happened; combined with id it must be
 *     unique per event.
 * @param type the type of event related to the originating occurrence.
 * @param data a {@link BinaryData} wrapping the original payload.
 * @param format whether the payload is carried as base64 bytes or as JSON text.
 * @param dataContentType the MIME content type of the data; may be null.
 * @throws NullPointerException if source, type, data, or format is null.
 */
public CloudEvent(String source, String type, BinaryData data, CloudEventDataFormat format,
    String dataContentType) {
    if (source == null) {
        throw LOGGER.logExceptionAsError(new NullPointerException("'source' cannot be null."));
    }
    if (type == null) {
        throw LOGGER.logExceptionAsError(new NullPointerException("'type' cannot be null."));
    }
    if (data == null) {
        throw LOGGER.logExceptionAsError(new NullPointerException("'data' cannot be null."));
    }
    if (format == null) {
        throw LOGGER.logExceptionAsError(new NullPointerException("'format' cannot be null."));
    }
    this.source = source;
    this.type = type;
    // BYTES payloads go into the data_base64 attribute; anything else is kept as text in data.
    if (format == CloudEventDataFormat.BYTES) {
        this.dataBase64 = Base64.getEncoder().encodeToString(data.toBytes());
    } else {
        this.data = data.toString();
    }
    this.dataContentType = dataContentType;
    this.id = UUID.randomUUID().toString();
    this.specVersion = CloudEvent.SPEC_VERSION;
}
if (Objects.isNull(format)) {
/**
 * Creates a CloudEvent from the mandatory attributes. A random UUID is assigned as the event id
 * and the spec version is fixed to 1.0. Unlike data, the format may be null only when data is
 * also null.
 *
 * @param source identifies the context in which the event happened; combined with id it must be
 *     unique per event.
 * @param type the type of event related to the originating occurrence.
 * @param data a {@link BinaryData} wrapping the original payload; may be null.
 * @param format whether the payload is carried as base64 bytes or as JSON; required when data
 *     isn't null.
 * @param dataContentType the MIME content type of the data; may be null.
 * @throws NullPointerException if source or type is null, or if format is null while data isn't.
 * @throws IllegalArgumentException if a JSON-format payload isn't valid JSON.
 */
public CloudEvent(String source, String type, BinaryData data, CloudEventDataFormat format,
    String dataContentType) {
    Objects.requireNonNull(source, "'source' cannot be null.");
    Objects.requireNonNull(type, "'type' cannot be null.");
    this.source = source;
    this.type = type;
    if (data != null) {
        Objects.requireNonNull(format, "'format' cannot be null when 'data' isn't null.");
        if (format == CloudEventDataFormat.BYTES) {
            // Binary payloads are carried in the data_base64 attribute.
            this.dataBase64 = Base64.getEncoder().encodeToString(data.toBytes());
        } else {
            // Parse eagerly so that invalid JSON fails fast at construction time.
            try {
                this.data = BINARY_DATA_OBJECT_MAPPER.readTree(data.toBytes());
            } catch (IOException e) {
                throw LOGGER.logExceptionAsError(
                    new IllegalArgumentException("'data' isn't in valid Json format", e));
            }
        }
    }
    this.dataContentType = dataContentType;
    this.id = UUID.randomUUID().toString();
    this.specVersion = CloudEvent.SPEC_VERSION;
    // Cache the caller-supplied BinaryData so getData() can return it without re-serializing.
    this.binaryData = data;
}
class CloudEvent { private static final String SPEC_VERSION = "1.0"; private static final JsonSerializer SERIALIZER; static { JsonSerializer tmp; try { tmp = JsonSerializerProviders.createInstance(); } catch (IllegalStateException e) { tmp = new JacksonSerializer(); } SERIALIZER = tmp; } private static final ClientLogger LOGGER = new ClientLogger(CloudEvent.class); private static final Set<String> RESERVED_ATTRIBUTE_NAMES = new HashSet<>(Arrays.asList( "specversion", "id", "source", "type", "datacontenttype", "dataschema", "subject", "time", "data" )); /* * An identifier for the event. The combination of id and source must be * unique for each distinct event. */ @JsonProperty(value = "id", required = true) private String id; /* * Identifies the context in which an event happened. The combination of id * and source must be unique for each distinct event. */ @JsonProperty(value = "source", required = true) private String source; /* * Event data specific to the event type. */ @JsonProperty(value = "data") private Object data; /* * Event data specific to the event type, encoded as a base64 string. */ @JsonProperty(value = "data_base64") private String dataBase64; /* * Type of event related to the originating occurrence. */ @JsonProperty(value = "type", required = true) private String type; /* * The time (in UTC) the event was generated, in RFC3339 format. */ @JsonProperty(value = "time") private OffsetDateTime time; /* * The version of the CloudEvents specification which the event uses. */ @JsonProperty(value = "specversion", required = true) private String specVersion; /* * Identifies the schema that data adheres to. */ @JsonProperty(value = "dataschema") private String dataSchema; /* * Content type of data value. */ @JsonProperty(value = "datacontenttype") private String dataContentType; /* * This describes the subject of the event in the context of the event * producer (identified by source). 
*/ @JsonProperty(value = "subject") private String subject; @JsonIgnore private Map<String, Object> extensionAttributes; /* * Cache serialized data for getData() */ @JsonIgnore private BinaryData binaryData; /** * * @param source Identifies the context in which an event happened. The combination of id and source must be unique * for each distinct event. * @param type Type of event related to the originating occurrence. * @param data A {@link BinaryData} that wraps the original data, which can be a String, byte[], or model class. * @param format Set to {@link CloudEventDataFormat * {@link CloudEventDataFormat * @param dataContentType The content type of the data. It has no impact on how the data is serialized but tells * the event subscriber how to use the data. Typically the value is of MIME types such as * "application/json", "text/plain", "text/xml", "application/+avro", etc. It can be null. * @throws NullPointerException if source, type, data, or format is null. */ private CloudEvent() { } /** * Deserialize a list of {@link CloudEvent CloudEvents} from a JSON string and validate whether any CloudEvents have * null id', 'source', or 'type'. If you want to skip this validation, use {@link * @param cloudEventsJson the JSON payload containing one or more events. * * @return all of the events in the payload deserialized as {@link CloudEvent CloudEvents}. * @throws NullPointerException if cloudEventsJson is null. * @throws IllegalArgumentException if the input parameter isn't a correct JSON string for a cloud event * or an array of it, or any deserialized CloudEvents have null 'id', 'source', or 'type'. */ public static List<CloudEvent> fromString(String cloudEventsJson) { return fromString(cloudEventsJson, false); } /** * Deserialize a list of {@link CloudEvent CloudEvents} from a JSON string. * @param cloudEventsJson the JSON payload containing one or more events. 
* @param skipValidation set to true if you'd like to skip the validation for the deserialized CloudEvents. A valid * CloudEvent should have 'id', 'source' and 'type' not null. * * @return all of the events in the payload deserialized as {@link CloudEvent CloudEvents}. * @throws NullPointerException if cloudEventsJson is null. * @throws IllegalArgumentException if the input parameter isn't a JSON string for a cloud event or an array of it, * or skipValidation is false and any CloudEvents have null id', 'source', or 'type'. */ public static List<CloudEvent> fromString(String cloudEventsJson, boolean skipValidation) { if (cloudEventsJson == null) { throw LOGGER.logExceptionAsError(new NullPointerException("'cloudEventsJson' cannot be null")); } try { List<CloudEvent> events = Arrays.asList(SERIALIZER.deserialize( new ByteArrayInputStream(cloudEventsJson.getBytes(StandardCharsets.UTF_8)), TypeReference.createInstance(CloudEvent[].class))); if (!skipValidation) { for (CloudEvent event : events) { if (event.getId() == null || event.getSource() == null || event.getType() == null) { throw LOGGER.logExceptionAsError(new IllegalArgumentException( "'id', 'source' and 'type' are mandatory attributes for a CloudEvent. " + "Check if the input param is a JSON string for a CloudEvent or an array of it.")); } } } return events; } catch (UncheckedIOException uncheckedIOException) { throw LOGGER.logExceptionAsError(new IllegalArgumentException("The input parameter isn't a JSON string.", uncheckedIOException.getCause())); } } /** * Get the id of the cloud event. * @return the id. */ public String getId() { return this.id; } /** * Set a custom id. Note that a random id is already set by default. * @param id the id to set. * * @return the cloud event itself. * @throws NullPointerException if id is null. * @throws IllegalArgumentException if id is empty. 
*/ public CloudEvent setId(String id) { if (Objects.isNull(id)) { throw LOGGER.logExceptionAsError(new NullPointerException("id cannot be null")); } if (id.isEmpty()) { throw LOGGER.logExceptionAsError(new IllegalArgumentException("id cannot be empty")); } this.id = id; return this; } /** * Get the URI source of the event. * @return the source. */ public String getSource() { return this.source; } /** * Get the data associated with this event as a {@link BinaryData}, which has API to deserialize the data into * a String, an Object, or a byte[]. * @return A {@link BinaryData} that wraps the this event's data payload. */ public BinaryData getData() { if (this.binaryData == null) { if (this.data != null) { if (this.data instanceof String) { this.binaryData = BinaryData.fromString((String) this.data); } else if (this.data instanceof byte[]) { this.binaryData = BinaryData.fromBytes((byte[]) this.data); } else { this.binaryData = BinaryData.fromObject(this.data, SERIALIZER); } } else if (this.dataBase64 != null) { this.binaryData = BinaryData.fromString(this.dataBase64); } } return this.binaryData; } /** * Get the type of event, e.g. "Contoso.Items.ItemReceived". * @return the type of the event. */ public String getType() { return this.type; } /** * Get the time associated with the occurrence of the event. * @return the event time, or null if the time is not set. */ public OffsetDateTime getTime() { return this.time; } /** * Set the time associated with the occurrence of the event. * @param time the time to set. * * @return the cloud event itself. */ public CloudEvent setTime(OffsetDateTime time) { this.time = time; return this; } /** * Get the content MIME type that the data is in. A null value indicates that the data is either nonexistent or in the * "application/json" type. Note that "application/json" is still a possible value for this field. * @return the content type the data is in, or null if the data is nonexistent or in "application/json" format. 
*/ public String getDataContentType() { return this.dataContentType; } /** * Get the schema that the data adheres to. * @return a URI of the data schema, or null if it is not set. */ public String getDataSchema() { return this.dataSchema; } /** * Set the schema that the data adheres to. * @param dataSchema a URI identifying the schema of the data. * * @return the cloud event itself. */ public CloudEvent setDataSchema(String dataSchema) { this.dataSchema = dataSchema; return this; } /** * Get the subject associated with this event. * @return the subject, or null if the subject was not set. */ public String getSubject() { return this.subject; } /** * Set the subject of the event. * @param subject the subject to set. * * @return the cloud event itself. */ public CloudEvent setSubject(String subject) { this.subject = subject; return this; } /** * Get a map of the additional user-defined attributes associated with this event. * @return the extension attributes as an unmodifiable map. */ @JsonAnyGetter public Map<String, Object> getExtensionAttributes() { return this.extensionAttributes; } /** * Add/Overwrite a single extension attribute to the cloud event. * @param name the name of the attribute. It must contains only alphanumeric characters and not be be any * CloudEvent reserved attribute names. * @param value the value to associate with the name. * * @return the cloud event itself. * @throws IllegalArgumentException if name format isn't correct. 
*/ @JsonAnySetter public CloudEvent addExtensionAttribute(String name, Object value) { if (Objects.isNull(name)) { throw LOGGER.logExceptionAsError(new IllegalArgumentException("'name' cannot be null.")); } if (Objects.isNull(value)) { throw LOGGER.logExceptionAsError(new IllegalArgumentException("'value' cannot be null.")); } if (!validateAttributeName(name)) { throw LOGGER.logExceptionAsError(new IllegalArgumentException( "'name' must have only small-case alphanumeric characters and not be one of the CloudEvent reserved " + "attribute names")); } if (this.extensionAttributes == null) { this.extensionAttributes = new HashMap<>(); } this.extensionAttributes.put(name.toLowerCase(Locale.ENGLISH), value); return this; } /** * Get the spec version. Users don't need to access it because it's always 1.0. * Make it package level to test deserialization. * @return The spec version. */ String getSpecVersion() { return this.specVersion; } /** * Set the spec version. Users don't need to access it because it's always 1.0. * Make it package level to test serialization. * @return the cloud event itself. 
*/ CloudEvent setSpecVersion(String specVersion) { this.specVersion = specVersion; return this; } private static boolean validateAttributeName(String name) { if (RESERVED_ATTRIBUTE_NAMES.contains(name)) { return false; } for (int i = 0; i < name.length(); i++) { char c = name.charAt(i); if (!((c >= 'a' && c <= 'z') || (c >= '0' && c <= '9'))) { return false; } } return true; } static class JacksonSerializer implements JsonSerializer { private final JacksonAdapter jacksonAdapter = new JacksonAdapter(); @Override public <T> T deserialize(InputStream stream, TypeReference<T> typeReference) { try { return jacksonAdapter.deserialize(stream, typeReference.getJavaType(), SerializerEncoding.JSON); } catch (IOException e) { throw LOGGER.logExceptionAsError(new RuntimeException(e)); } } @Override public <T> Mono<T> deserializeAsync(InputStream stream, TypeReference<T> typeReference) { return Mono.defer(() -> Mono.just(deserialize(stream, typeReference))); } @Override public void serialize(OutputStream stream, Object value) { try { jacksonAdapter.serialize(value, SerializerEncoding.JSON, stream); } catch (IOException e) { throw LOGGER.logExceptionAsError(new RuntimeException(e)); } } @Override public Mono<Void> serializeAsync(OutputStream stream, Object value) { return Mono.fromRunnable(() -> serialize(stream, value)); } JacksonAdapter getJacksonAdapter() { return jacksonAdapter; } } }
class accepts any String for compatibility with legacy systems. * @param type Type of event related to the originating occurrence. * @param data A {@link BinaryData}
Please set the throughput at the database level; that's what we are doing in the SDK workload. Also, if you later need to add a new container, you won't need to configure throughput again.
/**
 * Creates the workload container (if absent) in the configured database, provisioning the
 * container-level manual throughput taken from the configuration.
 *
 * @throws CosmosException if the service call to create the container fails.
 */
public void createContainer() throws CosmosException {
    final String containerId = _configuration.getCollectionId();
    final CosmosAsyncDatabase targetDatabase = _client.getDatabase(_configuration.getDatabaseId());
    final ThroughputProperties manualThroughput = createManualThroughput(_configuration.getThroughput());
    try {
        LOGGER.info("Creating container {} in the database {}", containerId, targetDatabase.getId());
        targetDatabase
            .createContainerIfNotExists(
                new CosmosContainerProperties(containerId, PARTITION_KEY_PATH), manualThroughput)
            .block(RESOURCE_CRUD_WAIT_TIME);
    } catch (CosmosException e) {
        LOGGER.error("Exception while creating container {}", containerId, e);
        throw e;
    }
}
final ThroughputProperties containerThroughput = createManualThroughput(_configuration.getThroughput());
/**
 * Creates the workload container (if absent) in the configured database. No container-level
 * throughput is provisioned; the container inherits the database-level throughput.
 *
 * @throws CosmosException if the service call to create the container fails.
 */
public void createContainer() throws CosmosException {
    final String containerId = _configuration.getCollectionId();
    final CosmosAsyncDatabase targetDatabase = _client.getDatabase(_configuration.getDatabaseId());
    try {
        LOGGER.info("Creating container {} in the database {}", containerId, targetDatabase.getId());
        targetDatabase
            .createContainerIfNotExists(new CosmosContainerProperties(containerId, PARTITION_KEY_PATH))
            .block(RESOURCE_CRUD_WAIT_TIME);
    } catch (CosmosException e) {
        LOGGER.error("Exception while creating container {}", containerId, e);
        throw e;
    }
}
class ResourceManagerImpl implements ResourceManager { private static final Logger LOGGER = LoggerFactory.getLogger(ResourceManagerImpl.class); private static final Duration RESOURCE_CRUD_WAIT_TIME = Duration.ofSeconds(30); private final Configuration _configuration; private final CosmosAsyncClient _client; public ResourceManagerImpl(final Configuration configuration, final CosmosAsyncClient client) { Preconditions.checkNotNull(configuration, "The Workload configuration defining the parameters can not be null"); Preconditions.checkNotNull(client, "Need a non-null client for " + "setting up the Database and containers for the test"); _configuration = configuration; _client = client; } @Override public void createDatabase() throws CosmosException { try { LOGGER.info("Creating database {} for the ctl workload", _configuration.getDatabaseId()); _client.createDatabaseIfNotExists(_configuration.getDatabaseId()) .block(RESOURCE_CRUD_WAIT_TIME); } catch (CosmosException e) { LOGGER.error("Exception while creating database {}", _configuration.getDatabaseId(), e); throw e; } deleteExistingContainers(); } @Override @Override public void deleteResources() { deleteExistingContainers(); final CosmosAsyncDatabase database = _client.getDatabase(_configuration.getDatabaseId()); try { LOGGER.info("Deleting the main database {} used in this test", _configuration.getDatabaseId()); database.delete() .block(RESOURCE_CRUD_WAIT_TIME); } catch (CosmosException e) { LOGGER.error("Exception while deleting the database {}", _configuration.getDatabaseId(), e); throw e; } LOGGER.info("Resource cleanup completed"); } private void deleteExistingContainers() { final CosmosAsyncDatabase database = _client.getDatabase(_configuration.getDatabaseId()); final List<CosmosAsyncContainer> cosmosAsyncContainers = database.readAllContainers() .byPage() .toStream() .flatMap(cosmosContainerPropertiesFeedResponse -> cosmosContainerPropertiesFeedResponse.getResults().stream()) .map(cosmosContainerProperties -> 
database.getContainer(cosmosContainerProperties.getId())) .collect(Collectors.toList()); for (CosmosAsyncContainer cosmosAsyncContainer : cosmosAsyncContainers) { LOGGER.info("Deleting container {} in the Database {}", cosmosAsyncContainer.getId(), _configuration.getDatabaseId()); try { cosmosAsyncContainer.delete() .block(RESOURCE_CRUD_WAIT_TIME); } catch (CosmosException e) { LOGGER.error("Error deleting container {} in the Database {}", cosmosAsyncContainer.getId(), _configuration.getDatabaseId(), e); } } } }
class ResourceManagerImpl implements ResourceManager { private static final Logger LOGGER = LoggerFactory.getLogger(ResourceManagerImpl.class); private static final Duration RESOURCE_CRUD_WAIT_TIME = Duration.ofSeconds(30); private final Configuration _configuration; private final CosmosAsyncClient _client; public ResourceManagerImpl(final Configuration configuration, final CosmosAsyncClient client) { Preconditions.checkNotNull(configuration, "The Workload configuration defining the parameters can not be null"); Preconditions.checkNotNull(client, "Need a non-null client for " + "setting up the Database and containers for the test"); _configuration = configuration; _client = client; } @Override public void createDatabase() throws CosmosException { try { LOGGER.info("Creating database {} for the ctl workload", _configuration.getDatabaseId()); final ThroughputProperties throughputProperties = createManualThroughput(_configuration.getThroughput()); _client.createDatabaseIfNotExists(_configuration.getDatabaseId(), throughputProperties) .block(RESOURCE_CRUD_WAIT_TIME); } catch (CosmosException e) { LOGGER.error("Exception while creating database {}", _configuration.getDatabaseId(), e); throw e; } deleteExistingContainers(); } @Override @Override public void deleteResources() { deleteExistingContainers(); final CosmosAsyncDatabase database = _client.getDatabase(_configuration.getDatabaseId()); try { LOGGER.info("Deleting the main database {} used in this test", _configuration.getDatabaseId()); database.delete() .block(RESOURCE_CRUD_WAIT_TIME); } catch (CosmosException e) { LOGGER.error("Exception while deleting the database {}", _configuration.getDatabaseId(), e); throw e; } LOGGER.info("Resource cleanup completed"); } private void deleteExistingContainers() { final CosmosAsyncDatabase database = _client.getDatabase(_configuration.getDatabaseId()); final List<CosmosAsyncContainer> cosmosAsyncContainers = database.readAllContainers() .byPage() .toStream() 
.flatMap(cosmosContainerPropertiesFeedResponse -> cosmosContainerPropertiesFeedResponse.getResults().stream()) .map(cosmosContainerProperties -> database.getContainer(cosmosContainerProperties.getId())) .collect(Collectors.toList()); for (CosmosAsyncContainer cosmosAsyncContainer : cosmosAsyncContainers) { LOGGER.info("Deleting container {} in the Database {}", cosmosAsyncContainer.getId(), _configuration.getDatabaseId()); try { cosmosAsyncContainer.delete() .block(RESOURCE_CRUD_WAIT_TIME); } catch (CosmosException e) { LOGGER.error("Error deleting container {} in the Database {}", cosmosAsyncContainer.getId(), _configuration.getDatabaseId(), e); } } } }
Updated in the latest revision
public void createContainer() throws CosmosException { final String containerName = _configuration.getCollectionId(); final CosmosAsyncDatabase database = _client.getDatabase(_configuration.getDatabaseId()); final ThroughputProperties containerThroughput = createManualThroughput(_configuration.getThroughput()); try { LOGGER.info("Creating container {} in the database {}", containerName, database.getId()); final CosmosContainerProperties containerProperties = new CosmosContainerProperties(containerName, PARTITION_KEY_PATH); database.createContainerIfNotExists(containerProperties, containerThroughput) .block(RESOURCE_CRUD_WAIT_TIME); } catch (CosmosException e) { LOGGER.error("Exception while creating container {}", containerName, e); throw e; } }
final ThroughputProperties containerThroughput = createManualThroughput(_configuration.getThroughput());
public void createContainer() throws CosmosException { final String containerName = _configuration.getCollectionId(); final CosmosAsyncDatabase database = _client.getDatabase(_configuration.getDatabaseId()); try { LOGGER.info("Creating container {} in the database {}", containerName, database.getId()); final CosmosContainerProperties containerProperties = new CosmosContainerProperties(containerName, PARTITION_KEY_PATH); database.createContainerIfNotExists(containerProperties) .block(RESOURCE_CRUD_WAIT_TIME); } catch (CosmosException e) { LOGGER.error("Exception while creating container {}", containerName, e); throw e; } }
class ResourceManagerImpl implements ResourceManager { private static final Logger LOGGER = LoggerFactory.getLogger(ResourceManagerImpl.class); private static final Duration RESOURCE_CRUD_WAIT_TIME = Duration.ofSeconds(30); private final Configuration _configuration; private final CosmosAsyncClient _client; public ResourceManagerImpl(final Configuration configuration, final CosmosAsyncClient client) { Preconditions.checkNotNull(configuration, "The Workload configuration defining the parameters can not be null"); Preconditions.checkNotNull(client, "Need a non-null client for " + "setting up the Database and containers for the test"); _configuration = configuration; _client = client; } @Override public void createDatabase() throws CosmosException { try { LOGGER.info("Creating database {} for the ctl workload", _configuration.getDatabaseId()); _client.createDatabaseIfNotExists(_configuration.getDatabaseId()) .block(RESOURCE_CRUD_WAIT_TIME); } catch (CosmosException e) { LOGGER.error("Exception while creating database {}", _configuration.getDatabaseId(), e); throw e; } deleteExistingContainers(); } @Override @Override public void deleteResources() { deleteExistingContainers(); final CosmosAsyncDatabase database = _client.getDatabase(_configuration.getDatabaseId()); try { LOGGER.info("Deleting the main database {} used in this test", _configuration.getDatabaseId()); database.delete() .block(RESOURCE_CRUD_WAIT_TIME); } catch (CosmosException e) { LOGGER.error("Exception while deleting the database {}", _configuration.getDatabaseId(), e); throw e; } LOGGER.info("Resource cleanup completed"); } private void deleteExistingContainers() { final CosmosAsyncDatabase database = _client.getDatabase(_configuration.getDatabaseId()); final List<CosmosAsyncContainer> cosmosAsyncContainers = database.readAllContainers() .byPage() .toStream() .flatMap(cosmosContainerPropertiesFeedResponse -> cosmosContainerPropertiesFeedResponse.getResults().stream()) .map(cosmosContainerProperties -> 
database.getContainer(cosmosContainerProperties.getId())) .collect(Collectors.toList()); for (CosmosAsyncContainer cosmosAsyncContainer : cosmosAsyncContainers) { LOGGER.info("Deleting container {} in the Database {}", cosmosAsyncContainer.getId(), _configuration.getDatabaseId()); try { cosmosAsyncContainer.delete() .block(RESOURCE_CRUD_WAIT_TIME); } catch (CosmosException e) { LOGGER.error("Error deleting container {} in the Database {}", cosmosAsyncContainer.getId(), _configuration.getDatabaseId(), e); } } } }
class ResourceManagerImpl implements ResourceManager { private static final Logger LOGGER = LoggerFactory.getLogger(ResourceManagerImpl.class); private static final Duration RESOURCE_CRUD_WAIT_TIME = Duration.ofSeconds(30); private final Configuration _configuration; private final CosmosAsyncClient _client; public ResourceManagerImpl(final Configuration configuration, final CosmosAsyncClient client) { Preconditions.checkNotNull(configuration, "The Workload configuration defining the parameters can not be null"); Preconditions.checkNotNull(client, "Need a non-null client for " + "setting up the Database and containers for the test"); _configuration = configuration; _client = client; } @Override public void createDatabase() throws CosmosException { try { LOGGER.info("Creating database {} for the ctl workload", _configuration.getDatabaseId()); final ThroughputProperties throughputProperties = createManualThroughput(_configuration.getThroughput()); _client.createDatabaseIfNotExists(_configuration.getDatabaseId(), throughputProperties) .block(RESOURCE_CRUD_WAIT_TIME); } catch (CosmosException e) { LOGGER.error("Exception while creating database {}", _configuration.getDatabaseId(), e); throw e; } deleteExistingContainers(); } @Override @Override public void deleteResources() { deleteExistingContainers(); final CosmosAsyncDatabase database = _client.getDatabase(_configuration.getDatabaseId()); try { LOGGER.info("Deleting the main database {} used in this test", _configuration.getDatabaseId()); database.delete() .block(RESOURCE_CRUD_WAIT_TIME); } catch (CosmosException e) { LOGGER.error("Exception while deleting the database {}", _configuration.getDatabaseId(), e); throw e; } LOGGER.info("Resource cleanup completed"); } private void deleteExistingContainers() { final CosmosAsyncDatabase database = _client.getDatabase(_configuration.getDatabaseId()); final List<CosmosAsyncContainer> cosmosAsyncContainers = database.readAllContainers() .byPage() .toStream() 
.flatMap(cosmosContainerPropertiesFeedResponse -> cosmosContainerPropertiesFeedResponse.getResults().stream()) .map(cosmosContainerProperties -> database.getContainer(cosmosContainerProperties.getId())) .collect(Collectors.toList()); for (CosmosAsyncContainer cosmosAsyncContainer : cosmosAsyncContainers) { LOGGER.info("Deleting container {} in the Database {}", cosmosAsyncContainer.getId(), _configuration.getDatabaseId()); try { cosmosAsyncContainer.delete() .block(RESOURCE_CRUD_WAIT_TIME); } catch (CosmosException e) { LOGGER.error("Error deleting container {} in the Database {}", cosmosAsyncContainer.getId(), _configuration.getDatabaseId(), e); } } } }
I don't think this try-catch is required. Any exception thrown from the `fromObject` method will be converted to an error signal by Reactor, since the call happens inside the `Mono.fromCallable` lambda.
public static Mono<BinaryData> fromObjectAsync(Object data) { try { return Mono.fromCallable(() -> fromObject(data)); } catch (RuntimeException runtimeException) { return Mono.error(runtimeException); } }
}
public static Mono<BinaryData> fromObjectAsync(Object data) { return Mono.fromCallable(() -> fromObject(data)); }
class BinaryData { private static final ClientLogger LOGGER = new ClientLogger(BinaryData.class); private static final BinaryData EMPTY_DATA = new BinaryData(new byte[0]); private static final int STREAM_READ_SIZE = 1024; private static final Object LOCK = new Object(); private static volatile JsonSerializer defaultJsonSerializer; private final byte[] data; /** * Create an instance of {@link BinaryData} from the given byte array. * * @param data The byte array that {@link BinaryData} will represent. */ BinaryData(byte[] data) { this.data = data; } /** * Creates an instance of {@link BinaryData} from the given {@link InputStream}. * <p> * If {@code inputStream} is null or empty an empty {@link BinaryData} is returned. * <p> * <b>NOTE:</b> The {@link InputStream} is not closed by this function. * * <p><strong>Create an instance from an InputStream</strong></p> * * {@codesnippet com.azure.core.util.BinaryData.fromStream * * @param inputStream The {@link InputStream} that {@link BinaryData} will represent. * @return A {@link BinaryData} representing the {@link InputStream}. * @throws UncheckedIOException If any error happens while reading the {@link InputStream}. */ public static BinaryData fromStream(InputStream inputStream) { if (Objects.isNull(inputStream)) { return EMPTY_DATA; } try { ByteArrayOutputStream dataOutputBuffer = new ByteArrayOutputStream(); int nRead; byte[] data = new byte[STREAM_READ_SIZE]; while ((nRead = inputStream.read(data, 0, data.length)) != -1) { dataOutputBuffer.write(data, 0, nRead); } return new BinaryData(dataOutputBuffer.toByteArray()); } catch (IOException ex) { throw LOGGER.logExceptionAsError(new UncheckedIOException(ex)); } } /** * Creates an instance of {@link BinaryData} from the given {@link InputStream}. * <p> * If {@code inputStream} is null or empty an empty {@link BinaryData} is returned. * <p> * <b>NOTE:</b> The {@link InputStream} is not closed by this function. 
* * <p><strong>Create an instance from an InputStream</strong></p> * * {@codesnippet com.azure.core.util.BinaryData.fromStreamAsync * * @param inputStream The {@link InputStream} that {@link BinaryData} will represent. * @return A {@link Mono} of {@link BinaryData} representing the {@link InputStream}. * @throws UncheckedIOException If any error happens while reading the {@link InputStream}. */ public static Mono<BinaryData> fromStreamAsync(InputStream inputStream) { try { return Mono.fromCallable(() -> fromStream(inputStream)); } catch (RuntimeException runtimeException) { return Mono.error(runtimeException); } } /** * Creates an instance of {@link BinaryData} from the given {@link Flux} of {@link ByteBuffer}. * <p> * If the {@code data} is null an empty {@link BinaryData} will be returned. * <p> * <b>Note:</b> This will collect all bytes from the {@link ByteBuffer ByteBuffers} resulting in {@link * ByteBuffer * * <p><strong>Create an instance from a Flux of ByteBuffer</strong></p> * * {@codesnippet com.azure.core.util.BinaryData.fromFlux * * @param data The {@link Flux} of {@link ByteBuffer} that {@link BinaryData} will represent. * @return A {@link Mono} of {@link BinaryData} representing the {@link Flux} of {@link ByteBuffer}. */ public static Mono<BinaryData> fromFlux(Flux<ByteBuffer> data) { if (Objects.isNull(data)) { return Mono.just(EMPTY_DATA); } return FluxUtil.collectBytesInByteBufferStream(data) .flatMap(bytes -> Mono.just(new BinaryData(bytes))); } /** * Creates an instance of {@link BinaryData} from the given {@link String}. * <p> * The {@link String} is converted into bytes using {@link String * StandardCharsets * <p> * If the {@code data} is null or a zero length string an empty {@link BinaryData} will be returned. * * <p><strong>Create an instance from a String</strong></p> * * {@codesnippet com.azure.core.util.BinaryData.fromString * * @param data The {@link String} that {@link BinaryData} will represent. 
* @return A {@link BinaryData} representing the {@link String}. */ public static BinaryData fromString(String data) { if (CoreUtils.isNullOrEmpty(data)) { return EMPTY_DATA; } return new BinaryData(data.getBytes(StandardCharsets.UTF_8)); } /** * Creates an instance of {@link BinaryData} from the given byte array. * <p> * If the byte array is null or zero length an empty {@link BinaryData} will be returned. * * <p><strong>Create an instance from a byte array</strong></p> * * {@codesnippet com.azure.core.util.BinaryData.fromBytes * * @param data The byte array that {@link BinaryData} will represent. * @return A {@link BinaryData} representing the byte array. */ public static BinaryData fromBytes(byte[] data) { if (Objects.isNull(data) || data.length == 0) { return EMPTY_DATA; } return new BinaryData(Arrays.copyOf(data, data.length)); } /** * Creates an instance of {@link BinaryData} by serializing the {@link Object} using the default {@link * JsonSerializer}. * <p> * If {@code data} is null an empty {@link BinaryData} will be returned. * <p> * <b>Note:</b> A {@link JsonSerializer} implementation must be available on the classpath. * * <p><strong>Creating an instance from an Object</strong></p> * * {@codesnippet com.azure.core.util.BinaryData.fromObject * * @param data The object that will be JSON serialized that {@link BinaryData} will represent. * @return A {@link BinaryData} representing the JSON serialized object. * @throws IllegalStateException If a {@link JsonSerializer} implementation cannot be found on the classpath. * @see JsonSerializer */ public static BinaryData fromObject(Object data) { if (Objects.isNull(data)) { return EMPTY_DATA; } final ByteArrayOutputStream outputStream = new ByteArrayOutputStream(); getDefaultSerializer().serialize(outputStream, data); return new BinaryData(outputStream.toByteArray()); } /** * Creates an instance of {@link BinaryData} by serializing the {@link Object} using the default {@link * JsonSerializer}. 
* <p> * If {@code data} is null an empty {@link BinaryData} will be returned. * <p> * <b>Note:</b> A {@link JsonSerializer} implementation must be available on the classpath. * * <p><strong>Creating an instance from an Object</strong></p> * * {@codesnippet com.azure.core.util.BinaryData.fromObjectAsync * * @param data The object that will be JSON serialized that {@link BinaryData} will represent. * @return A {@link Mono} of {@link BinaryData} representing the JSON serialized object. * @throws IllegalStateException If a {@link JsonSerializer} implementation cannot be found on the classpath. * @see JsonSerializer */ /** * Creates an instance of {@link BinaryData} by serializing the {@link Object} using the passed {@link * ObjectSerializer}. * <p> * If {@code data} is null an empty {@link BinaryData} will be returned. * <p> * The passed {@link ObjectSerializer} can either be one of the implementations offered by the Azure SDKs or your * own implementation. * * <p><strong>Azure SDK implementations</strong></p> * <ul> * <li><a href="https: * <li><a href="https: * </ul> * * <p><strong>Create an instance from an Object</strong></p> * * {@codesnippet com.azure.core.util.BinaryData.fromObject * * @param data The object that will be serialized that {@link BinaryData} will represent. * @param serializer The {@link ObjectSerializer} used to serialize object. * @return A {@link BinaryData} representing the serialized object. * @throws NullPointerException If {@code serializer} is null. 
* @see ObjectSerializer * @see JsonSerializer * @see <a href="https: */ public static BinaryData fromObject(Object data, ObjectSerializer serializer) { if (Objects.isNull(data)) { return EMPTY_DATA; } Objects.requireNonNull(serializer, "'serializer' cannot be null."); final ByteArrayOutputStream outputStream = new ByteArrayOutputStream(); serializer.serialize(outputStream, data); return new BinaryData(outputStream.toByteArray()); } /** * Creates an instance of {@link BinaryData} by serializing the {@link Object} using the passed {@link * ObjectSerializer}. * <p> * If {@code data} is null an empty {@link BinaryData} will be returned. * <p> * The passed {@link ObjectSerializer} can either be one of the implementations offered by the Azure SDKs or your * own implementation. * * <p><strong>Azure SDK implementations</strong></p> * <ul> * <li><a href="https: * <li><a href="https: * </ul> * * <p><strong>Create an instance from an Object</strong></p> * * {@codesnippet com.azure.core.util.BinaryData.fromObjectAsync * * @param data The object that will be serialized that {@link BinaryData} will represent. * @param serializer The {@link ObjectSerializer} used to serialize object. * @return A {@link Mono} of {@link BinaryData} representing the serialized object. * @throws NullPointerException If {@code serializer} is null. * @see ObjectSerializer * @see JsonSerializer * @see <a href="https: */ public static Mono<BinaryData> fromObjectAsync(Object data, ObjectSerializer serializer) { if (Objects.isNull(serializer)) { return monoError(LOGGER, new NullPointerException("'serializer' cannot be null.")); } return Mono.fromCallable(() -> fromObject(data, serializer)); } /** * Returns a byte array representation of this {@link BinaryData}. * * @return A byte array representing this {@link BinaryData}. 
*/ public byte[] toBytes() { return Arrays.copyOf(this.data, this.data.length); } /** * Returns a {@link String} representation of this {@link BinaryData} by converting its data using the UTF-8 * character set. * * @return A {@link String} representing this {@link BinaryData}. */ public String toString() { return new String(this.data, StandardCharsets.UTF_8); } /** * Returns an {@link Object} representation of this {@link BinaryData} by deserializing its data using the default * {@link JsonSerializer}. * <p> * The type, represented by {@link Class}, should be a non-generic class, for generic classes use {@link * * <p> * <b>Note:</b> A {@link JsonSerializer} implementation must be available on the classpath. * * <p><strong>Get a non-generic Object from the BinaryData</strong></p> * * {@codesnippet com.azure.core.util.BinaryData.toObject * * @param clazz The {@link Class} representing the Object's type. * @param <T> Type of the deserialized Object. * @return An {@link Object} representing the JSON deserialized {@link BinaryData}. * @throws NullPointerException If {@code clazz} is null. * @throws IllegalStateException If a {@link JsonSerializer} implementation cannot be found on the classpath. * @see JsonSerializer */ public <T> T toObject(Class<T> clazz) { return toObject(TypeReference.createInstance(clazz), getDefaultSerializer()); } /** * Returns an {@link Object} representation of this {@link BinaryData} by deserializing its data using the default * {@link JsonSerializer}. * <p> * The type, represented by {@link TypeReference}, can either be a generic or non-generic type. If the type is * generic create a sub-type of {@link TypeReference}, if the type is non-generic use {@link * TypeReference * <p> * <b>Note:</b> A {@link JsonSerializer} implementation must be available on the classpath. 
* * <p><strong>Get a non-generic Object from the BinaryData</strong></p> * * {@codesnippet com.azure.core.util.BinaryData.toObject * * <p><strong>Get a generic Object from the BinaryData</strong></p> * * {@codesnippet com.azure.core.util.BinaryData.toObject * * @param typeReference The {@link TypeReference} representing the Object's type. * @param <T> Type of the deserialized Object. * @return An {@link Object} representing the JSON deserialized {@link BinaryData}. * @throws NullPointerException If {@code typeReference} is null. * @throws IllegalStateException If a {@link JsonSerializer} implementation cannot be found on the classpath. * @see JsonSerializer */ public <T> T toObject(TypeReference<T> typeReference) { return toObject(typeReference, getDefaultSerializer()); } /** * Returns an {@link Object} representation of this {@link BinaryData} by deserializing its data using the passed * {@link ObjectSerializer}. * <p> * The type, represented by {@link Class}, should be a non-generic class, for generic classes use {@link * * <p> * The passed {@link ObjectSerializer} can either be one of the implementations offered by the Azure SDKs or your * own implementation. * * <p><strong>Azure SDK implementations</strong></p> * <ul> * <li><a href="https: * <li><a href="https: * </ul> * * <p><strong>Get a non-generic Object from the BinaryData</strong></p> * * {@codesnippet com.azure.core.util.BinaryData.toObject * * @param clazz The {@link Class} representing the Object's type. * @param serializer The {@link ObjectSerializer} used to deserialize object. * @param <T> Type of the deserialized Object. * @return An {@link Object} representing the deserialized {@link BinaryData}. * @throws NullPointerException If {@code clazz} or {@code serializer} is null. 
* @see ObjectSerializer * @see JsonSerializer * @see <a href="https: */ public <T> T toObject(Class<T> clazz, ObjectSerializer serializer) { return toObject(TypeReference.createInstance(clazz), serializer); } /** * Returns an {@link Object} representation of this {@link BinaryData} by deserializing its data using the passed * {@link ObjectSerializer}. * <p> * The type, represented by {@link TypeReference}, can either be a generic or non-generic type. If the type is * generic create a sub-type of {@link TypeReference}, if the type is non-generic use {@link * TypeReference * <p> * The passed {@link ObjectSerializer} can either be one of the implementations offered by the Azure SDKs or your * own implementation. * * <p><strong>Azure SDK implementations</strong></p> * <ul> * <li><a href="https: * <li><a href="https: * </ul> * * <p><strong>Get a non-generic Object from the BinaryData</strong></p> * * {@codesnippet com.azure.core.util.BinaryData.toObject * * <p><strong>Get a generic Object from the BinaryData</strong></p> * * {@codesnippet com.azure.core.util.BinaryData.toObject * * @param typeReference The {@link TypeReference} representing the Object's type. * @param serializer The {@link ObjectSerializer} used to deserialize object. * @param <T> Type of the deserialized Object. * @return An {@link Object} representing the deserialized {@link BinaryData}. * @throws NullPointerException If {@code typeReference} or {@code serializer} is null. 
* @see ObjectSerializer * @see JsonSerializer * @see <a href="https: */ public <T> T toObject(TypeReference<T> typeReference, ObjectSerializer serializer) { Objects.requireNonNull(typeReference, "'typeReference' cannot be null."); Objects.requireNonNull(serializer, "'serializer' cannot be null."); InputStream jsonStream = new ByteArrayInputStream(this.data); return serializer.deserialize(jsonStream, typeReference); } /** * Returns an {@link Object} representation of this {@link BinaryData} by deserializing its data using the default * {@link JsonSerializer}. * <p> * The type, represented by {@link Class}, should be a non-generic class, for generic classes use {@link * * <p> * <b>Note:</b> A {@link JsonSerializer} implementation must be available on the classpath. * * <p><strong>Get a non-generic Object from the BinaryData</strong></p> * * {@codesnippet com.azure.core.util.BinaryData.toObjectAsync * * @param clazz The {@link Class} representing the Object's type. * @param <T> Type of the deserialized Object. * @return A {@link Mono} of {@link Object} representing the JSON deserialized {@link BinaryData}. * @throws NullPointerException If {@code clazz} is null. * @throws IllegalStateException If a {@link JsonSerializer} implementation cannot be found on the classpath. * @see JsonSerializer */ public <T> Mono<T> toObjectAsync(Class<T> clazz) { return toObjectAsync(TypeReference.createInstance(clazz), getDefaultSerializer()); } /** * Returns an {@link Object} representation of this {@link BinaryData} by deserializing its data using the default * {@link JsonSerializer}. * <p> * The type, represented by {@link TypeReference}, can either be a generic or non-generic type. If the type is * generic create a sub-type of {@link TypeReference}, if the type is non-generic use {@link * TypeReference * <p> * <b>Note:</b> A {@link JsonSerializer} implementation must be available on the classpath. 
* * <p><strong>Get a non-generic Object from the BinaryData</strong></p> * * {@codesnippet com.azure.core.util.BinaryData.toObjectAsync * * <p><strong>Get a generic Object from the BinaryData</strong></p> * * {@codesnippet com.azure.core.util.BinaryData.toObjectAsync * * @param typeReference The {@link TypeReference} representing the Object's type. * @param <T> Type of the deserialized Object. * @return A {@link Mono} of {@link Object} representing the JSON deserialized {@link BinaryData}. * @throws NullPointerException If {@code typeReference} is null. * @throws IllegalStateException If a {@link JsonSerializer} implementation cannot be found on the classpath. * @see JsonSerializer */ public <T> Mono<T> toObjectAsync(TypeReference<T> typeReference) { if (Objects.isNull(typeReference)) { return monoError(LOGGER, new NullPointerException("'typeReference' cannot be null.")); } return Mono.fromCallable(() -> toObject(typeReference)); } /** * Returns an {@link Object} representation of this {@link BinaryData} by deserializing its data using the passed * {@link ObjectSerializer}. * <p> * The type, represented by {@link Class}, should be a non-generic class, for generic classes use {@link * * <p> * The passed {@link ObjectSerializer} can either be one of the implementations offered by the Azure SDKs or your * own implementation. * * <p><strong>Azure SDK implementations</strong></p> * <ul> * <li><a href="https: * <li><a href="https: * </ul> * * <p><strong>Get a non-generic Object from the BinaryData</strong></p> * * {@codesnippet com.azure.core.util.BinaryData.toObjectAsync * * @param clazz The {@link Class} representing the Object's type. * @param serializer The {@link ObjectSerializer} used to deserialize object. * @param <T> Type of the deserialized Object. * @return A {@link Mono} of {@link Object} representing the deserialized {@link BinaryData}. * @throws NullPointerException If {@code clazz} or {@code serializer} is null. 
* @see ObjectSerializer * @see JsonSerializer * @see <a href="https: */ public <T> Mono<T> toObjectAsync(Class<T> clazz, ObjectSerializer serializer) { return toObjectAsync(TypeReference.createInstance(clazz), serializer); } /** * Returns an {@link Object} representation of this {@link BinaryData} by deserializing its data using the passed * {@link ObjectSerializer}. * <p> * The type, represented by {@link TypeReference}, can either be a generic or non-generic type. If the type is * generic create a sub-type of {@link TypeReference}, if the type is non-generic use {@link * TypeReference * <p> * The passed {@link ObjectSerializer} can either be one of the implementations offered by the Azure SDKs or your * own implementation. * * <p><strong>Azure SDK implementations</strong></p> * <ul> * <li><a href="https: * <li><a href="https: * </ul> * * <p><strong>Get a non-generic Object from the BinaryData</strong></p> * * {@codesnippet com.azure.core.util.BinaryData.toObjectAsync * * <p><strong>Get a generic Object from the BinaryData</strong></p> * * {@codesnippet com.azure.core.util.BinaryData.toObjectAsync * * @param typeReference The {@link TypeReference} representing the Object's type. * @param serializer The {@link ObjectSerializer} used to deserialize object. * @param <T> Type of the deserialized Object. * @return A {@link Mono} of {@link Object} representing the deserialized {@link BinaryData}. * @throws NullPointerException If {@code typeReference} or {@code serializer} is null. 
* @see ObjectSerializer * @see JsonSerializer * @see <a href="https: */ public <T> Mono<T> toObjectAsync(TypeReference<T> typeReference, ObjectSerializer serializer) { if (Objects.isNull(typeReference)) { return monoError(LOGGER, new NullPointerException("'typeReference' cannot be null.")); } else if (Objects.isNull(serializer)) { return monoError(LOGGER, new NullPointerException("'serializer' cannot be null.")); } return Mono.fromCallable(() -> toObject(typeReference, serializer)); } /** * Returns an {@link InputStream} representation of this {@link BinaryData}. * * <p><strong>Get an InputStream from the BinaryData</strong></p> * * {@codesnippet com.azure.core.util.BinaryData.toStream} * * @return An {@link InputStream} representing the {@link BinaryData}. */ public InputStream toStream() { return new ByteArrayInputStream(this.data); } /* This will ensure lazy instantiation to avoid hard dependency on Json Serializer. */ private static JsonSerializer getDefaultSerializer() { if (defaultJsonSerializer == null) { synchronized (LOCK) { if (defaultJsonSerializer == null) { defaultJsonSerializer = JsonSerializerProviders.createInstance(); } } } return defaultJsonSerializer; } }
class BinaryData { private static final ClientLogger LOGGER = new ClientLogger(BinaryData.class); private static final BinaryData EMPTY_DATA = new BinaryData(new byte[0]); private static final int STREAM_READ_SIZE = 1024; private static final Object LOCK = new Object(); private static volatile JsonSerializer defaultJsonSerializer; private final byte[] data; private String dataAsStringCache; /** * Create an instance of {@link BinaryData} from the given byte array. * * @param data The byte array that {@link BinaryData} will represent. */ BinaryData(byte[] data) { this.data = data; } /** * Creates an instance of {@link BinaryData} from the given {@link InputStream}. * <p> * If {@code inputStream} is null or empty an empty {@link BinaryData} is returned. * <p> * <b>NOTE:</b> The {@link InputStream} is not closed by this function. * * <p><strong>Create an instance from an InputStream</strong></p> * * {@codesnippet com.azure.core.util.BinaryData.fromStream * * @param inputStream The {@link InputStream} that {@link BinaryData} will represent. * @return A {@link BinaryData} representing the {@link InputStream}. * @throws UncheckedIOException If any error happens while reading the {@link InputStream}. */ public static BinaryData fromStream(InputStream inputStream) { if (Objects.isNull(inputStream)) { return EMPTY_DATA; } try { ByteArrayOutputStream dataOutputBuffer = new ByteArrayOutputStream(); int nRead; byte[] data = new byte[STREAM_READ_SIZE]; while ((nRead = inputStream.read(data, 0, data.length)) != -1) { dataOutputBuffer.write(data, 0, nRead); } return new BinaryData(dataOutputBuffer.toByteArray()); } catch (IOException ex) { throw LOGGER.logExceptionAsError(new UncheckedIOException(ex)); } } /** * Creates an instance of {@link BinaryData} from the given {@link InputStream}. * <p> * If {@code inputStream} is null or empty an empty {@link BinaryData} is returned. * <p> * <b>NOTE:</b> The {@link InputStream} is not closed by this function. 
* * <p><strong>Create an instance from an InputStream</strong></p> * * {@codesnippet com.azure.core.util.BinaryData.fromStreamAsync * * @param inputStream The {@link InputStream} that {@link BinaryData} will represent. * @return A {@link Mono} of {@link BinaryData} representing the {@link InputStream}. * @throws UncheckedIOException If any error happens while reading the {@link InputStream}. */ public static Mono<BinaryData> fromStreamAsync(InputStream inputStream) { return Mono.fromCallable(() -> fromStream(inputStream)); } /** * Creates an instance of {@link BinaryData} from the given {@link Flux} of {@link ByteBuffer}. * <p> * If the {@code data} is null an empty {@link BinaryData} will be returned. * <p> * <b>Note:</b> This will collect all bytes from the {@link ByteBuffer ByteBuffers} resulting in {@link * ByteBuffer * * <p><strong>Create an instance from a Flux of ByteBuffer</strong></p> * * {@codesnippet com.azure.core.util.BinaryData.fromFlux * * @param data The {@link Flux} of {@link ByteBuffer} that {@link BinaryData} will represent. * @return A {@link Mono} of {@link BinaryData} representing the {@link Flux} of {@link ByteBuffer}. */ public static Mono<BinaryData> fromFlux(Flux<ByteBuffer> data) { if (Objects.isNull(data)) { return Mono.just(EMPTY_DATA); } return FluxUtil.collectBytesInByteBufferStream(data) .flatMap(bytes -> Mono.just(new BinaryData(bytes))); } /** * Creates an instance of {@link BinaryData} from the given {@link String}. * <p> * The {@link String} is converted into bytes using {@link String * StandardCharsets * <p> * If the {@code data} is null or a zero length string an empty {@link BinaryData} will be returned. * * <p><strong>Create an instance from a String</strong></p> * * {@codesnippet com.azure.core.util.BinaryData.fromString * * @param data The {@link String} that {@link BinaryData} will represent. * @return A {@link BinaryData} representing the {@link String}. 
*/ public static BinaryData fromString(String data) { if (CoreUtils.isNullOrEmpty(data)) { return EMPTY_DATA; } return new BinaryData(data.getBytes(StandardCharsets.UTF_8)); } /** * Creates an instance of {@link BinaryData} from the given byte array. * <p> * If the byte array is null or zero length an empty {@link BinaryData} will be returned. * * <p><strong>Create an instance from a byte array</strong></p> * * {@codesnippet com.azure.core.util.BinaryData.fromBytes * * @param data The byte array that {@link BinaryData} will represent. * @return A {@link BinaryData} representing the byte array. */ public static BinaryData fromBytes(byte[] data) { if (Objects.isNull(data) || data.length == 0) { return EMPTY_DATA; } return new BinaryData(Arrays.copyOf(data, data.length)); } /** * Creates an instance of {@link BinaryData} by serializing the {@link Object} using the default {@link * JsonSerializer}. * <p> * If {@code data} is null an empty {@link BinaryData} will be returned. * <p> * <b>Note:</b> A {@link JsonSerializer} implementation must be available on the classpath. * * <p><strong>Creating an instance from an Object</strong></p> * * {@codesnippet com.azure.core.util.BinaryData.fromObject * * @param data The object that will be JSON serialized that {@link BinaryData} will represent. * @return A {@link BinaryData} representing the JSON serialized object. * @throws IllegalStateException If a {@link JsonSerializer} implementation cannot be found on the classpath. * @see JsonSerializer */ public static BinaryData fromObject(Object data) { if (Objects.isNull(data)) { return EMPTY_DATA; } final ByteArrayOutputStream outputStream = new ByteArrayOutputStream(); getDefaultSerializer().serialize(outputStream, data); return new BinaryData(outputStream.toByteArray()); } /** * Creates an instance of {@link BinaryData} by serializing the {@link Object} using the default {@link * JsonSerializer}. * <p> * If {@code data} is null an empty {@link BinaryData} will be returned. 
* <p> * <b>Note:</b> A {@link JsonSerializer} implementation must be available on the classpath. * * <p><strong>Creating an instance from an Object</strong></p> * * {@codesnippet com.azure.core.util.BinaryData.fromObjectAsync * * @param data The object that will be JSON serialized that {@link BinaryData} will represent. * @return A {@link Mono} of {@link BinaryData} representing the JSON serialized object. * @throws IllegalStateException If a {@link JsonSerializer} implementation cannot be found on the classpath. * @see JsonSerializer */ /** * Creates an instance of {@link BinaryData} by serializing the {@link Object} using the passed {@link * ObjectSerializer}. * <p> * If {@code data} is null an empty {@link BinaryData} will be returned. * <p> * The passed {@link ObjectSerializer} can either be one of the implementations offered by the Azure SDKs or your * own implementation. * * <p><strong>Azure SDK implementations</strong></p> * <ul> * <li><a href="https: * <li><a href="https: * </ul> * * <p><strong>Create an instance from an Object</strong></p> * * {@codesnippet com.azure.core.util.BinaryData.fromObject * * @param data The object that will be serialized that {@link BinaryData} will represent. * @param serializer The {@link ObjectSerializer} used to serialize object. * @return A {@link BinaryData} representing the serialized object. * @throws NullPointerException If {@code serializer} is null and {@code data} is not null. 
* @see ObjectSerializer * @see JsonSerializer * @see <a href="https: */ public static BinaryData fromObject(Object data, ObjectSerializer serializer) { if (Objects.isNull(data)) { return EMPTY_DATA; } Objects.requireNonNull(serializer, "'serializer' cannot be null."); final ByteArrayOutputStream outputStream = new ByteArrayOutputStream(); serializer.serialize(outputStream, data); return new BinaryData(outputStream.toByteArray()); } /** * Creates an instance of {@link BinaryData} by serializing the {@link Object} using the passed {@link * ObjectSerializer}. * <p> * If {@code data} is null an empty {@link BinaryData} will be returned. * <p> * The passed {@link ObjectSerializer} can either be one of the implementations offered by the Azure SDKs or your * own implementation. * * <p><strong>Azure SDK implementations</strong></p> * <ul> * <li><a href="https: * <li><a href="https: * </ul> * * <p><strong>Create an instance from an Object</strong></p> * * {@codesnippet com.azure.core.util.BinaryData.fromObjectAsync * * @param data The object that will be serialized that {@link BinaryData} will represent. * @param serializer The {@link ObjectSerializer} used to serialize object. * @return A {@link Mono} of {@link BinaryData} representing the serialized object. * @throws NullPointerException If {@code serializer} is null and {@code data} is not null. * @see ObjectSerializer * @see JsonSerializer * @see <a href="https: */ public static Mono<BinaryData> fromObjectAsync(Object data, ObjectSerializer serializer) { return Mono.fromCallable(() -> fromObject(data, serializer)); } /** * Returns a byte array representation of this {@link BinaryData}. * * @return A byte array representing this {@link BinaryData}. */ public byte[] toBytes() { return Arrays.copyOf(this.data, this.data.length); } /** * Returns a {@link String} representation of this {@link BinaryData} by converting its data using the UTF-8 * character set. * * @return A {@link String} representing this {@link BinaryData}. 
*/ public String toString() { if (this.dataAsStringCache == null) { this.dataAsStringCache = new String(this.data, StandardCharsets.UTF_8); } return this.dataAsStringCache; } /** * Returns an {@link Object} representation of this {@link BinaryData} by deserializing its data using the default * {@link JsonSerializer}. * <p> * The type, represented by {@link Class}, should be a non-generic class, for generic classes use {@link * * <p> * <b>Note:</b> A {@link JsonSerializer} implementation must be available on the classpath. * * <p><strong>Get a non-generic Object from the BinaryData</strong></p> * * {@codesnippet com.azure.core.util.BinaryData.toObject * * @param clazz The {@link Class} representing the Object's type. * @param <T> Type of the deserialized Object. * @return An {@link Object} representing the JSON deserialized {@link BinaryData}. * @throws NullPointerException If {@code clazz} is null. * @throws IllegalStateException If a {@link JsonSerializer} implementation cannot be found on the classpath. * @see JsonSerializer */ public <T> T toObject(Class<T> clazz) { return toObject(TypeReference.createInstance(clazz), getDefaultSerializer()); } /** * Returns an {@link Object} representation of this {@link BinaryData} by deserializing its data using the default * {@link JsonSerializer}. * <p> * The type, represented by {@link TypeReference}, can either be a generic or non-generic type. If the type is * generic create a sub-type of {@link TypeReference}, if the type is non-generic use {@link * TypeReference * <p> * <b>Note:</b> A {@link JsonSerializer} implementation must be available on the classpath. * * <p><strong>Get a non-generic Object from the BinaryData</strong></p> * * {@codesnippet com.azure.core.util.BinaryData.toObject * * <p><strong>Get a generic Object from the BinaryData</strong></p> * * {@codesnippet com.azure.core.util.BinaryData.toObject * * @param typeReference The {@link TypeReference} representing the Object's type. 
* @param <T> Type of the deserialized Object. * @return An {@link Object} representing the JSON deserialized {@link BinaryData}. * @throws NullPointerException If {@code typeReference} is null. * @throws IllegalStateException If a {@link JsonSerializer} implementation cannot be found on the classpath. * @see JsonSerializer */ public <T> T toObject(TypeReference<T> typeReference) { return toObject(typeReference, getDefaultSerializer()); } /** * Returns an {@link Object} representation of this {@link BinaryData} by deserializing its data using the passed * {@link ObjectSerializer}. * <p> * The type, represented by {@link Class}, should be a non-generic class, for generic classes use {@link * * <p> * The passed {@link ObjectSerializer} can either be one of the implementations offered by the Azure SDKs or your * own implementation. * * <p><strong>Azure SDK implementations</strong></p> * <ul> * <li><a href="https: * <li><a href="https: * </ul> * * <p><strong>Get a non-generic Object from the BinaryData</strong></p> * * {@codesnippet com.azure.core.util.BinaryData.toObject * * @param clazz The {@link Class} representing the Object's type. * @param serializer The {@link ObjectSerializer} used to deserialize object. * @param <T> Type of the deserialized Object. * @return An {@link Object} representing the deserialized {@link BinaryData}. * @throws NullPointerException If {@code clazz} or {@code serializer} is null. * @see ObjectSerializer * @see JsonSerializer * @see <a href="https: */ public <T> T toObject(Class<T> clazz, ObjectSerializer serializer) { return toObject(TypeReference.createInstance(clazz), serializer); } /** * Returns an {@link Object} representation of this {@link BinaryData} by deserializing its data using the passed * {@link ObjectSerializer}. * <p> * The type, represented by {@link TypeReference}, can either be a generic or non-generic type. 
If the type is * generic create a sub-type of {@link TypeReference}, if the type is non-generic use {@link * TypeReference * <p> * The passed {@link ObjectSerializer} can either be one of the implementations offered by the Azure SDKs or your * own implementation. * * <p><strong>Azure SDK implementations</strong></p> * <ul> * <li><a href="https: * <li><a href="https: * </ul> * * <p><strong>Get a non-generic Object from the BinaryData</strong></p> * * {@codesnippet com.azure.core.util.BinaryData.toObject * * <p><strong>Get a generic Object from the BinaryData</strong></p> * * {@codesnippet com.azure.core.util.BinaryData.toObject * * @param typeReference The {@link TypeReference} representing the Object's type. * @param serializer The {@link ObjectSerializer} used to deserialize object. * @param <T> Type of the deserialized Object. * @return An {@link Object} representing the deserialized {@link BinaryData}. * @throws NullPointerException If {@code typeReference} or {@code serializer} is null. * @see ObjectSerializer * @see JsonSerializer * @see <a href="https: */ public <T> T toObject(TypeReference<T> typeReference, ObjectSerializer serializer) { Objects.requireNonNull(typeReference, "'typeReference' cannot be null."); Objects.requireNonNull(serializer, "'serializer' cannot be null."); InputStream jsonStream = new ByteArrayInputStream(this.data); return serializer.deserialize(jsonStream, typeReference); } /** * Returns an {@link Object} representation of this {@link BinaryData} by deserializing its data using the default * {@link JsonSerializer}. * <p> * The type, represented by {@link Class}, should be a non-generic class, for generic classes use {@link * * <p> * <b>Note:</b> A {@link JsonSerializer} implementation must be available on the classpath. * * <p><strong>Get a non-generic Object from the BinaryData</strong></p> * * {@codesnippet com.azure.core.util.BinaryData.toObjectAsync * * @param clazz The {@link Class} representing the Object's type. 
* @param <T> Type of the deserialized Object. * @return A {@link Mono} of {@link Object} representing the JSON deserialized {@link BinaryData}. * @throws NullPointerException If {@code clazz} is null. * @throws IllegalStateException If a {@link JsonSerializer} implementation cannot be found on the classpath. * @see JsonSerializer */ public <T> Mono<T> toObjectAsync(Class<T> clazz) { return toObjectAsync(TypeReference.createInstance(clazz), getDefaultSerializer()); } /** * Returns an {@link Object} representation of this {@link BinaryData} by deserializing its data using the default * {@link JsonSerializer}. * <p> * The type, represented by {@link TypeReference}, can either be a generic or non-generic type. If the type is * generic create a sub-type of {@link TypeReference}, if the type is non-generic use {@link * TypeReference * <p> * <b>Note:</b> A {@link JsonSerializer} implementation must be available on the classpath. * * <p><strong>Get a non-generic Object from the BinaryData</strong></p> * * {@codesnippet com.azure.core.util.BinaryData.toObjectAsync * * <p><strong>Get a generic Object from the BinaryData</strong></p> * * {@codesnippet com.azure.core.util.BinaryData.toObjectAsync * * @param typeReference The {@link TypeReference} representing the Object's type. * @param <T> Type of the deserialized Object. * @return A {@link Mono} of {@link Object} representing the JSON deserialized {@link BinaryData}. * @throws NullPointerException If {@code typeReference} is null. * @throws IllegalStateException If a {@link JsonSerializer} implementation cannot be found on the classpath. * @see JsonSerializer */ public <T> Mono<T> toObjectAsync(TypeReference<T> typeReference) { return Mono.fromCallable(() -> toObject(typeReference)); } /** * Returns an {@link Object} representation of this {@link BinaryData} by deserializing its data using the passed * {@link ObjectSerializer}. 
* <p> * The type, represented by {@link Class}, should be a non-generic class, for generic classes use {@link * * <p> * The passed {@link ObjectSerializer} can either be one of the implementations offered by the Azure SDKs or your * own implementation. * * <p><strong>Azure SDK implementations</strong></p> * <ul> * <li><a href="https: * <li><a href="https: * </ul> * * <p><strong>Get a non-generic Object from the BinaryData</strong></p> * * {@codesnippet com.azure.core.util.BinaryData.toObjectAsync * * @param clazz The {@link Class} representing the Object's type. * @param serializer The {@link ObjectSerializer} used to deserialize object. * @param <T> Type of the deserialized Object. * @return A {@link Mono} of {@link Object} representing the deserialized {@link BinaryData}. * @throws NullPointerException If {@code clazz} or {@code serializer} is null. * @see ObjectSerializer * @see JsonSerializer * @see <a href="https: */ public <T> Mono<T> toObjectAsync(Class<T> clazz, ObjectSerializer serializer) { return toObjectAsync(TypeReference.createInstance(clazz), serializer); } /** * Returns an {@link Object} representation of this {@link BinaryData} by deserializing its data using the passed * {@link ObjectSerializer}. * <p> * The type, represented by {@link TypeReference}, can either be a generic or non-generic type. If the type is * generic create a sub-type of {@link TypeReference}, if the type is non-generic use {@link * TypeReference * <p> * The passed {@link ObjectSerializer} can either be one of the implementations offered by the Azure SDKs or your * own implementation. 
* * <p><strong>Azure SDK implementations</strong></p> * <ul> * <li><a href="https: * <li><a href="https: * </ul> * * <p><strong>Get a non-generic Object from the BinaryData</strong></p> * * {@codesnippet com.azure.core.util.BinaryData.toObjectAsync * * <p><strong>Get a generic Object from the BinaryData</strong></p> * * {@codesnippet com.azure.core.util.BinaryData.toObjectAsync * * @param typeReference The {@link TypeReference} representing the Object's type. * @param serializer The {@link ObjectSerializer} used to deserialize object. * @param <T> Type of the deserialized Object. * @return A {@link Mono} of {@link Object} representing the deserialized {@link BinaryData}. * @throws NullPointerException If {@code typeReference} or {@code serializer} is null. * @see ObjectSerializer * @see JsonSerializer * @see <a href="https: */ public <T> Mono<T> toObjectAsync(TypeReference<T> typeReference, ObjectSerializer serializer) { return Mono.fromCallable(() -> toObject(typeReference, serializer)); } /** * Returns an {@link InputStream} representation of this {@link BinaryData}. * * <p><strong>Get an InputStream from the BinaryData</strong></p> * * {@codesnippet com.azure.core.util.BinaryData.toStream} * * @return An {@link InputStream} representing the {@link BinaryData}. */ public InputStream toStream() { return new ByteArrayInputStream(this.data); } /* This will ensure lazy instantiation to avoid hard dependency on Json Serializer. */ private static JsonSerializer getDefaultSerializer() { if (defaultJsonSerializer == null) { synchronized (LOCK) { if (defaultJsonSerializer == null) { defaultJsonSerializer = JsonSerializerProviders.createInstance(); } } } return defaultJsonSerializer; } }
>om", [](start = 18, length = 4) We are making the type mandatory in .NET; we can do the same here: [https://github.com/Azure/azure-cosmos-dotnet-v3/pull/2283](https://github.com/Azure/azure-cosmos-dotnet-v3/pull/2283)
public EncryptionKeyWrapMetadata(String name, String value) { this("custom", name, value, null); }
this("custom", name, value, null);
public EncryptionKeyWrapMetadata(String name, String value) { this("custom", name, value, null); }
class EncryptionKeyWrapMetadata { /** * For JSON deserialize */ EncryptionKeyWrapMetadata() { } /** * Creates a new instance of key wrap metadata based on an existing instance. * * @param source Existing instance from which to initialize. */ @Beta(value = Beta.SinceVersion.V4_13_0, warningText = Beta.PREVIEW_SUBJECT_TO_CHANGE_WARNING) public EncryptionKeyWrapMetadata(EncryptionKeyWrapMetadata source) { this.type = source.type; this.algorithm = source.algorithm; this.value = source.value; } @Beta(value = Beta.SinceVersion.V4_13_0, warningText = Beta.PREVIEW_SUBJECT_TO_CHANGE_WARNING) EncryptionKeyWrapMetadata(String type, String name, String value) { this(type, name, value, null); } EncryptionKeyWrapMetadata(String type, String name, String value, String algorithm) { Preconditions.checkNotNull(type, "type is null"); Preconditions.checkNotNull(value, "value is null"); this.type = type; this.name = name; this.value = value; this.algorithm = algorithm; } @JsonProperty("type") @JsonInclude(JsonInclude.Include.NON_NULL) String type; @JsonProperty("algorithm") @JsonInclude(JsonInclude.Include.NON_NULL) String algorithm; @JsonProperty("value") @JsonInclude(JsonInclude.Include.NON_NULL) String value; @JsonProperty("name") @JsonInclude(JsonInclude.Include.NON_NULL) String name; /** * Serialized form of metadata. * Note: This value is saved in the Cosmos DB service. * implementors of derived implementations should ensure that this does not have (private) key material or * credential information. * @return value of metadata */ @Beta(value = Beta.SinceVersion.V4_13_0, warningText = Beta.PREVIEW_SUBJECT_TO_CHANGE_WARNING) public String getValue() { return value; } /** * Serialized form of metadata. * Note: This value is saved in the Cosmos DB service. * implementors of derived implementations should ensure that this does not have (private) key material or * credential information. * @return name of metadata. 
*/ @Beta(value = Beta.SinceVersion.V4_13_0, warningText = Beta.PREVIEW_SUBJECT_TO_CHANGE_WARNING) public String getName() { return name; } @Override public boolean equals(Object o) { if (this == o) return true; if (o == null || getClass() != o.getClass()) return false; EncryptionKeyWrapMetadata that = (EncryptionKeyWrapMetadata) o; return Objects.equals(type, that.type) && Objects.equals(algorithm, that.algorithm) && Objects.equals(value, that.value); } @Override public int hashCode() { return Objects.hash(type, algorithm, value); } }
class EncryptionKeyWrapMetadata { /** * For JSON deserialize */ EncryptionKeyWrapMetadata() { } /** * Creates a new instance of key wrap metadata based on an existing instance. * * @param source Existing instance from which to initialize. */ @Beta(value = Beta.SinceVersion.V4_13_0, warningText = Beta.PREVIEW_SUBJECT_TO_CHANGE_WARNING) public EncryptionKeyWrapMetadata(EncryptionKeyWrapMetadata source) { this.type = source.type; this.algorithm = source.algorithm; this.value = source.value; } @Beta(value = Beta.SinceVersion.V4_13_0, warningText = Beta.PREVIEW_SUBJECT_TO_CHANGE_WARNING) EncryptionKeyWrapMetadata(String type, String name, String value) { this(type, name, value, null); } EncryptionKeyWrapMetadata(String type, String name, String value, String algorithm) { Preconditions.checkNotNull(type, "type is null"); Preconditions.checkNotNull(value, "value is null"); this.type = type; this.name = name; this.value = value; this.algorithm = algorithm; } @JsonProperty("type") @JsonInclude(JsonInclude.Include.NON_NULL) String type; @JsonProperty("algorithm") @JsonInclude(JsonInclude.Include.NON_NULL) String algorithm; @JsonProperty("value") @JsonInclude(JsonInclude.Include.NON_NULL) String value; @JsonProperty("name") @JsonInclude(JsonInclude.Include.NON_NULL) String name; /** * Serialized form of metadata. * Note: This value is saved in the Cosmos DB service. * implementors of derived implementations should ensure that this does not have (private) key material or * credential information. * @return value of metadata */ @Beta(value = Beta.SinceVersion.V4_13_0, warningText = Beta.PREVIEW_SUBJECT_TO_CHANGE_WARNING) public String getValue() { return value; } /** * Serialized form of metadata. * Note: This value is saved in the Cosmos DB service. * implementors of derived implementations should ensure that this does not have (private) key material or * credential information. * @return name of metadata. 
*/ @Beta(value = Beta.SinceVersion.V4_13_0, warningText = Beta.PREVIEW_SUBJECT_TO_CHANGE_WARNING) public String getName() { return name; } @Override public boolean equals(Object o) { if (this == o) return true; if (o == null || getClass() != o.getClass()) return false; EncryptionKeyWrapMetadata that = (EncryptionKeyWrapMetadata) o; return Objects.equals(type, that.type) && Objects.equals(algorithm, that.algorithm) && Objects.equals(value, that.value); } @Override public int hashCode() { return Objects.hash(type, algorithm, value); } }
Thanks, captured in the miscellaneous encryption work item, which will land after the preview release. https://github.com/Azure/azure-sdk-for-java/issues/19990
public EncryptionKeyWrapMetadata(String name, String value) { this("custom", name, value, null); }
this("custom", name, value, null);
public EncryptionKeyWrapMetadata(String name, String value) { this("custom", name, value, null); }
class EncryptionKeyWrapMetadata { /** * For JSON deserialize */ EncryptionKeyWrapMetadata() { } /** * Creates a new instance of key wrap metadata based on an existing instance. * * @param source Existing instance from which to initialize. */ @Beta(value = Beta.SinceVersion.V4_13_0, warningText = Beta.PREVIEW_SUBJECT_TO_CHANGE_WARNING) public EncryptionKeyWrapMetadata(EncryptionKeyWrapMetadata source) { this.type = source.type; this.algorithm = source.algorithm; this.value = source.value; } @Beta(value = Beta.SinceVersion.V4_13_0, warningText = Beta.PREVIEW_SUBJECT_TO_CHANGE_WARNING) EncryptionKeyWrapMetadata(String type, String name, String value) { this(type, name, value, null); } EncryptionKeyWrapMetadata(String type, String name, String value, String algorithm) { Preconditions.checkNotNull(type, "type is null"); Preconditions.checkNotNull(value, "value is null"); this.type = type; this.name = name; this.value = value; this.algorithm = algorithm; } @JsonProperty("type") @JsonInclude(JsonInclude.Include.NON_NULL) String type; @JsonProperty("algorithm") @JsonInclude(JsonInclude.Include.NON_NULL) String algorithm; @JsonProperty("value") @JsonInclude(JsonInclude.Include.NON_NULL) String value; @JsonProperty("name") @JsonInclude(JsonInclude.Include.NON_NULL) String name; /** * Serialized form of metadata. * Note: This value is saved in the Cosmos DB service. * implementors of derived implementations should ensure that this does not have (private) key material or * credential information. * @return value of metadata */ @Beta(value = Beta.SinceVersion.V4_13_0, warningText = Beta.PREVIEW_SUBJECT_TO_CHANGE_WARNING) public String getValue() { return value; } /** * Serialized form of metadata. * Note: This value is saved in the Cosmos DB service. * implementors of derived implementations should ensure that this does not have (private) key material or * credential information. * @return name of metadata. 
*/ @Beta(value = Beta.SinceVersion.V4_13_0, warningText = Beta.PREVIEW_SUBJECT_TO_CHANGE_WARNING) public String getName() { return name; } @Override public boolean equals(Object o) { if (this == o) return true; if (o == null || getClass() != o.getClass()) return false; EncryptionKeyWrapMetadata that = (EncryptionKeyWrapMetadata) o; return Objects.equals(type, that.type) && Objects.equals(algorithm, that.algorithm) && Objects.equals(value, that.value); } @Override public int hashCode() { return Objects.hash(type, algorithm, value); } }
class EncryptionKeyWrapMetadata { /** * For JSON deserialize */ EncryptionKeyWrapMetadata() { } /** * Creates a new instance of key wrap metadata based on an existing instance. * * @param source Existing instance from which to initialize. */ @Beta(value = Beta.SinceVersion.V4_13_0, warningText = Beta.PREVIEW_SUBJECT_TO_CHANGE_WARNING) public EncryptionKeyWrapMetadata(EncryptionKeyWrapMetadata source) { this.type = source.type; this.algorithm = source.algorithm; this.value = source.value; } @Beta(value = Beta.SinceVersion.V4_13_0, warningText = Beta.PREVIEW_SUBJECT_TO_CHANGE_WARNING) EncryptionKeyWrapMetadata(String type, String name, String value) { this(type, name, value, null); } EncryptionKeyWrapMetadata(String type, String name, String value, String algorithm) { Preconditions.checkNotNull(type, "type is null"); Preconditions.checkNotNull(value, "value is null"); this.type = type; this.name = name; this.value = value; this.algorithm = algorithm; } @JsonProperty("type") @JsonInclude(JsonInclude.Include.NON_NULL) String type; @JsonProperty("algorithm") @JsonInclude(JsonInclude.Include.NON_NULL) String algorithm; @JsonProperty("value") @JsonInclude(JsonInclude.Include.NON_NULL) String value; @JsonProperty("name") @JsonInclude(JsonInclude.Include.NON_NULL) String name; /** * Serialized form of metadata. * Note: This value is saved in the Cosmos DB service. * implementors of derived implementations should ensure that this does not have (private) key material or * credential information. * @return value of metadata */ @Beta(value = Beta.SinceVersion.V4_13_0, warningText = Beta.PREVIEW_SUBJECT_TO_CHANGE_WARNING) public String getValue() { return value; } /** * Serialized form of metadata. * Note: This value is saved in the Cosmos DB service. * implementors of derived implementations should ensure that this does not have (private) key material or * credential information. * @return name of metadata. 
*/ @Beta(value = Beta.SinceVersion.V4_13_0, warningText = Beta.PREVIEW_SUBJECT_TO_CHANGE_WARNING) public String getName() { return name; } @Override public boolean equals(Object o) { if (this == o) return true; if (o == null || getClass() != o.getClass()) return false; EncryptionKeyWrapMetadata that = (EncryptionKeyWrapMetadata) o; return Objects.equals(type, that.type) && Objects.equals(algorithm, that.algorithm) && Objects.equals(value, that.value); } @Override public int hashCode() { return Objects.hash(type, algorithm, value); } }
I have restarted the failed builds.
public void info(String message) { if (logger.isInfoEnabled()) { logger.info(sanitizeLogMessageInput(message)); } }
}
public void info(String message) { if (logger.isInfoEnabled()) { logger.info(sanitizeLogMessageInput(message)); } }
class name using the {@link LoggerFactory}
class name using the {@link LoggerFactory}
999? Leftover code for testing?
/**
 * Folds the backend query metrics (QUERY_METRICS response header), the request charge,
 * and client-side retry/scheduling metrics into the page result, keyed by this
 * producer's feed range.
 */
void populatePartitionedQueryMetrics() {
    String queryMetricsDelimitedString =
        pageResult.getResponseHeaders().get(HttpConstants.HttpHeaders.QUERY_METRICS);
    if (!StringUtils.isEmpty(queryMetricsDelimitedString)) {
        // Append the request charge so it is parsed alongside the backend metrics.
        queryMetricsDelimitedString += String.format(Locale.ROOT, ";%s=%.2f",
            QueryMetricsConstants.RequestCharge, pageResult.getRequestCharge());
        // Key the metrics by the producer's feed range instead of the "999" test placeholder.
        final String feedRangeKey = feedRange.getRange().toString();
        ImmutablePair<String, SchedulingTimeSpan> schedulingTimeSpanMap =
            new ImmutablePair<>(feedRangeKey, fetchSchedulingMetrics.getElapsedTime());
        QueryMetrics qm = BridgeInternal.createQueryMetricsFromDelimitedStringAndClientSideMetrics(
            queryMetricsDelimitedString,
            new ClientSideMetrics(retries,
                pageResult.getRequestCharge(),
                fetchExecutionRangeAccumulator.getExecutionRanges(),
                Arrays.asList(schedulingTimeSpanMap)),
            pageResult.getActivityId());
        BridgeInternal.putQueryMetricsIntoMap(pageResult, feedRangeKey, qm);
    }
}
new ImmutablePair<>("999", fetchSchedulingMetrics.getElapsedTime());
// Folds the backend query metrics (QUERY_METRICS response header), the request charge,
// and client-side retry/scheduling metrics into the page result, keyed by this
// producer's feed range.
void populatePartitionedQueryMetrics() {
    String queryMetricsDelimitedString = pageResult.getResponseHeaders().get(HttpConstants.HttpHeaders.QUERY_METRICS);
    if (!StringUtils.isEmpty(queryMetricsDelimitedString)) {
        // Append the request charge so it is parsed alongside the backend metrics.
        queryMetricsDelimitedString += String.format(Locale.ROOT, ";%s=%.2f",
            QueryMetricsConstants.RequestCharge, pageResult.getRequestCharge());
        ImmutablePair<String, SchedulingTimeSpan> schedulingTimeSpanMap =
            new ImmutablePair<>(feedRange.getRange().toString(), fetchSchedulingMetrics.getElapsedTime());
        QueryMetrics qm = BridgeInternal.createQueryMetricsFromDelimitedStringAndClientSideMetrics(queryMetricsDelimitedString,
            new ClientSideMetrics(retries,
                pageResult.getRequestCharge(),
                fetchExecutionRangeAccumulator.getExecutionRanges(),
                Arrays.asList(schedulingTimeSpanMap)),
            pageResult.getActivityId());
        BridgeInternal.putQueryMetricsIntoMap(pageResult, feedRange.getRange().toString(), qm);
    }
}
class DocumentProducerFeedResponse {
    // The raw page returned by the backend for this producer's range.
    FeedResponse<T> pageResult;
    // The EPK feed range the page was served from.
    FeedRangeEpkImpl sourceFeedRange;

    // Wraps a page served from the enclosing producer's own feed range.
    DocumentProducerFeedResponse(FeedResponse<T> pageResult) {
        this.pageResult = pageResult;
        this.sourceFeedRange = DocumentProducer.this.feedRange;
        populatePartitionedQueryMetrics();
    }

    // Wraps a page served from an explicitly supplied feed range.
    DocumentProducerFeedResponse(FeedResponse<T> pageResult, FeedRange feedRange) {
        this.pageResult = pageResult;
        // Assumes callers always pass an EPK-based range -- TODO confirm.
        this.sourceFeedRange = (FeedRangeEpkImpl) feedRange;
        populatePartitionedQueryMetrics();
    }
}
class DocumentProducerFeedResponse {
    // The raw page returned by the backend for this producer's range.
    FeedResponse<T> pageResult;
    // The EPK feed range the page was served from.
    FeedRangeEpkImpl sourceFeedRange;

    // Wraps a page served from the enclosing producer's own feed range.
    DocumentProducerFeedResponse(FeedResponse<T> pageResult) {
        this.pageResult = pageResult;
        this.sourceFeedRange = DocumentProducer.this.feedRange;
        populatePartitionedQueryMetrics();
    }

    // Wraps a page served from an explicitly supplied feed range.
    DocumentProducerFeedResponse(FeedResponse<T> pageResult, FeedRange feedRange) {
        this.pageResult = pageResult;
        // Assumes callers always pass an EPK-based range -- TODO confirm.
        this.sourceFeedRange = (FeedRangeEpkImpl) feedRange;
        populatePartitionedQueryMetrics();
    }
}
ditto
/**
 * Folds the backend query metrics (QUERY_METRICS response header), the request charge,
 * and client-side retry/scheduling metrics into the page result, keyed by this
 * producer's feed range.
 */
void populatePartitionedQueryMetrics() {
    String queryMetricsDelimitedString =
        pageResult.getResponseHeaders().get(HttpConstants.HttpHeaders.QUERY_METRICS);
    if (!StringUtils.isEmpty(queryMetricsDelimitedString)) {
        // Append the request charge so it is parsed alongside the backend metrics.
        queryMetricsDelimitedString += String.format(Locale.ROOT, ";%s=%.2f",
            QueryMetricsConstants.RequestCharge, pageResult.getRequestCharge());
        // Key the metrics by the producer's feed range instead of the "999" test placeholder.
        final String feedRangeKey = feedRange.getRange().toString();
        ImmutablePair<String, SchedulingTimeSpan> schedulingTimeSpanMap =
            new ImmutablePair<>(feedRangeKey, fetchSchedulingMetrics.getElapsedTime());
        QueryMetrics qm = BridgeInternal.createQueryMetricsFromDelimitedStringAndClientSideMetrics(
            queryMetricsDelimitedString,
            new ClientSideMetrics(retries,
                pageResult.getRequestCharge(),
                fetchExecutionRangeAccumulator.getExecutionRanges(),
                Arrays.asList(schedulingTimeSpanMap)),
            pageResult.getActivityId());
        BridgeInternal.putQueryMetricsIntoMap(pageResult, feedRangeKey, qm);
    }
}
BridgeInternal.putQueryMetricsIntoMap(pageResult, "999", qm);
// Folds the backend query metrics (QUERY_METRICS response header), the request charge,
// and client-side retry/scheduling metrics into the page result, keyed by this
// producer's feed range.
void populatePartitionedQueryMetrics() {
    String queryMetricsDelimitedString = pageResult.getResponseHeaders().get(HttpConstants.HttpHeaders.QUERY_METRICS);
    if (!StringUtils.isEmpty(queryMetricsDelimitedString)) {
        // Append the request charge so it is parsed alongside the backend metrics.
        queryMetricsDelimitedString += String.format(Locale.ROOT, ";%s=%.2f",
            QueryMetricsConstants.RequestCharge, pageResult.getRequestCharge());
        ImmutablePair<String, SchedulingTimeSpan> schedulingTimeSpanMap =
            new ImmutablePair<>(feedRange.getRange().toString(), fetchSchedulingMetrics.getElapsedTime());
        QueryMetrics qm = BridgeInternal.createQueryMetricsFromDelimitedStringAndClientSideMetrics(queryMetricsDelimitedString,
            new ClientSideMetrics(retries,
                pageResult.getRequestCharge(),
                fetchExecutionRangeAccumulator.getExecutionRanges(),
                Arrays.asList(schedulingTimeSpanMap)),
            pageResult.getActivityId());
        BridgeInternal.putQueryMetricsIntoMap(pageResult, feedRange.getRange().toString(), qm);
    }
}
class DocumentProducerFeedResponse {
    // The raw page returned by the backend for this producer's range.
    FeedResponse<T> pageResult;
    // The EPK feed range the page was served from.
    FeedRangeEpkImpl sourceFeedRange;

    // Wraps a page served from the enclosing producer's own feed range.
    DocumentProducerFeedResponse(FeedResponse<T> pageResult) {
        this.pageResult = pageResult;
        this.sourceFeedRange = DocumentProducer.this.feedRange;
        populatePartitionedQueryMetrics();
    }

    // Wraps a page served from an explicitly supplied feed range.
    DocumentProducerFeedResponse(FeedResponse<T> pageResult, FeedRange feedRange) {
        this.pageResult = pageResult;
        // Assumes callers always pass an EPK-based range -- TODO confirm.
        this.sourceFeedRange = (FeedRangeEpkImpl) feedRange;
        populatePartitionedQueryMetrics();
    }
}
class DocumentProducerFeedResponse {
    // The raw page returned by the backend for this producer's range.
    FeedResponse<T> pageResult;
    // The EPK feed range the page was served from.
    FeedRangeEpkImpl sourceFeedRange;

    // Wraps a page served from the enclosing producer's own feed range.
    DocumentProducerFeedResponse(FeedResponse<T> pageResult) {
        this.pageResult = pageResult;
        this.sourceFeedRange = DocumentProducer.this.feedRange;
        populatePartitionedQueryMetrics();
    }

    // Wraps a page served from an explicitly supplied feed range.
    DocumentProducerFeedResponse(FeedResponse<T> pageResult, FeedRange feedRange) {
        this.pageResult = pageResult;
        // Assumes callers always pass an EPK-based range -- TODO confirm.
        this.sourceFeedRange = (FeedRangeEpkImpl) feedRange;
        populatePartitionedQueryMetrics();
    }
}
Is this still relevant?
/**
 * Flattens producer feed responses into individual order-by rows: accumulates
 * client-side diagnostics and per-key query metrics, drops rows that sort
 * at-or-before the continuation token's position, tracks the request charge,
 * and wraps each remaining result in an OrderByRowResult.
 */
public Flux<OrderByRowResult<T>> apply(Flux<DocumentProducer<T>.DocumentProducerFeedResponse> source) {
    return source.flatMap(documentProducerFeedResponse -> {
        clientSideRequestStatisticsList.addAll(
            BridgeInternal.getClientSideRequestStatisticsList(documentProducerFeedResponse
                .pageResult.getCosmosDiagnostics()));
        // Merge this page's query metrics into the shared map, aggregating per key.
        for (String key : BridgeInternal.queryMetricsFromFeedResponse(documentProducerFeedResponse.pageResult)
                              .keySet()) {
            if (queryMetricsMap.containsKey(key)) {
                QueryMetrics qm =
                    BridgeInternal.queryMetricsFromFeedResponse(documentProducerFeedResponse.pageResult).get(key);
                queryMetricsMap.get(key).add(qm);
            } else {
                queryMetricsMap.put(key,
                    BridgeInternal.queryMetricsFromFeedResponse(documentProducerFeedResponse.pageResult).get(key));
            }
        }
        List<T> results = documentProducerFeedResponse.pageResult.getResults();
        // NOTE(review): the hard-coded "1" lookup key looks like a placeholder --
        // presumably this should be keyed by the response's source feed range; confirm.
        OrderByContinuationToken orderByContinuationToken =
            targetRangeToOrderByContinuationTokenMap.get("1");
        if (orderByContinuationToken != null) {
            Pair<Boolean, ResourceId> booleanResourceIdPair = ResourceId.tryParse(orderByContinuationToken.getRid());
            if (!booleanResourceIdPair.getLeft()) {
                return Flux.error(new BadRequestException(String.format("INVALID Rid in the continuation token %s for OrderBy~Context.",
                    orderByContinuationToken.getCompositeContinuationToken().getToken())));
            }
            ResourceId continuationTokenRid = booleanResourceIdPair.getRight();
            // Filter out rows already covered by the continuation token.
            results = results.stream()
                .filter(tOrderByRowResult -> {
                    List<QueryItem> queryItems = new ArrayList<QueryItem>();
                    ArrayNode arrayNode = (ArrayNode) ModelBridgeInternal.getObjectFromJsonSerializable(tOrderByRowResult, "orderByItems");
                    for (JsonNode jsonNode : arrayNode) {
                        QueryItem queryItem = new QueryItem(jsonNode.toString());
                        queryItems.add(queryItem);
                    }
                    long cmp = 0;
                    // Compare the row's order-by items against the token's, honoring each sort order.
                    for (int i = 0; i < sortOrders.size(); i++) {
                        cmp = ItemComparator.getInstance().compare(orderByContinuationToken.getOrderByItems()[i].getItem(),
                            queryItems.get(i).getItem());
                        if (cmp != 0) {
                            cmp = sortOrders.get(i).equals(SortOrder.Descending) ? -cmp : cmp;
                            break;
                        }
                    }
                    if (cmp == 0) {
                        // Order-by items tie: break the tie on the document RID.
                        cmp = (continuationTokenRid.getDocument() - ResourceId.tryParse(tOrderByRowResult.getResourceId()).getRight().getDocument());
                        if (sortOrders.iterator().next().equals(SortOrder.Descending)) {
                            cmp = -cmp;
                        }
                        return (cmp <= 0);
                    }
                    return true;
                })
                .collect(Collectors.toList());
        }
        tracker.addCharge(documentProducerFeedResponse.pageResult.getRequestCharge());
        Flux<T> x = Flux.fromIterable(results);
        // NOTE(review): sourceFeedRange is passed as null here -- presumably it should
        // be documentProducerFeedResponse.sourceFeedRange; verify against callers.
        return x.map(r -> new OrderByRowResult<T>(
            klass,
            ModelBridgeInternal.toJsonFromJsonSerializable(r),
            null,
            documentProducerFeedResponse.pageResult.getContinuationToken()));
    }, 1);
}
new BadRequestException(String.format("INVALID Rid in the continuation token %s for OrderBy~Context.", orderByContinuationToken.getCompositeContinuationToken().getToken()))); } ResourceId continuationTokenRid = booleanResourceIdPair.getRight(); results = results.stream() .filter(tOrderByRowResult -> { List<QueryItem> queryItems = new ArrayList<QueryItem>(); ArrayNode arrayNode = (ArrayNode)ModelBridgeInternal.getObjectFromJsonSerializable(tOrderByRowResult, "orderByItems"); for (JsonNode jsonNode : arrayNode) { QueryItem queryItem = new QueryItem(jsonNode.toString()); queryItems.add(queryItem); } long cmp = 0; for (int i = 0; i < sortOrders.size(); i++) { cmp = ItemComparator.getInstance().compare(orderByContinuationToken.getOrderByItems()[i].getItem(), queryItems.get(i).getItem()); if (cmp != 0) { cmp = sortOrders.get(i).equals(SortOrder.Descending) ? -cmp : cmp; break; } } if (cmp == 0) { cmp = (continuationTokenRid.getDocument() - ResourceId.tryParse(tOrderByRowResult.getResourceId()).getRight().getDocument()); if (sortOrders.iterator().next().equals(SortOrder.Descending)) { cmp = -cmp; } return (cmp <= 0); } return true; }
class PageToItemTransformer<T extends Resource> implements Function<Flux<DocumentProducer<T>.DocumentProducerFeedResponse>, Flux<OrderByRowResult<T>>> { private final RequestChargeTracker tracker; private final Class<T> klass; private final Map<String, QueryMetrics> queryMetricsMap; private final Map<String, OrderByContinuationToken> targetRangeToOrderByContinuationTokenMap; private final List<SortOrder> sortOrders; private final List<ClientSideRequestStatistics> clientSideRequestStatisticsList; public PageToItemTransformer( Class<T> klass, RequestChargeTracker tracker, Map<String, QueryMetrics> queryMetricsMap, Map<String, OrderByContinuationToken> targetRangeToOrderByContinuationTokenMap, List<SortOrder> sortOrders, List<ClientSideRequestStatistics> clientSideRequestStatisticsList) { this.klass = klass; this.tracker = tracker; this.queryMetricsMap = queryMetricsMap; this.targetRangeToOrderByContinuationTokenMap = targetRangeToOrderByContinuationTokenMap; this.sortOrders = sortOrders; this.clientSideRequestStatisticsList = clientSideRequestStatisticsList; } @Override public Flux<OrderByRowResult<T>> apply(Flux<DocumentProducer<T>.DocumentProducerFeedResponse> source) { return source.flatMap(documentProducerFeedResponse -> { clientSideRequestStatisticsList.addAll( BridgeInternal.getClientSideRequestStatisticsList(documentProducerFeedResponse .pageResult.getCosmosDiagnostics())); for (String key : BridgeInternal.queryMetricsFromFeedResponse(documentProducerFeedResponse.pageResult) .keySet()) { if (queryMetricsMap.containsKey(key)) { QueryMetrics qm = BridgeInternal.queryMetricsFromFeedResponse(documentProducerFeedResponse.pageResult).get(key); queryMetricsMap.get(key).add(qm); } else { queryMetricsMap.put(key, BridgeInternal.queryMetricsFromFeedResponse(documentProducerFeedResponse.pageResult).get(key)); } } List<T> results = documentProducerFeedResponse.pageResult.getResults(); OrderByContinuationToken orderByContinuationToken = 
targetRangeToOrderByContinuationTokenMap.get("1"); if (orderByContinuationToken != null) { Pair<Boolean, ResourceId> booleanResourceIdPair = ResourceId.tryParse(orderByContinuationToken.getRid()); if (!booleanResourceIdPair.getLeft()) { return Flux.error() .collect(Collectors.toList()); } tracker.addCharge(documentProducerFeedResponse.pageResult.getRequestCharge()); Flux<T> x = Flux.fromIterable(results); return x.map(r -> new OrderByRowResult<T>( klass, ModelBridgeInternal.toJsonFromJsonSerializable(r), null, documentProducerFeedResponse.pageResult.getContinuationToken())); }, 1); } }
class PageToItemTransformer<T extends Resource> implements Function<Flux<DocumentProducer<T>.DocumentProducerFeedResponse>, Flux<OrderByRowResult<T>>> { private final RequestChargeTracker tracker; private final Class<T> klass; private final Map<String, QueryMetrics> queryMetricsMap; private final Map<FeedRangeEpkImpl, OrderByContinuationToken> targetRangeToOrderByContinuationTokenMap; private final List<SortOrder> sortOrders; private final List<ClientSideRequestStatistics> clientSideRequestStatisticsList; public PageToItemTransformer( Class<T> klass, RequestChargeTracker tracker, Map<String, QueryMetrics> queryMetricsMap, Map<FeedRangeEpkImpl, OrderByContinuationToken> targetRangeToOrderByContinuationTokenMap, List<SortOrder> sortOrders, List<ClientSideRequestStatistics> clientSideRequestStatisticsList) { this.klass = klass; this.tracker = tracker; this.queryMetricsMap = queryMetricsMap; this.targetRangeToOrderByContinuationTokenMap = targetRangeToOrderByContinuationTokenMap; this.sortOrders = sortOrders; this.clientSideRequestStatisticsList = clientSideRequestStatisticsList; } @Override public Flux<OrderByRowResult<T>> apply(Flux<DocumentProducer<T>.DocumentProducerFeedResponse> source) { return source.flatMap(documentProducerFeedResponse -> { clientSideRequestStatisticsList.addAll( BridgeInternal.getClientSideRequestStatisticsList(documentProducerFeedResponse .pageResult.getCosmosDiagnostics())); for (String key : BridgeInternal.queryMetricsFromFeedResponse(documentProducerFeedResponse.pageResult) .keySet()) { if (queryMetricsMap.containsKey(key)) { QueryMetrics qm = BridgeInternal.queryMetricsFromFeedResponse(documentProducerFeedResponse.pageResult).get(key); queryMetricsMap.get(key).add(qm); } else { queryMetricsMap.put(key, BridgeInternal.queryMetricsFromFeedResponse(documentProducerFeedResponse.pageResult).get(key)); } } List<T> results = documentProducerFeedResponse.pageResult.getResults(); OrderByContinuationToken orderByContinuationToken = 
targetRangeToOrderByContinuationTokenMap.get(documentProducerFeedResponse.sourceFeedRange); if (orderByContinuationToken != null) { Pair<Boolean, ResourceId> booleanResourceIdPair = ResourceId.tryParse(orderByContinuationToken.getRid()); if (!booleanResourceIdPair.getLeft()) { return Flux.error() .collect(Collectors.toList()); } tracker.addCharge(documentProducerFeedResponse.pageResult.getRequestCharge()); Flux<T> x = Flux.fromIterable(results); return x.map(r -> new OrderByRowResult<T>( klass, ModelBridgeInternal.toJsonFromJsonSerializable(r), documentProducerFeedResponse.sourceFeedRange, documentProducerFeedResponse.pageResult.getContinuationToken())); }, 1); } }
Yes. We need to delete some of the old partition-range-related stuff; for now I just added some placeholders. We also need to decide what to do with the client-side metrics/scheduling metrics — they might go away too, as they did in .NET.
/**
 * Folds the backend query metrics (QUERY_METRICS response header), the request charge,
 * and client-side retry/scheduling metrics into the page result, keyed by this
 * producer's feed range.
 */
void populatePartitionedQueryMetrics() {
    String queryMetricsDelimitedString =
        pageResult.getResponseHeaders().get(HttpConstants.HttpHeaders.QUERY_METRICS);
    if (!StringUtils.isEmpty(queryMetricsDelimitedString)) {
        // Append the request charge so it is parsed alongside the backend metrics.
        queryMetricsDelimitedString += String.format(Locale.ROOT, ";%s=%.2f",
            QueryMetricsConstants.RequestCharge, pageResult.getRequestCharge());
        // Key the metrics by the producer's feed range instead of the "999" test placeholder.
        final String feedRangeKey = feedRange.getRange().toString();
        ImmutablePair<String, SchedulingTimeSpan> schedulingTimeSpanMap =
            new ImmutablePair<>(feedRangeKey, fetchSchedulingMetrics.getElapsedTime());
        QueryMetrics qm = BridgeInternal.createQueryMetricsFromDelimitedStringAndClientSideMetrics(
            queryMetricsDelimitedString,
            new ClientSideMetrics(retries,
                pageResult.getRequestCharge(),
                fetchExecutionRangeAccumulator.getExecutionRanges(),
                Arrays.asList(schedulingTimeSpanMap)),
            pageResult.getActivityId());
        BridgeInternal.putQueryMetricsIntoMap(pageResult, feedRangeKey, qm);
    }
}
new ImmutablePair<>("999", fetchSchedulingMetrics.getElapsedTime());
// Folds the backend query metrics (QUERY_METRICS response header), the request charge,
// and client-side retry/scheduling metrics into the page result, keyed by this
// producer's feed range.
void populatePartitionedQueryMetrics() {
    String queryMetricsDelimitedString = pageResult.getResponseHeaders().get(HttpConstants.HttpHeaders.QUERY_METRICS);
    if (!StringUtils.isEmpty(queryMetricsDelimitedString)) {
        // Append the request charge so it is parsed alongside the backend metrics.
        queryMetricsDelimitedString += String.format(Locale.ROOT, ";%s=%.2f",
            QueryMetricsConstants.RequestCharge, pageResult.getRequestCharge());
        ImmutablePair<String, SchedulingTimeSpan> schedulingTimeSpanMap =
            new ImmutablePair<>(feedRange.getRange().toString(), fetchSchedulingMetrics.getElapsedTime());
        QueryMetrics qm = BridgeInternal.createQueryMetricsFromDelimitedStringAndClientSideMetrics(queryMetricsDelimitedString,
            new ClientSideMetrics(retries,
                pageResult.getRequestCharge(),
                fetchExecutionRangeAccumulator.getExecutionRanges(),
                Arrays.asList(schedulingTimeSpanMap)),
            pageResult.getActivityId());
        BridgeInternal.putQueryMetricsIntoMap(pageResult, feedRange.getRange().toString(), qm);
    }
}
class DocumentProducerFeedResponse {
    // The raw page returned by the backend for this producer's range.
    FeedResponse<T> pageResult;
    // The EPK feed range the page was served from.
    FeedRangeEpkImpl sourceFeedRange;

    // Wraps a page served from the enclosing producer's own feed range.
    DocumentProducerFeedResponse(FeedResponse<T> pageResult) {
        this.pageResult = pageResult;
        this.sourceFeedRange = DocumentProducer.this.feedRange;
        populatePartitionedQueryMetrics();
    }

    // Wraps a page served from an explicitly supplied feed range.
    DocumentProducerFeedResponse(FeedResponse<T> pageResult, FeedRange feedRange) {
        this.pageResult = pageResult;
        // Assumes callers always pass an EPK-based range -- TODO confirm.
        this.sourceFeedRange = (FeedRangeEpkImpl) feedRange;
        populatePartitionedQueryMetrics();
    }
}
class DocumentProducerFeedResponse {
    // The raw page returned by the backend for this producer's range.
    FeedResponse<T> pageResult;
    // The EPK feed range the page was served from.
    FeedRangeEpkImpl sourceFeedRange;

    // Wraps a page served from the enclosing producer's own feed range.
    DocumentProducerFeedResponse(FeedResponse<T> pageResult) {
        this.pageResult = pageResult;
        this.sourceFeedRange = DocumentProducer.this.feedRange;
        populatePartitionedQueryMetrics();
    }

    // Wraps a page served from an explicitly supplied feed range.
    DocumentProducerFeedResponse(FeedResponse<T> pageResult, FeedRange feedRange) {
        this.pageResult = pageResult;
        // Assumes callers always pass an EPK-based range -- TODO confirm.
        this.sourceFeedRange = (FeedRangeEpkImpl) feedRange;
        populatePartitionedQueryMetrics();
    }
}
format
// Requires authentication for every request, then applies the AAD B2C OIDC login configurer.
protected void configure(HttpSecurity http) throws Exception {
    http.authorizeRequests()
        .anyRequest().authenticated()
        .and()
        .apply(configurer);
}
.anyRequest().authenticated()
// Requires authentication for every request, then applies the AAD B2C OIDC login configurer.
protected void configure(HttpSecurity http) throws Exception {
    http.authorizeRequests()
        .anyRequest().authenticated()
        .and()
        .apply(configurer);
}
class DumbApp extends WebSecurityConfigurerAdapter {

    private final AADB2COidcLoginConfigurer configurer;

    public DumbApp(AADB2COidcLoginConfigurer configurer) {
        this.configurer = configurer;
    }

    // NOTE(review): @Override on a @GetMapping handler looks wrong --
    // WebSecurityConfigurerAdapter declares no index(); a configure(HttpSecurity)
    // override may have been elided here. Confirm against the full file.
    @Override
    @GetMapping(value = "/")
    public String index(Model model, OAuth2AuthenticationToken token) {
        initializeModel(model, token);
        return "index";
    }

    // Copies the authenticated user's authorities and attributes into the view model.
    private void initializeModel(Model model, OAuth2AuthenticationToken token) {
        if (token != null) {
            final OAuth2User user = token.getPrincipal();
            model.addAttribute("grant_type", user.getAuthorities());
            model.addAllAttributes(user.getAttributes());
        }
    }
}
class DumbApp extends WebSecurityConfigurerAdapter {

    private final AADB2COidcLoginConfigurer configurer;

    public DumbApp(AADB2COidcLoginConfigurer configurer) {
        this.configurer = configurer;
    }

    // NOTE(review): @Override on a @GetMapping handler looks wrong --
    // WebSecurityConfigurerAdapter declares no index(); a configure(HttpSecurity)
    // override may have been elided here. Confirm against the full file.
    @Override
    @GetMapping(value = "/")
    public String index(Model model, OAuth2AuthenticationToken token) {
        initializeModel(model, token);
        return "index";
    }

    // Copies the authenticated user's authorities and attributes into the view model.
    private void initializeModel(Model model, OAuth2AuthenticationToken token) {
        if (token != null) {
            final OAuth2User user = token.getPrincipal();
            model.addAttribute("grant_type", user.getAuthorities());
            model.addAllAttributes(user.getAttributes());
        }
    }
}
Putting `and()` on the next line would be better.
/**
 * Wires the B2C logout success handler, authorization request resolver, and
 * access token response client into the OAuth2 login flow.
 */
public void init(HttpSecurity http) throws Exception {
    // One chained-call per line; every and() sits on its own line for readability.
    http.logout()
        .logoutSuccessHandler(handler)
        .and()
        .oauth2Login()
        .authorizationEndpoint()
        .authorizationRequestResolver(resolver)
        .and()
        .tokenEndpoint()
        .accessTokenResponseClient(accessTokenResponseClient());
}
.authorizationRequestResolver(resolver).and()
// Wires the B2C logout success handler, authorization request resolver, and
// access token response client into the OAuth2 login flow.
public void init(HttpSecurity http) throws Exception {
    http.logout()
        .logoutSuccessHandler(handler)
        .and()
        .oauth2Login()
        .authorizationEndpoint()
        .authorizationRequestResolver(resolver)
        .and()
        .tokenEndpoint()
        .accessTokenResponseClient(accessTokenResponseClient());
}
class AADB2COidcLoginConfigurer extends AbstractHttpConfigurer<AADB2COidcLoginConfigurer, HttpSecurity> {

    private final AADB2CLogoutSuccessHandler handler;
    private final AADB2CAuthorizationRequestResolver resolver;

    // Creates the configurer with the B2C logout handler and authorization request resolver.
    public AADB2COidcLoginConfigurer(AADB2CLogoutSuccessHandler handler, AADB2CAuthorizationRequestResolver resolver) {
        this.handler = handler;
        this.resolver = resolver;
    }

    // NOTE(review): @Override looks wrong here -- AbstractHttpConfigurer declares no
    // accessTokenResponseClient(); an init(HttpSecurity) override may have been elided
    // between the annotation and this method. Confirm against the full file.
    @Override
    protected OAuth2AccessTokenResponseClient<OAuth2AuthorizationCodeGrantRequest> accessTokenResponseClient() {
        DefaultAuthorizationCodeTokenResponseClient result = new DefaultAuthorizationCodeTokenResponseClient();
        result.setRequestEntityConverter(
            new AADB2COAuth2AuthzCodeGrantRequestEntityConverter());
        return result;
    }
}
class AADB2COidcLoginConfigurer extends AbstractHttpConfigurer<AADB2COidcLoginConfigurer, HttpSecurity> {

    private final AADB2CLogoutSuccessHandler handler;
    private final AADB2CAuthorizationRequestResolver resolver;

    // Creates the configurer with the B2C logout handler and authorization request resolver.
    public AADB2COidcLoginConfigurer(AADB2CLogoutSuccessHandler handler, AADB2CAuthorizationRequestResolver resolver) {
        this.handler = handler;
        this.resolver = resolver;
    }

    // NOTE(review): @Override looks wrong here -- AbstractHttpConfigurer declares no
    // accessTokenResponseClient(); an init(HttpSecurity) override may have been elided
    // between the annotation and this method. Confirm against the full file.
    @Override
    protected OAuth2AccessTokenResponseClient<OAuth2AuthorizationCodeGrantRequest> accessTokenResponseClient() {
        DefaultAuthorizationCodeTokenResponseClient result = new DefaultAuthorizationCodeTokenResponseClient();
        result.setRequestEntityConverter(new AADB2COAuth2AuthorizationCodeGrantRequestEntityConverter());
        return result;
    }
}
Seems like it could fit on one line.
/**
 * Builds the token response client used for the authorization-code grant,
 * swapping in the B2C-specific request entity converter.
 */
protected OAuth2AccessTokenResponseClient<OAuth2AuthorizationCodeGrantRequest> accessTokenResponseClient() {
    final DefaultAuthorizationCodeTokenResponseClient result = new DefaultAuthorizationCodeTokenResponseClient();
    result.setRequestEntityConverter(new AADB2COAuth2AuthzCodeGrantRequestEntityConverter());
    return result;
}
new AADB2COAuth2AuthzCodeGrantRequestEntityConverter());
// Builds the token response client used for the authorization-code grant,
// swapping in the B2C-specific request entity converter.
protected OAuth2AccessTokenResponseClient<OAuth2AuthorizationCodeGrantRequest> accessTokenResponseClient() {
    DefaultAuthorizationCodeTokenResponseClient result = new DefaultAuthorizationCodeTokenResponseClient();
    result.setRequestEntityConverter(new AADB2COAuth2AuthorizationCodeGrantRequestEntityConverter());
    return result;
}
class AADB2COidcLoginConfigurer extends AbstractHttpConfigurer<AADB2COidcLoginConfigurer, HttpSecurity> {

    private final AADB2CLogoutSuccessHandler handler;
    private final AADB2CAuthorizationRequestResolver resolver;

    /**
     * Creates the configurer with the B2C logout handler and authorization request resolver.
     */
    public AADB2COidcLoginConfigurer(AADB2CLogoutSuccessHandler handler, AADB2CAuthorizationRequestResolver resolver) {
        this.handler = handler;
        this.resolver = resolver;
    }

    /**
     * Wires the B2C logout success handler, authorization request resolver, and
     * access token response client into the OAuth2 login flow.
     */
    @Override
    public void init(HttpSecurity http) throws Exception {
        // Every and() sits on its own line for readability.
        http.logout()
            .logoutSuccessHandler(handler)
            .and()
            .oauth2Login()
            .authorizationEndpoint()
            .authorizationRequestResolver(resolver)
            .and()
            .tokenEndpoint()
            .accessTokenResponseClient(accessTokenResponseClient());
    }
}
class AADB2COidcLoginConfigurer extends AbstractHttpConfigurer<AADB2COidcLoginConfigurer, HttpSecurity> {

    private final AADB2CLogoutSuccessHandler handler;
    private final AADB2CAuthorizationRequestResolver resolver;

    // Creates the configurer with the B2C logout handler and authorization request resolver.
    public AADB2COidcLoginConfigurer(AADB2CLogoutSuccessHandler handler, AADB2CAuthorizationRequestResolver resolver) {
        this.handler = handler;
        this.resolver = resolver;
    }

    // Wires the B2C logout success handler, authorization request resolver, and
    // access token response client into the OAuth2 login flow.
    @Override
    public void init(HttpSecurity http) throws Exception {
        http.logout()
            .logoutSuccessHandler(handler)
            .and()
            .oauth2Login()
            .authorizationEndpoint()
            .authorizationRequestResolver(resolver)
            .and()
            .tokenEndpoint()
            .accessTokenResponseClient(accessTokenResponseClient());
    }
}
We could remove the `isEmpty` check here; `contains` alone is sufficient, since it returns false for an empty list.
/**
 * Validates the configured properties after injection: either 'tenant' or 'baseUri'
 * must be set, and the primary sign-in user flow must not also appear in 'userFlows'.
 *
 * @throws AADB2CConfigurationException when the configuration is inconsistent.
 */
public void afterPropertiesSet() {
    if (StringUtils.isEmpty(tenant) && StringUtils.isEmpty(baseUri)) {
        throw new AADB2CConfigurationException("'tenant' and 'baseUri' at least configure one item.");
    }
    // contains(...) is false for an empty list, so no separate isEmpty() guard is needed.
    if (userFlows.contains(signInUserFlow)) {
        throw new AADB2CConfigurationException("Sign in user flow '" + signInUserFlow
            + "' does not need to be configured repeatedly.");
    }
}
if (!userFlows.isEmpty() && userFlows.contains(signInUserFlow)) {
/**
 * Validates the configured properties after injection: either 'tenant' or 'baseUri'
 * must be set, and 'loginFlow' must be a key of the 'user-flows' map.
 *
 * @throws AADB2CConfigurationException when the configuration is inconsistent.
 */
public void afterPropertiesSet() {
    if (StringUtils.isEmpty(tenant) && StringUtils.isEmpty(baseUri)) {
        throw new AADB2CConfigurationException("'tenant' and 'baseUri' at least configure one item.");
    }
    // containsKey is the idiomatic (and direct) form of keySet().contains(...).
    if (!userFlows.containsKey(loginFlow)) {
        throw new AADB2CConfigurationException("Sign in user flow key '" + loginFlow
            + "' is not in 'user-flows' map.");
    }
}
class AADB2CProperties implements InitializingBean { public static final String USER_FLOWS = "user-flows"; /** * We do not use ${@link String * as it's not real constant, which cannot be referenced in annotation. */ public static final String SIGN_IN_USER_FLOW = "sign-in-user-flow"; public static final String DEFAULT_LOGOUT_SUCCESS_URL = "http: public static final String PREFIX = "azure.activedirectory.b2c"; private static final String TENANT_NAME_PART_REGEX = "([A-Za-z0-9]+\\.)"; /** * The name of the b2c tenant. * @deprecated It's recommended to use 'baseUri' instead. */ @Deprecated private String tenant; /** * The application ID that registered under b2c tenant. */ @NotBlank(message = "client ID should not be blank") private String clientId; /** * The application secret that registered under b2c tenant. */ @NotBlank(message = "client secret should not be blank") private String clientSecret; @URL(message = "logout success should be valid URL") private String logoutSuccessUrl = DEFAULT_LOGOUT_SUCCESS_URL; private Map<String, Object> authenticateAdditionalParameters; /** * User name attribute name */ private String userNameAttributeName; /** * Telemetry data will be collected if true, or disable data collection. */ private boolean allowTelemetry = true; private String replyUrl = "{baseUrl}/login/oauth2/code/"; /** * AAD B2C endpoint base uri. */ @URL(message = "baseUri should be valid URL") private String baseUri; /** * The all user flows which is created under b2c tenant. 
*/ private List<String> userFlows = new ArrayList<>(); /** * Specify the primary sign in flow name */ @NotBlank(message = "The primary sign in flow name should not be blank.") private String signInUserFlow; @Override public String getBaseUri() { if (StringUtils.hasText(tenant) && StringUtils.isEmpty(baseUri)) { return String.format("https: } return baseUri; } public void setBaseUri(String baseUri) { this.baseUri = baseUri; } public void setTenant(String tenant) { this.tenant = tenant; } /** * Get tenant name for Telemetry * @return tenant name * @throws AADB2CConfigurationException resolve tenant name failed */ @DeprecatedConfigurationProperty( reason = "Configuration updated to baseUri", replacement = "azure.activedirectory.b2c.base-uri") public String getTenant() { if (StringUtils.hasText(baseUri)) { Matcher matcher = Pattern.compile(TENANT_NAME_PART_REGEX).matcher(baseUri); if (matcher.find()) { String matched = matcher.group(); return matched.substring(0, matched.length() - 1); } throw new AADB2CConfigurationException("Unable to resolve the 'tenant' name."); } return tenant; } public List<String> getUserFlows() { return userFlows; } public void setUserFlows(List<String> userFlows) { this.userFlows = userFlows; } public String getSignInUserFlow() { return signInUserFlow; } public void setSignInUserFlow(String signInUserFlow) { this.signInUserFlow = signInUserFlow; } public String getClientId() { return clientId; } public void setClientId(String clientId) { this.clientId = clientId; } public String getClientSecret() { return clientSecret; } public void setClientSecret(String clientSecret) { this.clientSecret = clientSecret; } public String getLogoutSuccessUrl() { return logoutSuccessUrl; } public void setLogoutSuccessUrl(String logoutSuccessUrl) { this.logoutSuccessUrl = logoutSuccessUrl; } public Map<String, Object> getAuthenticateAdditionalParameters() { return authenticateAdditionalParameters; } public void setAuthenticateAdditionalParameters(Map<String, 
Object> authenticateAdditionalParameters) { this.authenticateAdditionalParameters = authenticateAdditionalParameters; } public boolean isAllowTelemetry() { return allowTelemetry; } public void setAllowTelemetry(boolean allowTelemetry) { this.allowTelemetry = allowTelemetry; } public String getUserNameAttributeName() { return userNameAttributeName; } public void setUserNameAttributeName(String userNameAttributeName) { this.userNameAttributeName = userNameAttributeName; } public String getReplyUrl() { return replyUrl; } public void setReplyUrl(String replyUrl) { this.replyUrl = replyUrl; } }
class AADB2CProperties implements InitializingBean { public static final String DEFAULT_LOGOUT_SUCCESS_URL = "http: public static final String PREFIX = "azure.activedirectory.b2c"; private static final String TENANT_NAME_PART_REGEX = "([A-Za-z0-9]+\\.)"; /** * The default user flow key 'sign-up-or-sign-in'. */ protected static final String DEFAULT_KEY_SIGN_UP_OR_SIGN_IN = "sign-up-or-sign-in"; /** * The default user flow key 'password-reset'. */ protected static final String DEFAULT_KEY_PASSWORD_RESET = "password-reset"; /** * The name of the b2c tenant. * @deprecated It's recommended to use 'baseUri' instead. */ @Deprecated private String tenant; /** * The name of the b2c tenant id. */ private String tenantId; /** * App ID URI which might be used in the <code>"aud"</code> claim of an token. */ private String appIdUri; /** * Connection Timeout for the JWKSet Remote URL call. */ private int jwtConnectTimeout = RemoteJWKSet.DEFAULT_HTTP_CONNECT_TIMEOUT; /* milliseconds */ /** * Read Timeout for the JWKSet Remote URL call. */ private int jwtReadTimeout = RemoteJWKSet.DEFAULT_HTTP_READ_TIMEOUT; /* milliseconds */ /** * Size limit in Bytes of the JWKSet Remote URL call. */ private int jwtSizeLimit = RemoteJWKSet.DEFAULT_HTTP_SIZE_LIMIT; /* bytes */ /** * The application ID that registered under b2c tenant. */ @NotBlank(message = "client ID should not be blank") private String clientId; /** * The application secret that registered under b2c tenant. */ @NotBlank(message = "client secret should not be blank") private String clientSecret; @URL(message = "logout success should be valid URL") private String logoutSuccessUrl = DEFAULT_LOGOUT_SUCCESS_URL; private Map<String, Object> authenticateAdditionalParameters; /** * User name attribute name */ private String userNameAttributeName; /** * Telemetry data will be collected if true, or disable data collection. 
*/ private boolean allowTelemetry = true; private String replyUrl = "{baseUrl}/login/oauth2/code/"; /** * AAD B2C endpoint base uri. */ @URL(message = "baseUri should be valid URL") private String baseUri; /** * Specify the primary sign in flow key. */ private String loginFlow = DEFAULT_KEY_SIGN_UP_OR_SIGN_IN; private Map<String, String> userFlows = new HashMap<>(); @Override protected String getPasswordReset() { Optional<String> keyOptional = userFlows.keySet() .stream() .filter(key -> key.equalsIgnoreCase(DEFAULT_KEY_PASSWORD_RESET)) .findAny(); return keyOptional.isPresent() ? userFlows.get(keyOptional.get()) : null; } public String getBaseUri() { if (StringUtils.hasText(tenant) && StringUtils.isEmpty(baseUri)) { return String.format("https: } return baseUri; } public void setBaseUri(String baseUri) { this.baseUri = baseUri; } public void setTenant(String tenant) { this.tenant = tenant; } /** * Get tenant name for Telemetry * @return tenant name * @throws AADB2CConfigurationException resolve tenant name failed */ @DeprecatedConfigurationProperty( reason = "Configuration updated to baseUri", replacement = "azure.activedirectory.b2c.base-uri") public String getTenant() { if (StringUtils.hasText(baseUri)) { Matcher matcher = Pattern.compile(TENANT_NAME_PART_REGEX).matcher(baseUri); if (matcher.find()) { String matched = matcher.group(); return matched.substring(0, matched.length() - 1); } throw new AADB2CConfigurationException("Unable to resolve the 'tenant' name."); } return tenant; } public Map<String, String> getUserFlows() { return userFlows; } public void setUserFlows(Map<String, String> userFlows) { this.userFlows = userFlows; } public String getLoginFlow() { return loginFlow; } public void setLoginFlow(String loginFlow) { this.loginFlow = loginFlow; } public String getClientId() { return clientId; } public void setClientId(String clientId) { this.clientId = clientId; } public String getClientSecret() { return clientSecret; } public void 
setClientSecret(String clientSecret) { this.clientSecret = clientSecret; } public String getLogoutSuccessUrl() { return logoutSuccessUrl; } public void setLogoutSuccessUrl(String logoutSuccessUrl) { this.logoutSuccessUrl = logoutSuccessUrl; } public Map<String, Object> getAuthenticateAdditionalParameters() { return authenticateAdditionalParameters; } public void setAuthenticateAdditionalParameters(Map<String, Object> authenticateAdditionalParameters) { this.authenticateAdditionalParameters = authenticateAdditionalParameters; } public boolean isAllowTelemetry() { return allowTelemetry; } public void setAllowTelemetry(boolean allowTelemetry) { this.allowTelemetry = allowTelemetry; } public String getUserNameAttributeName() { return userNameAttributeName; } public void setUserNameAttributeName(String userNameAttributeName) { this.userNameAttributeName = userNameAttributeName; } public String getReplyUrl() { return replyUrl; } public void setReplyUrl(String replyUrl) { this.replyUrl = replyUrl; } public String getAppIdUri() { return appIdUri; } public void setAppIdUri(String appIdUri) { this.appIdUri = appIdUri; } public int getJwtConnectTimeout() { return jwtConnectTimeout; } public void setJwtConnectTimeout(int jwtConnectTimeout) { this.jwtConnectTimeout = jwtConnectTimeout; } public int getJwtReadTimeout() { return jwtReadTimeout; } public void setJwtReadTimeout(int jwtReadTimeout) { this.jwtReadTimeout = jwtReadTimeout; } public int getJwtSizeLimit() { return jwtSizeLimit; } public void setJwtSizeLimit(int jwtSizeLimit) { this.jwtSizeLimit = jwtSizeLimit; } public String getTenantId() { return tenantId; } public void setTenantId(String tenantId) { this.tenantId = tenantId; } }
Use `logger.logExceptionAsError()` when throwing, so the failure is also recorded in the client logs (Azure SDK convention).
/**
 * Creates a new instance by parsing the {@code connectionString} into its components.
 *
 * @param connectionString The connection string to the Event Hub instance.
 * @throws NullPointerException if {@code connectionString} is null.
 * @throws IllegalArgumentException if {@code connectionString} is an empty string or has an invalid format.
 */
public ConnectionStringProperties(String connectionString) {
    Objects.requireNonNull(connectionString, "'connectionString' cannot be null.");
    if (connectionString.isEmpty()) {
        // Log-and-throw keeps parsing failures visible in client logs (matches the rest of this class).
        throw logger.logExceptionAsError(
            new IllegalArgumentException("'connectionString' cannot be an empty string."));
    }

    final String[] tokenValuePairs = connectionString.split(TOKEN_VALUE_PAIR_DELIMITER);
    URI endpoint = null;
    String entityPath = null;
    String sharedAccessKeyName = null;
    String sharedAccessKeyValue = null;
    String sharedAccessSignature = null;

    for (String tokenValuePair : tokenValuePairs) {
        // Split on the first separator only; SAS values themselves may contain '=' characters.
        final String[] pair = tokenValuePair.split(TOKEN_VALUE_SEPARATOR, 2);
        if (pair.length != 2) {
            throw logger.logExceptionAsError(new IllegalArgumentException(String.format(
                Locale.US, "Connection string has invalid key value pair: %s", tokenValuePair)));
        }

        final String key = pair[0].trim();
        final String value = pair[1].trim();

        if (key.equalsIgnoreCase(ENDPOINT)) {
            final String endpointUri = validateAndUpdateDefaultScheme(value);
            try {
                endpoint = new URI(endpointUri);
            } catch (URISyntaxException e) {
                throw logger.logExceptionAsError(new IllegalArgumentException(
                    String.format(Locale.US, "Invalid endpoint: %s", tokenValuePair), e));
            }
        } else if (key.equalsIgnoreCase(SHARED_ACCESS_KEY_NAME)) {
            sharedAccessKeyName = value;
        } else if (key.equalsIgnoreCase(SHARED_ACCESS_KEY)) {
            sharedAccessKeyValue = value;
        } else if (key.equalsIgnoreCase(ENTITY_PATH)) {
            entityPath = value;
        } else if (key.equalsIgnoreCase(SHARED_ACCESS_SIGNATURE)
            && value.toLowerCase(Locale.ROOT).startsWith(SAS_VALUE_PREFIX)) {
            sharedAccessSignature = value;
        } else {
            throw logger.logExceptionAsError(new IllegalArgumentException(
                String.format(Locale.US, "Illegal connection string parameter name: %s", key)));
        }
    }

    // Shared-key credentials and a SAS are mutually exclusive; exactly one complete form is required.
    final boolean includesSharedKey = sharedAccessKeyName != null || sharedAccessKeyValue != null;
    final boolean hasSharedKeyAndValue = sharedAccessKeyName != null && sharedAccessKeyValue != null;
    final boolean includesSharedAccessSignature = sharedAccessSignature != null;
    if (endpoint == null
        || (includesSharedKey && includesSharedAccessSignature)
        || (!hasSharedKeyAndValue && !includesSharedAccessSignature)) {
        throw logger.logExceptionAsError(new IllegalArgumentException(ERROR_MESSAGE_FORMAT));
    }

    this.endpoint = endpoint;
    this.entityPath = entityPath;
    this.sharedAccessKeyName = sharedAccessKeyName;
    this.sharedAccessKey = sharedAccessKeyValue;
    this.sharedAccessSignature = sharedAccessSignature;
}
throw new IllegalArgumentException(ERROR_MESSAGE_FORMAT);
/**
 * Creates a new instance by parsing the {@code connectionString} into its components.
 *
 * @param connectionString The connection string to the Event Hub instance.
 * @throws NullPointerException if {@code connectionString} is null.
 * @throws IllegalArgumentException if {@code connectionString} is an empty string or has an invalid format.
 */
public ConnectionStringProperties(String connectionString) {
    Objects.requireNonNull(connectionString, "'connectionString' cannot be null.");
    if (connectionString.isEmpty()) {
        // Every argument failure goes through the logger so it is visible in client logs,
        // consistent with the final validation throw below.
        throw logger.logExceptionAsError(
            new IllegalArgumentException("'connectionString' cannot be an empty string."));
    }

    final String[] tokenValuePairs = connectionString.split(TOKEN_VALUE_PAIR_DELIMITER);
    URI endpoint = null;
    String entityPath = null;
    String sharedAccessKeyName = null;
    String sharedAccessKeyValue = null;
    String sharedAccessSignature = null;

    for (String tokenValuePair : tokenValuePairs) {
        // Split on the first separator only; SAS values themselves may contain '=' characters.
        final String[] pair = tokenValuePair.split(TOKEN_VALUE_SEPARATOR, 2);
        if (pair.length != 2) {
            throw logger.logExceptionAsError(new IllegalArgumentException(String.format(
                Locale.US, "Connection string has invalid key value pair: %s", tokenValuePair)));
        }

        final String key = pair[0].trim();
        final String value = pair[1].trim();

        if (key.equalsIgnoreCase(ENDPOINT)) {
            final String endpointUri = validateAndUpdateDefaultScheme(value);
            try {
                endpoint = new URI(endpointUri);
            } catch (URISyntaxException e) {
                throw logger.logExceptionAsError(new IllegalArgumentException(
                    String.format(Locale.US, "Invalid endpoint: %s", tokenValuePair), e));
            }
        } else if (key.equalsIgnoreCase(SHARED_ACCESS_KEY_NAME)) {
            sharedAccessKeyName = value;
        } else if (key.equalsIgnoreCase(SHARED_ACCESS_KEY)) {
            sharedAccessKeyValue = value;
        } else if (key.equalsIgnoreCase(ENTITY_PATH)) {
            entityPath = value;
        } else if (key.equalsIgnoreCase(SHARED_ACCESS_SIGNATURE)
            && value.toLowerCase(Locale.ROOT).startsWith(SAS_VALUE_PREFIX)) {
            sharedAccessSignature = value;
        } else {
            throw logger.logExceptionAsError(new IllegalArgumentException(
                String.format(Locale.US, "Illegal connection string parameter name: %s", key)));
        }
    }

    // Shared-key credentials and a SAS are mutually exclusive; exactly one complete form is required.
    final boolean includesSharedKey = sharedAccessKeyName != null || sharedAccessKeyValue != null;
    final boolean hasSharedKeyAndValue = sharedAccessKeyName != null && sharedAccessKeyValue != null;
    final boolean includesSharedAccessSignature = sharedAccessSignature != null;
    if (endpoint == null
        || (includesSharedKey && includesSharedAccessSignature)
        || (!hasSharedKeyAndValue && !includesSharedAccessSignature)) {
        throw logger.logExceptionAsError(new IllegalArgumentException(ERROR_MESSAGE_FORMAT));
    }

    this.endpoint = endpoint;
    this.entityPath = entityPath;
    this.sharedAccessKeyName = sharedAccessKeyName;
    this.sharedAccessKey = sharedAccessKeyValue;
    this.sharedAccessSignature = sharedAccessSignature;
}
class ConnectionStringProperties { private final ClientLogger logger = new ClientLogger(ConnectionStringProperties.class); private static final String TOKEN_VALUE_SEPARATOR = "="; private static final String ENDPOINT_SCHEME_SB_PREFIX = "sb: private static final String ENDPOINT_SCHEME_HTTP_PREFIX = "http: private static final String ENDPOINT_SCHEME_HTTPS_PREFIX = "https: private static final String TOKEN_VALUE_PAIR_DELIMITER = ";"; private static final String ENDPOINT = "Endpoint"; private static final String SHARED_ACCESS_KEY_NAME = "SharedAccessKeyName"; private static final String SHARED_ACCESS_KEY = "SharedAccessKey"; private static final String SHARED_ACCESS_SIGNATURE = "SharedAccessSignature"; private static final String SAS_VALUE_PREFIX = "sharedaccesssignature "; private static final String ENTITY_PATH = "EntityPath"; private static final String CONNECTION_STRING_WITH_ACCESS_KEY = "Endpoint={endpoint};" + "SharedAccessKeyName={sharedAccessKeyName};SharedAccessKey={sharedAccessKey};EntityPath={entityPath}"; private static final String CONNECTION_STRING_WITH_SAS = "Endpoint={endpoint};SharedAccessSignature=" + "SharedAccessSignature {sharedAccessSignature};EntityPath={entityPath}"; private static final String ERROR_MESSAGE_FORMAT = "Could not parse 'connectionString'. Expected format: " + CONNECTION_STRING_WITH_ACCESS_KEY + " or " + CONNECTION_STRING_WITH_SAS + "."; private final URI endpoint; private final String entityPath; private final String sharedAccessKeyName; private final String sharedAccessKey; private final String sharedAccessSignature; /** * Creates a new instance by parsing the {@code connectionString} into its components. * @param connectionString The connection string to the Event Hub instance. * * @throws NullPointerException if {@code connectionString} is null. * @throws IllegalArgumentException if {@code connectionString} is an empty string or the connection string has * an invalid format. 
*/ /** * Gets the endpoint to be used for connecting to the AMQP message broker. * @return The endpoint address, including protocol, from the connection string. */ public URI getEndpoint() { return endpoint; } /** * Gets the entity path to connect to in the message broker. * @return The entity path to connect to in the message broker. */ public String getEntityPath() { return entityPath; } /** * Gets the name of the shared access key, either for the Event Hubs namespace or the Event Hub instance. * @return The name of the shared access key. */ public String getSharedAccessKeyName() { return sharedAccessKeyName; } /** * The value of the shared access key, either for the Event Hubs namespace or the Event Hub. * @return The value of the shared access key. */ public String getSharedAccessKey() { return sharedAccessKey; } /** * The value of the shared access signature, if the connection string used to create this instance included the * shared access signature component. * @return The shared access signature value, if included in the connection string. */ public String getSharedAccessSignature() { return sharedAccessSignature; } /* * The function checks for pre existing scheme of "sb: * in endpoint, it will set the default scheme to "sb: */ private String validateAndUpdateDefaultScheme(final String endpoint) { if (CoreUtils.isNullOrEmpty(endpoint)) { throw logger.logExceptionAsError(new IllegalArgumentException( "'Endpoint' must be provided in 'connectionString'.")); } String updatedEndpoint = endpoint.trim(); final String endpointLowerCase = endpoint.toLowerCase(Locale.getDefault()); if (!endpointLowerCase.startsWith(ENDPOINT_SCHEME_SB_PREFIX) && !endpointLowerCase.startsWith(ENDPOINT_SCHEME_HTTP_PREFIX) && !endpointLowerCase.startsWith(ENDPOINT_SCHEME_HTTPS_PREFIX)) { updatedEndpoint = ENDPOINT_SCHEME_SB_PREFIX + endpoint; } return updatedEndpoint; } }
class ConnectionStringProperties { private final ClientLogger logger = new ClientLogger(ConnectionStringProperties.class); private static final String TOKEN_VALUE_SEPARATOR = "="; private static final String ENDPOINT_SCHEME_SB_PREFIX = "sb: private static final String ENDPOINT_SCHEME_HTTP_PREFIX = "http: private static final String ENDPOINT_SCHEME_HTTPS_PREFIX = "https: private static final String TOKEN_VALUE_PAIR_DELIMITER = ";"; private static final String ENDPOINT = "Endpoint"; private static final String SHARED_ACCESS_KEY_NAME = "SharedAccessKeyName"; private static final String SHARED_ACCESS_KEY = "SharedAccessKey"; private static final String SHARED_ACCESS_SIGNATURE = "SharedAccessSignature"; private static final String SAS_VALUE_PREFIX = "sharedaccesssignature "; private static final String ENTITY_PATH = "EntityPath"; private static final String CONNECTION_STRING_WITH_ACCESS_KEY = "Endpoint={endpoint};" + "SharedAccessKeyName={sharedAccessKeyName};SharedAccessKey={sharedAccessKey};EntityPath={entityPath}"; private static final String CONNECTION_STRING_WITH_SAS = "Endpoint={endpoint};SharedAccessSignature=" + "SharedAccessSignature {sharedAccessSignature};EntityPath={entityPath}"; private static final String ERROR_MESSAGE_FORMAT = String.format(Locale.US, "Could not parse 'connectionString'. Expected format: %s or %s.", CONNECTION_STRING_WITH_ACCESS_KEY, CONNECTION_STRING_WITH_SAS); private final URI endpoint; private final String entityPath; private final String sharedAccessKeyName; private final String sharedAccessKey; private final String sharedAccessSignature; /** * Creates a new instance by parsing the {@code connectionString} into its components. * @param connectionString The connection string to the Event Hub instance. * * @throws NullPointerException if {@code connectionString} is null. * @throws IllegalArgumentException if {@code connectionString} is an empty string or the connection string has * an invalid format. 
*/ /** * Gets the endpoint to be used for connecting to the AMQP message broker. * @return The endpoint address, including protocol, from the connection string. */ public URI getEndpoint() { return endpoint; } /** * Gets the entity path to connect to in the message broker. * @return The entity path to connect to in the message broker. */ public String getEntityPath() { return entityPath; } /** * Gets the name of the shared access key, either for the Event Hubs namespace or the Event Hub instance. * @return The name of the shared access key. */ public String getSharedAccessKeyName() { return sharedAccessKeyName; } /** * The value of the shared access key, either for the Event Hubs namespace or the Event Hub. * @return The value of the shared access key. */ public String getSharedAccessKey() { return sharedAccessKey; } /** * The value of the shared access signature, if the connection string used to create this instance included the * shared access signature component. * @return The shared access signature value, if included in the connection string. */ public String getSharedAccessSignature() { return sharedAccessSignature; } /* * The function checks for pre existing scheme of "sb: * in endpoint, it will set the default scheme to "sb: */ private String validateAndUpdateDefaultScheme(final String endpoint) { if (CoreUtils.isNullOrEmpty(endpoint)) { throw logger.logExceptionAsError(new IllegalArgumentException( "'Endpoint' must be provided in 'connectionString'.")); } final String endpointLowerCase = endpoint.trim().toLowerCase(Locale.ROOT); if (!endpointLowerCase.startsWith(ENDPOINT_SCHEME_SB_PREFIX) && !endpointLowerCase.startsWith(ENDPOINT_SCHEME_HTTP_PREFIX) && !endpointLowerCase.startsWith(ENDPOINT_SCHEME_HTTPS_PREFIX)) { return ENDPOINT_SCHEME_SB_PREFIX + endpoint; } return endpointLowerCase; } }
This should use `updatedEndpoint` instead of `endpoint`, so the trimmed value is the one that is lower-cased, checked, and prefixed.
/*
 * Ensures the endpoint carries a scheme: if the trimmed endpoint does not already start with the
 * sb, http, or https prefix, the default sb prefix is prepended. The returned value is trimmed but
 * otherwise preserves the caller's casing (URI paths can be case-sensitive).
 *
 * Throws IllegalArgumentException (logged) if the endpoint is null or empty.
 */
private String validateAndUpdateDefaultScheme(final String endpoint) {
    if (CoreUtils.isNullOrEmpty(endpoint)) {
        throw logger.logExceptionAsError(new IllegalArgumentException(
            "'Endpoint' must be provided in 'connectionString'."));
    }

    final String trimmedEndpoint = endpoint.trim();
    // Locale.ROOT: scheme comparison must be locale-invariant (e.g. Turkish dotless-i rules
    // would break a default-locale lower-casing). The original checked the untrimmed value
    // and prepended the prefix to the untrimmed endpoint, discarding the trim.
    final String lowerCaseEndpoint = trimmedEndpoint.toLowerCase(Locale.ROOT);
    if (!lowerCaseEndpoint.startsWith(ENDPOINT_SCHEME_SB_PREFIX)
        && !lowerCaseEndpoint.startsWith(ENDPOINT_SCHEME_HTTP_PREFIX)
        && !lowerCaseEndpoint.startsWith(ENDPOINT_SCHEME_HTTPS_PREFIX)) {
        return ENDPOINT_SCHEME_SB_PREFIX + trimmedEndpoint;
    }
    return trimmedEndpoint;
}
final String endpointLowerCase = endpoint.toLowerCase(Locale.getDefault());
/*
 * Ensures the endpoint carries a scheme: if the trimmed endpoint does not already start with the
 * sb, http, or https prefix, the default sb prefix is prepended.
 *
 * Fix: the previous version returned the fully lower-cased endpoint, silently changing the URI's
 * casing (paths can be case-sensitive), and prepended the prefix to the untrimmed input. Return
 * the trimmed, original-case value instead; lower-casing is used only for the scheme check.
 *
 * Throws IllegalArgumentException (logged) if the endpoint is null or empty.
 */
private String validateAndUpdateDefaultScheme(final String endpoint) {
    if (CoreUtils.isNullOrEmpty(endpoint)) {
        throw logger.logExceptionAsError(new IllegalArgumentException(
            "'Endpoint' must be provided in 'connectionString'."));
    }

    final String trimmedEndpoint = endpoint.trim();
    // Locale.ROOT keeps the scheme comparison locale-invariant.
    final String lowerCaseEndpoint = trimmedEndpoint.toLowerCase(Locale.ROOT);
    if (!lowerCaseEndpoint.startsWith(ENDPOINT_SCHEME_SB_PREFIX)
        && !lowerCaseEndpoint.startsWith(ENDPOINT_SCHEME_HTTP_PREFIX)
        && !lowerCaseEndpoint.startsWith(ENDPOINT_SCHEME_HTTPS_PREFIX)) {
        return ENDPOINT_SCHEME_SB_PREFIX + trimmedEndpoint;
    }
    return trimmedEndpoint;
}
class ConnectionStringProperties { private final ClientLogger logger = new ClientLogger(ConnectionStringProperties.class); private static final String TOKEN_VALUE_SEPARATOR = "="; private static final String ENDPOINT_SCHEME_SB_PREFIX = "sb: private static final String ENDPOINT_SCHEME_HTTP_PREFIX = "http: private static final String ENDPOINT_SCHEME_HTTPS_PREFIX = "https: private static final String TOKEN_VALUE_PAIR_DELIMITER = ";"; private static final String ENDPOINT = "Endpoint"; private static final String SHARED_ACCESS_KEY_NAME = "SharedAccessKeyName"; private static final String SHARED_ACCESS_KEY = "SharedAccessKey"; private static final String SHARED_ACCESS_SIGNATURE = "SharedAccessSignature"; private static final String SAS_VALUE_PREFIX = "sharedaccesssignature "; private static final String ENTITY_PATH = "EntityPath"; private static final String CONNECTION_STRING_WITH_ACCESS_KEY = "Endpoint={endpoint};" + "SharedAccessKeyName={sharedAccessKeyName};SharedAccessKey={sharedAccessKey};EntityPath={entityPath}"; private static final String CONNECTION_STRING_WITH_SAS = "Endpoint={endpoint};SharedAccessSignature=" + "SharedAccessSignature {sharedAccessSignature};EntityPath={entityPath}"; private static final String ERROR_MESSAGE_FORMAT = "Could not parse 'connectionString'. Expected format: " + CONNECTION_STRING_WITH_ACCESS_KEY + " or " + CONNECTION_STRING_WITH_SAS + "."; private final URI endpoint; private final String entityPath; private final String sharedAccessKeyName; private final String sharedAccessKey; private final String sharedAccessSignature; /** * Creates a new instance by parsing the {@code connectionString} into its components. * @param connectionString The connection string to the Event Hub instance. * * @throws NullPointerException if {@code connectionString} is null. * @throws IllegalArgumentException if {@code connectionString} is an empty string or the connection string has * an invalid format. 
*/ public ConnectionStringProperties(String connectionString) { Objects.requireNonNull(connectionString, "'connectionString' cannot be null."); if (connectionString.isEmpty()) { throw new IllegalArgumentException("'connectionString' cannot be an empty string."); } final String[] tokenValuePairs = connectionString.split(TOKEN_VALUE_PAIR_DELIMITER); URI endpoint = null; String entityPath = null; String sharedAccessKeyName = null; String sharedAccessKeyValue = null; String sharedAccessSignature = null; for (String tokenValuePair : tokenValuePairs) { final String[] pair = tokenValuePair.split(TOKEN_VALUE_SEPARATOR, 2); if (pair.length != 2) { throw new IllegalArgumentException(String.format( Locale.US, "Connection string has invalid key value pair: %s", tokenValuePair)); } final String key = pair[0].trim(); final String value = pair[1].trim(); if (key.equalsIgnoreCase(ENDPOINT)) { final String endpointUri = validateAndUpdateDefaultScheme(value); try { endpoint = new URI(endpointUri); } catch (URISyntaxException e) { throw new IllegalArgumentException( String.format(Locale.US, "Invalid endpoint: %s", tokenValuePair), e); } } else if (key.equalsIgnoreCase(SHARED_ACCESS_KEY_NAME)) { sharedAccessKeyName = value; } else if (key.equalsIgnoreCase(SHARED_ACCESS_KEY)) { sharedAccessKeyValue = value; } else if (key.equalsIgnoreCase(ENTITY_PATH)) { entityPath = value; } else if (key.equalsIgnoreCase(SHARED_ACCESS_SIGNATURE) && value.toLowerCase(Locale.ROOT).startsWith(SAS_VALUE_PREFIX)) { sharedAccessSignature = value; } else { throw new IllegalArgumentException( String.format(Locale.US, "Illegal connection string parameter name: %s", key)); } } boolean includesSharedKey = sharedAccessKeyName != null || sharedAccessKeyValue != null; boolean hasSharedKeyAndValue = sharedAccessKeyName != null && sharedAccessKeyValue != null; boolean includesSharedAccessSignature = sharedAccessSignature != null; if (endpoint == null || (includesSharedKey && includesSharedAccessSignature) || 
(!hasSharedKeyAndValue && !includesSharedAccessSignature)) { throw new IllegalArgumentException(ERROR_MESSAGE_FORMAT); } this.endpoint = endpoint; this.entityPath = entityPath; this.sharedAccessKeyName = sharedAccessKeyName; this.sharedAccessKey = sharedAccessKeyValue; this.sharedAccessSignature = sharedAccessSignature; } /** * Gets the endpoint to be used for connecting to the AMQP message broker. * @return The endpoint address, including protocol, from the connection string. */ public URI getEndpoint() { return endpoint; } /** * Gets the entity path to connect to in the message broker. * @return The entity path to connect to in the message broker. */ public String getEntityPath() { return entityPath; } /** * Gets the name of the shared access key, either for the Event Hubs namespace or the Event Hub instance. * @return The name of the shared access key. */ public String getSharedAccessKeyName() { return sharedAccessKeyName; } /** * The value of the shared access key, either for the Event Hubs namespace or the Event Hub. * @return The value of the shared access key. */ public String getSharedAccessKey() { return sharedAccessKey; } /** * The value of the shared access signature, if the connection string used to create this instance included the * shared access signature component. * @return The shared access signature value, if included in the connection string. */ public String getSharedAccessSignature() { return sharedAccessSignature; } /* * The function checks for pre existing scheme of "sb: * in endpoint, it will set the default scheme to "sb: */ }
class ConnectionStringProperties { private final ClientLogger logger = new ClientLogger(ConnectionStringProperties.class); private static final String TOKEN_VALUE_SEPARATOR = "="; private static final String ENDPOINT_SCHEME_SB_PREFIX = "sb: private static final String ENDPOINT_SCHEME_HTTP_PREFIX = "http: private static final String ENDPOINT_SCHEME_HTTPS_PREFIX = "https: private static final String TOKEN_VALUE_PAIR_DELIMITER = ";"; private static final String ENDPOINT = "Endpoint"; private static final String SHARED_ACCESS_KEY_NAME = "SharedAccessKeyName"; private static final String SHARED_ACCESS_KEY = "SharedAccessKey"; private static final String SHARED_ACCESS_SIGNATURE = "SharedAccessSignature"; private static final String SAS_VALUE_PREFIX = "sharedaccesssignature "; private static final String ENTITY_PATH = "EntityPath"; private static final String CONNECTION_STRING_WITH_ACCESS_KEY = "Endpoint={endpoint};" + "SharedAccessKeyName={sharedAccessKeyName};SharedAccessKey={sharedAccessKey};EntityPath={entityPath}"; private static final String CONNECTION_STRING_WITH_SAS = "Endpoint={endpoint};SharedAccessSignature=" + "SharedAccessSignature {sharedAccessSignature};EntityPath={entityPath}"; private static final String ERROR_MESSAGE_FORMAT = String.format(Locale.US, "Could not parse 'connectionString'. Expected format: %s or %s.", CONNECTION_STRING_WITH_ACCESS_KEY, CONNECTION_STRING_WITH_SAS); private final URI endpoint; private final String entityPath; private final String sharedAccessKeyName; private final String sharedAccessKey; private final String sharedAccessSignature; /** * Creates a new instance by parsing the {@code connectionString} into its components. * @param connectionString The connection string to the Event Hub instance. * * @throws NullPointerException if {@code connectionString} is null. * @throws IllegalArgumentException if {@code connectionString} is an empty string or the connection string has * an invalid format. 
*/ public ConnectionStringProperties(String connectionString) { Objects.requireNonNull(connectionString, "'connectionString' cannot be null."); if (connectionString.isEmpty()) { throw new IllegalArgumentException("'connectionString' cannot be an empty string."); } final String[] tokenValuePairs = connectionString.split(TOKEN_VALUE_PAIR_DELIMITER); URI endpoint = null; String entityPath = null; String sharedAccessKeyName = null; String sharedAccessKeyValue = null; String sharedAccessSignature = null; for (String tokenValuePair : tokenValuePairs) { final String[] pair = tokenValuePair.split(TOKEN_VALUE_SEPARATOR, 2); if (pair.length != 2) { throw new IllegalArgumentException(String.format( Locale.US, "Connection string has invalid key value pair: %s", tokenValuePair)); } final String key = pair[0].trim(); final String value = pair[1].trim(); if (key.equalsIgnoreCase(ENDPOINT)) { final String endpointUri = validateAndUpdateDefaultScheme(value); try { endpoint = new URI(endpointUri); } catch (URISyntaxException e) { throw new IllegalArgumentException( String.format(Locale.US, "Invalid endpoint: %s", tokenValuePair), e); } } else if (key.equalsIgnoreCase(SHARED_ACCESS_KEY_NAME)) { sharedAccessKeyName = value; } else if (key.equalsIgnoreCase(SHARED_ACCESS_KEY)) { sharedAccessKeyValue = value; } else if (key.equalsIgnoreCase(ENTITY_PATH)) { entityPath = value; } else if (key.equalsIgnoreCase(SHARED_ACCESS_SIGNATURE) && value.toLowerCase(Locale.ROOT).startsWith(SAS_VALUE_PREFIX)) { sharedAccessSignature = value; } else { throw new IllegalArgumentException( String.format(Locale.US, "Illegal connection string parameter name: %s", key)); } } boolean includesSharedKey = sharedAccessKeyName != null || sharedAccessKeyValue != null; boolean hasSharedKeyAndValue = sharedAccessKeyName != null && sharedAccessKeyValue != null; boolean includesSharedAccessSignature = sharedAccessSignature != null; if (endpoint == null || (includesSharedKey && includesSharedAccessSignature) || 
(!hasSharedKeyAndValue && !includesSharedAccessSignature)) { throw logger.logExceptionAsError(new IllegalArgumentException(ERROR_MESSAGE_FORMAT)); } this.endpoint = endpoint; this.entityPath = entityPath; this.sharedAccessKeyName = sharedAccessKeyName; this.sharedAccessKey = sharedAccessKeyValue; this.sharedAccessSignature = sharedAccessSignature; } /** * Gets the endpoint to be used for connecting to the AMQP message broker. * @return The endpoint address, including protocol, from the connection string. */ public URI getEndpoint() { return endpoint; } /** * Gets the entity path to connect to in the message broker. * @return The entity path to connect to in the message broker. */ public String getEntityPath() { return entityPath; } /** * Gets the name of the shared access key, either for the Event Hubs namespace or the Event Hub instance. * @return The name of the shared access key. */ public String getSharedAccessKeyName() { return sharedAccessKeyName; } /** * The value of the shared access key, either for the Event Hubs namespace or the Event Hub. * @return The value of the shared access key. */ public String getSharedAccessKey() { return sharedAccessKey; } /** * The value of the shared access signature, if the connection string used to create this instance included the * shared access signature component. * @return The shared access signature value, if included in the connection string. */ public String getSharedAccessSignature() { return sharedAccessSignature; } /* * The function checks for pre existing scheme of "sb: * in endpoint, it will set the default scheme to "sb: */ }
Is there a reason not to chain these calls rather than creating a separate local variable? Also, avoid `Locale.getDefault()`: these endpoints are culture-invariant, and the user's machine may be set to a locale (e.g. Turkish) where lower-casing behaves differently — use `Locale.ROOT` instead.
/*
 * Ensures the endpoint carries a scheme: if the trimmed endpoint does not already start with the
 * sb, http, or https prefix, the default sb prefix is prepended. The returned value is trimmed but
 * otherwise preserves the caller's casing.
 *
 * Fix: the previous version prepended the prefix to the untrimmed `endpoint` instead of
 * `updatedEndpoint` (discarding the trim) and used locale-sensitive Locale.getDefault() for the
 * lower-casing; schemes are culture-invariant, so Locale.ROOT is used.
 *
 * Throws IllegalArgumentException (logged) if the endpoint is null or empty.
 */
private String validateAndUpdateDefaultScheme(final String endpoint) {
    if (CoreUtils.isNullOrEmpty(endpoint)) {
        throw logger.logExceptionAsError(new IllegalArgumentException(
            "'Endpoint' must be provided in 'connectionString'."));
    }

    final String trimmedEndpoint = endpoint.trim();
    final String lowerCaseEndpoint = trimmedEndpoint.toLowerCase(Locale.ROOT);
    if (!lowerCaseEndpoint.startsWith(ENDPOINT_SCHEME_SB_PREFIX)
        && !lowerCaseEndpoint.startsWith(ENDPOINT_SCHEME_HTTP_PREFIX)
        && !lowerCaseEndpoint.startsWith(ENDPOINT_SCHEME_HTTPS_PREFIX)) {
        return ENDPOINT_SCHEME_SB_PREFIX + trimmedEndpoint;
    }
    return trimmedEndpoint;
}
String updatedEndpoint = endpoint.trim();
/*
 * Ensures the endpoint carries a scheme: if the trimmed endpoint does not already start with the
 * sb, http, or https prefix, the default sb prefix is prepended.
 *
 * Fix: the previous version returned the fully lower-cased endpoint, silently changing the URI's
 * casing (paths can be case-sensitive), and prepended the prefix to the untrimmed input. Return
 * the trimmed, original-case value instead; lower-casing is used only for the scheme check.
 *
 * Throws IllegalArgumentException (logged) if the endpoint is null or empty.
 */
private String validateAndUpdateDefaultScheme(final String endpoint) {
    if (CoreUtils.isNullOrEmpty(endpoint)) {
        throw logger.logExceptionAsError(new IllegalArgumentException(
            "'Endpoint' must be provided in 'connectionString'."));
    }

    final String trimmedEndpoint = endpoint.trim();
    // Locale.ROOT keeps the scheme comparison locale-invariant.
    final String lowerCaseEndpoint = trimmedEndpoint.toLowerCase(Locale.ROOT);
    if (!lowerCaseEndpoint.startsWith(ENDPOINT_SCHEME_SB_PREFIX)
        && !lowerCaseEndpoint.startsWith(ENDPOINT_SCHEME_HTTP_PREFIX)
        && !lowerCaseEndpoint.startsWith(ENDPOINT_SCHEME_HTTPS_PREFIX)) {
        return ENDPOINT_SCHEME_SB_PREFIX + trimmedEndpoint;
    }
    return trimmedEndpoint;
}
class ConnectionStringProperties { private final ClientLogger logger = new ClientLogger(ConnectionStringProperties.class); private static final String TOKEN_VALUE_SEPARATOR = "="; private static final String ENDPOINT_SCHEME_SB_PREFIX = "sb: private static final String ENDPOINT_SCHEME_HTTP_PREFIX = "http: private static final String ENDPOINT_SCHEME_HTTPS_PREFIX = "https: private static final String TOKEN_VALUE_PAIR_DELIMITER = ";"; private static final String ENDPOINT = "Endpoint"; private static final String SHARED_ACCESS_KEY_NAME = "SharedAccessKeyName"; private static final String SHARED_ACCESS_KEY = "SharedAccessKey"; private static final String SHARED_ACCESS_SIGNATURE = "SharedAccessSignature"; private static final String SAS_VALUE_PREFIX = "sharedaccesssignature "; private static final String ENTITY_PATH = "EntityPath"; private static final String CONNECTION_STRING_WITH_ACCESS_KEY = "Endpoint={endpoint};" + "SharedAccessKeyName={sharedAccessKeyName};SharedAccessKey={sharedAccessKey};EntityPath={entityPath}"; private static final String CONNECTION_STRING_WITH_SAS = "Endpoint={endpoint};SharedAccessSignature=" + "SharedAccessSignature {sharedAccessSignature};EntityPath={entityPath}"; private static final String ERROR_MESSAGE_FORMAT = "Could not parse 'connectionString'. Expected format: " + CONNECTION_STRING_WITH_ACCESS_KEY + " or " + CONNECTION_STRING_WITH_SAS + "."; private final URI endpoint; private final String entityPath; private final String sharedAccessKeyName; private final String sharedAccessKey; private final String sharedAccessSignature; /** * Creates a new instance by parsing the {@code connectionString} into its components. * @param connectionString The connection string to the Event Hub instance. * * @throws NullPointerException if {@code connectionString} is null. * @throws IllegalArgumentException if {@code connectionString} is an empty string or the connection string has * an invalid format. 
*/ public ConnectionStringProperties(String connectionString) { Objects.requireNonNull(connectionString, "'connectionString' cannot be null."); if (connectionString.isEmpty()) { throw new IllegalArgumentException("'connectionString' cannot be an empty string."); } final String[] tokenValuePairs = connectionString.split(TOKEN_VALUE_PAIR_DELIMITER); URI endpoint = null; String entityPath = null; String sharedAccessKeyName = null; String sharedAccessKeyValue = null; String sharedAccessSignature = null; for (String tokenValuePair : tokenValuePairs) { final String[] pair = tokenValuePair.split(TOKEN_VALUE_SEPARATOR, 2); if (pair.length != 2) { throw new IllegalArgumentException(String.format( Locale.US, "Connection string has invalid key value pair: %s", tokenValuePair)); } final String key = pair[0].trim(); final String value = pair[1].trim(); if (key.equalsIgnoreCase(ENDPOINT)) { final String endpointUri = validateAndUpdateDefaultScheme(value); try { endpoint = new URI(endpointUri); } catch (URISyntaxException e) { throw new IllegalArgumentException( String.format(Locale.US, "Invalid endpoint: %s", tokenValuePair), e); } } else if (key.equalsIgnoreCase(SHARED_ACCESS_KEY_NAME)) { sharedAccessKeyName = value; } else if (key.equalsIgnoreCase(SHARED_ACCESS_KEY)) { sharedAccessKeyValue = value; } else if (key.equalsIgnoreCase(ENTITY_PATH)) { entityPath = value; } else if (key.equalsIgnoreCase(SHARED_ACCESS_SIGNATURE) && value.toLowerCase(Locale.ROOT).startsWith(SAS_VALUE_PREFIX)) { sharedAccessSignature = value; } else { throw new IllegalArgumentException( String.format(Locale.US, "Illegal connection string parameter name: %s", key)); } } boolean includesSharedKey = sharedAccessKeyName != null || sharedAccessKeyValue != null; boolean hasSharedKeyAndValue = sharedAccessKeyName != null && sharedAccessKeyValue != null; boolean includesSharedAccessSignature = sharedAccessSignature != null; if (endpoint == null || (includesSharedKey && includesSharedAccessSignature) || 
(!hasSharedKeyAndValue && !includesSharedAccessSignature)) { throw logger.logExceptionAsError(new IllegalArgumentException(ERROR_MESSAGE_FORMAT)); } this.endpoint = endpoint; this.entityPath = entityPath; this.sharedAccessKeyName = sharedAccessKeyName; this.sharedAccessKey = sharedAccessKeyValue; this.sharedAccessSignature = sharedAccessSignature; } /** * Gets the endpoint to be used for connecting to the AMQP message broker. * @return The endpoint address, including protocol, from the connection string. */ public URI getEndpoint() { return endpoint; } /** * Gets the entity path to connect to in the message broker. * @return The entity path to connect to in the message broker. */ public String getEntityPath() { return entityPath; } /** * Gets the name of the shared access key, either for the Event Hubs namespace or the Event Hub instance. * @return The name of the shared access key. */ public String getSharedAccessKeyName() { return sharedAccessKeyName; } /** * The value of the shared access key, either for the Event Hubs namespace or the Event Hub. * @return The value of the shared access key. */ public String getSharedAccessKey() { return sharedAccessKey; } /** * The value of the shared access signature, if the connection string used to create this instance included the * shared access signature component. * @return The shared access signature value, if included in the connection string. */ public String getSharedAccessSignature() { return sharedAccessSignature; } /* * The function checks for pre existing scheme of "sb: * in endpoint, it will set the default scheme to "sb: */ }
class ConnectionStringProperties { private final ClientLogger logger = new ClientLogger(ConnectionStringProperties.class); private static final String TOKEN_VALUE_SEPARATOR = "="; private static final String ENDPOINT_SCHEME_SB_PREFIX = "sb: private static final String ENDPOINT_SCHEME_HTTP_PREFIX = "http: private static final String ENDPOINT_SCHEME_HTTPS_PREFIX = "https: private static final String TOKEN_VALUE_PAIR_DELIMITER = ";"; private static final String ENDPOINT = "Endpoint"; private static final String SHARED_ACCESS_KEY_NAME = "SharedAccessKeyName"; private static final String SHARED_ACCESS_KEY = "SharedAccessKey"; private static final String SHARED_ACCESS_SIGNATURE = "SharedAccessSignature"; private static final String SAS_VALUE_PREFIX = "sharedaccesssignature "; private static final String ENTITY_PATH = "EntityPath"; private static final String CONNECTION_STRING_WITH_ACCESS_KEY = "Endpoint={endpoint};" + "SharedAccessKeyName={sharedAccessKeyName};SharedAccessKey={sharedAccessKey};EntityPath={entityPath}"; private static final String CONNECTION_STRING_WITH_SAS = "Endpoint={endpoint};SharedAccessSignature=" + "SharedAccessSignature {sharedAccessSignature};EntityPath={entityPath}"; private static final String ERROR_MESSAGE_FORMAT = String.format(Locale.US, "Could not parse 'connectionString'. Expected format: %s or %s.", CONNECTION_STRING_WITH_ACCESS_KEY, CONNECTION_STRING_WITH_SAS); private final URI endpoint; private final String entityPath; private final String sharedAccessKeyName; private final String sharedAccessKey; private final String sharedAccessSignature; /** * Creates a new instance by parsing the {@code connectionString} into its components. * @param connectionString The connection string to the Event Hub instance. * * @throws NullPointerException if {@code connectionString} is null. * @throws IllegalArgumentException if {@code connectionString} is an empty string or the connection string has * an invalid format. 
*/ public ConnectionStringProperties(String connectionString) { Objects.requireNonNull(connectionString, "'connectionString' cannot be null."); if (connectionString.isEmpty()) { throw new IllegalArgumentException("'connectionString' cannot be an empty string."); } final String[] tokenValuePairs = connectionString.split(TOKEN_VALUE_PAIR_DELIMITER); URI endpoint = null; String entityPath = null; String sharedAccessKeyName = null; String sharedAccessKeyValue = null; String sharedAccessSignature = null; for (String tokenValuePair : tokenValuePairs) { final String[] pair = tokenValuePair.split(TOKEN_VALUE_SEPARATOR, 2); if (pair.length != 2) { throw new IllegalArgumentException(String.format( Locale.US, "Connection string has invalid key value pair: %s", tokenValuePair)); } final String key = pair[0].trim(); final String value = pair[1].trim(); if (key.equalsIgnoreCase(ENDPOINT)) { final String endpointUri = validateAndUpdateDefaultScheme(value); try { endpoint = new URI(endpointUri); } catch (URISyntaxException e) { throw new IllegalArgumentException( String.format(Locale.US, "Invalid endpoint: %s", tokenValuePair), e); } } else if (key.equalsIgnoreCase(SHARED_ACCESS_KEY_NAME)) { sharedAccessKeyName = value; } else if (key.equalsIgnoreCase(SHARED_ACCESS_KEY)) { sharedAccessKeyValue = value; } else if (key.equalsIgnoreCase(ENTITY_PATH)) { entityPath = value; } else if (key.equalsIgnoreCase(SHARED_ACCESS_SIGNATURE) && value.toLowerCase(Locale.ROOT).startsWith(SAS_VALUE_PREFIX)) { sharedAccessSignature = value; } else { throw new IllegalArgumentException( String.format(Locale.US, "Illegal connection string parameter name: %s", key)); } } boolean includesSharedKey = sharedAccessKeyName != null || sharedAccessKeyValue != null; boolean hasSharedKeyAndValue = sharedAccessKeyName != null && sharedAccessKeyValue != null; boolean includesSharedAccessSignature = sharedAccessSignature != null; if (endpoint == null || (includesSharedKey && includesSharedAccessSignature) || 
(!hasSharedKeyAndValue && !includesSharedAccessSignature)) { throw logger.logExceptionAsError(new IllegalArgumentException(ERROR_MESSAGE_FORMAT)); } this.endpoint = endpoint; this.entityPath = entityPath; this.sharedAccessKeyName = sharedAccessKeyName; this.sharedAccessKey = sharedAccessKeyValue; this.sharedAccessSignature = sharedAccessSignature; } /** * Gets the endpoint to be used for connecting to the AMQP message broker. * @return The endpoint address, including protocol, from the connection string. */ public URI getEndpoint() { return endpoint; } /** * Gets the entity path to connect to in the message broker. * @return The entity path to connect to in the message broker. */ public String getEntityPath() { return entityPath; } /** * Gets the name of the shared access key, either for the Event Hubs namespace or the Event Hub instance. * @return The name of the shared access key. */ public String getSharedAccessKeyName() { return sharedAccessKeyName; } /** * The value of the shared access key, either for the Event Hubs namespace or the Event Hub. * @return The value of the shared access key. */ public String getSharedAccessKey() { return sharedAccessKey; } /** * The value of the shared access signature, if the connection string used to create this instance included the * shared access signature component. * @return The shared access signature value, if included in the connection string. */ public String getSharedAccessSignature() { return sharedAccessSignature; } /* * The function checks for pre existing scheme of "sb: * in endpoint, it will set the default scheme to "sb: */ }
We are iterating over the events twice: once here, and again in the `sendEventGridEventsWithResponse` method when converting from the public EventGridEvent to the internal EventGridEvent. We should avoid this double iteration.
public Mono<Response<Void>> sendEventsWithResponse(Iterable<T> events, Context context) { if(this.eventClass == CloudEvent.class) { List<CloudEvent> eventList = new ArrayList<>(); events.forEach(event -> eventList.add((CloudEvent) event)); return this.sendCloudEventsWithResponse(eventList, context); } else if (this.eventClass == EventGridEvent.class) { List<EventGridEvent> eventList = new ArrayList<>(); events.forEach(event -> eventList.add((EventGridEvent) event)); return this.sendEventGridEventsWithResponse(eventList, context); } else { List<Object> eventList = new ArrayList<>(); events.forEach(eventList::add); return this.sendCustomEventsWithResponse(eventList, context); } }
events.forEach(event -> eventList.add((EventGridEvent) event));
else if (this.eventClass == EventGridEvent.class) { return this.sendEventGridEvents((Iterable<EventGridEvent>) events, context); }
class EventGridPublisherAsyncClient<T> { private final String hostname; private final EventGridPublisherClientImpl impl; private final EventGridServiceVersion serviceVersion; private final ClientLogger logger = new ClientLogger(EventGridPublisherAsyncClient.class); private final ObjectSerializer eventDataSerializer; private final Object eventClass; private static final ClientLogger LOGGER = new ClientLogger(EventGridPublisherClient.class); EventGridPublisherAsyncClient(HttpPipeline pipeline, String hostname, EventGridServiceVersion serviceVersion, ObjectSerializer eventDataSerializer, Class<T> eventClass) { this.impl = new EventGridPublisherClientImplBuilder() .pipeline(pipeline) .buildClient(); this.serviceVersion = serviceVersion; this.hostname = hostname; this.eventDataSerializer = eventDataSerializer; this.eventClass = eventClass; } /** * Generate a shared access signature to provide time-limited authentication for requests to the Event Grid * service with the latest Event Grid service API defined in {@link EventGridServiceVersion * @param endpoint the endpoint of the Event Grid topic or domain. * @param expirationTime the time in which the signature should expire, no longer providing authentication. * @param keyCredential the access key obtained from the Event Grid topic or domain. * * @return the shared access signature string which can be used to construct an instance of * {@link AzureSasCredential}. */ public static String generateSas(String endpoint, AzureKeyCredential keyCredential, OffsetDateTime expirationTime) { return generateSas(endpoint, keyCredential, expirationTime, EventGridServiceVersion.getLatest()); } /** * Generate a shared access signature to provide time-limited authentication for requests to the Event Grid * service. * @param endpoint the endpoint of the Event Grid topic or domain. * @param expirationTime the time in which the signature should expire, no longer providing authentication. 
* @param keyCredential the access key obtained from the Event Grid topic or domain. * @param apiVersion the EventGrid service api version defined in {@link EventGridServiceVersion} * * @return the shared access signature string which can be used to construct an instance of * {@link AzureSasCredential}. */ public static String generateSas(String endpoint, AzureKeyCredential keyCredential, OffsetDateTime expirationTime, EventGridServiceVersion apiVersion) { try { String resKey = "r"; String expKey = "e"; String signKey = "s"; Charset charset = StandardCharsets.UTF_8; endpoint = endpoint + "?api-version=" + apiVersion.getVersion(); String encodedResource = URLEncoder.encode(endpoint, charset.name()); String encodedExpiration = URLEncoder.encode(expirationTime.atZoneSameInstant(ZoneOffset.UTC).format( DateTimeFormatter.ofPattern("M/d/yyyy h:m:s a")), charset.name()); String unsignedSas = String.format("%s=%s&%s=%s", resKey, encodedResource, expKey, encodedExpiration); Mac hmac = Mac.getInstance("hmacSHA256"); hmac.init(new SecretKeySpec(Base64.getDecoder().decode(keyCredential.getKey()), "hmacSHA256")); String signature = new String(Base64.getEncoder().encode( hmac.doFinal(unsignedSas.getBytes(charset))), charset); String encodedSignature = URLEncoder.encode(signature, charset.name()); return String.format("%s&%s=%s", unsignedSas, signKey, encodedSignature); } catch (NoSuchAlgorithmException | UnsupportedEncodingException | InvalidKeyException e) { throw LOGGER.logExceptionAsError(new RuntimeException(e)); } } /** * Publishes the given events to the set topic or domain. * @param events the events to publish. * * @return A {@link Mono} that completes when the events are sent to the service. * @throws NullPointerException if events is {@code null}. 
*/ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Void> sendEvents(Iterable<T> events) { return withContext(context -> sendEvents(events, context)); } Mono<Void> sendEvents(Iterable<T> events, Context context) { if(this.eventClass == CloudEvent.class) { List<CloudEvent> eventList = new ArrayList<>(); events.forEach(event -> eventList.add((CloudEvent) event)); return this.sendCloudEvents(eventList, context); } else { List<Object> eventList = new ArrayList<>(); events.forEach(eventList::add); return this.sendCustomEvents(eventList, context); } } /** * Publishes the given events to the set topic or domain and gives the response issued by EventGrid. * @param events the events to publish. * @param context the context to use along the pipeline. * * @return the response from the EventGrid service. * @throws NullPointerException if events is {@code null}. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<Void>> sendEventsWithResponse(Iterable<T> events, Context context) { if(this.eventClass == CloudEvent.class) { List<CloudEvent> eventList = new ArrayList<>(); events.forEach(event -> eventList.add((CloudEvent) event)); return this.sendCloudEventsWithResponse(eventList, context); } else if (this.eventClass == EventGridEvent.class) { List<EventGridEvent> eventList = new ArrayList<>(); events.forEach(event -> eventList.add((EventGridEvent) event)); return this.sendEventGridEventsWithResponse(eventList, context); } else { List<Object> eventList = new ArrayList<>(); events.forEach(eventList::add); return this.sendCustomEventsWithResponse(eventList, context); } } /** * Publishes the given events to the set topic or domain. * @param event the event to publish. * * @return A {@link Mono} that completes when the event is sent to the service. * @throws NullPointerException if events is {@code null}. 
*/ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Void> sendEvent(T event) { List<T> events = new ArrayList<>(); events.add(event); return withContext(context -> sendEvents(events, context)); } Mono<Void> sendEventGridEvents(Iterable<EventGridEvent> events, Context context) { if (events == null) { return monoError(logger, new NullPointerException("'events' cannot be null.")); } final Context finalContext = context != null ? context : Context.NONE; return Flux.fromIterable(events) .map(event -> { com.azure.messaging.eventgrid.implementation.models.EventGridEvent internalEvent = event.toImpl(); if (this.eventDataSerializer != null && internalEvent.getData() != null) { ByteArrayOutputStream bos = new ByteArrayOutputStream(); eventDataSerializer.serialize(bos, event.getData()); internalEvent.setData(Base64.getEncoder().encode(bos.toByteArray())); } return internalEvent; }) .collectList() .flatMap(list -> this.impl.publishEventsAsync(this.hostname, list, finalContext.addData(AZ_TRACING_NAMESPACE_KEY, Constants.EVENT_GRID_TRACING_NAMESPACE_VALUE))); } Mono<Void> sendCloudEvents(Iterable<CloudEvent> events, Context context) { if (events == null) { return monoError(logger, new NullPointerException("'events' cannot be null.")); } final Context finalContext = context != null ? 
context : Context.NONE; this.addCloudEventTracePlaceHolder(events); return Flux.fromIterable(events) .map(event -> { com.azure.messaging.eventgrid.implementation.models.CloudEvent internalEvent = event.toImpl(); if (this.eventDataSerializer != null && internalEvent.getData() != null) { ByteArrayOutputStream bos = new ByteArrayOutputStream(); eventDataSerializer.serialize(bos, event.getData()); internalEvent.setData(Base64.getEncoder().encode(bos.toByteArray())); } return internalEvent; }) .collectList() .flatMap(list -> this.impl.publishCloudEventEventsAsync(this.hostname, list, finalContext.addData(AZ_TRACING_NAMESPACE_KEY, Constants.EVENT_GRID_TRACING_NAMESPACE_VALUE))); } Mono<Void> sendCustomEvents(Iterable<Object> events, Context context) { if (events == null) { return monoError(logger, new NullPointerException("'events' cannot be null.")); } final Context finalContext = context != null ? context : Context.NONE; return Flux.fromIterable(events) .collectList() .flatMap(list -> this.impl.publishCustomEventEventsAsync(this.hostname, list, finalContext.addData(AZ_TRACING_NAMESPACE_KEY, Constants.EVENT_GRID_TRACING_NAMESPACE_VALUE))); } Mono<Response<Void>> sendEventGridEventsWithResponse(Iterable<EventGridEvent> events, Context context) { if (events == null) { return monoError(logger, new NullPointerException("'events' cannot be null.")); } final Context finalContext = context != null ? context : Context.NONE; return Flux.fromIterable(events) .map(EventGridEvent::toImpl) .collectList() .flatMap(list -> this.impl.publishEventsWithResponseAsync(this.hostname, list, finalContext.addData(AZ_TRACING_NAMESPACE_KEY, Constants.EVENT_GRID_TRACING_NAMESPACE_VALUE))); } Mono<Response<Void>> sendCloudEventsWithResponse(Iterable<CloudEvent> events, Context context) { if (events == null) { return monoError(logger, new NullPointerException("'events' cannot be null.")); } final Context finalContext = context != null ? 
context : Context.NONE; this.addCloudEventTracePlaceHolder(events); return Flux.fromIterable(events) .map(CloudEvent::toImpl) .collectList() .flatMap(list -> this.impl.publishCloudEventEventsWithResponseAsync(this.hostname, list, finalContext.addData(AZ_TRACING_NAMESPACE_KEY, Constants.EVENT_GRID_TRACING_NAMESPACE_VALUE))); } Mono<Response<Void>> sendCustomEventsWithResponse(Iterable<Object> events, Context context) { if (events == null) { return monoError(logger, new NullPointerException("'events' cannot be null.")); } final Context finalContext = context != null ? context : Context.NONE; return Flux.fromIterable(events) .collectList() .flatMap(list -> this.impl.publishCustomEventEventsWithResponseAsync(this.hostname, list, finalContext.addData(AZ_TRACING_NAMESPACE_KEY, Constants.EVENT_GRID_TRACING_NAMESPACE_VALUE))); } private void addCloudEventTracePlaceHolder(Iterable<CloudEvent> events) { if (TracerProxy.isTracingEnabled()) { for (CloudEvent event : events) { if (event.getExtensionAttributes() == null || (event.getExtensionAttributes().get(Constants.TRACE_PARENT) == null && event.getExtensionAttributes().get(Constants.TRACE_STATE) == null)) { event.addExtensionAttribute(Constants.TRACE_PARENT, Constants.TRACE_PARENT_PLACEHOLDER_UUID); event.addExtensionAttribute(Constants.TRACE_STATE, Constants.TRACE_STATE_PLACEHOLDER_UUID); } } } } }
class EventGridPublisherAsyncClient<T> { private final String hostname; private final EventGridPublisherClientImpl impl; private final EventGridServiceVersion serviceVersion; private final ClientLogger logger = new ClientLogger(EventGridPublisherAsyncClient.class); private final ObjectSerializer eventDataSerializer; private final Class<T> eventClass; private static final DateTimeFormatter SAS_DATE_TIME_FORMATER = DateTimeFormatter.ofPattern("M/d/yyyy h:m:s a"); private static final String HMAC_SHA256 = "hmacSHA256"; private static final String API_VERSION = "api-version"; private static final ClientLogger LOGGER = new ClientLogger(EventGridPublisherClient.class); EventGridPublisherAsyncClient(HttpPipeline pipeline, String hostname, EventGridServiceVersion serviceVersion, ObjectSerializer eventDataSerializer, Class<T> eventClass) { this.impl = new EventGridPublisherClientImplBuilder() .pipeline(pipeline) .buildClient(); this.serviceVersion = serviceVersion; this.hostname = hostname; this.eventDataSerializer = eventDataSerializer; this.eventClass = eventClass; } /** * Generate a shared access signature to provide time-limited authentication for requests to the Event Grid * service with the latest Event Grid service API defined in {@link EventGridServiceVersion * @param endpoint the endpoint of the Event Grid topic or domain. * @param expirationTime the time in which the signature should expire, no longer providing authentication. * @param keyCredential the access key obtained from the Event Grid topic or domain. * * @return the shared access signature string which can be used to construct an instance of * {@link AzureSasCredential}. * * @throws NullPointerException if endpoint, keyCredential or expirationTime is {@code null}. * @throws RuntimeException if java security doesn't have algorithm "hmacSHA256". 
*/ public static String generateSas(String endpoint, AzureKeyCredential keyCredential, OffsetDateTime expirationTime) { return generateSas(endpoint, keyCredential, expirationTime, EventGridServiceVersion.getLatest()); } /** * Generate a shared access signature to provide time-limited authentication for requests to the Event Grid * service. * @param endpoint the endpoint of the Event Grid topic or domain. * @param expirationTime the time in which the signature should expire, no longer providing authentication. * @param keyCredential the access key obtained from the Event Grid topic or domain. * @param apiVersion the EventGrid service api version defined in {@link EventGridServiceVersion} * * @return the shared access signature string which can be used to construct an instance of * {@link AzureSasCredential}. * * @throws NullPointerException if endpoint, keyCredential or expirationTime is {@code null}. * @throws RuntimeException if java security doesn't have algorithm "hmacSHA256". */ public static String generateSas(String endpoint, AzureKeyCredential keyCredential, OffsetDateTime expirationTime, EventGridServiceVersion apiVersion) { if (Objects.isNull(endpoint)) { throw LOGGER.logExceptionAsError(new NullPointerException("'endpoint' cannot be null.")); } if (Objects.isNull(keyCredential)) { throw LOGGER.logExceptionAsError(new NullPointerException("'keyCredetial' cannot be null.")); } if (Objects.isNull(expirationTime)) { throw LOGGER.logExceptionAsError(new NullPointerException("'expirationTime' cannot be null.")); } try { String resKey = "r"; String expKey = "e"; String signKey = "s"; Charset charset = StandardCharsets.UTF_8; endpoint = String.format("%s?%s=%s", endpoint, API_VERSION, apiVersion.getVersion()); String encodedResource = URLEncoder.encode(endpoint, charset.name()); String encodedExpiration = URLEncoder.encode(expirationTime.atZoneSameInstant(ZoneOffset.UTC).format( SAS_DATE_TIME_FORMATER), charset.name()); String unsignedSas = 
String.format("%s=%s&%s=%s", resKey, encodedResource, expKey, encodedExpiration); Mac hmac = Mac.getInstance(HMAC_SHA256); hmac.init(new SecretKeySpec(Base64.getDecoder().decode(keyCredential.getKey()), HMAC_SHA256)); String signature = new String(Base64.getEncoder().encode( hmac.doFinal(unsignedSas.getBytes(charset))), charset); String encodedSignature = URLEncoder.encode(signature, charset.name()); return String.format("%s&%s=%s", unsignedSas, signKey, encodedSignature); } catch (NoSuchAlgorithmException | UnsupportedEncodingException | InvalidKeyException e) { throw LOGGER.logExceptionAsError(new RuntimeException(e)); } } /** * Publishes the given events to the set topic or domain. * @param events the events to publish. * * @return A {@link Mono} that completes when the events are sent to the service. * @throws NullPointerException if events is {@code null}. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Void> sendEvents(Iterable<T> events) { return withContext(context -> sendEvents(events, context)); } @SuppressWarnings("unchecked") Mono<Void> sendEvents(Iterable<T> events, Context context) { if(this.eventClass == CloudEvent.class) { return this.sendCloudEvents((Iterable<CloudEvent>) events, context); } else { return this.sendCustomEvents((Iterable<Object>) events, context); } } /** * Publishes the given events to the set topic or domain and gives the response issued by EventGrid. * @param events the events to publish. * @param context the context to use along the pipeline. * * @return the response from the EventGrid service. * @throws NullPointerException if events is {@code null}. 
*/ @SuppressWarnings("unchecked") @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<Void>> sendEventsWithResponse(Iterable<T> events, Context context) { if(this.eventClass == CloudEvent.class) { return this.sendCloudEventsWithResponse((Iterable<CloudEvent>) events, context); } else if (this.eventClass == EventGridEvent.class) { return this.sendEventGridEventsWithResponse((Iterable<EventGridEvent>)events, context); } else { return this.sendCustomEventsWithResponse((Iterable<Object>) events, context); } } /** * Publishes the given events to the set topic or domain. * @param event the event to publish. * * @return A {@link Mono} that completes when the event is sent to the service. * @throws NullPointerException if events is {@code null}. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Void> sendEvent(T event) { List<T> events = Arrays.asList(event); return withContext(context -> sendEvents(events, context)); } Mono<Void> sendEventGridEvents(Iterable<EventGridEvent> events, Context context) { if (events == null) { return monoError(logger, new NullPointerException("'events' cannot be null.")); } final Context finalContext = context != null ? 
context : Context.NONE; return Flux.fromIterable(events) .map(event -> { com.azure.messaging.eventgrid.implementation.models.EventGridEvent internalEvent = event.toImpl(); if (this.eventDataSerializer != null && internalEvent.getData() != null) { ByteArrayOutputStream bos = new ByteArrayOutputStream(); eventDataSerializer.serialize(bos, event.getData()); internalEvent.setData(Base64.getEncoder().encode(bos.toByteArray())); } return internalEvent; }) .collectList() .flatMap(list -> this.impl.publishEventsAsync(this.hostname, list, finalContext.addData(AZ_TRACING_NAMESPACE_KEY, Constants.EVENT_GRID_TRACING_NAMESPACE_VALUE))); } Mono<Void> sendCloudEvents(Iterable<CloudEvent> events, Context context) { if (events == null) { return monoError(logger, new NullPointerException("'events' cannot be null.")); } final Context finalContext = context != null ? context : Context.NONE; this.addCloudEventTracePlaceHolder(events); return Flux.fromIterable(events) .map(event -> { com.azure.messaging.eventgrid.implementation.models.CloudEvent internalEvent = event.toImpl(); if (this.eventDataSerializer != null && internalEvent.getData() != null) { ByteArrayOutputStream bos = new ByteArrayOutputStream(); eventDataSerializer.serialize(bos, event.getData()); internalEvent.setData(Base64.getEncoder().encode(bos.toByteArray())); } return internalEvent; }) .collectList() .flatMap(list -> this.impl.publishCloudEventEventsAsync(this.hostname, list, finalContext.addData(AZ_TRACING_NAMESPACE_KEY, Constants.EVENT_GRID_TRACING_NAMESPACE_VALUE))); } Mono<Void> sendCustomEvents(Iterable<Object> events, Context context) { if (events == null) { return monoError(logger, new NullPointerException("'events' cannot be null.")); } final Context finalContext = context != null ? 
context : Context.NONE; return Flux.fromIterable(events) .collectList() .flatMap(list -> this.impl.publishCustomEventEventsAsync(this.hostname, list, finalContext.addData(AZ_TRACING_NAMESPACE_KEY, Constants.EVENT_GRID_TRACING_NAMESPACE_VALUE))); } Mono<Response<Void>> sendEventGridEventsWithResponse(Iterable<EventGridEvent> events, Context context) { if (events == null) { return monoError(logger, new NullPointerException("'events' cannot be null.")); } final Context finalContext = context != null ? context : Context.NONE; return Flux.fromIterable(events) .map(EventGridEvent::toImpl) .collectList() .flatMap(list -> this.impl.publishEventsWithResponseAsync(this.hostname, list, finalContext.addData(AZ_TRACING_NAMESPACE_KEY, Constants.EVENT_GRID_TRACING_NAMESPACE_VALUE))); } Mono<Response<Void>> sendCloudEventsWithResponse(Iterable<CloudEvent> events, Context context) { if (events == null) { return monoError(logger, new NullPointerException("'events' cannot be null.")); } final Context finalContext = context != null ? context : Context.NONE; this.addCloudEventTracePlaceHolder(events); return Flux.fromIterable(events) .map(CloudEvent::toImpl) .collectList() .flatMap(list -> this.impl.publishCloudEventEventsWithResponseAsync(this.hostname, list, finalContext.addData(AZ_TRACING_NAMESPACE_KEY, Constants.EVENT_GRID_TRACING_NAMESPACE_VALUE))); } Mono<Response<Void>> sendCustomEventsWithResponse(Iterable<Object> events, Context context) { if (events == null) { return monoError(logger, new NullPointerException("'events' cannot be null.")); } final Context finalContext = context != null ? 
context : Context.NONE; return Flux.fromIterable(events) .collectList() .flatMap(list -> this.impl.publishCustomEventEventsWithResponseAsync(this.hostname, list, finalContext.addData(AZ_TRACING_NAMESPACE_KEY, Constants.EVENT_GRID_TRACING_NAMESPACE_VALUE))); } private void addCloudEventTracePlaceHolder(Iterable<CloudEvent> events) { if (TracerProxy.isTracingEnabled()) { for (CloudEvent event : events) { if (event.getExtensionAttributes() == null || (event.getExtensionAttributes().get(Constants.TRACE_PARENT) == null && event.getExtensionAttributes().get(Constants.TRACE_STATE) == null)) { event.addExtensionAttribute(Constants.TRACE_PARENT, Constants.TRACE_PARENT_PLACEHOLDER_UUID); event.addExtensionAttribute(Constants.TRACE_STATE, Constants.TRACE_STATE_PLACEHOLDER_UUID); } } } } }
I changed it to use the cast `(Iterable<EventGridEvent>) events` with `@SuppressWarnings("unchecked")` to avoid the extra iteration.
public Mono<Response<Void>> sendEventsWithResponse(Iterable<T> events, Context context) { if(this.eventClass == CloudEvent.class) { List<CloudEvent> eventList = new ArrayList<>(); events.forEach(event -> eventList.add((CloudEvent) event)); return this.sendCloudEventsWithResponse(eventList, context); } else if (this.eventClass == EventGridEvent.class) { List<EventGridEvent> eventList = new ArrayList<>(); events.forEach(event -> eventList.add((EventGridEvent) event)); return this.sendEventGridEventsWithResponse(eventList, context); } else { List<Object> eventList = new ArrayList<>(); events.forEach(eventList::add); return this.sendCustomEventsWithResponse(eventList, context); } }
events.forEach(event -> eventList.add((EventGridEvent) event));
else if (this.eventClass == EventGridEvent.class) { return this.sendEventGridEvents((Iterable<EventGridEvent>) events, context); }
class EventGridPublisherAsyncClient<T> { private final String hostname; private final EventGridPublisherClientImpl impl; private final EventGridServiceVersion serviceVersion; private final ClientLogger logger = new ClientLogger(EventGridPublisherAsyncClient.class); private final ObjectSerializer eventDataSerializer; private final Object eventClass; private static final ClientLogger LOGGER = new ClientLogger(EventGridPublisherClient.class); EventGridPublisherAsyncClient(HttpPipeline pipeline, String hostname, EventGridServiceVersion serviceVersion, ObjectSerializer eventDataSerializer, Class<T> eventClass) { this.impl = new EventGridPublisherClientImplBuilder() .pipeline(pipeline) .buildClient(); this.serviceVersion = serviceVersion; this.hostname = hostname; this.eventDataSerializer = eventDataSerializer; this.eventClass = eventClass; } /** * Generate a shared access signature to provide time-limited authentication for requests to the Event Grid * service with the latest Event Grid service API defined in {@link EventGridServiceVersion * @param endpoint the endpoint of the Event Grid topic or domain. * @param expirationTime the time in which the signature should expire, no longer providing authentication. * @param keyCredential the access key obtained from the Event Grid topic or domain. * * @return the shared access signature string which can be used to construct an instance of * {@link AzureSasCredential}. */ public static String generateSas(String endpoint, AzureKeyCredential keyCredential, OffsetDateTime expirationTime) { return generateSas(endpoint, keyCredential, expirationTime, EventGridServiceVersion.getLatest()); } /** * Generate a shared access signature to provide time-limited authentication for requests to the Event Grid * service. * @param endpoint the endpoint of the Event Grid topic or domain. * @param expirationTime the time in which the signature should expire, no longer providing authentication. 
* @param keyCredential the access key obtained from the Event Grid topic or domain. * @param apiVersion the EventGrid service api version defined in {@link EventGridServiceVersion} * * @return the shared access signature string which can be used to construct an instance of * {@link AzureSasCredential}. */ public static String generateSas(String endpoint, AzureKeyCredential keyCredential, OffsetDateTime expirationTime, EventGridServiceVersion apiVersion) { try { String resKey = "r"; String expKey = "e"; String signKey = "s"; Charset charset = StandardCharsets.UTF_8; endpoint = endpoint + "?api-version=" + apiVersion.getVersion(); String encodedResource = URLEncoder.encode(endpoint, charset.name()); String encodedExpiration = URLEncoder.encode(expirationTime.atZoneSameInstant(ZoneOffset.UTC).format( DateTimeFormatter.ofPattern("M/d/yyyy h:m:s a")), charset.name()); String unsignedSas = String.format("%s=%s&%s=%s", resKey, encodedResource, expKey, encodedExpiration); Mac hmac = Mac.getInstance("hmacSHA256"); hmac.init(new SecretKeySpec(Base64.getDecoder().decode(keyCredential.getKey()), "hmacSHA256")); String signature = new String(Base64.getEncoder().encode( hmac.doFinal(unsignedSas.getBytes(charset))), charset); String encodedSignature = URLEncoder.encode(signature, charset.name()); return String.format("%s&%s=%s", unsignedSas, signKey, encodedSignature); } catch (NoSuchAlgorithmException | UnsupportedEncodingException | InvalidKeyException e) { throw LOGGER.logExceptionAsError(new RuntimeException(e)); } } /** * Publishes the given events to the set topic or domain. * @param events the events to publish. * * @return A {@link Mono} that completes when the events are sent to the service. * @throws NullPointerException if events is {@code null}. 
*/ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Void> sendEvents(Iterable<T> events) { return withContext(context -> sendEvents(events, context)); } Mono<Void> sendEvents(Iterable<T> events, Context context) { if(this.eventClass == CloudEvent.class) { List<CloudEvent> eventList = new ArrayList<>(); events.forEach(event -> eventList.add((CloudEvent) event)); return this.sendCloudEvents(eventList, context); } else { List<Object> eventList = new ArrayList<>(); events.forEach(eventList::add); return this.sendCustomEvents(eventList, context); } } /** * Publishes the given events to the set topic or domain and gives the response issued by EventGrid. * @param events the events to publish. * @param context the context to use along the pipeline. * * @return the response from the EventGrid service. * @throws NullPointerException if events is {@code null}. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<Void>> sendEventsWithResponse(Iterable<T> events, Context context) { if(this.eventClass == CloudEvent.class) { List<CloudEvent> eventList = new ArrayList<>(); events.forEach(event -> eventList.add((CloudEvent) event)); return this.sendCloudEventsWithResponse(eventList, context); } else if (this.eventClass == EventGridEvent.class) { List<EventGridEvent> eventList = new ArrayList<>(); events.forEach(event -> eventList.add((EventGridEvent) event)); return this.sendEventGridEventsWithResponse(eventList, context); } else { List<Object> eventList = new ArrayList<>(); events.forEach(eventList::add); return this.sendCustomEventsWithResponse(eventList, context); } } /** * Publishes the given events to the set topic or domain. * @param event the event to publish. * * @return A {@link Mono} that completes when the event is sent to the service. * @throws NullPointerException if events is {@code null}. 
*/ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Void> sendEvent(T event) { List<T> events = new ArrayList<>(); events.add(event); return withContext(context -> sendEvents(events, context)); } Mono<Void> sendEventGridEvents(Iterable<EventGridEvent> events, Context context) { if (events == null) { return monoError(logger, new NullPointerException("'events' cannot be null.")); } final Context finalContext = context != null ? context : Context.NONE; return Flux.fromIterable(events) .map(event -> { com.azure.messaging.eventgrid.implementation.models.EventGridEvent internalEvent = event.toImpl(); if (this.eventDataSerializer != null && internalEvent.getData() != null) { ByteArrayOutputStream bos = new ByteArrayOutputStream(); eventDataSerializer.serialize(bos, event.getData()); internalEvent.setData(Base64.getEncoder().encode(bos.toByteArray())); } return internalEvent; }) .collectList() .flatMap(list -> this.impl.publishEventsAsync(this.hostname, list, finalContext.addData(AZ_TRACING_NAMESPACE_KEY, Constants.EVENT_GRID_TRACING_NAMESPACE_VALUE))); } Mono<Void> sendCloudEvents(Iterable<CloudEvent> events, Context context) { if (events == null) { return monoError(logger, new NullPointerException("'events' cannot be null.")); } final Context finalContext = context != null ? 
context : Context.NONE; this.addCloudEventTracePlaceHolder(events); return Flux.fromIterable(events) .map(event -> { com.azure.messaging.eventgrid.implementation.models.CloudEvent internalEvent = event.toImpl(); if (this.eventDataSerializer != null && internalEvent.getData() != null) { ByteArrayOutputStream bos = new ByteArrayOutputStream(); eventDataSerializer.serialize(bos, event.getData()); internalEvent.setData(Base64.getEncoder().encode(bos.toByteArray())); } return internalEvent; }) .collectList() .flatMap(list -> this.impl.publishCloudEventEventsAsync(this.hostname, list, finalContext.addData(AZ_TRACING_NAMESPACE_KEY, Constants.EVENT_GRID_TRACING_NAMESPACE_VALUE))); } Mono<Void> sendCustomEvents(Iterable<Object> events, Context context) { if (events == null) { return monoError(logger, new NullPointerException("'events' cannot be null.")); } final Context finalContext = context != null ? context : Context.NONE; return Flux.fromIterable(events) .collectList() .flatMap(list -> this.impl.publishCustomEventEventsAsync(this.hostname, list, finalContext.addData(AZ_TRACING_NAMESPACE_KEY, Constants.EVENT_GRID_TRACING_NAMESPACE_VALUE))); } Mono<Response<Void>> sendEventGridEventsWithResponse(Iterable<EventGridEvent> events, Context context) { if (events == null) { return monoError(logger, new NullPointerException("'events' cannot be null.")); } final Context finalContext = context != null ? context : Context.NONE; return Flux.fromIterable(events) .map(EventGridEvent::toImpl) .collectList() .flatMap(list -> this.impl.publishEventsWithResponseAsync(this.hostname, list, finalContext.addData(AZ_TRACING_NAMESPACE_KEY, Constants.EVENT_GRID_TRACING_NAMESPACE_VALUE))); } Mono<Response<Void>> sendCloudEventsWithResponse(Iterable<CloudEvent> events, Context context) { if (events == null) { return monoError(logger, new NullPointerException("'events' cannot be null.")); } final Context finalContext = context != null ? 
context : Context.NONE; this.addCloudEventTracePlaceHolder(events); return Flux.fromIterable(events) .map(CloudEvent::toImpl) .collectList() .flatMap(list -> this.impl.publishCloudEventEventsWithResponseAsync(this.hostname, list, finalContext.addData(AZ_TRACING_NAMESPACE_KEY, Constants.EVENT_GRID_TRACING_NAMESPACE_VALUE))); } Mono<Response<Void>> sendCustomEventsWithResponse(Iterable<Object> events, Context context) { if (events == null) { return monoError(logger, new NullPointerException("'events' cannot be null.")); } final Context finalContext = context != null ? context : Context.NONE; return Flux.fromIterable(events) .collectList() .flatMap(list -> this.impl.publishCustomEventEventsWithResponseAsync(this.hostname, list, finalContext.addData(AZ_TRACING_NAMESPACE_KEY, Constants.EVENT_GRID_TRACING_NAMESPACE_VALUE))); } private void addCloudEventTracePlaceHolder(Iterable<CloudEvent> events) { if (TracerProxy.isTracingEnabled()) { for (CloudEvent event : events) { if (event.getExtensionAttributes() == null || (event.getExtensionAttributes().get(Constants.TRACE_PARENT) == null && event.getExtensionAttributes().get(Constants.TRACE_STATE) == null)) { event.addExtensionAttribute(Constants.TRACE_PARENT, Constants.TRACE_PARENT_PLACEHOLDER_UUID); event.addExtensionAttribute(Constants.TRACE_STATE, Constants.TRACE_STATE_PLACEHOLDER_UUID); } } } } }
class EventGridPublisherAsyncClient<T> { private final String hostname; private final EventGridPublisherClientImpl impl; private final EventGridServiceVersion serviceVersion; private final ClientLogger logger = new ClientLogger(EventGridPublisherAsyncClient.class); private final ObjectSerializer eventDataSerializer; private final Class<T> eventClass; private static final DateTimeFormatter SAS_DATE_TIME_FORMATER = DateTimeFormatter.ofPattern("M/d/yyyy h:m:s a"); private static final String HMAC_SHA256 = "hmacSHA256"; private static final String API_VERSION = "api-version"; private static final ClientLogger LOGGER = new ClientLogger(EventGridPublisherClient.class); EventGridPublisherAsyncClient(HttpPipeline pipeline, String hostname, EventGridServiceVersion serviceVersion, ObjectSerializer eventDataSerializer, Class<T> eventClass) { this.impl = new EventGridPublisherClientImplBuilder() .pipeline(pipeline) .buildClient(); this.serviceVersion = serviceVersion; this.hostname = hostname; this.eventDataSerializer = eventDataSerializer; this.eventClass = eventClass; } /** * Generate a shared access signature to provide time-limited authentication for requests to the Event Grid * service with the latest Event Grid service API defined in {@link EventGridServiceVersion * @param endpoint the endpoint of the Event Grid topic or domain. * @param expirationTime the time in which the signature should expire, no longer providing authentication. * @param keyCredential the access key obtained from the Event Grid topic or domain. * * @return the shared access signature string which can be used to construct an instance of * {@link AzureSasCredential}. * * @throws NullPointerException if endpoint, keyCredential or expirationTime is {@code null}. * @throws RuntimeException if java security doesn't have algorithm "hmacSHA256". 
*/ public static String generateSas(String endpoint, AzureKeyCredential keyCredential, OffsetDateTime expirationTime) { return generateSas(endpoint, keyCredential, expirationTime, EventGridServiceVersion.getLatest()); } /** * Generate a shared access signature to provide time-limited authentication for requests to the Event Grid * service. * @param endpoint the endpoint of the Event Grid topic or domain. * @param expirationTime the time in which the signature should expire, no longer providing authentication. * @param keyCredential the access key obtained from the Event Grid topic or domain. * @param apiVersion the EventGrid service api version defined in {@link EventGridServiceVersion} * * @return the shared access signature string which can be used to construct an instance of * {@link AzureSasCredential}. * * @throws NullPointerException if endpoint, keyCredential or expirationTime is {@code null}. * @throws RuntimeException if java security doesn't have algorithm "hmacSHA256". */ public static String generateSas(String endpoint, AzureKeyCredential keyCredential, OffsetDateTime expirationTime, EventGridServiceVersion apiVersion) { if (Objects.isNull(endpoint)) { throw LOGGER.logExceptionAsError(new NullPointerException("'endpoint' cannot be null.")); } if (Objects.isNull(keyCredential)) { throw LOGGER.logExceptionAsError(new NullPointerException("'keyCredetial' cannot be null.")); } if (Objects.isNull(expirationTime)) { throw LOGGER.logExceptionAsError(new NullPointerException("'expirationTime' cannot be null.")); } try { String resKey = "r"; String expKey = "e"; String signKey = "s"; Charset charset = StandardCharsets.UTF_8; endpoint = String.format("%s?%s=%s", endpoint, API_VERSION, apiVersion.getVersion()); String encodedResource = URLEncoder.encode(endpoint, charset.name()); String encodedExpiration = URLEncoder.encode(expirationTime.atZoneSameInstant(ZoneOffset.UTC).format( SAS_DATE_TIME_FORMATER), charset.name()); String unsignedSas = 
String.format("%s=%s&%s=%s", resKey, encodedResource, expKey, encodedExpiration); Mac hmac = Mac.getInstance(HMAC_SHA256); hmac.init(new SecretKeySpec(Base64.getDecoder().decode(keyCredential.getKey()), HMAC_SHA256)); String signature = new String(Base64.getEncoder().encode( hmac.doFinal(unsignedSas.getBytes(charset))), charset); String encodedSignature = URLEncoder.encode(signature, charset.name()); return String.format("%s&%s=%s", unsignedSas, signKey, encodedSignature); } catch (NoSuchAlgorithmException | UnsupportedEncodingException | InvalidKeyException e) { throw LOGGER.logExceptionAsError(new RuntimeException(e)); } } /** * Publishes the given events to the set topic or domain. * @param events the events to publish. * * @return A {@link Mono} that completes when the events are sent to the service. * @throws NullPointerException if events is {@code null}. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Void> sendEvents(Iterable<T> events) { return withContext(context -> sendEvents(events, context)); } @SuppressWarnings("unchecked") Mono<Void> sendEvents(Iterable<T> events, Context context) { if(this.eventClass == CloudEvent.class) { return this.sendCloudEvents((Iterable<CloudEvent>) events, context); } else { return this.sendCustomEvents((Iterable<Object>) events, context); } } /** * Publishes the given events to the set topic or domain and gives the response issued by EventGrid. * @param events the events to publish. * @param context the context to use along the pipeline. * * @return the response from the EventGrid service. * @throws NullPointerException if events is {@code null}. 
*/ @SuppressWarnings("unchecked") @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<Void>> sendEventsWithResponse(Iterable<T> events, Context context) { if(this.eventClass == CloudEvent.class) { return this.sendCloudEventsWithResponse((Iterable<CloudEvent>) events, context); } else if (this.eventClass == EventGridEvent.class) { return this.sendEventGridEventsWithResponse((Iterable<EventGridEvent>)events, context); } else { return this.sendCustomEventsWithResponse((Iterable<Object>) events, context); } } /** * Publishes the given events to the set topic or domain. * @param event the event to publish. * * @return A {@link Mono} that completes when the event is sent to the service. * @throws NullPointerException if events is {@code null}. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Void> sendEvent(T event) { List<T> events = Arrays.asList(event); return withContext(context -> sendEvents(events, context)); } Mono<Void> sendEventGridEvents(Iterable<EventGridEvent> events, Context context) { if (events == null) { return monoError(logger, new NullPointerException("'events' cannot be null.")); } final Context finalContext = context != null ? 
context : Context.NONE; return Flux.fromIterable(events) .map(event -> { com.azure.messaging.eventgrid.implementation.models.EventGridEvent internalEvent = event.toImpl(); if (this.eventDataSerializer != null && internalEvent.getData() != null) { ByteArrayOutputStream bos = new ByteArrayOutputStream(); eventDataSerializer.serialize(bos, event.getData()); internalEvent.setData(Base64.getEncoder().encode(bos.toByteArray())); } return internalEvent; }) .collectList() .flatMap(list -> this.impl.publishEventsAsync(this.hostname, list, finalContext.addData(AZ_TRACING_NAMESPACE_KEY, Constants.EVENT_GRID_TRACING_NAMESPACE_VALUE))); } Mono<Void> sendCloudEvents(Iterable<CloudEvent> events, Context context) { if (events == null) { return monoError(logger, new NullPointerException("'events' cannot be null.")); } final Context finalContext = context != null ? context : Context.NONE; this.addCloudEventTracePlaceHolder(events); return Flux.fromIterable(events) .map(event -> { com.azure.messaging.eventgrid.implementation.models.CloudEvent internalEvent = event.toImpl(); if (this.eventDataSerializer != null && internalEvent.getData() != null) { ByteArrayOutputStream bos = new ByteArrayOutputStream(); eventDataSerializer.serialize(bos, event.getData()); internalEvent.setData(Base64.getEncoder().encode(bos.toByteArray())); } return internalEvent; }) .collectList() .flatMap(list -> this.impl.publishCloudEventEventsAsync(this.hostname, list, finalContext.addData(AZ_TRACING_NAMESPACE_KEY, Constants.EVENT_GRID_TRACING_NAMESPACE_VALUE))); } Mono<Void> sendCustomEvents(Iterable<Object> events, Context context) { if (events == null) { return monoError(logger, new NullPointerException("'events' cannot be null.")); } final Context finalContext = context != null ? 
context : Context.NONE; return Flux.fromIterable(events) .collectList() .flatMap(list -> this.impl.publishCustomEventEventsAsync(this.hostname, list, finalContext.addData(AZ_TRACING_NAMESPACE_KEY, Constants.EVENT_GRID_TRACING_NAMESPACE_VALUE))); } Mono<Response<Void>> sendEventGridEventsWithResponse(Iterable<EventGridEvent> events, Context context) { if (events == null) { return monoError(logger, new NullPointerException("'events' cannot be null.")); } final Context finalContext = context != null ? context : Context.NONE; return Flux.fromIterable(events) .map(EventGridEvent::toImpl) .collectList() .flatMap(list -> this.impl.publishEventsWithResponseAsync(this.hostname, list, finalContext.addData(AZ_TRACING_NAMESPACE_KEY, Constants.EVENT_GRID_TRACING_NAMESPACE_VALUE))); } Mono<Response<Void>> sendCloudEventsWithResponse(Iterable<CloudEvent> events, Context context) { if (events == null) { return monoError(logger, new NullPointerException("'events' cannot be null.")); } final Context finalContext = context != null ? context : Context.NONE; this.addCloudEventTracePlaceHolder(events); return Flux.fromIterable(events) .map(CloudEvent::toImpl) .collectList() .flatMap(list -> this.impl.publishCloudEventEventsWithResponseAsync(this.hostname, list, finalContext.addData(AZ_TRACING_NAMESPACE_KEY, Constants.EVENT_GRID_TRACING_NAMESPACE_VALUE))); } Mono<Response<Void>> sendCustomEventsWithResponse(Iterable<Object> events, Context context) { if (events == null) { return monoError(logger, new NullPointerException("'events' cannot be null.")); } final Context finalContext = context != null ? 
context : Context.NONE; return Flux.fromIterable(events) .collectList() .flatMap(list -> this.impl.publishCustomEventEventsWithResponseAsync(this.hostname, list, finalContext.addData(AZ_TRACING_NAMESPACE_KEY, Constants.EVENT_GRID_TRACING_NAMESPACE_VALUE))); } private void addCloudEventTracePlaceHolder(Iterable<CloudEvent> events) { if (TracerProxy.isTracingEnabled()) { for (CloudEvent event : events) { if (event.getExtensionAttributes() == null || (event.getExtensionAttributes().get(Constants.TRACE_PARENT) == null && event.getExtensionAttributes().get(Constants.TRACE_STATE) == null)) { event.addExtensionAttribute(Constants.TRACE_PARENT, Constants.TRACE_PARENT_PLACEHOLDER_UUID); event.addExtensionAttribute(Constants.TRACE_STATE, Constants.TRACE_STATE_PLACEHOLDER_UUID); } } } } }
```suggestion List<T> events = Arrays.asList(event); ```
/**
 * Publishes the given event to the set topic or domain.
 * @param event the event to publish.
 *
 * @return A {@link Mono} that completes when the event is sent to the service.
 * @throws NullPointerException if events is {@code null} (raised by the delegated send method).
 */
public Mono<Void> sendEvent(T event) {
    // Arrays.asList wraps the single element directly, avoiding the two-step
    // ArrayList create-then-add; the list is never mutated afterwards.
    List<T> events = Arrays.asList(event);
    return withContext(context -> sendEvents(events, context));
}
events.add(event);
/**
 * Publishes a single event to the configured topic or domain.
 * @param event the event to publish.
 *
 * @return A {@link Mono} that completes when the event is sent to the service.
 */
public Mono<Void> sendEvent(T event) {
    // Wrap the lone event once, outside the lambda, so the same list instance
    // is reused by the context-aware send.
    final List<T> singleEventList = Arrays.asList(event);
    return withContext(ctx -> sendEvents(singleEventList, ctx));
}
class EventGridPublisherAsyncClient<T> { private final String hostname; private final EventGridPublisherClientImpl impl; private final EventGridServiceVersion serviceVersion; private final ClientLogger logger = new ClientLogger(EventGridPublisherAsyncClient.class); private final ObjectSerializer eventDataSerializer; private final Class<T> eventClass; private static final DateTimeFormatter SAS_DATE_TIME_FORMATER = DateTimeFormatter.ofPattern("M/d/yyyy h:m:s a"); private static final String HMAC_SHA256 = "hmacSHA256"; private static final String API_VERSION = "api-version"; private static final ClientLogger LOGGER = new ClientLogger(EventGridPublisherClient.class); EventGridPublisherAsyncClient(HttpPipeline pipeline, String hostname, EventGridServiceVersion serviceVersion, ObjectSerializer eventDataSerializer, Class<T> eventClass) { this.impl = new EventGridPublisherClientImplBuilder() .pipeline(pipeline) .buildClient(); this.serviceVersion = serviceVersion; this.hostname = hostname; this.eventDataSerializer = eventDataSerializer; this.eventClass = eventClass; } /** * Generate a shared access signature to provide time-limited authentication for requests to the Event Grid * service with the latest Event Grid service API defined in {@link EventGridServiceVersion * @param endpoint the endpoint of the Event Grid topic or domain. * @param expirationTime the time in which the signature should expire, no longer providing authentication. * @param keyCredential the access key obtained from the Event Grid topic or domain. * * @return the shared access signature string which can be used to construct an instance of * {@link AzureSasCredential}. * * @throws NullPointerException if keyCredential or expirationTime is {@code null}. 
*/ public static String generateSas(String endpoint, AzureKeyCredential keyCredential, OffsetDateTime expirationTime) { return generateSas(endpoint, keyCredential, expirationTime, EventGridServiceVersion.getLatest()); } /** * Generate a shared access signature to provide time-limited authentication for requests to the Event Grid * service. * @param endpoint the endpoint of the Event Grid topic or domain. * @param expirationTime the time in which the signature should expire, no longer providing authentication. * @param keyCredential the access key obtained from the Event Grid topic or domain. * @param apiVersion the EventGrid service api version defined in {@link EventGridServiceVersion} * * @return the shared access signature string which can be used to construct an instance of * {@link AzureSasCredential}. * * @throws NullPointerException if keyCredential or expirationTime is {@code null}. */ public static String generateSas(String endpoint, AzureKeyCredential keyCredential, OffsetDateTime expirationTime, EventGridServiceVersion apiVersion) { if (Objects.isNull(endpoint)) { throw LOGGER.logExceptionAsError(new NullPointerException("'endpoint' cannot be null.")); } if (Objects.isNull(keyCredential)) { throw LOGGER.logExceptionAsError(new NullPointerException("'keyCredetial' cannot be null.")); } if (Objects.isNull(expirationTime)) { throw LOGGER.logExceptionAsError(new NullPointerException("'expirationTime' cannot be null.")); } try { String resKey = "r"; String expKey = "e"; String signKey = "s"; Charset charset = StandardCharsets.UTF_8; endpoint = String.format("%s?%s=%s", endpoint, API_VERSION, apiVersion.getVersion()); String encodedResource = URLEncoder.encode(endpoint, charset.name()); String encodedExpiration = URLEncoder.encode(expirationTime.atZoneSameInstant(ZoneOffset.UTC).format( SAS_DATE_TIME_FORMATER), charset.name()); String unsignedSas = String.format("%s=%s&%s=%s", resKey, encodedResource, expKey, encodedExpiration); Mac hmac = 
Mac.getInstance(HMAC_SHA256); hmac.init(new SecretKeySpec(Base64.getDecoder().decode(keyCredential.getKey()), HMAC_SHA256)); String signature = new String(Base64.getEncoder().encode( hmac.doFinal(unsignedSas.getBytes(charset))), charset); String encodedSignature = URLEncoder.encode(signature, charset.name()); return String.format("%s&%s=%s", unsignedSas, signKey, encodedSignature); } catch (NoSuchAlgorithmException | UnsupportedEncodingException | InvalidKeyException e) { throw LOGGER.logExceptionAsError(new RuntimeException(e)); } } /** * Publishes the given events to the set topic or domain. * @param events the events to publish. * * @return A {@link Mono} that completes when the events are sent to the service. * @throws NullPointerException if events is {@code null}. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Void> sendEvents(Iterable<T> events) { return withContext(context -> sendEvents(events, context)); } @SuppressWarnings("unchecked") Mono<Void> sendEvents(Iterable<T> events, Context context) { if(this.eventClass == CloudEvent.class) { return this.sendCloudEvents((Iterable<CloudEvent>) events, context); } else if (this.eventClass == EventGridEvent.class) { return this.sendEventGridEvents((Iterable<EventGridEvent>) events, context); } else { return this.sendCustomEvents((Iterable<Object>) events, context); } } /** * Publishes the given events to the set topic or domain and gives the response issued by EventGrid. * @param events the events to publish. * @param context the context to use along the pipeline. * * @return the response from the EventGrid service. * @throws NullPointerException if events is {@code null}. 
*/ @SuppressWarnings("unchecked") @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<Void>> sendEventsWithResponse(Iterable<T> events, Context context) { if(this.eventClass == CloudEvent.class) { return this.sendCloudEventsWithResponse((Iterable<CloudEvent>) events, context); } else if (this.eventClass == EventGridEvent.class) { return this.sendEventGridEventsWithResponse((Iterable<EventGridEvent>)events, context); } else { return this.sendCustomEventsWithResponse((Iterable<Object>) events, context); } } /** * Publishes the given events to the set topic or domain. * @param event the event to publish. * * @return A {@link Mono} that completes when the event is sent to the service. * @throws NullPointerException if events is {@code null}. */ @ServiceMethod(returns = ReturnType.SINGLE) Mono<Void> sendEventGridEvents(Iterable<EventGridEvent> events, Context context) { if (events == null) { return monoError(logger, new NullPointerException("'events' cannot be null.")); } final Context finalContext = context != null ? context : Context.NONE; return Flux.fromIterable(events) .map(event -> { com.azure.messaging.eventgrid.implementation.models.EventGridEvent internalEvent = event.toImpl(); if (this.eventDataSerializer != null && internalEvent.getData() != null) { ByteArrayOutputStream bos = new ByteArrayOutputStream(); eventDataSerializer.serialize(bos, event.getData()); internalEvent.setData(Base64.getEncoder().encode(bos.toByteArray())); } return internalEvent; }) .collectList() .flatMap(list -> this.impl.publishEventsAsync(this.hostname, list, finalContext.addData(AZ_TRACING_NAMESPACE_KEY, Constants.EVENT_GRID_TRACING_NAMESPACE_VALUE))); } Mono<Void> sendCloudEvents(Iterable<CloudEvent> events, Context context) { if (events == null) { return monoError(logger, new NullPointerException("'events' cannot be null.")); } final Context finalContext = context != null ? 
context : Context.NONE; this.addCloudEventTracePlaceHolder(events); return Flux.fromIterable(events) .map(event -> { com.azure.messaging.eventgrid.implementation.models.CloudEvent internalEvent = event.toImpl(); if (this.eventDataSerializer != null && internalEvent.getData() != null) { ByteArrayOutputStream bos = new ByteArrayOutputStream(); eventDataSerializer.serialize(bos, event.getData()); internalEvent.setData(Base64.getEncoder().encode(bos.toByteArray())); } return internalEvent; }) .collectList() .flatMap(list -> this.impl.publishCloudEventEventsAsync(this.hostname, list, finalContext.addData(AZ_TRACING_NAMESPACE_KEY, Constants.EVENT_GRID_TRACING_NAMESPACE_VALUE))); } Mono<Void> sendCustomEvents(Iterable<Object> events, Context context) { if (events == null) { return monoError(logger, new NullPointerException("'events' cannot be null.")); } final Context finalContext = context != null ? context : Context.NONE; return Flux.fromIterable(events) .collectList() .flatMap(list -> this.impl.publishCustomEventEventsAsync(this.hostname, list, finalContext.addData(AZ_TRACING_NAMESPACE_KEY, Constants.EVENT_GRID_TRACING_NAMESPACE_VALUE))); } Mono<Response<Void>> sendEventGridEventsWithResponse(Iterable<EventGridEvent> events, Context context) { if (events == null) { return monoError(logger, new NullPointerException("'events' cannot be null.")); } final Context finalContext = context != null ? context : Context.NONE; return Flux.fromIterable(events) .map(EventGridEvent::toImpl) .collectList() .flatMap(list -> this.impl.publishEventsWithResponseAsync(this.hostname, list, finalContext.addData(AZ_TRACING_NAMESPACE_KEY, Constants.EVENT_GRID_TRACING_NAMESPACE_VALUE))); } Mono<Response<Void>> sendCloudEventsWithResponse(Iterable<CloudEvent> events, Context context) { if (events == null) { return monoError(logger, new NullPointerException("'events' cannot be null.")); } final Context finalContext = context != null ? 
context : Context.NONE; this.addCloudEventTracePlaceHolder(events); return Flux.fromIterable(events) .map(CloudEvent::toImpl) .collectList() .flatMap(list -> this.impl.publishCloudEventEventsWithResponseAsync(this.hostname, list, finalContext.addData(AZ_TRACING_NAMESPACE_KEY, Constants.EVENT_GRID_TRACING_NAMESPACE_VALUE))); } Mono<Response<Void>> sendCustomEventsWithResponse(Iterable<Object> events, Context context) { if (events == null) { return monoError(logger, new NullPointerException("'events' cannot be null.")); } final Context finalContext = context != null ? context : Context.NONE; return Flux.fromIterable(events) .collectList() .flatMap(list -> this.impl.publishCustomEventEventsWithResponseAsync(this.hostname, list, finalContext.addData(AZ_TRACING_NAMESPACE_KEY, Constants.EVENT_GRID_TRACING_NAMESPACE_VALUE))); } private void addCloudEventTracePlaceHolder(Iterable<CloudEvent> events) { if (TracerProxy.isTracingEnabled()) { for (CloudEvent event : events) { if (event.getExtensionAttributes() == null || (event.getExtensionAttributes().get(Constants.TRACE_PARENT) == null && event.getExtensionAttributes().get(Constants.TRACE_STATE) == null)) { event.addExtensionAttribute(Constants.TRACE_PARENT, Constants.TRACE_PARENT_PLACEHOLDER_UUID); event.addExtensionAttribute(Constants.TRACE_STATE, Constants.TRACE_STATE_PLACEHOLDER_UUID); } } } } }
class EventGridPublisherAsyncClient<T> { private final String hostname; private final EventGridPublisherClientImpl impl; private final EventGridServiceVersion serviceVersion; private final ClientLogger logger = new ClientLogger(EventGridPublisherAsyncClient.class); private final ObjectSerializer eventDataSerializer; private final Class<T> eventClass; private static final DateTimeFormatter SAS_DATE_TIME_FORMATER = DateTimeFormatter.ofPattern("M/d/yyyy h:m:s a"); private static final String HMAC_SHA256 = "hmacSHA256"; private static final String API_VERSION = "api-version"; private static final ClientLogger LOGGER = new ClientLogger(EventGridPublisherClient.class); EventGridPublisherAsyncClient(HttpPipeline pipeline, String hostname, EventGridServiceVersion serviceVersion, ObjectSerializer eventDataSerializer, Class<T> eventClass) { this.impl = new EventGridPublisherClientImplBuilder() .pipeline(pipeline) .buildClient(); this.serviceVersion = serviceVersion; this.hostname = hostname; this.eventDataSerializer = eventDataSerializer; this.eventClass = eventClass; } /** * Generate a shared access signature to provide time-limited authentication for requests to the Event Grid * service with the latest Event Grid service API defined in {@link EventGridServiceVersion * @param endpoint the endpoint of the Event Grid topic or domain. * @param expirationTime the time in which the signature should expire, no longer providing authentication. * @param keyCredential the access key obtained from the Event Grid topic or domain. * * @return the shared access signature string which can be used to construct an instance of * {@link AzureSasCredential}. * * @throws NullPointerException if endpoint, keyCredential or expirationTime is {@code null}. * @throws RuntimeException if java security doesn't have algorithm "hmacSHA256". 
*/ public static String generateSas(String endpoint, AzureKeyCredential keyCredential, OffsetDateTime expirationTime) { return generateSas(endpoint, keyCredential, expirationTime, EventGridServiceVersion.getLatest()); } /** * Generate a shared access signature to provide time-limited authentication for requests to the Event Grid * service. * @param endpoint the endpoint of the Event Grid topic or domain. * @param expirationTime the time in which the signature should expire, no longer providing authentication. * @param keyCredential the access key obtained from the Event Grid topic or domain. * @param apiVersion the EventGrid service api version defined in {@link EventGridServiceVersion} * * @return the shared access signature string which can be used to construct an instance of * {@link AzureSasCredential}. * * @throws NullPointerException if endpoint, keyCredential or expirationTime is {@code null}. * @throws RuntimeException if java security doesn't have algorithm "hmacSHA256". */ public static String generateSas(String endpoint, AzureKeyCredential keyCredential, OffsetDateTime expirationTime, EventGridServiceVersion apiVersion) { if (Objects.isNull(endpoint)) { throw LOGGER.logExceptionAsError(new NullPointerException("'endpoint' cannot be null.")); } if (Objects.isNull(keyCredential)) { throw LOGGER.logExceptionAsError(new NullPointerException("'keyCredetial' cannot be null.")); } if (Objects.isNull(expirationTime)) { throw LOGGER.logExceptionAsError(new NullPointerException("'expirationTime' cannot be null.")); } try { String resKey = "r"; String expKey = "e"; String signKey = "s"; Charset charset = StandardCharsets.UTF_8; endpoint = String.format("%s?%s=%s", endpoint, API_VERSION, apiVersion.getVersion()); String encodedResource = URLEncoder.encode(endpoint, charset.name()); String encodedExpiration = URLEncoder.encode(expirationTime.atZoneSameInstant(ZoneOffset.UTC).format( SAS_DATE_TIME_FORMATER), charset.name()); String unsignedSas = 
String.format("%s=%s&%s=%s", resKey, encodedResource, expKey, encodedExpiration); Mac hmac = Mac.getInstance(HMAC_SHA256); hmac.init(new SecretKeySpec(Base64.getDecoder().decode(keyCredential.getKey()), HMAC_SHA256)); String signature = new String(Base64.getEncoder().encode( hmac.doFinal(unsignedSas.getBytes(charset))), charset); String encodedSignature = URLEncoder.encode(signature, charset.name()); return String.format("%s&%s=%s", unsignedSas, signKey, encodedSignature); } catch (NoSuchAlgorithmException | UnsupportedEncodingException | InvalidKeyException e) { throw LOGGER.logExceptionAsError(new RuntimeException(e)); } } /** * Publishes the given events to the set topic or domain. * @param events the events to publish. * * @return A {@link Mono} that completes when the events are sent to the service. * @throws NullPointerException if events is {@code null}. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Void> sendEvents(Iterable<T> events) { return withContext(context -> sendEvents(events, context)); } @SuppressWarnings("unchecked") Mono<Void> sendEvents(Iterable<T> events, Context context) { if(this.eventClass == CloudEvent.class) { return this.sendCloudEvents((Iterable<CloudEvent>) events, context); } else if (this.eventClass == EventGridEvent.class) { return this.sendEventGridEvents((Iterable<EventGridEvent>) events, context); } else { return this.sendCustomEvents((Iterable<Object>) events, context); } } /** * Publishes the given events to the set topic or domain and gives the response issued by EventGrid. * @param events the events to publish. * @param context the context to use along the pipeline. * * @return the response from the EventGrid service. * @throws NullPointerException if events is {@code null}. 
*/ @SuppressWarnings("unchecked") @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<Void>> sendEventsWithResponse(Iterable<T> events, Context context) { if(this.eventClass == CloudEvent.class) { return this.sendCloudEventsWithResponse((Iterable<CloudEvent>) events, context); } else if (this.eventClass == EventGridEvent.class) { return this.sendEventGridEventsWithResponse((Iterable<EventGridEvent>)events, context); } else { return this.sendCustomEventsWithResponse((Iterable<Object>) events, context); } } /** * Publishes the given events to the set topic or domain. * @param event the event to publish. * * @return A {@link Mono} that completes when the event is sent to the service. * @throws NullPointerException if events is {@code null}. */ @ServiceMethod(returns = ReturnType.SINGLE) Mono<Void> sendEventGridEvents(Iterable<EventGridEvent> events, Context context) { if (events == null) { return monoError(logger, new NullPointerException("'events' cannot be null.")); } final Context finalContext = context != null ? context : Context.NONE; return Flux.fromIterable(events) .map(event -> { com.azure.messaging.eventgrid.implementation.models.EventGridEvent internalEvent = event.toImpl(); if (this.eventDataSerializer != null && internalEvent.getData() != null) { ByteArrayOutputStream bos = new ByteArrayOutputStream(); eventDataSerializer.serialize(bos, event.getData()); internalEvent.setData(Base64.getEncoder().encode(bos.toByteArray())); } return internalEvent; }) .collectList() .flatMap(list -> this.impl.publishEventsAsync(this.hostname, list, finalContext.addData(AZ_TRACING_NAMESPACE_KEY, Constants.EVENT_GRID_TRACING_NAMESPACE_VALUE))); } Mono<Void> sendCloudEvents(Iterable<CloudEvent> events, Context context) { if (events == null) { return monoError(logger, new NullPointerException("'events' cannot be null.")); } final Context finalContext = context != null ? 
context : Context.NONE; this.addCloudEventTracePlaceHolder(events); return Flux.fromIterable(events) .map(event -> { com.azure.messaging.eventgrid.implementation.models.CloudEvent internalEvent = event.toImpl(); if (this.eventDataSerializer != null && internalEvent.getData() != null) { ByteArrayOutputStream bos = new ByteArrayOutputStream(); eventDataSerializer.serialize(bos, event.getData()); internalEvent.setData(Base64.getEncoder().encode(bos.toByteArray())); } return internalEvent; }) .collectList() .flatMap(list -> this.impl.publishCloudEventEventsAsync(this.hostname, list, finalContext.addData(AZ_TRACING_NAMESPACE_KEY, Constants.EVENT_GRID_TRACING_NAMESPACE_VALUE))); } Mono<Void> sendCustomEvents(Iterable<Object> events, Context context) { if (events == null) { return monoError(logger, new NullPointerException("'events' cannot be null.")); } final Context finalContext = context != null ? context : Context.NONE; return Flux.fromIterable(events) .collectList() .flatMap(list -> this.impl.publishCustomEventEventsAsync(this.hostname, list, finalContext.addData(AZ_TRACING_NAMESPACE_KEY, Constants.EVENT_GRID_TRACING_NAMESPACE_VALUE))); } Mono<Response<Void>> sendEventGridEventsWithResponse(Iterable<EventGridEvent> events, Context context) { if (events == null) { return monoError(logger, new NullPointerException("'events' cannot be null.")); } final Context finalContext = context != null ? context : Context.NONE; return Flux.fromIterable(events) .map(EventGridEvent::toImpl) .collectList() .flatMap(list -> this.impl.publishEventsWithResponseAsync(this.hostname, list, finalContext.addData(AZ_TRACING_NAMESPACE_KEY, Constants.EVENT_GRID_TRACING_NAMESPACE_VALUE))); } Mono<Response<Void>> sendCloudEventsWithResponse(Iterable<CloudEvent> events, Context context) { if (events == null) { return monoError(logger, new NullPointerException("'events' cannot be null.")); } final Context finalContext = context != null ? 
context : Context.NONE; this.addCloudEventTracePlaceHolder(events); return Flux.fromIterable(events) .map(CloudEvent::toImpl) .collectList() .flatMap(list -> this.impl.publishCloudEventEventsWithResponseAsync(this.hostname, list, finalContext.addData(AZ_TRACING_NAMESPACE_KEY, Constants.EVENT_GRID_TRACING_NAMESPACE_VALUE))); } Mono<Response<Void>> sendCustomEventsWithResponse(Iterable<Object> events, Context context) { if (events == null) { return monoError(logger, new NullPointerException("'events' cannot be null.")); } final Context finalContext = context != null ? context : Context.NONE; return Flux.fromIterable(events) .collectList() .flatMap(list -> this.impl.publishCustomEventEventsWithResponseAsync(this.hostname, list, finalContext.addData(AZ_TRACING_NAMESPACE_KEY, Constants.EVENT_GRID_TRACING_NAMESPACE_VALUE))); } private void addCloudEventTracePlaceHolder(Iterable<CloudEvent> events) { if (TracerProxy.isTracingEnabled()) { for (CloudEvent event : events) { if (event.getExtensionAttributes() == null || (event.getExtensionAttributes().get(Constants.TRACE_PARENT) == null && event.getExtensionAttributes().get(Constants.TRACE_STATE) == null)) { event.addExtensionAttribute(Constants.TRACE_PARENT, Constants.TRACE_PARENT_PLACEHOLDER_UUID); event.addExtensionAttribute(Constants.TRACE_STATE, Constants.TRACE_STATE_PLACEHOLDER_UUID); } } } } }
nit: I don't think we need to use `Flux` here; instead, we can use `Arrays.stream` and change some of the downstream logic. Using `Flux` only to eventually block always gives me concerns, as it could lead to an exception being thrown if the underlying thread calling the code is a non-blocking reactive thread.
private String extractAccountEndpoint(URI uri) { if (!uri.getScheme().equals(this.getScheme())) { throw LoggingUtility.logError(this.logger, new IllegalArgumentException( "URI scheme does not match this provider")); } if (CoreUtils.isNullOrEmpty(uri.getQuery())) { throw LoggingUtility.logError(this.logger, new IllegalArgumentException("URI does not contain a query " + "component. FileSystems require a URI of the format \"azb: } String endpoint = Flux.fromArray(uri.getQuery().split("&")) .filter(s -> s.startsWith(ENDPOINT_QUERY_KEY + "=")) .switchIfEmpty(Mono.error(LoggingUtility.logError(this.logger, new IllegalArgumentException( "URI does not contain an \"" + ENDPOINT_QUERY_KEY + "=\" parameter. FileSystems require a URI " + "of the format \"azb: .map(s -> s.substring(ENDPOINT_QUERY_KEY.length() + 1)) .blockLast(); if (CoreUtils.isNullOrEmpty(endpoint)) { throw LoggingUtility.logError(logger, new IllegalArgumentException("No account endpoint provided in URI" + " query.")); } return endpoint; }
String endpoint = Flux.fromArray(uri.getQuery().split("&"))
private String extractAccountEndpoint(URI uri) { if (!uri.getScheme().equals(this.getScheme())) { throw LoggingUtility.logError(this.logger, new IllegalArgumentException( "URI scheme does not match this provider")); } if (CoreUtils.isNullOrEmpty(uri.getQuery())) { throw LoggingUtility.logError(this.logger, new IllegalArgumentException("URI does not contain a query " + "component. FileSystems require a URI of the format \"azb: } String endpoint = Flux.fromArray(uri.getQuery().split("&")) .filter(s -> s.startsWith(ENDPOINT_QUERY_KEY + "=")) .switchIfEmpty(Mono.error(LoggingUtility.logError(this.logger, new IllegalArgumentException( "URI does not contain an \"" + ENDPOINT_QUERY_KEY + "=\" parameter. FileSystems require a URI " + "of the format \"azb: .map(s -> s.substring(ENDPOINT_QUERY_KEY.length() + 1)) .blockLast(); if (CoreUtils.isNullOrEmpty(endpoint)) { throw LoggingUtility.logError(logger, new IllegalArgumentException("No account endpoint provided in URI" + " query.")); } return endpoint; }
class || type == AzureBasicFileAttributes.class) { view = AzureBasicFileAttributeView.class; }
class || type == AzureBasicFileAttributes.class) { view = AzureBasicFileAttributeView.class; }
For debuggability, do you think we could capture all `ENDPOINT_QUERY_KEY` occurrences in the query parameters and, if there is more than one, log an informational message indicating that the last value will be selected? That way, if someone runs into an issue with our decision, they have breadcrumbs toward a resolution.
private String extractAccountEndpoint(URI uri) { if (!uri.getScheme().equals(this.getScheme())) { throw LoggingUtility.logError(this.logger, new IllegalArgumentException( "URI scheme does not match this provider")); } if (CoreUtils.isNullOrEmpty(uri.getQuery())) { throw LoggingUtility.logError(this.logger, new IllegalArgumentException("URI does not contain a query " + "component. FileSystems require a URI of the format \"azb: } String endpoint = Flux.fromArray(uri.getQuery().split("&")) .filter(s -> s.startsWith(ENDPOINT_QUERY_KEY + "=")) .switchIfEmpty(Mono.error(LoggingUtility.logError(this.logger, new IllegalArgumentException( "URI does not contain an \"" + ENDPOINT_QUERY_KEY + "=\" parameter. FileSystems require a URI " + "of the format \"azb: .map(s -> s.substring(ENDPOINT_QUERY_KEY.length() + 1)) .blockLast(); if (CoreUtils.isNullOrEmpty(endpoint)) { throw LoggingUtility.logError(logger, new IllegalArgumentException("No account endpoint provided in URI" + " query.")); } return endpoint; }
.map(s -> s.substring(ENDPOINT_QUERY_KEY.length() + 1))
private String extractAccountEndpoint(URI uri) { if (!uri.getScheme().equals(this.getScheme())) { throw LoggingUtility.logError(this.logger, new IllegalArgumentException( "URI scheme does not match this provider")); } if (CoreUtils.isNullOrEmpty(uri.getQuery())) { throw LoggingUtility.logError(this.logger, new IllegalArgumentException("URI does not contain a query " + "component. FileSystems require a URI of the format \"azb: } String endpoint = Flux.fromArray(uri.getQuery().split("&")) .filter(s -> s.startsWith(ENDPOINT_QUERY_KEY + "=")) .switchIfEmpty(Mono.error(LoggingUtility.logError(this.logger, new IllegalArgumentException( "URI does not contain an \"" + ENDPOINT_QUERY_KEY + "=\" parameter. FileSystems require a URI " + "of the format \"azb: .map(s -> s.substring(ENDPOINT_QUERY_KEY.length() + 1)) .blockLast(); if (CoreUtils.isNullOrEmpty(endpoint)) { throw LoggingUtility.logError(logger, new IllegalArgumentException("No account endpoint provided in URI" + " query.")); } return endpoint; }
class || type == AzureBasicFileAttributes.class) { view = AzureBasicFileAttributeView.class; }
class || type == AzureBasicFileAttributes.class) { view = AzureBasicFileAttributeView.class; }
Let's turn this into a constant so we don't create and concatenate strings every time this path is triggered.
private String extractAccountEndpoint(URI uri) { if (!uri.getScheme().equals(this.getScheme())) { throw LoggingUtility.logError(this.logger, new IllegalArgumentException( "URI scheme does not match this provider")); } if (CoreUtils.isNullOrEmpty(uri.getQuery())) { throw LoggingUtility.logError(this.logger, new IllegalArgumentException("URI does not contain a query " + "component. FileSystems require a URI of the format \"azb: } String endpoint = Flux.fromArray(uri.getQuery().split("&")) .filter(s -> s.startsWith(ENDPOINT_QUERY_KEY + "=")) .switchIfEmpty(Mono.error(LoggingUtility.logError(this.logger, new IllegalArgumentException( "URI does not contain an \"" + ENDPOINT_QUERY_KEY + "=\" parameter. FileSystems require a URI " + "of the format \"azb: .map(s -> s.substring(ENDPOINT_QUERY_KEY.length() + 1)) .blockLast(); if (CoreUtils.isNullOrEmpty(endpoint)) { throw LoggingUtility.logError(logger, new IllegalArgumentException("No account endpoint provided in URI" + " query.")); } return endpoint; }
+ "of the format \"azb:
private String extractAccountEndpoint(URI uri) { if (!uri.getScheme().equals(this.getScheme())) { throw LoggingUtility.logError(this.logger, new IllegalArgumentException( "URI scheme does not match this provider")); } if (CoreUtils.isNullOrEmpty(uri.getQuery())) { throw LoggingUtility.logError(this.logger, new IllegalArgumentException("URI does not contain a query " + "component. FileSystems require a URI of the format \"azb: } String endpoint = Flux.fromArray(uri.getQuery().split("&")) .filter(s -> s.startsWith(ENDPOINT_QUERY_KEY + "=")) .switchIfEmpty(Mono.error(LoggingUtility.logError(this.logger, new IllegalArgumentException( "URI does not contain an \"" + ENDPOINT_QUERY_KEY + "=\" parameter. FileSystems require a URI " + "of the format \"azb: .map(s -> s.substring(ENDPOINT_QUERY_KEY.length() + 1)) .blockLast(); if (CoreUtils.isNullOrEmpty(endpoint)) { throw LoggingUtility.logError(logger, new IllegalArgumentException("No account endpoint provided in URI" + " query.")); } return endpoint; }
class || type == AzureBasicFileAttributes.class) { view = AzureBasicFileAttributeView.class; }
class || type == AzureBasicFileAttributes.class) { view = AzureBasicFileAttributeView.class; }
Does any additional logic need to change here to prevent two endpoints pointing to the same account but using different styles or containing additional information such as query params?
public FileSystem newFileSystem(URI uri, Map<String, ?> config) throws IOException { String endpoint = extractAccountEndpoint(uri); if (this.openFileSystems.containsKey(endpoint)) { throw LoggingUtility.logError(this.logger, new FileSystemAlreadyExistsException("Name: " + endpoint)); } AzureFileSystem afs = new AzureFileSystem(this, endpoint, config); this.openFileSystems.put(endpoint, afs); return afs; }
if (this.openFileSystems.containsKey(endpoint)) {
public FileSystem newFileSystem(URI uri, Map<String, ?> config) throws IOException { String endpoint = extractAccountEndpoint(uri); if (this.openFileSystems.containsKey(endpoint)) { throw LoggingUtility.logError(this.logger, new FileSystemAlreadyExistsException("Name: " + endpoint)); } AzureFileSystem afs = new AzureFileSystem(this, endpoint, config); this.openFileSystems.put(endpoint, afs); return afs; }
class AzureFileSystemProvider extends FileSystemProvider { private final ClientLogger logger = new ClientLogger(AzureFileSystemProvider.class); /** * A helper for setting the HTTP properties when creating a directory. */ public static final String CONTENT_TYPE = "Content-Type"; /** * A helper for setting the HTTP properties when creating a directory. */ public static final String CONTENT_DISPOSITION = "Content-Disposition"; /** * A helper for setting the HTTP properties when creating a directory. */ public static final String CONTENT_LANGUAGE = "Content-Language"; /** * A helper for setting the HTTP properties when creating a directory. */ public static final String CONTENT_ENCODING = "Content-Encoding"; /** * A helper for setting the HTTP properties when creating a directory. */ public static final String CONTENT_MD5 = "Content-MD5"; /** * A helper for setting the HTTP properties when creating a directory. */ public static final String CACHE_CONTROL = "Cache-Control"; private static final String ENDPOINT_QUERY_KEY = "endpoint"; private static final int COPY_TIMEOUT_SECONDS = 30; private static final Set<OpenOption> OUTPUT_STREAM_DEFAULT_OPTIONS = Collections.unmodifiableSet(new HashSet<>(Arrays.asList(StandardOpenOption.CREATE, StandardOpenOption.WRITE, StandardOpenOption.TRUNCATE_EXISTING))); private static final Set<OpenOption> OUTPUT_STREAM_SUPPORTED_OPTIONS = Collections.unmodifiableSet(new HashSet<>(Arrays.asList( StandardOpenOption.CREATE_NEW, StandardOpenOption.CREATE, StandardOpenOption.WRITE, StandardOpenOption.TRUNCATE_EXISTING))); private final ConcurrentMap<String, FileSystem> openFileSystems; /** * Creates an AzureFileSystemProvider. */ public AzureFileSystemProvider() { this.openFileSystems = new ConcurrentHashMap<>(); } /** * Returns the URI scheme that identifies this provider: {@code "azb".} * * @return {@code "azb"} */ @Override public String getScheme() { return "azb"; } /** * Constructs a new FileSystem object identified by a URI. 
* <p> * The format of a {@code URI} identifying a file system is {@code "azb: * <p> * Once closed, a file system with the same identifier may be reopened. * * @param uri URI reference * @param config A map of provider specific properties to configure the file system * @return a new file system. * @throws IllegalArgumentException If the pre-conditions for the uri parameter aren't met, or the env parameter * does not contain properties required by the provider, or a property value is invalid. * @throws IOException If an I/O error occurs. * @throws SecurityException never * @throws FileSystemAlreadyExistsException If the file system has already been created. */ @Override /** * Returns an existing FileSystem created by this provider. * <p> * The format of a {@code URI} identifying an file system is {@code "azb: * <p> * Trying to retrieve a closed file system will throw a {@link FileSystemNotFoundException}. Once closed, a * file system with the same identifier may be reopened. * * @param uri URI reference * @return the file system * @throws IllegalArgumentException If the pre-conditions for the uri parameter aren't met * @throws FileSystemNotFoundException If the file system already exists * @throws SecurityException never */ @Override public FileSystem getFileSystem(URI uri) { String endpoint = extractAccountEndpoint(uri); if (!this.openFileSystems.containsKey(endpoint)) { throw LoggingUtility.logError(this.logger, new FileSystemNotFoundException("Name: " + endpoint)); } return this.openFileSystems.get(endpoint); } /** * Return a Path object by converting the given URI. The resulting Path is associated with a FileSystem that already * exists. * * @param uri The URI to convert * @return The path identified by the URI. 
* @throws IllegalArgumentException If the URI scheme does not identify this provider or other preconditions on the * uri parameter do not hold * @throws FileSystemNotFoundException if the file system identified by the query does not exist * @throws SecurityException never * * @see */ @Override public Path getPath(URI uri) { return getFileSystem(uri).getPath(uri.getPath()); } /** * Opens or creates a file, returning a seekable byte channel to access the file. * <p> * This method is primarily offered to support some jdk convenience methods such as * {@link Files * only be opened in read mode OR write mode. It may not be opened in read/write mode. Seeking is supported for * reads, but not for writes. Modifications to existing files is not permitted--only creating new files or * overwriting existing files. * <p> * This type is not threadsafe to prevent having to hold locks across network calls. * <p> * * @param path the path of the file to open * @param set options specifying how the file should be opened * @param fileAttributes an optional list of file attributes to set atomically when creating the directory * @return a new seekable byte channel * @throws UnsupportedOperationException Operation is not supported. * @throws IllegalArgumentException if the set contains an invalid combination of options * @throws FileAlreadyExistsException if a file of that name already exists and the CREATE_NEW option is specified * (optional specific exception) * @throws IOException If an I/O error occurs. * @throws SecurityException never */ @Override public SeekableByteChannel newByteChannel(Path path, Set<? extends OpenOption> set, FileAttribute<?>... 
fileAttributes) throws IOException { if (Objects.isNull(set)) { set = Collections.emptySet(); } if (set.contains(StandardOpenOption.WRITE)) { return new AzureSeekableByteChannel( (NioBlobOutputStream) this.newOutputStreamInternal(path, set, fileAttributes), path); } else { return new AzureSeekableByteChannel( (NioBlobInputStream) this.newInputStream(path, set.toArray(new OpenOption[0])), path); } } /** * Opens an {@link InputStream} to the given path. * <p> * The stream will not attempt to read or buffer the entire file. However, when fetching data, it will always * request the same size chunk of several MB to prevent network thrashing on small reads. Mark and reset are * supported. * <p> * Only {@link StandardOpenOption * * @param path the path to the file to open * @param options options specifying how the file is opened * @return a new input stream * @throws IllegalArgumentException if an invalid combination of options is specified * @throws UnsupportedOperationException if an unsupported option is specified * @throws IOException If an I/O error occurs. * @throws SecurityException never */ @Override public InputStream newInputStream(Path path, OpenOption... options) throws IOException { if (options.length > 1 || (options.length > 0 && !options[0].equals(StandardOpenOption.READ))) { throw LoggingUtility.logError(logger, new UnsupportedOperationException("Only the read option is supported.")); } AzureResource resource = new AzureResource(path); AzurePath.ensureFileSystemOpen(resource.getPath()); if (!resource.checkDirStatus().equals(DirectoryStatus.NOT_A_DIRECTORY)) { throw LoggingUtility.logError(logger, new IOException("Path either does not exist or points to a directory." + "Path must point to a file. Path: " + path.toString())); } return new NioBlobInputStream(resource.getBlobClient().openInputStream(), resource.getPath()); } /** * Opens an {@link OutputStream} to the given path. The resulting file will be stored as a block blob. 
* <p> * The only supported options are {@link StandardOpenOption * {@link StandardOpenOption * {@link UnsupportedOperationException}. {@code WRITE} and {@code TRUNCATE_EXISTING} must be specified or an * {@link IllegalArgumentException} will be thrown. Hence, files cannot be updated, only overwritten completely. * <p> * This stream will not attempt to buffer the entire file, however some buffering will be done for potential * optimizations and to avoid network thrashing. Specifically, up to * {@link AzureFileSystem * exceeded, the data will be broken into chunks and sent in blocks, and writes will be buffered into sizes of * {@link AzureFileSystem * allocated is defined by {@link AzureFileSystem * the level of parallelism with which we may write and thus may affect write speeds as well. * <p> * The data is only committed when the steam is closed. Hence data cannot be read from the destination until the * stream is closed. When the close method returns, it is guaranteed that, barring any errors, the data is finalized * and available for reading. * <p> * Writing happens asynchronously. Bytes passed for writing are stored until either the threshold or block size are * met at which time they are sent to the service. When the write method returns, there is no guarantee about which * phase of this process the data is in other than it has been accepted and will be written. Again, closing will * guarantee that the data is written and available. * <p> * Flush is a no-op as regards data transfers, but it can be used to check the state of the stream for errors. * This can be a useful tool because writing happens asynchronously, and therefore an error from a previous write * may not otherwise be thrown unless the stream is flushed, closed, or written to again. 
* * @param path the path to the file to open or create * @param options options specifying how the file is opened * @return a new output stream * @throws IllegalArgumentException if an invalid combination of options is specified * @throws UnsupportedOperationException if an unsupported option is specified * @throws IOException If an I/O error occurs. * @throws SecurityException never */ @Override public OutputStream newOutputStream(Path path, OpenOption... options) throws IOException { return newOutputStreamInternal(path, new HashSet<>(Arrays.asList(options))); } OutputStream newOutputStreamInternal(Path path, Set<? extends OpenOption> optionsSet, FileAttribute<?>... fileAttributes) throws IOException { if (optionsSet == null || optionsSet.size() == 0) { optionsSet = OUTPUT_STREAM_DEFAULT_OPTIONS; } for (OpenOption option : optionsSet) { if (!OUTPUT_STREAM_SUPPORTED_OPTIONS.contains(option)) { throw LoggingUtility.logError(logger, new UnsupportedOperationException("Unsupported option: " + option.toString())); } } /* Write must be specified. Either create_new or truncate must be specified. This is to ensure that no edits or appends are allowed. */ if (!optionsSet.contains(StandardOpenOption.WRITE) || !(optionsSet.contains(StandardOpenOption.TRUNCATE_EXISTING) || optionsSet.contains(StandardOpenOption.CREATE_NEW))) { throw LoggingUtility.logError(logger, new IllegalArgumentException("Write and either CreateNew or TruncateExisting must be specified to open " + "an OutputStream")); } AzureResource resource = new AzureResource(path); AzurePath.ensureFileSystemOpen(resource.getPath()); DirectoryStatus status = resource.checkDirStatus(); if (DirectoryStatus.isDirectory(status)) { throw LoggingUtility.logError(logger, new IOException("Cannot open an OutputStream to a directory. 
Path: " + path.toString())); } if (status.equals(DirectoryStatus.DOES_NOT_EXIST) && !(optionsSet.contains(StandardOpenOption.CREATE) || optionsSet.contains(StandardOpenOption.CREATE_NEW))) { throw LoggingUtility.logError(logger, new IOException("Writing to an empty location requires a create " + "option. Path: " + path.toString())); } if (status.equals(DirectoryStatus.NOT_A_DIRECTORY) && optionsSet.contains(StandardOpenOption.CREATE_NEW)) { throw LoggingUtility.logError(logger, new IOException("A file already exists at this location and " + "CREATE_NEW was specified. Path: " + path.toString())); } AzureFileSystem fs = (AzureFileSystem) (path.getFileSystem()); Integer blockSize = fs.getBlockSize() == null ? null : fs.getBlockSize().intValue(); Integer putBlobThreshold = fs.getPutBlobThreshold() == null ? null : fs.getPutBlobThreshold().intValue(); ParallelTransferOptions pto = new ParallelTransferOptions(blockSize, fs.getMaxConcurrencyPerRequest(), null, putBlobThreshold); BlobRequestConditions rq = null; if (optionsSet.contains(StandardOpenOption.CREATE_NEW)) { rq = new BlobRequestConditions().setIfNoneMatch("*"); } if (fileAttributes == null) { fileAttributes = new FileAttribute<?>[0]; } resource.setFileAttributes(Arrays.asList(fileAttributes)); return new NioBlobOutputStream(resource.getBlobOutputStream(pto, rq), resource.getPath()); } /** * Returns an {@link AzureDirectoryStream} for iterating over the contents of a directory. The elements returned by * the directory stream's iterator are of type Path, each one representing an entry in the directory. The Path * objects are obtained as if by resolving the name of the directory entry against dir. The entries returned by the * iterator are filtered by the given filter. * <p> * When not using the try-with-resources construct, then directory stream's close method should be invoked after * iteration is completed so as to free any resources held for the open directory. 
* <p> * Where the filter terminates due to an uncaught error or runtime exception then it is propagated to the hasNext or * next method. Where an IOException is thrown, it results in the hasNext or next method throwing a * DirectoryIteratorException with the IOException as the cause. * * @param path the path to the directory * @param filter the directory stream filter * @return a new and open {@code DirectoryStream} object * @throws IllegalArgumentException If the path type is not an instance of {@link AzurePath}. * @throws NotDirectoryException if the file could not otherwise be opened because it is not a directory * @throws IOException If an I/O error occurs. * @throws SecurityException never */ @Override public DirectoryStream<Path> newDirectoryStream(Path path, DirectoryStream.Filter<? super Path> filter) throws IOException { if (!(path instanceof AzurePath)) { throw LoggingUtility.logError(logger, new IllegalArgumentException("This provider cannot operate on " + "subtypes of Path other than AzurePath")); } AzurePath.ensureFileSystemOpen(path); /* Ensure the path is a directory. Note that roots are always directories. The case of an invalid root will be caught in instatiating the stream below. Possible optimization later is to save the result of the list call to use as the first list call inside the stream rather than a list call for checking the status and a list call for listing. */ if (!((AzurePath) path).isRoot() && !(new AzureResource(path).checkDirectoryExists())) { throw LoggingUtility.logError(logger, new NotDirectoryException(path.toString())); } return new AzureDirectoryStream((AzurePath) path, filter); } /** * Creates a new directory at the specified path. * <p> * The existence of a directory in the {@code AzureFileSystem} is defined on two levels. <i>Weak existence</i> is * defined by the presence of a non-zero number of blobs prefixed with the directory's path. 
This concept is also * known as a <i>virtual directory</i> and enables the file system to work with containers that were pre-loaded * with data by another source but need to be accessed by this file system. <i>Strong existence</i> is defined as * the presence of an actual storage resource at the given path, which in the case of directories, is a zero-length * blob whose name is the directory path with a particular metadata field indicating the blob's status as a * directory. This is also known as a <i>concrete directory</i>. Directories created by this file system will * strongly exist. Operations targeting directories themselves as the object (e.g. setting properties) will target * marker blobs underlying concrete directories. Other operations (e.g. listing) will operate on the blob-name * prefix. * <p> * This method fulfills the nio contract of: "The check for the existence of the file and the creation of the * directory if it does not exist are a single operation that is atomic with respect to all other filesystem * activities that might affect the directory." More specifically, this method will atomically check for <i>strong * existence</i> of another file or directory at the given path and fail if one is present. On the other hand, we * only check for <i>weak existence</i> of the parent to determine if the given path is valid. Additionally, the * action of checking whether the parent exists, is <i>not</i> atomic with the creation of the directory. Note that * while it is possible that the parent may be deleted between when the parent is determined to exist and the * creation of the child, the creation of the child will always ensure the existence of a virtual parent, so the * child will never be left floating and unreachable. The different checks on parent and child is due to limitations * in the Storage service API. 
 * <p>
 * There may be some unintuitive behavior when working with directories in this file system, particularly virtual
 * directories (usually those not created by this file system). A virtual directory will disappear as soon as all
 * its children have been deleted. Furthermore, if a directory with the given path weakly exists at the time of
 * calling this method, this method will still return success and create a concrete directory at the target
 * location. In other words, it is possible to "double create" a directory if it first weakly exists and then is
 * strongly created. This is both because it is impossible to atomically check if a virtual directory exists while
 * creating a concrete directory and because such behavior will have minimal side effects--no files will be
 * overwritten and the directory will still be available for writing as intended, though it may not be empty. This
 * is not a complete list of such unintuitive behavior.
 * <p>
 * This method will attempt to extract standard HTTP content headers from the list of file attributes to set them
 * as blob headers. All other attributes will be set as blob metadata. The value of every attribute will be
 * converted to a {@code String} with the exception of the Content-MD5 attribute which expects a {@code byte[]}.
 * When extracting the content headers, the following strings will be used for comparison (constants for these
 * values can be found on this type):
 * <ul>
 * <li>{@code Content-Type}</li>
 * <li>{@code Content-Disposition}</li>
 * <li>{@code Content-Language}</li>
 * <li>{@code Content-Encoding}</li>
 * <li>{@code Content-MD5}</li>
 * <li>{@code Cache-Control}</li>
 * </ul>
 * Note that these properties also have a particular semantic in that if one is specified, all are updated. In other
 * words, if any of the above is set, all those that are not set will be cleared. See the
 * <a href="https://docs.microsoft.com/rest/api/storageservices/set-blob-properties">Azure Docs</a> for more
 * information.
* * @param path the directory to create * @param fileAttributes an optional list of file attributes to set atomically when creating the directory * @throws IllegalArgumentException If the path type is not an instance of {@link AzurePath}. * @throws UnsupportedOperationException if the array contains an attribute that cannot be set atomically when * creating the directory * @throws FileAlreadyExistsException if a directory could not otherwise be created because a file of that name * already exists * @throws IOException If an I/O error occurs. * @throws SecurityException never */ @Override public void createDirectory(Path path, FileAttribute<?>... fileAttributes) throws IOException { fileAttributes = fileAttributes == null ? new FileAttribute<?>[0] : fileAttributes; AzureResource azureResource = new AzureResource(path); AzurePath.ensureFileSystemOpen(azureResource.getPath()); if (azureResource.checkParentDirectoryExists()) { try { azureResource.setFileAttributes(Arrays.asList(fileAttributes)) .putDirectoryBlob(new BlobRequestConditions().setIfNoneMatch("*")); } catch (BlobStorageException e) { if (e.getStatusCode() == HttpURLConnection.HTTP_CONFLICT && e.getErrorCode().equals(BlobErrorCode.BLOB_ALREADY_EXISTS)) { throw LoggingUtility.logError(logger, new FileAlreadyExistsException(azureResource.getPath().toString())); } else { throw LoggingUtility.logError(logger, new IOException("An error occurred when creating the directory", e)); } } } else { throw LoggingUtility.logError(logger, new IOException("Parent directory does not exist for path: " + azureResource.getPath().toString())); } } /** * Deletes the specified resource. * <p> * This method is not atomic with respect to other file system operations. It is possible to delete a file in use by * another process, and doing so will not immediately invalidate any channels open to that file--they will simply * start to fail. Root directories cannot be deleted even when empty. 
* * @param path the path to the file to delete * @throws IllegalArgumentException If the path type is not an instance of {@link AzurePath}. * @throws NoSuchFileException if the file does not exist * @throws DirectoryNotEmptyException if the file is a directory and could not otherwise be deleted because the * directory is not empty * @throws IOException If an I/O error occurs. * @throws SecurityException never */ @Override public void delete(Path path) throws IOException { AzureResource azureResource = new AzureResource(path); AzurePath.ensureFileSystemOpen(azureResource.getPath()); DirectoryStatus dirStatus = azureResource.checkDirStatus(); if (dirStatus.equals(DirectoryStatus.DOES_NOT_EXIST)) { throw LoggingUtility.logError(logger, new NoSuchFileException(path.toString())); } if (dirStatus.equals(DirectoryStatus.NOT_EMPTY)) { throw LoggingUtility.logError(logger, new DirectoryNotEmptyException(path.toString())); } try { azureResource.getBlobClient().delete(); } catch (BlobStorageException e) { if (e.getErrorCode().equals(BlobErrorCode.BLOB_NOT_FOUND)) { throw LoggingUtility.logError(logger, new NoSuchFileException(path.toString())); } throw LoggingUtility.logError(logger, new IOException(e)); } } /** * Copies the resource at the source location to the destination. * <p> * This method is not atomic with respect to other file system operations. More specifically, the checks necessary * to validate the inputs and state of the file system are not atomic with the actual copying of data. If the copy * is triggered, the copy itself is atomic and only a complete copy will ever be left at the destination. * <p> * In addition to those in the docs for {@link FileSystemProvider * the following requirements for successful completion. {@link StandardCopyOption * as it is impossible not to copy blob properties; if this option is not passed, an * {@link UnsupportedOperationException} will be thrown. 
Neither the source nor the destination can be a root * directory; if either is a root directory, an {@link IllegalArgumentException} will be thrown. The parent * directory of the destination must at least weakly exist; if it does not, an {@link IOException} will be thrown. * The only supported option other than {@link StandardCopyOption * {@link StandardCopyOption * {@link UnsupportedOperationException}. * <p> * This method supports both virtual and concrete directories as both the source and destination. Unlike when * creating a directory, the existence of a virtual directory at the destination will cause this operation to fail. * This is in order to prevent the possibility of overwriting a non-empty virtual directory with a file. Still, as * mentioned above, this check is not atomic with the creation of the resultant directory. * * @param source the path to the file to copy * @param destination the path to the target file * @param copyOptions specifying how the copy should be done * @throws UnsupportedOperationException if the array contains a copy option that is not supported * @throws FileAlreadyExistsException if the target file exists but cannot be replaced because the REPLACE_EXISTING * option is not specified * @throws DirectoryNotEmptyException the REPLACE_EXISTING option is specified but the file cannot be replaced * because it is a non-empty directory * @throws IOException If an I/O error occurs. * @throws IllegalArgumentException If the path type is not an instance of {@link AzurePath}. * @throws SecurityException never * @see */ @Override public void copy(Path source, Path destination, CopyOption... 
copyOptions) throws IOException { if (source.equals(destination)) { return; } boolean replaceExisting = false; List<CopyOption> optionsList = new ArrayList<>(Arrays.asList(copyOptions)); if (!optionsList.contains(StandardCopyOption.COPY_ATTRIBUTES)) { throw LoggingUtility.logError(logger, new UnsupportedOperationException( "StandardCopyOption.COPY_ATTRIBUTES must be specified as the service will always copy " + "file attributes.")); } optionsList.remove(StandardCopyOption.COPY_ATTRIBUTES); if (optionsList.contains(StandardCopyOption.REPLACE_EXISTING)) { replaceExisting = true; optionsList.remove(StandardCopyOption.REPLACE_EXISTING); } if (!optionsList.isEmpty()) { throw LoggingUtility.logError(logger, new UnsupportedOperationException("Unsupported copy option found. " + "Only StandardCopyOption.COPY_ATTRIBUTES and StandareCopyOption.REPLACE_EXISTING are supported.")); } AzureResource sourceRes = new AzureResource(source); AzurePath.ensureFileSystemOpen(sourceRes.getPath()); AzureResource destinationRes = new AzureResource(destination); AzurePath.ensureFileSystemOpen(destinationRes.getPath()); DirectoryStatus destinationStatus = destinationRes.checkDirStatus(); if (destinationStatus.equals(DirectoryStatus.NOT_EMPTY)) { throw LoggingUtility.logError(logger, new DirectoryNotEmptyException(destination.toString())); } /* Set request conditions if we should not overwrite. We can error out here if we know something already exists, but we will also create request conditions as a safeguard against overwriting something that was created between our check and put. */ BlobRequestConditions requestConditions = null; if (!replaceExisting) { if (!destinationStatus.equals(DirectoryStatus.DOES_NOT_EXIST)) { throw LoggingUtility.logError(logger, new FileAlreadyExistsException(destinationRes.getPath().toString())); } requestConditions = new BlobRequestConditions().setIfNoneMatch("*"); } /* More path validation Check that the parent for the destination exists. 
We only need to perform this check if there is nothing currently at the destination, for if the destination exists, its parent at least weakly exists and we can skip a service call. */ if (destinationStatus.equals(DirectoryStatus.DOES_NOT_EXIST) && !destinationRes.checkParentDirectoryExists()) { throw LoggingUtility.logError(logger, new IOException("Parent directory of destination location does not " + "exist. The destination path is therefore invalid. Destination: " + destinationRes.getPath().toString())); } /* Try to copy the resource at the source path. There is an optimization here where we try to do the copy first and only check for a virtual directory if there's a 404. In the cases of files and concrete directories, this only requires one request. For virtual directories, however, this requires three requests: failed copy, check status, create directory. Depending on customer scenarios and how many virtual directories they copy, it could be better to check the directory status first and then do a copy or createDir, which would always be two requests for all resource types. */ try { SyncPoller<BlobCopyInfo, Void> pollResponse = destinationRes.getBlobClient().beginCopy(sourceRes.getBlobClient().getBlobUrl(), null, null, null, null, requestConditions, null); pollResponse.waitForCompletion(Duration.ofSeconds(COPY_TIMEOUT_SECONDS)); } catch (BlobStorageException e) { if (e.getErrorCode().equals(BlobErrorCode.BLOB_NOT_FOUND) && !sourceRes.checkDirStatus().equals(DirectoryStatus.DOES_NOT_EXIST)) { /* We already checked that the parent exists and validated the paths above, so we can put the blob directly. */ destinationRes.putDirectoryBlob(requestConditions); } else { throw LoggingUtility.logError(logger, new IOException(e)); } } catch (RuntimeException e) { throw LoggingUtility.logError(logger, new IOException(e)); } } /*int checkRootDirStatus(BlobContainerClient rootClient) { }*/ /** * Unsupported. 
* * @param path path * @param path1 path * @param copyOptions options * @throws UnsupportedOperationException Operation is not supported. */ @Override public void move(Path path, Path path1, CopyOption... copyOptions) throws IOException { throw LoggingUtility.logError(logger, new UnsupportedOperationException()); } /** * Unsupported. * * @param path path * @param path1 path * @throws UnsupportedOperationException Operation is not supported. */ @Override public boolean isSameFile(Path path, Path path1) throws IOException { throw LoggingUtility.logError(logger, new UnsupportedOperationException()); } /** * Always returns false as hidden files are not supported. * * @param path the path * @return false * @throws IOException If an I/O error occurs. * @throws SecurityException never */ @Override public boolean isHidden(Path path) throws IOException { return false; } /** * Unsupported. * * @param path path * @return the file store where the file is stored. * @throws UnsupportedOperationException Operation is not supported. * @throws IOException If an I/O error occurs. * @throws SecurityException never */ @Override public FileStore getFileStore(Path path) throws IOException { throw LoggingUtility.logError(logger, new UnsupportedOperationException()); } /** * Checks the existence, and optionally the accessibility, of a file. * <p> * This method may only be used to check the existence of a file. It is not possible to determine the permissions * granted to a given client, so if any mode argument is specified, an {@link UnsupportedOperationException} will be * thrown. * * @param path the path to the file to check * @param accessModes The access modes to check; may have zero elements * @throws NoSuchFileException if a file does not exist * @throws java.nio.file.AccessDeniedException the requested access would be denied or the access cannot be * determined because the Java virtual machine has insufficient privileges or other reasons * @throws IOException If an I/O error occurs. 
 * @throws SecurityException never
 */
@Override
public void checkAccess(Path path, AccessMode... accessModes) throws IOException {
    // Permissions cannot be determined for a storage client, so any requested mode is a denial.
    if (accessModes != null && accessModes.length != 0) {
        throw LoggingUtility.logError(logger, new AccessDeniedException("The access cannot be determined."));
    }
    AzurePath.ensureFileSystemOpen(path);
    /*
    Some static utility methods in the jdk require checking access on a root. ReadAttributes is not supported on
    roots as they are containers. Furthermore, we always assume that roots exist as they are verified at creation
    and cannot be deleted by the file system. Thus, we prefer a short circuit for roots.
    */
    if (path instanceof AzurePath && ((AzurePath) path).isRoot()) {
        return;
    }
    try {
        // Existence is verified by attempting to read attributes; the result itself is discarded.
        readAttributes(path, BasicFileAttributes.class);
    } catch (IOException e) {
        Throwable cause = e.getCause();
        // A 404 from the service maps to NoSuchFileException; everything else propagates unchanged.
        if (cause instanceof BlobStorageException
            && BlobErrorCode.BLOB_NOT_FOUND.equals(((BlobStorageException) cause).getErrorCode())) {
            throw LoggingUtility.logError(logger, new NoSuchFileException(path.toString()));
        } else {
            throw LoggingUtility.logError(logger, e);
        }
    }
}

/**
 * Returns a file attribute view of a given type.
 * <p>
 * See {@link AzureBasicFileAttributeView} and {@link AzureBlobFileAttributeView} for more information.
 * <p>
 * Reading or setting attributes on a virtual directory is not supported and will throw an {@link IOException}.
 *
 * @param path the path to the file
 * @param type the Class object corresponding to the file attribute view
 * @param linkOptions ignored
 * @return a file attribute view of the specified type, or null if the attribute view type is not available
 */
@Override
@SuppressWarnings("unchecked")
public <V extends FileAttributeView> V getFileAttributeView(Path path, Class<V> type, LinkOption... linkOptions) {
    /*
    No resource validation is necessary here. That can happen at the time of making a network request internal to
    the view object.
    */
    if (type == BasicFileAttributeView.class || type == AzureBasicFileAttributeView.class) {
        return (V) new AzureBasicFileAttributeView(path);
    } else if (type == AzureBlobFileAttributeView.class) {
        return (V) new AzureBlobFileAttributeView(path);
    } else {
        return null;
    }
}

/**
 * Reads a file's attributes as a bulk operation.
 * <p>
 * See {@link AzureBasicFileAttributes} and {@link AzureBlobFileAttributes} for more information.
 * <p>
 * Reading attributes on a virtual directory is not supported and will throw an {@link IOException}.
 *
 * @param path the path to the file
 * @param type the Class of the file attributes required to read
 * @param linkOptions ignored
 * @return the file attributes
 * @throws UnsupportedOperationException if an attributes of the given type are not supported
 * @throws IOException If an I/O error occurs.
 * @throws SecurityException never
 */
@Override
@SuppressWarnings("unchecked")
public <A extends BasicFileAttributes> A readAttributes(Path path, Class<A> type, LinkOption... linkOptions)
    throws IOException {
    AzurePath.ensureFileSystemOpen(path);

    // Map the requested attributes type to the view that produces it.
    Class<? extends BasicFileAttributeView> view;
    if (type == BasicFileAttributes.class || type == AzureBasicFileAttributes.class) {
        view = AzureBasicFileAttributeView.class;
    } else if (type == AzureBlobFileAttributes.class) {
        view = AzureBlobFileAttributeView.class;
    } else {
        throw LoggingUtility.logError(logger, new UnsupportedOperationException());
    }

    /*
    Resource validation will happen in readAttributes of the view. We don't want to double check, and checking
    internal to the view ensures it is always checked no matter which code path is taken.
    */
    return (A) getFileAttributeView(path, view, linkOptions).readAttributes();
}

/**
 * Reads a set of file attributes as a bulk operation.
 * <p>
 * See {@link AzureBasicFileAttributes} and {@link AzureBlobFileAttributes} for more information.
 * <p>
 * Reading attributes on a virtual directory is not supported and will throw an {@link IOException}.
 * See {@link AzureBlobFileAttributeView} for more information.
 *
 * @param path the path to the file
 * @param attributes the attributes to read
 * @param linkOptions ignored
 * @return a map of the attributes returned; may be empty. The map's keys are the attribute names, its values are
 * the attribute values
 * @throws UnsupportedOperationException if an attributes of the given type are not supported
 * @throws IllegalArgumentException if no attributes are specified or an unrecognized attributes is specified
 * @throws IOException If an I/O error occurs.
 * @throws SecurityException never
 */
@Override
public Map<String, Object> readAttributes(Path path, String attributes, LinkOption... linkOptions)
    throws IOException {
    if (attributes == null) {
        throw LoggingUtility.logError(logger, new IllegalArgumentException("Attribute string cannot be null."));
    }
    AzurePath.ensureFileSystemOpen(path);

    Map<String, Object> results = new HashMap<>();

    /*
    AzureBlobFileAttributes can do everything the basic attributes can do and more. There's no need to instantiate
    one of each if both are specified somewhere in the list as that will waste a network call. This can be
    generified later if we need to add more attribute types, but for now we can stick to just caching the supplier
    for a single attributes object.
    */
    // Lazily initialized inside the loop so a request is only made when an attribute actually has to be resolved.
    Map<String, Supplier<Object>> attributeSuppliers = null;

    // Split the attribute string into "<view>:<comma-separated attributes>"; the view defaults to "basic".
    String viewType;
    String attributeList;
    String[] parts = attributes.split(":");
    if (parts.length > 2) {
        throw LoggingUtility.logError(logger,
            new IllegalArgumentException("Invalid format for attribute string: " + attributes));
    }
    if (parts.length == 1) {
        viewType = "basic";
        attributeList = attributes;
    } else {
        viewType = parts[0];
        attributeList = parts[1];
    }

    /*
    For specificity, our basic implementation of BasicFileAttributes uses the name azureBasic. However, the docs
    state that "basic" must be supported, so we funnel to azureBasic.
    */
    if (viewType.equals("basic")) {
        viewType = AzureBasicFileAttributeView.NAME;
    }
    if (!viewType.equals(AzureBasicFileAttributeView.NAME) && !viewType.equals(AzureBlobFileAttributeView.NAME)) {
        throw LoggingUtility.logError(logger,
            new UnsupportedOperationException("Invalid attribute view: " + viewType));
    }

    for (String attributeName : attributeList.split(",")) {
        /*
        We rely on the azureBlobFAV to actually do the work here as mentioned above, but if basic is specified, we
        should at least validate that the attribute is available on a basic view.
        */
        if (viewType.equals(AzureBasicFileAttributeView.NAME)) {
            if (!AzureBasicFileAttributes.ATTRIBUTE_STRINGS.contains(attributeName) && !attributeName.equals("*")) {
                throw LoggingUtility.logError(logger, new IllegalArgumentException("Invalid attribute. View: "
                    + viewType + ". Attribute: " + attributeName));
            }
        }
        if (attributeSuppliers == null) {
            attributeSuppliers = AzureBlobFileAttributes.getAttributeSuppliers(
                this.readAttributes(path, AzureBlobFileAttributes.class, linkOptions));
        }
        if (attributeName.equals("*")) {
            if (viewType.equals(AzureBasicFileAttributeView.NAME)) {
                // "*" on the basic view expands only to the basic attribute set.
                for (String attr : AzureBasicFileAttributes.ATTRIBUTE_STRINGS) {
                    results.put(attr, attributeSuppliers.get(attr).get());
                }
            } else {
                // "*" on the blob view expands to every available attribute.
                for (Map.Entry<String, Supplier<Object>> entry : attributeSuppliers.entrySet()) {
                    results.put(entry.getKey(), entry.getValue().get());
                }
            }
        } else if (!attributeSuppliers.containsKey(attributeName)) {
            throw LoggingUtility.logError(logger, new IllegalArgumentException("Invalid attribute. View: "
                + viewType + ". Attribute: " + attributeName));
        } else {
            results.put(attributeName, attributeSuppliers.get(attributeName).get());
        }
    }

    // An empty attribute list (e.g. "basic:") yields no results and is rejected here.
    if (results.isEmpty()) {
        throw LoggingUtility.logError(logger,
            new IllegalArgumentException("No attributes were specified. Attributes: " + attributes));
    }
    return results;
}

/**
 * Sets the value of a file attribute.
 * <p>
 * See {@link AzureBlobFileAttributeView} for more information.
* <p> * Setting attributes on a virtual directory is not supported and will throw an {@link IOException}. See * {@link * * @param path the path to the file * @param attributes the attribute to set * @param value the attribute value * @param linkOptions ignored * @throws UnsupportedOperationException if an attribute view is not available * @throws IllegalArgumentException if the attribute name is not specified, or is not recognized, or the attribute * value is of the correct type but has an inappropriate value * @throws ClassCastException If the attribute value is not of the expected type or is a collection containing * elements that are not of the expected type * @throws IOException If an I/O error occurs. * @throws SecurityException never */ @Override public void setAttribute(Path path, String attributes, Object value, LinkOption... linkOptions) throws IOException { AzurePath.ensureFileSystemOpen(path); String viewType; String attributeName; String[] parts = attributes.split(":"); if (parts.length > 2) { throw LoggingUtility.logError(logger, new IllegalArgumentException("Invalid format for attribute string: " + attributes)); } if (parts.length == 1) { viewType = "basic"; attributeName = attributes; } else { viewType = parts[0]; attributeName = parts[1]; } /* For specificity, our basic implementation of BasicFileAttributes uses the name azureBasic. However, the docs state that "basic" must be supported, so we funnel to azureBasic. */ if (viewType.equals("basic")) { viewType = AzureBasicFileAttributeView.NAME; } if (viewType.equals(AzureBasicFileAttributeView.NAME)) { throw LoggingUtility.logError(logger, new IllegalArgumentException("Invalid attribute. View: " + viewType + ". 
Attribute: " + attributeName)); } else if (viewType.equals(AzureBlobFileAttributeView.NAME)) { Map<String, Consumer<Object>> attributeConsumers = AzureBlobFileAttributeView.setAttributeConsumers( this.getFileAttributeView(path, AzureBlobFileAttributeView.class, linkOptions)); if (!attributeConsumers.containsKey(attributeName)) { throw LoggingUtility.logError(logger, new IllegalArgumentException("Invalid attribute. View: " + viewType + ". Attribute: " + attributeName)); } try { attributeConsumers.get(attributeName).accept(value); } catch (UncheckedIOException e) { if (e.getMessage().equals(AzureBlobFileAttributeView.ATTR_CONSUMER_ERROR)) { throw LoggingUtility.logError(logger, e.getCause()); } } } else { throw LoggingUtility.logError(logger, new UnsupportedOperationException("Invalid attribute view: " + viewType)); } } void closeFileSystem(String fileSystemName) { this.openFileSystems.remove(fileSystemName); } private String extractAccountEndpoint(URI uri) { if (!uri.getScheme().equals(this.getScheme())) { throw LoggingUtility.logError(this.logger, new IllegalArgumentException( "URI scheme does not match this provider")); } if (CoreUtils.isNullOrEmpty(uri.getQuery())) { throw LoggingUtility.logError(this.logger, new IllegalArgumentException("URI does not contain a query " + "component. FileSystems require a URI of the format \"azb: } String endpoint = Flux.fromArray(uri.getQuery().split("&")) .filter(s -> s.startsWith(ENDPOINT_QUERY_KEY + "=")) .switchIfEmpty(Mono.error(LoggingUtility.logError(this.logger, new IllegalArgumentException( "URI does not contain an \"" + ENDPOINT_QUERY_KEY + "=\" parameter. FileSystems require a URI " + "of the format \"azb: .map(s -> s.substring(ENDPOINT_QUERY_KEY.length() + 1)) .blockLast(); if (CoreUtils.isNullOrEmpty(endpoint)) { throw LoggingUtility.logError(logger, new IllegalArgumentException("No account endpoint provided in URI" + " query.")); } return endpoint; } }
class AzureFileSystemProvider extends FileSystemProvider {
    private final ClientLogger logger = new ClientLogger(AzureFileSystemProvider.class);

    /**
     * The {@code Content-Type} HTTP header name. A helper for setting the HTTP properties when creating a directory.
     */
    public static final String CONTENT_TYPE = "Content-Type";

    /**
     * The {@code Content-Disposition} HTTP header name. A helper for setting the HTTP properties when creating a
     * directory.
     */
    public static final String CONTENT_DISPOSITION = "Content-Disposition";

    /**
     * The {@code Content-Language} HTTP header name. A helper for setting the HTTP properties when creating a
     * directory.
     */
    public static final String CONTENT_LANGUAGE = "Content-Language";

    /**
     * The {@code Content-Encoding} HTTP header name. A helper for setting the HTTP properties when creating a
     * directory.
     */
    public static final String CONTENT_ENCODING = "Content-Encoding";

    /**
     * The {@code Content-MD5} HTTP header name. A helper for setting the HTTP properties when creating a directory.
     */
    public static final String CONTENT_MD5 = "Content-MD5";

    /**
     * The {@code Cache-Control} HTTP header name. A helper for setting the HTTP properties when creating a directory.
     */
    public static final String CACHE_CONTROL = "Cache-Control";

    // Query-string key under which the account endpoint is passed in a file-system URI.
    private static final String ENDPOINT_QUERY_KEY = "endpoint";
    // Maximum time to wait for a server-side blob copy to complete before failing the copy() call.
    private static final int COPY_TIMEOUT_SECONDS = 30;

    // Options applied by newOutputStreamInternal when the caller supplies none: overwrite-or-create semantics.
    private static final Set<OpenOption> OUTPUT_STREAM_DEFAULT_OPTIONS =
        Collections.unmodifiableSet(new HashSet<>(Arrays.asList(StandardOpenOption.CREATE,
            StandardOpenOption.WRITE, StandardOpenOption.TRUNCATE_EXISTING)));

    // The complete set of options newOutputStreamInternal accepts; anything else is rejected.
    private static final Set<OpenOption> OUTPUT_STREAM_SUPPORTED_OPTIONS =
        Collections.unmodifiableSet(new HashSet<>(Arrays.asList(
            StandardOpenOption.CREATE_NEW,
            StandardOpenOption.CREATE, StandardOpenOption.WRITE, StandardOpenOption.TRUNCATE_EXISTING)));

    // Registry of open file systems keyed by account endpoint; see newFileSystem/getFileSystem/closeFileSystem.
    private final ConcurrentMap<String, FileSystem> openFileSystems;

    /**
     * Creates an AzureFileSystemProvider.
     */
    public AzureFileSystemProvider() {
        this.openFileSystems = new ConcurrentHashMap<>();
    }

    /**
     * Returns the URI scheme that identifies this provider: {@code "azb".}
     *
     * @return {@code "azb"}
     */
    @Override
    public String getScheme() {
        return "azb";
    }

    /**
     * Constructs a new FileSystem object identified by a URI.
* <p> * The format of a {@code URI} identifying a file system is {@code "azb: * <p> * Once closed, a file system with the same identifier may be reopened. * * @param uri URI reference * @param config A map of provider specific properties to configure the file system * @return a new file system. * @throws IllegalArgumentException If the pre-conditions for the uri parameter aren't met, or the env parameter * does not contain properties required by the provider, or a property value is invalid. * @throws IOException If an I/O error occurs. * @throws SecurityException never * @throws FileSystemAlreadyExistsException If the file system has already been created. */ @Override /** * Returns an existing FileSystem created by this provider. * <p> * The format of a {@code URI} identifying an file system is {@code "azb: * <p> * Trying to retrieve a closed file system will throw a {@link FileSystemNotFoundException}. Once closed, a * file system with the same identifier may be reopened. * * @param uri URI reference * @return the file system * @throws IllegalArgumentException If the pre-conditions for the uri parameter aren't met * @throws FileSystemNotFoundException If the file system already exists * @throws SecurityException never */ @Override public FileSystem getFileSystem(URI uri) { String endpoint = extractAccountEndpoint(uri); if (!this.openFileSystems.containsKey(endpoint)) { throw LoggingUtility.logError(this.logger, new FileSystemNotFoundException("Name: " + endpoint)); } return this.openFileSystems.get(endpoint); } /** * Return a Path object by converting the given URI. The resulting Path is associated with a FileSystem that already * exists. * * @param uri The URI to convert * @return The path identified by the URI. 
* @throws IllegalArgumentException If the URI scheme does not identify this provider or other preconditions on the * uri parameter do not hold * @throws FileSystemNotFoundException if the file system identified by the query does not exist * @throws SecurityException never * * @see */ @Override public Path getPath(URI uri) { return getFileSystem(uri).getPath(uri.getPath()); } /** * Opens or creates a file, returning a seekable byte channel to access the file. * <p> * This method is primarily offered to support some jdk convenience methods such as * {@link Files * only be opened in read mode OR write mode. It may not be opened in read/write mode. Seeking is supported for * reads, but not for writes. Modifications to existing files is not permitted--only creating new files or * overwriting existing files. * <p> * This type is not threadsafe to prevent having to hold locks across network calls. * <p> * * @param path the path of the file to open * @param set options specifying how the file should be opened * @param fileAttributes an optional list of file attributes to set atomically when creating the directory * @return a new seekable byte channel * @throws UnsupportedOperationException Operation is not supported. * @throws IllegalArgumentException if the set contains an invalid combination of options * @throws FileAlreadyExistsException if a file of that name already exists and the CREATE_NEW option is specified * (optional specific exception) * @throws IOException If an I/O error occurs. * @throws SecurityException never */ @Override public SeekableByteChannel newByteChannel(Path path, Set<? extends OpenOption> set, FileAttribute<?>... 
fileAttributes) throws IOException { if (Objects.isNull(set)) { set = Collections.emptySet(); } if (set.contains(StandardOpenOption.WRITE)) { return new AzureSeekableByteChannel( (NioBlobOutputStream) this.newOutputStreamInternal(path, set, fileAttributes), path); } else { return new AzureSeekableByteChannel( (NioBlobInputStream) this.newInputStream(path, set.toArray(new OpenOption[0])), path); } } /** * Opens an {@link InputStream} to the given path. * <p> * The stream will not attempt to read or buffer the entire file. However, when fetching data, it will always * request the same size chunk of several MB to prevent network thrashing on small reads. Mark and reset are * supported. * <p> * Only {@link StandardOpenOption * * @param path the path to the file to open * @param options options specifying how the file is opened * @return a new input stream * @throws IllegalArgumentException if an invalid combination of options is specified * @throws UnsupportedOperationException if an unsupported option is specified * @throws IOException If an I/O error occurs. * @throws SecurityException never */ @Override public InputStream newInputStream(Path path, OpenOption... options) throws IOException { if (options.length > 1 || (options.length > 0 && !options[0].equals(StandardOpenOption.READ))) { throw LoggingUtility.logError(logger, new UnsupportedOperationException("Only the read option is supported.")); } AzureResource resource = new AzureResource(path); AzurePath.ensureFileSystemOpen(resource.getPath()); if (!resource.checkDirStatus().equals(DirectoryStatus.NOT_A_DIRECTORY)) { throw LoggingUtility.logError(logger, new IOException("Path either does not exist or points to a directory." + "Path must point to a file. Path: " + path.toString())); } return new NioBlobInputStream(resource.getBlobClient().openInputStream(), resource.getPath()); } /** * Opens an {@link OutputStream} to the given path. The resulting file will be stored as a block blob. 
 * <p>
 * The only supported options are {@link StandardOpenOption#CREATE}, {@link StandardOpenOption#CREATE_NEW},
 * {@link StandardOpenOption#WRITE}, and {@link StandardOpenOption#TRUNCATE_EXISTING}. Any other option will throw an
 * {@link UnsupportedOperationException}. {@code WRITE} and {@code TRUNCATE_EXISTING} must be specified or an
 * {@link IllegalArgumentException} will be thrown. Hence, files cannot be updated, only overwritten completely.
 * <p>
 * This stream will not attempt to buffer the entire file, however some buffering will be done for potential
 * optimizations and to avoid network thrashing. Specifically, up to
 * {@link AzureFileSystem#AZURE_STORAGE_PUT_BLOB_THRESHOLD} bytes will be buffered initially. If that threshold is
 * exceeded, the data will be broken into chunks and sent in blocks, and writes will be buffered into sizes of
 * {@link AzureFileSystem#AZURE_STORAGE_UPLOAD_BLOCK_SIZE}. The maximum number of such buffers that may be
 * allocated is defined by {@link AzureFileSystem#AZURE_STORAGE_MAX_CONCURRENCY_PER_REQUEST}, which also configures
 * the level of parallelism with which we may write and thus may affect write speeds as well.
 * <p>
 * The data is only committed when the stream is closed. Hence data cannot be read from the destination until the
 * stream is closed. When the close method returns, it is guaranteed that, barring any errors, the data is finalized
 * and available for reading.
 * <p>
 * Writing happens asynchronously. Bytes passed for writing are stored until either the threshold or block size are
 * met at which time they are sent to the service. When the write method returns, there is no guarantee about which
 * phase of this process the data is in other than it has been accepted and will be written. Again, closing will
 * guarantee that the data is written and available.
 * <p>
 * Flush is a no-op as regards data transfers, but it can be used to check the state of the stream for errors.
 * This can be a useful tool because writing happens asynchronously, and therefore an error from a previous write
 * may not otherwise be thrown unless the stream is flushed, closed, or written to again.
     *
     * @param path the path to the file to open or create
     * @param options options specifying how the file is opened
     * @return a new output stream
     * @throws IllegalArgumentException if an invalid combination of options is specified
     * @throws UnsupportedOperationException if an unsupported option is specified
     * @throws IOException If an I/O error occurs.
     * @throws SecurityException never
     */
    @Override
    public OutputStream newOutputStream(Path path, OpenOption... options) throws IOException {
        return newOutputStreamInternal(path, new HashSet<>(Arrays.asList(options)));
    }

    // Shared implementation for newOutputStream and the write path of newByteChannel. Package-private so the
    // channel type can reuse the option validation below.
    OutputStream newOutputStreamInternal(Path path, Set<? extends OpenOption> optionsSet,
        FileAttribute<?>... fileAttributes) throws IOException {
        // No options at all means the default overwrite-or-create behavior.
        if (optionsSet == null || optionsSet.size() == 0) {
            optionsSet = OUTPUT_STREAM_DEFAULT_OPTIONS;
        }

        // Reject any option outside the supported set (e.g. APPEND, READ).
        for (OpenOption option : optionsSet) {
            if (!OUTPUT_STREAM_SUPPORTED_OPTIONS.contains(option)) {
                throw LoggingUtility.logError(logger, new UnsupportedOperationException("Unsupported option: "
                    + option.toString()));
            }
        }

        /*
        Write must be specified. Either create_new or truncate must be specified. This is to ensure that no edits or
        appends are allowed.
         */
        if (!optionsSet.contains(StandardOpenOption.WRITE)
            || !(optionsSet.contains(StandardOpenOption.TRUNCATE_EXISTING)
                || optionsSet.contains(StandardOpenOption.CREATE_NEW))) {
            throw LoggingUtility.logError(logger, new IllegalArgumentException(
                "Write and either CreateNew or TruncateExisting must be specified to open "
                    + "an OutputStream"));
        }

        AzureResource resource = new AzureResource(path);
        AzurePath.ensureFileSystemOpen(resource.getPath());
        DirectoryStatus status = resource.checkDirStatus();

        // Writing to a directory (virtual or concrete) is never allowed.
        if (DirectoryStatus.isDirectory(status)) {
            throw LoggingUtility.logError(logger, new IOException("Cannot open an OutputStream to a directory. "
                + "Path: " + path.toString()));
        }

        // Nothing exists at the destination: a create option is required.
        if (status.equals(DirectoryStatus.DOES_NOT_EXIST) && !(optionsSet.contains(StandardOpenOption.CREATE)
            || optionsSet.contains(StandardOpenOption.CREATE_NEW))) {
            throw LoggingUtility.logError(logger, new IOException("Writing to an empty location requires a create "
                + "option. Path: " + path.toString()));
        }

        // A file already exists: CREATE_NEW demands exclusivity, so fail.
        if (status.equals(DirectoryStatus.NOT_A_DIRECTORY) && optionsSet.contains(StandardOpenOption.CREATE_NEW)) {
            throw LoggingUtility.logError(logger, new IOException("A file already exists at this location and "
                + "CREATE_NEW was specified. Path: " + path.toString()));
        }

        // Transfer tuning (block size, put-blob threshold, concurrency) comes from the file system configuration.
        AzureFileSystem fs = (AzureFileSystem) (path.getFileSystem());
        Integer blockSize = fs.getBlockSize() == null ? null : fs.getBlockSize().intValue();
        Integer putBlobThreshold = fs.getPutBlobThreshold() == null ? null : fs.getPutBlobThreshold().intValue();
        ParallelTransferOptions pto = new ParallelTransferOptions(blockSize, fs.getMaxConcurrencyPerRequest(), null,
            putBlobThreshold);

        // For CREATE_NEW, also guard server-side against a concurrent create with an etag condition.
        BlobRequestConditions rq = null;
        if (optionsSet.contains(StandardOpenOption.CREATE_NEW)) {
            rq = new BlobRequestConditions().setIfNoneMatch("*");
        }

        if (fileAttributes == null) {
            fileAttributes = new FileAttribute<?>[0];
        }
        resource.setFileAttributes(Arrays.asList(fileAttributes));

        return new NioBlobOutputStream(resource.getBlobOutputStream(pto, rq), resource.getPath());
    }

    /**
     * Returns an {@link AzureDirectoryStream} for iterating over the contents of a directory. The elements returned by
     * the directory stream's iterator are of type Path, each one representing an entry in the directory. The Path
     * objects are obtained as if by resolving the name of the directory entry against dir. The entries returned by the
     * iterator are filtered by the given filter.
     * <p>
     * When not using the try-with-resources construct, then directory stream's close method should be invoked after
     * iteration is completed so as to free any resources held for the open directory.
     * <p>
     * Where the filter terminates due to an uncaught error or runtime exception then it is propagated to the hasNext or
     * next method. Where an IOException is thrown, it results in the hasNext or next method throwing a
     * DirectoryIteratorException with the IOException as the cause.
     *
     * @param path the path to the directory
     * @param filter the directory stream filter
     * @return a new and open {@code DirectoryStream} object
     * @throws IllegalArgumentException If the path type is not an instance of {@link AzurePath}.
     * @throws NotDirectoryException if the file could not otherwise be opened because it is not a directory
     * @throws IOException If an I/O error occurs.
     * @throws SecurityException never
     */
    @Override
    public DirectoryStream<Path> newDirectoryStream(Path path, DirectoryStream.Filter<? super Path> filter)
        throws IOException {
        // This provider can only resolve resources for its own Path implementation.
        if (!(path instanceof AzurePath)) {
            throw LoggingUtility.logError(logger, new IllegalArgumentException("This provider cannot operate on "
                + "subtypes of Path other than AzurePath"));
        }
        AzurePath.ensureFileSystemOpen(path);
        /*
        Ensure the path is a directory. Note that roots are always directories. The case of an invalid root will be
        caught in instantiating the stream below.

        Possible optimization later is to save the result of the list call to use as the first list call inside the
        stream rather than a list call for checking the status and a list call for listing.
         */
        if (!((AzurePath) path).isRoot() && !(new AzureResource(path).checkDirectoryExists())) {
            throw LoggingUtility.logError(logger, new NotDirectoryException(path.toString()));
        }
        return new AzureDirectoryStream((AzurePath) path, filter);
    }

    /**
     * Creates a new directory at the specified path.
     * <p>
     * The existence of a directory in the {@code AzureFileSystem} is defined on two levels. <i>Weak existence</i> is
     * defined by the presence of a non-zero number of blobs prefixed with the directory's path.
This concept is also * known as a <i>virtual directory</i> and enables the file system to work with containers that were pre-loaded * with data by another source but need to be accessed by this file system. <i>Strong existence</i> is defined as * the presence of an actual storage resource at the given path, which in the case of directories, is a zero-length * blob whose name is the directory path with a particular metadata field indicating the blob's status as a * directory. This is also known as a <i>concrete directory</i>. Directories created by this file system will * strongly exist. Operations targeting directories themselves as the object (e.g. setting properties) will target * marker blobs underlying concrete directories. Other operations (e.g. listing) will operate on the blob-name * prefix. * <p> * This method fulfills the nio contract of: "The check for the existence of the file and the creation of the * directory if it does not exist are a single operation that is atomic with respect to all other filesystem * activities that might affect the directory." More specifically, this method will atomically check for <i>strong * existence</i> of another file or directory at the given path and fail if one is present. On the other hand, we * only check for <i>weak existence</i> of the parent to determine if the given path is valid. Additionally, the * action of checking whether the parent exists, is <i>not</i> atomic with the creation of the directory. Note that * while it is possible that the parent may be deleted between when the parent is determined to exist and the * creation of the child, the creation of the child will always ensure the existence of a virtual parent, so the * child will never be left floating and unreachable. The different checks on parent and child is due to limitations * in the Storage service API. 
 * <p>
 * There may be some unintuitive behavior when working with directories in this file system, particularly virtual
 * directories (usually those not created by this file system). A virtual directory will disappear as soon as all
 * its children have been deleted. Furthermore, if a directory with the given path weakly exists at the time of
 * calling this method, this method will still return success and create a concrete directory at the target
 * location. In other words, it is possible to "double create" a directory if it first weakly exists and then is
 * strongly created. This is both because it is impossible to atomically check if a virtual directory exists while
 * creating a concrete directory and because such behavior will have minimal side effects--no files will be
 * overwritten and the directory will still be available for writing as intended, though it may not be empty. This
 * is not a complete list of such unintuitive behavior.
 * <p>
 * This method will attempt to extract standard HTTP content headers from the list of file attributes to set them
 * as blob headers. All other attributes will be set as blob metadata. The value of every attribute will be
 * converted to a {@code String} with the exception of the Content-MD5 attribute which expects a {@code byte[]}.
 * When extracting the content headers, the following strings will be used for comparison (constants for these
 * values can be found on this type):
 * <ul>
 * <li>{@code Content-Type}</li>
 * <li>{@code Content-Disposition}</li>
 * <li>{@code Content-Language}</li>
 * <li>{@code Content-Encoding}</li>
 * <li>{@code Content-MD5}</li>
 * <li>{@code Cache-Control}</li>
 * </ul>
 * Note that these properties also have a particular semantic in that if one is specified, all are updated. In other
 * words, if any of the above is set, all those that are not set will be cleared. See the
 * <a href="https://docs.microsoft.com/rest/api/storageservices/setting-and-retrieving-properties-and-metadata-for-blob-resources">Azure
 * Docs</a> for more information.
* * @param path the directory to create * @param fileAttributes an optional list of file attributes to set atomically when creating the directory * @throws IllegalArgumentException If the path type is not an instance of {@link AzurePath}. * @throws UnsupportedOperationException if the array contains an attribute that cannot be set atomically when * creating the directory * @throws FileAlreadyExistsException if a directory could not otherwise be created because a file of that name * already exists * @throws IOException If an I/O error occurs. * @throws SecurityException never */ @Override public void createDirectory(Path path, FileAttribute<?>... fileAttributes) throws IOException { fileAttributes = fileAttributes == null ? new FileAttribute<?>[0] : fileAttributes; AzureResource azureResource = new AzureResource(path); AzurePath.ensureFileSystemOpen(azureResource.getPath()); if (azureResource.checkParentDirectoryExists()) { try { azureResource.setFileAttributes(Arrays.asList(fileAttributes)) .putDirectoryBlob(new BlobRequestConditions().setIfNoneMatch("*")); } catch (BlobStorageException e) { if (e.getStatusCode() == HttpURLConnection.HTTP_CONFLICT && e.getErrorCode().equals(BlobErrorCode.BLOB_ALREADY_EXISTS)) { throw LoggingUtility.logError(logger, new FileAlreadyExistsException(azureResource.getPath().toString())); } else { throw LoggingUtility.logError(logger, new IOException("An error occurred when creating the directory", e)); } } } else { throw LoggingUtility.logError(logger, new IOException("Parent directory does not exist for path: " + azureResource.getPath().toString())); } } /** * Deletes the specified resource. * <p> * This method is not atomic with respect to other file system operations. It is possible to delete a file in use by * another process, and doing so will not immediately invalidate any channels open to that file--they will simply * start to fail. Root directories cannot be deleted even when empty. 
* * @param path the path to the file to delete * @throws IllegalArgumentException If the path type is not an instance of {@link AzurePath}. * @throws NoSuchFileException if the file does not exist * @throws DirectoryNotEmptyException if the file is a directory and could not otherwise be deleted because the * directory is not empty * @throws IOException If an I/O error occurs. * @throws SecurityException never */ @Override public void delete(Path path) throws IOException { AzureResource azureResource = new AzureResource(path); AzurePath.ensureFileSystemOpen(azureResource.getPath()); DirectoryStatus dirStatus = azureResource.checkDirStatus(); if (dirStatus.equals(DirectoryStatus.DOES_NOT_EXIST)) { throw LoggingUtility.logError(logger, new NoSuchFileException(path.toString())); } if (dirStatus.equals(DirectoryStatus.NOT_EMPTY)) { throw LoggingUtility.logError(logger, new DirectoryNotEmptyException(path.toString())); } try { azureResource.getBlobClient().delete(); } catch (BlobStorageException e) { if (e.getErrorCode().equals(BlobErrorCode.BLOB_NOT_FOUND)) { throw LoggingUtility.logError(logger, new NoSuchFileException(path.toString())); } throw LoggingUtility.logError(logger, new IOException(e)); } } /** * Copies the resource at the source location to the destination. * <p> * This method is not atomic with respect to other file system operations. More specifically, the checks necessary * to validate the inputs and state of the file system are not atomic with the actual copying of data. If the copy * is triggered, the copy itself is atomic and only a complete copy will ever be left at the destination. * <p> * In addition to those in the docs for {@link FileSystemProvider * the following requirements for successful completion. {@link StandardCopyOption * as it is impossible not to copy blob properties; if this option is not passed, an * {@link UnsupportedOperationException} will be thrown. 
Neither the source nor the destination can be a root * directory; if either is a root directory, an {@link IllegalArgumentException} will be thrown. The parent * directory of the destination must at least weakly exist; if it does not, an {@link IOException} will be thrown. * The only supported option other than {@link StandardCopyOption * {@link StandardCopyOption * {@link UnsupportedOperationException}. * <p> * This method supports both virtual and concrete directories as both the source and destination. Unlike when * creating a directory, the existence of a virtual directory at the destination will cause this operation to fail. * This is in order to prevent the possibility of overwriting a non-empty virtual directory with a file. Still, as * mentioned above, this check is not atomic with the creation of the resultant directory. * * @param source the path to the file to copy * @param destination the path to the target file * @param copyOptions specifying how the copy should be done * @throws UnsupportedOperationException if the array contains a copy option that is not supported * @throws FileAlreadyExistsException if the target file exists but cannot be replaced because the REPLACE_EXISTING * option is not specified * @throws DirectoryNotEmptyException the REPLACE_EXISTING option is specified but the file cannot be replaced * because it is a non-empty directory * @throws IOException If an I/O error occurs. * @throws IllegalArgumentException If the path type is not an instance of {@link AzurePath}. * @throws SecurityException never * @see */ @Override public void copy(Path source, Path destination, CopyOption... 
copyOptions) throws IOException { if (source.equals(destination)) { return; } boolean replaceExisting = false; List<CopyOption> optionsList = new ArrayList<>(Arrays.asList(copyOptions)); if (!optionsList.contains(StandardCopyOption.COPY_ATTRIBUTES)) { throw LoggingUtility.logError(logger, new UnsupportedOperationException( "StandardCopyOption.COPY_ATTRIBUTES must be specified as the service will always copy " + "file attributes.")); } optionsList.remove(StandardCopyOption.COPY_ATTRIBUTES); if (optionsList.contains(StandardCopyOption.REPLACE_EXISTING)) { replaceExisting = true; optionsList.remove(StandardCopyOption.REPLACE_EXISTING); } if (!optionsList.isEmpty()) { throw LoggingUtility.logError(logger, new UnsupportedOperationException("Unsupported copy option found. " + "Only StandardCopyOption.COPY_ATTRIBUTES and StandareCopyOption.REPLACE_EXISTING are supported.")); } AzureResource sourceRes = new AzureResource(source); AzurePath.ensureFileSystemOpen(sourceRes.getPath()); AzureResource destinationRes = new AzureResource(destination); AzurePath.ensureFileSystemOpen(destinationRes.getPath()); DirectoryStatus destinationStatus = destinationRes.checkDirStatus(); if (destinationStatus.equals(DirectoryStatus.NOT_EMPTY)) { throw LoggingUtility.logError(logger, new DirectoryNotEmptyException(destination.toString())); } /* Set request conditions if we should not overwrite. We can error out here if we know something already exists, but we will also create request conditions as a safeguard against overwriting something that was created between our check and put. */ BlobRequestConditions requestConditions = null; if (!replaceExisting) { if (!destinationStatus.equals(DirectoryStatus.DOES_NOT_EXIST)) { throw LoggingUtility.logError(logger, new FileAlreadyExistsException(destinationRes.getPath().toString())); } requestConditions = new BlobRequestConditions().setIfNoneMatch("*"); } /* More path validation Check that the parent for the destination exists. 
We only need to perform this check if there is nothing currently at the destination, for if the destination exists, its parent at least weakly exists and we can skip a service call. */ if (destinationStatus.equals(DirectoryStatus.DOES_NOT_EXIST) && !destinationRes.checkParentDirectoryExists()) { throw LoggingUtility.logError(logger, new IOException("Parent directory of destination location does not " + "exist. The destination path is therefore invalid. Destination: " + destinationRes.getPath().toString())); } /* Try to copy the resource at the source path. There is an optimization here where we try to do the copy first and only check for a virtual directory if there's a 404. In the cases of files and concrete directories, this only requires one request. For virtual directories, however, this requires three requests: failed copy, check status, create directory. Depending on customer scenarios and how many virtual directories they copy, it could be better to check the directory status first and then do a copy or createDir, which would always be two requests for all resource types. */ try { SyncPoller<BlobCopyInfo, Void> pollResponse = destinationRes.getBlobClient().beginCopy(sourceRes.getBlobClient().getBlobUrl(), null, null, null, null, requestConditions, null); pollResponse.waitForCompletion(Duration.ofSeconds(COPY_TIMEOUT_SECONDS)); } catch (BlobStorageException e) { if (e.getErrorCode().equals(BlobErrorCode.BLOB_NOT_FOUND) && !sourceRes.checkDirStatus().equals(DirectoryStatus.DOES_NOT_EXIST)) { /* We already checked that the parent exists and validated the paths above, so we can put the blob directly. */ destinationRes.putDirectoryBlob(requestConditions); } else { throw LoggingUtility.logError(logger, new IOException(e)); } } catch (RuntimeException e) { throw LoggingUtility.logError(logger, new IOException(e)); } } /*int checkRootDirStatus(BlobContainerClient rootClient) { }*/ /** * Unsupported. 
     *
     * @param path the path to the file to move
     * @param path1 the target path
     * @param copyOptions options specifying how the move should be done
     * @throws UnsupportedOperationException Operation is not supported.
     */
    @Override
    public void move(Path path, Path path1, CopyOption... copyOptions) throws IOException {
        throw LoggingUtility.logError(logger, new UnsupportedOperationException());
    }

    /**
     * Unsupported.
     *
     * @param path one path to the file
     * @param path1 the other path to compare against
     * @return never returns normally
     * @throws UnsupportedOperationException Operation is not supported.
     */
    @Override
    public boolean isSameFile(Path path, Path path1) throws IOException {
        throw LoggingUtility.logError(logger, new UnsupportedOperationException());
    }

    /**
     * Always returns false as hidden files are not supported.
     *
     * @param path the path
     * @return false
     * @throws IOException If an I/O error occurs.
     * @throws SecurityException never
     */
    @Override
    public boolean isHidden(Path path) throws IOException {
        return false;
    }

    /**
     * Unsupported.
     *
     * @param path the path to the file
     * @return the file store where the file is stored.
     * @throws UnsupportedOperationException Operation is not supported.
     * @throws IOException If an I/O error occurs.
     * @throws SecurityException never
     */
    @Override
    public FileStore getFileStore(Path path) throws IOException {
        throw LoggingUtility.logError(logger, new UnsupportedOperationException());
    }

    /**
     * Checks the existence, and optionally the accessibility, of a file.
     * <p>
     * This method may only be used to check the existence of a file. It is not possible to determine the permissions
     * granted to a given client, so if any mode argument is specified, an {@link java.nio.file.AccessDeniedException}
     * will be thrown.
     *
     * @param path the path to the file to check
     * @param accessModes The access modes to check; may have zero elements
     * @throws NoSuchFileException if a file does not exist
     * @throws java.nio.file.AccessDeniedException the requested access would be denied or the access cannot be
     * determined because the Java virtual machine has insufficient privileges or other reasons
     * @throws IOException If an I/O error occurs.
     * @throws SecurityException never
     */
    @Override
    public void checkAccess(Path path, AccessMode... accessModes) throws IOException {
        // Permissions for a given client cannot be determined, so any requested access mode is rejected.
        if (accessModes != null && accessModes.length != 0) {
            throw LoggingUtility.logError(logger, new AccessDeniedException("The access cannot be determined."));
        }
        AzurePath.ensureFileSystemOpen(path);
        /*
        Some static utility methods in the jdk require checking access on a root. ReadAttributes is not supported on
        roots as they are containers. Furthermore, we always assume that roots exist as they are verified at creation
        and cannot be deleted by the file system. Thus, we prefer a short circuit for roots.
         */
        if (path instanceof AzurePath && ((AzurePath) path).isRoot()) {
            return;
        }
        // Existence is probed by attempting to read attributes; a 404-caused failure maps to NoSuchFileException.
        try {
            readAttributes(path, BasicFileAttributes.class);
        } catch (IOException e) {
            Throwable cause = e.getCause();
            if (cause instanceof BlobStorageException
                && BlobErrorCode.BLOB_NOT_FOUND.equals(((BlobStorageException) cause).getErrorCode())) {
                throw LoggingUtility.logError(logger, new NoSuchFileException(path.toString()));
            } else {
                throw LoggingUtility.logError(logger, e);
            }
        }
    }

    /**
     * Returns a file attribute view of a given type.
     * <p>
     * See {@link AzureBasicFileAttributeView} and {@link AzureBlobFileAttributeView} for more information.
     * <p>
     * Reading or setting attributes on a virtual directory is not supported and will throw an {@link IOException}. See
     * {@link #createDirectory(Path, FileAttribute[])} for more information on virtual directories.
     *
     * @param path the path to the file
     * @param type the Class object corresponding to the file attribute view
     * @param linkOptions ignored
     * @return a file attribute view of the specified type, or null if the attribute view type is not available
     */
    @Override
    @SuppressWarnings("unchecked")
    public <V extends FileAttributeView> V getFileAttributeView(Path path, Class<V> type, LinkOption... linkOptions) {
        /*
        No resource validation is necessary here. That can happen at the time of making a network requests internal to
        the view object.
         */
        if (type == BasicFileAttributeView.class || type == AzureBasicFileAttributeView.class) {
            return (V) new AzureBasicFileAttributeView(path);
        } else if (type == AzureBlobFileAttributeView.class) {
            return (V) new AzureBlobFileAttributeView(path);
        } else {
            return null;
        }
    }

    /**
     * Reads a file's attributes as a bulk operation.
     * <p>
     * See {@link AzureBasicFileAttributes} and {@link AzureBlobFileAttributes} for more information.
     * <p>
     * Reading attributes on a virtual directory is not supported and will throw an {@link IOException}. See
     * {@link #createDirectory(Path, FileAttribute[])} for more information on virtual directories.
     *
     * @param path the path to the file
     * @param type the Class of the file attributes required to read
     * @param linkOptions ignored
     * @return the file attributes
     * @throws UnsupportedOperationException if an attributes of the given type are not supported
     * @throws IOException If an I/O error occurs.
     * @throws SecurityException never
     */
    @Override
    @SuppressWarnings("unchecked")
    public <A extends BasicFileAttributes> A readAttributes(Path path, Class<A> type, LinkOption... linkOptions)
        throws IOException {
        AzurePath.ensureFileSystemOpen(path);
        // Map the requested attribute type to the view type that can produce it.
        Class<? extends BasicFileAttributeView> view;
        if (type == BasicFileAttributes.class || type == AzureBasicFileAttributes.class) {
            view = AzureBasicFileAttributeView.class;
        } else if (type == AzureBlobFileAttributes.class) {
            view = AzureBlobFileAttributeView.class;
        } else {
            throw LoggingUtility.logError(logger, new UnsupportedOperationException());
        }

        /*
        Resource validation will happen in readAttributes of the view. We don't want to double check, and checking
        internal to the view ensures it is always checked no matter which code path is taken.
         */
        return (A) getFileAttributeView(path, view, linkOptions).readAttributes();
    }

    /**
     * Reads a set of file attributes as a bulk operation.
     * <p>
     * See {@link AzureBasicFileAttributes} and {@link AzureBlobFileAttributes} for more information.
     * <p>
     * Reading attributes on a virtual directory is not supported and will throw an {@link IOException}.
See * {@link * * @param path the path to the file * @param attributes the attributes to read * @param linkOptions ignored * @return a map of the attributes returned; may be empty. The map's keys are the attribute names, its values are * the attribute values * @throws UnsupportedOperationException if an attributes of the given type are not supported * @throws IllegalArgumentException if no attributes are specified or an unrecognized attributes is specified * @throws IOException If an I/O error occurs. * @throws SecurityException never */ @Override public Map<String, Object> readAttributes(Path path, String attributes, LinkOption... linkOptions) throws IOException { if (attributes == null) { throw LoggingUtility.logError(logger, new IllegalArgumentException("Attribute string cannot be null.")); } AzurePath.ensureFileSystemOpen(path); Map<String, Object> results = new HashMap<>(); /* AzureBlobFileAttributes can do everything the basic attributes can do and more. There's no need to instantiate one of each if both are specified somewhere in the list as that will waste a network call. This can be generified later if we need to add more attribute types, but for now we can stick to just caching the supplier for a single attributes object. */ Map<String, Supplier<Object>> attributeSuppliers = null; String viewType; String attributeList; String[] parts = attributes.split(":"); if (parts.length > 2) { throw LoggingUtility.logError(logger, new IllegalArgumentException("Invalid format for attribute string: " + attributes)); } if (parts.length == 1) { viewType = "basic"; attributeList = attributes; } else { viewType = parts[0]; attributeList = parts[1]; } /* For specificity, our basic implementation of BasicFileAttributes uses the name azureBasic. However, the docs state that "basic" must be supported, so we funnel to azureBasic. 
*/ if (viewType.equals("basic")) { viewType = AzureBasicFileAttributeView.NAME; } if (!viewType.equals(AzureBasicFileAttributeView.NAME) && !viewType.equals(AzureBlobFileAttributeView.NAME)) { throw LoggingUtility.logError(logger, new UnsupportedOperationException("Invalid attribute view: " + viewType)); } for (String attributeName : attributeList.split(",")) { /* We rely on the azureBlobFAV to actually do the work here as mentioned above, but if basic is specified, we should at least validate that the attribute is available on a basic view. */ if (viewType.equals(AzureBasicFileAttributeView.NAME)) { if (!AzureBasicFileAttributes.ATTRIBUTE_STRINGS.contains(attributeName) && !attributeName.equals("*")) { throw LoggingUtility.logError(logger, new IllegalArgumentException("Invalid attribute. View: " + viewType + ". Attribute: " + attributeName)); } } if (attributeSuppliers == null) { attributeSuppliers = AzureBlobFileAttributes.getAttributeSuppliers( this.readAttributes(path, AzureBlobFileAttributes.class, linkOptions)); } if (attributeName.equals("*")) { if (viewType.equals(AzureBasicFileAttributeView.NAME)) { for (String attr : AzureBasicFileAttributes.ATTRIBUTE_STRINGS) { results.put(attr, attributeSuppliers.get(attr).get()); } } else { for (Map.Entry<String, Supplier<Object>> entry: attributeSuppliers.entrySet()) { results.put(entry.getKey(), entry.getValue().get()); } } } else if (!attributeSuppliers.containsKey(attributeName)) { throw LoggingUtility.logError(logger, new IllegalArgumentException("Invalid attribute. View: " + viewType + ". Attribute: " + attributeName)); } else { results.put(attributeName, attributeSuppliers.get(attributeName).get()); } } if (results.isEmpty()) { throw LoggingUtility.logError(logger, new IllegalArgumentException("No attributes were specified. Attributes: " + attributes)); } return results; } /** * Sets the value of a file attribute. * <p> * See {@link AzureBlobFileAttributeView} for more information. 
* <p> * Setting attributes on a virtual directory is not supported and will throw an {@link IOException}. See * {@link * * @param path the path to the file * @param attributes the attribute to set * @param value the attribute value * @param linkOptions ignored * @throws UnsupportedOperationException if an attribute view is not available * @throws IllegalArgumentException if the attribute name is not specified, or is not recognized, or the attribute * value is of the correct type but has an inappropriate value * @throws ClassCastException If the attribute value is not of the expected type or is a collection containing * elements that are not of the expected type * @throws IOException If an I/O error occurs. * @throws SecurityException never */ @Override public void setAttribute(Path path, String attributes, Object value, LinkOption... linkOptions) throws IOException { AzurePath.ensureFileSystemOpen(path); String viewType; String attributeName; String[] parts = attributes.split(":"); if (parts.length > 2) { throw LoggingUtility.logError(logger, new IllegalArgumentException("Invalid format for attribute string: " + attributes)); } if (parts.length == 1) { viewType = "basic"; attributeName = attributes; } else { viewType = parts[0]; attributeName = parts[1]; } /* For specificity, our basic implementation of BasicFileAttributes uses the name azureBasic. However, the docs state that "basic" must be supported, so we funnel to azureBasic. */ if (viewType.equals("basic")) { viewType = AzureBasicFileAttributeView.NAME; } if (viewType.equals(AzureBasicFileAttributeView.NAME)) { throw LoggingUtility.logError(logger, new IllegalArgumentException("Invalid attribute. View: " + viewType + ". 
Attribute: " + attributeName)); } else if (viewType.equals(AzureBlobFileAttributeView.NAME)) { Map<String, Consumer<Object>> attributeConsumers = AzureBlobFileAttributeView.setAttributeConsumers( this.getFileAttributeView(path, AzureBlobFileAttributeView.class, linkOptions)); if (!attributeConsumers.containsKey(attributeName)) { throw LoggingUtility.logError(logger, new IllegalArgumentException("Invalid attribute. View: " + viewType + ". Attribute: " + attributeName)); } try { attributeConsumers.get(attributeName).accept(value); } catch (UncheckedIOException e) { if (e.getMessage().equals(AzureBlobFileAttributeView.ATTR_CONSUMER_ERROR)) { throw LoggingUtility.logError(logger, e.getCause()); } } } else { throw LoggingUtility.logError(logger, new UnsupportedOperationException("Invalid attribute view: " + viewType)); } } void closeFileSystem(String fileSystemName) { this.openFileSystems.remove(fileSystemName); } private String extractAccountEndpoint(URI uri) { if (!uri.getScheme().equals(this.getScheme())) { throw LoggingUtility.logError(this.logger, new IllegalArgumentException( "URI scheme does not match this provider")); } if (CoreUtils.isNullOrEmpty(uri.getQuery())) { throw LoggingUtility.logError(this.logger, new IllegalArgumentException("URI does not contain a query " + "component. FileSystems require a URI of the format \"azb: } String endpoint = Flux.fromArray(uri.getQuery().split("&")) .filter(s -> s.startsWith(ENDPOINT_QUERY_KEY + "=")) .switchIfEmpty(Mono.error(LoggingUtility.logError(this.logger, new IllegalArgumentException( "URI does not contain an \"" + ENDPOINT_QUERY_KEY + "=\" parameter. FileSystems require a URI " + "of the format \"azb: .map(s -> s.substring(ENDPOINT_QUERY_KEY.length() + 1)) .blockLast(); if (CoreUtils.isNullOrEmpty(endpoint)) { throw LoggingUtility.logError(logger, new IllegalArgumentException("No account endpoint provided in URI" + " query.")); } return endpoint; } }
The `clientOptions` have applicationId which should get into `UserAgent` here. https://github.com/Azure/azure-sdk-for-java/blob/ea3f1d485aee21df0118c2a3ad5b66b45c538d4b/sdk/communication/azure-communication-identity/src/main/java/com/azure/communication/identity/CommunicationIdentityClientBuilder.java#L272 This is an old PR shows how UserAgent should be populated with `applicationId` https://github.com/Azure/azure-sdk-for-java/pull/16428/files#diff-57d5c797a106560d37148554ddcceb69f85c68f35b2a257823fbcc69b24a28aaR171
public CommunicationIdentityClientBuilder clientOptions(ClientOptions clientOptions) { this.clientOptions = Objects.requireNonNull(clientOptions, "'clientOptions' cannot be null."); return this; }
this.clientOptions = Objects.requireNonNull(clientOptions, "'clientOptions' cannot be null.");
public CommunicationIdentityClientBuilder clientOptions(ClientOptions clientOptions) { this.clientOptions = Objects.requireNonNull(clientOptions, "'clientOptions' cannot be null."); return this; }
class CommunicationIdentityClientBuilder { private static final String SDK_NAME = "name"; private static final String SDK_VERSION = "version"; private static final String COMMUNICATION_IDENTITY_PROPERTIES = "azure-communication-identity.properties"; private final ClientLogger logger = new ClientLogger(CommunicationIdentityClientBuilder.class); private String endpoint; private AzureKeyCredential azureKeyCredential; private TokenCredential tokenCredential; private HttpClient httpClient; private HttpLogOptions httpLogOptions = new HttpLogOptions(); private HttpPipeline pipeline; private Configuration configuration; private ClientOptions clientOptions; private final Map<String, String> properties = CoreUtils.getProperties(COMMUNICATION_IDENTITY_PROPERTIES); private final List<HttpPipelinePolicy> customPolicies = new ArrayList<HttpPipelinePolicy>(); /** * Set endpoint of the service * * @param endpoint url of the service * @return CommunicationIdentityClientBuilder */ public CommunicationIdentityClientBuilder endpoint(String endpoint) { this.endpoint = Objects.requireNonNull(endpoint, "'endpoint' cannot be null."); return this; } /** * Set endpoint of the service * * @param pipeline HttpPipeline to use, if a pipeline is not * supplied, the credential and httpClient fields must be set * @return CommunicationIdentityClientBuilder */ public CommunicationIdentityClientBuilder pipeline(HttpPipeline pipeline) { this.pipeline = Objects.requireNonNull(pipeline, "'pipeline' cannot be null."); return this; } /** * Sets the {@link TokenCredential} used to authenticate HTTP requests. * * @param tokenCredential {@link TokenCredential} used to authenticate HTTP requests. * @return The updated {@link CommunicationIdentityClientBuilder} object. * @throws NullPointerException If {@code tokenCredential} is null. 
*/ public CommunicationIdentityClientBuilder credential(TokenCredential tokenCredential) { this.tokenCredential = Objects.requireNonNull(tokenCredential, "'tokenCredential' cannot be null."); return this; } /** * Set credential to use * * @param accessKey access key for initalizing CommunicationClientCredential * @return CommunicationIdentityClientBuilder */ public CommunicationIdentityClientBuilder accessKey(String accessKey) { Objects.requireNonNull(accessKey, "'accessKey' cannot be null."); this.azureKeyCredential = new AzureKeyCredential(accessKey); return this; } /** * Set endpoint and credential to use * * @param connectionString connection string for setting endpoint and initalizing CommunicationClientCredential * @return CommunicationIdentityClientBuilder */ public CommunicationIdentityClientBuilder connectionString(String connectionString) { Objects.requireNonNull(connectionString, "'connectionString' cannot be null."); CommunicationConnectionString connectionStringObject = new CommunicationConnectionString(connectionString); String endpoint = connectionStringObject.getEndpoint(); String accessKey = connectionStringObject.getAccessKey(); this .endpoint(endpoint) .accessKey(accessKey); return this; } /** * Set httpClient to use * * @param httpClient httpClient to use, overridden by the pipeline * field. 
* @return CommunicationIdentityClientBuilder */ public CommunicationIdentityClientBuilder httpClient(HttpClient httpClient) { this.httpClient = Objects.requireNonNull(httpClient, "'httpClient' cannot be null."); return this; } /** * Apply additional HttpPipelinePolicy * * @param customPolicy HttpPipelinePolicy object to be applied after * AzureKeyCredentialPolicy, UserAgentPolicy, RetryPolicy, and CookiePolicy * @return CommunicationIdentityClientBuilder */ public CommunicationIdentityClientBuilder addPolicy(HttpPipelinePolicy customPolicy) { this.customPolicies.add(Objects.requireNonNull(customPolicy, "'customPolicy' cannot be null.")); return this; } /** * Sets the client options for all the requests made through the client. * * @param clientOptions {@link ClientOptions}. * @return The updated {@link CommunicationIdentityClientBuilder} object. * @throws NullPointerException If {@code clientOptions} is {@code null}. */ /** * Sets the configuration object used to retrieve environment configuration values during building of the client. * * @param configuration Configuration store used to retrieve environment configurations. * @return the updated CommunicationIdentityClientBuilder object */ public CommunicationIdentityClientBuilder configuration(Configuration configuration) { this.configuration = Objects.requireNonNull(configuration, "'configuration' cannot be null."); return this; } /** * Sets the {@link HttpLogOptions} for service requests. * * @param logOptions The logging configuration to use when sending and receiving HTTP requests/responses. * @return the updated CommunicationIdentityClientBuilder object */ public CommunicationIdentityClientBuilder httpLogOptions(HttpLogOptions logOptions) { this.httpLogOptions = Objects.requireNonNull(logOptions, "'logOptions' cannot be null."); return this; } /** * Sets the {@link CommunicationIdentityServiceVersion} that is used when making API requests. 
* <p> * If a service version is not provided, the service version that will be used will be the latest known service * version based on the version of the client library being used. If no service version is specified, updating to a * newer version of the client library will have the result of potentially moving to a newer service version. * <p> * Targeting a specific service version may also mean that the service will return an error for newer APIs. * * @param version {@link CommunicationIdentityServiceVersion} of the service to be used when making requests. * @return the updated CommunicationIdentityClientBuilder object */ public CommunicationIdentityClientBuilder serviceVersion(CommunicationIdentityServiceVersion version) { return this; } /** * Create asynchronous client applying HMACAuthenticationPolicy, UserAgentPolicy, * RetryPolicy, and CookiePolicy. * Additional HttpPolicies specified by additionalPolicies will be applied after them * * @return CommunicationIdentityAsyncClient instance */ public CommunicationIdentityAsyncClient buildAsyncClient() { return new CommunicationIdentityAsyncClient(createServiceImpl()); } /** * Create synchronous client applying HmacAuthenticationPolicy, UserAgentPolicy, * RetryPolicy, and CookiePolicy. 
* Additional HttpPolicies specified by additionalPolicies will be applied after them * * @return CommunicationIdentityClient instance */ public CommunicationIdentityClient buildClient() { return new CommunicationIdentityClient(createServiceImpl()); } private CommunicationIdentityClientImpl createServiceImpl() { Objects.requireNonNull(endpoint); HttpPipeline builderPipeline = this.pipeline; if (this.pipeline == null) { builderPipeline = createHttpPipeline(httpClient, createHttpPipelineAuthPolicy(), customPolicies); } CommunicationIdentityClientImplBuilder clientBuilder = new CommunicationIdentityClientImplBuilder(); clientBuilder.endpoint(endpoint) .pipeline(builderPipeline); return clientBuilder.buildClient(); } private HttpPipelinePolicy createHttpPipelineAuthPolicy() { if (this.tokenCredential != null && this.azureKeyCredential != null) { throw logger.logExceptionAsError( new IllegalArgumentException("Both 'credential' and 'accessKey' are set. Just one may be used.")); } if (this.tokenCredential != null) { return new BearerTokenAuthenticationPolicy( this.tokenCredential, "https: } else if (this.azureKeyCredential != null) { return new HmacAuthenticationPolicy(this.azureKeyCredential); } else { throw logger.logExceptionAsError( new IllegalArgumentException("Missing credential information while building a client.")); } } private HttpPipeline createHttpPipeline(HttpClient httpClient, HttpPipelinePolicy authorizationPolicy, List<HttpPipelinePolicy> customPolicies) { List<HttpPipelinePolicy> policies = new ArrayList<HttpPipelinePolicy>(); policies.add(authorizationPolicy); applyRequiredPolicies(policies); if (customPolicies != null && customPolicies.size() > 0) { policies.addAll(customPolicies); } return new HttpPipelineBuilder() .policies(policies.toArray(new HttpPipelinePolicy[0])) .httpClient(httpClient) .clientOptions(clientOptions) .build(); } private void applyRequiredPolicies(List<HttpPipelinePolicy> policies) { String clientName = 
properties.getOrDefault(SDK_NAME, "UnknownName"); String clientVersion = properties.getOrDefault(SDK_VERSION, "UnknownVersion"); policies.add(new UserAgentPolicy(httpLogOptions.getApplicationId(), clientName, clientVersion, configuration)); policies.add(new RetryPolicy()); policies.add(new CookiePolicy()); policies.add(new HttpLoggingPolicy(httpLogOptions)); } }
class CommunicationIdentityClientBuilder { private static final String SDK_NAME = "name"; private static final String SDK_VERSION = "version"; private static final String COMMUNICATION_IDENTITY_PROPERTIES = "azure-communication-identity.properties"; private final ClientLogger logger = new ClientLogger(CommunicationIdentityClientBuilder.class); private String endpoint; private AzureKeyCredential azureKeyCredential; private TokenCredential tokenCredential; private HttpClient httpClient; private HttpLogOptions httpLogOptions = new HttpLogOptions(); private HttpPipeline pipeline; private Configuration configuration; private ClientOptions clientOptions; private final Map<String, String> properties = CoreUtils.getProperties(COMMUNICATION_IDENTITY_PROPERTIES); private final List<HttpPipelinePolicy> customPolicies = new ArrayList<HttpPipelinePolicy>(); /** * Set endpoint of the service * * @param endpoint url of the service * @return CommunicationIdentityClientBuilder */ public CommunicationIdentityClientBuilder endpoint(String endpoint) { this.endpoint = Objects.requireNonNull(endpoint, "'endpoint' cannot be null."); return this; } /** * Set endpoint of the service * * @param pipeline HttpPipeline to use, if a pipeline is not * supplied, the credential and httpClient fields must be set * @return CommunicationIdentityClientBuilder */ public CommunicationIdentityClientBuilder pipeline(HttpPipeline pipeline) { this.pipeline = Objects.requireNonNull(pipeline, "'pipeline' cannot be null."); return this; } /** * Sets the {@link TokenCredential} used to authenticate HTTP requests. * * @param tokenCredential {@link TokenCredential} used to authenticate HTTP requests. * @return The updated {@link CommunicationIdentityClientBuilder} object. * @throws NullPointerException If {@code tokenCredential} is null. 
*/ public CommunicationIdentityClientBuilder credential(TokenCredential tokenCredential) { this.tokenCredential = Objects.requireNonNull(tokenCredential, "'tokenCredential' cannot be null."); return this; } /** * Set credential to use * * @param accessKey access key for initalizing CommunicationClientCredential * @return CommunicationIdentityClientBuilder */ public CommunicationIdentityClientBuilder accessKey(String accessKey) { Objects.requireNonNull(accessKey, "'accessKey' cannot be null."); this.azureKeyCredential = new AzureKeyCredential(accessKey); return this; } /** * Set endpoint and credential to use * * @param connectionString connection string for setting endpoint and initalizing CommunicationClientCredential * @return CommunicationIdentityClientBuilder */ public CommunicationIdentityClientBuilder connectionString(String connectionString) { Objects.requireNonNull(connectionString, "'connectionString' cannot be null."); CommunicationConnectionString connectionStringObject = new CommunicationConnectionString(connectionString); String endpoint = connectionStringObject.getEndpoint(); String accessKey = connectionStringObject.getAccessKey(); this .endpoint(endpoint) .accessKey(accessKey); return this; } /** * Set httpClient to use * * @param httpClient httpClient to use, overridden by the pipeline * field. 
* @return CommunicationIdentityClientBuilder */ public CommunicationIdentityClientBuilder httpClient(HttpClient httpClient) { this.httpClient = Objects.requireNonNull(httpClient, "'httpClient' cannot be null."); return this; } /** * Apply additional HttpPipelinePolicy * * @param customPolicy HttpPipelinePolicy object to be applied after * AzureKeyCredentialPolicy, UserAgentPolicy, RetryPolicy, and CookiePolicy * @return CommunicationIdentityClientBuilder */ public CommunicationIdentityClientBuilder addPolicy(HttpPipelinePolicy customPolicy) { this.customPolicies.add(Objects.requireNonNull(customPolicy, "'customPolicy' cannot be null.")); return this; } /** * Sets the client options for all the requests made through the client. * * @param clientOptions {@link ClientOptions}. * @return The updated {@link CommunicationIdentityClientBuilder} object. * @throws NullPointerException If {@code clientOptions} is {@code null}. */ /** * Sets the configuration object used to retrieve environment configuration values during building of the client. * * @param configuration Configuration store used to retrieve environment configurations. * @return the updated CommunicationIdentityClientBuilder object */ public CommunicationIdentityClientBuilder configuration(Configuration configuration) { this.configuration = Objects.requireNonNull(configuration, "'configuration' cannot be null."); return this; } /** * Sets the {@link HttpLogOptions} for service requests. * * @param logOptions The logging configuration to use when sending and receiving HTTP requests/responses. * @return the updated CommunicationIdentityClientBuilder object */ public CommunicationIdentityClientBuilder httpLogOptions(HttpLogOptions logOptions) { this.httpLogOptions = Objects.requireNonNull(logOptions, "'logOptions' cannot be null."); return this; } /** * Sets the {@link CommunicationIdentityServiceVersion} that is used when making API requests. 
* <p> * If a service version is not provided, the service version that will be used will be the latest known service * version based on the version of the client library being used. If no service version is specified, updating to a * newer version of the client library will have the result of potentially moving to a newer service version. * <p> * Targeting a specific service version may also mean that the service will return an error for newer APIs. * * @param version {@link CommunicationIdentityServiceVersion} of the service to be used when making requests. * @return the updated CommunicationIdentityClientBuilder object */ public CommunicationIdentityClientBuilder serviceVersion(CommunicationIdentityServiceVersion version) { return this; } /** * Create asynchronous client applying HMACAuthenticationPolicy, UserAgentPolicy, * RetryPolicy, and CookiePolicy. * Additional HttpPolicies specified by additionalPolicies will be applied after them * * @return CommunicationIdentityAsyncClient instance */ public CommunicationIdentityAsyncClient buildAsyncClient() { return new CommunicationIdentityAsyncClient(createServiceImpl()); } /** * Create synchronous client applying HmacAuthenticationPolicy, UserAgentPolicy, * RetryPolicy, and CookiePolicy. 
* Additional HttpPolicies specified by additionalPolicies will be applied after them * * @return CommunicationIdentityClient instance */ public CommunicationIdentityClient buildClient() { return new CommunicationIdentityClient(createServiceImpl()); } private CommunicationIdentityClientImpl createServiceImpl() { Objects.requireNonNull(endpoint); HttpPipeline builderPipeline = this.pipeline; if (this.pipeline == null) { builderPipeline = createHttpPipeline(httpClient, createHttpPipelineAuthPolicy(), customPolicies); } CommunicationIdentityClientImplBuilder clientBuilder = new CommunicationIdentityClientImplBuilder(); clientBuilder.endpoint(endpoint) .pipeline(builderPipeline); return clientBuilder.buildClient(); } private HttpPipelinePolicy createHttpPipelineAuthPolicy() { if (this.tokenCredential != null && this.azureKeyCredential != null) { throw logger.logExceptionAsError( new IllegalArgumentException("Both 'credential' and 'accessKey' are set. Just one may be used.")); } if (this.tokenCredential != null) { return new BearerTokenAuthenticationPolicy( this.tokenCredential, "https: } else if (this.azureKeyCredential != null) { return new HmacAuthenticationPolicy(this.azureKeyCredential); } else { throw logger.logExceptionAsError( new IllegalArgumentException("Missing credential information while building a client.")); } } private HttpPipeline createHttpPipeline(HttpClient httpClient, HttpPipelinePolicy authorizationPolicy, List<HttpPipelinePolicy> customPolicies) { List<HttpPipelinePolicy> policies = new ArrayList<HttpPipelinePolicy>(); policies.add(authorizationPolicy); applyRequiredPolicies(policies); if (customPolicies != null && customPolicies.size() > 0) { policies.addAll(customPolicies); } return new HttpPipelineBuilder() .policies(policies.toArray(new HttpPipelinePolicy[0])) .httpClient(httpClient) .clientOptions(clientOptions) .build(); } private void applyRequiredPolicies(List<HttpPipelinePolicy> policies) { String clientName = 
properties.getOrDefault(SDK_NAME, "UnknownName"); String clientVersion = properties.getOrDefault(SDK_VERSION, "UnknownVersion"); ClientOptions buildClientOptions = (clientOptions == null) ? new ClientOptions() : clientOptions; HttpLogOptions buildLogOptions = (httpLogOptions == null) ? new HttpLogOptions() : httpLogOptions; String applicationId = null; if (!CoreUtils.isNullOrEmpty(buildClientOptions.getApplicationId())) { applicationId = buildClientOptions.getApplicationId(); } else if (!CoreUtils.isNullOrEmpty(buildLogOptions.getApplicationId())) { applicationId = buildLogOptions.getApplicationId(); } policies.add(new UserAgentPolicy(applicationId, clientName, clientVersion, configuration)); policies.add(new RetryPolicy()); policies.add(new CookiePolicy()); policies.add(new HttpLoggingPolicy(httpLogOptions)); } }
Thanks for this. Addressed
public CommunicationIdentityClientBuilder clientOptions(ClientOptions clientOptions) { this.clientOptions = Objects.requireNonNull(clientOptions, "'clientOptions' cannot be null."); return this; }
this.clientOptions = Objects.requireNonNull(clientOptions, "'clientOptions' cannot be null.");
public CommunicationIdentityClientBuilder clientOptions(ClientOptions clientOptions) { this.clientOptions = Objects.requireNonNull(clientOptions, "'clientOptions' cannot be null."); return this; }
class CommunicationIdentityClientBuilder { private static final String SDK_NAME = "name"; private static final String SDK_VERSION = "version"; private static final String COMMUNICATION_IDENTITY_PROPERTIES = "azure-communication-identity.properties"; private final ClientLogger logger = new ClientLogger(CommunicationIdentityClientBuilder.class); private String endpoint; private AzureKeyCredential azureKeyCredential; private TokenCredential tokenCredential; private HttpClient httpClient; private HttpLogOptions httpLogOptions = new HttpLogOptions(); private HttpPipeline pipeline; private Configuration configuration; private ClientOptions clientOptions; private final Map<String, String> properties = CoreUtils.getProperties(COMMUNICATION_IDENTITY_PROPERTIES); private final List<HttpPipelinePolicy> customPolicies = new ArrayList<HttpPipelinePolicy>(); /** * Set endpoint of the service * * @param endpoint url of the service * @return CommunicationIdentityClientBuilder */ public CommunicationIdentityClientBuilder endpoint(String endpoint) { this.endpoint = Objects.requireNonNull(endpoint, "'endpoint' cannot be null."); return this; } /** * Set endpoint of the service * * @param pipeline HttpPipeline to use, if a pipeline is not * supplied, the credential and httpClient fields must be set * @return CommunicationIdentityClientBuilder */ public CommunicationIdentityClientBuilder pipeline(HttpPipeline pipeline) { this.pipeline = Objects.requireNonNull(pipeline, "'pipeline' cannot be null."); return this; } /** * Sets the {@link TokenCredential} used to authenticate HTTP requests. * * @param tokenCredential {@link TokenCredential} used to authenticate HTTP requests. * @return The updated {@link CommunicationIdentityClientBuilder} object. * @throws NullPointerException If {@code tokenCredential} is null. 
*/ public CommunicationIdentityClientBuilder credential(TokenCredential tokenCredential) { this.tokenCredential = Objects.requireNonNull(tokenCredential, "'tokenCredential' cannot be null."); return this; } /** * Set credential to use * * @param accessKey access key for initalizing CommunicationClientCredential * @return CommunicationIdentityClientBuilder */ public CommunicationIdentityClientBuilder accessKey(String accessKey) { Objects.requireNonNull(accessKey, "'accessKey' cannot be null."); this.azureKeyCredential = new AzureKeyCredential(accessKey); return this; } /** * Set endpoint and credential to use * * @param connectionString connection string for setting endpoint and initalizing CommunicationClientCredential * @return CommunicationIdentityClientBuilder */ public CommunicationIdentityClientBuilder connectionString(String connectionString) { Objects.requireNonNull(connectionString, "'connectionString' cannot be null."); CommunicationConnectionString connectionStringObject = new CommunicationConnectionString(connectionString); String endpoint = connectionStringObject.getEndpoint(); String accessKey = connectionStringObject.getAccessKey(); this .endpoint(endpoint) .accessKey(accessKey); return this; } /** * Set httpClient to use * * @param httpClient httpClient to use, overridden by the pipeline * field. 
* @return CommunicationIdentityClientBuilder */ public CommunicationIdentityClientBuilder httpClient(HttpClient httpClient) { this.httpClient = Objects.requireNonNull(httpClient, "'httpClient' cannot be null."); return this; } /** * Apply additional HttpPipelinePolicy * * @param customPolicy HttpPipelinePolicy object to be applied after * AzureKeyCredentialPolicy, UserAgentPolicy, RetryPolicy, and CookiePolicy * @return CommunicationIdentityClientBuilder */ public CommunicationIdentityClientBuilder addPolicy(HttpPipelinePolicy customPolicy) { this.customPolicies.add(Objects.requireNonNull(customPolicy, "'customPolicy' cannot be null.")); return this; } /** * Sets the client options for all the requests made through the client. * * @param clientOptions {@link ClientOptions}. * @return The updated {@link CommunicationIdentityClientBuilder} object. * @throws NullPointerException If {@code clientOptions} is {@code null}. */ /** * Sets the configuration object used to retrieve environment configuration values during building of the client. * * @param configuration Configuration store used to retrieve environment configurations. * @return the updated CommunicationIdentityClientBuilder object */ public CommunicationIdentityClientBuilder configuration(Configuration configuration) { this.configuration = Objects.requireNonNull(configuration, "'configuration' cannot be null."); return this; } /** * Sets the {@link HttpLogOptions} for service requests. * * @param logOptions The logging configuration to use when sending and receiving HTTP requests/responses. * @return the updated CommunicationIdentityClientBuilder object */ public CommunicationIdentityClientBuilder httpLogOptions(HttpLogOptions logOptions) { this.httpLogOptions = Objects.requireNonNull(logOptions, "'logOptions' cannot be null."); return this; } /** * Sets the {@link CommunicationIdentityServiceVersion} that is used when making API requests. 
* <p> * If a service version is not provided, the service version that will be used will be the latest known service * version based on the version of the client library being used. If no service version is specified, updating to a * newer version of the client library will have the result of potentially moving to a newer service version. * <p> * Targeting a specific service version may also mean that the service will return an error for newer APIs. * * @param version {@link CommunicationIdentityServiceVersion} of the service to be used when making requests. * @return the updated CommunicationIdentityClientBuilder object */ public CommunicationIdentityClientBuilder serviceVersion(CommunicationIdentityServiceVersion version) { return this; } /** * Create asynchronous client applying HMACAuthenticationPolicy, UserAgentPolicy, * RetryPolicy, and CookiePolicy. * Additional HttpPolicies specified by additionalPolicies will be applied after them * * @return CommunicationIdentityAsyncClient instance */ public CommunicationIdentityAsyncClient buildAsyncClient() { return new CommunicationIdentityAsyncClient(createServiceImpl()); } /** * Create synchronous client applying HmacAuthenticationPolicy, UserAgentPolicy, * RetryPolicy, and CookiePolicy. 
* Additional HttpPolicies specified by additionalPolicies will be applied after them * * @return CommunicationIdentityClient instance */ public CommunicationIdentityClient buildClient() { return new CommunicationIdentityClient(createServiceImpl()); } private CommunicationIdentityClientImpl createServiceImpl() { Objects.requireNonNull(endpoint); HttpPipeline builderPipeline = this.pipeline; if (this.pipeline == null) { builderPipeline = createHttpPipeline(httpClient, createHttpPipelineAuthPolicy(), customPolicies); } CommunicationIdentityClientImplBuilder clientBuilder = new CommunicationIdentityClientImplBuilder(); clientBuilder.endpoint(endpoint) .pipeline(builderPipeline); return clientBuilder.buildClient(); } private HttpPipelinePolicy createHttpPipelineAuthPolicy() { if (this.tokenCredential != null && this.azureKeyCredential != null) { throw logger.logExceptionAsError( new IllegalArgumentException("Both 'credential' and 'accessKey' are set. Just one may be used.")); } if (this.tokenCredential != null) { return new BearerTokenAuthenticationPolicy( this.tokenCredential, "https: } else if (this.azureKeyCredential != null) { return new HmacAuthenticationPolicy(this.azureKeyCredential); } else { throw logger.logExceptionAsError( new IllegalArgumentException("Missing credential information while building a client.")); } } private HttpPipeline createHttpPipeline(HttpClient httpClient, HttpPipelinePolicy authorizationPolicy, List<HttpPipelinePolicy> customPolicies) { List<HttpPipelinePolicy> policies = new ArrayList<HttpPipelinePolicy>(); policies.add(authorizationPolicy); applyRequiredPolicies(policies); if (customPolicies != null && customPolicies.size() > 0) { policies.addAll(customPolicies); } return new HttpPipelineBuilder() .policies(policies.toArray(new HttpPipelinePolicy[0])) .httpClient(httpClient) .clientOptions(clientOptions) .build(); } private void applyRequiredPolicies(List<HttpPipelinePolicy> policies) { String clientName = 
properties.getOrDefault(SDK_NAME, "UnknownName"); String clientVersion = properties.getOrDefault(SDK_VERSION, "UnknownVersion"); policies.add(new UserAgentPolicy(httpLogOptions.getApplicationId(), clientName, clientVersion, configuration)); policies.add(new RetryPolicy()); policies.add(new CookiePolicy()); policies.add(new HttpLoggingPolicy(httpLogOptions)); } }
class CommunicationIdentityClientBuilder { private static final String SDK_NAME = "name"; private static final String SDK_VERSION = "version"; private static final String COMMUNICATION_IDENTITY_PROPERTIES = "azure-communication-identity.properties"; private final ClientLogger logger = new ClientLogger(CommunicationIdentityClientBuilder.class); private String endpoint; private AzureKeyCredential azureKeyCredential; private TokenCredential tokenCredential; private HttpClient httpClient; private HttpLogOptions httpLogOptions = new HttpLogOptions(); private HttpPipeline pipeline; private Configuration configuration; private ClientOptions clientOptions; private final Map<String, String> properties = CoreUtils.getProperties(COMMUNICATION_IDENTITY_PROPERTIES); private final List<HttpPipelinePolicy> customPolicies = new ArrayList<HttpPipelinePolicy>(); /** * Set endpoint of the service * * @param endpoint url of the service * @return CommunicationIdentityClientBuilder */ public CommunicationIdentityClientBuilder endpoint(String endpoint) { this.endpoint = Objects.requireNonNull(endpoint, "'endpoint' cannot be null."); return this; } /** * Set endpoint of the service * * @param pipeline HttpPipeline to use, if a pipeline is not * supplied, the credential and httpClient fields must be set * @return CommunicationIdentityClientBuilder */ public CommunicationIdentityClientBuilder pipeline(HttpPipeline pipeline) { this.pipeline = Objects.requireNonNull(pipeline, "'pipeline' cannot be null."); return this; } /** * Sets the {@link TokenCredential} used to authenticate HTTP requests. * * @param tokenCredential {@link TokenCredential} used to authenticate HTTP requests. * @return The updated {@link CommunicationIdentityClientBuilder} object. * @throws NullPointerException If {@code tokenCredential} is null. 
*/ public CommunicationIdentityClientBuilder credential(TokenCredential tokenCredential) { this.tokenCredential = Objects.requireNonNull(tokenCredential, "'tokenCredential' cannot be null."); return this; } /** * Set credential to use * * @param accessKey access key for initalizing CommunicationClientCredential * @return CommunicationIdentityClientBuilder */ public CommunicationIdentityClientBuilder accessKey(String accessKey) { Objects.requireNonNull(accessKey, "'accessKey' cannot be null."); this.azureKeyCredential = new AzureKeyCredential(accessKey); return this; } /** * Set endpoint and credential to use * * @param connectionString connection string for setting endpoint and initalizing CommunicationClientCredential * @return CommunicationIdentityClientBuilder */ public CommunicationIdentityClientBuilder connectionString(String connectionString) { Objects.requireNonNull(connectionString, "'connectionString' cannot be null."); CommunicationConnectionString connectionStringObject = new CommunicationConnectionString(connectionString); String endpoint = connectionStringObject.getEndpoint(); String accessKey = connectionStringObject.getAccessKey(); this .endpoint(endpoint) .accessKey(accessKey); return this; } /** * Set httpClient to use * * @param httpClient httpClient to use, overridden by the pipeline * field. 
* @return CommunicationIdentityClientBuilder */ public CommunicationIdentityClientBuilder httpClient(HttpClient httpClient) { this.httpClient = Objects.requireNonNull(httpClient, "'httpClient' cannot be null."); return this; } /** * Apply additional HttpPipelinePolicy * * @param customPolicy HttpPipelinePolicy object to be applied after * AzureKeyCredentialPolicy, UserAgentPolicy, RetryPolicy, and CookiePolicy * @return CommunicationIdentityClientBuilder */ public CommunicationIdentityClientBuilder addPolicy(HttpPipelinePolicy customPolicy) { this.customPolicies.add(Objects.requireNonNull(customPolicy, "'customPolicy' cannot be null.")); return this; } /** * Sets the client options for all the requests made through the client. * * @param clientOptions {@link ClientOptions}. * @return The updated {@link CommunicationIdentityClientBuilder} object. * @throws NullPointerException If {@code clientOptions} is {@code null}. */ /** * Sets the configuration object used to retrieve environment configuration values during building of the client. * * @param configuration Configuration store used to retrieve environment configurations. * @return the updated CommunicationIdentityClientBuilder object */ public CommunicationIdentityClientBuilder configuration(Configuration configuration) { this.configuration = Objects.requireNonNull(configuration, "'configuration' cannot be null."); return this; } /** * Sets the {@link HttpLogOptions} for service requests. * * @param logOptions The logging configuration to use when sending and receiving HTTP requests/responses. * @return the updated CommunicationIdentityClientBuilder object */ public CommunicationIdentityClientBuilder httpLogOptions(HttpLogOptions logOptions) { this.httpLogOptions = Objects.requireNonNull(logOptions, "'logOptions' cannot be null."); return this; } /** * Sets the {@link CommunicationIdentityServiceVersion} that is used when making API requests. 
* <p> * If a service version is not provided, the service version that will be used will be the latest known service * version based on the version of the client library being used. If no service version is specified, updating to a * newer version of the client library will have the result of potentially moving to a newer service version. * <p> * Targeting a specific service version may also mean that the service will return an error for newer APIs. * * @param version {@link CommunicationIdentityServiceVersion} of the service to be used when making requests. * @return the updated CommunicationIdentityClientBuilder object */ public CommunicationIdentityClientBuilder serviceVersion(CommunicationIdentityServiceVersion version) { return this; } /** * Create asynchronous client applying HMACAuthenticationPolicy, UserAgentPolicy, * RetryPolicy, and CookiePolicy. * Additional HttpPolicies specified by additionalPolicies will be applied after them * * @return CommunicationIdentityAsyncClient instance */ public CommunicationIdentityAsyncClient buildAsyncClient() { return new CommunicationIdentityAsyncClient(createServiceImpl()); } /** * Create synchronous client applying HmacAuthenticationPolicy, UserAgentPolicy, * RetryPolicy, and CookiePolicy. 
* Additional HttpPolicies specified by additionalPolicies will be applied after them * * @return CommunicationIdentityClient instance */ public CommunicationIdentityClient buildClient() { return new CommunicationIdentityClient(createServiceImpl()); } private CommunicationIdentityClientImpl createServiceImpl() { Objects.requireNonNull(endpoint); HttpPipeline builderPipeline = this.pipeline; if (this.pipeline == null) { builderPipeline = createHttpPipeline(httpClient, createHttpPipelineAuthPolicy(), customPolicies); } CommunicationIdentityClientImplBuilder clientBuilder = new CommunicationIdentityClientImplBuilder(); clientBuilder.endpoint(endpoint) .pipeline(builderPipeline); return clientBuilder.buildClient(); } private HttpPipelinePolicy createHttpPipelineAuthPolicy() { if (this.tokenCredential != null && this.azureKeyCredential != null) { throw logger.logExceptionAsError( new IllegalArgumentException("Both 'credential' and 'accessKey' are set. Just one may be used.")); } if (this.tokenCredential != null) { return new BearerTokenAuthenticationPolicy( this.tokenCredential, "https: } else if (this.azureKeyCredential != null) { return new HmacAuthenticationPolicy(this.azureKeyCredential); } else { throw logger.logExceptionAsError( new IllegalArgumentException("Missing credential information while building a client.")); } } private HttpPipeline createHttpPipeline(HttpClient httpClient, HttpPipelinePolicy authorizationPolicy, List<HttpPipelinePolicy> customPolicies) { List<HttpPipelinePolicy> policies = new ArrayList<HttpPipelinePolicy>(); policies.add(authorizationPolicy); applyRequiredPolicies(policies); if (customPolicies != null && customPolicies.size() > 0) { policies.addAll(customPolicies); } return new HttpPipelineBuilder() .policies(policies.toArray(new HttpPipelinePolicy[0])) .httpClient(httpClient) .clientOptions(clientOptions) .build(); } private void applyRequiredPolicies(List<HttpPipelinePolicy> policies) { String clientName = 
properties.getOrDefault(SDK_NAME, "UnknownName"); String clientVersion = properties.getOrDefault(SDK_VERSION, "UnknownVersion"); ClientOptions buildClientOptions = (clientOptions == null) ? new ClientOptions() : clientOptions; HttpLogOptions buildLogOptions = (httpLogOptions == null) ? new HttpLogOptions() : httpLogOptions; String applicationId = null; if (!CoreUtils.isNullOrEmpty(buildClientOptions.getApplicationId())) { applicationId = buildClientOptions.getApplicationId(); } else if (!CoreUtils.isNullOrEmpty(buildLogOptions.getApplicationId())) { applicationId = buildLogOptions.getApplicationId(); } policies.add(new UserAgentPolicy(applicationId, clientName, clientVersion, configuration)); policies.add(new RetryPolicy()); policies.add(new CookiePolicy()); policies.add(new HttpLoggingPolicy(httpLogOptions)); } }
nit: This try/catch can be removed — the method being called (sendMessageWithResponse) already wraps its work in an identical try/catch and returns monoError itself.
/**
 * Enqueues a message that has a time-to-live of 7 days and is instantly visible.
 *
 * @param message Message content
 * @return A {@link SendMessageResult} value that contains metadata about the enqueued message.
 * @throws QueueStorageException If the queue doesn't exist
 */
public Mono<SendMessageResult> sendMessage(BinaryData message) {
    // FIX: removed the redundant try/catch + monoError wrapper — sendMessageWithResponse
    // already performs exactly that wrapping before returning.
    return sendMessageWithResponse(message, null, null).flatMap(FluxUtil::toMono);
}
}
/**
 * Enqueues a message that has a time-to-live of 7 days and is instantly visible.
 *
 * @param message Message content
 * @return A {@link SendMessageResult} value that contains metadata about the enqueued message.
 * @throws QueueStorageException If the queue doesn't exist
 */
public Mono<SendMessageResult> sendMessage(BinaryData message) {
    // Delegate to the response-returning overload with default visibility timeout and
    // time-to-live, then unwrap the response value.
    return sendMessageWithResponse(message, null, null)
        .flatMap(sendResponse -> FluxUtil.toMono(sendResponse));
}
class QueueAsyncClient { private final ClientLogger logger = new ClientLogger(QueueAsyncClient.class); private final AzureQueueStorageImpl client; private final String queueName; private final String accountName; private final QueueServiceVersion serviceVersion; private final QueueMessageEncoding messageEncoding; private final Function<QueueMessageDecodingFailure, Mono<Void>> messageDecodingFailedHandler; /** * Creates a QueueAsyncClient that sends requests to the storage queue service at {@link * Each service call goes through the {@link HttpPipeline pipeline}. * * @param client Client that interacts with the service interfaces * @param queueName Name of the queue */ QueueAsyncClient(AzureQueueStorageImpl client, String queueName, String accountName, QueueServiceVersion serviceVersion, QueueMessageEncoding messageEncoding, Function<QueueMessageDecodingFailure, Mono<Void>> messageDecodingFailedHandler) { Objects.requireNonNull(queueName, "'queueName' cannot be null."); this.queueName = queueName; this.client = client; this.accountName = accountName; this.serviceVersion = serviceVersion; this.messageEncoding = messageEncoding; this.messageDecodingFailedHandler = messageDecodingFailedHandler; } /** * @return the URL of the storage queue */ public String getQueueUrl() { return String.format("%s/%s", client.getUrl(), queueName); } /** * Gets the service version the client is using. * * @return the service version the client is using. */ public QueueServiceVersion getServiceVersion() { return serviceVersion; } /** * Gets the message encoding the client is using. * * @return the message encoding the client is using. */ public QueueMessageEncoding getMessageEncoding() { return messageEncoding; } /** * Gets the {@link HttpPipeline} powering this client. * * @return The pipeline. */ public HttpPipeline getHttpPipeline() { return client.getHttpPipeline(); } /** * Creates a new queue. 
* * <p><strong>Code Samples</strong></p> * * <p>Create a queue</p> * * {@codesnippet com.azure.storage.queue.queueAsyncClient.create} * * <p>For more information, see the * <a href="https: * * @return An empty response * @throws QueueStorageException If a queue with the same name already exists in the queue service. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Void> create() { try { return createWithResponse(null).flatMap(FluxUtil::toMono); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Creates a new queue. * * <p><strong>Code Samples</strong></p> * * <p>Create a queue with metadata "queue:metadataMap"</p> * * {@codesnippet com.azure.storage.queue.queueAsyncClient.createWithResponse * * <p>For more information, see the * <a href="https: * * @param metadata Metadata to associate with the queue. If there is leading or trailing whitespace in any * metadata key or value, it must be removed or encoded. * @return A response that only contains headers and response status code * @throws QueueStorageException If a queue with the same name and different metadata already exists in the queue * service. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<Void>> createWithResponse(Map<String, String> metadata) { try { return withContext(context -> createWithResponse(metadata, context)); } catch (RuntimeException ex) { return monoError(logger, ex); } } Mono<Response<Void>> createWithResponse(Map<String, String> metadata, Context context) { context = context == null ? Context.NONE : context; return client.getQueues().createWithResponseAsync(queueName, null, metadata, null, context.addData(AZ_TRACING_NAMESPACE_KEY, STORAGE_TRACING_NAMESPACE_VALUE)) .map(response -> new SimpleResponse<>(response, null)); } /** * Permanently deletes the queue. 
* * <p><strong>Code Samples</strong></p> * * <p>Delete a queue</p> * * {@codesnippet com.azure.storage.queue.queueAsyncClient.delete} * * <p>For more information, see the * <a href="https: * * @return An empty response * @throws QueueStorageException If the queue doesn't exist */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Void> delete() { try { return deleteWithResponse().flatMap(FluxUtil::toMono); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Permanently deletes the queue. * * <p><strong>Code Samples</strong></p> * * <p>Delete a queue</p> * * {@codesnippet com.azure.storage.queue.queueAsyncClient.deleteWithResponse} * * <p>For more information, see the * <a href="https: * * @return A response that only contains headers and response status code * @throws QueueStorageException If the queue doesn't exist */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<Void>> deleteWithResponse() { try { return withContext(this::deleteWithResponse); } catch (RuntimeException ex) { return monoError(logger, ex); } } Mono<Response<Void>> deleteWithResponse(Context context) { context = context == null ? Context.NONE : context; return client.getQueues().deleteWithResponseAsync(queueName, null, null, context.addData(AZ_TRACING_NAMESPACE_KEY, STORAGE_TRACING_NAMESPACE_VALUE)) .map(response -> new SimpleResponse<>(response, null)); } /** * Retrieves metadata and approximate message count of the queue. * * <p><strong>Code Samples</strong></p> * * <p>Get the properties of the queue</p> * * {@codesnippet com.azure.storage.queue.queueAsyncClient.getProperties} * * <p>For more information, see the * <a href="https: * * @return A response containing a {@link QueueProperties} value which contains the metadata and approximate * messages count of the queue. 
* @throws QueueStorageException If the queue doesn't exist */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<QueueProperties> getProperties() { try { return getPropertiesWithResponse().flatMap(FluxUtil::toMono); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Retrieves metadata and approximate message count of the queue. * * <p><strong>Code Samples</strong></p> * * <p>Get the properties of the queue</p> * * {@codesnippet com.azure.storage.queue.queueAsyncClient.getPropertiesWithResponse} * * <p>For more information, see the * <a href="https: * * @return A response containing a {@link QueueProperties} value which contains the metadata and approximate * messages count of the queue. * @throws QueueStorageException If the queue doesn't exist */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<QueueProperties>> getPropertiesWithResponse() { try { return withContext(this::getPropertiesWithResponse); } catch (RuntimeException ex) { return monoError(logger, ex); } } Mono<Response<QueueProperties>> getPropertiesWithResponse(Context context) { context = context == null ? Context.NONE : context; return client.getQueues().getPropertiesWithResponseAsync(queueName, null, null, context.addData(AZ_TRACING_NAMESPACE_KEY, STORAGE_TRACING_NAMESPACE_VALUE)) .map(this::getQueuePropertiesResponse); } /** * Sets the metadata of the queue. * * Passing in a {@code null} value for metadata will clear the metadata associated with the queue. 
* * <p><strong>Code Samples</strong></p> * * <p>Set the queue's metadata to "queue:metadataMap"</p> * * {@codesnippet com.azure.storage.queue.queueAsyncClient.setMetadata * * <p>Clear the queue's metadata</p> * * {@codesnippet com.azure.storage.queue.queueAsyncClient.clearMetadata * * <p>For more information, see the * <a href="https: * * @param metadata Metadata to set on the queue * @return A response that only contains headers and response status code * @throws QueueStorageException If the queue doesn't exist */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Void> setMetadata(Map<String, String> metadata) { try { return setMetadataWithResponse(metadata).flatMap(FluxUtil::toMono); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Sets the metadata of the queue. * * Passing in a {@code null} value for metadata will clear the metadata associated with the queue. * * <p><strong>Code Samples</strong></p> * * <p>Set the queue's metadata to "queue:metadataMap"</p> * * {@codesnippet com.azure.storage.queue.queueAsyncClient.setMetadataWithResponse * * <p>Clear the queue's metadata</p> * * {@codesnippet com.azure.storage.queue.queueAsyncClient.clearMetadataWithResponse * * <p>For more information, see the * <a href="https: * * @param metadata Metadata to set on the queue * @return A response that only contains headers and response status code * @throws QueueStorageException If the queue doesn't exist */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<Void>> setMetadataWithResponse(Map<String, String> metadata) { try { return withContext(context -> setMetadataWithResponse(metadata, context)); } catch (RuntimeException ex) { return monoError(logger, ex); } } Mono<Response<Void>> setMetadataWithResponse(Map<String, String> metadata, Context context) { context = context == null ? 
Context.NONE : context; return client.getQueues() .setMetadataWithResponseAsync(queueName, null, metadata, null, context.addData(AZ_TRACING_NAMESPACE_KEY, STORAGE_TRACING_NAMESPACE_VALUE)) .map(response -> new SimpleResponse<>(response, null)); } /** * Retrieves stored access policies specified on the queue. * * <p><strong>Code Samples</strong></p> * * <p>List the stored access policies</p> * * {@codesnippet com.azure.storage.queue.queueAsyncClient.getAccessPolicy} * * <p>For more information, see the * <a href="https: * * @return The stored access policies specified on the queue. * @throws QueueStorageException If the queue doesn't exist */ @ServiceMethod(returns = ReturnType.COLLECTION) public PagedFlux<QueueSignedIdentifier> getAccessPolicy() { try { Function<String, Mono<PagedResponse<QueueSignedIdentifier>>> retriever = marker -> this.client.getQueues() .getAccessPolicyWithResponseAsync(queueName, null, null, Context.NONE) .map(response -> new PagedResponseBase<>(response.getRequest(), response.getStatusCode(), response.getHeaders(), response.getValue(), null, response.getDeserializedHeaders())); return new PagedFlux<>(() -> retriever.apply(null), retriever); } catch (RuntimeException ex) { return pagedFluxError(logger, ex); } } /** * Sets stored access policies on the queue. * * <p><strong>Code Samples</strong></p> * * <p>Set a read only stored access policy</p> * * {@codesnippet com.azure.storage.queue.QueueAsyncClient.setAccessPolicy * * <p>For more information, see the * <a href="https: * * @param permissions Access policies to set on the queue * @return An empty response * @throws QueueStorageException If the queue doesn't exist, a stored access policy doesn't have all fields filled * out, or the queue will have more than five policies. 
*/ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Void> setAccessPolicy(Iterable<QueueSignedIdentifier> permissions) { try { return setAccessPolicyWithResponse(permissions).flatMap(FluxUtil::toMono); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Sets stored access policies on the queue. * * <p><strong>Code Samples</strong></p> * * <p>Set a read only stored access policy</p> * * {@codesnippet com.azure.storage.queue.QueueAsyncClient.setAccessPolicyWithResponse * * <p>For more information, see the * <a href="https: * * @param permissions Access policies to set on the queue * @return A response that only contains headers and response status code * @throws QueueStorageException If the queue doesn't exist, a stored access policy doesn't have all fields filled * out, or the queue will have more than five policies. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<Void>> setAccessPolicyWithResponse(Iterable<QueueSignedIdentifier> permissions) { try { return withContext(context -> setAccessPolicyWithResponse(permissions, context)); } catch (RuntimeException ex) { return monoError(logger, ex); } } Mono<Response<Void>> setAccessPolicyWithResponse(Iterable<QueueSignedIdentifier> permissions, Context context) { context = context == null ? Context.NONE : context; /* We truncate to seconds because the service only supports nanoseconds or seconds, but doing an OffsetDateTime.now will only give back milliseconds (more precise fields are zeroed and not serialized). This allows for proper serialization with no real detriment to users as sub-second precision on active time for signed identifiers is not really necessary. 
*/ if (permissions != null) { for (QueueSignedIdentifier permission : permissions) { if (permission.getAccessPolicy() != null && permission.getAccessPolicy().getStartsOn() != null) { permission.getAccessPolicy().setStartsOn( permission.getAccessPolicy().getStartsOn().truncatedTo(ChronoUnit.SECONDS)); } if (permission.getAccessPolicy() != null && permission.getAccessPolicy().getExpiresOn() != null) { permission.getAccessPolicy().setExpiresOn( permission.getAccessPolicy().getExpiresOn().truncatedTo(ChronoUnit.SECONDS)); } } } List<QueueSignedIdentifier> permissionsList = StreamSupport.stream( permissions != null ? permissions.spliterator() : Spliterators.emptySpliterator(), false) .collect(Collectors.toList()); return client.getQueues() .setAccessPolicyWithResponseAsync(queueName, null, null, permissionsList, context.addData(AZ_TRACING_NAMESPACE_KEY, STORAGE_TRACING_NAMESPACE_VALUE)) .map(response -> new SimpleResponse<>(response, null)); } /** * Deletes all messages in the queue. * * <p><strong>Code Samples</strong></p> * * <p>Clear the messages</p> * * {@codesnippet com.azure.storage.queue.queueAsyncClient.clearMessages} * * <p>For more information, see the * <a href="https: * * @return An empty response * @throws QueueStorageException If the queue doesn't exist */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Void> clearMessages() { try { return clearMessagesWithResponse().flatMap(FluxUtil::toMono); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Deletes all messages in the queue. 
* * <p><strong>Code Samples</strong></p> * * <p>Clear the messages</p> * * {@codesnippet com.azure.storage.queue.queueAsyncClient.clearMessagesWithResponse} * * <p>For more information, see the * <a href="https: * * @return A response that only contains headers and response status code * @throws QueueStorageException If the queue doesn't exist */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<Void>> clearMessagesWithResponse() { try { return withContext(this::clearMessagesWithResponse); } catch (RuntimeException ex) { return monoError(logger, ex); } } Mono<Response<Void>> clearMessagesWithResponse(Context context) { context = context == null ? Context.NONE : context; return client.getMessages().clearWithResponseAsync(queueName, null, null, context.addData(AZ_TRACING_NAMESPACE_KEY, STORAGE_TRACING_NAMESPACE_VALUE)) .map(response -> new SimpleResponse<>(response, null)); } /** * Enqueues a message that has a time-to-live of 7 days and is instantly visible. * * <p><strong>Code Samples</strong></p> * * <p>Enqueue a message of "Hello, Azure"</p> * * {@codesnippet com.azure.storage.queue.queueAsyncClient.sendMessage * * <p>For more information, see the * <a href="https: * * @param messageText Message text * @return A {@link SendMessageResult} value that contains the {@link SendMessageResult * and {@link SendMessageResult * and other metadata about the enqueued message. * @throws QueueStorageException If the queue doesn't exist */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<SendMessageResult> sendMessage(String messageText) { try { return sendMessageWithResponse(messageText, null, null).flatMap(FluxUtil::toMono); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Enqueues a message that has a time-to-live of 7 days and is instantly visible. 
* * <p><strong>Code Samples</strong></p> * * <p>Enqueue a message of "Hello, Azure"</p> * * {@codesnippet com.azure.storage.queue.queueAsyncClient.sendMessage * * <p>For more information, see the * <a href="https: * * @param message Message content * @return A {@link SendMessageResult} value that contains the {@link SendMessageResult * and {@link SendMessageResult * and other metadata about the enqueued message. * @throws QueueStorageException If the queue doesn't exist */ @ServiceMethod(returns = ReturnType.SINGLE) /** * Enqueues a message with a given time-to-live and a timeout period where the message is invisible in the queue. * * <p><strong>Code Samples</strong></p> * * <p>Add a message of "Hello, Azure" that has a timeout of 5 seconds</p> * * {@codesnippet com.azure.storage.queue.queueAsyncClient.sendMessageWithResponse * * <p>Add a message of "Goodbye, Azure" that has a time to live of 5 seconds</p> * * {@codesnippet com.azure.storage.queue.QueueAsyncClient.sendMessageWithResponse-liveTime * * <p>For more information, see the * <a href="https: * * @param messageText Message text * @param visibilityTimeout Optional. The timeout period for how long the message is invisible in the queue. If * unset the value will default to 0 and the message will be instantly visible. The timeout must be between 0 * seconds and 7 days. * @param timeToLive Optional. How long the message will stay alive in the queue. If unset the value will default to * 7 days, if -1 is passed the message will not expire. The time to live must be -1 or any positive number. * @return A {@link SendMessageResult} value that contains the {@link SendMessageResult * and {@link SendMessageResult * and other metadata about the enqueued message. * @throws QueueStorageException If the queue doesn't exist or the {@code visibilityTimeout} or {@code timeToLive} * are outside of the allowed limits. 
*/ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<SendMessageResult>> sendMessageWithResponse(String messageText, Duration visibilityTimeout, Duration timeToLive) { try { BinaryData message = messageText == null ? null : BinaryData.fromString(messageText); return withContext(context -> sendMessageWithResponse(message, visibilityTimeout, timeToLive, context)); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Enqueues a message with a given time-to-live and a timeout period where the message is invisible in the queue. * * <p><strong>Code Samples</strong></p> * * <p>Add a message of "Hello, Azure" that has a timeout of 5 seconds</p> * * {@codesnippet com.azure.storage.queue.queueAsyncClient.sendMessageWithResponse * * <p>Add a message of "Goodbye, Azure" that has a time to live of 5 seconds</p> * * {@codesnippet com.azure.storage.queue.QueueAsyncClient.sendMessageWithResponse-liveTime * * <p>For more information, see the * <a href="https: * * @param message Message content. * @param visibilityTimeout Optional. The timeout period for how long the message is invisible in the queue. If * unset the value will default to 0 and the message will be instantly visible. The timeout must be between 0 * seconds and 7 days. * @param timeToLive Optional. How long the message will stay alive in the queue. If unset the value will default to * 7 days, if -1 is passed the message will not expire. The time to live must be -1 or any positive number. * @return A {@link SendMessageResult} value that contains the {@link SendMessageResult * and {@link SendMessageResult * and other metadata about the enqueued message. * @throws QueueStorageException If the queue doesn't exist or the {@code visibilityTimeout} or {@code timeToLive} * are outside of the allowed limits. 
*/ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<SendMessageResult>> sendMessageWithResponse(BinaryData message, Duration visibilityTimeout, Duration timeToLive) { try { return withContext(context -> sendMessageWithResponse(message, visibilityTimeout, timeToLive, context)); } catch (RuntimeException ex) { return monoError(logger, ex); } } Mono<Response<SendMessageResult>> sendMessageWithResponse(BinaryData message, Duration visibilityTimeout, Duration timeToLive, Context context) { Integer visibilityTimeoutInSeconds = (visibilityTimeout == null) ? null : (int) visibilityTimeout.getSeconds(); Integer timeToLiveInSeconds = (timeToLive == null) ? null : (int) timeToLive.getSeconds(); String messageText = encodeMessage(message); QueueMessage queueMessage = new QueueMessage().setMessageText(messageText); context = context == null ? Context.NONE : context; return client.getMessages() .enqueueWithResponseAsync(queueName, queueMessage, visibilityTimeoutInSeconds, timeToLiveInSeconds, null, null, context.addData(AZ_TRACING_NAMESPACE_KEY, STORAGE_TRACING_NAMESPACE_VALUE)) .map(response -> new SimpleResponse<>(response, response.getValue().get(0))); } private String encodeMessage(BinaryData message) { if (message == null) { return null; } switch (messageEncoding) { case NONE: return message.toString(); case BASE64: return Base64.getEncoder().encodeToString(message.toBytes()); default: throw logger.logExceptionAsError( new IllegalArgumentException("Unsupported message encoding=" + messageEncoding)); } } /** * Retrieves the first message in the queue and hides it from other operations for 30 seconds. 
* * <p><strong>Code Samples</strong></p> * * <p>Dequeue a message</p> * * {@codesnippet com.azure.storage.queue.queueAsyncClient.receiveMessage} * * <p>For more information, see the * <a href="https: * * @return The first {@link QueueMessageItem} in the queue, it contains {@link QueueMessageItem * messageId} and {@link QueueMessageItem * additionally it contains other metadata about the message. * @throws QueueStorageException If the queue doesn't exist */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<QueueMessageItem> receiveMessage() { try { return receiveMessagesWithOptionalTimeout(1, null, null, Context.NONE).singleOrEmpty(); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Retrieves up to the maximum number of messages from the queue and hides them from other operations for 30 * seconds. * * <p><strong>Code Samples</strong></p> * * <p>Dequeue up to 5 messages</p> * * {@codesnippet com.azure.storage.queue.queueAsyncClient.receiveMessages * * <p>For more information, see the * <a href="https: * * @param maxMessages Optional. Maximum number of messages to get, if there are less messages exist in the queue * than requested all the messages will be returned. If left empty only 1 message will be retrieved, the allowed * range is 1 to 32 messages. * @return Up to {@code maxMessages} {@link QueueMessageItem ReceiveMessageItem} from the queue. * Each DequeuedMessage contains {@link QueueMessageItem * {@link QueueMessageItem * other metadata about the message. 
* @throws QueueStorageException If the queue doesn't exist or {@code maxMessages} is outside of the allowed bounds
 */
@ServiceMethod(returns = ReturnType.COLLECTION)
public PagedFlux<QueueMessageItem> receiveMessages(Integer maxMessages) {
    try {
        // No visibility-timeout override: dequeued messages stay hidden for the service default (30s).
        return receiveMessagesWithOptionalTimeout(maxMessages, null, null, Context.NONE);
    } catch (RuntimeException ex) {
        return pagedFluxError(logger, ex);
    }
}

/**
 * Retrieves up to the maximum number of messages from the queue and hides them from other operations for the
 * timeout period.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * <p>Dequeue up to 5 messages and give them a 60 second timeout period</p>
 *
 * {@codesnippet com.azure.storage.queue.queueAsyncClient.receiveMessages
 *
 * <p>For more information, see the
 * <a href="https:
 *
 * @param maxMessages Optional. Maximum number of messages to get, if there are less messages exist in the queue
 * than requested all the messages will be returned. If left empty only 1 message will be retrieved, the allowed
 * range is 1 to 32 messages.
 * @param visibilityTimeout Optional. The timeout period for how long the message is invisible in the queue. If left
 * empty the dequeued messages will be invisible for 30 seconds. The timeout must be between 1 second and 7 days.
 * @return Up to {@code maxMessages} {@link QueueMessageItem DequeuedMessages} from the queue. Each DeqeuedMessage
 * contains {@link QueueMessageItem
 * {@link QueueMessageItem
 * used to interact with the message and other metadata about the message.
* @throws QueueStorageException If the queue doesn't exist or {@code maxMessages} or {@code visibilityTimeout} is
 * outside of the allowed bounds
 */
@ServiceMethod(returns = ReturnType.COLLECTION)
public PagedFlux<QueueMessageItem> receiveMessages(Integer maxMessages, Duration visibilityTimeout) {
    try {
        return receiveMessagesWithOptionalTimeout(maxMessages, visibilityTimeout, null, Context.NONE);
    } catch (RuntimeException ex) {
        return pagedFluxError(logger, ex);
    }
}

// Shared retrieval path. 'timeout' (when non-null) bounds each service round trip via
// StorageImplUtils.applyOptionalTimeout; the dequeue API is single-page, so the same retriever is
// reused regardless of the continuation marker.
PagedFlux<QueueMessageItem> receiveMessagesWithOptionalTimeout(Integer maxMessages, Duration visibilityTimeout,
    Duration timeout, Context context) {
    Integer visibilityTimeoutInSeconds = (visibilityTimeout == null) ? null : (int) visibilityTimeout.getSeconds();
    Function<String, Mono<PagedResponse<QueueMessageItem>>> retriever =
        marker -> StorageImplUtils.applyOptionalTimeout(this.client.getMessages()
            .dequeueWithResponseAsync(queueName, maxMessages, visibilityTimeoutInSeconds, null, null,
                context), timeout)
            .flatMap(this::transformMessagesDequeueResponse);
    return new PagedFlux<>(() -> retriever.apply(null), retriever);
}

// Converts the generated-layer dequeue response into public QueueMessageItems, decoding each body.
// Decoding failures are either routed to the configured failure handler (best-effort, deferred via
// 'mono') or rethrown when no handler is set.
// NOTE(review): this reads a 'messageDecodingFailedHandler' field and a 'QueueMessageDecodingFailure'
// type, while the constructor visible in this file declares 'processMessageDecodingErrorAsyncHandler'
// and 'QueueMessageDecodingError' — confirm the field/type names are consistent across the file.
private Mono<PagedResponseBase<MessagesDequeueHeaders, QueueMessageItem>> transformMessagesDequeueResponse(
    MessagesDequeueResponse response) {
    List<QueueMessageItemInternal> queueMessageInternalItems = response.getValue();
    List<QueueMessageItem> queueMessageItems = Collections.emptyList();
    Mono<Void> mono = Mono.empty();
    if (queueMessageInternalItems != null) {
        queueMessageItems = new ArrayList<>(queueMessageInternalItems.size());
        for (QueueMessageItemInternal queueMessageItemInternal : queueMessageInternalItems) {
            try {
                queueMessageItems.add(transformQueueMessageItemInternal(queueMessageItemInternal,
                    messageEncoding));
            } catch (IllegalArgumentException e) {
                if (messageDecodingFailedHandler != null) {
                    // Hand the raw (undecoded) message to the handler; chain so all handler work
                    // completes before the page is emitted.
                    mono = mono.then(messageDecodingFailedHandler.apply(
                        new QueueMessageDecodingFailure(
                            this,
                            transformQueueMessageItemInternal(queueMessageItemInternal,
                                QueueMessageEncoding.NONE),
                            null)
                    ));
                } else {
                    throw logger.logExceptionAsError(e);
                }
            }
        }
    }
    return mono.then(Mono.just(new PagedResponseBase<>(response.getRequest(), response.getStatusCode(),
        response.getHeaders(), queueMessageItems, null, response.getDeserializedHeaders())));
}

// Copies the generated-layer message into the public model, decoding the body with the given encoding.
private QueueMessageItem transformQueueMessageItemInternal(
    QueueMessageItemInternal queueMessageItemInternal, QueueMessageEncoding messageEncoding) {
    return new QueueMessageItem()
        .setMessageId(queueMessageItemInternal.getMessageId())
        .setBody(decodeMessageBody(queueMessageItemInternal.getMessageText(), messageEncoding))
        .setDequeueCount(queueMessageItemInternal.getDequeueCount())
        .setExpirationTime(queueMessageItemInternal.getExpirationTime())
        .setInsertionTime(queueMessageItemInternal.getInsertionTime())
        .setPopReceipt(queueMessageItemInternal.getPopReceipt())
        .setTimeNextVisible(queueMessageItemInternal.getTimeNextVisible());
}

/**
 * Decodes a message body received from the service according to the given encoding.
 *
 * @param messageText Raw message text from the service, may be {@code null}.
 * @param messageEncoding Encoding to apply.
 * @return The decoded body, or {@code null} when {@code messageText} is {@code null}.
 * @throws IllegalArgumentException If the text is not valid Base64 (for BASE64) or the encoding is unsupported.
 */
private BinaryData decodeMessageBody(String messageText, QueueMessageEncoding messageEncoding) {
    if (messageText == null) {
        return null;
    }
    switch (messageEncoding) {
        case NONE:
            return BinaryData.fromString(messageText);
        case BASE64:
            return BinaryData.fromBytes(Base64.getDecoder().decode(messageText));
        default:
            throw logger.logExceptionAsError(
                new IllegalArgumentException("Unsupported message encoding=" + messageEncoding));
    }
}

/**
 * Peeks the first message in the queue.
 *
 * Peeked messages don't contain the necessary information needed to interact with the message nor will it hide
 * messages from other operations on the queue.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * <p>Peek the first message</p>
 *
 * {@codesnippet com.azure.storage.queue.queueAsyncClient.peekMessage}
 *
 * <p>For more information, see the
 * <a href="https:
 *
 * @return A {@link PeekedMessageItem} that contains metadata about the message.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<PeekedMessageItem> peekMessage() {
    try {
        // Peek with the service default count (1) and emit at most one item.
        return peekMessages(null).singleOrEmpty();
    } catch (RuntimeException ex) {
        return monoError(logger, ex);
    }
}

/**
 * Peek messages from the front of the queue up to the maximum number of messages.
 *
 * Peeked messages don't contain the necessary information needed to interact with the message nor will it hide
 * messages from other operations on the queue.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * <p>Peek up to the first five messages</p>
 *
 * {@codesnippet com.azure.storage.queue.queueAsyncClient.peekMessages
 *
 * <p>For more information, see the
 * <a href="https:
 *
 * @param maxMessages Optional. Maximum number of messages to peek, if there are less messages exist in the queue
 * than requested all the messages will be peeked. If left empty only 1 message will be peeked, the allowed range is
 * 1 to 32 messages.
 * @return Up to {@code maxMessages} {@link PeekedMessageItem PeekedMessages} from the queue. Each PeekedMessage
 * contains metadata about the message.
* @throws QueueStorageException If the queue doesn't exist or {@code maxMessages} is outside of the allowed bounds
 */
@ServiceMethod(returns = ReturnType.COLLECTION)
public PagedFlux<PeekedMessageItem> peekMessages(Integer maxMessages) {
    try {
        return peekMessagesWithOptionalTimeout(maxMessages, null, Context.NONE);
    } catch (RuntimeException ex) {
        return pagedFluxError(logger, ex);
    }
}

// Shared peek path; 'timeout' (when non-null) bounds each service round trip. The peek API is
// single-page, so the same retriever is reused regardless of the continuation marker.
PagedFlux<PeekedMessageItem> peekMessagesWithOptionalTimeout(Integer maxMessages, Duration timeout,
    Context context) {
    Function<String, Mono<PagedResponse<PeekedMessageItem>>> retriever =
        marker -> StorageImplUtils.applyOptionalTimeout(this.client.getMessages()
            .peekWithResponseAsync(queueName, maxMessages, null, null, context), timeout)
            .flatMap(this::transformMessagesPeekResponse);
    return new PagedFlux<>(() -> retriever.apply(null), retriever);
}

// Converts the generated-layer peek response into public PeekedMessageItems, decoding each body.
// Decoding failures go to the configured failure handler (deferred via 'mono') or are rethrown.
// NOTE(review): like the dequeue transform, this reads 'messageDecodingFailedHandler' — confirm it
// matches the handler fields declared in the constructor elsewhere in this file.
private Mono<PagedResponseBase<MessagesPeekHeaders, PeekedMessageItem>> transformMessagesPeekResponse(
    MessagesPeekResponse response) {
    List<PeekedMessageItemInternal> peekedMessageInternalItems = response.getValue();
    List<PeekedMessageItem> peekedMessageItems = Collections.emptyList();
    Mono<Void> mono = Mono.empty();
    if (peekedMessageInternalItems != null) {
        peekedMessageItems = new ArrayList<>(peekedMessageInternalItems.size());
        for (PeekedMessageItemInternal peekedMessageItemInternal : peekedMessageInternalItems) {
            try {
                peekedMessageItems.add(transformPeekedMessageItemInternal(
                    peekedMessageItemInternal, messageEncoding));
            } catch (IllegalArgumentException e) {
                if (messageDecodingFailedHandler != null) {
                    mono = mono.then(messageDecodingFailedHandler.apply(
                        new QueueMessageDecodingFailure(
                            this,
                            null,
                            transformPeekedMessageItemInternal(
                                peekedMessageItemInternal, QueueMessageEncoding.NONE))
                    ));
                } else {
                    throw logger.logExceptionAsError(e);
                }
            }
        }
    }
    return mono.then(Mono.just(new PagedResponseBase<>(response.getRequest(), response.getStatusCode(),
        response.getHeaders(), peekedMessageItems, null, response.getDeserializedHeaders())));
}
// Copies the generated-layer peeked message into the public model, decoding the body with the given
// encoding. Peeked messages carry no pop receipt or next-visible time, so those fields are not set.
private PeekedMessageItem transformPeekedMessageItemInternal(
    PeekedMessageItemInternal peekedMessageItemInternal, QueueMessageEncoding messageEncoding) {
    return new PeekedMessageItem()
        .setMessageId(peekedMessageItemInternal.getMessageId())
        .setBody(decodeMessageBody(peekedMessageItemInternal.getMessageText(), messageEncoding))
        .setDequeueCount(peekedMessageItemInternal.getDequeueCount())
        .setExpirationTime(peekedMessageItemInternal.getExpirationTime())
        .setInsertionTime(peekedMessageItemInternal.getInsertionTime());
}

/**
 * Updates the specific message in the queue with a new message and resets the visibility timeout.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * <p>Dequeue the first message and update it to "Hello again, Azure" and hide it for 5 seconds</p>
 *
 * {@codesnippet com.azure.storage.queue.QueueAsyncClient.updateMessage
 *
 * <p>For more information, see the
 * <a href="https:
 *
 * @param messageId Id of the message to update
 * @param popReceipt Unique identifier that must match for the message to be updated
 * @param messageText Updated value for the message
 * @param visibilityTimeout The timeout period for how long the message is invisible in the queue in seconds. The
 * timeout period must be between 1 second and 7 days. The default value is Duration.ZERO.
 * @return A {@link UpdateMessageResult} that contains the new
 * {@link UpdateMessageResult
 * additionally contains the updated metadata about the message.
* @throws QueueStorageException If the queue or messageId don't exist, the popReceipt doesn't match on the message,
 * or the {@code visibilityTimeout} is outside the allowed bounds
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<UpdateMessageResult> updateMessage(String messageId, String popReceipt, String messageText,
    Duration visibilityTimeout) {
    try {
        // Unwrap the Response<T> variant to its value.
        return updateMessageWithResponse(messageId, popReceipt, messageText, visibilityTimeout)
            .flatMap(FluxUtil::toMono);
    } catch (RuntimeException ex) {
        return monoError(logger, ex);
    }
}

/**
 * Updates the specific message in the queue with a new message and resets the visibility timeout.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * <p>Dequeue the first message and update it to "Hello again, Azure" and hide it for 5 seconds</p>
 *
 * {@codesnippet com.azure.storage.queue.QueueAsyncClient.updateMessageWithResponse
 *
 * <p>For more information, see the
 * <a href="https:
 *
 * @param messageId Id of the message to update
 * @param popReceipt Unique identifier that must match for the message to be updated
 * @param messageText Updated value for the message
 * @param visibilityTimeout The timeout period for how long the message is invisible in the queue in seconds. The
 * timeout period must be between 1 second and 7 days. The default value is Duration.ZERO.
 * @return A {@link UpdateMessageResult} that contains the new
 * {@link UpdateMessageResult
 * additionally contains the updated metadata about the message.
* @throws QueueStorageException If the queue or messageId don't exist, the popReceipt doesn't match on the message,
 * or the {@code visibilityTimeout} is outside the allowed bounds
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<UpdateMessageResult>> updateMessageWithResponse(String messageId, String popReceipt,
    String messageText, Duration visibilityTimeout) {
    try {
        return withContext(context ->
            updateMessageWithResponse(messageId, popReceipt, messageText, visibilityTimeout, context));
    } catch (RuntimeException ex) {
        return monoError(logger, ex);
    }
}

// Transport-level implementation. A null messageText leaves the message body unchanged (only the
// visibility timeout is reset); a null visibilityTimeout defaults to Duration.ZERO.
Mono<Response<UpdateMessageResult>> updateMessageWithResponse(String messageId, String popReceipt,
    String messageText, Duration visibilityTimeout, Context context) {
    QueueMessage message = messageText == null ? null : new QueueMessage().setMessageText(messageText);
    context = context == null ? Context.NONE : context;
    visibilityTimeout = visibilityTimeout == null ? Duration.ZERO : visibilityTimeout;
    return client.getMessageIds().updateWithResponseAsync(queueName, messageId, popReceipt,
        (int) visibilityTimeout.getSeconds(), null, null, message,
        context.addData(AZ_TRACING_NAMESPACE_KEY, STORAGE_TRACING_NAMESPACE_VALUE))
        .map(this::getUpdatedMessageResponse);
}

/**
 * Deletes the specified message in the queue
 *
 * <p><strong>Code Samples</strong></p>
 *
 * <p>Delete the first message</p>
 *
 * {@codesnippet com.azure.storage.queue.QueueAsyncClient.deleteMessage
 *
 * <p>For more information, see the
 * <a href="https:
 *
 * @param messageId Id of the message to deleted
 * @param popReceipt Unique identifier that must match for the message to be deleted
 * @return An empty response
 * @throws QueueStorageException If the queue or messageId don't exist or the popReceipt doesn't match on the
 * message.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Void> deleteMessage(String messageId, String popReceipt) {
    try {
        return deleteMessageWithResponse(messageId, popReceipt).flatMap(FluxUtil::toMono);
    } catch (RuntimeException ex) {
        return monoError(logger, ex);
    }
}

/**
 * Deletes the specified message in the queue
 *
 * <p><strong>Code Samples</strong></p>
 *
 * <p>Delete the first message</p>
 *
 * {@codesnippet com.azure.storage.queue.QueueAsyncClient.deleteMessageWithResponse
 *
 * <p>For more information, see the
 * <a href="https:
 *
 * @param messageId Id of the message to deleted
 * @param popReceipt Unique identifier that must match for the message to be deleted
 * @return A response that only contains headers and response status code
 * @throws QueueStorageException If the queue or messageId don't exist or the popReceipt doesn't match on the
 * message.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<Void>> deleteMessageWithResponse(String messageId, String popReceipt) {
    try {
        return withContext(context -> deleteMessageWithResponse(messageId, popReceipt, context));
    } catch (RuntimeException ex) {
        return monoError(logger, ex);
    }
}

// Transport-level implementation; maps the service response to an empty-bodied Response.
Mono<Response<Void>> deleteMessageWithResponse(String messageId, String popReceipt, Context context) {
    context = context == null ? Context.NONE : context;
    return client.getMessageIds().deleteWithResponseAsync(queueName, messageId, popReceipt, null, null,
        context.addData(AZ_TRACING_NAMESPACE_KEY, STORAGE_TRACING_NAMESPACE_VALUE))
        .map(response -> new SimpleResponse<>(response, null));
}

/**
 * Get the queue name of the client.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.queue.queueAsyncClient.getQueueName}
 *
 * @return The name of the queue.
 */
public String getQueueName() {
    return queueName;
}

/**
 * Get associated account name.
 *
 * @return account name associated with this storage resource.
*/
public String getAccountName() {
    return this.accountName;
}

/**
 * Generates a service sas for the queue using the specified {@link QueueServiceSasSignatureValues}
 * <p>Note : The client must be authenticated via {@link StorageSharedKeyCredential}
 * <p>See {@link QueueServiceSasSignatureValues} for more information on how to construct a service SAS.</p>
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.queue.QueueAsyncClient.generateSas
 *
 * @param queueServiceSasSignatureValues {@link QueueServiceSasSignatureValues}
 *
 * @return A {@code String} representing the SAS query parameters.
 */
public String generateSas(QueueServiceSasSignatureValues queueServiceSasSignatureValues) {
    // Convenience overload; no extra context is threaded into SAS generation.
    return generateSas(queueServiceSasSignatureValues, Context.NONE);
}

/**
 * Generates a service sas for the queue using the specified {@link QueueServiceSasSignatureValues}
 * <p>Note : The client must be authenticated via {@link StorageSharedKeyCredential}
 * <p>See {@link QueueServiceSasSignatureValues} for more information on how to construct a service SAS.</p>
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.queue.QueueAsyncClient.generateSas
 *
 * @param queueServiceSasSignatureValues {@link QueueServiceSasSignatureValues}
 * @param context Additional context that is passed through the code when generating a SAS.
 *
 * @return A {@code String} representing the SAS query parameters.
*/
public String generateSas(QueueServiceSasSignatureValues queueServiceSasSignatureValues, Context context) {
    // SAS signing requires a shared-key credential; extracted from the pipeline at call time.
    return new QueueSasImplUtil(queueServiceSasSignatureValues, getQueueName())
        .generateSas(SasImplUtils.extractSharedKeyCredential(getHttpPipeline()), context);
}

/*
 * Maps the HTTP headers returned from the service to the expected response type
 * @param response Service response
 * @return Mapped response
 */
private Response<QueueProperties> getQueuePropertiesResponse(QueuesGetPropertiesResponse response) {
    QueuesGetPropertiesHeaders propertiesHeaders = response.getDeserializedHeaders();
    // Metadata and approximate message count come back as response headers, not a body.
    QueueProperties properties = new QueueProperties(propertiesHeaders.getXMsMeta(),
        propertiesHeaders.getXMsApproximateMessagesCount());
    return new SimpleResponse<>(response, properties);
}

/*
 * Maps the HTTP headers returned from the service to the expected response type
 * @param response Service response
 * @return Mapped response
 */
private Response<UpdateMessageResult> getUpdatedMessageResponse(MessageIdsUpdateResponse response) {
    MessageIdsUpdateHeaders headers = response.getDeserializedHeaders();
    // The new pop receipt and next-visible time are returned as headers on the update call.
    UpdateMessageResult updateMessageResult = new UpdateMessageResult(headers.getXMsPopreceipt(),
        headers.getXMsTimeNextVisible());
    return new SimpleResponse<>(response, updateMessageResult);
}
}
class QueueAsyncClient {
    private final ClientLogger logger = new ClientLogger(QueueAsyncClient.class);
    // Generated low-level client; all service calls go through it.
    private final AzureQueueStorageImpl client;
    private final String queueName;
    private final String accountName;
    private final QueueServiceVersion serviceVersion;
    // Controls how message bodies are encoded on send and decoded on receive/peek.
    private final QueueMessageEncoding messageEncoding;
    // Optional callbacks invoked when a received/peeked message body fails to decode.
    private final Function<QueueMessageDecodingError, Mono<Void>> processMessageDecodingErrorAsyncHandler;
    private final Consumer<QueueMessageDecodingError> processMessageDecodingErrorHandler;

    /**
     * Creates a QueueAsyncClient that sends requests to the storage queue service at {@link
     * Each service call goes through the {@link HttpPipeline pipeline}.
     *
     * @param client Client that interacts with the service interfaces
     * @param queueName Name of the queue
     */
    QueueAsyncClient(AzureQueueStorageImpl client, String queueName, String accountName,
        QueueServiceVersion serviceVersion, QueueMessageEncoding messageEncoding,
        Function<QueueMessageDecodingError, Mono<Void>> processMessageDecodingErrorAsyncHandler,
        Consumer<QueueMessageDecodingError> processMessageDecodingErrorHandler) {
        // Only queueName is validated here; the builder is expected to validate the rest.
        Objects.requireNonNull(queueName, "'queueName' cannot be null.");
        this.queueName = queueName;
        this.client = client;
        this.accountName = accountName;
        this.serviceVersion = serviceVersion;
        this.messageEncoding = messageEncoding;
        this.processMessageDecodingErrorAsyncHandler = processMessageDecodingErrorAsyncHandler;
        this.processMessageDecodingErrorHandler = processMessageDecodingErrorHandler;
    }

    /**
     * @return the URL of the storage queue
     */
    public String getQueueUrl() {
        return String.format("%s/%s", client.getUrl(), queueName);
    }

    /**
     * Gets the service version the client is using.
     *
     * @return the service version the client is using.
     */
    public QueueServiceVersion getServiceVersion() {
        return serviceVersion;
    }

    /**
     * Gets the message encoding the client is using.
     *
     * @return the message encoding the client is using.
*/
public QueueMessageEncoding getMessageEncoding() {
    return messageEncoding;
}

/**
 * Gets the {@link HttpPipeline} powering this client.
 *
 * @return The pipeline.
 */
public HttpPipeline getHttpPipeline() {
    return client.getHttpPipeline();
}

/**
 * Creates a new queue.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * <p>Create a queue</p>
 *
 * {@codesnippet com.azure.storage.queue.queueAsyncClient.create}
 *
 * <p>For more information, see the
 * <a href="https:
 *
 * @return An empty response
 * @throws QueueStorageException If a queue with the same name already exists in the queue service.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Void> create() {
    try {
        return createWithResponse(null).flatMap(FluxUtil::toMono);
    } catch (RuntimeException ex) {
        return monoError(logger, ex);
    }
}

/**
 * Creates a new queue.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * <p>Create a queue with metadata "queue:metadataMap"</p>
 *
 * {@codesnippet com.azure.storage.queue.queueAsyncClient.createWithResponse
 *
 * <p>For more information, see the
 * <a href="https:
 *
 * @param metadata Metadata to associate with the queue. If there is leading or trailing whitespace in any
 * metadata key or value, it must be removed or encoded.
 * @return A response that only contains headers and response status code
 * @throws QueueStorageException If a queue with the same name and different metadata already exists in the queue
 * service.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<Void>> createWithResponse(Map<String, String> metadata) {
    try {
        return withContext(context -> createWithResponse(metadata, context));
    } catch (RuntimeException ex) {
        return monoError(logger, ex);
    }
}

// Transport-level implementation; 'metadata' may be null (no metadata set on the queue).
Mono<Response<Void>> createWithResponse(Map<String, String> metadata, Context context) {
    context = context == null ? Context.NONE : context;
    return client.getQueues().createWithResponseAsync(queueName, null, metadata, null,
        context.addData(AZ_TRACING_NAMESPACE_KEY, STORAGE_TRACING_NAMESPACE_VALUE))
        .map(response -> new SimpleResponse<>(response, null));
}

/**
 * Permanently deletes the queue.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * <p>Delete a queue</p>
 *
 * {@codesnippet com.azure.storage.queue.queueAsyncClient.delete}
 *
 * <p>For more information, see the
 * <a href="https:
 *
 * @return An empty response
 * @throws QueueStorageException If the queue doesn't exist
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Void> delete() {
    try {
        return deleteWithResponse().flatMap(FluxUtil::toMono);
    } catch (RuntimeException ex) {
        return monoError(logger, ex);
    }
}

/**
 * Permanently deletes the queue.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * <p>Delete a queue</p>
 *
 * {@codesnippet com.azure.storage.queue.queueAsyncClient.deleteWithResponse}
 *
 * <p>For more information, see the
 * <a href="https:
 *
 * @return A response that only contains headers and response status code
 * @throws QueueStorageException If the queue doesn't exist
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<Void>> deleteWithResponse() {
    try {
        return withContext(this::deleteWithResponse);
    } catch (RuntimeException ex) {
        return monoError(logger, ex);
    }
}

// Transport-level implementation; maps the service response to an empty-bodied Response.
Mono<Response<Void>> deleteWithResponse(Context context) {
    context = context == null ? Context.NONE : context;
    return client.getQueues().deleteWithResponseAsync(queueName, null, null,
        context.addData(AZ_TRACING_NAMESPACE_KEY, STORAGE_TRACING_NAMESPACE_VALUE))
        .map(response -> new SimpleResponse<>(response, null));
}

/**
 * Retrieves metadata and approximate message count of the queue.
*
 * <p><strong>Code Samples</strong></p>
 *
 * <p>Get the properties of the queue</p>
 *
 * {@codesnippet com.azure.storage.queue.queueAsyncClient.getProperties}
 *
 * <p>For more information, see the
 * <a href="https:
 *
 * @return A response containing a {@link QueueProperties} value which contains the metadata and approximate
 * messages count of the queue.
 * @throws QueueStorageException If the queue doesn't exist
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<QueueProperties> getProperties() {
    try {
        return getPropertiesWithResponse().flatMap(FluxUtil::toMono);
    } catch (RuntimeException ex) {
        return monoError(logger, ex);
    }
}

/**
 * Retrieves metadata and approximate message count of the queue.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * <p>Get the properties of the queue</p>
 *
 * {@codesnippet com.azure.storage.queue.queueAsyncClient.getPropertiesWithResponse}
 *
 * <p>For more information, see the
 * <a href="https:
 *
 * @return A response containing a {@link QueueProperties} value which contains the metadata and approximate
 * messages count of the queue.
 * @throws QueueStorageException If the queue doesn't exist
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<QueueProperties>> getPropertiesWithResponse() {
    try {
        return withContext(this::getPropertiesWithResponse);
    } catch (RuntimeException ex) {
        return monoError(logger, ex);
    }
}

// Transport-level implementation; properties are mapped out of the response headers.
Mono<Response<QueueProperties>> getPropertiesWithResponse(Context context) {
    context = context == null ? Context.NONE : context;
    return client.getQueues().getPropertiesWithResponseAsync(queueName, null, null,
        context.addData(AZ_TRACING_NAMESPACE_KEY, STORAGE_TRACING_NAMESPACE_VALUE))
        .map(this::getQueuePropertiesResponse);
}

/**
 * Sets the metadata of the queue.
 *
 * Passing in a {@code null} value for metadata will clear the metadata associated with the queue.
*
 * <p><strong>Code Samples</strong></p>
 *
 * <p>Set the queue's metadata to "queue:metadataMap"</p>
 *
 * {@codesnippet com.azure.storage.queue.queueAsyncClient.setMetadata
 *
 * <p>Clear the queue's metadata</p>
 *
 * {@codesnippet com.azure.storage.queue.queueAsyncClient.clearMetadata
 *
 * <p>For more information, see the
 * <a href="https:
 *
 * @param metadata Metadata to set on the queue
 * @return A response that only contains headers and response status code
 * @throws QueueStorageException If the queue doesn't exist
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Void> setMetadata(Map<String, String> metadata) {
    try {
        return setMetadataWithResponse(metadata).flatMap(FluxUtil::toMono);
    } catch (RuntimeException ex) {
        return monoError(logger, ex);
    }
}

/**
 * Sets the metadata of the queue.
 *
 * Passing in a {@code null} value for metadata will clear the metadata associated with the queue.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * <p>Set the queue's metadata to "queue:metadataMap"</p>
 *
 * {@codesnippet com.azure.storage.queue.queueAsyncClient.setMetadataWithResponse
 *
 * <p>Clear the queue's metadata</p>
 *
 * {@codesnippet com.azure.storage.queue.queueAsyncClient.clearMetadataWithResponse
 *
 * <p>For more information, see the
 * <a href="https:
 *
 * @param metadata Metadata to set on the queue
 * @return A response that only contains headers and response status code
 * @throws QueueStorageException If the queue doesn't exist
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<Void>> setMetadataWithResponse(Map<String, String> metadata) {
    try {
        return withContext(context -> setMetadataWithResponse(metadata, context));
    } catch (RuntimeException ex) {
        return monoError(logger, ex);
    }
}

// Transport-level implementation; a null metadata map clears the queue's metadata.
Mono<Response<Void>> setMetadataWithResponse(Map<String, String> metadata, Context context) {
    context = context == null ? Context.NONE : context;
    return client.getQueues()
        .setMetadataWithResponseAsync(queueName, null, metadata, null,
            context.addData(AZ_TRACING_NAMESPACE_KEY, STORAGE_TRACING_NAMESPACE_VALUE))
        .map(response -> new SimpleResponse<>(response, null));
}

/**
 * Retrieves stored access policies specified on the queue.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * <p>List the stored access policies</p>
 *
 * {@codesnippet com.azure.storage.queue.queueAsyncClient.getAccessPolicy}
 *
 * <p>For more information, see the
 * <a href="https:
 *
 * @return The stored access policies specified on the queue.
 * @throws QueueStorageException If the queue doesn't exist
 */
@ServiceMethod(returns = ReturnType.COLLECTION)
public PagedFlux<QueueSignedIdentifier> getAccessPolicy() {
    try {
        // Single-page API: the same retriever is reused regardless of the continuation marker.
        Function<String, Mono<PagedResponse<QueueSignedIdentifier>>> retriever =
            marker -> this.client.getQueues()
                .getAccessPolicyWithResponseAsync(queueName, null, null, Context.NONE)
                .map(response -> new PagedResponseBase<>(response.getRequest(),
                    response.getStatusCode(),
                    response.getHeaders(),
                    response.getValue(),
                    null,
                    response.getDeserializedHeaders()));
        return new PagedFlux<>(() -> retriever.apply(null), retriever);
    } catch (RuntimeException ex) {
        return pagedFluxError(logger, ex);
    }
}

/**
 * Sets stored access policies on the queue.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * <p>Set a read only stored access policy</p>
 *
 * {@codesnippet com.azure.storage.queue.QueueAsyncClient.setAccessPolicy
 *
 * <p>For more information, see the
 * <a href="https:
 *
 * @param permissions Access policies to set on the queue
 * @return An empty response
 * @throws QueueStorageException If the queue doesn't exist, a stored access policy doesn't have all fields filled
 * out, or the queue will have more than five policies.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Void> setAccessPolicy(Iterable<QueueSignedIdentifier> permissions) {
    try {
        return setAccessPolicyWithResponse(permissions).flatMap(FluxUtil::toMono);
    } catch (RuntimeException ex) {
        return monoError(logger, ex);
    }
}

/**
 * Sets stored access policies on the queue.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * <p>Set a read only stored access policy</p>
 *
 * {@codesnippet com.azure.storage.queue.QueueAsyncClient.setAccessPolicyWithResponse
 *
 * <p>For more information, see the
 * <a href="https:
 *
 * @param permissions Access policies to set on the queue
 * @return A response that only contains headers and response status code
 * @throws QueueStorageException If the queue doesn't exist, a stored access policy doesn't have all fields filled
 * out, or the queue will have more than five policies.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<Void>> setAccessPolicyWithResponse(Iterable<QueueSignedIdentifier> permissions) {
    try {
        return withContext(context -> setAccessPolicyWithResponse(permissions, context));
    } catch (RuntimeException ex) {
        return monoError(logger, ex);
    }
}

// Transport-level implementation. Note: the start/expiry times on the caller's policies are mutated
// in place (truncated to seconds) before being sent.
Mono<Response<Void>> setAccessPolicyWithResponse(Iterable<QueueSignedIdentifier> permissions,
    Context context) {
    context = context == null ? Context.NONE : context;
    /*
    We truncate to seconds because the service only supports nanoseconds or seconds, but doing an
    OffsetDateTime.now will only give back milliseconds (more precise fields are zeroed and not serialized).
    This allows for proper serialization with no real detriment to users as sub-second precision on active time for
    signed identifiers is not really necessary.
    */
    if (permissions != null) {
        for (QueueSignedIdentifier permission : permissions) {
            if (permission.getAccessPolicy() != null && permission.getAccessPolicy().getStartsOn() != null) {
                permission.getAccessPolicy().setStartsOn(
                    permission.getAccessPolicy().getStartsOn().truncatedTo(ChronoUnit.SECONDS));
            }
            if (permission.getAccessPolicy() != null && permission.getAccessPolicy().getExpiresOn() != null) {
                permission.getAccessPolicy().setExpiresOn(
                    permission.getAccessPolicy().getExpiresOn().truncatedTo(ChronoUnit.SECONDS));
            }
        }
    }
    // A null iterable serializes as an empty policy list, clearing any existing policies.
    List<QueueSignedIdentifier> permissionsList = StreamSupport.stream(
        permissions != null ? permissions.spliterator() : Spliterators.emptySpliterator(), false)
        .collect(Collectors.toList());
    return client.getQueues()
        .setAccessPolicyWithResponseAsync(queueName, null, null, permissionsList,
            context.addData(AZ_TRACING_NAMESPACE_KEY, STORAGE_TRACING_NAMESPACE_VALUE))
        .map(response -> new SimpleResponse<>(response, null));
}

/**
 * Deletes all messages in the queue.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * <p>Clear the messages</p>
 *
 * {@codesnippet com.azure.storage.queue.queueAsyncClient.clearMessages}
 *
 * <p>For more information, see the
 * <a href="https:
 *
 * @return An empty response
 * @throws QueueStorageException If the queue doesn't exist
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Void> clearMessages() {
    try {
        return clearMessagesWithResponse().flatMap(FluxUtil::toMono);
    } catch (RuntimeException ex) {
        return monoError(logger, ex);
    }
}

/**
 * Deletes all messages in the queue.
*
 * <p><strong>Code Samples</strong></p>
 *
 * <p>Clear the messages</p>
 *
 * {@codesnippet com.azure.storage.queue.queueAsyncClient.clearMessagesWithResponse}
 *
 * <p>For more information, see the
 * <a href="https:
 *
 * @return A response that only contains headers and response status code
 * @throws QueueStorageException If the queue doesn't exist
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<Void>> clearMessagesWithResponse() {
    try {
        return withContext(this::clearMessagesWithResponse);
    } catch (RuntimeException ex) {
        return monoError(logger, ex);
    }
}

// Transport-level implementation; maps the service response to an empty-bodied Response.
Mono<Response<Void>> clearMessagesWithResponse(Context context) {
    context = context == null ? Context.NONE : context;
    return client.getMessages().clearWithResponseAsync(queueName, null, null,
        context.addData(AZ_TRACING_NAMESPACE_KEY, STORAGE_TRACING_NAMESPACE_VALUE))
        .map(response -> new SimpleResponse<>(response, null));
}

/**
 * Enqueues a message that has a time-to-live of 7 days and is instantly visible.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * <p>Enqueue a message of "Hello, Azure"</p>
 *
 * {@codesnippet com.azure.storage.queue.queueAsyncClient.sendMessage
 *
 * <p>For more information, see the
 * <a href="https:
 *
 * @param messageText Message text
 * @return A {@link SendMessageResult} value that contains the {@link SendMessageResult
 * and {@link SendMessageResult
 * and other metadata about the enqueued message.
 * @throws QueueStorageException If the queue doesn't exist
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<SendMessageResult> sendMessage(String messageText) {
    try {
        return sendMessageWithResponse(messageText, null, null).flatMap(FluxUtil::toMono);
    } catch (RuntimeException ex) {
        return monoError(logger, ex);
    }
}

/**
 * Enqueues a message that has a time-to-live of 7 days and is instantly visible.
* * <p><strong>Code Samples</strong></p> * * <p>Enqueue a message of "Hello, Azure"</p> * * {@codesnippet com.azure.storage.queue.queueAsyncClient.sendMessage * * <p>For more information, see the * <a href="https: * * @param message Message content * @return A {@link SendMessageResult} value that contains the {@link SendMessageResult * and {@link SendMessageResult * and other metadata about the enqueued message. * @throws QueueStorageException If the queue doesn't exist */ @ServiceMethod(returns = ReturnType.SINGLE) /** * Enqueues a message with a given time-to-live and a timeout period where the message is invisible in the queue. * * <p><strong>Code Samples</strong></p> * * <p>Add a message of "Hello, Azure" that has a timeout of 5 seconds</p> * * {@codesnippet com.azure.storage.queue.queueAsyncClient.sendMessageWithResponse * * <p>Add a message of "Goodbye, Azure" that has a time to live of 5 seconds</p> * * {@codesnippet com.azure.storage.queue.QueueAsyncClient.sendMessageWithResponse-liveTime * * <p>For more information, see the * <a href="https: * * @param messageText Message text * @param visibilityTimeout Optional. The timeout period for how long the message is invisible in the queue. If * unset the value will default to 0 and the message will be instantly visible. The timeout must be between 0 * seconds and 7 days. * @param timeToLive Optional. How long the message will stay alive in the queue. If unset the value will default to * 7 days, if {@code Duration.ofSeconds(-1)} is passed the message will not expire. * The time to live must be {@code Duration.ofSeconds(-1)} or any positive number of seconds. * @return A {@link SendMessageResult} value that contains the {@link SendMessageResult * and {@link SendMessageResult * and other metadata about the enqueued message. * @throws QueueStorageException If the queue doesn't exist or the {@code visibilityTimeout} or {@code timeToLive} * are outside of the allowed limits. 
*/ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<SendMessageResult>> sendMessageWithResponse(String messageText, Duration visibilityTimeout, Duration timeToLive) { try { return withContext(context -> sendMessageWithResponse(BinaryData.fromString(messageText), visibilityTimeout, timeToLive, context)); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Enqueues a message with a given time-to-live and a timeout period where the message is invisible in the queue. * * <p><strong>Code Samples</strong></p> * * <p>Add a message of "Hello, Azure" that has a timeout of 5 seconds</p> * * {@codesnippet com.azure.storage.queue.queueAsyncClient.sendMessageWithResponse * * <p>Add a message of "Goodbye, Azure" that has a time to live of 5 seconds</p> * * {@codesnippet com.azure.storage.queue.QueueAsyncClient.sendMessageWithResponse-liveTime * * <p>For more information, see the * <a href="https: * * @param message Message content. * @param visibilityTimeout Optional. The timeout period for how long the message is invisible in the queue. If * unset the value will default to 0 and the message will be instantly visible. The timeout must be between 0 * seconds and 7 days. * @param timeToLive Optional. How long the message will stay alive in the queue. If unset the value will default to * 7 days, if {@code Duration.ofSeconds(-1)} is passed the message will not expire. * The time to live must be {@code Duration.ofSeconds(-1)} or any positive number of seconds. * @return A {@link SendMessageResult} value that contains the {@link SendMessageResult * and {@link SendMessageResult * and other metadata about the enqueued message. * @throws QueueStorageException If the queue doesn't exist or the {@code visibilityTimeout} or {@code timeToLive} * are outside of the allowed limits. 
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Mono<Response<SendMessageResult>> sendMessageWithResponse(BinaryData message, Duration visibilityTimeout,
        Duration timeToLive) {
        try {
            // Defer to the context-aware overload so tracing metadata can be attached downstream.
            return withContext(context -> sendMessageWithResponse(message, visibilityTimeout, timeToLive, context));
        } catch (RuntimeException ex) {
            // Surface synchronous failures through the returned Mono instead of throwing to the caller.
            return monoError(logger, ex);
        }
    }

    /*
     * Service-call implementation shared by the public sendMessageWithResponse overloads.
     * Encodes the message body according to the client's configured messageEncoding, then enqueues it.
     */
    Mono<Response<SendMessageResult>> sendMessageWithResponse(BinaryData message, Duration visibilityTimeout,
        Duration timeToLive, Context context) {
        // The service API takes whole seconds; null leaves the service default in effect.
        Integer visibilityTimeoutInSeconds = (visibilityTimeout == null) ? null
            : (int) visibilityTimeout.getSeconds();
        Integer timeToLiveInSeconds = (timeToLive == null) ? null : (int) timeToLive.getSeconds();
        context = context == null ? Context.NONE : context;
        Context finalContext = context.addData(AZ_TRACING_NAMESPACE_KEY, STORAGE_TRACING_NAMESPACE_VALUE);
        return encodeMessage(message)
            .flatMap(messageText -> {
                QueueMessage queueMessage = new QueueMessage().setMessageText(messageText);
                return client.getMessages()
                    .enqueueWithResponseAsync(queueName, queueMessage, visibilityTimeoutInSeconds,
                        timeToLiveInSeconds, null, null, finalContext)
                    // The service returns a single-element list for an enqueue; unwrap it as the response value.
                    .map(response -> new SimpleResponse<>(response, response.getValue().get(0)));
            });
    }

    /*
     * Converts the payload to the on-the-wire text form required by the configured
     * QueueMessageEncoding (raw text for NONE, Base64 for BASE64), erroring for unknown encodings.
     */
    private Mono<String> encodeMessage(BinaryData message) {
        Objects.requireNonNull(message, "'message' cannot be null.");
        switch (messageEncoding) {
            case NONE:
                return Mono.just(message.toString());
            case BASE64:
                return Mono.just(Base64.getEncoder().encodeToString(message.toBytes()));
            default:
                return FluxUtil.monoError(
                    logger, new IllegalArgumentException("Unsupported message encoding=" + messageEncoding));
        }
    }

    /**
     * Retrieves the first message in the queue and hides it from other operations for 30 seconds.
* * <p><strong>Code Samples</strong></p> * * <p>Dequeue a message</p> * * {@codesnippet com.azure.storage.queue.queueAsyncClient.receiveMessage} * * <p>For more information, see the * <a href="https: * * @return The first {@link QueueMessageItem} in the queue, it contains {@link QueueMessageItem * messageId} and {@link QueueMessageItem * additionally it contains other metadata about the message. * @throws QueueStorageException If the queue doesn't exist */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<QueueMessageItem> receiveMessage() { try { return receiveMessagesWithOptionalTimeout(1, null, null, Context.NONE).singleOrEmpty(); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Retrieves up to the maximum number of messages from the queue and hides them from other operations for 30 * seconds. * * <p><strong>Code Samples</strong></p> * * <p>Dequeue up to 5 messages</p> * * {@codesnippet com.azure.storage.queue.queueAsyncClient.receiveMessages * * <p>For more information, see the * <a href="https: * * @param maxMessages Optional. Maximum number of messages to get, if there are less messages exist in the queue * than requested all the messages will be returned. If left empty only 1 message will be retrieved, the allowed * range is 1 to 32 messages. * @return Up to {@code maxMessages} {@link QueueMessageItem ReceiveMessageItem} from the queue. * Each DequeuedMessage contains {@link QueueMessageItem * {@link QueueMessageItem * other metadata about the message. 
* @throws QueueStorageException If the queue doesn't exist or {@code maxMessages} is outside of the allowed bounds */ @ServiceMethod(returns = ReturnType.COLLECTION) public PagedFlux<QueueMessageItem> receiveMessages(Integer maxMessages) { try { return receiveMessagesWithOptionalTimeout(maxMessages, null, null, Context.NONE); } catch (RuntimeException ex) { return pagedFluxError(logger, ex); } } /** * Retrieves up to the maximum number of messages from the queue and hides them from other operations for the * timeout period. * * <p><strong>Code Samples</strong></p> * * <p>Dequeue up to 5 messages and give them a 60 second timeout period</p> * * {@codesnippet com.azure.storage.queue.queueAsyncClient.receiveMessages * * <p>For more information, see the * <a href="https: * * @param maxMessages Optional. Maximum number of messages to get, if there are less messages exist in the queue * than requested all the messages will be returned. If left empty only 1 message will be retrieved, the allowed * range is 1 to 32 messages. * @param visibilityTimeout Optional. The timeout period for how long the message is invisible in the queue. If left * empty the dequeued messages will be invisible for 30 seconds. The timeout must be between 1 second and 7 days. * @return Up to {@code maxMessages} {@link QueueMessageItem DequeuedMessages} from the queue. Each DeqeuedMessage * contains {@link QueueMessageItem * {@link QueueMessageItem * used to interact with the message and other metadata about the message. 
* @throws QueueStorageException If the queue doesn't exist or {@code maxMessages} or {@code visibilityTimeout} is * outside of the allowed bounds */ @ServiceMethod(returns = ReturnType.COLLECTION) public PagedFlux<QueueMessageItem> receiveMessages(Integer maxMessages, Duration visibilityTimeout) { try { return receiveMessagesWithOptionalTimeout(maxMessages, visibilityTimeout, null, Context.NONE); } catch (RuntimeException ex) { return pagedFluxError(logger, ex); } } PagedFlux<QueueMessageItem> receiveMessagesWithOptionalTimeout(Integer maxMessages, Duration visibilityTimeout, Duration timeout, Context context) { Integer visibilityTimeoutInSeconds = (visibilityTimeout == null) ? null : (int) visibilityTimeout.getSeconds(); Function<String, Mono<PagedResponse<QueueMessageItem>>> retriever = marker -> StorageImplUtils.applyOptionalTimeout(this.client.getMessages() .dequeueWithResponseAsync(queueName, maxMessages, visibilityTimeoutInSeconds, null, null, context), timeout) .flatMap(this::transformMessagesDequeueResponse); return new PagedFlux<>(() -> retriever.apply(null), retriever); } private Mono<PagedResponseBase<MessagesDequeueHeaders, QueueMessageItem>> transformMessagesDequeueResponse( MessagesDequeueResponse response) { List<QueueMessageItemInternal> queueMessageInternalItems = response.getValue(); if (queueMessageInternalItems == null) { queueMessageInternalItems = Collections.emptyList(); } return Flux.fromIterable(queueMessageInternalItems) .flatMapSequential(queueMessageItemInternal -> transformQueueMessageItemInternal(queueMessageItemInternal, messageEncoding) .onErrorResume(IllegalArgumentException.class, e -> { if (processMessageDecodingErrorAsyncHandler != null) { return transformQueueMessageItemInternal( queueMessageItemInternal, QueueMessageEncoding.NONE) .flatMap(messageItem -> processMessageDecodingErrorAsyncHandler.apply( new QueueMessageDecodingError( this, new QueueClient(this), messageItem, null, e))) .then(Mono.empty()); } else if 
(processMessageDecodingErrorHandler != null) { return transformQueueMessageItemInternal( queueMessageItemInternal, QueueMessageEncoding.NONE) .flatMap(messageItem -> { try { processMessageDecodingErrorHandler.accept( new QueueMessageDecodingError( this, new QueueClient(this), messageItem, null, e)); return Mono.<QueueMessageItem>empty(); } catch (RuntimeException re) { return FluxUtil.<QueueMessageItem>monoError(logger, re); } }) .subscribeOn(Schedulers.boundedElastic()); } else { return FluxUtil.monoError(logger, e); } })) .collectList() .map(queueMessageItems -> new PagedResponseBase<>(response.getRequest(), response.getStatusCode(), response.getHeaders(), queueMessageItems, null, response.getDeserializedHeaders())); } private Mono<QueueMessageItem> transformQueueMessageItemInternal( QueueMessageItemInternal queueMessageItemInternal, QueueMessageEncoding messageEncoding) { QueueMessageItem queueMessageItem = new QueueMessageItem() .setMessageId(queueMessageItemInternal.getMessageId()) .setDequeueCount(queueMessageItemInternal.getDequeueCount()) .setExpirationTime(queueMessageItemInternal.getExpirationTime()) .setInsertionTime(queueMessageItemInternal.getInsertionTime()) .setPopReceipt(queueMessageItemInternal.getPopReceipt()) .setTimeNextVisible(queueMessageItemInternal.getTimeNextVisible()); return decodeMessageBody(queueMessageItemInternal.getMessageText(), messageEncoding) .map(queueMessageItem::setBody) .switchIfEmpty(Mono.just(queueMessageItem)); } private Mono<BinaryData> decodeMessageBody(String messageText, QueueMessageEncoding messageEncoding) { if (messageText == null) { return Mono.empty(); } switch (messageEncoding) { case NONE: return Mono.just(BinaryData.fromString(messageText)); case BASE64: try { return Mono.just(BinaryData.fromBytes(Base64.getDecoder().decode(messageText))); } catch (IllegalArgumentException e) { return FluxUtil.monoError(logger, e); } default: return FluxUtil.monoError( logger, new IllegalArgumentException("Unsupported message 
encoding=" + messageEncoding)); } } /** * Peeks the first message in the queue. * * Peeked messages don't contain the necessary information needed to interact with the message nor will it hide * messages from other operations on the queue. * * <p><strong>Code Samples</strong></p> * * <p>Peek the first message</p> * * {@codesnippet com.azure.storage.queue.queueAsyncClient.peekMessage} * * <p>For more information, see the * <a href="https: * * @return A {@link PeekedMessageItem} that contains metadata about the message. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<PeekedMessageItem> peekMessage() { try { return peekMessages(null).singleOrEmpty(); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Peek messages from the front of the queue up to the maximum number of messages. * * Peeked messages don't contain the necessary information needed to interact with the message nor will it hide * messages from other operations on the queue. * * <p><strong>Code Samples</strong></p> * * <p>Peek up to the first five messages</p> * * {@codesnippet com.azure.storage.queue.queueAsyncClient.peekMessages * * <p>For more information, see the * <a href="https: * * @param maxMessages Optional. Maximum number of messages to peek, if there are less messages exist in the queue * than requested all the messages will be peeked. If left empty only 1 message will be peeked, the allowed range is * 1 to 32 messages. * @return Up to {@code maxMessages} {@link PeekedMessageItem PeekedMessages} from the queue. Each PeekedMessage * contains metadata about the message. 
* @throws QueueStorageException If the queue doesn't exist or {@code maxMessages} is outside of the allowed bounds */ @ServiceMethod(returns = ReturnType.COLLECTION) public PagedFlux<PeekedMessageItem> peekMessages(Integer maxMessages) { try { return peekMessagesWithOptionalTimeout(maxMessages, null, Context.NONE); } catch (RuntimeException ex) { return pagedFluxError(logger, ex); } } PagedFlux<PeekedMessageItem> peekMessagesWithOptionalTimeout(Integer maxMessages, Duration timeout, Context context) { Function<String, Mono<PagedResponse<PeekedMessageItem>>> retriever = marker -> StorageImplUtils.applyOptionalTimeout(this.client.getMessages() .peekWithResponseAsync(queueName, maxMessages, null, null, context), timeout) .flatMap(this::transformMessagesPeekResponse); return new PagedFlux<>(() -> retriever.apply(null), retriever); } private Mono<PagedResponseBase<MessagesPeekHeaders, PeekedMessageItem>> transformMessagesPeekResponse( MessagesPeekResponse response) { List<PeekedMessageItemInternal> peekedMessageInternalItems = response.getValue(); if (peekedMessageInternalItems == null) { peekedMessageInternalItems = Collections.emptyList(); } return Flux.fromIterable(peekedMessageInternalItems) .flatMapSequential(peekedMessageItemInternal -> transformPeekedMessageItemInternal(peekedMessageItemInternal, messageEncoding) .onErrorResume(IllegalArgumentException.class, e -> { if (processMessageDecodingErrorAsyncHandler != null) { return transformPeekedMessageItemInternal( peekedMessageItemInternal, QueueMessageEncoding.NONE) .flatMap(messageItem -> processMessageDecodingErrorAsyncHandler.apply( new QueueMessageDecodingError( this, new QueueClient(this), null, messageItem, e))) .then(Mono.empty()); } else if (processMessageDecodingErrorHandler != null) { return transformPeekedMessageItemInternal( peekedMessageItemInternal, QueueMessageEncoding.NONE) .flatMap(messageItem -> { try { processMessageDecodingErrorHandler.accept( new QueueMessageDecodingError( this, new 
QueueClient(this), null, messageItem, e)); return Mono.<PeekedMessageItem>empty(); } catch (RuntimeException re) { return FluxUtil.<PeekedMessageItem>monoError(logger, re); } }) .subscribeOn(Schedulers.boundedElastic()); } else { return FluxUtil.monoError(logger, e); } })) .collectList() .map(peekedMessageItems -> new PagedResponseBase<>(response.getRequest(), response.getStatusCode(), response.getHeaders(), peekedMessageItems, null, response.getDeserializedHeaders())); } private Mono<PeekedMessageItem> transformPeekedMessageItemInternal( PeekedMessageItemInternal peekedMessageItemInternal, QueueMessageEncoding messageEncoding) { PeekedMessageItem peekedMessageItem = new PeekedMessageItem() .setMessageId(peekedMessageItemInternal.getMessageId()) .setDequeueCount(peekedMessageItemInternal.getDequeueCount()) .setExpirationTime(peekedMessageItemInternal.getExpirationTime()) .setInsertionTime(peekedMessageItemInternal.getInsertionTime()); return decodeMessageBody(peekedMessageItemInternal.getMessageText(), messageEncoding) .map(peekedMessageItem::setBody) .switchIfEmpty(Mono.just(peekedMessageItem)); } /** * Updates the specific message in the queue with a new message and resets the visibility timeout. * * <p><strong>Code Samples</strong></p> * * <p>Dequeue the first message and update it to "Hello again, Azure" and hide it for 5 seconds</p> * * {@codesnippet com.azure.storage.queue.QueueAsyncClient.updateMessage * * <p>For more information, see the * <a href="https: * * @param messageId Id of the message to update * @param popReceipt Unique identifier that must match for the message to be updated * @param messageText Updated value for the message * @param visibilityTimeout The timeout period for how long the message is invisible in the queue in seconds. The * timeout period must be between 1 second and 7 days. The default value is Duration.ZERO. 
* @return A {@link UpdateMessageResult} that contains the new * {@link UpdateMessageResult * additionally contains the updated metadata about the message. * @throws QueueStorageException If the queue or messageId don't exist, the popReceipt doesn't match on the message, * or the {@code visibilityTimeout} is outside the allowed bounds */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<UpdateMessageResult> updateMessage(String messageId, String popReceipt, String messageText, Duration visibilityTimeout) { try { return updateMessageWithResponse(messageId, popReceipt, messageText, visibilityTimeout) .flatMap(FluxUtil::toMono); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Updates the specific message in the queue with a new message and resets the visibility timeout. * * <p><strong>Code Samples</strong></p> * * <p>Dequeue the first message and update it to "Hello again, Azure" and hide it for 5 seconds</p> * * {@codesnippet com.azure.storage.queue.QueueAsyncClient.updateMessageWithResponse * * <p>For more information, see the * <a href="https: * * @param messageId Id of the message to update * @param popReceipt Unique identifier that must match for the message to be updated * @param messageText Updated value for the message * @param visibilityTimeout The timeout period for how long the message is invisible in the queue in seconds. The * timeout period must be between 1 second and 7 days. The default value is Duration.ZERO. * @return A {@link UpdateMessageResult} that contains the new * {@link UpdateMessageResult * additionally contains the updated metadata about the message. 
* @throws QueueStorageException If the queue or messageId don't exist, the popReceipt doesn't match on the message, * or the {@code visibilityTimeout} is outside the allowed bounds */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<UpdateMessageResult>> updateMessageWithResponse(String messageId, String popReceipt, String messageText, Duration visibilityTimeout) { try { return withContext(context -> updateMessageWithResponse(messageId, popReceipt, messageText, visibilityTimeout, context)); } catch (RuntimeException ex) { return monoError(logger, ex); } } Mono<Response<UpdateMessageResult>> updateMessageWithResponse(String messageId, String popReceipt, String messageText, Duration visibilityTimeout, Context context) { QueueMessage message = messageText == null ? null : new QueueMessage().setMessageText(messageText); context = context == null ? Context.NONE : context; visibilityTimeout = visibilityTimeout == null ? Duration.ZERO : visibilityTimeout; return client.getMessageIds().updateWithResponseAsync(queueName, messageId, popReceipt, (int) visibilityTimeout.getSeconds(), null, null, message, context.addData(AZ_TRACING_NAMESPACE_KEY, STORAGE_TRACING_NAMESPACE_VALUE)) .map(this::getUpdatedMessageResponse); } /** * Deletes the specified message in the queue * * <p><strong>Code Samples</strong></p> * * <p>Delete the first message</p> * * {@codesnippet com.azure.storage.queue.QueueAsyncClient.deleteMessage * * <p>For more information, see the * <a href="https: * * @param messageId Id of the message to deleted * @param popReceipt Unique identifier that must match for the message to be deleted * @return An empty response * @throws QueueStorageException If the queue or messageId don't exist or the popReceipt doesn't match on the * message. 
*/ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Void> deleteMessage(String messageId, String popReceipt) { try { return deleteMessageWithResponse(messageId, popReceipt).flatMap(FluxUtil::toMono); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Deletes the specified message in the queue * * <p><strong>Code Samples</strong></p> * * <p>Delete the first message</p> * * {@codesnippet com.azure.storage.queue.QueueAsyncClient.deleteMessageWithResponse * * <p>For more information, see the * <a href="https: * * @param messageId Id of the message to deleted * @param popReceipt Unique identifier that must match for the message to be deleted * @return A response that only contains headers and response status code * @throws QueueStorageException If the queue or messageId don't exist or the popReceipt doesn't match on the * message. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<Void>> deleteMessageWithResponse(String messageId, String popReceipt) { try { return withContext(context -> deleteMessageWithResponse(messageId, popReceipt, context)); } catch (RuntimeException ex) { return monoError(logger, ex); } } Mono<Response<Void>> deleteMessageWithResponse(String messageId, String popReceipt, Context context) { context = context == null ? Context.NONE : context; return client.getMessageIds().deleteWithResponseAsync(queueName, messageId, popReceipt, null, null, context.addData(AZ_TRACING_NAMESPACE_KEY, STORAGE_TRACING_NAMESPACE_VALUE)) .map(response -> new SimpleResponse<>(response, null)); } /** * Get the queue name of the client. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.queue.queueAsyncClient.getQueueName} * * @return The name of the queue. */ public String getQueueName() { return queueName; } /** * Get associated account name. * * @return account name associated with this storage resource. 
*/ public String getAccountName() { return this.accountName; } /** * Generates a service sas for the queue using the specified {@link QueueServiceSasSignatureValues} * <p>Note : The client must be authenticated via {@link StorageSharedKeyCredential} * <p>See {@link QueueServiceSasSignatureValues} for more information on how to construct a service SAS.</p> * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.queue.QueueAsyncClient.generateSas * * @param queueServiceSasSignatureValues {@link QueueServiceSasSignatureValues} * * @return A {@code String} representing the SAS query parameters. */ public String generateSas(QueueServiceSasSignatureValues queueServiceSasSignatureValues) { return generateSas(queueServiceSasSignatureValues, Context.NONE); } /** * Generates a service sas for the queue using the specified {@link QueueServiceSasSignatureValues} * <p>Note : The client must be authenticated via {@link StorageSharedKeyCredential} * <p>See {@link QueueServiceSasSignatureValues} for more information on how to construct a service SAS.</p> * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.queue.QueueAsyncClient.generateSas * * @param queueServiceSasSignatureValues {@link QueueServiceSasSignatureValues} * @param context Additional context that is passed through the code when generating a SAS. * * @return A {@code String} representing the SAS query parameters. 
*/ public String generateSas(QueueServiceSasSignatureValues queueServiceSasSignatureValues, Context context) { return new QueueSasImplUtil(queueServiceSasSignatureValues, getQueueName()) .generateSas(SasImplUtils.extractSharedKeyCredential(getHttpPipeline()), context); } /* * Maps the HTTP headers returned from the service to the expected response type * @param response Service response * @return Mapped response */ private Response<QueueProperties> getQueuePropertiesResponse(QueuesGetPropertiesResponse response) { QueuesGetPropertiesHeaders propertiesHeaders = response.getDeserializedHeaders(); QueueProperties properties = new QueueProperties(propertiesHeaders.getXMsMeta(), propertiesHeaders.getXMsApproximateMessagesCount()); return new SimpleResponse<>(response, properties); } /* * Maps the HTTP headers returned from the service to the expected response type * @param response Service response * @return Mapped response */ private Response<UpdateMessageResult> getUpdatedMessageResponse(MessageIdsUpdateResponse response) { MessageIdsUpdateHeaders headers = response.getDeserializedHeaders(); UpdateMessageResult updateMessageResult = new UpdateMessageResult(headers.getXMsPopreceipt(), headers.getXMsTimeNextVisible()); return new SimpleResponse<>(response, updateMessageResult); } }
Nit: the indentation is off here — please align this line with the surrounding property values.
    /**
     * Builds an {@link ApplicationContextRunner} pre-configured with the Service Bus JMS and
     * Spring JMS auto-configurations plus the connection-string, topic-client-id and
     * idle-timeout properties exercised by these tests.
     *
     * @return a context runner ready to be customized and run by individual test cases
     */
    private ApplicationContextRunner getContextRunnerWithProperties() {
        return new ApplicationContextRunner()
            .withConfiguration(AutoConfigurations.of(ServiceBusJMSAutoConfiguration.class,
                JmsAutoConfiguration.class))
            .withPropertyValues(
                "spring.jms.servicebus.connection-string=" + CONNECTION_STRING,
                "spring.jms.servicebus.topic-client-id=cid",
                "spring.jms.servicebus.idle-timeout=123"
            );
    }
"spring.jms.servicebus.connection-string=" + CONNECTION_STRING,
    /**
     * Builds an {@link ApplicationContextRunner} pre-configured with the Service Bus JMS and
     * Spring JMS auto-configurations plus the connection-string, topic-client-id and
     * idle-timeout properties exercised by these tests.
     *
     * @return a context runner ready to be customized and run by individual test cases
     */
    private ApplicationContextRunner getContextRunnerWithProperties() {
        return new ApplicationContextRunner()
            .withConfiguration(AutoConfigurations.of(ServiceBusJMSAutoConfiguration.class,
                JmsAutoConfiguration.class))
            .withPropertyValues(
                "spring.jms.servicebus.connection-string=" + CONNECTION_STRING,
                "spring.jms.servicebus.topic-client-id=cid",
                "spring.jms.servicebus.idle-timeout=123"
            );
    }
class ServiceBusJMSAutoConfigurationTest { private static final String CONNECTION_STRING = "Endpoint=sb: @Test public void testAzureServiceBusDisabled() { ApplicationContextRunner contextRunner = getEmptyContextRunner(); contextRunner.withPropertyValues("spring.jms.servicebus.enabled=false") .run(context -> assertThat(context).doesNotHaveBean(AzureServiceBusJMSProperties.class)); } @Test public void testWithoutServiceBusJMSNamespace() { ApplicationContextRunner contextRunner = getEmptyContextRunner(); contextRunner.withClassLoader(new FilteredClassLoader(JmsConnectionFactory.class)) .run(context -> assertThat(context).doesNotHaveBean(AzureServiceBusJMSProperties.class)); } @Test(expected = IllegalStateException.class) public void testAzureServiceBusJMSPropertiesValidation() { ApplicationContextRunner contextRunner = getEmptyContextRunner(); contextRunner.run(context -> context.getBean(AzureServiceBusJMSProperties.class)); } @Test public void testCachingConnectionFactoryIsAutowired() { ApplicationContextRunner contextRunner = getContextRunnerWithProperties(); contextRunner.run( context -> { assertThat(context).hasSingleBean(ConnectionFactory.class); assertThat(context).hasSingleBean(JmsTemplate.class); ConnectionFactory connectionFactory = context.getBean(ConnectionFactory.class); assertTrue(connectionFactory == context.getBean(JmsTemplate.class).getConnectionFactory()); } ); } @Test public void testAzureServiceBusJMSPropertiesConfigured() { ApplicationContextRunner contextRunner = getContextRunnerWithProperties(); contextRunner.run( context -> { assertThat(context).hasSingleBean(AzureServiceBusJMSProperties.class); assertThat(context.getBean(AzureServiceBusJMSProperties.class).getConnectionString()).isEqualTo( CONNECTION_STRING); assertThat(context.getBean(AzureServiceBusJMSProperties.class).getTopicClientId()).isEqualTo("cid"); assertThat(context.getBean(AzureServiceBusJMSProperties.class).getIdleTimeout()).isEqualTo(123); } ); } private ApplicationContextRunner 
getEmptyContextRunner() { return new ApplicationContextRunner() .withConfiguration(AutoConfigurations.of(ServiceBusJMSAutoConfiguration.class, JmsAutoConfiguration.class)); } }
class ServiceBusJMSAutoConfigurationTest { private static final String CONNECTION_STRING = "Endpoint=sb: + "SharedAccessKey=sasKey"; @Test public void testAzureServiceBusDisabled() { ApplicationContextRunner contextRunner = getEmptyContextRunner(); contextRunner.withPropertyValues("spring.jms.servicebus.enabled=false") .run(context -> assertThat(context).doesNotHaveBean(AzureServiceBusJMSProperties.class)); } @Test public void testWithoutServiceBusJMSNamespace() { ApplicationContextRunner contextRunner = getEmptyContextRunner(); contextRunner.withClassLoader(new FilteredClassLoader(JmsConnectionFactory.class)) .run(context -> assertThat(context).doesNotHaveBean(AzureServiceBusJMSProperties.class)); } @Test(expected = IllegalStateException.class) public void testAzureServiceBusJMSPropertiesValidation() { ApplicationContextRunner contextRunner = getEmptyContextRunner(); contextRunner.run(context -> context.getBean(AzureServiceBusJMSProperties.class)); } @Test public void testCachingConnectionFactoryIsAutowired() { ApplicationContextRunner contextRunner = getContextRunnerWithProperties(); contextRunner.run( context -> { assertThat(context).hasSingleBean(ConnectionFactory.class); assertThat(context).hasSingleBean(JmsTemplate.class); ConnectionFactory connectionFactory = context.getBean(ConnectionFactory.class); assertTrue(connectionFactory == context.getBean(JmsTemplate.class).getConnectionFactory()); } ); } @Test public void testAzureServiceBusJMSPropertiesConfigured() { ApplicationContextRunner contextRunner = getContextRunnerWithProperties(); contextRunner.run( context -> { assertThat(context).hasSingleBean(AzureServiceBusJMSProperties.class); assertThat(context.getBean(AzureServiceBusJMSProperties.class).getConnectionString()).isEqualTo( CONNECTION_STRING); assertThat(context.getBean(AzureServiceBusJMSProperties.class).getTopicClientId()).isEqualTo("cid"); assertThat(context.getBean(AzureServiceBusJMSProperties.class).getIdleTimeout()).isEqualTo(123); } ); } 
private ApplicationContextRunner getEmptyContextRunner() { return new ApplicationContextRunner() .withConfiguration(AutoConfigurations.of(ServiceBusJMSAutoConfiguration.class, JmsAutoConfiguration.class)); } }
Here is the only code change.
public StorageAccountImpl withSystemAssignedManagedServiceIdentity() { if (this.innerModel().identity() == null) { if (isInCreateMode()) { createParameters.withIdentity(new Identity().withType(IdentityType.SYSTEM_ASSIGNED)); } else { updateParameters.withIdentity(new Identity().withType(IdentityType.SYSTEM_ASSIGNED)); } } return this; }
}
public StorageAccountImpl withSystemAssignedManagedServiceIdentity() { if (this.innerModel().identity() == null) { if (isInCreateMode()) { createParameters.withIdentity(new Identity().withType(IdentityType.SYSTEM_ASSIGNED)); } else { updateParameters.withIdentity(new Identity().withType(IdentityType.SYSTEM_ASSIGNED)); } } return this; }
class StorageAccountImpl extends GroupableResourceImpl<StorageAccount, StorageAccountInner, StorageAccountImpl, StorageManager> implements StorageAccount, StorageAccount.Definition, StorageAccount.Update { private final ClientLogger logger = new ClientLogger(getClass()); private PublicEndpoints publicEndpoints; private AccountStatuses accountStatuses; private StorageAccountCreateParameters createParameters; private StorageAccountUpdateParameters updateParameters; private StorageNetworkRulesHelper networkRulesHelper; private StorageEncryptionHelper encryptionHelper; StorageAccountImpl(String name, StorageAccountInner innerModel, final StorageManager storageManager) { super(name, innerModel, storageManager); this.createParameters = new StorageAccountCreateParameters(); this.networkRulesHelper = new StorageNetworkRulesHelper(this.createParameters); this.encryptionHelper = new StorageEncryptionHelper(this.createParameters); } @Override public AccountStatuses accountStatuses() { if (accountStatuses == null) { accountStatuses = new AccountStatuses(this.innerModel().statusOfPrimary(), this.innerModel().statusOfSecondary()); } return accountStatuses; } @Override public StorageAccountSkuType skuType() { return StorageAccountSkuType.fromSkuName(this.innerModel().sku().name()); } @Override public Kind kind() { return innerModel().kind(); } @Override public OffsetDateTime creationTime() { return this.innerModel().creationTime(); } @Override public CustomDomain customDomain() { return this.innerModel().customDomain(); } @Override public OffsetDateTime lastGeoFailoverTime() { return this.innerModel().lastGeoFailoverTime(); } @Override public ProvisioningState provisioningState() { return this.innerModel().provisioningState(); } @Override public PublicEndpoints endPoints() { if (publicEndpoints == null) { publicEndpoints = new PublicEndpoints(this.innerModel().primaryEndpoints(), this.innerModel().secondaryEndpoints()); } return publicEndpoints; } @Override public 
StorageAccountEncryptionKeySource encryptionKeySource() { return StorageEncryptionHelper.encryptionKeySource(this.innerModel()); } @Override public Map<StorageService, StorageAccountEncryptionStatus> encryptionStatuses() { return StorageEncryptionHelper.encryptionStatuses(this.innerModel()); } @Override public AccessTier accessTier() { return innerModel().accessTier(); } @Override public String systemAssignedManagedServiceIdentityTenantId() { if (this.innerModel().identity() == null) { return null; } else { return this.innerModel().identity().tenantId(); } } @Override public String systemAssignedManagedServiceIdentityPrincipalId() { if (this.innerModel().identity() == null) { return null; } else { return this.innerModel().identity().principalId(); } } @Override public boolean isAccessAllowedFromAllNetworks() { return StorageNetworkRulesHelper.isAccessAllowedFromAllNetworks(this.innerModel()); } @Override public List<String> networkSubnetsWithAccess() { return StorageNetworkRulesHelper.networkSubnetsWithAccess(this.innerModel()); } @Override public List<String> ipAddressesWithAccess() { return StorageNetworkRulesHelper.ipAddressesWithAccess(this.innerModel()); } @Override public List<String> ipAddressRangesWithAccess() { return StorageNetworkRulesHelper.ipAddressRangesWithAccess(this.innerModel()); } @Override public boolean canReadLogEntriesFromAnyNetwork() { return StorageNetworkRulesHelper.canReadLogEntriesFromAnyNetwork(this.innerModel()); } @Override public boolean canReadMetricsFromAnyNetwork() { return StorageNetworkRulesHelper.canReadMetricsFromAnyNetwork(this.innerModel()); } @Override public boolean canAccessFromAzureServices() { return StorageNetworkRulesHelper.canAccessFromAzureServices(this.innerModel()); } @Override public boolean isAzureFilesAadIntegrationEnabled() { return this.innerModel().azureFilesIdentityBasedAuthentication() != null && this.innerModel().azureFilesIdentityBasedAuthentication().directoryServiceOptions() == 
DirectoryServiceOptions.AADDS; } @Override public boolean isHnsEnabled() { return ResourceManagerUtils.toPrimitiveBoolean(this.innerModel().isHnsEnabled()); } @Override public boolean isLargeFileSharesEnabled() { return this.innerModel().largeFileSharesState() == LargeFileSharesState.ENABLED; } @Override public List<StorageAccountKey> getKeys() { return this.getKeysAsync().block(); } @Override public Mono<List<StorageAccountKey>> getKeysAsync() { return this .manager() .serviceClient() .getStorageAccounts() .listKeysAsync(this.resourceGroupName(), this.name()) .map(storageAccountListKeysResultInner -> storageAccountListKeysResultInner.keys()); } @Override public List<StorageAccountKey> regenerateKey(String keyName) { return this.regenerateKeyAsync(keyName).block(); } @Override public Mono<List<StorageAccountKey>> regenerateKeyAsync(String keyName) { return this .manager() .serviceClient() .getStorageAccounts() .regenerateKeyAsync(this.resourceGroupName(), this.name(), keyName) .map(storageAccountListKeysResultInner -> storageAccountListKeysResultInner.keys()); } @Override public Mono<StorageAccount> refreshAsync() { return super .refreshAsync() .map( storageAccount -> { StorageAccountImpl impl = (StorageAccountImpl) storageAccount; impl.clearWrapperProperties(); return impl; }); } @Override protected Mono<StorageAccountInner> getInnerAsync() { return this.manager().serviceClient().getStorageAccounts().getByResourceGroupAsync(this.resourceGroupName(), this.name()); } @Override public StorageAccountImpl withSku(StorageAccountSkuType sku) { if (isInCreateMode()) { createParameters.withSku(new Sku().withName(sku.name())); } else { updateParameters.withSku(new Sku().withName(sku.name())); } return this; } @Override public StorageAccountImpl withBlobStorageAccountKind() { createParameters.withKind(Kind.BLOB_STORAGE); return this; } @Override public StorageAccountImpl withGeneralPurposeAccountKind() { createParameters.withKind(Kind.STORAGE); return this; } @Override 
public StorageAccountImpl withGeneralPurposeAccountKindV2() { createParameters.withKind(Kind.STORAGE_V2); return this; } @Override public StorageAccountImpl withBlockBlobStorageAccountKind() { createParameters.withKind(Kind.BLOCK_BLOB_STORAGE); return this; } @Override public StorageAccountImpl withFileStorageAccountKind() { createParameters.withKind(Kind.FILE_STORAGE); return this; } @Override public StorageAccountImpl withBlobEncryption() { this.encryptionHelper.withBlobEncryption(); return this; } @Override public StorageAccountImpl withFileEncryption() { this.encryptionHelper.withFileEncryption(); return this; } @Override public StorageAccountImpl withEncryptionKeyFromKeyVault(String keyVaultUri, String keyName, String keyVersion) { this.encryptionHelper.withEncryptionKeyFromKeyVault(keyVaultUri, keyName, keyVersion); return this; } @Override public StorageAccountImpl withoutBlobEncryption() { this.encryptionHelper.withoutBlobEncryption(); return this; } @Override public StorageAccountImpl withoutFileEncryption() { this.encryptionHelper.withoutFileEncryption(); return this; } private void clearWrapperProperties() { accountStatuses = null; publicEndpoints = null; } @Override public StorageAccountImpl update() { createParameters = null; updateParameters = new StorageAccountUpdateParameters(); this.networkRulesHelper = new StorageNetworkRulesHelper(this.updateParameters, this.innerModel()); this.encryptionHelper = new StorageEncryptionHelper(this.updateParameters, this.innerModel()); return super.update(); } @Override public StorageAccountImpl withCustomDomain(CustomDomain customDomain) { if (isInCreateMode()) { createParameters.withCustomDomain(customDomain); } else { updateParameters.withCustomDomain(customDomain); } return this; } @Override public StorageAccountImpl withCustomDomain(String name) { return withCustomDomain(new CustomDomain().withName(name)); } @Override public StorageAccountImpl withCustomDomain(String name, boolean useSubDomain) { return 
withCustomDomain(new CustomDomain().withName(name).withUseSubDomainName(useSubDomain)); } @Override public StorageAccountImpl withAccessTier(AccessTier accessTier) { if (isInCreateMode()) { createParameters.withAccessTier(accessTier); } else { if (this.innerModel().kind() != Kind.BLOB_STORAGE) { throw logger.logExceptionAsError(new UnsupportedOperationException( "Access tier can not be changed for general purpose storage accounts.")); } updateParameters.withAccessTier(accessTier); } return this; } @Override @Override public StorageAccountImpl withOnlyHttpsTraffic() { if (isInCreateMode()) { createParameters.withEnableHttpsTrafficOnly(true); } else { updateParameters.withEnableHttpsTrafficOnly(true); } return this; } @Override public StorageAccountImpl withHttpAndHttpsTraffic() { updateParameters.withEnableHttpsTrafficOnly(false); return this; } @Override public StorageAccountImpl withAccessFromAllNetworks() { this.networkRulesHelper.withAccessFromAllNetworks(); return this; } @Override public StorageAccountImpl withAccessFromSelectedNetworks() { this.networkRulesHelper.withAccessFromSelectedNetworks(); return this; } @Override public StorageAccountImpl withAccessFromNetworkSubnet(String subnetId) { this.networkRulesHelper.withAccessFromNetworkSubnet(subnetId); return this; } @Override public StorageAccountImpl withAccessFromIpAddress(String ipAddress) { this.networkRulesHelper.withAccessFromIpAddress(ipAddress); return this; } @Override public StorageAccountImpl withAccessFromIpAddressRange(String ipAddressCidr) { this.networkRulesHelper.withAccessFromIpAddressRange(ipAddressCidr); return this; } @Override public StorageAccountImpl withReadAccessToLogEntriesFromAnyNetwork() { this.networkRulesHelper.withReadAccessToLoggingFromAnyNetwork(); return this; } @Override public StorageAccountImpl withReadAccessToMetricsFromAnyNetwork() { this.networkRulesHelper.withReadAccessToMetricsFromAnyNetwork(); return this; } @Override public StorageAccountImpl 
withAccessFromAzureServices() { this.networkRulesHelper.withAccessAllowedFromAzureServices(); return this; } @Override public StorageAccountImpl withoutNetworkSubnetAccess(String subnetId) { this.networkRulesHelper.withoutNetworkSubnetAccess(subnetId); return this; } @Override public StorageAccountImpl withoutIpAddressAccess(String ipAddress) { this.networkRulesHelper.withoutIpAddressAccess(ipAddress); return this; } @Override public StorageAccountImpl withoutIpAddressRangeAccess(String ipAddressCidr) { this.networkRulesHelper.withoutIpAddressRangeAccess(ipAddressCidr); return this; } @Override public Update withoutReadAccessToLoggingFromAnyNetwork() { this.networkRulesHelper.withoutReadAccessToLoggingFromAnyNetwork(); return this; } @Override public Update withoutReadAccessToMetricsFromAnyNetwork() { this.networkRulesHelper.withoutReadAccessToMetricsFromAnyNetwork(); return this; } @Override public Update withoutAccessFromAzureServices() { this.networkRulesHelper.withoutAccessFromAzureServices(); return this; } @Override public Update upgradeToGeneralPurposeAccountKindV2() { updateParameters.withKind(Kind.STORAGE_V2); return this; } @Override public Mono<StorageAccount> createResourceAsync() { this.networkRulesHelper.setDefaultActionIfRequired(); createParameters.withLocation(this.regionName()); createParameters.withTags(this.innerModel().tags()); final StorageAccountsClient client = this.manager().serviceClient().getStorageAccounts(); return this .manager() .serviceClient() .getStorageAccounts() .createAsync(this.resourceGroupName(), this.name(), createParameters) .flatMap( storageAccountInner -> client .getByResourceGroupAsync(resourceGroupName(), this.name()) .map(innerToFluentMap(this)) .doOnNext(storageAccount -> clearWrapperProperties())); } @Override public Mono<StorageAccount> updateResourceAsync() { this.networkRulesHelper.setDefaultActionIfRequired(); updateParameters.withTags(this.innerModel().tags()); return this .manager() .serviceClient() 
.getStorageAccounts() .updateAsync(resourceGroupName(), this.name(), updateParameters) .map(innerToFluentMap(this)) .doOnNext(storageAccount -> clearWrapperProperties()); } @Override public StorageAccountImpl withAzureFilesAadIntegrationEnabled(boolean enabled) { if (isInCreateMode()) { if (enabled) { this .createParameters .withAzureFilesIdentityBasedAuthentication( new AzureFilesIdentityBasedAuthentication() .withDirectoryServiceOptions(DirectoryServiceOptions.AADDS)); } } else { if (this.createParameters.azureFilesIdentityBasedAuthentication() == null) { this .createParameters .withAzureFilesIdentityBasedAuthentication(new AzureFilesIdentityBasedAuthentication()); } if (enabled) { this .updateParameters .azureFilesIdentityBasedAuthentication() .withDirectoryServiceOptions(DirectoryServiceOptions.AADDS); } else { this .updateParameters .azureFilesIdentityBasedAuthentication() .withDirectoryServiceOptions(DirectoryServiceOptions.NONE); } } return this; } @Override public StorageAccountImpl withLargeFileShares(boolean enabled) { if (isInCreateMode()) { if (enabled) { this.createParameters.withLargeFileSharesState(LargeFileSharesState.ENABLED); } else { this.createParameters.withLargeFileSharesState(LargeFileSharesState.DISABLED); } } return this; } @Override public StorageAccountImpl withHnsEnabled(boolean enabled) { this.createParameters.withIsHnsEnabled(enabled); return this; } }
class StorageAccountImpl extends GroupableResourceImpl<StorageAccount, StorageAccountInner, StorageAccountImpl, StorageManager> implements StorageAccount, StorageAccount.Definition, StorageAccount.Update { private final ClientLogger logger = new ClientLogger(getClass()); private PublicEndpoints publicEndpoints; private AccountStatuses accountStatuses; private StorageAccountCreateParameters createParameters; private StorageAccountUpdateParameters updateParameters; private StorageNetworkRulesHelper networkRulesHelper; private StorageEncryptionHelper encryptionHelper; StorageAccountImpl(String name, StorageAccountInner innerModel, final StorageManager storageManager) { super(name, innerModel, storageManager); this.createParameters = new StorageAccountCreateParameters(); this.networkRulesHelper = new StorageNetworkRulesHelper(this.createParameters); this.encryptionHelper = new StorageEncryptionHelper(this.createParameters); } @Override public AccountStatuses accountStatuses() { if (accountStatuses == null) { accountStatuses = new AccountStatuses(this.innerModel().statusOfPrimary(), this.innerModel().statusOfSecondary()); } return accountStatuses; } @Override public StorageAccountSkuType skuType() { return StorageAccountSkuType.fromSkuName(this.innerModel().sku().name()); } @Override public Kind kind() { return innerModel().kind(); } @Override public OffsetDateTime creationTime() { return this.innerModel().creationTime(); } @Override public CustomDomain customDomain() { return this.innerModel().customDomain(); } @Override public OffsetDateTime lastGeoFailoverTime() { return this.innerModel().lastGeoFailoverTime(); } @Override public ProvisioningState provisioningState() { return this.innerModel().provisioningState(); } @Override public PublicEndpoints endPoints() { if (publicEndpoints == null) { publicEndpoints = new PublicEndpoints(this.innerModel().primaryEndpoints(), this.innerModel().secondaryEndpoints()); } return publicEndpoints; } @Override public 
StorageAccountEncryptionKeySource encryptionKeySource() { return StorageEncryptionHelper.encryptionKeySource(this.innerModel()); } @Override public Map<StorageService, StorageAccountEncryptionStatus> encryptionStatuses() { return StorageEncryptionHelper.encryptionStatuses(this.innerModel()); } @Override public AccessTier accessTier() { return innerModel().accessTier(); } @Override public String systemAssignedManagedServiceIdentityTenantId() { if (this.innerModel().identity() == null) { return null; } else { return this.innerModel().identity().tenantId(); } } @Override public String systemAssignedManagedServiceIdentityPrincipalId() { if (this.innerModel().identity() == null) { return null; } else { return this.innerModel().identity().principalId(); } } @Override public boolean isAccessAllowedFromAllNetworks() { return StorageNetworkRulesHelper.isAccessAllowedFromAllNetworks(this.innerModel()); } @Override public List<String> networkSubnetsWithAccess() { return StorageNetworkRulesHelper.networkSubnetsWithAccess(this.innerModel()); } @Override public List<String> ipAddressesWithAccess() { return StorageNetworkRulesHelper.ipAddressesWithAccess(this.innerModel()); } @Override public List<String> ipAddressRangesWithAccess() { return StorageNetworkRulesHelper.ipAddressRangesWithAccess(this.innerModel()); } @Override public boolean canReadLogEntriesFromAnyNetwork() { return StorageNetworkRulesHelper.canReadLogEntriesFromAnyNetwork(this.innerModel()); } @Override public boolean canReadMetricsFromAnyNetwork() { return StorageNetworkRulesHelper.canReadMetricsFromAnyNetwork(this.innerModel()); } @Override public boolean canAccessFromAzureServices() { return StorageNetworkRulesHelper.canAccessFromAzureServices(this.innerModel()); } @Override public boolean isAzureFilesAadIntegrationEnabled() { return this.innerModel().azureFilesIdentityBasedAuthentication() != null && this.innerModel().azureFilesIdentityBasedAuthentication().directoryServiceOptions() == 
DirectoryServiceOptions.AADDS; } @Override public boolean isHnsEnabled() { return ResourceManagerUtils.toPrimitiveBoolean(this.innerModel().isHnsEnabled()); } @Override public boolean isLargeFileSharesEnabled() { return this.innerModel().largeFileSharesState() == LargeFileSharesState.ENABLED; } @Override public List<StorageAccountKey> getKeys() { return this.getKeysAsync().block(); } @Override public Mono<List<StorageAccountKey>> getKeysAsync() { return this .manager() .serviceClient() .getStorageAccounts() .listKeysAsync(this.resourceGroupName(), this.name()) .map(storageAccountListKeysResultInner -> storageAccountListKeysResultInner.keys()); } @Override public List<StorageAccountKey> regenerateKey(String keyName) { return this.regenerateKeyAsync(keyName).block(); } @Override public Mono<List<StorageAccountKey>> regenerateKeyAsync(String keyName) { return this .manager() .serviceClient() .getStorageAccounts() .regenerateKeyAsync(this.resourceGroupName(), this.name(), keyName) .map(storageAccountListKeysResultInner -> storageAccountListKeysResultInner.keys()); } @Override public Mono<StorageAccount> refreshAsync() { return super .refreshAsync() .map( storageAccount -> { StorageAccountImpl impl = (StorageAccountImpl) storageAccount; impl.clearWrapperProperties(); return impl; }); } @Override protected Mono<StorageAccountInner> getInnerAsync() { return this.manager().serviceClient().getStorageAccounts().getByResourceGroupAsync(this.resourceGroupName(), this.name()); } @Override public StorageAccountImpl withSku(StorageAccountSkuType sku) { if (isInCreateMode()) { createParameters.withSku(new Sku().withName(sku.name())); } else { updateParameters.withSku(new Sku().withName(sku.name())); } return this; } @Override public StorageAccountImpl withBlobStorageAccountKind() { createParameters.withKind(Kind.BLOB_STORAGE); return this; } @Override public StorageAccountImpl withGeneralPurposeAccountKind() { createParameters.withKind(Kind.STORAGE); return this; } @Override 
public StorageAccountImpl withGeneralPurposeAccountKindV2() { createParameters.withKind(Kind.STORAGE_V2); return this; } @Override public StorageAccountImpl withBlockBlobStorageAccountKind() { createParameters.withKind(Kind.BLOCK_BLOB_STORAGE); return this; } @Override public StorageAccountImpl withFileStorageAccountKind() { createParameters.withKind(Kind.FILE_STORAGE); return this; } @Override public StorageAccountImpl withBlobEncryption() { this.encryptionHelper.withBlobEncryption(); return this; } @Override public StorageAccountImpl withFileEncryption() { this.encryptionHelper.withFileEncryption(); return this; } @Override public StorageAccountImpl withEncryptionKeyFromKeyVault(String keyVaultUri, String keyName, String keyVersion) { this.encryptionHelper.withEncryptionKeyFromKeyVault(keyVaultUri, keyName, keyVersion); return this; } @Override public StorageAccountImpl withoutBlobEncryption() { this.encryptionHelper.withoutBlobEncryption(); return this; } @Override public StorageAccountImpl withoutFileEncryption() { this.encryptionHelper.withoutFileEncryption(); return this; } private void clearWrapperProperties() { accountStatuses = null; publicEndpoints = null; } @Override public StorageAccountImpl update() { createParameters = null; updateParameters = new StorageAccountUpdateParameters(); this.networkRulesHelper = new StorageNetworkRulesHelper(this.updateParameters, this.innerModel()); this.encryptionHelper = new StorageEncryptionHelper(this.updateParameters, this.innerModel()); return super.update(); } @Override public StorageAccountImpl withCustomDomain(CustomDomain customDomain) { if (isInCreateMode()) { createParameters.withCustomDomain(customDomain); } else { updateParameters.withCustomDomain(customDomain); } return this; } @Override public StorageAccountImpl withCustomDomain(String name) { return withCustomDomain(new CustomDomain().withName(name)); } @Override public StorageAccountImpl withCustomDomain(String name, boolean useSubDomain) { return 
withCustomDomain(new CustomDomain().withName(name).withUseSubDomainName(useSubDomain)); } @Override public StorageAccountImpl withAccessTier(AccessTier accessTier) { if (isInCreateMode()) { createParameters.withAccessTier(accessTier); } else { if (this.innerModel().kind() != Kind.BLOB_STORAGE) { throw logger.logExceptionAsError(new UnsupportedOperationException( "Access tier can not be changed for general purpose storage accounts.")); } updateParameters.withAccessTier(accessTier); } return this; } @Override @Override public StorageAccountImpl withOnlyHttpsTraffic() { if (isInCreateMode()) { createParameters.withEnableHttpsTrafficOnly(true); } else { updateParameters.withEnableHttpsTrafficOnly(true); } return this; } @Override public StorageAccountImpl withHttpAndHttpsTraffic() { updateParameters.withEnableHttpsTrafficOnly(false); return this; } @Override public StorageAccountImpl withAccessFromAllNetworks() { this.networkRulesHelper.withAccessFromAllNetworks(); return this; } @Override public StorageAccountImpl withAccessFromSelectedNetworks() { this.networkRulesHelper.withAccessFromSelectedNetworks(); return this; } @Override public StorageAccountImpl withAccessFromNetworkSubnet(String subnetId) { this.networkRulesHelper.withAccessFromNetworkSubnet(subnetId); return this; } @Override public StorageAccountImpl withAccessFromIpAddress(String ipAddress) { this.networkRulesHelper.withAccessFromIpAddress(ipAddress); return this; } @Override public StorageAccountImpl withAccessFromIpAddressRange(String ipAddressCidr) { this.networkRulesHelper.withAccessFromIpAddressRange(ipAddressCidr); return this; } @Override public StorageAccountImpl withReadAccessToLogEntriesFromAnyNetwork() { this.networkRulesHelper.withReadAccessToLoggingFromAnyNetwork(); return this; } @Override public StorageAccountImpl withReadAccessToMetricsFromAnyNetwork() { this.networkRulesHelper.withReadAccessToMetricsFromAnyNetwork(); return this; } @Override public StorageAccountImpl 
withAccessFromAzureServices() { this.networkRulesHelper.withAccessAllowedFromAzureServices(); return this; } @Override public StorageAccountImpl withoutNetworkSubnetAccess(String subnetId) { this.networkRulesHelper.withoutNetworkSubnetAccess(subnetId); return this; } @Override public StorageAccountImpl withoutIpAddressAccess(String ipAddress) { this.networkRulesHelper.withoutIpAddressAccess(ipAddress); return this; } @Override public StorageAccountImpl withoutIpAddressRangeAccess(String ipAddressCidr) { this.networkRulesHelper.withoutIpAddressRangeAccess(ipAddressCidr); return this; } @Override public Update withoutReadAccessToLoggingFromAnyNetwork() { this.networkRulesHelper.withoutReadAccessToLoggingFromAnyNetwork(); return this; } @Override public Update withoutReadAccessToMetricsFromAnyNetwork() { this.networkRulesHelper.withoutReadAccessToMetricsFromAnyNetwork(); return this; } @Override public Update withoutAccessFromAzureServices() { this.networkRulesHelper.withoutAccessFromAzureServices(); return this; } @Override public Update upgradeToGeneralPurposeAccountKindV2() { updateParameters.withKind(Kind.STORAGE_V2); return this; } @Override public Mono<StorageAccount> createResourceAsync() { this.networkRulesHelper.setDefaultActionIfRequired(); createParameters.withLocation(this.regionName()); createParameters.withTags(this.innerModel().tags()); final StorageAccountsClient client = this.manager().serviceClient().getStorageAccounts(); return this .manager() .serviceClient() .getStorageAccounts() .createAsync(this.resourceGroupName(), this.name(), createParameters) .flatMap( storageAccountInner -> client .getByResourceGroupAsync(resourceGroupName(), this.name()) .map(innerToFluentMap(this)) .doOnNext(storageAccount -> clearWrapperProperties())); } @Override public Mono<StorageAccount> updateResourceAsync() { this.networkRulesHelper.setDefaultActionIfRequired(); updateParameters.withTags(this.innerModel().tags()); return this .manager() .serviceClient() 
.getStorageAccounts() .updateAsync(resourceGroupName(), this.name(), updateParameters) .map(innerToFluentMap(this)) .doOnNext(storageAccount -> clearWrapperProperties()); } @Override public StorageAccountImpl withAzureFilesAadIntegrationEnabled(boolean enabled) { if (isInCreateMode()) { if (enabled) { this .createParameters .withAzureFilesIdentityBasedAuthentication( new AzureFilesIdentityBasedAuthentication() .withDirectoryServiceOptions(DirectoryServiceOptions.AADDS)); } } else { if (this.createParameters.azureFilesIdentityBasedAuthentication() == null) { this .createParameters .withAzureFilesIdentityBasedAuthentication(new AzureFilesIdentityBasedAuthentication()); } if (enabled) { this .updateParameters .azureFilesIdentityBasedAuthentication() .withDirectoryServiceOptions(DirectoryServiceOptions.AADDS); } else { this .updateParameters .azureFilesIdentityBasedAuthentication() .withDirectoryServiceOptions(DirectoryServiceOptions.NONE); } } return this; } @Override public StorageAccountImpl withLargeFileShares(boolean enabled) { if (isInCreateMode()) { if (enabled) { this.createParameters.withLargeFileSharesState(LargeFileSharesState.ENABLED); } else { this.createParameters.withLargeFileSharesState(LargeFileSharesState.DISABLED); } } return this; } @Override public StorageAccountImpl withHnsEnabled(boolean enabled) { this.createParameters.withIsHnsEnabled(enabled); return this; } }
Can we use the static field in parent class? ``` objectMapper = OBJECT_MAPPER; ``` Change `OBJECT_MAPPER` to `protected`.
public ServiceBusMessageConverter() { objectMapper = new ObjectMapper(); }
objectMapper = new ObjectMapper();
public ServiceBusMessageConverter() { objectMapper = OBJECT_MAPPER; }
class ServiceBusMessageConverter extends AbstractAzureMessageConverter<IMessage> { private static final Logger LOG = LoggerFactory.getLogger(ServiceBusMessageConverter.class); private final ObjectMapper objectMapper; public ServiceBusMessageConverter(ObjectMapper objectMapper) { this.objectMapper = objectMapper; } @Override protected ObjectMapper getObjectMapper() { return objectMapper; } @Override protected byte[] getPayload(IMessage azureMessage) { MessageBody messageBody = azureMessage.getMessageBody(); if (messageBody == null) { return new byte[0]; } switch (messageBody.getBodyType()) { case BINARY: return messageBody.getBinaryData().stream().findFirst().orElse(null); case VALUE: return String.valueOf(messageBody.getValueData()).getBytes(StandardCharsets.UTF_8); case SEQUENCE: return toPayload(messageBody.getSequenceData().stream().findFirst().orElse(null)); default: return new byte[0]; } } @Override protected IMessage fromString(String payload) { return new Message(payload); } @Override protected IMessage fromByte(byte[] payload) { return new Message(payload); } @Override protected void setCustomHeaders(MessageHeaders headers, IMessage serviceBusMessage) { if (headers.containsKey(MessageHeaders.CONTENT_TYPE)) { Object contentType = headers.get(MessageHeaders.CONTENT_TYPE); if (contentType instanceof MimeType) { serviceBusMessage.setContentType(((MimeType) contentType).toString()); } else { serviceBusMessage.setContentType((String) contentType); } } if (headers.containsKey(MessageHeaders.ID)) { serviceBusMessage.setMessageId(String.valueOf(headers.get(MessageHeaders.ID, UUID.class))); } if (headers.containsKey(MessageHeaders.REPLY_CHANNEL)) { serviceBusMessage.setReplyTo(headers.get(MessageHeaders.REPLY_CHANNEL, String.class)); } if (headers.containsKey(AzureHeaders.SCHEDULED_ENQUEUE_MESSAGE)) { Integer integerValue = headers.get(AzureHeaders.SCHEDULED_ENQUEUE_MESSAGE, Integer.class); if (null != integerValue) { 
serviceBusMessage.setScheduledEnqueueTimeUtc(Instant.now().plus(Duration.ofMillis(integerValue))); } } headers.forEach((key, value) -> serviceBusMessage.getProperties().put(key, value.toString())); } @Override protected Map<String, Object> buildCustomHeaders(IMessage serviceBusMessage) { Map<String, Object> headers = new HashMap<>(); if (StringUtils.hasText(serviceBusMessage.getMessageId())) { headers.put(AzureHeaders.RAW_ID, serviceBusMessage.getMessageId()); } if (StringUtils.hasText(serviceBusMessage.getContentType())) { String contentType = serviceBusMessage.getContentType(); try { MimeType mimeType = MimeType.valueOf(contentType); headers.put(MessageHeaders.CONTENT_TYPE, mimeType.toString()); } catch (InvalidMimeTypeException e) { LOG.warn("Invalid mimeType '{}' from service bus message.", contentType); } } if (StringUtils.hasText(serviceBusMessage.getReplyTo())) { headers.put(MessageHeaders.REPLY_CHANNEL, serviceBusMessage.getReplyTo()); } headers.putAll(serviceBusMessage.getProperties()); return Collections.unmodifiableMap(headers); } }
class ServiceBusMessageConverter extends AbstractAzureMessageConverter<IMessage> { private static final Logger LOG = LoggerFactory.getLogger(ServiceBusMessageConverter.class); private final ObjectMapper objectMapper; public ServiceBusMessageConverter(ObjectMapper objectMapper) { this.objectMapper = objectMapper; } @Override protected ObjectMapper getObjectMapper() { return objectMapper; } @Override protected byte[] getPayload(IMessage azureMessage) { MessageBody messageBody = azureMessage.getMessageBody(); if (messageBody == null) { return new byte[0]; } switch (messageBody.getBodyType()) { case BINARY: return messageBody.getBinaryData().stream().findFirst().orElse(null); case VALUE: return String.valueOf(messageBody.getValueData()).getBytes(StandardCharsets.UTF_8); case SEQUENCE: return toPayload(messageBody.getSequenceData().stream().findFirst().orElse(null)); default: return new byte[0]; } } @Override protected IMessage fromString(String payload) { return new Message(payload); } @Override protected IMessage fromByte(byte[] payload) { return new Message(payload); } @Override protected void setCustomHeaders(MessageHeaders headers, IMessage serviceBusMessage) { if (headers.containsKey(MessageHeaders.CONTENT_TYPE)) { Object contentType = headers.get(MessageHeaders.CONTENT_TYPE); if (contentType instanceof MimeType) { serviceBusMessage.setContentType(((MimeType) contentType).toString()); } else { serviceBusMessage.setContentType((String) contentType); } } if (headers.containsKey(MessageHeaders.ID)) { serviceBusMessage.setMessageId(String.valueOf(headers.get(MessageHeaders.ID, UUID.class))); } if (headers.containsKey(MessageHeaders.REPLY_CHANNEL)) { serviceBusMessage.setReplyTo(headers.get(MessageHeaders.REPLY_CHANNEL, String.class)); } if (headers.containsKey(AzureHeaders.SCHEDULED_ENQUEUE_MESSAGE)) { Integer integerValue = headers.get(AzureHeaders.SCHEDULED_ENQUEUE_MESSAGE, Integer.class); if (null != integerValue) { 
serviceBusMessage.setScheduledEnqueueTimeUtc(Instant.now().plus(Duration.ofMillis(integerValue))); } } headers.forEach((key, value) -> serviceBusMessage.getProperties().put(key, value.toString())); } @Override protected Map<String, Object> buildCustomHeaders(IMessage serviceBusMessage) { Map<String, Object> headers = new HashMap<>(); if (StringUtils.hasText(serviceBusMessage.getMessageId())) { headers.put(AzureHeaders.RAW_ID, serviceBusMessage.getMessageId()); } if (StringUtils.hasText(serviceBusMessage.getContentType())) { String contentType = serviceBusMessage.getContentType(); try { MimeType mimeType = MimeType.valueOf(contentType); headers.put(MessageHeaders.CONTENT_TYPE, mimeType.toString()); } catch (InvalidMimeTypeException e) { LOG.warn("Invalid mimeType '{}' from service bus message.", contentType); } } if (StringUtils.hasText(serviceBusMessage.getReplyTo())) { headers.put(MessageHeaders.REPLY_CHANNEL, serviceBusMessage.getReplyTo()); } headers.putAll(serviceBusMessage.getProperties()); return Collections.unmodifiableMap(headers); } }
nit: use `assertSame` instead of `assertTrue`.
public void testMessageConverterProvided() { this.contextRunner.withUserConfiguration( TestConfigWithMessageConverter.class, AzureServiceBusAutoConfiguration.class) .withPropertyValues( SERVICE_BUS_PROPERTY_PREFIX + "connection-string=str1" ) .run(context -> { assertThat(context).hasSingleBean(ServiceBusMessageConverter.class); assertThat(context).hasSingleBean(ServiceBusQueueTemplate.class); ServiceBusMessageConverter messageConverter = context.getBean(ServiceBusMessageConverter.class); ServiceBusQueueTemplate queueTemplate = context.getBean(ServiceBusQueueTemplate.class); assertTrue(messageConverter == queueTemplate.getMessageConverter()); }); }
assertTrue(messageConverter == queueTemplate.getMessageConverter());
public void testMessageConverterProvided() { this.contextRunner.withUserConfiguration( TestConfigWithMessageConverter.class, AzureServiceBusAutoConfiguration.class) .withPropertyValues( SERVICE_BUS_PROPERTY_PREFIX + "connection-string=str1" ) .run(context -> { assertThat(context).hasSingleBean(ServiceBusMessageConverter.class); assertThat(context).hasSingleBean(ServiceBusQueueTemplate.class); ServiceBusMessageConverter messageConverter = context.getBean(ServiceBusMessageConverter.class); ServiceBusQueueTemplate queueTemplate = context.getBean(ServiceBusQueueTemplate.class); assertSame(messageConverter, queueTemplate.getMessageConverter()); }); }
class AzureServiceBusQueueAutoConfigurationTest { private static final String SERVICE_BUS_PROPERTY_PREFIX = "spring.cloud.azure.servicebus."; private static final String AZURE_PROPERTY_PREFIX = "spring.cloud.azure."; private ApplicationContextRunner contextRunner = new ApplicationContextRunner() .withConfiguration(AutoConfigurations.of(AzureServiceBusQueueAutoConfiguration.class)); @Test public void testAzureServiceBusDisabled() { this.contextRunner.withPropertyValues(SERVICE_BUS_PROPERTY_PREFIX + "enabled=false") .run(context -> assertThat(context).doesNotHaveBean(ServiceBusQueueOperation.class)); } @Test public void testWithoutAzureServiceBusQueueClient() { this.contextRunner.withClassLoader(new FilteredClassLoader(QueueClient.class)) .run(context -> assertThat(context).doesNotHaveBean(ServiceBusQueueOperation.class)); } @Test public void testWithoutServiceBusNamespaceManager() { this.contextRunner.withUserConfiguration(TestConfigWithConnectionStringProvider.class) .run(context -> assertThat(context).doesNotHaveBean(ServiceBusQueueManager.class)); } @Test public void testWithServiceBusNamespaceManager() { this.contextRunner.withUserConfiguration(TestConfigWithServiceBusNamespaceManager.class, TestConfigWithConnectionStringProvider.class) .run(context -> assertThat(context).hasSingleBean(ServiceBusQueueManager.class)); } @Test public void testQueueClientFactoryCreated() { this.contextRunner.withUserConfiguration(AzureServiceBusAutoConfiguration.class, TestConfigWithServiceBusNamespaceManager.class) .withPropertyValues(SERVICE_BUS_PROPERTY_PREFIX + "connection-string=str1") .run(context -> assertThat(context).hasSingleBean(ServiceBusQueueClientFactory.class) .hasSingleBean(ServiceBusQueueOperation.class)); } @Test public void testConnectionStringProvided() { this.contextRunner.withPropertyValues(SERVICE_BUS_PROPERTY_PREFIX + "connection-string=str1") .withUserConfiguration(AzureServiceBusAutoConfiguration.class) .run(context -> { 
assertThat(context.getBean(ServiceBusConnectionStringProvider.class).getConnectionString()).isEqualTo("str1"); assertThat(context).doesNotHaveBean(ServiceBusNamespaceManager.class); assertThat(context).doesNotHaveBean(ServiceBusQueueManager.class); assertThat(context).hasSingleBean(ServiceBusQueueClientFactory.class); assertThat(context).hasSingleBean(ServiceBusQueueOperation.class); assertThat(context).hasSingleBean(ServiceBusMessageConverter.class); }); } @Test public void testResourceManagerProvided() { this.contextRunner.withUserConfiguration( TestConfigWithAzureResourceManager.class, TestConfigWithConnectionStringProvider.class, AzureServiceBusAutoConfiguration.class) .withPropertyValues( AZURE_PROPERTY_PREFIX + "resource-group=rg1", SERVICE_BUS_PROPERTY_PREFIX + "namespace=ns1" ) .run(context -> { assertThat(context).hasSingleBean(ServiceBusQueueClientFactory.class); assertThat(context).hasSingleBean(ServiceBusQueueOperation.class); assertThat(context).hasSingleBean(ServiceBusNamespaceManager.class); assertThat(context).hasSingleBean(ServiceBusQueueManager.class); }); } @Test @Configuration @EnableConfigurationProperties(AzureProperties.class) public static class TestConfigWithServiceBusNamespaceManager { @Bean public ServiceBusNamespaceManager servicebusNamespaceManager() { return mock(ServiceBusNamespaceManager.class); } } @Configuration @EnableConfigurationProperties(AzureServiceBusProperties.class) public static class TestConfigWithConnectionStringProvider { @Bean public ServiceBusConnectionStringProvider serviceBusConnectionStringProvider() { return new ServiceBusConnectionStringProvider("fake"); } } @Configuration @EnableConfigurationProperties(AzureProperties.class) public static class TestConfigWithAzureResourceManager { @Bean public AzureResourceManager azureResourceManager() { return mock(AzureResourceManager.class); } } @Configuration @EnableConfigurationProperties(AzureProperties.class) public static class TestConfigWithMessageConverter { @Bean 
public ServiceBusMessageConverter messageConverter() { return mock(ServiceBusMessageConverter.class); } } }
class AzureServiceBusQueueAutoConfigurationTest { private static final String SERVICE_BUS_PROPERTY_PREFIX = "spring.cloud.azure.servicebus."; private static final String AZURE_PROPERTY_PREFIX = "spring.cloud.azure."; private ApplicationContextRunner contextRunner = new ApplicationContextRunner() .withConfiguration(AutoConfigurations.of(AzureServiceBusQueueAutoConfiguration.class)); @Test public void testAzureServiceBusDisabled() { this.contextRunner.withPropertyValues(SERVICE_BUS_PROPERTY_PREFIX + "enabled=false") .run(context -> assertThat(context).doesNotHaveBean(ServiceBusQueueOperation.class)); } @Test public void testWithoutAzureServiceBusQueueClient() { this.contextRunner.withClassLoader(new FilteredClassLoader(QueueClient.class)) .run(context -> assertThat(context).doesNotHaveBean(ServiceBusQueueOperation.class)); } @Test public void testWithoutServiceBusNamespaceManager() { this.contextRunner.withUserConfiguration(TestConfigWithConnectionStringProvider.class) .run(context -> assertThat(context).doesNotHaveBean(ServiceBusQueueManager.class)); } @Test public void testWithServiceBusNamespaceManager() { this.contextRunner.withUserConfiguration(TestConfigWithServiceBusNamespaceManager.class, TestConfigWithConnectionStringProvider.class) .run(context -> assertThat(context).hasSingleBean(ServiceBusQueueManager.class)); } @Test public void testQueueClientFactoryCreated() { this.contextRunner.withUserConfiguration(AzureServiceBusAutoConfiguration.class, TestConfigWithServiceBusNamespaceManager.class) .withPropertyValues(SERVICE_BUS_PROPERTY_PREFIX + "connection-string=str1") .run(context -> assertThat(context).hasSingleBean(ServiceBusQueueClientFactory.class) .hasSingleBean(ServiceBusQueueOperation.class)); } @Test public void testConnectionStringProvided() { this.contextRunner.withPropertyValues(SERVICE_BUS_PROPERTY_PREFIX + "connection-string=str1") .withUserConfiguration(AzureServiceBusAutoConfiguration.class) .run(context -> { 
assertThat(context.getBean(ServiceBusConnectionStringProvider.class).getConnectionString()).isEqualTo("str1"); assertThat(context).doesNotHaveBean(ServiceBusNamespaceManager.class); assertThat(context).doesNotHaveBean(ServiceBusQueueManager.class); assertThat(context).hasSingleBean(ServiceBusQueueClientFactory.class); assertThat(context).hasSingleBean(ServiceBusQueueOperation.class); assertThat(context).hasSingleBean(ServiceBusMessageConverter.class); }); } @Test public void testResourceManagerProvided() { this.contextRunner.withUserConfiguration( TestConfigWithAzureResourceManager.class, TestConfigWithConnectionStringProvider.class, AzureServiceBusAutoConfiguration.class) .withPropertyValues( AZURE_PROPERTY_PREFIX + "resource-group=rg1", SERVICE_BUS_PROPERTY_PREFIX + "namespace=ns1" ) .run(context -> { assertThat(context).hasSingleBean(ServiceBusQueueClientFactory.class); assertThat(context).hasSingleBean(ServiceBusQueueOperation.class); assertThat(context).hasSingleBean(ServiceBusNamespaceManager.class); assertThat(context).hasSingleBean(ServiceBusQueueManager.class); }); } @Test @Configuration @EnableConfigurationProperties(AzureProperties.class) public static class TestConfigWithServiceBusNamespaceManager { @Bean public ServiceBusNamespaceManager servicebusNamespaceManager() { return mock(ServiceBusNamespaceManager.class); } } @Configuration @EnableConfigurationProperties(AzureServiceBusProperties.class) public static class TestConfigWithConnectionStringProvider { @Bean public ServiceBusConnectionStringProvider serviceBusConnectionStringProvider() { return new ServiceBusConnectionStringProvider("fake"); } } @Configuration @EnableConfigurationProperties(AzureProperties.class) public static class TestConfigWithAzureResourceManager { @Bean public AzureResourceManager azureResourceManager() { return mock(AzureResourceManager.class); } } @Configuration @EnableConfigurationProperties(AzureProperties.class) public static class TestConfigWithMessageConverter { @Bean 
public ServiceBusMessageConverter messageConverter() { return mock(ServiceBusMessageConverter.class); } } }
Same here.
public void testMessageConverterProvided() { this.contextRunner.withUserConfiguration( TestConfigWithMessageConverter.class, AzureServiceBusAutoConfiguration.class) .withPropertyValues( SERVICE_BUS_PROPERTY_PREFIX + "connection-string=str1" ) .run(context -> { assertThat(context).hasSingleBean(ServiceBusMessageConverter.class); assertThat(context).hasSingleBean(ServiceBusTopicTemplate.class); ServiceBusMessageConverter messageConverter = context.getBean(ServiceBusMessageConverter.class); ServiceBusTopicTemplate topicTemplate = context.getBean(ServiceBusTopicTemplate.class); assertTrue(messageConverter == topicTemplate.getMessageConverter()); }); }
assertTrue(messageConverter == topicTemplate.getMessageConverter());
public void testMessageConverterProvided() { this.contextRunner.withUserConfiguration( TestConfigWithMessageConverter.class, AzureServiceBusAutoConfiguration.class) .withPropertyValues( SERVICE_BUS_PROPERTY_PREFIX + "connection-string=str1" ) .run(context -> { assertThat(context).hasSingleBean(ServiceBusMessageConverter.class); assertThat(context).hasSingleBean(ServiceBusTopicTemplate.class); ServiceBusMessageConverter messageConverter = context.getBean(ServiceBusMessageConverter.class); ServiceBusTopicTemplate topicTemplate = context.getBean(ServiceBusTopicTemplate.class); assertSame(messageConverter, topicTemplate.getMessageConverter()); }); }
class AzureServiceBusTopicAutoConfigurationTest { private static final String SERVICE_BUS_PROPERTY_PREFIX = "spring.cloud.azure.servicebus."; private static final String AZURE_PROPERTY_PREFIX = "spring.cloud.azure."; private ApplicationContextRunner contextRunner = new ApplicationContextRunner() .withConfiguration(AutoConfigurations.of(AzureServiceBusTopicAutoConfiguration.class)); @Test public void testAzureServiceBusTopicDisabled() { this.contextRunner.withPropertyValues(SERVICE_BUS_PROPERTY_PREFIX + "enabled=false") .run(context -> assertThat(context).doesNotHaveBean(ServiceBusTopicOperation.class)); } @Test public void testWithoutAzureServiceBusTopicClient() { this.contextRunner.withClassLoader(new FilteredClassLoader(TopicClient.class)) .run(context -> assertThat(context).doesNotHaveBean(ServiceBusTopicOperation.class)); } @Test public void testWithoutServiceBusNamespaceManager() { this.contextRunner.withUserConfiguration(TestConfigWithConnectionStringProvider.class) .run(c -> assertThat(c).doesNotHaveBean(ServiceBusTopicManager.class) .doesNotHaveBean(ServiceBusTopicSubscriptionManager.class)); } @Test public void testWithServiceBusNamespaceManager() { this.contextRunner.withUserConfiguration(TestConfigWithConnectionStringProvider.class, TestConfigWithServiceBusNamespaceManager.class) .run(context -> assertThat(context).hasSingleBean(ServiceBusTopicManager.class) .hasSingleBean(ServiceBusTopicSubscriptionManager.class)); } @Test public void testTopicClientFactoryCreated() { this.contextRunner.withUserConfiguration(TestConfigWithConnectionStringProvider.class, TestConfigWithServiceBusNamespaceManager.class) .run(context -> assertThat(context).hasSingleBean(ServiceBusTopicClientFactory.class) .hasSingleBean(ServiceBusTopicOperation.class)); } @Test public void testConnectionStringProvided() { this.contextRunner.withPropertyValues(SERVICE_BUS_PROPERTY_PREFIX + "connection-string=str1") .withUserConfiguration(AzureServiceBusAutoConfiguration.class) .run(context 
-> { assertThat(context.getBean(ServiceBusConnectionStringProvider.class).getConnectionString()).isEqualTo("str1"); assertThat(context).doesNotHaveBean(ServiceBusNamespaceManager.class); assertThat(context).doesNotHaveBean(ServiceBusTopicManager.class); assertThat(context).doesNotHaveBean(ServiceBusTopicSubscriptionManager.class); assertThat(context).hasSingleBean(ServiceBusTopicClientFactory.class); assertThat(context).hasSingleBean(ServiceBusTopicOperation.class); assertThat(context).hasSingleBean(ServiceBusMessageConverter.class); }); } @Test public void testResourceManagerProvided() { this.contextRunner.withUserConfiguration(TestConfigWithAzureResourceManager.class, TestConfigWithConnectionStringProvider.class, AzureServiceBusAutoConfiguration.class) .withPropertyValues( AZURE_PROPERTY_PREFIX + "resource-group=rg1", SERVICE_BUS_PROPERTY_PREFIX + "namespace=ns1" ) .run(context -> { assertThat(context).hasSingleBean(ServiceBusTopicClientFactory.class); assertThat(context).hasSingleBean(ServiceBusTopicOperation.class); assertThat(context).hasSingleBean(ServiceBusNamespaceManager.class); assertThat(context).hasSingleBean(ServiceBusTopicManager.class); assertThat(context).hasSingleBean(ServiceBusTopicSubscriptionManager.class); }); } @Test @Configuration @EnableConfigurationProperties(AzureProperties.class) public static class TestConfigWithServiceBusNamespaceManager { @Bean public ServiceBusNamespaceManager servicebusNamespaceManager() { return mock(ServiceBusNamespaceManager.class); } } @Configuration @EnableConfigurationProperties(AzureServiceBusProperties.class) public static class TestConfigWithConnectionStringProvider { @Bean public ServiceBusConnectionStringProvider serviceBusConnectionStringProvider() { return new ServiceBusConnectionStringProvider("fake"); } } @Configuration @EnableConfigurationProperties(AzureProperties.class) public static class TestConfigWithAzureResourceManager { @Bean public AzureResourceManager azureResourceManager() { return 
mock(AzureResourceManager.class); } } @Configuration @EnableConfigurationProperties(AzureProperties.class) public static class TestConfigWithMessageConverter { @Bean public ServiceBusMessageConverter messageConverter() { return mock(ServiceBusMessageConverter.class); } } }
class AzureServiceBusTopicAutoConfigurationTest { private static final String SERVICE_BUS_PROPERTY_PREFIX = "spring.cloud.azure.servicebus."; private static final String AZURE_PROPERTY_PREFIX = "spring.cloud.azure."; private ApplicationContextRunner contextRunner = new ApplicationContextRunner() .withConfiguration(AutoConfigurations.of(AzureServiceBusTopicAutoConfiguration.class)); @Test public void testAzureServiceBusTopicDisabled() { this.contextRunner.withPropertyValues(SERVICE_BUS_PROPERTY_PREFIX + "enabled=false") .run(context -> assertThat(context).doesNotHaveBean(ServiceBusTopicOperation.class)); } @Test public void testWithoutAzureServiceBusTopicClient() { this.contextRunner.withClassLoader(new FilteredClassLoader(TopicClient.class)) .run(context -> assertThat(context).doesNotHaveBean(ServiceBusTopicOperation.class)); } @Test public void testWithoutServiceBusNamespaceManager() { this.contextRunner.withUserConfiguration(TestConfigWithConnectionStringProvider.class) .run(c -> assertThat(c).doesNotHaveBean(ServiceBusTopicManager.class) .doesNotHaveBean(ServiceBusTopicSubscriptionManager.class)); } @Test public void testWithServiceBusNamespaceManager() { this.contextRunner.withUserConfiguration(TestConfigWithConnectionStringProvider.class, TestConfigWithServiceBusNamespaceManager.class) .run(context -> assertThat(context).hasSingleBean(ServiceBusTopicManager.class) .hasSingleBean(ServiceBusTopicSubscriptionManager.class)); } @Test public void testTopicClientFactoryCreated() { this.contextRunner.withUserConfiguration(TestConfigWithConnectionStringProvider.class, TestConfigWithServiceBusNamespaceManager.class) .run(context -> assertThat(context).hasSingleBean(ServiceBusTopicClientFactory.class) .hasSingleBean(ServiceBusTopicOperation.class)); } @Test public void testConnectionStringProvided() { this.contextRunner.withPropertyValues(SERVICE_BUS_PROPERTY_PREFIX + "connection-string=str1") .withUserConfiguration(AzureServiceBusAutoConfiguration.class) .run(context 
-> { assertThat(context.getBean(ServiceBusConnectionStringProvider.class).getConnectionString()).isEqualTo("str1"); assertThat(context).doesNotHaveBean(ServiceBusNamespaceManager.class); assertThat(context).doesNotHaveBean(ServiceBusTopicManager.class); assertThat(context).doesNotHaveBean(ServiceBusTopicSubscriptionManager.class); assertThat(context).hasSingleBean(ServiceBusTopicClientFactory.class); assertThat(context).hasSingleBean(ServiceBusTopicOperation.class); assertThat(context).hasSingleBean(ServiceBusMessageConverter.class); }); } @Test public void testResourceManagerProvided() { this.contextRunner.withUserConfiguration(TestConfigWithAzureResourceManager.class, TestConfigWithConnectionStringProvider.class, AzureServiceBusAutoConfiguration.class) .withPropertyValues( AZURE_PROPERTY_PREFIX + "resource-group=rg1", SERVICE_BUS_PROPERTY_PREFIX + "namespace=ns1" ) .run(context -> { assertThat(context).hasSingleBean(ServiceBusTopicClientFactory.class); assertThat(context).hasSingleBean(ServiceBusTopicOperation.class); assertThat(context).hasSingleBean(ServiceBusNamespaceManager.class); assertThat(context).hasSingleBean(ServiceBusTopicManager.class); assertThat(context).hasSingleBean(ServiceBusTopicSubscriptionManager.class); }); } @Test @Configuration @EnableConfigurationProperties(AzureProperties.class) public static class TestConfigWithServiceBusNamespaceManager { @Bean public ServiceBusNamespaceManager servicebusNamespaceManager() { return mock(ServiceBusNamespaceManager.class); } } @Configuration @EnableConfigurationProperties(AzureServiceBusProperties.class) public static class TestConfigWithConnectionStringProvider { @Bean public ServiceBusConnectionStringProvider serviceBusConnectionStringProvider() { return new ServiceBusConnectionStringProvider("fake"); } } @Configuration @EnableConfigurationProperties(AzureProperties.class) public static class TestConfigWithAzureResourceManager { @Bean public AzureResourceManager azureResourceManager() { return 
mock(AzureResourceManager.class); } } @Configuration @EnableConfigurationProperties(AzureProperties.class) public static class TestConfigWithMessageConverter { @Bean public ServiceBusMessageConverter messageConverter() { return mock(ServiceBusMessageConverter.class); } } }
how about `if ( jwt.getClaim(claimName) instanceof String) `
private Collection<String> getAuthorities(Jwt jwt) { List<String> authoritiesList = new ArrayList(); for (String claimName : WELL_KNOWN_AUTHORITIES_CLAIM_NAMES) { if (jwt.containsClaim(claimName)) { Object authorities = jwt.getClaim(claimName); if (authorities instanceof String) { if (StringUtils.hasText((String) authorities)) { authoritiesList.addAll(Arrays.asList(((String) authorities).split(" ")) .stream() .map(s -> DEFAULT_SCP_AUTHORITY_PREFIX + s) .collect(Collectors.toList())); } } else if (authorities instanceof Collection) { authoritiesList.addAll(((Collection<?>) authorities) .stream() .filter(s -> StringUtils.hasText((String) s)) .map(s -> DEFAULT_ROLES_AUTHORITY_PREFIX + s) .collect(Collectors.toList())); } } } return authoritiesList; }
if (authorities instanceof String) {
private Collection<String> getAuthorities(Jwt jwt) { Collection<String> authoritiesList = new ArrayList<String>(); for (String claimName : WELL_KNOWN_AUTHORITIES_CLAIM_NAMES) { if (jwt.containsClaim(claimName)) { if (jwt.getClaim(claimName) instanceof String) { if (StringUtils.hasText(jwt.getClaim(claimName))) { authoritiesList.addAll(Arrays.asList(((String) jwt.getClaim(claimName)).split(" ")) .stream() .map(s -> DEFAULT_SCP_AUTHORITY_PREFIX + s) .collect(Collectors.toList())); } } else if (jwt.getClaim(claimName) instanceof Collection) { authoritiesList.addAll(((Collection<?>) jwt.getClaim(claimName)) .stream() .filter(s -> StringUtils.hasText((String) s)) .map(s -> DEFAULT_ROLES_AUTHORITY_PREFIX + s) .collect(Collectors.toList())); } } } return authoritiesList; }
class AADJwtGrantedAuthoritiesConverter implements Converter<Jwt, Collection<GrantedAuthority>> { private static final String DEFAULT_SCP_AUTHORITY_PREFIX = "SCOPE_"; private static final String DEFAULT_ROLES_AUTHORITY_PREFIX = "APPROLE_"; private static final Collection<String> WELL_KNOWN_AUTHORITIES_CLAIM_NAMES = Arrays.asList("scp", "roles"); @Override public Collection<GrantedAuthority> convert(Jwt jwt) { Collection<GrantedAuthority> grantedAuthorities = new ArrayList<>(); for (String authority : getAuthorities(jwt)) { grantedAuthorities.add(new SimpleGrantedAuthority(authority)); } return grantedAuthorities; } }
class AADJwtGrantedAuthoritiesConverter implements Converter<Jwt, Collection<GrantedAuthority>> { private static final String DEFAULT_SCP_AUTHORITY_PREFIX = "SCOPE_"; private static final String DEFAULT_ROLES_AUTHORITY_PREFIX = "APPROLE_"; private static final Collection<String> WELL_KNOWN_AUTHORITIES_CLAIM_NAMES = Arrays.asList("scp", "roles"); @Override public Collection<GrantedAuthority> convert(Jwt jwt) { Collection<GrantedAuthority> grantedAuthorities = new ArrayList<>(); for (String authority : getAuthorities(jwt)) { grantedAuthorities.add(new SimpleGrantedAuthority(authority)); } return grantedAuthorities; } }
Could we flip this to an early out returning `Mono.just(false)`
public Mono<Boolean> onChallenge(HttpPipelineCallContext context, HttpResponse response) { return Mono.defer(() -> { String authHeader = response.getHeaderValue(WWW_AUTHENTICATE); if (response.getStatusCode() == 401 && authHeader != null) { List<AuthenticationChallenge> challenges = parseChallenges(authHeader); for (AuthenticationChallenge authenticationChallenge : challenges) { Map<String, String> extractedChallengeParams = parseChallengeParams(authenticationChallenge.getChallengeParameters()); if (extractedChallengeParams.containsKey(CLAIMS_PARAMETER)) { String claims = new String(Base64.getUrlDecoder() .decode(extractedChallengeParams.get(CLAIMS_PARAMETER)), StandardCharsets.UTF_8); String[] scopes; try { scopes = (String[]) context.getData(ARM_SCOPES_KEY).get(); } catch (NoSuchElementException e) { scopes = this.scopes; } if (scopes == null || scopes.length == 0) { scopes = new String[1]; scopes[0] = ARMScopeHelper.getDefaultScopeFromRequest( context.getHttpRequest(), environment); } return authorizeRequest(context, new TokenRequestContext() .addScopes(scopes).setClaims(claims)) .flatMap(b -> Mono.just(true)); } } } return Mono.just(false); }); }
if (response.getStatusCode() == 401 && authHeader != null) {
public Mono<Boolean> onChallenge(HttpPipelineCallContext context, HttpResponse response) { return Mono.defer(() -> { String authHeader = response.getHeaderValue(WWW_AUTHENTICATE); if (!(response.getStatusCode() == 401 && authHeader != null)) { return Mono.just(false); } else { List<AuthenticationChallenge> challenges = parseChallenges(authHeader); for (AuthenticationChallenge authenticationChallenge : challenges) { Map<String, String> extractedChallengeParams = parseChallengeParams(authenticationChallenge.getChallengeParameters()); if (extractedChallengeParams.containsKey(CLAIMS_PARAMETER)) { String claims = new String(Base64.getUrlDecoder() .decode(extractedChallengeParams.get(CLAIMS_PARAMETER)), StandardCharsets.UTF_8); String[] scopes; try { scopes = (String[]) context.getData(ARM_SCOPES_KEY).get(); } catch (NoSuchElementException e) { scopes = this.scopes; } scopes = getScopes(context, scopes); return authorizeRequest(context, new TokenRequestContext() .addScopes(scopes).setClaims(claims)) .flatMap(b -> Mono.just(true)); } } return Mono.just(false); } }); }
class ARMChallengeAuthenticationPolicy extends BearerTokenAuthenticationChallengePolicy { private static final Pattern AUTHENTICATION_CHALLENGE_PATTERN = Pattern.compile("(\\w+) ((?:\\w+=\".*?\"(?:, )?)+)(?:, )?"); private static final Pattern AUTHENTICATION_CHALLENGE_PARAMS_PATTERN = Pattern.compile("(?:(\\w+)=\"([^\"\"]*)\")+"); private static final String CLAIMS_PARAMETER = "claims"; private final String[] scopes; private final AzureEnvironment environment; private static final String ARM_SCOPES_KEY = "ARMScopes"; /** * Creates ARMChallengeAuthenticationPolicy. * * @param credential the token credential to authenticate the request * @param environment the environment with endpoints for authentication * @param scopes the scopes used in credential, using default scopes when empty */ public ARMChallengeAuthenticationPolicy(TokenCredential credential, AzureEnvironment environment, String... scopes) { super(credential); this.scopes = scopes; this.environment = environment; } @Override public Mono<Void> onBeforeRequest(HttpPipelineCallContext context) { return Mono.defer(() -> { TokenRequestContext trc; String[] scopes = this.scopes; if (scopes == null || scopes.length == 0) { scopes = new String[1]; scopes[0] = ARMScopeHelper.getDefaultScopeFromRequest( context.getHttpRequest(), environment); } trc = new TokenRequestContext().addScopes(scopes); context.setData(ARM_SCOPES_KEY, scopes); return authorizeRequest(context, trc); }); } @Override List<AuthenticationChallenge> parseChallenges(String header) { Matcher matcher = AUTHENTICATION_CHALLENGE_PATTERN.matcher(header); List<AuthenticationChallenge> challenges = new ArrayList<>(); while (matcher.find()) { challenges.add(new AuthenticationChallenge(matcher.group(1), matcher.group(2))); } return challenges; } Map<String, String> parseChallengeParams(String challengeParams) { Matcher matcher = AUTHENTICATION_CHALLENGE_PARAMS_PATTERN.matcher(challengeParams); Map<String, String> challengeParameters = new HashMap<>(); while 
(matcher.find()) { challengeParameters.put(matcher.group(1), matcher.group(2)); } return challengeParameters; } }
class ARMChallengeAuthenticationPolicy extends BearerTokenAuthenticationChallengePolicy { private static final Pattern AUTHENTICATION_CHALLENGE_PATTERN = Pattern.compile("(\\w+) ((?:\\w+=\".*?\"(?:, )?)+)(?:, )?"); private static final Pattern AUTHENTICATION_CHALLENGE_PARAMS_PATTERN = Pattern.compile("(?:(\\w+)=\"([^\"\"]*)\")+"); private static final String CLAIMS_PARAMETER = "claims"; private final String[] scopes; private final AzureEnvironment environment; private static final String ARM_SCOPES_KEY = "ARMScopes"; /** * Creates ARMChallengeAuthenticationPolicy. * * @param credential the token credential to authenticate the request * @param environment the environment with endpoints for authentication * @param scopes the scopes used in credential, using default scopes when empty */ public ARMChallengeAuthenticationPolicy(TokenCredential credential, AzureEnvironment environment, String... scopes) { super(credential); this.scopes = scopes; this.environment = environment; } @Override public Mono<Void> onBeforeRequest(HttpPipelineCallContext context) { return Mono.defer(() -> { String[] scopes = this.scopes; scopes = getScopes(context, scopes); context.setData(ARM_SCOPES_KEY, scopes); return authorizeRequest(context, new TokenRequestContext().addScopes(scopes)); }); } @Override private String[] getScopes(HttpPipelineCallContext context, String[] scopes) { if (CoreUtils.isNullOrEmpty(scopes)) { scopes = new String[1]; scopes[0] = ARMScopeHelper.getDefaultScopeFromRequest( context.getHttpRequest(), environment); } return scopes; } List<AuthenticationChallenge> parseChallenges(String header) { Matcher matcher = AUTHENTICATION_CHALLENGE_PATTERN.matcher(header); List<AuthenticationChallenge> challenges = new ArrayList<>(); while (matcher.find()) { challenges.add(new AuthenticationChallenge(matcher.group(1), matcher.group(2))); } return challenges; } Map<String, String> parseChallengeParams(String challengeParams) { Matcher matcher = 
AUTHENTICATION_CHALLENGE_PARAMS_PATTERN.matcher(challengeParams); Map<String, String> challengeParameters = new HashMap<>(); while (matcher.find()) { challengeParameters.put(matcher.group(1), matcher.group(2)); } return challengeParameters; } }
name should be updated to current test name
public void sendIdempotencyCheck(HttpClient httpClient) { SmsSendOptions options = new SmsSendOptions(); options.setDeliveryReportEnabled(true); options.setTag("New Tag"); SmsClientBuilder builder = getSmsClient(httpClient); client = setupSyncClient(builder, "sendToSingleUserWithOptions"); SmsSendResult response1 = client.send(SMS_SERVICE_PHONE_NUMBER, SMS_SERVICE_PHONE_NUMBER, MESSAGE, options); assertTrue(response1.isSuccessful()); SmsSendResult response2 = client.send(SMS_SERVICE_PHONE_NUMBER, SMS_SERVICE_PHONE_NUMBER, MESSAGE, options); assertTrue(response2.isSuccessful()); assertNotEquals(response1.getMessageId(), response2.getMessageId()); }
client = setupSyncClient(builder, "sendToSingleUserWithOptions");
public void sendIdempotencyCheck(HttpClient httpClient) { SmsSendOptions options = new SmsSendOptions(); options.setDeliveryReportEnabled(true); options.setTag("New Tag"); SmsClientBuilder builder = getSmsClient(httpClient); client = setupSyncClient(builder, "sendIdempotencyCheck"); SmsSendResult response1 = client.send(SMS_SERVICE_PHONE_NUMBER, SMS_SERVICE_PHONE_NUMBER, MESSAGE, options); assertTrue(response1.isSuccessful()); SmsSendResult response2 = client.send(SMS_SERVICE_PHONE_NUMBER, SMS_SERVICE_PHONE_NUMBER, MESSAGE, options); assertTrue(response2.isSuccessful()); assertNotEquals(response1.getMessageId(), response2.getMessageId()); }
/*
 * Synchronous-client integration tests for the Azure Communication SMS service:
 * builder configuration, connection-string and token-credential auth, single/multiple
 * recipients, send options, and error paths (fake/unauthorized sender numbers).
 * NOTE(review): the @MethodSource annotation strings below are visibly truncated
 * ("com.azure.core.test.TestBase ...) — presumably they referenced
 * "com.azure.core.test.TestBase#getHttpClients"; confirm against the original file.
 * NOTE(review): sendFromFakeNumber passes "sendToSingleUserWithOptions" as its
 * recorded test name — it should match the test method name ("sendFromFakeNumber").
 */
class SmsClientTests extends SmsTestBase { private List<String> to; private SmsClient client; @Override protected void beforeTest() { super.beforeTest(); } @ParameterizedTest @MethodSource("com.azure.core.test.TestBase public void createSyncClientUsingConnectionString(HttpClient httpClient) { to = new ArrayList<String>(); to.add(SMS_SERVICE_PHONE_NUMBER); SmsClientBuilder builder = getSmsClientUsingConnectionString(httpClient); client = setupSyncClient(builder, "createSyncClientUsingConnectionString"); assertNotNull(client); Iterable<SmsSendResult> response = client.send(SMS_SERVICE_PHONE_NUMBER, to, MESSAGE, null, Context.NONE); assertNotNull(response); for (SmsSendResult r : response) { assertTrue(r.isSuccessful()); assertNull(r.getErrorMessage()); } } @ParameterizedTest @MethodSource("com.azure.core.test.TestBase public void builderNoEndpoint(HttpClient httpClient) { SmsClientBuilder builder = getSmsClientUsingConnectionString(httpClient); builder .endpoint(null) .httpClient(new NoOpHttpClient()); assertThrows(Exception.class, () -> { builder.buildAsyncClient(); }); } @ParameterizedTest @MethodSource("com.azure.core.test.TestBase public void builderServiceVersion(HttpClient httpClient) { assertNotNull(SmsServiceVersion.getLatest()); } @ParameterizedTest @MethodSource("com.azure.core.test.TestBase public void builderNotRetryPolicy(HttpClient httpClient) { SmsClientBuilder builder = getSmsClientUsingConnectionString(httpClient); builder.retryPolicy(null); client = setupSyncClient(builder, "builderNotRetryPolicy"); assertNotNull(client); } @ParameterizedTest @MethodSource("com.azure.core.test.TestBase public void sendSmsUsingTokenCredential(HttpClient httpClient) { TokenCredential tokenCredential = new DefaultAzureCredentialBuilder().build(); to = new ArrayList<>(); to.add(SMS_SERVICE_PHONE_NUMBER); SmsClientBuilder builder = getSmsClientWithToken(httpClient, tokenCredential); client = setupSyncClient(builder, "sendSmsUsingTokenCredential"); assertNotNull(client); 
Iterable<SmsSendResult> response = client.send(SMS_SERVICE_PHONE_NUMBER, to, MESSAGE, null, Context.NONE); assertNotNull(response); for (SmsSendResult r : response) { assertTrue(r.isSuccessful()); } } @ParameterizedTest @MethodSource("com.azure.core.test.TestBase public void sendToIncorrectPhoneNumber(HttpClient httpClient) { to = new ArrayList<String>(); to.add("+155512345678"); SmsClientBuilder builder = getSmsClientUsingConnectionString(httpClient); client = setupSyncClient(builder, "sendToIncorrectPhoneNumber"); assertNotNull(client); Iterable<SmsSendResult> response = client.send(SMS_SERVICE_PHONE_NUMBER, to, MESSAGE, null, Context.NONE); assertNotNull(response); for (SmsSendResult r : response) { assertFalse(r.isSuccessful()); } } @ParameterizedTest @MethodSource("com.azure.core.test.TestBase public void sendFromFakeNumber(HttpClient httpClient) { SmsSendOptions options = new SmsSendOptions(); options.setDeliveryReportEnabled(true); options.setTag("New Tag"); SmsClientBuilder builder = getSmsClient(httpClient); client = setupSyncClient(builder, "sendToSingleUserWithOptions"); try { SmsSendResult response = client.send("+155512345678", SMS_SERVICE_PHONE_NUMBER, MESSAGE, options); } catch (Exception exception) { assertEquals(400, ((HttpResponseException) exception).getResponse().getStatusCode()); } } @ParameterizedTest @MethodSource("com.azure.core.test.TestBase public void sendFromUnauthorizedNumber(HttpClient httpClient) { SmsSendOptions options = new SmsSendOptions(); options.setDeliveryReportEnabled(true); options.setTag("New Tag"); SmsClientBuilder builder = getSmsClient(httpClient); client = setupSyncClient(builder, "sendFromUnauthorizedNumber"); try { SmsSendResult response = client.send("+18007342577", SMS_SERVICE_PHONE_NUMBER, MESSAGE, options, Context.NONE); } catch (Exception exception) { assertEquals(404, ((HttpResponseException) exception).getResponse().getStatusCode()); } } @ParameterizedTest @MethodSource("com.azure.core.test.TestBase public void 
sendToMultipleUsers(HttpClient httpClient) { to = new ArrayList<String>(); to.add(SMS_SERVICE_PHONE_NUMBER); SmsClientBuilder builder = getSmsClient(httpClient); client = setupSyncClient(builder, "sendToMultipleUsers"); Iterable<SmsSendResult> response = client.send(SMS_SERVICE_PHONE_NUMBER, to, MESSAGE); assertNotNull(response); for (SmsSendResult r : response) { assertTrue(r.isSuccessful()); } } @ParameterizedTest @MethodSource("com.azure.core.test.TestBase public void sendToMultipleUsersWithOptions(HttpClient httpClient) { SmsSendOptions options = new SmsSendOptions(); options.setDeliveryReportEnabled(true); options.setTag("New Tag"); to = new ArrayList<String>(); to.add(SMS_SERVICE_PHONE_NUMBER); SmsClientBuilder builder = getSmsClient(httpClient); client = setupSyncClient(builder, "sendToMultipleUsersWithOptions"); Iterable<SmsSendResult> response = client.send(SMS_SERVICE_PHONE_NUMBER, to, MESSAGE, options, Context.NONE); assertNotNull(response); for (SmsSendResult r : response) { assertTrue(r.isSuccessful()); } } @ParameterizedTest @MethodSource("com.azure.core.test.TestBase public void sendToSingleUser(HttpClient httpClient) { SmsClientBuilder builder = getSmsClient(httpClient); client = setupSyncClient(builder, "sendToSingleUser"); SmsSendResult response = client.send(SMS_SERVICE_PHONE_NUMBER, SMS_SERVICE_PHONE_NUMBER, MESSAGE); assertNotNull(response); assertTrue(response.isSuccessful()); assertNull(response.getErrorMessage()); } @ParameterizedTest @MethodSource("com.azure.core.test.TestBase @ParameterizedTest @MethodSource("com.azure.core.test.TestBase public void sendToSingleUserWithOptions(HttpClient httpClient) { SmsSendOptions options = new SmsSendOptions(); options.setDeliveryReportEnabled(true); options.setTag("New Tag"); to = new ArrayList<String>(); to.add(SMS_SERVICE_PHONE_NUMBER); SmsClientBuilder builder = getSmsClient(httpClient); client = setupSyncClient(builder, "sendToSingleUserWithOptions"); SmsSendResult response = 
client.send(SMS_SERVICE_PHONE_NUMBER, SMS_SERVICE_PHONE_NUMBER, MESSAGE, options, Context.NONE); assertNotNull(response); assertTrue(response.isSuccessful()); } private SmsClient setupSyncClient(SmsClientBuilder builder, String testName) { return addLoggingPolicy(builder, testName).buildClient(); } }
/*
 * Synchronous-client integration tests for the Azure Communication SMS service
 * (revised copy: adds builderTestsConfigurations, which wires an explicit HttpPipeline
 * with HMAC auth + user-agent policies, and corrects the "sendFromFakeNumber"
 * recorded-session name).
 * NOTE(review): the @MethodSource annotation strings below are visibly truncated
 * ("com.azure.core.test.TestBase ...) — presumably they referenced
 * "com.azure.core.test.TestBase#getHttpClients"; confirm against the original file.
 */
class SmsClientTests extends SmsTestBase { private List<String> to; private SmsClient client; @Override protected void beforeTest() { super.beforeTest(); } @ParameterizedTest @MethodSource("com.azure.core.test.TestBase public void createSyncClientUsingConnectionString(HttpClient httpClient) { to = new ArrayList<String>(); to.add(SMS_SERVICE_PHONE_NUMBER); SmsClientBuilder builder = getSmsClientUsingConnectionString(httpClient); client = setupSyncClient(builder, "createSyncClientUsingConnectionString"); assertNotNull(client); Iterable<SmsSendResult> response = client.send(SMS_SERVICE_PHONE_NUMBER, to, MESSAGE, null, Context.NONE); assertNotNull(response); for (SmsSendResult r : response) { assertTrue(r.isSuccessful()); assertNull(r.getErrorMessage()); } } @ParameterizedTest @MethodSource("com.azure.core.test.TestBase public void builderNoEndpoint(HttpClient httpClient) { SmsClientBuilder builder = getSmsClientUsingConnectionString(httpClient); builder .endpoint(null) .httpClient(new NoOpHttpClient()); assertThrows(Exception.class, () -> { builder.buildAsyncClient(); }); } @ParameterizedTest @MethodSource("com.azure.core.test.TestBase public void builderServiceVersion(HttpClient httpClient) { assertNotNull(SmsServiceVersion.getLatest()); } @ParameterizedTest @MethodSource("com.azure.core.test.TestBase public void builderTestsConfigurations(HttpClient httpClient) { SmsClientBuilder builder = getSmsClientUsingConnectionString(httpClient); builder.retryPolicy(null); AzureKeyCredential credential = new AzureKeyCredential(ACCESSKEY); HttpPipelinePolicy[] policies = new HttpPipelinePolicy[2]; policies[0] = new HmacAuthenticationPolicy(credential); policies[1] = new UserAgentPolicy(); HttpPipeline pipeline = new HttpPipelineBuilder() .policies(policies) .httpClient(httpClient) .build(); builder.pipeline(pipeline); client = setupSyncClient(builder, "builderTestsConfigurations"); assertNotNull(client); } @ParameterizedTest @MethodSource("com.azure.core.test.TestBase public 
void builderNotRetryPolicy(HttpClient httpClient) { SmsClientBuilder builder = getSmsClientUsingConnectionString(httpClient); builder.retryPolicy(null); client = setupSyncClient(builder, "builderNotRetryPolicy"); assertNotNull(client); } @ParameterizedTest @MethodSource("com.azure.core.test.TestBase public void sendSmsUsingTokenCredential(HttpClient httpClient) { TokenCredential tokenCredential = new DefaultAzureCredentialBuilder().build(); to = new ArrayList<>(); to.add(SMS_SERVICE_PHONE_NUMBER); SmsClientBuilder builder = getSmsClientWithToken(httpClient, tokenCredential); client = setupSyncClient(builder, "sendSmsUsingTokenCredential"); assertNotNull(client); Iterable<SmsSendResult> response = client.send(SMS_SERVICE_PHONE_NUMBER, to, MESSAGE, null, Context.NONE); assertNotNull(response); for (SmsSendResult r : response) { assertTrue(r.isSuccessful()); } } @ParameterizedTest @MethodSource("com.azure.core.test.TestBase public void sendToIncorrectPhoneNumber(HttpClient httpClient) { to = new ArrayList<String>(); to.add("+155512345678"); SmsClientBuilder builder = getSmsClientUsingConnectionString(httpClient); client = setupSyncClient(builder, "sendToIncorrectPhoneNumber"); assertNotNull(client); Iterable<SmsSendResult> response = client.send(SMS_SERVICE_PHONE_NUMBER, to, MESSAGE, null, Context.NONE); assertNotNull(response); for (SmsSendResult r : response) { assertFalse(r.isSuccessful()); } } @ParameterizedTest @MethodSource("com.azure.core.test.TestBase public void sendFromFakeNumber(HttpClient httpClient) { SmsSendOptions options = new SmsSendOptions(); options.setDeliveryReportEnabled(true); options.setTag("New Tag"); SmsClientBuilder builder = getSmsClient(httpClient); client = setupSyncClient(builder, "sendFromFakeNumber"); try { SmsSendResult response = client.send("+155512345678", SMS_SERVICE_PHONE_NUMBER, MESSAGE, options); } catch (Exception exception) { assertEquals(400, ((HttpResponseException) exception).getResponse().getStatusCode()); } } 
@ParameterizedTest @MethodSource("com.azure.core.test.TestBase public void sendFromUnauthorizedNumber(HttpClient httpClient) { SmsSendOptions options = new SmsSendOptions(); options.setDeliveryReportEnabled(true); options.setTag("New Tag"); SmsClientBuilder builder = getSmsClient(httpClient); client = setupSyncClient(builder, "sendFromUnauthorizedNumber"); try { SmsSendResult response = client.send("+18007342577", SMS_SERVICE_PHONE_NUMBER, MESSAGE, options, Context.NONE); } catch (Exception exception) { assertEquals(404, ((HttpResponseException) exception).getResponse().getStatusCode()); } } @ParameterizedTest @MethodSource("com.azure.core.test.TestBase public void sendToMultipleUsers(HttpClient httpClient) { to = new ArrayList<String>(); to.add(SMS_SERVICE_PHONE_NUMBER); SmsClientBuilder builder = getSmsClient(httpClient); client = setupSyncClient(builder, "sendToMultipleUsers"); Iterable<SmsSendResult> response = client.send(SMS_SERVICE_PHONE_NUMBER, to, MESSAGE); assertNotNull(response); for (SmsSendResult r : response) { assertTrue(r.isSuccessful()); } } @ParameterizedTest @MethodSource("com.azure.core.test.TestBase public void sendToMultipleUsersWithOptions(HttpClient httpClient) { SmsSendOptions options = new SmsSendOptions(); options.setDeliveryReportEnabled(true); options.setTag("New Tag"); to = new ArrayList<String>(); to.add(SMS_SERVICE_PHONE_NUMBER); SmsClientBuilder builder = getSmsClient(httpClient); client = setupSyncClient(builder, "sendToMultipleUsersWithOptions"); Iterable<SmsSendResult> response = client.send(SMS_SERVICE_PHONE_NUMBER, to, MESSAGE, options, Context.NONE); assertNotNull(response); for (SmsSendResult r : response) { assertTrue(r.isSuccessful()); } } @ParameterizedTest @MethodSource("com.azure.core.test.TestBase public void sendToSingleUser(HttpClient httpClient) { SmsClientBuilder builder = getSmsClient(httpClient); client = setupSyncClient(builder, "sendToSingleUser"); SmsSendResult response = client.send(SMS_SERVICE_PHONE_NUMBER, 
SMS_SERVICE_PHONE_NUMBER, MESSAGE); assertNotNull(response); assertTrue(response.isSuccessful()); assertNull(response.getErrorMessage()); } @ParameterizedTest @MethodSource("com.azure.core.test.TestBase @ParameterizedTest @MethodSource("com.azure.core.test.TestBase public void sendToSingleUserWithOptions(HttpClient httpClient) { SmsSendOptions options = new SmsSendOptions(); options.setDeliveryReportEnabled(true); options.setTag("New Tag"); to = new ArrayList<String>(); to.add(SMS_SERVICE_PHONE_NUMBER); SmsClientBuilder builder = getSmsClient(httpClient); client = setupSyncClient(builder, "sendToSingleUserWithOptions"); SmsSendResult response = client.send(SMS_SERVICE_PHONE_NUMBER, SMS_SERVICE_PHONE_NUMBER, MESSAGE, options, Context.NONE); assertNotNull(response); assertTrue(response.isSuccessful()); } private SmsClient setupSyncClient(SmsClientBuilder builder, String testName) { return addLoggingPolicy(builder, testName).buildClient(); } }
The ternary `? true : false` is redundant — `Constant.AZURE_CLOUD_TYPE_GLOBAL.equalsIgnoreCase(AZURE_CLOUD_TYPE)` already evaluates to the desired boolean, so the comparison alone is enough.
public AADB2CSeleniumITHelper(Class<?> appClass, Map<String, String> properties) { super(appClass, properties); userEmail = AAD_B2C_USER_EMAIL; userPassword = AAD_B2C_USER_PASSWORD; isAzureCloudGlobal = Constant.AZURE_CLOUD_TYPE_GLOBAL.equalsIgnoreCase(AZURE_CLOUD_TYPE) ? true : false; }
isAzureCloudGlobal = Constant.AZURE_CLOUD_TYPE_GLOBAL.equalsIgnoreCase(AZURE_CLOUD_TYPE) ? true : false;
public AADB2CSeleniumITHelper(Class<?> appClass, Map<String, String> properties) { super(appClass, properties); userEmail = AAD_B2C_USER_EMAIL; userPassword = AAD_B2C_USER_PASSWORD; isAzureCloudGlobal = Constant.AZURE_CLOUD_TYPE_GLOBAL.equalsIgnoreCase(AZURE_CLOUD_TYPE); }
/*
 * Selenium helper that drives the AAD B2C sign-in, profile-edit, and logout flows for
 * integration tests. isAzureCloudGlobal switches element selectors between the global
 * cloud login page ("email" field, submit button) and other clouds ("logonIdentifier",
 * "next"). The getJobTitle/getName/getUserFlowName accessors scrape fixed table rows
 * from the rendered results page.
 * NOTE(review): several string literals below (e.g. urlMatches("^http: and
 * replaceFirst("http:) are truncated — likely lost during extraction; confirm against
 * the original source before copying from this listing.
 */
class AADB2CSeleniumITHelper extends SeleniumITHelper { private String userEmail; private String userPassword; private boolean isAzureCloudGlobal; public static Map<String, String> createDefaultProperteis() { Map<String, String> defaultProperteis = new HashMap<>(); defaultProperteis.put("azure.activedirectory.b2c.base-uri", AAD_B2C_BASE_URI); defaultProperteis.put("azure.activedirectory.b2c.client-id", AAD_B2C_CLIENT_ID); defaultProperteis.put("azure.activedirectory.b2c.client-secret", AAD_B2C_CLIENT_SECRET); defaultProperteis.put("azure.activedirectory.b2c.reply-url", AAD_B2C_REPLY_URL); defaultProperteis .put("azure.activedirectory.b2c.user-flows.sign-up-or-sign-in", AAD_B2C_SIGN_UP_OR_SIGN_IN); defaultProperteis .put("azure.activedirectory.b2c.user-flows.profile-edit", AAD_B2C_PROFILE_EDIT); return defaultProperteis; } public void logIn() { driver.get(app.root()); if (isAzureCloudGlobal) { wait.until(presenceOfElementLocated(By.id("email"))).sendKeys(userEmail); } else { wait.until(presenceOfElementLocated(By.id("logonIdentifier"))).sendKeys(userEmail); } wait.until(presenceOfElementLocated(By.id("password"))).sendKeys(userPassword); if (isAzureCloudGlobal) { wait.until(presenceOfElementLocated(By.cssSelector("button[type='submit']"))).sendKeys(Keys.ENTER); } else { wait.until(presenceOfElementLocated(By.id("next"))).sendKeys(Keys.ENTER); } manualRedirection(); } public void profileEditJobTitle(String newJobTitle) { wait.until(presenceOfElementLocated(By.id("profileEdit"))).click(); changeJobTile(newJobTitle); if (isAzureCloudGlobal) { wait.until(ExpectedConditions.elementToBeClickable(By.cssSelector("button[type='submit']"))).click(); } else { wait.until(presenceOfElementLocated(By.id("continue"))).sendKeys(Keys.ENTER); } manualRedirection(); } public void logout() { wait.until(presenceOfElementLocated(By.id("logout"))).click(); wait.until(ExpectedConditions.elementToBeClickable(By.cssSelector("button[type='submit']"))).submit(); manualRedirection(); 
wait.until(ExpectedConditions.elementToBeClickable(By.cssSelector( "a[href='/oauth2/authorization/" + AAD_B2C_SIGN_UP_OR_SIGN_IN + "']"))).click(); } private void manualRedirection() { wait.until(ExpectedConditions.urlMatches("^http: String currentUrl = driver.getCurrentUrl(); String newCurrentUrl = currentUrl.replaceFirst("http: driver.get(newCurrentUrl); } public void changeJobTile(String newValue) { String elementId = "jobTitle"; wait.until(presenceOfElementLocated(By.id(elementId))).clear(); wait.until(presenceOfElementLocated(By.id(elementId))).sendKeys(newValue); } public String getJobTitle() { return driver.findElement(By.cssSelector("tbody")) .findElement(By.xpath("tr[10]")) .findElement(By.xpath("th[2]")) .getText(); } public String getName() { return driver.findElement(By.cssSelector("tbody")) .findElement(By.xpath("tr[2]")) .findElement(By.xpath("th[2]")) .getText(); } public String getUserFlowName() { return driver.findElement(By.cssSelector("tbody")) .findElement(By.xpath("tr[last()]")) .findElement(By.xpath("th[2]")) .getText(); } public String getSignInButtonText() { if (isAzureCloudGlobal) { return wait.until(ExpectedConditions.elementToBeClickable(By.cssSelector("button[type='submit']"))).getText(); } else { return wait.until(ExpectedConditions.elementToBeClickable(By.id("next"))).getText(); } } }
/*
 * Selenium helper for the AAD B2C sign-in / profile-edit / logout flows (revised copy:
 * adds an SLF4J logger and, in logIn, logs the current URL and waits for an https URL
 * match before interacting with the page). isAzureCloudGlobal switches selectors
 * between the global cloud login page and other clouds.
 * NOTE(review): several string literals below (e.g. urlMatches("^https: and
 * replaceFirst("http:) are truncated — likely lost during extraction; confirm against
 * the original source before copying from this listing.
 */
class AADB2CSeleniumITHelper extends SeleniumITHelper { private static final Logger LOGGER = LoggerFactory.getLogger(AADB2CSeleniumITHelper.class); private String userEmail; private String userPassword; private boolean isAzureCloudGlobal; public static Map<String, String> createDefaultProperteis() { Map<String, String> defaultProperteis = new HashMap<>(); defaultProperteis.put("azure.activedirectory.b2c.base-uri", AAD_B2C_BASE_URI); defaultProperteis.put("azure.activedirectory.b2c.client-id", AAD_B2C_CLIENT_ID); defaultProperteis.put("azure.activedirectory.b2c.client-secret", AAD_B2C_CLIENT_SECRET); defaultProperteis.put("azure.activedirectory.b2c.reply-url", AAD_B2C_REPLY_URL); defaultProperteis .put("azure.activedirectory.b2c.user-flows.sign-up-or-sign-in", AAD_B2C_SIGN_UP_OR_SIGN_IN); defaultProperteis .put("azure.activedirectory.b2c.user-flows.profile-edit", AAD_B2C_PROFILE_EDIT); return defaultProperteis; } public void logIn() { driver.get(app.root()); LOGGER.info("Current url is " + driver.getCurrentUrl()); wait.until(ExpectedConditions.urlMatches("^https: if (isAzureCloudGlobal) { wait.until(presenceOfElementLocated(By.id("email"))).sendKeys(userEmail); } else { wait.until(presenceOfElementLocated(By.id("logonIdentifier"))).sendKeys(userEmail); } wait.until(presenceOfElementLocated(By.id("password"))).sendKeys(userPassword); if (isAzureCloudGlobal) { wait.until(presenceOfElementLocated(By.cssSelector("button[type='submit']"))).sendKeys(Keys.ENTER); } else { wait.until(presenceOfElementLocated(By.id("next"))).sendKeys(Keys.ENTER); } manualRedirection(); } public void profileEditJobTitle(String newJobTitle) { wait.until(presenceOfElementLocated(By.id("profileEdit"))).click(); changeJobTile(newJobTitle); if (isAzureCloudGlobal) { wait.until(ExpectedConditions.elementToBeClickable(By.cssSelector("button[type='submit']"))).click(); } else { wait.until(presenceOfElementLocated(By.id("continue"))).sendKeys(Keys.ENTER); } manualRedirection(); } public void logout() 
{ wait.until(presenceOfElementLocated(By.id("logout"))).click(); wait.until(ExpectedConditions.elementToBeClickable(By.cssSelector("button[type='submit']"))).submit(); manualRedirection(); wait.until(ExpectedConditions.elementToBeClickable(By.cssSelector( "a[href='/oauth2/authorization/" + AAD_B2C_SIGN_UP_OR_SIGN_IN + "']"))).click(); } private void manualRedirection() { wait.until(ExpectedConditions.urlMatches("^http: String currentUrl = driver.getCurrentUrl(); String newCurrentUrl = currentUrl.replaceFirst("http: driver.get(newCurrentUrl); } public void changeJobTile(String newValue) { String elementId = "jobTitle"; wait.until(presenceOfElementLocated(By.id(elementId))).clear(); wait.until(presenceOfElementLocated(By.id(elementId))).sendKeys(newValue); } public String getJobTitle() { return driver.findElement(By.cssSelector("tbody")) .findElement(By.xpath("tr[10]")) .findElement(By.xpath("th[2]")) .getText(); } public String getName() { return driver.findElement(By.cssSelector("tbody")) .findElement(By.xpath("tr[2]")) .findElement(By.xpath("th[2]")) .getText(); } public String getUserFlowName() { return driver.findElement(By.cssSelector("tbody")) .findElement(By.xpath("tr[last()]")) .findElement(By.xpath("th[2]")) .getText(); } public String getSignInButtonText() { if (isAzureCloudGlobal) { return wait.until(ExpectedConditions.elementToBeClickable(By.cssSelector("button[type='submit']"))).getText(); } else { return wait.until(ExpectedConditions.elementToBeClickable(By.id("next"))).getText(); } } }
The error message in [messages](https://github.com/Azure/azure-sdk-for-java/blob/master/sdk/eventhubs/azure-messaging-eventhubs/src/main/resources/eventhubs-messages.properties#L1) file should be updated as well.
private void endProcessTracingSpan(Context processSpanContext, Signal<Void> signal) { if (processSpanContext == null) { return; } Optional<Object> spanScope = processSpanContext.getData(SCOPE_KEY); if (!spanScope.isPresent() || !tracerProvider.isEnabled()) { return; } Object spanObject = spanScope.get(); if (spanObject instanceof AutoCloseable) { AutoCloseable close = (AutoCloseable) spanObject; try { close.close(); } catch (Exception exception) { logger.error(Messages.EVENT_PROCESSOR_RUN_END, exception); } } else { logger.verbose(String.format(Locale.US, Messages.PROCESS_SPAN_SCOPE_TYPE_ERROR, spanObject != null ? spanObject.getClass() : "null")); } tracerProvider.endSpan(processSpanContext, signal); }
if (spanObject instanceof AutoCloseable) {
private void endProcessTracingSpan(Context processSpanContext, Signal<Void> signal) { if (processSpanContext == null) { return; } Optional<Object> spanScope = processSpanContext.getData(SCOPE_KEY); if (!spanScope.isPresent() || !tracerProvider.isEnabled()) { return; } Object spanObject = spanScope.get(); if (spanObject instanceof AutoCloseable) { AutoCloseable close = (AutoCloseable) spanObject; try { close.close(); } catch (Exception exception) { logger.error(Messages.EVENT_PROCESSOR_RUN_END, exception); } } else { logger.verbose(String.format(Locale.US, Messages.PROCESS_SPAN_SCOPE_TYPE_ERROR, spanObject != null ? spanObject.getClass() : "null")); } tracerProvider.endSpan(processSpanContext, signal); }
/*
 * Manages one consumer ("partition pump") per owned Event Hubs partition for the
 * EventProcessorClient: starts a pump for each newly claimed partition (choosing the
 * start position from checkpoint offset, then sequence number, then the configured
 * initial position, then latest), verifies and cleans up closed connections,
 * dispatches received events to the user's PartitionProcessor in either batch or
 * single-event mode, and starts/ends tracing spans around event processing.
 * NOTE(review): some literals in this listing appear split/truncated by extraction
 * (e.g. the "Connection closed for {}, partition {}. Removing the consumer." log
 * message spans two lines); confirm against the original source before copying.
 */
class PartitionPumpManager { private final ClientLogger logger = new ClientLogger(PartitionPumpManager.class); private final CheckpointStore checkpointStore; private final Map<String, EventHubConsumerAsyncClient> partitionPumps = new ConcurrentHashMap<>(); private final Supplier<PartitionProcessor> partitionProcessorFactory; private final EventHubClientBuilder eventHubClientBuilder; private final TracerProvider tracerProvider; private final boolean trackLastEnqueuedEventProperties; private final Map<String, EventPosition> initialPartitionEventPosition; private final Duration maxWaitTime; private final int maxBatchSize; private final boolean batchReceiveMode; /** * Creates an instance of partition pump manager. * * @param checkpointStore The partition manager that is used to store and update checkpoints. * @param partitionProcessorFactory The partition processor factory that is used to create new instances of {@link * PartitionProcessor} when new partition pumps are started. * @param eventHubClientBuilder The client builder used to create new clients (and new connections) for each * partition processed by this {@link EventProcessorClient}. * @param trackLastEnqueuedEventProperties If set to {@code true}, all events received by this EventProcessorClient * will also include the last enqueued event properties for it's respective partitions. * @param tracerProvider The tracer implementation. * @param initialPartitionEventPosition Map of initial event positions for partition ids. * @param maxBatchSize The maximum batch size to receive per users' process handler invocation. * @param maxWaitTime The maximum time to wait to receive a batch or a single event. * @param batchReceiveMode The boolean value indicating if this processor is configured to receive in batches or * single events. 
*/ PartitionPumpManager(CheckpointStore checkpointStore, Supplier<PartitionProcessor> partitionProcessorFactory, EventHubClientBuilder eventHubClientBuilder, boolean trackLastEnqueuedEventProperties, TracerProvider tracerProvider, Map<String, EventPosition> initialPartitionEventPosition, int maxBatchSize, Duration maxWaitTime, boolean batchReceiveMode) { this.checkpointStore = checkpointStore; this.partitionProcessorFactory = partitionProcessorFactory; this.eventHubClientBuilder = eventHubClientBuilder; this.trackLastEnqueuedEventProperties = trackLastEnqueuedEventProperties; this.tracerProvider = tracerProvider; this.initialPartitionEventPosition = initialPartitionEventPosition; this.maxBatchSize = maxBatchSize; this.maxWaitTime = maxWaitTime; this.batchReceiveMode = batchReceiveMode; } /** * Stops all partition pumps that are actively consuming events. This method is invoked when the {@link * EventProcessorClient} is requested to stop. */ void stopAllPartitionPumps() { this.partitionPumps.forEach((partitionId, eventHubConsumer) -> { try { eventHubConsumer.close(); } catch (Exception ex) { logger.warning(Messages.FAILED_CLOSE_CONSUMER_PARTITION, partitionId, ex); } finally { partitionPumps.remove(partitionId); } }); } /** * Checks the state of the connection for the given partition. If the connection is closed, then this method will * remove the partition from the list of partition pumps. * * @param ownership The partition ownership information for which the connection state will be verified. */ void verifyPartitionConnection(PartitionOwnership ownership) { String partitionId = ownership.getPartitionId(); if (partitionPumps.containsKey(partitionId)) { EventHubConsumerAsyncClient consumerClient = partitionPumps.get(partitionId); if (consumerClient.isConnectionClosed()) { logger.info("Connection closed for {}, partition {}. 
Removing the consumer.", ownership.getEventHubName(), partitionId); try { partitionPumps.get(partitionId).close(); } catch (Exception ex) { logger.warning(Messages.FAILED_CLOSE_CONSUMER_PARTITION, partitionId, ex); } finally { partitionPumps.remove(partitionId); } } } } /** * Starts a new partition pump for the newly claimed partition. If the partition already has an active partition * pump, this will not create a new consumer. * * @param claimedOwnership The details of partition ownership for which new partition pump is requested to start. */ void startPartitionPump(PartitionOwnership claimedOwnership, Checkpoint checkpoint) { if (partitionPumps.containsKey(claimedOwnership.getPartitionId())) { logger.verbose("Consumer is already running for this partition {}", claimedOwnership.getPartitionId()); return; } try { PartitionContext partitionContext = new PartitionContext(claimedOwnership.getFullyQualifiedNamespace(), claimedOwnership.getEventHubName(), claimedOwnership.getConsumerGroup(), claimedOwnership.getPartitionId()); PartitionProcessor partitionProcessor = this.partitionProcessorFactory.get(); InitializationContext initializationContext = new InitializationContext(partitionContext); partitionProcessor.initialize(initializationContext); EventPosition startFromEventPosition = null; if (checkpoint != null && checkpoint.getOffset() != null) { startFromEventPosition = EventPosition.fromOffset(checkpoint.getOffset()); } else if (checkpoint != null && checkpoint.getSequenceNumber() != null) { startFromEventPosition = EventPosition.fromSequenceNumber(checkpoint.getSequenceNumber()); } else if (initialPartitionEventPosition.containsKey(claimedOwnership.getPartitionId())) { startFromEventPosition = initialPartitionEventPosition.get(claimedOwnership.getPartitionId()); } else { startFromEventPosition = EventPosition.latest(); } logger.info("Starting event processing from {} for partition {}", startFromEventPosition, claimedOwnership.getPartitionId()); ReceiveOptions 
receiveOptions = new ReceiveOptions().setOwnerLevel(0L) .setTrackLastEnqueuedEventProperties(trackLastEnqueuedEventProperties); EventHubConsumerAsyncClient eventHubConsumer = eventHubClientBuilder.buildAsyncClient() .createConsumer(claimedOwnership.getConsumerGroup(), EventHubClientBuilder.DEFAULT_PREFETCH_COUNT); partitionPumps.put(claimedOwnership.getPartitionId(), eventHubConsumer); Flux<Flux<PartitionEvent>> partitionEventFlux; Flux<PartitionEvent> receiver = eventHubConsumer .receiveFromPartition(claimedOwnership.getPartitionId(), startFromEventPosition, receiveOptions) .doOnNext(partitionEvent -> { if (logger.canLogAtLevel(LogLevel.VERBOSE)) { logger.verbose("On next {}, {}, {}", partitionContext.getEventHubName(), partitionContext.getPartitionId(), partitionEvent.getData().getSequenceNumber()); } }); if (maxWaitTime != null) { partitionEventFlux = receiver .windowTimeout(maxBatchSize, maxWaitTime); } else { partitionEventFlux = receiver .window(maxBatchSize); } partitionEventFlux .concatMap(Flux::collectList) .publishOn(Schedulers.boundedElastic()) .subscribe(partitionEventBatch -> { processEvents(partitionContext, partitionProcessor, eventHubConsumer, partitionEventBatch); }, /* EventHubConsumer receive() returned an error */ ex -> handleError(claimedOwnership, eventHubConsumer, partitionProcessor, ex, partitionContext), () -> { partitionProcessor.close(new CloseContext(partitionContext, CloseReason.EVENT_PROCESSOR_SHUTDOWN)); cleanup(claimedOwnership, eventHubConsumer); }); } catch (Exception ex) { if (partitionPumps.containsKey(claimedOwnership.getPartitionId())) { cleanup(claimedOwnership, partitionPumps.get(claimedOwnership.getPartitionId())); } throw logger.logExceptionAsError( new PartitionProcessorException( "Error occurred while starting partition pump for partition " + claimedOwnership.getPartitionId(), ex)); } } private void processEvent(PartitionContext partitionContext, PartitionProcessor partitionProcessor, EventHubConsumerAsyncClient 
eventHubConsumer, EventContext eventContext) { Context processSpanContext = null; EventData eventData = eventContext.getEventData(); if (eventData != null) { processSpanContext = startProcessTracingSpan(eventData, eventHubConsumer.getEventHubName(), eventHubConsumer.getFullyQualifiedNamespace()); if (processSpanContext.getData(SPAN_CONTEXT_KEY).isPresent()) { eventData.addContext(SPAN_CONTEXT_KEY, processSpanContext); } } try { if (logger.canLogAtLevel(LogLevel.VERBOSE)) { logger.verbose("Processing event {}, {}", partitionContext.getEventHubName(), partitionContext.getPartitionId()); } partitionProcessor.processEvent(new EventContext(partitionContext, eventData, checkpointStore, eventContext.getLastEnqueuedEventProperties())); if (logger.canLogAtLevel(LogLevel.VERBOSE)) { logger.verbose("Completed processing event {}, {}", partitionContext.getEventHubName(), partitionContext.getPartitionId()); } endProcessTracingSpan(processSpanContext, Signal.complete()); } catch (Throwable throwable) { /* user code for event processing threw an exception - log and bubble up */ endProcessTracingSpan(processSpanContext, Signal.error(throwable)); throw logger.logExceptionAsError(new PartitionProcessorException("Error in event processing callback", throwable)); } } private void processEvents(PartitionContext partitionContext, PartitionProcessor partitionProcessor, EventHubConsumerAsyncClient eventHubConsumer, List<PartitionEvent> partitionEventBatch) { try { if (batchReceiveMode) { LastEnqueuedEventProperties[] lastEnqueuedEventProperties = new LastEnqueuedEventProperties[1]; List<EventData> eventDataList = partitionEventBatch.stream() .map(partitionEvent -> { lastEnqueuedEventProperties[0] = partitionEvent.getLastEnqueuedEventProperties(); return partitionEvent.getData(); }) .collect(Collectors.toList()); EventBatchContext eventBatchContext = new EventBatchContext(partitionContext, eventDataList, checkpointStore, lastEnqueuedEventProperties[0]); if 
(logger.canLogAtLevel(LogLevel.VERBOSE)) { logger.verbose("Processing event batch {}, {}", partitionContext.getEventHubName(), partitionContext.getPartitionId()); } partitionProcessor.processEventBatch(eventBatchContext); if (logger.canLogAtLevel(LogLevel.VERBOSE)) { logger.verbose("Completed processing event batch{}, {}", partitionContext.getEventHubName(), partitionContext.getPartitionId()); } } else { EventData eventData = (partitionEventBatch.size() == 1 ? partitionEventBatch.get(0).getData() : null); LastEnqueuedEventProperties lastEnqueuedEventProperties = (partitionEventBatch.size() == 1 ? partitionEventBatch.get(0).getLastEnqueuedEventProperties() : null); EventContext eventContext = new EventContext(partitionContext, eventData, checkpointStore, lastEnqueuedEventProperties); processEvent(partitionContext, partitionProcessor, eventHubConsumer, eventContext); } } catch (Throwable throwable) { /* user code for event processing threw an exception - log and bubble up */ throw logger.logExceptionAsError(new PartitionProcessorException("Error in event processing callback", throwable)); } } Map<String, EventHubConsumerAsyncClient> getPartitionPumps() { return this.partitionPumps; } private void handleError(PartitionOwnership claimedOwnership, EventHubConsumerAsyncClient eventHubConsumer, PartitionProcessor partitionProcessor, Throwable throwable, PartitionContext partitionContext) { boolean shouldRethrow = true; if (!(throwable instanceof PartitionProcessorException)) { shouldRethrow = false; logger.warning("Error receiving events from partition {}", partitionContext.getPartitionId(), throwable); partitionProcessor.processError(new ErrorContext(partitionContext, throwable)); } CloseReason closeReason = CloseReason.LOST_PARTITION_OWNERSHIP; partitionProcessor.close(new CloseContext(partitionContext, closeReason)); cleanup(claimedOwnership, eventHubConsumer); if (shouldRethrow) { PartitionProcessorException exception = (PartitionProcessorException) throwable; throw 
logger.logExceptionAsError(exception); } } private void cleanup(PartitionOwnership claimedOwnership, EventHubConsumerAsyncClient eventHubConsumer) { try { logger.info("Closing consumer for partition id {}", claimedOwnership.getPartitionId()); eventHubConsumer.close(); } finally { logger.info("Removing partition id {} from list of processing partitions", claimedOwnership.getPartitionId()); partitionPumps.remove(claimedOwnership.getPartitionId()); } } /* * Starts a new process tracing span and attaches the returned context to the EventData object for users. */ private Context startProcessTracingSpan(EventData eventData, String eventHubName, String fullyQualifiedNamespace) { Object diagnosticId = eventData.getProperties().get(DIAGNOSTIC_ID_KEY); if (diagnosticId == null || !tracerProvider.isEnabled()) { return Context.NONE; } Context spanContext = tracerProvider.extractContext(diagnosticId.toString(), Context.NONE) .addData(ENTITY_PATH_KEY, eventHubName) .addData(HOST_NAME_KEY, fullyQualifiedNamespace) .addData(AZ_TRACING_NAMESPACE_KEY, AZ_NAMESPACE_VALUE); spanContext = eventData.getEnqueuedTime() == null ? spanContext : spanContext.addData(MESSAGE_ENQUEUED_TIME, eventData.getEnqueuedTime().getEpochSecond()); return tracerProvider.startSpan(AZ_TRACING_SERVICE_NAME, spanContext, ProcessKind.PROCESS); } /* * Ends the process tracing span and the scope of that span. */ }
class PartitionPumpManager { private final ClientLogger logger = new ClientLogger(PartitionPumpManager.class); private final CheckpointStore checkpointStore; private final Map<String, EventHubConsumerAsyncClient> partitionPumps = new ConcurrentHashMap<>(); private final Supplier<PartitionProcessor> partitionProcessorFactory; private final EventHubClientBuilder eventHubClientBuilder; private final TracerProvider tracerProvider; private final boolean trackLastEnqueuedEventProperties; private final Map<String, EventPosition> initialPartitionEventPosition; private final Duration maxWaitTime; private final int maxBatchSize; private final boolean batchReceiveMode; /** * Creates an instance of partition pump manager. * * @param checkpointStore The partition manager that is used to store and update checkpoints. * @param partitionProcessorFactory The partition processor factory that is used to create new instances of {@link * PartitionProcessor} when new partition pumps are started. * @param eventHubClientBuilder The client builder used to create new clients (and new connections) for each * partition processed by this {@link EventProcessorClient}. * @param trackLastEnqueuedEventProperties If set to {@code true}, all events received by this EventProcessorClient * will also include the last enqueued event properties for it's respective partitions. * @param tracerProvider The tracer implementation. * @param initialPartitionEventPosition Map of initial event positions for partition ids. * @param maxBatchSize The maximum batch size to receive per users' process handler invocation. * @param maxWaitTime The maximum time to wait to receive a batch or a single event. * @param batchReceiveMode The boolean value indicating if this processor is configured to receive in batches or * single events. 
*/ PartitionPumpManager(CheckpointStore checkpointStore, Supplier<PartitionProcessor> partitionProcessorFactory, EventHubClientBuilder eventHubClientBuilder, boolean trackLastEnqueuedEventProperties, TracerProvider tracerProvider, Map<String, EventPosition> initialPartitionEventPosition, int maxBatchSize, Duration maxWaitTime, boolean batchReceiveMode) { this.checkpointStore = checkpointStore; this.partitionProcessorFactory = partitionProcessorFactory; this.eventHubClientBuilder = eventHubClientBuilder; this.trackLastEnqueuedEventProperties = trackLastEnqueuedEventProperties; this.tracerProvider = tracerProvider; this.initialPartitionEventPosition = initialPartitionEventPosition; this.maxBatchSize = maxBatchSize; this.maxWaitTime = maxWaitTime; this.batchReceiveMode = batchReceiveMode; } /** * Stops all partition pumps that are actively consuming events. This method is invoked when the {@link * EventProcessorClient} is requested to stop. */ void stopAllPartitionPumps() { this.partitionPumps.forEach((partitionId, eventHubConsumer) -> { try { eventHubConsumer.close(); } catch (Exception ex) { logger.warning(Messages.FAILED_CLOSE_CONSUMER_PARTITION, partitionId, ex); } finally { partitionPumps.remove(partitionId); } }); } /** * Checks the state of the connection for the given partition. If the connection is closed, then this method will * remove the partition from the list of partition pumps. * * @param ownership The partition ownership information for which the connection state will be verified. */ void verifyPartitionConnection(PartitionOwnership ownership) { String partitionId = ownership.getPartitionId(); if (partitionPumps.containsKey(partitionId)) { EventHubConsumerAsyncClient consumerClient = partitionPumps.get(partitionId); if (consumerClient.isConnectionClosed()) { logger.info("Connection closed for {}, partition {}. 
Removing the consumer.", ownership.getEventHubName(), partitionId); try { partitionPumps.get(partitionId).close(); } catch (Exception ex) { logger.warning(Messages.FAILED_CLOSE_CONSUMER_PARTITION, partitionId, ex); } finally { partitionPumps.remove(partitionId); } } } } /** * Starts a new partition pump for the newly claimed partition. If the partition already has an active partition * pump, this will not create a new consumer. * * @param claimedOwnership The details of partition ownership for which new partition pump is requested to start. */ void startPartitionPump(PartitionOwnership claimedOwnership, Checkpoint checkpoint) { if (partitionPumps.containsKey(claimedOwnership.getPartitionId())) { logger.verbose("Consumer is already running for this partition {}", claimedOwnership.getPartitionId()); return; } try { PartitionContext partitionContext = new PartitionContext(claimedOwnership.getFullyQualifiedNamespace(), claimedOwnership.getEventHubName(), claimedOwnership.getConsumerGroup(), claimedOwnership.getPartitionId()); PartitionProcessor partitionProcessor = this.partitionProcessorFactory.get(); InitializationContext initializationContext = new InitializationContext(partitionContext); partitionProcessor.initialize(initializationContext); EventPosition startFromEventPosition = null; if (checkpoint != null && checkpoint.getOffset() != null) { startFromEventPosition = EventPosition.fromOffset(checkpoint.getOffset()); } else if (checkpoint != null && checkpoint.getSequenceNumber() != null) { startFromEventPosition = EventPosition.fromSequenceNumber(checkpoint.getSequenceNumber()); } else if (initialPartitionEventPosition.containsKey(claimedOwnership.getPartitionId())) { startFromEventPosition = initialPartitionEventPosition.get(claimedOwnership.getPartitionId()); } else { startFromEventPosition = EventPosition.latest(); } logger.info("Starting event processing from {} for partition {}", startFromEventPosition, claimedOwnership.getPartitionId()); ReceiveOptions 
receiveOptions = new ReceiveOptions().setOwnerLevel(0L) .setTrackLastEnqueuedEventProperties(trackLastEnqueuedEventProperties); EventHubConsumerAsyncClient eventHubConsumer = eventHubClientBuilder.buildAsyncClient() .createConsumer(claimedOwnership.getConsumerGroup(), EventHubClientBuilder.DEFAULT_PREFETCH_COUNT); partitionPumps.put(claimedOwnership.getPartitionId(), eventHubConsumer); Flux<Flux<PartitionEvent>> partitionEventFlux; Flux<PartitionEvent> receiver = eventHubConsumer .receiveFromPartition(claimedOwnership.getPartitionId(), startFromEventPosition, receiveOptions) .doOnNext(partitionEvent -> { if (logger.canLogAtLevel(LogLevel.VERBOSE)) { logger.verbose("On next {}, {}, {}", partitionContext.getEventHubName(), partitionContext.getPartitionId(), partitionEvent.getData().getSequenceNumber()); } }); if (maxWaitTime != null) { partitionEventFlux = receiver .windowTimeout(maxBatchSize, maxWaitTime); } else { partitionEventFlux = receiver .window(maxBatchSize); } partitionEventFlux .concatMap(Flux::collectList) .publishOn(Schedulers.boundedElastic()) .subscribe(partitionEventBatch -> { processEvents(partitionContext, partitionProcessor, eventHubConsumer, partitionEventBatch); }, /* EventHubConsumer receive() returned an error */ ex -> handleError(claimedOwnership, eventHubConsumer, partitionProcessor, ex, partitionContext), () -> { partitionProcessor.close(new CloseContext(partitionContext, CloseReason.EVENT_PROCESSOR_SHUTDOWN)); cleanup(claimedOwnership, eventHubConsumer); }); } catch (Exception ex) { if (partitionPumps.containsKey(claimedOwnership.getPartitionId())) { cleanup(claimedOwnership, partitionPumps.get(claimedOwnership.getPartitionId())); } throw logger.logExceptionAsError( new PartitionProcessorException( "Error occurred while starting partition pump for partition " + claimedOwnership.getPartitionId(), ex)); } } private void processEvent(PartitionContext partitionContext, PartitionProcessor partitionProcessor, EventHubConsumerAsyncClient 
eventHubConsumer, EventContext eventContext) { Context processSpanContext = null; EventData eventData = eventContext.getEventData(); if (eventData != null) { processSpanContext = startProcessTracingSpan(eventData, eventHubConsumer.getEventHubName(), eventHubConsumer.getFullyQualifiedNamespace()); if (processSpanContext.getData(SPAN_CONTEXT_KEY).isPresent()) { eventData.addContext(SPAN_CONTEXT_KEY, processSpanContext); } } try { if (logger.canLogAtLevel(LogLevel.VERBOSE)) { logger.verbose("Processing event {}, {}", partitionContext.getEventHubName(), partitionContext.getPartitionId()); } partitionProcessor.processEvent(new EventContext(partitionContext, eventData, checkpointStore, eventContext.getLastEnqueuedEventProperties())); if (logger.canLogAtLevel(LogLevel.VERBOSE)) { logger.verbose("Completed processing event {}, {}", partitionContext.getEventHubName(), partitionContext.getPartitionId()); } endProcessTracingSpan(processSpanContext, Signal.complete()); } catch (Throwable throwable) { /* user code for event processing threw an exception - log and bubble up */ endProcessTracingSpan(processSpanContext, Signal.error(throwable)); throw logger.logExceptionAsError(new PartitionProcessorException("Error in event processing callback", throwable)); } } private void processEvents(PartitionContext partitionContext, PartitionProcessor partitionProcessor, EventHubConsumerAsyncClient eventHubConsumer, List<PartitionEvent> partitionEventBatch) { try { if (batchReceiveMode) { LastEnqueuedEventProperties[] lastEnqueuedEventProperties = new LastEnqueuedEventProperties[1]; List<EventData> eventDataList = partitionEventBatch.stream() .map(partitionEvent -> { lastEnqueuedEventProperties[0] = partitionEvent.getLastEnqueuedEventProperties(); return partitionEvent.getData(); }) .collect(Collectors.toList()); EventBatchContext eventBatchContext = new EventBatchContext(partitionContext, eventDataList, checkpointStore, lastEnqueuedEventProperties[0]); if 
(logger.canLogAtLevel(LogLevel.VERBOSE)) { logger.verbose("Processing event batch {}, {}", partitionContext.getEventHubName(), partitionContext.getPartitionId()); } partitionProcessor.processEventBatch(eventBatchContext); if (logger.canLogAtLevel(LogLevel.VERBOSE)) { logger.verbose("Completed processing event batch{}, {}", partitionContext.getEventHubName(), partitionContext.getPartitionId()); } } else { EventData eventData = (partitionEventBatch.size() == 1 ? partitionEventBatch.get(0).getData() : null); LastEnqueuedEventProperties lastEnqueuedEventProperties = (partitionEventBatch.size() == 1 ? partitionEventBatch.get(0).getLastEnqueuedEventProperties() : null); EventContext eventContext = new EventContext(partitionContext, eventData, checkpointStore, lastEnqueuedEventProperties); processEvent(partitionContext, partitionProcessor, eventHubConsumer, eventContext); } } catch (Throwable throwable) { /* user code for event processing threw an exception - log and bubble up */ throw logger.logExceptionAsError(new PartitionProcessorException("Error in event processing callback", throwable)); } } Map<String, EventHubConsumerAsyncClient> getPartitionPumps() { return this.partitionPumps; } private void handleError(PartitionOwnership claimedOwnership, EventHubConsumerAsyncClient eventHubConsumer, PartitionProcessor partitionProcessor, Throwable throwable, PartitionContext partitionContext) { boolean shouldRethrow = true; if (!(throwable instanceof PartitionProcessorException)) { shouldRethrow = false; logger.warning("Error receiving events from partition {}", partitionContext.getPartitionId(), throwable); partitionProcessor.processError(new ErrorContext(partitionContext, throwable)); } CloseReason closeReason = CloseReason.LOST_PARTITION_OWNERSHIP; partitionProcessor.close(new CloseContext(partitionContext, closeReason)); cleanup(claimedOwnership, eventHubConsumer); if (shouldRethrow) { PartitionProcessorException exception = (PartitionProcessorException) throwable; throw 
logger.logExceptionAsError(exception); } } private void cleanup(PartitionOwnership claimedOwnership, EventHubConsumerAsyncClient eventHubConsumer) { try { logger.info("Closing consumer for partition id {}", claimedOwnership.getPartitionId()); eventHubConsumer.close(); } finally { logger.info("Removing partition id {} from list of processing partitions", claimedOwnership.getPartitionId()); partitionPumps.remove(claimedOwnership.getPartitionId()); } } /* * Starts a new process tracing span and attaches the returned context to the EventData object for users. */ private Context startProcessTracingSpan(EventData eventData, String eventHubName, String fullyQualifiedNamespace) { Object diagnosticId = eventData.getProperties().get(DIAGNOSTIC_ID_KEY); if (diagnosticId == null || !tracerProvider.isEnabled()) { return Context.NONE; } Context spanContext = tracerProvider.extractContext(diagnosticId.toString(), Context.NONE) .addData(ENTITY_PATH_KEY, eventHubName) .addData(HOST_NAME_KEY, fullyQualifiedNamespace) .addData(AZ_TRACING_NAMESPACE_KEY, AZ_NAMESPACE_VALUE); spanContext = eventData.getEnqueuedTime() == null ? spanContext : spanContext.addData(MESSAGE_ENQUEUED_TIME, eventData.getEnqueuedTime().getEpochSecond()); return tracerProvider.startSpan(AZ_TRACING_SERVICE_NAME, spanContext, ProcessKind.PROCESS); } /* * Ends the process tracing span and the scope of that span. */ }
```suggestion "Process span scope type is not of type AutoCloseable, but type: %s. Not closing the scope", ```
private void endProcessTracingSpan(Context processSpanContext, Signal<Void> signal) { if (processSpanContext == null) { return; } Optional<Object> spanScope = processSpanContext.getData(SCOPE_KEY); if (!spanScope.isPresent() || !tracerProvider.isEnabled()) { return; } if (spanScope.get() instanceof AutoCloseable) { AutoCloseable close = (AutoCloseable) processSpanContext.getData(SCOPE_KEY).get(); try { close.close(); } catch (Exception exception) { logger.error("endTracingSpan().close() failed with an error %s", exception); } } else { logger.warning(String.format(Locale.US, "Process span scope type is not of type Closeable, but type: %s. Not closing the scope and span", spanScope.get() != null ? spanScope.getClass() : "null")); } tracerProvider.endSpan(processSpanContext, signal); }
"Process span scope type is not of type Closeable, but type: %s. Not closing the scope and span",
private void endProcessTracingSpan(Context processSpanContext, Signal<Void> signal) { if (processSpanContext == null) { return; } Optional<Object> spanScope = processSpanContext.getData(SCOPE_KEY); if (!spanScope.isPresent() || !tracerProvider.isEnabled()) { return; } if (spanScope.get() instanceof AutoCloseable) { AutoCloseable close = (AutoCloseable) processSpanContext.getData(SCOPE_KEY).get(); try { close.close(); } catch (Exception exception) { logger.error("endTracingSpan().close() failed with an error %s", exception); } } else { logger.warning(String.format(Locale.US, "Process span scope type is not of type AutoCloseable, but type: %s. Not closing the scope" + " and span", spanScope.get() != null ? spanScope.getClass() : "null")); } tracerProvider.endSpan(processSpanContext, signal); }
class ServiceBusProcessorClient implements AutoCloseable { private static final int SCHEDULER_INTERVAL_IN_SECONDS = 10; private final ClientLogger logger = new ClientLogger(ServiceBusProcessorClient.class); private final ServiceBusClientBuilder.ServiceBusSessionReceiverClientBuilder sessionReceiverBuilder; private final ServiceBusClientBuilder.ServiceBusReceiverClientBuilder receiverBuilder; private final Consumer<ServiceBusReceivedMessageContext> processMessage; private final Consumer<ServiceBusErrorContext> processError; private final ServiceBusProcessorClientOptions processorOptions; private final AtomicReference<Subscription> receiverSubscription = new AtomicReference<>(); private final AtomicReference<ServiceBusReceiverAsyncClient> asyncClient = new AtomicReference<>(); private final AtomicBoolean isRunning = new AtomicBoolean(); private final TracerProvider tracerProvider; private ScheduledExecutorService scheduledExecutor; /** * Constructor to create a sessions-enabled processor. * * @param sessionReceiverBuilder The session processor builder to create new instances of async clients. * @param processMessage The message processing callback. * @param processError The error handler. * @param processorOptions Options to configure this instance of the processor. 
*/ ServiceBusProcessorClient(ServiceBusClientBuilder.ServiceBusSessionReceiverClientBuilder sessionReceiverBuilder, Consumer<ServiceBusReceivedMessageContext> processMessage, Consumer<ServiceBusErrorContext> processError, ServiceBusProcessorClientOptions processorOptions) { this.sessionReceiverBuilder = Objects.requireNonNull(sessionReceiverBuilder, "'sessionReceiverBuilder' cannot be null"); this.processMessage = Objects.requireNonNull(processMessage, "'processMessage' cannot be null"); this.processError = Objects.requireNonNull(processError, "'processError' cannot be null"); this.processorOptions = Objects.requireNonNull(processorOptions, "'processorOptions' cannot be null"); this.asyncClient.set(sessionReceiverBuilder.buildAsyncClientForProcessor()); this.receiverBuilder = null; this.tracerProvider = processorOptions.getTracerProvider(); } /** * Constructor to create a processor. * * @param receiverBuilder The processor builder to create new instances of async clients. * @param processMessage The message processing callback. * @param processError The error handler. * @param processorOptions Options to configure this instance of the processor. 
*/ ServiceBusProcessorClient(ServiceBusClientBuilder.ServiceBusReceiverClientBuilder receiverBuilder, Consumer<ServiceBusReceivedMessageContext> processMessage, Consumer<ServiceBusErrorContext> processError, ServiceBusProcessorClientOptions processorOptions) { this.receiverBuilder = Objects.requireNonNull(receiverBuilder, "'receiverBuilder' cannot be null"); this.processMessage = Objects.requireNonNull(processMessage, "'processMessage' cannot be null"); this.processError = Objects.requireNonNull(processError, "'processError' cannot be null"); this.processorOptions = Objects.requireNonNull(processorOptions, "'processorOptions' cannot be null"); this.asyncClient.set(receiverBuilder.buildAsyncClient()); this.sessionReceiverBuilder = null; this.tracerProvider = processorOptions.getTracerProvider(); } /** * Starts the processor in the background. When this method is called, the processor will initiate a message * receiver that will invoke the message handler when new messages are available. This method is idempotent (ie. * calling {@code start()} again after the processor is already running is a no-op). * <p> * Calling {@code start()} after calling {@link * underlying connection. * </p> * <p> * Calling {@code start()} after calling {@link * </p> */ public synchronized void start() { if (isRunning.getAndSet(true)) { logger.info("Processor is already running"); return; } if (asyncClient.get() == null) { ServiceBusReceiverAsyncClient newReceiverClient = this.receiverBuilder == null ? 
this.sessionReceiverBuilder.buildAsyncClientForProcessor() : this.receiverBuilder.buildAsyncClient(); asyncClient.set(newReceiverClient); } receiveMessages(); if (this.scheduledExecutor == null) { this.scheduledExecutor = Executors.newSingleThreadScheduledExecutor(); scheduledExecutor.scheduleWithFixedDelay(() -> { if (this.asyncClient.get().isConnectionClosed()) { restartMessageReceiver(); } }, SCHEDULER_INTERVAL_IN_SECONDS, SCHEDULER_INTERVAL_IN_SECONDS, TimeUnit.SECONDS); } } /** * Stops the message processing for this processor. The receiving links and sessions are kept active and this * processor can resume processing messages by calling {@link */ public synchronized void stop() { isRunning.set(false); } /** * Stops message processing and closes the processor. The receiving links and sessions are closed and calling * {@link */ @Override public synchronized void close() { isRunning.set(false); if (receiverSubscription.get() != null) { receiverSubscription.get().cancel(); receiverSubscription.set(null); } if (scheduledExecutor != null) { scheduledExecutor.shutdown(); scheduledExecutor = null; } if (asyncClient.get() != null) { asyncClient.get().close(); asyncClient.set(null); } } /** * Returns {@code true} if the processor is running. If the processor is stopped or closed, this method returns * {@code false}. * * @return {@code true} if the processor is running; {@code false} otherwise. 
*/ public synchronized boolean isRunning() { return isRunning.get(); } private synchronized void receiveMessages() { if (receiverSubscription.get() != null) { receiverSubscription.get().request(1); return; } ServiceBusReceiverAsyncClient receiverClient = asyncClient.get(); receiverClient.receiveMessagesWithContext() .parallel(processorOptions.getMaxConcurrentCalls()) .runOn(Schedulers.boundedElastic()) .subscribe(new Subscriber<ServiceBusMessageContext>() { @Override public void onSubscribe(Subscription subscription) { receiverSubscription.set(subscription); receiverSubscription.get().request(1); } @Override public void onNext(ServiceBusMessageContext serviceBusMessageContext) { if (serviceBusMessageContext.hasError()) { handleError(serviceBusMessageContext.getThrowable()); } else { Context processSpanContext = null; try { ServiceBusReceivedMessageContext serviceBusReceivedMessageContext = new ServiceBusReceivedMessageContext(receiverClient, serviceBusMessageContext); processSpanContext = startProcessTracingSpan(serviceBusMessageContext.getMessage(), receiverClient.getEntityPath(), receiverClient.getFullyQualifiedNamespace()); if (processSpanContext.getData(SPAN_CONTEXT_KEY).isPresent()) { serviceBusMessageContext.getMessage().addContext(SPAN_CONTEXT_KEY, processSpanContext); } processMessage.accept(serviceBusReceivedMessageContext); endProcessTracingSpan(processSpanContext, Signal.complete()); } catch (Exception ex) { handleError(new ServiceBusException(ex, ServiceBusErrorSource.USER_CALLBACK)); endProcessTracingSpan(processSpanContext, Signal.error(ex)); if (!processorOptions.isDisableAutoComplete()) { logger.warning("Error when processing message. 
Abandoning message.", ex); abandonMessage(serviceBusMessageContext, receiverClient); } } } if (isRunning.get()) { logger.verbose("Requesting 1 more message from upstream"); receiverSubscription.get().request(1); } } @Override public void onError(Throwable throwable) { logger.info("Error receiving messages.", throwable); handleError(throwable); if (isRunning.get()) { restartMessageReceiver(); } } @Override public void onComplete() { logger.info("Completed receiving messages."); if (isRunning.get()) { restartMessageReceiver(); } } }); } private Context startProcessTracingSpan(ServiceBusReceivedMessage receivedMessage, String entityPath, String fullyQualifiedNamespace) { Object diagnosticId = receivedMessage.getApplicationProperties().get(DIAGNOSTIC_ID_KEY); if (diagnosticId == null || !tracerProvider.isEnabled()) { return Context.NONE; } Context spanContext = tracerProvider.extractContext(diagnosticId.toString(), Context.NONE); spanContext = spanContext .addData(ENTITY_PATH_KEY, entityPath) .addData(HOST_NAME_KEY, fullyQualifiedNamespace) .addData(AZ_TRACING_NAMESPACE_KEY, AZ_TRACING_NAMESPACE_VALUE); spanContext = receivedMessage.getEnqueuedTime() == null ? 
spanContext : spanContext.addData(MESSAGE_ENQUEUED_TIME, receivedMessage.getEnqueuedTime().toInstant().getEpochSecond()); return tracerProvider.startSpan(AZ_TRACING_SERVICE_NAME, spanContext, ProcessKind.PROCESS); } private void abandonMessage(ServiceBusMessageContext serviceBusMessageContext, ServiceBusReceiverAsyncClient receiverClient) { try { receiverClient.abandon(serviceBusMessageContext.getMessage()).block(); } catch (Exception exception) { logger.verbose("Failed to abandon message", exception); } } private void handleError(Throwable throwable) { try { ServiceBusReceiverAsyncClient client = asyncClient.get(); final String fullyQualifiedNamespace = client.getFullyQualifiedNamespace(); final String entityPath = client.getEntityPath(); processError.accept(new ServiceBusErrorContext(throwable, fullyQualifiedNamespace, entityPath)); } catch (Exception ex) { logger.verbose("Error from error handler. Ignoring error.", ex); } } private void restartMessageReceiver() { if (!isRunning()) { return; } receiverSubscription.set(null); ServiceBusReceiverAsyncClient receiverClient = asyncClient.get(); receiverClient.close(); ServiceBusReceiverAsyncClient newReceiverClient = this.receiverBuilder == null ? this.sessionReceiverBuilder.buildAsyncClientForProcessor() : this.receiverBuilder.buildAsyncClient(); asyncClient.set(newReceiverClient); receiveMessages(); } }
class ServiceBusProcessorClient implements AutoCloseable { private static final int SCHEDULER_INTERVAL_IN_SECONDS = 10; private final ClientLogger logger = new ClientLogger(ServiceBusProcessorClient.class); private final ServiceBusClientBuilder.ServiceBusSessionReceiverClientBuilder sessionReceiverBuilder; private final ServiceBusClientBuilder.ServiceBusReceiverClientBuilder receiverBuilder; private final Consumer<ServiceBusReceivedMessageContext> processMessage; private final Consumer<ServiceBusErrorContext> processError; private final ServiceBusProcessorClientOptions processorOptions; private final AtomicReference<Subscription> receiverSubscription = new AtomicReference<>(); private final AtomicReference<ServiceBusReceiverAsyncClient> asyncClient = new AtomicReference<>(); private final AtomicBoolean isRunning = new AtomicBoolean(); private final TracerProvider tracerProvider; private ScheduledExecutorService scheduledExecutor; /** * Constructor to create a sessions-enabled processor. * * @param sessionReceiverBuilder The session processor builder to create new instances of async clients. * @param processMessage The message processing callback. * @param processError The error handler. * @param processorOptions Options to configure this instance of the processor. 
*/ ServiceBusProcessorClient(ServiceBusClientBuilder.ServiceBusSessionReceiverClientBuilder sessionReceiverBuilder, Consumer<ServiceBusReceivedMessageContext> processMessage, Consumer<ServiceBusErrorContext> processError, ServiceBusProcessorClientOptions processorOptions) { this.sessionReceiverBuilder = Objects.requireNonNull(sessionReceiverBuilder, "'sessionReceiverBuilder' cannot be null"); this.processMessage = Objects.requireNonNull(processMessage, "'processMessage' cannot be null"); this.processError = Objects.requireNonNull(processError, "'processError' cannot be null"); this.processorOptions = Objects.requireNonNull(processorOptions, "'processorOptions' cannot be null"); this.asyncClient.set(sessionReceiverBuilder.buildAsyncClientForProcessor()); this.receiverBuilder = null; this.tracerProvider = processorOptions.getTracerProvider(); } /** * Constructor to create a processor. * * @param receiverBuilder The processor builder to create new instances of async clients. * @param processMessage The message processing callback. * @param processError The error handler. * @param processorOptions Options to configure this instance of the processor. 
*/ ServiceBusProcessorClient(ServiceBusClientBuilder.ServiceBusReceiverClientBuilder receiverBuilder, Consumer<ServiceBusReceivedMessageContext> processMessage, Consumer<ServiceBusErrorContext> processError, ServiceBusProcessorClientOptions processorOptions) { this.receiverBuilder = Objects.requireNonNull(receiverBuilder, "'receiverBuilder' cannot be null"); this.processMessage = Objects.requireNonNull(processMessage, "'processMessage' cannot be null"); this.processError = Objects.requireNonNull(processError, "'processError' cannot be null"); this.processorOptions = Objects.requireNonNull(processorOptions, "'processorOptions' cannot be null"); this.asyncClient.set(receiverBuilder.buildAsyncClient()); this.sessionReceiverBuilder = null; this.tracerProvider = processorOptions.getTracerProvider(); } /** * Starts the processor in the background. When this method is called, the processor will initiate a message * receiver that will invoke the message handler when new messages are available. This method is idempotent (ie. * calling {@code start()} again after the processor is already running is a no-op). * <p> * Calling {@code start()} after calling {@link * underlying connection. * </p> * <p> * Calling {@code start()} after calling {@link * </p> */ public synchronized void start() { if (isRunning.getAndSet(true)) { logger.info("Processor is already running"); return; } if (asyncClient.get() == null) { ServiceBusReceiverAsyncClient newReceiverClient = this.receiverBuilder == null ? 
this.sessionReceiverBuilder.buildAsyncClientForProcessor() : this.receiverBuilder.buildAsyncClient(); asyncClient.set(newReceiverClient); } receiveMessages(); if (this.scheduledExecutor == null) { this.scheduledExecutor = Executors.newSingleThreadScheduledExecutor(); scheduledExecutor.scheduleWithFixedDelay(() -> { if (this.asyncClient.get().isConnectionClosed()) { restartMessageReceiver(); } }, SCHEDULER_INTERVAL_IN_SECONDS, SCHEDULER_INTERVAL_IN_SECONDS, TimeUnit.SECONDS); } } /** * Stops the message processing for this processor. The receiving links and sessions are kept active and this * processor can resume processing messages by calling {@link */ public synchronized void stop() { isRunning.set(false); } /** * Stops message processing and closes the processor. The receiving links and sessions are closed and calling * {@link */ @Override public synchronized void close() { isRunning.set(false); if (receiverSubscription.get() != null) { receiverSubscription.get().cancel(); receiverSubscription.set(null); } if (scheduledExecutor != null) { scheduledExecutor.shutdown(); scheduledExecutor = null; } if (asyncClient.get() != null) { asyncClient.get().close(); asyncClient.set(null); } } /** * Returns {@code true} if the processor is running. If the processor is stopped or closed, this method returns * {@code false}. * * @return {@code true} if the processor is running; {@code false} otherwise. 
*/ public synchronized boolean isRunning() { return isRunning.get(); } private synchronized void receiveMessages() { if (receiverSubscription.get() != null) { receiverSubscription.get().request(1); return; } ServiceBusReceiverAsyncClient receiverClient = asyncClient.get(); receiverClient.receiveMessagesWithContext() .parallel(processorOptions.getMaxConcurrentCalls()) .runOn(Schedulers.boundedElastic()) .subscribe(new Subscriber<ServiceBusMessageContext>() { @Override public void onSubscribe(Subscription subscription) { receiverSubscription.set(subscription); receiverSubscription.get().request(1); } @Override public void onNext(ServiceBusMessageContext serviceBusMessageContext) { if (serviceBusMessageContext.hasError()) { handleError(serviceBusMessageContext.getThrowable()); } else { Context processSpanContext = null; try { ServiceBusReceivedMessageContext serviceBusReceivedMessageContext = new ServiceBusReceivedMessageContext(receiverClient, serviceBusMessageContext); processSpanContext = startProcessTracingSpan(serviceBusMessageContext.getMessage(), receiverClient.getEntityPath(), receiverClient.getFullyQualifiedNamespace()); if (processSpanContext.getData(SPAN_CONTEXT_KEY).isPresent()) { serviceBusMessageContext.getMessage().addContext(SPAN_CONTEXT_KEY, processSpanContext); } processMessage.accept(serviceBusReceivedMessageContext); endProcessTracingSpan(processSpanContext, Signal.complete()); } catch (Exception ex) { handleError(new ServiceBusException(ex, ServiceBusErrorSource.USER_CALLBACK)); endProcessTracingSpan(processSpanContext, Signal.error(ex)); if (!processorOptions.isDisableAutoComplete()) { logger.warning("Error when processing message. 
Abandoning message.", ex); abandonMessage(serviceBusMessageContext, receiverClient); } } } if (isRunning.get()) { logger.verbose("Requesting 1 more message from upstream"); receiverSubscription.get().request(1); } } @Override public void onError(Throwable throwable) { logger.info("Error receiving messages.", throwable); handleError(throwable); if (isRunning.get()) { restartMessageReceiver(); } } @Override public void onComplete() { logger.info("Completed receiving messages."); if (isRunning.get()) { restartMessageReceiver(); } } }); } private Context startProcessTracingSpan(ServiceBusReceivedMessage receivedMessage, String entityPath, String fullyQualifiedNamespace) { Object diagnosticId = receivedMessage.getApplicationProperties().get(DIAGNOSTIC_ID_KEY); if (diagnosticId == null || !tracerProvider.isEnabled()) { return Context.NONE; } Context spanContext = tracerProvider.extractContext(diagnosticId.toString(), Context.NONE); spanContext = spanContext .addData(ENTITY_PATH_KEY, entityPath) .addData(HOST_NAME_KEY, fullyQualifiedNamespace) .addData(AZ_TRACING_NAMESPACE_KEY, AZ_TRACING_NAMESPACE_VALUE); spanContext = receivedMessage.getEnqueuedTime() == null ? 
spanContext : spanContext.addData(MESSAGE_ENQUEUED_TIME, receivedMessage.getEnqueuedTime().toInstant().getEpochSecond()); return tracerProvider.startSpan(AZ_TRACING_SERVICE_NAME, spanContext, ProcessKind.PROCESS); } private void abandonMessage(ServiceBusMessageContext serviceBusMessageContext, ServiceBusReceiverAsyncClient receiverClient) { try { receiverClient.abandon(serviceBusMessageContext.getMessage()).block(); } catch (Exception exception) { logger.verbose("Failed to abandon message", exception); } } private void handleError(Throwable throwable) { try { ServiceBusReceiverAsyncClient client = asyncClient.get(); final String fullyQualifiedNamespace = client.getFullyQualifiedNamespace(); final String entityPath = client.getEntityPath(); processError.accept(new ServiceBusErrorContext(throwable, fullyQualifiedNamespace, entityPath)); } catch (Exception ex) { logger.verbose("Error from error handler. Ignoring error.", ex); } } private void restartMessageReceiver() { if (!isRunning()) { return; } receiverSubscription.set(null); ServiceBusReceiverAsyncClient receiverClient = asyncClient.get(); receiverClient.close(); ServiceBusReceiverAsyncClient newReceiverClient = this.receiverBuilder == null ? this.sessionReceiverBuilder.buildAsyncClientForProcessor() : this.receiverBuilder.buildAsyncClient(); asyncClient.set(newReceiverClient); receiveMessages(); } }
are we covering the undefined, null, scenario for propMixed too? If so could you add a test for that too.
/**
 * Seeds the shared multi-partition container for the ORDER BY query tests.
 * <p>
 * Creates 30 documents carrying "propInt", "propStr", "propArray" and "propObject",
 * plus a "propMixed" property whose runtime type rotates (int, string, array, object,
 * float) by {@code i % 5}, one document with none of these properties, and 10
 * documents sharing the partition key "duplicateParitionKeyValue" for the
 * single-partition continuation-token test. Finally counts the container's partition
 * key ranges and widens the indexing policy via {@code updateCollectionIndex()}.
 *
 * @throws Exception if client creation or document insertion fails
 */
public void before_OrderbyDocumentQueryTest() throws Exception {
    client = getClientBuilder().buildAsyncClient();
    createdDatabase = getSharedCosmosDatabase(client);
    createdCollection = getSharedMultiPartitionCosmosContainer(client);
    // Shared container: drop any documents left over from other test classes.
    truncateCollection(createdCollection);

    List<Map<String, Object>> keyValuePropsList = new ArrayList<>();
    Map<String, Object> props;

    for (int i = 0; i < 30; i++) {
        props = new HashMap<>();
        props.put("propInt", i);
        props.put("propStr", String.valueOf(i));
        List<Integer> orderByArray = new ArrayList<Integer>();
        Map<String, String> orderByObject = new HashMap<>();
        for (int k = 0; k < 3; k++) {
            orderByArray.add(k + i);
            orderByObject.put("key1", String.valueOf(i));
            orderByObject.put("key2", String.valueOf(orderByArray.get(k)));
        }
        props.put("propArray", orderByArray);
        props.put("propObject", orderByObject);
        keyValuePropsList.add(props);
        // Rotate the runtime type of "propMixed" so ORDER BY r.propMixed has to
        // merge results across heterogeneous types.
        switch (i % 5) {
            case 0:
                props.put("propMixed", i);
                break;
            case 1:
                props.put("propMixed", String.valueOf(i));
                break;
            case 2:
                props.put("propMixed", orderByArray);
                break;
            case 3:
                props.put("propMixed", orderByObject);
                break;
            case 4:
                props.put("propMixed", (float) i * 3.17);
                break;
            default:
                break;
        }
    }
    // One document with none of the ordered properties (the "undefined" case).
    props = new HashMap<>();
    keyValuePropsList.add(props);
    createdDocuments = bulkInsert(createdCollection, keyValuePropsList);

    // 10 documents sharing one partition key, used by
    // queryScopedToSinglePartition_StartWithContinuationToken.
    for (int i = 0; i < 10; i++) {
        Map<String, Object> p = new HashMap<>();
        p.put("propScopedPartitionInt", i);
        InternalObjectNode doc =
            getDocumentDefinition("duplicateParitionKeyValue", UUID.randomUUID().toString(), p);
        // FIX: removed an unused CosmosItemRequestOptions local that was created
        // here on every iteration and never passed to any call.
        createdDocuments.add(createDocument(createdCollection, doc));
    }

    numberOfPartitions = CosmosBridgeInternal.getAsyncDocumentClient(client)
        .readPartitionKeyRanges("dbs/" + createdDatabase.getId() + "/colls/" + createdCollection.getId(), null)
        .flatMap(p -> Flux.fromIterable(p.getResults())).collectList().single().block().size();

    waitIfNeededForReplicasToCatchUp(getClientBuilder());
    updateCollectionIndex();
}
break;
/**
 * Seeds the shared multi-partition container for the ORDER BY query tests.
 * <p>
 * Creates 30 documents carrying "propInt", "propStr", "propArray" and "propObject".
 * The {@code PROP_MIXED} property rotates through all orderable types by
 * {@code i % 8}: number (int and float), string, array, object, null and boolean,
 * while {@code i % 8 == 7} leaves it absent (the "undefined" case); a parallel
 * {@code PROPTYPE} property records the type name for type-filtered queries. One
 * extra document has no properties at all, and 10 documents share the partition
 * key "duplicateParitionKeyValue" for the single-partition continuation-token
 * test. Finally counts the container's partition key ranges and widens the
 * indexing policy via {@code updateCollectionIndex()}.
 *
 * @throws Exception if client creation or document insertion fails
 */
public void before_OrderbyDocumentQueryTest() throws Exception {
    client = getClientBuilder().buildAsyncClient();
    createdDatabase = getSharedCosmosDatabase(client);
    createdCollection = getSharedMultiPartitionCosmosContainer(client);
    // Shared container: drop any documents left over from other test classes.
    truncateCollection(createdCollection);

    List<Map<String, Object>> keyValuePropsList = new ArrayList<>();
    Map<String, Object> props;
    // Alternating boolean payload for the PROP_MIXED boolean case.
    boolean flag = false;

    for (int i = 0; i < 30; i++) {
        props = new HashMap<>();
        props.put("propInt", i);
        props.put("propStr", String.valueOf(i));
        List<Integer> orderByArray = new ArrayList<Integer>();
        Map<String, String> orderByObject = new HashMap<>();
        for (int k = 0; k < 3; k++) {
            orderByArray.add(k + i);
            orderByObject.put("key1", String.valueOf(i));
            orderByObject.put("key2", String.valueOf(orderByArray.get(k)));
        }
        props.put("propArray", orderByArray);
        props.put("propObject", orderByObject);
        // Rotate the runtime type of PROP_MIXED; the default branch (i % 8 == 7)
        // deliberately leaves PROP_MIXED undefined on the document.
        switch (i % 8) {
            case 0:
                props.put(PROP_MIXED, i);
                props.put(PROPTYPE, "number");
                break;
            case 1:
                props.put(PROP_MIXED, String.valueOf(i));
                props.put(PROPTYPE, "string");
                break;
            case 2:
                props.put(PROP_MIXED, orderByArray);
                props.put(PROPTYPE, "array");
                break;
            case 3:
                props.put(PROP_MIXED, orderByObject);
                props.put(PROPTYPE, "object");
                break;
            case 4:
                props.put(PROP_MIXED, (float) i * 3.17);
                props.put(PROPTYPE, "number");
                break;
            case 5:
                props.put(PROP_MIXED, null);
                props.put(PROPTYPE, "null");
                break;
            case 6:
                flag = !flag;
                props.put(PROP_MIXED, flag);
                props.put(PROPTYPE, "boolean");
                break;
            default:
                break;
        }
        keyValuePropsList.add(props);
    }
    // One document with no properties at all.
    props = new HashMap<>();
    keyValuePropsList.add(props);
    createdDocuments = bulkInsert(createdCollection, keyValuePropsList);

    // 10 documents sharing one partition key, used by
    // queryScopedToSinglePartition_StartWithContinuationToken.
    for (int i = 0; i < 10; i++) {
        Map<String, Object> p = new HashMap<>();
        p.put("propScopedPartitionInt", i);
        InternalObjectNode doc =
            getDocumentDefinition("duplicateParitionKeyValue", UUID.randomUUID().toString(), p);
        // FIX: removed an unused CosmosItemRequestOptions local that was created
        // here on every iteration and never passed to any call.
        createdDocuments.add(createDocument(createdCollection, doc));
    }

    numberOfPartitions = CosmosBridgeInternal.getAsyncDocumentClient(client)
        .readPartitionKeyRanges("dbs/" + createdDatabase.getId() + "/colls/" + createdCollection.getId(), null)
        .flatMap(p -> Flux.fromIterable(p.getResults())).collectList().single().block().size();

    waitIfNeededForReplicasToCatchUp(getClientBuilder());
    updateCollectionIndex();
}
/**
 * End-to-end tests for cross-partition ORDER BY queries: plain and VALUE
 * projections, TOP, mixed-type ordering, single-partition scoping, and
 * order-by continuation tokens (valid, invalid and round-tripped).
 * Documents are seeded by before_OrderbyDocumentQueryTest (defined separately).
 */
class OrderbyDocumentQueryTest extends TestSuiteBase {
    // Minimum RU charge each partition contributes to a fanned-out query.
    private final double minQueryRequestChargePerPartition = 2.0;
    private CosmosAsyncClient client;
    private CosmosAsyncContainer createdCollection;
    private CosmosAsyncDatabase createdDatabase;
    private List<InternalObjectNode> createdDocuments = new ArrayList<>();
    private int numberOfPartitions;

    @Factory(dataProvider = "clientBuildersWithDirect")
    public OrderbyDocumentQueryTest(CosmosClientBuilder clientBuilder) {
        super(clientBuilder);
    }

    /** Single-document ORDER BY query: content, RU charge and (optionally) query metrics are validated. */
    @Test(groups = { "simple" }, timeOut = TIMEOUT, dataProvider = "queryMetricsArgProvider")
    public void queryDocumentsValidateContent(Boolean qmEnabled) throws Exception {
        InternalObjectNode expectedDocument = createdDocuments.get(0);

        String query = String.format("SELECT * from root r where r.propStr = '%s'"
            + " ORDER BY r.propInt", ModelBridgeInternal.getStringFromJsonSerializable(expectedDocument, "propStr"));

        CosmosQueryRequestOptions options = new CosmosQueryRequestOptions();
        if (qmEnabled != null) {
            options.setQueryMetricsEnabled(qmEnabled);
        }
        CosmosPagedFlux<InternalObjectNode> queryObservable =
            createdCollection.queryItems(query, options, InternalObjectNode.class);

        List<String> expectedResourceIds = new ArrayList<>();
        expectedResourceIds.add(expectedDocument.getResourceId());

        Map<String, ResourceValidator<InternalObjectNode>> resourceIDToValidator = new HashMap<>();
        resourceIDToValidator.put(expectedDocument.getResourceId(),
            new ResourceValidator.Builder<InternalObjectNode>().areEqual(expectedDocument).build());

        FeedResponseListValidator<InternalObjectNode> validator =
            new FeedResponseListValidator.Builder<InternalObjectNode>()
                .numberOfPages(1)
                .containsExactly(expectedResourceIds)
                .validateAllResources(resourceIDToValidator)
                .totalRequestChargeIsAtLeast(numberOfPartitions * minQueryRequestChargePerPartition)
                .allPagesSatisfy(new FeedResponseValidator.Builder<InternalObjectNode>().hasRequestChargeHeader().build())
                .hasValidQueryMetrics(qmEnabled)
                .build();

        validateQuerySuccess(queryObservable.byPage(), validator);
    }

    /** ORDER BY query matching no documents still returns one (empty) page with a charge header. */
    @Test(groups = { "simple" }, timeOut = TIMEOUT)
    public void queryDocuments_NoResults() throws Exception {
        String query = "SELECT * from root r where r.id = '2' ORDER BY r.propInt";
        CosmosQueryRequestOptions options = new CosmosQueryRequestOptions();
        CosmosPagedFlux<InternalObjectNode> queryObservable =
            createdCollection.queryItems(query, options, InternalObjectNode.class);

        FeedResponseListValidator<InternalObjectNode> validator =
            new FeedResponseListValidator.Builder<InternalObjectNode>()
                .containsExactly(new ArrayList<>())
                .numberOfPages(1)
                .totalRequestChargeIsAtLeast(numberOfPartitions * minQueryRequestChargePerPartition)
                .allPagesSatisfy(new FeedResponseValidator.Builder<InternalObjectNode>()
                    .hasRequestChargeHeader().build())
                .build();
        validateQuerySuccess(queryObservable.byPage(), validator);
    }

    @DataProvider(name = "sortOrder")
    public Object[][] sortOrder() {
        return new Object[][] { { "ASC" }, {"DESC"} };
    }

    /** ORDER BY r.propInt in both directions, checking exact document order and page count. */
    @Test(groups = { "simple" }, timeOut = TIMEOUT, dataProvider = "sortOrder")
    public void queryOrderBy(String sortOrder) throws Exception {
        String query = String.format("SELECT * FROM r ORDER BY r.propInt %s", sortOrder);
        CosmosQueryRequestOptions options = new CosmosQueryRequestOptions();
        int pageSize = 3;
        CosmosPagedFlux<InternalObjectNode> queryObservable =
            createdCollection.queryItems(query, options, InternalObjectNode.class);

        Comparator<Integer> validatorComparator = Comparator.nullsFirst(Comparator.<Integer>naturalOrder());
        List<String> expectedResourceIds =
            sortDocumentsAndCollectResourceIds("propInt",
                d -> ModelBridgeInternal.getIntFromJsonSerializable(d, "propInt"), validatorComparator);
        if ("DESC".equals(sortOrder)) {
            Collections.reverse(expectedResourceIds);
        }

        int expectedPageSize = expectedNumberOfPages(expectedResourceIds.size(), pageSize);

        FeedResponseListValidator<InternalObjectNode> validator =
            new FeedResponseListValidator.Builder<InternalObjectNode>()
                .containsExactly(expectedResourceIds)
                .numberOfPages(expectedPageSize)
                .allPagesSatisfy(new FeedResponseValidator.Builder<InternalObjectNode>()
                    .hasRequestChargeHeader().build())
                .totalRequestChargeIsAtLeast(numberOfPartitions * minQueryRequestChargePerPartition)
                .build();

        validateQuerySuccess(queryObservable.byPage(pageSize), validator);
    }

    /** SELECT VALUE projection with ORDER BY: validates the scalar values, not whole documents. */
    @Test(groups = {"simple"}, timeOut = TIMEOUT, dataProvider = "sortOrder")
    public void queryOrderByWithValue(String sortOrder) throws Exception {
        String query = String.format("SELECT value r.propInt FROM r ORDER BY r.propInt %s", sortOrder);
        CosmosQueryRequestOptions options = new CosmosQueryRequestOptions();
        int pageSize = 3;
        CosmosPagedFlux<Integer> queryObservable =
            createdCollection.queryItems(query, options, Integer.class);

        Comparator<Integer> validatorComparator = Comparator.nullsFirst(Comparator.<Integer>naturalOrder());
        List<Integer> expectedValues =
            sortDocumentsAndCollectValues("propInt",
                d -> ModelBridgeInternal.getIntFromJsonSerializable(d, "propInt"), validatorComparator);
        if ("DESC".equals(sortOrder)) {
            Collections.reverse(expectedValues);
        }
        int expectedPageSize = expectedNumberOfPages(expectedValues.size(), pageSize);

        FeedResponseListValidator<Integer> validator =
            new FeedResponseListValidator.Builder<Integer>()
                .containsExactlyValues(expectedValues)
                .numberOfPages(expectedPageSize)
                .allPagesSatisfy(new FeedResponseValidator.Builder<Integer>()
                    .hasRequestChargeHeader().build())
                .totalRequestChargeIsAtLeast(numberOfPartitions * minQueryRequestChargePerPartition)
                .build();

        validateQuerySuccess(queryObservable.byPage(pageSize), validator);
    }

    /** ORDER BY on an integer property, implicit ascending order. */
    @Test(groups = { "simple" }, timeOut = TIMEOUT)
    public void queryOrderByInt() throws Exception {
        String query = "SELECT * FROM r ORDER BY r.propInt";
        CosmosQueryRequestOptions options = new CosmosQueryRequestOptions();
        int pageSize = 3;
        CosmosPagedFlux<InternalObjectNode> queryObservable =
            createdCollection.queryItems(query, options, InternalObjectNode.class);

        Comparator<Integer> validatorComparator = Comparator.nullsFirst(Comparator.<Integer>naturalOrder());
        List<String> expectedResourceIds =
            sortDocumentsAndCollectResourceIds("propInt",
                d -> ModelBridgeInternal.getIntFromJsonSerializable(d, "propInt"), validatorComparator);
        int expectedPageSize = expectedNumberOfPages(expectedResourceIds.size(), pageSize);

        FeedResponseListValidator<InternalObjectNode> validator =
            new FeedResponseListValidator.Builder<InternalObjectNode>()
                .containsExactly(expectedResourceIds)
                .numberOfPages(expectedPageSize)
                .allPagesSatisfy(new FeedResponseValidator.Builder<InternalObjectNode>()
                    .hasRequestChargeHeader().build())
                .totalRequestChargeIsAtLeast(numberOfPartitions * minQueryRequestChargePerPartition)
                .build();

        validateQuerySuccess(queryObservable.byPage(pageSize), validator);
    }

    /** ORDER BY on a string property, implicit ascending order. */
    @Test(groups = { "simple" }, timeOut = TIMEOUT)
    public void queryOrderByString() throws Exception {
        String query = "SELECT * FROM r ORDER BY r.propStr";
        CosmosQueryRequestOptions options = new CosmosQueryRequestOptions();
        int pageSize = 3;
        CosmosPagedFlux<InternalObjectNode> queryObservable =
            createdCollection.queryItems(query, options, InternalObjectNode.class);

        Comparator<String> validatorComparator = Comparator.nullsFirst(Comparator.<String>naturalOrder());
        List<String> expectedResourceIds =
            sortDocumentsAndCollectResourceIds("propStr",
                d -> ModelBridgeInternal.getStringFromJsonSerializable(d, "propStr"), validatorComparator);
        int expectedPageSize = expectedNumberOfPages(expectedResourceIds.size(), pageSize);

        FeedResponseListValidator<InternalObjectNode> validator =
            new FeedResponseListValidator.Builder<InternalObjectNode>()
                .containsExactly(expectedResourceIds)
                .numberOfPages(expectedPageSize)
                .allPagesSatisfy(new FeedResponseValidator.Builder<InternalObjectNode>()
                    .hasRequestChargeHeader().build())
                .totalRequestChargeIsAtLeast(numberOfPartitions * minQueryRequestChargePerPartition)
                .build();

        validateQuerySuccess(queryObservable.byPage(pageSize), validator);
    }

    /**
     * ORDER BY over a property whose type differs per document, across multiple
     * partitions. The expected id set is computed with ItemComparator; results are
     * checked for set equality since the cross-type ordering comes from the service.
     */
    @Test(groups = { "simple" }, timeOut = TIMEOUT, dataProvider = "sortOrder")
    public void queryOrderByMixedTypes(String sortOrder) throws Exception {
        List<PartitionKeyRange> partitionKeyRanges =
            getPartitionKeyRanges(createdCollection.getId(), BridgeInternal.getContextClient(this.client));
        // The mixed-type merge is only interesting across multiple partitions.
        assertThat(partitionKeyRanges.size()).isGreaterThan(1);

        // FIX: the format string was missing the %s placeholder, so the sortOrder
        // argument was silently ignored and both ASC and DESC ran the same query.
        String query = String.format("SELECT r.id, r.propMixed FROM r ORDER BY r.propMixed %s", sortOrder);
        CosmosQueryRequestOptions options = new CosmosQueryRequestOptions();

        List<String> orderedIds = createdDocuments.stream()
            .sorted((o1, o2) -> ItemComparator.getInstance().compare(o1.get("propMixed"), o2.get("propMixed")))
            .map(Resource::getId)
            .collect(Collectors.toList());

        int pageSize = 20;
        CosmosPagedFlux<InternalObjectNode> queryFlux =
            createdCollection.queryItems(query, options, InternalObjectNode.class);
        // FIX: removed a fire-and-forget TestSubscriber subscription that executed
        // the query a second time without ever being awaited or asserted.
        FeedResponseListValidator<InternalObjectNode> validator =
            new FeedResponseListValidator.Builder<InternalObjectNode>()
                .totalSize(orderedIds.size())
                .exactlyContainsIdsInAnyOrder(orderedIds)
                .build();
        validateQuerySuccess(queryFlux.byPage(pageSize), validator);
    }

    /** Reads all partition key ranges of the given container (blocking helper). */
    @NotNull
    private List<PartitionKeyRange> getPartitionKeyRanges(
        String containerId, AsyncDocumentClient asyncDocumentClient) {
        List<PartitionKeyRange> partitionKeyRanges = new ArrayList<>();
        List<FeedResponse<PartitionKeyRange>> partitionFeedResponseList = asyncDocumentClient
            .readPartitionKeyRanges("/dbs/" + createdDatabase.getId() + "/colls/" + containerId,
                new CosmosQueryRequestOptions())
            .collectList().block();
        partitionFeedResponseList.forEach(f -> partitionKeyRanges.addAll(f.getResults()));
        return partitionKeyRanges;
    }

    /** TOP values around the collection-size boundaries (0, 1, 5, n-1, n, n+1, 2n). */
    @DataProvider(name = "topValue")
    public Object[][] topValueParameter() {
        return new Object[][] {
            { 0 }, { 1 }, { 5 },
            { createdDocuments.size() - 1 },
            { createdDocuments.size() },
            { createdDocuments.size() + 1 },
            { 2 * createdDocuments.size() }
        };
    }

    /** TOP n combined with ORDER BY: only the first n ordered documents come back. */
    @Test(groups = { "simple" }, timeOut = TIMEOUT, dataProvider = "topValue")
    public void queryOrderWithTop(int topValue) throws Exception {
        String query = String.format("SELECT TOP %d * FROM r ORDER BY r.propInt", topValue);
        CosmosQueryRequestOptions options = new CosmosQueryRequestOptions();
        int pageSize = 3;
        CosmosPagedFlux<InternalObjectNode> queryObservable =
            createdCollection.queryItems(query, options, InternalObjectNode.class);

        Comparator<Integer> validatorComparator = Comparator.nullsFirst(Comparator.<Integer>naturalOrder());
        List<String> expectedResourceIds =
            sortDocumentsAndCollectResourceIds("propInt",
                d -> ModelBridgeInternal.getIntFromJsonSerializable(d, "propInt"), validatorComparator)
                .stream().limit(topValue).collect(Collectors.toList());

        int expectedPageSize = expectedNumberOfPages(expectedResourceIds.size(), pageSize);

        FeedResponseListValidator<InternalObjectNode> validator =
            new FeedResponseListValidator.Builder<InternalObjectNode>()
                .containsExactly(expectedResourceIds)
                .numberOfPages(expectedPageSize)
                .allPagesSatisfy(new FeedResponseValidator.Builder<InternalObjectNode>()
                    .hasRequestChargeHeader().build())
                // TOP 0 short-circuits, so only a minimal charge is guaranteed.
                .totalRequestChargeIsAtLeast(numberOfPartitions * (topValue > 0 ? minQueryRequestChargePerPartition : 1))
                .build();

        validateQuerySuccess(queryObservable.byPage(pageSize), validator);
    }

    /** Sorts the seeded documents that carry propName by the extracted value and returns their _rids. */
    private <T> List<String> sortDocumentsAndCollectResourceIds(String propName,
                                                               Function<InternalObjectNode, T> extractProp,
                                                               Comparator<T> comparer) {
        return createdDocuments.stream()
            .filter(d -> ModelBridgeInternal.getMapFromJsonSerializable(d).containsKey(propName))
            .sorted((d1, d2) -> comparer.compare(extractProp.apply(d1), extractProp.apply(d2)))
            .map(Resource::getResourceId).collect(Collectors.toList());
    }

    /** Same as sortDocumentsAndCollectResourceIds, but returns the sorted property values themselves. */
    @SuppressWarnings("unchecked")
    private <T> List<T> sortDocumentsAndCollectValues(String propName,
                                                      Function<InternalObjectNode, T> extractProp,
                                                      Comparator<T> comparer) {
        return createdDocuments.stream()
            .filter(d -> ModelBridgeInternal.getMapFromJsonSerializable(d).containsKey(propName))
            .sorted((d1, d2) -> comparer.compare(extractProp.apply(d1), extractProp.apply(d2)))
            .map(d -> (T) ModelBridgeInternal.getMapFromJsonSerializable(d).get(propName))
            .collect(Collectors.toList());
    }

    /**
     * Runs a partition-scoped ORDER BY query, consumes the first page, then resumes
     * from its continuation token and validates that exactly the remaining documents
     * are returned in order.
     */
    @Test(groups = { "simple" }, timeOut = TIMEOUT)
    public void queryScopedToSinglePartition_StartWithContinuationToken() throws Exception {
        String query = "SELECT * FROM r ORDER BY r.propScopedPartitionInt ASC";
        CosmosQueryRequestOptions options = new CosmosQueryRequestOptions();
        options.setPartitionKey(new PartitionKey("duplicateParitionKeyValue"));
        CosmosPagedFlux<InternalObjectNode> queryObservable =
            createdCollection.queryItems(query, options, InternalObjectNode.class);

        TestSubscriber<FeedResponse<InternalObjectNode>> subscriber = new TestSubscriber<>();
        queryObservable.byPage(3).take(1).subscribe(subscriber);

        subscriber.awaitTerminalEvent();
        subscriber.assertComplete();
        subscriber.assertNoErrors();
        assertThat(subscriber.valueCount()).isEqualTo(1);

        @SuppressWarnings("unchecked")
        FeedResponse<InternalObjectNode> page =
            (FeedResponse<InternalObjectNode>) subscriber.getEvents().get(0).get(0);
        assertThat(page.getResults()).hasSize(3);
        assertThat(page.getContinuationToken()).isNotEmpty();

        queryObservable = createdCollection.queryItems(query, options, InternalObjectNode.class);

        // The first 3 documents were consumed above; the resumed query must return the rest.
        List<InternalObjectNode> expectedDocs = createdDocuments.stream()
            .filter(d -> (StringUtils.equals("duplicateParitionKeyValue",
                ModelBridgeInternal.getStringFromJsonSerializable(d, "mypk"))))
            .filter(d -> (ModelBridgeInternal.getIntFromJsonSerializable(d, "propScopedPartitionInt") > 2))
            .collect(Collectors.toList());
        Integer maxItemCount = ModelBridgeInternal.getMaxItemCountFromQueryRequestOptions(options);
        int expectedPageSize = (expectedDocs.size() + maxItemCount - 1) / maxItemCount;

        assertThat(expectedDocs).hasSize(10 - 3);

        // FIX: initialized directly instead of the former "validator = null" followed
        // by an immediate reassignment.
        FeedResponseListValidator<InternalObjectNode> validator =
            new FeedResponseListValidator.Builder<InternalObjectNode>()
                .containsExactly(expectedDocs.stream()
                    .sorted((e1, e2) -> Integer.compare(
                        ModelBridgeInternal.getIntFromJsonSerializable(e1, "propScopedPartitionInt"),
                        ModelBridgeInternal.getIntFromJsonSerializable(e2, "propScopedPartitionInt")))
                    .map(d -> d.getResourceId()).collect(Collectors.toList()))
                .numberOfPages(expectedPageSize)
                .allPagesSatisfy(new FeedResponseValidator.Builder<InternalObjectNode>()
                    .requestChargeGreaterThanOrEqualTo(1.0).build())
                .build();

        validateQuerySuccess(queryObservable.byPage(page.getContinuationToken()), validator);
    }

    /** Serialization round trip of OrderByContinuationToken, plus a negative parse case. */
    @Test(groups = { "simple" }, timeOut = TIMEOUT)
    public void orderByContinuationTokenRoundTrip() throws Exception {
        {
            // Positive case: serialize, parse back, and compare every field.
            OrderByContinuationToken orderByContinuationToken = new OrderByContinuationToken(
                new CompositeContinuationToken(
                    "asdf",
                    new Range<String>("A", "D", false, true)),
                new QueryItem[] {new QueryItem("{\"item\" : 42}")},
                "rid",
                false);
            String serialized = orderByContinuationToken.toString();
            ValueHolder<OrderByContinuationToken> outOrderByContinuationToken =
                new ValueHolder<OrderByContinuationToken>();

            assertThat(OrderByContinuationToken.tryParse(serialized, outOrderByContinuationToken)).isTrue();
            OrderByContinuationToken deserialized = outOrderByContinuationToken.v;
            CompositeContinuationToken compositeContinuationToken = deserialized.getCompositeContinuationToken();
            String token = compositeContinuationToken.getToken();
            Range<String> range = compositeContinuationToken.getRange();
            assertThat(token).isEqualTo("asdf");
            assertThat(range.getMin()).isEqualTo("A");
            assertThat(range.getMax()).isEqualTo("D");
            assertThat(range.isMinInclusive()).isEqualTo(false);
            assertThat(range.isMaxInclusive()).isEqualTo(true);

            QueryItem[] orderByItems = deserialized.getOrderByItems();
            assertThat(orderByItems).isNotNull();
            assertThat(orderByItems.length).isEqualTo(1);
            assertThat(orderByItems[0].getItem()).isEqualTo(42);

            String rid = deserialized.getRid();
            assertThat(rid).isEqualTo("rid");

            boolean inclusive = deserialized.getInclusive();
            assertThat(inclusive).isEqualTo(false);
        }
        {
            // Negative case: parsing an arbitrary JSON object must fail gracefully.
            ValueHolder<OrderByContinuationToken> outOrderByContinuationToken =
                new ValueHolder<OrderByContinuationToken>();
            assertThat(OrderByContinuationToken.tryParse("{\"property\" : \"Not a valid Order By Token\"}",
                outOrderByContinuationToken)).isFalse();
        }
    }

    /** Drains an integer ORDER BY query through continuation tokens at several page sizes. */
    @Test(groups = { "simple" }, timeOut = TIMEOUT * 10, dataProvider = "sortOrder", retryAnalyzer = RetryAnalyzer.class)
    public void queryDocumentsWithOrderByContinuationTokensInteger(String sortOrder) throws Exception {
        String query = String.format("SELECT * FROM c ORDER BY c.propInt %s", sortOrder);
        Comparator<Integer> order = sortOrder.equals("ASC") ? Comparator.naturalOrder() : Comparator.reverseOrder();
        Comparator<Integer> validatorComparator = Comparator.nullsFirst(order);
        List<String> expectedResourceIds =
            sortDocumentsAndCollectResourceIds("propInt",
                d -> ModelBridgeInternal.getIntFromJsonSerializable(d, "propInt"), validatorComparator);
        this.queryWithContinuationTokensAndPageSizes(query, new int[] { 1, 5, 10, 100}, expectedResourceIds);
    }

    /** Same as the integer variant, but ordering by the string id property. */
    @Test(groups = { "simple" }, timeOut = TIMEOUT * 10, dataProvider = "sortOrder")
    public void queryDocumentsWithOrderByContinuationTokensString(String sortOrder) throws Exception {
        String query = String.format("SELECT * FROM c ORDER BY c.id %s", sortOrder);
        Comparator<String> order = sortOrder.equals("ASC") ? Comparator.naturalOrder() : Comparator.reverseOrder();
        Comparator<String> validatorComparator = Comparator.nullsFirst(order);
        List<String> expectedResourceIds =
            sortDocumentsAndCollectResourceIds("id",
                d -> ModelBridgeInternal.getStringFromJsonSerializable(d, "id"), validatorComparator);
        this.queryWithContinuationTokensAndPageSizes(query, new int[] { 1, 5, 10, 100 }, expectedResourceIds);
    }

    /** Resuming with a forged continuation token must surface a CosmosException. */
    @Test(groups = { "simple" }, timeOut = TIMEOUT * 10, dataProvider = "sortOrder")
    public void queryDocumentsWithInvalidOrderByContinuationTokensString(String sortOrder) throws Exception {
        String query = String.format("SELECT * FROM c ORDER BY c.id %s", sortOrder);
        Comparator<String> validatorComparator;
        if (sortOrder.equals("ASC")) {
            validatorComparator = Comparator.nullsFirst(Comparator.<String>naturalOrder());
        } else {
            validatorComparator = Comparator.nullsFirst(Comparator.<String>reverseOrder());
        }
        List<String> expectedResourceIds =
            sortDocumentsAndCollectResourceIds("id",
                d -> ModelBridgeInternal.getStringFromJsonSerializable(d, "id"), validatorComparator);
        this.assertInvalidContinuationToken(query, new int[] { 1, 5, 10, 100 }, expectedResourceIds);
    }

    /** ORDER BY over an array-valued property: small-page drain must agree with a full drain. */
    @Test(groups = {"simple"}, timeOut = TIMEOUT, dataProvider = "sortOrder")
    public void queryOrderByArray(String sortOrder) throws Exception {
        String query = String.format("SELECT * FROM r ORDER BY r.propArray %s", sortOrder);
        int pageSize = 3;
        List<InternalObjectNode> results1 = this.queryWithContinuationTokens(query, pageSize);
        List<InternalObjectNode> results2 = this.queryWithContinuationTokens(query, this.createdDocuments.size());
        assertThat(results1.stream().map(r -> r.getResourceId()).collect(Collectors.toList()))
            .containsExactlyElementsOf(results2.stream().limit(pageSize).map(r -> r.getResourceId()).collect(Collectors.toList()));
    }

    /** ORDER BY over an object-valued property, same consistency check as the array variant. */
    @Test(groups = {"simple"}, timeOut = TIMEOUT, dataProvider = "sortOrder")
    public void queryOrderByObject(String sortOrder) throws Exception {
        String query = String.format("SELECT * FROM r ORDER BY r.propObject %s", sortOrder);
        int pageSize = 3;
        List<InternalObjectNode> results1 = this.queryWithContinuationTokens(query, pageSize);
        List<InternalObjectNode> results2 = this.queryWithContinuationTokens(query, this.createdDocuments.size());
        assertThat(results1.stream().map(r -> r.getResourceId()).collect(Collectors.toList()))
            .containsExactlyElementsOf(results2.stream().limit(pageSize).map(r -> r.getResourceId()).collect(Collectors.toList()));
    }

    /** Creates a single document built from the given property map (blocking). */
    public InternalObjectNode createDocument(CosmosAsyncContainer cosmosContainer, Map<String, Object> keyValueProps) {
        InternalObjectNode docDefinition = getDocumentDefinition(keyValueProps);
        return BridgeInternal.getProperties(cosmosContainer.createItem(docDefinition).block());
    }

    /** Builds one document per property map and bulk-inserts them, returning the definitions. */
    public List<InternalObjectNode> bulkInsert(CosmosAsyncContainer cosmosContainer, List<Map<String, Object>> keyValuePropsList) {
        ArrayList<InternalObjectNode> result = new ArrayList<InternalObjectNode>();
        for (Map<String, Object> keyValueProps : keyValuePropsList) {
            InternalObjectNode docDefinition = getDocumentDefinition(keyValueProps);
            result.add(docDefinition);
        }
        return bulkInsertBlocking(cosmosContainer, result);
    }

    /** Pause between tests to avoid request-rate throttling on the shared container. */
    @BeforeMethod(groups = { "simple" })
    public void beforeMethod() throws Exception {
        TimeUnit.SECONDS.sleep(10);
    }

    /** Adds /propMixed/? to the included index paths so ORDER BY r.propMixed is served by the index. */
    @BeforeClass(groups = { "simple" }, timeOut = 4 * SETUP_TIMEOUT)
    private void updateCollectionIndex() {
        CosmosContainerProperties containerProperties = createdCollection.read().block().getProperties();
        IndexingPolicy indexingPolicy = containerProperties.getIndexingPolicy();
        List<IncludedPath> includedPaths = indexingPolicy.getIncludedPaths();
        IncludedPath includedPath = new IncludedPath("/propMixed/?");
        if (!includedPaths.contains(includedPath)) {
            includedPaths.add(includedPath);
            indexingPolicy.setIncludedPaths(includedPaths);
            containerProperties.setIndexingPolicy(indexingPolicy);
            createdCollection.replace(containerProperties).block();
        }
    }

    @AfterClass(groups = { "simple" }, timeOut = SHUTDOWN_TIMEOUT, alwaysRun = true)
    public void afterClass() {
        safeClose(client);
    }

    /**
     * Runs the query once with a syntactically valid but forged order-by continuation
     * token and asserts the service rejects it with a CosmosException.
     * The pageSize/expectedIds parameters are unused but kept for signature
     * compatibility with existing callers.
     */
    private void assertInvalidContinuationToken(String query, int[] pageSize, List<String> expectedIds) {
        // FIX: removed a do/while whose loop variable was never reassigned — the body
        // always executed exactly once, so the dead loop is gone.
        CosmosQueryRequestOptions options = new CosmosQueryRequestOptions();
        options.setMaxDegreeOfParallelism(2);
        OrderByContinuationToken orderByContinuationToken = new OrderByContinuationToken(
            new CompositeContinuationToken(
                "asdf",
                new Range<String>("A", "D", false, true)),
            new QueryItem[] {new QueryItem("{\"item\" : 42}")},
            "rid",
            false);
        CosmosPagedFlux<InternalObjectNode> queryObservable =
            createdCollection.queryItems(query, options, InternalObjectNode.class);
        TestSubscriber<FeedResponse<InternalObjectNode>> testSubscriber = new TestSubscriber<>();
        queryObservable.byPage(orderByContinuationToken.toString(), 1).subscribe(testSubscriber);
        testSubscriber.awaitTerminalEvent(TIMEOUT, TimeUnit.MILLISECONDS);
        testSubscriber.assertError(CosmosException.class);
    }

    /** Drains the query with each page size and checks the concatenated ids match exactly. */
    private void queryWithContinuationTokensAndPageSizes(String query, int[] pageSizes, List<String> expectedIds) {
        for (int pageSize : pageSizes) {
            List<InternalObjectNode> receivedDocuments = this.queryWithContinuationTokens(query, pageSize);
            List<String> actualIds = new ArrayList<String>();
            for (InternalObjectNode document : receivedDocuments) {
                actualIds.add(document.getResourceId());
            }
            assertThat(actualIds).containsExactlyElementsOf(expectedIds);
        }
    }

    /** Executes the query page by page, resuming from each continuation token, and collects every result. */
    private List<InternalObjectNode> queryWithContinuationTokens(String query, int pageSize) {
        String requestContinuation = null;
        // FIX: removed a 'continuationTokens' accumulator that was populated but never read.
        List<InternalObjectNode> receivedDocuments = new ArrayList<InternalObjectNode>();
        do {
            CosmosQueryRequestOptions options = new CosmosQueryRequestOptions();
            options.setMaxDegreeOfParallelism(2);
            CosmosPagedFlux<InternalObjectNode> queryObservable =
                createdCollection.queryItems(query, options, InternalObjectNode.class);

            TestSubscriber<FeedResponse<InternalObjectNode>> testSubscriber = new TestSubscriber<>();
            queryObservable.byPage(requestContinuation, pageSize).subscribe(testSubscriber);
            testSubscriber.awaitTerminalEvent(TIMEOUT, TimeUnit.MILLISECONDS);
            testSubscriber.assertNoErrors();
            testSubscriber.assertComplete();

            @SuppressWarnings("unchecked")
            FeedResponse<InternalObjectNode> firstPage =
                (FeedResponse<InternalObjectNode>) testSubscriber.getEvents().get(0).get(0);
            requestContinuation = firstPage.getContinuationToken();
            receivedDocuments.addAll(firstPage.getResults());
        } while (requestContinuation != null);
        return receivedDocuments;
    }

    /** Builds a raw JSON document from the property map plus the given id and partition key ("mypk"). */
    private static InternalObjectNode getDocumentDefinition(String partitionKey, String id, Map<String, Object> keyValuePair) {
        StringBuilder sb = new StringBuilder();
        sb.append("{\n");
        for (String key : keyValuePair.keySet()) {
            Object val = keyValuePair.get(key);
            sb.append(" ");
            sb.append("\"").append(key).append("\"").append(" :" );
            if (val == null) {
                sb.append("null");
            } else {
                sb.append(toJson(val));
            }
            sb.append(",\n");
        }
        sb.append(String.format(" \"id\": \"%s\",\n", id));
        sb.append(String.format(" \"mypk\": \"%s\"\n", partitionKey));
        sb.append("}");
        return new InternalObjectNode(sb.toString());
    }

    /** Convenience overload: a random UUID serves as both id and partition key. */
    private static InternalObjectNode getDocumentDefinition(Map<String, Object> keyValuePair) {
        String uuid = UUID.randomUUID().toString();
        return getDocumentDefinition(uuid, uuid, keyValuePair);
    }

    /** JSON-serializes a value, rethrowing Jackson failures unchecked. */
    private static String toJson(Object object) {
        try {
            return Utils.getSimpleObjectMapper().writeValueAsString(object);
        } catch (JsonProcessingException e) {
            throw new IllegalStateException(e);
        }
    }
}
class OrderbyDocumentQueryTest extends TestSuiteBase { public static final String PROPTYPE = "proptype"; public static final String PROP_MIXED = "propMixed"; private final double minQueryRequestChargePerPartition = 2.0; private CosmosAsyncClient client; private CosmosAsyncContainer createdCollection; private CosmosAsyncDatabase createdDatabase; private List<InternalObjectNode> createdDocuments = new ArrayList<>(); private int numberOfPartitions; @Factory(dataProvider = "clientBuildersWithDirect") public OrderbyDocumentQueryTest(CosmosClientBuilder clientBuilder) { super(clientBuilder); } @Test(groups = { "simple" }, timeOut = TIMEOUT, dataProvider = "queryMetricsArgProvider") public void queryDocumentsValidateContent(Boolean qmEnabled) throws Exception { InternalObjectNode expectedDocument = createdDocuments.get(0); String query = String.format("SELECT * from root r where r.propStr = '%s'" + " ORDER BY r.propInt" , ModelBridgeInternal.getStringFromJsonSerializable(expectedDocument,"propStr")); CosmosQueryRequestOptions options = new CosmosQueryRequestOptions(); if (qmEnabled != null) { options.setQueryMetricsEnabled(qmEnabled); } CosmosPagedFlux<InternalObjectNode> queryObservable = createdCollection.queryItems(query, options, InternalObjectNode.class); List<String> expectedResourceIds = new ArrayList<>(); expectedResourceIds.add(expectedDocument.getResourceId()); Map<String, ResourceValidator<InternalObjectNode>> resourceIDToValidator = new HashMap<>(); resourceIDToValidator.put(expectedDocument.getResourceId(), new ResourceValidator.Builder<InternalObjectNode>().areEqual(expectedDocument).build()); FeedResponseListValidator<InternalObjectNode> validator = new FeedResponseListValidator.Builder<InternalObjectNode>() .numberOfPages(1) .containsExactly(expectedResourceIds) .validateAllResources(resourceIDToValidator) .totalRequestChargeIsAtLeast(numberOfPartitions * minQueryRequestChargePerPartition) .allPagesSatisfy(new 
FeedResponseValidator.Builder<InternalObjectNode>().hasRequestChargeHeader().build()) .hasValidQueryMetrics(qmEnabled) .build(); validateQuerySuccess(queryObservable.byPage(), validator); } @Test(groups = { "simple" }, timeOut = TIMEOUT) public void queryDocuments_NoResults() throws Exception { String query = "SELECT * from root r where r.id = '2' ORDER BY r.propInt"; CosmosQueryRequestOptions options = new CosmosQueryRequestOptions(); CosmosPagedFlux<InternalObjectNode> queryObservable = createdCollection.queryItems(query, options, InternalObjectNode.class); FeedResponseListValidator<InternalObjectNode> validator = new FeedResponseListValidator.Builder<InternalObjectNode>() .containsExactly(new ArrayList<>()) .numberOfPages(1) .totalRequestChargeIsAtLeast(numberOfPartitions * minQueryRequestChargePerPartition) .allPagesSatisfy(new FeedResponseValidator.Builder<InternalObjectNode>() .hasRequestChargeHeader().build()) .build(); validateQuerySuccess(queryObservable.byPage(), validator); } @DataProvider(name = "sortOrder") public Object[][] sortOrder() { return new Object[][] { { "ASC" }, {"DESC"} }; } @Test(groups = { "simple" }, timeOut = TIMEOUT, dataProvider = "sortOrder") public void queryOrderBy(String sortOrder) throws Exception { String query = String.format("SELECT * FROM r ORDER BY r.propInt %s", sortOrder); CosmosQueryRequestOptions options = new CosmosQueryRequestOptions(); int pageSize = 3; CosmosPagedFlux<InternalObjectNode> queryObservable = createdCollection.queryItems(query, options, InternalObjectNode.class); Comparator<Integer> validatorComparator = Comparator.nullsFirst(Comparator.<Integer>naturalOrder()); List<String> expectedResourceIds = sortDocumentsAndCollectResourceIds("propInt", d -> ModelBridgeInternal.getIntFromJsonSerializable(d,"propInt"), validatorComparator); if ("DESC".equals(sortOrder)) { Collections.reverse(expectedResourceIds); } int expectedPageSize = expectedNumberOfPages(expectedResourceIds.size(), pageSize); 
FeedResponseListValidator<InternalObjectNode> validator = new FeedResponseListValidator.Builder<InternalObjectNode>() .containsExactly(expectedResourceIds) .numberOfPages(expectedPageSize) .allPagesSatisfy(new FeedResponseValidator.Builder<InternalObjectNode>() .hasRequestChargeHeader().build()) .totalRequestChargeIsAtLeast(numberOfPartitions * minQueryRequestChargePerPartition) .build(); validateQuerySuccess(queryObservable.byPage(pageSize), validator); } @Test(groups = {"simple"}, timeOut = TIMEOUT, dataProvider = "sortOrder") public void queryOrderByWithValue(String sortOrder) throws Exception { String query = String.format("SELECT value r.propInt FROM r ORDER BY r.propInt %s", sortOrder); CosmosQueryRequestOptions options = new CosmosQueryRequestOptions(); int pageSize = 3; CosmosPagedFlux<Integer> queryObservable = createdCollection.queryItems(query, options, Integer.class); Comparator<Integer> validatorComparator = Comparator.nullsFirst(Comparator.<Integer>naturalOrder()); List<Integer> expectedValues = sortDocumentsAndCollectValues("propInt", d -> ModelBridgeInternal .getIntFromJsonSerializable(d, "propInt"), validatorComparator); if ("DESC".equals(sortOrder)) { Collections.reverse(expectedValues); } int expectedPageSize = expectedNumberOfPages(expectedValues.size(), pageSize); FeedResponseListValidator<Integer> validator = new FeedResponseListValidator.Builder<Integer>() .containsExactlyValues(expectedValues) .numberOfPages(expectedPageSize) .allPagesSatisfy(new FeedResponseValidator.Builder<Integer>() .hasRequestChargeHeader().build()) .totalRequestChargeIsAtLeast(numberOfPartitions * minQueryRequestChargePerPartition) .build(); validateQuerySuccess(queryObservable.byPage(pageSize), validator); } @Test(groups = { "simple" }, timeOut = TIMEOUT) public void queryOrderByInt() throws Exception { String query = "SELECT * FROM r ORDER BY r.propInt"; CosmosQueryRequestOptions options = new CosmosQueryRequestOptions(); int pageSize = 3; 
CosmosPagedFlux<InternalObjectNode> queryObservable = createdCollection.queryItems(query, options, InternalObjectNode.class); Comparator<Integer> validatorComparator = Comparator.nullsFirst(Comparator.<Integer>naturalOrder()); List<String> expectedResourceIds = sortDocumentsAndCollectResourceIds("propInt", d -> ModelBridgeInternal.getIntFromJsonSerializable(d,"propInt"), validatorComparator); int expectedPageSize = expectedNumberOfPages(expectedResourceIds.size(), pageSize); FeedResponseListValidator<InternalObjectNode> validator = new FeedResponseListValidator.Builder<InternalObjectNode>() .containsExactly(expectedResourceIds) .numberOfPages(expectedPageSize) .allPagesSatisfy(new FeedResponseValidator.Builder<InternalObjectNode>() .hasRequestChargeHeader().build()) .totalRequestChargeIsAtLeast(numberOfPartitions * minQueryRequestChargePerPartition) .build(); validateQuerySuccess(queryObservable.byPage(pageSize), validator); } @Test(groups = { "simple" }, timeOut = TIMEOUT) public void queryOrderByString() throws Exception { String query = "SELECT * FROM r ORDER BY r.propStr"; CosmosQueryRequestOptions options = new CosmosQueryRequestOptions(); int pageSize = 3; CosmosPagedFlux<InternalObjectNode> queryObservable = createdCollection.queryItems(query, options, InternalObjectNode.class); Comparator<String> validatorComparator = Comparator.nullsFirst(Comparator.<String>naturalOrder()); List<String> expectedResourceIds = sortDocumentsAndCollectResourceIds("propStr", d -> ModelBridgeInternal.getStringFromJsonSerializable(d,"propStr"), validatorComparator); int expectedPageSize = expectedNumberOfPages(expectedResourceIds.size(), pageSize); FeedResponseListValidator<InternalObjectNode> validator = new FeedResponseListValidator.Builder<InternalObjectNode>() .containsExactly(expectedResourceIds) .numberOfPages(expectedPageSize) .allPagesSatisfy(new FeedResponseValidator.Builder<InternalObjectNode>() .hasRequestChargeHeader().build()) 
.totalRequestChargeIsAtLeast(numberOfPartitions * minQueryRequestChargePerPartition) .build(); validateQuerySuccess(queryObservable.byPage(pageSize), validator); } @Test(groups = {"simple"}, timeOut = TIMEOUT, dataProvider = "sortOrder") public void queryOrderByMixedTypes(String sortOrder) throws Exception { List<PartitionKeyRange> partitionKeyRanges = getPartitionKeyRanges(createdCollection.getId(), BridgeInternal .getContextClient(this.client)); assertThat(partitionKeyRanges.size()).isGreaterThan(1); String query = String.format("SELECT * FROM r ORDER BY r.propMixed ", sortOrder); CosmosQueryRequestOptions options = new CosmosQueryRequestOptions(); List<String> sourceIds = createdDocuments.stream() .map(Resource::getId) .collect(Collectors.toList()); int pageSize = 20; CosmosPagedFlux<InternalObjectNode> queryFlux = createdCollection .queryItems(query, options, InternalObjectNode.class); TestSubscriber<FeedResponse<InternalObjectNode>> subscriber = new TestSubscriber<>(); queryFlux.byPage(pageSize).subscribe(subscriber); subscriber.awaitTerminalEvent(); subscriber.assertComplete(); subscriber.assertNoErrors(); List<InternalObjectNode> results = new ArrayList<>(); subscriber.values().forEach(feedResponse -> results.addAll(feedResponse.getResults())); assertThat(results.size()).isEqualTo(createdDocuments.size()); List<String> resultIds = results.stream().map(Resource::getId).collect(Collectors.toList()); assertThat(resultIds).containsExactlyInAnyOrderElementsOf(sourceIds); final List<String> typeList = Arrays.asList("undefined", "null", "boolean", "number", "string", "array", "object"); List<String> observedTypes = new ArrayList<>(); results.forEach(item -> { String propType = "undefined"; if (item.has(PROPTYPE)) { propType = item.getString(PROPTYPE); } System.out.println("item.get(PROPTYPE) = " + item.get(PROPTYPE)); System.out.println("propType = " + propType); if (!observedTypes.contains(propType)) { observedTypes.add(propType); } else { boolean equals = 
observedTypes.get(observedTypes.size() - 1).equals(propType); assertThat(equals).isTrue().as("Items of same type should be contiguous"); } }); assertThat(observedTypes).containsExactlyElementsOf(typeList); for (String type : typeList) { List<InternalObjectNode> items = results.stream().filter(r -> { if ("undefined".equals(type)) { return !r.has(PROPTYPE); } return type.equals(r.getString(PROPTYPE)); }).collect(Collectors.toList()); if ("boolean".equals(type)) { List<Boolean> sourceList = items.stream().map(n -> n.getBoolean(PROP_MIXED)).collect(Collectors.toList()); List<Boolean> toBeSortedList = new ArrayList<>(sourceList); toBeSortedList.sort(Comparator.comparing(Boolean::booleanValue)); assertThat(toBeSortedList).containsExactlyElementsOf(sourceList); } if ("number".equals(type)) { List<Number> numberList = items.stream().map(n -> (Number) n.get(PROP_MIXED)).collect(Collectors.toList()); List<Number> toBeSortedList = new ArrayList<>(numberList); Collections.copy(toBeSortedList, numberList); toBeSortedList.sort(Comparator.comparingDouble(Number::doubleValue)); assertThat(toBeSortedList).containsExactlyElementsOf(numberList); } if ("string".equals(type)) { List<String> sourceList = items.stream().map(n -> n.getString(PROP_MIXED)).collect(Collectors.toList()); List<String> toBeSortedList = new ArrayList<>(sourceList); Collections.copy(toBeSortedList, sourceList); toBeSortedList.sort(Comparator.comparing(String::valueOf)); assertThat(toBeSortedList).containsExactlyElementsOf(sourceList); } } } private List<PartitionKeyRange> getPartitionKeyRanges( String containerId, AsyncDocumentClient asyncDocumentClient) { List<PartitionKeyRange> partitionKeyRanges = new ArrayList<>(); List<FeedResponse<PartitionKeyRange>> partitionFeedResponseList = asyncDocumentClient .readPartitionKeyRanges("/dbs/" + createdDatabase.getId() + "/colls/" + containerId, new CosmosQueryRequestOptions()) .collectList().block(); partitionFeedResponseList.forEach(f -> 
partitionKeyRanges.addAll(f.getResults())); return partitionKeyRanges; } @DataProvider(name = "topValue") public Object[][] topValueParameter() { return new Object[][] { { 0 }, { 1 }, { 5 }, { createdDocuments.size() - 1 }, { createdDocuments.size() }, { createdDocuments.size() + 1 }, { 2 * createdDocuments.size() } }; } @Test(groups = { "simple" }, timeOut = TIMEOUT, dataProvider = "topValue") public void queryOrderWithTop(int topValue) throws Exception { String query = String.format("SELECT TOP %d * FROM r ORDER BY r.propInt", topValue); CosmosQueryRequestOptions options = new CosmosQueryRequestOptions(); int pageSize = 3; CosmosPagedFlux<InternalObjectNode> queryObservable = createdCollection.queryItems(query, options, InternalObjectNode.class); Comparator<Integer> validatorComparator = Comparator.nullsFirst(Comparator.<Integer>naturalOrder()); List<String> expectedResourceIds = sortDocumentsAndCollectResourceIds("propInt", d -> ModelBridgeInternal.getIntFromJsonSerializable(d,"propInt"), validatorComparator) .stream().limit(topValue).collect(Collectors.toList()); int expectedPageSize = expectedNumberOfPages(expectedResourceIds.size(), pageSize); FeedResponseListValidator<InternalObjectNode> validator = new FeedResponseListValidator.Builder<InternalObjectNode>() .containsExactly(expectedResourceIds) .numberOfPages(expectedPageSize) .allPagesSatisfy(new FeedResponseValidator.Builder<InternalObjectNode>() .hasRequestChargeHeader().build()) .totalRequestChargeIsAtLeast(numberOfPartitions * (topValue > 0 ? 
minQueryRequestChargePerPartition : 1)) .build(); validateQuerySuccess(queryObservable.byPage(pageSize), validator); } private <T> List<String> sortDocumentsAndCollectResourceIds(String propName, Function<InternalObjectNode, T> extractProp, Comparator<T> comparer) { return createdDocuments.stream() .filter(d -> ModelBridgeInternal.getMapFromJsonSerializable(d).containsKey(propName)) .sorted((d1, d2) -> comparer.compare(extractProp.apply(d1), extractProp.apply(d2))) .map(Resource::getResourceId).collect(Collectors.toList()); } @SuppressWarnings("unchecked") private <T> List<T> sortDocumentsAndCollectValues(String propName, Function<InternalObjectNode, T> extractProp, Comparator<T> comparer) { return createdDocuments.stream() .filter(d -> ModelBridgeInternal.getMapFromJsonSerializable(d).containsKey(propName)) .sorted((d1, d2) -> comparer.compare(extractProp.apply(d1), extractProp.apply(d2))) .map(d -> (T)ModelBridgeInternal.getMapFromJsonSerializable(d).get(propName)) .collect(Collectors.toList()); } @Test(groups = { "simple" }, timeOut = TIMEOUT) public void queryScopedToSinglePartition_StartWithContinuationToken() throws Exception { String query = "SELECT * FROM r ORDER BY r.propScopedPartitionInt ASC"; CosmosQueryRequestOptions options = new CosmosQueryRequestOptions(); options.setPartitionKey(new PartitionKey("duplicateParitionKeyValue")); CosmosPagedFlux<InternalObjectNode> queryObservable = createdCollection.queryItems(query, options, InternalObjectNode.class); TestSubscriber<FeedResponse<InternalObjectNode>> subscriber = new TestSubscriber<>(); queryObservable.byPage(3).take(1).subscribe(subscriber); subscriber.awaitTerminalEvent(); subscriber.assertComplete(); subscriber.assertNoErrors(); assertThat(subscriber.valueCount()).isEqualTo(1); @SuppressWarnings("unchecked") FeedResponse<InternalObjectNode> page = (FeedResponse<InternalObjectNode>) subscriber.getEvents().get(0).get(0); assertThat(page.getResults()).hasSize(3); 
assertThat(page.getContinuationToken()).isNotEmpty(); queryObservable = createdCollection.queryItems(query, options, InternalObjectNode.class); List<InternalObjectNode> expectedDocs = createdDocuments.stream() .filter(d -> (StringUtils.equals("duplicateParitionKeyValue", ModelBridgeInternal.getStringFromJsonSerializable(d,"mypk")))) .filter(d -> (ModelBridgeInternal.getIntFromJsonSerializable(d,"propScopedPartitionInt") > 2)).collect(Collectors.toList()); Integer maxItemCount = ModelBridgeInternal.getMaxItemCountFromQueryRequestOptions(options); int expectedPageSize = (expectedDocs.size() + maxItemCount - 1) / maxItemCount; assertThat(expectedDocs).hasSize(10 - 3); FeedResponseListValidator<InternalObjectNode> validator = null; validator = new FeedResponseListValidator.Builder<InternalObjectNode>() .containsExactly(expectedDocs.stream() .sorted((e1, e2) -> Integer.compare(ModelBridgeInternal.getIntFromJsonSerializable(e1,"propScopedPartitionInt"), ModelBridgeInternal.getIntFromJsonSerializable(e2,"propScopedPartitionInt"))) .map(d -> d.getResourceId()).collect(Collectors.toList())) .numberOfPages(expectedPageSize) .allPagesSatisfy(new FeedResponseValidator.Builder<InternalObjectNode>() .requestChargeGreaterThanOrEqualTo(1.0).build()) .build(); validateQuerySuccess(queryObservable.byPage(page.getContinuationToken()), validator); } @Test(groups = { "simple" }, timeOut = TIMEOUT) public void orderByContinuationTokenRoundTrip() throws Exception { { OrderByContinuationToken orderByContinuationToken = new OrderByContinuationToken( new CompositeContinuationToken( "asdf", new Range<String>("A", "D", false, true)), new QueryItem[] {new QueryItem("{\"item\" : 42}")}, "rid", false); String serialized = orderByContinuationToken.toString(); ValueHolder<OrderByContinuationToken> outOrderByContinuationToken = new ValueHolder<OrderByContinuationToken>(); assertThat(OrderByContinuationToken.tryParse(serialized, outOrderByContinuationToken)).isTrue(); OrderByContinuationToken 
deserialized = outOrderByContinuationToken.v; CompositeContinuationToken compositeContinuationToken = deserialized.getCompositeContinuationToken(); String token = compositeContinuationToken.getToken(); Range<String> range = compositeContinuationToken.getRange(); assertThat(token).isEqualTo("asdf"); assertThat(range.getMin()).isEqualTo("A"); assertThat(range.getMax()).isEqualTo("D"); assertThat(range.isMinInclusive()).isEqualTo(false); assertThat(range.isMaxInclusive()).isEqualTo(true); QueryItem[] orderByItems = deserialized.getOrderByItems(); assertThat(orderByItems).isNotNull(); assertThat(orderByItems.length).isEqualTo(1); assertThat(orderByItems[0].getItem()).isEqualTo(42); String rid = deserialized.getRid(); assertThat(rid).isEqualTo("rid"); boolean inclusive = deserialized.getInclusive(); assertThat(inclusive).isEqualTo(false); } { ValueHolder<OrderByContinuationToken> outOrderByContinuationToken = new ValueHolder<OrderByContinuationToken>(); assertThat(OrderByContinuationToken.tryParse("{\"property\" : \"Not a valid Order By Token\"}", outOrderByContinuationToken)).isFalse(); } } @Test(groups = { "simple" }, timeOut = TIMEOUT * 10, dataProvider = "sortOrder", retryAnalyzer = RetryAnalyzer.class) public void queryDocumentsWithOrderByContinuationTokensInteger(String sortOrder) throws Exception { String query = String.format("SELECT * FROM c ORDER BY c.propInt %s", sortOrder); Comparator<Integer> order = sortOrder.equals("ASC")?Comparator.naturalOrder():Comparator.reverseOrder(); Comparator<Integer> validatorComparator = Comparator.nullsFirst(order); List<String> expectedResourceIds = sortDocumentsAndCollectResourceIds("propInt", d -> ModelBridgeInternal.getIntFromJsonSerializable(d,"propInt"), validatorComparator); this.queryWithContinuationTokensAndPageSizes(query, new int[] { 1, 5, 10, 100}, expectedResourceIds); } @Test(groups = { "simple" }, timeOut = TIMEOUT * 10, dataProvider = "sortOrder") public void 
queryDocumentsWithOrderByContinuationTokensString(String sortOrder) throws Exception { String query = String.format("SELECT * FROM c ORDER BY c.id %s", sortOrder); Comparator<String> order = sortOrder.equals("ASC")?Comparator.naturalOrder():Comparator.reverseOrder(); Comparator<String> validatorComparator = Comparator.nullsFirst(order); List<String> expectedResourceIds = sortDocumentsAndCollectResourceIds("id", d -> ModelBridgeInternal.getStringFromJsonSerializable(d,"id"), validatorComparator); this.queryWithContinuationTokensAndPageSizes(query, new int[] { 1, 5, 10, 100 }, expectedResourceIds); } @Test(groups = { "simple" }, timeOut = TIMEOUT * 10, dataProvider = "sortOrder") public void queryDocumentsWithInvalidOrderByContinuationTokensString(String sortOrder) throws Exception { String query = String.format("SELECT * FROM c ORDER BY c.id %s", sortOrder); Comparator<String> validatorComparator; if(sortOrder.equals("ASC")) { validatorComparator = Comparator.nullsFirst(Comparator.<String>naturalOrder()); }else{ validatorComparator = Comparator.nullsFirst(Comparator.<String>reverseOrder()); } List<String> expectedResourceIds = sortDocumentsAndCollectResourceIds("id", d -> ModelBridgeInternal.getStringFromJsonSerializable(d,"id"), validatorComparator); this.assertInvalidContinuationToken(query, new int[] { 1, 5, 10, 100 }, expectedResourceIds); } @Test(groups = {"simple"}, timeOut = TIMEOUT, dataProvider = "sortOrder") public void queryOrderByArray(String sortOrder) throws Exception { String query = String.format("SELECT * FROM r ORDER BY r.propArray %s", sortOrder); int pageSize = 3; List<InternalObjectNode> results1 = this.queryWithContinuationTokens(query, pageSize); List<InternalObjectNode> results2 = this.queryWithContinuationTokens(query, this.createdDocuments.size()); assertThat(results1.stream().map(r -> r.getResourceId()).collect(Collectors.toList())) .containsExactlyElementsOf(results2.stream().limit(pageSize).map(r -> 
r.getResourceId()).collect(Collectors.toList())); } @Test(groups = {"simple"}, timeOut = TIMEOUT, dataProvider = "sortOrder") public void queryOrderByObject(String sortOrder) throws Exception { String query = String.format("SELECT * FROM r ORDER BY r.propObject %s", sortOrder); int pageSize = 3; List<InternalObjectNode> results1 = this.queryWithContinuationTokens(query, pageSize); List<InternalObjectNode> results2 = this.queryWithContinuationTokens(query, this.createdDocuments.size()); assertThat(results1.stream().map(r -> r.getResourceId()).collect(Collectors.toList())) .containsExactlyElementsOf(results2.stream().limit(pageSize).map(r -> r.getResourceId()).collect(Collectors.toList())); } public InternalObjectNode createDocument(CosmosAsyncContainer cosmosContainer, Map<String, Object> keyValueProps) { InternalObjectNode docDefinition = getDocumentDefinition(keyValueProps); return BridgeInternal.getProperties(cosmosContainer.createItem(docDefinition).block()); } public List<InternalObjectNode> bulkInsert(CosmosAsyncContainer cosmosContainer, List<Map<String, Object>> keyValuePropsList) { ArrayList<InternalObjectNode> result = new ArrayList<InternalObjectNode>(); for(Map<String, Object> keyValueProps: keyValuePropsList) { InternalObjectNode docDefinition = getDocumentDefinition(keyValueProps); result.add(docDefinition); } return bulkInsertBlocking(cosmosContainer, result); } @BeforeMethod(groups = { "simple" }) public void beforeMethod() throws Exception { TimeUnit.SECONDS.sleep(10); } @BeforeClass(groups = { "simple" }, timeOut = 4 * SETUP_TIMEOUT) private void updateCollectionIndex() { CosmosContainerProperties containerProperties = createdCollection.read().block().getProperties(); IndexingPolicy indexingPolicy = containerProperties.getIndexingPolicy(); List<IncludedPath> includedPaths = indexingPolicy.getIncludedPaths(); IncludedPath includedPath = new IncludedPath("/propMixed/?"); if (!includedPaths.contains(includedPath)) { includedPaths.add(includedPath); 
indexingPolicy.setIncludedPaths(includedPaths); containerProperties.setIndexingPolicy(indexingPolicy); createdCollection.replace(containerProperties).block(); } } @AfterClass(groups = { "simple" }, timeOut = SHUTDOWN_TIMEOUT, alwaysRun = true) public void afterClass() { safeClose(client); } private void assertInvalidContinuationToken(String query, int[] pageSize, List<String> expectedIds) { String requestContinuation = null; do { CosmosQueryRequestOptions options = new CosmosQueryRequestOptions(); options.setMaxDegreeOfParallelism(2); OrderByContinuationToken orderByContinuationToken = new OrderByContinuationToken( new CompositeContinuationToken( "asdf", new Range<String>("A", "D", false, true)), new QueryItem[] {new QueryItem("{\"item\" : 42}")}, "rid", false); CosmosPagedFlux<InternalObjectNode> queryObservable = createdCollection.queryItems(query, options, InternalObjectNode.class); TestSubscriber<FeedResponse<InternalObjectNode>> testSubscriber = new TestSubscriber<>(); queryObservable.byPage(orderByContinuationToken.toString(),1).subscribe(testSubscriber); testSubscriber.awaitTerminalEvent(TIMEOUT, TimeUnit.MILLISECONDS); testSubscriber.assertError(CosmosException.class); } while (requestContinuation != null); } private void queryWithContinuationTokensAndPageSizes(String query, int[] pageSizes, List<String> expectedIds) { for (int pageSize : pageSizes) { List<InternalObjectNode> receivedDocuments = this.queryWithContinuationTokens(query, pageSize); List<String> actualIds = new ArrayList<String>(); for (InternalObjectNode document : receivedDocuments) { actualIds.add(document.getResourceId()); } assertThat(actualIds).containsExactlyElementsOf(expectedIds); } } private List<InternalObjectNode> queryWithContinuationTokens(String query, int pageSize) { String requestContinuation = null; List<String> continuationTokens = new ArrayList<String>(); List<InternalObjectNode> receivedDocuments = new ArrayList<InternalObjectNode>(); do { CosmosQueryRequestOptions options 
= new CosmosQueryRequestOptions(); options.setMaxDegreeOfParallelism(2); CosmosPagedFlux<InternalObjectNode> queryObservable = createdCollection.queryItems(query, options, InternalObjectNode.class); TestSubscriber<FeedResponse<InternalObjectNode>> testSubscriber = new TestSubscriber<>(); queryObservable.byPage(requestContinuation, pageSize).subscribe(testSubscriber); testSubscriber.awaitTerminalEvent(TIMEOUT, TimeUnit.MILLISECONDS); testSubscriber.assertNoErrors(); testSubscriber.assertComplete(); @SuppressWarnings("unchecked") FeedResponse<InternalObjectNode> firstPage = (FeedResponse<InternalObjectNode>) testSubscriber.getEvents().get(0).get(0); requestContinuation = firstPage.getContinuationToken(); receivedDocuments.addAll(firstPage.getResults()); continuationTokens.add(requestContinuation); } while (requestContinuation != null); return receivedDocuments; } private static InternalObjectNode getDocumentDefinition(String partitionKey, String id, Map<String, Object> keyValuePair) { StringBuilder sb = new StringBuilder(); sb.append("{\n"); for(String key: keyValuePair.keySet()) { Object val = keyValuePair.get(key); sb.append(" "); sb.append("\"").append(key).append("\"").append(" :" ); if (val == null) { sb.append("null"); } else { sb.append(toJson(val)); } sb.append(",\n"); } sb.append(String.format(" \"id\": \"%s\",\n", id)); sb.append(String.format(" \"mypk\": \"%s\"\n", partitionKey)); sb.append("}"); return new InternalObjectNode(sb.toString()); } private static InternalObjectNode getDocumentDefinition(Map<String, Object> keyValuePair) { String uuid = UUID.randomUUID().toString(); return getDocumentDefinition(uuid, uuid, keyValuePair); } private static String toJson(Object object){ try { return Utils.getSimpleObjectMapper().writeValueAsString(object); } catch (JsonProcessingException e) { throw new IllegalStateException(e); } } }
> `switch (i % 5) {` — this switch covers only five of the mixed-type cases; it is missing the "null" and "undefined" (property absent) cases, and it never sets the expected-type label the mixed-type ORDER BY test checks.
/**
 * TestNG fixture: provisions the shared client/database/multi-partition container,
 * truncates it, and seeds the documents every test in this class queries:
 * 30 documents with propInt/propStr/propArray/propObject plus a mixed-type
 * PROP_MIXED field, one property-less document (all fields "undefined"), and
 * 10 documents sharing the partition key "duplicateParitionKeyValue" for the
 * single-partition continuation-token test. Finally records the partition count,
 * waits for replicas to catch up, and installs the /propMixed range index.
 *
 * @throws Exception if fixture setup (client build, inserts, index update) fails
 */
public void before_OrderbyDocumentQueryTest() throws Exception {
    client = getClientBuilder().buildAsyncClient();
    createdDatabase = getSharedCosmosDatabase(client);
    createdCollection = getSharedMultiPartitionCosmosContainer(client);
    truncateCollection(createdCollection);

    List<Map<String, Object>> keyValuePropsList = new ArrayList<>();
    Map<String, Object> props;
    // Alternates the boolean PROP_MIXED value so ordering among booleans is observable.
    boolean flag = false;
    for (int i = 0; i < 30; i++) {
        props = new HashMap<>();
        props.put("propInt", i);
        props.put("propStr", String.valueOf(i));
        List<Integer> orderByArray = new ArrayList<Integer>();
        Map<String, String> orderByObject = new HashMap<>();
        for (int k = 0; k < 3; k++) {
            orderByArray.add(k + i);
            orderByObject.put("key1", String.valueOf(i));
            orderByObject.put("key2", String.valueOf(orderByArray.get(k)));
        }
        props.put("propArray", orderByArray);
        props.put("propObject", orderByObject);
        // Seed PROP_MIXED with every JSON type queryOrderByMixedTypes inspects.
        // The previous i % 5 switch omitted the "null", "boolean" and "undefined"
        // (i % 8 == 7: property left absent) cases and never recorded the expected
        // type label; PROPTYPE now carries that label for the test's assertions.
        switch (i % 8) {
            case 0:
                props.put(PROP_MIXED, i);
                props.put(PROPTYPE, "number");
                break;
            case 1:
                props.put(PROP_MIXED, String.valueOf(i));
                props.put(PROPTYPE, "string");
                break;
            case 2:
                props.put(PROP_MIXED, orderByArray);
                props.put(PROPTYPE, "array");
                break;
            case 3:
                props.put(PROP_MIXED, orderByObject);
                props.put(PROPTYPE, "object");
                break;
            case 4:
                props.put(PROP_MIXED, (float) i * 3.17);
                props.put(PROPTYPE, "number");
                break;
            case 5:
                props.put(PROP_MIXED, null);
                props.put(PROPTYPE, "null");
                break;
            case 6:
                flag = !flag;
                props.put(PROP_MIXED, flag);
                props.put(PROPTYPE, "boolean");
                break;
            default:
                // i % 8 == 7: PROP_MIXED deliberately absent -> "undefined" type.
                break;
        }
        keyValuePropsList.add(props);
    }
    // One extra document with no user properties at all: every queried field is "undefined".
    props = new HashMap<>();
    keyValuePropsList.add(props);
    createdDocuments = bulkInsert(createdCollection, keyValuePropsList);

    // 10 documents scoped to one logical partition for the continuation-token test.
    for (int i = 0; i < 10; i++) {
        Map<String, Object> p = new HashMap<>();
        p.put("propScopedPartitionInt", i);
        InternalObjectNode doc =
            getDocumentDefinition("duplicateParitionKeyValue", UUID.randomUUID().toString(), p);
        // presumably resolves to a base-class overload taking an InternalObjectNode;
        // the overload visible in this class takes a Map — TODO confirm against TestSuiteBase.
        createdDocuments.add(createDocument(createdCollection, doc));
    }
    numberOfPartitions = CosmosBridgeInternal.getAsyncDocumentClient(client)
        .readPartitionKeyRanges(
            "dbs/" + createdDatabase.getId() + "/colls/" + createdCollection.getId(), null)
        .flatMap(p -> Flux.fromIterable(p.getResults()))
        .collectList().single().block().size();
    waitIfNeededForReplicasToCatchUp(getClientBuilder());
    updateCollectionIndex();
}
switch (i % 5) {
/**
 * TestNG fixture: provisions the shared client/database/multi-partition container,
 * truncates it, then seeds the documents the tests query: 30 documents carrying
 * propInt/propStr/propArray/propObject and a mixed-type PROP_MIXED value labelled
 * via PROPTYPE, one property-less document (all fields "undefined"), and 10
 * documents sharing the partition key "duplicateParitionKeyValue". Finally records
 * the partition count, waits for replicas, and installs the /propMixed index.
 *
 * @throws Exception if fixture setup (client build, inserts, index update) fails
 */
public void before_OrderbyDocumentQueryTest() throws Exception {
    client = getClientBuilder().buildAsyncClient();
    createdDatabase = getSharedCosmosDatabase(client);
    createdCollection = getSharedMultiPartitionCosmosContainer(client);
    truncateCollection(createdCollection);
    List<Map<String, Object>> keyValuePropsList = new ArrayList<>();
    Map<String, Object> props;
    // Alternates the boolean PROP_MIXED value so ordering among booleans is observable.
    boolean flag = false;
    for(int i = 0; i < 30; i++) {
        props = new HashMap<>();
        props.put("propInt", i);
        props.put("propStr", String.valueOf(i));
        List<Integer> orderByArray = new ArrayList<Integer>();
        Map<String, String> orderByObject = new HashMap<>();
        for (int k = 0; k < 3; k++) {
            orderByArray.add(k + i);
            orderByObject.put("key1", String.valueOf(i));
            orderByObject.put("key2", String.valueOf(orderByArray.get(k)));
        }
        props.put("propArray", orderByArray);
        props.put("propObject", orderByObject);
        // Seed PROP_MIXED with one of each JSON type, recording the expected type
        // label in PROPTYPE for queryOrderByMixedTypes' assertions.
        switch (i % 8) {
            case 0: props.put(PROP_MIXED, i); props.put(PROPTYPE, "number"); break;
            case 1: props.put(PROP_MIXED, String.valueOf(i)); props.put(PROPTYPE, "string"); break;
            case 2: props.put(PROP_MIXED, orderByArray); props.put(PROPTYPE, "array"); break;
            case 3: props.put(PROP_MIXED, orderByObject); props.put(PROPTYPE, "object"); break;
            case 4: props.put(PROP_MIXED, (float)i*3.17); props.put(PROPTYPE, "number"); break;
            case 5: props.put(PROP_MIXED, null); props.put(PROPTYPE, "null"); break;
            case 6: flag = !flag; props.put(PROP_MIXED, flag); props.put(PROPTYPE, "boolean"); break;
            // i % 8 == 7: PROP_MIXED deliberately absent -> "undefined" type.
            default: break;
        }
        keyValuePropsList.add(props);
    }
    // One extra document with no user properties at all: every queried field is "undefined".
    props = new HashMap<>();
    keyValuePropsList.add(props);
    createdDocuments = bulkInsert(createdCollection, keyValuePropsList);
    // 10 documents scoped to one logical partition for the continuation-token test.
    for(int i = 0; i < 10; i++) {
        Map<String, Object> p = new HashMap<>();
        p.put("propScopedPartitionInt", i);
        InternalObjectNode doc = getDocumentDefinition("duplicateParitionKeyValue", UUID.randomUUID().toString(), p);
        // NOTE(review): 'options' is never used — confirm and remove.
        CosmosItemRequestOptions options = new CosmosItemRequestOptions();
        createdDocuments.add(createDocument(createdCollection, doc));
    }
    numberOfPartitions = CosmosBridgeInternal.getAsyncDocumentClient(client)
        .readPartitionKeyRanges("dbs/" + createdDatabase.getId() + "/colls/" + createdCollection.getId(), null)
        .flatMap(p -> Flux.fromIterable(p.getResults())).collectList().single().block().size();
    waitIfNeededForReplicasToCatchUp(getClientBuilder());
    updateCollectionIndex();
}
/**
 * End-to-end tests for cross-partition ORDER BY queries: content validation,
 * paging, TOP, mixed-type ordering, and order-by continuation tokens.
 *
 * Documents are seeded by the suite-level before method; {@link #numberOfPartitions}
 * feeds the minimum-request-charge assertions.
 */
class OrderbyDocumentQueryTest extends TestSuiteBase {
    // Lower bound of RU charge each partition contributes to a query.
    private final double minQueryRequestChargePerPartition = 2.0;
    private CosmosAsyncClient client;
    private CosmosAsyncContainer createdCollection;
    private CosmosAsyncDatabase createdDatabase;
    private List<InternalObjectNode> createdDocuments = new ArrayList<>();
    private int numberOfPartitions;

    @Factory(dataProvider = "clientBuildersWithDirect")
    public OrderbyDocumentQueryTest(CosmosClientBuilder clientBuilder) {
        super(clientBuilder);
    }

    /**
     * A filtered ORDER BY query matching exactly one document returns that document,
     * with query metrics present or absent per {@code qmEnabled}.
     */
    @Test(groups = { "simple" }, timeOut = TIMEOUT, dataProvider = "queryMetricsArgProvider")
    public void queryDocumentsValidateContent(Boolean qmEnabled) throws Exception {
        InternalObjectNode expectedDocument = createdDocuments.get(0);
        String query = String.format("SELECT * from root r where r.propStr = '%s'"
            + " ORDER BY r.propInt",
            ModelBridgeInternal.getStringFromJsonSerializable(expectedDocument, "propStr"));

        CosmosQueryRequestOptions options = new CosmosQueryRequestOptions();
        if (qmEnabled != null) {
            options.setQueryMetricsEnabled(qmEnabled);
        }
        CosmosPagedFlux<InternalObjectNode> queryObservable =
            createdCollection.queryItems(query, options, InternalObjectNode.class);

        List<String> expectedResourceIds = new ArrayList<>();
        expectedResourceIds.add(expectedDocument.getResourceId());

        // Deep-compare the returned resource against the seeded document.
        Map<String, ResourceValidator<InternalObjectNode>> resourceIDToValidator = new HashMap<>();
        resourceIDToValidator.put(expectedDocument.getResourceId(),
            new ResourceValidator.Builder<InternalObjectNode>().areEqual(expectedDocument).build());

        FeedResponseListValidator<InternalObjectNode> validator =
            new FeedResponseListValidator.Builder<InternalObjectNode>()
                .numberOfPages(1)
                .containsExactly(expectedResourceIds)
                .validateAllResources(resourceIDToValidator)
                .totalRequestChargeIsAtLeast(numberOfPartitions * minQueryRequestChargePerPartition)
                .allPagesSatisfy(new FeedResponseValidator.Builder<InternalObjectNode>().hasRequestChargeHeader().build())
                .hasValidQueryMetrics(qmEnabled)
                .build();

        validateQuerySuccess(queryObservable.byPage(), validator);
    }

    /** An ORDER BY query with a non-matching filter yields one empty page. */
    @Test(groups = { "simple" }, timeOut = TIMEOUT)
    public void queryDocuments_NoResults() throws Exception {
        String query = "SELECT * from root r where r.id = '2' ORDER BY r.propInt";
        CosmosQueryRequestOptions options = new CosmosQueryRequestOptions();
        CosmosPagedFlux<InternalObjectNode> queryObservable =
            createdCollection.queryItems(query, options, InternalObjectNode.class);

        FeedResponseListValidator<InternalObjectNode> validator =
            new FeedResponseListValidator.Builder<InternalObjectNode>()
                .containsExactly(new ArrayList<>())
                .numberOfPages(1)
                .totalRequestChargeIsAtLeast(numberOfPartitions * minQueryRequestChargePerPartition)
                .allPagesSatisfy(new FeedResponseValidator.Builder<InternalObjectNode>()
                    .hasRequestChargeHeader().build())
                .build();
        validateQuerySuccess(queryObservable.byPage(), validator);
    }

    @DataProvider(name = "sortOrder")
    public Object[][] sortOrder() {
        return new Object[][] { { "ASC" }, {"DESC"} };
    }

    /** ORDER BY r.propInt ASC/DESC returns documents in the expected order. */
    @Test(groups = { "simple" }, timeOut = TIMEOUT, dataProvider = "sortOrder")
    public void queryOrderBy(String sortOrder) throws Exception {
        String query = String.format("SELECT * FROM r ORDER BY r.propInt %s", sortOrder);
        CosmosQueryRequestOptions options = new CosmosQueryRequestOptions();
        int pageSize = 3;
        CosmosPagedFlux<InternalObjectNode> queryObservable =
            createdCollection.queryItems(query, options, InternalObjectNode.class);
        Comparator<Integer> validatorComparator = Comparator.nullsFirst(Comparator.<Integer>naturalOrder());

        List<String> expectedResourceIds = sortDocumentsAndCollectResourceIds("propInt",
            d -> ModelBridgeInternal.getIntFromJsonSerializable(d, "propInt"), validatorComparator);
        if ("DESC".equals(sortOrder)) {
            Collections.reverse(expectedResourceIds);
        }

        int expectedPageSize = expectedNumberOfPages(expectedResourceIds.size(), pageSize);
        FeedResponseListValidator<InternalObjectNode> validator =
            new FeedResponseListValidator.Builder<InternalObjectNode>()
                .containsExactly(expectedResourceIds)
                .numberOfPages(expectedPageSize)
                .allPagesSatisfy(new FeedResponseValidator.Builder<InternalObjectNode>()
                    .hasRequestChargeHeader().build())
                .totalRequestChargeIsAtLeast(numberOfPartitions * minQueryRequestChargePerPartition)
                .build();
        validateQuerySuccess(queryObservable.byPage(pageSize), validator);
    }

    /** SELECT VALUE with ORDER BY returns the ordered scalar values themselves. */
    @Test(groups = {"simple"}, timeOut = TIMEOUT, dataProvider = "sortOrder")
    public void queryOrderByWithValue(String sortOrder) throws Exception {
        String query = String.format("SELECT value r.propInt FROM r ORDER BY r.propInt %s", sortOrder);
        CosmosQueryRequestOptions options = new CosmosQueryRequestOptions();
        int pageSize = 3;
        CosmosPagedFlux<Integer> queryObservable =
            createdCollection.queryItems(query, options, Integer.class);
        Comparator<Integer> validatorComparator = Comparator.nullsFirst(Comparator.<Integer>naturalOrder());

        List<Integer> expectedValues = sortDocumentsAndCollectValues("propInt",
            d -> ModelBridgeInternal.getIntFromJsonSerializable(d, "propInt"), validatorComparator);
        if ("DESC".equals(sortOrder)) {
            Collections.reverse(expectedValues);
        }

        int expectedPageSize = expectedNumberOfPages(expectedValues.size(), pageSize);
        FeedResponseListValidator<Integer> validator =
            new FeedResponseListValidator.Builder<Integer>()
                .containsExactlyValues(expectedValues)
                .numberOfPages(expectedPageSize)
                .allPagesSatisfy(new FeedResponseValidator.Builder<Integer>()
                    .hasRequestChargeHeader().build())
                .totalRequestChargeIsAtLeast(numberOfPartitions * minQueryRequestChargePerPartition)
                .build();
        validateQuerySuccess(queryObservable.byPage(pageSize), validator);
    }

    /** Ascending integer ORDER BY across all documents. */
    @Test(groups = { "simple" }, timeOut = TIMEOUT)
    public void queryOrderByInt() throws Exception {
        String query = "SELECT * FROM r ORDER BY r.propInt";
        CosmosQueryRequestOptions options = new CosmosQueryRequestOptions();
        int pageSize = 3;
        CosmosPagedFlux<InternalObjectNode> queryObservable =
            createdCollection.queryItems(query, options, InternalObjectNode.class);

        Comparator<Integer> validatorComparator = Comparator.nullsFirst(Comparator.<Integer>naturalOrder());
        List<String> expectedResourceIds = sortDocumentsAndCollectResourceIds("propInt",
            d -> ModelBridgeInternal.getIntFromJsonSerializable(d, "propInt"), validatorComparator);
        int expectedPageSize = expectedNumberOfPages(expectedResourceIds.size(), pageSize);

        FeedResponseListValidator<InternalObjectNode> validator =
            new FeedResponseListValidator.Builder<InternalObjectNode>()
                .containsExactly(expectedResourceIds)
                .numberOfPages(expectedPageSize)
                .allPagesSatisfy(new FeedResponseValidator.Builder<InternalObjectNode>()
                    .hasRequestChargeHeader().build())
                .totalRequestChargeIsAtLeast(numberOfPartitions * minQueryRequestChargePerPartition)
                .build();
        validateQuerySuccess(queryObservable.byPage(pageSize), validator);
    }

    /** Ascending string ORDER BY across all documents. */
    @Test(groups = { "simple" }, timeOut = TIMEOUT)
    public void queryOrderByString() throws Exception {
        String query = "SELECT * FROM r ORDER BY r.propStr";
        CosmosQueryRequestOptions options = new CosmosQueryRequestOptions();
        int pageSize = 3;
        CosmosPagedFlux<InternalObjectNode> queryObservable =
            createdCollection.queryItems(query, options, InternalObjectNode.class);

        Comparator<String> validatorComparator = Comparator.nullsFirst(Comparator.<String>naturalOrder());
        List<String> expectedResourceIds = sortDocumentsAndCollectResourceIds("propStr",
            d -> ModelBridgeInternal.getStringFromJsonSerializable(d, "propStr"), validatorComparator);
        int expectedPageSize = expectedNumberOfPages(expectedResourceIds.size(), pageSize);

        FeedResponseListValidator<InternalObjectNode> validator =
            new FeedResponseListValidator.Builder<InternalObjectNode>()
                .containsExactly(expectedResourceIds)
                .numberOfPages(expectedPageSize)
                .allPagesSatisfy(new FeedResponseValidator.Builder<InternalObjectNode>()
                    .hasRequestChargeHeader().build())
                .totalRequestChargeIsAtLeast(numberOfPartitions * minQueryRequestChargePerPartition)
                .build();
        validateQuerySuccess(queryObservable.byPage(pageSize), validator);
    }

    /**
     * Cross-partition ORDER BY over a property whose values span multiple JSON types.
     * The validator is order-insensitive (any-order id match + total size), so both
     * sort directions validate against the same id set.
     *
     * BUG FIX: the format string previously ended with a bare space instead of
     * {@code %s}, so the {@code sortOrder} argument from the data provider was
     * silently ignored. It is now interpolated. An unused TestSubscriber that fired
     * a duplicate, never-asserted query execution was also removed.
     */
    @Test(groups = { "simple" }, timeOut = TIMEOUT, dataProvider = "sortOrder")
    public void queryOrderByMixedTypes(String sortOrder) throws Exception {
        // Only meaningful on a container with more than one physical partition.
        List<PartitionKeyRange> partitionKeyRanges =
            getPartitionKeyRanges(createdCollection.getId(), BridgeInternal.getContextClient(this.client));
        assertThat(partitionKeyRanges.size()).isGreaterThan(1);

        String query = String.format("SELECT r.id, r.propMixed FROM r ORDER BY r.propMixed %s", sortOrder);
        CosmosQueryRequestOptions options = new CosmosQueryRequestOptions();

        List<String> orderedIds = createdDocuments.stream()
            .sorted((o1, o2) -> ItemComparator.getInstance().compare(o1.get("propMixed"), o2.get("propMixed")))
            .map(Resource::getId)
            .collect(Collectors.toList());

        int pageSize = 20;
        CosmosPagedFlux<InternalObjectNode> queryFlux =
            createdCollection.queryItems(query, options, InternalObjectNode.class);

        FeedResponseListValidator<InternalObjectNode> validator =
            new FeedResponseListValidator.Builder<InternalObjectNode>()
                .totalSize(orderedIds.size())
                .exactlyContainsIdsInAnyOrder(orderedIds)
                .build();
        validateQuerySuccess(queryFlux.byPage(pageSize), validator);
    }

    /**
     * Reads all partition key ranges of {@code containerId}.
     *
     * @param containerId         id of the container whose ranges to read
     * @param asyncDocumentClient low-level client used for the feed read
     * @return every {@link PartitionKeyRange} across all feed pages
     */
    @NotNull
    private List<PartitionKeyRange> getPartitionKeyRanges(
        String containerId, AsyncDocumentClient asyncDocumentClient) {
        List<PartitionKeyRange> partitionKeyRanges = new ArrayList<>();
        List<FeedResponse<PartitionKeyRange>> partitionFeedResponseList = asyncDocumentClient
            .readPartitionKeyRanges("/dbs/" + createdDatabase.getId() + "/colls/" + containerId,
                new CosmosQueryRequestOptions())
            .collectList().block();
        partitionFeedResponseList.forEach(f -> partitionKeyRanges.addAll(f.getResults()));
        return partitionKeyRanges;
    }

    /** TOP values around the boundaries: 0, 1, mid, size-1, size, size+1, 2*size. */
    @DataProvider(name = "topValue")
    public Object[][] topValueParameter() {
        return new Object[][] {
            { 0 },
            { 1 },
            { 5 },
            { createdDocuments.size() - 1 },
            { createdDocuments.size() },
            { createdDocuments.size() + 1 },
            { 2 * createdDocuments.size() }
        };
    }

    /** TOP n combined with ORDER BY truncates the ordered result to n documents. */
    @Test(groups = { "simple" }, timeOut = TIMEOUT, dataProvider = "topValue")
    public void queryOrderWithTop(int topValue) throws Exception {
        String query = String.format("SELECT TOP %d * FROM r ORDER BY r.propInt", topValue);
        CosmosQueryRequestOptions options = new CosmosQueryRequestOptions();
        int pageSize = 3;
        CosmosPagedFlux<InternalObjectNode> queryObservable =
            createdCollection.queryItems(query, options, InternalObjectNode.class);

        Comparator<Integer> validatorComparator = Comparator.nullsFirst(Comparator.<Integer>naturalOrder());
        List<String> expectedResourceIds = sortDocumentsAndCollectResourceIds("propInt",
            d -> ModelBridgeInternal.getIntFromJsonSerializable(d, "propInt"), validatorComparator)
            .stream().limit(topValue).collect(Collectors.toList());
        int expectedPageSize = expectedNumberOfPages(expectedResourceIds.size(), pageSize);

        FeedResponseListValidator<InternalObjectNode> validator =
            new FeedResponseListValidator.Builder<InternalObjectNode>()
                .containsExactly(expectedResourceIds)
                .numberOfPages(expectedPageSize)
                .allPagesSatisfy(new FeedResponseValidator.Builder<InternalObjectNode>()
                    .hasRequestChargeHeader().build())
                // TOP 0 short-circuits, so only a minimal charge is guaranteed.
                .totalRequestChargeIsAtLeast(numberOfPartitions *
                    (topValue > 0 ? minQueryRequestChargePerPartition : 1))
                .build();
        validateQuerySuccess(queryObservable.byPage(pageSize), validator);
    }

    /**
     * Sorts the seeded documents that contain {@code propName} by the extracted
     * property and returns their resource ids in that order.
     */
    private <T> List<String> sortDocumentsAndCollectResourceIds(String propName,
        Function<InternalObjectNode, T> extractProp, Comparator<T> comparer) {
        return createdDocuments.stream()
            .filter(d -> ModelBridgeInternal.getMapFromJsonSerializable(d).containsKey(propName))
            .sorted((d1, d2) -> comparer.compare(extractProp.apply(d1), extractProp.apply(d2)))
            .map(Resource::getResourceId).collect(Collectors.toList());
    }

    /**
     * Like {@link #sortDocumentsAndCollectResourceIds} but returns the sorted
     * property values themselves (for SELECT VALUE assertions).
     */
    @SuppressWarnings("unchecked")
    private <T> List<T> sortDocumentsAndCollectValues(String propName,
        Function<InternalObjectNode, T> extractProp, Comparator<T> comparer) {
        return createdDocuments.stream()
            .filter(d -> ModelBridgeInternal.getMapFromJsonSerializable(d).containsKey(propName))
            .sorted((d1, d2) -> comparer.compare(extractProp.apply(d1), extractProp.apply(d2)))
            .map(d -> (T) ModelBridgeInternal.getMapFromJsonSerializable(d).get(propName))
            .collect(Collectors.toList());
    }

    /**
     * Within a single logical partition, drains one page of 3 results, then resumes
     * from the returned continuation token and validates the remaining 7 of the 10
     * scoped documents arrive in order.
     */
    @Test(groups = { "simple" }, timeOut = TIMEOUT)
    public void queryScopedToSinglePartition_StartWithContinuationToken() throws Exception {
        String query = "SELECT * FROM r ORDER BY r.propScopedPartitionInt ASC";
        CosmosQueryRequestOptions options = new CosmosQueryRequestOptions();
        options.setPartitionKey(new PartitionKey("duplicateParitionKeyValue"));
        CosmosPagedFlux<InternalObjectNode> queryObservable =
            createdCollection.queryItems(query, options, InternalObjectNode.class);

        // Take exactly the first page of 3 documents to obtain a continuation token.
        TestSubscriber<FeedResponse<InternalObjectNode>> subscriber = new TestSubscriber<>();
        queryObservable.byPage(3).take(1).subscribe(subscriber);

        subscriber.awaitTerminalEvent();
        subscriber.assertComplete();
        subscriber.assertNoErrors();
        assertThat(subscriber.valueCount()).isEqualTo(1);
        @SuppressWarnings("unchecked")
        FeedResponse<InternalObjectNode> page =
            (FeedResponse<InternalObjectNode>) subscriber.getEvents().get(0).get(0);
        assertThat(page.getResults()).hasSize(3);
        assertThat(page.getContinuationToken()).isNotEmpty();

        queryObservable = createdCollection.queryItems(query, options, InternalObjectNode.class);

        // Remaining documents: same partition key, propScopedPartitionInt > 2.
        List<InternalObjectNode> expectedDocs = createdDocuments.stream()
            .filter(d -> (StringUtils.equals("duplicateParitionKeyValue",
                ModelBridgeInternal.getStringFromJsonSerializable(d, "mypk"))))
            .filter(d -> (ModelBridgeInternal.getIntFromJsonSerializable(d, "propScopedPartitionInt") > 2))
            .collect(Collectors.toList());
        Integer maxItemCount = ModelBridgeInternal.getMaxItemCountFromQueryRequestOptions(options);
        int expectedPageSize = (expectedDocs.size() + maxItemCount - 1) / maxItemCount;

        assertThat(expectedDocs).hasSize(10 - 3);

        FeedResponseListValidator<InternalObjectNode> validator =
            new FeedResponseListValidator.Builder<InternalObjectNode>()
                .containsExactly(expectedDocs.stream()
                    .sorted((e1, e2) -> Integer.compare(
                        ModelBridgeInternal.getIntFromJsonSerializable(e1, "propScopedPartitionInt"),
                        ModelBridgeInternal.getIntFromJsonSerializable(e2, "propScopedPartitionInt")))
                    .map(d -> d.getResourceId()).collect(Collectors.toList()))
                .numberOfPages(expectedPageSize)
                .allPagesSatisfy(new FeedResponseValidator.Builder<InternalObjectNode>()
                    .requestChargeGreaterThanOrEqualTo(1.0).build())
                .build();

        validateQuerySuccess(queryObservable.byPage(page.getContinuationToken()), validator);
    }

    /**
     * OrderByContinuationToken serializes to a string and parses back to an
     * equivalent token; a malformed payload fails to parse.
     */
    @Test(groups = { "simple" }, timeOut = TIMEOUT)
    public void orderByContinuationTokenRoundTrip() throws Exception {
        {
            // Serialize, parse back, and verify every field survives the round trip.
            OrderByContinuationToken orderByContinuationToken = new OrderByContinuationToken(
                new CompositeContinuationToken(
                    "asdf",
                    new Range<String>("A", "D", false, true)),
                new QueryItem[] {new QueryItem("{\"item\" : 42}")},
                "rid",
                false);
            String serialized = orderByContinuationToken.toString();
            ValueHolder<OrderByContinuationToken> outOrderByContinuationToken =
                new ValueHolder<OrderByContinuationToken>();

            assertThat(OrderByContinuationToken.tryParse(serialized, outOrderByContinuationToken)).isTrue();
            OrderByContinuationToken deserialized = outOrderByContinuationToken.v;
            CompositeContinuationToken compositeContinuationToken =
                deserialized.getCompositeContinuationToken();
            String token = compositeContinuationToken.getToken();
            Range<String> range = compositeContinuationToken.getRange();
            assertThat(token).isEqualTo("asdf");
            assertThat(range.getMin()).isEqualTo("A");
            assertThat(range.getMax()).isEqualTo("D");
            assertThat(range.isMinInclusive()).isEqualTo(false);
            assertThat(range.isMaxInclusive()).isEqualTo(true);

            QueryItem[] orderByItems = deserialized.getOrderByItems();
            assertThat(orderByItems).isNotNull();
            assertThat(orderByItems.length).isEqualTo(1);
            assertThat(orderByItems[0].getItem()).isEqualTo(42);

            String rid = deserialized.getRid();
            assertThat(rid).isEqualTo("rid");

            boolean inclusive = deserialized.getInclusive();
            assertThat(inclusive).isEqualTo(false);
        }
        {
            // A payload that is not an order-by token must be rejected.
            ValueHolder<OrderByContinuationToken> outOrderByContinuationToken =
                new ValueHolder<OrderByContinuationToken>();
            assertThat(OrderByContinuationToken.tryParse("{\"property\" : \"Not a valid Order By Token\"}",
                outOrderByContinuationToken)).isFalse();
        }
    }

    /** Drains an integer ORDER BY query via continuation tokens at several page sizes. */
    @Test(groups = { "simple" }, timeOut = TIMEOUT * 10, dataProvider = "sortOrder", retryAnalyzer = RetryAnalyzer.class)
    public void queryDocumentsWithOrderByContinuationTokensInteger(String sortOrder) throws Exception {
        String query = String.format("SELECT * FROM c ORDER BY c.propInt %s", sortOrder);
        Comparator<Integer> order =
            sortOrder.equals("ASC") ? Comparator.naturalOrder() : Comparator.reverseOrder();
        Comparator<Integer> validatorComparator = Comparator.nullsFirst(order);
        List<String> expectedResourceIds = sortDocumentsAndCollectResourceIds("propInt",
            d -> ModelBridgeInternal.getIntFromJsonSerializable(d, "propInt"), validatorComparator);
        this.queryWithContinuationTokensAndPageSizes(query, new int[] { 1, 5, 10, 100}, expectedResourceIds);
    }

    /** Drains a string ORDER BY query via continuation tokens at several page sizes. */
    @Test(groups = { "simple" }, timeOut = TIMEOUT * 10, dataProvider = "sortOrder")
    public void queryDocumentsWithOrderByContinuationTokensString(String sortOrder) throws Exception {
        String query = String.format("SELECT * FROM c ORDER BY c.id %s", sortOrder);
        Comparator<String> order =
            sortOrder.equals("ASC") ? Comparator.naturalOrder() : Comparator.reverseOrder();
        Comparator<String> validatorComparator = Comparator.nullsFirst(order);
        List<String> expectedResourceIds = sortDocumentsAndCollectResourceIds("id",
            d -> ModelBridgeInternal.getStringFromJsonSerializable(d, "id"), validatorComparator);
        this.queryWithContinuationTokensAndPageSizes(query, new int[] { 1, 5, 10, 100 }, expectedResourceIds);
    }

    /** Resuming with a mismatched order-by continuation token surfaces a CosmosException. */
    @Test(groups = { "simple" }, timeOut = TIMEOUT * 10, dataProvider = "sortOrder")
    public void queryDocumentsWithInvalidOrderByContinuationTokensString(String sortOrder) throws Exception {
        String query = String.format("SELECT * FROM c ORDER BY c.id %s", sortOrder);
        Comparator<String> validatorComparator;
        if (sortOrder.equals("ASC")) {
            validatorComparator = Comparator.nullsFirst(Comparator.<String>naturalOrder());
        } else {
            validatorComparator = Comparator.nullsFirst(Comparator.<String>reverseOrder());
        }
        List<String> expectedResourceIds = sortDocumentsAndCollectResourceIds("id",
            d -> ModelBridgeInternal.getStringFromJsonSerializable(d, "id"), validatorComparator);
        this.assertInvalidContinuationToken(query, new int[] { 1, 5, 10, 100 }, expectedResourceIds);
    }

    /** ORDER BY over an array property pages consistently across page sizes. */
    @Test(groups = {"simple"}, timeOut = TIMEOUT, dataProvider = "sortOrder")
    public void queryOrderByArray(String sortOrder) throws Exception {
        String query = String.format("SELECT * FROM r ORDER BY r.propArray %s", sortOrder);
        int pageSize = 3;
        List<InternalObjectNode> results1 = this.queryWithContinuationTokens(query, pageSize);
        List<InternalObjectNode> results2 =
            this.queryWithContinuationTokens(query, this.createdDocuments.size());
        // A small-page drain must agree with the prefix of a single-page drain.
        assertThat(results1.stream().map(r -> r.getResourceId()).collect(Collectors.toList()))
            .containsExactlyElementsOf(results2.stream().limit(pageSize)
                .map(r -> r.getResourceId()).collect(Collectors.toList()));
    }

    /** ORDER BY over an object property pages consistently across page sizes. */
    @Test(groups = {"simple"}, timeOut = TIMEOUT, dataProvider = "sortOrder")
    public void queryOrderByObject(String sortOrder) throws Exception {
        String query = String.format("SELECT * FROM r ORDER BY r.propObject %s", sortOrder);
        int pageSize = 3;
        List<InternalObjectNode> results1 = this.queryWithContinuationTokens(query, pageSize);
        List<InternalObjectNode> results2 =
            this.queryWithContinuationTokens(query, this.createdDocuments.size());
        assertThat(results1.stream().map(r -> r.getResourceId()).collect(Collectors.toList()))
            .containsExactlyElementsOf(results2.stream().limit(pageSize)
                .map(r -> r.getResourceId()).collect(Collectors.toList()));
    }

    /** Creates one document built from {@code keyValueProps} and returns its properties. */
    public InternalObjectNode createDocument(CosmosAsyncContainer cosmosContainer,
                                             Map<String, Object> keyValueProps) {
        InternalObjectNode docDefinition = getDocumentDefinition(keyValueProps);
        return BridgeInternal.getProperties(cosmosContainer.createItem(docDefinition).block());
    }

    /** Materializes each property map into a document and bulk-inserts them. */
    public List<InternalObjectNode> bulkInsert(CosmosAsyncContainer cosmosContainer,
                                               List<Map<String, Object>> keyValuePropsList) {
        ArrayList<InternalObjectNode> result = new ArrayList<InternalObjectNode>();
        for (Map<String, Object> keyValueProps : keyValuePropsList) {
            InternalObjectNode docDefinition = getDocumentDefinition(keyValueProps);
            result.add(docDefinition);
        }
        return bulkInsertBlocking(cosmosContainer, result);
    }

    // Pause between tests; presumably to let index updates/replicas settle — TODO confirm.
    @BeforeMethod(groups = { "simple" })
    public void beforeMethod() throws Exception {
        TimeUnit.SECONDS.sleep(10);
    }

    /** Ensures /propMixed/? is an included index path so mixed-type ORDER BY is served. */
    @BeforeClass(groups = { "simple" }, timeOut = 4 * SETUP_TIMEOUT)
    private void updateCollectionIndex() {
        CosmosContainerProperties containerProperties = createdCollection.read().block().getProperties();
        IndexingPolicy indexingPolicy = containerProperties.getIndexingPolicy();
        List<IncludedPath> includedPaths = indexingPolicy.getIncludedPaths();
        IncludedPath includedPath = new IncludedPath("/propMixed/?");
        if (!includedPaths.contains(includedPath)) {
            includedPaths.add(includedPath);
            indexingPolicy.setIncludedPaths(includedPaths);
            containerProperties.setIndexingPolicy(indexingPolicy);
            createdCollection.replace(containerProperties).block();
        }
    }

    @AfterClass(groups = { "simple" }, timeOut = SHUTDOWN_TIMEOUT, alwaysRun = true)
    public void afterClass() {
        safeClose(client);
    }

    /**
     * Executes {@code query} seeded with a fabricated (and therefore invalid for this
     * query) order-by continuation token and asserts the subscriber receives a
     * {@link CosmosException}. (A previous do/while wrapper here could never iterate
     * more than once — its condition variable was always null — and was removed.)
     */
    private void assertInvalidContinuationToken(String query, int[] pageSize, List<String> expectedIds) {
        CosmosQueryRequestOptions options = new CosmosQueryRequestOptions();
        options.setMaxDegreeOfParallelism(2);
        OrderByContinuationToken orderByContinuationToken = new OrderByContinuationToken(
            new CompositeContinuationToken(
                "asdf",
                new Range<String>("A", "D", false, true)),
            new QueryItem[] {new QueryItem("{\"item\" : 42}")},
            "rid",
            false);
        CosmosPagedFlux<InternalObjectNode> queryObservable =
            createdCollection.queryItems(query, options, InternalObjectNode.class);
        TestSubscriber<FeedResponse<InternalObjectNode>> testSubscriber = new TestSubscriber<>();
        queryObservable.byPage(orderByContinuationToken.toString(), 1).subscribe(testSubscriber);
        testSubscriber.awaitTerminalEvent(TIMEOUT, TimeUnit.MILLISECONDS);
        testSubscriber.assertError(CosmosException.class);
    }

    /**
     * For each page size, drains {@code query} with continuation tokens and asserts
     * the concatenated results match {@code expectedIds} exactly and in order.
     */
    private void queryWithContinuationTokensAndPageSizes(String query, int[] pageSizes, List<String> expectedIds) {
        for (int pageSize : pageSizes) {
            List<InternalObjectNode> receivedDocuments = this.queryWithContinuationTokens(query, pageSize);
            List<String> actualIds = new ArrayList<String>();
            for (InternalObjectNode document : receivedDocuments) {
                actualIds.add(document.getResourceId());
            }
            assertThat(actualIds).containsExactlyElementsOf(expectedIds);
        }
    }

    /**
     * Drains {@code query} one page at a time, resuming from each page's continuation
     * token until it is null, and returns all received documents in arrival order.
     * (An unused accumulator of the raw tokens was removed.)
     */
    private List<InternalObjectNode> queryWithContinuationTokens(String query, int pageSize) {
        String requestContinuation = null;
        List<InternalObjectNode> receivedDocuments = new ArrayList<InternalObjectNode>();
        do {
            CosmosQueryRequestOptions options = new CosmosQueryRequestOptions();
            options.setMaxDegreeOfParallelism(2);
            CosmosPagedFlux<InternalObjectNode> queryObservable =
                createdCollection.queryItems(query, options, InternalObjectNode.class);

            TestSubscriber<FeedResponse<InternalObjectNode>> testSubscriber = new TestSubscriber<>();
            queryObservable.byPage(requestContinuation, pageSize).subscribe(testSubscriber);
            testSubscriber.awaitTerminalEvent(TIMEOUT, TimeUnit.MILLISECONDS);
            testSubscriber.assertNoErrors();
            testSubscriber.assertComplete();

            @SuppressWarnings("unchecked")
            FeedResponse<InternalObjectNode> firstPage =
                (FeedResponse<InternalObjectNode>) testSubscriber.getEvents().get(0).get(0);
            requestContinuation = firstPage.getContinuationToken();
            receivedDocuments.addAll(firstPage.getResults());
        } while (requestContinuation != null);

        return receivedDocuments;
    }

    /**
     * Builds a document JSON body from the given key/value pairs plus the supplied
     * id and partition key ("mypk"). Null values are emitted as JSON null.
     */
    private static InternalObjectNode getDocumentDefinition(String partitionKey, String id,
                                                            Map<String, Object> keyValuePair) {
        StringBuilder sb = new StringBuilder();
        sb.append("{\n");
        for (String key : keyValuePair.keySet()) {
            Object val = keyValuePair.get(key);
            sb.append("  ");
            sb.append("\"").append(key).append("\"").append(" :" );
            if (val == null) {
                sb.append("null");
            } else {
                sb.append(toJson(val));
            }
            sb.append(",\n");
        }
        sb.append(String.format("  \"id\": \"%s\",\n", id));
        sb.append(String.format("  \"mypk\": \"%s\"\n", partitionKey));
        sb.append("}");
        return new InternalObjectNode(sb.toString());
    }

    /** Convenience overload: one random UUID serves as both id and partition key. */
    private static InternalObjectNode getDocumentDefinition(Map<String, Object> keyValuePair) {
        String uuid = UUID.randomUUID().toString();
        return getDocumentDefinition(uuid, uuid, keyValuePair);
    }

    /** Serializes {@code object} to JSON; wraps checked Jackson failures as unchecked. */
    private static String toJson(Object object) {
        try {
            return Utils.getSimpleObjectMapper().writeValueAsString(object);
        } catch (JsonProcessingException e) {
            throw new IllegalStateException(e);
        }
    }
}
class OrderbyDocumentQueryTest extends TestSuiteBase { public static final String PROPTYPE = "proptype"; public static final String PROP_MIXED = "propMixed"; private final double minQueryRequestChargePerPartition = 2.0; private CosmosAsyncClient client; private CosmosAsyncContainer createdCollection; private CosmosAsyncDatabase createdDatabase; private List<InternalObjectNode> createdDocuments = new ArrayList<>(); private int numberOfPartitions; @Factory(dataProvider = "clientBuildersWithDirect") public OrderbyDocumentQueryTest(CosmosClientBuilder clientBuilder) { super(clientBuilder); } @Test(groups = { "simple" }, timeOut = TIMEOUT, dataProvider = "queryMetricsArgProvider") public void queryDocumentsValidateContent(Boolean qmEnabled) throws Exception { InternalObjectNode expectedDocument = createdDocuments.get(0); String query = String.format("SELECT * from root r where r.propStr = '%s'" + " ORDER BY r.propInt" , ModelBridgeInternal.getStringFromJsonSerializable(expectedDocument,"propStr")); CosmosQueryRequestOptions options = new CosmosQueryRequestOptions(); if (qmEnabled != null) { options.setQueryMetricsEnabled(qmEnabled); } CosmosPagedFlux<InternalObjectNode> queryObservable = createdCollection.queryItems(query, options, InternalObjectNode.class); List<String> expectedResourceIds = new ArrayList<>(); expectedResourceIds.add(expectedDocument.getResourceId()); Map<String, ResourceValidator<InternalObjectNode>> resourceIDToValidator = new HashMap<>(); resourceIDToValidator.put(expectedDocument.getResourceId(), new ResourceValidator.Builder<InternalObjectNode>().areEqual(expectedDocument).build()); FeedResponseListValidator<InternalObjectNode> validator = new FeedResponseListValidator.Builder<InternalObjectNode>() .numberOfPages(1) .containsExactly(expectedResourceIds) .validateAllResources(resourceIDToValidator) .totalRequestChargeIsAtLeast(numberOfPartitions * minQueryRequestChargePerPartition) .allPagesSatisfy(new 
FeedResponseValidator.Builder<InternalObjectNode>().hasRequestChargeHeader().build()) .hasValidQueryMetrics(qmEnabled) .build(); validateQuerySuccess(queryObservable.byPage(), validator); } @Test(groups = { "simple" }, timeOut = TIMEOUT) public void queryDocuments_NoResults() throws Exception { String query = "SELECT * from root r where r.id = '2' ORDER BY r.propInt"; CosmosQueryRequestOptions options = new CosmosQueryRequestOptions(); CosmosPagedFlux<InternalObjectNode> queryObservable = createdCollection.queryItems(query, options, InternalObjectNode.class); FeedResponseListValidator<InternalObjectNode> validator = new FeedResponseListValidator.Builder<InternalObjectNode>() .containsExactly(new ArrayList<>()) .numberOfPages(1) .totalRequestChargeIsAtLeast(numberOfPartitions * minQueryRequestChargePerPartition) .allPagesSatisfy(new FeedResponseValidator.Builder<InternalObjectNode>() .hasRequestChargeHeader().build()) .build(); validateQuerySuccess(queryObservable.byPage(), validator); } @DataProvider(name = "sortOrder") public Object[][] sortOrder() { return new Object[][] { { "ASC" }, {"DESC"} }; } @Test(groups = { "simple" }, timeOut = TIMEOUT, dataProvider = "sortOrder") public void queryOrderBy(String sortOrder) throws Exception { String query = String.format("SELECT * FROM r ORDER BY r.propInt %s", sortOrder); CosmosQueryRequestOptions options = new CosmosQueryRequestOptions(); int pageSize = 3; CosmosPagedFlux<InternalObjectNode> queryObservable = createdCollection.queryItems(query, options, InternalObjectNode.class); Comparator<Integer> validatorComparator = Comparator.nullsFirst(Comparator.<Integer>naturalOrder()); List<String> expectedResourceIds = sortDocumentsAndCollectResourceIds("propInt", d -> ModelBridgeInternal.getIntFromJsonSerializable(d,"propInt"), validatorComparator); if ("DESC".equals(sortOrder)) { Collections.reverse(expectedResourceIds); } int expectedPageSize = expectedNumberOfPages(expectedResourceIds.size(), pageSize); 
FeedResponseListValidator<InternalObjectNode> validator = new FeedResponseListValidator.Builder<InternalObjectNode>() .containsExactly(expectedResourceIds) .numberOfPages(expectedPageSize) .allPagesSatisfy(new FeedResponseValidator.Builder<InternalObjectNode>() .hasRequestChargeHeader().build()) .totalRequestChargeIsAtLeast(numberOfPartitions * minQueryRequestChargePerPartition) .build(); validateQuerySuccess(queryObservable.byPage(pageSize), validator); } @Test(groups = {"simple"}, timeOut = TIMEOUT, dataProvider = "sortOrder") public void queryOrderByWithValue(String sortOrder) throws Exception { String query = String.format("SELECT value r.propInt FROM r ORDER BY r.propInt %s", sortOrder); CosmosQueryRequestOptions options = new CosmosQueryRequestOptions(); int pageSize = 3; CosmosPagedFlux<Integer> queryObservable = createdCollection.queryItems(query, options, Integer.class); Comparator<Integer> validatorComparator = Comparator.nullsFirst(Comparator.<Integer>naturalOrder()); List<Integer> expectedValues = sortDocumentsAndCollectValues("propInt", d -> ModelBridgeInternal .getIntFromJsonSerializable(d, "propInt"), validatorComparator); if ("DESC".equals(sortOrder)) { Collections.reverse(expectedValues); } int expectedPageSize = expectedNumberOfPages(expectedValues.size(), pageSize); FeedResponseListValidator<Integer> validator = new FeedResponseListValidator.Builder<Integer>() .containsExactlyValues(expectedValues) .numberOfPages(expectedPageSize) .allPagesSatisfy(new FeedResponseValidator.Builder<Integer>() .hasRequestChargeHeader().build()) .totalRequestChargeIsAtLeast(numberOfPartitions * minQueryRequestChargePerPartition) .build(); validateQuerySuccess(queryObservable.byPage(pageSize), validator); } @Test(groups = { "simple" }, timeOut = TIMEOUT) public void queryOrderByInt() throws Exception { String query = "SELECT * FROM r ORDER BY r.propInt"; CosmosQueryRequestOptions options = new CosmosQueryRequestOptions(); int pageSize = 3; 
CosmosPagedFlux<InternalObjectNode> queryObservable = createdCollection.queryItems(query, options, InternalObjectNode.class); Comparator<Integer> validatorComparator = Comparator.nullsFirst(Comparator.<Integer>naturalOrder()); List<String> expectedResourceIds = sortDocumentsAndCollectResourceIds("propInt", d -> ModelBridgeInternal.getIntFromJsonSerializable(d,"propInt"), validatorComparator); int expectedPageSize = expectedNumberOfPages(expectedResourceIds.size(), pageSize); FeedResponseListValidator<InternalObjectNode> validator = new FeedResponseListValidator.Builder<InternalObjectNode>() .containsExactly(expectedResourceIds) .numberOfPages(expectedPageSize) .allPagesSatisfy(new FeedResponseValidator.Builder<InternalObjectNode>() .hasRequestChargeHeader().build()) .totalRequestChargeIsAtLeast(numberOfPartitions * minQueryRequestChargePerPartition) .build(); validateQuerySuccess(queryObservable.byPage(pageSize), validator); } @Test(groups = { "simple" }, timeOut = TIMEOUT) public void queryOrderByString() throws Exception { String query = "SELECT * FROM r ORDER BY r.propStr"; CosmosQueryRequestOptions options = new CosmosQueryRequestOptions(); int pageSize = 3; CosmosPagedFlux<InternalObjectNode> queryObservable = createdCollection.queryItems(query, options, InternalObjectNode.class); Comparator<String> validatorComparator = Comparator.nullsFirst(Comparator.<String>naturalOrder()); List<String> expectedResourceIds = sortDocumentsAndCollectResourceIds("propStr", d -> ModelBridgeInternal.getStringFromJsonSerializable(d,"propStr"), validatorComparator); int expectedPageSize = expectedNumberOfPages(expectedResourceIds.size(), pageSize); FeedResponseListValidator<InternalObjectNode> validator = new FeedResponseListValidator.Builder<InternalObjectNode>() .containsExactly(expectedResourceIds) .numberOfPages(expectedPageSize) .allPagesSatisfy(new FeedResponseValidator.Builder<InternalObjectNode>() .hasRequestChargeHeader().build()) 
.totalRequestChargeIsAtLeast(numberOfPartitions * minQueryRequestChargePerPartition) .build(); validateQuerySuccess(queryObservable.byPage(pageSize), validator); } @Test(groups = {"simple"}, timeOut = TIMEOUT, dataProvider = "sortOrder") public void queryOrderByMixedTypes(String sortOrder) throws Exception { List<PartitionKeyRange> partitionKeyRanges = getPartitionKeyRanges(createdCollection.getId(), BridgeInternal .getContextClient(this.client)); assertThat(partitionKeyRanges.size()).isGreaterThan(1); String query = String.format("SELECT * FROM r ORDER BY r.propMixed ", sortOrder); CosmosQueryRequestOptions options = new CosmosQueryRequestOptions(); List<String> sourceIds = createdDocuments.stream() .map(Resource::getId) .collect(Collectors.toList()); int pageSize = 20; CosmosPagedFlux<InternalObjectNode> queryFlux = createdCollection .queryItems(query, options, InternalObjectNode.class); TestSubscriber<FeedResponse<InternalObjectNode>> subscriber = new TestSubscriber<>(); queryFlux.byPage(pageSize).subscribe(subscriber); subscriber.awaitTerminalEvent(); subscriber.assertComplete(); subscriber.assertNoErrors(); List<InternalObjectNode> results = new ArrayList<>(); subscriber.values().forEach(feedResponse -> results.addAll(feedResponse.getResults())); assertThat(results.size()).isEqualTo(createdDocuments.size()); List<String> resultIds = results.stream().map(Resource::getId).collect(Collectors.toList()); assertThat(resultIds).containsExactlyInAnyOrderElementsOf(sourceIds); final List<String> typeList = Arrays.asList("undefined", "null", "boolean", "number", "string", "array", "object"); List<String> observedTypes = new ArrayList<>(); results.forEach(item -> { String propType = "undefined"; if (item.has(PROPTYPE)) { propType = item.getString(PROPTYPE); } System.out.println("item.get(PROPTYPE) = " + item.get(PROPTYPE)); System.out.println("propType = " + propType); if (!observedTypes.contains(propType)) { observedTypes.add(propType); } else { boolean equals = 
observedTypes.get(observedTypes.size() - 1).equals(propType); assertThat(equals).isTrue().as("Items of same type should be contiguous"); } }); assertThat(observedTypes).containsExactlyElementsOf(typeList); for (String type : typeList) { List<InternalObjectNode> items = results.stream().filter(r -> { if ("undefined".equals(type)) { return !r.has(PROPTYPE); } return type.equals(r.getString(PROPTYPE)); }).collect(Collectors.toList()); if ("boolean".equals(type)) { List<Boolean> sourceList = items.stream().map(n -> n.getBoolean(PROP_MIXED)).collect(Collectors.toList()); List<Boolean> toBeSortedList = new ArrayList<>(sourceList); toBeSortedList.sort(Comparator.comparing(Boolean::booleanValue)); assertThat(toBeSortedList).containsExactlyElementsOf(sourceList); } if ("number".equals(type)) { List<Number> numberList = items.stream().map(n -> (Number) n.get(PROP_MIXED)).collect(Collectors.toList()); List<Number> toBeSortedList = new ArrayList<>(numberList); Collections.copy(toBeSortedList, numberList); toBeSortedList.sort(Comparator.comparingDouble(Number::doubleValue)); assertThat(toBeSortedList).containsExactlyElementsOf(numberList); } if ("string".equals(type)) { List<String> sourceList = items.stream().map(n -> n.getString(PROP_MIXED)).collect(Collectors.toList()); List<String> toBeSortedList = new ArrayList<>(sourceList); Collections.copy(toBeSortedList, sourceList); toBeSortedList.sort(Comparator.comparing(String::valueOf)); assertThat(toBeSortedList).containsExactlyElementsOf(sourceList); } } } private List<PartitionKeyRange> getPartitionKeyRanges( String containerId, AsyncDocumentClient asyncDocumentClient) { List<PartitionKeyRange> partitionKeyRanges = new ArrayList<>(); List<FeedResponse<PartitionKeyRange>> partitionFeedResponseList = asyncDocumentClient .readPartitionKeyRanges("/dbs/" + createdDatabase.getId() + "/colls/" + containerId, new CosmosQueryRequestOptions()) .collectList().block(); partitionFeedResponseList.forEach(f -> 
partitionKeyRanges.addAll(f.getResults())); return partitionKeyRanges; } @DataProvider(name = "topValue") public Object[][] topValueParameter() { return new Object[][] { { 0 }, { 1 }, { 5 }, { createdDocuments.size() - 1 }, { createdDocuments.size() }, { createdDocuments.size() + 1 }, { 2 * createdDocuments.size() } }; } @Test(groups = { "simple" }, timeOut = TIMEOUT, dataProvider = "topValue") public void queryOrderWithTop(int topValue) throws Exception { String query = String.format("SELECT TOP %d * FROM r ORDER BY r.propInt", topValue); CosmosQueryRequestOptions options = new CosmosQueryRequestOptions(); int pageSize = 3; CosmosPagedFlux<InternalObjectNode> queryObservable = createdCollection.queryItems(query, options, InternalObjectNode.class); Comparator<Integer> validatorComparator = Comparator.nullsFirst(Comparator.<Integer>naturalOrder()); List<String> expectedResourceIds = sortDocumentsAndCollectResourceIds("propInt", d -> ModelBridgeInternal.getIntFromJsonSerializable(d,"propInt"), validatorComparator) .stream().limit(topValue).collect(Collectors.toList()); int expectedPageSize = expectedNumberOfPages(expectedResourceIds.size(), pageSize); FeedResponseListValidator<InternalObjectNode> validator = new FeedResponseListValidator.Builder<InternalObjectNode>() .containsExactly(expectedResourceIds) .numberOfPages(expectedPageSize) .allPagesSatisfy(new FeedResponseValidator.Builder<InternalObjectNode>() .hasRequestChargeHeader().build()) .totalRequestChargeIsAtLeast(numberOfPartitions * (topValue > 0 ? 
minQueryRequestChargePerPartition : 1)) .build(); validateQuerySuccess(queryObservable.byPage(pageSize), validator); } private <T> List<String> sortDocumentsAndCollectResourceIds(String propName, Function<InternalObjectNode, T> extractProp, Comparator<T> comparer) { return createdDocuments.stream() .filter(d -> ModelBridgeInternal.getMapFromJsonSerializable(d).containsKey(propName)) .sorted((d1, d2) -> comparer.compare(extractProp.apply(d1), extractProp.apply(d2))) .map(Resource::getResourceId).collect(Collectors.toList()); } @SuppressWarnings("unchecked") private <T> List<T> sortDocumentsAndCollectValues(String propName, Function<InternalObjectNode, T> extractProp, Comparator<T> comparer) { return createdDocuments.stream() .filter(d -> ModelBridgeInternal.getMapFromJsonSerializable(d).containsKey(propName)) .sorted((d1, d2) -> comparer.compare(extractProp.apply(d1), extractProp.apply(d2))) .map(d -> (T)ModelBridgeInternal.getMapFromJsonSerializable(d).get(propName)) .collect(Collectors.toList()); } @Test(groups = { "simple" }, timeOut = TIMEOUT) public void queryScopedToSinglePartition_StartWithContinuationToken() throws Exception { String query = "SELECT * FROM r ORDER BY r.propScopedPartitionInt ASC"; CosmosQueryRequestOptions options = new CosmosQueryRequestOptions(); options.setPartitionKey(new PartitionKey("duplicateParitionKeyValue")); CosmosPagedFlux<InternalObjectNode> queryObservable = createdCollection.queryItems(query, options, InternalObjectNode.class); TestSubscriber<FeedResponse<InternalObjectNode>> subscriber = new TestSubscriber<>(); queryObservable.byPage(3).take(1).subscribe(subscriber); subscriber.awaitTerminalEvent(); subscriber.assertComplete(); subscriber.assertNoErrors(); assertThat(subscriber.valueCount()).isEqualTo(1); @SuppressWarnings("unchecked") FeedResponse<InternalObjectNode> page = (FeedResponse<InternalObjectNode>) subscriber.getEvents().get(0).get(0); assertThat(page.getResults()).hasSize(3); 
assertThat(page.getContinuationToken()).isNotEmpty(); queryObservable = createdCollection.queryItems(query, options, InternalObjectNode.class); List<InternalObjectNode> expectedDocs = createdDocuments.stream() .filter(d -> (StringUtils.equals("duplicateParitionKeyValue", ModelBridgeInternal.getStringFromJsonSerializable(d,"mypk")))) .filter(d -> (ModelBridgeInternal.getIntFromJsonSerializable(d,"propScopedPartitionInt") > 2)).collect(Collectors.toList()); Integer maxItemCount = ModelBridgeInternal.getMaxItemCountFromQueryRequestOptions(options); int expectedPageSize = (expectedDocs.size() + maxItemCount - 1) / maxItemCount; assertThat(expectedDocs).hasSize(10 - 3); FeedResponseListValidator<InternalObjectNode> validator = null; validator = new FeedResponseListValidator.Builder<InternalObjectNode>() .containsExactly(expectedDocs.stream() .sorted((e1, e2) -> Integer.compare(ModelBridgeInternal.getIntFromJsonSerializable(e1,"propScopedPartitionInt"), ModelBridgeInternal.getIntFromJsonSerializable(e2,"propScopedPartitionInt"))) .map(d -> d.getResourceId()).collect(Collectors.toList())) .numberOfPages(expectedPageSize) .allPagesSatisfy(new FeedResponseValidator.Builder<InternalObjectNode>() .requestChargeGreaterThanOrEqualTo(1.0).build()) .build(); validateQuerySuccess(queryObservable.byPage(page.getContinuationToken()), validator); } @Test(groups = { "simple" }, timeOut = TIMEOUT) public void orderByContinuationTokenRoundTrip() throws Exception { { OrderByContinuationToken orderByContinuationToken = new OrderByContinuationToken( new CompositeContinuationToken( "asdf", new Range<String>("A", "D", false, true)), new QueryItem[] {new QueryItem("{\"item\" : 42}")}, "rid", false); String serialized = orderByContinuationToken.toString(); ValueHolder<OrderByContinuationToken> outOrderByContinuationToken = new ValueHolder<OrderByContinuationToken>(); assertThat(OrderByContinuationToken.tryParse(serialized, outOrderByContinuationToken)).isTrue(); OrderByContinuationToken 
deserialized = outOrderByContinuationToken.v; CompositeContinuationToken compositeContinuationToken = deserialized.getCompositeContinuationToken(); String token = compositeContinuationToken.getToken(); Range<String> range = compositeContinuationToken.getRange(); assertThat(token).isEqualTo("asdf"); assertThat(range.getMin()).isEqualTo("A"); assertThat(range.getMax()).isEqualTo("D"); assertThat(range.isMinInclusive()).isEqualTo(false); assertThat(range.isMaxInclusive()).isEqualTo(true); QueryItem[] orderByItems = deserialized.getOrderByItems(); assertThat(orderByItems).isNotNull(); assertThat(orderByItems.length).isEqualTo(1); assertThat(orderByItems[0].getItem()).isEqualTo(42); String rid = deserialized.getRid(); assertThat(rid).isEqualTo("rid"); boolean inclusive = deserialized.getInclusive(); assertThat(inclusive).isEqualTo(false); } { ValueHolder<OrderByContinuationToken> outOrderByContinuationToken = new ValueHolder<OrderByContinuationToken>(); assertThat(OrderByContinuationToken.tryParse("{\"property\" : \"Not a valid Order By Token\"}", outOrderByContinuationToken)).isFalse(); } } @Test(groups = { "simple" }, timeOut = TIMEOUT * 10, dataProvider = "sortOrder", retryAnalyzer = RetryAnalyzer.class) public void queryDocumentsWithOrderByContinuationTokensInteger(String sortOrder) throws Exception { String query = String.format("SELECT * FROM c ORDER BY c.propInt %s", sortOrder); Comparator<Integer> order = sortOrder.equals("ASC")?Comparator.naturalOrder():Comparator.reverseOrder(); Comparator<Integer> validatorComparator = Comparator.nullsFirst(order); List<String> expectedResourceIds = sortDocumentsAndCollectResourceIds("propInt", d -> ModelBridgeInternal.getIntFromJsonSerializable(d,"propInt"), validatorComparator); this.queryWithContinuationTokensAndPageSizes(query, new int[] { 1, 5, 10, 100}, expectedResourceIds); } @Test(groups = { "simple" }, timeOut = TIMEOUT * 10, dataProvider = "sortOrder") public void 
queryDocumentsWithOrderByContinuationTokensString(String sortOrder) throws Exception { String query = String.format("SELECT * FROM c ORDER BY c.id %s", sortOrder); Comparator<String> order = sortOrder.equals("ASC")?Comparator.naturalOrder():Comparator.reverseOrder(); Comparator<String> validatorComparator = Comparator.nullsFirst(order); List<String> expectedResourceIds = sortDocumentsAndCollectResourceIds("id", d -> ModelBridgeInternal.getStringFromJsonSerializable(d,"id"), validatorComparator); this.queryWithContinuationTokensAndPageSizes(query, new int[] { 1, 5, 10, 100 }, expectedResourceIds); } @Test(groups = { "simple" }, timeOut = TIMEOUT * 10, dataProvider = "sortOrder") public void queryDocumentsWithInvalidOrderByContinuationTokensString(String sortOrder) throws Exception { String query = String.format("SELECT * FROM c ORDER BY c.id %s", sortOrder); Comparator<String> validatorComparator; if(sortOrder.equals("ASC")) { validatorComparator = Comparator.nullsFirst(Comparator.<String>naturalOrder()); }else{ validatorComparator = Comparator.nullsFirst(Comparator.<String>reverseOrder()); } List<String> expectedResourceIds = sortDocumentsAndCollectResourceIds("id", d -> ModelBridgeInternal.getStringFromJsonSerializable(d,"id"), validatorComparator); this.assertInvalidContinuationToken(query, new int[] { 1, 5, 10, 100 }, expectedResourceIds); } @Test(groups = {"simple"}, timeOut = TIMEOUT, dataProvider = "sortOrder") public void queryOrderByArray(String sortOrder) throws Exception { String query = String.format("SELECT * FROM r ORDER BY r.propArray %s", sortOrder); int pageSize = 3; List<InternalObjectNode> results1 = this.queryWithContinuationTokens(query, pageSize); List<InternalObjectNode> results2 = this.queryWithContinuationTokens(query, this.createdDocuments.size()); assertThat(results1.stream().map(r -> r.getResourceId()).collect(Collectors.toList())) .containsExactlyElementsOf(results2.stream().limit(pageSize).map(r -> 
r.getResourceId()).collect(Collectors.toList())); } @Test(groups = {"simple"}, timeOut = TIMEOUT, dataProvider = "sortOrder") public void queryOrderByObject(String sortOrder) throws Exception { String query = String.format("SELECT * FROM r ORDER BY r.propObject %s", sortOrder); int pageSize = 3; List<InternalObjectNode> results1 = this.queryWithContinuationTokens(query, pageSize); List<InternalObjectNode> results2 = this.queryWithContinuationTokens(query, this.createdDocuments.size()); assertThat(results1.stream().map(r -> r.getResourceId()).collect(Collectors.toList())) .containsExactlyElementsOf(results2.stream().limit(pageSize).map(r -> r.getResourceId()).collect(Collectors.toList())); } public InternalObjectNode createDocument(CosmosAsyncContainer cosmosContainer, Map<String, Object> keyValueProps) { InternalObjectNode docDefinition = getDocumentDefinition(keyValueProps); return BridgeInternal.getProperties(cosmosContainer.createItem(docDefinition).block()); } public List<InternalObjectNode> bulkInsert(CosmosAsyncContainer cosmosContainer, List<Map<String, Object>> keyValuePropsList) { ArrayList<InternalObjectNode> result = new ArrayList<InternalObjectNode>(); for(Map<String, Object> keyValueProps: keyValuePropsList) { InternalObjectNode docDefinition = getDocumentDefinition(keyValueProps); result.add(docDefinition); } return bulkInsertBlocking(cosmosContainer, result); } @BeforeMethod(groups = { "simple" }) public void beforeMethod() throws Exception { TimeUnit.SECONDS.sleep(10); } @BeforeClass(groups = { "simple" }, timeOut = 4 * SETUP_TIMEOUT) private void updateCollectionIndex() { CosmosContainerProperties containerProperties = createdCollection.read().block().getProperties(); IndexingPolicy indexingPolicy = containerProperties.getIndexingPolicy(); List<IncludedPath> includedPaths = indexingPolicy.getIncludedPaths(); IncludedPath includedPath = new IncludedPath("/propMixed/?"); if (!includedPaths.contains(includedPath)) { includedPaths.add(includedPath); 
indexingPolicy.setIncludedPaths(includedPaths); containerProperties.setIndexingPolicy(indexingPolicy); createdCollection.replace(containerProperties).block(); } } @AfterClass(groups = { "simple" }, timeOut = SHUTDOWN_TIMEOUT, alwaysRun = true) public void afterClass() { safeClose(client); } private void assertInvalidContinuationToken(String query, int[] pageSize, List<String> expectedIds) { String requestContinuation = null; do { CosmosQueryRequestOptions options = new CosmosQueryRequestOptions(); options.setMaxDegreeOfParallelism(2); OrderByContinuationToken orderByContinuationToken = new OrderByContinuationToken( new CompositeContinuationToken( "asdf", new Range<String>("A", "D", false, true)), new QueryItem[] {new QueryItem("{\"item\" : 42}")}, "rid", false); CosmosPagedFlux<InternalObjectNode> queryObservable = createdCollection.queryItems(query, options, InternalObjectNode.class); TestSubscriber<FeedResponse<InternalObjectNode>> testSubscriber = new TestSubscriber<>(); queryObservable.byPage(orderByContinuationToken.toString(),1).subscribe(testSubscriber); testSubscriber.awaitTerminalEvent(TIMEOUT, TimeUnit.MILLISECONDS); testSubscriber.assertError(CosmosException.class); } while (requestContinuation != null); } private void queryWithContinuationTokensAndPageSizes(String query, int[] pageSizes, List<String> expectedIds) { for (int pageSize : pageSizes) { List<InternalObjectNode> receivedDocuments = this.queryWithContinuationTokens(query, pageSize); List<String> actualIds = new ArrayList<String>(); for (InternalObjectNode document : receivedDocuments) { actualIds.add(document.getResourceId()); } assertThat(actualIds).containsExactlyElementsOf(expectedIds); } } private List<InternalObjectNode> queryWithContinuationTokens(String query, int pageSize) { String requestContinuation = null; List<String> continuationTokens = new ArrayList<String>(); List<InternalObjectNode> receivedDocuments = new ArrayList<InternalObjectNode>(); do { CosmosQueryRequestOptions options 
= new CosmosQueryRequestOptions(); options.setMaxDegreeOfParallelism(2); CosmosPagedFlux<InternalObjectNode> queryObservable = createdCollection.queryItems(query, options, InternalObjectNode.class); TestSubscriber<FeedResponse<InternalObjectNode>> testSubscriber = new TestSubscriber<>(); queryObservable.byPage(requestContinuation, pageSize).subscribe(testSubscriber); testSubscriber.awaitTerminalEvent(TIMEOUT, TimeUnit.MILLISECONDS); testSubscriber.assertNoErrors(); testSubscriber.assertComplete(); @SuppressWarnings("unchecked") FeedResponse<InternalObjectNode> firstPage = (FeedResponse<InternalObjectNode>) testSubscriber.getEvents().get(0).get(0); requestContinuation = firstPage.getContinuationToken(); receivedDocuments.addAll(firstPage.getResults()); continuationTokens.add(requestContinuation); } while (requestContinuation != null); return receivedDocuments; } private static InternalObjectNode getDocumentDefinition(String partitionKey, String id, Map<String, Object> keyValuePair) { StringBuilder sb = new StringBuilder(); sb.append("{\n"); for(String key: keyValuePair.keySet()) { Object val = keyValuePair.get(key); sb.append(" "); sb.append("\"").append(key).append("\"").append(" :" ); if (val == null) { sb.append("null"); } else { sb.append(toJson(val)); } sb.append(",\n"); } sb.append(String.format(" \"id\": \"%s\",\n", id)); sb.append(String.format(" \"mypk\": \"%s\"\n", partitionKey)); sb.append("}"); return new InternalObjectNode(sb.toString()); } private static InternalObjectNode getDocumentDefinition(Map<String, Object> keyValuePair) { String uuid = UUID.randomUUID().toString(); return getDocumentDefinition(uuid, uuid, keyValuePair); } private static String toJson(Object object){ try { return Utils.getSimpleObjectMapper().writeValueAsString(object); } catch (JsonProcessingException e) { throw new IllegalStateException(e); } } }
undefined is included: there are some documents inserted above for which propMixed is undefined.
/**
 * Seeds the shared multi-partition container with the documents every test in this class
 * queries against.
 *
 * <p>30 documents get {@code propInt}/{@code propStr}/{@code propArray}/{@code propObject},
 * and {@code propMixed} cycles through all JSON types ({@code i % 8}): number, string, array,
 * object, float, null, boolean — with {@code i % 8 == 7} leaving {@code propMixed} undefined.
 * Each seeded type is tagged in {@code proptype} so {@code queryOrderByMixedTypes} can verify
 * the ORDER BY type-bucket ordering (it expects exactly the types
 * undefined/null/boolean/number/string/array/object). The previous {@code i % 5} seeding
 * never produced null/boolean values nor the {@code proptype} tag, so that test could not pass.
 *
 * <p>Additionally inserts one empty document (no props at all) and 10 documents sharing the
 * partition key {@code duplicateParitionKeyValue} for the single-partition continuation test,
 * then records the partition count and refreshes the indexing policy.
 *
 * @throws Exception if client creation or document insertion fails
 */
public void before_OrderbyDocumentQueryTest() throws Exception {
    client = getClientBuilder().buildAsyncClient();
    createdDatabase = getSharedCosmosDatabase(client);
    createdCollection = getSharedMultiPartitionCosmosContainer(client);
    truncateCollection(createdCollection);

    List<Map<String, Object>> keyValuePropsList = new ArrayList<>();
    Map<String, Object> props;
    // Toggled per boolean document so both true and false appear in the data set.
    boolean flag = false;

    for (int i = 0; i < 30; i++) {
        props = new HashMap<>();
        props.put("propInt", i);
        props.put("propStr", String.valueOf(i));
        List<Integer> orderByArray = new ArrayList<Integer>();
        Map<String, String> orderByObject = new HashMap<>();
        for (int k = 0; k < 3; k++) {
            orderByArray.add(k + i);
            orderByObject.put("key1", String.valueOf(i));
            orderByObject.put("key2", String.valueOf(orderByArray.get(k)));
        }
        props.put("propArray", orderByArray);
        props.put("propObject", orderByObject);
        // Rotate propMixed through every JSON type and tag the document with the type name.
        switch (i % 8) {
            case 0:
                props.put(PROP_MIXED, i);
                props.put(PROPTYPE, "number");
                break;
            case 1:
                props.put(PROP_MIXED, String.valueOf(i));
                props.put(PROPTYPE, "string");
                break;
            case 2:
                props.put(PROP_MIXED, orderByArray);
                props.put(PROPTYPE, "array");
                break;
            case 3:
                props.put(PROP_MIXED, orderByObject);
                props.put(PROPTYPE, "object");
                break;
            case 4:
                props.put(PROP_MIXED, (float) i * 3.17);
                props.put(PROPTYPE, "number");
                break;
            case 5:
                props.put(PROP_MIXED, null);
                props.put(PROPTYPE, "null");
                break;
            case 6:
                flag = !flag;
                props.put(PROP_MIXED, flag);
                props.put(PROPTYPE, "boolean");
                break;
            default:
                // i % 8 == 7: propMixed (and proptype) intentionally left undefined.
                break;
        }
        keyValuePropsList.add(props);
    }
    // One document with no properties at all — its propMixed is also undefined.
    props = new HashMap<>();
    keyValuePropsList.add(props);
    createdDocuments = bulkInsert(createdCollection, keyValuePropsList);

    // 10 documents in a single logical partition for the scoped continuation-token test.
    for (int i = 0; i < 10; i++) {
        Map<String, Object> p = new HashMap<>();
        p.put("propScopedPartitionInt", i);
        InternalObjectNode doc =
            getDocumentDefinition("duplicateParitionKeyValue", UUID.randomUUID().toString(), p);
        createdDocuments.add(createDocument(createdCollection, doc));
    }

    numberOfPartitions = CosmosBridgeInternal.getAsyncDocumentClient(client)
        .readPartitionKeyRanges("dbs/" + createdDatabase.getId() + "/colls/" + createdCollection.getId(), null)
        .flatMap(p -> Flux.fromIterable(p.getResults())).collectList().single().block().size();

    waitIfNeededForReplicasToCatchUp(getClientBuilder());
    updateCollectionIndex();
}
switch (i % 5) {
/**
 * Seeds the shared multi-partition container with the documents every test in this class
 * queries against.
 *
 * <p>30 documents get {@code propInt}/{@code propStr}/{@code propArray}/{@code propObject},
 * and {@code propMixed} cycles through all JSON types ({@code i % 8}): number, string, array,
 * object, float, null, boolean — with {@code i % 8 == 7} leaving {@code propMixed} undefined.
 * Each seeded type is tagged in {@code proptype} so {@code queryOrderByMixedTypes} can verify
 * ORDER BY type-bucket ordering against the tag.
 *
 * <p>Also inserts one empty document (undefined {@code propMixed}) and 10 documents sharing
 * the partition key {@code duplicateParitionKeyValue} for the single-partition continuation
 * test, then records the partition count and refreshes the indexing policy.
 *
 * @throws Exception if client creation or document insertion fails
 */
public void before_OrderbyDocumentQueryTest() throws Exception {
    client = getClientBuilder().buildAsyncClient();
    createdDatabase = getSharedCosmosDatabase(client);
    createdCollection = getSharedMultiPartitionCosmosContainer(client);
    truncateCollection(createdCollection);
    List<Map<String, Object>> keyValuePropsList = new ArrayList<>();
    Map<String, Object> props;
    // Toggled per boolean document so both true and false appear in the data set.
    boolean flag = false;
    for(int i = 0; i < 30; i++) {
        props = new HashMap<>();
        props.put("propInt", i);
        props.put("propStr", String.valueOf(i));
        List<Integer> orderByArray = new ArrayList<Integer>();
        Map<String, String> orderByObject = new HashMap<>();
        for (int k = 0; k < 3; k++) {
            orderByArray.add(k + i);
            orderByObject.put("key1", String.valueOf(i));
            orderByObject.put("key2", String.valueOf(orderByArray.get(k)));
        }
        props.put("propArray", orderByArray);
        props.put("propObject", orderByObject);
        // Rotate propMixed through every JSON type and tag the document with the type name.
        switch (i % 8) {
            case 0:
                props.put(PROP_MIXED, i);
                props.put(PROPTYPE, "number");
                break;
            case 1:
                props.put(PROP_MIXED, String.valueOf(i));
                props.put(PROPTYPE, "string");
                break;
            case 2:
                props.put(PROP_MIXED, orderByArray);
                props.put(PROPTYPE, "array");
                break;
            case 3:
                props.put(PROP_MIXED, orderByObject);
                props.put(PROPTYPE, "object");
                break;
            case 4:
                props.put(PROP_MIXED, (float)i*3.17);
                props.put(PROPTYPE, "number");
                break;
            case 5:
                props.put(PROP_MIXED, null);
                props.put(PROPTYPE, "null");
                break;
            case 6:
                flag = !flag;
                props.put(PROP_MIXED, flag);
                props.put(PROPTYPE, "boolean");
                break;
            default:
                // i % 8 == 7: propMixed (and proptype) intentionally left undefined.
                break;
        }
        keyValuePropsList.add(props);
    }
    // One document with no properties at all — its propMixed is also undefined.
    props = new HashMap<>();
    keyValuePropsList.add(props);
    createdDocuments = bulkInsert(createdCollection, keyValuePropsList);
    // 10 documents in a single logical partition for the scoped continuation-token test.
    for(int i = 0; i < 10; i++) {
        Map<String, Object> p = new HashMap<>();
        p.put("propScopedPartitionInt", i);
        InternalObjectNode doc = getDocumentDefinition("duplicateParitionKeyValue", UUID.randomUUID().toString(), p);
        // NOTE(review): options is unused and createDocument here takes the InternalObjectNode
        // directly — presumably a TestSuiteBase overload; confirm against the base class.
        CosmosItemRequestOptions options = new CosmosItemRequestOptions();
        createdDocuments.add(createDocument(createdCollection, doc));
    }
    numberOfPartitions = CosmosBridgeInternal.getAsyncDocumentClient(client)
        .readPartitionKeyRanges("dbs/" + createdDatabase.getId() + "/colls/" + createdCollection.getId(), null)
        .flatMap(p -> Flux.fromIterable(p.getResults())).collectList().single().block().size();
    waitIfNeededForReplicasToCatchUp(getClientBuilder());
    updateCollectionIndex();
}
/**
 * Integration tests for cross-partition ORDER BY queries against a shared multi-partition
 * Cosmos container: content validation, empty results, ASC/DESC ordering over ints, strings,
 * arrays, objects and mixed types, TOP, partition-scoped continuation, and order-by
 * continuation-token round-tripping / drain behavior at several page sizes.
 *
 * Expected results are computed client-side from {@code createdDocuments} (populated by the
 * suite setup) and compared against server results via {@code FeedResponseListValidator}.
 */
class OrderbyDocumentQueryTest extends TestSuiteBase {
    // Minimum request charge each physical partition is expected to contribute to a
    // cross-partition query; validators multiply this by numberOfPartitions.
    private final double minQueryRequestChargePerPartition = 2.0;
    private CosmosAsyncClient client;
    private CosmosAsyncContainer createdCollection;
    private CosmosAsyncDatabase createdDatabase;
    // Seed documents inserted by setup; the source of truth for expected query results.
    private List<InternalObjectNode> createdDocuments = new ArrayList<>();
    // Number of physical partition key ranges backing createdCollection.
    private int numberOfPartitions;

    // TestNG factory: one suite instance per client configuration (direct-mode builders).
    @Factory(dataProvider = "clientBuildersWithDirect")
    public OrderbyDocumentQueryTest(CosmosClientBuilder clientBuilder) {
        super(clientBuilder);
    }

    /**
     * Single-document ORDER BY query: verifies the returned document's full content,
     * page count, request charge, and (when enabled) query-metrics presence.
     */
    @Test(groups = { "simple" }, timeOut = TIMEOUT, dataProvider = "queryMetricsArgProvider")
    public void queryDocumentsValidateContent(Boolean qmEnabled) throws Exception {
        InternalObjectNode expectedDocument = createdDocuments.get(0);
        String query = String.format("SELECT * from root r where r.propStr = '%s'"
            + " ORDER BY r.propInt"
            , ModelBridgeInternal.getStringFromJsonSerializable(expectedDocument,"propStr"));

        CosmosQueryRequestOptions options = new CosmosQueryRequestOptions();
        if (qmEnabled != null) {
            // null means "use the service default" — only set the flag when the
            // data provider supplies an explicit value.
            options.setQueryMetricsEnabled(qmEnabled);
        }

        CosmosPagedFlux<InternalObjectNode> queryObservable = createdCollection.queryItems(query, options, InternalObjectNode.class);

        List<String> expectedResourceIds = new ArrayList<>();
        expectedResourceIds.add(expectedDocument.getResourceId());

        // Validate not just the _rid ordering but the entire document payload.
        Map<String, ResourceValidator<InternalObjectNode>> resourceIDToValidator = new HashMap<>();
        resourceIDToValidator.put(expectedDocument.getResourceId(),
            new ResourceValidator.Builder<InternalObjectNode>().areEqual(expectedDocument).build());

        FeedResponseListValidator<InternalObjectNode> validator = new FeedResponseListValidator.Builder<InternalObjectNode>()
            .numberOfPages(1)
            .containsExactly(expectedResourceIds)
            .validateAllResources(resourceIDToValidator)
            .totalRequestChargeIsAtLeast(numberOfPartitions * minQueryRequestChargePerPartition)
            .allPagesSatisfy(new FeedResponseValidator.Builder<InternalObjectNode>().hasRequestChargeHeader().build())
            .hasValidQueryMetrics(qmEnabled)
            .build();

        validateQuerySuccess(queryObservable.byPage(), validator);
    }

    /**
     * ORDER BY query matching nothing: still expects exactly one (empty) page and a
     * per-partition minimum charge, since every partition must be consulted.
     */
    @Test(groups = { "simple" }, timeOut = TIMEOUT)
    public void queryDocuments_NoResults() throws Exception {
        String query = "SELECT * from root r where r.id = '2' ORDER BY r.propInt";
        CosmosQueryRequestOptions options = new CosmosQueryRequestOptions();
        CosmosPagedFlux<InternalObjectNode> queryObservable = createdCollection.queryItems(query, options, InternalObjectNode.class);

        FeedResponseListValidator<InternalObjectNode> validator = new FeedResponseListValidator.Builder<InternalObjectNode>()
            .containsExactly(new ArrayList<>())
            .numberOfPages(1)
            .totalRequestChargeIsAtLeast(numberOfPartitions * minQueryRequestChargePerPartition)
            .allPagesSatisfy(new FeedResponseValidator.Builder<InternalObjectNode>()
                .hasRequestChargeHeader().build())
            .build();
        validateQuerySuccess(queryObservable.byPage(), validator);
    }

    // Sort directions shared by the direction-parameterized tests below.
    @DataProvider(name = "sortOrder")
    public Object[][] sortOrder() {
        return new Object[][] { { "ASC" }, {"DESC"} };
    }

    /**
     * ORDER BY r.propInt in both directions: expected ids are computed by sorting the
     * seed documents locally (null-first natural order, reversed for DESC).
     */
    @Test(groups = { "simple" }, timeOut = TIMEOUT, dataProvider = "sortOrder")
    public void queryOrderBy(String sortOrder) throws Exception {
        String query = String.format("SELECT * FROM r ORDER BY r.propInt %s", sortOrder);
        CosmosQueryRequestOptions options = new CosmosQueryRequestOptions();
        int pageSize = 3;
        CosmosPagedFlux<InternalObjectNode> queryObservable = createdCollection.queryItems(query, options, InternalObjectNode.class);
        Comparator<Integer> validatorComparator = Comparator.nullsFirst(Comparator.<Integer>naturalOrder());

        List<String> expectedResourceIds = sortDocumentsAndCollectResourceIds("propInt",
            d -> ModelBridgeInternal.getIntFromJsonSerializable(d,"propInt"), validatorComparator);
        if ("DESC".equals(sortOrder)) {
            Collections.reverse(expectedResourceIds);
        }
        int expectedPageSize = expectedNumberOfPages(expectedResourceIds.size(), pageSize);

        FeedResponseListValidator<InternalObjectNode> validator = new FeedResponseListValidator.Builder<InternalObjectNode>()
            .containsExactly(expectedResourceIds)
            .numberOfPages(expectedPageSize)
            .allPagesSatisfy(new FeedResponseValidator.Builder<InternalObjectNode>()
                .hasRequestChargeHeader().build())
            .totalRequestChargeIsAtLeast(numberOfPartitions * minQueryRequestChargePerPartition)
            .build();

        validateQuerySuccess(queryObservable.byPage(pageSize), validator);
    }

    /**
     * SELECT VALUE projection with ORDER BY: the page items are bare Integers, so the
     * validator compares values rather than resource ids.
     */
    @Test(groups = {"simple"}, timeOut = TIMEOUT, dataProvider = "sortOrder")
    public void queryOrderByWithValue(String sortOrder) throws Exception {
        String query = String.format("SELECT value r.propInt FROM r ORDER BY r.propInt %s", sortOrder);
        CosmosQueryRequestOptions options = new CosmosQueryRequestOptions();

        int pageSize = 3;
        CosmosPagedFlux<Integer> queryObservable = createdCollection.queryItems(query, options, Integer.class);

        Comparator<Integer> validatorComparator = Comparator.nullsFirst(Comparator.<Integer>naturalOrder());
        List<Integer> expectedValues =
            sortDocumentsAndCollectValues("propInt",
                d -> ModelBridgeInternal.getIntFromJsonSerializable(d, "propInt"), validatorComparator);
        if ("DESC".equals(sortOrder)) {
            Collections.reverse(expectedValues);
        }

        int expectedPageSize = expectedNumberOfPages(expectedValues.size(), pageSize);

        FeedResponseListValidator<Integer> validator = new FeedResponseListValidator.Builder<Integer>()
            .containsExactlyValues(expectedValues)
            .numberOfPages(expectedPageSize)
            .allPagesSatisfy(new FeedResponseValidator.Builder<Integer>()
                .hasRequestChargeHeader().build())
            .totalRequestChargeIsAtLeast(numberOfPartitions * minQueryRequestChargePerPartition)
            .build();

        validateQuerySuccess(queryObservable.byPage(pageSize), validator);
    }

    /** Ascending-only ORDER BY over the integer property. */
    @Test(groups = { "simple" }, timeOut = TIMEOUT)
    public void queryOrderByInt() throws Exception {
        String query = "SELECT * FROM r ORDER BY r.propInt";
        CosmosQueryRequestOptions options = new CosmosQueryRequestOptions();
        int pageSize = 3;
        CosmosPagedFlux<InternalObjectNode> queryObservable = createdCollection.queryItems(query, options, InternalObjectNode.class);

        Comparator<Integer> validatorComparator = Comparator.nullsFirst(Comparator.<Integer>naturalOrder());
        List<String> expectedResourceIds = sortDocumentsAndCollectResourceIds("propInt",
            d -> ModelBridgeInternal.getIntFromJsonSerializable(d,"propInt"), validatorComparator);
        int expectedPageSize = expectedNumberOfPages(expectedResourceIds.size(), pageSize);

        FeedResponseListValidator<InternalObjectNode> validator = new FeedResponseListValidator.Builder<InternalObjectNode>()
            .containsExactly(expectedResourceIds)
            .numberOfPages(expectedPageSize)
            .allPagesSatisfy(new FeedResponseValidator.Builder<InternalObjectNode>()
                .hasRequestChargeHeader().build())
            .totalRequestChargeIsAtLeast(numberOfPartitions * minQueryRequestChargePerPartition)
            .build();

        validateQuerySuccess(queryObservable.byPage(pageSize), validator);
    }

    /** Ascending-only ORDER BY over the string property. */
    @Test(groups = { "simple" }, timeOut = TIMEOUT)
    public void queryOrderByString() throws Exception {
        String query = "SELECT * FROM r ORDER BY r.propStr";
        CosmosQueryRequestOptions options = new CosmosQueryRequestOptions();
        int pageSize = 3;
        CosmosPagedFlux<InternalObjectNode> queryObservable = createdCollection.queryItems(query, options, InternalObjectNode.class);

        Comparator<String> validatorComparator = Comparator.nullsFirst(Comparator.<String>naturalOrder());
        List<String> expectedResourceIds = sortDocumentsAndCollectResourceIds("propStr",
            d -> ModelBridgeInternal.getStringFromJsonSerializable(d,"propStr"), validatorComparator);
        int expectedPageSize = expectedNumberOfPages(expectedResourceIds.size(), pageSize);

        FeedResponseListValidator<InternalObjectNode> validator = new FeedResponseListValidator.Builder<InternalObjectNode>()
            .containsExactly(expectedResourceIds)
            .numberOfPages(expectedPageSize)
            .allPagesSatisfy(new FeedResponseValidator.Builder<InternalObjectNode>()
                .hasRequestChargeHeader().build())
            .totalRequestChargeIsAtLeast(numberOfPartitions * minQueryRequestChargePerPartition)
            .build();

        validateQuerySuccess(queryObservable.byPage(pageSize), validator);
    }

    /**
     * ORDER BY over the mixed-type property across multiple partitions; results are
     * compared id-for-id but order-agnostically (exactlyContainsIdsInAnyOrder).
     *
     * NOTE(review): the format string contains no %s, so the {@code sortOrder} argument is
     * silently ignored and both ASC and DESC runs issue the same query — looks like a bug;
     * confirm intent before fixing. The local {@code subscriber} is subscribed but never
     * awaited or asserted on; presumably leftover — verify it can be removed.
     */
    @Test(groups = { "simple" }, timeOut = TIMEOUT, dataProvider = "sortOrder")
    public void queryOrderByMixedTypes(String sortOrder) throws Exception {
        // Precondition: the container really is multi-partition.
        List<PartitionKeyRange> partitionKeyRanges = getPartitionKeyRanges(createdCollection.getId(),
            BridgeInternal.getContextClient(this.client));
        assertThat(partitionKeyRanges.size()).isGreaterThan(1);
        String query = String.format("SELECT r.id, r.propMixed FROM r ORDER BY r.propMixed ", sortOrder);
        CosmosQueryRequestOptions options = new CosmosQueryRequestOptions();

        // Client-side expectation: mixed-type comparison semantics come from ItemComparator.
        List<String> orderedIds = createdDocuments.stream()
            .sorted((o1, o2) -> ItemComparator.getInstance().compare(o1.get("propMixed"), o2.get("propMixed")))
            .map(Resource::getId)
            .collect(Collectors.toList());

        int pageSize = 20;
        CosmosPagedFlux<InternalObjectNode> queryFlux = createdCollection.queryItems(query, options, InternalObjectNode.class);
        TestSubscriber<FeedResponse<InternalObjectNode>> subscriber = new TestSubscriber<>();
        queryFlux.byPage(pageSize).subscribe(subscriber);
        FeedResponseListValidator<InternalObjectNode> validator = new FeedResponseListValidator.Builder<InternalObjectNode>()
            .totalSize(orderedIds.size())
            .exactlyContainsIdsInAnyOrder(orderedIds)
            .build();
        validateQuerySuccess(queryFlux.byPage(pageSize), validator);
    }

    /** Collects all partition key ranges of {@code containerId} via the low-level client (blocking). */
    @NotNull
    private List<PartitionKeyRange> getPartitionKeyRanges(
        String containerId, AsyncDocumentClient asyncDocumentClient) {
        List<PartitionKeyRange> partitionKeyRanges = new ArrayList<>();
        List<FeedResponse<PartitionKeyRange>> partitionFeedResponseList = asyncDocumentClient
            .readPartitionKeyRanges("/dbs/" + createdDatabase.getId() + "/colls/" + containerId,
                new CosmosQueryRequestOptions())
            .collectList().block();
        partitionFeedResponseList.forEach(f -> partitionKeyRanges.addAll(f.getResults()));
        return partitionKeyRanges;
    }

    // TOP values around the interesting boundaries: 0, 1, mid, size-1, size, size+1, 2*size.
    @DataProvider(name = "topValue")
    public Object[][] topValueParameter() {
        return new Object[][] {
            { 0 },
            { 1 },
            { 5 },
            { createdDocuments.size() - 1 },
            { createdDocuments.size() },
            { createdDocuments.size() + 1 },
            { 2 * createdDocuments.size() }
        };
    }

    /**
     * TOP n combined with ORDER BY: expected ids are the locally sorted list truncated to n.
     * For TOP 0 the per-partition charge floor drops to 1 (no documents are actually read).
     */
    @Test(groups = { "simple" }, timeOut = TIMEOUT, dataProvider = "topValue")
    public void queryOrderWithTop(int topValue) throws Exception {
        String query = String.format("SELECT TOP %d * FROM r ORDER BY r.propInt", topValue);
        CosmosQueryRequestOptions options = new CosmosQueryRequestOptions();
        int pageSize = 3;
        CosmosPagedFlux<InternalObjectNode> queryObservable = createdCollection.queryItems(query, options, InternalObjectNode.class);

        Comparator<Integer> validatorComparator = Comparator.nullsFirst(Comparator.<Integer>naturalOrder());

        List<String> expectedResourceIds =
            sortDocumentsAndCollectResourceIds("propInt",
                d -> ModelBridgeInternal.getIntFromJsonSerializable(d,"propInt"), validatorComparator)
                .stream().limit(topValue).collect(Collectors.toList());

        int expectedPageSize = expectedNumberOfPages(expectedResourceIds.size(), pageSize);

        FeedResponseListValidator<InternalObjectNode> validator = new FeedResponseListValidator.Builder<InternalObjectNode>()
            .containsExactly(expectedResourceIds)
            .numberOfPages(expectedPageSize)
            .allPagesSatisfy(new FeedResponseValidator.Builder<InternalObjectNode>()
                .hasRequestChargeHeader().build())
            .totalRequestChargeIsAtLeast(numberOfPartitions *
                (topValue > 0 ? minQueryRequestChargePerPartition : 1))
            .build();

        validateQuerySuccess(queryObservable.byPage(pageSize), validator);
    }

    /**
     * Locally sorts the seed documents by {@code propName} (documents lacking the property
     * are excluded) and returns their resource ids in that order.
     */
    private <T> List<String> sortDocumentsAndCollectResourceIds(String propName,
                                                                Function<InternalObjectNode, T> extractProp,
                                                                Comparator<T> comparer) {
        return createdDocuments.stream()
            .filter(d -> ModelBridgeInternal.getMapFromJsonSerializable(d).containsKey(propName))
            .sorted((d1, d2) -> comparer.compare(extractProp.apply(d1), extractProp.apply(d2)))
            .map(Resource::getResourceId).collect(Collectors.toList());
    }

    /**
     * Like {@link #sortDocumentsAndCollectResourceIds} but returns the property values
     * themselves (for SELECT VALUE projections).
     */
    @SuppressWarnings("unchecked")
    private <T> List<T> sortDocumentsAndCollectValues(String propName,
                                                      Function<InternalObjectNode, T> extractProp,
                                                      Comparator<T> comparer) {
        return createdDocuments.stream()
            .filter(d -> ModelBridgeInternal.getMapFromJsonSerializable(d).containsKey(propName))
            .sorted((d1, d2) -> comparer.compare(extractProp.apply(d1), extractProp.apply(d2)))
            .map(d -> (T)ModelBridgeInternal.getMapFromJsonSerializable(d).get(propName))
            .collect(Collectors.toList());
    }

    /**
     * Single-partition ORDER BY: drains the first page (3 items), then resumes from its
     * continuation token and verifies the remaining 7 of the 10 scoped documents arrive
     * in order.
     *
     * NOTE(review): {@code maxItemCount} is read from options that never set it —
     * presumably a non-null default is applied upstream; verify, since unboxing a null
     * here would NPE.
     */
    @Test(groups = { "simple" }, timeOut = TIMEOUT)
    public void queryScopedToSinglePartition_StartWithContinuationToken() throws Exception {
        String query = "SELECT * FROM r ORDER BY r.propScopedPartitionInt ASC";

        CosmosQueryRequestOptions options = new CosmosQueryRequestOptions();
        // Scope the query to the one logical partition seeded with 10 documents.
        options.setPartitionKey(new PartitionKey("duplicateParitionKeyValue"));
        CosmosPagedFlux<InternalObjectNode> queryObservable = createdCollection.queryItems(query, options, InternalObjectNode.class);

        TestSubscriber<FeedResponse<InternalObjectNode>> subscriber = new TestSubscriber<>();
        queryObservable.byPage(3).take(1).subscribe(subscriber);

        subscriber.awaitTerminalEvent();
        subscriber.assertComplete();
        subscriber.assertNoErrors();
        assertThat(subscriber.valueCount()).isEqualTo(1);

        @SuppressWarnings("unchecked")
        FeedResponse<InternalObjectNode> page = (FeedResponse<InternalObjectNode>) subscriber.getEvents().get(0).get(0);
        assertThat(page.getResults()).hasSize(3);
        assertThat(page.getContinuationToken()).isNotEmpty();

        // Resume from the token: expect exactly the documents with propScopedPartitionInt > 2.
        queryObservable = createdCollection.queryItems(query, options, InternalObjectNode.class);

        List<InternalObjectNode> expectedDocs = createdDocuments.stream()
            .filter(d -> (StringUtils.equals("duplicateParitionKeyValue", ModelBridgeInternal.getStringFromJsonSerializable(d,"mypk"))))
            .filter(d -> (ModelBridgeInternal.getIntFromJsonSerializable(d,"propScopedPartitionInt") > 2)).collect(Collectors.toList());
        Integer maxItemCount = ModelBridgeInternal.getMaxItemCountFromQueryRequestOptions(options);
        int expectedPageSize = (expectedDocs.size() + maxItemCount - 1) / maxItemCount;

        assertThat(expectedDocs).hasSize(10 - 3);

        FeedResponseListValidator<InternalObjectNode> validator = null;

        validator = new FeedResponseListValidator.Builder<InternalObjectNode>()
            .containsExactly(expectedDocs.stream()
                .sorted((e1, e2) -> Integer.compare(ModelBridgeInternal.getIntFromJsonSerializable(e1,"propScopedPartitionInt"),
                    ModelBridgeInternal.getIntFromJsonSerializable(e2,"propScopedPartitionInt")))
                .map(d -> d.getResourceId()).collect(Collectors.toList()))
            .numberOfPages(expectedPageSize)
            .allPagesSatisfy(new FeedResponseValidator.Builder<InternalObjectNode>()
                .requestChargeGreaterThanOrEqualTo(1.0).build())
            .build();

        validateQuerySuccess(queryObservable.byPage(page.getContinuationToken()), validator);
    }

    /**
     * Pure serialization test: an OrderByContinuationToken survives toString()/tryParse()
     * with every field intact, and an arbitrary JSON object fails to parse.
     */
    @Test(groups = { "simple" }, timeOut = TIMEOUT)
    public void orderByContinuationTokenRoundTrip() throws Exception {
        {
            // Positive case: round-trip a fully populated token.
            OrderByContinuationToken orderByContinuationToken = new OrderByContinuationToken(
                new CompositeContinuationToken(
                    "asdf",
                    new Range<String>("A", "D", false, true)),
                new QueryItem[] {new QueryItem("{\"item\" : 42}")},
                "rid",
                false);
            String serialized = orderByContinuationToken.toString();
            ValueHolder<OrderByContinuationToken> outOrderByContinuationToken = new ValueHolder<OrderByContinuationToken>();

            assertThat(OrderByContinuationToken.tryParse(serialized, outOrderByContinuationToken)).isTrue();
            OrderByContinuationToken deserialized = outOrderByContinuationToken.v;
            CompositeContinuationToken compositeContinuationToken = deserialized.getCompositeContinuationToken();
            String token = compositeContinuationToken.getToken();
            Range<String> range = compositeContinuationToken.getRange();
            assertThat(token).isEqualTo("asdf");
            assertThat(range.getMin()).isEqualTo("A");
            assertThat(range.getMax()).isEqualTo("D");
            assertThat(range.isMinInclusive()).isEqualTo(false);
            assertThat(range.isMaxInclusive()).isEqualTo(true);

            QueryItem[] orderByItems = deserialized.getOrderByItems();
            assertThat(orderByItems).isNotNull();
            assertThat(orderByItems.length).isEqualTo(1);
            assertThat(orderByItems[0].getItem()).isEqualTo(42);

            String rid = deserialized.getRid();
            assertThat(rid).isEqualTo("rid");

            boolean inclusive = deserialized.getInclusive();
            assertThat(inclusive).isEqualTo(false);
        }

        {
            // Negative case: unrelated JSON must not parse as a token.
            ValueHolder<OrderByContinuationToken> outOrderByContinuationToken = new ValueHolder<OrderByContinuationToken>();

            assertThat(OrderByContinuationToken.tryParse("{\"property\" : \"Not a valid Order By Token\"}", outOrderByContinuationToken)).isFalse();
        }
    }

    /**
     * Drains an integer ORDER BY query page-by-page via continuation tokens at several
     * page sizes and checks the concatenated results match the locally sorted ids.
     */
    @Test(groups = { "simple" }, timeOut = TIMEOUT * 10, dataProvider = "sortOrder", retryAnalyzer = RetryAnalyzer.class)
    public void queryDocumentsWithOrderByContinuationTokensInteger(String sortOrder) throws Exception {
        String query = String.format("SELECT * FROM c ORDER BY c.propInt %s", sortOrder);
        Comparator<Integer> order = sortOrder.equals("ASC")?Comparator.naturalOrder():Comparator.reverseOrder();
        Comparator<Integer> validatorComparator = Comparator.nullsFirst(order);
        List<String> expectedResourceIds = sortDocumentsAndCollectResourceIds("propInt",
            d -> ModelBridgeInternal.getIntFromJsonSerializable(d,"propInt"), validatorComparator);
        this.queryWithContinuationTokensAndPageSizes(query, new int[] { 1, 5, 10, 100}, expectedResourceIds);
    }

    /** Same continuation-token drain test over the string id property. */
    @Test(groups = { "simple" }, timeOut = TIMEOUT * 10, dataProvider = "sortOrder")
    public void queryDocumentsWithOrderByContinuationTokensString(String sortOrder) throws Exception {
        String query = String.format("SELECT * FROM c ORDER BY c.id %s", sortOrder);
        Comparator<String> order = sortOrder.equals("ASC")?Comparator.naturalOrder():Comparator.reverseOrder();
        Comparator<String> validatorComparator = Comparator.nullsFirst(order);
        List<String> expectedResourceIds = sortDocumentsAndCollectResourceIds("id",
            d -> ModelBridgeInternal.getStringFromJsonSerializable(d,"id"), validatorComparator);
        this.queryWithContinuationTokensAndPageSizes(query, new int[] { 1, 5, 10, 100 }, expectedResourceIds);
    }

    /**
     * Resuming with a fabricated (invalid) order-by continuation token must surface a
     * CosmosException rather than bogus results.
     */
    @Test(groups = { "simple" }, timeOut = TIMEOUT * 10, dataProvider = "sortOrder")
    public void queryDocumentsWithInvalidOrderByContinuationTokensString(String sortOrder) throws Exception {
        String query = String.format("SELECT * FROM c ORDER BY c.id %s", sortOrder);
        Comparator<String> validatorComparator;
        if(sortOrder.equals("ASC")) {
            validatorComparator = Comparator.nullsFirst(Comparator.<String>naturalOrder());
        }else{
            validatorComparator = Comparator.nullsFirst(Comparator.<String>reverseOrder());
        }
        List<String> expectedResourceIds = sortDocumentsAndCollectResourceIds("id",
            d -> ModelBridgeInternal.getStringFromJsonSerializable(d,"id"), validatorComparator);
        this.assertInvalidContinuationToken(query, new int[] { 1, 5, 10, 100 }, expectedResourceIds);
    }

    /**
     * ORDER BY over an array-typed property: the drain with a small page size must yield
     * the same leading ids as the drain with one large page.
     *
     * NOTE(review): results1 is a full continuation-token drain yet is compared against
     * only the first {@code pageSize} items of results2 — this only holds if the query
     * returns at most {@code pageSize} documents; verify against the array-ORDER-BY
     * semantics of the service.
     */
    @Test(groups = {"simple"}, timeOut = TIMEOUT, dataProvider = "sortOrder")
    public void queryOrderByArray(String sortOrder) throws Exception {
        String query = String.format("SELECT * FROM r ORDER BY r.propArray %s", sortOrder);
        int pageSize = 3;
        List<InternalObjectNode> results1 = this.queryWithContinuationTokens(query, pageSize);
        List<InternalObjectNode> results2 = this.queryWithContinuationTokens(query, this.createdDocuments.size());
        assertThat(results1.stream().map(r -> r.getResourceId()).collect(Collectors.toList()))
            .containsExactlyElementsOf(results2.stream().limit(pageSize).map(r -> r.getResourceId()).collect(Collectors.toList()));
    }

    /** Same paged-vs-single-page comparison for an object-typed ORDER BY property. */
    @Test(groups = {"simple"}, timeOut = TIMEOUT, dataProvider = "sortOrder")
    public void queryOrderByObject(String sortOrder) throws Exception {
        String query = String.format("SELECT * FROM r ORDER BY r.propObject %s", sortOrder);
        int pageSize = 3;
        List<InternalObjectNode> results1 = this.queryWithContinuationTokens(query, pageSize);
        List<InternalObjectNode> results2 = this.queryWithContinuationTokens(query, this.createdDocuments.size());
        assertThat(results1.stream().map(r -> r.getResourceId()).collect(Collectors.toList()))
            .containsExactlyElementsOf(results2.stream().limit(pageSize).map(r -> r.getResourceId()).collect(Collectors.toList()));
    }

    /** Builds a document from the given properties and inserts it (blocking). */
    public InternalObjectNode createDocument(CosmosAsyncContainer cosmosContainer, Map<String, Object> keyValueProps) {
        InternalObjectNode docDefinition = getDocumentDefinition(keyValueProps);
        return BridgeInternal.getProperties(cosmosContainer.createItem(docDefinition).block());
    }

    /** Builds documents from each property map and bulk-inserts them via the suite helper. */
    public List<InternalObjectNode> bulkInsert(CosmosAsyncContainer cosmosContainer, List<Map<String, Object>> keyValuePropsList) {

        ArrayList<InternalObjectNode> result = new ArrayList<InternalObjectNode>();

        for(Map<String, Object> keyValueProps: keyValuePropsList) {
            InternalObjectNode docDefinition = getDocumentDefinition(keyValueProps);
            result.add(docDefinition);
        }

        return bulkInsertBlocking(cosmosContainer, result);
    }

    // Fixed pause before each test; presumably to let the shared container settle
    // between tests — TODO confirm why 10s specifically.
    @BeforeMethod(groups = { "simple" })
    public void beforeMethod() throws Exception {
        TimeUnit.SECONDS.sleep(10);
    }

    /**
     * Ensures /propMixed/? is part of the indexing policy (required for ORDER BY on it);
     * replaces the container's properties only when the path is missing.
     */
    @BeforeClass(groups = { "simple" }, timeOut = 4 * SETUP_TIMEOUT)
    private void updateCollectionIndex() {
        CosmosContainerProperties containerProperties = createdCollection.read().block().getProperties();
        IndexingPolicy indexingPolicy = containerProperties.getIndexingPolicy();
        List<IncludedPath> includedPaths = indexingPolicy.getIncludedPaths();
        IncludedPath includedPath = new IncludedPath("/propMixed/?");
        if (!includedPaths.contains(includedPath)) {
            includedPaths.add(includedPath);
            indexingPolicy.setIncludedPaths(includedPaths);
            containerProperties.setIndexingPolicy(indexingPolicy);
            createdCollection.replace(containerProperties).block();
        }
    }

    @AfterClass(groups = { "simple" }, timeOut = SHUTDOWN_TIMEOUT, alwaysRun = true)
    public void afterClass() {
        safeClose(client);
    }

    /**
     * Runs the query once with a deliberately bogus continuation token and asserts it fails
     * with CosmosException.
     *
     * NOTE(review): {@code requestContinuation} is never reassigned, so the do/while body
     * executes exactly once; {@code pageSize} and {@code expectedIds} are unused — the
     * loop/parameters look vestigial; confirm before simplifying.
     */
    private void assertInvalidContinuationToken(String query, int[] pageSize, List<String> expectedIds) {
        String requestContinuation = null;
        do {
            CosmosQueryRequestOptions options = new CosmosQueryRequestOptions();
            options.setMaxDegreeOfParallelism(2);
            OrderByContinuationToken orderByContinuationToken = new OrderByContinuationToken(
                new CompositeContinuationToken(
                    "asdf",
                    new Range<String>("A", "D", false, true)),
                new QueryItem[] {new QueryItem("{\"item\" : 42}")},
                "rid",
                false);
            CosmosPagedFlux<InternalObjectNode> queryObservable = createdCollection.queryItems(query, options, InternalObjectNode.class);
            TestSubscriber<FeedResponse<InternalObjectNode>> testSubscriber = new TestSubscriber<>();
            queryObservable.byPage(orderByContinuationToken.toString(),1).subscribe(testSubscriber);
            testSubscriber.awaitTerminalEvent(TIMEOUT, TimeUnit.MILLISECONDS);
            testSubscriber.assertError(CosmosException.class);
        } while (requestContinuation != null);
    }

    /** Drains the query at each page size and asserts the exact expected id sequence. */
    private void queryWithContinuationTokensAndPageSizes(String query, int[] pageSizes, List<String> expectedIds) {
        for (int pageSize : pageSizes) {
            List<InternalObjectNode> receivedDocuments = this.queryWithContinuationTokens(query, pageSize);
            List<String> actualIds = new ArrayList<String>();
            for (InternalObjectNode document : receivedDocuments) {
                actualIds.add(document.getResourceId());
            }

            assertThat(actualIds).containsExactlyElementsOf(expectedIds);
        }
    }

    /**
     * Drains a query one page at a time: each iteration issues a fresh request resuming
     * from the previous page's continuation token, until the token comes back null.
     * Returns all received documents in arrival order.
     */
    private List<InternalObjectNode> queryWithContinuationTokens(String query, int pageSize) {
        String requestContinuation = null;
        List<String> continuationTokens = new ArrayList<String>();
        List<InternalObjectNode> receivedDocuments = new ArrayList<InternalObjectNode>();
        do {
            CosmosQueryRequestOptions options = new CosmosQueryRequestOptions();
            options.setMaxDegreeOfParallelism(2);
            CosmosPagedFlux<InternalObjectNode> queryObservable = createdCollection.queryItems(query, options, InternalObjectNode.class);

            TestSubscriber<FeedResponse<InternalObjectNode>> testSubscriber = new TestSubscriber<>();
            queryObservable.byPage(requestContinuation, pageSize).subscribe(testSubscriber);
            testSubscriber.awaitTerminalEvent(TIMEOUT, TimeUnit.MILLISECONDS);
            testSubscriber.assertNoErrors();
            testSubscriber.assertComplete();

            @SuppressWarnings("unchecked")
            FeedResponse<InternalObjectNode> firstPage = (FeedResponse<InternalObjectNode>) testSubscriber.getEvents().get(0).get(0);
            requestContinuation = firstPage.getContinuationToken();
            receivedDocuments.addAll(firstPage.getResults());
            continuationTokens.add(requestContinuation);
        } while (requestContinuation != null);

        return receivedDocuments;
    }

    /**
     * Hand-builds a document JSON string from the property map (null values emitted as
     * JSON null), appending the given id and partition key ("mypk").
     */
    private static InternalObjectNode getDocumentDefinition(String partitionKey, String id, Map<String, Object> keyValuePair) {
        StringBuilder sb = new StringBuilder();
        sb.append("{\n");

        for(String key: keyValuePair.keySet()) {
            Object val = keyValuePair.get(key);
            sb.append(" ");
            sb.append("\"").append(key).append("\"").append(" :" );
            if (val == null) {
                sb.append("null");
            } else {
                sb.append(toJson(val));
            }
            sb.append(",\n");
        }

        sb.append(String.format(" \"id\": \"%s\",\n", id));
        sb.append(String.format(" \"mypk\": \"%s\"\n", partitionKey));
        sb.append("}");

        return new InternalObjectNode(sb.toString());
    }

    /** Convenience overload: uses one random UUID as both id and partition key. */
    private static InternalObjectNode getDocumentDefinition(Map<String, Object> keyValuePair) {
        String uuid = UUID.randomUUID().toString();
        return getDocumentDefinition(uuid, uuid, keyValuePair);
    }

    /** Serializes a value with the shared ObjectMapper; wraps Jackson failures unchecked. */
    private static String toJson(Object object){
        try {
            return Utils.getSimpleObjectMapper().writeValueAsString(object);
        } catch (JsonProcessingException e) {
            throw new IllegalStateException(e);
        }
    }
}
class OrderbyDocumentQueryTest extends TestSuiteBase { public static final String PROPTYPE = "proptype"; public static final String PROP_MIXED = "propMixed"; private final double minQueryRequestChargePerPartition = 2.0; private CosmosAsyncClient client; private CosmosAsyncContainer createdCollection; private CosmosAsyncDatabase createdDatabase; private List<InternalObjectNode> createdDocuments = new ArrayList<>(); private int numberOfPartitions; @Factory(dataProvider = "clientBuildersWithDirect") public OrderbyDocumentQueryTest(CosmosClientBuilder clientBuilder) { super(clientBuilder); } @Test(groups = { "simple" }, timeOut = TIMEOUT, dataProvider = "queryMetricsArgProvider") public void queryDocumentsValidateContent(Boolean qmEnabled) throws Exception { InternalObjectNode expectedDocument = createdDocuments.get(0); String query = String.format("SELECT * from root r where r.propStr = '%s'" + " ORDER BY r.propInt" , ModelBridgeInternal.getStringFromJsonSerializable(expectedDocument,"propStr")); CosmosQueryRequestOptions options = new CosmosQueryRequestOptions(); if (qmEnabled != null) { options.setQueryMetricsEnabled(qmEnabled); } CosmosPagedFlux<InternalObjectNode> queryObservable = createdCollection.queryItems(query, options, InternalObjectNode.class); List<String> expectedResourceIds = new ArrayList<>(); expectedResourceIds.add(expectedDocument.getResourceId()); Map<String, ResourceValidator<InternalObjectNode>> resourceIDToValidator = new HashMap<>(); resourceIDToValidator.put(expectedDocument.getResourceId(), new ResourceValidator.Builder<InternalObjectNode>().areEqual(expectedDocument).build()); FeedResponseListValidator<InternalObjectNode> validator = new FeedResponseListValidator.Builder<InternalObjectNode>() .numberOfPages(1) .containsExactly(expectedResourceIds) .validateAllResources(resourceIDToValidator) .totalRequestChargeIsAtLeast(numberOfPartitions * minQueryRequestChargePerPartition) .allPagesSatisfy(new 
FeedResponseValidator.Builder<InternalObjectNode>().hasRequestChargeHeader().build()) .hasValidQueryMetrics(qmEnabled) .build(); validateQuerySuccess(queryObservable.byPage(), validator); } @Test(groups = { "simple" }, timeOut = TIMEOUT) public void queryDocuments_NoResults() throws Exception { String query = "SELECT * from root r where r.id = '2' ORDER BY r.propInt"; CosmosQueryRequestOptions options = new CosmosQueryRequestOptions(); CosmosPagedFlux<InternalObjectNode> queryObservable = createdCollection.queryItems(query, options, InternalObjectNode.class); FeedResponseListValidator<InternalObjectNode> validator = new FeedResponseListValidator.Builder<InternalObjectNode>() .containsExactly(new ArrayList<>()) .numberOfPages(1) .totalRequestChargeIsAtLeast(numberOfPartitions * minQueryRequestChargePerPartition) .allPagesSatisfy(new FeedResponseValidator.Builder<InternalObjectNode>() .hasRequestChargeHeader().build()) .build(); validateQuerySuccess(queryObservable.byPage(), validator); } @DataProvider(name = "sortOrder") public Object[][] sortOrder() { return new Object[][] { { "ASC" }, {"DESC"} }; } @Test(groups = { "simple" }, timeOut = TIMEOUT, dataProvider = "sortOrder") public void queryOrderBy(String sortOrder) throws Exception { String query = String.format("SELECT * FROM r ORDER BY r.propInt %s", sortOrder); CosmosQueryRequestOptions options = new CosmosQueryRequestOptions(); int pageSize = 3; CosmosPagedFlux<InternalObjectNode> queryObservable = createdCollection.queryItems(query, options, InternalObjectNode.class); Comparator<Integer> validatorComparator = Comparator.nullsFirst(Comparator.<Integer>naturalOrder()); List<String> expectedResourceIds = sortDocumentsAndCollectResourceIds("propInt", d -> ModelBridgeInternal.getIntFromJsonSerializable(d,"propInt"), validatorComparator); if ("DESC".equals(sortOrder)) { Collections.reverse(expectedResourceIds); } int expectedPageSize = expectedNumberOfPages(expectedResourceIds.size(), pageSize); 
FeedResponseListValidator<InternalObjectNode> validator = new FeedResponseListValidator.Builder<InternalObjectNode>() .containsExactly(expectedResourceIds) .numberOfPages(expectedPageSize) .allPagesSatisfy(new FeedResponseValidator.Builder<InternalObjectNode>() .hasRequestChargeHeader().build()) .totalRequestChargeIsAtLeast(numberOfPartitions * minQueryRequestChargePerPartition) .build(); validateQuerySuccess(queryObservable.byPage(pageSize), validator); } @Test(groups = {"simple"}, timeOut = TIMEOUT, dataProvider = "sortOrder") public void queryOrderByWithValue(String sortOrder) throws Exception { String query = String.format("SELECT value r.propInt FROM r ORDER BY r.propInt %s", sortOrder); CosmosQueryRequestOptions options = new CosmosQueryRequestOptions(); int pageSize = 3; CosmosPagedFlux<Integer> queryObservable = createdCollection.queryItems(query, options, Integer.class); Comparator<Integer> validatorComparator = Comparator.nullsFirst(Comparator.<Integer>naturalOrder()); List<Integer> expectedValues = sortDocumentsAndCollectValues("propInt", d -> ModelBridgeInternal .getIntFromJsonSerializable(d, "propInt"), validatorComparator); if ("DESC".equals(sortOrder)) { Collections.reverse(expectedValues); } int expectedPageSize = expectedNumberOfPages(expectedValues.size(), pageSize); FeedResponseListValidator<Integer> validator = new FeedResponseListValidator.Builder<Integer>() .containsExactlyValues(expectedValues) .numberOfPages(expectedPageSize) .allPagesSatisfy(new FeedResponseValidator.Builder<Integer>() .hasRequestChargeHeader().build()) .totalRequestChargeIsAtLeast(numberOfPartitions * minQueryRequestChargePerPartition) .build(); validateQuerySuccess(queryObservable.byPage(pageSize), validator); } @Test(groups = { "simple" }, timeOut = TIMEOUT) public void queryOrderByInt() throws Exception { String query = "SELECT * FROM r ORDER BY r.propInt"; CosmosQueryRequestOptions options = new CosmosQueryRequestOptions(); int pageSize = 3; 
CosmosPagedFlux<InternalObjectNode> queryObservable = createdCollection.queryItems(query, options, InternalObjectNode.class); Comparator<Integer> validatorComparator = Comparator.nullsFirst(Comparator.<Integer>naturalOrder()); List<String> expectedResourceIds = sortDocumentsAndCollectResourceIds("propInt", d -> ModelBridgeInternal.getIntFromJsonSerializable(d,"propInt"), validatorComparator); int expectedPageSize = expectedNumberOfPages(expectedResourceIds.size(), pageSize); FeedResponseListValidator<InternalObjectNode> validator = new FeedResponseListValidator.Builder<InternalObjectNode>() .containsExactly(expectedResourceIds) .numberOfPages(expectedPageSize) .allPagesSatisfy(new FeedResponseValidator.Builder<InternalObjectNode>() .hasRequestChargeHeader().build()) .totalRequestChargeIsAtLeast(numberOfPartitions * minQueryRequestChargePerPartition) .build(); validateQuerySuccess(queryObservable.byPage(pageSize), validator); } @Test(groups = { "simple" }, timeOut = TIMEOUT) public void queryOrderByString() throws Exception { String query = "SELECT * FROM r ORDER BY r.propStr"; CosmosQueryRequestOptions options = new CosmosQueryRequestOptions(); int pageSize = 3; CosmosPagedFlux<InternalObjectNode> queryObservable = createdCollection.queryItems(query, options, InternalObjectNode.class); Comparator<String> validatorComparator = Comparator.nullsFirst(Comparator.<String>naturalOrder()); List<String> expectedResourceIds = sortDocumentsAndCollectResourceIds("propStr", d -> ModelBridgeInternal.getStringFromJsonSerializable(d,"propStr"), validatorComparator); int expectedPageSize = expectedNumberOfPages(expectedResourceIds.size(), pageSize); FeedResponseListValidator<InternalObjectNode> validator = new FeedResponseListValidator.Builder<InternalObjectNode>() .containsExactly(expectedResourceIds) .numberOfPages(expectedPageSize) .allPagesSatisfy(new FeedResponseValidator.Builder<InternalObjectNode>() .hasRequestChargeHeader().build()) 
.totalRequestChargeIsAtLeast(numberOfPartitions * minQueryRequestChargePerPartition) .build(); validateQuerySuccess(queryObservable.byPage(pageSize), validator); } @Test(groups = {"simple"}, timeOut = TIMEOUT, dataProvider = "sortOrder") public void queryOrderByMixedTypes(String sortOrder) throws Exception { List<PartitionKeyRange> partitionKeyRanges = getPartitionKeyRanges(createdCollection.getId(), BridgeInternal .getContextClient(this.client)); assertThat(partitionKeyRanges.size()).isGreaterThan(1); String query = String.format("SELECT * FROM r ORDER BY r.propMixed ", sortOrder); CosmosQueryRequestOptions options = new CosmosQueryRequestOptions(); List<String> sourceIds = createdDocuments.stream() .map(Resource::getId) .collect(Collectors.toList()); int pageSize = 20; CosmosPagedFlux<InternalObjectNode> queryFlux = createdCollection .queryItems(query, options, InternalObjectNode.class); TestSubscriber<FeedResponse<InternalObjectNode>> subscriber = new TestSubscriber<>(); queryFlux.byPage(pageSize).subscribe(subscriber); subscriber.awaitTerminalEvent(); subscriber.assertComplete(); subscriber.assertNoErrors(); List<InternalObjectNode> results = new ArrayList<>(); subscriber.values().forEach(feedResponse -> results.addAll(feedResponse.getResults())); assertThat(results.size()).isEqualTo(createdDocuments.size()); List<String> resultIds = results.stream().map(Resource::getId).collect(Collectors.toList()); assertThat(resultIds).containsExactlyInAnyOrderElementsOf(sourceIds); final List<String> typeList = Arrays.asList("undefined", "null", "boolean", "number", "string", "array", "object"); List<String> observedTypes = new ArrayList<>(); results.forEach(item -> { String propType = "undefined"; if (item.has(PROPTYPE)) { propType = item.getString(PROPTYPE); } System.out.println("item.get(PROPTYPE) = " + item.get(PROPTYPE)); System.out.println("propType = " + propType); if (!observedTypes.contains(propType)) { observedTypes.add(propType); } else { boolean equals = 
observedTypes.get(observedTypes.size() - 1).equals(propType); assertThat(equals).isTrue().as("Items of same type should be contiguous"); } }); assertThat(observedTypes).containsExactlyElementsOf(typeList); for (String type : typeList) { List<InternalObjectNode> items = results.stream().filter(r -> { if ("undefined".equals(type)) { return !r.has(PROPTYPE); } return type.equals(r.getString(PROPTYPE)); }).collect(Collectors.toList()); if ("boolean".equals(type)) { List<Boolean> sourceList = items.stream().map(n -> n.getBoolean(PROP_MIXED)).collect(Collectors.toList()); List<Boolean> toBeSortedList = new ArrayList<>(sourceList); toBeSortedList.sort(Comparator.comparing(Boolean::booleanValue)); assertThat(toBeSortedList).containsExactlyElementsOf(sourceList); } if ("number".equals(type)) { List<Number> numberList = items.stream().map(n -> (Number) n.get(PROP_MIXED)).collect(Collectors.toList()); List<Number> toBeSortedList = new ArrayList<>(numberList); Collections.copy(toBeSortedList, numberList); toBeSortedList.sort(Comparator.comparingDouble(Number::doubleValue)); assertThat(toBeSortedList).containsExactlyElementsOf(numberList); } if ("string".equals(type)) { List<String> sourceList = items.stream().map(n -> n.getString(PROP_MIXED)).collect(Collectors.toList()); List<String> toBeSortedList = new ArrayList<>(sourceList); Collections.copy(toBeSortedList, sourceList); toBeSortedList.sort(Comparator.comparing(String::valueOf)); assertThat(toBeSortedList).containsExactlyElementsOf(sourceList); } } } private List<PartitionKeyRange> getPartitionKeyRanges( String containerId, AsyncDocumentClient asyncDocumentClient) { List<PartitionKeyRange> partitionKeyRanges = new ArrayList<>(); List<FeedResponse<PartitionKeyRange>> partitionFeedResponseList = asyncDocumentClient .readPartitionKeyRanges("/dbs/" + createdDatabase.getId() + "/colls/" + containerId, new CosmosQueryRequestOptions()) .collectList().block(); partitionFeedResponseList.forEach(f -> 
partitionKeyRanges.addAll(f.getResults())); return partitionKeyRanges; } @DataProvider(name = "topValue") public Object[][] topValueParameter() { return new Object[][] { { 0 }, { 1 }, { 5 }, { createdDocuments.size() - 1 }, { createdDocuments.size() }, { createdDocuments.size() + 1 }, { 2 * createdDocuments.size() } }; } @Test(groups = { "simple" }, timeOut = TIMEOUT, dataProvider = "topValue") public void queryOrderWithTop(int topValue) throws Exception { String query = String.format("SELECT TOP %d * FROM r ORDER BY r.propInt", topValue); CosmosQueryRequestOptions options = new CosmosQueryRequestOptions(); int pageSize = 3; CosmosPagedFlux<InternalObjectNode> queryObservable = createdCollection.queryItems(query, options, InternalObjectNode.class); Comparator<Integer> validatorComparator = Comparator.nullsFirst(Comparator.<Integer>naturalOrder()); List<String> expectedResourceIds = sortDocumentsAndCollectResourceIds("propInt", d -> ModelBridgeInternal.getIntFromJsonSerializable(d,"propInt"), validatorComparator) .stream().limit(topValue).collect(Collectors.toList()); int expectedPageSize = expectedNumberOfPages(expectedResourceIds.size(), pageSize); FeedResponseListValidator<InternalObjectNode> validator = new FeedResponseListValidator.Builder<InternalObjectNode>() .containsExactly(expectedResourceIds) .numberOfPages(expectedPageSize) .allPagesSatisfy(new FeedResponseValidator.Builder<InternalObjectNode>() .hasRequestChargeHeader().build()) .totalRequestChargeIsAtLeast(numberOfPartitions * (topValue > 0 ? 
minQueryRequestChargePerPartition : 1)) .build(); validateQuerySuccess(queryObservable.byPage(pageSize), validator); } private <T> List<String> sortDocumentsAndCollectResourceIds(String propName, Function<InternalObjectNode, T> extractProp, Comparator<T> comparer) { return createdDocuments.stream() .filter(d -> ModelBridgeInternal.getMapFromJsonSerializable(d).containsKey(propName)) .sorted((d1, d2) -> comparer.compare(extractProp.apply(d1), extractProp.apply(d2))) .map(Resource::getResourceId).collect(Collectors.toList()); } @SuppressWarnings("unchecked") private <T> List<T> sortDocumentsAndCollectValues(String propName, Function<InternalObjectNode, T> extractProp, Comparator<T> comparer) { return createdDocuments.stream() .filter(d -> ModelBridgeInternal.getMapFromJsonSerializable(d).containsKey(propName)) .sorted((d1, d2) -> comparer.compare(extractProp.apply(d1), extractProp.apply(d2))) .map(d -> (T)ModelBridgeInternal.getMapFromJsonSerializable(d).get(propName)) .collect(Collectors.toList()); } @Test(groups = { "simple" }, timeOut = TIMEOUT) public void queryScopedToSinglePartition_StartWithContinuationToken() throws Exception { String query = "SELECT * FROM r ORDER BY r.propScopedPartitionInt ASC"; CosmosQueryRequestOptions options = new CosmosQueryRequestOptions(); options.setPartitionKey(new PartitionKey("duplicateParitionKeyValue")); CosmosPagedFlux<InternalObjectNode> queryObservable = createdCollection.queryItems(query, options, InternalObjectNode.class); TestSubscriber<FeedResponse<InternalObjectNode>> subscriber = new TestSubscriber<>(); queryObservable.byPage(3).take(1).subscribe(subscriber); subscriber.awaitTerminalEvent(); subscriber.assertComplete(); subscriber.assertNoErrors(); assertThat(subscriber.valueCount()).isEqualTo(1); @SuppressWarnings("unchecked") FeedResponse<InternalObjectNode> page = (FeedResponse<InternalObjectNode>) subscriber.getEvents().get(0).get(0); assertThat(page.getResults()).hasSize(3); 
assertThat(page.getContinuationToken()).isNotEmpty(); queryObservable = createdCollection.queryItems(query, options, InternalObjectNode.class); List<InternalObjectNode> expectedDocs = createdDocuments.stream() .filter(d -> (StringUtils.equals("duplicateParitionKeyValue", ModelBridgeInternal.getStringFromJsonSerializable(d,"mypk")))) .filter(d -> (ModelBridgeInternal.getIntFromJsonSerializable(d,"propScopedPartitionInt") > 2)).collect(Collectors.toList()); Integer maxItemCount = ModelBridgeInternal.getMaxItemCountFromQueryRequestOptions(options); int expectedPageSize = (expectedDocs.size() + maxItemCount - 1) / maxItemCount; assertThat(expectedDocs).hasSize(10 - 3); FeedResponseListValidator<InternalObjectNode> validator = null; validator = new FeedResponseListValidator.Builder<InternalObjectNode>() .containsExactly(expectedDocs.stream() .sorted((e1, e2) -> Integer.compare(ModelBridgeInternal.getIntFromJsonSerializable(e1,"propScopedPartitionInt"), ModelBridgeInternal.getIntFromJsonSerializable(e2,"propScopedPartitionInt"))) .map(d -> d.getResourceId()).collect(Collectors.toList())) .numberOfPages(expectedPageSize) .allPagesSatisfy(new FeedResponseValidator.Builder<InternalObjectNode>() .requestChargeGreaterThanOrEqualTo(1.0).build()) .build(); validateQuerySuccess(queryObservable.byPage(page.getContinuationToken()), validator); } @Test(groups = { "simple" }, timeOut = TIMEOUT) public void orderByContinuationTokenRoundTrip() throws Exception { { OrderByContinuationToken orderByContinuationToken = new OrderByContinuationToken( new CompositeContinuationToken( "asdf", new Range<String>("A", "D", false, true)), new QueryItem[] {new QueryItem("{\"item\" : 42}")}, "rid", false); String serialized = orderByContinuationToken.toString(); ValueHolder<OrderByContinuationToken> outOrderByContinuationToken = new ValueHolder<OrderByContinuationToken>(); assertThat(OrderByContinuationToken.tryParse(serialized, outOrderByContinuationToken)).isTrue(); OrderByContinuationToken 
deserialized = outOrderByContinuationToken.v; CompositeContinuationToken compositeContinuationToken = deserialized.getCompositeContinuationToken(); String token = compositeContinuationToken.getToken(); Range<String> range = compositeContinuationToken.getRange(); assertThat(token).isEqualTo("asdf"); assertThat(range.getMin()).isEqualTo("A"); assertThat(range.getMax()).isEqualTo("D"); assertThat(range.isMinInclusive()).isEqualTo(false); assertThat(range.isMaxInclusive()).isEqualTo(true); QueryItem[] orderByItems = deserialized.getOrderByItems(); assertThat(orderByItems).isNotNull(); assertThat(orderByItems.length).isEqualTo(1); assertThat(orderByItems[0].getItem()).isEqualTo(42); String rid = deserialized.getRid(); assertThat(rid).isEqualTo("rid"); boolean inclusive = deserialized.getInclusive(); assertThat(inclusive).isEqualTo(false); } { ValueHolder<OrderByContinuationToken> outOrderByContinuationToken = new ValueHolder<OrderByContinuationToken>(); assertThat(OrderByContinuationToken.tryParse("{\"property\" : \"Not a valid Order By Token\"}", outOrderByContinuationToken)).isFalse(); } } @Test(groups = { "simple" }, timeOut = TIMEOUT * 10, dataProvider = "sortOrder", retryAnalyzer = RetryAnalyzer.class) public void queryDocumentsWithOrderByContinuationTokensInteger(String sortOrder) throws Exception { String query = String.format("SELECT * FROM c ORDER BY c.propInt %s", sortOrder); Comparator<Integer> order = sortOrder.equals("ASC")?Comparator.naturalOrder():Comparator.reverseOrder(); Comparator<Integer> validatorComparator = Comparator.nullsFirst(order); List<String> expectedResourceIds = sortDocumentsAndCollectResourceIds("propInt", d -> ModelBridgeInternal.getIntFromJsonSerializable(d,"propInt"), validatorComparator); this.queryWithContinuationTokensAndPageSizes(query, new int[] { 1, 5, 10, 100}, expectedResourceIds); } @Test(groups = { "simple" }, timeOut = TIMEOUT * 10, dataProvider = "sortOrder") public void 
queryDocumentsWithOrderByContinuationTokensString(String sortOrder) throws Exception { String query = String.format("SELECT * FROM c ORDER BY c.id %s", sortOrder); Comparator<String> order = sortOrder.equals("ASC")?Comparator.naturalOrder():Comparator.reverseOrder(); Comparator<String> validatorComparator = Comparator.nullsFirst(order); List<String> expectedResourceIds = sortDocumentsAndCollectResourceIds("id", d -> ModelBridgeInternal.getStringFromJsonSerializable(d,"id"), validatorComparator); this.queryWithContinuationTokensAndPageSizes(query, new int[] { 1, 5, 10, 100 }, expectedResourceIds); } @Test(groups = { "simple" }, timeOut = TIMEOUT * 10, dataProvider = "sortOrder") public void queryDocumentsWithInvalidOrderByContinuationTokensString(String sortOrder) throws Exception { String query = String.format("SELECT * FROM c ORDER BY c.id %s", sortOrder); Comparator<String> validatorComparator; if(sortOrder.equals("ASC")) { validatorComparator = Comparator.nullsFirst(Comparator.<String>naturalOrder()); }else{ validatorComparator = Comparator.nullsFirst(Comparator.<String>reverseOrder()); } List<String> expectedResourceIds = sortDocumentsAndCollectResourceIds("id", d -> ModelBridgeInternal.getStringFromJsonSerializable(d,"id"), validatorComparator); this.assertInvalidContinuationToken(query, new int[] { 1, 5, 10, 100 }, expectedResourceIds); } @Test(groups = {"simple"}, timeOut = TIMEOUT, dataProvider = "sortOrder") public void queryOrderByArray(String sortOrder) throws Exception { String query = String.format("SELECT * FROM r ORDER BY r.propArray %s", sortOrder); int pageSize = 3; List<InternalObjectNode> results1 = this.queryWithContinuationTokens(query, pageSize); List<InternalObjectNode> results2 = this.queryWithContinuationTokens(query, this.createdDocuments.size()); assertThat(results1.stream().map(r -> r.getResourceId()).collect(Collectors.toList())) .containsExactlyElementsOf(results2.stream().limit(pageSize).map(r -> 
r.getResourceId()).collect(Collectors.toList())); } @Test(groups = {"simple"}, timeOut = TIMEOUT, dataProvider = "sortOrder") public void queryOrderByObject(String sortOrder) throws Exception { String query = String.format("SELECT * FROM r ORDER BY r.propObject %s", sortOrder); int pageSize = 3; List<InternalObjectNode> results1 = this.queryWithContinuationTokens(query, pageSize); List<InternalObjectNode> results2 = this.queryWithContinuationTokens(query, this.createdDocuments.size()); assertThat(results1.stream().map(r -> r.getResourceId()).collect(Collectors.toList())) .containsExactlyElementsOf(results2.stream().limit(pageSize).map(r -> r.getResourceId()).collect(Collectors.toList())); } public InternalObjectNode createDocument(CosmosAsyncContainer cosmosContainer, Map<String, Object> keyValueProps) { InternalObjectNode docDefinition = getDocumentDefinition(keyValueProps); return BridgeInternal.getProperties(cosmosContainer.createItem(docDefinition).block()); } public List<InternalObjectNode> bulkInsert(CosmosAsyncContainer cosmosContainer, List<Map<String, Object>> keyValuePropsList) { ArrayList<InternalObjectNode> result = new ArrayList<InternalObjectNode>(); for(Map<String, Object> keyValueProps: keyValuePropsList) { InternalObjectNode docDefinition = getDocumentDefinition(keyValueProps); result.add(docDefinition); } return bulkInsertBlocking(cosmosContainer, result); } @BeforeMethod(groups = { "simple" }) public void beforeMethod() throws Exception { TimeUnit.SECONDS.sleep(10); } @BeforeClass(groups = { "simple" }, timeOut = 4 * SETUP_TIMEOUT) private void updateCollectionIndex() { CosmosContainerProperties containerProperties = createdCollection.read().block().getProperties(); IndexingPolicy indexingPolicy = containerProperties.getIndexingPolicy(); List<IncludedPath> includedPaths = indexingPolicy.getIncludedPaths(); IncludedPath includedPath = new IncludedPath("/propMixed/?"); if (!includedPaths.contains(includedPath)) { includedPaths.add(includedPath); 
indexingPolicy.setIncludedPaths(includedPaths); containerProperties.setIndexingPolicy(indexingPolicy); createdCollection.replace(containerProperties).block(); } } @AfterClass(groups = { "simple" }, timeOut = SHUTDOWN_TIMEOUT, alwaysRun = true) public void afterClass() { safeClose(client); } private void assertInvalidContinuationToken(String query, int[] pageSize, List<String> expectedIds) { String requestContinuation = null; do { CosmosQueryRequestOptions options = new CosmosQueryRequestOptions(); options.setMaxDegreeOfParallelism(2); OrderByContinuationToken orderByContinuationToken = new OrderByContinuationToken( new CompositeContinuationToken( "asdf", new Range<String>("A", "D", false, true)), new QueryItem[] {new QueryItem("{\"item\" : 42}")}, "rid", false); CosmosPagedFlux<InternalObjectNode> queryObservable = createdCollection.queryItems(query, options, InternalObjectNode.class); TestSubscriber<FeedResponse<InternalObjectNode>> testSubscriber = new TestSubscriber<>(); queryObservable.byPage(orderByContinuationToken.toString(),1).subscribe(testSubscriber); testSubscriber.awaitTerminalEvent(TIMEOUT, TimeUnit.MILLISECONDS); testSubscriber.assertError(CosmosException.class); } while (requestContinuation != null); } private void queryWithContinuationTokensAndPageSizes(String query, int[] pageSizes, List<String> expectedIds) { for (int pageSize : pageSizes) { List<InternalObjectNode> receivedDocuments = this.queryWithContinuationTokens(query, pageSize); List<String> actualIds = new ArrayList<String>(); for (InternalObjectNode document : receivedDocuments) { actualIds.add(document.getResourceId()); } assertThat(actualIds).containsExactlyElementsOf(expectedIds); } } private List<InternalObjectNode> queryWithContinuationTokens(String query, int pageSize) { String requestContinuation = null; List<String> continuationTokens = new ArrayList<String>(); List<InternalObjectNode> receivedDocuments = new ArrayList<InternalObjectNode>(); do { CosmosQueryRequestOptions options 
= new CosmosQueryRequestOptions(); options.setMaxDegreeOfParallelism(2); CosmosPagedFlux<InternalObjectNode> queryObservable = createdCollection.queryItems(query, options, InternalObjectNode.class); TestSubscriber<FeedResponse<InternalObjectNode>> testSubscriber = new TestSubscriber<>(); queryObservable.byPage(requestContinuation, pageSize).subscribe(testSubscriber); testSubscriber.awaitTerminalEvent(TIMEOUT, TimeUnit.MILLISECONDS); testSubscriber.assertNoErrors(); testSubscriber.assertComplete(); @SuppressWarnings("unchecked") FeedResponse<InternalObjectNode> firstPage = (FeedResponse<InternalObjectNode>) testSubscriber.getEvents().get(0).get(0); requestContinuation = firstPage.getContinuationToken(); receivedDocuments.addAll(firstPage.getResults()); continuationTokens.add(requestContinuation); } while (requestContinuation != null); return receivedDocuments; } private static InternalObjectNode getDocumentDefinition(String partitionKey, String id, Map<String, Object> keyValuePair) { StringBuilder sb = new StringBuilder(); sb.append("{\n"); for(String key: keyValuePair.keySet()) { Object val = keyValuePair.get(key); sb.append(" "); sb.append("\"").append(key).append("\"").append(" :" ); if (val == null) { sb.append("null"); } else { sb.append(toJson(val)); } sb.append(",\n"); } sb.append(String.format(" \"id\": \"%s\",\n", id)); sb.append(String.format(" \"mypk\": \"%s\"\n", partitionKey)); sb.append("}"); return new InternalObjectNode(sb.toString()); } private static InternalObjectNode getDocumentDefinition(Map<String, Object> keyValuePair) { String uuid = UUID.randomUUID().toString(); return getDocumentDefinition(uuid, uuid, keyValuePair); } private static String toJson(Object object){ try { return Utils.getSimpleObjectMapper().writeValueAsString(object); } catch (JsonProcessingException e) { throw new IllegalStateException(e); } } }
The test validation relies on `ItemComparator`, which means the expected order is computed with the very comparator the implementation itself uses. This is not ideal: if `ItemComparator` were later changed and a bug introduced — or its ordering semantics deliberately altered — this test would still pass. Test validation should not depend on the implementation under test. Please derive the expected order independently, without relying on the implementation's comparator.
/**
 * Verifies a cross-partition ORDER BY over a property holding mixed JSON types:
 * (1) every created document comes back exactly once, (2) results are grouped
 * contiguously by JSON type in the documented ascending type precedence, and
 * (3) values inside each type run are sorted in natural order.
 * <p>
 * FIX(review): the expected order is no longer computed with the service's own
 * {@code ItemComparator} — validation must not depend on the implementation under
 * test, otherwise a comparator regression would go undetected. The ordering
 * contract is asserted from first principles instead.
 *
 * @param sortOrder "ASC" or "DESC" from the data provider.
 * @throws Exception on test infrastructure failure.
 */
public void queryOrderByMixedTypes(String sortOrder) throws Exception {
    List<PartitionKeyRange> partitionKeyRanges = getPartitionKeyRanges(createdCollection.getId(),
        BridgeInternal.getContextClient(this.client));
    // The mixed-type ordering scenario is only meaningful as a cross-partition query.
    assertThat(partitionKeyRanges.size()).isGreaterThan(1);

    // NOTE(review): the format string has no %s placeholder, so sortOrder is silently
    // ignored and the query always runs ascending; the precedence checks below assume
    // ASC. TODO: confirm whether DESC was ever intended here.
    // SELECT * (not just id/propMixed) so the type marker property is available for validation.
    String query = String.format("SELECT * FROM r ORDER BY r.propMixed ", sortOrder);
    CosmosQueryRequestOptions options = new CosmosQueryRequestOptions();

    List<String> sourceIds = createdDocuments.stream()
                                 .map(Resource::getId)
                                 .collect(Collectors.toList());

    int pageSize = 20;
    CosmosPagedFlux<InternalObjectNode> queryFlux =
        createdCollection.queryItems(query, options, InternalObjectNode.class);
    TestSubscriber<FeedResponse<InternalObjectNode>> subscriber = new TestSubscriber<>();
    queryFlux.byPage(pageSize).subscribe(subscriber);
    // FIX: the previous version subscribed but never awaited, leaving the subscriber
    // unused and racy; drain the whole result stream before asserting.
    subscriber.awaitTerminalEvent();
    subscriber.assertComplete();
    subscriber.assertNoErrors();

    List<InternalObjectNode> results = new ArrayList<>();
    subscriber.values().forEach(feedResponse -> results.addAll(feedResponse.getResults()));

    // Completeness: the query returns exactly the created documents (order checked below).
    assertThat(results.size()).isEqualTo(createdDocuments.size());
    List<String> resultIds = results.stream().map(Resource::getId).collect(Collectors.toList());
    assertThat(resultIds).containsExactlyInAnyOrderElementsOf(sourceIds);

    // Documented ascending precedence of JSON types in ORDER BY.
    final List<String> typeList =
        Arrays.asList("undefined", "null", "boolean", "number", "string", "array", "object");
    List<String> observedTypes = new ArrayList<>();
    results.forEach(item -> {
        // Documents are tagged with their value's type under "proptype"; absence means undefined.
        String propType = item.has("proptype") ? item.getString("proptype") : "undefined";
        if (!observedTypes.contains(propType)) {
            observedTypes.add(propType);
        } else {
            // A type seen before must still be the current run, otherwise types interleave.
            assertThat(observedTypes.get(observedTypes.size() - 1))
                .as("Items of same type should be contiguous")
                .isEqualTo(propType);
        }
    });
    assertThat(observedTypes).containsExactlyElementsOf(typeList);

    // Within each type run the values must be in natural ascending order.
    for (String type : typeList) {
        List<InternalObjectNode> items = results.stream()
            .filter(r -> "undefined".equals(type) ? !r.has("proptype") : type.equals(r.getString("proptype")))
            .collect(Collectors.toList());
        if ("boolean".equals(type)) {
            List<Boolean> actual = items.stream().map(n -> n.getBoolean("propMixed")).collect(Collectors.toList());
            List<Boolean> sorted = new ArrayList<>(actual);
            sorted.sort(Comparator.naturalOrder());
            assertThat(actual).containsExactlyElementsOf(sorted);
        }
        if ("number".equals(type)) {
            List<Number> actual = items.stream().map(n -> (Number) n.get("propMixed")).collect(Collectors.toList());
            List<Number> sorted = new ArrayList<>(actual);
            sorted.sort(Comparator.comparingDouble(Number::doubleValue));
            assertThat(actual).containsExactlyElementsOf(sorted);
        }
        if ("string".equals(type)) {
            List<String> actual = items.stream().map(n -> n.getString("propMixed")).collect(Collectors.toList());
            List<String> sorted = new ArrayList<>(actual);
            sorted.sort(Comparator.naturalOrder());
            assertThat(actual).containsExactlyElementsOf(sorted);
        }
    }
}
.sorted((o1, o2) -> ItemComparator.getInstance().compare(o1.get("propMixed"),
/**
 * Cross-partition ORDER BY over a mixed-type property: asserts that (1) every created
 * document is returned exactly once, (2) results are grouped contiguously by JSON type
 * in the documented ascending precedence, and (3) values within each type run are
 * sorted in natural order. The expected order is derived from first principles rather
 * than from the implementation's own comparator, so comparator regressions are caught.
 *
 * @param sortOrder "ASC" or "DESC" from the data provider.
 * @throws Exception on test infrastructure failure.
 */
public void queryOrderByMixedTypes(String sortOrder) throws Exception {
    List<PartitionKeyRange> partitionKeyRanges = getPartitionKeyRanges(createdCollection.getId(), BridgeInternal
        .getContextClient(this.client));
    // The scenario is only meaningful as a cross-partition query.
    assertThat(partitionKeyRanges.size()).isGreaterThan(1);
    // NOTE(review): the format string has no %s placeholder, so sortOrder is silently
    // ignored and the query always runs ascending; the precedence checks below assume
    // ASC. TODO: confirm whether DESC was ever intended here.
    String query = String.format("SELECT * FROM r ORDER BY r.propMixed ", sortOrder);
    CosmosQueryRequestOptions options = new CosmosQueryRequestOptions();
    List<String> sourceIds = createdDocuments.stream()
                                 .map(Resource::getId)
                                 .collect(Collectors.toList());
    int pageSize = 20;
    CosmosPagedFlux<InternalObjectNode> queryFlux = createdCollection
                                                        .queryItems(query, options, InternalObjectNode.class);
    TestSubscriber<FeedResponse<InternalObjectNode>> subscriber = new TestSubscriber<>();
    queryFlux.byPage(pageSize).subscribe(subscriber);
    subscriber.awaitTerminalEvent();
    subscriber.assertComplete();
    subscriber.assertNoErrors();

    List<InternalObjectNode> results = new ArrayList<>();
    subscriber.values().forEach(feedResponse -> results.addAll(feedResponse.getResults()));

    // Completeness: same documents back; ordering is validated separately below.
    assertThat(results.size()).isEqualTo(createdDocuments.size());
    List<String> resultIds = results.stream().map(Resource::getId).collect(Collectors.toList());
    assertThat(resultIds).containsExactlyInAnyOrderElementsOf(sourceIds);

    // Documented ascending precedence of JSON types in ORDER BY.
    final List<String> typeList = Arrays.asList("undefined", "null", "boolean", "number", "string", "array",
        "object");
    List<String> observedTypes = new ArrayList<>();
    // FIX: removed the System.out.println debug noise that ran for every result item.
    results.forEach(item -> {
        String propType = "undefined";
        if (item.has(PROPTYPE)) {
            propType = item.getString(PROPTYPE);
        }
        if (!observedTypes.contains(propType)) {
            observedTypes.add(propType);
        } else {
            // Re-encountering a type is only valid while still inside its run.
            // FIX: as() must be called BEFORE the assertion; the previous
            // assertThat(equals).isTrue().as(...) attached the description after the
            // assertion had already executed, so it never appeared in failure messages.
            assertThat(observedTypes.get(observedTypes.size() - 1))
                .as("Items of same type should be contiguous")
                .isEqualTo(propType);
        }
    });
    assertThat(observedTypes).containsExactlyElementsOf(typeList);

    // Within each type run the values must be in natural ascending order.
    for (String type : typeList) {
        List<InternalObjectNode> items = results.stream().filter(r -> {
            if ("undefined".equals(type)) {
                return !r.has(PROPTYPE);
            }
            return type.equals(r.getString(PROPTYPE));
        }).collect(Collectors.toList());
        if ("boolean".equals(type)) {
            List<Boolean> sourceList = items.stream().map(n -> n.getBoolean(PROP_MIXED)).collect(Collectors.toList());
            // The ArrayList copy-constructor already copies the contents.
            List<Boolean> toBeSortedList = new ArrayList<>(sourceList);
            toBeSortedList.sort(Comparator.comparing(Boolean::booleanValue));
            assertThat(toBeSortedList).containsExactlyElementsOf(sourceList);
        }
        if ("number".equals(type)) {
            List<Number> numberList = items.stream().map(n -> (Number) n.get(PROP_MIXED)).collect(Collectors.toList());
            // FIX: redundant Collections.copy removed — the copy-constructor already copied.
            List<Number> toBeSortedList = new ArrayList<>(numberList);
            toBeSortedList.sort(Comparator.comparingDouble(Number::doubleValue));
            assertThat(toBeSortedList).containsExactlyElementsOf(numberList);
        }
        if ("string".equals(type)) {
            List<String> sourceList = items.stream().map(n -> n.getString(PROP_MIXED)).collect(Collectors.toList());
            // FIX: redundant Collections.copy removed — the copy-constructor already copied.
            List<String> toBeSortedList = new ArrayList<>(sourceList);
            toBeSortedList.sort(Comparator.comparing(String::valueOf));
            assertThat(toBeSortedList).containsExactlyElementsOf(sourceList);
        }
    }
}
/**
 * Integration tests for cross-partition ORDER BY queries against a shared
 * multi-partition Cosmos container. Documents with typed sort properties
 * (propInt, propStr, propArray, propObject, propMixed) are bulk-inserted in
 * {@code before_OrderbyDocumentQueryTest} and reused by every test.
 */
class OrderbyDocumentQueryTest extends TestSuiteBase {
    // Minimum RU charge each physical partition is expected to contribute to a query.
    private final double minQueryRequestChargePerPartition = 2.0;
    private CosmosAsyncClient client;
    private CosmosAsyncContainer createdCollection;
    private CosmosAsyncDatabase createdDatabase;
    // Populated once in before_OrderbyDocumentQueryTest; tests derive expected results from it.
    private List<InternalObjectNode> createdDocuments = new ArrayList<>();
    private int numberOfPartitions;

    @Factory(dataProvider = "clientBuildersWithDirect")
    public OrderbyDocumentQueryTest(CosmosClientBuilder clientBuilder) {
        super(clientBuilder);
    }

    /** ORDER BY query filtered to a single known document; validates content and query metrics. */
    @Test(groups = { "simple" }, timeOut = TIMEOUT, dataProvider = "queryMetricsArgProvider")
    public void queryDocumentsValidateContent(Boolean qmEnabled) throws Exception {
        InternalObjectNode expectedDocument = createdDocuments.get(0);
        String query = String.format("SELECT * from root r where r.propStr = '%s'"
            + " ORDER BY r.propInt"
            , ModelBridgeInternal.getStringFromJsonSerializable(expectedDocument,"propStr"));

        CosmosQueryRequestOptions options = new CosmosQueryRequestOptions();
        // qmEnabled == null means "leave the client default"; only set when the provider gives a value.
        if (qmEnabled != null) {
            options.setQueryMetricsEnabled(qmEnabled);
        }

        CosmosPagedFlux<InternalObjectNode> queryObservable = createdCollection.queryItems(query, options, InternalObjectNode.class);

        List<String> expectedResourceIds = new ArrayList<>();
        expectedResourceIds.add(expectedDocument.getResourceId());

        Map<String, ResourceValidator<InternalObjectNode>> resourceIDToValidator = new HashMap<>();
        resourceIDToValidator.put(expectedDocument.getResourceId(),
            new ResourceValidator.Builder<InternalObjectNode>().areEqual(expectedDocument).build());

        FeedResponseListValidator<InternalObjectNode> validator = new FeedResponseListValidator.Builder<InternalObjectNode>()
            .numberOfPages(1)
            .containsExactly(expectedResourceIds)
            .validateAllResources(resourceIDToValidator)
            .totalRequestChargeIsAtLeast(numberOfPartitions * minQueryRequestChargePerPartition)
            .allPagesSatisfy(new FeedResponseValidator.Builder<InternalObjectNode>().hasRequestChargeHeader().build())
            .hasValidQueryMetrics(qmEnabled)
            .build();

        validateQuerySuccess(queryObservable.byPage(), validator);
    }

    /** ORDER BY query whose filter matches nothing: expects one empty page with a charge header. */
    @Test(groups = { "simple" }, timeOut = TIMEOUT)
    public void queryDocuments_NoResults() throws Exception {
        String query = "SELECT * from root r where r.id = '2' ORDER BY r.propInt";
        CosmosQueryRequestOptions options = new CosmosQueryRequestOptions();
        CosmosPagedFlux<InternalObjectNode> queryObservable = createdCollection.queryItems(query, options, InternalObjectNode.class);

        FeedResponseListValidator<InternalObjectNode> validator = new FeedResponseListValidator.Builder<InternalObjectNode>()
            .containsExactly(new ArrayList<>())
            .numberOfPages(1)
            .totalRequestChargeIsAtLeast(numberOfPartitions * minQueryRequestChargePerPartition)
            .allPagesSatisfy(new FeedResponseValidator.Builder<InternalObjectNode>()
                .hasRequestChargeHeader().build())
            .build();

        validateQuerySuccess(queryObservable.byPage(), validator);
    }

    /** Supplies the two sort directions used by the parameterized ORDER BY tests. */
    @DataProvider(name = "sortOrder")
    public Object[][] sortOrder() {
        return new Object[][] { { "ASC" }, {"DESC"} };
    }

    /** ORDER BY r.propInt in both directions; expected IDs computed by sorting the source documents. */
    @Test(groups = { "simple" }, timeOut = TIMEOUT, dataProvider = "sortOrder")
    public void queryOrderBy(String sortOrder) throws Exception {
        String query = String.format("SELECT * FROM r ORDER BY r.propInt %s", sortOrder);
        CosmosQueryRequestOptions options = new CosmosQueryRequestOptions();
        int pageSize = 3;
        CosmosPagedFlux<InternalObjectNode> queryObservable = createdCollection.queryItems(query, options, InternalObjectNode.class);
        Comparator<Integer> validatorComparator = Comparator.nullsFirst(Comparator.<Integer>naturalOrder());

        List<String> expectedResourceIds = sortDocumentsAndCollectResourceIds("propInt", d -> ModelBridgeInternal.getIntFromJsonSerializable(d,"propInt"), validatorComparator);
        // Helper always sorts ascending; flip for DESC.
        if ("DESC".equals(sortOrder)) {
            Collections.reverse(expectedResourceIds);
        }
        int expectedPageSize = expectedNumberOfPages(expectedResourceIds.size(), pageSize);

        FeedResponseListValidator<InternalObjectNode> validator = new FeedResponseListValidator.Builder<InternalObjectNode>()
            .containsExactly(expectedResourceIds)
            .numberOfPages(expectedPageSize)
            .allPagesSatisfy(new FeedResponseValidator.Builder<InternalObjectNode>()
                .hasRequestChargeHeader().build())
            .totalRequestChargeIsAtLeast(numberOfPartitions * minQueryRequestChargePerPartition)
            .build();

        validateQuerySuccess(queryObservable.byPage(pageSize), validator);
    }

    /** SELECT VALUE projection with ORDER BY: validates the projected values, not documents. */
    @Test(groups = {"simple"}, timeOut = TIMEOUT, dataProvider = "sortOrder")
    public void queryOrderByWithValue(String sortOrder) throws Exception {
        String query = String.format("SELECT value r.propInt FROM r ORDER BY r.propInt %s", sortOrder);
        CosmosQueryRequestOptions options = new CosmosQueryRequestOptions();

        int pageSize = 3;
        CosmosPagedFlux<Integer> queryObservable = createdCollection.queryItems(query, options, Integer.class);
        Comparator<Integer> validatorComparator = Comparator.nullsFirst(Comparator.<Integer>naturalOrder());

        List<Integer> expectedValues = sortDocumentsAndCollectValues("propInt", d -> ModelBridgeInternal
            .getIntFromJsonSerializable(d, "propInt"), validatorComparator);
        if ("DESC".equals(sortOrder)) {
            Collections.reverse(expectedValues);
        }
        int expectedPageSize = expectedNumberOfPages(expectedValues.size(), pageSize);

        FeedResponseListValidator<Integer> validator = new FeedResponseListValidator.Builder<Integer>()
            .containsExactlyValues(expectedValues)
            .numberOfPages(expectedPageSize)
            .allPagesSatisfy(new FeedResponseValidator.Builder<Integer>()
                .hasRequestChargeHeader().build())
            .totalRequestChargeIsAtLeast(numberOfPartitions * minQueryRequestChargePerPartition)
            .build();

        validateQuerySuccess(queryObservable.byPage(pageSize), validator);
    }

    /** Default-direction ORDER BY on an integer property. */
    @Test(groups = { "simple" }, timeOut = TIMEOUT)
    public void queryOrderByInt() throws Exception {
        String query = "SELECT * FROM r ORDER BY r.propInt";
        CosmosQueryRequestOptions options = new CosmosQueryRequestOptions();
        int pageSize = 3;
        CosmosPagedFlux<InternalObjectNode> queryObservable = createdCollection.queryItems(query, options, InternalObjectNode.class);

        Comparator<Integer> validatorComparator = Comparator.nullsFirst(Comparator.<Integer>naturalOrder());
        List<String> expectedResourceIds = sortDocumentsAndCollectResourceIds("propInt", d -> ModelBridgeInternal.getIntFromJsonSerializable(d,"propInt"), validatorComparator);
        int expectedPageSize = expectedNumberOfPages(expectedResourceIds.size(), pageSize);

        FeedResponseListValidator<InternalObjectNode> validator = new FeedResponseListValidator.Builder<InternalObjectNode>()
            .containsExactly(expectedResourceIds)
            .numberOfPages(expectedPageSize)
            .allPagesSatisfy(new FeedResponseValidator.Builder<InternalObjectNode>()
                .hasRequestChargeHeader().build())
            .totalRequestChargeIsAtLeast(numberOfPartitions * minQueryRequestChargePerPartition)
            .build();

        validateQuerySuccess(queryObservable.byPage(pageSize), validator);
    }

    /** Default-direction ORDER BY on a string property. */
    @Test(groups = { "simple" }, timeOut = TIMEOUT)
    public void queryOrderByString() throws Exception {
        String query = "SELECT * FROM r ORDER BY r.propStr";
        CosmosQueryRequestOptions options = new CosmosQueryRequestOptions();
        int pageSize = 3;
        CosmosPagedFlux<InternalObjectNode> queryObservable = createdCollection.queryItems(query, options, InternalObjectNode.class);

        Comparator<String> validatorComparator = Comparator.nullsFirst(Comparator.<String>naturalOrder());
        List<String> expectedResourceIds = sortDocumentsAndCollectResourceIds("propStr", d -> ModelBridgeInternal.getStringFromJsonSerializable(d,"propStr"), validatorComparator);
        int expectedPageSize = expectedNumberOfPages(expectedResourceIds.size(), pageSize);

        FeedResponseListValidator<InternalObjectNode> validator = new FeedResponseListValidator.Builder<InternalObjectNode>()
            .containsExactly(expectedResourceIds)
            .numberOfPages(expectedPageSize)
            .allPagesSatisfy(new FeedResponseValidator.Builder<InternalObjectNode>()
                .hasRequestChargeHeader().build())
            .totalRequestChargeIsAtLeast(numberOfPartitions * minQueryRequestChargePerPartition)
            .build();

        validateQuerySuccess(queryObservable.byPage(pageSize), validator);
    }

    // NOTE(review): this @Test annotation sits directly on a private helper,
    // which TestNG will not run. It looks like the annotation belongs to a
    // test method (queryOrderByMixedTypes, defined elsewhere in this file)
    // that was separated from it — confirm against the original source.
    @Test(groups = { "simple" }, timeOut = TIMEOUT, dataProvider = "sortOrder")
    @NotNull
    // Reads all partition key ranges of the given container via the low-level client.
    private List<PartitionKeyRange> getPartitionKeyRanges(
        String containerId, AsyncDocumentClient asyncDocumentClient) {
        List<PartitionKeyRange> partitionKeyRanges = new ArrayList<>();
        List<FeedResponse<PartitionKeyRange>> partitionFeedResponseList = asyncDocumentClient
            .readPartitionKeyRanges("/dbs/" + createdDatabase.getId() + "/colls/" + containerId,
                new CosmosQueryRequestOptions())
            .collectList().block();
        partitionFeedResponseList.forEach(f -> partitionKeyRanges.addAll(f.getResults()));
        return partitionKeyRanges;
    }

    /** TOP values around the document-count boundary: 0, 1, 5, size-1, size, size+1, 2*size. */
    @DataProvider(name = "topValue")
    public Object[][] topValueParameter() {
        return new Object[][] { { 0 }, { 1 }, { 5 }, { createdDocuments.size() - 1 }, { createdDocuments.size() }, { createdDocuments.size() + 1 }, { 2 * createdDocuments.size() } };
    }

    /** TOP combined with ORDER BY; expected IDs are the ascending sort truncated to topValue. */
    @Test(groups = { "simple" }, timeOut = TIMEOUT, dataProvider = "topValue")
    public void queryOrderWithTop(int topValue) throws Exception {
        String query = String.format("SELECT TOP %d * FROM r ORDER BY r.propInt", topValue);
        CosmosQueryRequestOptions options = new CosmosQueryRequestOptions();
        int pageSize = 3;
        CosmosPagedFlux<InternalObjectNode> queryObservable = createdCollection.queryItems(query, options, InternalObjectNode.class);

        Comparator<Integer> validatorComparator = Comparator.nullsFirst(Comparator.<Integer>naturalOrder());

        List<String> expectedResourceIds =
            sortDocumentsAndCollectResourceIds("propInt", d -> ModelBridgeInternal.getIntFromJsonSerializable(d,"propInt"), validatorComparator)
                .stream().limit(topValue).collect(Collectors.toList());

        int expectedPageSize = expectedNumberOfPages(expectedResourceIds.size(), pageSize);

        FeedResponseListValidator<InternalObjectNode> validator = new FeedResponseListValidator.Builder<InternalObjectNode>()
            .containsExactly(expectedResourceIds)
            .numberOfPages(expectedPageSize)
            .allPagesSatisfy(new FeedResponseValidator.Builder<InternalObjectNode>()
                .hasRequestChargeHeader().build())
            // TOP 0 short-circuits, so the per-partition minimum charge does not apply.
            .totalRequestChargeIsAtLeast(numberOfPartitions * (topValue > 0 ? minQueryRequestChargePerPartition : 1))
            .build();

        validateQuerySuccess(queryObservable.byPage(pageSize), validator);
    }

    // Sorts the created documents that HAVE propName by the extracted value (ascending
    // per the given comparator) and returns their resource ids in that order.
    private <T> List<String> sortDocumentsAndCollectResourceIds(String propName, Function<InternalObjectNode, T> extractProp, Comparator<T> comparer) {
        return createdDocuments.stream()
            .filter(d -> ModelBridgeInternal.getMapFromJsonSerializable(d).containsKey(propName))
            .sorted((d1, d2) -> comparer.compare(extractProp.apply(d1), extractProp.apply(d2)))
            .map(Resource::getResourceId).collect(Collectors.toList());
    }

    // Same as above but returns the raw property values instead of resource ids.
    @SuppressWarnings("unchecked")
    private <T> List<T> sortDocumentsAndCollectValues(String propName, Function<InternalObjectNode, T> extractProp, Comparator<T> comparer) {
        return createdDocuments.stream()
            .filter(d -> ModelBridgeInternal.getMapFromJsonSerializable(d).containsKey(propName))
            .sorted((d1, d2) -> comparer.compare(extractProp.apply(d1), extractProp.apply(d2)))
            .map(d -> (T)ModelBridgeInternal.getMapFromJsonSerializable(d).get(propName))
            .collect(Collectors.toList());
    }

    /**
     * Reads the first page of a single-partition ORDER BY query, then resumes
     * from its continuation token and validates the remaining 7 of 10 documents.
     */
    @Test(groups = { "simple" }, timeOut = TIMEOUT)
    public void queryScopedToSinglePartition_StartWithContinuationToken() throws Exception {
        String query = "SELECT * FROM r ORDER BY r.propScopedPartitionInt ASC";
        CosmosQueryRequestOptions options = new CosmosQueryRequestOptions();
        options.setPartitionKey(new PartitionKey("duplicateParitionKeyValue"));
        CosmosPagedFlux<InternalObjectNode> queryObservable = createdCollection.queryItems(query, options, InternalObjectNode.class);

        TestSubscriber<FeedResponse<InternalObjectNode>> subscriber = new TestSubscriber<>();
        // Take only the first page (page size 3) to obtain a continuation token.
        queryObservable.byPage(3).take(1).subscribe(subscriber);

        subscriber.awaitTerminalEvent();
        subscriber.assertComplete();
        subscriber.assertNoErrors();
        assertThat(subscriber.valueCount()).isEqualTo(1);
        @SuppressWarnings("unchecked")
        FeedResponse<InternalObjectNode> page = (FeedResponse<InternalObjectNode>) subscriber.getEvents().get(0).get(0);
        assertThat(page.getResults()).hasSize(3);

        assertThat(page.getContinuationToken()).isNotEmpty();

        queryObservable = createdCollection.queryItems(query, options, InternalObjectNode.class);

        // Expected remainder: same partition key, propScopedPartitionInt > 2 (first page held 0..2).
        List<InternalObjectNode> expectedDocs = createdDocuments.stream()
            .filter(d -> (StringUtils.equals("duplicateParitionKeyValue", ModelBridgeInternal.getStringFromJsonSerializable(d,"mypk"))))
            .filter(d -> (ModelBridgeInternal.getIntFromJsonSerializable(d,"propScopedPartitionInt") > 2)).collect(Collectors.toList());
        // NOTE(review): maxItemCount is read from options that never set it —
        // presumably a non-null default applies; confirm getMaxItemCountFromQueryRequestOptions.
        Integer maxItemCount = ModelBridgeInternal.getMaxItemCountFromQueryRequestOptions(options);
        int expectedPageSize = (expectedDocs.size() + maxItemCount - 1) / maxItemCount;

        assertThat(expectedDocs).hasSize(10 - 3);

        FeedResponseListValidator<InternalObjectNode> validator = null;

        validator = new FeedResponseListValidator.Builder<InternalObjectNode>()
            .containsExactly(expectedDocs.stream()
                .sorted((e1, e2) -> Integer.compare(ModelBridgeInternal.getIntFromJsonSerializable(e1,"propScopedPartitionInt"),
                    ModelBridgeInternal.getIntFromJsonSerializable(e2,"propScopedPartitionInt")))
                .map(d -> d.getResourceId()).collect(Collectors.toList()))
            .numberOfPages(expectedPageSize)
            .allPagesSatisfy(new FeedResponseValidator.Builder<InternalObjectNode>()
                .requestChargeGreaterThanOrEqualTo(1.0).build())
            .build();

        validateQuerySuccess(queryObservable.byPage(page.getContinuationToken()), validator);
    }

    /** Serializes an OrderByContinuationToken and verifies every field survives a parse round trip. */
    @Test(groups = { "simple" }, timeOut = TIMEOUT)
    public void orderByContinuationTokenRoundTrip() throws Exception {
        {
            // Positive case: well-formed token parses and all fields match.
            OrderByContinuationToken orderByContinuationToken = new OrderByContinuationToken(
                new CompositeContinuationToken(
                    "asdf",
                    new Range<String>("A", "D", false, true)),
                new QueryItem[] {new QueryItem("{\"item\" : 42}")},
                "rid",
                false);
            String serialized = orderByContinuationToken.toString();
            ValueHolder<OrderByContinuationToken> outOrderByContinuationToken = new ValueHolder<OrderByContinuationToken>();

            assertThat(OrderByContinuationToken.tryParse(serialized, outOrderByContinuationToken)).isTrue();
            OrderByContinuationToken deserialized = outOrderByContinuationToken.v;
            CompositeContinuationToken compositeContinuationToken = deserialized.getCompositeContinuationToken();
            String token = compositeContinuationToken.getToken();
            Range<String> range = compositeContinuationToken.getRange();
            assertThat(token).isEqualTo("asdf");
            assertThat(range.getMin()).isEqualTo("A");
            assertThat(range.getMax()).isEqualTo("D");
            assertThat(range.isMinInclusive()).isEqualTo(false);
            assertThat(range.isMaxInclusive()).isEqualTo(true);

            QueryItem[] orderByItems = deserialized.getOrderByItems();
            assertThat(orderByItems).isNotNull();
            assertThat(orderByItems.length).isEqualTo(1);
            assertThat(orderByItems[0].getItem()).isEqualTo(42);

            String rid = deserialized.getRid();
            assertThat(rid).isEqualTo("rid");

            boolean inclusive = deserialized.getInclusive();
            assertThat(inclusive).isEqualTo(false);
        }

        {
            // Negative case: malformed JSON must fail to parse.
            ValueHolder<OrderByContinuationToken> outOrderByContinuationToken = new ValueHolder<OrderByContinuationToken>();
            assertThat(OrderByContinuationToken.tryParse("{\"property\" : \"Not a valid Order By Token\"}", outOrderByContinuationToken)).isFalse();
        }
    }

    /** Drains an integer ORDER BY query page-by-page via continuation tokens, for several page sizes. */
    @Test(groups = { "simple" }, timeOut = TIMEOUT * 10, dataProvider = "sortOrder", retryAnalyzer = RetryAnalyzer.class)
    public void queryDocumentsWithOrderByContinuationTokensInteger(String sortOrder) throws Exception {
        String query = String.format("SELECT * FROM c ORDER BY c.propInt %s", sortOrder);
        // Bake the direction into the comparator instead of reversing afterwards.
        Comparator<Integer> order = sortOrder.equals("ASC")?Comparator.naturalOrder():Comparator.reverseOrder();
        Comparator<Integer> validatorComparator = Comparator.nullsFirst(order);
        List<String> expectedResourceIds = sortDocumentsAndCollectResourceIds("propInt", d -> ModelBridgeInternal.getIntFromJsonSerializable(d,"propInt"), validatorComparator);
        this.queryWithContinuationTokensAndPageSizes(query, new int[] { 1, 5, 10, 100}, expectedResourceIds);
    }

    /** Same continuation-token drain as above, but ordering on the string id property. */
    @Test(groups = { "simple" }, timeOut = TIMEOUT * 10, dataProvider = "sortOrder")
    public void queryDocumentsWithOrderByContinuationTokensString(String sortOrder) throws Exception {
        String query = String.format("SELECT * FROM c ORDER BY c.id %s", sortOrder);
        Comparator<String> order = sortOrder.equals("ASC")?Comparator.naturalOrder():Comparator.reverseOrder();
        Comparator<String> validatorComparator = Comparator.nullsFirst(order);
        List<String> expectedResourceIds = sortDocumentsAndCollectResourceIds("id", d -> ModelBridgeInternal.getStringFromJsonSerializable(d,"id"), validatorComparator);
        this.queryWithContinuationTokensAndPageSizes(query, new int[] { 1, 5, 10, 100 }, expectedResourceIds);
    }

    /** Resuming from a bogus continuation token must surface a CosmosException. */
    @Test(groups = { "simple" }, timeOut = TIMEOUT * 10, dataProvider = "sortOrder")
    public void queryDocumentsWithInvalidOrderByContinuationTokensString(String sortOrder) throws Exception {
        String query = String.format("SELECT * FROM c ORDER BY c.id %s", sortOrder);
        Comparator<String> validatorComparator;
        if(sortOrder.equals("ASC")) {
            validatorComparator = Comparator.nullsFirst(Comparator.<String>naturalOrder());
        }else{
            validatorComparator = Comparator.nullsFirst(Comparator.<String>reverseOrder());
        }
        List<String> expectedResourceIds = sortDocumentsAndCollectResourceIds("id", d -> ModelBridgeInternal.getStringFromJsonSerializable(d,"id"), validatorComparator);
        this.assertInvalidContinuationToken(query, new int[] { 1, 5, 10, 100 }, expectedResourceIds);
    }

    /** ORDER BY on an array property: first page must equal the prefix of a full drain. */
    @Test(groups = {"simple"}, timeOut = TIMEOUT, dataProvider = "sortOrder")
    public void queryOrderByArray(String sortOrder) throws Exception {
        String query = String.format("SELECT * FROM r ORDER BY r.propArray %s", sortOrder);
        int pageSize = 3;
        List<InternalObjectNode> results1 = this.queryWithContinuationTokens(query, pageSize);
        List<InternalObjectNode> results2 = this.queryWithContinuationTokens(query, this.createdDocuments.size());
        assertThat(results1.stream().map(r -> r.getResourceId()).collect(Collectors.toList()))
            .containsExactlyElementsOf(results2.stream().limit(pageSize).map(r -> r.getResourceId()).collect(Collectors.toList()));
    }

    /** ORDER BY on an object property: same prefix-consistency check as queryOrderByArray. */
    @Test(groups = {"simple"}, timeOut = TIMEOUT, dataProvider = "sortOrder")
    public void queryOrderByObject(String sortOrder) throws Exception {
        String query = String.format("SELECT * FROM r ORDER BY r.propObject %s", sortOrder);
        int pageSize = 3;
        List<InternalObjectNode> results1 = this.queryWithContinuationTokens(query, pageSize);
        List<InternalObjectNode> results2 = this.queryWithContinuationTokens(query, this.createdDocuments.size());
        assertThat(results1.stream().map(r -> r.getResourceId()).collect(Collectors.toList()))
            .containsExactlyElementsOf(results2.stream().limit(pageSize).map(r -> r.getResourceId()).collect(Collectors.toList()));
    }

    // Creates one document from a property map and returns the stored representation.
    public InternalObjectNode createDocument(CosmosAsyncContainer cosmosContainer, Map<String, Object> keyValueProps) {
        InternalObjectNode docDefinition = getDocumentDefinition(keyValueProps);
        return BridgeInternal.getProperties(cosmosContainer.createItem(docDefinition).block());
    }

    // Builds document definitions from property maps and bulk-inserts them (blocking).
    public List<InternalObjectNode> bulkInsert(CosmosAsyncContainer cosmosContainer, List<Map<String, Object>> keyValuePropsList) {
        ArrayList<InternalObjectNode> result = new ArrayList<InternalObjectNode>();

        for(Map<String, Object> keyValueProps: keyValuePropsList) {
            InternalObjectNode docDefinition = getDocumentDefinition(keyValueProps);
            result.add(docDefinition);
        }
        return bulkInsertBlocking(cosmosContainer, result);
    }

    // Fixed pause before each test — presumably to let the emulator/service settle; confirm why 10s.
    @BeforeMethod(groups = { "simple" })
    public void beforeMethod() throws Exception {
        TimeUnit.SECONDS.sleep(10);
    }

    /**
     * One-time setup: builds the client, truncates the shared container, inserts
     * 30 typed documents (plus one empty and 10 single-partition-scoped ones),
     * counts partitions, and extends the indexing policy for propMixed.
     */
    @BeforeClass(groups = { "simple" }, timeOut = 4 * SETUP_TIMEOUT)
    public void before_OrderbyDocumentQueryTest() throws Exception {
        client = getClientBuilder().buildAsyncClient();
        createdDatabase = getSharedCosmosDatabase(client);
        createdCollection = getSharedMultiPartitionCosmosContainer(client);
        truncateCollection(createdCollection);

        List<Map<String, Object>> keyValuePropsList = new ArrayList<>();
        Map<String, Object> props;

        for(int i = 0; i < 30; i++) {
            props = new HashMap<>();
            props.put("propInt", i);
            props.put("propStr", String.valueOf(i));
            List<Integer> orderByArray = new ArrayList<Integer>();
            Map<String, String> orderByObject = new HashMap<>();
            for (int k = 0; k < 3; k++) {
                orderByArray.add(k + i);
                orderByObject.put("key1", String.valueOf(i));
                orderByObject.put("key2", String.valueOf(orderByArray.get(k)));
            }
            props.put("propArray", orderByArray);
            props.put("propObject", orderByObject);
            keyValuePropsList.add(props);
            // Rotate propMixed through the JSON types: int, string, array, object, float.
            switch (i % 5) {
                case 0: props.put("propMixed", i); break;
                case 1: props.put("propMixed", String.valueOf(i)); break;
                case 2: props.put("propMixed", orderByArray); break;
                case 3: props.put("propMixed", orderByObject); break;
                case 4: props.put("propMixed", (float)i*3.17); break;
                default: break;
            }
        }

        // One document with none of the sort properties ("undefined" case).
        props = new HashMap<>();
        keyValuePropsList.add(props);
        createdDocuments = bulkInsert(createdCollection, keyValuePropsList);

        // 10 documents sharing one partition key for the single-partition continuation test.
        for(int i = 0; i < 10; i++) {
            Map<String, Object> p = new HashMap<>();
            p.put("propScopedPartitionInt", i);
            InternalObjectNode doc = getDocumentDefinition("duplicateParitionKeyValue", UUID.randomUUID().toString(), p);
            // NOTE(review): 'options' is unused, and 'doc' (InternalObjectNode) is passed to
            // createDocument, whose overload here takes a Map — presumably a base-class
            // overload resolves this; verify against TestSuiteBase.
            CosmosItemRequestOptions options = new CosmosItemRequestOptions();
            createdDocuments.add(createDocument(createdCollection, doc));
        }

        numberOfPartitions = CosmosBridgeInternal.getAsyncDocumentClient(client)
            .readPartitionKeyRanges("dbs/" + createdDatabase.getId() + "/colls/" + createdCollection.getId(), null)
            .flatMap(p -> Flux.fromIterable(p.getResults())).collectList().single().block().size();

        waitIfNeededForReplicasToCatchUp(getClientBuilder());
        updateCollectionIndex();
    }

    // Adds a range index path for /propMixed/? if it is not already present.
    private void updateCollectionIndex() {
        CosmosContainerProperties containerProperties = createdCollection.read().block().getProperties();
        IndexingPolicy indexingPolicy = containerProperties.getIndexingPolicy();
        List<IncludedPath> includedPaths = indexingPolicy.getIncludedPaths();
        IncludedPath includedPath = new IncludedPath("/propMixed/?");
        if (!includedPaths.contains(includedPath)) {
            includedPaths.add(includedPath);
            indexingPolicy.setIncludedPaths(includedPaths);
            containerProperties.setIndexingPolicy(indexingPolicy);
            createdCollection.replace(containerProperties).block();
        }
    }

    @AfterClass(groups = { "simple" }, timeOut = SHUTDOWN_TIMEOUT, alwaysRun = true)
    public void afterClass() {
        safeClose(client);
    }

    // Resumes the query from a syntactically valid but bogus token and expects CosmosException.
    // NOTE(review): requestContinuation is never reassigned, so the do/while executes exactly once.
    private void assertInvalidContinuationToken(String query, int[] pageSize, List<String> expectedIds) {
        String requestContinuation = null;
        do {
            CosmosQueryRequestOptions options = new CosmosQueryRequestOptions();
            options.setMaxDegreeOfParallelism(2);
            OrderByContinuationToken orderByContinuationToken = new OrderByContinuationToken(
                new CompositeContinuationToken(
                    "asdf",
                    new Range<String>("A", "D", false, true)),
                new QueryItem[] {new QueryItem("{\"item\" : 42}")},
                "rid",
                false);
            CosmosPagedFlux<InternalObjectNode> queryObservable = createdCollection.queryItems(query, options, InternalObjectNode.class);
            TestSubscriber<FeedResponse<InternalObjectNode>> testSubscriber = new TestSubscriber<>();
            queryObservable.byPage(orderByContinuationToken.toString(),1).subscribe(testSubscriber);
            testSubscriber.awaitTerminalEvent(TIMEOUT, TimeUnit.MILLISECONDS);
            testSubscriber.assertError(CosmosException.class);
        } while (requestContinuation != null);
    }

    // Drains the query for each page size and checks the concatenated ids match exactly.
    private void queryWithContinuationTokensAndPageSizes(String query, int[] pageSizes, List<String> expectedIds) {
        for (int pageSize : pageSizes) {
            List<InternalObjectNode> receivedDocuments = this.queryWithContinuationTokens(query, pageSize);
            List<String> actualIds = new ArrayList<String>();
            for (InternalObjectNode document : receivedDocuments) {
                actualIds.add(document.getResourceId());
            }
            assertThat(actualIds).containsExactlyElementsOf(expectedIds);
        }
    }

    // Fetches one page at a time, feeding each page's continuation token into the
    // next request, until the token is exhausted; returns all documents in order.
    private List<InternalObjectNode> queryWithContinuationTokens(String query, int pageSize) {
        String requestContinuation = null;
        // Collected but not asserted on — kept for debugging/inspection.
        List<String> continuationTokens = new ArrayList<String>();
        List<InternalObjectNode> receivedDocuments = new ArrayList<InternalObjectNode>();
        do {
            CosmosQueryRequestOptions options = new CosmosQueryRequestOptions();
            options.setMaxDegreeOfParallelism(2);
            CosmosPagedFlux<InternalObjectNode> queryObservable = createdCollection.queryItems(query, options, InternalObjectNode.class);

            TestSubscriber<FeedResponse<InternalObjectNode>> testSubscriber = new TestSubscriber<>();
            queryObservable.byPage(requestContinuation, pageSize).subscribe(testSubscriber);
            testSubscriber.awaitTerminalEvent(TIMEOUT, TimeUnit.MILLISECONDS);
            testSubscriber.assertNoErrors();
            testSubscriber.assertComplete();

            @SuppressWarnings("unchecked")
            FeedResponse<InternalObjectNode> firstPage = (FeedResponse<InternalObjectNode>) testSubscriber.getEvents().get(0).get(0);
            requestContinuation = firstPage.getContinuationToken();
            receivedDocuments.addAll(firstPage.getResults());
            continuationTokens.add(requestContinuation);
        } while (requestContinuation != null);

        return receivedDocuments;
    }

    // Hand-builds the document JSON so arbitrary value types (arrays, maps, floats)
    // are serialized verbatim via toJson.
    private static InternalObjectNode getDocumentDefinition(String partitionKey, String id, Map<String, Object> keyValuePair) {
        StringBuilder sb = new StringBuilder();
        sb.append("{\n");

        for(String key: keyValuePair.keySet()) {
            Object val = keyValuePair.get(key);
            sb.append(" ");
            sb.append("\"").append(key).append("\"").append(" :" );
            if (val == null) {
                sb.append("null");
            } else {
                sb.append(toJson(val));
            }
            sb.append(",\n");
        }

        sb.append(String.format(" \"id\": \"%s\",\n", id));
        sb.append(String.format(" \"mypk\": \"%s\"\n", partitionKey));
        sb.append("}");

        return new InternalObjectNode(sb.toString());
    }

    // Convenience overload: random UUID used for both id and partition key.
    private static InternalObjectNode getDocumentDefinition(Map<String, Object> keyValuePair) {
        String uuid = UUID.randomUUID().toString();
        return getDocumentDefinition(uuid, uuid, keyValuePair);
    }

    // Serializes a value with the shared ObjectMapper; wraps checked failure as unchecked.
    private static String toJson(Object object){
        try {
            return Utils.getSimpleObjectMapper().writeValueAsString(object);
        } catch (JsonProcessingException e) {
            throw new IllegalStateException(e);
        }
    }
}
class OrderbyDocumentQueryTest extends TestSuiteBase { public static final String PROPTYPE = "proptype"; public static final String PROP_MIXED = "propMixed"; private final double minQueryRequestChargePerPartition = 2.0; private CosmosAsyncClient client; private CosmosAsyncContainer createdCollection; private CosmosAsyncDatabase createdDatabase; private List<InternalObjectNode> createdDocuments = new ArrayList<>(); private int numberOfPartitions; @Factory(dataProvider = "clientBuildersWithDirect") public OrderbyDocumentQueryTest(CosmosClientBuilder clientBuilder) { super(clientBuilder); } @Test(groups = { "simple" }, timeOut = TIMEOUT, dataProvider = "queryMetricsArgProvider") public void queryDocumentsValidateContent(Boolean qmEnabled) throws Exception { InternalObjectNode expectedDocument = createdDocuments.get(0); String query = String.format("SELECT * from root r where r.propStr = '%s'" + " ORDER BY r.propInt" , ModelBridgeInternal.getStringFromJsonSerializable(expectedDocument,"propStr")); CosmosQueryRequestOptions options = new CosmosQueryRequestOptions(); if (qmEnabled != null) { options.setQueryMetricsEnabled(qmEnabled); } CosmosPagedFlux<InternalObjectNode> queryObservable = createdCollection.queryItems(query, options, InternalObjectNode.class); List<String> expectedResourceIds = new ArrayList<>(); expectedResourceIds.add(expectedDocument.getResourceId()); Map<String, ResourceValidator<InternalObjectNode>> resourceIDToValidator = new HashMap<>(); resourceIDToValidator.put(expectedDocument.getResourceId(), new ResourceValidator.Builder<InternalObjectNode>().areEqual(expectedDocument).build()); FeedResponseListValidator<InternalObjectNode> validator = new FeedResponseListValidator.Builder<InternalObjectNode>() .numberOfPages(1) .containsExactly(expectedResourceIds) .validateAllResources(resourceIDToValidator) .totalRequestChargeIsAtLeast(numberOfPartitions * minQueryRequestChargePerPartition) .allPagesSatisfy(new 
FeedResponseValidator.Builder<InternalObjectNode>().hasRequestChargeHeader().build()) .hasValidQueryMetrics(qmEnabled) .build(); validateQuerySuccess(queryObservable.byPage(), validator); } @Test(groups = { "simple" }, timeOut = TIMEOUT) public void queryDocuments_NoResults() throws Exception { String query = "SELECT * from root r where r.id = '2' ORDER BY r.propInt"; CosmosQueryRequestOptions options = new CosmosQueryRequestOptions(); CosmosPagedFlux<InternalObjectNode> queryObservable = createdCollection.queryItems(query, options, InternalObjectNode.class); FeedResponseListValidator<InternalObjectNode> validator = new FeedResponseListValidator.Builder<InternalObjectNode>() .containsExactly(new ArrayList<>()) .numberOfPages(1) .totalRequestChargeIsAtLeast(numberOfPartitions * minQueryRequestChargePerPartition) .allPagesSatisfy(new FeedResponseValidator.Builder<InternalObjectNode>() .hasRequestChargeHeader().build()) .build(); validateQuerySuccess(queryObservable.byPage(), validator); } @DataProvider(name = "sortOrder") public Object[][] sortOrder() { return new Object[][] { { "ASC" }, {"DESC"} }; } @Test(groups = { "simple" }, timeOut = TIMEOUT, dataProvider = "sortOrder") public void queryOrderBy(String sortOrder) throws Exception { String query = String.format("SELECT * FROM r ORDER BY r.propInt %s", sortOrder); CosmosQueryRequestOptions options = new CosmosQueryRequestOptions(); int pageSize = 3; CosmosPagedFlux<InternalObjectNode> queryObservable = createdCollection.queryItems(query, options, InternalObjectNode.class); Comparator<Integer> validatorComparator = Comparator.nullsFirst(Comparator.<Integer>naturalOrder()); List<String> expectedResourceIds = sortDocumentsAndCollectResourceIds("propInt", d -> ModelBridgeInternal.getIntFromJsonSerializable(d,"propInt"), validatorComparator); if ("DESC".equals(sortOrder)) { Collections.reverse(expectedResourceIds); } int expectedPageSize = expectedNumberOfPages(expectedResourceIds.size(), pageSize); 
FeedResponseListValidator<InternalObjectNode> validator = new FeedResponseListValidator.Builder<InternalObjectNode>() .containsExactly(expectedResourceIds) .numberOfPages(expectedPageSize) .allPagesSatisfy(new FeedResponseValidator.Builder<InternalObjectNode>() .hasRequestChargeHeader().build()) .totalRequestChargeIsAtLeast(numberOfPartitions * minQueryRequestChargePerPartition) .build(); validateQuerySuccess(queryObservable.byPage(pageSize), validator); } @Test(groups = {"simple"}, timeOut = TIMEOUT, dataProvider = "sortOrder") public void queryOrderByWithValue(String sortOrder) throws Exception { String query = String.format("SELECT value r.propInt FROM r ORDER BY r.propInt %s", sortOrder); CosmosQueryRequestOptions options = new CosmosQueryRequestOptions(); int pageSize = 3; CosmosPagedFlux<Integer> queryObservable = createdCollection.queryItems(query, options, Integer.class); Comparator<Integer> validatorComparator = Comparator.nullsFirst(Comparator.<Integer>naturalOrder()); List<Integer> expectedValues = sortDocumentsAndCollectValues("propInt", d -> ModelBridgeInternal .getIntFromJsonSerializable(d, "propInt"), validatorComparator); if ("DESC".equals(sortOrder)) { Collections.reverse(expectedValues); } int expectedPageSize = expectedNumberOfPages(expectedValues.size(), pageSize); FeedResponseListValidator<Integer> validator = new FeedResponseListValidator.Builder<Integer>() .containsExactlyValues(expectedValues) .numberOfPages(expectedPageSize) .allPagesSatisfy(new FeedResponseValidator.Builder<Integer>() .hasRequestChargeHeader().build()) .totalRequestChargeIsAtLeast(numberOfPartitions * minQueryRequestChargePerPartition) .build(); validateQuerySuccess(queryObservable.byPage(pageSize), validator); } @Test(groups = { "simple" }, timeOut = TIMEOUT) public void queryOrderByInt() throws Exception { String query = "SELECT * FROM r ORDER BY r.propInt"; CosmosQueryRequestOptions options = new CosmosQueryRequestOptions(); int pageSize = 3; 
CosmosPagedFlux<InternalObjectNode> queryObservable = createdCollection.queryItems(query, options, InternalObjectNode.class); Comparator<Integer> validatorComparator = Comparator.nullsFirst(Comparator.<Integer>naturalOrder()); List<String> expectedResourceIds = sortDocumentsAndCollectResourceIds("propInt", d -> ModelBridgeInternal.getIntFromJsonSerializable(d,"propInt"), validatorComparator); int expectedPageSize = expectedNumberOfPages(expectedResourceIds.size(), pageSize); FeedResponseListValidator<InternalObjectNode> validator = new FeedResponseListValidator.Builder<InternalObjectNode>() .containsExactly(expectedResourceIds) .numberOfPages(expectedPageSize) .allPagesSatisfy(new FeedResponseValidator.Builder<InternalObjectNode>() .hasRequestChargeHeader().build()) .totalRequestChargeIsAtLeast(numberOfPartitions * minQueryRequestChargePerPartition) .build(); validateQuerySuccess(queryObservable.byPage(pageSize), validator); } @Test(groups = { "simple" }, timeOut = TIMEOUT) public void queryOrderByString() throws Exception { String query = "SELECT * FROM r ORDER BY r.propStr"; CosmosQueryRequestOptions options = new CosmosQueryRequestOptions(); int pageSize = 3; CosmosPagedFlux<InternalObjectNode> queryObservable = createdCollection.queryItems(query, options, InternalObjectNode.class); Comparator<String> validatorComparator = Comparator.nullsFirst(Comparator.<String>naturalOrder()); List<String> expectedResourceIds = sortDocumentsAndCollectResourceIds("propStr", d -> ModelBridgeInternal.getStringFromJsonSerializable(d,"propStr"), validatorComparator); int expectedPageSize = expectedNumberOfPages(expectedResourceIds.size(), pageSize); FeedResponseListValidator<InternalObjectNode> validator = new FeedResponseListValidator.Builder<InternalObjectNode>() .containsExactly(expectedResourceIds) .numberOfPages(expectedPageSize) .allPagesSatisfy(new FeedResponseValidator.Builder<InternalObjectNode>() .hasRequestChargeHeader().build()) 
.totalRequestChargeIsAtLeast(numberOfPartitions * minQueryRequestChargePerPartition) .build(); validateQuerySuccess(queryObservable.byPage(pageSize), validator); } @Test(groups = {"simple"}, timeOut = TIMEOUT, dataProvider = "sortOrder") private List<PartitionKeyRange> getPartitionKeyRanges( String containerId, AsyncDocumentClient asyncDocumentClient) { List<PartitionKeyRange> partitionKeyRanges = new ArrayList<>(); List<FeedResponse<PartitionKeyRange>> partitionFeedResponseList = asyncDocumentClient .readPartitionKeyRanges("/dbs/" + createdDatabase.getId() + "/colls/" + containerId, new CosmosQueryRequestOptions()) .collectList().block(); partitionFeedResponseList.forEach(f -> partitionKeyRanges.addAll(f.getResults())); return partitionKeyRanges; } @DataProvider(name = "topValue") public Object[][] topValueParameter() { return new Object[][] { { 0 }, { 1 }, { 5 }, { createdDocuments.size() - 1 }, { createdDocuments.size() }, { createdDocuments.size() + 1 }, { 2 * createdDocuments.size() } }; } @Test(groups = { "simple" }, timeOut = TIMEOUT, dataProvider = "topValue") public void queryOrderWithTop(int topValue) throws Exception { String query = String.format("SELECT TOP %d * FROM r ORDER BY r.propInt", topValue); CosmosQueryRequestOptions options = new CosmosQueryRequestOptions(); int pageSize = 3; CosmosPagedFlux<InternalObjectNode> queryObservable = createdCollection.queryItems(query, options, InternalObjectNode.class); Comparator<Integer> validatorComparator = Comparator.nullsFirst(Comparator.<Integer>naturalOrder()); List<String> expectedResourceIds = sortDocumentsAndCollectResourceIds("propInt", d -> ModelBridgeInternal.getIntFromJsonSerializable(d,"propInt"), validatorComparator) .stream().limit(topValue).collect(Collectors.toList()); int expectedPageSize = expectedNumberOfPages(expectedResourceIds.size(), pageSize); FeedResponseListValidator<InternalObjectNode> validator = new FeedResponseListValidator.Builder<InternalObjectNode>() 
.containsExactly(expectedResourceIds) .numberOfPages(expectedPageSize) .allPagesSatisfy(new FeedResponseValidator.Builder<InternalObjectNode>() .hasRequestChargeHeader().build()) .totalRequestChargeIsAtLeast(numberOfPartitions * (topValue > 0 ? minQueryRequestChargePerPartition : 1)) .build(); validateQuerySuccess(queryObservable.byPage(pageSize), validator); } private <T> List<String> sortDocumentsAndCollectResourceIds(String propName, Function<InternalObjectNode, T> extractProp, Comparator<T> comparer) { return createdDocuments.stream() .filter(d -> ModelBridgeInternal.getMapFromJsonSerializable(d).containsKey(propName)) .sorted((d1, d2) -> comparer.compare(extractProp.apply(d1), extractProp.apply(d2))) .map(Resource::getResourceId).collect(Collectors.toList()); } @SuppressWarnings("unchecked") private <T> List<T> sortDocumentsAndCollectValues(String propName, Function<InternalObjectNode, T> extractProp, Comparator<T> comparer) { return createdDocuments.stream() .filter(d -> ModelBridgeInternal.getMapFromJsonSerializable(d).containsKey(propName)) .sorted((d1, d2) -> comparer.compare(extractProp.apply(d1), extractProp.apply(d2))) .map(d -> (T)ModelBridgeInternal.getMapFromJsonSerializable(d).get(propName)) .collect(Collectors.toList()); } @Test(groups = { "simple" }, timeOut = TIMEOUT) public void queryScopedToSinglePartition_StartWithContinuationToken() throws Exception { String query = "SELECT * FROM r ORDER BY r.propScopedPartitionInt ASC"; CosmosQueryRequestOptions options = new CosmosQueryRequestOptions(); options.setPartitionKey(new PartitionKey("duplicateParitionKeyValue")); CosmosPagedFlux<InternalObjectNode> queryObservable = createdCollection.queryItems(query, options, InternalObjectNode.class); TestSubscriber<FeedResponse<InternalObjectNode>> subscriber = new TestSubscriber<>(); queryObservable.byPage(3).take(1).subscribe(subscriber); subscriber.awaitTerminalEvent(); subscriber.assertComplete(); subscriber.assertNoErrors(); 
assertThat(subscriber.valueCount()).isEqualTo(1); @SuppressWarnings("unchecked") FeedResponse<InternalObjectNode> page = (FeedResponse<InternalObjectNode>) subscriber.getEvents().get(0).get(0); assertThat(page.getResults()).hasSize(3); assertThat(page.getContinuationToken()).isNotEmpty(); queryObservable = createdCollection.queryItems(query, options, InternalObjectNode.class); List<InternalObjectNode> expectedDocs = createdDocuments.stream() .filter(d -> (StringUtils.equals("duplicateParitionKeyValue", ModelBridgeInternal.getStringFromJsonSerializable(d,"mypk")))) .filter(d -> (ModelBridgeInternal.getIntFromJsonSerializable(d,"propScopedPartitionInt") > 2)).collect(Collectors.toList()); Integer maxItemCount = ModelBridgeInternal.getMaxItemCountFromQueryRequestOptions(options); int expectedPageSize = (expectedDocs.size() + maxItemCount - 1) / maxItemCount; assertThat(expectedDocs).hasSize(10 - 3); FeedResponseListValidator<InternalObjectNode> validator = null; validator = new FeedResponseListValidator.Builder<InternalObjectNode>() .containsExactly(expectedDocs.stream() .sorted((e1, e2) -> Integer.compare(ModelBridgeInternal.getIntFromJsonSerializable(e1,"propScopedPartitionInt"), ModelBridgeInternal.getIntFromJsonSerializable(e2,"propScopedPartitionInt"))) .map(d -> d.getResourceId()).collect(Collectors.toList())) .numberOfPages(expectedPageSize) .allPagesSatisfy(new FeedResponseValidator.Builder<InternalObjectNode>() .requestChargeGreaterThanOrEqualTo(1.0).build()) .build(); validateQuerySuccess(queryObservable.byPage(page.getContinuationToken()), validator); } @Test(groups = { "simple" }, timeOut = TIMEOUT) public void orderByContinuationTokenRoundTrip() throws Exception { { OrderByContinuationToken orderByContinuationToken = new OrderByContinuationToken( new CompositeContinuationToken( "asdf", new Range<String>("A", "D", false, true)), new QueryItem[] {new QueryItem("{\"item\" : 42}")}, "rid", false); String serialized = orderByContinuationToken.toString(); 
ValueHolder<OrderByContinuationToken> outOrderByContinuationToken = new ValueHolder<OrderByContinuationToken>(); assertThat(OrderByContinuationToken.tryParse(serialized, outOrderByContinuationToken)).isTrue(); OrderByContinuationToken deserialized = outOrderByContinuationToken.v; CompositeContinuationToken compositeContinuationToken = deserialized.getCompositeContinuationToken(); String token = compositeContinuationToken.getToken(); Range<String> range = compositeContinuationToken.getRange(); assertThat(token).isEqualTo("asdf"); assertThat(range.getMin()).isEqualTo("A"); assertThat(range.getMax()).isEqualTo("D"); assertThat(range.isMinInclusive()).isEqualTo(false); assertThat(range.isMaxInclusive()).isEqualTo(true); QueryItem[] orderByItems = deserialized.getOrderByItems(); assertThat(orderByItems).isNotNull(); assertThat(orderByItems.length).isEqualTo(1); assertThat(orderByItems[0].getItem()).isEqualTo(42); String rid = deserialized.getRid(); assertThat(rid).isEqualTo("rid"); boolean inclusive = deserialized.getInclusive(); assertThat(inclusive).isEqualTo(false); } { ValueHolder<OrderByContinuationToken> outOrderByContinuationToken = new ValueHolder<OrderByContinuationToken>(); assertThat(OrderByContinuationToken.tryParse("{\"property\" : \"Not a valid Order By Token\"}", outOrderByContinuationToken)).isFalse(); } } @Test(groups = { "simple" }, timeOut = TIMEOUT * 10, dataProvider = "sortOrder", retryAnalyzer = RetryAnalyzer.class) public void queryDocumentsWithOrderByContinuationTokensInteger(String sortOrder) throws Exception { String query = String.format("SELECT * FROM c ORDER BY c.propInt %s", sortOrder); Comparator<Integer> order = sortOrder.equals("ASC")?Comparator.naturalOrder():Comparator.reverseOrder(); Comparator<Integer> validatorComparator = Comparator.nullsFirst(order); List<String> expectedResourceIds = sortDocumentsAndCollectResourceIds("propInt", d -> ModelBridgeInternal.getIntFromJsonSerializable(d,"propInt"), validatorComparator); 
this.queryWithContinuationTokensAndPageSizes(query, new int[] { 1, 5, 10, 100}, expectedResourceIds); } @Test(groups = { "simple" }, timeOut = TIMEOUT * 10, dataProvider = "sortOrder") public void queryDocumentsWithOrderByContinuationTokensString(String sortOrder) throws Exception { String query = String.format("SELECT * FROM c ORDER BY c.id %s", sortOrder); Comparator<String> order = sortOrder.equals("ASC")?Comparator.naturalOrder():Comparator.reverseOrder(); Comparator<String> validatorComparator = Comparator.nullsFirst(order); List<String> expectedResourceIds = sortDocumentsAndCollectResourceIds("id", d -> ModelBridgeInternal.getStringFromJsonSerializable(d,"id"), validatorComparator); this.queryWithContinuationTokensAndPageSizes(query, new int[] { 1, 5, 10, 100 }, expectedResourceIds); } @Test(groups = { "simple" }, timeOut = TIMEOUT * 10, dataProvider = "sortOrder") public void queryDocumentsWithInvalidOrderByContinuationTokensString(String sortOrder) throws Exception { String query = String.format("SELECT * FROM c ORDER BY c.id %s", sortOrder); Comparator<String> validatorComparator; if(sortOrder.equals("ASC")) { validatorComparator = Comparator.nullsFirst(Comparator.<String>naturalOrder()); }else{ validatorComparator = Comparator.nullsFirst(Comparator.<String>reverseOrder()); } List<String> expectedResourceIds = sortDocumentsAndCollectResourceIds("id", d -> ModelBridgeInternal.getStringFromJsonSerializable(d,"id"), validatorComparator); this.assertInvalidContinuationToken(query, new int[] { 1, 5, 10, 100 }, expectedResourceIds); } @Test(groups = {"simple"}, timeOut = TIMEOUT, dataProvider = "sortOrder") public void queryOrderByArray(String sortOrder) throws Exception { String query = String.format("SELECT * FROM r ORDER BY r.propArray %s", sortOrder); int pageSize = 3; List<InternalObjectNode> results1 = this.queryWithContinuationTokens(query, pageSize); List<InternalObjectNode> results2 = this.queryWithContinuationTokens(query, 
this.createdDocuments.size()); assertThat(results1.stream().map(r -> r.getResourceId()).collect(Collectors.toList())) .containsExactlyElementsOf(results2.stream().limit(pageSize).map(r -> r.getResourceId()).collect(Collectors.toList())); } @Test(groups = {"simple"}, timeOut = TIMEOUT, dataProvider = "sortOrder") public void queryOrderByObject(String sortOrder) throws Exception { String query = String.format("SELECT * FROM r ORDER BY r.propObject %s", sortOrder); int pageSize = 3; List<InternalObjectNode> results1 = this.queryWithContinuationTokens(query, pageSize); List<InternalObjectNode> results2 = this.queryWithContinuationTokens(query, this.createdDocuments.size()); assertThat(results1.stream().map(r -> r.getResourceId()).collect(Collectors.toList())) .containsExactlyElementsOf(results2.stream().limit(pageSize).map(r -> r.getResourceId()).collect(Collectors.toList())); } public InternalObjectNode createDocument(CosmosAsyncContainer cosmosContainer, Map<String, Object> keyValueProps) { InternalObjectNode docDefinition = getDocumentDefinition(keyValueProps); return BridgeInternal.getProperties(cosmosContainer.createItem(docDefinition).block()); } public List<InternalObjectNode> bulkInsert(CosmosAsyncContainer cosmosContainer, List<Map<String, Object>> keyValuePropsList) { ArrayList<InternalObjectNode> result = new ArrayList<InternalObjectNode>(); for(Map<String, Object> keyValueProps: keyValuePropsList) { InternalObjectNode docDefinition = getDocumentDefinition(keyValueProps); result.add(docDefinition); } return bulkInsertBlocking(cosmosContainer, result); } @BeforeMethod(groups = { "simple" }) public void beforeMethod() throws Exception { TimeUnit.SECONDS.sleep(10); } @BeforeClass(groups = { "simple" }, timeOut = 4 * SETUP_TIMEOUT) public void before_OrderbyDocumentQueryTest() throws Exception { client = getClientBuilder().buildAsyncClient(); createdDatabase = getSharedCosmosDatabase(client); createdCollection = getSharedMultiPartitionCosmosContainer(client); 
truncateCollection(createdCollection); List<Map<String, Object>> keyValuePropsList = new ArrayList<>(); Map<String, Object> props; boolean flag = false; for(int i = 0; i < 30; i++) { props = new HashMap<>(); props.put("propInt", i); props.put("propStr", String.valueOf(i)); List<Integer> orderByArray = new ArrayList<Integer>(); Map<String, String> orderByObject = new HashMap<>(); for (int k = 0; k < 3; k++) { orderByArray.add(k + i); orderByObject.put("key1", String.valueOf(i)); orderByObject.put("key2", String.valueOf(orderByArray.get(k))); } props.put("propArray", orderByArray); props.put("propObject", orderByObject); switch (i % 8) { case 0: props.put(PROP_MIXED, i); props.put(PROPTYPE, "number"); break; case 1: props.put(PROP_MIXED, String.valueOf(i)); props.put(PROPTYPE, "string"); break; case 2: props.put(PROP_MIXED, orderByArray); props.put(PROPTYPE, "array"); break; case 3: props.put(PROP_MIXED, orderByObject); props.put(PROPTYPE, "object"); break; case 4: props.put(PROP_MIXED, (float)i*3.17); props.put(PROPTYPE, "number"); break; case 5: props.put(PROP_MIXED, null); props.put(PROPTYPE, "null"); break; case 6: flag = !flag; props.put(PROP_MIXED, flag); props.put(PROPTYPE, "boolean"); break; default: break; } keyValuePropsList.add(props); } props = new HashMap<>(); keyValuePropsList.add(props); createdDocuments = bulkInsert(createdCollection, keyValuePropsList); for(int i = 0; i < 10; i++) { Map<String, Object> p = new HashMap<>(); p.put("propScopedPartitionInt", i); InternalObjectNode doc = getDocumentDefinition("duplicateParitionKeyValue", UUID.randomUUID().toString(), p); CosmosItemRequestOptions options = new CosmosItemRequestOptions(); createdDocuments.add(createDocument(createdCollection, doc)); } numberOfPartitions = CosmosBridgeInternal.getAsyncDocumentClient(client) .readPartitionKeyRanges("dbs/" + createdDatabase.getId() + "/colls/" + createdCollection.getId(), null) .flatMap(p -> 
Flux.fromIterable(p.getResults())).collectList().single().block().size(); waitIfNeededForReplicasToCatchUp(getClientBuilder()); updateCollectionIndex(); } private void updateCollectionIndex() { CosmosContainerProperties containerProperties = createdCollection.read().block().getProperties(); IndexingPolicy indexingPolicy = containerProperties.getIndexingPolicy(); List<IncludedPath> includedPaths = indexingPolicy.getIncludedPaths(); IncludedPath includedPath = new IncludedPath("/propMixed/?"); if (!includedPaths.contains(includedPath)) { includedPaths.add(includedPath); indexingPolicy.setIncludedPaths(includedPaths); containerProperties.setIndexingPolicy(indexingPolicy); createdCollection.replace(containerProperties).block(); } } @AfterClass(groups = { "simple" }, timeOut = SHUTDOWN_TIMEOUT, alwaysRun = true) public void afterClass() { safeClose(client); } private void assertInvalidContinuationToken(String query, int[] pageSize, List<String> expectedIds) { String requestContinuation = null; do { CosmosQueryRequestOptions options = new CosmosQueryRequestOptions(); options.setMaxDegreeOfParallelism(2); OrderByContinuationToken orderByContinuationToken = new OrderByContinuationToken( new CompositeContinuationToken( "asdf", new Range<String>("A", "D", false, true)), new QueryItem[] {new QueryItem("{\"item\" : 42}")}, "rid", false); CosmosPagedFlux<InternalObjectNode> queryObservable = createdCollection.queryItems(query, options, InternalObjectNode.class); TestSubscriber<FeedResponse<InternalObjectNode>> testSubscriber = new TestSubscriber<>(); queryObservable.byPage(orderByContinuationToken.toString(),1).subscribe(testSubscriber); testSubscriber.awaitTerminalEvent(TIMEOUT, TimeUnit.MILLISECONDS); testSubscriber.assertError(CosmosException.class); } while (requestContinuation != null); } private void queryWithContinuationTokensAndPageSizes(String query, int[] pageSizes, List<String> expectedIds) { for (int pageSize : pageSizes) { List<InternalObjectNode> receivedDocuments 
= this.queryWithContinuationTokens(query, pageSize); List<String> actualIds = new ArrayList<String>(); for (InternalObjectNode document : receivedDocuments) { actualIds.add(document.getResourceId()); } assertThat(actualIds).containsExactlyElementsOf(expectedIds); } } private List<InternalObjectNode> queryWithContinuationTokens(String query, int pageSize) { String requestContinuation = null; List<String> continuationTokens = new ArrayList<String>(); List<InternalObjectNode> receivedDocuments = new ArrayList<InternalObjectNode>(); do { CosmosQueryRequestOptions options = new CosmosQueryRequestOptions(); options.setMaxDegreeOfParallelism(2); CosmosPagedFlux<InternalObjectNode> queryObservable = createdCollection.queryItems(query, options, InternalObjectNode.class); TestSubscriber<FeedResponse<InternalObjectNode>> testSubscriber = new TestSubscriber<>(); queryObservable.byPage(requestContinuation, pageSize).subscribe(testSubscriber); testSubscriber.awaitTerminalEvent(TIMEOUT, TimeUnit.MILLISECONDS); testSubscriber.assertNoErrors(); testSubscriber.assertComplete(); @SuppressWarnings("unchecked") FeedResponse<InternalObjectNode> firstPage = (FeedResponse<InternalObjectNode>) testSubscriber.getEvents().get(0).get(0); requestContinuation = firstPage.getContinuationToken(); receivedDocuments.addAll(firstPage.getResults()); continuationTokens.add(requestContinuation); } while (requestContinuation != null); return receivedDocuments; } private static InternalObjectNode getDocumentDefinition(String partitionKey, String id, Map<String, Object> keyValuePair) { StringBuilder sb = new StringBuilder(); sb.append("{\n"); for(String key: keyValuePair.keySet()) { Object val = keyValuePair.get(key); sb.append(" "); sb.append("\"").append(key).append("\"").append(" :" ); if (val == null) { sb.append("null"); } else { sb.append(toJson(val)); } sb.append(",\n"); } sb.append(String.format(" \"id\": \"%s\",\n", id)); sb.append(String.format(" \"mypk\": \"%s\"\n", partitionKey)); 
sb.append("}"); return new InternalObjectNode(sb.toString()); } private static InternalObjectNode getDocumentDefinition(Map<String, Object> keyValuePair) { String uuid = UUID.randomUUID().toString(); return getDocumentDefinition(uuid, uuid, keyValuePair); } private static String toJson(Object object){ try { return Utils.getSimpleObjectMapper().writeValueAsString(object); } catch (JsonProcessingException e) { throw new IllegalStateException(e); } } }
Undefined is already included. I just pushed a changed to include null also
public void before_OrderbyDocumentQueryTest() throws Exception { client = getClientBuilder().buildAsyncClient(); createdDatabase = getSharedCosmosDatabase(client); createdCollection = getSharedMultiPartitionCosmosContainer(client); truncateCollection(createdCollection); List<Map<String, Object>> keyValuePropsList = new ArrayList<>(); Map<String, Object> props; for(int i = 0; i < 30; i++) { props = new HashMap<>(); props.put("propInt", i); props.put("propStr", String.valueOf(i)); List<Integer> orderByArray = new ArrayList<Integer>(); Map<String, String> orderByObject = new HashMap<>(); for (int k = 0; k < 3; k++) { orderByArray.add(k + i); orderByObject.put("key1", String.valueOf(i)); orderByObject.put("key2", String.valueOf(orderByArray.get(k))); } props.put("propArray", orderByArray); props.put("propObject", orderByObject); keyValuePropsList.add(props); switch (i % 5) { case 0: props.put("propMixed", i); break; case 1: props.put("propMixed", String.valueOf(i)); break; case 2: props.put("propMixed", orderByArray); break; case 3: props.put("propMixed", orderByObject); break; case 4: props.put("propMixed", (float)i*3.17); break; default: break; } } props = new HashMap<>(); keyValuePropsList.add(props); createdDocuments = bulkInsert(createdCollection, keyValuePropsList); for(int i = 0; i < 10; i++) { Map<String, Object> p = new HashMap<>(); p.put("propScopedPartitionInt", i); InternalObjectNode doc = getDocumentDefinition("duplicateParitionKeyValue", UUID.randomUUID().toString(), p); CosmosItemRequestOptions options = new CosmosItemRequestOptions(); createdDocuments.add(createDocument(createdCollection, doc)); } numberOfPartitions = CosmosBridgeInternal.getAsyncDocumentClient(client) .readPartitionKeyRanges("dbs/" + createdDatabase.getId() + "/colls/" + createdCollection.getId(), null) .flatMap(p -> Flux.fromIterable(p.getResults())).collectList().single().block().size(); waitIfNeededForReplicasToCatchUp(getClientBuilder()); updateCollectionIndex(); }
break;
public void before_OrderbyDocumentQueryTest() throws Exception { client = getClientBuilder().buildAsyncClient(); createdDatabase = getSharedCosmosDatabase(client); createdCollection = getSharedMultiPartitionCosmosContainer(client); truncateCollection(createdCollection); List<Map<String, Object>> keyValuePropsList = new ArrayList<>(); Map<String, Object> props; boolean flag = false; for(int i = 0; i < 30; i++) { props = new HashMap<>(); props.put("propInt", i); props.put("propStr", String.valueOf(i)); List<Integer> orderByArray = new ArrayList<Integer>(); Map<String, String> orderByObject = new HashMap<>(); for (int k = 0; k < 3; k++) { orderByArray.add(k + i); orderByObject.put("key1", String.valueOf(i)); orderByObject.put("key2", String.valueOf(orderByArray.get(k))); } props.put("propArray", orderByArray); props.put("propObject", orderByObject); switch (i % 8) { case 0: props.put(PROP_MIXED, i); props.put(PROPTYPE, "number"); break; case 1: props.put(PROP_MIXED, String.valueOf(i)); props.put(PROPTYPE, "string"); break; case 2: props.put(PROP_MIXED, orderByArray); props.put(PROPTYPE, "array"); break; case 3: props.put(PROP_MIXED, orderByObject); props.put(PROPTYPE, "object"); break; case 4: props.put(PROP_MIXED, (float)i*3.17); props.put(PROPTYPE, "number"); break; case 5: props.put(PROP_MIXED, null); props.put(PROPTYPE, "null"); break; case 6: flag = !flag; props.put(PROP_MIXED, flag); props.put(PROPTYPE, "boolean"); break; default: break; } keyValuePropsList.add(props); } props = new HashMap<>(); keyValuePropsList.add(props); createdDocuments = bulkInsert(createdCollection, keyValuePropsList); for(int i = 0; i < 10; i++) { Map<String, Object> p = new HashMap<>(); p.put("propScopedPartitionInt", i); InternalObjectNode doc = getDocumentDefinition("duplicateParitionKeyValue", UUID.randomUUID().toString(), p); CosmosItemRequestOptions options = new CosmosItemRequestOptions(); createdDocuments.add(createDocument(createdCollection, doc)); } numberOfPartitions = 
CosmosBridgeInternal.getAsyncDocumentClient(client) .readPartitionKeyRanges("dbs/" + createdDatabase.getId() + "/colls/" + createdCollection.getId(), null) .flatMap(p -> Flux.fromIterable(p.getResults())).collectList().single().block().size(); waitIfNeededForReplicasToCatchUp(getClientBuilder()); updateCollectionIndex(); }
/**
 * End-to-end tests for cross-partition ORDER BY queries against a shared
 * multi-partition Cosmos container: ordering over ints, strings, arrays, objects
 * and mixed types, TOP, VALUE projections, and continuation-token drain/round-trip.
 * Expected results are computed client-side from {@code createdDocuments} and
 * compared against paged query output via {@code FeedResponseListValidator}.
 */
class OrderbyDocumentQueryTest extends TestSuiteBase {
    // Minimum RU charge a query is expected to incur per physical partition;
    // used as a lower bound in total-request-charge assertions.
    private final double minQueryRequestChargePerPartition = 2.0;
    private CosmosAsyncClient client;
    private CosmosAsyncContainer createdCollection;
    private CosmosAsyncDatabase createdDatabase;
    // Documents seeded by the suite's setup; the source of truth for expected orderings.
    private List<InternalObjectNode> createdDocuments = new ArrayList<>();
    // Number of physical partitions of the shared container.
    private int numberOfPartitions;

    @Factory(dataProvider = "clientBuildersWithDirect")
    public OrderbyDocumentQueryTest(CosmosClientBuilder clientBuilder) {
        super(clientBuilder);
    }

    /**
     * A single-match ORDER BY query returns exactly the expected document in one page,
     * with/without query metrics depending on {@code qmEnabled} (null = default).
     */
    @Test(groups = { "simple" }, timeOut = TIMEOUT, dataProvider = "queryMetricsArgProvider")
    public void queryDocumentsValidateContent(Boolean qmEnabled) throws Exception {
        InternalObjectNode expectedDocument = createdDocuments.get(0);
        String query = String.format("SELECT * from root r where r.propStr = '%s'"
            + " ORDER BY r.propInt"
            , ModelBridgeInternal.getStringFromJsonSerializable(expectedDocument,"propStr"));
        CosmosQueryRequestOptions options = new CosmosQueryRequestOptions();
        if (qmEnabled != null) {
            options.setQueryMetricsEnabled(qmEnabled);
        }
        CosmosPagedFlux<InternalObjectNode> queryObservable =
            createdCollection.queryItems(query, options, InternalObjectNode.class);
        List<String> expectedResourceIds = new ArrayList<>();
        expectedResourceIds.add(expectedDocument.getResourceId());
        Map<String, ResourceValidator<InternalObjectNode>> resourceIDToValidator = new HashMap<>();
        resourceIDToValidator.put(expectedDocument.getResourceId(),
            new ResourceValidator.Builder<InternalObjectNode>().areEqual(expectedDocument).build());
        FeedResponseListValidator<InternalObjectNode> validator =
            new FeedResponseListValidator.Builder<InternalObjectNode>()
                .numberOfPages(1)
                .containsExactly(expectedResourceIds)
                .validateAllResources(resourceIDToValidator)
                .totalRequestChargeIsAtLeast(numberOfPartitions * minQueryRequestChargePerPartition)
                .allPagesSatisfy(new FeedResponseValidator.Builder<InternalObjectNode>().hasRequestChargeHeader().build())
                .hasValidQueryMetrics(qmEnabled)
                .build();
        validateQuerySuccess(queryObservable.byPage(), validator);
    }

    /** An ORDER BY query with no matches still yields one (empty) charged page. */
    @Test(groups = { "simple" }, timeOut = TIMEOUT)
    public void queryDocuments_NoResults() throws Exception {
        String query = "SELECT * from root r where r.id = '2' ORDER BY r.propInt";
        CosmosQueryRequestOptions options = new CosmosQueryRequestOptions();
        CosmosPagedFlux<InternalObjectNode> queryObservable =
            createdCollection.queryItems(query, options, InternalObjectNode.class);
        FeedResponseListValidator<InternalObjectNode> validator =
            new FeedResponseListValidator.Builder<InternalObjectNode>()
                .containsExactly(new ArrayList<>())
                .numberOfPages(1)
                .totalRequestChargeIsAtLeast(numberOfPartitions * minQueryRequestChargePerPartition)
                .allPagesSatisfy(new FeedResponseValidator.Builder<InternalObjectNode>()
                    .hasRequestChargeHeader().build())
                .build();
        validateQuerySuccess(queryObservable.byPage(), validator);
    }

    /** Sort directions exercised by the parameterized ORDER BY tests. */
    @DataProvider(name = "sortOrder")
    public Object[][] sortOrder() {
        return new Object[][] { { "ASC" }, {"DESC"} };
    }

    /** ORDER BY r.propInt in both directions matches a client-side sort of the seed data. */
    @Test(groups = { "simple" }, timeOut = TIMEOUT, dataProvider = "sortOrder")
    public void queryOrderBy(String sortOrder) throws Exception {
        String query = String.format("SELECT * FROM r ORDER BY r.propInt %s", sortOrder);
        CosmosQueryRequestOptions options = new CosmosQueryRequestOptions();
        int pageSize = 3;
        CosmosPagedFlux<InternalObjectNode> queryObservable =
            createdCollection.queryItems(query, options, InternalObjectNode.class);
        Comparator<Integer> validatorComparator = Comparator.nullsFirst(Comparator.<Integer>naturalOrder());
        // Documents lacking propInt are filtered out by the helper, matching server behavior.
        List<String> expectedResourceIds =
            sortDocumentsAndCollectResourceIds("propInt",
                d -> ModelBridgeInternal.getIntFromJsonSerializable(d,"propInt"), validatorComparator);
        if ("DESC".equals(sortOrder)) {
            Collections.reverse(expectedResourceIds);
        }
        int expectedPageSize = expectedNumberOfPages(expectedResourceIds.size(), pageSize);
        FeedResponseListValidator<InternalObjectNode> validator =
            new FeedResponseListValidator.Builder<InternalObjectNode>()
                .containsExactly(expectedResourceIds)
                .numberOfPages(expectedPageSize)
                .allPagesSatisfy(new FeedResponseValidator.Builder<InternalObjectNode>()
                    .hasRequestChargeHeader().build())
                .totalRequestChargeIsAtLeast(numberOfPartitions * minQueryRequestChargePerPartition)
                .build();
        validateQuerySuccess(queryObservable.byPage(pageSize), validator);
    }

    /** SELECT VALUE projection with ORDER BY returns the bare sorted values. */
    @Test(groups = {"simple"}, timeOut = TIMEOUT, dataProvider = "sortOrder")
    public void queryOrderByWithValue(String sortOrder) throws Exception {
        String query = String.format("SELECT value r.propInt FROM r ORDER BY r.propInt %s", sortOrder);
        CosmosQueryRequestOptions options = new CosmosQueryRequestOptions();
        int pageSize = 3;
        CosmosPagedFlux<Integer> queryObservable =
            createdCollection.queryItems(query, options, Integer.class);
        Comparator<Integer> validatorComparator = Comparator.nullsFirst(Comparator.<Integer>naturalOrder());
        List<Integer> expectedValues =
            sortDocumentsAndCollectValues("propInt",
                d -> ModelBridgeInternal.getIntFromJsonSerializable(d, "propInt"), validatorComparator);
        if ("DESC".equals(sortOrder)) {
            Collections.reverse(expectedValues);
        }
        int expectedPageSize = expectedNumberOfPages(expectedValues.size(), pageSize);
        FeedResponseListValidator<Integer> validator =
            new FeedResponseListValidator.Builder<Integer>()
                .containsExactlyValues(expectedValues)
                .numberOfPages(expectedPageSize)
                .allPagesSatisfy(new FeedResponseValidator.Builder<Integer>()
                    .hasRequestChargeHeader().build())
                .totalRequestChargeIsAtLeast(numberOfPartitions * minQueryRequestChargePerPartition)
                .build();
        validateQuerySuccess(queryObservable.byPage(pageSize), validator);
    }

    /** Ascending ORDER BY on an integer property (implicit ASC). */
    @Test(groups = { "simple" }, timeOut = TIMEOUT)
    public void queryOrderByInt() throws Exception {
        String query = "SELECT * FROM r ORDER BY r.propInt";
        CosmosQueryRequestOptions options = new CosmosQueryRequestOptions();
        int pageSize = 3;
        CosmosPagedFlux<InternalObjectNode> queryObservable =
            createdCollection.queryItems(query, options, InternalObjectNode.class);
        Comparator<Integer> validatorComparator = Comparator.nullsFirst(Comparator.<Integer>naturalOrder());
        List<String> expectedResourceIds =
            sortDocumentsAndCollectResourceIds("propInt",
                d -> ModelBridgeInternal.getIntFromJsonSerializable(d,"propInt"), validatorComparator);
        int expectedPageSize = expectedNumberOfPages(expectedResourceIds.size(), pageSize);
        FeedResponseListValidator<InternalObjectNode> validator =
            new FeedResponseListValidator.Builder<InternalObjectNode>()
                .containsExactly(expectedResourceIds)
                .numberOfPages(expectedPageSize)
                .allPagesSatisfy(new FeedResponseValidator.Builder<InternalObjectNode>()
                    .hasRequestChargeHeader().build())
                .totalRequestChargeIsAtLeast(numberOfPartitions * minQueryRequestChargePerPartition)
                .build();
        validateQuerySuccess(queryObservable.byPage(pageSize), validator);
    }

    /** Ascending ORDER BY on a string property (implicit ASC). */
    @Test(groups = { "simple" }, timeOut = TIMEOUT)
    public void queryOrderByString() throws Exception {
        String query = "SELECT * FROM r ORDER BY r.propStr";
        CosmosQueryRequestOptions options = new CosmosQueryRequestOptions();
        int pageSize = 3;
        CosmosPagedFlux<InternalObjectNode> queryObservable =
            createdCollection.queryItems(query, options, InternalObjectNode.class);
        Comparator<String> validatorComparator = Comparator.nullsFirst(Comparator.<String>naturalOrder());
        List<String> expectedResourceIds =
            sortDocumentsAndCollectResourceIds("propStr",
                d -> ModelBridgeInternal.getStringFromJsonSerializable(d,"propStr"), validatorComparator);
        int expectedPageSize = expectedNumberOfPages(expectedResourceIds.size(), pageSize);
        FeedResponseListValidator<InternalObjectNode> validator =
            new FeedResponseListValidator.Builder<InternalObjectNode>()
                .containsExactly(expectedResourceIds)
                .numberOfPages(expectedPageSize)
                .allPagesSatisfy(new FeedResponseValidator.Builder<InternalObjectNode>()
                    .hasRequestChargeHeader().build())
                .totalRequestChargeIsAtLeast(numberOfPartitions * minQueryRequestChargePerPartition)
                .build();
        validateQuerySuccess(queryObservable.byPage(pageSize), validator);
    }

    /**
     * Cross-partition ORDER BY on propMixed, whose runtime type varies per document;
     * expected order is computed with the SDK's ItemComparator but validated only as
     * an any-order id match of the full result set.
     * NOTE(review): the format string has no %s, so {@code sortOrder} is never
     * interpolated — both ASC and DESC runs execute the same (implicit-ASC) query.
     * Confirm whether this is intentional.
     */
    @Test(groups = { "simple" }, timeOut = TIMEOUT, dataProvider = "sortOrder")
    public void queryOrderByMixedTypes(String sortOrder) throws Exception {
        List<PartitionKeyRange> partitionKeyRanges =
            getPartitionKeyRanges(createdCollection.getId(), BridgeInternal.getContextClient(this.client));
        // The test is only meaningful on a multi-partition container.
        assertThat(partitionKeyRanges.size()).isGreaterThan(1);
        String query = String.format("SELECT r.id, r.propMixed FROM r ORDER BY r.propMixed ", sortOrder);
        CosmosQueryRequestOptions options = new CosmosQueryRequestOptions();
        List<String> orderedIds = createdDocuments.stream()
            .sorted((o1, o2) -> ItemComparator.getInstance().compare(o1.get("propMixed"), o2.get("propMixed")))
            .map(Resource::getId)
            .collect(Collectors.toList());
        int pageSize = 20;
        CosmosPagedFlux<InternalObjectNode> queryFlux =
            createdCollection.queryItems(query, options, InternalObjectNode.class);
        TestSubscriber<FeedResponse<InternalObjectNode>> subscriber = new TestSubscriber<>();
        queryFlux.byPage(pageSize).subscribe(subscriber);
        FeedResponseListValidator<InternalObjectNode> validator =
            new FeedResponseListValidator.Builder<InternalObjectNode>()
                .totalSize(orderedIds.size())
                .exactlyContainsIdsInAnyOrder(orderedIds)
                .build();
        validateQuerySuccess(queryFlux.byPage(pageSize), validator);
    }

    /** Reads and flattens all partition key ranges of the given container (blocking). */
    @NotNull
    private List<PartitionKeyRange> getPartitionKeyRanges(
        String containerId, AsyncDocumentClient asyncDocumentClient) {
        List<PartitionKeyRange> partitionKeyRanges = new ArrayList<>();
        List<FeedResponse<PartitionKeyRange>> partitionFeedResponseList = asyncDocumentClient
            .readPartitionKeyRanges("/dbs/" + createdDatabase.getId() + "/colls/" + containerId,
                new CosmosQueryRequestOptions())
            .collectList().block();
        partitionFeedResponseList.forEach(f -> partitionKeyRanges.addAll(f.getResults()));
        return partitionFeedResponseList == null ? partitionKeyRanges : partitionKeyRanges;
    }

    /** TOP values bracketing the document count: 0, small, size-1, size, size+1, 2*size. */
    @DataProvider(name = "topValue")
    public Object[][] topValueParameter() {
        return new Object[][] {
            { 0 },
            { 1 },
            { 5 },
            { createdDocuments.size() - 1 },
            { createdDocuments.size() },
            { createdDocuments.size() + 1 },
            { 2 * createdDocuments.size() }
        };
    }

    /** TOP n combined with ORDER BY truncates the client-side expected ordering to n. */
    @Test(groups = { "simple" }, timeOut = TIMEOUT, dataProvider = "topValue")
    public void queryOrderWithTop(int topValue) throws Exception {
        String query = String.format("SELECT TOP %d * FROM r ORDER BY r.propInt", topValue);
        CosmosQueryRequestOptions options = new CosmosQueryRequestOptions();
        int pageSize = 3;
        CosmosPagedFlux<InternalObjectNode> queryObservable =
            createdCollection.queryItems(query, options, InternalObjectNode.class);
        Comparator<Integer> validatorComparator = Comparator.nullsFirst(Comparator.<Integer>naturalOrder());
        List<String> expectedResourceIds =
            sortDocumentsAndCollectResourceIds("propInt",
                d -> ModelBridgeInternal.getIntFromJsonSerializable(d,"propInt"), validatorComparator)
                .stream().limit(topValue).collect(Collectors.toList());
        int expectedPageSize = expectedNumberOfPages(expectedResourceIds.size(), pageSize);
        FeedResponseListValidator<InternalObjectNode> validator =
            new FeedResponseListValidator.Builder<InternalObjectNode>()
                .containsExactly(expectedResourceIds)
                .numberOfPages(expectedPageSize)
                .allPagesSatisfy(new FeedResponseValidator.Builder<InternalObjectNode>()
                    .hasRequestChargeHeader().build())
                // TOP 0 can short-circuit, so only require a token 1-RU-per-partition floor then.
                .totalRequestChargeIsAtLeast(numberOfPartitions *
                    (topValue > 0 ? minQueryRequestChargePerPartition : 1))
                .build();
        validateQuerySuccess(queryObservable.byPage(pageSize), validator);
    }

    /**
     * Client-side reference ordering: keep only documents having {@code propName},
     * sort by the extracted property, and return their resource ids.
     */
    private <T> List<String> sortDocumentsAndCollectResourceIds(String propName,
        Function<InternalObjectNode, T> extractProp, Comparator<T> comparer) {
        return createdDocuments.stream()
            .filter(d -> ModelBridgeInternal.getMapFromJsonSerializable(d).containsKey(propName))
            .sorted((d1, d2) -> comparer.compare(extractProp.apply(d1), extractProp.apply(d2)))
            .map(Resource::getResourceId).collect(Collectors.toList());
    }

    /**
     * Like {@link #sortDocumentsAndCollectResourceIds} but returns the sorted raw
     * property values instead of resource ids (for SELECT VALUE assertions).
     */
    @SuppressWarnings("unchecked")
    private <T> List<T> sortDocumentsAndCollectValues(String propName,
        Function<InternalObjectNode, T> extractProp, Comparator<T> comparer) {
        return createdDocuments.stream()
            .filter(d -> ModelBridgeInternal.getMapFromJsonSerializable(d).containsKey(propName))
            .sorted((d1, d2) -> comparer.compare(extractProp.apply(d1), extractProp.apply(d2)))
            .map(d -> (T)ModelBridgeInternal.getMapFromJsonSerializable(d).get(propName))
            .collect(Collectors.toList());
    }

    /**
     * Drains the first page of a partition-scoped ORDER BY query, then resumes from
     * its continuation token and verifies the remaining 7 of 10 documents arrive in order.
     */
    @Test(groups = { "simple" }, timeOut = TIMEOUT)
    public void queryScopedToSinglePartition_StartWithContinuationToken() throws Exception {
        String query = "SELECT * FROM r ORDER BY r.propScopedPartitionInt ASC";
        CosmosQueryRequestOptions options = new CosmosQueryRequestOptions();
        options.setPartitionKey(new PartitionKey("duplicateParitionKeyValue"));
        CosmosPagedFlux<InternalObjectNode> queryObservable =
            createdCollection.queryItems(query, options, InternalObjectNode.class);
        TestSubscriber<FeedResponse<InternalObjectNode>> subscriber = new TestSubscriber<>();
        // Take exactly one page of 3 so a continuation token must be produced.
        queryObservable.byPage(3).take(1).subscribe(subscriber);
        subscriber.awaitTerminalEvent();
        subscriber.assertComplete();
        subscriber.assertNoErrors();
        assertThat(subscriber.valueCount()).isEqualTo(1);
        @SuppressWarnings("unchecked")
        FeedResponse<InternalObjectNode> page =
            (FeedResponse<InternalObjectNode>) subscriber.getEvents().get(0).get(0);
        assertThat(page.getResults()).hasSize(3);
        assertThat(page.getContinuationToken()).isNotEmpty();
        queryObservable = createdCollection.queryItems(query, options, InternalObjectNode.class);
        // Expected remainder: same partition key, propScopedPartitionInt > 2 (first page held 0..2).
        List<InternalObjectNode> expectedDocs = createdDocuments.stream()
            .filter(d -> (StringUtils.equals("duplicateParitionKeyValue",
                ModelBridgeInternal.getStringFromJsonSerializable(d,"mypk"))))
            .filter(d -> (ModelBridgeInternal.getIntFromJsonSerializable(d,"propScopedPartitionInt") > 2))
            .collect(Collectors.toList());
        Integer maxItemCount = ModelBridgeInternal.getMaxItemCountFromQueryRequestOptions(options);
        int expectedPageSize = (expectedDocs.size() + maxItemCount - 1) / maxItemCount;
        assertThat(expectedDocs).hasSize(10 - 3);
        FeedResponseListValidator<InternalObjectNode> validator = null;
        validator = new FeedResponseListValidator.Builder<InternalObjectNode>()
            .containsExactly(expectedDocs.stream()
                .sorted((e1, e2) -> Integer.compare(
                    ModelBridgeInternal.getIntFromJsonSerializable(e1,"propScopedPartitionInt"),
                    ModelBridgeInternal.getIntFromJsonSerializable(e2,"propScopedPartitionInt")))
                .map(d -> d.getResourceId()).collect(Collectors.toList()))
            .numberOfPages(expectedPageSize)
            .allPagesSatisfy(new FeedResponseValidator.Builder<InternalObjectNode>()
                .requestChargeGreaterThanOrEqualTo(1.0).build())
            .build();
        validateQuerySuccess(queryObservable.byPage(page.getContinuationToken()), validator);
    }

    /**
     * Serialize/parse round-trip of OrderByContinuationToken: a well-formed token
     * survives with all fields intact; malformed JSON fails tryParse.
     */
    @Test(groups = { "simple" }, timeOut = TIMEOUT)
    public void orderByContinuationTokenRoundTrip() throws Exception {
        {
            // Positive case: round-trip preserves token, range, order-by items, rid, inclusivity.
            OrderByContinuationToken orderByContinuationToken = new OrderByContinuationToken(
                new CompositeContinuationToken(
                    "asdf",
                    new Range<String>("A", "D", false, true)),
                new QueryItem[] {new QueryItem("{\"item\" : 42}")},
                "rid",
                false);
            String serialized = orderByContinuationToken.toString();
            ValueHolder<OrderByContinuationToken> outOrderByContinuationToken =
                new ValueHolder<OrderByContinuationToken>();
            assertThat(OrderByContinuationToken.tryParse(serialized, outOrderByContinuationToken)).isTrue();
            OrderByContinuationToken deserialized = outOrderByContinuationToken.v;
            CompositeContinuationToken compositeContinuationToken =
                deserialized.getCompositeContinuationToken();
            String token = compositeContinuationToken.getToken();
            Range<String> range = compositeContinuationToken.getRange();
            assertThat(token).isEqualTo("asdf");
            assertThat(range.getMin()).isEqualTo("A");
            assertThat(range.getMax()).isEqualTo("D");
            assertThat(range.isMinInclusive()).isEqualTo(false);
            assertThat(range.isMaxInclusive()).isEqualTo(true);
            QueryItem[] orderByItems = deserialized.getOrderByItems();
            assertThat(orderByItems).isNotNull();
            assertThat(orderByItems.length).isEqualTo(1);
            assertThat(orderByItems[0].getItem()).isEqualTo(42);
            String rid = deserialized.getRid();
            assertThat(rid).isEqualTo("rid");
            boolean inclusive = deserialized.getInclusive();
            assertThat(inclusive).isEqualTo(false);
        }
        {
            // Negative case: arbitrary JSON is rejected.
            ValueHolder<OrderByContinuationToken> outOrderByContinuationToken =
                new ValueHolder<OrderByContinuationToken>();
            assertThat(OrderByContinuationToken.tryParse(
                "{\"property\" : \"Not a valid Order By Token\"}",
                outOrderByContinuationToken)).isFalse();
        }
    }

    /** Drains an int-ordered query via continuation tokens at several page sizes. */
    @Test(groups = { "simple" }, timeOut = TIMEOUT * 10, dataProvider = "sortOrder", retryAnalyzer = RetryAnalyzer.class)
    public void queryDocumentsWithOrderByContinuationTokensInteger(String sortOrder) throws Exception {
        String query = String.format("SELECT * FROM c ORDER BY c.propInt %s", sortOrder);
        Comparator<Integer> order =
            sortOrder.equals("ASC")?Comparator.naturalOrder():Comparator.reverseOrder();
        Comparator<Integer> validatorComparator = Comparator.nullsFirst(order);
        List<String> expectedResourceIds =
            sortDocumentsAndCollectResourceIds("propInt",
                d -> ModelBridgeInternal.getIntFromJsonSerializable(d,"propInt"), validatorComparator);
        this.queryWithContinuationTokensAndPageSizes(query, new int[] { 1, 5, 10, 100}, expectedResourceIds);
    }

    /** Drains a string-ordered (by id) query via continuation tokens at several page sizes. */
    @Test(groups = { "simple" }, timeOut = TIMEOUT * 10, dataProvider = "sortOrder")
    public void queryDocumentsWithOrderByContinuationTokensString(String sortOrder) throws Exception {
        String query = String.format("SELECT * FROM c ORDER BY c.id %s", sortOrder);
        Comparator<String> order =
            sortOrder.equals("ASC")?Comparator.naturalOrder():Comparator.reverseOrder();
        Comparator<String> validatorComparator = Comparator.nullsFirst(order);
        List<String> expectedResourceIds =
            sortDocumentsAndCollectResourceIds("id",
                d -> ModelBridgeInternal.getStringFromJsonSerializable(d,"id"), validatorComparator);
        this.queryWithContinuationTokensAndPageSizes(query, new int[] { 1, 5, 10, 100 }, expectedResourceIds);
    }

    /** Resuming an ORDER BY query from a fabricated continuation token must raise CosmosException. */
    @Test(groups = { "simple" }, timeOut = TIMEOUT * 10, dataProvider = "sortOrder")
    public void queryDocumentsWithInvalidOrderByContinuationTokensString(String sortOrder) throws Exception {
        String query = String.format("SELECT * FROM c ORDER BY c.id %s", sortOrder);
        Comparator<String> validatorComparator;
        if(sortOrder.equals("ASC")) {
            validatorComparator = Comparator.nullsFirst(Comparator.<String>naturalOrder());
        }else{
            validatorComparator = Comparator.nullsFirst(Comparator.<String>reverseOrder());
        }
        List<String> expectedResourceIds =
            sortDocumentsAndCollectResourceIds("id",
                d -> ModelBridgeInternal.getStringFromJsonSerializable(d,"id"), validatorComparator);
        this.assertInvalidContinuationToken(query, new int[] { 1, 5, 10, 100 }, expectedResourceIds);
    }

    /**
     * ORDER BY on an array-valued property: a small-page drain and a one-big-page drain
     * must agree on the prefix ordering (no client-side expected order is computable here).
     */
    @Test(groups = {"simple"}, timeOut = TIMEOUT, dataProvider = "sortOrder")
    public void queryOrderByArray(String sortOrder) throws Exception {
        String query = String.format("SELECT * FROM r ORDER BY r.propArray %s", sortOrder);
        int pageSize = 3;
        List<InternalObjectNode> results1 = this.queryWithContinuationTokens(query, pageSize);
        List<InternalObjectNode> results2 =
            this.queryWithContinuationTokens(query, this.createdDocuments.size());
        assertThat(results1.stream().map(r -> r.getResourceId()).collect(Collectors.toList()))
            .containsExactlyElementsOf(results2.stream().limit(pageSize)
                .map(r -> r.getResourceId()).collect(Collectors.toList()));
    }

    /** Same consistency check as {@link #queryOrderByArray} for an object-valued property. */
    @Test(groups = {"simple"}, timeOut = TIMEOUT, dataProvider = "sortOrder")
    public void queryOrderByObject(String sortOrder) throws Exception {
        String query = String.format("SELECT * FROM r ORDER BY r.propObject %s", sortOrder);
        int pageSize = 3;
        List<InternalObjectNode> results1 = this.queryWithContinuationTokens(query, pageSize);
        List<InternalObjectNode> results2 =
            this.queryWithContinuationTokens(query, this.createdDocuments.size());
        assertThat(results1.stream().map(r -> r.getResourceId()).collect(Collectors.toList()))
            .containsExactlyElementsOf(results2.stream().limit(pageSize)
                .map(r -> r.getResourceId()).collect(Collectors.toList()));
    }

    /** Creates one document from a key/value map and returns its stored properties (blocking). */
    public InternalObjectNode createDocument(CosmosAsyncContainer cosmosContainer,
        Map<String, Object> keyValueProps) {
        InternalObjectNode docDefinition = getDocumentDefinition(keyValueProps);
        return BridgeInternal.getProperties(cosmosContainer.createItem(docDefinition).block());
    }

    /** Builds document definitions from the maps and bulk-inserts them (blocking helper). */
    public List<InternalObjectNode> bulkInsert(CosmosAsyncContainer cosmosContainer,
        List<Map<String, Object>> keyValuePropsList) {
        ArrayList<InternalObjectNode> result = new ArrayList<InternalObjectNode>();
        for(Map<String, Object> keyValueProps: keyValuePropsList) {
            InternalObjectNode docDefinition = getDocumentDefinition(keyValueProps);
            result.add(docDefinition);
        }
        return bulkInsertBlocking(cosmosContainer, result);
    }

    /** Throttle between tests — presumably to avoid rate limiting; confirm against suite policy. */
    @BeforeMethod(groups = { "simple" })
    public void beforeMethod() throws Exception {
        TimeUnit.SECONDS.sleep(10);
    }

    /** Ensures /propMixed/? is in the container's included index paths (needed for mixed-type ORDER BY). */
    @BeforeClass(groups = { "simple" }, timeOut = 4 * SETUP_TIMEOUT)
    private void updateCollectionIndex() {
        CosmosContainerProperties containerProperties = createdCollection.read().block().getProperties();
        IndexingPolicy indexingPolicy = containerProperties.getIndexingPolicy();
        List<IncludedPath> includedPaths = indexingPolicy.getIncludedPaths();
        IncludedPath includedPath = new IncludedPath("/propMixed/?");
        if (!includedPaths.contains(includedPath)) {
            includedPaths.add(includedPath);
            indexingPolicy.setIncludedPaths(includedPaths);
            containerProperties.setIndexingPolicy(indexingPolicy);
            createdCollection.replace(containerProperties).block();
        }
    }

    @AfterClass(groups = { "simple" }, timeOut = SHUTDOWN_TIMEOUT, alwaysRun = true)
    public void afterClass() {
        safeClose(client);
    }

    /**
     * Issues the query resuming from a deliberately bogus OrderByContinuationToken and
     * asserts the subscriber terminates with CosmosException.
     * NOTE(review): requestContinuation is never reassigned, so the do/while body runs
     * exactly once, and {@code expectedIds} is unused — likely copied from the drain
     * helper; confirm intent.
     */
    private void assertInvalidContinuationToken(String query, int[] pageSize, List<String> expectedIds) {
        String requestContinuation = null;
        do {
            CosmosQueryRequestOptions options = new CosmosQueryRequestOptions();
            options.setMaxDegreeOfParallelism(2);
            OrderByContinuationToken orderByContinuationToken = new OrderByContinuationToken(
                new CompositeContinuationToken(
                    "asdf",
                    new Range<String>("A", "D", false, true)),
                new QueryItem[] {new QueryItem("{\"item\" : 42}")},
                "rid",
                false);
            CosmosPagedFlux<InternalObjectNode> queryObservable =
                createdCollection.queryItems(query, options, InternalObjectNode.class);
            TestSubscriber<FeedResponse<InternalObjectNode>> testSubscriber = new TestSubscriber<>();
            queryObservable.byPage(orderByContinuationToken.toString(),1).subscribe(testSubscriber);
            testSubscriber.awaitTerminalEvent(TIMEOUT, TimeUnit.MILLISECONDS);
            testSubscriber.assertError(CosmosException.class);
        } while (requestContinuation != null);
    }

    /** Drains the query at each page size and checks the full id sequence matches expectations. */
    private void queryWithContinuationTokensAndPageSizes(String query, int[] pageSizes,
        List<String> expectedIds) {
        for (int pageSize : pageSizes) {
            List<InternalObjectNode> receivedDocuments = this.queryWithContinuationTokens(query, pageSize);
            List<String> actualIds = new ArrayList<String>();
            for (InternalObjectNode document : receivedDocuments) {
                actualIds.add(document.getResourceId());
            }
            assertThat(actualIds).containsExactlyElementsOf(expectedIds);
        }
    }

    /**
     * Fully drains the query one page at a time, re-issuing it with the previous page's
     * continuation token until the token is null; returns all documents in arrival order.
     */
    private List<InternalObjectNode> queryWithContinuationTokens(String query, int pageSize) {
        String requestContinuation = null;
        List<String> continuationTokens = new ArrayList<String>();
        List<InternalObjectNode> receivedDocuments = new ArrayList<InternalObjectNode>();
        do {
            CosmosQueryRequestOptions options = new CosmosQueryRequestOptions();
            options.setMaxDegreeOfParallelism(2);
            CosmosPagedFlux<InternalObjectNode> queryObservable =
                createdCollection.queryItems(query, options, InternalObjectNode.class);
            TestSubscriber<FeedResponse<InternalObjectNode>> testSubscriber = new TestSubscriber<>();
            queryObservable.byPage(requestContinuation, pageSize).subscribe(testSubscriber);
            testSubscriber.awaitTerminalEvent(TIMEOUT, TimeUnit.MILLISECONDS);
            testSubscriber.assertNoErrors();
            testSubscriber.assertComplete();
            @SuppressWarnings("unchecked")
            FeedResponse<InternalObjectNode> firstPage =
                (FeedResponse<InternalObjectNode>) testSubscriber.getEvents().get(0).get(0);
            requestContinuation = firstPage.getContinuationToken();
            receivedDocuments.addAll(firstPage.getResults());
            continuationTokens.add(requestContinuation);
        } while (requestContinuation != null);
        return receivedDocuments;
    }

    /**
     * Hand-builds the JSON body of a test document: the given key/value pairs plus
     * generated "id" and "mypk" (partition key) fields.
     */
    private static InternalObjectNode getDocumentDefinition(String partitionKey, String id,
        Map<String, Object> keyValuePair) {
        StringBuilder sb = new StringBuilder();
        sb.append("{\n");
        for(String key: keyValuePair.keySet()) {
            Object val = keyValuePair.get(key);
            sb.append(" ");
            sb.append("\"").append(key).append("\"").append(" :" );
            if (val == null) {
                sb.append("null");
            } else {
                sb.append(toJson(val));
            }
            sb.append(",\n");
        }
        sb.append(String.format(" \"id\": \"%s\",\n", id));
        sb.append(String.format(" \"mypk\": \"%s\"\n", partitionKey));
        sb.append("}");
        return new InternalObjectNode(sb.toString());
    }

    /** Convenience overload: uses one fresh UUID as both id and partition key. */
    private static InternalObjectNode getDocumentDefinition(Map<String, Object> keyValuePair) {
        String uuid = UUID.randomUUID().toString();
        return getDocumentDefinition(uuid, uuid, keyValuePair);
    }

    /** Serializes a value to JSON; wraps checked Jackson failures as IllegalStateException. */
    private static String toJson(Object object){
        try {
            return Utils.getSimpleObjectMapper().writeValueAsString(object);
        } catch (JsonProcessingException e) {
            throw new IllegalStateException(e);
        }
    }
}
/**
 * End-to-end tests for cross-partition ORDER BY queries: ordering over ints, strings,
 * arrays/objects, mixed JSON types, TOP, VALUE projections, partition-scoped queries,
 * and ORDER BY continuation-token round-trips.
 *
 * NOTE(review): document creation happens in the base class / outside this chunk;
 * createdDocuments is assumed populated before the tests run — confirm against setup.
 */
class OrderbyDocumentQueryTest extends TestSuiteBase {
    // Property recording each document's JSON type name ("null", "boolean", "number", ...).
    public static final String PROPTYPE = "proptype";
    // Property whose value spans several JSON types; exercised by queryOrderByMixedTypes.
    public static final String PROP_MIXED = "propMixed";
    // Minimum RU each physical partition contributes to a cross-partition query.
    private final double minQueryRequestChargePerPartition = 2.0;
    private CosmosAsyncClient client;
    private CosmosAsyncContainer createdCollection;
    private CosmosAsyncDatabase createdDatabase;
    // In-memory copy of the inserted documents; expected results are derived from it.
    private List<InternalObjectNode> createdDocuments = new ArrayList<>();
    private int numberOfPartitions;

    @Factory(dataProvider = "clientBuildersWithDirect")
    public OrderbyDocumentQueryTest(CosmosClientBuilder clientBuilder) {
        super(clientBuilder);
    }

    /** Single-document ORDER BY query: validates full content, charge, and optional metrics. */
    @Test(groups = { "simple" }, timeOut = TIMEOUT, dataProvider = "queryMetricsArgProvider")
    public void queryDocumentsValidateContent(Boolean qmEnabled) throws Exception {
        InternalObjectNode expectedDocument = createdDocuments.get(0);
        String query = String.format("SELECT * from root r where r.propStr = '%s'"
            + " ORDER BY r.propInt"
            , ModelBridgeInternal.getStringFromJsonSerializable(expectedDocument, "propStr"));
        CosmosQueryRequestOptions options = new CosmosQueryRequestOptions();
        if (qmEnabled != null) {
            options.setQueryMetricsEnabled(qmEnabled);
        }
        CosmosPagedFlux<InternalObjectNode> queryObservable =
            createdCollection.queryItems(query, options, InternalObjectNode.class);
        List<String> expectedResourceIds = new ArrayList<>();
        expectedResourceIds.add(expectedDocument.getResourceId());
        Map<String, ResourceValidator<InternalObjectNode>> resourceIDToValidator = new HashMap<>();
        resourceIDToValidator.put(expectedDocument.getResourceId(),
            new ResourceValidator.Builder<InternalObjectNode>().areEqual(expectedDocument).build());
        FeedResponseListValidator<InternalObjectNode> validator =
            new FeedResponseListValidator.Builder<InternalObjectNode>()
                .numberOfPages(1)
                .containsExactly(expectedResourceIds)
                .validateAllResources(resourceIDToValidator)
                .totalRequestChargeIsAtLeast(numberOfPartitions * minQueryRequestChargePerPartition)
                .allPagesSatisfy(new FeedResponseValidator.Builder<InternalObjectNode>()
                    .hasRequestChargeHeader().build())
                .hasValidQueryMetrics(qmEnabled)
                .build();
        validateQuerySuccess(queryObservable.byPage(), validator);
    }

    /** An ORDER BY query matching nothing still returns exactly one (empty) page. */
    @Test(groups = { "simple" }, timeOut = TIMEOUT)
    public void queryDocuments_NoResults() throws Exception {
        String query = "SELECT * from root r where r.id = '2' ORDER BY r.propInt";
        CosmosQueryRequestOptions options = new CosmosQueryRequestOptions();
        CosmosPagedFlux<InternalObjectNode> queryObservable =
            createdCollection.queryItems(query, options, InternalObjectNode.class);
        FeedResponseListValidator<InternalObjectNode> validator =
            new FeedResponseListValidator.Builder<InternalObjectNode>()
                .containsExactly(new ArrayList<>())
                .numberOfPages(1)
                .totalRequestChargeIsAtLeast(numberOfPartitions * minQueryRequestChargePerPartition)
                .allPagesSatisfy(new FeedResponseValidator.Builder<InternalObjectNode>()
                    .hasRequestChargeHeader().build())
                .build();
        validateQuerySuccess(queryObservable.byPage(), validator);
    }

    /** Sort directions exercised by the parameterized ORDER BY tests. */
    @DataProvider(name = "sortOrder")
    public Object[][] sortOrder() {
        return new Object[][] { { "ASC" }, {"DESC"} };
    }

    /** Orders all documents by an int property in the given direction; checks ids, paging, charge. */
    @Test(groups = { "simple" }, timeOut = TIMEOUT, dataProvider = "sortOrder")
    public void queryOrderBy(String sortOrder) throws Exception {
        String query = String.format("SELECT * FROM r ORDER BY r.propInt %s", sortOrder);
        CosmosQueryRequestOptions options = new CosmosQueryRequestOptions();
        int pageSize = 3;
        CosmosPagedFlux<InternalObjectNode> queryObservable =
            createdCollection.queryItems(query, options, InternalObjectNode.class);
        Comparator<Integer> validatorComparator = Comparator.nullsFirst(Comparator.<Integer>naturalOrder());
        List<String> expectedResourceIds = sortDocumentsAndCollectResourceIds("propInt",
            d -> ModelBridgeInternal.getIntFromJsonSerializable(d, "propInt"), validatorComparator);
        if ("DESC".equals(sortOrder)) {
            Collections.reverse(expectedResourceIds);
        }
        int expectedPageSize = expectedNumberOfPages(expectedResourceIds.size(), pageSize);
        FeedResponseListValidator<InternalObjectNode> validator =
            new FeedResponseListValidator.Builder<InternalObjectNode>()
                .containsExactly(expectedResourceIds)
                .numberOfPages(expectedPageSize)
                .allPagesSatisfy(new FeedResponseValidator.Builder<InternalObjectNode>()
                    .hasRequestChargeHeader().build())
                .totalRequestChargeIsAtLeast(numberOfPartitions * minQueryRequestChargePerPartition)
                .build();
        validateQuerySuccess(queryObservable.byPage(pageSize), validator);
    }

    /** SELECT VALUE projection with ORDER BY: validates the raw int values, not documents. */
    @Test(groups = {"simple"}, timeOut = TIMEOUT, dataProvider = "sortOrder")
    public void queryOrderByWithValue(String sortOrder) throws Exception {
        String query = String.format("SELECT value r.propInt FROM r ORDER BY r.propInt %s", sortOrder);
        CosmosQueryRequestOptions options = new CosmosQueryRequestOptions();
        int pageSize = 3;
        CosmosPagedFlux<Integer> queryObservable =
            createdCollection.queryItems(query, options, Integer.class);
        Comparator<Integer> validatorComparator = Comparator.nullsFirst(Comparator.<Integer>naturalOrder());
        List<Integer> expectedValues = sortDocumentsAndCollectValues("propInt",
            d -> ModelBridgeInternal.getIntFromJsonSerializable(d, "propInt"), validatorComparator);
        if ("DESC".equals(sortOrder)) {
            Collections.reverse(expectedValues);
        }
        int expectedPageSize = expectedNumberOfPages(expectedValues.size(), pageSize);
        FeedResponseListValidator<Integer> validator =
            new FeedResponseListValidator.Builder<Integer>()
                .containsExactlyValues(expectedValues)
                .numberOfPages(expectedPageSize)
                .allPagesSatisfy(new FeedResponseValidator.Builder<Integer>()
                    .hasRequestChargeHeader().build())
                .totalRequestChargeIsAtLeast(numberOfPartitions * minQueryRequestChargePerPartition)
                .build();
        validateQuerySuccess(queryObservable.byPage(pageSize), validator);
    }

    /** Ascending ORDER BY over an int property (implicit direction). */
    @Test(groups = { "simple" }, timeOut = TIMEOUT)
    public void queryOrderByInt() throws Exception {
        String query = "SELECT * FROM r ORDER BY r.propInt";
        CosmosQueryRequestOptions options = new CosmosQueryRequestOptions();
        int pageSize = 3;
        CosmosPagedFlux<InternalObjectNode> queryObservable =
            createdCollection.queryItems(query, options, InternalObjectNode.class);
        Comparator<Integer> validatorComparator = Comparator.nullsFirst(Comparator.<Integer>naturalOrder());
        List<String> expectedResourceIds = sortDocumentsAndCollectResourceIds("propInt",
            d -> ModelBridgeInternal.getIntFromJsonSerializable(d, "propInt"), validatorComparator);
        int expectedPageSize = expectedNumberOfPages(expectedResourceIds.size(), pageSize);
        FeedResponseListValidator<InternalObjectNode> validator =
            new FeedResponseListValidator.Builder<InternalObjectNode>()
                .containsExactly(expectedResourceIds)
                .numberOfPages(expectedPageSize)
                .allPagesSatisfy(new FeedResponseValidator.Builder<InternalObjectNode>()
                    .hasRequestChargeHeader().build())
                .totalRequestChargeIsAtLeast(numberOfPartitions * minQueryRequestChargePerPartition)
                .build();
        validateQuerySuccess(queryObservable.byPage(pageSize), validator);
    }

    /** Ascending ORDER BY over a string property. */
    @Test(groups = { "simple" }, timeOut = TIMEOUT)
    public void queryOrderByString() throws Exception {
        String query = "SELECT * FROM r ORDER BY r.propStr";
        CosmosQueryRequestOptions options = new CosmosQueryRequestOptions();
        int pageSize = 3;
        CosmosPagedFlux<InternalObjectNode> queryObservable =
            createdCollection.queryItems(query, options, InternalObjectNode.class);
        Comparator<String> validatorComparator = Comparator.nullsFirst(Comparator.<String>naturalOrder());
        List<String> expectedResourceIds = sortDocumentsAndCollectResourceIds("propStr",
            d -> ModelBridgeInternal.getStringFromJsonSerializable(d, "propStr"), validatorComparator);
        int expectedPageSize = expectedNumberOfPages(expectedResourceIds.size(), pageSize);
        FeedResponseListValidator<InternalObjectNode> validator =
            new FeedResponseListValidator.Builder<InternalObjectNode>()
                .containsExactly(expectedResourceIds)
                .numberOfPages(expectedPageSize)
                .allPagesSatisfy(new FeedResponseValidator.Builder<InternalObjectNode>()
                    .hasRequestChargeHeader().build())
                .totalRequestChargeIsAtLeast(numberOfPartitions * minQueryRequestChargePerPartition)
                .build();
        validateQuerySuccess(queryObservable.byPage(pageSize), validator);
    }

    /**
     * ORDER BY over a property holding mixed JSON types: checks that every document comes
     * back exactly once and that items of the same JSON type arrive contiguously, in the
     * service's fixed type order (undefined, null, boolean, number, string, array, object).
     */
    @Test(groups = {"simple"}, timeOut = TIMEOUT, dataProvider = "sortOrder")
    public void queryOrderByMixedTypes(String sortOrder) throws Exception {
        List<PartitionKeyRange> partitionKeyRanges = getPartitionKeyRanges(createdCollection.getId(),
            BridgeInternal.getContextClient(this.client));
        // The test is only meaningful as a cross-partition query.
        assertThat(partitionKeyRanges.size()).isGreaterThan(1);
        // NOTE(review): the format string has no %s, so sortOrder is silently ignored and the
        // query always runs ascending. Fixing it would require reversing typeList and the
        // per-type sort expectations below for DESC, so it is left unchanged and only flagged.
        String query = String.format("SELECT * FROM r ORDER BY r.propMixed ", sortOrder);
        CosmosQueryRequestOptions options = new CosmosQueryRequestOptions();
        List<String> sourceIds = createdDocuments.stream()
            .map(Resource::getId)
            .collect(Collectors.toList());
        int pageSize = 20;
        CosmosPagedFlux<InternalObjectNode> queryFlux =
            createdCollection.queryItems(query, options, InternalObjectNode.class);
        TestSubscriber<FeedResponse<InternalObjectNode>> subscriber = new TestSubscriber<>();
        queryFlux.byPage(pageSize).subscribe(subscriber);
        subscriber.awaitTerminalEvent();
        subscriber.assertComplete();
        subscriber.assertNoErrors();
        List<InternalObjectNode> results = new ArrayList<>();
        subscriber.values().forEach(feedResponse -> results.addAll(feedResponse.getResults()));
        // Completeness: every created document is returned exactly once.
        assertThat(results.size()).isEqualTo(createdDocuments.size());
        List<String> resultIds = results.stream().map(Resource::getId).collect(Collectors.toList());
        assertThat(resultIds).containsExactlyInAnyOrderElementsOf(sourceIds);
        // The service's cross-type ordering for ORDER BY.
        final List<String> typeList =
            Arrays.asList("undefined", "null", "boolean", "number", "string", "array", "object");
        List<String> observedTypes = new ArrayList<>();
        results.forEach(item -> {
            // Documents without the type marker are treated as "undefined".
            String propType = "undefined";
            if (item.has(PROPTYPE)) {
                propType = item.getString(PROPTYPE);
            }
            if (!observedTypes.contains(propType)) {
                observedTypes.add(propType);
            } else {
                boolean equals = observedTypes.get(observedTypes.size() - 1).equals(propType);
                // FIX: as(...) must be applied BEFORE the assertion; the original called it
                // after isTrue(), where the description had no effect.
                assertThat(equals).as("Items of same type should be contiguous").isTrue();
            }
        });
        assertThat(observedTypes).containsExactlyElementsOf(typeList);
        // Within each type bucket, values must already be sorted ascending.
        for (String type : typeList) {
            List<InternalObjectNode> items = results.stream().filter(r -> {
                if ("undefined".equals(type)) {
                    return !r.has(PROPTYPE);
                }
                return type.equals(r.getString(PROPTYPE));
            }).collect(Collectors.toList());
            if ("boolean".equals(type)) {
                List<Boolean> sourceList =
                    items.stream().map(n -> n.getBoolean(PROP_MIXED)).collect(Collectors.toList());
                List<Boolean> toBeSortedList = new ArrayList<>(sourceList);
                toBeSortedList.sort(Comparator.comparing(Boolean::booleanValue));
                assertThat(toBeSortedList).containsExactlyElementsOf(sourceList);
            }
            if ("number".equals(type)) {
                List<Number> numberList =
                    items.stream().map(n -> (Number) n.get(PROP_MIXED)).collect(Collectors.toList());
                // FIX: dropped a redundant Collections.copy — the list is already a copy.
                List<Number> toBeSortedList = new ArrayList<>(numberList);
                toBeSortedList.sort(Comparator.comparingDouble(Number::doubleValue));
                assertThat(toBeSortedList).containsExactlyElementsOf(numberList);
            }
            if ("string".equals(type)) {
                List<String> sourceList =
                    items.stream().map(n -> n.getString(PROP_MIXED)).collect(Collectors.toList());
                // FIX: dropped a redundant Collections.copy — the list is already a copy.
                List<String> toBeSortedList = new ArrayList<>(sourceList);
                toBeSortedList.sort(Comparator.comparing(String::valueOf));
                assertThat(toBeSortedList).containsExactlyElementsOf(sourceList);
            }
        }
    }

    /** Reads the physical partition key ranges of the given container. */
    private List<PartitionKeyRange> getPartitionKeyRanges(
        String containerId, AsyncDocumentClient asyncDocumentClient) {
        List<PartitionKeyRange> partitionKeyRanges = new ArrayList<>();
        List<FeedResponse<PartitionKeyRange>> partitionFeedResponseList = asyncDocumentClient
            .readPartitionKeyRanges("/dbs/" + createdDatabase.getId() + "/colls/" + containerId,
                new CosmosQueryRequestOptions())
            .collectList().block();
        partitionFeedResponseList.forEach(f -> partitionKeyRanges.addAll(f.getResults()));
        return partitionKeyRanges;
    }

    /** TOP values around the document-count boundaries (0, 1, n-1, n, n+1, 2n). */
    @DataProvider(name = "topValue")
    public Object[][] topValueParameter() {
        return new Object[][] {
            { 0 },
            { 1 },
            { 5 },
            { createdDocuments.size() - 1 },
            { createdDocuments.size() },
            { createdDocuments.size() + 1 },
            { 2 * createdDocuments.size() }
        };
    }

    /** TOP combined with ORDER BY: expects exactly the first topValue sorted documents. */
    @Test(groups = { "simple" }, timeOut = TIMEOUT, dataProvider = "topValue")
    public void queryOrderWithTop(int topValue) throws Exception {
        String query = String.format("SELECT TOP %d * FROM r ORDER BY r.propInt", topValue);
        CosmosQueryRequestOptions options = new CosmosQueryRequestOptions();
        int pageSize = 3;
        CosmosPagedFlux<InternalObjectNode> queryObservable =
            createdCollection.queryItems(query, options, InternalObjectNode.class);
        Comparator<Integer> validatorComparator = Comparator.nullsFirst(Comparator.<Integer>naturalOrder());
        List<String> expectedResourceIds = sortDocumentsAndCollectResourceIds("propInt",
            d -> ModelBridgeInternal.getIntFromJsonSerializable(d, "propInt"), validatorComparator)
            .stream().limit(topValue).collect(Collectors.toList());
        int expectedPageSize = expectedNumberOfPages(expectedResourceIds.size(), pageSize);
        FeedResponseListValidator<InternalObjectNode> validator =
            new FeedResponseListValidator.Builder<InternalObjectNode>()
                .containsExactly(expectedResourceIds)
                .numberOfPages(expectedPageSize)
                .allPagesSatisfy(new FeedResponseValidator.Builder<InternalObjectNode>()
                    .hasRequestChargeHeader().build())
                // TOP 0 short-circuits, so only a minimal charge is guaranteed.
                .totalRequestChargeIsAtLeast(numberOfPartitions
                    * (topValue > 0 ? minQueryRequestChargePerPartition : 1))
                .build();
        validateQuerySuccess(queryObservable.byPage(pageSize), validator);
    }

    /** Sorts createdDocuments by the extracted property and returns their resource ids. */
    private <T> List<String> sortDocumentsAndCollectResourceIds(String propName,
                                                                Function<InternalObjectNode, T> extractProp,
                                                                Comparator<T> comparer) {
        return createdDocuments.stream()
            .filter(d -> ModelBridgeInternal.getMapFromJsonSerializable(d).containsKey(propName))
            .sorted((d1, d2) -> comparer.compare(extractProp.apply(d1), extractProp.apply(d2)))
            .map(Resource::getResourceId).collect(Collectors.toList());
    }

    /** Sorts createdDocuments by the extracted property and returns the property values. */
    @SuppressWarnings("unchecked")
    private <T> List<T> sortDocumentsAndCollectValues(String propName,
                                                      Function<InternalObjectNode, T> extractProp,
                                                      Comparator<T> comparer) {
        return createdDocuments.stream()
            .filter(d -> ModelBridgeInternal.getMapFromJsonSerializable(d).containsKey(propName))
            .sorted((d1, d2) -> comparer.compare(extractProp.apply(d1), extractProp.apply(d2)))
            .map(d -> (T) ModelBridgeInternal.getMapFromJsonSerializable(d).get(propName))
            .collect(Collectors.toList());
    }

    /**
     * Single-partition ORDER BY: take the first page, then resume from its continuation
     * token and verify the remaining documents arrive in order.
     */
    @Test(groups = { "simple" }, timeOut = TIMEOUT)
    public void queryScopedToSinglePartition_StartWithContinuationToken() throws Exception {
        String query = "SELECT * FROM r ORDER BY r.propScopedPartitionInt ASC";
        CosmosQueryRequestOptions options = new CosmosQueryRequestOptions();
        options.setPartitionKey(new PartitionKey("duplicateParitionKeyValue"));
        CosmosPagedFlux<InternalObjectNode> queryObservable =
            createdCollection.queryItems(query, options, InternalObjectNode.class);
        TestSubscriber<FeedResponse<InternalObjectNode>> subscriber = new TestSubscriber<>();
        queryObservable.byPage(3).take(1).subscribe(subscriber);
        subscriber.awaitTerminalEvent();
        subscriber.assertComplete();
        subscriber.assertNoErrors();
        assertThat(subscriber.valueCount()).isEqualTo(1);
        @SuppressWarnings("unchecked")
        FeedResponse<InternalObjectNode> page =
            (FeedResponse<InternalObjectNode>) subscriber.getEvents().get(0).get(0);
        assertThat(page.getResults()).hasSize(3);
        assertThat(page.getContinuationToken()).isNotEmpty();
        queryObservable = createdCollection.queryItems(query, options, InternalObjectNode.class);
        // Expected remainder: same partition, propScopedPartitionInt beyond the first page (values > 2).
        List<InternalObjectNode> expectedDocs = createdDocuments.stream()
            .filter(d -> (StringUtils.equals("duplicateParitionKeyValue",
                ModelBridgeInternal.getStringFromJsonSerializable(d, "mypk"))))
            .filter(d -> (ModelBridgeInternal.getIntFromJsonSerializable(d, "propScopedPartitionInt") > 2))
            .collect(Collectors.toList());
        // NOTE(review): assumes the options carry a non-null max item count here — confirm.
        Integer maxItemCount = ModelBridgeInternal.getMaxItemCountFromQueryRequestOptions(options);
        int expectedPageSize = (expectedDocs.size() + maxItemCount - 1) / maxItemCount;
        assertThat(expectedDocs).hasSize(10 - 3);
        // FIX: merged the dead "validator = null" initialization with its assignment.
        FeedResponseListValidator<InternalObjectNode> validator =
            new FeedResponseListValidator.Builder<InternalObjectNode>()
                .containsExactly(expectedDocs.stream()
                    .sorted((e1, e2) -> Integer.compare(
                        ModelBridgeInternal.getIntFromJsonSerializable(e1, "propScopedPartitionInt"),
                        ModelBridgeInternal.getIntFromJsonSerializable(e2, "propScopedPartitionInt")))
                    .map(d -> d.getResourceId()).collect(Collectors.toList()))
                .numberOfPages(expectedPageSize)
                .allPagesSatisfy(new FeedResponseValidator.Builder<InternalObjectNode>()
                    .requestChargeGreaterThanOrEqualTo(1.0).build())
                .build();
        validateQuerySuccess(queryObservable.byPage(page.getContinuationToken()), validator);
    }

    /** Serializes an ORDER BY continuation token and parses it back; also checks a bad token fails to parse. */
    @Test(groups = { "simple" }, timeOut = TIMEOUT)
    public void orderByContinuationTokenRoundTrip() throws Exception {
        {
            // Round-trip: serialize then parse, and compare every field.
            OrderByContinuationToken orderByContinuationToken = new OrderByContinuationToken(
                new CompositeContinuationToken(
                    "asdf",
                    new Range<String>("A", "D", false, true)),
                new QueryItem[] {new QueryItem("{\"item\" : 42}")},
                "rid",
                false);
            String serialized = orderByContinuationToken.toString();
            ValueHolder<OrderByContinuationToken> outOrderByContinuationToken =
                new ValueHolder<OrderByContinuationToken>();
            assertThat(OrderByContinuationToken.tryParse(serialized, outOrderByContinuationToken)).isTrue();
            OrderByContinuationToken deserialized = outOrderByContinuationToken.v;
            CompositeContinuationToken compositeContinuationToken =
                deserialized.getCompositeContinuationToken();
            String token = compositeContinuationToken.getToken();
            Range<String> range = compositeContinuationToken.getRange();
            assertThat(token).isEqualTo("asdf");
            assertThat(range.getMin()).isEqualTo("A");
            assertThat(range.getMax()).isEqualTo("D");
            assertThat(range.isMinInclusive()).isEqualTo(false);
            assertThat(range.isMaxInclusive()).isEqualTo(true);
            QueryItem[] orderByItems = deserialized.getOrderByItems();
            assertThat(orderByItems).isNotNull();
            assertThat(orderByItems.length).isEqualTo(1);
            assertThat(orderByItems[0].getItem()).isEqualTo(42);
            String rid = deserialized.getRid();
            assertThat(rid).isEqualTo("rid");
            boolean inclusive = deserialized.getInclusive();
            assertThat(inclusive).isEqualTo(false);
        }
        {
            // Malformed payloads must be rejected, not partially parsed.
            ValueHolder<OrderByContinuationToken> outOrderByContinuationToken =
                new ValueHolder<OrderByContinuationToken>();
            assertThat(OrderByContinuationToken.tryParse(
                "{\"property\" : \"Not a valid Order By Token\"}", outOrderByContinuationToken)).isFalse();
        }
    }

    /** Page-by-page drain with continuation tokens, ordered by an int property. */
    @Test(groups = { "simple" }, timeOut = TIMEOUT * 10, dataProvider = "sortOrder",
        retryAnalyzer = RetryAnalyzer.class)
    public void queryDocumentsWithOrderByContinuationTokensInteger(String sortOrder) throws Exception {
        String query = String.format("SELECT * FROM c ORDER BY c.propInt %s", sortOrder);
        Comparator<Integer> order =
            sortOrder.equals("ASC") ? Comparator.naturalOrder() : Comparator.reverseOrder();
        Comparator<Integer> validatorComparator = Comparator.nullsFirst(order);
        List<String> expectedResourceIds = sortDocumentsAndCollectResourceIds("propInt",
            d -> ModelBridgeInternal.getIntFromJsonSerializable(d, "propInt"), validatorComparator);
        this.queryWithContinuationTokensAndPageSizes(query, new int[] { 1, 5, 10, 100}, expectedResourceIds);
    }

    /** Page-by-page drain with continuation tokens, ordered by the string id. */
    @Test(groups = { "simple" }, timeOut = TIMEOUT * 10, dataProvider = "sortOrder")
    public void queryDocumentsWithOrderByContinuationTokensString(String sortOrder) throws Exception {
        String query = String.format("SELECT * FROM c ORDER BY c.id %s", sortOrder);
        Comparator<String> order =
            sortOrder.equals("ASC") ? Comparator.naturalOrder() : Comparator.reverseOrder();
        Comparator<String> validatorComparator = Comparator.nullsFirst(order);
        List<String> expectedResourceIds = sortDocumentsAndCollectResourceIds("id",
            d -> ModelBridgeInternal.getStringFromJsonSerializable(d, "id"), validatorComparator);
        this.queryWithContinuationTokensAndPageSizes(query, new int[] { 1, 5, 10, 100 }, expectedResourceIds);
    }

    /** Resuming with an invalid ORDER BY continuation token must fail with CosmosException. */
    @Test(groups = { "simple" }, timeOut = TIMEOUT * 10, dataProvider = "sortOrder")
    public void queryDocumentsWithInvalidOrderByContinuationTokensString(String sortOrder) throws Exception {
        String query = String.format("SELECT * FROM c ORDER BY c.id %s", sortOrder);
        Comparator<String> validatorComparator;
        if (sortOrder.equals("ASC")) {
            validatorComparator = Comparator.nullsFirst(Comparator.<String>naturalOrder());
        } else {
            validatorComparator = Comparator.nullsFirst(Comparator.<String>reverseOrder());
        }
        List<String> expectedResourceIds = sortDocumentsAndCollectResourceIds("id",
            d -> ModelBridgeInternal.getStringFromJsonSerializable(d, "id"), validatorComparator);
        this.assertInvalidContinuationToken(query, new int[] { 1, 5, 10, 100 }, expectedResourceIds);
    }

    /** ORDER BY over an array property: a small-page prefix must match the full drain. */
    @Test(groups = {"simple"}, timeOut = TIMEOUT, dataProvider = "sortOrder")
    public void queryOrderByArray(String sortOrder) throws Exception {
        String query = String.format("SELECT * FROM r ORDER BY r.propArray %s", sortOrder);
        int pageSize = 3;
        List<InternalObjectNode> results1 = this.queryWithContinuationTokens(query, pageSize);
        List<InternalObjectNode> results2 =
            this.queryWithContinuationTokens(query, this.createdDocuments.size());
        assertThat(results1.stream().map(r -> r.getResourceId()).collect(Collectors.toList()))
            .containsExactlyElementsOf(results2.stream().limit(pageSize)
                .map(r -> r.getResourceId()).collect(Collectors.toList()));
    }

    /** ORDER BY over an object property: a small-page prefix must match the full drain. */
    @Test(groups = {"simple"}, timeOut = TIMEOUT, dataProvider = "sortOrder")
    public void queryOrderByObject(String sortOrder) throws Exception {
        String query = String.format("SELECT * FROM r ORDER BY r.propObject %s", sortOrder);
        int pageSize = 3;
        List<InternalObjectNode> results1 = this.queryWithContinuationTokens(query, pageSize);
        List<InternalObjectNode> results2 =
            this.queryWithContinuationTokens(query, this.createdDocuments.size());
        assertThat(results1.stream().map(r -> r.getResourceId()).collect(Collectors.toList()))
            .containsExactlyElementsOf(results2.stream().limit(pageSize)
                .map(r -> r.getResourceId()).collect(Collectors.toList()));
    }

    /** Creates one document from the given properties and returns the stored form. */
    public InternalObjectNode createDocument(CosmosAsyncContainer cosmosContainer,
                                             Map<String, Object> keyValueProps) {
        InternalObjectNode docDefinition = getDocumentDefinition(keyValueProps);
        return BridgeInternal.getProperties(cosmosContainer.createItem(docDefinition).block());
    }

    /** Builds document definitions for every property map and inserts them in bulk. */
    public List<InternalObjectNode> bulkInsert(CosmosAsyncContainer cosmosContainer,
                                               List<Map<String, Object>> keyValuePropsList) {
        ArrayList<InternalObjectNode> result = new ArrayList<InternalObjectNode>();
        for (Map<String, Object> keyValueProps : keyValuePropsList) {
            InternalObjectNode docDefinition = getDocumentDefinition(keyValueProps);
            result.add(docDefinition);
        }
        return bulkInsertBlocking(cosmosContainer, result);
    }

    // NOTE(review): fixed sleep — presumably waits for index propagation; confirm whether a
    // deterministic readiness check could replace it.
    @BeforeMethod(groups = { "simple" })
    public void beforeMethod() throws Exception {
        TimeUnit.SECONDS.sleep(10);
    }

    /** Ensures /propMixed/? is part of the container's included index paths. */
    @BeforeClass(groups = { "simple" }, timeOut = 4 * SETUP_TIMEOUT)
    private void updateCollectionIndex() {
        CosmosContainerProperties containerProperties = createdCollection.read().block().getProperties();
        IndexingPolicy indexingPolicy = containerProperties.getIndexingPolicy();
        List<IncludedPath> includedPaths = indexingPolicy.getIncludedPaths();
        IncludedPath includedPath = new IncludedPath("/propMixed/?");
        if (!includedPaths.contains(includedPath)) {
            includedPaths.add(includedPath);
            indexingPolicy.setIncludedPaths(includedPaths);
            containerProperties.setIndexingPolicy(indexingPolicy);
            createdCollection.replace(containerProperties).block();
        }
    }

    /** Teardown: close the shared async client. */
    @AfterClass(groups = { "simple" }, timeOut = SHUTDOWN_TIMEOUT, alwaysRun = true)
    public void afterClass() {
        safeClose(client);
    }

    // Runs the query with a deliberately bogus ORDER BY continuation token and asserts the
    // service rejects it. NOTE(review): the loop runs exactly once (requestContinuation is
    // never reassigned) and pageSize/expectedIds are unused — presumably kept for symmetry.
    private void assertInvalidContinuationToken(String query, int[] pageSize, List<String> expectedIds) {
        String requestContinuation = null;
        do {
            CosmosQueryRequestOptions options = new CosmosQueryRequestOptions();
            options.setMaxDegreeOfParallelism(2);
            OrderByContinuationToken orderByContinuationToken = new OrderByContinuationToken(
                new CompositeContinuationToken(
                    "asdf",
                    new Range<String>("A", "D", false, true)),
                new QueryItem[] {new QueryItem("{\"item\" : 42}")},
                "rid",
                false);
            CosmosPagedFlux<InternalObjectNode> queryObservable =
                createdCollection.queryItems(query, options, InternalObjectNode.class);
            TestSubscriber<FeedResponse<InternalObjectNode>> testSubscriber = new TestSubscriber<>();
            queryObservable.byPage(orderByContinuationToken.toString(), 1).subscribe(testSubscriber);
            testSubscriber.awaitTerminalEvent(TIMEOUT, TimeUnit.MILLISECONDS);
            testSubscriber.assertError(CosmosException.class);
        } while (requestContinuation != null);
    }

    /** Drains the query for each page size and checks ids arrive exactly in expected order. */
    private void queryWithContinuationTokensAndPageSizes(String query, int[] pageSizes,
                                                         List<String> expectedIds) {
        for (int pageSize : pageSizes) {
            List<InternalObjectNode> receivedDocuments = this.queryWithContinuationTokens(query, pageSize);
            List<String> actualIds = new ArrayList<String>();
            for (InternalObjectNode document : receivedDocuments) {
                actualIds.add(document.getResourceId());
            }
            assertThat(actualIds).containsExactlyElementsOf(expectedIds);
        }
    }

    /** Executes the query one page per subscription, chaining continuation tokens. */
    private List<InternalObjectNode> queryWithContinuationTokens(String query, int pageSize) {
        String requestContinuation = null;
        List<String> continuationTokens = new ArrayList<String>();
        List<InternalObjectNode> receivedDocuments = new ArrayList<InternalObjectNode>();
        do {
            CosmosQueryRequestOptions options = new CosmosQueryRequestOptions();
            options.setMaxDegreeOfParallelism(2);
            CosmosPagedFlux<InternalObjectNode> queryObservable =
                createdCollection.queryItems(query, options, InternalObjectNode.class);
            TestSubscriber<FeedResponse<InternalObjectNode>> testSubscriber = new TestSubscriber<>();
            queryObservable.byPage(requestContinuation, pageSize).subscribe(testSubscriber);
            testSubscriber.awaitTerminalEvent(TIMEOUT, TimeUnit.MILLISECONDS);
            testSubscriber.assertNoErrors();
            testSubscriber.assertComplete();
            @SuppressWarnings("unchecked")
            FeedResponse<InternalObjectNode> firstPage =
                (FeedResponse<InternalObjectNode>) testSubscriber.getEvents().get(0).get(0);
            requestContinuation = firstPage.getContinuationToken();
            receivedDocuments.addAll(firstPage.getResults());
            continuationTokens.add(requestContinuation);
        } while (requestContinuation != null);
        return receivedDocuments;
    }

    /** Builds a document as raw JSON from the given key/value pairs plus "id" and "mypk". */
    private static InternalObjectNode getDocumentDefinition(String partitionKey, String id,
                                                            Map<String, Object> keyValuePair) {
        StringBuilder sb = new StringBuilder();
        sb.append("{\n");
        for (String key : keyValuePair.keySet()) {
            Object val = keyValuePair.get(key);
            sb.append(" ");
            sb.append("\"").append(key).append("\"").append(" :");
            if (val == null) {
                sb.append("null");
            } else {
                sb.append(toJson(val));
            }
            sb.append(",\n");
        }
        sb.append(String.format(" \"id\": \"%s\",\n", id));
        sb.append(String.format(" \"mypk\": \"%s\"\n", partitionKey));
        sb.append("}");
        return new InternalObjectNode(sb.toString());
    }

    /** Convenience overload: one fresh UUID serves as both id and partition key. */
    private static InternalObjectNode getDocumentDefinition(Map<String, Object> keyValuePair) {
        String uuid = UUID.randomUUID().toString();
        return getDocumentDefinition(uuid, uuid, keyValuePair);
    }

    /** JSON-serializes a value, wrapping the checked Jackson exception as unchecked. */
    private static String toJson(Object object) {
        try {
            return Utils.getSimpleObjectMapper().writeValueAsString(object);
        } catch (JsonProcessingException e) {
            throw new IllegalStateException(e);
        }
    }
}
Yes, this is something I am aware of. In the end I am not using the ordering for validation at all; I am only checking that all of the documents are present.
public void queryOrderByMixedTypes(String sortOrder) throws Exception { List<PartitionKeyRange> partitionKeyRanges = getPartitionKeyRanges(createdCollection.getId(), BridgeInternal.getContextClient(this.client)); assertThat(partitionKeyRanges.size()).isGreaterThan(1); String query = String.format("SELECT r.id, r.propMixed FROM r ORDER BY r.propMixed ", sortOrder); CosmosQueryRequestOptions options = new CosmosQueryRequestOptions(); List<String> orderedIds = createdDocuments.stream() .sorted((o1, o2) -> ItemComparator.getInstance().compare(o1.get("propMixed"), o2.get("propMixed"))) .map(Resource::getId) .collect(Collectors.toList()); int pageSize = 20; CosmosPagedFlux<InternalObjectNode> queryFlux = createdCollection.queryItems(query, options, InternalObjectNode.class); TestSubscriber<FeedResponse<InternalObjectNode>> subscriber = new TestSubscriber<>(); queryFlux.byPage(pageSize).subscribe(subscriber); FeedResponseListValidator<InternalObjectNode> validator = new FeedResponseListValidator.Builder<InternalObjectNode>() .totalSize(orderedIds.size()) .exactlyContainsIdsInAnyOrder(orderedIds) .build(); validateQuerySuccess(queryFlux.byPage(pageSize), validator); }
.sorted((o1, o2) -> ItemComparator.getInstance().compare(o1.get("propMixed"),
public void queryOrderByMixedTypes(String sortOrder) throws Exception { List<PartitionKeyRange> partitionKeyRanges = getPartitionKeyRanges(createdCollection.getId(), BridgeInternal .getContextClient(this.client)); assertThat(partitionKeyRanges.size()).isGreaterThan(1); String query = String.format("SELECT * FROM r ORDER BY r.propMixed ", sortOrder); CosmosQueryRequestOptions options = new CosmosQueryRequestOptions(); List<String> sourceIds = createdDocuments.stream() .map(Resource::getId) .collect(Collectors.toList()); int pageSize = 20; CosmosPagedFlux<InternalObjectNode> queryFlux = createdCollection .queryItems(query, options, InternalObjectNode.class); TestSubscriber<FeedResponse<InternalObjectNode>> subscriber = new TestSubscriber<>(); queryFlux.byPage(pageSize).subscribe(subscriber); subscriber.awaitTerminalEvent(); subscriber.assertComplete(); subscriber.assertNoErrors(); List<InternalObjectNode> results = new ArrayList<>(); subscriber.values().forEach(feedResponse -> results.addAll(feedResponse.getResults())); assertThat(results.size()).isEqualTo(createdDocuments.size()); List<String> resultIds = results.stream().map(Resource::getId).collect(Collectors.toList()); assertThat(resultIds).containsExactlyInAnyOrderElementsOf(sourceIds); final List<String> typeList = Arrays.asList("undefined", "null", "boolean", "number", "string", "array", "object"); List<String> observedTypes = new ArrayList<>(); results.forEach(item -> { String propType = "undefined"; if (item.has(PROPTYPE)) { propType = item.getString(PROPTYPE); } System.out.println("item.get(PROPTYPE) = " + item.get(PROPTYPE)); System.out.println("propType = " + propType); if (!observedTypes.contains(propType)) { observedTypes.add(propType); } else { boolean equals = observedTypes.get(observedTypes.size() - 1).equals(propType); assertThat(equals).isTrue().as("Items of same type should be contiguous"); } }); assertThat(observedTypes).containsExactlyElementsOf(typeList); for (String type : typeList) { 
List<InternalObjectNode> items = results.stream().filter(r -> { if ("undefined".equals(type)) { return !r.has(PROPTYPE); } return type.equals(r.getString(PROPTYPE)); }).collect(Collectors.toList()); if ("boolean".equals(type)) { List<Boolean> sourceList = items.stream().map(n -> n.getBoolean(PROP_MIXED)).collect(Collectors.toList()); List<Boolean> toBeSortedList = new ArrayList<>(sourceList); toBeSortedList.sort(Comparator.comparing(Boolean::booleanValue)); assertThat(toBeSortedList).containsExactlyElementsOf(sourceList); } if ("number".equals(type)) { List<Number> numberList = items.stream().map(n -> (Number) n.get(PROP_MIXED)).collect(Collectors.toList()); List<Number> toBeSortedList = new ArrayList<>(numberList); Collections.copy(toBeSortedList, numberList); toBeSortedList.sort(Comparator.comparingDouble(Number::doubleValue)); assertThat(toBeSortedList).containsExactlyElementsOf(numberList); } if ("string".equals(type)) { List<String> sourceList = items.stream().map(n -> n.getString(PROP_MIXED)).collect(Collectors.toList()); List<String> toBeSortedList = new ArrayList<>(sourceList); Collections.copy(toBeSortedList, sourceList); toBeSortedList.sort(Comparator.comparing(String::valueOf)); assertThat(toBeSortedList).containsExactlyElementsOf(sourceList); } } }
/**
 * Integration tests for ORDER BY queries against a shared multi-partition Cosmos DB
 * container. Seeds 30 documents with int/string/array/object/mixed properties plus 10
 * single-partition documents, then validates ordering, paging, TOP, continuation-token
 * round-trips, and invalid-continuation-token handling.
 */
class OrderbyDocumentQueryTest extends TestSuiteBase {
    // Each physical partition contributes at least this much RU to a cross-partition query.
    private final double minQueryRequestChargePerPartition = 2.0;
    private CosmosAsyncClient client;
    private CosmosAsyncContainer createdCollection;
    private CosmosAsyncDatabase createdDatabase;
    // Populated in before_OrderbyDocumentQueryTest(); tests derive expected results from it.
    private List<InternalObjectNode> createdDocuments = new ArrayList<>();
    private int numberOfPartitions;

    @Factory(dataProvider = "clientBuildersWithDirect")
    public OrderbyDocumentQueryTest(CosmosClientBuilder clientBuilder) {
        super(clientBuilder);
    }

    /**
     * Queries for one known document with an ORDER BY clause and validates the full
     * response content (resource equality, page count, request charge, query metrics).
     */
    @Test(groups = { "simple" }, timeOut = TIMEOUT, dataProvider = "queryMetricsArgProvider")
    public void queryDocumentsValidateContent(Boolean qmEnabled) throws Exception {
        InternalObjectNode expectedDocument = createdDocuments.get(0);
        String query = String.format("SELECT * from root r where r.propStr = '%s'"
            + " ORDER BY r.propInt"
            , ModelBridgeInternal.getStringFromJsonSerializable(expectedDocument,"propStr"));

        CosmosQueryRequestOptions options = new CosmosQueryRequestOptions();
        // qmEnabled == null exercises the default (metrics flag untouched).
        if (qmEnabled != null) {
            options.setQueryMetricsEnabled(qmEnabled);
        }

        CosmosPagedFlux<InternalObjectNode> queryObservable = createdCollection.queryItems(query, options, InternalObjectNode.class);

        List<String> expectedResourceIds = new ArrayList<>();
        expectedResourceIds.add(expectedDocument.getResourceId());

        Map<String, ResourceValidator<InternalObjectNode>> resourceIDToValidator = new HashMap<>();
        resourceIDToValidator.put(expectedDocument.getResourceId(),
            new ResourceValidator.Builder<InternalObjectNode>().areEqual(expectedDocument).build());

        FeedResponseListValidator<InternalObjectNode> validator = new FeedResponseListValidator.Builder<InternalObjectNode>()
            .numberOfPages(1)
            .containsExactly(expectedResourceIds)
            .validateAllResources(resourceIDToValidator)
            .totalRequestChargeIsAtLeast(numberOfPartitions * minQueryRequestChargePerPartition)
            .allPagesSatisfy(new FeedResponseValidator.Builder<InternalObjectNode>().hasRequestChargeHeader().build())
            .hasValidQueryMetrics(qmEnabled)
            .build();

        validateQuerySuccess(queryObservable.byPage(), validator);
    }

    /** An ORDER BY query with no matches must still return one (empty) page with a charge header. */
    @Test(groups = { "simple" }, timeOut = TIMEOUT)
    public void queryDocuments_NoResults() throws Exception {
        String query = "SELECT * from root r where r.id = '2' ORDER BY r.propInt";
        CosmosQueryRequestOptions options = new CosmosQueryRequestOptions();
        CosmosPagedFlux<InternalObjectNode> queryObservable = createdCollection.queryItems(query, options, InternalObjectNode.class);

        FeedResponseListValidator<InternalObjectNode> validator = new FeedResponseListValidator.Builder<InternalObjectNode>()
            .containsExactly(new ArrayList<>())
            .numberOfPages(1)
            .totalRequestChargeIsAtLeast(numberOfPartitions * minQueryRequestChargePerPartition)
            .allPagesSatisfy(new FeedResponseValidator.Builder<InternalObjectNode>()
                .hasRequestChargeHeader().build())
            .build();
        validateQuerySuccess(queryObservable.byPage(), validator);
    }

    /** Supplies both sort directions to the direction-parameterized tests. */
    @DataProvider(name = "sortOrder")
    public Object[][] sortOrder() {
        return new Object[][] { { "ASC" }, {"DESC"} };
    }

    /** ORDER BY r.propInt in both directions; expected IDs are computed client-side. */
    @Test(groups = { "simple" }, timeOut = TIMEOUT, dataProvider = "sortOrder")
    public void queryOrderBy(String sortOrder) throws Exception {
        String query = String.format("SELECT * FROM r ORDER BY r.propInt %s", sortOrder);
        CosmosQueryRequestOptions options = new CosmosQueryRequestOptions();
        int pageSize = 3;
        CosmosPagedFlux<InternalObjectNode> queryObservable = createdCollection.queryItems(query, options, InternalObjectNode.class);

        Comparator<Integer> validatorComparator = Comparator.nullsFirst(Comparator.<Integer>naturalOrder());
        List<String> expectedResourceIds = sortDocumentsAndCollectResourceIds("propInt", d -> ModelBridgeInternal.getIntFromJsonSerializable(d,"propInt"), validatorComparator);
        // DESC is validated by reversing the ascending expectation.
        if ("DESC".equals(sortOrder)) {
            Collections.reverse(expectedResourceIds);
        }
        int expectedPageSize = expectedNumberOfPages(expectedResourceIds.size(), pageSize);

        FeedResponseListValidator<InternalObjectNode> validator = new FeedResponseListValidator.Builder<InternalObjectNode>()
            .containsExactly(expectedResourceIds)
            .numberOfPages(expectedPageSize)
            .allPagesSatisfy(new FeedResponseValidator.Builder<InternalObjectNode>()
                .hasRequestChargeHeader().build())
            .totalRequestChargeIsAtLeast(numberOfPartitions * minQueryRequestChargePerPartition)
            .build();
        validateQuerySuccess(queryObservable.byPage(pageSize), validator);
    }

    /** Same as queryOrderBy but with a VALUE projection, so pages carry bare Integers. */
    @Test(groups = {"simple"}, timeOut = TIMEOUT, dataProvider = "sortOrder")
    public void queryOrderByWithValue(String sortOrder) throws Exception {
        String query = String.format("SELECT value r.propInt FROM r ORDER BY r.propInt %s", sortOrder);
        CosmosQueryRequestOptions options = new CosmosQueryRequestOptions();
        int pageSize = 3;
        CosmosPagedFlux<Integer> queryObservable = createdCollection.queryItems(query, options, Integer.class);

        Comparator<Integer> validatorComparator = Comparator.nullsFirst(Comparator.<Integer>naturalOrder());
        List<Integer> expectedValues = sortDocumentsAndCollectValues("propInt", d -> ModelBridgeInternal
            .getIntFromJsonSerializable(d, "propInt"), validatorComparator);
        if ("DESC".equals(sortOrder)) {
            Collections.reverse(expectedValues);
        }
        int expectedPageSize = expectedNumberOfPages(expectedValues.size(), pageSize);

        FeedResponseListValidator<Integer> validator = new FeedResponseListValidator.Builder<Integer>()
            .containsExactlyValues(expectedValues)
            .numberOfPages(expectedPageSize)
            .allPagesSatisfy(new FeedResponseValidator.Builder<Integer>()
                .hasRequestChargeHeader().build())
            .totalRequestChargeIsAtLeast(numberOfPartitions * minQueryRequestChargePerPartition)
            .build();
        validateQuerySuccess(queryObservable.byPage(pageSize), validator);
    }

    /** Ascending-only variant over the integer property (default direction, no keyword). */
    @Test(groups = { "simple" }, timeOut = TIMEOUT)
    public void queryOrderByInt() throws Exception {
        String query = "SELECT * FROM r ORDER BY r.propInt";
        CosmosQueryRequestOptions options = new CosmosQueryRequestOptions();
        int pageSize = 3;
        CosmosPagedFlux<InternalObjectNode> queryObservable = createdCollection.queryItems(query, options, InternalObjectNode.class);

        Comparator<Integer> validatorComparator = Comparator.nullsFirst(Comparator.<Integer>naturalOrder());
        List<String> expectedResourceIds = sortDocumentsAndCollectResourceIds("propInt", d -> ModelBridgeInternal.getIntFromJsonSerializable(d,"propInt"), validatorComparator);
        int expectedPageSize = expectedNumberOfPages(expectedResourceIds.size(), pageSize);

        FeedResponseListValidator<InternalObjectNode> validator = new FeedResponseListValidator.Builder<InternalObjectNode>()
            .containsExactly(expectedResourceIds)
            .numberOfPages(expectedPageSize)
            .allPagesSatisfy(new FeedResponseValidator.Builder<InternalObjectNode>()
                .hasRequestChargeHeader().build())
            .totalRequestChargeIsAtLeast(numberOfPartitions * minQueryRequestChargePerPartition)
            .build();
        validateQuerySuccess(queryObservable.byPage(pageSize), validator);
    }

    /** Ascending-only variant over the string property. */
    @Test(groups = { "simple" }, timeOut = TIMEOUT)
    public void queryOrderByString() throws Exception {
        String query = "SELECT * FROM r ORDER BY r.propStr";
        CosmosQueryRequestOptions options = new CosmosQueryRequestOptions();
        int pageSize = 3;
        CosmosPagedFlux<InternalObjectNode> queryObservable = createdCollection.queryItems(query, options, InternalObjectNode.class);

        Comparator<String> validatorComparator = Comparator.nullsFirst(Comparator.<String>naturalOrder());
        List<String> expectedResourceIds = sortDocumentsAndCollectResourceIds("propStr", d -> ModelBridgeInternal.getStringFromJsonSerializable(d,"propStr"), validatorComparator);
        int expectedPageSize = expectedNumberOfPages(expectedResourceIds.size(), pageSize);

        FeedResponseListValidator<InternalObjectNode> validator = new FeedResponseListValidator.Builder<InternalObjectNode>()
            .containsExactly(expectedResourceIds)
            .numberOfPages(expectedPageSize)
            .allPagesSatisfy(new FeedResponseValidator.Builder<InternalObjectNode>()
                .hasRequestChargeHeader().build())
            .totalRequestChargeIsAtLeast(numberOfPartitions * minQueryRequestChargePerPartition)
            .build();
        validateQuerySuccess(queryObservable.byPage(pageSize), validator);
    }

    // NOTE(review): this @Test annotation sits directly on a private helper method below.
    // TestNG does not execute private methods, so it is dead metadata — presumably a
    // leftover from a test method that was moved or removed; confirm and delete it.
    @Test(groups = {
        "simple" }, timeOut = TIMEOUT, dataProvider = "sortOrder")
    @NotNull
    /** Reads all partition key ranges for the given container via the low-level client. */
    private List<PartitionKeyRange> getPartitionKeyRanges(
        String containerId, AsyncDocumentClient asyncDocumentClient) {
        List<PartitionKeyRange> partitionKeyRanges = new ArrayList<>();
        List<FeedResponse<PartitionKeyRange>> partitionFeedResponseList = asyncDocumentClient
            .readPartitionKeyRanges("/dbs/" + createdDatabase.getId() + "/colls/" + containerId, new CosmosQueryRequestOptions())
            .collectList().block();
        partitionFeedResponseList.forEach(f -> partitionKeyRanges.addAll(f.getResults()));
        return partitionKeyRanges;
    }

    /** TOP values below, at, and above the seeded document count, including 0. */
    @DataProvider(name = "topValue")
    public Object[][] topValueParameter() {
        return new Object[][] { { 0 }, { 1 }, { 5 }, { createdDocuments.size() - 1 }, { createdDocuments.size() }, { createdDocuments.size() + 1 }, { 2 * createdDocuments.size() } };
    }

    /** Combines TOP with ORDER BY; the client-side expectation is truncated with limit(). */
    @Test(groups = { "simple" }, timeOut = TIMEOUT, dataProvider = "topValue")
    public void queryOrderWithTop(int topValue) throws Exception {
        String query = String.format("SELECT TOP %d * FROM r ORDER BY r.propInt", topValue);
        CosmosQueryRequestOptions options = new CosmosQueryRequestOptions();
        int pageSize = 3;
        CosmosPagedFlux<InternalObjectNode> queryObservable = createdCollection.queryItems(query, options, InternalObjectNode.class);

        Comparator<Integer> validatorComparator = Comparator.nullsFirst(Comparator.<Integer>naturalOrder());
        List<String> expectedResourceIds = sortDocumentsAndCollectResourceIds("propInt", d -> ModelBridgeInternal.getIntFromJsonSerializable(d,"propInt"), validatorComparator)
            .stream().limit(topValue).collect(Collectors.toList());
        int expectedPageSize = expectedNumberOfPages(expectedResourceIds.size(), pageSize);

        FeedResponseListValidator<InternalObjectNode> validator = new FeedResponseListValidator.Builder<InternalObjectNode>()
            .containsExactly(expectedResourceIds)
            .numberOfPages(expectedPageSize)
            .allPagesSatisfy(new FeedResponseValidator.Builder<InternalObjectNode>()
                .hasRequestChargeHeader().build())
            // TOP 0 short-circuits, so only a minimal charge is expected in that case.
            .totalRequestChargeIsAtLeast(numberOfPartitions * (topValue > 0 ? minQueryRequestChargePerPartition : 1))
            .build();
        validateQuerySuccess(queryObservable.byPage(pageSize), validator);
    }

    /**
     * Sorts the seeded documents that have {@code propName} by the extracted property
     * value and returns their resource ids in that order (documents lacking the
     * property are excluded).
     */
    private <T> List<String> sortDocumentsAndCollectResourceIds(String propName, Function<InternalObjectNode, T> extractProp, Comparator<T> comparer) {
        return createdDocuments.stream()
            .filter(d -> ModelBridgeInternal.getMapFromJsonSerializable(d).containsKey(propName))
            .sorted((d1, d2) -> comparer.compare(extractProp.apply(d1), extractProp.apply(d2)))
            .map(Resource::getResourceId).collect(Collectors.toList());
    }

    /** Like sortDocumentsAndCollectResourceIds but returns the sorted property values themselves. */
    @SuppressWarnings("unchecked")
    private <T> List<T> sortDocumentsAndCollectValues(String propName, Function<InternalObjectNode, T> extractProp, Comparator<T> comparer) {
        return createdDocuments.stream()
            .filter(d -> ModelBridgeInternal.getMapFromJsonSerializable(d).containsKey(propName))
            .sorted((d1, d2) -> comparer.compare(extractProp.apply(d1), extractProp.apply(d2)))
            .map(d -> (T)ModelBridgeInternal.getMapFromJsonSerializable(d).get(propName))
            .collect(Collectors.toList());
    }

    /**
     * Runs a single-partition ORDER BY query, consumes the first page of 3, then restarts
     * the query from that page's continuation token and validates the remaining 7 results.
     */
    @Test(groups = { "simple" }, timeOut = TIMEOUT)
    public void queryScopedToSinglePartition_StartWithContinuationToken() throws Exception {
        String query = "SELECT * FROM r ORDER BY r.propScopedPartitionInt ASC";
        CosmosQueryRequestOptions options = new CosmosQueryRequestOptions();
        // Intentional typo "duplicateParitionKeyValue" — must match the seeding code.
        options.setPartitionKey(new PartitionKey("duplicateParitionKeyValue"));
        CosmosPagedFlux<InternalObjectNode> queryObservable = createdCollection.queryItems(query, options, InternalObjectNode.class);

        TestSubscriber<FeedResponse<InternalObjectNode>> subscriber = new TestSubscriber<>();
        // Take only the first page so we have a continuation token to resume from.
        queryObservable.byPage(3).take(1).subscribe(subscriber);
        subscriber.awaitTerminalEvent();
        subscriber.assertComplete();
        subscriber.assertNoErrors();
        assertThat(subscriber.valueCount()).isEqualTo(1);
        @SuppressWarnings("unchecked")
        FeedResponse<InternalObjectNode> page = (FeedResponse<InternalObjectNode>) subscriber.getEvents().get(0).get(0);
        assertThat(page.getResults()).hasSize(3);
        assertThat(page.getContinuationToken()).isNotEmpty();

        queryObservable = createdCollection.queryItems(query, options, InternalObjectNode.class);
        // Remaining documents: the scoped-partition docs with propScopedPartitionInt 3..9.
        List<InternalObjectNode> expectedDocs = createdDocuments.stream()
            .filter(d -> (StringUtils.equals("duplicateParitionKeyValue", ModelBridgeInternal.getStringFromJsonSerializable(d,"mypk"))))
            .filter(d -> (ModelBridgeInternal.getIntFromJsonSerializable(d,"propScopedPartitionInt") > 2)).collect(Collectors.toList());
        // NOTE(review): max item count is never set on these options, so this presumably
        // relies on a non-null default from the bridge API — confirm it cannot return null.
        Integer maxItemCount = ModelBridgeInternal.getMaxItemCountFromQueryRequestOptions(options);
        int expectedPageSize = (expectedDocs.size() + maxItemCount - 1) / maxItemCount;

        assertThat(expectedDocs).hasSize(10 - 3);

        // NOTE(review): the null initialization is immediately overwritten and is dead.
        FeedResponseListValidator<InternalObjectNode> validator = null;
        validator = new FeedResponseListValidator.Builder<InternalObjectNode>()
            .containsExactly(expectedDocs.stream()
                .sorted((e1, e2) -> Integer.compare(ModelBridgeInternal.getIntFromJsonSerializable(e1,"propScopedPartitionInt"), ModelBridgeInternal.getIntFromJsonSerializable(e2,"propScopedPartitionInt")))
                .map(d -> d.getResourceId()).collect(Collectors.toList()))
            .numberOfPages(expectedPageSize)
            .allPagesSatisfy(new FeedResponseValidator.Builder<InternalObjectNode>()
                .requestChargeGreaterThanOrEqualTo(1.0).build())
            .build();
        validateQuerySuccess(queryObservable.byPage(page.getContinuationToken()), validator);
    }

    /** Serializes an OrderByContinuationToken, parses it back, and checks every field survives. */
    @Test(groups = { "simple" }, timeOut = TIMEOUT)
    public void orderByContinuationTokenRoundTrip() throws Exception {
        {
            // Positive case: a well-formed token round-trips losslessly.
            OrderByContinuationToken orderByContinuationToken = new OrderByContinuationToken(
                new CompositeContinuationToken(
                    "asdf",
                    new Range<String>("A", "D", false, true)),
                new QueryItem[] {new QueryItem("{\"item\" : 42}")},
                "rid",
                false);
            String serialized = orderByContinuationToken.toString();
            ValueHolder<OrderByContinuationToken> outOrderByContinuationToken = new ValueHolder<OrderByContinuationToken>();
            assertThat(OrderByContinuationToken.tryParse(serialized,
                outOrderByContinuationToken)).isTrue();
            OrderByContinuationToken deserialized = outOrderByContinuationToken.v;
            CompositeContinuationToken compositeContinuationToken = deserialized.getCompositeContinuationToken();
            String token = compositeContinuationToken.getToken();
            Range<String> range = compositeContinuationToken.getRange();
            assertThat(token).isEqualTo("asdf");
            assertThat(range.getMin()).isEqualTo("A");
            assertThat(range.getMax()).isEqualTo("D");
            assertThat(range.isMinInclusive()).isEqualTo(false);
            assertThat(range.isMaxInclusive()).isEqualTo(true);
            QueryItem[] orderByItems = deserialized.getOrderByItems();
            assertThat(orderByItems).isNotNull();
            assertThat(orderByItems.length).isEqualTo(1);
            assertThat(orderByItems[0].getItem()).isEqualTo(42);
            String rid = deserialized.getRid();
            assertThat(rid).isEqualTo("rid");
            boolean inclusive = deserialized.getInclusive();
            assertThat(inclusive).isEqualTo(false);
        }
        {
            // Negative case: malformed JSON must fail to parse.
            ValueHolder<OrderByContinuationToken> outOrderByContinuationToken = new ValueHolder<OrderByContinuationToken>();
            assertThat(OrderByContinuationToken.tryParse("{\"property\" : \"Not a valid Order By Token\"}", outOrderByContinuationToken)).isFalse();
        }
    }

    /** Drains an integer ORDER BY query via continuation tokens at several page sizes. */
    @Test(groups = { "simple" }, timeOut = TIMEOUT * 10, dataProvider = "sortOrder", retryAnalyzer = RetryAnalyzer.class)
    public void queryDocumentsWithOrderByContinuationTokensInteger(String sortOrder) throws Exception {
        String query = String.format("SELECT * FROM c ORDER BY c.propInt %s", sortOrder);
        // DESC is modeled with a reversed comparator rather than reversing the result list.
        Comparator<Integer> order = sortOrder.equals("ASC")?Comparator.naturalOrder():Comparator.reverseOrder();
        Comparator<Integer> validatorComparator = Comparator.nullsFirst(order);
        List<String> expectedResourceIds = sortDocumentsAndCollectResourceIds("propInt", d -> ModelBridgeInternal.getIntFromJsonSerializable(d,"propInt"), validatorComparator);
        this.queryWithContinuationTokensAndPageSizes(query, new int[] { 1, 5, 10, 100}, expectedResourceIds);
    }

    /** Same continuation-token drain over the string id property. */
    @Test(groups = { "simple" }, timeOut = TIMEOUT * 10, dataProvider = "sortOrder")
    public void queryDocumentsWithOrderByContinuationTokensString(String sortOrder) throws Exception {
        String query = String.format("SELECT * FROM c ORDER BY c.id %s", sortOrder);
        Comparator<String> order = sortOrder.equals("ASC")?Comparator.naturalOrder():Comparator.reverseOrder();
        Comparator<String> validatorComparator = Comparator.nullsFirst(order);
        List<String> expectedResourceIds = sortDocumentsAndCollectResourceIds("id", d -> ModelBridgeInternal.getStringFromJsonSerializable(d,"id"), validatorComparator);
        this.queryWithContinuationTokensAndPageSizes(query, new int[] { 1, 5, 10, 100 }, expectedResourceIds);
    }

    /** Resuming a query from a forged continuation token must surface a CosmosException. */
    @Test(groups = { "simple" }, timeOut = TIMEOUT * 10, dataProvider = "sortOrder")
    public void queryDocumentsWithInvalidOrderByContinuationTokensString(String sortOrder) throws Exception {
        String query = String.format("SELECT * FROM c ORDER BY c.id %s", sortOrder);
        Comparator<String> validatorComparator;
        if(sortOrder.equals("ASC")) {
            validatorComparator = Comparator.nullsFirst(Comparator.<String>naturalOrder());
        }else{
            validatorComparator = Comparator.nullsFirst(Comparator.<String>reverseOrder());
        }
        List<String> expectedResourceIds = sortDocumentsAndCollectResourceIds("id", d -> ModelBridgeInternal.getStringFromJsonSerializable(d,"id"), validatorComparator);
        this.assertInvalidContinuationToken(query, new int[] { 1, 5, 10, 100 }, expectedResourceIds);
    }

    /**
     * ORDER BY on an array-valued property: a small first drain must be a prefix of the
     * full drain, i.e. paging does not change the (engine-defined) order.
     */
    @Test(groups = {"simple"}, timeOut = TIMEOUT, dataProvider = "sortOrder")
    public void queryOrderByArray(String sortOrder) throws Exception {
        String query = String.format("SELECT * FROM r ORDER BY r.propArray %s", sortOrder);
        int pageSize = 3;
        List<InternalObjectNode> results1 = this.queryWithContinuationTokens(query, pageSize);
        List<InternalObjectNode> results2 = this.queryWithContinuationTokens(query, this.createdDocuments.size());
        assertThat(results1.stream().map(r -> r.getResourceId()).collect(Collectors.toList()))
            .containsExactlyElementsOf(results2.stream().limit(pageSize).map(r ->
                r.getResourceId()).collect(Collectors.toList()));
    }

    /** Same prefix-consistency check for an object-valued ORDER BY property. */
    @Test(groups = {"simple"}, timeOut = TIMEOUT, dataProvider = "sortOrder")
    public void queryOrderByObject(String sortOrder) throws Exception {
        String query = String.format("SELECT * FROM r ORDER BY r.propObject %s", sortOrder);
        int pageSize = 3;
        List<InternalObjectNode> results1 = this.queryWithContinuationTokens(query, pageSize);
        List<InternalObjectNode> results2 = this.queryWithContinuationTokens(query, this.createdDocuments.size());
        assertThat(results1.stream().map(r -> r.getResourceId()).collect(Collectors.toList()))
            .containsExactlyElementsOf(results2.stream().limit(pageSize).map(r -> r.getResourceId()).collect(Collectors.toList()));
    }

    /** Builds a document from the given properties and inserts it, returning the stored node. */
    public InternalObjectNode createDocument(CosmosAsyncContainer cosmosContainer, Map<String, Object> keyValueProps) {
        InternalObjectNode docDefinition = getDocumentDefinition(keyValueProps);
        return BridgeInternal.getProperties(cosmosContainer.createItem(docDefinition).block());
    }

    /** Bulk-inserts one document per property map via the base class's blocking helper. */
    public List<InternalObjectNode> bulkInsert(CosmosAsyncContainer cosmosContainer, List<Map<String, Object>> keyValuePropsList) {
        ArrayList<InternalObjectNode> result = new ArrayList<InternalObjectNode>();
        for(Map<String, Object> keyValueProps: keyValuePropsList) {
            InternalObjectNode docDefinition = getDocumentDefinition(keyValueProps);
            result.add(docDefinition);
        }
        return bulkInsertBlocking(cosmosContainer, result);
    }

    // Fixed pause between tests; presumably gives replicas/indexing time to settle — confirm.
    @BeforeMethod(groups = { "simple" })
    public void beforeMethod() throws Exception {
        TimeUnit.SECONDS.sleep(10);
    }

    /**
     * Seeds the shared container: 30 documents with int/string/array/object properties
     * (and a rotating "propMixed" type), one empty document, and 10 documents sharing a
     * single partition key for the scoped-partition tests.
     */
    @BeforeClass(groups = { "simple" }, timeOut = 4 * SETUP_TIMEOUT)
    public void before_OrderbyDocumentQueryTest() throws Exception {
        client = getClientBuilder().buildAsyncClient();
        createdDatabase = getSharedCosmosDatabase(client);
        createdCollection = getSharedMultiPartitionCosmosContainer(client);
        truncateCollection(createdCollection);

        List<Map<String, Object>> keyValuePropsList = new ArrayList<>();
        Map<String, Object> props;

        for(int i = 0; i < 30; i++) {
            props = new HashMap<>();
            props.put("propInt", i);
            props.put("propStr", String.valueOf(i));
            List<Integer> orderByArray = new ArrayList<Integer>();
            Map<String, String> orderByObject = new HashMap<>();
            for (int k = 0; k < 3; k++) {
                orderByArray.add(k + i);
                orderByObject.put("key1", String.valueOf(i));
                orderByObject.put("key2", String.valueOf(orderByArray.get(k)));
            }
            props.put("propArray", orderByArray);
            props.put("propObject", orderByObject);
            keyValuePropsList.add(props);
            // Rotate the JSON type of propMixed so ORDER BY sees mixed-type values.
            switch (i % 5) {
                case 0: props.put("propMixed", i); break;
                case 1: props.put("propMixed", String.valueOf(i)); break;
                case 2: props.put("propMixed", orderByArray); break;
                case 3: props.put("propMixed", orderByObject); break;
                case 4: props.put("propMixed", (float)i*3.17); break;
                default: break;
            }
        }

        // One document with none of the ORDER BY properties (the "undefined" case).
        props = new HashMap<>();
        keyValuePropsList.add(props);

        createdDocuments = bulkInsert(createdCollection, keyValuePropsList);

        for(int i = 0; i < 10; i++) {
            Map<String, Object> p = new HashMap<>();
            p.put("propScopedPartitionInt", i);
            InternalObjectNode doc = getDocumentDefinition("duplicateParitionKeyValue", UUID.randomUUID().toString(), p);
            // NOTE(review): these options are created but never passed anywhere — dead local.
            CosmosItemRequestOptions options = new CosmosItemRequestOptions();
            createdDocuments.add(createDocument(createdCollection, doc));
        }

        numberOfPartitions = CosmosBridgeInternal.getAsyncDocumentClient(client)
            .readPartitionKeyRanges("dbs/" + createdDatabase.getId() + "/colls/" + createdCollection.getId(), null)
            .flatMap(p -> Flux.fromIterable(p.getResults())).collectList().single().block().size();

        waitIfNeededForReplicasToCatchUp(getClientBuilder());
        updateCollectionIndex();
    }

    /** Ensures /propMixed/? is an included index path so ORDER BY r.propMixed is served. */
    private void updateCollectionIndex() {
        CosmosContainerProperties containerProperties = createdCollection.read().block().getProperties();
        IndexingPolicy indexingPolicy = containerProperties.getIndexingPolicy();
        List<IncludedPath> includedPaths = indexingPolicy.getIncludedPaths();
        IncludedPath includedPath = new IncludedPath("/propMixed/?");
        if (!includedPaths.contains(includedPath)) {
            includedPaths.add(includedPath);
            indexingPolicy.setIncludedPaths(includedPaths);
            containerProperties.setIndexingPolicy(indexingPolicy);
            createdCollection.replace(containerProperties).block();
        }
    }

    @AfterClass(groups = { "simple" }, timeOut = SHUTDOWN_TIMEOUT, alwaysRun = true)
    public void afterClass() {
        safeClose(client);
    }

    /**
     * Starts the query from a deliberately bogus continuation token and expects a
     * CosmosException.
     * NOTE(review): requestContinuation is never reassigned, so the do/while runs exactly
     * once; pageSize and expectedIds parameters are unused — likely copy/paste residue.
     */
    private void assertInvalidContinuationToken(String query, int[] pageSize, List<String> expectedIds) {
        String requestContinuation = null;
        do {
            CosmosQueryRequestOptions options = new CosmosQueryRequestOptions();
            options.setMaxDegreeOfParallelism(2);
            OrderByContinuationToken orderByContinuationToken = new OrderByContinuationToken(
                new CompositeContinuationToken(
                    "asdf",
                    new Range<String>("A", "D", false, true)),
                new QueryItem[] {new QueryItem("{\"item\" : 42}")},
                "rid",
                false);
            CosmosPagedFlux<InternalObjectNode> queryObservable = createdCollection.queryItems(query, options, InternalObjectNode.class);
            TestSubscriber<FeedResponse<InternalObjectNode>> testSubscriber = new TestSubscriber<>();
            queryObservable.byPage(orderByContinuationToken.toString(),1).subscribe(testSubscriber);
            testSubscriber.awaitTerminalEvent(TIMEOUT, TimeUnit.MILLISECONDS);
            testSubscriber.assertError(CosmosException.class);
        } while (requestContinuation != null);
    }

    /** Drains the query at each page size and checks the combined ids match exactly. */
    private void queryWithContinuationTokensAndPageSizes(String query, int[] pageSizes, List<String> expectedIds) {
        for (int pageSize : pageSizes) {
            List<InternalObjectNode> receivedDocuments = this.queryWithContinuationTokens(query, pageSize);
            List<String> actualIds = new ArrayList<String>();
            for (InternalObjectNode document : receivedDocuments) {
                actualIds.add(document.getResourceId());
            }
            assertThat(actualIds).containsExactlyElementsOf(expectedIds);
        }
    }

    /**
     * Executes the query one page at a time, restarting from each page's continuation
     * token, until the token is null; returns all documents in received order.
     */
    private List<InternalObjectNode> queryWithContinuationTokens(String query, int pageSize) {
        String requestContinuation = null;
        // NOTE(review): collected but never read — kept only as a debugging aid, it seems.
        List<String> continuationTokens = new ArrayList<String>();
        List<InternalObjectNode> receivedDocuments = new ArrayList<InternalObjectNode>();
        do {
            CosmosQueryRequestOptions options = new CosmosQueryRequestOptions();
            options.setMaxDegreeOfParallelism(2);
            CosmosPagedFlux<InternalObjectNode> queryObservable = createdCollection.queryItems(query, options, InternalObjectNode.class);
            TestSubscriber<FeedResponse<InternalObjectNode>> testSubscriber = new TestSubscriber<>();
            // byPage with a continuation resumes; only the first emitted page is consumed.
            queryObservable.byPage(requestContinuation, pageSize).subscribe(testSubscriber);
            testSubscriber.awaitTerminalEvent(TIMEOUT, TimeUnit.MILLISECONDS);
            testSubscriber.assertNoErrors();
            testSubscriber.assertComplete();
            @SuppressWarnings("unchecked")
            FeedResponse<InternalObjectNode> firstPage = (FeedResponse<InternalObjectNode>) testSubscriber.getEvents().get(0).get(0);
            requestContinuation = firstPage.getContinuationToken();
            receivedDocuments.addAll(firstPage.getResults());
            continuationTokens.add(requestContinuation);
        } while (requestContinuation != null);
        return receivedDocuments;
    }

    /** Hand-builds the JSON document body (id + mypk always present; null values emitted as JSON null). */
    private static InternalObjectNode getDocumentDefinition(String partitionKey, String id, Map<String, Object> keyValuePair) {
        StringBuilder sb = new StringBuilder();
        sb.append("{\n");
        for(String key: keyValuePair.keySet()) {
            Object val = keyValuePair.get(key);
            sb.append(" ");
            sb.append("\"").append(key).append("\"").append(" :" );
            if (val == null) {
                sb.append("null");
            } else {
                sb.append(toJson(val));
            }
            sb.append(",\n");
        }
        sb.append(String.format(" \"id\": \"%s\",\n", id));
        sb.append(String.format(" \"mypk\": \"%s\"\n", partitionKey));
        sb.append("}");
        return new InternalObjectNode(sb.toString());
    }

    /** Convenience overload: a fresh UUID serves as both id and partition key. */
    private static InternalObjectNode getDocumentDefinition(Map<String, Object> keyValuePair) {
        String uuid = UUID.randomUUID().toString();
        return getDocumentDefinition(uuid, uuid, keyValuePair);
    }

    /** Serializes a value with the shared Jackson mapper; wraps checked failures as unchecked. */
    private static String toJson(Object object){
        try {
            return Utils.getSimpleObjectMapper().writeValueAsString(object);
        } catch (JsonProcessingException e) {
            throw new IllegalStateException(e);
        }
    }
}
class OrderbyDocumentQueryTest extends TestSuiteBase { public static final String PROPTYPE = "proptype"; public static final String PROP_MIXED = "propMixed"; private final double minQueryRequestChargePerPartition = 2.0; private CosmosAsyncClient client; private CosmosAsyncContainer createdCollection; private CosmosAsyncDatabase createdDatabase; private List<InternalObjectNode> createdDocuments = new ArrayList<>(); private int numberOfPartitions; @Factory(dataProvider = "clientBuildersWithDirect") public OrderbyDocumentQueryTest(CosmosClientBuilder clientBuilder) { super(clientBuilder); } @Test(groups = { "simple" }, timeOut = TIMEOUT, dataProvider = "queryMetricsArgProvider") public void queryDocumentsValidateContent(Boolean qmEnabled) throws Exception { InternalObjectNode expectedDocument = createdDocuments.get(0); String query = String.format("SELECT * from root r where r.propStr = '%s'" + " ORDER BY r.propInt" , ModelBridgeInternal.getStringFromJsonSerializable(expectedDocument,"propStr")); CosmosQueryRequestOptions options = new CosmosQueryRequestOptions(); if (qmEnabled != null) { options.setQueryMetricsEnabled(qmEnabled); } CosmosPagedFlux<InternalObjectNode> queryObservable = createdCollection.queryItems(query, options, InternalObjectNode.class); List<String> expectedResourceIds = new ArrayList<>(); expectedResourceIds.add(expectedDocument.getResourceId()); Map<String, ResourceValidator<InternalObjectNode>> resourceIDToValidator = new HashMap<>(); resourceIDToValidator.put(expectedDocument.getResourceId(), new ResourceValidator.Builder<InternalObjectNode>().areEqual(expectedDocument).build()); FeedResponseListValidator<InternalObjectNode> validator = new FeedResponseListValidator.Builder<InternalObjectNode>() .numberOfPages(1) .containsExactly(expectedResourceIds) .validateAllResources(resourceIDToValidator) .totalRequestChargeIsAtLeast(numberOfPartitions * minQueryRequestChargePerPartition) .allPagesSatisfy(new 
FeedResponseValidator.Builder<InternalObjectNode>().hasRequestChargeHeader().build()) .hasValidQueryMetrics(qmEnabled) .build(); validateQuerySuccess(queryObservable.byPage(), validator); } @Test(groups = { "simple" }, timeOut = TIMEOUT) public void queryDocuments_NoResults() throws Exception { String query = "SELECT * from root r where r.id = '2' ORDER BY r.propInt"; CosmosQueryRequestOptions options = new CosmosQueryRequestOptions(); CosmosPagedFlux<InternalObjectNode> queryObservable = createdCollection.queryItems(query, options, InternalObjectNode.class); FeedResponseListValidator<InternalObjectNode> validator = new FeedResponseListValidator.Builder<InternalObjectNode>() .containsExactly(new ArrayList<>()) .numberOfPages(1) .totalRequestChargeIsAtLeast(numberOfPartitions * minQueryRequestChargePerPartition) .allPagesSatisfy(new FeedResponseValidator.Builder<InternalObjectNode>() .hasRequestChargeHeader().build()) .build(); validateQuerySuccess(queryObservable.byPage(), validator); } @DataProvider(name = "sortOrder") public Object[][] sortOrder() { return new Object[][] { { "ASC" }, {"DESC"} }; } @Test(groups = { "simple" }, timeOut = TIMEOUT, dataProvider = "sortOrder") public void queryOrderBy(String sortOrder) throws Exception { String query = String.format("SELECT * FROM r ORDER BY r.propInt %s", sortOrder); CosmosQueryRequestOptions options = new CosmosQueryRequestOptions(); int pageSize = 3; CosmosPagedFlux<InternalObjectNode> queryObservable = createdCollection.queryItems(query, options, InternalObjectNode.class); Comparator<Integer> validatorComparator = Comparator.nullsFirst(Comparator.<Integer>naturalOrder()); List<String> expectedResourceIds = sortDocumentsAndCollectResourceIds("propInt", d -> ModelBridgeInternal.getIntFromJsonSerializable(d,"propInt"), validatorComparator); if ("DESC".equals(sortOrder)) { Collections.reverse(expectedResourceIds); } int expectedPageSize = expectedNumberOfPages(expectedResourceIds.size(), pageSize); 
FeedResponseListValidator<InternalObjectNode> validator = new FeedResponseListValidator.Builder<InternalObjectNode>() .containsExactly(expectedResourceIds) .numberOfPages(expectedPageSize) .allPagesSatisfy(new FeedResponseValidator.Builder<InternalObjectNode>() .hasRequestChargeHeader().build()) .totalRequestChargeIsAtLeast(numberOfPartitions * minQueryRequestChargePerPartition) .build(); validateQuerySuccess(queryObservable.byPage(pageSize), validator); } @Test(groups = {"simple"}, timeOut = TIMEOUT, dataProvider = "sortOrder") public void queryOrderByWithValue(String sortOrder) throws Exception { String query = String.format("SELECT value r.propInt FROM r ORDER BY r.propInt %s", sortOrder); CosmosQueryRequestOptions options = new CosmosQueryRequestOptions(); int pageSize = 3; CosmosPagedFlux<Integer> queryObservable = createdCollection.queryItems(query, options, Integer.class); Comparator<Integer> validatorComparator = Comparator.nullsFirst(Comparator.<Integer>naturalOrder()); List<Integer> expectedValues = sortDocumentsAndCollectValues("propInt", d -> ModelBridgeInternal .getIntFromJsonSerializable(d, "propInt"), validatorComparator); if ("DESC".equals(sortOrder)) { Collections.reverse(expectedValues); } int expectedPageSize = expectedNumberOfPages(expectedValues.size(), pageSize); FeedResponseListValidator<Integer> validator = new FeedResponseListValidator.Builder<Integer>() .containsExactlyValues(expectedValues) .numberOfPages(expectedPageSize) .allPagesSatisfy(new FeedResponseValidator.Builder<Integer>() .hasRequestChargeHeader().build()) .totalRequestChargeIsAtLeast(numberOfPartitions * minQueryRequestChargePerPartition) .build(); validateQuerySuccess(queryObservable.byPage(pageSize), validator); } @Test(groups = { "simple" }, timeOut = TIMEOUT) public void queryOrderByInt() throws Exception { String query = "SELECT * FROM r ORDER BY r.propInt"; CosmosQueryRequestOptions options = new CosmosQueryRequestOptions(); int pageSize = 3; 
CosmosPagedFlux<InternalObjectNode> queryObservable = createdCollection.queryItems(query, options, InternalObjectNode.class); Comparator<Integer> validatorComparator = Comparator.nullsFirst(Comparator.<Integer>naturalOrder()); List<String> expectedResourceIds = sortDocumentsAndCollectResourceIds("propInt", d -> ModelBridgeInternal.getIntFromJsonSerializable(d,"propInt"), validatorComparator); int expectedPageSize = expectedNumberOfPages(expectedResourceIds.size(), pageSize); FeedResponseListValidator<InternalObjectNode> validator = new FeedResponseListValidator.Builder<InternalObjectNode>() .containsExactly(expectedResourceIds) .numberOfPages(expectedPageSize) .allPagesSatisfy(new FeedResponseValidator.Builder<InternalObjectNode>() .hasRequestChargeHeader().build()) .totalRequestChargeIsAtLeast(numberOfPartitions * minQueryRequestChargePerPartition) .build(); validateQuerySuccess(queryObservable.byPage(pageSize), validator); } @Test(groups = { "simple" }, timeOut = TIMEOUT) public void queryOrderByString() throws Exception { String query = "SELECT * FROM r ORDER BY r.propStr"; CosmosQueryRequestOptions options = new CosmosQueryRequestOptions(); int pageSize = 3; CosmosPagedFlux<InternalObjectNode> queryObservable = createdCollection.queryItems(query, options, InternalObjectNode.class); Comparator<String> validatorComparator = Comparator.nullsFirst(Comparator.<String>naturalOrder()); List<String> expectedResourceIds = sortDocumentsAndCollectResourceIds("propStr", d -> ModelBridgeInternal.getStringFromJsonSerializable(d,"propStr"), validatorComparator); int expectedPageSize = expectedNumberOfPages(expectedResourceIds.size(), pageSize); FeedResponseListValidator<InternalObjectNode> validator = new FeedResponseListValidator.Builder<InternalObjectNode>() .containsExactly(expectedResourceIds) .numberOfPages(expectedPageSize) .allPagesSatisfy(new FeedResponseValidator.Builder<InternalObjectNode>() .hasRequestChargeHeader().build()) 
.totalRequestChargeIsAtLeast(numberOfPartitions * minQueryRequestChargePerPartition) .build(); validateQuerySuccess(queryObservable.byPage(pageSize), validator); } @Test(groups = {"simple"}, timeOut = TIMEOUT, dataProvider = "sortOrder") private List<PartitionKeyRange> getPartitionKeyRanges( String containerId, AsyncDocumentClient asyncDocumentClient) { List<PartitionKeyRange> partitionKeyRanges = new ArrayList<>(); List<FeedResponse<PartitionKeyRange>> partitionFeedResponseList = asyncDocumentClient .readPartitionKeyRanges("/dbs/" + createdDatabase.getId() + "/colls/" + containerId, new CosmosQueryRequestOptions()) .collectList().block(); partitionFeedResponseList.forEach(f -> partitionKeyRanges.addAll(f.getResults())); return partitionKeyRanges; } @DataProvider(name = "topValue") public Object[][] topValueParameter() { return new Object[][] { { 0 }, { 1 }, { 5 }, { createdDocuments.size() - 1 }, { createdDocuments.size() }, { createdDocuments.size() + 1 }, { 2 * createdDocuments.size() } }; } @Test(groups = { "simple" }, timeOut = TIMEOUT, dataProvider = "topValue") public void queryOrderWithTop(int topValue) throws Exception { String query = String.format("SELECT TOP %d * FROM r ORDER BY r.propInt", topValue); CosmosQueryRequestOptions options = new CosmosQueryRequestOptions(); int pageSize = 3; CosmosPagedFlux<InternalObjectNode> queryObservable = createdCollection.queryItems(query, options, InternalObjectNode.class); Comparator<Integer> validatorComparator = Comparator.nullsFirst(Comparator.<Integer>naturalOrder()); List<String> expectedResourceIds = sortDocumentsAndCollectResourceIds("propInt", d -> ModelBridgeInternal.getIntFromJsonSerializable(d,"propInt"), validatorComparator) .stream().limit(topValue).collect(Collectors.toList()); int expectedPageSize = expectedNumberOfPages(expectedResourceIds.size(), pageSize); FeedResponseListValidator<InternalObjectNode> validator = new FeedResponseListValidator.Builder<InternalObjectNode>() 
.containsExactly(expectedResourceIds) .numberOfPages(expectedPageSize) .allPagesSatisfy(new FeedResponseValidator.Builder<InternalObjectNode>() .hasRequestChargeHeader().build()) .totalRequestChargeIsAtLeast(numberOfPartitions * (topValue > 0 ? minQueryRequestChargePerPartition : 1)) .build(); validateQuerySuccess(queryObservable.byPage(pageSize), validator); } private <T> List<String> sortDocumentsAndCollectResourceIds(String propName, Function<InternalObjectNode, T> extractProp, Comparator<T> comparer) { return createdDocuments.stream() .filter(d -> ModelBridgeInternal.getMapFromJsonSerializable(d).containsKey(propName)) .sorted((d1, d2) -> comparer.compare(extractProp.apply(d1), extractProp.apply(d2))) .map(Resource::getResourceId).collect(Collectors.toList()); } @SuppressWarnings("unchecked") private <T> List<T> sortDocumentsAndCollectValues(String propName, Function<InternalObjectNode, T> extractProp, Comparator<T> comparer) { return createdDocuments.stream() .filter(d -> ModelBridgeInternal.getMapFromJsonSerializable(d).containsKey(propName)) .sorted((d1, d2) -> comparer.compare(extractProp.apply(d1), extractProp.apply(d2))) .map(d -> (T)ModelBridgeInternal.getMapFromJsonSerializable(d).get(propName)) .collect(Collectors.toList()); } @Test(groups = { "simple" }, timeOut = TIMEOUT) public void queryScopedToSinglePartition_StartWithContinuationToken() throws Exception { String query = "SELECT * FROM r ORDER BY r.propScopedPartitionInt ASC"; CosmosQueryRequestOptions options = new CosmosQueryRequestOptions(); options.setPartitionKey(new PartitionKey("duplicateParitionKeyValue")); CosmosPagedFlux<InternalObjectNode> queryObservable = createdCollection.queryItems(query, options, InternalObjectNode.class); TestSubscriber<FeedResponse<InternalObjectNode>> subscriber = new TestSubscriber<>(); queryObservable.byPage(3).take(1).subscribe(subscriber); subscriber.awaitTerminalEvent(); subscriber.assertComplete(); subscriber.assertNoErrors(); 
assertThat(subscriber.valueCount()).isEqualTo(1); @SuppressWarnings("unchecked") FeedResponse<InternalObjectNode> page = (FeedResponse<InternalObjectNode>) subscriber.getEvents().get(0).get(0); assertThat(page.getResults()).hasSize(3); assertThat(page.getContinuationToken()).isNotEmpty(); queryObservable = createdCollection.queryItems(query, options, InternalObjectNode.class); List<InternalObjectNode> expectedDocs = createdDocuments.stream() .filter(d -> (StringUtils.equals("duplicateParitionKeyValue", ModelBridgeInternal.getStringFromJsonSerializable(d,"mypk")))) .filter(d -> (ModelBridgeInternal.getIntFromJsonSerializable(d,"propScopedPartitionInt") > 2)).collect(Collectors.toList()); Integer maxItemCount = ModelBridgeInternal.getMaxItemCountFromQueryRequestOptions(options); int expectedPageSize = (expectedDocs.size() + maxItemCount - 1) / maxItemCount; assertThat(expectedDocs).hasSize(10 - 3); FeedResponseListValidator<InternalObjectNode> validator = null; validator = new FeedResponseListValidator.Builder<InternalObjectNode>() .containsExactly(expectedDocs.stream() .sorted((e1, e2) -> Integer.compare(ModelBridgeInternal.getIntFromJsonSerializable(e1,"propScopedPartitionInt"), ModelBridgeInternal.getIntFromJsonSerializable(e2,"propScopedPartitionInt"))) .map(d -> d.getResourceId()).collect(Collectors.toList())) .numberOfPages(expectedPageSize) .allPagesSatisfy(new FeedResponseValidator.Builder<InternalObjectNode>() .requestChargeGreaterThanOrEqualTo(1.0).build()) .build(); validateQuerySuccess(queryObservable.byPage(page.getContinuationToken()), validator); } @Test(groups = { "simple" }, timeOut = TIMEOUT) public void orderByContinuationTokenRoundTrip() throws Exception { { OrderByContinuationToken orderByContinuationToken = new OrderByContinuationToken( new CompositeContinuationToken( "asdf", new Range<String>("A", "D", false, true)), new QueryItem[] {new QueryItem("{\"item\" : 42}")}, "rid", false); String serialized = orderByContinuationToken.toString(); 
ValueHolder<OrderByContinuationToken> outOrderByContinuationToken = new ValueHolder<OrderByContinuationToken>(); assertThat(OrderByContinuationToken.tryParse(serialized, outOrderByContinuationToken)).isTrue(); OrderByContinuationToken deserialized = outOrderByContinuationToken.v; CompositeContinuationToken compositeContinuationToken = deserialized.getCompositeContinuationToken(); String token = compositeContinuationToken.getToken(); Range<String> range = compositeContinuationToken.getRange(); assertThat(token).isEqualTo("asdf"); assertThat(range.getMin()).isEqualTo("A"); assertThat(range.getMax()).isEqualTo("D"); assertThat(range.isMinInclusive()).isEqualTo(false); assertThat(range.isMaxInclusive()).isEqualTo(true); QueryItem[] orderByItems = deserialized.getOrderByItems(); assertThat(orderByItems).isNotNull(); assertThat(orderByItems.length).isEqualTo(1); assertThat(orderByItems[0].getItem()).isEqualTo(42); String rid = deserialized.getRid(); assertThat(rid).isEqualTo("rid"); boolean inclusive = deserialized.getInclusive(); assertThat(inclusive).isEqualTo(false); } { ValueHolder<OrderByContinuationToken> outOrderByContinuationToken = new ValueHolder<OrderByContinuationToken>(); assertThat(OrderByContinuationToken.tryParse("{\"property\" : \"Not a valid Order By Token\"}", outOrderByContinuationToken)).isFalse(); } } @Test(groups = { "simple" }, timeOut = TIMEOUT * 10, dataProvider = "sortOrder", retryAnalyzer = RetryAnalyzer.class) public void queryDocumentsWithOrderByContinuationTokensInteger(String sortOrder) throws Exception { String query = String.format("SELECT * FROM c ORDER BY c.propInt %s", sortOrder); Comparator<Integer> order = sortOrder.equals("ASC")?Comparator.naturalOrder():Comparator.reverseOrder(); Comparator<Integer> validatorComparator = Comparator.nullsFirst(order); List<String> expectedResourceIds = sortDocumentsAndCollectResourceIds("propInt", d -> ModelBridgeInternal.getIntFromJsonSerializable(d,"propInt"), validatorComparator); 
this.queryWithContinuationTokensAndPageSizes(query, new int[] { 1, 5, 10, 100}, expectedResourceIds); } @Test(groups = { "simple" }, timeOut = TIMEOUT * 10, dataProvider = "sortOrder") public void queryDocumentsWithOrderByContinuationTokensString(String sortOrder) throws Exception { String query = String.format("SELECT * FROM c ORDER BY c.id %s", sortOrder); Comparator<String> order = sortOrder.equals("ASC")?Comparator.naturalOrder():Comparator.reverseOrder(); Comparator<String> validatorComparator = Comparator.nullsFirst(order); List<String> expectedResourceIds = sortDocumentsAndCollectResourceIds("id", d -> ModelBridgeInternal.getStringFromJsonSerializable(d,"id"), validatorComparator); this.queryWithContinuationTokensAndPageSizes(query, new int[] { 1, 5, 10, 100 }, expectedResourceIds); } @Test(groups = { "simple" }, timeOut = TIMEOUT * 10, dataProvider = "sortOrder") public void queryDocumentsWithInvalidOrderByContinuationTokensString(String sortOrder) throws Exception { String query = String.format("SELECT * FROM c ORDER BY c.id %s", sortOrder); Comparator<String> validatorComparator; if(sortOrder.equals("ASC")) { validatorComparator = Comparator.nullsFirst(Comparator.<String>naturalOrder()); }else{ validatorComparator = Comparator.nullsFirst(Comparator.<String>reverseOrder()); } List<String> expectedResourceIds = sortDocumentsAndCollectResourceIds("id", d -> ModelBridgeInternal.getStringFromJsonSerializable(d,"id"), validatorComparator); this.assertInvalidContinuationToken(query, new int[] { 1, 5, 10, 100 }, expectedResourceIds); } @Test(groups = {"simple"}, timeOut = TIMEOUT, dataProvider = "sortOrder") public void queryOrderByArray(String sortOrder) throws Exception { String query = String.format("SELECT * FROM r ORDER BY r.propArray %s", sortOrder); int pageSize = 3; List<InternalObjectNode> results1 = this.queryWithContinuationTokens(query, pageSize); List<InternalObjectNode> results2 = this.queryWithContinuationTokens(query, 
this.createdDocuments.size()); assertThat(results1.stream().map(r -> r.getResourceId()).collect(Collectors.toList())) .containsExactlyElementsOf(results2.stream().limit(pageSize).map(r -> r.getResourceId()).collect(Collectors.toList())); } @Test(groups = {"simple"}, timeOut = TIMEOUT, dataProvider = "sortOrder") public void queryOrderByObject(String sortOrder) throws Exception { String query = String.format("SELECT * FROM r ORDER BY r.propObject %s", sortOrder); int pageSize = 3; List<InternalObjectNode> results1 = this.queryWithContinuationTokens(query, pageSize); List<InternalObjectNode> results2 = this.queryWithContinuationTokens(query, this.createdDocuments.size()); assertThat(results1.stream().map(r -> r.getResourceId()).collect(Collectors.toList())) .containsExactlyElementsOf(results2.stream().limit(pageSize).map(r -> r.getResourceId()).collect(Collectors.toList())); } public InternalObjectNode createDocument(CosmosAsyncContainer cosmosContainer, Map<String, Object> keyValueProps) { InternalObjectNode docDefinition = getDocumentDefinition(keyValueProps); return BridgeInternal.getProperties(cosmosContainer.createItem(docDefinition).block()); } public List<InternalObjectNode> bulkInsert(CosmosAsyncContainer cosmosContainer, List<Map<String, Object>> keyValuePropsList) { ArrayList<InternalObjectNode> result = new ArrayList<InternalObjectNode>(); for(Map<String, Object> keyValueProps: keyValuePropsList) { InternalObjectNode docDefinition = getDocumentDefinition(keyValueProps); result.add(docDefinition); } return bulkInsertBlocking(cosmosContainer, result); } @BeforeMethod(groups = { "simple" }) public void beforeMethod() throws Exception { TimeUnit.SECONDS.sleep(10); } @BeforeClass(groups = { "simple" }, timeOut = 4 * SETUP_TIMEOUT) public void before_OrderbyDocumentQueryTest() throws Exception { client = getClientBuilder().buildAsyncClient(); createdDatabase = getSharedCosmosDatabase(client); createdCollection = getSharedMultiPartitionCosmosContainer(client); 
truncateCollection(createdCollection); List<Map<String, Object>> keyValuePropsList = new ArrayList<>(); Map<String, Object> props; boolean flag = false; for(int i = 0; i < 30; i++) { props = new HashMap<>(); props.put("propInt", i); props.put("propStr", String.valueOf(i)); List<Integer> orderByArray = new ArrayList<Integer>(); Map<String, String> orderByObject = new HashMap<>(); for (int k = 0; k < 3; k++) { orderByArray.add(k + i); orderByObject.put("key1", String.valueOf(i)); orderByObject.put("key2", String.valueOf(orderByArray.get(k))); } props.put("propArray", orderByArray); props.put("propObject", orderByObject); switch (i % 8) { case 0: props.put(PROP_MIXED, i); props.put(PROPTYPE, "number"); break; case 1: props.put(PROP_MIXED, String.valueOf(i)); props.put(PROPTYPE, "string"); break; case 2: props.put(PROP_MIXED, orderByArray); props.put(PROPTYPE, "array"); break; case 3: props.put(PROP_MIXED, orderByObject); props.put(PROPTYPE, "object"); break; case 4: props.put(PROP_MIXED, (float)i*3.17); props.put(PROPTYPE, "number"); break; case 5: props.put(PROP_MIXED, null); props.put(PROPTYPE, "null"); break; case 6: flag = !flag; props.put(PROP_MIXED, flag); props.put(PROPTYPE, "boolean"); break; default: break; } keyValuePropsList.add(props); } props = new HashMap<>(); keyValuePropsList.add(props); createdDocuments = bulkInsert(createdCollection, keyValuePropsList); for(int i = 0; i < 10; i++) { Map<String, Object> p = new HashMap<>(); p.put("propScopedPartitionInt", i); InternalObjectNode doc = getDocumentDefinition("duplicateParitionKeyValue", UUID.randomUUID().toString(), p); CosmosItemRequestOptions options = new CosmosItemRequestOptions(); createdDocuments.add(createDocument(createdCollection, doc)); } numberOfPartitions = CosmosBridgeInternal.getAsyncDocumentClient(client) .readPartitionKeyRanges("dbs/" + createdDatabase.getId() + "/colls/" + createdCollection.getId(), null) .flatMap(p -> 
Flux.fromIterable(p.getResults())).collectList().single().block().size(); waitIfNeededForReplicasToCatchUp(getClientBuilder()); updateCollectionIndex(); } private void updateCollectionIndex() { CosmosContainerProperties containerProperties = createdCollection.read().block().getProperties(); IndexingPolicy indexingPolicy = containerProperties.getIndexingPolicy(); List<IncludedPath> includedPaths = indexingPolicy.getIncludedPaths(); IncludedPath includedPath = new IncludedPath("/propMixed/?"); if (!includedPaths.contains(includedPath)) { includedPaths.add(includedPath); indexingPolicy.setIncludedPaths(includedPaths); containerProperties.setIndexingPolicy(indexingPolicy); createdCollection.replace(containerProperties).block(); } } @AfterClass(groups = { "simple" }, timeOut = SHUTDOWN_TIMEOUT, alwaysRun = true) public void afterClass() { safeClose(client); } private void assertInvalidContinuationToken(String query, int[] pageSize, List<String> expectedIds) { String requestContinuation = null; do { CosmosQueryRequestOptions options = new CosmosQueryRequestOptions(); options.setMaxDegreeOfParallelism(2); OrderByContinuationToken orderByContinuationToken = new OrderByContinuationToken( new CompositeContinuationToken( "asdf", new Range<String>("A", "D", false, true)), new QueryItem[] {new QueryItem("{\"item\" : 42}")}, "rid", false); CosmosPagedFlux<InternalObjectNode> queryObservable = createdCollection.queryItems(query, options, InternalObjectNode.class); TestSubscriber<FeedResponse<InternalObjectNode>> testSubscriber = new TestSubscriber<>(); queryObservable.byPage(orderByContinuationToken.toString(),1).subscribe(testSubscriber); testSubscriber.awaitTerminalEvent(TIMEOUT, TimeUnit.MILLISECONDS); testSubscriber.assertError(CosmosException.class); } while (requestContinuation != null); } private void queryWithContinuationTokensAndPageSizes(String query, int[] pageSizes, List<String> expectedIds) { for (int pageSize : pageSizes) { List<InternalObjectNode> receivedDocuments 
= this.queryWithContinuationTokens(query, pageSize); List<String> actualIds = new ArrayList<String>(); for (InternalObjectNode document : receivedDocuments) { actualIds.add(document.getResourceId()); } assertThat(actualIds).containsExactlyElementsOf(expectedIds); } } private List<InternalObjectNode> queryWithContinuationTokens(String query, int pageSize) { String requestContinuation = null; List<String> continuationTokens = new ArrayList<String>(); List<InternalObjectNode> receivedDocuments = new ArrayList<InternalObjectNode>(); do { CosmosQueryRequestOptions options = new CosmosQueryRequestOptions(); options.setMaxDegreeOfParallelism(2); CosmosPagedFlux<InternalObjectNode> queryObservable = createdCollection.queryItems(query, options, InternalObjectNode.class); TestSubscriber<FeedResponse<InternalObjectNode>> testSubscriber = new TestSubscriber<>(); queryObservable.byPage(requestContinuation, pageSize).subscribe(testSubscriber); testSubscriber.awaitTerminalEvent(TIMEOUT, TimeUnit.MILLISECONDS); testSubscriber.assertNoErrors(); testSubscriber.assertComplete(); @SuppressWarnings("unchecked") FeedResponse<InternalObjectNode> firstPage = (FeedResponse<InternalObjectNode>) testSubscriber.getEvents().get(0).get(0); requestContinuation = firstPage.getContinuationToken(); receivedDocuments.addAll(firstPage.getResults()); continuationTokens.add(requestContinuation); } while (requestContinuation != null); return receivedDocuments; } private static InternalObjectNode getDocumentDefinition(String partitionKey, String id, Map<String, Object> keyValuePair) { StringBuilder sb = new StringBuilder(); sb.append("{\n"); for(String key: keyValuePair.keySet()) { Object val = keyValuePair.get(key); sb.append(" "); sb.append("\"").append(key).append("\"").append(" :" ); if (val == null) { sb.append("null"); } else { sb.append(toJson(val)); } sb.append(",\n"); } sb.append(String.format(" \"id\": \"%s\",\n", id)); sb.append(String.format(" \"mypk\": \"%s\"\n", partitionKey)); 
sb.append("}"); return new InternalObjectNode(sb.toString()); } private static InternalObjectNode getDocumentDefinition(Map<String, Object> keyValuePair) { String uuid = UUID.randomUUID().toString(); return getDocumentDefinition(uuid, uuid, keyValuePair); } private static String toJson(Object object){ try { return Utils.getSimpleObjectMapper().writeValueAsString(object); } catch (JsonProcessingException e) { throw new IllegalStateException(e); } } }
Is it possible for this to return null?
public Mono<Boolean> onChallenge(HttpPipelineCallContext context, HttpResponse response) { return Mono.defer(() -> { String authHeader = response.getHeaderValue(WWW_AUTHENTICATE); if (response.getStatusCode() == 401 && authHeader != null) { List<AuthenticationChallenge> challenges = parseChallenges(authHeader); for (AuthenticationChallenge authenticationChallenge : challenges) { Map<String, String> extractedChallengeParams = parseChallengeParams(authenticationChallenge.getChallengeParameters()); if (extractedChallengeParams.containsKey(CLAIMS_PARAMETER)) { String claims = new String(Base64.getUrlDecoder() .decode(extractedChallengeParams.get(CLAIMS_PARAMETER)), StandardCharsets.UTF_8); String[] scopes; try { scopes = (String[]) context.getData(ARM_SCOPES_KEY).get(); } catch (NoSuchElementException e) { scopes = this.scopes; } if (scopes == null || scopes.length == 0) { scopes = new String[1]; scopes[0] = ARMScopeHelper.getDefaultScopeFromRequest( context.getHttpRequest(), environment); } return authorizeRequest(context, new TokenRequestContext() .addScopes(scopes).setClaims(claims)) .flatMap(b -> Mono.just(true)); } } } return Mono.just(false); }); }
parseChallengeParams(authenticationChallenge.getChallengeParameters());
public Mono<Boolean> onChallenge(HttpPipelineCallContext context, HttpResponse response) { return Mono.defer(() -> { String authHeader = response.getHeaderValue(WWW_AUTHENTICATE); if (!(response.getStatusCode() == 401 && authHeader != null)) { return Mono.just(false); } else { List<AuthenticationChallenge> challenges = parseChallenges(authHeader); for (AuthenticationChallenge authenticationChallenge : challenges) { Map<String, String> extractedChallengeParams = parseChallengeParams(authenticationChallenge.getChallengeParameters()); if (extractedChallengeParams.containsKey(CLAIMS_PARAMETER)) { String claims = new String(Base64.getUrlDecoder() .decode(extractedChallengeParams.get(CLAIMS_PARAMETER)), StandardCharsets.UTF_8); String[] scopes; try { scopes = (String[]) context.getData(ARM_SCOPES_KEY).get(); } catch (NoSuchElementException e) { scopes = this.scopes; } scopes = getScopes(context, scopes); return authorizeRequest(context, new TokenRequestContext() .addScopes(scopes).setClaims(claims)) .flatMap(b -> Mono.just(true)); } } return Mono.just(false); } }); }
class ARMChallengeAuthenticationPolicy extends BearerTokenAuthenticationChallengePolicy { private static final Pattern AUTHENTICATION_CHALLENGE_PATTERN = Pattern.compile("(\\w+) ((?:\\w+=\".*?\"(?:, )?)+)(?:, )?"); private static final Pattern AUTHENTICATION_CHALLENGE_PARAMS_PATTERN = Pattern.compile("(?:(\\w+)=\"([^\"\"]*)\")+"); private static final String CLAIMS_PARAMETER = "claims"; private final String[] scopes; private final AzureEnvironment environment; private static final String ARM_SCOPES_KEY = "ARMScopes"; /** * Creates ARMChallengeAuthenticationPolicy. * * @param credential the token credential to authenticate the request * @param environment the environment with endpoints for authentication * @param scopes the scopes used in credential, using default scopes when empty */ public ARMChallengeAuthenticationPolicy(TokenCredential credential, AzureEnvironment environment, String... scopes) { super(credential); this.scopes = scopes; this.environment = environment; } @Override public Mono<Void> onBeforeRequest(HttpPipelineCallContext context) { return Mono.defer(() -> { TokenRequestContext trc; String[] scopes = this.scopes; if (scopes == null || scopes.length == 0) { scopes = new String[1]; scopes[0] = ARMScopeHelper.getDefaultScopeFromRequest( context.getHttpRequest(), environment); } trc = new TokenRequestContext().addScopes(scopes); context.setData(ARM_SCOPES_KEY, scopes); return authorizeRequest(context, trc); }); } @Override List<AuthenticationChallenge> parseChallenges(String header) { Matcher matcher = AUTHENTICATION_CHALLENGE_PATTERN.matcher(header); List<AuthenticationChallenge> challenges = new ArrayList<>(); while (matcher.find()) { challenges.add(new AuthenticationChallenge(matcher.group(1), matcher.group(2))); } return challenges; } Map<String, String> parseChallengeParams(String challengeParams) { Matcher matcher = AUTHENTICATION_CHALLENGE_PARAMS_PATTERN.matcher(challengeParams); Map<String, String> challengeParameters = new HashMap<>(); while 
(matcher.find()) { challengeParameters.put(matcher.group(1), matcher.group(2)); } return challengeParameters; } }
class ARMChallengeAuthenticationPolicy extends BearerTokenAuthenticationChallengePolicy { private static final Pattern AUTHENTICATION_CHALLENGE_PATTERN = Pattern.compile("(\\w+) ((?:\\w+=\".*?\"(?:, )?)+)(?:, )?"); private static final Pattern AUTHENTICATION_CHALLENGE_PARAMS_PATTERN = Pattern.compile("(?:(\\w+)=\"([^\"\"]*)\")+"); private static final String CLAIMS_PARAMETER = "claims"; private final String[] scopes; private final AzureEnvironment environment; private static final String ARM_SCOPES_KEY = "ARMScopes"; /** * Creates ARMChallengeAuthenticationPolicy. * * @param credential the token credential to authenticate the request * @param environment the environment with endpoints for authentication * @param scopes the scopes used in credential, using default scopes when empty */ public ARMChallengeAuthenticationPolicy(TokenCredential credential, AzureEnvironment environment, String... scopes) { super(credential); this.scopes = scopes; this.environment = environment; } @Override public Mono<Void> onBeforeRequest(HttpPipelineCallContext context) { return Mono.defer(() -> { String[] scopes = this.scopes; scopes = getScopes(context, scopes); context.setData(ARM_SCOPES_KEY, scopes); return authorizeRequest(context, new TokenRequestContext().addScopes(scopes)); }); } @Override private String[] getScopes(HttpPipelineCallContext context, String[] scopes) { if (CoreUtils.isNullOrEmpty(scopes)) { scopes = new String[1]; scopes[0] = ARMScopeHelper.getDefaultScopeFromRequest( context.getHttpRequest(), environment); } return scopes; } List<AuthenticationChallenge> parseChallenges(String header) { Matcher matcher = AUTHENTICATION_CHALLENGE_PATTERN.matcher(header); List<AuthenticationChallenge> challenges = new ArrayList<>(); while (matcher.find()) { challenges.add(new AuthenticationChallenge(matcher.group(1), matcher.group(2))); } return challenges; } Map<String, String> parseChallengeParams(String challengeParams) { Matcher matcher = 
AUTHENTICATION_CHALLENGE_PARAMS_PATTERN.matcher(challengeParams); Map<String, String> challengeParameters = new HashMap<>(); while (matcher.find()) { challengeParameters.put(matcher.group(1), matcher.group(2)); } return challengeParameters; } }
Removed ItemComparator and improved the test to verify both the proper grouping of results by type and the sort order within each group.
/**
 * Verifies a cross-partition ORDER BY over a mixed-type property: every created
 * document must come back exactly once. The validator is order-insensitive, so the
 * assertion holds for both ASC and DESC.
 *
 * @param sortOrder "ASC" or "DESC", injected into the ORDER BY clause
 * @throws Exception on query failure
 */
public void queryOrderByMixedTypes(String sortOrder) throws Exception {
    // Precondition: the container must actually span multiple partitions for this
    // test to exercise cross-partition ORDER BY merging.
    List<PartitionKeyRange> partitionKeyRanges = getPartitionKeyRanges(createdCollection.getId(),
        BridgeInternal.getContextClient(this.client));
    assertThat(partitionKeyRanges.size()).isGreaterThan(1);

    // Fix: the format string had no %s, so the sortOrder argument was silently ignored
    // and the query always ran with the default (ascending) order.
    String query = String.format("SELECT r.id, r.propMixed FROM r ORDER BY r.propMixed %s", sortOrder);
    CosmosQueryRequestOptions options = new CosmosQueryRequestOptions();

    List<String> orderedIds = createdDocuments.stream()
        .sorted((o1, o2) -> ItemComparator.getInstance().compare(o1.get("propMixed"), o2.get("propMixed")))
        .map(Resource::getId)
        .collect(Collectors.toList());

    int pageSize = 20;
    CosmosPagedFlux<InternalObjectNode> queryFlux = createdCollection.queryItems(query, options,
        InternalObjectNode.class);
    // Fix: a TestSubscriber used to subscribe here without ever being awaited or
    // asserted, executing the query a second time for no benefit; removed.
    FeedResponseListValidator<InternalObjectNode> validator =
        new FeedResponseListValidator.Builder<InternalObjectNode>()
            .totalSize(orderedIds.size())
            .exactlyContainsIdsInAnyOrder(orderedIds)
            .build();
    validateQuerySuccess(queryFlux.byPage(pageSize), validator);
}
.sorted((o1, o2) -> ItemComparator.getInstance().compare(o1.get("propMixed"),
/**
 * Verifies a cross-partition ORDER BY over a mixed-type property: all documents are
 * returned, results are grouped contiguously by JSON type in the engine's type
 * precedence order, and each type group is internally sorted.
 *
 * @param sortOrder "ASC" or "DESC" (see NOTE below — currently not applied to the query)
 * @throws Exception on query failure
 */
public void queryOrderByMixedTypes(String sortOrder) throws Exception {
    // Precondition: the container must span multiple partitions so the test exercises
    // cross-partition ORDER BY merging.
    List<PartitionKeyRange> partitionKeyRanges = getPartitionKeyRanges(createdCollection.getId(),
        BridgeInternal.getContextClient(this.client));
    assertThat(partitionKeyRanges.size()).isGreaterThan(1);

    // NOTE(review): the format string has no %s, so sortOrder is never applied and both
    // provider rows run the ascending query. Adding %s would require reversing the
    // expected type order and the per-group sort expectations for DESC — flagged for
    // the author rather than changed blindly here.
    String query = String.format("SELECT * FROM r ORDER BY r.propMixed ", sortOrder);
    CosmosQueryRequestOptions options = new CosmosQueryRequestOptions();

    List<String> sourceIds = createdDocuments.stream()
        .map(Resource::getId)
        .collect(Collectors.toList());

    int pageSize = 20;
    CosmosPagedFlux<InternalObjectNode> queryFlux = createdCollection
        .queryItems(query, options, InternalObjectNode.class);
    TestSubscriber<FeedResponse<InternalObjectNode>> subscriber = new TestSubscriber<>();
    queryFlux.byPage(pageSize).subscribe(subscriber);
    subscriber.awaitTerminalEvent();
    subscriber.assertComplete();
    subscriber.assertNoErrors();

    // Flatten all pages, then check every document came back exactly once.
    List<InternalObjectNode> results = new ArrayList<>();
    subscriber.values().forEach(feedResponse -> results.addAll(feedResponse.getResults()));
    assertThat(results.size()).isEqualTo(createdDocuments.size());
    List<String> resultIds = results.stream().map(Resource::getId).collect(Collectors.toList());
    assertThat(resultIds).containsExactlyInAnyOrderElementsOf(sourceIds);

    // Expected type precedence for mixed-type ORDER BY (ascending).
    final List<String> typeList = Arrays.asList("undefined", "null", "boolean", "number",
        "string", "array", "object");

    // Walk the results once and record each type the first time it appears; if a type
    // recurs, it must be the same as the immediately preceding one (contiguous groups).
    List<String> observedTypes = new ArrayList<>();
    results.forEach(item -> {
        String propType = "undefined";
        if (item.has(PROPTYPE)) {
            propType = item.getString(PROPTYPE);
        }
        // Fix: removed leftover System.out.println debug statements.
        if (!observedTypes.contains(propType)) {
            observedTypes.add(propType);
        } else {
            boolean equals = observedTypes.get(observedTypes.size() - 1).equals(propType);
            // Fix: AssertJ's as() must precede the assertion call; placed after isTrue()
            // the description was evaluated too late and never attached.
            assertThat(equals).as("Items of same type should be contiguous").isTrue();
        }
    });
    assertThat(observedTypes).containsExactlyElementsOf(typeList);

    // For each comparable type, verify the group is already sorted: sorting a copy
    // must not change the observed order.
    for (String type : typeList) {
        List<InternalObjectNode> items = results.stream().filter(r -> {
            if ("undefined".equals(type)) {
                return !r.has(PROPTYPE);
            }
            return type.equals(r.getString(PROPTYPE));
        }).collect(Collectors.toList());

        if ("boolean".equals(type)) {
            List<Boolean> sourceList = items.stream()
                .map(n -> n.getBoolean(PROP_MIXED)).collect(Collectors.toList());
            List<Boolean> toBeSortedList = new ArrayList<>(sourceList);
            toBeSortedList.sort(Comparator.comparing(Boolean::booleanValue));
            assertThat(toBeSortedList).containsExactlyElementsOf(sourceList);
        }
        if ("number".equals(type)) {
            List<Number> numberList = items.stream()
                .map(n -> (Number) n.get(PROP_MIXED)).collect(Collectors.toList());
            // Fix: removed redundant Collections.copy — the copy constructor above
            // already duplicates the list.
            List<Number> toBeSortedList = new ArrayList<>(numberList);
            toBeSortedList.sort(Comparator.comparingDouble(Number::doubleValue));
            assertThat(toBeSortedList).containsExactlyElementsOf(numberList);
        }
        if ("string".equals(type)) {
            List<String> sourceList = items.stream()
                .map(n -> n.getString(PROP_MIXED)).collect(Collectors.toList());
            // Fix: removed redundant Collections.copy (same reason as above).
            List<String> toBeSortedList = new ArrayList<>(sourceList);
            toBeSortedList.sort(Comparator.comparing(String::valueOf));
            assertThat(toBeSortedList).containsExactlyElementsOf(sourceList);
        }
    }
}
/**
 * TestNG suite exercising ORDER BY queries against a multi-partition Cosmos container:
 * basic ordering by int/string, TOP, VALUE projections, continuation-token paging,
 * continuation-token round-tripping, and ordering by array/object properties.
 */
class OrderbyDocumentQueryTest extends TestSuiteBase {
    // Lower bound on the request charge a cross-partition query incurs per partition.
    private final double minQueryRequestChargePerPartition = 2.0;
    private CosmosAsyncClient client;
    private CosmosAsyncContainer createdCollection;
    private CosmosAsyncDatabase createdDatabase;
    private List<InternalObjectNode> createdDocuments = new ArrayList<>();
    private int numberOfPartitions;

    @Factory(dataProvider = "clientBuildersWithDirect")
    public OrderbyDocumentQueryTest(CosmosClientBuilder clientBuilder) {
        super(clientBuilder);
    }

    /** Validates content and metrics of a single-result ORDER BY query. */
    @Test(groups = { "simple" }, timeOut = TIMEOUT, dataProvider = "queryMetricsArgProvider")
    public void queryDocumentsValidateContent(Boolean qmEnabled) throws Exception {
        InternalObjectNode expectedDocument = createdDocuments.get(0);
        String query = String.format("SELECT * from root r where r.propStr = '%s'"
            + " ORDER BY r.propInt",
            ModelBridgeInternal.getStringFromJsonSerializable(expectedDocument, "propStr"));

        CosmosQueryRequestOptions options = new CosmosQueryRequestOptions();
        if (qmEnabled != null) {
            options.setQueryMetricsEnabled(qmEnabled);
        }

        CosmosPagedFlux<InternalObjectNode> queryObservable = createdCollection.queryItems(query, options,
            InternalObjectNode.class);

        List<String> expectedResourceIds = new ArrayList<>();
        expectedResourceIds.add(expectedDocument.getResourceId());

        Map<String, ResourceValidator<InternalObjectNode>> resourceIDToValidator = new HashMap<>();
        resourceIDToValidator.put(expectedDocument.getResourceId(),
            new ResourceValidator.Builder<InternalObjectNode>().areEqual(expectedDocument).build());

        FeedResponseListValidator<InternalObjectNode> validator =
            new FeedResponseListValidator.Builder<InternalObjectNode>()
                .numberOfPages(1)
                .containsExactly(expectedResourceIds)
                .validateAllResources(resourceIDToValidator)
                .totalRequestChargeIsAtLeast(numberOfPartitions * minQueryRequestChargePerPartition)
                .allPagesSatisfy(new FeedResponseValidator.Builder<InternalObjectNode>()
                    .hasRequestChargeHeader().build())
                .hasValidQueryMetrics(qmEnabled)
                .build();

        validateQuerySuccess(queryObservable.byPage(), validator);
    }

    /** An ORDER BY query matching nothing must still produce one (empty) page. */
    @Test(groups = { "simple" }, timeOut = TIMEOUT)
    public void queryDocuments_NoResults() throws Exception {
        String query = "SELECT * from root r where r.id = '2' ORDER BY r.propInt";
        CosmosQueryRequestOptions options = new CosmosQueryRequestOptions();
        CosmosPagedFlux<InternalObjectNode> queryObservable = createdCollection.queryItems(query, options,
            InternalObjectNode.class);

        FeedResponseListValidator<InternalObjectNode> validator =
            new FeedResponseListValidator.Builder<InternalObjectNode>()
                .containsExactly(new ArrayList<>())
                .numberOfPages(1)
                .totalRequestChargeIsAtLeast(numberOfPartitions * minQueryRequestChargePerPartition)
                .allPagesSatisfy(new FeedResponseValidator.Builder<InternalObjectNode>()
                    .hasRequestChargeHeader().build())
                .build();
        validateQuerySuccess(queryObservable.byPage(), validator);
    }

    @DataProvider(name = "sortOrder")
    public Object[][] sortOrder() {
        return new Object[][] { { "ASC" }, {"DESC"} };
    }

    /** ORDER BY an int property in both directions, validating exact order and paging. */
    @Test(groups = { "simple" }, timeOut = TIMEOUT, dataProvider = "sortOrder")
    public void queryOrderBy(String sortOrder) throws Exception {
        String query = String.format("SELECT * FROM r ORDER BY r.propInt %s", sortOrder);
        CosmosQueryRequestOptions options = new CosmosQueryRequestOptions();
        int pageSize = 3;
        CosmosPagedFlux<InternalObjectNode> queryObservable = createdCollection.queryItems(query, options,
            InternalObjectNode.class);
        Comparator<Integer> validatorComparator = Comparator.nullsFirst(Comparator.<Integer>naturalOrder());

        List<String> expectedResourceIds = sortDocumentsAndCollectResourceIds("propInt",
            d -> ModelBridgeInternal.getIntFromJsonSerializable(d, "propInt"), validatorComparator);
        if ("DESC".equals(sortOrder)) {
            Collections.reverse(expectedResourceIds);
        }
        int expectedPageSize = expectedNumberOfPages(expectedResourceIds.size(), pageSize);

        FeedResponseListValidator<InternalObjectNode> validator =
            new FeedResponseListValidator.Builder<InternalObjectNode>()
                .containsExactly(expectedResourceIds)
                .numberOfPages(expectedPageSize)
                .allPagesSatisfy(new FeedResponseValidator.Builder<InternalObjectNode>()
                    .hasRequestChargeHeader().build())
                .totalRequestChargeIsAtLeast(numberOfPartitions * minQueryRequestChargePerPartition)
                .build();
        validateQuerySuccess(queryObservable.byPage(pageSize), validator);
    }

    /** SELECT VALUE projection combined with ORDER BY, validating projected values. */
    @Test(groups = {"simple"}, timeOut = TIMEOUT, dataProvider = "sortOrder")
    public void queryOrderByWithValue(String sortOrder) throws Exception {
        String query = String.format("SELECT value r.propInt FROM r ORDER BY r.propInt %s", sortOrder);
        CosmosQueryRequestOptions options = new CosmosQueryRequestOptions();
        int pageSize = 3;
        CosmosPagedFlux<Integer> queryObservable = createdCollection.queryItems(query, options, Integer.class);
        Comparator<Integer> validatorComparator = Comparator.nullsFirst(Comparator.<Integer>naturalOrder());

        List<Integer> expectedValues = sortDocumentsAndCollectValues("propInt",
            d -> ModelBridgeInternal.getIntFromJsonSerializable(d, "propInt"), validatorComparator);
        if ("DESC".equals(sortOrder)) {
            Collections.reverse(expectedValues);
        }
        int expectedPageSize = expectedNumberOfPages(expectedValues.size(), pageSize);

        FeedResponseListValidator<Integer> validator =
            new FeedResponseListValidator.Builder<Integer>()
                .containsExactlyValues(expectedValues)
                .numberOfPages(expectedPageSize)
                .allPagesSatisfy(new FeedResponseValidator.Builder<Integer>()
                    .hasRequestChargeHeader().build())
                .totalRequestChargeIsAtLeast(numberOfPartitions * minQueryRequestChargePerPartition)
                .build();
        validateQuerySuccess(queryObservable.byPage(pageSize), validator);
    }

    /** Ascending ORDER BY over an int property. */
    @Test(groups = { "simple" }, timeOut = TIMEOUT)
    public void queryOrderByInt() throws Exception {
        String query = "SELECT * FROM r ORDER BY r.propInt";
        CosmosQueryRequestOptions options = new CosmosQueryRequestOptions();
        int pageSize = 3;
        CosmosPagedFlux<InternalObjectNode> queryObservable = createdCollection.queryItems(query, options,
            InternalObjectNode.class);

        Comparator<Integer> validatorComparator = Comparator.nullsFirst(Comparator.<Integer>naturalOrder());
        List<String> expectedResourceIds = sortDocumentsAndCollectResourceIds("propInt",
            d -> ModelBridgeInternal.getIntFromJsonSerializable(d, "propInt"), validatorComparator);
        int expectedPageSize = expectedNumberOfPages(expectedResourceIds.size(), pageSize);

        FeedResponseListValidator<InternalObjectNode> validator =
            new FeedResponseListValidator.Builder<InternalObjectNode>()
                .containsExactly(expectedResourceIds)
                .numberOfPages(expectedPageSize)
                .allPagesSatisfy(new FeedResponseValidator.Builder<InternalObjectNode>()
                    .hasRequestChargeHeader().build())
                .totalRequestChargeIsAtLeast(numberOfPartitions * minQueryRequestChargePerPartition)
                .build();
        validateQuerySuccess(queryObservable.byPage(pageSize), validator);
    }

    /** Ascending ORDER BY over a string property. */
    @Test(groups = { "simple" }, timeOut = TIMEOUT)
    public void queryOrderByString() throws Exception {
        String query = "SELECT * FROM r ORDER BY r.propStr";
        CosmosQueryRequestOptions options = new CosmosQueryRequestOptions();
        int pageSize = 3;
        CosmosPagedFlux<InternalObjectNode> queryObservable = createdCollection.queryItems(query, options,
            InternalObjectNode.class);

        Comparator<String> validatorComparator = Comparator.nullsFirst(Comparator.<String>naturalOrder());
        List<String> expectedResourceIds = sortDocumentsAndCollectResourceIds("propStr",
            d -> ModelBridgeInternal.getStringFromJsonSerializable(d, "propStr"), validatorComparator);
        int expectedPageSize = expectedNumberOfPages(expectedResourceIds.size(), pageSize);

        FeedResponseListValidator<InternalObjectNode> validator =
            new FeedResponseListValidator.Builder<InternalObjectNode>()
                .containsExactly(expectedResourceIds)
                .numberOfPages(expectedPageSize)
                .allPagesSatisfy(new FeedResponseValidator.Builder<InternalObjectNode>()
                    .hasRequestChargeHeader().build())
                .totalRequestChargeIsAtLeast(numberOfPartitions * minQueryRequestChargePerPartition)
                .build();
        validateQuerySuccess(queryObservable.byPage(pageSize), validator);
    }

    // Fix: a dangling @Test(groups = {"simple"}, timeOut = TIMEOUT, dataProvider = "sortOrder")
    // annotation was attached to this private helper (leftover from a removed test method);
    // TestNG must not treat this helper as a test, so the annotation was removed.
    /** Reads and flattens all partition key ranges for the given container. */
    @NotNull
    private List<PartitionKeyRange> getPartitionKeyRanges(
        String containerId, AsyncDocumentClient asyncDocumentClient) {
        List<PartitionKeyRange> partitionKeyRanges = new ArrayList<>();
        List<FeedResponse<PartitionKeyRange>> partitionFeedResponseList = asyncDocumentClient
            .readPartitionKeyRanges("/dbs/" + createdDatabase.getId() + "/colls/" + containerId,
                new CosmosQueryRequestOptions())
            .collectList().block();
        partitionFeedResponseList.forEach(f -> partitionKeyRanges.addAll(f.getResults()));
        return partitionKeyRanges;
    }

    /** TOP values around the document-count boundaries (0, 1, n-1, n, n+1, 2n). */
    @DataProvider(name = "topValue")
    public Object[][] topValueParameter() {
        return new Object[][] {
            { 0 },
            { 1 },
            { 5 },
            { createdDocuments.size() - 1 },
            { createdDocuments.size() },
            { createdDocuments.size() + 1 },
            { 2 * createdDocuments.size() }
        };
    }

    /** TOP combined with ORDER BY, validating truncation and paging. */
    @Test(groups = { "simple" }, timeOut = TIMEOUT, dataProvider = "topValue")
    public void queryOrderWithTop(int topValue) throws Exception {
        String query = String.format("SELECT TOP %d * FROM r ORDER BY r.propInt", topValue);
        CosmosQueryRequestOptions options = new CosmosQueryRequestOptions();
        int pageSize = 3;
        CosmosPagedFlux<InternalObjectNode> queryObservable = createdCollection.queryItems(query, options,
            InternalObjectNode.class);

        Comparator<Integer> validatorComparator = Comparator.nullsFirst(Comparator.<Integer>naturalOrder());
        List<String> expectedResourceIds = sortDocumentsAndCollectResourceIds("propInt",
            d -> ModelBridgeInternal.getIntFromJsonSerializable(d, "propInt"), validatorComparator)
            .stream().limit(topValue).collect(Collectors.toList());
        int expectedPageSize = expectedNumberOfPages(expectedResourceIds.size(), pageSize);

        FeedResponseListValidator<InternalObjectNode> validator =
            new FeedResponseListValidator.Builder<InternalObjectNode>()
                .containsExactly(expectedResourceIds)
                .numberOfPages(expectedPageSize)
                .allPagesSatisfy(new FeedResponseValidator.Builder<InternalObjectNode>()
                    .hasRequestChargeHeader().build())
                // TOP 0 short-circuits, so the per-partition charge floor drops to 1.
                .totalRequestChargeIsAtLeast(numberOfPartitions
                    * (topValue > 0 ? minQueryRequestChargePerPartition : 1))
                .build();
        validateQuerySuccess(queryObservable.byPage(pageSize), validator);
    }

    /** Sorts the created documents by the given property and returns their resource ids. */
    private <T> List<String> sortDocumentsAndCollectResourceIds(String propName,
        Function<InternalObjectNode, T> extractProp, Comparator<T> comparer) {
        return createdDocuments.stream()
            .filter(d -> ModelBridgeInternal.getMapFromJsonSerializable(d).containsKey(propName))
            .sorted((d1, d2) -> comparer.compare(extractProp.apply(d1), extractProp.apply(d2)))
            .map(Resource::getResourceId).collect(Collectors.toList());
    }

    /** Sorts the created documents by the given property and returns the property values. */
    @SuppressWarnings("unchecked")
    private <T> List<T> sortDocumentsAndCollectValues(String propName,
        Function<InternalObjectNode, T> extractProp, Comparator<T> comparer) {
        return createdDocuments.stream()
            .filter(d -> ModelBridgeInternal.getMapFromJsonSerializable(d).containsKey(propName))
            .sorted((d1, d2) -> comparer.compare(extractProp.apply(d1), extractProp.apply(d2)))
            .map(d -> (T) ModelBridgeInternal.getMapFromJsonSerializable(d).get(propName))
            .collect(Collectors.toList());
    }

    /**
     * Runs a single-partition ORDER BY, takes the first page, then resumes from its
     * continuation token and validates the remainder.
     */
    @Test(groups = { "simple" }, timeOut = TIMEOUT)
    public void queryScopedToSinglePartition_StartWithContinuationToken() throws Exception {
        String query = "SELECT * FROM r ORDER BY r.propScopedPartitionInt ASC";
        CosmosQueryRequestOptions options = new CosmosQueryRequestOptions();
        options.setPartitionKey(new PartitionKey("duplicateParitionKeyValue"));
        CosmosPagedFlux<InternalObjectNode> queryObservable = createdCollection.queryItems(query, options,
            InternalObjectNode.class);

        TestSubscriber<FeedResponse<InternalObjectNode>> subscriber = new TestSubscriber<>();
        queryObservable.byPage(3).take(1).subscribe(subscriber);

        subscriber.awaitTerminalEvent();
        subscriber.assertComplete();
        subscriber.assertNoErrors();
        assertThat(subscriber.valueCount()).isEqualTo(1);

        @SuppressWarnings("unchecked")
        FeedResponse<InternalObjectNode> page =
            (FeedResponse<InternalObjectNode>) subscriber.getEvents().get(0).get(0);
        assertThat(page.getResults()).hasSize(3);
        assertThat(page.getContinuationToken()).isNotEmpty();

        queryObservable = createdCollection.queryItems(query, options, InternalObjectNode.class);

        // Documents expected after skipping the first page (propScopedPartitionInt 0..2).
        List<InternalObjectNode> expectedDocs = createdDocuments.stream()
            .filter(d -> (StringUtils.equals("duplicateParitionKeyValue",
                ModelBridgeInternal.getStringFromJsonSerializable(d, "mypk"))))
            .filter(d -> (ModelBridgeInternal.getIntFromJsonSerializable(d,
                "propScopedPartitionInt") > 2)).collect(Collectors.toList());
        Integer maxItemCount = ModelBridgeInternal.getMaxItemCountFromQueryRequestOptions(options);
        int expectedPageSize = (expectedDocs.size() + maxItemCount - 1) / maxItemCount;

        assertThat(expectedDocs).hasSize(10 - 3);

        // Fix: removed redundant "validator = null;" before the real assignment.
        FeedResponseListValidator<InternalObjectNode> validator =
            new FeedResponseListValidator.Builder<InternalObjectNode>()
                .containsExactly(expectedDocs.stream()
                    .sorted((e1, e2) -> Integer.compare(
                        ModelBridgeInternal.getIntFromJsonSerializable(e1, "propScopedPartitionInt"),
                        ModelBridgeInternal.getIntFromJsonSerializable(e2, "propScopedPartitionInt")))
                    .map(d -> d.getResourceId()).collect(Collectors.toList()))
                .numberOfPages(expectedPageSize)
                .allPagesSatisfy(new FeedResponseValidator.Builder<InternalObjectNode>()
                    .requestChargeGreaterThanOrEqualTo(1.0).build())
                .build();

        validateQuerySuccess(queryObservable.byPage(page.getContinuationToken()), validator);
    }

    /** Serializes an ORDER BY continuation token and parses it back, field by field. */
    @Test(groups = { "simple" }, timeOut = TIMEOUT)
    public void orderByContinuationTokenRoundTrip() throws Exception {
        {
            // Round-trip a well-formed token.
            OrderByContinuationToken orderByContinuationToken = new OrderByContinuationToken(
                new CompositeContinuationToken(
                    "asdf",
                    new Range<String>("A", "D", false, true)),
                new QueryItem[] {new QueryItem("{\"item\" : 42}")},
                "rid",
                false);
            String serialized = orderByContinuationToken.toString();
            ValueHolder<OrderByContinuationToken> outOrderByContinuationToken =
                new ValueHolder<OrderByContinuationToken>();

            assertThat(OrderByContinuationToken.tryParse(serialized, outOrderByContinuationToken)).isTrue();
            OrderByContinuationToken deserialized = outOrderByContinuationToken.v;
            CompositeContinuationToken compositeContinuationToken =
                deserialized.getCompositeContinuationToken();
            String token = compositeContinuationToken.getToken();
            Range<String> range = compositeContinuationToken.getRange();
            assertThat(token).isEqualTo("asdf");
            assertThat(range.getMin()).isEqualTo("A");
            assertThat(range.getMax()).isEqualTo("D");
            assertThat(range.isMinInclusive()).isEqualTo(false);
            assertThat(range.isMaxInclusive()).isEqualTo(true);

            QueryItem[] orderByItems = deserialized.getOrderByItems();
            assertThat(orderByItems).isNotNull();
            assertThat(orderByItems.length).isEqualTo(1);
            assertThat(orderByItems[0].getItem()).isEqualTo(42);

            String rid = deserialized.getRid();
            assertThat(rid).isEqualTo("rid");

            boolean inclusive = deserialized.getInclusive();
            assertThat(inclusive).isEqualTo(false);
        }

        {
            // Malformed input must fail to parse.
            ValueHolder<OrderByContinuationToken> outOrderByContinuationToken =
                new ValueHolder<OrderByContinuationToken>();
            assertThat(OrderByContinuationToken.tryParse(
                "{\"property\" : \"Not a valid Order By Token\"}",
                outOrderByContinuationToken)).isFalse();
        }
    }

    /** Pages an int ORDER BY via continuation tokens at several page sizes. */
    @Test(groups = { "simple" }, timeOut = TIMEOUT * 10, dataProvider = "sortOrder",
        retryAnalyzer = RetryAnalyzer.class)
    public void queryDocumentsWithOrderByContinuationTokensInteger(String sortOrder) throws Exception {
        String query = String.format("SELECT * FROM c ORDER BY c.propInt %s", sortOrder);
        Comparator<Integer> order = sortOrder.equals("ASC")
            ? Comparator.naturalOrder() : Comparator.reverseOrder();
        Comparator<Integer> validatorComparator = Comparator.nullsFirst(order);
        List<String> expectedResourceIds = sortDocumentsAndCollectResourceIds("propInt",
            d -> ModelBridgeInternal.getIntFromJsonSerializable(d, "propInt"), validatorComparator);
        this.queryWithContinuationTokensAndPageSizes(query, new int[] { 1, 5, 10, 100}, expectedResourceIds);
    }

    /** Pages a string ORDER BY via continuation tokens at several page sizes. */
    @Test(groups = { "simple" }, timeOut = TIMEOUT * 10, dataProvider = "sortOrder")
    public void queryDocumentsWithOrderByContinuationTokensString(String sortOrder) throws Exception {
        String query = String.format("SELECT * FROM c ORDER BY c.id %s", sortOrder);
        Comparator<String> order = sortOrder.equals("ASC")
            ? Comparator.naturalOrder() : Comparator.reverseOrder();
        Comparator<String> validatorComparator = Comparator.nullsFirst(order);
        List<String> expectedResourceIds = sortDocumentsAndCollectResourceIds("id",
            d -> ModelBridgeInternal.getStringFromJsonSerializable(d, "id"), validatorComparator);
        this.queryWithContinuationTokensAndPageSizes(query, new int[] { 1, 5, 10, 100 }, expectedResourceIds);
    }

    /** Resuming from an unrelated continuation token must surface a CosmosException. */
    @Test(groups = { "simple" }, timeOut = TIMEOUT * 10, dataProvider = "sortOrder")
    public void queryDocumentsWithInvalidOrderByContinuationTokensString(String sortOrder) throws Exception {
        String query = String.format("SELECT * FROM c ORDER BY c.id %s", sortOrder);
        Comparator<String> validatorComparator;
        if (sortOrder.equals("ASC")) {
            validatorComparator = Comparator.nullsFirst(Comparator.<String>naturalOrder());
        } else {
            validatorComparator = Comparator.nullsFirst(Comparator.<String>reverseOrder());
        }
        List<String> expectedResourceIds = sortDocumentsAndCollectResourceIds("id",
            d -> ModelBridgeInternal.getStringFromJsonSerializable(d, "id"), validatorComparator);
        this.assertInvalidContinuationToken(query, new int[] { 1, 5, 10, 100 }, expectedResourceIds);
    }

    /** ORDER BY over an array property: small-page and full-page runs must agree. */
    @Test(groups = {"simple"}, timeOut = TIMEOUT, dataProvider = "sortOrder")
    public void queryOrderByArray(String sortOrder) throws Exception {
        String query = String.format("SELECT * FROM r ORDER BY r.propArray %s", sortOrder);
        int pageSize = 3;
        List<InternalObjectNode> results1 = this.queryWithContinuationTokens(query, pageSize);
        List<InternalObjectNode> results2 =
            this.queryWithContinuationTokens(query, this.createdDocuments.size());
        assertThat(results1.stream().map(r -> r.getResourceId()).collect(Collectors.toList()))
            .containsExactlyElementsOf(results2.stream().limit(pageSize)
                .map(r -> r.getResourceId()).collect(Collectors.toList()));
    }

    /** ORDER BY over an object property: small-page and full-page runs must agree. */
    @Test(groups = {"simple"}, timeOut = TIMEOUT, dataProvider = "sortOrder")
    public void queryOrderByObject(String sortOrder) throws Exception {
        String query = String.format("SELECT * FROM r ORDER BY r.propObject %s", sortOrder);
        int pageSize = 3;
        List<InternalObjectNode> results1 = this.queryWithContinuationTokens(query, pageSize);
        List<InternalObjectNode> results2 =
            this.queryWithContinuationTokens(query, this.createdDocuments.size());
        assertThat(results1.stream().map(r -> r.getResourceId()).collect(Collectors.toList()))
            .containsExactlyElementsOf(results2.stream().limit(pageSize)
                .map(r -> r.getResourceId()).collect(Collectors.toList()));
    }

    /** Creates one document from key/value props and returns its stored form. */
    public InternalObjectNode createDocument(CosmosAsyncContainer cosmosContainer,
        Map<String, Object> keyValueProps) {
        InternalObjectNode docDefinition = getDocumentDefinition(keyValueProps);
        return BridgeInternal.getProperties(cosmosContainer.createItem(docDefinition).block());
    }

    /** Bulk-inserts documents built from the given key/value prop maps. */
    public List<InternalObjectNode> bulkInsert(CosmosAsyncContainer cosmosContainer,
        List<Map<String, Object>> keyValuePropsList) {
        ArrayList<InternalObjectNode> result = new ArrayList<InternalObjectNode>();
        for (Map<String, Object> keyValueProps : keyValuePropsList) {
            InternalObjectNode docDefinition = getDocumentDefinition(keyValueProps);
            result.add(docDefinition);
        }
        return bulkInsertBlocking(cosmosContainer, result);
    }

    // Brief pause between tests; presumably to let throttling/replica state settle —
    // TODO(review): confirm the reason and consider replacing the fixed sleep.
    @BeforeMethod(groups = { "simple" })
    public void beforeMethod() throws Exception {
        TimeUnit.SECONDS.sleep(10);
    }

    /** Seeds the shared container with 30 typed documents, one empty doc, and 10 single-partition docs. */
    @BeforeClass(groups = { "simple" }, timeOut = 4 * SETUP_TIMEOUT)
    public void before_OrderbyDocumentQueryTest() throws Exception {
        client = getClientBuilder().buildAsyncClient();
        createdDatabase = getSharedCosmosDatabase(client);
        createdCollection = getSharedMultiPartitionCosmosContainer(client);
        truncateCollection(createdCollection);

        List<Map<String, Object>> keyValuePropsList = new ArrayList<>();
        Map<String, Object> props;

        for (int i = 0; i < 30; i++) {
            props = new HashMap<>();
            props.put("propInt", i);
            props.put("propStr", String.valueOf(i));
            List<Integer> orderByArray = new ArrayList<Integer>();
            Map<String, String> orderByObject = new HashMap<>();
            for (int k = 0; k < 3; k++) {
                orderByArray.add(k + i);
                orderByObject.put("key1", String.valueOf(i));
                orderByObject.put("key2", String.valueOf(orderByArray.get(k)));
            }
            props.put("propArray", orderByArray);
            props.put("propObject", orderByObject);
            keyValuePropsList.add(props);
            // Cycle propMixed through int / string / array / object / float so every
            // fifth document carries a different JSON type.
            switch (i % 5) {
                case 0:
                    props.put("propMixed", i);
                    break;
                case 1:
                    props.put("propMixed", String.valueOf(i));
                    break;
                case 2:
                    props.put("propMixed", orderByArray);
                    break;
                case 3:
                    props.put("propMixed", orderByObject);
                    break;
                case 4:
                    props.put("propMixed", (float) i * 3.17);
                    break;
                default:
                    break;
            }
        }

        // One document with no custom props at all.
        props = new HashMap<>();
        keyValuePropsList.add(props);

        createdDocuments = bulkInsert(createdCollection, keyValuePropsList);

        // Ten documents pinned to a single partition key for scoped-query tests.
        // Fix: removed an unused CosmosItemRequestOptions local that was created per
        // iteration and never passed anywhere.
        for (int i = 0; i < 10; i++) {
            Map<String, Object> p = new HashMap<>();
            p.put("propScopedPartitionInt", i);
            InternalObjectNode doc =
                getDocumentDefinition("duplicateParitionKeyValue", UUID.randomUUID().toString(), p);
            createdDocuments.add(createDocument(createdCollection, doc));
        }

        numberOfPartitions = CosmosBridgeInternal.getAsyncDocumentClient(client)
            .readPartitionKeyRanges("dbs/" + createdDatabase.getId() + "/colls/"
                + createdCollection.getId(), null)
            .flatMap(p -> Flux.fromIterable(p.getResults())).collectList().single().block().size();

        waitIfNeededForReplicasToCatchUp(getClientBuilder());
        updateCollectionIndex();
    }

    /** Ensures /propMixed/? is part of the container's included index paths. */
    private void updateCollectionIndex() {
        CosmosContainerProperties containerProperties = createdCollection.read().block().getProperties();
        IndexingPolicy indexingPolicy = containerProperties.getIndexingPolicy();
        List<IncludedPath> includedPaths = indexingPolicy.getIncludedPaths();
        IncludedPath includedPath = new IncludedPath("/propMixed/?");
        if (!includedPaths.contains(includedPath)) {
            includedPaths.add(includedPath);
            indexingPolicy.setIncludedPaths(includedPaths);
            containerProperties.setIndexingPolicy(indexingPolicy);
            createdCollection.replace(containerProperties).block();
        }
    }

    @AfterClass(groups = { "simple" }, timeOut = SHUTDOWN_TIMEOUT, alwaysRun = true)
    public void afterClass() {
        safeClose(client);
    }

    /**
     * Issues the query with a fabricated (mismatched) continuation token and asserts
     * the subscription terminates with a CosmosException.
     * Fix: removed a dead do/while loop — its control variable was never assigned,
     * so the body always executed exactly once.
     */
    private void assertInvalidContinuationToken(String query, int[] pageSize, List<String> expectedIds) {
        CosmosQueryRequestOptions options = new CosmosQueryRequestOptions();
        options.setMaxDegreeOfParallelism(2);
        OrderByContinuationToken orderByContinuationToken = new OrderByContinuationToken(
            new CompositeContinuationToken(
                "asdf",
                new Range<String>("A", "D", false, true)),
            new QueryItem[] {new QueryItem("{\"item\" : 42}")},
            "rid",
            false);
        CosmosPagedFlux<InternalObjectNode> queryObservable = createdCollection.queryItems(query, options,
            InternalObjectNode.class);
        TestSubscriber<FeedResponse<InternalObjectNode>> testSubscriber = new TestSubscriber<>();
        queryObservable.byPage(orderByContinuationToken.toString(), 1).subscribe(testSubscriber);
        testSubscriber.awaitTerminalEvent(TIMEOUT, TimeUnit.MILLISECONDS);
        testSubscriber.assertError(CosmosException.class);
    }

    /** Drains the query at each page size and asserts the exact expected id order. */
    private void queryWithContinuationTokensAndPageSizes(String query, int[] pageSizes,
        List<String> expectedIds) {
        for (int pageSize : pageSizes) {
            List<InternalObjectNode> receivedDocuments = this.queryWithContinuationTokens(query, pageSize);
            List<String> actualIds = new ArrayList<String>();
            for (InternalObjectNode document : receivedDocuments) {
                actualIds.add(document.getResourceId());
            }
            assertThat(actualIds).containsExactlyElementsOf(expectedIds);
        }
    }

    /**
     * Drains a query page by page, resuming from the continuation token after every
     * page, and returns all received documents in order.
     * Fix: removed an unused accumulator list of continuation tokens.
     */
    private List<InternalObjectNode> queryWithContinuationTokens(String query, int pageSize) {
        String requestContinuation = null;
        List<InternalObjectNode> receivedDocuments = new ArrayList<InternalObjectNode>();
        do {
            CosmosQueryRequestOptions options = new CosmosQueryRequestOptions();
            options.setMaxDegreeOfParallelism(2);
            CosmosPagedFlux<InternalObjectNode> queryObservable = createdCollection.queryItems(query,
                options, InternalObjectNode.class);

            TestSubscriber<FeedResponse<InternalObjectNode>> testSubscriber = new TestSubscriber<>();
            queryObservable.byPage(requestContinuation, pageSize).subscribe(testSubscriber);
            testSubscriber.awaitTerminalEvent(TIMEOUT, TimeUnit.MILLISECONDS);
            testSubscriber.assertNoErrors();
            testSubscriber.assertComplete();

            @SuppressWarnings("unchecked")
            FeedResponse<InternalObjectNode> firstPage =
                (FeedResponse<InternalObjectNode>) testSubscriber.getEvents().get(0).get(0);
            requestContinuation = firstPage.getContinuationToken();
            receivedDocuments.addAll(firstPage.getResults());
        } while (requestContinuation != null);

        return receivedDocuments;
    }

    /** Builds a document JSON from the given props plus id and partition key. */
    private static InternalObjectNode getDocumentDefinition(String partitionKey, String id,
        Map<String, Object> keyValuePair) {
        StringBuilder sb = new StringBuilder();
        sb.append("{\n");
        // Fix: iterate entrySet instead of keySet + get to avoid a second lookup per key.
        for (Map.Entry<String, Object> entry : keyValuePair.entrySet()) {
            Object val = entry.getValue();
            sb.append("  ");
            sb.append("\"").append(entry.getKey()).append("\"").append(" :");
            if (val == null) {
                sb.append("null");
            } else {
                sb.append(toJson(val));
            }
            sb.append(",\n");
        }
        sb.append(String.format("  \"id\": \"%s\",\n", id));
        sb.append(String.format("  \"mypk\": \"%s\"\n", partitionKey));
        sb.append("}");
        return new InternalObjectNode(sb.toString());
    }

    /** Convenience overload: uses one random UUID as both id and partition key. */
    private static InternalObjectNode getDocumentDefinition(Map<String, Object> keyValuePair) {
        String uuid = UUID.randomUUID().toString();
        return getDocumentDefinition(uuid, uuid, keyValuePair);
    }

    /** Serializes a value to JSON, converting checked Jackson failures to unchecked. */
    private static String toJson(Object object) {
        try {
            return Utils.getSimpleObjectMapper().writeValueAsString(object);
        } catch (JsonProcessingException e) {
            throw new IllegalStateException(e);
        }
    }
}
/**
 * Integration tests for cross-partition ORDER BY document queries: result ordering for
 * ints/strings/arrays/objects, paging via order-by continuation tokens, TOP, queries
 * scoped to a single partition, and continuation-token serialization round-trips.
 * Test data with mixed-type {@code propMixed} values is seeded in
 * {@link #before_OrderbyDocumentQueryTest()}.
 */
class OrderbyDocumentQueryTest extends TestSuiteBase {
    public static final String PROPTYPE = "proptype";
    public static final String PROP_MIXED = "propMixed";
    // Every physical partition touched by a cross-partition query is expected to
    // contribute at least this much request charge.
    private final double minQueryRequestChargePerPartition = 2.0;
    private CosmosAsyncClient client;
    private CosmosAsyncContainer createdCollection;
    private CosmosAsyncDatabase createdDatabase;
    private List<InternalObjectNode> createdDocuments = new ArrayList<>();
    private int numberOfPartitions;

    @Factory(dataProvider = "clientBuildersWithDirect")
    public OrderbyDocumentQueryTest(CosmosClientBuilder clientBuilder) {
        super(clientBuilder);
    }

    /** Queries one known document and validates the returned content plus query metrics. */
    @Test(groups = { "simple" }, timeOut = TIMEOUT, dataProvider = "queryMetricsArgProvider")
    public void queryDocumentsValidateContent(Boolean qmEnabled) throws Exception {
        InternalObjectNode expectedDocument = createdDocuments.get(0);
        String query = String.format("SELECT * from root r where r.propStr = '%s'"
            + " ORDER BY r.propInt",
            ModelBridgeInternal.getStringFromJsonSerializable(expectedDocument, "propStr"));
        CosmosQueryRequestOptions options = new CosmosQueryRequestOptions();
        if (qmEnabled != null) {
            options.setQueryMetricsEnabled(qmEnabled);
        }
        CosmosPagedFlux<InternalObjectNode> queryObservable =
            createdCollection.queryItems(query, options, InternalObjectNode.class);
        List<String> expectedResourceIds = new ArrayList<>();
        expectedResourceIds.add(expectedDocument.getResourceId());
        Map<String, ResourceValidator<InternalObjectNode>> resourceIDToValidator = new HashMap<>();
        resourceIDToValidator.put(expectedDocument.getResourceId(),
            new ResourceValidator.Builder<InternalObjectNode>().areEqual(expectedDocument).build());
        FeedResponseListValidator<InternalObjectNode> validator =
            new FeedResponseListValidator.Builder<InternalObjectNode>()
                .numberOfPages(1)
                .containsExactly(expectedResourceIds)
                .validateAllResources(resourceIDToValidator)
                .totalRequestChargeIsAtLeast(numberOfPartitions * minQueryRequestChargePerPartition)
                .allPagesSatisfy(new FeedResponseValidator.Builder<InternalObjectNode>()
                    .hasRequestChargeHeader().build())
                .hasValidQueryMetrics(qmEnabled)
                .build();
        validateQuerySuccess(queryObservable.byPage(), validator);
    }

    /** An ORDER BY query matching nothing still returns one (empty) page. */
    @Test(groups = { "simple" }, timeOut = TIMEOUT)
    public void queryDocuments_NoResults() throws Exception {
        String query = "SELECT * from root r where r.id = '2' ORDER BY r.propInt";
        CosmosQueryRequestOptions options = new CosmosQueryRequestOptions();
        CosmosPagedFlux<InternalObjectNode> queryObservable =
            createdCollection.queryItems(query, options, InternalObjectNode.class);
        FeedResponseListValidator<InternalObjectNode> validator =
            new FeedResponseListValidator.Builder<InternalObjectNode>()
                .containsExactly(new ArrayList<>())
                .numberOfPages(1)
                .totalRequestChargeIsAtLeast(numberOfPartitions * minQueryRequestChargePerPartition)
                .allPagesSatisfy(new FeedResponseValidator.Builder<InternalObjectNode>()
                    .hasRequestChargeHeader().build())
                .build();
        validateQuerySuccess(queryObservable.byPage(), validator);
    }

    @DataProvider(name = "sortOrder")
    public Object[][] sortOrder() {
        return new Object[][] { { "ASC" }, {"DESC"} };
    }

    /** Validates document ordering for both ASC and DESC on an int property. */
    @Test(groups = { "simple" }, timeOut = TIMEOUT, dataProvider = "sortOrder")
    public void queryOrderBy(String sortOrder) throws Exception {
        String query = String.format("SELECT * FROM r ORDER BY r.propInt %s", sortOrder);
        CosmosQueryRequestOptions options = new CosmosQueryRequestOptions();
        int pageSize = 3;
        CosmosPagedFlux<InternalObjectNode> queryObservable =
            createdCollection.queryItems(query, options, InternalObjectNode.class);
        Comparator<Integer> validatorComparator = Comparator.nullsFirst(Comparator.<Integer>naturalOrder());
        List<String> expectedResourceIds = sortDocumentsAndCollectResourceIds("propInt",
            d -> ModelBridgeInternal.getIntFromJsonSerializable(d, "propInt"), validatorComparator);
        if ("DESC".equals(sortOrder)) {
            Collections.reverse(expectedResourceIds);
        }
        int expectedPageSize = expectedNumberOfPages(expectedResourceIds.size(), pageSize);
        FeedResponseListValidator<InternalObjectNode> validator =
            new FeedResponseListValidator.Builder<InternalObjectNode>()
                .containsExactly(expectedResourceIds)
                .numberOfPages(expectedPageSize)
                .allPagesSatisfy(new FeedResponseValidator.Builder<InternalObjectNode>()
                    .hasRequestChargeHeader().build())
                .totalRequestChargeIsAtLeast(numberOfPartitions * minQueryRequestChargePerPartition)
                .build();
        validateQuerySuccess(queryObservable.byPage(pageSize), validator);
    }

    /** Same as queryOrderBy but projecting VALUE r.propInt, validating the raw values. */
    @Test(groups = {"simple"}, timeOut = TIMEOUT, dataProvider = "sortOrder")
    public void queryOrderByWithValue(String sortOrder) throws Exception {
        String query = String.format("SELECT value r.propInt FROM r ORDER BY r.propInt %s", sortOrder);
        CosmosQueryRequestOptions options = new CosmosQueryRequestOptions();
        int pageSize = 3;
        CosmosPagedFlux<Integer> queryObservable =
            createdCollection.queryItems(query, options, Integer.class);
        Comparator<Integer> validatorComparator = Comparator.nullsFirst(Comparator.<Integer>naturalOrder());
        List<Integer> expectedValues = sortDocumentsAndCollectValues("propInt",
            d -> ModelBridgeInternal.getIntFromJsonSerializable(d, "propInt"), validatorComparator);
        if ("DESC".equals(sortOrder)) {
            Collections.reverse(expectedValues);
        }
        int expectedPageSize = expectedNumberOfPages(expectedValues.size(), pageSize);
        FeedResponseListValidator<Integer> validator =
            new FeedResponseListValidator.Builder<Integer>()
                .containsExactlyValues(expectedValues)
                .numberOfPages(expectedPageSize)
                .allPagesSatisfy(new FeedResponseValidator.Builder<Integer>()
                    .hasRequestChargeHeader().build())
                .totalRequestChargeIsAtLeast(numberOfPartitions * minQueryRequestChargePerPartition)
                .build();
        validateQuerySuccess(queryObservable.byPage(pageSize), validator);
    }

    /** ORDER BY on an int property, default (ASC) order. */
    @Test(groups = { "simple" }, timeOut = TIMEOUT)
    public void queryOrderByInt() throws Exception {
        String query = "SELECT * FROM r ORDER BY r.propInt";
        CosmosQueryRequestOptions options = new CosmosQueryRequestOptions();
        int pageSize = 3;
        CosmosPagedFlux<InternalObjectNode> queryObservable =
            createdCollection.queryItems(query, options, InternalObjectNode.class);
        Comparator<Integer> validatorComparator = Comparator.nullsFirst(Comparator.<Integer>naturalOrder());
        List<String> expectedResourceIds = sortDocumentsAndCollectResourceIds("propInt",
            d -> ModelBridgeInternal.getIntFromJsonSerializable(d, "propInt"), validatorComparator);
        int expectedPageSize = expectedNumberOfPages(expectedResourceIds.size(), pageSize);
        FeedResponseListValidator<InternalObjectNode> validator =
            new FeedResponseListValidator.Builder<InternalObjectNode>()
                .containsExactly(expectedResourceIds)
                .numberOfPages(expectedPageSize)
                .allPagesSatisfy(new FeedResponseValidator.Builder<InternalObjectNode>()
                    .hasRequestChargeHeader().build())
                .totalRequestChargeIsAtLeast(numberOfPartitions * minQueryRequestChargePerPartition)
                .build();
        validateQuerySuccess(queryObservable.byPage(pageSize), validator);
    }

    /** ORDER BY on a string property, default (ASC) order. */
    @Test(groups = { "simple" }, timeOut = TIMEOUT)
    public void queryOrderByString() throws Exception {
        String query = "SELECT * FROM r ORDER BY r.propStr";
        CosmosQueryRequestOptions options = new CosmosQueryRequestOptions();
        int pageSize = 3;
        CosmosPagedFlux<InternalObjectNode> queryObservable =
            createdCollection.queryItems(query, options, InternalObjectNode.class);
        Comparator<String> validatorComparator = Comparator.nullsFirst(Comparator.<String>naturalOrder());
        List<String> expectedResourceIds = sortDocumentsAndCollectResourceIds("propStr",
            d -> ModelBridgeInternal.getStringFromJsonSerializable(d, "propStr"), validatorComparator);
        int expectedPageSize = expectedNumberOfPages(expectedResourceIds.size(), pageSize);
        FeedResponseListValidator<InternalObjectNode> validator =
            new FeedResponseListValidator.Builder<InternalObjectNode>()
                .containsExactly(expectedResourceIds)
                .numberOfPages(expectedPageSize)
                .allPagesSatisfy(new FeedResponseValidator.Builder<InternalObjectNode>()
                    .hasRequestChargeHeader().build())
                .totalRequestChargeIsAtLeast(numberOfPartitions * minQueryRequestChargePerPartition)
                .build();
        validateQuerySuccess(queryObservable.byPage(pageSize), validator);
    }

    // BUGFIX: this is a private helper, not a test; the stray
    // @Test(groups = {"simple"}, timeOut = TIMEOUT, dataProvider = "sortOrder")
    // annotation that preceded it was removed (TestNG cannot run a private method
    // whose parameters do not match the data provider).
    /** Reads all partition key ranges of the given container. */
    private List<PartitionKeyRange> getPartitionKeyRanges(
        String containerId, AsyncDocumentClient asyncDocumentClient) {
        List<PartitionKeyRange> partitionKeyRanges = new ArrayList<>();
        List<FeedResponse<PartitionKeyRange>> partitionFeedResponseList = asyncDocumentClient
            .readPartitionKeyRanges("/dbs/" + createdDatabase.getId() + "/colls/" + containerId,
                new CosmosQueryRequestOptions())
            .collectList().block();
        partitionFeedResponseList.forEach(f -> partitionKeyRanges.addAll(f.getResults()));
        return partitionKeyRanges;
    }

    /** TOP values around the document count boundaries: 0, 1, n-1, n, n+1, 2n. */
    @DataProvider(name = "topValue")
    public Object[][] topValueParameter() {
        return new Object[][] {
            { 0 },
            { 1 },
            { 5 },
            { createdDocuments.size() - 1 },
            { createdDocuments.size() },
            { createdDocuments.size() + 1 },
            { 2 * createdDocuments.size() }
        };
    }

    /** Validates TOP combined with ORDER BY for various top values. */
    @Test(groups = { "simple" }, timeOut = TIMEOUT, dataProvider = "topValue")
    public void queryOrderWithTop(int topValue) throws Exception {
        String query = String.format("SELECT TOP %d * FROM r ORDER BY r.propInt", topValue);
        CosmosQueryRequestOptions options = new CosmosQueryRequestOptions();
        int pageSize = 3;
        CosmosPagedFlux<InternalObjectNode> queryObservable =
            createdCollection.queryItems(query, options, InternalObjectNode.class);
        Comparator<Integer> validatorComparator = Comparator.nullsFirst(Comparator.<Integer>naturalOrder());
        List<String> expectedResourceIds = sortDocumentsAndCollectResourceIds("propInt",
            d -> ModelBridgeInternal.getIntFromJsonSerializable(d, "propInt"), validatorComparator)
            .stream().limit(topValue).collect(Collectors.toList());
        int expectedPageSize = expectedNumberOfPages(expectedResourceIds.size(), pageSize);
        FeedResponseListValidator<InternalObjectNode> validator =
            new FeedResponseListValidator.Builder<InternalObjectNode>()
                .containsExactly(expectedResourceIds)
                .numberOfPages(expectedPageSize)
                .allPagesSatisfy(new FeedResponseValidator.Builder<InternalObjectNode>()
                    .hasRequestChargeHeader().build())
                // TOP 0 issues no per-partition work, so the charge floor is lower.
                .totalRequestChargeIsAtLeast(numberOfPartitions
                    * (topValue > 0 ? minQueryRequestChargePerPartition : 1))
                .build();
        validateQuerySuccess(queryObservable.byPage(pageSize), validator);
    }

    /** Sorts seeded docs having propName by the extracted property and returns their resource ids. */
    private <T> List<String> sortDocumentsAndCollectResourceIds(String propName,
        Function<InternalObjectNode, T> extractProp, Comparator<T> comparer) {
        return createdDocuments.stream()
            .filter(d -> ModelBridgeInternal.getMapFromJsonSerializable(d).containsKey(propName))
            .sorted((d1, d2) -> comparer.compare(extractProp.apply(d1), extractProp.apply(d2)))
            .map(Resource::getResourceId).collect(Collectors.toList());
    }

    /** Sorts seeded docs having propName and returns the raw property values in that order. */
    @SuppressWarnings("unchecked")
    private <T> List<T> sortDocumentsAndCollectValues(String propName,
        Function<InternalObjectNode, T> extractProp, Comparator<T> comparer) {
        return createdDocuments.stream()
            .filter(d -> ModelBridgeInternal.getMapFromJsonSerializable(d).containsKey(propName))
            .sorted((d1, d2) -> comparer.compare(extractProp.apply(d1), extractProp.apply(d2)))
            .map(d -> (T) ModelBridgeInternal.getMapFromJsonSerializable(d).get(propName))
            .collect(Collectors.toList());
    }

    /**
     * Reads one page of a partition-scoped ORDER BY query, then resumes from that page's
     * continuation token and validates the remaining documents.
     */
    @Test(groups = { "simple" }, timeOut = TIMEOUT)
    public void queryScopedToSinglePartition_StartWithContinuationToken() throws Exception {
        String query = "SELECT * FROM r ORDER BY r.propScopedPartitionInt ASC";
        CosmosQueryRequestOptions options = new CosmosQueryRequestOptions();
        options.setPartitionKey(new PartitionKey("duplicateParitionKeyValue"));
        CosmosPagedFlux<InternalObjectNode> queryObservable =
            createdCollection.queryItems(query, options, InternalObjectNode.class);
        TestSubscriber<FeedResponse<InternalObjectNode>> subscriber = new TestSubscriber<>();
        queryObservable.byPage(3).take(1).subscribe(subscriber);
        subscriber.awaitTerminalEvent();
        subscriber.assertComplete();
        subscriber.assertNoErrors();
        assertThat(subscriber.valueCount()).isEqualTo(1);
        @SuppressWarnings("unchecked")
        FeedResponse<InternalObjectNode> page =
            (FeedResponse<InternalObjectNode>) subscriber.getEvents().get(0).get(0);
        assertThat(page.getResults()).hasSize(3);
        assertThat(page.getContinuationToken()).isNotEmpty();
        queryObservable = createdCollection.queryItems(query, options, InternalObjectNode.class);
        // The first page consumed docs with propScopedPartitionInt 0..2; the rest follow.
        List<InternalObjectNode> expectedDocs = createdDocuments.stream()
            .filter(d -> (StringUtils.equals("duplicateParitionKeyValue",
                ModelBridgeInternal.getStringFromJsonSerializable(d, "mypk"))))
            .filter(d -> (ModelBridgeInternal.getIntFromJsonSerializable(d,
                "propScopedPartitionInt") > 2)).collect(Collectors.toList());
        Integer maxItemCount = ModelBridgeInternal.getMaxItemCountFromQueryRequestOptions(options);
        int expectedPageSize = (expectedDocs.size() + maxItemCount - 1) / maxItemCount;
        assertThat(expectedDocs).hasSize(10 - 3);
        FeedResponseListValidator<InternalObjectNode> validator =
            new FeedResponseListValidator.Builder<InternalObjectNode>()
                .containsExactly(expectedDocs.stream()
                    .sorted((e1, e2) -> Integer.compare(
                        ModelBridgeInternal.getIntFromJsonSerializable(e1, "propScopedPartitionInt"),
                        ModelBridgeInternal.getIntFromJsonSerializable(e2, "propScopedPartitionInt")))
                    .map(d -> d.getResourceId()).collect(Collectors.toList()))
                .numberOfPages(expectedPageSize)
                .allPagesSatisfy(new FeedResponseValidator.Builder<InternalObjectNode>()
                    .requestChargeGreaterThanOrEqualTo(1.0).build())
                .build();
        validateQuerySuccess(queryObservable.byPage(page.getContinuationToken()), validator);
    }

    /** Serializes an order-by continuation token and verifies lossless parsing, plus a negative case. */
    @Test(groups = { "simple" }, timeOut = TIMEOUT)
    public void orderByContinuationTokenRoundTrip() throws Exception {
        {
            // Positive case: serialize then parse back and compare every component.
            OrderByContinuationToken orderByContinuationToken = new OrderByContinuationToken(
                new CompositeContinuationToken(
                    "asdf",
                    new Range<String>("A", "D", false, true)),
                new QueryItem[] {new QueryItem("{\"item\" : 42}")},
                "rid",
                false);
            String serialized = orderByContinuationToken.toString();
            ValueHolder<OrderByContinuationToken> outOrderByContinuationToken =
                new ValueHolder<OrderByContinuationToken>();
            assertThat(OrderByContinuationToken.tryParse(serialized, outOrderByContinuationToken)).isTrue();
            OrderByContinuationToken deserialized = outOrderByContinuationToken.v;
            CompositeContinuationToken compositeContinuationToken =
                deserialized.getCompositeContinuationToken();
            String token = compositeContinuationToken.getToken();
            Range<String> range = compositeContinuationToken.getRange();
            assertThat(token).isEqualTo("asdf");
            assertThat(range.getMin()).isEqualTo("A");
            assertThat(range.getMax()).isEqualTo("D");
            assertThat(range.isMinInclusive()).isEqualTo(false);
            assertThat(range.isMaxInclusive()).isEqualTo(true);
            QueryItem[] orderByItems = deserialized.getOrderByItems();
            assertThat(orderByItems).isNotNull();
            assertThat(orderByItems.length).isEqualTo(1);
            assertThat(orderByItems[0].getItem()).isEqualTo(42);
            String rid = deserialized.getRid();
            assertThat(rid).isEqualTo("rid");
            boolean inclusive = deserialized.getInclusive();
            assertThat(inclusive).isEqualTo(false);
        }
        {
            // Negative case: a structurally wrong token must not parse.
            ValueHolder<OrderByContinuationToken> outOrderByContinuationToken =
                new ValueHolder<OrderByContinuationToken>();
            assertThat(OrderByContinuationToken.tryParse(
                "{\"property\" : \"Not a valid Order By Token\"}", outOrderByContinuationToken)).isFalse();
        }
    }

    /** Drains an int ORDER BY query with several page sizes, verifying continuation-token paging. */
    @Test(groups = { "simple" }, timeOut = TIMEOUT * 10, dataProvider = "sortOrder",
        retryAnalyzer = RetryAnalyzer.class)
    public void queryDocumentsWithOrderByContinuationTokensInteger(String sortOrder) throws Exception {
        String query = String.format("SELECT * FROM c ORDER BY c.propInt %s", sortOrder);
        Comparator<Integer> order = sortOrder.equals("ASC")
            ? Comparator.naturalOrder() : Comparator.reverseOrder();
        Comparator<Integer> validatorComparator = Comparator.nullsFirst(order);
        List<String> expectedResourceIds = sortDocumentsAndCollectResourceIds("propInt",
            d -> ModelBridgeInternal.getIntFromJsonSerializable(d, "propInt"), validatorComparator);
        this.queryWithContinuationTokensAndPageSizes(query, new int[] { 1, 5, 10, 100}, expectedResourceIds);
    }

    /** Drains a string ORDER BY query with several page sizes, verifying continuation-token paging. */
    @Test(groups = { "simple" }, timeOut = TIMEOUT * 10, dataProvider = "sortOrder")
    public void queryDocumentsWithOrderByContinuationTokensString(String sortOrder) throws Exception {
        String query = String.format("SELECT * FROM c ORDER BY c.id %s", sortOrder);
        Comparator<String> order = sortOrder.equals("ASC")
            ? Comparator.naturalOrder() : Comparator.reverseOrder();
        Comparator<String> validatorComparator = Comparator.nullsFirst(order);
        List<String> expectedResourceIds = sortDocumentsAndCollectResourceIds("id",
            d -> ModelBridgeInternal.getStringFromJsonSerializable(d, "id"), validatorComparator);
        this.queryWithContinuationTokensAndPageSizes(query, new int[] { 1, 5, 10, 100 }, expectedResourceIds);
    }

    /** A bogus order-by continuation token must surface as a CosmosException. */
    @Test(groups = { "simple" }, timeOut = TIMEOUT * 10, dataProvider = "sortOrder")
    public void queryDocumentsWithInvalidOrderByContinuationTokensString(String sortOrder) throws Exception {
        String query = String.format("SELECT * FROM c ORDER BY c.id %s", sortOrder);
        Comparator<String> validatorComparator;
        if (sortOrder.equals("ASC")) {
            validatorComparator = Comparator.nullsFirst(Comparator.<String>naturalOrder());
        } else {
            validatorComparator = Comparator.nullsFirst(Comparator.<String>reverseOrder());
        }
        List<String> expectedResourceIds = sortDocumentsAndCollectResourceIds("id",
            d -> ModelBridgeInternal.getStringFromJsonSerializable(d, "id"), validatorComparator);
        this.assertInvalidContinuationToken(query, new int[] { 1, 5, 10, 100 }, expectedResourceIds);
    }

    /** ORDER BY on an array property: paged and single-page results must agree on a prefix. */
    @Test(groups = {"simple"}, timeOut = TIMEOUT, dataProvider = "sortOrder")
    public void queryOrderByArray(String sortOrder) throws Exception {
        String query = String.format("SELECT * FROM r ORDER BY r.propArray %s", sortOrder);
        int pageSize = 3;
        List<InternalObjectNode> results1 = this.queryWithContinuationTokens(query, pageSize);
        List<InternalObjectNode> results2 = this.queryWithContinuationTokens(query,
            this.createdDocuments.size());
        assertThat(results1.stream().map(r -> r.getResourceId()).collect(Collectors.toList()))
            .containsExactlyElementsOf(results2.stream().limit(pageSize)
                .map(r -> r.getResourceId()).collect(Collectors.toList()));
    }

    /** ORDER BY on an object property: paged and single-page results must agree on a prefix. */
    @Test(groups = {"simple"}, timeOut = TIMEOUT, dataProvider = "sortOrder")
    public void queryOrderByObject(String sortOrder) throws Exception {
        String query = String.format("SELECT * FROM r ORDER BY r.propObject %s", sortOrder);
        int pageSize = 3;
        List<InternalObjectNode> results1 = this.queryWithContinuationTokens(query, pageSize);
        List<InternalObjectNode> results2 = this.queryWithContinuationTokens(query,
            this.createdDocuments.size());
        assertThat(results1.stream().map(r -> r.getResourceId()).collect(Collectors.toList()))
            .containsExactlyElementsOf(results2.stream().limit(pageSize)
                .map(r -> r.getResourceId()).collect(Collectors.toList()));
    }

    /** Builds a document from the property map and inserts it, returning the stored properties. */
    public InternalObjectNode createDocument(CosmosAsyncContainer cosmosContainer,
        Map<String, Object> keyValueProps) {
        InternalObjectNode docDefinition = getDocumentDefinition(keyValueProps);
        return createDocument(cosmosContainer, docDefinition);
    }

    // BUGFIX: overload added — setup builds an InternalObjectNode with an explicit
    // partition key ("duplicateParitionKeyValue") and must insert it as-is; previously
    // there was no overload accepting the pre-built document.
    /** Inserts a pre-built document definition, returning the stored properties. */
    public InternalObjectNode createDocument(CosmosAsyncContainer cosmosContainer,
        InternalObjectNode docDefinition) {
        return BridgeInternal.getProperties(cosmosContainer.createItem(docDefinition).block());
    }

    /** Bulk-inserts one document per property map and returns the inserted documents. */
    public List<InternalObjectNode> bulkInsert(CosmosAsyncContainer cosmosContainer,
        List<Map<String, Object>> keyValuePropsList) {
        ArrayList<InternalObjectNode> result = new ArrayList<InternalObjectNode>();
        for (Map<String, Object> keyValueProps : keyValuePropsList) {
            InternalObjectNode docDefinition = getDocumentDefinition(keyValueProps);
            result.add(docDefinition);
        }
        return bulkInsertBlocking(cosmosContainer, result);
    }

    // Grace period before each test; presumably lets replicas/indexes settle — TODO confirm.
    @BeforeMethod(groups = { "simple" })
    public void beforeMethod() throws Exception {
        TimeUnit.SECONDS.sleep(10);
    }

    /**
     * Seeds 31 documents with mixed-type properties (propInt/propStr/propArray/propObject and a
     * cycling propMixed), plus 10 documents sharing one partition key for the scoped-query test.
     */
    @BeforeClass(groups = { "simple" }, timeOut = 4 * SETUP_TIMEOUT)
    public void before_OrderbyDocumentQueryTest() throws Exception {
        client = getClientBuilder().buildAsyncClient();
        createdDatabase = getSharedCosmosDatabase(client);
        createdCollection = getSharedMultiPartitionCosmosContainer(client);
        truncateCollection(createdCollection);
        List<Map<String, Object>> keyValuePropsList = new ArrayList<>();
        Map<String, Object> props;
        boolean flag = false;
        for (int i = 0; i < 30; i++) {
            props = new HashMap<>();
            props.put("propInt", i);
            props.put("propStr", String.valueOf(i));
            List<Integer> orderByArray = new ArrayList<Integer>();
            Map<String, String> orderByObject = new HashMap<>();
            for (int k = 0; k < 3; k++) {
                orderByArray.add(k + i);
                orderByObject.put("key1", String.valueOf(i));
                orderByObject.put("key2", String.valueOf(orderByArray.get(k)));
            }
            props.put("propArray", orderByArray);
            props.put("propObject", orderByObject);
            // Cycle propMixed through number/string/array/object/float/null/boolean/absent.
            switch (i % 8) {
                case 0:
                    props.put(PROP_MIXED, i);
                    props.put(PROPTYPE, "number");
                    break;
                case 1:
                    props.put(PROP_MIXED, String.valueOf(i));
                    props.put(PROPTYPE, "string");
                    break;
                case 2:
                    props.put(PROP_MIXED, orderByArray);
                    props.put(PROPTYPE, "array");
                    break;
                case 3:
                    props.put(PROP_MIXED, orderByObject);
                    props.put(PROPTYPE, "object");
                    break;
                case 4:
                    props.put(PROP_MIXED, (float) i * 3.17);
                    props.put(PROPTYPE, "number");
                    break;
                case 5:
                    props.put(PROP_MIXED, null);
                    props.put(PROPTYPE, "null");
                    break;
                case 6:
                    flag = !flag;
                    props.put(PROP_MIXED, flag);
                    props.put(PROPTYPE, "boolean");
                    break;
                default:
                    break;
            }
            keyValuePropsList.add(props);
        }
        // One document with none of the orderable properties at all.
        props = new HashMap<>();
        keyValuePropsList.add(props);
        createdDocuments = bulkInsert(createdCollection, keyValuePropsList);
        for (int i = 0; i < 10; i++) {
            Map<String, Object> p = new HashMap<>();
            p.put("propScopedPartitionInt", i);
            InternalObjectNode doc =
                getDocumentDefinition("duplicateParitionKeyValue", UUID.randomUUID().toString(), p);
            // Insert the pre-built doc so its fixed partition key is preserved.
            createdDocuments.add(createDocument(createdCollection, doc));
        }
        numberOfPartitions = CosmosBridgeInternal.getAsyncDocumentClient(client)
            .readPartitionKeyRanges("dbs/" + createdDatabase.getId()
                + "/colls/" + createdCollection.getId(), null)
            .flatMap(page -> Flux.fromIterable(page.getResults())).collectList().single().block().size();
        waitIfNeededForReplicasToCatchUp(getClientBuilder());
        updateCollectionIndex();
    }

    /** Ensures /propMixed/? is part of the container's included index paths. */
    private void updateCollectionIndex() {
        CosmosContainerProperties containerProperties = createdCollection.read().block().getProperties();
        IndexingPolicy indexingPolicy = containerProperties.getIndexingPolicy();
        List<IncludedPath> includedPaths = indexingPolicy.getIncludedPaths();
        IncludedPath includedPath = new IncludedPath("/propMixed/?");
        if (!includedPaths.contains(includedPath)) {
            includedPaths.add(includedPath);
            indexingPolicy.setIncludedPaths(includedPaths);
            containerProperties.setIndexingPolicy(indexingPolicy);
            createdCollection.replace(containerProperties).block();
        }
    }

    @AfterClass(groups = { "simple" }, timeOut = SHUTDOWN_TIMEOUT, alwaysRun = true)
    public void afterClass() {
        safeClose(client);
    }

    /**
     * Issues the query with a syntactically valid but bogus order-by continuation token and
     * expects a CosmosException. The pageSize/expectedIds parameters are kept for signature
     * compatibility with the paging helpers. (The original wrapped this in a do/while whose
     * condition was always false — it executed exactly once, as this does.)
     */
    private void assertInvalidContinuationToken(String query, int[] pageSize, List<String> expectedIds) {
        CosmosQueryRequestOptions options = new CosmosQueryRequestOptions();
        options.setMaxDegreeOfParallelism(2);
        OrderByContinuationToken orderByContinuationToken = new OrderByContinuationToken(
            new CompositeContinuationToken(
                "asdf",
                new Range<String>("A", "D", false, true)),
            new QueryItem[] {new QueryItem("{\"item\" : 42}")},
            "rid",
            false);
        CosmosPagedFlux<InternalObjectNode> queryObservable =
            createdCollection.queryItems(query, options, InternalObjectNode.class);
        TestSubscriber<FeedResponse<InternalObjectNode>> testSubscriber = new TestSubscriber<>();
        queryObservable.byPage(orderByContinuationToken.toString(), 1).subscribe(testSubscriber);
        testSubscriber.awaitTerminalEvent(TIMEOUT, TimeUnit.MILLISECONDS);
        testSubscriber.assertError(CosmosException.class);
    }

    /** Runs queryWithContinuationTokens for each page size and checks ids against the expectation. */
    private void queryWithContinuationTokensAndPageSizes(String query, int[] pageSizes,
        List<String> expectedIds) {
        for (int pageSize : pageSizes) {
            List<InternalObjectNode> receivedDocuments = this.queryWithContinuationTokens(query, pageSize);
            List<String> actualIds = new ArrayList<String>();
            for (InternalObjectNode document : receivedDocuments) {
                actualIds.add(document.getResourceId());
            }
            assertThat(actualIds).containsExactlyElementsOf(expectedIds);
        }
    }

    /** Drains the query one page at a time via continuation tokens, returning all documents. */
    private List<InternalObjectNode> queryWithContinuationTokens(String query, int pageSize) {
        String requestContinuation = null;
        List<InternalObjectNode> receivedDocuments = new ArrayList<InternalObjectNode>();
        do {
            CosmosQueryRequestOptions options = new CosmosQueryRequestOptions();
            options.setMaxDegreeOfParallelism(2);
            CosmosPagedFlux<InternalObjectNode> queryObservable =
                createdCollection.queryItems(query, options, InternalObjectNode.class);
            TestSubscriber<FeedResponse<InternalObjectNode>> testSubscriber = new TestSubscriber<>();
            queryObservable.byPage(requestContinuation, pageSize).subscribe(testSubscriber);
            testSubscriber.awaitTerminalEvent(TIMEOUT, TimeUnit.MILLISECONDS);
            testSubscriber.assertNoErrors();
            testSubscriber.assertComplete();
            @SuppressWarnings("unchecked")
            FeedResponse<InternalObjectNode> firstPage =
                (FeedResponse<InternalObjectNode>) testSubscriber.getEvents().get(0).get(0);
            requestContinuation = firstPage.getContinuationToken();
            receivedDocuments.addAll(firstPage.getResults());
        } while (requestContinuation != null);
        return receivedDocuments;
    }

    /** Builds a raw JSON document with the given properties, id, and "mypk" partition key. */
    private static InternalObjectNode getDocumentDefinition(String partitionKey, String id,
        Map<String, Object> keyValuePair) {
        StringBuilder sb = new StringBuilder();
        sb.append("{\n");
        for (String key : keyValuePair.keySet()) {
            Object val = keyValuePair.get(key);
            sb.append(" ");
            sb.append("\"").append(key).append("\"").append(" :");
            if (val == null) {
                sb.append("null");
            } else {
                sb.append(toJson(val));
            }
            sb.append(",\n");
        }
        sb.append(String.format("  \"id\": \"%s\",\n", id));
        sb.append(String.format("  \"mypk\": \"%s\"\n", partitionKey));
        sb.append("}");
        return new InternalObjectNode(sb.toString());
    }

    /** Builds a document using a fresh UUID for both id and partition key. */
    private static InternalObjectNode getDocumentDefinition(Map<String, Object> keyValuePair) {
        String uuid = UUID.randomUUID().toString();
        return getDocumentDefinition(uuid, uuid, keyValuePair);
    }

    /** JSON-serializes a value, wrapping any Jackson failure as an IllegalStateException. */
    private static String toJson(Object object) {
        try {
            return Utils.getSimpleObjectMapper().writeValueAsString(object);
        } catch (JsonProcessingException e) {
            throw new IllegalStateException(e);
        }
    }
}
It won't return null; it currently returns an empty map.
public Mono<Boolean> onChallenge(HttpPipelineCallContext context, HttpResponse response) {
    return Mono.defer(() -> {
        // Only a 401 response that carries a WWW-Authenticate header can be re-authorized.
        String header = response.getHeaderValue(WWW_AUTHENTICATE);
        if (response.getStatusCode() != 401 || header == null) {
            return Mono.just(false);
        }
        for (AuthenticationChallenge challenge : parseChallenges(header)) {
            Map<String, String> params = parseChallengeParams(challenge.getChallengeParameters());
            if (!params.containsKey(CLAIMS_PARAMETER)) {
                continue;
            }
            // The claims parameter is base64url-encoded; decode it for the token request.
            String decodedClaims = new String(
                Base64.getUrlDecoder().decode(params.get(CLAIMS_PARAMETER)), StandardCharsets.UTF_8);
            // Prefer the scopes stashed in the pipeline context; fall back to the configured ones.
            String[] effectiveScopes;
            try {
                effectiveScopes = (String[]) context.getData(ARM_SCOPES_KEY).get();
            } catch (NoSuchElementException e) {
                effectiveScopes = this.scopes;
            }
            if (effectiveScopes == null || effectiveScopes.length == 0) {
                effectiveScopes = new String[] {
                    ARMScopeHelper.getDefaultScopeFromRequest(context.getHttpRequest(), environment)
                };
            }
            TokenRequestContext tokenRequest = new TokenRequestContext()
                .addScopes(effectiveScopes)
                .setClaims(decodedClaims);
            return authorizeRequest(context, tokenRequest).flatMap(ignored -> Mono.just(true));
        }
        // No challenge contained a claims parameter — nothing to retry.
        return Mono.just(false);
    });
}
parseChallengeParams(authenticationChallenge.getChallengeParameters());
public Mono<Boolean> onChallenge(HttpPipelineCallContext context, HttpResponse response) {
    return Mono.defer(() -> {
        String header = response.getHeaderValue(WWW_AUTHENTICATE);
        // Anything other than a 401 with a WWW-Authenticate header cannot be re-authorized.
        if (response.getStatusCode() != 401 || header == null) {
            return Mono.just(false);
        }
        for (AuthenticationChallenge challenge : parseChallenges(header)) {
            Map<String, String> params = parseChallengeParams(challenge.getChallengeParameters());
            if (!params.containsKey(CLAIMS_PARAMETER)) {
                continue;
            }
            // Decode the base64url claims supplied by the challenge.
            String decodedClaims = new String(
                Base64.getUrlDecoder().decode(params.get(CLAIMS_PARAMETER)), StandardCharsets.UTF_8);
            // Prefer the scopes stashed in the pipeline context; fall back to the configured ones.
            String[] effectiveScopes;
            try {
                effectiveScopes = (String[]) context.getData(ARM_SCOPES_KEY).get();
            } catch (NoSuchElementException e) {
                effectiveScopes = this.scopes;
            }
            effectiveScopes = getScopes(context, effectiveScopes);
            TokenRequestContext tokenRequest = new TokenRequestContext()
                .addScopes(effectiveScopes)
                .setClaims(decodedClaims);
            return authorizeRequest(context, tokenRequest).flatMap(ignored -> Mono.just(true));
        }
        // No challenge contained a claims parameter — nothing to retry.
        return Mono.just(false);
    });
}
/**
 * Pipeline policy that acquires bearer tokens for Azure Resource Manager requests and
 * reacts to authentication challenges returned in WWW-Authenticate headers.
 */
class ARMChallengeAuthenticationPolicy extends BearerTokenAuthenticationChallengePolicy {
    // Matches one challenge: a scheme word followed by a comma-separated list of key="value" pairs.
    private static final Pattern AUTHENTICATION_CHALLENGE_PATTERN =
        Pattern.compile("(\\w+) ((?:\\w+=\".*?\"(?:, )?)+)(?:, )?");
    // Matches the individual key="value" pairs inside a challenge's parameter list.
    private static final Pattern AUTHENTICATION_CHALLENGE_PARAMS_PATTERN =
        Pattern.compile("(?:(\\w+)=\"([^\"\"]*)\")+");
    // Challenge parameter that carries base64url-encoded claims for re-authorization.
    private static final String CLAIMS_PARAMETER = "claims";
    // Scopes supplied at construction; may be empty, in which case defaults are derived.
    private final String[] scopes;
    private final AzureEnvironment environment;
    // Context key under which onBeforeRequest stores the effective scopes.
    private static final String ARM_SCOPES_KEY = "ARMScopes";

    /**
     * Creates ARMChallengeAuthenticationPolicy.
     *
     * @param credential the token credential to authenticate the request
     * @param environment the environment with endpoints for authentication
     * @param scopes the scopes used in credential, using default scopes when empty
     */
    public ARMChallengeAuthenticationPolicy(TokenCredential credential,
        AzureEnvironment environment, String... scopes) {
        super(credential);
        this.scopes = scopes;
        this.environment = environment;
    }

    /**
     * Authorizes the outgoing request, deriving a default scope from the request URL when
     * no scopes were configured, and stashes the effective scopes in the pipeline context.
     */
    @Override
    public Mono<Void> onBeforeRequest(HttpPipelineCallContext context) {
        return Mono.defer(() -> {
            TokenRequestContext trc;
            String[] scopes = this.scopes;
            if (scopes == null || scopes.length == 0) {
                // No configured scopes: derive the default scope for this request's endpoint.
                scopes = new String[1];
                scopes[0] = ARMScopeHelper.getDefaultScopeFromRequest(
                    context.getHttpRequest(), environment);
            }
            trc = new TokenRequestContext().addScopes(scopes);
            // Remember the scopes so a later challenge (onChallenge) can reuse them.
            context.setData(ARM_SCOPES_KEY, scopes);
            return authorizeRequest(context, trc);
        });
    }

    /** Splits a WWW-Authenticate header into its individual challenges. */
    @Override
    List<AuthenticationChallenge> parseChallenges(String header) {
        Matcher matcher = AUTHENTICATION_CHALLENGE_PATTERN.matcher(header);
        List<AuthenticationChallenge> challenges = new ArrayList<>();
        while (matcher.find()) {
            challenges.add(new AuthenticationChallenge(matcher.group(1), matcher.group(2)));
        }
        return challenges;
    }

    /**
     * Parses a challenge's parameter list into a key/value map.
     * Never returns null; yields an empty map when nothing matches.
     */
    Map<String, String> parseChallengeParams(String challengeParams) {
        Matcher matcher = AUTHENTICATION_CHALLENGE_PARAMS_PATTERN.matcher(challengeParams);
        Map<String, String> challengeParameters = new HashMap<>();
        while (matcher.find()) {
            challengeParameters.put(matcher.group(1), matcher.group(2));
        }
        return challengeParameters;
    }
}
class ARMChallengeAuthenticationPolicy extends BearerTokenAuthenticationChallengePolicy { private static final Pattern AUTHENTICATION_CHALLENGE_PATTERN = Pattern.compile("(\\w+) ((?:\\w+=\".*?\"(?:, )?)+)(?:, )?"); private static final Pattern AUTHENTICATION_CHALLENGE_PARAMS_PATTERN = Pattern.compile("(?:(\\w+)=\"([^\"\"]*)\")+"); private static final String CLAIMS_PARAMETER = "claims"; private final String[] scopes; private final AzureEnvironment environment; private static final String ARM_SCOPES_KEY = "ARMScopes"; /** * Creates ARMChallengeAuthenticationPolicy. * * @param credential the token credential to authenticate the request * @param environment the environment with endpoints for authentication * @param scopes the scopes used in credential, using default scopes when empty */ public ARMChallengeAuthenticationPolicy(TokenCredential credential, AzureEnvironment environment, String... scopes) { super(credential); this.scopes = scopes; this.environment = environment; } @Override public Mono<Void> onBeforeRequest(HttpPipelineCallContext context) { return Mono.defer(() -> { String[] scopes = this.scopes; scopes = getScopes(context, scopes); context.setData(ARM_SCOPES_KEY, scopes); return authorizeRequest(context, new TokenRequestContext().addScopes(scopes)); }); } @Override private String[] getScopes(HttpPipelineCallContext context, String[] scopes) { if (CoreUtils.isNullOrEmpty(scopes)) { scopes = new String[1]; scopes[0] = ARMScopeHelper.getDefaultScopeFromRequest( context.getHttpRequest(), environment); } return scopes; } List<AuthenticationChallenge> parseChallenges(String header) { Matcher matcher = AUTHENTICATION_CHALLENGE_PATTERN.matcher(header); List<AuthenticationChallenge> challenges = new ArrayList<>(); while (matcher.find()) { challenges.add(new AuthenticationChallenge(matcher.group(1), matcher.group(2))); } return challenges; } Map<String, String> parseChallengeParams(String challengeParams) { Matcher matcher = 
AUTHENTICATION_CHALLENGE_PARAMS_PATTERN.matcher(challengeParams); Map<String, String> challengeParameters = new HashMap<>(); while (matcher.find()) { challengeParameters.put(matcher.group(1), matcher.group(2)); } return challengeParameters; } }
final ErrorResponse errorResponse = errorException.getValue(); com.azure.ai.textanalytics.models.TextAnalyticsError textAnalyticsError = null; if (errorResponse != null && errorResponse.getError() != null) { textAnalyticsError = toTextAnalyticsError(errorResponse.getError()); } return new HttpResponseException(errorException.getMessage(), errorException.getResponse(), textAnalyticsError);
public static Throwable mapToHttpResponseExceptionIfExists(Throwable throwable) { if (throwable instanceof ErrorResponseException) { ErrorResponseException errorException = (ErrorResponseException) throwable; if (errorException.getValue() != null) { return new HttpResponseException(errorException.getMessage(), errorException.getResponse(), toTextAnalyticsError(errorException.getValue().getError())); } else { return new HttpResponseException(errorException.getMessage(), errorException.getResponse(), null); } } return throwable; }
if (errorException.getValue() != null) {
public static Throwable mapToHttpResponseExceptionIfExists(Throwable throwable) { if (throwable instanceof ErrorResponseException) { ErrorResponseException errorException = (ErrorResponseException) throwable; final ErrorResponse errorResponse = errorException.getValue(); com.azure.ai.textanalytics.models.TextAnalyticsError textAnalyticsError = null; if (errorResponse != null && errorResponse.getError() != null) { textAnalyticsError = toTextAnalyticsError(errorResponse.getError()); } return new HttpResponseException(errorException.getMessage(), errorException.getResponse(), textAnalyticsError); } return throwable; }
/**
 * Internal helper methods for the Text Analytics client: input validation and conversions between
 * the auto-generated service (wire) models and the public client models.
 */
class Utility {
    private static final ClientLogger LOGGER = new ClientLogger(Utility.class);

    // Default polling interval used by long-running-operation pollers.
    public static final Duration DEFAULT_POLL_INTERVAL = Duration.ofSeconds(5);

    // Static-only utility class; never instantiated.
    private Utility() {
    }

    /**
     * Verify that list of documents are not null or empty. Otherwise, throw exception.
     *
     * @param documents A list of documents.
     *
     * @throws NullPointerException if {@code documents} is null.
     * @throws IllegalArgumentException if {@code documents} is empty.
     */
    public static void inputDocumentsValidation(Iterable<?> documents) {
        Objects.requireNonNull(documents, "'documents' cannot be null.");
        final Iterator<?> iterator = documents.iterator();
        if (!iterator.hasNext()) {
            throw new IllegalArgumentException("'documents' cannot be empty.");
        }
    }

    /**
     * Get a mock {@link HttpResponse} that only return status code 400.
     *
     * All header/body accessors return null; only the status code is meaningful.
     *
     * @param response A {@link SimpleResponse} with any type
     * @return A mock {@link HttpResponse} that only return status code 400.
     */
    public static HttpResponse getEmptyErrorIdHttpResponse(SimpleResponse<?> response) {
        return new HttpResponse(response.getRequest()) {
            @Override
            public int getStatusCode() {
                return 400;
            }

            @Override
            public String getHeaderValue(String s) {
                return null;
            }

            @Override
            public HttpHeaders getHeaders() {
                return null;
            }

            @Override
            public Flux<ByteBuffer> getBody() {
                return null;
            }

            @Override
            public Mono<byte[]> getBodyAsByteArray() {
                return null;
            }

            @Override
            public Mono<String> getBodyAsString() {
                return null;
            }

            @Override
            public Mono<String> getBodyAsString(Charset charset) {
                return null;
            }
        };
    }

    /**
     * Mapping a {@link ErrorResponseException} to {@link HttpResponseException} if exist. Otherwise, return
     * original {@link Throwable}.
     *
     * NOTE(review): this Javadoc documents {@code mapToHttpResponseExceptionIfExists}, which is
     * not present in this version of the class — the comment appears orphaned; confirm and
     * reattach or remove.
     *
     * @param throwable A {@link Throwable}.
     * @return A {@link HttpResponseException} or the original throwable type.
     */
    /**
     * Given a list of documents will apply the indexing function to it and return the updated list.
     *
     * @param documents the inputs to apply the mapping function to.
     * @param mappingFunction the function which applies the index to the incoming input value.
     * @param <T> the type of items being returned in the list.
     * @return The list holding all the generic items combined.
     */
    public static <T> List<T> mapByIndex(Iterable<String> documents, BiFunction<String, String, T> mappingFunction) {
        Objects.requireNonNull(documents, "'documents' cannot be null.");
        // Auto-assign sequential string ids ("0", "1", ...) as the first function argument.
        AtomicInteger i = new AtomicInteger(0);
        List<T> result = new ArrayList<>();
        documents.forEach(document ->
            result.add(mappingFunction.apply(String.valueOf(i.getAndIncrement()), document))
        );
        return result;
    }

    /**
     * Convert {@link DocumentStatistics} to {@link TextDocumentStatistics}
     *
     * @param statistics the {@link DocumentStatistics} provided by the service.
     * @return the {@link TextDocumentStatistics} returned by the SDK.
     */
    public static TextDocumentStatistics toTextDocumentStatistics(DocumentStatistics statistics) {
        return new TextDocumentStatistics(statistics.getCharactersCount(), statistics.getTransactionsCount());
    }

    /**
     * Convert {@link RequestStatistics} to {@link TextDocumentBatchStatistics}
     *
     * @param statistics the {@link RequestStatistics} provided by the service.
     * @return the {@link TextDocumentBatchStatistics} returned by the SDK.
     */
    public static TextDocumentBatchStatistics toBatchStatistics(RequestStatistics statistics) {
        return new TextDocumentBatchStatistics(statistics.getDocumentsCount(), statistics.getValidDocumentsCount(),
            statistics.getErroneousDocumentsCount(), statistics.getTransactionsCount());
    }

    /**
     * Convert {@link TextAnalyticsError} to {@link com.azure.ai.textanalytics.models.TextAnalyticsError}
     * This function maps the service returned {@link TextAnalyticsError inner error} to the top level
     * {@link com.azure.ai.textanalytics.models.TextAnalyticsError error}, if inner error present.
     *
     * @param textAnalyticsError the {@link TextAnalyticsError} returned by the service.
     * @return the {@link com.azure.ai.textanalytics.models.TextAnalyticsError} returned by the SDK.
     */
    public static com.azure.ai.textanalytics.models.TextAnalyticsError toTextAnalyticsError(
        TextAnalyticsError textAnalyticsError) {
        final InnerError innerError = textAnalyticsError.getInnererror();
        if (innerError == null) {
            // No inner error: map the outer code/message/target directly.
            final ErrorCodeValue errorCodeValue = textAnalyticsError.getCode();
            return new com.azure.ai.textanalytics.models.TextAnalyticsError(
                TextAnalyticsErrorCode.fromString(errorCodeValue == null ? null : errorCodeValue.toString()),
                textAnalyticsError.getMessage(),
                textAnalyticsError.getTarget());
        }
        // Inner error present: surface it as the top-level error (more specific than the outer one).
        final InnerErrorCodeValue innerErrorCodeValue = innerError.getCode();
        return new com.azure.ai.textanalytics.models.TextAnalyticsError(
            TextAnalyticsErrorCode.fromString(innerErrorCodeValue == null ? null : innerErrorCodeValue.toString()),
            innerError.getMessage(),
            innerError.getTarget());
    }

    /**
     * Convert the incoming input {@link TextDocumentInput} to the service expected {@link MultiLanguageInput}.
     *
     * @param documents the user provided input in {@link TextDocumentInput}
     * @return the service required input {@link MultiLanguageInput}
     */
    public static List<MultiLanguageInput> toMultiLanguageInput(Iterable<TextDocumentInput> documents) {
        List<MultiLanguageInput> multiLanguageInputs = new ArrayList<>();
        for (TextDocumentInput textDocumentInput : documents) {
            multiLanguageInputs.add(new MultiLanguageInput().setId(textDocumentInput.getId())
                .setText(textDocumentInput.getText()).setLanguage(textDocumentInput.getLanguage()));
        }
        return multiLanguageInputs;
    }

    /**
     * Convert the incoming input {@link com.azure.ai.textanalytics.models.TextAnalyticsError}
     * to a {@link TextAnalyticsException}.
     *
     * @param error the {@link com.azure.ai.textanalytics.models.TextAnalyticsError}.
     * @return the {@link TextAnalyticsException} to be thrown.
     */
    public static TextAnalyticsException toTextAnalyticsException(
        com.azure.ai.textanalytics.models.TextAnalyticsError error) {
        return new TextAnalyticsException(error.getMessage(), error.getErrorCode(), error.getTarget());
    }

    /**
     * Convert to a list of {@link LanguageInput} from {@link DetectLanguageInput}.
     *
     * @param documents The list of documents to detect languages for.
     *
     * @return a list of {@link LanguageInput}.
     */
    public static List<LanguageInput> toLanguageInput(Iterable<DetectLanguageInput> documents) {
        final List<LanguageInput> multiLanguageInputs = new ArrayList<>();
        documents.forEach(textDocumentInput -> multiLanguageInputs.add(new LanguageInput()
            .setId(textDocumentInput.getId())
            .setText(textDocumentInput.getText())
            .setCountryHint(textDocumentInput.getCountryHint())));
        return multiLanguageInputs;
    }

    /**
     * Extracts the operation ID from the 'operation-location' URL, i.e. the path segment after the
     * final '/'. (The example URL in the original comment was truncated in the source.)
     *
     * @param operationLocation The URL specified in the 'Operation-Location' response header containing the
     * operation ID used to track the progress and obtain the ID of the analyze operation.
     *
     * @return The operation ID that tracks the long running operation progress.
     */
    public static String parseOperationId(String operationLocation) {
        if (!CoreUtils.isNullOrEmpty(operationLocation)) {
            int lastIndex = operationLocation.lastIndexOf('/');
            if (lastIndex != -1) {
                return operationLocation.substring(lastIndex + 1);
            }
        }
        throw LOGGER.logExceptionAsError(
            new RuntimeException("Failed to parse operation header for operation Id from: " + operationLocation));
    }

    /**
     * Extract the next pagination link which contains the request parameter values, into map,
     * such as '$skip=20' and '$top=2'.
     *
     * @param nextLink the next pagination link.
     *
     * @return A map that holds the request parameter value of next pagination link.
     */
    public static Map<String, Integer> parseNextLink(String nextLink) {
        if (!CoreUtils.isNullOrEmpty(nextLink)) {
            Map<String, Integer> parameterMap = new HashMap<>();
            // Split off the query string, then break it into key=value pairs.
            String[] strings = nextLink.split("\\?", 2);
            String[] parameters = strings[1].split("&");
            for (String parameter : parameters) {
                String[] parameterPair = parameter.split("=");
                parameterMap.put(parameterPair[0], Integer.valueOf(parameterPair[1]));
            }
            return parameterMap;
        }
        return new HashMap<>();
    }

    // Converts the service EntitiesResult into the public RecognizeEntitiesResultCollection,
    // mapping per-document entities/warnings and appending per-document errors.
    public static RecognizeEntitiesResultCollection toRecognizeEntitiesResultCollectionResponse(
        final EntitiesResult entitiesResult) {
        List<RecognizeEntitiesResult> recognizeEntitiesResults = new ArrayList<>();
        entitiesResult.getDocuments().forEach(documentEntities ->
            recognizeEntitiesResults.add(new RecognizeEntitiesResult(
                documentEntities.getId(),
                documentEntities.getStatistics() == null
                    ? null : toTextDocumentStatistics(documentEntities.getStatistics()),
                null,
                new CategorizedEntityCollection(
                    new IterableStream<>(documentEntities.getEntities().stream().map(entity -> {
                        final CategorizedEntity categorizedEntity = new CategorizedEntity(entity.getText(),
                            EntityCategory.fromString(entity.getCategory()), entity.getSubcategory(),
                            entity.getConfidenceScore(), entity.getOffset());
                        // Length has no public constructor parameter; set it via the helper.
                        CategorizedEntityPropertiesHelper.setLength(categorizedEntity, entity.getLength());
                        return categorizedEntity;
                    }).collect(Collectors.toList())),
                    new IterableStream<>(documentEntities.getWarnings().stream()
                        .map(warning -> {
                            final WarningCodeValue warningCodeValue = warning.getCode();
                            return new TextAnalyticsWarning(
                                WarningCode.fromString(warningCodeValue == null
                                    ? null : warningCodeValue.toString()),
                                warning.getMessage());
                        }).collect(Collectors.toList())))
            )));
        // Documents the service failed on are surfaced as error-bearing results.
        for (DocumentError documentError : entitiesResult.getErrors()) {
            recognizeEntitiesResults.add(new RecognizeEntitiesResult(documentError.getId(), null,
                toTextAnalyticsError(documentError.getError()), null));
        }
        return new RecognizeEntitiesResultCollection(recognizeEntitiesResults, entitiesResult.getModelVersion(),
            entitiesResult.getStatistics() == null ? null : toBatchStatistics(entitiesResult.getStatistics()));
    }

    // Converts the service PiiResult into the public RecognizePiiEntitiesResultCollection,
    // including the redacted text and per-document warnings/errors.
    public static RecognizePiiEntitiesResultCollection toRecognizePiiEntitiesResultCollection(
        final PiiResult piiEntitiesResult) {
        final List<RecognizePiiEntitiesResult> recognizeEntitiesResults = new ArrayList<>();
        piiEntitiesResult.getDocuments().forEach(documentEntities -> {
            final List<PiiEntity> piiEntities = documentEntities.getEntities().stream().map(entity ->
                new PiiEntity(entity.getText(), EntityCategory.fromString(entity.getCategory()),
                    entity.getSubcategory(), entity.getConfidenceScore(), entity.getOffset()))
                .collect(Collectors.toList());
            final List<TextAnalyticsWarning> warnings = documentEntities.getWarnings().stream()
                .map(warning -> {
                    final WarningCodeValue warningCodeValue = warning.getCode();
                    return new TextAnalyticsWarning(
                        WarningCode.fromString(warningCodeValue == null ? null : warningCodeValue.toString()),
                        warning.getMessage());
                }).collect(Collectors.toList());
            recognizeEntitiesResults.add(new RecognizePiiEntitiesResult(
                documentEntities.getId(),
                documentEntities.getStatistics() == null
                    ? null : toTextDocumentStatistics(documentEntities.getStatistics()),
                null,
                new PiiEntityCollection(new IterableStream<>(piiEntities),
                    documentEntities.getRedactedText(),
                    new IterableStream<>(warnings))
            ));
        });
        for (DocumentError documentError : piiEntitiesResult.getErrors()) {
            recognizeEntitiesResults.add(new RecognizePiiEntitiesResult(documentError.getId(), null,
                toTextAnalyticsError(documentError.getError()), null));
        }
        return new RecognizePiiEntitiesResultCollection(recognizeEntitiesResults,
            piiEntitiesResult.getModelVersion(),
            piiEntitiesResult.getStatistics() == null ? null : toBatchStatistics(piiEntitiesResult.getStatistics()));
    }

    // Converts the service KeyPhraseResult into the public ExtractKeyPhrasesResultCollection.
    public static ExtractKeyPhrasesResultCollection toExtractKeyPhrasesResultCollection(
        final KeyPhraseResult keyPhraseResult) {
        final List<ExtractKeyPhraseResult> keyPhraseResultList = new ArrayList<>();
        for (DocumentKeyPhrases documentKeyPhrases : keyPhraseResult.getDocuments()) {
            final String documentId = documentKeyPhrases.getId();
            keyPhraseResultList.add(new ExtractKeyPhraseResult(
                documentId,
                documentKeyPhrases.getStatistics() == null
                    ? null : toTextDocumentStatistics(documentKeyPhrases.getStatistics()),
                null,
                new KeyPhrasesCollection(
                    new IterableStream<>(documentKeyPhrases.getKeyPhrases()),
                    new IterableStream<>(documentKeyPhrases.getWarnings().stream().map(warning -> {
                        final WarningCodeValue warningCodeValue = warning.getCode();
                        return new TextAnalyticsWarning(
                            WarningCode.fromString(warningCodeValue == null ? null : warningCodeValue.toString()),
                            warning.getMessage());
                    }).collect(Collectors.toList())))));
        }
        for (DocumentError documentError : keyPhraseResult.getErrors()) {
            keyPhraseResultList.add(new ExtractKeyPhraseResult(documentError.getId(), null,
                toTextAnalyticsError(documentError.getError()), null));
        }
        return new ExtractKeyPhrasesResultCollection(keyPhraseResultList, keyPhraseResult.getModelVersion(),
            keyPhraseResult.getStatistics() == null ? null : toBatchStatistics(keyPhraseResult.getStatistics()));
    }

    /**
     * Transfer {@link HealthcareResult} into {@link IterableStream} of {@link AnalyzeHealthcareEntitiesResult}.
     *
     * @param healthcareResult the service side raw data, HealthcareResult.
     *
     * @return the client side explored model, RecognizeHealthcareEntitiesResultCollection.
     */
    public static IterableStream<AnalyzeHealthcareEntitiesResult> toRecognizeHealthcareEntitiesResults(
        HealthcareResult healthcareResult) {
        List<AnalyzeHealthcareEntitiesResult> analyzeHealthcareEntitiesResults = new ArrayList<>();
        healthcareResult.getDocuments().forEach(
            documentEntities -> {
                // Map service warnings; a null warnings list becomes an empty list.
                final List<TextAnalyticsWarning> warnings = Optional.ofNullable(documentEntities.getWarnings())
                    .map(textAnalyticsWarnings -> textAnalyticsWarnings.stream().map(
                        textAnalyticsWarning -> new TextAnalyticsWarning(
                            Optional.ofNullable(textAnalyticsWarning.getCode())
                                .map(warningCodeValue -> WarningCode.fromString(warningCodeValue.toString()))
                                .orElse(null),
                            textAnalyticsWarning.getMessage())
                    ).collect(Collectors.toList())
                    ).orElse(new ArrayList<>());
                // Build the public entity list; list order matters because relations reference
                // entities by their index in this list.
                final List<HealthcareEntity> healthcareEntities = documentEntities.getEntities().stream().map(
                    entity -> {
                        final HealthcareEntity healthcareEntity = new HealthcareEntity();
                        HealthcareEntityPropertiesHelper.setText(healthcareEntity, entity.getText());
                        HealthcareEntityPropertiesHelper.setCategory(healthcareEntity, entity.getCategory());
                        HealthcareEntityPropertiesHelper.setConfidenceScore(healthcareEntity,
                            entity.getConfidenceScore());
                        HealthcareEntityPropertiesHelper.setOffset(healthcareEntity, entity.getOffset());
                        HealthcareEntityPropertiesHelper.setLength(healthcareEntity, entity.getLength());
                        HealthcareEntityPropertiesHelper.setDataSources(healthcareEntity,
                            entity.getLinks() == null
                                ? null : IterableStream.of(entity.getLinks().stream()
                                    .map(healthcareEntityLink -> {
                                        final EntityDataSource entityDataSourceOrigin = new EntityDataSource();
                                        EntityDataSourcePropertiesHelper.setName(entityDataSourceOrigin,
                                            healthcareEntityLink.getDataSource());
                                        EntityDataSourcePropertiesHelper.setEntityId(
                                            entityDataSourceOrigin, healthcareEntityLink.getId());
                                        return entityDataSourceOrigin;
                                    })
                                    .collect(Collectors.toList())));
                        // Default to no related entities; filled in below from the relations list.
                        HealthcareEntityPropertiesHelper.setRelatedEntities(healthcareEntity, new HashMap<>());
                        return healthcareEntity;
                    }).collect(Collectors.toList());
                // Build relation maps keyed by target entity (and by source too when bidirectional).
                Map<HealthcareEntity, Map<HealthcareEntity, HealthcareEntityRelationType>> entityRelationMap =
                    new HashMap<>();
                if (!CoreUtils.isNullOrEmpty(documentEntities.getRelations())) {
                    documentEntities.getRelations().forEach(healthcareRelation -> {
                        final HealthcareEntity targetEntity =
                            healthcareEntities.get(getHealthcareEntityIndex(healthcareRelation.getTarget()));
                        final HealthcareEntity sourceEntity =
                            healthcareEntities.get(getHealthcareEntityIndex(healthcareRelation.getSource()));
                        final HealthcareEntityRelationType relationType =
                            HealthcareEntityRelationType.fromString(healthcareRelation.getRelationType());
                        final Map<HealthcareEntity, HealthcareEntityRelationType> targetRelatedEntity =
                            entityRelationMap.getOrDefault(targetEntity, new HashMap<>());
                        targetRelatedEntity.putIfAbsent(sourceEntity, relationType);
                        entityRelationMap.putIfAbsent(targetEntity, targetRelatedEntity);
                        if (healthcareRelation.isBidirectional()) {
                            // Bidirectional relations are recorded from both endpoints.
                            final Map<HealthcareEntity, HealthcareEntityRelationType> sourceRelatedEntity =
                                entityRelationMap.getOrDefault(sourceEntity, new HashMap<>());
                            sourceRelatedEntity.putIfAbsent(targetEntity, relationType);
                            entityRelationMap.putIfAbsent(sourceEntity, sourceRelatedEntity);
                        }
                    });
                }
                healthcareEntities.forEach(healthcareEntity -> {
                    if (entityRelationMap.containsKey(healthcareEntity)) {
                        HealthcareEntityPropertiesHelper.setRelatedEntities(healthcareEntity,
                            entityRelationMap.get(healthcareEntity));
                    }
                });
                final AnalyzeHealthcareEntitiesResult analyzeHealthcareEntitiesResult =
                    new AnalyzeHealthcareEntitiesResult(
                        documentEntities.getId(),
                        documentEntities.getStatistics() == null
                            ? null : toTextDocumentStatistics(documentEntities.getStatistics()),
                        null);
                AnalyzeHealthcareEntitiesResultPropertiesHelper.setEntities(analyzeHealthcareEntitiesResult,
                    IterableStream.of(healthcareEntities));
                AnalyzeHealthcareEntitiesResultPropertiesHelper.setWarnings(analyzeHealthcareEntitiesResult,
                    IterableStream.of(warnings));
                analyzeHealthcareEntitiesResults.add(analyzeHealthcareEntitiesResult);
            });
        healthcareResult.getErrors().forEach(documentError ->
            analyzeHealthcareEntitiesResults.add(new AnalyzeHealthcareEntitiesResult(
                documentError.getId(), null, toTextAnalyticsError(documentError.getError())))
        );
        return IterableStream.of(analyzeHealthcareEntitiesResults);
    }

    /**
     * Helper function that parse healthcare entity index from the given entity reference string.
     * The code reads the segment after the final '/' as a decimal index — presumably the reference
     * is a JSON-pointer-like path ending in the entity's index (the example in the original
     * comment was truncated; TODO confirm the exact format).
     *
     * @param entityReference the given healthcare entity reference string.
     *
     * @return the healthcare entity index.
     */
    private static Integer getHealthcareEntityIndex(String entityReference) {
        if (!CoreUtils.isNullOrEmpty(entityReference)) {
            int lastIndex = entityReference.lastIndexOf('/');
            if (lastIndex != -1) {
                return Integer.parseInt(entityReference.substring(lastIndex + 1));
            }
        }
        throw LOGGER.logExceptionAsError(
            new RuntimeException("Failed to parse healthcare entity index from: " + entityReference));
    }

    /**
     * Transfer {@link com.azure.ai.textanalytics.models.StringIndexType} into auto-generated {@link StringIndexType}.
     * If value is null, use the default type for java, UTF16CODE_UNIT.
     *
     * @param stringIndexType The public explored StringIndexType.
     *
     * @return The autogenerated internally used StringIndexType.
     */
    public static StringIndexType getNonNullStringIndexType(
        com.azure.ai.textanalytics.models.StringIndexType stringIndexType) {
        return stringIndexType == null
            ? StringIndexType.UTF16CODE_UNIT : StringIndexType.fromString(stringIndexType.toString());
    }

    /**
     * Get the non-null {@link Context}. The default value is {@code Context.NONE}.
     *
     * @param context It offers a means of passing arbitrary data (key-value pairs) to pipeline policies.
     * Most applications do not need to pass arbitrary data to the pipeline and can pass Context.NONE or null.
     *
     * @return The Context.
     */
    public static Context getNotNullContext(Context context) {
        return context == null ? Context.NONE : context;
    }
}
class Utility { private static final ClientLogger LOGGER = new ClientLogger(Utility.class); public static final Duration DEFAULT_POLL_INTERVAL = Duration.ofSeconds(5); private Utility() { } /** * Verify that list of documents are not null or empty. Otherwise, throw exception. * * @param documents A list of documents. * * @throws NullPointerException if {@code documents} is null. * @throws IllegalArgumentException if {@code documents} is empty. */ public static void inputDocumentsValidation(Iterable<?> documents) { Objects.requireNonNull(documents, "'documents' cannot be null."); final Iterator<?> iterator = documents.iterator(); if (!iterator.hasNext()) { throw new IllegalArgumentException("'documents' cannot be empty."); } } /** * Mapping a {@link ErrorResponseException} to {@link HttpResponseException} if exist. Otherwise, return * original {@link Throwable}. * * @param throwable A {@link Throwable}. * @return A {@link HttpResponseException} or the original throwable type. */ /** * Given a list of documents will apply the indexing function to it and return the updated list. * * @param documents the inputs to apply the mapping function to. * @param mappingFunction the function which applies the index to the incoming input value. * @param <T> the type of items being returned in the list. * @return The list holding all the generic items combined. */ public static <T> List<T> mapByIndex(Iterable<String> documents, BiFunction<String, String, T> mappingFunction) { Objects.requireNonNull(documents, "'documents' cannot be null."); AtomicInteger i = new AtomicInteger(0); List<T> result = new ArrayList<>(); documents.forEach(document -> result.add(mappingFunction.apply(String.valueOf(i.getAndIncrement()), document)) ); return result; } /** * Convert {@link DocumentStatistics} to {@link TextDocumentStatistics} * * @param statistics the {@link DocumentStatistics} provided by the service. * @return the {@link TextDocumentStatistics} returned by the SDK. 
*/ public static TextDocumentStatistics toTextDocumentStatistics(DocumentStatistics statistics) { return new TextDocumentStatistics(statistics.getCharactersCount(), statistics.getTransactionsCount()); } /** * Convert {@link RequestStatistics} to {@link TextDocumentBatchStatistics} * * @param statistics the {@link RequestStatistics} provided by the service. * @return the {@link TextDocumentBatchStatistics} returned by the SDK. */ public static TextDocumentBatchStatistics toBatchStatistics(RequestStatistics statistics) { return new TextDocumentBatchStatistics(statistics.getDocumentsCount(), statistics.getValidDocumentsCount(), statistics.getErroneousDocumentsCount(), statistics.getTransactionsCount()); } /** * Convert {@link TextAnalyticsError} to {@link com.azure.ai.textanalytics.models.TextAnalyticsError} * This function maps the service returned {@link TextAnalyticsError inner error} to the top level * {@link com.azure.ai.textanalytics.models.TextAnalyticsError error}, if inner error present. * * @param textAnalyticsError the {@link TextAnalyticsError} returned by the service. * @return the {@link com.azure.ai.textanalytics.models.TextAnalyticsError} returned by the SDK. */ public static com.azure.ai.textanalytics.models.TextAnalyticsError toTextAnalyticsError( TextAnalyticsError textAnalyticsError) { final InnerError innerError = textAnalyticsError.getInnererror(); if (innerError == null) { final ErrorCodeValue errorCodeValue = textAnalyticsError.getCode(); return new com.azure.ai.textanalytics.models.TextAnalyticsError( TextAnalyticsErrorCode.fromString(errorCodeValue == null ? null : errorCodeValue.toString()), textAnalyticsError.getMessage(), textAnalyticsError.getTarget()); } final InnerErrorCodeValue innerErrorCodeValue = innerError.getCode(); return new com.azure.ai.textanalytics.models.TextAnalyticsError( TextAnalyticsErrorCode.fromString(innerErrorCodeValue == null ? 
null : innerErrorCodeValue.toString()), innerError.getMessage(), innerError.getTarget()); } /** * Convert the incoming input {@link TextDocumentInput} to the service expected {@link MultiLanguageInput}. * * @param documents the user provided input in {@link TextDocumentInput} * @return the service required input {@link MultiLanguageInput} */ public static List<MultiLanguageInput> toMultiLanguageInput(Iterable<TextDocumentInput> documents) { List<MultiLanguageInput> multiLanguageInputs = new ArrayList<>(); for (TextDocumentInput textDocumentInput : documents) { multiLanguageInputs.add(new MultiLanguageInput().setId(textDocumentInput.getId()) .setText(textDocumentInput.getText()).setLanguage(textDocumentInput.getLanguage())); } return multiLanguageInputs; } /** * Convert the incoming input {@link com.azure.ai.textanalytics.models.TextAnalyticsError} * to a {@link TextAnalyticsException}. * * @param error the {@link com.azure.ai.textanalytics.models.TextAnalyticsError}. * @return the {@link TextAnalyticsException} to be thrown. */ public static TextAnalyticsException toTextAnalyticsException( com.azure.ai.textanalytics.models.TextAnalyticsError error) { return new TextAnalyticsException(error.getMessage(), error.getErrorCode(), error.getTarget()); } /** * Convert to a list of {@link LanguageInput} from {@link DetectLanguageInput}. * * @param documents The list of documents to detect languages for. * * @return a list of {@link LanguageInput}. */ public static List<LanguageInput> toLanguageInput(Iterable<DetectLanguageInput> documents) { final List<LanguageInput> multiLanguageInputs = new ArrayList<>(); documents.forEach(textDocumentInput -> multiLanguageInputs.add(new LanguageInput() .setId(textDocumentInput.getId()) .setText(textDocumentInput.getText()) .setCountryHint(textDocumentInput.getCountryHint()))); return multiLanguageInputs; } /** * Extracts the operation ID from the 'operation-location' URL. 
An example of 'operation-location' is * https: * * @param operationLocation The URL specified in the 'Operation-Location' response header containing the * operation ID used to track the progress and obtain the ID of the analyze operation. * * @return The operation ID that tracks the long running operation progress. */ public static String parseOperationId(String operationLocation) { if (!CoreUtils.isNullOrEmpty(operationLocation)) { int lastIndex = operationLocation.lastIndexOf('/'); if (lastIndex != -1) { return operationLocation.substring(lastIndex + 1); } } throw LOGGER.logExceptionAsError( new RuntimeException("Failed to parse operation header for operation Id from: " + operationLocation)); } /** * Extract the next pagination link which contains the request parameter values, into map, * such as '$skip=20' and '$top=2'. * * @param nextLink the next pagination link. * * @return A map that holds the request parameter value of next pagination link. */ public static Map<String, Integer> parseNextLink(String nextLink) { if (!CoreUtils.isNullOrEmpty(nextLink)) { Map<String, Integer> parameterMap = new HashMap<>(); String[] strings = nextLink.split("\\?", 2); String[] parameters = strings[1].split("&"); for (String parameter : parameters) { String[] parameterPair = parameter.split("="); parameterMap.put(parameterPair[0], Integer.valueOf(parameterPair[1])); } return parameterMap; } return new HashMap<>(); } public static RecognizeEntitiesResultCollection toRecognizeEntitiesResultCollectionResponse( final EntitiesResult entitiesResult) { List<RecognizeEntitiesResult> recognizeEntitiesResults = new ArrayList<>(); entitiesResult.getDocuments().forEach(documentEntities -> recognizeEntitiesResults.add(new RecognizeEntitiesResult( documentEntities.getId(), documentEntities.getStatistics() == null ? 
null : toTextDocumentStatistics(documentEntities.getStatistics()), null, new CategorizedEntityCollection( new IterableStream<>(documentEntities.getEntities().stream().map(entity -> { final CategorizedEntity categorizedEntity = new CategorizedEntity(entity.getText(), EntityCategory.fromString(entity.getCategory()), entity.getSubcategory(), entity.getConfidenceScore(), entity.getOffset()); CategorizedEntityPropertiesHelper.setLength(categorizedEntity, entity.getLength()); return categorizedEntity; }).collect(Collectors.toList())), new IterableStream<>(documentEntities.getWarnings().stream() .map(warning -> { final WarningCodeValue warningCodeValue = warning.getCode(); return new TextAnalyticsWarning( WarningCode.fromString(warningCodeValue == null ? null : warningCodeValue.toString()), warning.getMessage()); }).collect(Collectors.toList()))) ))); for (DocumentError documentError : entitiesResult.getErrors()) { recognizeEntitiesResults.add(new RecognizeEntitiesResult(documentError.getId(), null, toTextAnalyticsError(documentError.getError()), null)); } return new RecognizeEntitiesResultCollection(recognizeEntitiesResults, entitiesResult.getModelVersion(), entitiesResult.getStatistics() == null ? 
null : toBatchStatistics(entitiesResult.getStatistics())); } public static RecognizePiiEntitiesResultCollection toRecognizePiiEntitiesResultCollection( final PiiResult piiEntitiesResult) { final List<RecognizePiiEntitiesResult> recognizeEntitiesResults = new ArrayList<>(); piiEntitiesResult.getDocuments().forEach(documentEntities -> { final List<PiiEntity> piiEntities = documentEntities.getEntities().stream().map(entity -> new PiiEntity(entity.getText(), EntityCategory.fromString(entity.getCategory()), entity.getSubcategory(), entity.getConfidenceScore(), entity.getOffset())) .collect(Collectors.toList()); final List<TextAnalyticsWarning> warnings = documentEntities.getWarnings().stream() .map(warning -> { final WarningCodeValue warningCodeValue = warning.getCode(); return new TextAnalyticsWarning( WarningCode.fromString(warningCodeValue == null ? null : warningCodeValue.toString()), warning.getMessage()); }).collect(Collectors.toList()); recognizeEntitiesResults.add(new RecognizePiiEntitiesResult( documentEntities.getId(), documentEntities.getStatistics() == null ? null : toTextDocumentStatistics(documentEntities.getStatistics()), null, new PiiEntityCollection(new IterableStream<>(piiEntities), documentEntities.getRedactedText(), new IterableStream<>(warnings)) )); }); for (DocumentError documentError : piiEntitiesResult.getErrors()) { recognizeEntitiesResults.add(new RecognizePiiEntitiesResult(documentError.getId(), null, toTextAnalyticsError(documentError.getError()), null)); } return new RecognizePiiEntitiesResultCollection(recognizeEntitiesResults, piiEntitiesResult.getModelVersion(), piiEntitiesResult.getStatistics() == null ? 
null : toBatchStatistics(piiEntitiesResult.getStatistics())); } public static ExtractKeyPhrasesResultCollection toExtractKeyPhrasesResultCollection( final KeyPhraseResult keyPhraseResult) { final List<ExtractKeyPhraseResult> keyPhraseResultList = new ArrayList<>(); for (DocumentKeyPhrases documentKeyPhrases : keyPhraseResult.getDocuments()) { final String documentId = documentKeyPhrases.getId(); keyPhraseResultList.add(new ExtractKeyPhraseResult( documentId, documentKeyPhrases.getStatistics() == null ? null : toTextDocumentStatistics(documentKeyPhrases.getStatistics()), null, new KeyPhrasesCollection( new IterableStream<>(documentKeyPhrases.getKeyPhrases()), new IterableStream<>(documentKeyPhrases.getWarnings().stream().map(warning -> { final WarningCodeValue warningCodeValue = warning.getCode(); return new TextAnalyticsWarning( WarningCode.fromString(warningCodeValue == null ? null : warningCodeValue.toString()), warning.getMessage()); }).collect(Collectors.toList()))))); } for (DocumentError documentError : keyPhraseResult.getErrors()) { keyPhraseResultList.add(new ExtractKeyPhraseResult(documentError.getId(), null, toTextAnalyticsError(documentError.getError()), null)); } return new ExtractKeyPhrasesResultCollection(keyPhraseResultList, keyPhraseResult.getModelVersion(), keyPhraseResult.getStatistics() == null ? null : toBatchStatistics(keyPhraseResult.getStatistics())); } /** * Transfer {@link HealthcareResult} into {@link IterableStream} of {@link AnalyzeHealthcareEntitiesResult}. * * @param healthcareResult the service side raw data, HealthcareResult. * * @return the client side explored model, RecognizeHealthcareEntitiesResultCollection. 
*/ public static IterableStream<AnalyzeHealthcareEntitiesResult> toRecognizeHealthcareEntitiesResults( HealthcareResult healthcareResult) { List<AnalyzeHealthcareEntitiesResult> analyzeHealthcareEntitiesResults = new ArrayList<>(); healthcareResult.getDocuments().forEach( documentEntities -> { final List<TextAnalyticsWarning> warnings = Optional.ofNullable(documentEntities.getWarnings()) .map(textAnalyticsWarnings -> textAnalyticsWarnings.stream().map( textAnalyticsWarning -> new TextAnalyticsWarning( Optional.ofNullable(textAnalyticsWarning.getCode()) .map(warningCodeValue -> WarningCode.fromString(warningCodeValue.toString())) .orElse(null), textAnalyticsWarning.getMessage()) ).collect(Collectors.toList()) ).orElse(new ArrayList<>()); final List<HealthcareEntity> healthcareEntities = documentEntities.getEntities().stream().map( entity -> { final HealthcareEntity healthcareEntity = new HealthcareEntity(); HealthcareEntityPropertiesHelper.setText(healthcareEntity, entity.getText()); HealthcareEntityPropertiesHelper.setCategory(healthcareEntity, entity.getCategory()); HealthcareEntityPropertiesHelper.setConfidenceScore(healthcareEntity, entity.getConfidenceScore()); HealthcareEntityPropertiesHelper.setOffset(healthcareEntity, entity.getOffset()); HealthcareEntityPropertiesHelper.setLength(healthcareEntity, entity.getLength()); HealthcareEntityPropertiesHelper.setDataSources(healthcareEntity, entity.getLinks() == null ? 
null : IterableStream.of(entity.getLinks().stream() .map(healthcareEntityLink -> { final EntityDataSource entityDataSourceOrigin = new EntityDataSource(); EntityDataSourcePropertiesHelper.setName(entityDataSourceOrigin, healthcareEntityLink.getDataSource()); EntityDataSourcePropertiesHelper.setEntityId( entityDataSourceOrigin, healthcareEntityLink.getId()); return entityDataSourceOrigin; }) .collect(Collectors.toList()))); HealthcareEntityPropertiesHelper.setRelatedEntities(healthcareEntity, new HashMap<>()); return healthcareEntity; }).collect(Collectors.toList()); Map<HealthcareEntity, Map<HealthcareEntity, HealthcareEntityRelationType>> entityRelationMap = new HashMap<>(); if (!CoreUtils.isNullOrEmpty(documentEntities.getRelations())) { documentEntities.getRelations().forEach(healthcareRelation -> { final HealthcareEntity targetEntity = healthcareEntities.get(getHealthcareEntityIndex(healthcareRelation.getTarget())); final HealthcareEntity sourceEntity = healthcareEntities.get(getHealthcareEntityIndex(healthcareRelation.getSource())); final HealthcareEntityRelationType relationType = HealthcareEntityRelationType.fromString(healthcareRelation.getRelationType()); final Map<HealthcareEntity, HealthcareEntityRelationType> targetRelatedEntity = entityRelationMap.getOrDefault(targetEntity, new HashMap<>()); targetRelatedEntity.putIfAbsent(sourceEntity, relationType); entityRelationMap.putIfAbsent(targetEntity, targetRelatedEntity); if (healthcareRelation.isBidirectional()) { final Map<HealthcareEntity, HealthcareEntityRelationType> sourceRelatedEntity = entityRelationMap.getOrDefault(sourceEntity, new HashMap<>()); sourceRelatedEntity.putIfAbsent(targetEntity, relationType); entityRelationMap.putIfAbsent(sourceEntity, sourceRelatedEntity); } }); } healthcareEntities.forEach(healthcareEntity -> { if (entityRelationMap.containsKey(healthcareEntity)) { HealthcareEntityPropertiesHelper.setRelatedEntities(healthcareEntity, entityRelationMap.get(healthcareEntity)); } }); 
final AnalyzeHealthcareEntitiesResult analyzeHealthcareEntitiesResult = new AnalyzeHealthcareEntitiesResult( documentEntities.getId(), documentEntities.getStatistics() == null ? null : toTextDocumentStatistics(documentEntities.getStatistics()), null); AnalyzeHealthcareEntitiesResultPropertiesHelper.setEntities(analyzeHealthcareEntitiesResult, IterableStream.of(healthcareEntities)); AnalyzeHealthcareEntitiesResultPropertiesHelper.setWarnings(analyzeHealthcareEntitiesResult, IterableStream.of(warnings)); analyzeHealthcareEntitiesResults.add(analyzeHealthcareEntitiesResult); }); healthcareResult.getErrors().forEach(documentError -> analyzeHealthcareEntitiesResults.add(new AnalyzeHealthcareEntitiesResult( documentError.getId(), null, toTextAnalyticsError(documentError.getError()))) ); return IterableStream.of(analyzeHealthcareEntitiesResults); } /** * Helper function that parse healthcare entity index from the given entity reference string. * The entity reference format is " * * @param entityReference the given healthcare entity reference string. * * @return the healthcare entity index. */ private static Integer getHealthcareEntityIndex(String entityReference) { if (!CoreUtils.isNullOrEmpty(entityReference)) { int lastIndex = entityReference.lastIndexOf('/'); if (lastIndex != -1) { return Integer.parseInt(entityReference.substring(lastIndex + 1)); } } throw LOGGER.logExceptionAsError( new RuntimeException("Failed to parse healthcare entity index from: " + entityReference)); } /** * Transfer {@link com.azure.ai.textanalytics.models.StringIndexType} into auto-generated {@link StringIndexType}. * If value is null, use the default type for java, UTF16CODE_UNIT. * * @param stringIndexType The public explored StringIndexType. * * @return The autogenerated internally used StringIndexType. */ public static StringIndexType getNonNullStringIndexType( com.azure.ai.textanalytics.models.StringIndexType stringIndexType) { return stringIndexType == null ? 
StringIndexType.UTF16CODE_UNIT : StringIndexType.fromString(stringIndexType.toString()); } /** * Get the non-null {@link Context}. The default value is {@link Context * * @param context It offers a means of passing arbitrary data (key-value pairs) to pipeline policies. * Most applications do not need to pass arbitrary data to the pipeline and can pass Context.NONE or null. * * @return The Context. */ public static Context getNotNullContext(Context context) { return context == null ? Context.NONE : context; } /** * Helper function which retrieves the size of an {@link Iterable}. * * @param documents The iterable of documents. * @return Count of documents in the iterable. */ public static int getDocumentCount(Iterable<?> documents) { if (documents instanceof Collection) { return ((Collection<?>) documents).size(); } else { final int[] count = new int[] { 0 }; documents.forEach(ignored -> count[0] += 1); return count[0]; } } }
It adds a null check for "errorException.getValue().getError()" before mapping it to a TextAnalyticsError, so a response body with a missing error no longer causes a NullPointerException.
/**
 * Maps an {@link ErrorResponseException} to an {@link HttpResponseException}, translating the service error body
 * (when present) into a {@link com.azure.ai.textanalytics.models.TextAnalyticsError}. Any other throwable is
 * returned unchanged.
 *
 * @param throwable A {@link Throwable} raised by the service call.
 * @return A {@link HttpResponseException} when the throwable is an {@link ErrorResponseException}; otherwise the
 * original throwable.
 */
public static Throwable mapToHttpResponseExceptionIfExists(Throwable throwable) {
    if (throwable instanceof ErrorResponseException) {
        final ErrorResponseException errorException = (ErrorResponseException) throwable;
        final ErrorResponse errorResponse = errorException.getValue();
        com.azure.ai.textanalytics.models.TextAnalyticsError textAnalyticsError = null;
        // BUG FIX: 'getError()' may be null even when a response body exists, and 'toTextAnalyticsError'
        // dereferences its argument ('getInnererror()'), so guard both levels to avoid a NullPointerException
        // while mapping the exception.
        if (errorResponse != null && errorResponse.getError() != null) {
            textAnalyticsError = toTextAnalyticsError(errorResponse.getError());
        }
        return new HttpResponseException(errorException.getMessage(), errorException.getResponse(),
            textAnalyticsError);
    }
    return throwable;
}
if (errorException.getValue() != null) {
/**
 * Converts an {@link ErrorResponseException} into an {@link HttpResponseException}, carrying the service error
 * body (when both the body and its error are present) as a
 * {@link com.azure.ai.textanalytics.models.TextAnalyticsError}. Throwables of any other type are passed through
 * untouched.
 *
 * @param throwable The throwable raised by the service call.
 * @return The mapped {@link HttpResponseException}, or the original throwable when no mapping applies.
 */
public static Throwable mapToHttpResponseExceptionIfExists(Throwable throwable) {
    // Guard clause: only ErrorResponseException instances are remapped.
    if (!(throwable instanceof ErrorResponseException)) {
        return throwable;
    }
    final ErrorResponseException responseException = (ErrorResponseException) throwable;
    final ErrorResponse body = responseException.getValue();
    // Translate the error only when both the body and its nested error are present; otherwise keep null.
    final com.azure.ai.textanalytics.models.TextAnalyticsError mappedError =
        (body == null || body.getError() == null) ? null : toTextAnalyticsError(body.getError());
    return new HttpResponseException(responseException.getMessage(), responseException.getResponse(), mappedError);
}
class Utility { private static final ClientLogger LOGGER = new ClientLogger(Utility.class); public static final Duration DEFAULT_POLL_INTERVAL = Duration.ofSeconds(5); private Utility() { } /** * Verify that list of documents are not null or empty. Otherwise, throw exception. * * @param documents A list of documents. * * @throws NullPointerException if {@code documents} is null. * @throws IllegalArgumentException if {@code documents} is empty. */ public static void inputDocumentsValidation(Iterable<?> documents) { Objects.requireNonNull(documents, "'documents' cannot be null."); final Iterator<?> iterator = documents.iterator(); if (!iterator.hasNext()) { throw new IllegalArgumentException("'documents' cannot be empty."); } } /** * Get a mock {@link HttpResponse} that only return status code 400. * * @param response A {@link SimpleResponse} with any type * @return A mock {@link HttpResponse} that only return status code 400. */ public static HttpResponse getEmptyErrorIdHttpResponse(SimpleResponse<?> response) { return new HttpResponse(response.getRequest()) { @Override public int getStatusCode() { return 400; } @Override public String getHeaderValue(String s) { return null; } @Override public HttpHeaders getHeaders() { return null; } @Override public Flux<ByteBuffer> getBody() { return null; } @Override public Mono<byte[]> getBodyAsByteArray() { return null; } @Override public Mono<String> getBodyAsString() { return null; } @Override public Mono<String> getBodyAsString(Charset charset) { return null; } }; } /** * Mapping a {@link ErrorResponseException} to {@link HttpResponseException} if exist. Otherwise, return * original {@link Throwable}. * * @param throwable A {@link Throwable}. * @return A {@link HttpResponseException} or the original throwable type. */ /** * Given a list of documents will apply the indexing function to it and return the updated list. * * @param documents the inputs to apply the mapping function to. 
* @param mappingFunction the function which applies the index to the incoming input value. * @param <T> the type of items being returned in the list. * @return The list holding all the generic items combined. */ public static <T> List<T> mapByIndex(Iterable<String> documents, BiFunction<String, String, T> mappingFunction) { Objects.requireNonNull(documents, "'documents' cannot be null."); AtomicInteger i = new AtomicInteger(0); List<T> result = new ArrayList<>(); documents.forEach(document -> result.add(mappingFunction.apply(String.valueOf(i.getAndIncrement()), document)) ); return result; } /** * Convert {@link DocumentStatistics} to {@link TextDocumentStatistics} * * @param statistics the {@link DocumentStatistics} provided by the service. * @return the {@link TextDocumentStatistics} returned by the SDK. */ public static TextDocumentStatistics toTextDocumentStatistics(DocumentStatistics statistics) { return new TextDocumentStatistics(statistics.getCharactersCount(), statistics.getTransactionsCount()); } /** * Convert {@link RequestStatistics} to {@link TextDocumentBatchStatistics} * * @param statistics the {@link RequestStatistics} provided by the service. * @return the {@link TextDocumentBatchStatistics} returned by the SDK. */ public static TextDocumentBatchStatistics toBatchStatistics(RequestStatistics statistics) { return new TextDocumentBatchStatistics(statistics.getDocumentsCount(), statistics.getValidDocumentsCount(), statistics.getErroneousDocumentsCount(), statistics.getTransactionsCount()); } /** * Convert {@link TextAnalyticsError} to {@link com.azure.ai.textanalytics.models.TextAnalyticsError} * This function maps the service returned {@link TextAnalyticsError inner error} to the top level * {@link com.azure.ai.textanalytics.models.TextAnalyticsError error}, if inner error present. * * @param textAnalyticsError the {@link TextAnalyticsError} returned by the service. 
* @return the {@link com.azure.ai.textanalytics.models.TextAnalyticsError} returned by the SDK. */ public static com.azure.ai.textanalytics.models.TextAnalyticsError toTextAnalyticsError( TextAnalyticsError textAnalyticsError) { final InnerError innerError = textAnalyticsError.getInnererror(); if (innerError == null) { final ErrorCodeValue errorCodeValue = textAnalyticsError.getCode(); return new com.azure.ai.textanalytics.models.TextAnalyticsError( TextAnalyticsErrorCode.fromString(errorCodeValue == null ? null : errorCodeValue.toString()), textAnalyticsError.getMessage(), textAnalyticsError.getTarget()); } final InnerErrorCodeValue innerErrorCodeValue = innerError.getCode(); return new com.azure.ai.textanalytics.models.TextAnalyticsError( TextAnalyticsErrorCode.fromString(innerErrorCodeValue == null ? null : innerErrorCodeValue.toString()), innerError.getMessage(), innerError.getTarget()); } /** * Convert the incoming input {@link TextDocumentInput} to the service expected {@link MultiLanguageInput}. * * @param documents the user provided input in {@link TextDocumentInput} * @return the service required input {@link MultiLanguageInput} */ public static List<MultiLanguageInput> toMultiLanguageInput(Iterable<TextDocumentInput> documents) { List<MultiLanguageInput> multiLanguageInputs = new ArrayList<>(); for (TextDocumentInput textDocumentInput : documents) { multiLanguageInputs.add(new MultiLanguageInput().setId(textDocumentInput.getId()) .setText(textDocumentInput.getText()).setLanguage(textDocumentInput.getLanguage())); } return multiLanguageInputs; } /** * Convert the incoming input {@link com.azure.ai.textanalytics.models.TextAnalyticsError} * to a {@link TextAnalyticsException}. * * @param error the {@link com.azure.ai.textanalytics.models.TextAnalyticsError}. * @return the {@link TextAnalyticsException} to be thrown. 
*/ public static TextAnalyticsException toTextAnalyticsException( com.azure.ai.textanalytics.models.TextAnalyticsError error) { return new TextAnalyticsException(error.getMessage(), error.getErrorCode(), error.getTarget()); } /** * Convert to a list of {@link LanguageInput} from {@link DetectLanguageInput}. * * @param documents The list of documents to detect languages for. * * @return a list of {@link LanguageInput}. */ public static List<LanguageInput> toLanguageInput(Iterable<DetectLanguageInput> documents) { final List<LanguageInput> multiLanguageInputs = new ArrayList<>(); documents.forEach(textDocumentInput -> multiLanguageInputs.add(new LanguageInput() .setId(textDocumentInput.getId()) .setText(textDocumentInput.getText()) .setCountryHint(textDocumentInput.getCountryHint()))); return multiLanguageInputs; } /** * Extracts the operation ID from the 'operation-location' URL. An example of 'operation-location' is * https: * * @param operationLocation The URL specified in the 'Operation-Location' response header containing the * operation ID used to track the progress and obtain the ID of the analyze operation. * * @return The operation ID that tracks the long running operation progress. */ public static String parseOperationId(String operationLocation) { if (!CoreUtils.isNullOrEmpty(operationLocation)) { int lastIndex = operationLocation.lastIndexOf('/'); if (lastIndex != -1) { return operationLocation.substring(lastIndex + 1); } } throw LOGGER.logExceptionAsError( new RuntimeException("Failed to parse operation header for operation Id from: " + operationLocation)); } /** * Extract the next pagination link which contains the request parameter values, into map, * such as '$skip=20' and '$top=2'. * * @param nextLink the next pagination link. * * @return A map that holds the request parameter value of next pagination link. 
*/ public static Map<String, Integer> parseNextLink(String nextLink) { if (!CoreUtils.isNullOrEmpty(nextLink)) { Map<String, Integer> parameterMap = new HashMap<>(); String[] strings = nextLink.split("\\?", 2); String[] parameters = strings[1].split("&"); for (String parameter : parameters) { String[] parameterPair = parameter.split("="); parameterMap.put(parameterPair[0], Integer.valueOf(parameterPair[1])); } return parameterMap; } return new HashMap<>(); } public static RecognizeEntitiesResultCollection toRecognizeEntitiesResultCollectionResponse( final EntitiesResult entitiesResult) { List<RecognizeEntitiesResult> recognizeEntitiesResults = new ArrayList<>(); entitiesResult.getDocuments().forEach(documentEntities -> recognizeEntitiesResults.add(new RecognizeEntitiesResult( documentEntities.getId(), documentEntities.getStatistics() == null ? null : toTextDocumentStatistics(documentEntities.getStatistics()), null, new CategorizedEntityCollection( new IterableStream<>(documentEntities.getEntities().stream().map(entity -> { final CategorizedEntity categorizedEntity = new CategorizedEntity(entity.getText(), EntityCategory.fromString(entity.getCategory()), entity.getSubcategory(), entity.getConfidenceScore(), entity.getOffset()); CategorizedEntityPropertiesHelper.setLength(categorizedEntity, entity.getLength()); return categorizedEntity; }).collect(Collectors.toList())), new IterableStream<>(documentEntities.getWarnings().stream() .map(warning -> { final WarningCodeValue warningCodeValue = warning.getCode(); return new TextAnalyticsWarning( WarningCode.fromString(warningCodeValue == null ? 
null : warningCodeValue.toString()), warning.getMessage()); }).collect(Collectors.toList()))) ))); for (DocumentError documentError : entitiesResult.getErrors()) { recognizeEntitiesResults.add(new RecognizeEntitiesResult(documentError.getId(), null, toTextAnalyticsError(documentError.getError()), null)); } return new RecognizeEntitiesResultCollection(recognizeEntitiesResults, entitiesResult.getModelVersion(), entitiesResult.getStatistics() == null ? null : toBatchStatistics(entitiesResult.getStatistics())); } public static RecognizePiiEntitiesResultCollection toRecognizePiiEntitiesResultCollection( final PiiResult piiEntitiesResult) { final List<RecognizePiiEntitiesResult> recognizeEntitiesResults = new ArrayList<>(); piiEntitiesResult.getDocuments().forEach(documentEntities -> { final List<PiiEntity> piiEntities = documentEntities.getEntities().stream().map(entity -> new PiiEntity(entity.getText(), EntityCategory.fromString(entity.getCategory()), entity.getSubcategory(), entity.getConfidenceScore(), entity.getOffset())) .collect(Collectors.toList()); final List<TextAnalyticsWarning> warnings = documentEntities.getWarnings().stream() .map(warning -> { final WarningCodeValue warningCodeValue = warning.getCode(); return new TextAnalyticsWarning( WarningCode.fromString(warningCodeValue == null ? null : warningCodeValue.toString()), warning.getMessage()); }).collect(Collectors.toList()); recognizeEntitiesResults.add(new RecognizePiiEntitiesResult( documentEntities.getId(), documentEntities.getStatistics() == null ? 
null : toTextDocumentStatistics(documentEntities.getStatistics()), null, new PiiEntityCollection(new IterableStream<>(piiEntities), documentEntities.getRedactedText(), new IterableStream<>(warnings)) )); }); for (DocumentError documentError : piiEntitiesResult.getErrors()) { recognizeEntitiesResults.add(new RecognizePiiEntitiesResult(documentError.getId(), null, toTextAnalyticsError(documentError.getError()), null)); } return new RecognizePiiEntitiesResultCollection(recognizeEntitiesResults, piiEntitiesResult.getModelVersion(), piiEntitiesResult.getStatistics() == null ? null : toBatchStatistics(piiEntitiesResult.getStatistics())); } public static ExtractKeyPhrasesResultCollection toExtractKeyPhrasesResultCollection( final KeyPhraseResult keyPhraseResult) { final List<ExtractKeyPhraseResult> keyPhraseResultList = new ArrayList<>(); for (DocumentKeyPhrases documentKeyPhrases : keyPhraseResult.getDocuments()) { final String documentId = documentKeyPhrases.getId(); keyPhraseResultList.add(new ExtractKeyPhraseResult( documentId, documentKeyPhrases.getStatistics() == null ? null : toTextDocumentStatistics(documentKeyPhrases.getStatistics()), null, new KeyPhrasesCollection( new IterableStream<>(documentKeyPhrases.getKeyPhrases()), new IterableStream<>(documentKeyPhrases.getWarnings().stream().map(warning -> { final WarningCodeValue warningCodeValue = warning.getCode(); return new TextAnalyticsWarning( WarningCode.fromString(warningCodeValue == null ? null : warningCodeValue.toString()), warning.getMessage()); }).collect(Collectors.toList()))))); } for (DocumentError documentError : keyPhraseResult.getErrors()) { keyPhraseResultList.add(new ExtractKeyPhraseResult(documentError.getId(), null, toTextAnalyticsError(documentError.getError()), null)); } return new ExtractKeyPhrasesResultCollection(keyPhraseResultList, keyPhraseResult.getModelVersion(), keyPhraseResult.getStatistics() == null ? 
null : toBatchStatistics(keyPhraseResult.getStatistics())); } /** * Transfer {@link HealthcareResult} into {@link IterableStream} of {@link AnalyzeHealthcareEntitiesResult}. * * @param healthcareResult the service side raw data, HealthcareResult. * * @return the client side explored model, RecognizeHealthcareEntitiesResultCollection. */ public static IterableStream<AnalyzeHealthcareEntitiesResult> toRecognizeHealthcareEntitiesResults( HealthcareResult healthcareResult) { List<AnalyzeHealthcareEntitiesResult> analyzeHealthcareEntitiesResults = new ArrayList<>(); healthcareResult.getDocuments().forEach( documentEntities -> { final List<TextAnalyticsWarning> warnings = Optional.ofNullable(documentEntities.getWarnings()) .map(textAnalyticsWarnings -> textAnalyticsWarnings.stream().map( textAnalyticsWarning -> new TextAnalyticsWarning( Optional.ofNullable(textAnalyticsWarning.getCode()) .map(warningCodeValue -> WarningCode.fromString(warningCodeValue.toString())) .orElse(null), textAnalyticsWarning.getMessage()) ).collect(Collectors.toList()) ).orElse(new ArrayList<>()); final List<HealthcareEntity> healthcareEntities = documentEntities.getEntities().stream().map( entity -> { final HealthcareEntity healthcareEntity = new HealthcareEntity(); HealthcareEntityPropertiesHelper.setText(healthcareEntity, entity.getText()); HealthcareEntityPropertiesHelper.setCategory(healthcareEntity, entity.getCategory()); HealthcareEntityPropertiesHelper.setConfidenceScore(healthcareEntity, entity.getConfidenceScore()); HealthcareEntityPropertiesHelper.setOffset(healthcareEntity, entity.getOffset()); HealthcareEntityPropertiesHelper.setLength(healthcareEntity, entity.getLength()); HealthcareEntityPropertiesHelper.setDataSources(healthcareEntity, entity.getLinks() == null ? 
null : IterableStream.of(entity.getLinks().stream() .map(healthcareEntityLink -> { final EntityDataSource entityDataSourceOrigin = new EntityDataSource(); EntityDataSourcePropertiesHelper.setName(entityDataSourceOrigin, healthcareEntityLink.getDataSource()); EntityDataSourcePropertiesHelper.setEntityId( entityDataSourceOrigin, healthcareEntityLink.getId()); return entityDataSourceOrigin; }) .collect(Collectors.toList()))); HealthcareEntityPropertiesHelper.setRelatedEntities(healthcareEntity, new HashMap<>()); return healthcareEntity; }).collect(Collectors.toList()); Map<HealthcareEntity, Map<HealthcareEntity, HealthcareEntityRelationType>> entityRelationMap = new HashMap<>(); if (!CoreUtils.isNullOrEmpty(documentEntities.getRelations())) { documentEntities.getRelations().forEach(healthcareRelation -> { final HealthcareEntity targetEntity = healthcareEntities.get(getHealthcareEntityIndex(healthcareRelation.getTarget())); final HealthcareEntity sourceEntity = healthcareEntities.get(getHealthcareEntityIndex(healthcareRelation.getSource())); final HealthcareEntityRelationType relationType = HealthcareEntityRelationType.fromString(healthcareRelation.getRelationType()); final Map<HealthcareEntity, HealthcareEntityRelationType> targetRelatedEntity = entityRelationMap.getOrDefault(targetEntity, new HashMap<>()); targetRelatedEntity.putIfAbsent(sourceEntity, relationType); entityRelationMap.putIfAbsent(targetEntity, targetRelatedEntity); if (healthcareRelation.isBidirectional()) { final Map<HealthcareEntity, HealthcareEntityRelationType> sourceRelatedEntity = entityRelationMap.getOrDefault(sourceEntity, new HashMap<>()); sourceRelatedEntity.putIfAbsent(targetEntity, relationType); entityRelationMap.putIfAbsent(sourceEntity, sourceRelatedEntity); } }); } healthcareEntities.forEach(healthcareEntity -> { if (entityRelationMap.containsKey(healthcareEntity)) { HealthcareEntityPropertiesHelper.setRelatedEntities(healthcareEntity, entityRelationMap.get(healthcareEntity)); } }); 
final AnalyzeHealthcareEntitiesResult analyzeHealthcareEntitiesResult = new AnalyzeHealthcareEntitiesResult( documentEntities.getId(), documentEntities.getStatistics() == null ? null : toTextDocumentStatistics(documentEntities.getStatistics()), null); AnalyzeHealthcareEntitiesResultPropertiesHelper.setEntities(analyzeHealthcareEntitiesResult, IterableStream.of(healthcareEntities)); AnalyzeHealthcareEntitiesResultPropertiesHelper.setWarnings(analyzeHealthcareEntitiesResult, IterableStream.of(warnings)); analyzeHealthcareEntitiesResults.add(analyzeHealthcareEntitiesResult); }); healthcareResult.getErrors().forEach(documentError -> analyzeHealthcareEntitiesResults.add(new AnalyzeHealthcareEntitiesResult( documentError.getId(), null, toTextAnalyticsError(documentError.getError()))) ); return IterableStream.of(analyzeHealthcareEntitiesResults); } /** * Helper function that parse healthcare entity index from the given entity reference string. * The entity reference format is " * * @param entityReference the given healthcare entity reference string. * * @return the healthcare entity index. */ private static Integer getHealthcareEntityIndex(String entityReference) { if (!CoreUtils.isNullOrEmpty(entityReference)) { int lastIndex = entityReference.lastIndexOf('/'); if (lastIndex != -1) { return Integer.parseInt(entityReference.substring(lastIndex + 1)); } } throw LOGGER.logExceptionAsError( new RuntimeException("Failed to parse healthcare entity index from: " + entityReference)); } /** * Transfer {@link com.azure.ai.textanalytics.models.StringIndexType} into auto-generated {@link StringIndexType}. * If value is null, use the default type for java, UTF16CODE_UNIT. * * @param stringIndexType The public explored StringIndexType. * * @return The autogenerated internally used StringIndexType. */ public static StringIndexType getNonNullStringIndexType( com.azure.ai.textanalytics.models.StringIndexType stringIndexType) { return stringIndexType == null ? 
StringIndexType.UTF16CODE_UNIT : StringIndexType.fromString(stringIndexType.toString()); } /** * Get the non-null {@link Context}. The default value is {@link Context * * @param context It offers a means of passing arbitrary data (key-value pairs) to pipeline policies. * Most applications do not need to pass arbitrary data to the pipeline and can pass Context.NONE or null. * * @return The Context. */ public static Context getNotNullContext(Context context) { return context == null ? Context.NONE : context; } }
class Utility { private static final ClientLogger LOGGER = new ClientLogger(Utility.class); public static final Duration DEFAULT_POLL_INTERVAL = Duration.ofSeconds(5); private Utility() { } /** * Verify that list of documents are not null or empty. Otherwise, throw exception. * * @param documents A list of documents. * * @throws NullPointerException if {@code documents} is null. * @throws IllegalArgumentException if {@code documents} is empty. */ public static void inputDocumentsValidation(Iterable<?> documents) { Objects.requireNonNull(documents, "'documents' cannot be null."); final Iterator<?> iterator = documents.iterator(); if (!iterator.hasNext()) { throw new IllegalArgumentException("'documents' cannot be empty."); } } /** * Mapping a {@link ErrorResponseException} to {@link HttpResponseException} if exist. Otherwise, return * original {@link Throwable}. * * @param throwable A {@link Throwable}. * @return A {@link HttpResponseException} or the original throwable type. */ /** * Given a list of documents will apply the indexing function to it and return the updated list. * * @param documents the inputs to apply the mapping function to. * @param mappingFunction the function which applies the index to the incoming input value. * @param <T> the type of items being returned in the list. * @return The list holding all the generic items combined. */ public static <T> List<T> mapByIndex(Iterable<String> documents, BiFunction<String, String, T> mappingFunction) { Objects.requireNonNull(documents, "'documents' cannot be null."); AtomicInteger i = new AtomicInteger(0); List<T> result = new ArrayList<>(); documents.forEach(document -> result.add(mappingFunction.apply(String.valueOf(i.getAndIncrement()), document)) ); return result; } /** * Convert {@link DocumentStatistics} to {@link TextDocumentStatistics} * * @param statistics the {@link DocumentStatistics} provided by the service. * @return the {@link TextDocumentStatistics} returned by the SDK. 
*/ public static TextDocumentStatistics toTextDocumentStatistics(DocumentStatistics statistics) { return new TextDocumentStatistics(statistics.getCharactersCount(), statistics.getTransactionsCount()); } /** * Convert {@link RequestStatistics} to {@link TextDocumentBatchStatistics} * * @param statistics the {@link RequestStatistics} provided by the service. * @return the {@link TextDocumentBatchStatistics} returned by the SDK. */ public static TextDocumentBatchStatistics toBatchStatistics(RequestStatistics statistics) { return new TextDocumentBatchStatistics(statistics.getDocumentsCount(), statistics.getValidDocumentsCount(), statistics.getErroneousDocumentsCount(), statistics.getTransactionsCount()); } /** * Convert {@link TextAnalyticsError} to {@link com.azure.ai.textanalytics.models.TextAnalyticsError} * This function maps the service returned {@link TextAnalyticsError inner error} to the top level * {@link com.azure.ai.textanalytics.models.TextAnalyticsError error}, if inner error present. * * @param textAnalyticsError the {@link TextAnalyticsError} returned by the service. * @return the {@link com.azure.ai.textanalytics.models.TextAnalyticsError} returned by the SDK. */ public static com.azure.ai.textanalytics.models.TextAnalyticsError toTextAnalyticsError( TextAnalyticsError textAnalyticsError) { final InnerError innerError = textAnalyticsError.getInnererror(); if (innerError == null) { final ErrorCodeValue errorCodeValue = textAnalyticsError.getCode(); return new com.azure.ai.textanalytics.models.TextAnalyticsError( TextAnalyticsErrorCode.fromString(errorCodeValue == null ? null : errorCodeValue.toString()), textAnalyticsError.getMessage(), textAnalyticsError.getTarget()); } final InnerErrorCodeValue innerErrorCodeValue = innerError.getCode(); return new com.azure.ai.textanalytics.models.TextAnalyticsError( TextAnalyticsErrorCode.fromString(innerErrorCodeValue == null ? 
null : innerErrorCodeValue.toString()), innerError.getMessage(), innerError.getTarget()); } /** * Convert the incoming input {@link TextDocumentInput} to the service expected {@link MultiLanguageInput}. * * @param documents the user provided input in {@link TextDocumentInput} * @return the service required input {@link MultiLanguageInput} */ public static List<MultiLanguageInput> toMultiLanguageInput(Iterable<TextDocumentInput> documents) { List<MultiLanguageInput> multiLanguageInputs = new ArrayList<>(); for (TextDocumentInput textDocumentInput : documents) { multiLanguageInputs.add(new MultiLanguageInput().setId(textDocumentInput.getId()) .setText(textDocumentInput.getText()).setLanguage(textDocumentInput.getLanguage())); } return multiLanguageInputs; } /** * Convert the incoming input {@link com.azure.ai.textanalytics.models.TextAnalyticsError} * to a {@link TextAnalyticsException}. * * @param error the {@link com.azure.ai.textanalytics.models.TextAnalyticsError}. * @return the {@link TextAnalyticsException} to be thrown. */ public static TextAnalyticsException toTextAnalyticsException( com.azure.ai.textanalytics.models.TextAnalyticsError error) { return new TextAnalyticsException(error.getMessage(), error.getErrorCode(), error.getTarget()); } /** * Convert to a list of {@link LanguageInput} from {@link DetectLanguageInput}. * * @param documents The list of documents to detect languages for. * * @return a list of {@link LanguageInput}. */ public static List<LanguageInput> toLanguageInput(Iterable<DetectLanguageInput> documents) { final List<LanguageInput> multiLanguageInputs = new ArrayList<>(); documents.forEach(textDocumentInput -> multiLanguageInputs.add(new LanguageInput() .setId(textDocumentInput.getId()) .setText(textDocumentInput.getText()) .setCountryHint(textDocumentInput.getCountryHint()))); return multiLanguageInputs; } /** * Extracts the operation ID from the 'operation-location' URL. 
An example of 'operation-location' is * https: * * @param operationLocation The URL specified in the 'Operation-Location' response header containing the * operation ID used to track the progress and obtain the ID of the analyze operation. * * @return The operation ID that tracks the long running operation progress. */ public static String parseOperationId(String operationLocation) { if (!CoreUtils.isNullOrEmpty(operationLocation)) { int lastIndex = operationLocation.lastIndexOf('/'); if (lastIndex != -1) { return operationLocation.substring(lastIndex + 1); } } throw LOGGER.logExceptionAsError( new RuntimeException("Failed to parse operation header for operation Id from: " + operationLocation)); } /** * Extract the next pagination link which contains the request parameter values, into map, * such as '$skip=20' and '$top=2'. * * @param nextLink the next pagination link. * * @return A map that holds the request parameter value of next pagination link. */ public static Map<String, Integer> parseNextLink(String nextLink) { if (!CoreUtils.isNullOrEmpty(nextLink)) { Map<String, Integer> parameterMap = new HashMap<>(); String[] strings = nextLink.split("\\?", 2); String[] parameters = strings[1].split("&"); for (String parameter : parameters) { String[] parameterPair = parameter.split("="); parameterMap.put(parameterPair[0], Integer.valueOf(parameterPair[1])); } return parameterMap; } return new HashMap<>(); } public static RecognizeEntitiesResultCollection toRecognizeEntitiesResultCollectionResponse( final EntitiesResult entitiesResult) { List<RecognizeEntitiesResult> recognizeEntitiesResults = new ArrayList<>(); entitiesResult.getDocuments().forEach(documentEntities -> recognizeEntitiesResults.add(new RecognizeEntitiesResult( documentEntities.getId(), documentEntities.getStatistics() == null ? 
null : toTextDocumentStatistics(documentEntities.getStatistics()), null, new CategorizedEntityCollection( new IterableStream<>(documentEntities.getEntities().stream().map(entity -> { final CategorizedEntity categorizedEntity = new CategorizedEntity(entity.getText(), EntityCategory.fromString(entity.getCategory()), entity.getSubcategory(), entity.getConfidenceScore(), entity.getOffset()); CategorizedEntityPropertiesHelper.setLength(categorizedEntity, entity.getLength()); return categorizedEntity; }).collect(Collectors.toList())), new IterableStream<>(documentEntities.getWarnings().stream() .map(warning -> { final WarningCodeValue warningCodeValue = warning.getCode(); return new TextAnalyticsWarning( WarningCode.fromString(warningCodeValue == null ? null : warningCodeValue.toString()), warning.getMessage()); }).collect(Collectors.toList()))) ))); for (DocumentError documentError : entitiesResult.getErrors()) { recognizeEntitiesResults.add(new RecognizeEntitiesResult(documentError.getId(), null, toTextAnalyticsError(documentError.getError()), null)); } return new RecognizeEntitiesResultCollection(recognizeEntitiesResults, entitiesResult.getModelVersion(), entitiesResult.getStatistics() == null ? 
null : toBatchStatistics(entitiesResult.getStatistics())); } public static RecognizePiiEntitiesResultCollection toRecognizePiiEntitiesResultCollection( final PiiResult piiEntitiesResult) { final List<RecognizePiiEntitiesResult> recognizeEntitiesResults = new ArrayList<>(); piiEntitiesResult.getDocuments().forEach(documentEntities -> { final List<PiiEntity> piiEntities = documentEntities.getEntities().stream().map(entity -> new PiiEntity(entity.getText(), EntityCategory.fromString(entity.getCategory()), entity.getSubcategory(), entity.getConfidenceScore(), entity.getOffset())) .collect(Collectors.toList()); final List<TextAnalyticsWarning> warnings = documentEntities.getWarnings().stream() .map(warning -> { final WarningCodeValue warningCodeValue = warning.getCode(); return new TextAnalyticsWarning( WarningCode.fromString(warningCodeValue == null ? null : warningCodeValue.toString()), warning.getMessage()); }).collect(Collectors.toList()); recognizeEntitiesResults.add(new RecognizePiiEntitiesResult( documentEntities.getId(), documentEntities.getStatistics() == null ? null : toTextDocumentStatistics(documentEntities.getStatistics()), null, new PiiEntityCollection(new IterableStream<>(piiEntities), documentEntities.getRedactedText(), new IterableStream<>(warnings)) )); }); for (DocumentError documentError : piiEntitiesResult.getErrors()) { recognizeEntitiesResults.add(new RecognizePiiEntitiesResult(documentError.getId(), null, toTextAnalyticsError(documentError.getError()), null)); } return new RecognizePiiEntitiesResultCollection(recognizeEntitiesResults, piiEntitiesResult.getModelVersion(), piiEntitiesResult.getStatistics() == null ? 
null : toBatchStatistics(piiEntitiesResult.getStatistics())); } public static ExtractKeyPhrasesResultCollection toExtractKeyPhrasesResultCollection( final KeyPhraseResult keyPhraseResult) { final List<ExtractKeyPhraseResult> keyPhraseResultList = new ArrayList<>(); for (DocumentKeyPhrases documentKeyPhrases : keyPhraseResult.getDocuments()) { final String documentId = documentKeyPhrases.getId(); keyPhraseResultList.add(new ExtractKeyPhraseResult( documentId, documentKeyPhrases.getStatistics() == null ? null : toTextDocumentStatistics(documentKeyPhrases.getStatistics()), null, new KeyPhrasesCollection( new IterableStream<>(documentKeyPhrases.getKeyPhrases()), new IterableStream<>(documentKeyPhrases.getWarnings().stream().map(warning -> { final WarningCodeValue warningCodeValue = warning.getCode(); return new TextAnalyticsWarning( WarningCode.fromString(warningCodeValue == null ? null : warningCodeValue.toString()), warning.getMessage()); }).collect(Collectors.toList()))))); } for (DocumentError documentError : keyPhraseResult.getErrors()) { keyPhraseResultList.add(new ExtractKeyPhraseResult(documentError.getId(), null, toTextAnalyticsError(documentError.getError()), null)); } return new ExtractKeyPhrasesResultCollection(keyPhraseResultList, keyPhraseResult.getModelVersion(), keyPhraseResult.getStatistics() == null ? null : toBatchStatistics(keyPhraseResult.getStatistics())); } /** * Transfer {@link HealthcareResult} into {@link IterableStream} of {@link AnalyzeHealthcareEntitiesResult}. * * @param healthcareResult the service side raw data, HealthcareResult. * * @return the client side explored model, RecognizeHealthcareEntitiesResultCollection. 
*/ public static IterableStream<AnalyzeHealthcareEntitiesResult> toRecognizeHealthcareEntitiesResults( HealthcareResult healthcareResult) { List<AnalyzeHealthcareEntitiesResult> analyzeHealthcareEntitiesResults = new ArrayList<>(); healthcareResult.getDocuments().forEach( documentEntities -> { final List<TextAnalyticsWarning> warnings = Optional.ofNullable(documentEntities.getWarnings()) .map(textAnalyticsWarnings -> textAnalyticsWarnings.stream().map( textAnalyticsWarning -> new TextAnalyticsWarning( Optional.ofNullable(textAnalyticsWarning.getCode()) .map(warningCodeValue -> WarningCode.fromString(warningCodeValue.toString())) .orElse(null), textAnalyticsWarning.getMessage()) ).collect(Collectors.toList()) ).orElse(new ArrayList<>()); final List<HealthcareEntity> healthcareEntities = documentEntities.getEntities().stream().map( entity -> { final HealthcareEntity healthcareEntity = new HealthcareEntity(); HealthcareEntityPropertiesHelper.setText(healthcareEntity, entity.getText()); HealthcareEntityPropertiesHelper.setCategory(healthcareEntity, entity.getCategory()); HealthcareEntityPropertiesHelper.setConfidenceScore(healthcareEntity, entity.getConfidenceScore()); HealthcareEntityPropertiesHelper.setOffset(healthcareEntity, entity.getOffset()); HealthcareEntityPropertiesHelper.setLength(healthcareEntity, entity.getLength()); HealthcareEntityPropertiesHelper.setDataSources(healthcareEntity, entity.getLinks() == null ? 
null : IterableStream.of(entity.getLinks().stream() .map(healthcareEntityLink -> { final EntityDataSource entityDataSourceOrigin = new EntityDataSource(); EntityDataSourcePropertiesHelper.setName(entityDataSourceOrigin, healthcareEntityLink.getDataSource()); EntityDataSourcePropertiesHelper.setEntityId( entityDataSourceOrigin, healthcareEntityLink.getId()); return entityDataSourceOrigin; }) .collect(Collectors.toList()))); HealthcareEntityPropertiesHelper.setRelatedEntities(healthcareEntity, new HashMap<>()); return healthcareEntity; }).collect(Collectors.toList()); Map<HealthcareEntity, Map<HealthcareEntity, HealthcareEntityRelationType>> entityRelationMap = new HashMap<>(); if (!CoreUtils.isNullOrEmpty(documentEntities.getRelations())) { documentEntities.getRelations().forEach(healthcareRelation -> { final HealthcareEntity targetEntity = healthcareEntities.get(getHealthcareEntityIndex(healthcareRelation.getTarget())); final HealthcareEntity sourceEntity = healthcareEntities.get(getHealthcareEntityIndex(healthcareRelation.getSource())); final HealthcareEntityRelationType relationType = HealthcareEntityRelationType.fromString(healthcareRelation.getRelationType()); final Map<HealthcareEntity, HealthcareEntityRelationType> targetRelatedEntity = entityRelationMap.getOrDefault(targetEntity, new HashMap<>()); targetRelatedEntity.putIfAbsent(sourceEntity, relationType); entityRelationMap.putIfAbsent(targetEntity, targetRelatedEntity); if (healthcareRelation.isBidirectional()) { final Map<HealthcareEntity, HealthcareEntityRelationType> sourceRelatedEntity = entityRelationMap.getOrDefault(sourceEntity, new HashMap<>()); sourceRelatedEntity.putIfAbsent(targetEntity, relationType); entityRelationMap.putIfAbsent(sourceEntity, sourceRelatedEntity); } }); } healthcareEntities.forEach(healthcareEntity -> { if (entityRelationMap.containsKey(healthcareEntity)) { HealthcareEntityPropertiesHelper.setRelatedEntities(healthcareEntity, entityRelationMap.get(healthcareEntity)); } }); 
final AnalyzeHealthcareEntitiesResult analyzeHealthcareEntitiesResult = new AnalyzeHealthcareEntitiesResult( documentEntities.getId(), documentEntities.getStatistics() == null ? null : toTextDocumentStatistics(documentEntities.getStatistics()), null); AnalyzeHealthcareEntitiesResultPropertiesHelper.setEntities(analyzeHealthcareEntitiesResult, IterableStream.of(healthcareEntities)); AnalyzeHealthcareEntitiesResultPropertiesHelper.setWarnings(analyzeHealthcareEntitiesResult, IterableStream.of(warnings)); analyzeHealthcareEntitiesResults.add(analyzeHealthcareEntitiesResult); }); healthcareResult.getErrors().forEach(documentError -> analyzeHealthcareEntitiesResults.add(new AnalyzeHealthcareEntitiesResult( documentError.getId(), null, toTextAnalyticsError(documentError.getError()))) ); return IterableStream.of(analyzeHealthcareEntitiesResults); } /** * Helper function that parse healthcare entity index from the given entity reference string. * The entity reference format is " * * @param entityReference the given healthcare entity reference string. * * @return the healthcare entity index. */ private static Integer getHealthcareEntityIndex(String entityReference) { if (!CoreUtils.isNullOrEmpty(entityReference)) { int lastIndex = entityReference.lastIndexOf('/'); if (lastIndex != -1) { return Integer.parseInt(entityReference.substring(lastIndex + 1)); } } throw LOGGER.logExceptionAsError( new RuntimeException("Failed to parse healthcare entity index from: " + entityReference)); } /** * Transfer {@link com.azure.ai.textanalytics.models.StringIndexType} into auto-generated {@link StringIndexType}. * If value is null, use the default type for java, UTF16CODE_UNIT. * * @param stringIndexType The public explored StringIndexType. * * @return The autogenerated internally used StringIndexType. */ public static StringIndexType getNonNullStringIndexType( com.azure.ai.textanalytics.models.StringIndexType stringIndexType) { return stringIndexType == null ? 
StringIndexType.UTF16CODE_UNIT : StringIndexType.fromString(stringIndexType.toString()); } /** * Get the non-null {@link Context}. The default value is {@link Context * * @param context It offers a means of passing arbitrary data (key-value pairs) to pipeline policies. * Most applications do not need to pass arbitrary data to the pipeline and can pass Context.NONE or null. * * @return The Context. */ public static Context getNotNullContext(Context context) { return context == null ? Context.NONE : context; } /** * Helper function which retrieves the size of an {@link Iterable}. * * @param documents The iterable of documents. * @return Count of documents in the iterable. */ public static int getDocumentCount(Iterable<?> documents) { if (documents instanceof Collection) { return ((Collection<?>) documents).size(); } else { final int[] count = new int[] { 0 }; documents.forEach(ignored -> count[0] += 1); return count[0]; } } }
will this line overwrite above settings?
protected void setCustomHeaders(MessageHeaders headers, IMessage message) { getStringHeader(headers, MessageHeaders.ID).ifPresent(message::setMessageId); getStringHeader(headers, MessageHeaders.CONTENT_TYPE).ifPresent(message::setContentType); getStringHeader(headers, MessageHeaders.REPLY_CHANNEL).ifPresent(message::setReplyTo); getStringHeader(headers, AzureHeaders.RAW_ID).ifPresent(message::setMessageId); Optional.ofNullable(headers.get(AzureHeaders.SCHEDULED_ENQUEUE_MESSAGE, Integer.class)) .map(Duration::ofMillis) .map(Instant.now()::plus) .ifPresent(message::setScheduledEnqueueTimeUtc); getStringHeader(headers, MESSAGE_ID).ifPresent(message::setMessageId); Optional.ofNullable((Duration) headers.get(TIME_TO_LIVE)) .ifPresent(message::setTimeToLive); Optional.ofNullable((Instant) headers.get(SCHEDULED_ENQUEUE_TIME_UTC)) .ifPresent(message::setScheduledEnqueueTimeUtc); getStringHeader(headers, SESSION_ID).ifPresent(message::setSessionId); getStringHeader(headers, CORRELATION_ID).ifPresent(message::setCorrelationId); getStringHeader(headers, TO).ifPresent(message::setTo); getStringHeader(headers, LABEL).ifPresent(message::setLabel); getStringHeader(headers, REPLY_TO_SESSION_ID).ifPresent(message::setReplyToSessionId); getStringHeader(headers, PARTITION_KEY).ifPresent(message::setPartitionKey); getStringHeader(headers, VIA_PARTITION_KEY).ifPresent(message::setViaPartitionKey); headers.forEach((key, value) -> message.getProperties().put(key, value.toString())); }
headers.forEach((key, value) -> message.getProperties().put(key, value.toString()));
protected void setCustomHeaders(MessageHeaders headers, IMessage message) { getStringHeader(headers, MessageHeaders.ID).ifPresent(message::setMessageId); getStringHeader(headers, MessageHeaders.CONTENT_TYPE).ifPresent(message::setContentType); getStringHeader(headers, MessageHeaders.REPLY_CHANNEL).ifPresent(message::setReplyTo); getStringHeader(headers, AzureHeaders.RAW_ID).ifPresent(message::setMessageId); Optional.ofNullable(headers.get(AzureHeaders.SCHEDULED_ENQUEUE_MESSAGE, Integer.class)) .map(Duration::ofMillis) .map(Instant.now()::plus) .ifPresent(message::setScheduledEnqueueTimeUtc); getStringHeader(headers, MESSAGE_ID).ifPresent(message::setMessageId); Optional.ofNullable((Duration) headers.get(TIME_TO_LIVE)) .ifPresent(message::setTimeToLive); Optional.ofNullable((Instant) headers.get(SCHEDULED_ENQUEUE_TIME)) .ifPresent(message::setScheduledEnqueueTimeUtc); getStringHeader(headers, SESSION_ID).ifPresent(message::setSessionId); getStringHeader(headers, CORRELATION_ID).ifPresent(message::setCorrelationId); getStringHeader(headers, TO).ifPresent(message::setTo); getStringHeader(headers, REPLY_TO_SESSION_ID).ifPresent(message::setReplyToSessionId); getStringHeader(headers, PARTITION_KEY).ifPresent(message::setPartitionKey); headers.forEach((key, value) -> message.getProperties().put(key, value.toString())); }
class ServiceBusMessageConverter extends AbstractAzureMessageConverter<IMessage> { private final ObjectMapper objectMapper; public ServiceBusMessageConverter() { objectMapper = OBJECT_MAPPER; } public ServiceBusMessageConverter(ObjectMapper objectMapper) { this.objectMapper = objectMapper; } @Override protected ObjectMapper getObjectMapper() { return objectMapper; } @Override protected byte[] getPayload(IMessage azureMessage) { MessageBody messageBody = azureMessage.getMessageBody(); if (messageBody == null) { return new byte[0]; } switch (messageBody.getBodyType()) { case BINARY: return messageBody.getBinaryData().stream().findFirst().orElse(null); case VALUE: return String.valueOf(messageBody.getValueData()).getBytes(StandardCharsets.UTF_8); case SEQUENCE: return toPayload(messageBody.getSequenceData().stream().findFirst().orElse(null)); default: return new byte[0]; } } @Override protected IMessage fromString(String payload) { return new Message(payload); } @Override protected IMessage fromByte(byte[] payload) { return new Message(payload); } @Override private Optional<String> getStringHeader(MessageHeaders springMessageHeaders, String key) { return Optional.ofNullable(springMessageHeaders.get(key)) .map(Object::toString) .filter(StringUtils::hasText); } @Override protected Map<String, Object> buildCustomHeaders(IMessage message) { Map<String, Object> headers = new HashMap<>(); Optional.ofNullable(message.getMessageId()) .filter(StringUtils::hasText) .ifPresent(s -> headers.put(MessageHeaders.ID, s)); Optional.ofNullable(message.getContentType()) .filter(StringUtils::hasText) .ifPresent(s -> headers.put(MessageHeaders.CONTENT_TYPE, s)); Optional.ofNullable(message.getReplyTo()) .filter(StringUtils::hasText) .ifPresent(s -> headers.put(MessageHeaders.REPLY_CHANNEL, s)); Optional.ofNullable(message.getMessageId()) .filter(StringUtils::hasText) .ifPresent(s -> headers.put(AzureHeaders.RAW_ID, s)); Optional.ofNullable(message.getMessageId()) 
.filter(StringUtils::hasText) .ifPresent(s -> headers.put(MESSAGE_ID, s)); Optional.ofNullable(message.getTimeToLive()) .ifPresent(s -> headers.put(TIME_TO_LIVE, s)); Optional.ofNullable(message.getScheduledEnqueueTimeUtc()) .ifPresent(s -> headers.put(SCHEDULED_ENQUEUE_TIME_UTC, s)); Optional.ofNullable(message.getSessionId()) .filter(StringUtils::hasText) .ifPresent(s -> headers.put(SESSION_ID, s)); Optional.ofNullable(message.getCorrelationId()) .filter(StringUtils::hasText) .ifPresent(s -> headers.put(CORRELATION_ID, s)); Optional.ofNullable(message.getTo()) .filter(StringUtils::hasText) .ifPresent(s -> headers.put(TO, s)); Optional.ofNullable(message.getLabel()) .filter(StringUtils::hasText) .ifPresent(s -> headers.put(LABEL, s)); Optional.ofNullable(message.getReplyToSessionId()) .filter(StringUtils::hasText) .ifPresent(s -> headers.put(REPLY_TO_SESSION_ID, s)); Optional.ofNullable(message.getPartitionKey()) .filter(StringUtils::hasText) .ifPresent(s -> headers.put(PARTITION_KEY, s)); Optional.ofNullable(message.getViaPartitionKey()) .filter(StringUtils::hasText) .ifPresent(s -> headers.put(VIA_PARTITION_KEY, s)); headers.putAll(message.getProperties()); return Collections.unmodifiableMap(headers); } }
class ServiceBusMessageConverter extends AbstractAzureMessageConverter<IMessage> { private final ObjectMapper objectMapper; public ServiceBusMessageConverter() { objectMapper = OBJECT_MAPPER; } public ServiceBusMessageConverter(ObjectMapper objectMapper) { this.objectMapper = objectMapper; } @Override protected ObjectMapper getObjectMapper() { return objectMapper; } @Override protected byte[] getPayload(IMessage azureMessage) { MessageBody messageBody = azureMessage.getMessageBody(); if (messageBody == null) { return new byte[0]; } switch (messageBody.getBodyType()) { case BINARY: return messageBody.getBinaryData().stream().findFirst().orElse(null); case VALUE: return String.valueOf(messageBody.getValueData()).getBytes(StandardCharsets.UTF_8); case SEQUENCE: return toPayload(messageBody.getSequenceData().stream().findFirst().orElse(null)); default: return new byte[0]; } } @Override protected IMessage fromString(String payload) { return new Message(payload); } @Override protected IMessage fromByte(byte[] payload) { return new Message(payload); } @Override private Optional<String> getStringHeader(MessageHeaders springMessageHeaders, String key) { return Optional.ofNullable(springMessageHeaders.get(key)) .map(Object::toString) .filter(StringUtils::hasText); } @Override protected Map<String, Object> buildCustomHeaders(IMessage message) { Map<String, Object> headers = new HashMap<>(); setValueIfHasText(headers, MessageHeaders.ID, message.getMessageId()); setValueIfHasText(headers, MessageHeaders.CONTENT_TYPE, message.getContentType()); setValueIfHasText(headers, MessageHeaders.REPLY_CHANNEL, message.getReplyTo()); setValueIfHasText(headers, AzureHeaders.RAW_ID, message.getMessageId()); setValueIfHasText(headers, CORRELATION_ID, message.getCorrelationId()); setValueIfHasText(headers, MESSAGE_ID, message.getMessageId()); setValueIfHasText(headers, PARTITION_KEY, message.getPartitionKey()); setValueIfHasText(headers, TO, message.getTo()); setValueIfPresent(headers, 
TIME_TO_LIVE, message.getTimeToLive()); setValueIfPresent(headers, SCHEDULED_ENQUEUE_TIME, message.getScheduledEnqueueTimeUtc()); setValueIfHasText(headers, REPLY_TO_SESSION_ID, message.getReplyToSessionId()); setValueIfHasText(headers, SESSION_ID, message.getSessionId()); headers.putAll(message.getProperties()); return Collections.unmodifiableMap(headers); } private void setValueIfHasText(Map<String, Object> map, String key, String value) { Optional.ofNullable(value) .filter(StringUtils::hasText) .ifPresent(s -> map.put(key, s)); } private void setValueIfPresent(Map<String, Object> map, String key, Object value) { Optional.ofNullable(value) .ifPresent(s -> map.put(key, s)); } }
No. Above setting set specific field, like `setMessageId, setSessionId`. This line call `.getProperties().put(...)`, just add items in `properties`.
protected void setCustomHeaders(MessageHeaders headers, IMessage message) { getStringHeader(headers, MessageHeaders.ID).ifPresent(message::setMessageId); getStringHeader(headers, MessageHeaders.CONTENT_TYPE).ifPresent(message::setContentType); getStringHeader(headers, MessageHeaders.REPLY_CHANNEL).ifPresent(message::setReplyTo); getStringHeader(headers, AzureHeaders.RAW_ID).ifPresent(message::setMessageId); Optional.ofNullable(headers.get(AzureHeaders.SCHEDULED_ENQUEUE_MESSAGE, Integer.class)) .map(Duration::ofMillis) .map(Instant.now()::plus) .ifPresent(message::setScheduledEnqueueTimeUtc); getStringHeader(headers, MESSAGE_ID).ifPresent(message::setMessageId); Optional.ofNullable((Duration) headers.get(TIME_TO_LIVE)) .ifPresent(message::setTimeToLive); Optional.ofNullable((Instant) headers.get(SCHEDULED_ENQUEUE_TIME_UTC)) .ifPresent(message::setScheduledEnqueueTimeUtc); getStringHeader(headers, SESSION_ID).ifPresent(message::setSessionId); getStringHeader(headers, CORRELATION_ID).ifPresent(message::setCorrelationId); getStringHeader(headers, TO).ifPresent(message::setTo); getStringHeader(headers, LABEL).ifPresent(message::setLabel); getStringHeader(headers, REPLY_TO_SESSION_ID).ifPresent(message::setReplyToSessionId); getStringHeader(headers, PARTITION_KEY).ifPresent(message::setPartitionKey); getStringHeader(headers, VIA_PARTITION_KEY).ifPresent(message::setViaPartitionKey); headers.forEach((key, value) -> message.getProperties().put(key, value.toString())); }
headers.forEach((key, value) -> message.getProperties().put(key, value.toString()));
protected void setCustomHeaders(MessageHeaders headers, IMessage message) { getStringHeader(headers, MessageHeaders.ID).ifPresent(message::setMessageId); getStringHeader(headers, MessageHeaders.CONTENT_TYPE).ifPresent(message::setContentType); getStringHeader(headers, MessageHeaders.REPLY_CHANNEL).ifPresent(message::setReplyTo); getStringHeader(headers, AzureHeaders.RAW_ID).ifPresent(message::setMessageId); Optional.ofNullable(headers.get(AzureHeaders.SCHEDULED_ENQUEUE_MESSAGE, Integer.class)) .map(Duration::ofMillis) .map(Instant.now()::plus) .ifPresent(message::setScheduledEnqueueTimeUtc); getStringHeader(headers, MESSAGE_ID).ifPresent(message::setMessageId); Optional.ofNullable((Duration) headers.get(TIME_TO_LIVE)) .ifPresent(message::setTimeToLive); Optional.ofNullable((Instant) headers.get(SCHEDULED_ENQUEUE_TIME)) .ifPresent(message::setScheduledEnqueueTimeUtc); getStringHeader(headers, SESSION_ID).ifPresent(message::setSessionId); getStringHeader(headers, CORRELATION_ID).ifPresent(message::setCorrelationId); getStringHeader(headers, TO).ifPresent(message::setTo); getStringHeader(headers, REPLY_TO_SESSION_ID).ifPresent(message::setReplyToSessionId); getStringHeader(headers, PARTITION_KEY).ifPresent(message::setPartitionKey); headers.forEach((key, value) -> message.getProperties().put(key, value.toString())); }
class ServiceBusMessageConverter extends AbstractAzureMessageConverter<IMessage> { private final ObjectMapper objectMapper; public ServiceBusMessageConverter() { objectMapper = OBJECT_MAPPER; } public ServiceBusMessageConverter(ObjectMapper objectMapper) { this.objectMapper = objectMapper; } @Override protected ObjectMapper getObjectMapper() { return objectMapper; } @Override protected byte[] getPayload(IMessage azureMessage) { MessageBody messageBody = azureMessage.getMessageBody(); if (messageBody == null) { return new byte[0]; } switch (messageBody.getBodyType()) { case BINARY: return messageBody.getBinaryData().stream().findFirst().orElse(null); case VALUE: return String.valueOf(messageBody.getValueData()).getBytes(StandardCharsets.UTF_8); case SEQUENCE: return toPayload(messageBody.getSequenceData().stream().findFirst().orElse(null)); default: return new byte[0]; } } @Override protected IMessage fromString(String payload) { return new Message(payload); } @Override protected IMessage fromByte(byte[] payload) { return new Message(payload); } @Override private Optional<String> getStringHeader(MessageHeaders springMessageHeaders, String key) { return Optional.ofNullable(springMessageHeaders.get(key)) .map(Object::toString) .filter(StringUtils::hasText); } @Override protected Map<String, Object> buildCustomHeaders(IMessage message) { Map<String, Object> headers = new HashMap<>(); Optional.ofNullable(message.getMessageId()) .filter(StringUtils::hasText) .ifPresent(s -> headers.put(MessageHeaders.ID, s)); Optional.ofNullable(message.getContentType()) .filter(StringUtils::hasText) .ifPresent(s -> headers.put(MessageHeaders.CONTENT_TYPE, s)); Optional.ofNullable(message.getReplyTo()) .filter(StringUtils::hasText) .ifPresent(s -> headers.put(MessageHeaders.REPLY_CHANNEL, s)); Optional.ofNullable(message.getMessageId()) .filter(StringUtils::hasText) .ifPresent(s -> headers.put(AzureHeaders.RAW_ID, s)); Optional.ofNullable(message.getMessageId()) 
.filter(StringUtils::hasText) .ifPresent(s -> headers.put(MESSAGE_ID, s)); Optional.ofNullable(message.getTimeToLive()) .ifPresent(s -> headers.put(TIME_TO_LIVE, s)); Optional.ofNullable(message.getScheduledEnqueueTimeUtc()) .ifPresent(s -> headers.put(SCHEDULED_ENQUEUE_TIME_UTC, s)); Optional.ofNullable(message.getSessionId()) .filter(StringUtils::hasText) .ifPresent(s -> headers.put(SESSION_ID, s)); Optional.ofNullable(message.getCorrelationId()) .filter(StringUtils::hasText) .ifPresent(s -> headers.put(CORRELATION_ID, s)); Optional.ofNullable(message.getTo()) .filter(StringUtils::hasText) .ifPresent(s -> headers.put(TO, s)); Optional.ofNullable(message.getLabel()) .filter(StringUtils::hasText) .ifPresent(s -> headers.put(LABEL, s)); Optional.ofNullable(message.getReplyToSessionId()) .filter(StringUtils::hasText) .ifPresent(s -> headers.put(REPLY_TO_SESSION_ID, s)); Optional.ofNullable(message.getPartitionKey()) .filter(StringUtils::hasText) .ifPresent(s -> headers.put(PARTITION_KEY, s)); Optional.ofNullable(message.getViaPartitionKey()) .filter(StringUtils::hasText) .ifPresent(s -> headers.put(VIA_PARTITION_KEY, s)); headers.putAll(message.getProperties()); return Collections.unmodifiableMap(headers); } }
class ServiceBusMessageConverter extends AbstractAzureMessageConverter<IMessage> { private final ObjectMapper objectMapper; public ServiceBusMessageConverter() { objectMapper = OBJECT_MAPPER; } public ServiceBusMessageConverter(ObjectMapper objectMapper) { this.objectMapper = objectMapper; } @Override protected ObjectMapper getObjectMapper() { return objectMapper; } @Override protected byte[] getPayload(IMessage azureMessage) { MessageBody messageBody = azureMessage.getMessageBody(); if (messageBody == null) { return new byte[0]; } switch (messageBody.getBodyType()) { case BINARY: return messageBody.getBinaryData().stream().findFirst().orElse(null); case VALUE: return String.valueOf(messageBody.getValueData()).getBytes(StandardCharsets.UTF_8); case SEQUENCE: return toPayload(messageBody.getSequenceData().stream().findFirst().orElse(null)); default: return new byte[0]; } } @Override protected IMessage fromString(String payload) { return new Message(payload); } @Override protected IMessage fromByte(byte[] payload) { return new Message(payload); } @Override private Optional<String> getStringHeader(MessageHeaders springMessageHeaders, String key) { return Optional.ofNullable(springMessageHeaders.get(key)) .map(Object::toString) .filter(StringUtils::hasText); } @Override protected Map<String, Object> buildCustomHeaders(IMessage message) { Map<String, Object> headers = new HashMap<>(); setValueIfHasText(headers, MessageHeaders.ID, message.getMessageId()); setValueIfHasText(headers, MessageHeaders.CONTENT_TYPE, message.getContentType()); setValueIfHasText(headers, MessageHeaders.REPLY_CHANNEL, message.getReplyTo()); setValueIfHasText(headers, AzureHeaders.RAW_ID, message.getMessageId()); setValueIfHasText(headers, CORRELATION_ID, message.getCorrelationId()); setValueIfHasText(headers, MESSAGE_ID, message.getMessageId()); setValueIfHasText(headers, PARTITION_KEY, message.getPartitionKey()); setValueIfHasText(headers, TO, message.getTo()); setValueIfPresent(headers, 
TIME_TO_LIVE, message.getTimeToLive()); setValueIfPresent(headers, SCHEDULED_ENQUEUE_TIME, message.getScheduledEnqueueTimeUtc()); setValueIfHasText(headers, REPLY_TO_SESSION_ID, message.getReplyToSessionId()); setValueIfHasText(headers, SESSION_ID, message.getSessionId()); headers.putAll(message.getProperties()); return Collections.unmodifiableMap(headers); } private void setValueIfHasText(Map<String, Object> map, String key, String value) { Optional.ofNullable(value) .filter(StringUtils::hasText) .ifPresent(s -> map.put(key, s)); } private void setValueIfPresent(Map<String, Object> map, String key, Object value) { Optional.ofNullable(value) .ifPresent(s -> map.put(key, s)); } }