comment
stringlengths
1
45k
method_body
stringlengths
23
281k
target_code
stringlengths
0
5.16k
method_body_after
stringlengths
12
281k
context_before
stringlengths
8
543k
context_after
stringlengths
8
543k
yes (because previously try to move the retry on RBAC here, but find out there are MSI created in other resources that require retry on RBAC)
private Mono<ServicePrincipal> submitRolesAsync(final ServicePrincipal servicePrincipal) { Mono<ServicePrincipal> create; if (rolesToCreate.isEmpty()) { create = Mono.just(servicePrincipal); } else { create = Flux .fromIterable(rolesToCreate.entrySet()) .flatMap(roleEntry -> manager() .roleAssignments() .define(this.manager().internalContext().randomUuid()) .forServicePrincipal(servicePrincipal) .withBuiltInRole(roleEntry.getValue()) .withScope(roleEntry.getKey()) .createAsync()) .doOnNext( indexable -> cachedRoleAssignments.put(indexable.id(), indexable)) .last() .map( indexable -> { rolesToCreate.clear(); return servicePrincipal; }); } Mono<ServicePrincipal> delete; if (rolesToDelete.isEmpty()) { delete = Mono.just(servicePrincipal); } else { delete = Flux .fromIterable(rolesToDelete) .flatMap( role -> manager() .roleAssignments() .deleteByIdAsync(cachedRoleAssignments.get(role).id()) .thenReturn(role)) .doOnNext(s -> cachedRoleAssignments.remove(s)) .last() .map( s -> { rolesToDelete.clear(); return servicePrincipal; }); } return create.mergeWith(delete).last(); }
.flatMap(roleEntry -> manager()
private Mono<ServicePrincipal> submitRolesAsync(final ServicePrincipal servicePrincipal) { Mono<ServicePrincipal> create; if (rolesToCreate.isEmpty()) { create = Mono.just(servicePrincipal); } else { create = Flux .fromIterable(rolesToCreate.entrySet()) .flatMap(roleEntry -> manager() .roleAssignments() .define(this.manager().internalContext().randomUuid()) .forServicePrincipal(servicePrincipal) .withBuiltInRole(roleEntry.getValue()) .withScope(roleEntry.getKey()) .createAsync()) .doOnNext( indexable -> cachedRoleAssignments.put(indexable.id(), indexable)) .last() .map( indexable -> { rolesToCreate.clear(); return servicePrincipal; }); } Mono<ServicePrincipal> delete; if (rolesToDelete.isEmpty()) { delete = Mono.just(servicePrincipal); } else { delete = Flux .fromIterable(rolesToDelete) .flatMap( role -> manager() .roleAssignments() .deleteByIdAsync(cachedRoleAssignments.get(role).id()) .thenReturn(role)) .doOnNext(s -> cachedRoleAssignments.remove(s)) .last() .map( s -> { rolesToDelete.clear(); return servicePrincipal; }); } return create.mergeWith(delete).last(); }
class ServicePrincipalImpl extends CreatableUpdatableImpl<ServicePrincipal, MicrosoftGraphServicePrincipalInner, ServicePrincipalImpl> implements ServicePrincipal, ServicePrincipal.Definition, ServicePrincipal.Update, HasCredential<ServicePrincipalImpl> { private AuthorizationManager manager; private Map<String, PasswordCredential> cachedPasswordCredentials; private Map<String, CertificateCredential> cachedCertificateCredentials; private Map<String, RoleAssignment> cachedRoleAssignments; private Creatable<ActiveDirectoryApplication> applicationCreatable; private Map<String, BuiltInRole> rolesToCreate; private Set<String> rolesToDelete; String assignedSubscription; private List<CertificateCredentialImpl<?>> certificateCredentialsToCreate; private List<PasswordCredentialImpl<?>> passwordCredentialsToCreate; ServicePrincipalImpl(MicrosoftGraphServicePrincipalInner innerObject, AuthorizationManager manager) { super(innerObject.displayName(), innerObject); this.manager = manager; this.cachedRoleAssignments = new HashMap<>(); this.rolesToCreate = new HashMap<>(); this.rolesToDelete = new HashSet<>(); this.cachedCertificateCredentials = new HashMap<>(); this.certificateCredentialsToCreate = new ArrayList<>(); this.cachedPasswordCredentials = new HashMap<>(); this.passwordCredentialsToCreate = new ArrayList<>(); this.refreshCredentials(innerObject); } @Override public String applicationId() { return innerModel().appId(); } @Override public List<String> servicePrincipalNames() { return innerModel().servicePrincipalNames(); } @Override public Map<String, PasswordCredential> passwordCredentials() { return Collections.unmodifiableMap(cachedPasswordCredentials); } @Override public Map<String, CertificateCredential> certificateCredentials() { return Collections.unmodifiableMap(cachedCertificateCredentials); } @Override public Set<RoleAssignment> roleAssignments() { return Collections.unmodifiableSet(new HashSet<>(cachedRoleAssignments.values())); } @Override protected 
Mono<MicrosoftGraphServicePrincipalInner> getInnerAsync() { return manager.serviceClient().getServicePrincipalsServicePrincipals().getServicePrincipalAsync(id()) .doOnSuccess(this::refreshCredentials); } @Override public Mono<ServicePrincipal> createResourceAsync() { Retry retry = isInCreateMode() ? RetryUtils.backoffRetryFor404ResourceNotFound() : null; Mono<ServicePrincipal> sp; if (isInCreateMode()) { innerModel().withAccountEnabled(true); if (applicationCreatable != null) { ActiveDirectoryApplication application = this.taskResult(applicationCreatable.key()); innerModel().withAppId(application.applicationId()); } sp = manager.serviceClient().getServicePrincipalsServicePrincipals() .createServicePrincipalAsync(innerModel()).map(innerToFluentMap(this)); if (applicationCreatable != null) { sp = sp.retryWhen(RetryUtils.backoffRetryFor400BadRequest()); } } else { sp = manager().serviceClient().getServicePrincipalsServicePrincipals() .updateServicePrincipalAsync(id(), new MicrosoftGraphServicePrincipalInner() .withKeyCredentials(innerModel().keyCredentials()) .withPasswordCredentials(innerModel().passwordCredentials()) ).then(refreshAsync()); } return sp .flatMap( servicePrincipal -> submitCredentialsAsync(servicePrincipal, retry) .mergeWith(submitRolesAsync(servicePrincipal)) .last()) .map( servicePrincipal -> { for (PasswordCredentialImpl<?> passwordCredential : passwordCredentialsToCreate) { passwordCredential.exportAuthFile((ServicePrincipalImpl) servicePrincipal); passwordCredential.consumeSecret(); } for (CertificateCredentialImpl<?> certificateCredential : certificateCredentialsToCreate) { certificateCredential.exportAuthFile((ServicePrincipalImpl) servicePrincipal); } passwordCredentialsToCreate.clear(); certificateCredentialsToCreate.clear(); return servicePrincipal; }); } private Mono<ServicePrincipal> submitCredentialsAsync(final ServicePrincipal servicePrincipal, Retry retry) { return Flux.defer(() -> Flux.fromIterable(passwordCredentialsToCreate) 
.flatMap(passwordCredential -> { Mono<MicrosoftGraphPasswordCredentialInner> monoAddPassword = manager().serviceClient().getServicePrincipals() .addPasswordAsync(id(), new ServicePrincipalsAddPasswordRequestBodyInner() .withPasswordCredential(passwordCredential.innerModel())); if (retry != null) { monoAddPassword = monoAddPassword.retryWhen(retry); } monoAddPassword = monoAddPassword.doOnNext(passwordCredential::setInner); return monoAddPassword; }) ) .then(Mono.defer(() -> { Mono<ServicePrincipal> monoRefresh = refreshAsync(); if (retry != null) { monoRefresh = monoRefresh.retryWhen(retry); } return monoRefresh; })); } @Override public boolean isInCreateMode() { return id() == null; } void refreshCredentials(MicrosoftGraphServicePrincipalInner inner) { cachedCertificateCredentials.clear(); cachedPasswordCredentials.clear(); if (inner.keyCredentials() != null) { inner.keyCredentials().forEach(keyCredentialInner -> { CertificateCredential certificateCredential = new CertificateCredentialImpl<>(keyCredentialInner); cachedCertificateCredentials.put(certificateCredential.name(), certificateCredential); }); } if (inner.passwordCredentials() != null) { inner.passwordCredentials().forEach(passwordCredentialInner -> { PasswordCredential passwordCredential = new PasswordCredentialImpl<>(passwordCredentialInner); cachedPasswordCredentials.put(passwordCredential.name(), passwordCredential); }); } } @Override public Mono<ServicePrincipal> refreshAsync() { return getInnerAsync().map(innerToFluentMap(this)); } @Override public CertificateCredentialImpl<ServicePrincipalImpl> defineCertificateCredential(String name) { return new CertificateCredentialImpl<>(name, this); } @Override public PasswordCredentialImpl<ServicePrincipalImpl> definePasswordCredential(String name) { return new PasswordCredentialImpl<>(name, this); } @Override public ServicePrincipalImpl withoutCredential(String name) { if (cachedPasswordCredentials.containsKey(name)) { 
innerModel().passwordCredentials().remove(cachedPasswordCredentials.get(name).innerModel()); } else if (cachedCertificateCredentials.containsKey(name)) { innerModel().keyCredentials().remove(cachedCertificateCredentials.get(name).innerModel()); } return this; } @Override public ServicePrincipalImpl withCertificateCredential(CertificateCredentialImpl<?> credential) { this.certificateCredentialsToCreate.add(credential); if (innerModel().keyCredentials() == null) { innerModel().withKeyCredentials(new ArrayList<>()); } innerModel().keyCredentials().add(credential.innerModel()); return this; } @Override public ServicePrincipalImpl withPasswordCredential(PasswordCredentialImpl<?> credential) { this.passwordCredentialsToCreate.add(credential); return this; } @Override public ServicePrincipalImpl withExistingApplication(String id) { innerModel().withAppId(id); return this; } @Override public ServicePrincipalImpl withExistingApplication(ActiveDirectoryApplication application) { innerModel().withAppId(application.applicationId()); return this; } @Override public ServicePrincipalImpl withNewApplication(Creatable<ActiveDirectoryApplication> applicationCreatable) { this.addDependency(applicationCreatable); this.applicationCreatable = applicationCreatable; return this; } @Override public ServicePrincipalImpl withNewApplication(String signOnUrl) { return withNewApplication( manager.applications().define(name()).withSignOnUrl(signOnUrl).withIdentifierUrl(signOnUrl)); } @Override public ServicePrincipalImpl withNewApplication() { return withNewApplication( manager.applications().define(name())); } @Override public ServicePrincipalImpl withNewRole(BuiltInRole role, String scope) { this.rolesToCreate.put(scope, role); return this; } @Override public ServicePrincipalImpl withNewRoleInSubscription(BuiltInRole role, String subscriptionId) { this.assignedSubscription = subscriptionId; return withNewRole(role, "subscriptions/" + subscriptionId); } @Override public ServicePrincipalImpl 
withNewRoleInResourceGroup(BuiltInRole role, ResourceGroup resourceGroup) { return withNewRole(role, resourceGroup.id()); } @Override public Update withoutRole(RoleAssignment roleAssignment) { this.rolesToDelete.add(roleAssignment.id()); return this; } @Override public String id() { return innerModel().id(); } @Override public AuthorizationManager manager() { return this.manager; } }
class ServicePrincipalImpl extends CreatableUpdatableImpl<ServicePrincipal, MicrosoftGraphServicePrincipalInner, ServicePrincipalImpl> implements ServicePrincipal, ServicePrincipal.Definition, ServicePrincipal.Update, HasCredential<ServicePrincipalImpl> { private AuthorizationManager manager; private Map<String, PasswordCredential> cachedPasswordCredentials; private Map<String, CertificateCredential> cachedCertificateCredentials; private Map<String, RoleAssignment> cachedRoleAssignments; private Creatable<ActiveDirectoryApplication> applicationCreatable; private Map<String, BuiltInRole> rolesToCreate; private Set<String> rolesToDelete; String assignedSubscription; private List<CertificateCredentialImpl<?>> certificateCredentialsToCreate; private List<PasswordCredentialImpl<?>> passwordCredentialsToCreate; ServicePrincipalImpl(MicrosoftGraphServicePrincipalInner innerObject, AuthorizationManager manager) { super(innerObject.displayName(), innerObject); this.manager = manager; this.cachedRoleAssignments = new HashMap<>(); this.rolesToCreate = new HashMap<>(); this.rolesToDelete = new HashSet<>(); this.cachedCertificateCredentials = new HashMap<>(); this.certificateCredentialsToCreate = new ArrayList<>(); this.cachedPasswordCredentials = new HashMap<>(); this.passwordCredentialsToCreate = new ArrayList<>(); this.refreshCredentials(innerObject); } @Override public String applicationId() { return innerModel().appId(); } @Override public List<String> servicePrincipalNames() { return innerModel().servicePrincipalNames(); } @Override public Map<String, PasswordCredential> passwordCredentials() { return Collections.unmodifiableMap(cachedPasswordCredentials); } @Override public Map<String, CertificateCredential> certificateCredentials() { return Collections.unmodifiableMap(cachedCertificateCredentials); } @Override public Set<RoleAssignment> roleAssignments() { return Collections.unmodifiableSet(new HashSet<>(cachedRoleAssignments.values())); } @Override protected 
Mono<MicrosoftGraphServicePrincipalInner> getInnerAsync() { return manager.serviceClient().getServicePrincipalsServicePrincipals().getServicePrincipalAsync(id()) .doOnSuccess(this::refreshCredentials); } @Override public Mono<ServicePrincipal> createResourceAsync() { Retry retry = isInCreateMode() ? RetryUtils.backoffRetryFor404ResourceNotFound() : null; Mono<ServicePrincipal> sp; if (isInCreateMode()) { innerModel().withAccountEnabled(true); if (applicationCreatable != null) { ActiveDirectoryApplication application = this.taskResult(applicationCreatable.key()); innerModel().withAppId(application.applicationId()); } sp = manager.serviceClient().getServicePrincipalsServicePrincipals() .createServicePrincipalAsync(innerModel()).map(innerToFluentMap(this)); if (applicationCreatable != null) { sp = sp.retryWhen(RetryUtils.backoffRetryFor400BadRequest()); } } else { sp = manager().serviceClient().getServicePrincipalsServicePrincipals() .updateServicePrincipalAsync(id(), new MicrosoftGraphServicePrincipalInner() .withKeyCredentials(innerModel().keyCredentials()) .withPasswordCredentials(innerModel().passwordCredentials()) ).then(refreshAsync()); } return sp .flatMap( servicePrincipal -> submitCredentialsAsync(servicePrincipal, retry) .mergeWith(submitRolesAsync(servicePrincipal)) .last()) .map( servicePrincipal -> { for (PasswordCredentialImpl<?> passwordCredential : passwordCredentialsToCreate) { passwordCredential.exportAuthFile((ServicePrincipalImpl) servicePrincipal); passwordCredential.consumeSecret(); } for (CertificateCredentialImpl<?> certificateCredential : certificateCredentialsToCreate) { certificateCredential.exportAuthFile((ServicePrincipalImpl) servicePrincipal); } passwordCredentialsToCreate.clear(); certificateCredentialsToCreate.clear(); return servicePrincipal; }); } private Mono<ServicePrincipal> submitCredentialsAsync(final ServicePrincipal servicePrincipal, Retry retry) { return Flux.defer(() -> Flux.fromIterable(passwordCredentialsToCreate) 
.flatMap(passwordCredential -> { Mono<MicrosoftGraphPasswordCredentialInner> monoAddPassword = manager().serviceClient().getServicePrincipals() .addPasswordAsync(id(), new ServicePrincipalsAddPasswordRequestBodyInner() .withPasswordCredential(passwordCredential.innerModel())); if (retry != null) { monoAddPassword = monoAddPassword.retryWhen(retry); } monoAddPassword = monoAddPassword.doOnNext(passwordCredential::setInner); return monoAddPassword; }) ) .then(Mono.defer(() -> { Mono<ServicePrincipal> monoRefresh = refreshAsync(); if (retry != null) { monoRefresh = monoRefresh.retryWhen(retry); } return monoRefresh; })); } @Override public boolean isInCreateMode() { return id() == null; } void refreshCredentials(MicrosoftGraphServicePrincipalInner inner) { cachedCertificateCredentials.clear(); cachedPasswordCredentials.clear(); if (inner.keyCredentials() != null) { inner.keyCredentials().forEach(keyCredentialInner -> { CertificateCredential certificateCredential = new CertificateCredentialImpl<>(keyCredentialInner); cachedCertificateCredentials.put(certificateCredential.name(), certificateCredential); }); } if (inner.passwordCredentials() != null) { inner.passwordCredentials().forEach(passwordCredentialInner -> { PasswordCredential passwordCredential = new PasswordCredentialImpl<>(passwordCredentialInner); cachedPasswordCredentials.put(passwordCredential.name(), passwordCredential); }); } } @Override public Mono<ServicePrincipal> refreshAsync() { return getInnerAsync().map(innerToFluentMap(this)); } @Override public CertificateCredentialImpl<ServicePrincipalImpl> defineCertificateCredential(String name) { return new CertificateCredentialImpl<>(name, this); } @Override public PasswordCredentialImpl<ServicePrincipalImpl> definePasswordCredential(String name) { return new PasswordCredentialImpl<>(name, this); } @Override public ServicePrincipalImpl withoutCredential(String name) { if (cachedPasswordCredentials.containsKey(name)) { 
innerModel().passwordCredentials().remove(cachedPasswordCredentials.get(name).innerModel()); } else if (cachedCertificateCredentials.containsKey(name)) { innerModel().keyCredentials().remove(cachedCertificateCredentials.get(name).innerModel()); } return this; } @Override public ServicePrincipalImpl withCertificateCredential(CertificateCredentialImpl<?> credential) { this.certificateCredentialsToCreate.add(credential); if (innerModel().keyCredentials() == null) { innerModel().withKeyCredentials(new ArrayList<>()); } innerModel().keyCredentials().add(credential.innerModel()); return this; } @Override public ServicePrincipalImpl withPasswordCredential(PasswordCredentialImpl<?> credential) { this.passwordCredentialsToCreate.add(credential); return this; } @Override public ServicePrincipalImpl withExistingApplication(String id) { innerModel().withAppId(id); return this; } @Override public ServicePrincipalImpl withExistingApplication(ActiveDirectoryApplication application) { innerModel().withAppId(application.applicationId()); return this; } @Override public ServicePrincipalImpl withNewApplication(Creatable<ActiveDirectoryApplication> applicationCreatable) { this.addDependency(applicationCreatable); this.applicationCreatable = applicationCreatable; return this; } @Override public ServicePrincipalImpl withNewApplication(String signOnUrl) { return withNewApplication( manager.applications().define(name()).withSignOnUrl(signOnUrl).withIdentifierUrl(signOnUrl)); } @Override public ServicePrincipalImpl withNewApplication() { return withNewApplication( manager.applications().define(name())); } @Override public ServicePrincipalImpl withNewRole(BuiltInRole role, String scope) { this.rolesToCreate.put(scope, role); return this; } @Override public ServicePrincipalImpl withNewRoleInSubscription(BuiltInRole role, String subscriptionId) { this.assignedSubscription = subscriptionId; return withNewRole(role, "subscriptions/" + subscriptionId); } @Override public ServicePrincipalImpl 
withNewRoleInResourceGroup(BuiltInRole role, ResourceGroup resourceGroup) { return withNewRole(role, resourceGroup.id()); } @Override public Update withoutRole(RoleAssignment roleAssignment) { this.rolesToDelete.add(roleAssignment.id()); return this; } @Override public String id() { return innerModel().id(); } @Override public AuthorizationManager manager() { return this.manager; } }
From what I remember, the inner model has properties like `List<MicrosoftGraphPasswordCredentialInner> passwordCredentials`, that would be different after `submitCredentialsAsync` added some passwords; hence this refresh.
private Mono<ServicePrincipal> submitCredentialsAsync(final ServicePrincipal servicePrincipal, Retry retry) { return Flux.defer(() -> Flux.fromIterable(passwordCredentialsToCreate) .flatMap(passwordCredential -> { Mono<MicrosoftGraphPasswordCredentialInner> monoAddPassword = manager().serviceClient().getServicePrincipals() .addPasswordAsync(id(), new ServicePrincipalsAddPasswordRequestBodyInner() .withPasswordCredential(passwordCredential.innerModel())); if (retry != null) { monoAddPassword = monoAddPassword.retryWhen(retry); } monoAddPassword = monoAddPassword.doOnNext(passwordCredential::setInner); return monoAddPassword; }) ) .then(Mono.defer(() -> { Mono<ServicePrincipal> monoRefresh = refreshAsync(); if (retry != null) { monoRefresh = monoRefresh.retryWhen(retry); } return monoRefresh; })); }
.then(Mono.defer(() -> {
private Mono<ServicePrincipal> submitCredentialsAsync(final ServicePrincipal servicePrincipal, Retry retry) { return Flux.defer(() -> Flux.fromIterable(passwordCredentialsToCreate) .flatMap(passwordCredential -> { Mono<MicrosoftGraphPasswordCredentialInner> monoAddPassword = manager().serviceClient().getServicePrincipals() .addPasswordAsync(id(), new ServicePrincipalsAddPasswordRequestBodyInner() .withPasswordCredential(passwordCredential.innerModel())); if (retry != null) { monoAddPassword = monoAddPassword.retryWhen(retry); } monoAddPassword = monoAddPassword.doOnNext(passwordCredential::setInner); return monoAddPassword; }) ) .then(Mono.defer(() -> { Mono<ServicePrincipal> monoRefresh = refreshAsync(); if (retry != null) { monoRefresh = monoRefresh.retryWhen(retry); } return monoRefresh; })); }
class ServicePrincipalImpl extends CreatableUpdatableImpl<ServicePrincipal, MicrosoftGraphServicePrincipalInner, ServicePrincipalImpl> implements ServicePrincipal, ServicePrincipal.Definition, ServicePrincipal.Update, HasCredential<ServicePrincipalImpl> { private AuthorizationManager manager; private Map<String, PasswordCredential> cachedPasswordCredentials; private Map<String, CertificateCredential> cachedCertificateCredentials; private Map<String, RoleAssignment> cachedRoleAssignments; private Creatable<ActiveDirectoryApplication> applicationCreatable; private Map<String, BuiltInRole> rolesToCreate; private Set<String> rolesToDelete; String assignedSubscription; private List<CertificateCredentialImpl<?>> certificateCredentialsToCreate; private List<PasswordCredentialImpl<?>> passwordCredentialsToCreate; ServicePrincipalImpl(MicrosoftGraphServicePrincipalInner innerObject, AuthorizationManager manager) { super(innerObject.displayName(), innerObject); this.manager = manager; this.cachedRoleAssignments = new HashMap<>(); this.rolesToCreate = new HashMap<>(); this.rolesToDelete = new HashSet<>(); this.cachedCertificateCredentials = new HashMap<>(); this.certificateCredentialsToCreate = new ArrayList<>(); this.cachedPasswordCredentials = new HashMap<>(); this.passwordCredentialsToCreate = new ArrayList<>(); this.refreshCredentials(innerObject); } @Override public String applicationId() { return innerModel().appId(); } @Override public List<String> servicePrincipalNames() { return innerModel().servicePrincipalNames(); } @Override public Map<String, PasswordCredential> passwordCredentials() { return Collections.unmodifiableMap(cachedPasswordCredentials); } @Override public Map<String, CertificateCredential> certificateCredentials() { return Collections.unmodifiableMap(cachedCertificateCredentials); } @Override public Set<RoleAssignment> roleAssignments() { return Collections.unmodifiableSet(new HashSet<>(cachedRoleAssignments.values())); } @Override protected 
Mono<MicrosoftGraphServicePrincipalInner> getInnerAsync() { return manager.serviceClient().getServicePrincipalsServicePrincipals().getServicePrincipalAsync(id()) .doOnSuccess(this::refreshCredentials); } @Override public Mono<ServicePrincipal> createResourceAsync() { Retry retry = isInCreateMode() ? RetryUtils.backoffRetryFor404ResourceNotFound() : null; Mono<ServicePrincipal> sp; if (isInCreateMode()) { innerModel().withAccountEnabled(true); if (applicationCreatable != null) { ActiveDirectoryApplication application = this.taskResult(applicationCreatable.key()); innerModel().withAppId(application.applicationId()); } sp = manager.serviceClient().getServicePrincipalsServicePrincipals() .createServicePrincipalAsync(innerModel()).map(innerToFluentMap(this)); if (applicationCreatable != null) { sp = sp.retryWhen(RetryUtils.backoffRetryFor400BadRequest()); } } else { sp = manager().serviceClient().getServicePrincipalsServicePrincipals() .updateServicePrincipalAsync(id(), new MicrosoftGraphServicePrincipalInner() .withKeyCredentials(innerModel().keyCredentials()) .withPasswordCredentials(innerModel().passwordCredentials()) ).then(refreshAsync()); } return sp .flatMap( servicePrincipal -> submitCredentialsAsync(servicePrincipal, retry) .mergeWith(submitRolesAsync(servicePrincipal)) .last()) .map( servicePrincipal -> { for (PasswordCredentialImpl<?> passwordCredential : passwordCredentialsToCreate) { passwordCredential.exportAuthFile((ServicePrincipalImpl) servicePrincipal); passwordCredential.consumeSecret(); } for (CertificateCredentialImpl<?> certificateCredential : certificateCredentialsToCreate) { certificateCredential.exportAuthFile((ServicePrincipalImpl) servicePrincipal); } passwordCredentialsToCreate.clear(); certificateCredentialsToCreate.clear(); return servicePrincipal; }); } private Mono<ServicePrincipal> submitRolesAsync(final ServicePrincipal servicePrincipal) { Mono<ServicePrincipal> create; if (rolesToCreate.isEmpty()) { create = 
Mono.just(servicePrincipal); } else { create = Flux .fromIterable(rolesToCreate.entrySet()) .flatMap(roleEntry -> manager() .roleAssignments() .define(this.manager().internalContext().randomUuid()) .forServicePrincipal(servicePrincipal) .withBuiltInRole(roleEntry.getValue()) .withScope(roleEntry.getKey()) .createAsync()) .doOnNext( indexable -> cachedRoleAssignments.put(indexable.id(), indexable)) .last() .map( indexable -> { rolesToCreate.clear(); return servicePrincipal; }); } Mono<ServicePrincipal> delete; if (rolesToDelete.isEmpty()) { delete = Mono.just(servicePrincipal); } else { delete = Flux .fromIterable(rolesToDelete) .flatMap( role -> manager() .roleAssignments() .deleteByIdAsync(cachedRoleAssignments.get(role).id()) .thenReturn(role)) .doOnNext(s -> cachedRoleAssignments.remove(s)) .last() .map( s -> { rolesToDelete.clear(); return servicePrincipal; }); } return create.mergeWith(delete).last(); } @Override public boolean isInCreateMode() { return id() == null; } void refreshCredentials(MicrosoftGraphServicePrincipalInner inner) { cachedCertificateCredentials.clear(); cachedPasswordCredentials.clear(); if (inner.keyCredentials() != null) { inner.keyCredentials().forEach(keyCredentialInner -> { CertificateCredential certificateCredential = new CertificateCredentialImpl<>(keyCredentialInner); cachedCertificateCredentials.put(certificateCredential.name(), certificateCredential); }); } if (inner.passwordCredentials() != null) { inner.passwordCredentials().forEach(passwordCredentialInner -> { PasswordCredential passwordCredential = new PasswordCredentialImpl<>(passwordCredentialInner); cachedPasswordCredentials.put(passwordCredential.name(), passwordCredential); }); } } @Override public Mono<ServicePrincipal> refreshAsync() { return getInnerAsync().map(innerToFluentMap(this)); } @Override public CertificateCredentialImpl<ServicePrincipalImpl> defineCertificateCredential(String name) { return new CertificateCredentialImpl<>(name, this); } @Override public 
PasswordCredentialImpl<ServicePrincipalImpl> definePasswordCredential(String name) { return new PasswordCredentialImpl<>(name, this); } @Override public ServicePrincipalImpl withoutCredential(String name) { if (cachedPasswordCredentials.containsKey(name)) { innerModel().passwordCredentials().remove(cachedPasswordCredentials.get(name).innerModel()); } else if (cachedCertificateCredentials.containsKey(name)) { innerModel().keyCredentials().remove(cachedCertificateCredentials.get(name).innerModel()); } return this; } @Override public ServicePrincipalImpl withCertificateCredential(CertificateCredentialImpl<?> credential) { this.certificateCredentialsToCreate.add(credential); if (innerModel().keyCredentials() == null) { innerModel().withKeyCredentials(new ArrayList<>()); } innerModel().keyCredentials().add(credential.innerModel()); return this; } @Override public ServicePrincipalImpl withPasswordCredential(PasswordCredentialImpl<?> credential) { this.passwordCredentialsToCreate.add(credential); return this; } @Override public ServicePrincipalImpl withExistingApplication(String id) { innerModel().withAppId(id); return this; } @Override public ServicePrincipalImpl withExistingApplication(ActiveDirectoryApplication application) { innerModel().withAppId(application.applicationId()); return this; } @Override public ServicePrincipalImpl withNewApplication(Creatable<ActiveDirectoryApplication> applicationCreatable) { this.addDependency(applicationCreatable); this.applicationCreatable = applicationCreatable; return this; } @Override public ServicePrincipalImpl withNewApplication(String signOnUrl) { return withNewApplication( manager.applications().define(name()).withSignOnUrl(signOnUrl).withIdentifierUrl(signOnUrl)); } @Override public ServicePrincipalImpl withNewApplication() { return withNewApplication( manager.applications().define(name())); } @Override public ServicePrincipalImpl withNewRole(BuiltInRole role, String scope) { this.rolesToCreate.put(scope, role); return this; 
} @Override public ServicePrincipalImpl withNewRoleInSubscription(BuiltInRole role, String subscriptionId) { this.assignedSubscription = subscriptionId; return withNewRole(role, "subscriptions/" + subscriptionId); } @Override public ServicePrincipalImpl withNewRoleInResourceGroup(BuiltInRole role, ResourceGroup resourceGroup) { return withNewRole(role, resourceGroup.id()); } @Override public Update withoutRole(RoleAssignment roleAssignment) { this.rolesToDelete.add(roleAssignment.id()); return this; } @Override public String id() { return innerModel().id(); } @Override public AuthorizationManager manager() { return this.manager; } }
/**
 * Implementation for {@link ServicePrincipal} and its fluent create/update stages.
 *
 * <p>Pending changes (credentials to add, role assignments to create/delete) are queued in
 * local collections and submitted by {@link #createResourceAsync()}.
 */
class ServicePrincipalImpl
    extends CreatableUpdatableImpl<ServicePrincipal, MicrosoftGraphServicePrincipalInner, ServicePrincipalImpl>
    implements ServicePrincipal, ServicePrincipal.Definition, ServicePrincipal.Update,
        HasCredential<ServicePrincipalImpl> {

    private AuthorizationManager manager;
    // Credentials parsed from the latest inner model, keyed by credential name;
    // rebuilt by refreshCredentials().
    private Map<String, PasswordCredential> cachedPasswordCredentials;
    private Map<String, CertificateCredential> cachedCertificateCredentials;
    // Role assignments created through this instance, keyed by assignment id.
    private Map<String, RoleAssignment> cachedRoleAssignments;
    // Optional application to create as a dependency before this service principal.
    private Creatable<ActiveDirectoryApplication> applicationCreatable;
    // Role assignments queued for creation, keyed by scope (one role per scope).
    private Map<String, BuiltInRole> rolesToCreate;
    // Ids of role assignments queued for deletion.
    private Set<String> rolesToDelete;
    String assignedSubscription;
    // Credentials queued for creation on the next apply.
    private List<CertificateCredentialImpl<?>> certificateCredentialsToCreate;
    private List<PasswordCredentialImpl<?>> passwordCredentialsToCreate;

    ServicePrincipalImpl(MicrosoftGraphServicePrincipalInner innerObject, AuthorizationManager manager) {
        super(innerObject.displayName(), innerObject);
        this.manager = manager;
        this.cachedRoleAssignments = new HashMap<>();
        this.rolesToCreate = new HashMap<>();
        this.rolesToDelete = new HashSet<>();
        this.cachedCertificateCredentials = new HashMap<>();
        this.certificateCredentialsToCreate = new ArrayList<>();
        this.cachedPasswordCredentials = new HashMap<>();
        this.passwordCredentialsToCreate = new ArrayList<>();
        // Populate the credential caches from the inner model we were constructed with.
        this.refreshCredentials(innerObject);
    }

    @Override
    public String applicationId() {
        return innerModel().appId();
    }

    @Override
    public List<String> servicePrincipalNames() {
        return innerModel().servicePrincipalNames();
    }

    /** Returns an unmodifiable view of the cached password credentials, keyed by name. */
    @Override
    public Map<String, PasswordCredential> passwordCredentials() {
        return Collections.unmodifiableMap(cachedPasswordCredentials);
    }

    /** Returns an unmodifiable view of the cached certificate credentials, keyed by name. */
    @Override
    public Map<String, CertificateCredential> certificateCredentials() {
        return Collections.unmodifiableMap(cachedCertificateCredentials);
    }

    /** Returns an unmodifiable snapshot of the role assignments cached on this instance. */
    @Override
    public Set<RoleAssignment> roleAssignments() {
        return Collections.unmodifiableSet(new HashSet<>(cachedRoleAssignments.values()));
    }

    /** Fetches the inner model from the service and refreshes the credential caches. */
    @Override
    protected Mono<MicrosoftGraphServicePrincipalInner> getInnerAsync() {
        return manager.serviceClient().getServicePrincipalsServicePrincipals().getServicePrincipalAsync(id())
            .doOnSuccess(this::refreshCredentials);
    }

    /**
     * Creates or updates the service principal, then submits queued credentials and role
     * assignments, and finally exports auth files for newly created credentials.
     */
    @Override
    public Mono<ServicePrincipal> createResourceAsync() {
        // In create mode, follow-up calls may 404 until the new principal becomes visible
        // (presumably directory replication delay — TODO confirm), hence the backoff retry.
        Retry retry = isInCreateMode() ? RetryUtils.backoffRetryFor404ResourceNotFound() : null;
        Mono<ServicePrincipal> sp;
        if (isInCreateMode()) {
            innerModel().withAccountEnabled(true);
            if (applicationCreatable != null) {
                // The application was created as a dependency task; take its id.
                ActiveDirectoryApplication application = this.taskResult(applicationCreatable.key());
                innerModel().withAppId(application.applicationId());
            }
            sp = manager.serviceClient().getServicePrincipalsServicePrincipals()
                .createServicePrincipalAsync(innerModel()).map(innerToFluentMap(this));
            if (applicationCreatable != null) {
                // Creating the principal right after its application can be rejected with 400
                // until the application propagates — retry (inferred from RetryUtils name; confirm).
                sp = sp.retryWhen(RetryUtils.backoffRetryFor400BadRequest());
            }
        } else {
            // Update mode: only credentials are patched, then the model is refreshed.
            sp = manager().serviceClient().getServicePrincipalsServicePrincipals()
                .updateServicePrincipalAsync(id(), new MicrosoftGraphServicePrincipalInner()
                    .withKeyCredentials(innerModel().keyCredentials())
                    .withPasswordCredentials(innerModel().passwordCredentials())
                ).then(refreshAsync());
        }
        return sp
            .flatMap(
                servicePrincipal -> submitCredentialsAsync(servicePrincipal, retry)
                    .mergeWith(submitRolesAsync(servicePrincipal))
                    .last())
            .map(
                servicePrincipal -> {
                    // Export auth files while the generated secrets are still held, then drop
                    // the secrets and clear the pending queues.
                    for (PasswordCredentialImpl<?> passwordCredential : passwordCredentialsToCreate) {
                        passwordCredential.exportAuthFile((ServicePrincipalImpl) servicePrincipal);
                        passwordCredential.consumeSecret();
                    }
                    for (CertificateCredentialImpl<?> certificateCredential : certificateCredentialsToCreate) {
                        certificateCredential.exportAuthFile((ServicePrincipalImpl) servicePrincipal);
                    }
                    passwordCredentialsToCreate.clear();
                    certificateCredentialsToCreate.clear();
                    return servicePrincipal;
                });
    }

    /**
     * Submits queued role-assignment creations and deletions for this service principal.
     *
     * <p>NOTE(review): no RBAC-propagation retry is applied here; retry is expected to live in
     * the role-assignment creation path itself (other resources, e.g. MSI, rely on it there) —
     * confirm before moving it.
     */
    private Mono<ServicePrincipal> submitRolesAsync(final ServicePrincipal servicePrincipal) {
        Mono<ServicePrincipal> create;
        if (rolesToCreate.isEmpty()) {
            create = Mono.just(servicePrincipal);
        } else {
            create = Flux
                .fromIterable(rolesToCreate.entrySet())
                .flatMap(roleEntry -> manager()
                    .roleAssignments()
                    .define(this.manager().internalContext().randomUuid())
                    .forServicePrincipal(servicePrincipal)
                    .withBuiltInRole(roleEntry.getValue())
                    .withScope(roleEntry.getKey())
                    .createAsync())
                // Cache each created assignment by id so it can be deleted later.
                .doOnNext(
                    indexable -> cachedRoleAssignments.put(indexable.id(), indexable))
                .last()
                .map(
                    indexable -> {
                        rolesToCreate.clear();
                        return servicePrincipal;
                    });
        }
        Mono<ServicePrincipal> delete;
        if (rolesToDelete.isEmpty()) {
            delete = Mono.just(servicePrincipal);
        } else {
            // NOTE(review): deletion resolves each queued id via cachedRoleAssignments; an id
            // that was never cached on this instance would NPE here — confirm intended usage.
            delete = Flux
                .fromIterable(rolesToDelete)
                .flatMap(
                    role -> manager()
                        .roleAssignments()
                        .deleteByIdAsync(cachedRoleAssignments.get(role).id())
                        .thenReturn(role))
                .doOnNext(s -> cachedRoleAssignments.remove(s))
                .last()
                .map(
                    s -> {
                        rolesToDelete.clear();
                        return servicePrincipal;
                    });
        }
        return create.mergeWith(delete).last();
    }

    /** A service principal with no id has not been created on the service yet. */
    @Override
    public boolean isInCreateMode() {
        return id() == null;
    }

    /** Rebuilds the credential caches from the given inner model. */
    void refreshCredentials(MicrosoftGraphServicePrincipalInner inner) {
        cachedCertificateCredentials.clear();
        cachedPasswordCredentials.clear();
        if (inner.keyCredentials() != null) {
            inner.keyCredentials().forEach(keyCredentialInner -> {
                CertificateCredential certificateCredential = new CertificateCredentialImpl<>(keyCredentialInner);
                cachedCertificateCredentials.put(certificateCredential.name(), certificateCredential);
            });
        }
        if (inner.passwordCredentials() != null) {
            inner.passwordCredentials().forEach(passwordCredentialInner -> {
                PasswordCredential passwordCredential = new PasswordCredentialImpl<>(passwordCredentialInner);
                cachedPasswordCredentials.put(passwordCredential.name(), passwordCredential);
            });
        }
    }

    @Override
    public Mono<ServicePrincipal> refreshAsync() {
        return getInnerAsync().map(innerToFluentMap(this));
    }

    /** Starts the definition of a certificate credential to attach to this service principal. */
    @Override
    public CertificateCredentialImpl<ServicePrincipalImpl> defineCertificateCredential(String name) {
        return new CertificateCredentialImpl<>(name, this);
    }

    /** Starts the definition of a password credential to attach to this service principal. */
    @Override
    public PasswordCredentialImpl<ServicePrincipalImpl> definePasswordCredential(String name) {
        return new PasswordCredentialImpl<>(name, this);
    }

    /** Removes the named credential (password or certificate) from the inner model. */
    @Override
    public ServicePrincipalImpl withoutCredential(String name) {
        if (cachedPasswordCredentials.containsKey(name)) {
            innerModel().passwordCredentials().remove(cachedPasswordCredentials.get(name).innerModel());
        } else if (cachedCertificateCredentials.containsKey(name)) {
            innerModel().keyCredentials().remove(cachedCertificateCredentials.get(name).innerModel());
        }
        return this;
    }

    /** Queues a certificate credential and adds its inner to the model's key credentials. */
    @Override
    public ServicePrincipalImpl withCertificateCredential(CertificateCredentialImpl<?> credential) {
        this.certificateCredentialsToCreate.add(credential);
        if (innerModel().keyCredentials() == null) {
            innerModel().withKeyCredentials(new ArrayList<>());
        }
        innerModel().keyCredentials().add(credential.innerModel());
        return this;
    }

    /** Queues a password credential; it is submitted via a separate addPassword call, not the model. */
    @Override
    public ServicePrincipalImpl withPasswordCredential(PasswordCredentialImpl<?> credential) {
        this.passwordCredentialsToCreate.add(credential);
        return this;
    }

    @Override
    public ServicePrincipalImpl withExistingApplication(String id) {
        innerModel().withAppId(id);
        return this;
    }

    @Override
    public ServicePrincipalImpl withExistingApplication(ActiveDirectoryApplication application) {
        innerModel().withAppId(application.applicationId());
        return this;
    }

    /** Registers an application creatable as a dependency to create before this principal. */
    @Override
    public ServicePrincipalImpl withNewApplication(Creatable<ActiveDirectoryApplication> applicationCreatable) {
        this.addDependency(applicationCreatable);
        this.applicationCreatable = applicationCreatable;
        return this;
    }

    @Override
    public ServicePrincipalImpl withNewApplication(String signOnUrl) {
        return withNewApplication(
            manager.applications().define(name()).withSignOnUrl(signOnUrl).withIdentifierUrl(signOnUrl));
    }

    @Override
    public ServicePrincipalImpl withNewApplication() {
        return withNewApplication(
            manager.applications().define(name()));
    }

    /** Queues a role assignment at the given scope (map keyed by scope, so one role per scope). */
    @Override
    public ServicePrincipalImpl withNewRole(BuiltInRole role, String scope) {
        this.rolesToCreate.put(scope, role);
        return this;
    }

    @Override
    public ServicePrincipalImpl withNewRoleInSubscription(BuiltInRole role, String subscriptionId) {
        this.assignedSubscription = subscriptionId;
        return withNewRole(role, "subscriptions/" + subscriptionId);
    }

    @Override
    public ServicePrincipalImpl withNewRoleInResourceGroup(BuiltInRole role, ResourceGroup resourceGroup) {
        return withNewRole(role, resourceGroup.id());
    }

    /** Queues removal of the given role assignment on the next apply. */
    @Override
    public Update withoutRole(RoleAssignment roleAssignment) {
        this.rolesToDelete.add(roleAssignment.id());
        return this;
    }

    @Override
    public String id() {
        return innerModel().id();
    }

    @Override
    public AuthorizationManager manager() {
        return this.manager;
    }
}
The password could be null here, and the refresh would be called anyway, so I suspect this is more about refreshing the service principal itself than about the password. (Just a minor semantic opinion.)
/**
 * Creates the queued password credentials on the service principal via the Graph
 * "addPassword" action, then refreshes this model from the service.
 *
 * @param servicePrincipal the created/updated service principal (currently unreferenced;
 *     the returned value comes from {@code refreshAsync()})
 * @param retry optional retry applied to each service call; {@code null} disables retry
 * @return the refreshed service principal
 */
private Mono<ServicePrincipal> submitCredentialsAsync(final ServicePrincipal servicePrincipal, Retry retry) {
    return Flux.defer(() -> Flux.fromIterable(passwordCredentialsToCreate)
        .flatMap(passwordCredential -> {
            // The service generates the secret and returns it in the response inner.
            Mono<MicrosoftGraphPasswordCredentialInner> monoAddPassword =
                manager().serviceClient().getServicePrincipals()
                    .addPasswordAsync(id(), new ServicePrincipalsAddPasswordRequestBodyInner()
                        .withPasswordCredential(passwordCredential.innerModel()));
            if (retry != null) {
                monoAddPassword = monoAddPassword.retryWhen(retry);
            }
            // Capture the service-generated credential (including the secret) on the wrapper.
            monoAddPassword = monoAddPassword.doOnNext(passwordCredential::setInner);
            return monoAddPassword;
        })
    )
    .then(Mono.defer(() -> {
        // NOTE(review): this refresh runs even when passwordCredentialsToCreate is empty;
        // it could likely be skipped in that case — left as-is to avoid a behavior change.
        Mono<ServicePrincipal> monoRefresh = refreshAsync();
        if (retry != null) {
            monoRefresh = monoRefresh.retryWhen(retry);
        }
        return monoRefresh;
    }));
}
.then(Mono.defer(() -> {
/**
 * Creates the queued password credentials on the service principal via the Graph
 * "addPassword" action, then refreshes this model from the service.
 *
 * @param servicePrincipal the created/updated service principal (currently unreferenced;
 *     the returned value comes from {@code refreshAsync()})
 * @param retry optional retry applied to each service call; {@code null} disables retry
 * @return the refreshed service principal
 */
private Mono<ServicePrincipal> submitCredentialsAsync(final ServicePrincipal servicePrincipal, Retry retry) {
    return Flux.defer(() -> Flux.fromIterable(passwordCredentialsToCreate)
        .flatMap(passwordCredential -> {
            // The service generates the secret and returns it in the response inner.
            Mono<MicrosoftGraphPasswordCredentialInner> monoAddPassword =
                manager().serviceClient().getServicePrincipals()
                    .addPasswordAsync(id(), new ServicePrincipalsAddPasswordRequestBodyInner()
                        .withPasswordCredential(passwordCredential.innerModel()));
            if (retry != null) {
                monoAddPassword = monoAddPassword.retryWhen(retry);
            }
            // Capture the service-generated credential (including the secret) on the wrapper.
            monoAddPassword = monoAddPassword.doOnNext(passwordCredential::setInner);
            return monoAddPassword;
        })
    )
    .then(Mono.defer(() -> {
        // NOTE(review): this refresh runs even when passwordCredentialsToCreate is empty;
        // it could likely be skipped in that case — left as-is to avoid a behavior change.
        Mono<ServicePrincipal> monoRefresh = refreshAsync();
        if (retry != null) {
            monoRefresh = monoRefresh.retryWhen(retry);
        }
        return monoRefresh;
    }));
}
/**
 * Implementation for {@link ServicePrincipal} and its fluent create/update stages.
 *
 * <p>Pending changes (credentials to add, role assignments to create/delete) are queued in
 * local collections and submitted by {@link #createResourceAsync()}.
 */
class ServicePrincipalImpl
    extends CreatableUpdatableImpl<ServicePrincipal, MicrosoftGraphServicePrincipalInner, ServicePrincipalImpl>
    implements ServicePrincipal, ServicePrincipal.Definition, ServicePrincipal.Update,
        HasCredential<ServicePrincipalImpl> {

    private AuthorizationManager manager;
    // Credentials parsed from the latest inner model, keyed by credential name;
    // rebuilt by refreshCredentials().
    private Map<String, PasswordCredential> cachedPasswordCredentials;
    private Map<String, CertificateCredential> cachedCertificateCredentials;
    // Role assignments created through this instance, keyed by assignment id.
    private Map<String, RoleAssignment> cachedRoleAssignments;
    // Optional application to create as a dependency before this service principal.
    private Creatable<ActiveDirectoryApplication> applicationCreatable;
    // Role assignments queued for creation, keyed by scope (one role per scope).
    private Map<String, BuiltInRole> rolesToCreate;
    // Ids of role assignments queued for deletion.
    private Set<String> rolesToDelete;
    String assignedSubscription;
    // Credentials queued for creation on the next apply.
    private List<CertificateCredentialImpl<?>> certificateCredentialsToCreate;
    private List<PasswordCredentialImpl<?>> passwordCredentialsToCreate;

    ServicePrincipalImpl(MicrosoftGraphServicePrincipalInner innerObject, AuthorizationManager manager) {
        super(innerObject.displayName(), innerObject);
        this.manager = manager;
        this.cachedRoleAssignments = new HashMap<>();
        this.rolesToCreate = new HashMap<>();
        this.rolesToDelete = new HashSet<>();
        this.cachedCertificateCredentials = new HashMap<>();
        this.certificateCredentialsToCreate = new ArrayList<>();
        this.cachedPasswordCredentials = new HashMap<>();
        this.passwordCredentialsToCreate = new ArrayList<>();
        // Populate the credential caches from the inner model we were constructed with.
        this.refreshCredentials(innerObject);
    }

    @Override
    public String applicationId() {
        return innerModel().appId();
    }

    @Override
    public List<String> servicePrincipalNames() {
        return innerModel().servicePrincipalNames();
    }

    /** Returns an unmodifiable view of the cached password credentials, keyed by name. */
    @Override
    public Map<String, PasswordCredential> passwordCredentials() {
        return Collections.unmodifiableMap(cachedPasswordCredentials);
    }

    /** Returns an unmodifiable view of the cached certificate credentials, keyed by name. */
    @Override
    public Map<String, CertificateCredential> certificateCredentials() {
        return Collections.unmodifiableMap(cachedCertificateCredentials);
    }

    /** Returns an unmodifiable snapshot of the role assignments cached on this instance. */
    @Override
    public Set<RoleAssignment> roleAssignments() {
        return Collections.unmodifiableSet(new HashSet<>(cachedRoleAssignments.values()));
    }

    /** Fetches the inner model from the service and refreshes the credential caches. */
    @Override
    protected Mono<MicrosoftGraphServicePrincipalInner> getInnerAsync() {
        return manager.serviceClient().getServicePrincipalsServicePrincipals().getServicePrincipalAsync(id())
            .doOnSuccess(this::refreshCredentials);
    }

    /**
     * Creates or updates the service principal, then submits queued credentials and role
     * assignments, and finally exports auth files for newly created credentials.
     */
    @Override
    public Mono<ServicePrincipal> createResourceAsync() {
        // In create mode, follow-up calls may 404 until the new principal becomes visible
        // (presumably directory replication delay — TODO confirm), hence the backoff retry.
        Retry retry = isInCreateMode() ? RetryUtils.backoffRetryFor404ResourceNotFound() : null;
        Mono<ServicePrincipal> sp;
        if (isInCreateMode()) {
            innerModel().withAccountEnabled(true);
            if (applicationCreatable != null) {
                // The application was created as a dependency task; take its id.
                ActiveDirectoryApplication application = this.taskResult(applicationCreatable.key());
                innerModel().withAppId(application.applicationId());
            }
            sp = manager.serviceClient().getServicePrincipalsServicePrincipals()
                .createServicePrincipalAsync(innerModel()).map(innerToFluentMap(this));
            if (applicationCreatable != null) {
                // Creating the principal right after its application can be rejected with 400
                // until the application propagates — retry (inferred from RetryUtils name; confirm).
                sp = sp.retryWhen(RetryUtils.backoffRetryFor400BadRequest());
            }
        } else {
            // Update mode: only credentials are patched, then the model is refreshed.
            sp = manager().serviceClient().getServicePrincipalsServicePrincipals()
                .updateServicePrincipalAsync(id(), new MicrosoftGraphServicePrincipalInner()
                    .withKeyCredentials(innerModel().keyCredentials())
                    .withPasswordCredentials(innerModel().passwordCredentials())
                ).then(refreshAsync());
        }
        return sp
            .flatMap(
                servicePrincipal -> submitCredentialsAsync(servicePrincipal, retry)
                    .mergeWith(submitRolesAsync(servicePrincipal))
                    .last())
            .map(
                servicePrincipal -> {
                    // Export auth files while the generated secrets are still held, then drop
                    // the secrets and clear the pending queues.
                    for (PasswordCredentialImpl<?> passwordCredential : passwordCredentialsToCreate) {
                        passwordCredential.exportAuthFile((ServicePrincipalImpl) servicePrincipal);
                        passwordCredential.consumeSecret();
                    }
                    for (CertificateCredentialImpl<?> certificateCredential : certificateCredentialsToCreate) {
                        certificateCredential.exportAuthFile((ServicePrincipalImpl) servicePrincipal);
                    }
                    passwordCredentialsToCreate.clear();
                    certificateCredentialsToCreate.clear();
                    return servicePrincipal;
                });
    }

    /**
     * Submits queued role-assignment creations and deletions for this service principal.
     *
     * <p>NOTE(review): no RBAC-propagation retry is applied here; retry is expected to live in
     * the role-assignment creation path itself (other resources, e.g. MSI, rely on it there) —
     * confirm before moving it.
     */
    private Mono<ServicePrincipal> submitRolesAsync(final ServicePrincipal servicePrincipal) {
        Mono<ServicePrincipal> create;
        if (rolesToCreate.isEmpty()) {
            create = Mono.just(servicePrincipal);
        } else {
            create = Flux
                .fromIterable(rolesToCreate.entrySet())
                .flatMap(roleEntry -> manager()
                    .roleAssignments()
                    .define(this.manager().internalContext().randomUuid())
                    .forServicePrincipal(servicePrincipal)
                    .withBuiltInRole(roleEntry.getValue())
                    .withScope(roleEntry.getKey())
                    .createAsync())
                // Cache each created assignment by id so it can be deleted later.
                .doOnNext(
                    indexable -> cachedRoleAssignments.put(indexable.id(), indexable))
                .last()
                .map(
                    indexable -> {
                        rolesToCreate.clear();
                        return servicePrincipal;
                    });
        }
        Mono<ServicePrincipal> delete;
        if (rolesToDelete.isEmpty()) {
            delete = Mono.just(servicePrincipal);
        } else {
            // NOTE(review): deletion resolves each queued id via cachedRoleAssignments; an id
            // that was never cached on this instance would NPE here — confirm intended usage.
            delete = Flux
                .fromIterable(rolesToDelete)
                .flatMap(
                    role -> manager()
                        .roleAssignments()
                        .deleteByIdAsync(cachedRoleAssignments.get(role).id())
                        .thenReturn(role))
                .doOnNext(s -> cachedRoleAssignments.remove(s))
                .last()
                .map(
                    s -> {
                        rolesToDelete.clear();
                        return servicePrincipal;
                    });
        }
        return create.mergeWith(delete).last();
    }

    /** A service principal with no id has not been created on the service yet. */
    @Override
    public boolean isInCreateMode() {
        return id() == null;
    }

    /** Rebuilds the credential caches from the given inner model. */
    void refreshCredentials(MicrosoftGraphServicePrincipalInner inner) {
        cachedCertificateCredentials.clear();
        cachedPasswordCredentials.clear();
        if (inner.keyCredentials() != null) {
            inner.keyCredentials().forEach(keyCredentialInner -> {
                CertificateCredential certificateCredential = new CertificateCredentialImpl<>(keyCredentialInner);
                cachedCertificateCredentials.put(certificateCredential.name(), certificateCredential);
            });
        }
        if (inner.passwordCredentials() != null) {
            inner.passwordCredentials().forEach(passwordCredentialInner -> {
                PasswordCredential passwordCredential = new PasswordCredentialImpl<>(passwordCredentialInner);
                cachedPasswordCredentials.put(passwordCredential.name(), passwordCredential);
            });
        }
    }

    @Override
    public Mono<ServicePrincipal> refreshAsync() {
        return getInnerAsync().map(innerToFluentMap(this));
    }

    /** Starts the definition of a certificate credential to attach to this service principal. */
    @Override
    public CertificateCredentialImpl<ServicePrincipalImpl> defineCertificateCredential(String name) {
        return new CertificateCredentialImpl<>(name, this);
    }

    /** Starts the definition of a password credential to attach to this service principal. */
    @Override
    public PasswordCredentialImpl<ServicePrincipalImpl> definePasswordCredential(String name) {
        return new PasswordCredentialImpl<>(name, this);
    }

    /** Removes the named credential (password or certificate) from the inner model. */
    @Override
    public ServicePrincipalImpl withoutCredential(String name) {
        if (cachedPasswordCredentials.containsKey(name)) {
            innerModel().passwordCredentials().remove(cachedPasswordCredentials.get(name).innerModel());
        } else if (cachedCertificateCredentials.containsKey(name)) {
            innerModel().keyCredentials().remove(cachedCertificateCredentials.get(name).innerModel());
        }
        return this;
    }

    /** Queues a certificate credential and adds its inner to the model's key credentials. */
    @Override
    public ServicePrincipalImpl withCertificateCredential(CertificateCredentialImpl<?> credential) {
        this.certificateCredentialsToCreate.add(credential);
        if (innerModel().keyCredentials() == null) {
            innerModel().withKeyCredentials(new ArrayList<>());
        }
        innerModel().keyCredentials().add(credential.innerModel());
        return this;
    }

    /** Queues a password credential; it is submitted via a separate addPassword call, not the model. */
    @Override
    public ServicePrincipalImpl withPasswordCredential(PasswordCredentialImpl<?> credential) {
        this.passwordCredentialsToCreate.add(credential);
        return this;
    }

    @Override
    public ServicePrincipalImpl withExistingApplication(String id) {
        innerModel().withAppId(id);
        return this;
    }

    @Override
    public ServicePrincipalImpl withExistingApplication(ActiveDirectoryApplication application) {
        innerModel().withAppId(application.applicationId());
        return this;
    }

    /** Registers an application creatable as a dependency to create before this principal. */
    @Override
    public ServicePrincipalImpl withNewApplication(Creatable<ActiveDirectoryApplication> applicationCreatable) {
        this.addDependency(applicationCreatable);
        this.applicationCreatable = applicationCreatable;
        return this;
    }

    @Override
    public ServicePrincipalImpl withNewApplication(String signOnUrl) {
        return withNewApplication(
            manager.applications().define(name()).withSignOnUrl(signOnUrl).withIdentifierUrl(signOnUrl));
    }

    @Override
    public ServicePrincipalImpl withNewApplication() {
        return withNewApplication(
            manager.applications().define(name()));
    }

    /** Queues a role assignment at the given scope (map keyed by scope, so one role per scope). */
    @Override
    public ServicePrincipalImpl withNewRole(BuiltInRole role, String scope) {
        this.rolesToCreate.put(scope, role);
        return this;
    }

    @Override
    public ServicePrincipalImpl withNewRoleInSubscription(BuiltInRole role, String subscriptionId) {
        this.assignedSubscription = subscriptionId;
        return withNewRole(role, "subscriptions/" + subscriptionId);
    }

    @Override
    public ServicePrincipalImpl withNewRoleInResourceGroup(BuiltInRole role, ResourceGroup resourceGroup) {
        return withNewRole(role, resourceGroup.id());
    }

    /** Queues removal of the given role assignment on the next apply. */
    @Override
    public Update withoutRole(RoleAssignment roleAssignment) {
        this.rolesToDelete.add(roleAssignment.id());
        return this;
    }

    @Override
    public String id() {
        return innerModel().id();
    }

    @Override
    public AuthorizationManager manager() {
        return this.manager;
    }
}
/**
 * Implementation for {@link ServicePrincipal} and its fluent create/update stages.
 *
 * <p>Pending changes (credentials to add, role assignments to create/delete) are queued in
 * local collections and submitted by {@link #createResourceAsync()}.
 */
class ServicePrincipalImpl
    extends CreatableUpdatableImpl<ServicePrincipal, MicrosoftGraphServicePrincipalInner, ServicePrincipalImpl>
    implements ServicePrincipal, ServicePrincipal.Definition, ServicePrincipal.Update,
        HasCredential<ServicePrincipalImpl> {

    private AuthorizationManager manager;
    // Credentials parsed from the latest inner model, keyed by credential name;
    // rebuilt by refreshCredentials().
    private Map<String, PasswordCredential> cachedPasswordCredentials;
    private Map<String, CertificateCredential> cachedCertificateCredentials;
    // Role assignments created through this instance, keyed by assignment id.
    private Map<String, RoleAssignment> cachedRoleAssignments;
    // Optional application to create as a dependency before this service principal.
    private Creatable<ActiveDirectoryApplication> applicationCreatable;
    // Role assignments queued for creation, keyed by scope (one role per scope).
    private Map<String, BuiltInRole> rolesToCreate;
    // Ids of role assignments queued for deletion.
    private Set<String> rolesToDelete;
    String assignedSubscription;
    // Credentials queued for creation on the next apply.
    private List<CertificateCredentialImpl<?>> certificateCredentialsToCreate;
    private List<PasswordCredentialImpl<?>> passwordCredentialsToCreate;

    ServicePrincipalImpl(MicrosoftGraphServicePrincipalInner innerObject, AuthorizationManager manager) {
        super(innerObject.displayName(), innerObject);
        this.manager = manager;
        this.cachedRoleAssignments = new HashMap<>();
        this.rolesToCreate = new HashMap<>();
        this.rolesToDelete = new HashSet<>();
        this.cachedCertificateCredentials = new HashMap<>();
        this.certificateCredentialsToCreate = new ArrayList<>();
        this.cachedPasswordCredentials = new HashMap<>();
        this.passwordCredentialsToCreate = new ArrayList<>();
        // Populate the credential caches from the inner model we were constructed with.
        this.refreshCredentials(innerObject);
    }

    @Override
    public String applicationId() {
        return innerModel().appId();
    }

    @Override
    public List<String> servicePrincipalNames() {
        return innerModel().servicePrincipalNames();
    }

    /** Returns an unmodifiable view of the cached password credentials, keyed by name. */
    @Override
    public Map<String, PasswordCredential> passwordCredentials() {
        return Collections.unmodifiableMap(cachedPasswordCredentials);
    }

    /** Returns an unmodifiable view of the cached certificate credentials, keyed by name. */
    @Override
    public Map<String, CertificateCredential> certificateCredentials() {
        return Collections.unmodifiableMap(cachedCertificateCredentials);
    }

    /** Returns an unmodifiable snapshot of the role assignments cached on this instance. */
    @Override
    public Set<RoleAssignment> roleAssignments() {
        return Collections.unmodifiableSet(new HashSet<>(cachedRoleAssignments.values()));
    }

    /** Fetches the inner model from the service and refreshes the credential caches. */
    @Override
    protected Mono<MicrosoftGraphServicePrincipalInner> getInnerAsync() {
        return manager.serviceClient().getServicePrincipalsServicePrincipals().getServicePrincipalAsync(id())
            .doOnSuccess(this::refreshCredentials);
    }

    /**
     * Creates or updates the service principal, then submits queued credentials and role
     * assignments, and finally exports auth files for newly created credentials.
     */
    @Override
    public Mono<ServicePrincipal> createResourceAsync() {
        // In create mode, follow-up calls may 404 until the new principal becomes visible
        // (presumably directory replication delay — TODO confirm), hence the backoff retry.
        Retry retry = isInCreateMode() ? RetryUtils.backoffRetryFor404ResourceNotFound() : null;
        Mono<ServicePrincipal> sp;
        if (isInCreateMode()) {
            innerModel().withAccountEnabled(true);
            if (applicationCreatable != null) {
                // The application was created as a dependency task; take its id.
                ActiveDirectoryApplication application = this.taskResult(applicationCreatable.key());
                innerModel().withAppId(application.applicationId());
            }
            sp = manager.serviceClient().getServicePrincipalsServicePrincipals()
                .createServicePrincipalAsync(innerModel()).map(innerToFluentMap(this));
            if (applicationCreatable != null) {
                // Creating the principal right after its application can be rejected with 400
                // until the application propagates — retry (inferred from RetryUtils name; confirm).
                sp = sp.retryWhen(RetryUtils.backoffRetryFor400BadRequest());
            }
        } else {
            // Update mode: only credentials are patched, then the model is refreshed.
            sp = manager().serviceClient().getServicePrincipalsServicePrincipals()
                .updateServicePrincipalAsync(id(), new MicrosoftGraphServicePrincipalInner()
                    .withKeyCredentials(innerModel().keyCredentials())
                    .withPasswordCredentials(innerModel().passwordCredentials())
                ).then(refreshAsync());
        }
        return sp
            .flatMap(
                servicePrincipal -> submitCredentialsAsync(servicePrincipal, retry)
                    .mergeWith(submitRolesAsync(servicePrincipal))
                    .last())
            .map(
                servicePrincipal -> {
                    // Export auth files while the generated secrets are still held, then drop
                    // the secrets and clear the pending queues.
                    for (PasswordCredentialImpl<?> passwordCredential : passwordCredentialsToCreate) {
                        passwordCredential.exportAuthFile((ServicePrincipalImpl) servicePrincipal);
                        passwordCredential.consumeSecret();
                    }
                    for (CertificateCredentialImpl<?> certificateCredential : certificateCredentialsToCreate) {
                        certificateCredential.exportAuthFile((ServicePrincipalImpl) servicePrincipal);
                    }
                    passwordCredentialsToCreate.clear();
                    certificateCredentialsToCreate.clear();
                    return servicePrincipal;
                });
    }

    /**
     * Submits queued role-assignment creations and deletions for this service principal.
     *
     * <p>NOTE(review): no RBAC-propagation retry is applied here; retry is expected to live in
     * the role-assignment creation path itself (other resources, e.g. MSI, rely on it there) —
     * confirm before moving it.
     */
    private Mono<ServicePrincipal> submitRolesAsync(final ServicePrincipal servicePrincipal) {
        Mono<ServicePrincipal> create;
        if (rolesToCreate.isEmpty()) {
            create = Mono.just(servicePrincipal);
        } else {
            create = Flux
                .fromIterable(rolesToCreate.entrySet())
                .flatMap(roleEntry -> manager()
                    .roleAssignments()
                    .define(this.manager().internalContext().randomUuid())
                    .forServicePrincipal(servicePrincipal)
                    .withBuiltInRole(roleEntry.getValue())
                    .withScope(roleEntry.getKey())
                    .createAsync())
                // Cache each created assignment by id so it can be deleted later.
                .doOnNext(
                    indexable -> cachedRoleAssignments.put(indexable.id(), indexable))
                .last()
                .map(
                    indexable -> {
                        rolesToCreate.clear();
                        return servicePrincipal;
                    });
        }
        Mono<ServicePrincipal> delete;
        if (rolesToDelete.isEmpty()) {
            delete = Mono.just(servicePrincipal);
        } else {
            // NOTE(review): deletion resolves each queued id via cachedRoleAssignments; an id
            // that was never cached on this instance would NPE here — confirm intended usage.
            delete = Flux
                .fromIterable(rolesToDelete)
                .flatMap(
                    role -> manager()
                        .roleAssignments()
                        .deleteByIdAsync(cachedRoleAssignments.get(role).id())
                        .thenReturn(role))
                .doOnNext(s -> cachedRoleAssignments.remove(s))
                .last()
                .map(
                    s -> {
                        rolesToDelete.clear();
                        return servicePrincipal;
                    });
        }
        return create.mergeWith(delete).last();
    }

    /** A service principal with no id has not been created on the service yet. */
    @Override
    public boolean isInCreateMode() {
        return id() == null;
    }

    /** Rebuilds the credential caches from the given inner model. */
    void refreshCredentials(MicrosoftGraphServicePrincipalInner inner) {
        cachedCertificateCredentials.clear();
        cachedPasswordCredentials.clear();
        if (inner.keyCredentials() != null) {
            inner.keyCredentials().forEach(keyCredentialInner -> {
                CertificateCredential certificateCredential = new CertificateCredentialImpl<>(keyCredentialInner);
                cachedCertificateCredentials.put(certificateCredential.name(), certificateCredential);
            });
        }
        if (inner.passwordCredentials() != null) {
            inner.passwordCredentials().forEach(passwordCredentialInner -> {
                PasswordCredential passwordCredential = new PasswordCredentialImpl<>(passwordCredentialInner);
                cachedPasswordCredentials.put(passwordCredential.name(), passwordCredential);
            });
        }
    }

    @Override
    public Mono<ServicePrincipal> refreshAsync() {
        return getInnerAsync().map(innerToFluentMap(this));
    }

    /** Starts the definition of a certificate credential to attach to this service principal. */
    @Override
    public CertificateCredentialImpl<ServicePrincipalImpl> defineCertificateCredential(String name) {
        return new CertificateCredentialImpl<>(name, this);
    }

    /** Starts the definition of a password credential to attach to this service principal. */
    @Override
    public PasswordCredentialImpl<ServicePrincipalImpl> definePasswordCredential(String name) {
        return new PasswordCredentialImpl<>(name, this);
    }

    /** Removes the named credential (password or certificate) from the inner model. */
    @Override
    public ServicePrincipalImpl withoutCredential(String name) {
        if (cachedPasswordCredentials.containsKey(name)) {
            innerModel().passwordCredentials().remove(cachedPasswordCredentials.get(name).innerModel());
        } else if (cachedCertificateCredentials.containsKey(name)) {
            innerModel().keyCredentials().remove(cachedCertificateCredentials.get(name).innerModel());
        }
        return this;
    }

    /** Queues a certificate credential and adds its inner to the model's key credentials. */
    @Override
    public ServicePrincipalImpl withCertificateCredential(CertificateCredentialImpl<?> credential) {
        this.certificateCredentialsToCreate.add(credential);
        if (innerModel().keyCredentials() == null) {
            innerModel().withKeyCredentials(new ArrayList<>());
        }
        innerModel().keyCredentials().add(credential.innerModel());
        return this;
    }

    /** Queues a password credential; it is submitted via a separate addPassword call, not the model. */
    @Override
    public ServicePrincipalImpl withPasswordCredential(PasswordCredentialImpl<?> credential) {
        this.passwordCredentialsToCreate.add(credential);
        return this;
    }

    @Override
    public ServicePrincipalImpl withExistingApplication(String id) {
        innerModel().withAppId(id);
        return this;
    }

    @Override
    public ServicePrincipalImpl withExistingApplication(ActiveDirectoryApplication application) {
        innerModel().withAppId(application.applicationId());
        return this;
    }

    /** Registers an application creatable as a dependency to create before this principal. */
    @Override
    public ServicePrincipalImpl withNewApplication(Creatable<ActiveDirectoryApplication> applicationCreatable) {
        this.addDependency(applicationCreatable);
        this.applicationCreatable = applicationCreatable;
        return this;
    }

    @Override
    public ServicePrincipalImpl withNewApplication(String signOnUrl) {
        return withNewApplication(
            manager.applications().define(name()).withSignOnUrl(signOnUrl).withIdentifierUrl(signOnUrl));
    }

    @Override
    public ServicePrincipalImpl withNewApplication() {
        return withNewApplication(
            manager.applications().define(name()));
    }

    /** Queues a role assignment at the given scope (map keyed by scope, so one role per scope). */
    @Override
    public ServicePrincipalImpl withNewRole(BuiltInRole role, String scope) {
        this.rolesToCreate.put(scope, role);
        return this;
    }

    @Override
    public ServicePrincipalImpl withNewRoleInSubscription(BuiltInRole role, String subscriptionId) {
        this.assignedSubscription = subscriptionId;
        return withNewRole(role, "subscriptions/" + subscriptionId);
    }

    @Override
    public ServicePrincipalImpl withNewRoleInResourceGroup(BuiltInRole role, ResourceGroup resourceGroup) {
        return withNewRole(role, resourceGroup.id());
    }

    /** Queues removal of the given role assignment on the next apply. */
    @Override
    public Update withoutRole(RoleAssignment roleAssignment) {
        this.rolesToDelete.add(roleAssignment.id());
        return this;
    }

    @Override
    public String id() {
        return innerModel().id();
    }

    @Override
    public AuthorizationManager manager() {
        return this.manager;
    }
}
My guess is that if no password credential is queued (i.e., the iterable above is empty), the refresh could be skipped entirely, so this may be a minor oversight. I didn't want to change it, though, in case it breaks something I'm not aware of.
/**
 * Creates the queued password credentials on the service principal via the Graph
 * "addPassword" action, then refreshes this model from the service.
 *
 * @param servicePrincipal the created/updated service principal (currently unreferenced;
 *     the returned value comes from {@code refreshAsync()})
 * @param retry optional retry applied to each service call; {@code null} disables retry
 * @return the refreshed service principal
 */
private Mono<ServicePrincipal> submitCredentialsAsync(final ServicePrincipal servicePrincipal, Retry retry) {
    return Flux.defer(() -> Flux.fromIterable(passwordCredentialsToCreate)
        .flatMap(passwordCredential -> {
            // The service generates the secret and returns it in the response inner.
            Mono<MicrosoftGraphPasswordCredentialInner> monoAddPassword =
                manager().serviceClient().getServicePrincipals()
                    .addPasswordAsync(id(), new ServicePrincipalsAddPasswordRequestBodyInner()
                        .withPasswordCredential(passwordCredential.innerModel()));
            if (retry != null) {
                monoAddPassword = monoAddPassword.retryWhen(retry);
            }
            // Capture the service-generated credential (including the secret) on the wrapper.
            monoAddPassword = monoAddPassword.doOnNext(passwordCredential::setInner);
            return monoAddPassword;
        })
    )
    .then(Mono.defer(() -> {
        // NOTE(review): this refresh runs even when passwordCredentialsToCreate is empty;
        // it could likely be skipped in that case — left as-is to avoid a behavior change.
        Mono<ServicePrincipal> monoRefresh = refreshAsync();
        if (retry != null) {
            monoRefresh = monoRefresh.retryWhen(retry);
        }
        return monoRefresh;
    }));
}
.then(Mono.defer(() -> {
/**
 * Creates the queued password credentials on the service principal via the Graph
 * "addPassword" action, then refreshes this model from the service.
 *
 * @param servicePrincipal the created/updated service principal (currently unreferenced;
 *     the returned value comes from {@code refreshAsync()})
 * @param retry optional retry applied to each service call; {@code null} disables retry
 * @return the refreshed service principal
 */
private Mono<ServicePrincipal> submitCredentialsAsync(final ServicePrincipal servicePrincipal, Retry retry) {
    return Flux.defer(() -> Flux.fromIterable(passwordCredentialsToCreate)
        .flatMap(passwordCredential -> {
            // The service generates the secret and returns it in the response inner.
            Mono<MicrosoftGraphPasswordCredentialInner> monoAddPassword =
                manager().serviceClient().getServicePrincipals()
                    .addPasswordAsync(id(), new ServicePrincipalsAddPasswordRequestBodyInner()
                        .withPasswordCredential(passwordCredential.innerModel()));
            if (retry != null) {
                monoAddPassword = monoAddPassword.retryWhen(retry);
            }
            // Capture the service-generated credential (including the secret) on the wrapper.
            monoAddPassword = monoAddPassword.doOnNext(passwordCredential::setInner);
            return monoAddPassword;
        })
    )
    .then(Mono.defer(() -> {
        // NOTE(review): this refresh runs even when passwordCredentialsToCreate is empty;
        // it could likely be skipped in that case — left as-is to avoid a behavior change.
        Mono<ServicePrincipal> monoRefresh = refreshAsync();
        if (retry != null) {
            monoRefresh = monoRefresh.retryWhen(retry);
        }
        return monoRefresh;
    }));
}
class ServicePrincipalImpl extends CreatableUpdatableImpl<ServicePrincipal, MicrosoftGraphServicePrincipalInner, ServicePrincipalImpl> implements ServicePrincipal, ServicePrincipal.Definition, ServicePrincipal.Update, HasCredential<ServicePrincipalImpl> { private AuthorizationManager manager; private Map<String, PasswordCredential> cachedPasswordCredentials; private Map<String, CertificateCredential> cachedCertificateCredentials; private Map<String, RoleAssignment> cachedRoleAssignments; private Creatable<ActiveDirectoryApplication> applicationCreatable; private Map<String, BuiltInRole> rolesToCreate; private Set<String> rolesToDelete; String assignedSubscription; private List<CertificateCredentialImpl<?>> certificateCredentialsToCreate; private List<PasswordCredentialImpl<?>> passwordCredentialsToCreate; ServicePrincipalImpl(MicrosoftGraphServicePrincipalInner innerObject, AuthorizationManager manager) { super(innerObject.displayName(), innerObject); this.manager = manager; this.cachedRoleAssignments = new HashMap<>(); this.rolesToCreate = new HashMap<>(); this.rolesToDelete = new HashSet<>(); this.cachedCertificateCredentials = new HashMap<>(); this.certificateCredentialsToCreate = new ArrayList<>(); this.cachedPasswordCredentials = new HashMap<>(); this.passwordCredentialsToCreate = new ArrayList<>(); this.refreshCredentials(innerObject); } @Override public String applicationId() { return innerModel().appId(); } @Override public List<String> servicePrincipalNames() { return innerModel().servicePrincipalNames(); } @Override public Map<String, PasswordCredential> passwordCredentials() { return Collections.unmodifiableMap(cachedPasswordCredentials); } @Override public Map<String, CertificateCredential> certificateCredentials() { return Collections.unmodifiableMap(cachedCertificateCredentials); } @Override public Set<RoleAssignment> roleAssignments() { return Collections.unmodifiableSet(new HashSet<>(cachedRoleAssignments.values())); } @Override protected 
Mono<MicrosoftGraphServicePrincipalInner> getInnerAsync() { return manager.serviceClient().getServicePrincipalsServicePrincipals().getServicePrincipalAsync(id()) .doOnSuccess(this::refreshCredentials); } @Override public Mono<ServicePrincipal> createResourceAsync() { Retry retry = isInCreateMode() ? RetryUtils.backoffRetryFor404ResourceNotFound() : null; Mono<ServicePrincipal> sp; if (isInCreateMode()) { innerModel().withAccountEnabled(true); if (applicationCreatable != null) { ActiveDirectoryApplication application = this.taskResult(applicationCreatable.key()); innerModel().withAppId(application.applicationId()); } sp = manager.serviceClient().getServicePrincipalsServicePrincipals() .createServicePrincipalAsync(innerModel()).map(innerToFluentMap(this)); if (applicationCreatable != null) { sp = sp.retryWhen(RetryUtils.backoffRetryFor400BadRequest()); } } else { sp = manager().serviceClient().getServicePrincipalsServicePrincipals() .updateServicePrincipalAsync(id(), new MicrosoftGraphServicePrincipalInner() .withKeyCredentials(innerModel().keyCredentials()) .withPasswordCredentials(innerModel().passwordCredentials()) ).then(refreshAsync()); } return sp .flatMap( servicePrincipal -> submitCredentialsAsync(servicePrincipal, retry) .mergeWith(submitRolesAsync(servicePrincipal)) .last()) .map( servicePrincipal -> { for (PasswordCredentialImpl<?> passwordCredential : passwordCredentialsToCreate) { passwordCredential.exportAuthFile((ServicePrincipalImpl) servicePrincipal); passwordCredential.consumeSecret(); } for (CertificateCredentialImpl<?> certificateCredential : certificateCredentialsToCreate) { certificateCredential.exportAuthFile((ServicePrincipalImpl) servicePrincipal); } passwordCredentialsToCreate.clear(); certificateCredentialsToCreate.clear(); return servicePrincipal; }); } private Mono<ServicePrincipal> submitRolesAsync(final ServicePrincipal servicePrincipal) { Mono<ServicePrincipal> create; if (rolesToCreate.isEmpty()) { create = 
Mono.just(servicePrincipal); } else { create = Flux .fromIterable(rolesToCreate.entrySet()) .flatMap(roleEntry -> manager() .roleAssignments() .define(this.manager().internalContext().randomUuid()) .forServicePrincipal(servicePrincipal) .withBuiltInRole(roleEntry.getValue()) .withScope(roleEntry.getKey()) .createAsync()) .doOnNext( indexable -> cachedRoleAssignments.put(indexable.id(), indexable)) .last() .map( indexable -> { rolesToCreate.clear(); return servicePrincipal; }); } Mono<ServicePrincipal> delete; if (rolesToDelete.isEmpty()) { delete = Mono.just(servicePrincipal); } else { delete = Flux .fromIterable(rolesToDelete) .flatMap( role -> manager() .roleAssignments() .deleteByIdAsync(cachedRoleAssignments.get(role).id()) .thenReturn(role)) .doOnNext(s -> cachedRoleAssignments.remove(s)) .last() .map( s -> { rolesToDelete.clear(); return servicePrincipal; }); } return create.mergeWith(delete).last(); } @Override public boolean isInCreateMode() { return id() == null; } void refreshCredentials(MicrosoftGraphServicePrincipalInner inner) { cachedCertificateCredentials.clear(); cachedPasswordCredentials.clear(); if (inner.keyCredentials() != null) { inner.keyCredentials().forEach(keyCredentialInner -> { CertificateCredential certificateCredential = new CertificateCredentialImpl<>(keyCredentialInner); cachedCertificateCredentials.put(certificateCredential.name(), certificateCredential); }); } if (inner.passwordCredentials() != null) { inner.passwordCredentials().forEach(passwordCredentialInner -> { PasswordCredential passwordCredential = new PasswordCredentialImpl<>(passwordCredentialInner); cachedPasswordCredentials.put(passwordCredential.name(), passwordCredential); }); } } @Override public Mono<ServicePrincipal> refreshAsync() { return getInnerAsync().map(innerToFluentMap(this)); } @Override public CertificateCredentialImpl<ServicePrincipalImpl> defineCertificateCredential(String name) { return new CertificateCredentialImpl<>(name, this); } @Override public 
PasswordCredentialImpl<ServicePrincipalImpl> definePasswordCredential(String name) { return new PasswordCredentialImpl<>(name, this); } @Override public ServicePrincipalImpl withoutCredential(String name) { if (cachedPasswordCredentials.containsKey(name)) { innerModel().passwordCredentials().remove(cachedPasswordCredentials.get(name).innerModel()); } else if (cachedCertificateCredentials.containsKey(name)) { innerModel().keyCredentials().remove(cachedCertificateCredentials.get(name).innerModel()); } return this; } @Override public ServicePrincipalImpl withCertificateCredential(CertificateCredentialImpl<?> credential) { this.certificateCredentialsToCreate.add(credential); if (innerModel().keyCredentials() == null) { innerModel().withKeyCredentials(new ArrayList<>()); } innerModel().keyCredentials().add(credential.innerModel()); return this; } @Override public ServicePrincipalImpl withPasswordCredential(PasswordCredentialImpl<?> credential) { this.passwordCredentialsToCreate.add(credential); return this; } @Override public ServicePrincipalImpl withExistingApplication(String id) { innerModel().withAppId(id); return this; } @Override public ServicePrincipalImpl withExistingApplication(ActiveDirectoryApplication application) { innerModel().withAppId(application.applicationId()); return this; } @Override public ServicePrincipalImpl withNewApplication(Creatable<ActiveDirectoryApplication> applicationCreatable) { this.addDependency(applicationCreatable); this.applicationCreatable = applicationCreatable; return this; } @Override public ServicePrincipalImpl withNewApplication(String signOnUrl) { return withNewApplication( manager.applications().define(name()).withSignOnUrl(signOnUrl).withIdentifierUrl(signOnUrl)); } @Override public ServicePrincipalImpl withNewApplication() { return withNewApplication( manager.applications().define(name())); } @Override public ServicePrincipalImpl withNewRole(BuiltInRole role, String scope) { this.rolesToCreate.put(scope, role); return this; 
} @Override public ServicePrincipalImpl withNewRoleInSubscription(BuiltInRole role, String subscriptionId) { this.assignedSubscription = subscriptionId; return withNewRole(role, "subscriptions/" + subscriptionId); } @Override public ServicePrincipalImpl withNewRoleInResourceGroup(BuiltInRole role, ResourceGroup resourceGroup) { return withNewRole(role, resourceGroup.id()); } @Override public Update withoutRole(RoleAssignment roleAssignment) { this.rolesToDelete.add(roleAssignment.id()); return this; } @Override public String id() { return innerModel().id(); } @Override public AuthorizationManager manager() { return this.manager; } }
class ServicePrincipalImpl extends CreatableUpdatableImpl<ServicePrincipal, MicrosoftGraphServicePrincipalInner, ServicePrincipalImpl> implements ServicePrincipal, ServicePrincipal.Definition, ServicePrincipal.Update, HasCredential<ServicePrincipalImpl> { private AuthorizationManager manager; private Map<String, PasswordCredential> cachedPasswordCredentials; private Map<String, CertificateCredential> cachedCertificateCredentials; private Map<String, RoleAssignment> cachedRoleAssignments; private Creatable<ActiveDirectoryApplication> applicationCreatable; private Map<String, BuiltInRole> rolesToCreate; private Set<String> rolesToDelete; String assignedSubscription; private List<CertificateCredentialImpl<?>> certificateCredentialsToCreate; private List<PasswordCredentialImpl<?>> passwordCredentialsToCreate; ServicePrincipalImpl(MicrosoftGraphServicePrincipalInner innerObject, AuthorizationManager manager) { super(innerObject.displayName(), innerObject); this.manager = manager; this.cachedRoleAssignments = new HashMap<>(); this.rolesToCreate = new HashMap<>(); this.rolesToDelete = new HashSet<>(); this.cachedCertificateCredentials = new HashMap<>(); this.certificateCredentialsToCreate = new ArrayList<>(); this.cachedPasswordCredentials = new HashMap<>(); this.passwordCredentialsToCreate = new ArrayList<>(); this.refreshCredentials(innerObject); } @Override public String applicationId() { return innerModel().appId(); } @Override public List<String> servicePrincipalNames() { return innerModel().servicePrincipalNames(); } @Override public Map<String, PasswordCredential> passwordCredentials() { return Collections.unmodifiableMap(cachedPasswordCredentials); } @Override public Map<String, CertificateCredential> certificateCredentials() { return Collections.unmodifiableMap(cachedCertificateCredentials); } @Override public Set<RoleAssignment> roleAssignments() { return Collections.unmodifiableSet(new HashSet<>(cachedRoleAssignments.values())); } @Override protected 
Mono<MicrosoftGraphServicePrincipalInner> getInnerAsync() { return manager.serviceClient().getServicePrincipalsServicePrincipals().getServicePrincipalAsync(id()) .doOnSuccess(this::refreshCredentials); } @Override public Mono<ServicePrincipal> createResourceAsync() { Retry retry = isInCreateMode() ? RetryUtils.backoffRetryFor404ResourceNotFound() : null; Mono<ServicePrincipal> sp; if (isInCreateMode()) { innerModel().withAccountEnabled(true); if (applicationCreatable != null) { ActiveDirectoryApplication application = this.taskResult(applicationCreatable.key()); innerModel().withAppId(application.applicationId()); } sp = manager.serviceClient().getServicePrincipalsServicePrincipals() .createServicePrincipalAsync(innerModel()).map(innerToFluentMap(this)); if (applicationCreatable != null) { sp = sp.retryWhen(RetryUtils.backoffRetryFor400BadRequest()); } } else { sp = manager().serviceClient().getServicePrincipalsServicePrincipals() .updateServicePrincipalAsync(id(), new MicrosoftGraphServicePrincipalInner() .withKeyCredentials(innerModel().keyCredentials()) .withPasswordCredentials(innerModel().passwordCredentials()) ).then(refreshAsync()); } return sp .flatMap( servicePrincipal -> submitCredentialsAsync(servicePrincipal, retry) .mergeWith(submitRolesAsync(servicePrincipal)) .last()) .map( servicePrincipal -> { for (PasswordCredentialImpl<?> passwordCredential : passwordCredentialsToCreate) { passwordCredential.exportAuthFile((ServicePrincipalImpl) servicePrincipal); passwordCredential.consumeSecret(); } for (CertificateCredentialImpl<?> certificateCredential : certificateCredentialsToCreate) { certificateCredential.exportAuthFile((ServicePrincipalImpl) servicePrincipal); } passwordCredentialsToCreate.clear(); certificateCredentialsToCreate.clear(); return servicePrincipal; }); } private Mono<ServicePrincipal> submitRolesAsync(final ServicePrincipal servicePrincipal) { Mono<ServicePrincipal> create; if (rolesToCreate.isEmpty()) { create = 
Mono.just(servicePrincipal); } else { create = Flux .fromIterable(rolesToCreate.entrySet()) .flatMap(roleEntry -> manager() .roleAssignments() .define(this.manager().internalContext().randomUuid()) .forServicePrincipal(servicePrincipal) .withBuiltInRole(roleEntry.getValue()) .withScope(roleEntry.getKey()) .createAsync()) .doOnNext( indexable -> cachedRoleAssignments.put(indexable.id(), indexable)) .last() .map( indexable -> { rolesToCreate.clear(); return servicePrincipal; }); } Mono<ServicePrincipal> delete; if (rolesToDelete.isEmpty()) { delete = Mono.just(servicePrincipal); } else { delete = Flux .fromIterable(rolesToDelete) .flatMap( role -> manager() .roleAssignments() .deleteByIdAsync(cachedRoleAssignments.get(role).id()) .thenReturn(role)) .doOnNext(s -> cachedRoleAssignments.remove(s)) .last() .map( s -> { rolesToDelete.clear(); return servicePrincipal; }); } return create.mergeWith(delete).last(); } @Override public boolean isInCreateMode() { return id() == null; } void refreshCredentials(MicrosoftGraphServicePrincipalInner inner) { cachedCertificateCredentials.clear(); cachedPasswordCredentials.clear(); if (inner.keyCredentials() != null) { inner.keyCredentials().forEach(keyCredentialInner -> { CertificateCredential certificateCredential = new CertificateCredentialImpl<>(keyCredentialInner); cachedCertificateCredentials.put(certificateCredential.name(), certificateCredential); }); } if (inner.passwordCredentials() != null) { inner.passwordCredentials().forEach(passwordCredentialInner -> { PasswordCredential passwordCredential = new PasswordCredentialImpl<>(passwordCredentialInner); cachedPasswordCredentials.put(passwordCredential.name(), passwordCredential); }); } } @Override public Mono<ServicePrincipal> refreshAsync() { return getInnerAsync().map(innerToFluentMap(this)); } @Override public CertificateCredentialImpl<ServicePrincipalImpl> defineCertificateCredential(String name) { return new CertificateCredentialImpl<>(name, this); } @Override public 
PasswordCredentialImpl<ServicePrincipalImpl> definePasswordCredential(String name) { return new PasswordCredentialImpl<>(name, this); } @Override public ServicePrincipalImpl withoutCredential(String name) { if (cachedPasswordCredentials.containsKey(name)) { innerModel().passwordCredentials().remove(cachedPasswordCredentials.get(name).innerModel()); } else if (cachedCertificateCredentials.containsKey(name)) { innerModel().keyCredentials().remove(cachedCertificateCredentials.get(name).innerModel()); } return this; } @Override public ServicePrincipalImpl withCertificateCredential(CertificateCredentialImpl<?> credential) { this.certificateCredentialsToCreate.add(credential); if (innerModel().keyCredentials() == null) { innerModel().withKeyCredentials(new ArrayList<>()); } innerModel().keyCredentials().add(credential.innerModel()); return this; } @Override public ServicePrincipalImpl withPasswordCredential(PasswordCredentialImpl<?> credential) { this.passwordCredentialsToCreate.add(credential); return this; } @Override public ServicePrincipalImpl withExistingApplication(String id) { innerModel().withAppId(id); return this; } @Override public ServicePrincipalImpl withExistingApplication(ActiveDirectoryApplication application) { innerModel().withAppId(application.applicationId()); return this; } @Override public ServicePrincipalImpl withNewApplication(Creatable<ActiveDirectoryApplication> applicationCreatable) { this.addDependency(applicationCreatable); this.applicationCreatable = applicationCreatable; return this; } @Override public ServicePrincipalImpl withNewApplication(String signOnUrl) { return withNewApplication( manager.applications().define(name()).withSignOnUrl(signOnUrl).withIdentifierUrl(signOnUrl)); } @Override public ServicePrincipalImpl withNewApplication() { return withNewApplication( manager.applications().define(name())); } @Override public ServicePrincipalImpl withNewRole(BuiltInRole role, String scope) { this.rolesToCreate.put(scope, role); return this; 
} @Override public ServicePrincipalImpl withNewRoleInSubscription(BuiltInRole role, String subscriptionId) { this.assignedSubscription = subscriptionId; return withNewRole(role, "subscriptions/" + subscriptionId); } @Override public ServicePrincipalImpl withNewRoleInResourceGroup(BuiltInRole role, ResourceGroup resourceGroup) { return withNewRole(role, resourceGroup.id()); } @Override public Update withoutRole(RoleAssignment roleAssignment) { this.rolesToDelete.add(roleAssignment.id()); return this; } @Override public String id() { return innerModel().id(); } @Override public AuthorizationManager manager() { return this.manager; } }
Have we tried the other type of `HttpResponse.BodyHandler`s here? Is it possible when we know we'll buffer if the `byte[]` variant is better
public HttpResponse sendSync(HttpRequest request, Context context) { boolean eagerlyReadResponse = (boolean) context.getData("azure-eagerly-read-response").orElse(false); java.net.http.HttpRequest jdkRequest = toJdkHttpRequest(request, context); try { java.net.http.HttpResponse<InputStream> jdKResponse = jdkHttpClient.send(jdkRequest, ofInputStream()); JdkSyncHttpResponse response = new JdkSyncHttpResponse(request, jdKResponse); if (eagerlyReadResponse) { response.buffer(); } return response; } catch (IOException e) { throw LOGGER.logExceptionAsError(new UncheckedIOException(e)); } catch (InterruptedException e) { throw LOGGER.logExceptionAsError(new RuntimeException(e)); } }
java.net.http.HttpResponse<InputStream> jdKResponse = jdkHttpClient.send(jdkRequest, ofInputStream());
public HttpResponse sendSync(HttpRequest request, Context context) { boolean eagerlyReadResponse = (boolean) context.getData("azure-eagerly-read-response").orElse(false); java.net.http.HttpRequest jdkRequest = toJdkHttpRequest(request, context); try { if (eagerlyReadResponse) { java.net.http.HttpResponse<byte[]> jdKResponse = jdkHttpClient.send(jdkRequest, ofByteArray()); return new JdkHttpResponseSync(request, jdKResponse.statusCode(), fromJdkHttpHeaders(jdKResponse.headers()), jdKResponse.body()); } else { java.net.http.HttpResponse<InputStream> jdKResponse = jdkHttpClient.send(jdkRequest, ofInputStream()); return new JdkHttpResponseSync(request, jdKResponse); } } catch (IOException e) { throw LOGGER.logExceptionAsError(new UncheckedIOException(e)); } catch (InterruptedException e) { throw LOGGER.logExceptionAsError(new RuntimeException(e)); } }
class JdkHttpClient implements HttpClient { private static final ClientLogger LOGGER = new ClientLogger(JdkHttpClient.class); private final java.net.http.HttpClient jdkHttpClient; private final Set<String> restrictedHeaders; JdkHttpClient(java.net.http.HttpClient httpClient, Set<String> restrictedHeaders) { this.jdkHttpClient = httpClient; int javaVersion = getJavaVersion(); if (javaVersion <= 11) { throw LOGGER.logExceptionAsError( new UnsupportedOperationException("JdkAsyncHttpClient is not supported in Java version 11 and below.")); } this.restrictedHeaders = restrictedHeaders; LOGGER.verbose("Effective restricted headers: {}", restrictedHeaders); } @Override public Mono<HttpResponse> send(HttpRequest request) { return send(request, Context.NONE); } @Override public Mono<HttpResponse> send(HttpRequest request, Context context) { boolean eagerlyReadResponse = (boolean) context.getData("azure-eagerly-read-response").orElse(false); return Mono.fromCallable(() -> toJdkHttpRequest(request, context)) .flatMap(jdkRequest -> Mono.fromCompletionStage(jdkHttpClient.sendAsync(jdkRequest, ofPublisher())) .flatMap(jdKResponse -> { if (eagerlyReadResponse) { HttpHeaders headers = fromJdkHttpHeaders(jdKResponse.headers()); int statusCode = jdKResponse.statusCode(); return FluxUtil.collectBytesFromNetworkResponse(JdkFlowAdapter .flowPublisherToFlux(jdKResponse.body()) .flatMapSequential(Flux::fromIterable), headers) .map(bytes -> new JdkSyncHttpResponse(request, statusCode, headers, bytes)); } return Mono.just(new JdkAsyncHttpResponse(request, jdKResponse)); })); } @Override /** * Converts the given azure-core request to the JDK HttpRequest type. 
* * @param request the azure-core request * @return the HttpRequest */ private java.net.http.HttpRequest toJdkHttpRequest(HttpRequest request, Context context) { ProgressReporter progressReporter = Contexts.with(context).getHttpRequestProgressReporter(); final java.net.http.HttpRequest.Builder builder = java.net.http.HttpRequest.newBuilder(); try { builder.uri(request.getUrl().toURI()); } catch (URISyntaxException e) { throw LOGGER.logExceptionAsError(Exceptions.propagate(e)); } final HttpHeaders headers = request.getHeaders(); if (headers != null) { for (HttpHeader header : headers) { final String headerName = header.getName(); if (!restrictedHeaders.contains(headerName)) { header.getValuesList().forEach(headerValue -> builder.header(headerName, headerValue)); } else { LOGGER.warning("The header '" + headerName + "' is restricted by default in JDK HttpClient 12 " + "and above. This header can be added to allow list in JAVA_HOME/conf/net.properties " + "or in System.setProperty() or in Configuration. Use the key 'jdk.httpclient" + ".allowRestrictedHeaders' and a comma separated list of header names."); } } } switch (request.getHttpMethod()) { case GET: return builder.GET().build(); case HEAD: return builder.method("HEAD", noBody()).build(); default: final String contentLength = request.getHeaders().getValue("content-length"); Flux<ByteBuffer> body = request.getBody(); if (progressReporter != null) { body = body.map(buffer -> { progressReporter.reportProgress(buffer.remaining()); return buffer; }); } final BodyPublisher bodyPublisher = toBodyPublisher(body, contentLength); return builder.method(request.getHttpMethod().toString(), bodyPublisher).build(); } } /** * Create BodyPublisher from the given java.nio.ByteBuffer publisher. 
* * @param bbPublisher stream of java.nio.ByteBuffer representing request content * @return the request BodyPublisher */ private static BodyPublisher toBodyPublisher(Flux<ByteBuffer> bbPublisher, String contentLength) { if (bbPublisher == null) { return noBody(); } final Flow.Publisher<ByteBuffer> bbFlowPublisher = JdkFlowAdapter.publisherToFlowPublisher(bbPublisher); if (CoreUtils.isNullOrEmpty(contentLength)) { return fromPublisher(bbFlowPublisher); } else { long contentLengthLong = Long.parseLong(contentLength); if (contentLengthLong < 1) { return noBody(); } else { return fromPublisher(bbFlowPublisher, contentLengthLong); } } } /** * Get the java runtime major version. * * @return the java major version */ private int getJavaVersion() { String version = System.getProperty("java.version"); if (CoreUtils.isNullOrEmpty(version)) { throw LOGGER.logExceptionAsError(new RuntimeException("Can't find 'java.version' system property.")); } if (version.startsWith("1.")) { if (version.length() < 3) { throw LOGGER.logExceptionAsError(new RuntimeException("Can't parse 'java.version':" + version)); } try { return Integer.parseInt(version.substring(2, 3)); } catch (Exception t) { throw LOGGER.logExceptionAsError(new RuntimeException("Can't parse 'java.version':" + version, t)); } } else { int idx = version.indexOf("."); if (idx == -1) { return Integer.parseInt(version); } try { return Integer.parseInt(version.substring(0, idx)); } catch (Exception t) { throw LOGGER.logExceptionAsError(new RuntimeException("Can't parse 'java.version':" + version, t)); } } } /** * Converts the given JDK Http headers to azure-core Http header. 
* * @param headers the JDK Http headers * @return the azure-core Http headers */ static HttpHeaders fromJdkHttpHeaders(java.net.http.HttpHeaders headers) { final HttpHeaders httpHeaders = new HttpHeaders(); for (Map.Entry<String, List<String>> kvp : headers.map().entrySet()) { if (CoreUtils.isNullOrEmpty(kvp.getValue())) { continue; } httpHeaders.set(kvp.getKey(), kvp.getValue()); } return httpHeaders; } }
class JdkHttpClient implements HttpClient { private static final ClientLogger LOGGER = new ClientLogger(JdkHttpClient.class); private final java.net.http.HttpClient jdkHttpClient; private final Set<String> restrictedHeaders; JdkHttpClient(java.net.http.HttpClient httpClient, Set<String> restrictedHeaders) { this.jdkHttpClient = httpClient; int javaVersion = getJavaVersion(); if (javaVersion <= 11) { throw LOGGER.logExceptionAsError( new UnsupportedOperationException("JdkAsyncHttpClient is not supported in Java version 11 and below.")); } this.restrictedHeaders = restrictedHeaders; LOGGER.verbose("Effective restricted headers: {}", restrictedHeaders); } @Override public Mono<HttpResponse> send(HttpRequest request) { return send(request, Context.NONE); } @Override public Mono<HttpResponse> send(HttpRequest request, Context context) { boolean eagerlyReadResponse = (boolean) context.getData("azure-eagerly-read-response").orElse(false); return Mono.fromCallable(() -> toJdkHttpRequest(request, context)) .flatMap(jdkRequest -> Mono.fromCompletionStage(jdkHttpClient.sendAsync(jdkRequest, ofPublisher())) .flatMap(jdKResponse -> { if (eagerlyReadResponse) { HttpHeaders headers = fromJdkHttpHeaders(jdKResponse.headers()); int statusCode = jdKResponse.statusCode(); return FluxUtil.collectBytesFromNetworkResponse(JdkFlowAdapter .flowPublisherToFlux(jdKResponse.body()) .flatMapSequential(Flux::fromIterable), headers) .map(bytes -> new JdkHttpResponseSync(request, statusCode, headers, bytes)); } return Mono.just(new JdkHttpResponseAsync(request, jdKResponse)); })); } @Override /** * Converts the given azure-core request to the JDK HttpRequest type. 
* * @param request the azure-core request * @return the HttpRequest */ private java.net.http.HttpRequest toJdkHttpRequest(HttpRequest request, Context context) { ProgressReporter progressReporter = Contexts.with(context).getHttpRequestProgressReporter(); final java.net.http.HttpRequest.Builder builder = java.net.http.HttpRequest.newBuilder(); try { builder.uri(request.getUrl().toURI()); } catch (URISyntaxException e) { throw LOGGER.logExceptionAsError(Exceptions.propagate(e)); } final HttpHeaders headers = request.getHeaders(); if (headers != null) { for (HttpHeader header : headers) { final String headerName = header.getName(); if (!restrictedHeaders.contains(headerName)) { header.getValuesList().forEach(headerValue -> builder.header(headerName, headerValue)); } else { LOGGER.warning("The header '" + headerName + "' is restricted by default in JDK HttpClient 12 " + "and above. This header can be added to allow list in JAVA_HOME/conf/net.properties " + "or in System.setProperty() or in Configuration. Use the key 'jdk.httpclient" + ".allowRestrictedHeaders' and a comma separated list of header names."); } } } switch (request.getHttpMethod()) { case GET: return builder.GET().build(); case HEAD: return builder.method("HEAD", noBody()).build(); default: java.net.http.HttpRequest.BodyPublisher bodyPublisher = BodyPublisherUtils.toBodyPublisher(request, progressReporter); return builder.method(request.getHttpMethod().toString(), bodyPublisher).build(); } } /** * Get the java runtime major version. 
* * @return the java major version */ private int getJavaVersion() { String version = System.getProperty("java.version"); if (CoreUtils.isNullOrEmpty(version)) { throw LOGGER.logExceptionAsError(new RuntimeException("Can't find 'java.version' system property.")); } if (version.startsWith("1.")) { if (version.length() < 3) { throw LOGGER.logExceptionAsError(new RuntimeException("Can't parse 'java.version':" + version)); } try { return Integer.parseInt(version.substring(2, 3)); } catch (Exception t) { throw LOGGER.logExceptionAsError(new RuntimeException("Can't parse 'java.version':" + version, t)); } } else { int idx = version.indexOf("."); if (idx == -1) { return Integer.parseInt(version); } try { return Integer.parseInt(version.substring(0, idx)); } catch (Exception t) { throw LOGGER.logExceptionAsError(new RuntimeException("Can't parse 'java.version':" + version, t)); } } } /** * Converts the given JDK Http headers to azure-core Http header. * * @param headers the JDK Http headers * @return the azure-core Http headers */ static HttpHeaders fromJdkHttpHeaders(java.net.http.HttpHeaders headers) { final HttpHeaders httpHeaders = new HttpHeaders(); for (Map.Entry<String, List<String>> kvp : headers.map().entrySet()) { if (CoreUtils.isNullOrEmpty(kvp.getValue())) { continue; } httpHeaders.set(kvp.getKey(), kvp.getValue()); } return httpHeaders; } }
nit: method is checked with IOException, just let it be thrown
public void writeBodyTo(WritableByteChannel channel) throws IOException { if (bodyBytes != null) { channel.write(ByteBuffer.wrap(bodyBytes)); } else { try { int nRead; byte[] data = new byte[STREAM_READ_SIZE]; while ((nRead = bodyStream.read(data, 0, data.length)) != -1) { channel.write(ByteBuffer.wrap(data, 0, nRead)); } } catch (IOException ex) { throw LOGGER.logExceptionAsError(new UncheckedIOException(ex)); } this.close(); } }
}
public void writeBodyTo(WritableByteChannel channel) throws IOException { if (bodyBytes != null) { channel.write(ByteBuffer.wrap(bodyBytes)); } else { try { int nRead; byte[] data = new byte[STREAM_READ_SIZE]; while ((nRead = bodyStream.read(data, 0, data.length)) != -1) { channel.write(ByteBuffer.wrap(data, 0, nRead)); } } catch (IOException ex) { throw LOGGER.logExceptionAsError(new UncheckedIOException(ex)); } close(); } }
class JdkHttpResponseSync extends JdkHttpResponseBase { private static final ClientLogger LOGGER = new ClientLogger(JdkHttpResponseSync.class); private BinaryData binaryData = null; public static final int STREAM_READ_SIZE = 8192; private final InputStream bodyStream; private byte[] bodyBytes; private volatile boolean disposed = false; JdkHttpResponseSync(final HttpRequest request, int statusCode, HttpHeaders headers, byte[] bytes) { super(request, statusCode, headers); this.bodyStream = null; this.bodyBytes = bytes; } JdkHttpResponseSync(final HttpRequest request, java.net.http.HttpResponse<InputStream> streamResponse) { super(request, streamResponse.statusCode(), fromJdkHttpHeaders(streamResponse.headers())); this.bodyStream = streamResponse.body(); this.bodyBytes = null; } @Override public Flux<ByteBuffer> getBody() { if (bodyBytes != null) { return Mono.fromSupplier(() -> ByteBuffer.wrap(bodyBytes)).flux(); } else { return FluxUtil.toFluxByteBuffer(bodyStream).doFinally(ignored -> close()); } } @Override public Mono<byte[]> getBodyAsByteArray() { if (bodyBytes != null) { return Mono.just(bodyBytes); } else { return super.getBodyAsByteArray(); } } @Override public BinaryData getBodyAsBinaryData() { if (bodyBytes != null) { return BinaryData.fromBytes(bodyBytes); } else { return getBinaryData(); } } @Override @Override public void close() { if (!disposed && this.bodyStream != null) { disposed = true; try { this.bodyStream.close(); } catch (IOException e) { LOGGER.logExceptionAsError(new UncheckedIOException(e)); } } } @Override public HttpResponse buffer() { if (bodyBytes == null) { bodyBytes = getBytes(); close(); } return this; } private byte[] getBytes() { try { ByteArrayOutputStream dataOutputBuffer = new ByteArrayOutputStream(); int nRead; byte[] data = new byte[STREAM_READ_SIZE]; while ((nRead = bodyStream.read(data, 0, data.length)) != -1) { dataOutputBuffer.write(data, 0, nRead); } return dataOutputBuffer.toByteArray(); } catch (IOException ex) { throw 
LOGGER.logExceptionAsError(new UncheckedIOException(ex)); } } private BinaryData getBinaryData() { if (binaryData == null) { binaryData = BinaryData.fromStream(bodyStream); } return binaryData; } }
class JdkHttpResponseSync extends JdkHttpResponseBase { private static final ClientLogger LOGGER = new ClientLogger(JdkHttpResponseSync.class); private BinaryData binaryData = null; public static final int STREAM_READ_SIZE = 8192; private final InputStream bodyStream; private byte[] bodyBytes; private volatile boolean disposed = false; JdkHttpResponseSync(final HttpRequest request, int statusCode, HttpHeaders headers, byte[] bytes) { super(request, statusCode, headers); this.bodyStream = null; this.bodyBytes = bytes; } JdkHttpResponseSync(final HttpRequest request, java.net.http.HttpResponse<InputStream> streamResponse) { super(request, streamResponse.statusCode(), fromJdkHttpHeaders(streamResponse.headers())); this.bodyStream = streamResponse.body(); this.bodyBytes = null; } @Override public Flux<ByteBuffer> getBody() { if (bodyBytes != null) { return Mono.fromSupplier(() -> ByteBuffer.wrap(bodyBytes)).flux(); } else { return FluxUtil.toFluxByteBuffer(bodyStream).doFinally(ignored -> close()); } } @Override public Mono<byte[]> getBodyAsByteArray() { if (bodyBytes != null) { return Mono.just(bodyBytes); } else { return super.getBodyAsByteArray(); } } @Override public BinaryData getBodyAsBinaryData() { if (bodyBytes != null) { return BinaryData.fromBytes(bodyBytes); } else { return getBinaryData(); } } @Override @Override public void close() { if (!disposed && bodyStream != null) { disposed = true; try { bodyStream.close(); } catch (IOException e) { throw LOGGER.logExceptionAsError(new UncheckedIOException(e)); } } } @Override public HttpResponse buffer() { if (bodyBytes == null) { bodyBytes = getBytes(); close(); } return this; } private byte[] getBytes() { try { ByteArrayOutputStream dataOutputBuffer = new ByteArrayOutputStream(); int nRead; byte[] data = new byte[STREAM_READ_SIZE]; while ((nRead = bodyStream.read(data, 0, data.length)) != -1) { dataOutputBuffer.write(data, 0, nRead); } return dataOutputBuffer.toByteArray(); } catch (IOException ex) { throw 
LOGGER.logExceptionAsError(new UncheckedIOException(ex)); } } private BinaryData getBinaryData() { if (binaryData == null) { binaryData = BinaryData.fromStream(bodyStream); } return binaryData; } }
It isn't declared as a checked exception in `HttpResponse`, so I have to wrap it here.
/**
 * Writes the response body into the given channel.
 * <p>
 * A buffered body is written directly; otherwise the backing stream is drained
 * into the channel and then closed.
 *
 * @param channel destination channel for the body bytes.
 * @throws IOException if reading the body stream or writing to the channel fails.
 */
public void writeBodyTo(WritableByteChannel channel) throws IOException {
    if (bodyBytes != null) {
        channel.write(ByteBuffer.wrap(bodyBytes));
    } else {
        // The method already declares 'throws IOException', so wrapping in
        // UncheckedIOException is unnecessary — propagate the checked exception.
        // Closing in 'finally' releases the stream even when the copy fails.
        try {
            int nRead;
            byte[] data = new byte[STREAM_READ_SIZE];
            while ((nRead = bodyStream.read(data, 0, data.length)) != -1) {
                channel.write(ByteBuffer.wrap(data, 0, nRead));
            }
        } finally {
            close();
        }
    }
}
}
/**
 * Writes the response body into the given channel.
 * <p>
 * A buffered body is written directly; otherwise the backing stream is drained
 * into the channel and then closed.
 *
 * @param channel destination channel for the body bytes.
 * @throws IOException if reading the body stream or writing to the channel fails.
 */
public void writeBodyTo(WritableByteChannel channel) throws IOException {
    if (bodyBytes != null) {
        channel.write(ByteBuffer.wrap(bodyBytes));
    } else {
        // The method already declares 'throws IOException', so wrapping in
        // UncheckedIOException is unnecessary — propagate the checked exception.
        // Closing in 'finally' releases the stream even when the copy fails.
        try {
            int nRead;
            byte[] data = new byte[STREAM_READ_SIZE];
            while ((nRead = bodyStream.read(data, 0, data.length)) != -1) {
                channel.write(ByteBuffer.wrap(data, 0, nRead));
            }
        } finally {
            close();
        }
    }
}
class JdkHttpResponseSync extends JdkHttpResponseBase { private static final ClientLogger LOGGER = new ClientLogger(JdkHttpResponseSync.class); private BinaryData binaryData = null; public static final int STREAM_READ_SIZE = 8192; private final InputStream bodyStream; private byte[] bodyBytes; private volatile boolean disposed = false; JdkHttpResponseSync(final HttpRequest request, int statusCode, HttpHeaders headers, byte[] bytes) { super(request, statusCode, headers); this.bodyStream = null; this.bodyBytes = bytes; } JdkHttpResponseSync(final HttpRequest request, java.net.http.HttpResponse<InputStream> streamResponse) { super(request, streamResponse.statusCode(), fromJdkHttpHeaders(streamResponse.headers())); this.bodyStream = streamResponse.body(); this.bodyBytes = null; } @Override public Flux<ByteBuffer> getBody() { if (bodyBytes != null) { return Mono.fromSupplier(() -> ByteBuffer.wrap(bodyBytes)).flux(); } else { return FluxUtil.toFluxByteBuffer(bodyStream).doFinally(ignored -> close()); } } @Override public Mono<byte[]> getBodyAsByteArray() { if (bodyBytes != null) { return Mono.just(bodyBytes); } else { return super.getBodyAsByteArray(); } } @Override public BinaryData getBodyAsBinaryData() { if (bodyBytes != null) { return BinaryData.fromBytes(bodyBytes); } else { return getBinaryData(); } } @Override @Override public void close() { if (!disposed && this.bodyStream != null) { disposed = true; try { this.bodyStream.close(); } catch (IOException e) { LOGGER.logExceptionAsError(new UncheckedIOException(e)); } } } @Override public HttpResponse buffer() { if (bodyBytes == null) { bodyBytes = getBytes(); close(); } return this; } private byte[] getBytes() { try { ByteArrayOutputStream dataOutputBuffer = new ByteArrayOutputStream(); int nRead; byte[] data = new byte[STREAM_READ_SIZE]; while ((nRead = bodyStream.read(data, 0, data.length)) != -1) { dataOutputBuffer.write(data, 0, nRead); } return dataOutputBuffer.toByteArray(); } catch (IOException ex) { throw 
LOGGER.logExceptionAsError(new UncheckedIOException(ex)); } } private BinaryData getBinaryData() { if (binaryData == null) { binaryData = BinaryData.fromStream(bodyStream); } return binaryData; } }
class JdkHttpResponseSync extends JdkHttpResponseBase { private static final ClientLogger LOGGER = new ClientLogger(JdkHttpResponseSync.class); private BinaryData binaryData = null; public static final int STREAM_READ_SIZE = 8192; private final InputStream bodyStream; private byte[] bodyBytes; private volatile boolean disposed = false; JdkHttpResponseSync(final HttpRequest request, int statusCode, HttpHeaders headers, byte[] bytes) { super(request, statusCode, headers); this.bodyStream = null; this.bodyBytes = bytes; } JdkHttpResponseSync(final HttpRequest request, java.net.http.HttpResponse<InputStream> streamResponse) { super(request, streamResponse.statusCode(), fromJdkHttpHeaders(streamResponse.headers())); this.bodyStream = streamResponse.body(); this.bodyBytes = null; } @Override public Flux<ByteBuffer> getBody() { if (bodyBytes != null) { return Mono.fromSupplier(() -> ByteBuffer.wrap(bodyBytes)).flux(); } else { return FluxUtil.toFluxByteBuffer(bodyStream).doFinally(ignored -> close()); } } @Override public Mono<byte[]> getBodyAsByteArray() { if (bodyBytes != null) { return Mono.just(bodyBytes); } else { return super.getBodyAsByteArray(); } } @Override public BinaryData getBodyAsBinaryData() { if (bodyBytes != null) { return BinaryData.fromBytes(bodyBytes); } else { return getBinaryData(); } } @Override @Override public void close() { if (!disposed && bodyStream != null) { disposed = true; try { bodyStream.close(); } catch (IOException e) { throw LOGGER.logExceptionAsError(new UncheckedIOException(e)); } } } @Override public HttpResponse buffer() { if (bodyBytes == null) { bodyBytes = getBytes(); close(); } return this; } private byte[] getBytes() { try { ByteArrayOutputStream dataOutputBuffer = new ByteArrayOutputStream(); int nRead; byte[] data = new byte[STREAM_READ_SIZE]; while ((nRead = bodyStream.read(data, 0, data.length)) != -1) { dataOutputBuffer.write(data, 0, nRead); } return dataOutputBuffer.toByteArray(); } catch (IOException ex) { throw 
LOGGER.logExceptionAsError(new UncheckedIOException(ex)); } } private BinaryData getBinaryData() { if (binaryData == null) { binaryData = BinaryData.fromStream(bodyStream); } return binaryData; } }
It looks checked to me — see https://github.com/Azure/azure-sdk-for-java/blob/main/sdk/core/azure-core/src/main/java/com/azure/core/http/HttpResponse.java#L163
/**
 * Writes the response body into the given channel.
 * <p>
 * A buffered body is written directly; otherwise the backing stream is drained
 * into the channel and then closed.
 *
 * @param channel destination channel for the body bytes.
 * @throws IOException if reading the body stream or writing to the channel fails.
 */
public void writeBodyTo(WritableByteChannel channel) throws IOException {
    if (bodyBytes != null) {
        channel.write(ByteBuffer.wrap(bodyBytes));
    } else {
        // The method already declares 'throws IOException', so wrapping in
        // UncheckedIOException is unnecessary — propagate the checked exception.
        // Closing in 'finally' releases the stream even when the copy fails.
        try {
            int nRead;
            byte[] data = new byte[STREAM_READ_SIZE];
            while ((nRead = bodyStream.read(data, 0, data.length)) != -1) {
                channel.write(ByteBuffer.wrap(data, 0, nRead));
            }
        } finally {
            close();
        }
    }
}
}
/**
 * Writes the response body into the given channel.
 * <p>
 * A buffered body is written directly; otherwise the backing stream is drained
 * into the channel and then closed.
 *
 * @param channel destination channel for the body bytes.
 * @throws IOException if reading the body stream or writing to the channel fails.
 */
public void writeBodyTo(WritableByteChannel channel) throws IOException {
    if (bodyBytes != null) {
        channel.write(ByteBuffer.wrap(bodyBytes));
    } else {
        // The method already declares 'throws IOException', so wrapping in
        // UncheckedIOException is unnecessary — propagate the checked exception.
        // Closing in 'finally' releases the stream even when the copy fails.
        try {
            int nRead;
            byte[] data = new byte[STREAM_READ_SIZE];
            while ((nRead = bodyStream.read(data, 0, data.length)) != -1) {
                channel.write(ByteBuffer.wrap(data, 0, nRead));
            }
        } finally {
            close();
        }
    }
}
class JdkHttpResponseSync extends JdkHttpResponseBase { private static final ClientLogger LOGGER = new ClientLogger(JdkHttpResponseSync.class); private BinaryData binaryData = null; public static final int STREAM_READ_SIZE = 8192; private final InputStream bodyStream; private byte[] bodyBytes; private volatile boolean disposed = false; JdkHttpResponseSync(final HttpRequest request, int statusCode, HttpHeaders headers, byte[] bytes) { super(request, statusCode, headers); this.bodyStream = null; this.bodyBytes = bytes; } JdkHttpResponseSync(final HttpRequest request, java.net.http.HttpResponse<InputStream> streamResponse) { super(request, streamResponse.statusCode(), fromJdkHttpHeaders(streamResponse.headers())); this.bodyStream = streamResponse.body(); this.bodyBytes = null; } @Override public Flux<ByteBuffer> getBody() { if (bodyBytes != null) { return Mono.fromSupplier(() -> ByteBuffer.wrap(bodyBytes)).flux(); } else { return FluxUtil.toFluxByteBuffer(bodyStream).doFinally(ignored -> close()); } } @Override public Mono<byte[]> getBodyAsByteArray() { if (bodyBytes != null) { return Mono.just(bodyBytes); } else { return super.getBodyAsByteArray(); } } @Override public BinaryData getBodyAsBinaryData() { if (bodyBytes != null) { return BinaryData.fromBytes(bodyBytes); } else { return getBinaryData(); } } @Override @Override public void close() { if (!disposed && this.bodyStream != null) { disposed = true; try { this.bodyStream.close(); } catch (IOException e) { LOGGER.logExceptionAsError(new UncheckedIOException(e)); } } } @Override public HttpResponse buffer() { if (bodyBytes == null) { bodyBytes = getBytes(); close(); } return this; } private byte[] getBytes() { try { ByteArrayOutputStream dataOutputBuffer = new ByteArrayOutputStream(); int nRead; byte[] data = new byte[STREAM_READ_SIZE]; while ((nRead = bodyStream.read(data, 0, data.length)) != -1) { dataOutputBuffer.write(data, 0, nRead); } return dataOutputBuffer.toByteArray(); } catch (IOException ex) { throw 
LOGGER.logExceptionAsError(new UncheckedIOException(ex)); } } private BinaryData getBinaryData() { if (binaryData == null) { binaryData = BinaryData.fromStream(bodyStream); } return binaryData; } }
class JdkHttpResponseSync extends JdkHttpResponseBase { private static final ClientLogger LOGGER = new ClientLogger(JdkHttpResponseSync.class); private BinaryData binaryData = null; public static final int STREAM_READ_SIZE = 8192; private final InputStream bodyStream; private byte[] bodyBytes; private volatile boolean disposed = false; JdkHttpResponseSync(final HttpRequest request, int statusCode, HttpHeaders headers, byte[] bytes) { super(request, statusCode, headers); this.bodyStream = null; this.bodyBytes = bytes; } JdkHttpResponseSync(final HttpRequest request, java.net.http.HttpResponse<InputStream> streamResponse) { super(request, streamResponse.statusCode(), fromJdkHttpHeaders(streamResponse.headers())); this.bodyStream = streamResponse.body(); this.bodyBytes = null; } @Override public Flux<ByteBuffer> getBody() { if (bodyBytes != null) { return Mono.fromSupplier(() -> ByteBuffer.wrap(bodyBytes)).flux(); } else { return FluxUtil.toFluxByteBuffer(bodyStream).doFinally(ignored -> close()); } } @Override public Mono<byte[]> getBodyAsByteArray() { if (bodyBytes != null) { return Mono.just(bodyBytes); } else { return super.getBodyAsByteArray(); } } @Override public BinaryData getBodyAsBinaryData() { if (bodyBytes != null) { return BinaryData.fromBytes(bodyBytes); } else { return getBinaryData(); } } @Override @Override public void close() { if (!disposed && bodyStream != null) { disposed = true; try { bodyStream.close(); } catch (IOException e) { throw LOGGER.logExceptionAsError(new UncheckedIOException(e)); } } } @Override public HttpResponse buffer() { if (bodyBytes == null) { bodyBytes = getBytes(); close(); } return this; } private byte[] getBytes() { try { ByteArrayOutputStream dataOutputBuffer = new ByteArrayOutputStream(); int nRead; byte[] data = new byte[STREAM_READ_SIZE]; while ((nRead = bodyStream.read(data, 0, data.length)) != -1) { dataOutputBuffer.write(data, 0, nRead); } return dataOutputBuffer.toByteArray(); } catch (IOException ex) { throw 
LOGGER.logExceptionAsError(new UncheckedIOException(ex)); } } private BinaryData getBinaryData() { if (binaryData == null) { binaryData = BinaryData.fromStream(bodyStream); } return binaryData; } }
Nit: we could get rid of these comments (not necessary to complete this PR, though).
/**
 * Converts a storage exception message into a more actionable form.
 * <p>
 * For 403 responses the string-to-sign logging guidance is prepended. For HEAD
 * responses — which carry no body — the "(empty body)" placeholder in the message
 * is replaced with the error-code header value so the failure is diagnosable.
 *
 * @param message The storage exception message.
 * @param response The storage service response, may be {@code null}.
 * @return The converted storage exception message.
 */
public static String convertStorageExceptionMessage(String message, HttpResponse response) {
    if (response != null) {
        if (response.getStatusCode() == 403) {
            return STORAGE_EXCEPTION_LOG_STRING_TO_SIGN_MESSAGE + message;
        }
        if (response.getRequest() != null
            && response.getRequest().getHttpMethod() != null
            && response.getRequest().getHttpMethod().equals(HttpMethod.HEAD)
            && response.getHeaders().getValue(ERROR_CODE) != null) {
            final String emptyBodyPlaceholder = "(empty body)";
            int indexOfEmptyBody = message.indexOf(emptyBodyPlaceholder);
            if (indexOfEmptyBody >= 0) {
                // Use the placeholder's length instead of the magic number 12.
                return message.substring(0, indexOfEmptyBody)
                    + response.getHeaders().getValue(ERROR_CODE)
                    + message.substring(indexOfEmptyBody + emptyBodyPlaceholder.length());
            }
        }
    }
    return message;
}
/**
 * Converts a storage exception message into a more actionable form.
 * <p>
 * For 403 responses the string-to-sign logging guidance is prepended. For HEAD
 * responses — which carry no body — the "(empty body)" placeholder in the message
 * is replaced with the error-code header value so the failure is diagnosable.
 *
 * @param message The storage exception message.
 * @param response The storage service response, may be {@code null}.
 * @return The converted storage exception message.
 */
public static String convertStorageExceptionMessage(String message, HttpResponse response) {
    if (response != null) {
        if (response.getStatusCode() == 403) {
            return STORAGE_EXCEPTION_LOG_STRING_TO_SIGN_MESSAGE + message;
        }
        if (response.getRequest() != null
            && response.getRequest().getHttpMethod() != null
            && response.getRequest().getHttpMethod().equals(HttpMethod.HEAD)
            && response.getHeaders().getValue(ERROR_CODE) != null) {
            final String emptyBodyPlaceholder = "(empty body)";
            int indexOfEmptyBody = message.indexOf(emptyBodyPlaceholder);
            if (indexOfEmptyBody >= 0) {
                // Use the placeholder's length instead of the magic number 12.
                return message.substring(0, indexOfEmptyBody)
                    + response.getHeaders().getValue(ERROR_CODE)
                    + message.substring(indexOfEmptyBody + emptyBodyPlaceholder.length());
            }
        }
    }
    return message;
}
class StorageImplUtils {
    private static final ClientLogger LOGGER = new ClientLogger(StorageImplUtils.class);

    private static final String ARGUMENT_NULL_OR_EMPTY
        = "The argument must not be null or an empty string. Argument name: %s.";

    private static final String PARAMETER_NOT_IN_RANGE = "The value of the parameter '%s' should be between %s and %s.";

    private static final String NO_PATH_SEGMENTS = "URL %s does not contain path segments.";

    private static final String STRING_TO_SIGN_LOG_INFO_MESSAGE = "The string to sign computed by the SDK is: {}{}";

    private static final String STRING_TO_SIGN_LOG_WARNING_MESSAGE = "Please remember to disable '{}' before going "
        + "to production as this string can potentially contain PII.";

    private static final String STORAGE_EXCEPTION_LOG_STRING_TO_SIGN_MESSAGE = String.format(
        "If you are using a StorageSharedKeyCredential, and the server returned an "
            + "error message that says 'Signature did not match', you can compare the string to sign with"
            + " the one generated by the SDK. To log the string to sign, pass in the context key value pair "
            + "'%s': true to the appropriate method call.%n"
            + "If you are using a SAS token, and the server returned an error message that says "
            + "'Signature did not match', you can compare the string to sign with"
            + " the one generated by the SDK. To log the string to sign, pass in the context key value "
            + "pair '%s': true to the appropriate generateSas method call.%n"
            + "Please remember to disable '%s' before going to production as this string can potentially "
            + "contain PII.%n",
        Constants.STORAGE_LOG_STRING_TO_SIGN, Constants.STORAGE_LOG_STRING_TO_SIGN,
        Constants.STORAGE_LOG_STRING_TO_SIGN);

    /**
     * Format string for invalid date error messages.
     *
     * @deprecated See value in {@link StorageImplUtils}
     */
    @Deprecated
    public static final String INVALID_DATE_STRING = "Invalid Date String: %s.";

    /**
     * Stores a reference to the date/time pattern with the greatest precision Java.util.Date is capable of expressing.
     *
     * @deprecated See value in {@link StorageImplUtils}
     */
    @Deprecated
    public static final String MAX_PRECISION_PATTERN = "yyyy-MM-dd'T'HH:mm:ss.SSS";

    /**
     * The length of a datestring that matches the MAX_PRECISION_PATTERN.
     *
     * @deprecated See value in {@link StorageImplUtils}
     */
    @Deprecated
    public static final int MAX_PRECISION_DATESTRING_LENGTH = MAX_PRECISION_PATTERN.replaceAll("'", "").length();

    /**
     * Stores a reference to the ISO8601 date/time pattern.
     *
     * @deprecated See value in {@link StorageImplUtils}
     */
    @Deprecated
    public static final String ISO8601_PATTERN = "yyyy-MM-dd'T'HH:mm:ss'Z'";

    /**
     * Stores a reference to the ISO8601 date/time pattern.
     *
     * @deprecated See value in {@link StorageImplUtils}
     */
    @Deprecated
    public static final String ISO8601_PATTERN_NO_SECONDS = "yyyy-MM-dd'T'HH:mm'Z'";

    /**
     * A compiled Pattern that finds 'Z'. This is used as Java 8's String.replace method uses Pattern.compile
     * internally without simple case opt-outs.
     *
     * @deprecated See value in {@link StorageImplUtils}
     */
    @Deprecated
    public static final Pattern Z_PATTERN = Pattern.compile("Z");

    private static final DateTimeFormatter MAX_PRECISION_FORMATTER
        = DateTimeFormatter.ofPattern(MAX_PRECISION_PATTERN).withLocale(Locale.ROOT);

    private static final DateTimeFormatter ISO8601_FORMATTER
        = DateTimeFormatter.ofPattern(ISO8601_PATTERN).withLocale(Locale.ROOT);

    private static final DateTimeFormatter NO_SECONDS_FORMATTER
        = DateTimeFormatter.ofPattern(ISO8601_PATTERN_NO_SECONDS).withLocale(Locale.ROOT);

    /**
     * Parses the query string into a key-value pair map that maintains key, query parameter key, order. The value is
     * stored as a parsed array (ex. key=[val1, val2, val3] instead of key=val1,val2,val3).
     *
     * @param queryString Query string to parse
     * @return a mapping of query string pieces as key-value pairs.
     */
    public static Map<String, String[]> parseQueryStringSplitValues(final String queryString) {
        TreeMap<String, String[]> pieces = new TreeMap<>();
        if (CoreUtils.isNullOrEmpty(queryString)) {
            return pieces;
        }
        for (String kvp : queryString.split("&")) {
            int equalIndex = kvp.indexOf('=');
            String key = urlDecode(kvp.substring(0, equalIndex).toLowerCase(Locale.ROOT));
            String[] value = kvp.substring(equalIndex + 1).split(",");
            for (int i = 0; i < value.length; i++) {
                value[i] = urlDecode(value[i]);
            }
            // putIfAbsent: the first occurrence of a duplicate key wins.
            pieces.putIfAbsent(key, value);
        }
        return pieces;
    }

    /**
     * Blocks an asynchronous response with an optional timeout.
     *
     * @param response Asynchronous response to block
     * @param timeout Optional timeout
     * @param <T> Return type of the asynchronous response
     * @return the value of the asynchronous response
     * @throws RuntimeException If the asynchronous response doesn't complete before the timeout expires.
     */
    public static <T> T blockWithOptionalTimeout(Mono<T> response, Duration timeout) {
        if (timeout == null) {
            return response.block();
        } else {
            return response.block(timeout);
        }
    }

    /**
     * Applies a timeout to a publisher if the given timeout is not null.
     *
     * @param publisher Mono to apply optional timeout to.
     * @param timeout Optional timeout.
     * @param <T> Return type of the Mono.
     * @return Mono with an applied timeout, if any.
     */
    public static <T> Mono<T> applyOptionalTimeout(Mono<T> publisher, Duration timeout) {
        return timeout == null ? publisher : publisher.timeout(timeout);
    }

    /**
     * Asserts that a value is not {@code null}.
     *
     * @param param Name of the parameter
     * @param value Value of the parameter
     * @throws NullPointerException If {@code value} is {@code null}
     */
    public static void assertNotNull(final String param, final Object value) {
        if (value == null) {
            // Consistency: route through the logger like assertInBounds does.
            throw LOGGER.logExceptionAsError(
                new NullPointerException(String.format(Locale.ROOT, ARGUMENT_NULL_OR_EMPTY, param)));
        }
    }

    /**
     * Asserts that the specified number is in the valid range. The range is inclusive.
     *
     * @param param Name of the parameter
     * @param value Value of the parameter
     * @param min The minimum allowed value
     * @param max The maximum allowed value
     * @throws IllegalArgumentException If {@code value} is less than {@code min} or {@code value} is greater than
     * {@code max}.
     */
    public static void assertInBounds(final String param, final long value, final long min, final long max) {
        if (value < min || value > max) {
            throw LOGGER.logExceptionAsError(new IllegalArgumentException(String.format(Locale.ROOT,
                PARAMETER_NOT_IN_RANGE, param, min, max)));
        }
    }

    /**
     * Computes a signature for the specified string using the HMAC-SHA256 algorithm.
     *
     * @param base64Key Base64 encoded key used to sign the string
     * @param stringToSign UTF-8 encoded string to sign
     * @return the HMAC-SHA256 encoded signature
     * @throws RuntimeException If the HMAC-SHA256 algorithm isn't support, if the key isn't a valid Base64 encoded
     * string, or the UTF-8 charset isn't supported.
     */
    public static String computeHMac256(final String base64Key, final String stringToSign) {
        try {
            byte[] key = Base64.getDecoder().decode(base64Key);
            Mac hmacSHA256 = Mac.getInstance("HmacSHA256");
            hmacSHA256.init(new SecretKeySpec(key, "HmacSHA256"));
            byte[] utf8Bytes = stringToSign.getBytes(StandardCharsets.UTF_8);
            return Base64.getEncoder().encodeToString(hmacSHA256.doFinal(utf8Bytes));
        } catch (NoSuchAlgorithmException | InvalidKeyException ex) {
            // Consistency: log before rethrowing, matching the rest of the class.
            throw LOGGER.logExceptionAsError(new RuntimeException(ex));
        }
    }

    /**
     * Appends a string to the end of the passed URL's path.
     *
     * @param baseURL URL having a path appended
     * @param name Name of the path
     * @return a URL with the path appended.
     * @throws IllegalArgumentException If {@code name} causes the URL to become malformed.
     */
    public static URL appendToUrlPath(String baseURL, String name) {
        UrlBuilder builder = UrlBuilder.parse(baseURL);

        // Ensure the existing path ends with a single '/' before appending the segment.
        if (builder.getPath() == null) {
            builder.setPath("/");
        } else if (!builder.getPath().endsWith("/")) {
            builder.setPath(builder.getPath() + "/");
        }
        builder.setPath(builder.getPath() + name);

        try {
            return builder.toUrl();
        } catch (MalformedURLException ex) {
            throw LOGGER.logExceptionAsError(new IllegalArgumentException(ex));
        }
    }

    /**
     * Strips the last path segment from the passed URL.
     *
     * @param baseUrl URL having its last path segment stripped
     * @return a URL with the path segment stripped.
     * @throws IllegalArgumentException If stripping the last path segment causes the URL to become malformed or it
     * doesn't contain any path segments.
     */
    public static URL stripLastPathSegment(URL baseUrl) {
        UrlBuilder builder = UrlBuilder.parse(baseUrl);
        if (builder.getPath() == null || !builder.getPath().contains("/")) {
            throw LOGGER.logExceptionAsError(
                new IllegalArgumentException(String.format(Locale.ROOT, NO_PATH_SEGMENTS, baseUrl)));
        }

        builder.setPath(builder.getPath().substring(0, builder.getPath().lastIndexOf("/")));
        try {
            return builder.toUrl();
        } catch (MalformedURLException ex) {
            throw LOGGER.logExceptionAsError(new IllegalArgumentException(ex));
        }
    }

    /**
     * Strips the account name from host part of the URL object.
     *
     * @param url URL having its hostanme
     * @return account name.
     */
    public static String getAccountName(URL url) {
        UrlBuilder builder = UrlBuilder.parse(url);
        String accountName = null;
        String host = builder.getHost();
        if (!CoreUtils.isNullOrEmpty(host)) {
            int accountNameIndex = host.indexOf('.');
            if (accountNameIndex == -1) {
                // Host with no '.' is assumed to be the account name itself.
                accountName = host;
            } else {
                accountName = host.substring(0, accountNameIndex);
            }
        }
        return accountName;
    }

    /**
     * Returns an empty string if value is {@code null}, otherwise returns value.
     *
     * @param value The value to check and return.
     * @return The value or empty string.
     */
    public static String emptyIfNull(String value) {
        return value == null ? "" : value;
    }

    /**
     * Logs the string to sign if a valid context is provided.
     *
     * @param logger {@link ClientLogger}
     * @param stringToSign The string to sign to log.
     * @param context Additional context to determine if the string to sign should be logged.
     */
    public static void logStringToSign(ClientLogger logger, String stringToSign, Context context) {
        if (context != null
            && Boolean.TRUE.equals(context.getData(Constants.STORAGE_LOG_STRING_TO_SIGN).orElse(false))) {
            logger.info(STRING_TO_SIGN_LOG_INFO_MESSAGE, stringToSign, System.lineSeparator());
            logger.warning(STRING_TO_SIGN_LOG_WARNING_MESSAGE, Constants.STORAGE_LOG_STRING_TO_SIGN);
        }
    }

    // NOTE(review): the Javadoc below appears to document a 'convertStorageExceptionMessage'
    // method that is not present in this class body — confirm whether the method was
    // moved or removed, and relocate/delete this Javadoc accordingly.
    /**
     * Converts the storage exception message.
     *
     * @param message The storage exception message
     * @param response The storage service response.
     * @return The converted storage exception message.
     */

    /**
     * Given a String representing a date in a form of the ISO8601 pattern, generates a Date representing it with up to
     * millisecond precision.
     *
     * @param dateString the {@code String} to be interpreted as a <code>Date</code>
     * @return the corresponding <code>Date</code> object
     * @throws IllegalArgumentException If {@code dateString} doesn't match an ISO8601 pattern
     */
    public static TimeAndFormat parseDateAndFormat(String dateString) {
        DateTimeFormatter formatter = MAX_PRECISION_FORMATTER;
        switch (dateString.length()) {
            case 28:
            case 27:
            case 26:
            case 25:
            case 24:
                // Truncate sub-millisecond precision down to milliseconds.
                dateString = dateString.substring(0, MAX_PRECISION_DATESTRING_LENGTH);
                break;
            case 23:
                // Presumably pads the fractional seconds by replacing the trailing 'Z' —
                // TODO(review): confirm against the accepted input formats.
                dateString = Z_PATTERN.matcher(dateString).replaceAll("0");
                break;
            case 22:
                dateString = Z_PATTERN.matcher(dateString).replaceAll("00");
                break;
            case 20:
                formatter = ISO8601_FORMATTER;
                break;
            case 17:
                formatter = NO_SECONDS_FORMATTER;
                break;
            default:
                throw LOGGER.logExceptionAsError(
                    new IllegalArgumentException(String.format(Locale.ROOT, INVALID_DATE_STRING, dateString)));
        }
        return new TimeAndFormat(LocalDateTime.parse(dateString, formatter).atZone(ZoneOffset.UTC).toOffsetDateTime(),
            formatter);
    }
}
class StorageImplUtils { private static final ClientLogger LOGGER = new ClientLogger(StorageImplUtils.class); private static final String ARGUMENT_NULL_OR_EMPTY = "The argument must not be null or an empty string. Argument name: %s."; private static final String PARAMETER_NOT_IN_RANGE = "The value of the parameter '%s' should be between %s and %s."; private static final String NO_PATH_SEGMENTS = "URL %s does not contain path segments."; private static final String STRING_TO_SIGN_LOG_INFO_MESSAGE = "The string to sign computed by the SDK is: {}{}"; private static final String STRING_TO_SIGN_LOG_WARNING_MESSAGE = "Please remember to disable '{}' before going " + "to production as this string can potentially contain PII."; private static final String STORAGE_EXCEPTION_LOG_STRING_TO_SIGN_MESSAGE = String.format( "If you are using a StorageSharedKeyCredential, and the server returned an " + "error message that says 'Signature did not match', you can compare the string to sign with" + " the one generated by the SDK. To log the string to sign, pass in the context key value pair " + "'%s': true to the appropriate method call.%n" + "If you are using a SAS token, and the server returned an error message that says " + "'Signature did not match', you can compare the string to sign with" + " the one generated by the SDK. To log the string to sign, pass in the context key value " + "pair '%s': true to the appropriate generateSas method call.%n" + "Please remember to disable '%s' before going to production as this string can potentially " + "contain PII.%n", Constants.STORAGE_LOG_STRING_TO_SIGN, Constants.STORAGE_LOG_STRING_TO_SIGN, Constants.STORAGE_LOG_STRING_TO_SIGN); /** * @deprecated See value in {@link StorageImplUtils} */ @Deprecated public static final String INVALID_DATE_STRING = "Invalid Date String: %s."; /** * Stores a reference to the date/time pattern with the greatest precision Java.util.Date is capable of expressing. 
* @deprecated See value in {@link StorageImplUtils} */ @Deprecated public static final String MAX_PRECISION_PATTERN = "yyyy-MM-dd'T'HH:mm:ss.SSS"; /** * The length of a datestring that matches the MAX_PRECISION_PATTERN. * @deprecated See value in {@link StorageImplUtils} */ @Deprecated public static final int MAX_PRECISION_DATESTRING_LENGTH = MAX_PRECISION_PATTERN.replaceAll("'", "") .length(); /** * Stores a reference to the ISO8601 date/time pattern. * @deprecated See value in {@link StorageImplUtils} */ @Deprecated public static final String ISO8601_PATTERN = "yyyy-MM-dd'T'HH:mm:ss'Z'"; /** * Stores a reference to the ISO8601 date/time pattern. * @deprecated See value in {@link StorageImplUtils} */ @Deprecated public static final String ISO8601_PATTERN_NO_SECONDS = "yyyy-MM-dd'T'HH:mm'Z'"; private static final DateTimeFormatter MAX_PRECISION_FORMATTER = DateTimeFormatter.ofPattern(MAX_PRECISION_PATTERN) .withLocale(Locale.ROOT); private static final DateTimeFormatter ISO8601_FORMATTER = DateTimeFormatter.ofPattern(ISO8601_PATTERN) .withLocale(Locale.ROOT); private static final DateTimeFormatter NO_SECONDS_FORMATTER = DateTimeFormatter .ofPattern(ISO8601_PATTERN_NO_SECONDS) .withLocale(Locale.ROOT); /** * Parses the query string into a key-value pair map that maintains key, query parameter key, order. The value is * stored as a parsed array (ex. key=[val1, val2, val3] instead of key=val1,val2,val3). * * @param queryString Query string to parse * @return a mapping of query string pieces as key-value pairs. 
*/ public static Map<String, String[]> parseQueryStringSplitValues(final String queryString) { TreeMap<String, String[]> pieces = new TreeMap<>(); if (CoreUtils.isNullOrEmpty(queryString)) { return pieces; } for (String kvp : queryString.split("&")) { int equalIndex = kvp.indexOf('='); String key = urlDecode(kvp.substring(0, equalIndex).toLowerCase(Locale.ROOT)); String[] value = kvp.substring(equalIndex + 1).split(","); for (int i = 0; i < value.length; i++) { value[i] = urlDecode(value[i]); } pieces.putIfAbsent(key, value); } return pieces; } /** * Blocks an asynchronous response with an optional timeout. * * @param response Asynchronous response to block * @param timeout Optional timeout * @param <T> Return type of the asynchronous response * @return the value of the asynchronous response * @throws RuntimeException If the asynchronous response doesn't complete before the timeout expires. */ public static <T> T blockWithOptionalTimeout(Mono<T> response, Duration timeout) { if (timeout == null) { return response.block(); } else { return response.block(timeout); } } /** * Applies a timeout to a publisher if the given timeout is not null. * * @param publisher Mono to apply optional timeout to. * @param timeout Optional timeout. * @param <T> Return type of the Mono. * @return Mono with an applied timeout, if any. */ public static <T> Mono<T> applyOptionalTimeout(Mono<T> publisher, Duration timeout) { return timeout == null ? publisher : publisher.timeout(timeout); } /** * Asserts that a value is not {@code null}. * * @param param Name of the parameter * @param value Value of the parameter * @throws NullPointerException If {@code value} is {@code null} */ public static void assertNotNull(final String param, final Object value) { if (value == null) { throw new NullPointerException(String.format(Locale.ROOT, ARGUMENT_NULL_OR_EMPTY, param)); } } /** * Asserts that the specified number is in the valid range. The range is inclusive. 
* * @param param Name of the parameter * @param value Value of the parameter * @param min The minimum allowed value * @param max The maximum allowed value * @throws IllegalArgumentException If {@code value} is less than {@code min} or {@code value} is greater than * {@code max}. */ public static void assertInBounds(final String param, final long value, final long min, final long max) { if (value < min || value > max) { throw LOGGER.logExceptionAsError(new IllegalArgumentException(String.format(Locale.ROOT, PARAMETER_NOT_IN_RANGE, param, min, max))); } } /** * Computes a signature for the specified string using the HMAC-SHA256 algorithm. * * @param base64Key Base64 encoded key used to sign the string * @param stringToSign UTF-8 encoded string to sign * @return the HMAC-SHA256 encoded signature * @throws RuntimeException If the HMAC-SHA256 algorithm isn't support, if the key isn't a valid Base64 encoded * string, or the UTF-8 charset isn't supported. */ public static String computeHMac256(final String base64Key, final String stringToSign) { try { byte[] key = Base64.getDecoder().decode(base64Key); Mac hmacSHA256 = Mac.getInstance("HmacSHA256"); hmacSHA256.init(new SecretKeySpec(key, "HmacSHA256")); byte[] utf8Bytes = stringToSign.getBytes(StandardCharsets.UTF_8); return Base64.getEncoder().encodeToString(hmacSHA256.doFinal(utf8Bytes)); } catch (NoSuchAlgorithmException | InvalidKeyException ex) { throw new RuntimeException(ex); } } /** * Appends a string to the end of the passed URL's path. * * @param baseURL URL having a path appended * @param name Name of the path * @return a URL with the path appended. * @throws IllegalArgumentException If {@code name} causes the URL to become malformed. 
*/ public static URL appendToUrlPath(String baseURL, String name) { UrlBuilder builder = UrlBuilder.parse(baseURL); if (builder.getPath() == null) { builder.setPath("/"); } else if (!builder.getPath().endsWith("/")) { builder.setPath(builder.getPath() + "/"); } builder.setPath(builder.getPath() + name); try { return builder.toUrl(); } catch (MalformedURLException ex) { throw new IllegalArgumentException(ex); } } /** * Strips the last path segment from the passed URL. * * @param baseUrl URL having its last path segment stripped * @return a URL with the path segment stripped. * @throws IllegalArgumentException If stripping the last path segment causes the URL to become malformed or it * doesn't contain any path segments. */ public static URL stripLastPathSegment(URL baseUrl) { UrlBuilder builder = UrlBuilder.parse(baseUrl); if (builder.getPath() == null || !builder.getPath().contains("/")) { throw new IllegalArgumentException(String.format(Locale.ROOT, NO_PATH_SEGMENTS, baseUrl)); } builder.setPath(builder.getPath().substring(0, builder.getPath().lastIndexOf("/"))); try { return builder.toUrl(); } catch (MalformedURLException ex) { throw new IllegalArgumentException(ex); } } /** * Strips the account name from host part of the URL object. * * @param url URL having its hostanme * @return account name. */ public static String getAccountName(URL url) { UrlBuilder builder = UrlBuilder.parse(url); String accountName = null; String host = builder.getHost(); if (!CoreUtils.isNullOrEmpty(host)) { int accountNameIndex = host.indexOf('.'); if (accountNameIndex == -1) { accountName = host; } else { accountName = host.substring(0, accountNameIndex); } } return accountName; } /** Returns an empty string if value is {@code null}, otherwise returns value * @param value The value to check and return. * @return The value or empty string. */ public static String emptyIfNull(String value) { return value == null ? 
"" : value; } /** * Logs the string to sign if a valid context is provided. * * @param logger {@link ClientLogger} * @param stringToSign The string to sign to log. * @param context Additional context to determine if the string to sign should be logged. */ public static void logStringToSign(ClientLogger logger, String stringToSign, Context context) { if (context != null && Boolean.TRUE.equals(context.getData(Constants.STORAGE_LOG_STRING_TO_SIGN).orElse(false))) { logger.info(STRING_TO_SIGN_LOG_INFO_MESSAGE, stringToSign, System.lineSeparator()); logger.warning(STRING_TO_SIGN_LOG_WARNING_MESSAGE, Constants.STORAGE_LOG_STRING_TO_SIGN); } } /** * Converts the storage exception message. * * @param message The storage exception message * @param response The storage service response. * @return The converted storage exception message. */ /** * Given a String representing a date in a form of the ISO8601 pattern, generates a Date representing it with up to * millisecond precision. * * @param dateString the {@code String} to be interpreted as a <code>Date</code> * @return the corresponding <code>Date</code> object * @throws IllegalArgumentException If {@code dateString} doesn't match an ISO8601 pattern */ public static TimeAndFormat parseDateAndFormat(String dateString) { DateTimeFormatter formatter = MAX_PRECISION_FORMATTER; switch (dateString.length()) { case 28: case 27: case 26: case 25: case 24: dateString = dateString.substring(0, MAX_PRECISION_DATESTRING_LENGTH); break; case 23: if (dateString.endsWith("Z")) { dateString = dateString.substring(0, 22) + "0"; } break; case 22: if (dateString.endsWith("Z")) { dateString = dateString.substring(0, 21) + "00"; } break; case 20: formatter = ISO8601_FORMATTER; break; case 17: formatter = NO_SECONDS_FORMATTER; break; default: throw new IllegalArgumentException(String.format(Locale.ROOT, INVALID_DATE_STRING, dateString)); } return new TimeAndFormat(LocalDateTime.parse(dateString, 
formatter).atZone(ZoneOffset.UTC).toOffsetDateTime(), formatter); } }
you are checking for null, but your `answerCallWithResponse` seems to be receiving MediaStreamingConfiguration. that doesn't seem to be accurate.
public void answerCallWithResponse() { CallAutomationAsyncClient callAutomationAsyncClient = getCallAutomationAsyncClient(new ArrayList<>( Collections.singletonList( new AbstractMap.SimpleEntry<>(generateCallProperties(CALL_CONNECTION_ID, CALL_SERVER_CALL_ID, CALL_CALLER_ID, CALL_TARGET_ID, CALL_CONNECTION_STATE, CALL_SUBJECT, CALL_CALLBACK_URL, null), 200) ))); Response<AnswerCallResult> answerCallResult = callAutomationAsyncClient.answerCallWithResponse( CALL_INCOMING_CALL_CONTEXT, CALL_CALLBACK_URL, MEDIA_STREAMING_CONFIGURATION).block(); assertNotNull(answerCallResult); assertEquals(200, answerCallResult.getStatusCode()); assertNotNull(answerCallResult.getValue()); assertEquals("mediaSubscriptionId", null); }
assertEquals("mediaSubscriptionId", null);
public void answerCallWithResponse() { CallAutomationAsyncClient callAutomationAsyncClient = getCallAutomationAsyncClient(new ArrayList<>( Collections.singletonList( new AbstractMap.SimpleEntry<>(generateCallProperties(CALL_CONNECTION_ID, CALL_SERVER_CALL_ID, CALL_CALLER_ID, CALL_TARGET_ID, CALL_CONNECTION_STATE, CALL_SUBJECT, CALL_CALLBACK_URL, MEDIA_SUBSCRIPTION_ID), 200) ))); Response<AnswerCallResult> answerCallResult = callAutomationAsyncClient.answerCallWithResponse( CALL_INCOMING_CALL_CONTEXT, CALL_CALLBACK_URL, MEDIA_STREAMING_CONFIGURATION).block(); assertNotNull(answerCallResult); assertEquals(200, answerCallResult.getStatusCode()); assertNotNull(answerCallResult.getValue()); assertEquals("mediaSubscriptionId", answerCallResult.getValue().getCallConnectionProperties().getMediaSubscriptionId()); }
class CallAutomationAsyncClientUnitTests extends CallAutomationUnitTestBase { @Test public void createCall() { CallAutomationAsyncClient callAutomationAsyncClient = getCallAutomationAsyncClient(new ArrayList<>( Collections.singletonList( new AbstractMap.SimpleEntry<>(generateCallProperties(CALL_CONNECTION_ID, CALL_SERVER_CALL_ID, CALL_CALLER_ID, CALL_TARGET_ID, CALL_CONNECTION_STATE, CALL_SUBJECT, CALL_CALLBACK_URL, null), 201) ))); CommunicationUserIdentifier caller = new CommunicationUserIdentifier(CALL_CALLER_ID); List<CommunicationIdentifier> targets = new ArrayList<>(Collections.singletonList(new CommunicationUserIdentifier(CALL_TARGET_ID))); CreateCallOptions callOptions = new CreateCallOptions(caller, targets, CALL_CALLBACK_URL); callOptions.setSubject(CALL_SUBJECT); CreateCallResult createCallResult = callAutomationAsyncClient.createCall(callOptions).block(); assertNotNull(createCallResult); } @Test public void createCallWithResponse() { CallAutomationAsyncClient callAutomationAsyncClient = getCallAutomationAsyncClient(new ArrayList<>( Collections.singletonList( new AbstractMap.SimpleEntry<>(generateCallProperties(CALL_CONNECTION_ID, CALL_SERVER_CALL_ID, CALL_CALLER_ID, CALL_TARGET_ID, CALL_CONNECTION_STATE, CALL_SUBJECT, CALL_CALLBACK_URL, MEDIA_SUBSCRIPTION_ID), 201) ))); CommunicationUserIdentifier caller = new CommunicationUserIdentifier(CALL_CALLER_ID); List<CommunicationIdentifier> targets = new ArrayList<>(Collections.singletonList(new CommunicationUserIdentifier(CALL_TARGET_ID))); CreateCallOptions callOptions = new CreateCallOptions(caller, targets, CALL_CALLBACK_URL); callOptions.setSubject(CALL_SUBJECT); callOptions.setMediaStreamingConfiguration(MEDIA_STREAMING_CONFIGURATION); Response<CreateCallResult> createCallResult = callAutomationAsyncClient.createCallWithResponse(callOptions).block(); assertNotNull(createCallResult); assertEquals(201, createCallResult.getStatusCode()); assertNotNull(createCallResult.getValue()); 
assertEquals("mediaSubscriptionId", null); } @Test public void answerCall() { CallAutomationAsyncClient callAutomationAsyncClient = getCallAutomationAsyncClient(new ArrayList<>( Collections.singletonList( new AbstractMap.SimpleEntry<>(generateCallProperties(CALL_CONNECTION_ID, CALL_SERVER_CALL_ID, CALL_CALLER_ID, CALL_TARGET_ID, CALL_CONNECTION_STATE, CALL_SUBJECT, CALL_CALLBACK_URL, null), 200) ))); AnswerCallResult answerCallResult = callAutomationAsyncClient.answerCall(CALL_INCOMING_CALL_CONTEXT, CALL_CALLBACK_URL).block(); assertNotNull(answerCallResult); } @Test @Test public void redirectCall() { CallAutomationAsyncClient callAutomationAsyncClient = getCallAutomationAsyncClient(new ArrayList<>( Collections.singletonList( new AbstractMap.SimpleEntry<>("", 204) )) ); CommunicationUserIdentifier target = new CommunicationUserIdentifier(CALL_TARGET_ID); callAutomationAsyncClient.redirectCall(CALL_INCOMING_CALL_CONTEXT, target); } @Test public void redirectCallWithResponse() { CallAutomationAsyncClient callAutomationAsyncClient = getCallAutomationAsyncClient(new ArrayList<>( Collections.singletonList( new AbstractMap.SimpleEntry<>("", 204) )) ); CommunicationUserIdentifier target = new CommunicationUserIdentifier(CALL_TARGET_ID); Response<Void> redirectCallResponse = callAutomationAsyncClient.redirectCallWithResponse( CALL_INCOMING_CALL_CONTEXT, target).block(); assertNotNull(redirectCallResponse); assertEquals(204, redirectCallResponse.getStatusCode()); } @Test public void rejectCall() { CallAutomationAsyncClient callAutomationAsyncClient = getCallAutomationAsyncClient(new ArrayList<>( Collections.singletonList( new AbstractMap.SimpleEntry<>("", 204) )) ); callAutomationAsyncClient.rejectCall(CALL_INCOMING_CALL_CONTEXT, CallRejectReason.BUSY); } @Test public void rejectCallWithResponse() { CallAutomationAsyncClient callAutomationAsyncClient = getCallAutomationAsyncClient(new ArrayList<>( Collections.singletonList( new AbstractMap.SimpleEntry<>("", 204) )) ); 
Response<Void> rejectCallResponse = callAutomationAsyncClient.rejectCallWithResponse(CALL_INCOMING_CALL_CONTEXT, CallRejectReason.BUSY).block(); assertNotNull(rejectCallResponse); assertEquals(204, rejectCallResponse.getStatusCode()); } }
class CallAutomationAsyncClientUnitTests extends CallAutomationUnitTestBase { @Test public void createCall() { CallAutomationAsyncClient callAutomationAsyncClient = getCallAutomationAsyncClient(new ArrayList<>( Collections.singletonList( new AbstractMap.SimpleEntry<>(generateCallProperties(CALL_CONNECTION_ID, CALL_SERVER_CALL_ID, CALL_CALLER_ID, CALL_TARGET_ID, CALL_CONNECTION_STATE, CALL_SUBJECT, CALL_CALLBACK_URL, null), 201) ))); CommunicationUserIdentifier caller = new CommunicationUserIdentifier(CALL_CALLER_ID); List<CommunicationIdentifier> targets = new ArrayList<>(Collections.singletonList(new CommunicationUserIdentifier(CALL_TARGET_ID))); CreateCallOptions callOptions = new CreateCallOptions(caller, targets, CALL_CALLBACK_URL); callOptions.setSubject(CALL_SUBJECT); CreateCallResult createCallResult = callAutomationAsyncClient.createCall(callOptions).block(); assertNotNull(createCallResult); } @Test public void createCallWithResponse() { CallAutomationAsyncClient callAutomationAsyncClient = getCallAutomationAsyncClient(new ArrayList<>( Collections.singletonList( new AbstractMap.SimpleEntry<>(generateCallProperties(CALL_CONNECTION_ID, CALL_SERVER_CALL_ID, CALL_CALLER_ID, CALL_TARGET_ID, CALL_CONNECTION_STATE, CALL_SUBJECT, CALL_CALLBACK_URL, MEDIA_SUBSCRIPTION_ID), 201) ))); CommunicationUserIdentifier caller = new CommunicationUserIdentifier(CALL_CALLER_ID); List<CommunicationIdentifier> targets = new ArrayList<>(Collections.singletonList(new CommunicationUserIdentifier(CALL_TARGET_ID))); CreateCallOptions callOptions = new CreateCallOptions(caller, targets, CALL_CALLBACK_URL); callOptions.setSubject(CALL_SUBJECT); callOptions.setMediaStreamingConfiguration(MEDIA_STREAMING_CONFIGURATION); Response<CreateCallResult> createCallResult = callAutomationAsyncClient.createCallWithResponse(callOptions).block(); assertNotNull(createCallResult); assertEquals(201, createCallResult.getStatusCode()); assertNotNull(createCallResult.getValue()); 
assertEquals("mediaSubscriptionId", createCallResult.getValue().getCallConnectionProperties().getMediaSubscriptionId()); } @Test public void answerCall() { CallAutomationAsyncClient callAutomationAsyncClient = getCallAutomationAsyncClient(new ArrayList<>( Collections.singletonList( new AbstractMap.SimpleEntry<>(generateCallProperties(CALL_CONNECTION_ID, CALL_SERVER_CALL_ID, CALL_CALLER_ID, CALL_TARGET_ID, CALL_CONNECTION_STATE, CALL_SUBJECT, CALL_CALLBACK_URL, null), 200) ))); AnswerCallResult answerCallResult = callAutomationAsyncClient.answerCall(CALL_INCOMING_CALL_CONTEXT, CALL_CALLBACK_URL).block(); assertNotNull(answerCallResult); } @Test @Test public void redirectCall() { CallAutomationAsyncClient callAutomationAsyncClient = getCallAutomationAsyncClient(new ArrayList<>( Collections.singletonList( new AbstractMap.SimpleEntry<>("", 204) )) ); CommunicationUserIdentifier target = new CommunicationUserIdentifier(CALL_TARGET_ID); callAutomationAsyncClient.redirectCall(CALL_INCOMING_CALL_CONTEXT, target); } @Test public void redirectCallWithResponse() { CallAutomationAsyncClient callAutomationAsyncClient = getCallAutomationAsyncClient(new ArrayList<>( Collections.singletonList( new AbstractMap.SimpleEntry<>("", 204) )) ); CommunicationUserIdentifier target = new CommunicationUserIdentifier(CALL_TARGET_ID); Response<Void> redirectCallResponse = callAutomationAsyncClient.redirectCallWithResponse( CALL_INCOMING_CALL_CONTEXT, target).block(); assertNotNull(redirectCallResponse); assertEquals(204, redirectCallResponse.getStatusCode()); } @Test public void rejectCall() { CallAutomationAsyncClient callAutomationAsyncClient = getCallAutomationAsyncClient(new ArrayList<>( Collections.singletonList( new AbstractMap.SimpleEntry<>("", 204) )) ); callAutomationAsyncClient.rejectCall(CALL_INCOMING_CALL_CONTEXT, CallRejectReason.BUSY); } @Test public void rejectCallWithResponse() { CallAutomationAsyncClient callAutomationAsyncClient = getCallAutomationAsyncClient(new ArrayList<>( 
Collections.singletonList( new AbstractMap.SimpleEntry<>("", 204) )) ); Response<Void> rejectCallResponse = callAutomationAsyncClient.rejectCallWithResponse(CALL_INCOMING_CALL_CONTEXT, CallRejectReason.BUSY).block(); assertNotNull(rejectCallResponse); assertEquals(204, rejectCallResponse.getStatusCode()); } }
we are finding an issue where this gets defaulted to 0, we are syncing on what the behavior should be. I'll keep you posted.
Mono<Response<Void>> recognizeWithResponseInternal(CallMediaRecognizeOptions recognizeOptions, Context context) { try { context = context == null ? Context.NONE : context; if (recognizeOptions instanceof CallMediaRecognizeDtmfOptions) { DtmfOptionsInternal dtmfOptionsInternal = new DtmfOptionsInternal(); if (((CallMediaRecognizeDtmfOptions) recognizeOptions).getInterToneTimeout() != null) { dtmfOptionsInternal.setInterToneTimeoutInSeconds((int) ((CallMediaRecognizeDtmfOptions) recognizeOptions).getInterToneTimeout().getSeconds()); } if (((CallMediaRecognizeDtmfOptions) recognizeOptions).getMaxTonesToCollect() != null) { dtmfOptionsInternal.setMaxTonesToCollect(((CallMediaRecognizeDtmfOptions) recognizeOptions).getMaxTonesToCollect()); } RecognizeOptionsInternal recognizeOptionsInternal = new RecognizeOptionsInternal() .setDtmfOptions(dtmfOptionsInternal) .setInterruptPrompt(recognizeOptions.isInterruptPrompt()) .setTargetParticipant(CommunicationIdentifierConverter.convert(recognizeOptions.getTargetParticipant())); if (recognizeOptions.getInitialSilenceTimeout() != null) { recognizeOptionsInternal.setInitialSilenceTimeoutInSeconds((int) recognizeOptions.getInitialSilenceTimeout().getSeconds()); } PlaySourceInternal playSourceInternal = null; if (recognizeOptions.getPlayPrompt() != null) { PlaySource playSource = recognizeOptions.getPlayPrompt(); if (playSource instanceof FileSource) { playSourceInternal = getPlaySourceInternal((FileSource) playSource); } } RecognizeRequest recognizeRequest = new RecognizeRequest() .setRecognizeInputType(RecognizeInputTypeInternal.fromString(recognizeOptions.getRecognizeInputType().toString())) .setInterruptCallMediaOperation(recognizeOptions.isInterruptCallMediaOperation()) .setPlayPrompt(playSourceInternal) .setRecognizeOptions(recognizeOptionsInternal) .setOperationContext(recognizeOptions.getOperationContext()); return contentsInternal.recognizeWithResponseAsync(callConnectionId, recognizeRequest, context); } else { return 
monoError(logger, new UnsupportedOperationException(recognizeOptions.getClass().getName())); } } catch (RuntimeException e) { return monoError(logger, e); } }
dtmfOptionsInternal.setInterToneTimeoutInSeconds((int) ((CallMediaRecognizeDtmfOptions) recognizeOptions).getInterToneTimeout().getSeconds());
new DtmfOptionsInternal(); dtmfOptionsInternal.setInterToneTimeoutInSeconds((int) dtmfRecognizeOptions.getInterToneTimeout().getSeconds()); if (dtmfRecognizeOptions.getMaxTonesToCollect() != null) { dtmfOptionsInternal.setMaxTonesToCollect(dtmfRecognizeOptions.getMaxTonesToCollect()); }
class CallMediaAsync { private final ContentsImpl contentsInternal; private final String callConnectionId; private final ClientLogger logger; CallMediaAsync(String callConnectionId, ContentsImpl contentsInternal) { this.callConnectionId = callConnectionId; this.contentsInternal = contentsInternal; this.logger = new ClientLogger(CallMediaAsync.class); } /** * Play * * @param playSource A {@link PlaySource} representing the source to play. * @param playTo the targets to play to * @throws CallingServerErrorException thrown if the request is rejected by server. * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. * @return Void for successful play request. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Void> play(PlaySource playSource, List<CommunicationIdentifier> playTo) { return playWithResponse(playSource, playTo, null).flatMap(FluxUtil::toMono); } /** * Play to all participants * * @param playSource A {@link PlaySource} representing the source to play. * @throws CallingServerErrorException thrown if the request is rejected by server. * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. * @return Void for successful playAll request. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Void> playToAll(PlaySource playSource) { return playToAllWithResponse(playSource, null).flatMap(FluxUtil::toMono); } /** * Play * * @param playSource A {@link PlaySource} representing the source to play. * @param playTo the targets to play to * @param options play options. * @return Response for successful play request. * @throws CallingServerErrorException thrown if the request is rejected by server. * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. 
*/ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<Void>> playWithResponse(PlaySource playSource, List<CommunicationIdentifier> playTo, PlayOptions options) { return playWithResponseInternal(playSource, playTo, options, null); } /** * Play to all participants * * @param playSource A {@link PlaySource} representing the source to play. * @param options play options. * @return Response for successful playAll request. * @throws CallingServerErrorException thrown if the request is rejected by server. * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<Void>> playToAllWithResponse(PlaySource playSource, PlayOptions options) { return playWithResponseInternal(playSource, Collections.emptyList(), options, null); } /** * Recognize operation. * @param recognizeOptions Different attributes for recognize. * @return Response for successful recognize request. */ public Mono<Void> recognize(CallMediaRecognizeOptions recognizeOptions) { return recognizeWithResponse(recognizeOptions).then(); } /** * Recognize operation * @param recognizeOptions Different attributes for recognize. * @return Response for successful recognize request. */ public Mono<Response<Void>> recognizeWithResponse(CallMediaRecognizeOptions recognizeOptions) { return withContext(context -> recognizeWithResponseInternal(recognizeOptions, context)); } Mono<Response<Void>> recognizeWithResponseInternal(CallMediaRecognizeOptions recognizeOptions, Context context) { try { context = context == null ? 
Context.NONE : context; if (recognizeOptions instanceof CallMediaRecognizeDtmfOptions) { DtmfOptionsInternal dtmfOptionsInternal = if (((CallMediaRecognizeDtmfOptions) recognizeOptions).getMaxTonesToCollect() != null) { dtmfOptionsInternal.setMaxTonesToCollect(((CallMediaRecognizeDtmfOptions) recognizeOptions).getMaxTonesToCollect()); } RecognizeOptionsInternal recognizeOptionsInternal = new RecognizeOptionsInternal() .setDtmfOptions(dtmfOptionsInternal) .setInterruptPrompt(recognizeOptions.isInterruptPrompt()) .setTargetParticipant(CommunicationIdentifierConverter.convert(recognizeOptions.getTargetParticipant())); if (recognizeOptions.getInitialSilenceTimeout() != null) { recognizeOptionsInternal.setInitialSilenceTimeoutInSeconds((int) recognizeOptions.getInitialSilenceTimeout().getSeconds()); } PlaySourceInternal playSourceInternal = null; if (recognizeOptions.getPlayPrompt() != null) { PlaySource playSource = recognizeOptions.getPlayPrompt(); if (playSource instanceof FileSource) { playSourceInternal = getPlaySourceInternal((FileSource) playSource); } } RecognizeRequest recognizeRequest = new RecognizeRequest() .setRecognizeInputType(RecognizeInputTypeInternal.fromString(recognizeOptions.getRecognizeInputType().toString())) .setInterruptCallMediaOperation(recognizeOptions.isInterruptCallMediaOperation()) .setPlayPrompt(playSourceInternal) .setRecognizeOptions(recognizeOptionsInternal) .setOperationContext(recognizeOptions.getOperationContext()); return contentsInternal.recognizeWithResponseAsync(callConnectionId, recognizeRequest, context); } else { return monoError(logger, new UnsupportedOperationException(recognizeOptions.getClass().getName())); } } catch (RuntimeException e) { return monoError(logger, e); } } /** * Cancels all the queued media operations. 
* @return Void */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Void> cancelAllMediaOperations() { return cancelAllMediaOperationsWithResponse().then(); } /** * Cancels all the queued media operations * @return Response for successful playAll request. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<Void>> cancelAllMediaOperationsWithResponse() { return cancelAllMediaOperationsWithResponseInternal(null); } Mono<Response<Void>> cancelAllMediaOperationsWithResponseInternal(Context context) { try { return withContext(contextValue -> { contextValue = context == null ? contextValue : context; return contentsInternal.cancelAllMediaOperationsWithResponseAsync(callConnectionId, contextValue) .onErrorMap(HttpResponseException.class, ErrorConstructorProxy::create); }); } catch (RuntimeException ex) { return monoError(logger, ex); } } Mono<Response<Void>> playWithResponseInternal(PlaySource playSource, List<CommunicationIdentifier> playTo, PlayOptions options, Context context) { try { return withContext(contextValue -> { contextValue = context == null ? 
contextValue : context; PlayRequest request = getPlayRequest(playSource, playTo, options); return contentsInternal.playWithResponseAsync(callConnectionId, request, contextValue) .onErrorMap(HttpResponseException.class, ErrorConstructorProxy::create); }); } catch (RuntimeException ex) { return monoError(logger, ex); } } PlayRequest getPlayRequest(PlaySource playSource, List<CommunicationIdentifier> playTo, PlayOptions options) { if (playSource instanceof FileSource) { PlaySourceInternal playSourceInternal = getPlaySourceInternal((FileSource) playSource); PlayRequest request = new PlayRequest() .setPlaySourceInfo(playSourceInternal) .setPlayTo( playTo .stream() .map(CommunicationIdentifierConverter::convert) .collect(Collectors.toList())); if (options != null) { request.setPlayOptions(new PlayOptionsInternal().setLoop(options.isLoop())); request.setOperationContext(options.getOperationContext()); } return request; } throw logger.logExceptionAsError(new IllegalArgumentException(playSource.getClass().getCanonicalName())); } private PlaySourceInternal getPlaySourceInternal(FileSource fileSource) { FileSourceInternal fileSourceInternal = new FileSourceInternal().setUri(fileSource.getUri()); PlaySourceInternal playSourceInternal = new PlaySourceInternal() .setSourceType(PlaySourceTypeInternal.FILE) .setFileSource(fileSourceInternal) .setPlaySourceId(fileSource.getPlaySourceId()); return playSourceInternal; } }
class CallMediaAsync { private final ContentsImpl contentsInternal; private final String callConnectionId; private final ClientLogger logger; CallMediaAsync(String callConnectionId, ContentsImpl contentsInternal) { this.callConnectionId = callConnectionId; this.contentsInternal = contentsInternal; this.logger = new ClientLogger(CallMediaAsync.class); } /** * Play * * @param playSource A {@link PlaySource} representing the source to play. * @param playTo the targets to play to * @throws CallingServerErrorException thrown if the request is rejected by server. * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. * @return Void for successful play request. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Void> play(PlaySource playSource, List<CommunicationIdentifier> playTo) { return playWithResponse(playSource, playTo, null).flatMap(FluxUtil::toMono); } /** * Play to all participants * * @param playSource A {@link PlaySource} representing the source to play. * @throws CallingServerErrorException thrown if the request is rejected by server. * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. * @return Void for successful playAll request. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Void> playToAll(PlaySource playSource) { return playToAllWithResponse(playSource, null).flatMap(FluxUtil::toMono); } /** * Play * * @param playSource A {@link PlaySource} representing the source to play. * @param playTo the targets to play to * @param options play options. * @return Response for successful play request. * @throws CallingServerErrorException thrown if the request is rejected by server. * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. 
*/ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<Void>> playWithResponse(PlaySource playSource, List<CommunicationIdentifier> playTo, PlayOptions options) { return playWithResponseInternal(playSource, playTo, options, null); } /** * Play to all participants * * @param playSource A {@link PlaySource} representing the source to play. * @param options play options. * @return Response for successful playAll request. * @throws CallingServerErrorException thrown if the request is rejected by server. * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<Void>> playToAllWithResponse(PlaySource playSource, PlayOptions options) { return playWithResponseInternal(playSource, Collections.emptyList(), options, null); } /** * Recognize operation. * @param recognizeOptions Different attributes for recognize. * @return Response for successful recognize request. */ public Mono<Void> recognize(CallMediaRecognizeOptions recognizeOptions) { return recognizeWithResponse(recognizeOptions).then(); } /** * Recognize operation * @param recognizeOptions Different attributes for recognize. * @return Response for successful recognize request. */ public Mono<Response<Void>> recognizeWithResponse(CallMediaRecognizeOptions recognizeOptions) { return withContext(context -> recognizeWithResponseInternal(recognizeOptions, context)); } Mono<Response<Void>> recognizeWithResponseInternal(CallMediaRecognizeOptions recognizeOptions, Context context) { try { context = context == null ? 
Context.NONE : context; if (recognizeOptions instanceof CallMediaRecognizeDtmfOptions) { CallMediaRecognizeDtmfOptions dtmfRecognizeOptions = (CallMediaRecognizeDtmfOptions) recognizeOptions; DtmfOptionsInternal dtmfOptionsInternal = RecognizeOptionsInternal recognizeOptionsInternal = new RecognizeOptionsInternal() .setDtmfOptions(dtmfOptionsInternal) .setInterruptPrompt(recognizeOptions.isInterruptPrompt()) .setTargetParticipant(CommunicationIdentifierConverter.convert(recognizeOptions.getTargetParticipant())); recognizeOptionsInternal.setInitialSilenceTimeoutInSeconds((int) recognizeOptions.getInitialSilenceTimeout().getSeconds()); PlaySourceInternal playSourceInternal = null; if (recognizeOptions.getPlayPrompt() != null) { PlaySource playSource = recognizeOptions.getPlayPrompt(); if (playSource instanceof FileSource) { playSourceInternal = getPlaySourceInternal((FileSource) playSource); } } RecognizeRequest recognizeRequest = new RecognizeRequest() .setRecognizeInputType(RecognizeInputTypeInternal.fromString(recognizeOptions.getRecognizeInputType().toString())) .setInterruptCallMediaOperation(recognizeOptions.isInterruptCallMediaOperation()) .setPlayPrompt(playSourceInternal) .setRecognizeOptions(recognizeOptionsInternal) .setOperationContext(recognizeOptions.getOperationContext()); return contentsInternal.recognizeWithResponseAsync(callConnectionId, recognizeRequest, context); } else { return monoError(logger, new UnsupportedOperationException(recognizeOptions.getClass().getName())); } } catch (RuntimeException e) { return monoError(logger, e); } } /** * Cancels all the queued media operations. * @return Void */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Void> cancelAllMediaOperations() { return cancelAllMediaOperationsWithResponse().then(); } /** * Cancels all the queued media operations * @return Response for successful playAll request. 
*/ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<Void>> cancelAllMediaOperationsWithResponse() { return cancelAllMediaOperationsWithResponseInternal(null); } Mono<Response<Void>> cancelAllMediaOperationsWithResponseInternal(Context context) { try { return withContext(contextValue -> { contextValue = context == null ? contextValue : context; return contentsInternal.cancelAllMediaOperationsWithResponseAsync(callConnectionId, contextValue) .onErrorMap(HttpResponseException.class, ErrorConstructorProxy::create); }); } catch (RuntimeException ex) { return monoError(logger, ex); } } Mono<Response<Void>> playWithResponseInternal(PlaySource playSource, List<CommunicationIdentifier> playTo, PlayOptions options, Context context) { try { return withContext(contextValue -> { contextValue = context == null ? contextValue : context; PlayRequest request = getPlayRequest(playSource, playTo, options); return contentsInternal.playWithResponseAsync(callConnectionId, request, contextValue) .onErrorMap(HttpResponseException.class, ErrorConstructorProxy::create); }); } catch (RuntimeException ex) { return monoError(logger, ex); } } PlayRequest getPlayRequest(PlaySource playSource, List<CommunicationIdentifier> playTo, PlayOptions options) { if (playSource instanceof FileSource) { PlaySourceInternal playSourceInternal = getPlaySourceInternal((FileSource) playSource); PlayRequest request = new PlayRequest() .setPlaySourceInfo(playSourceInternal) .setPlayTo( playTo .stream() .map(CommunicationIdentifierConverter::convert) .collect(Collectors.toList())); if (options != null) { request.setPlayOptions(new PlayOptionsInternal().setLoop(options.isLoop())); request.setOperationContext(options.getOperationContext()); } return request; } throw logger.logExceptionAsError(new IllegalArgumentException(playSource.getClass().getCanonicalName())); } private PlaySourceInternal getPlaySourceInternal(FileSource fileSource) { FileSourceInternal fileSourceInternal = new 
FileSourceInternal().setUri(fileSource.getUri()); PlaySourceInternal playSourceInternal = new PlaySourceInternal() .setSourceType(PlaySourceTypeInternal.FILE) .setFileSource(fileSourceInternal) .setPlaySourceId(fileSource.getPlaySourceId()); return playSourceInternal; } }
Are you saying the old code didn't actually run the tasks in parallel? Or was it limited to a parallelism that might have been lower than requested? Because I though I verified the sync tests ran in parallel as expected with the old code, but we mostly run async tests so this might have been a bug.
public static void runTests(PerfTestBase<?>[] tests, boolean sync, int parallel, int durationSeconds, String title) { long endNanoTime = System.nanoTime() + ((long) durationSeconds * 1000000000); long[] lastCompleted = new long[]{0}; Disposable progressStatus = printStatus( "=== " + title + " ===" + System.lineSeparator() + "Current\t\tTotal\t\tAverage", () -> { long totalCompleted = getCompletedOperations(tests); long currentCompleted = totalCompleted - lastCompleted[0]; double averageCompleted = getOperationsPerSecond(tests); lastCompleted[0] = totalCompleted; return String.format("%d\t\t%d\t\t%.2f", currentCompleted, totalCompleted, averageCompleted); }, true, true); try { if (sync) { ForkJoinPool forkJoinPool = new ForkJoinPool(parallel); List<Callable<Integer>> operations = new ArrayList<>(parallel); for (PerfTestBase<?> test : tests) { operations.add(() -> { test.runAll(endNanoTime); return 1; }); } forkJoinPool.invokeAll(operations, (durationSeconds * 1000L) + 100L, TimeUnit.MILLISECONDS); forkJoinPool.shutdown(); } else { Schedulers.onHandleError((t, e) -> { System.err.print(t + " threw exception: "); e.printStackTrace(); System.exit(1); }); Flux.range(0, parallel) .parallel(parallel) .runOn(Schedulers.parallel()) .flatMap(i -> tests[i].runAllAsync(endNanoTime), false, Math.min(parallel, 1000 / parallel), 1) .then() .block(); } } catch (InterruptedException e) { System.err.println("Error occurred when submitting jobs to ForkJoinPool. 
" + System.lineSeparator() + e); e.printStackTrace(System.err); throw new RuntimeException(e); } catch (Exception e) { System.err.println("Error occurred running tests: " + System.lineSeparator() + e); e.printStackTrace(System.err); } finally { progressStatus.dispose(); } System.out.println("=== Results ==="); long totalOperations = getCompletedOperations(tests); if (totalOperations == 0) { throw new IllegalStateException("Zero operations has been completed"); } double operationsPerSecond = getOperationsPerSecond(tests); double secondsPerOperation = 1 / operationsPerSecond; double weightedAverageSeconds = totalOperations / operationsPerSecond; System.out.printf("Completed %,d operations in a weighted-average of %ss (%s ops/s, %s s/op)%n", totalOperations, NumberFormatter.Format(weightedAverageSeconds, 4), NumberFormatter.Format(operationsPerSecond, 4), NumberFormatter.Format(secondsPerOperation, 4)); System.out.println(); }
List<Callable<Integer>> operations = new ArrayList<>(parallel);
public static void runTests(PerfTestBase<?>[] tests, boolean sync, int parallel, int durationSeconds, String title) { long endNanoTime = System.nanoTime() + ((long) durationSeconds * 1000000000); long[] lastCompleted = new long[]{0}; Disposable progressStatus = printStatus( "=== " + title + " ===" + System.lineSeparator() + "Current\t\tTotal\t\tAverage", () -> { long totalCompleted = getCompletedOperations(tests); long currentCompleted = totalCompleted - lastCompleted[0]; double averageCompleted = getOperationsPerSecond(tests); lastCompleted[0] = totalCompleted; return String.format("%d\t\t%d\t\t%.2f", currentCompleted, totalCompleted, averageCompleted); }, true, true); try { if (sync) { ForkJoinPool forkJoinPool = new ForkJoinPool(parallel); List<Callable<Integer>> operations = new ArrayList<>(parallel); for (PerfTestBase<?> test : tests) { operations.add(() -> { test.runAll(endNanoTime); return 1; }); } forkJoinPool.invokeAll(operations, (durationSeconds * 1000L) + 100L, TimeUnit.MILLISECONDS); forkJoinPool.shutdown(); } else { Schedulers.onHandleError((t, e) -> { System.err.print(t + " threw exception: "); e.printStackTrace(); System.exit(1); }); Flux.range(0, parallel) .parallel(parallel) .runOn(Schedulers.parallel()) .flatMap(i -> tests[i].runAllAsync(endNanoTime), false, Math.min(parallel, 1000 / parallel), 1) .then() .block(); } } catch (InterruptedException e) { System.err.println("Error occurred when submitting jobs to ForkJoinPool. 
" + System.lineSeparator() + e); e.printStackTrace(System.err); throw new RuntimeException(e); } catch (Exception e) { System.err.println("Error occurred running tests: " + System.lineSeparator() + e); e.printStackTrace(System.err); } finally { progressStatus.dispose(); } System.out.println("=== Results ==="); long totalOperations = getCompletedOperations(tests); if (totalOperations == 0) { throw new IllegalStateException("Zero operations has been completed"); } double operationsPerSecond = getOperationsPerSecond(tests); double secondsPerOperation = 1 / operationsPerSecond; double weightedAverageSeconds = totalOperations / operationsPerSecond; System.out.printf("Completed %,d operations in a weighted-average of %ss (%s ops/s, %s s/op)%n", totalOperations, NumberFormatter.Format(weightedAverageSeconds, 4), NumberFormatter.Format(operationsPerSecond, 4), NumberFormatter.Format(secondsPerOperation, 4)); System.out.println(); }
class to execute. * @param options the configuration ro run performance test with. * * @throws RuntimeException if the execution fails. */ public static void run(Class<?> testClass, PerfStressOptions options) { System.out.println("=== Options ==="); try { ObjectMapper mapper = new ObjectMapper(); mapper.configure(SerializationFeature.INDENT_OUTPUT, true); mapper.configure(JsonGenerator.Feature.AUTO_CLOSE_TARGET, false); mapper.writeValue(System.out, options); } catch (IOException e) { throw new RuntimeException(e); } System.out.println(); System.out.println(); Disposable setupStatus = printStatus("=== Setup ===", () -> ".", false, false); Disposable cleanupStatus = null; PerfTestBase<?>[] tests = new PerfTestBase<?>[options.getParallel()]; for (int i = 0; i < options.getParallel(); i++) { try { tests[i] = (PerfTestBase<?>) testClass.getConstructor(options.getClass()).newInstance(options); } catch (InstantiationException | IllegalAccessException | IllegalArgumentException | InvocationTargetException | SecurityException | NoSuchMethodException e) { throw new RuntimeException(e); } } try { tests[0].globalSetupAsync().block(); boolean startedPlayback = false; try { Flux.just(tests).flatMap(PerfTestBase::setupAsync).blockLast(); setupStatus.dispose(); if (options.getTestProxies() != null && !options.getTestProxies().isEmpty()) { Disposable recordStatus = printStatus("=== Record and Start Playback ===", () -> ".", false, false); try { ForkJoinPool forkJoinPool = new ForkJoinPool(tests.length); forkJoinPool.submit(() -> { IntStream.range(0, tests.length).parallel().forEach(i -> tests[i].postSetupAsync().block()); }).get(); } catch (InterruptedException | ExecutionException e) { System.err.println("Error occurred when submitting jobs to ForkJoinPool. 
" + System.lineSeparator() + e); e.printStackTrace(System.err); throw new RuntimeException(e); } startedPlayback = true; recordStatus.dispose(); } if (options.getWarmup() > 0) { runTests(tests, options.isSync(), options.getParallel(), options.getWarmup(), "Warmup"); } for (int i = 0; i < options.getIterations(); i++) { String title = "Test"; if (options.getIterations() > 1) { title += " " + (i + 1); } runTests(tests, options.isSync(), options.getParallel(), options.getDuration(), title); } } finally { try { if (startedPlayback) { Disposable playbackStatus = printStatus("=== Stop Playback ===", () -> ".", false, false); Flux.just(tests).flatMap(perfTestBase -> { if (perfTestBase instanceof ApiPerfTestBase) { return ((ApiPerfTestBase<?>) perfTestBase).stopPlaybackAsync(); } else { return Mono.error(new IllegalStateException("Test Proxy not supported.")); } }).blockLast(); playbackStatus.dispose(); } } finally { if (!options.isNoCleanup()) { cleanupStatus = printStatus("=== Cleanup ===", () -> ".", false, false); Flux.just(tests).flatMap(PerfTestBase::cleanupAsync).blockLast(); } } } } finally { if (!options.isNoCleanup()) { if (cleanupStatus == null) { cleanupStatus = printStatus("=== Cleanup ===", () -> ".", false, false); } tests[0].globalCleanupAsync().block(); } } if (cleanupStatus != null) { cleanupStatus.dispose(); } }
class to execute. * @param options the configuration ro run performance test with. * * @throws RuntimeException if the execution fails. */ public static void run(Class<?> testClass, PerfStressOptions options) { System.out.println("=== Options ==="); try { ObjectMapper mapper = new ObjectMapper(); mapper.configure(SerializationFeature.INDENT_OUTPUT, true); mapper.configure(JsonGenerator.Feature.AUTO_CLOSE_TARGET, false); mapper.writeValue(System.out, options); } catch (IOException e) { throw new RuntimeException(e); } System.out.println(); System.out.println(); Disposable setupStatus = printStatus("=== Setup ===", () -> ".", false, false); Disposable cleanupStatus = null; PerfTestBase<?>[] tests = new PerfTestBase<?>[options.getParallel()]; for (int i = 0; i < options.getParallel(); i++) { try { tests[i] = (PerfTestBase<?>) testClass.getConstructor(options.getClass()).newInstance(options); } catch (InstantiationException | IllegalAccessException | IllegalArgumentException | InvocationTargetException | SecurityException | NoSuchMethodException e) { throw new RuntimeException(e); } } try { tests[0].globalSetupAsync().block(); boolean startedPlayback = false; try { Flux.just(tests).flatMap(PerfTestBase::setupAsync).blockLast(); setupStatus.dispose(); if (options.getTestProxies() != null && !options.getTestProxies().isEmpty()) { Disposable recordStatus = printStatus("=== Record and Start Playback ===", () -> ".", false, false); try { ForkJoinPool forkJoinPool = new ForkJoinPool(tests.length); forkJoinPool.submit(() -> { IntStream.range(0, tests.length).parallel().forEach(i -> tests[i].postSetupAsync().block()); }).get(); } catch (InterruptedException | ExecutionException e) { System.err.println("Error occurred when submitting jobs to ForkJoinPool. 
" + System.lineSeparator() + e); e.printStackTrace(System.err); throw new RuntimeException(e); } startedPlayback = true; recordStatus.dispose(); } if (options.getWarmup() > 0) { runTests(tests, options.isSync(), options.getParallel(), options.getWarmup(), "Warmup"); } for (int i = 0; i < options.getIterations(); i++) { String title = "Test"; if (options.getIterations() > 1) { title += " " + (i + 1); } runTests(tests, options.isSync(), options.getParallel(), options.getDuration(), title); } } finally { try { if (startedPlayback) { Disposable playbackStatus = printStatus("=== Stop Playback ===", () -> ".", false, false); Flux.just(tests).flatMap(perfTestBase -> { if (perfTestBase instanceof ApiPerfTestBase) { return ((ApiPerfTestBase<?>) perfTestBase).stopPlaybackAsync(); } else { return Mono.error(new IllegalStateException("Test Proxy not supported.")); } }).blockLast(); playbackStatus.dispose(); } } finally { if (!options.isNoCleanup()) { cleanupStatus = printStatus("=== Cleanup ===", () -> ".", false, false); Flux.just(tests).flatMap(PerfTestBase::cleanupAsync).blockLast(); } } } } finally { if (!options.isNoCleanup()) { if (cleanupStatus == null) { cleanupStatus = printStatus("=== Cleanup ===", () -> ".", false, false); } tests[0].globalCleanupAsync().block(); } } if (cleanupStatus != null) { cleanupStatus.dispose(); } }
if user explicitly mentioned they wanted okhttp, but it's not found on the classpath we should throw rather than return default.
public static HttpClient createInstance(ClientOptions clientOptions) { if (DEFAULT_PROVIDER == null) { throw LOGGER.logExceptionAsError(new IllegalStateException(CANNOT_FIND_HTTP_CLIENT)); } if (clientOptions instanceof HttpClientOptions) { HttpClientOptions httpClientOptions = (HttpClientOptions) clientOptions; String selectedImplementation = httpClientOptions.getHttpClientImplementation(); if (CoreUtils.isNullOrEmpty(selectedImplementation)) { return DEFAULT_PROVIDER.createInstance(httpClientOptions); } else { AVAILABLE_PROVIDERS.getOrDefault(selectedImplementation, DEFAULT_PROVIDER) .createInstance(httpClientOptions); } } return DEFAULT_PROVIDER.createInstance(); }
AVAILABLE_PROVIDERS.getOrDefault(selectedImplementation, DEFAULT_PROVIDER)
public static HttpClient createInstance(ClientOptions clientOptions) { if (DEFAULT_PROVIDER == null) { throw LOGGER.logExceptionAsError(new IllegalStateException(NO_DEFAULT_PROVIDER)); } if (clientOptions instanceof HttpClientOptions) { HttpClientOptions httpClientOptions = (HttpClientOptions) clientOptions; Class<? extends HttpClientProvider> selectedImplementation = httpClientOptions.getHttpClientProvider(); if (selectedImplementation == null && NO_DEFAULT_HTTP_CLIENT_IMPLEMENTATION) { return DEFAULT_PROVIDER.createInstance(httpClientOptions); } else { String implementationName = (selectedImplementation == null) ? DEFAULT_HTTP_CLIENT_IMPLEMENTATION : selectedImplementation.getName(); HttpClientProvider provider = AVAILABLE_PROVIDERS.get(implementationName); if (provider == null) { throw LOGGER.logExceptionAsError( new IllegalStateException(CANNOT_FIND_SPECIFIC_PROVIDER + implementationName)); } else { return provider.createInstance(httpClientOptions); } } } return DEFAULT_PROVIDER.createInstance(); }
class HttpClientProviders { private static final String CANNOT_FIND_HTTP_CLIENT = "A request was made to load the default HttpClient provider " + "but one could not be found on the classpath. If you are using a dependency manager, consider including a " + "dependency on azure-core-http-netty or azure-core-http-okhttp. Depending on your existing dependencies, you " + "have the choice of Netty or OkHttp implementations. Additionally, refer to " + "https: private static final ClientLogger LOGGER = new ClientLogger(HttpClientProviders.class); private static final HttpClientProvider DEFAULT_PROVIDER; private static final Map<String, HttpClientProvider> AVAILABLE_PROVIDERS; static { ServiceLoader<HttpClientProvider> serviceLoader = ServiceLoader.load(HttpClientProvider.class, HttpClientProviders.class.getClassLoader()); AVAILABLE_PROVIDERS = new HashMap<>(); Iterator<HttpClientProvider> it = serviceLoader.iterator(); if (it.hasNext()) { DEFAULT_PROVIDER = it.next(); String defaultProviderName = DEFAULT_PROVIDER.getClass().getName(); AVAILABLE_PROVIDERS.put(defaultProviderName, DEFAULT_PROVIDER); LOGGER.verbose("Using {} as the default HttpClientProvider.", defaultProviderName); } else { DEFAULT_PROVIDER = null; } while (it.hasNext()) { HttpClientProvider additionalProvider = it.next(); String additionalProviderName = additionalProvider.getClass().getName(); AVAILABLE_PROVIDERS.put(additionalProviderName, additionalProvider); LOGGER.verbose("Additional provider found on the classpath: {}", additionalProviderName); } } private HttpClientProviders() { } public static HttpClient createInstance() { return createInstance(null); } }
class HttpClientProviders { private static final String NO_DEFAULT_PROVIDER = "A request was made to load the default HttpClient provider " + "but one could not be found on the classpath. If you are using a dependency manager, consider including a " + "dependency on azure-core-http-netty or azure-core-http-okhttp. Depending on your existing dependencies, you " + "have the choice of Netty or OkHttp implementations. Additionally, refer to " + "https: private static final String CANNOT_FIND_SPECIFIC_PROVIDER = "A request was made to use a specific " + "HttpClientProvider to create an instance of HttpClient but it wasn't found on the classpath. If you're " + "using a dependency manager ensure you're including the dependency that provides the specific " + "implementation. If you're including the specific implementation ensure that the HttpClientProvider service " + "it supplies is being included in the 'META-INF/services' file 'com'azure.core.http.HttpClientProvider'. " + "The requested HttpClientProvider was: "; private static final ClientLogger LOGGER = new ClientLogger(HttpClientProviders.class); private static final HttpClientProvider DEFAULT_PROVIDER; private static final Map<String, HttpClientProvider> AVAILABLE_PROVIDERS; private static final String DEFAULT_HTTP_CLIENT_IMPLEMENTATION; private static final boolean NO_DEFAULT_HTTP_CLIENT_IMPLEMENTATION; static { ServiceLoader<HttpClientProvider> serviceLoader = ServiceLoader.load(HttpClientProvider.class, HttpClientProviders.class.getClassLoader()); AVAILABLE_PROVIDERS = new HashMap<>(); Iterator<HttpClientProvider> it = serviceLoader.iterator(); if (it.hasNext()) { DEFAULT_PROVIDER = it.next(); String defaultProviderName = DEFAULT_PROVIDER.getClass().getName(); AVAILABLE_PROVIDERS.put(defaultProviderName, DEFAULT_PROVIDER); LOGGER.verbose("Using {} as the default HttpClientProvider.", defaultProviderName); } else { DEFAULT_PROVIDER = null; } while (it.hasNext()) { HttpClientProvider additionalProvider = it.next(); 
String additionalProviderName = additionalProvider.getClass().getName(); AVAILABLE_PROVIDERS.put(additionalProviderName, additionalProvider); LOGGER.verbose("Additional provider found on the classpath: {}", additionalProviderName); } DEFAULT_HTTP_CLIENT_IMPLEMENTATION = Configuration.getGlobalConfiguration() .get(PROPERTY_AZURE_HTTP_CLIENT_IMPLEMENTATION); NO_DEFAULT_HTTP_CLIENT_IMPLEMENTATION = CoreUtils.isNullOrEmpty(DEFAULT_HTTP_CLIENT_IMPLEMENTATION); } private HttpClientProviders() { } public static HttpClient createInstance() { return createInstance(null); } }
The latter, it ran in parallel, but the parallelism level wasn't in our control but in the control of however `Stream.parallel` is handled by the JDK. This change explicitly forces the parallel level we configured.
public static void runTests(PerfTestBase<?>[] tests, boolean sync, int parallel, int durationSeconds, String title) { long endNanoTime = System.nanoTime() + ((long) durationSeconds * 1000000000); long[] lastCompleted = new long[]{0}; Disposable progressStatus = printStatus( "=== " + title + " ===" + System.lineSeparator() + "Current\t\tTotal\t\tAverage", () -> { long totalCompleted = getCompletedOperations(tests); long currentCompleted = totalCompleted - lastCompleted[0]; double averageCompleted = getOperationsPerSecond(tests); lastCompleted[0] = totalCompleted; return String.format("%d\t\t%d\t\t%.2f", currentCompleted, totalCompleted, averageCompleted); }, true, true); try { if (sync) { ForkJoinPool forkJoinPool = new ForkJoinPool(parallel); List<Callable<Integer>> operations = new ArrayList<>(parallel); for (PerfTestBase<?> test : tests) { operations.add(() -> { test.runAll(endNanoTime); return 1; }); } forkJoinPool.invokeAll(operations, (durationSeconds * 1000L) + 100L, TimeUnit.MILLISECONDS); forkJoinPool.shutdown(); } else { Schedulers.onHandleError((t, e) -> { System.err.print(t + " threw exception: "); e.printStackTrace(); System.exit(1); }); Flux.range(0, parallel) .parallel(parallel) .runOn(Schedulers.parallel()) .flatMap(i -> tests[i].runAllAsync(endNanoTime), false, Math.min(parallel, 1000 / parallel), 1) .then() .block(); } } catch (InterruptedException e) { System.err.println("Error occurred when submitting jobs to ForkJoinPool. 
" + System.lineSeparator() + e); e.printStackTrace(System.err); throw new RuntimeException(e); } catch (Exception e) { System.err.println("Error occurred running tests: " + System.lineSeparator() + e); e.printStackTrace(System.err); } finally { progressStatus.dispose(); } System.out.println("=== Results ==="); long totalOperations = getCompletedOperations(tests); if (totalOperations == 0) { throw new IllegalStateException("Zero operations has been completed"); } double operationsPerSecond = getOperationsPerSecond(tests); double secondsPerOperation = 1 / operationsPerSecond; double weightedAverageSeconds = totalOperations / operationsPerSecond; System.out.printf("Completed %,d operations in a weighted-average of %ss (%s ops/s, %s s/op)%n", totalOperations, NumberFormatter.Format(weightedAverageSeconds, 4), NumberFormatter.Format(operationsPerSecond, 4), NumberFormatter.Format(secondsPerOperation, 4)); System.out.println(); }
List<Callable<Integer>> operations = new ArrayList<>(parallel);
public static void runTests(PerfTestBase<?>[] tests, boolean sync, int parallel, int durationSeconds, String title) { long endNanoTime = System.nanoTime() + ((long) durationSeconds * 1000000000); long[] lastCompleted = new long[]{0}; Disposable progressStatus = printStatus( "=== " + title + " ===" + System.lineSeparator() + "Current\t\tTotal\t\tAverage", () -> { long totalCompleted = getCompletedOperations(tests); long currentCompleted = totalCompleted - lastCompleted[0]; double averageCompleted = getOperationsPerSecond(tests); lastCompleted[0] = totalCompleted; return String.format("%d\t\t%d\t\t%.2f", currentCompleted, totalCompleted, averageCompleted); }, true, true); try { if (sync) { ForkJoinPool forkJoinPool = new ForkJoinPool(parallel); List<Callable<Integer>> operations = new ArrayList<>(parallel); for (PerfTestBase<?> test : tests) { operations.add(() -> { test.runAll(endNanoTime); return 1; }); } forkJoinPool.invokeAll(operations, (durationSeconds * 1000L) + 100L, TimeUnit.MILLISECONDS); forkJoinPool.shutdown(); } else { Schedulers.onHandleError((t, e) -> { System.err.print(t + " threw exception: "); e.printStackTrace(); System.exit(1); }); Flux.range(0, parallel) .parallel(parallel) .runOn(Schedulers.parallel()) .flatMap(i -> tests[i].runAllAsync(endNanoTime), false, Math.min(parallel, 1000 / parallel), 1) .then() .block(); } } catch (InterruptedException e) { System.err.println("Error occurred when submitting jobs to ForkJoinPool. 
" + System.lineSeparator() + e); e.printStackTrace(System.err); throw new RuntimeException(e); } catch (Exception e) { System.err.println("Error occurred running tests: " + System.lineSeparator() + e); e.printStackTrace(System.err); } finally { progressStatus.dispose(); } System.out.println("=== Results ==="); long totalOperations = getCompletedOperations(tests); if (totalOperations == 0) { throw new IllegalStateException("Zero operations has been completed"); } double operationsPerSecond = getOperationsPerSecond(tests); double secondsPerOperation = 1 / operationsPerSecond; double weightedAverageSeconds = totalOperations / operationsPerSecond; System.out.printf("Completed %,d operations in a weighted-average of %ss (%s ops/s, %s s/op)%n", totalOperations, NumberFormatter.Format(weightedAverageSeconds, 4), NumberFormatter.Format(operationsPerSecond, 4), NumberFormatter.Format(secondsPerOperation, 4)); System.out.println(); }
class to execute. * @param options the configuration ro run performance test with. * * @throws RuntimeException if the execution fails. */ public static void run(Class<?> testClass, PerfStressOptions options) { System.out.println("=== Options ==="); try { ObjectMapper mapper = new ObjectMapper(); mapper.configure(SerializationFeature.INDENT_OUTPUT, true); mapper.configure(JsonGenerator.Feature.AUTO_CLOSE_TARGET, false); mapper.writeValue(System.out, options); } catch (IOException e) { throw new RuntimeException(e); } System.out.println(); System.out.println(); Disposable setupStatus = printStatus("=== Setup ===", () -> ".", false, false); Disposable cleanupStatus = null; PerfTestBase<?>[] tests = new PerfTestBase<?>[options.getParallel()]; for (int i = 0; i < options.getParallel(); i++) { try { tests[i] = (PerfTestBase<?>) testClass.getConstructor(options.getClass()).newInstance(options); } catch (InstantiationException | IllegalAccessException | IllegalArgumentException | InvocationTargetException | SecurityException | NoSuchMethodException e) { throw new RuntimeException(e); } } try { tests[0].globalSetupAsync().block(); boolean startedPlayback = false; try { Flux.just(tests).flatMap(PerfTestBase::setupAsync).blockLast(); setupStatus.dispose(); if (options.getTestProxies() != null && !options.getTestProxies().isEmpty()) { Disposable recordStatus = printStatus("=== Record and Start Playback ===", () -> ".", false, false); try { ForkJoinPool forkJoinPool = new ForkJoinPool(tests.length); forkJoinPool.submit(() -> { IntStream.range(0, tests.length).parallel().forEach(i -> tests[i].postSetupAsync().block()); }).get(); } catch (InterruptedException | ExecutionException e) { System.err.println("Error occurred when submitting jobs to ForkJoinPool. 
" + System.lineSeparator() + e); e.printStackTrace(System.err); throw new RuntimeException(e); } startedPlayback = true; recordStatus.dispose(); } if (options.getWarmup() > 0) { runTests(tests, options.isSync(), options.getParallel(), options.getWarmup(), "Warmup"); } for (int i = 0; i < options.getIterations(); i++) { String title = "Test"; if (options.getIterations() > 1) { title += " " + (i + 1); } runTests(tests, options.isSync(), options.getParallel(), options.getDuration(), title); } } finally { try { if (startedPlayback) { Disposable playbackStatus = printStatus("=== Stop Playback ===", () -> ".", false, false); Flux.just(tests).flatMap(perfTestBase -> { if (perfTestBase instanceof ApiPerfTestBase) { return ((ApiPerfTestBase<?>) perfTestBase).stopPlaybackAsync(); } else { return Mono.error(new IllegalStateException("Test Proxy not supported.")); } }).blockLast(); playbackStatus.dispose(); } } finally { if (!options.isNoCleanup()) { cleanupStatus = printStatus("=== Cleanup ===", () -> ".", false, false); Flux.just(tests).flatMap(PerfTestBase::cleanupAsync).blockLast(); } } } } finally { if (!options.isNoCleanup()) { if (cleanupStatus == null) { cleanupStatus = printStatus("=== Cleanup ===", () -> ".", false, false); } tests[0].globalCleanupAsync().block(); } } if (cleanupStatus != null) { cleanupStatus.dispose(); } }
class to execute. * @param options the configuration ro run performance test with. * * @throws RuntimeException if the execution fails. */ public static void run(Class<?> testClass, PerfStressOptions options) { System.out.println("=== Options ==="); try { ObjectMapper mapper = new ObjectMapper(); mapper.configure(SerializationFeature.INDENT_OUTPUT, true); mapper.configure(JsonGenerator.Feature.AUTO_CLOSE_TARGET, false); mapper.writeValue(System.out, options); } catch (IOException e) { throw new RuntimeException(e); } System.out.println(); System.out.println(); Disposable setupStatus = printStatus("=== Setup ===", () -> ".", false, false); Disposable cleanupStatus = null; PerfTestBase<?>[] tests = new PerfTestBase<?>[options.getParallel()]; for (int i = 0; i < options.getParallel(); i++) { try { tests[i] = (PerfTestBase<?>) testClass.getConstructor(options.getClass()).newInstance(options); } catch (InstantiationException | IllegalAccessException | IllegalArgumentException | InvocationTargetException | SecurityException | NoSuchMethodException e) { throw new RuntimeException(e); } } try { tests[0].globalSetupAsync().block(); boolean startedPlayback = false; try { Flux.just(tests).flatMap(PerfTestBase::setupAsync).blockLast(); setupStatus.dispose(); if (options.getTestProxies() != null && !options.getTestProxies().isEmpty()) { Disposable recordStatus = printStatus("=== Record and Start Playback ===", () -> ".", false, false); try { ForkJoinPool forkJoinPool = new ForkJoinPool(tests.length); forkJoinPool.submit(() -> { IntStream.range(0, tests.length).parallel().forEach(i -> tests[i].postSetupAsync().block()); }).get(); } catch (InterruptedException | ExecutionException e) { System.err.println("Error occurred when submitting jobs to ForkJoinPool. 
" + System.lineSeparator() + e); e.printStackTrace(System.err); throw new RuntimeException(e); } startedPlayback = true; recordStatus.dispose(); } if (options.getWarmup() > 0) { runTests(tests, options.isSync(), options.getParallel(), options.getWarmup(), "Warmup"); } for (int i = 0; i < options.getIterations(); i++) { String title = "Test"; if (options.getIterations() > 1) { title += " " + (i + 1); } runTests(tests, options.isSync(), options.getParallel(), options.getDuration(), title); } } finally { try { if (startedPlayback) { Disposable playbackStatus = printStatus("=== Stop Playback ===", () -> ".", false, false); Flux.just(tests).flatMap(perfTestBase -> { if (perfTestBase instanceof ApiPerfTestBase) { return ((ApiPerfTestBase<?>) perfTestBase).stopPlaybackAsync(); } else { return Mono.error(new IllegalStateException("Test Proxy not supported.")); } }).blockLast(); playbackStatus.dispose(); } } finally { if (!options.isNoCleanup()) { cleanupStatus = printStatus("=== Cleanup ===", () -> ".", false, false); Flux.just(tests).flatMap(PerfTestBase::cleanupAsync).blockLast(); } } } } finally { if (!options.isNoCleanup()) { if (cleanupStatus == null) { cleanupStatus = printStatus("=== Cleanup ===", () -> ".", false, false); } tests[0].globalCleanupAsync().block(); } } if (cleanupStatus != null) { cleanupStatus.dispose(); } }
We should not create the `PersonCrossPartition` container here, because these tests do not use it.
public void setUp() throws ClassNotFoundException {
    // Lazily build the client/template once for the whole test class; later
    // invocations only reset container state.
    if (cosmosTemplate == null) {
        client = CosmosFactory.createCosmosAsyncClient(cosmosClientBuilder);
        personInfo = new CosmosEntityInformation<>(Person.class);
        containerName = personInfo.getContainerName();
        cosmosTemplate = createCosmosTemplate(cosmosConfig, TestConstants.DB_NAME);
    }
    // Fix: dropped PersonCrossPartition.class — this suite never touches that
    // container, so creating/emptying it here was wasted work per test.
    collectionManager.ensureContainersCreatedAndEmpty(cosmosTemplate, Person.class,
        GenIdEntity.class, AuditableEntity.class);
    // Seed the single Person row most assertions read back.
    insertedPerson = cosmosTemplate.insert(Person.class.getSimpleName(), TEST_PERSON,
        new PartitionKey(TEST_PERSON.getLastName()));
}
collectionManager.ensureContainersCreatedAndEmpty(cosmosTemplate, Person.class, PersonCrossPartition.class,
// Per-test setup: initializes shared Cosmos state on first use, resets the
// containers this suite touches, and seeds one Person row used by most tests.
public void setUp() throws ClassNotFoundException {
    // Lazily build the client/template once; subsequent tests reuse them.
    if (cosmosTemplate == null) {
        client = CosmosFactory.createCosmosAsyncClient(cosmosClientBuilder);
        personInfo = new CosmosEntityInformation<>(Person.class);
        containerName = personInfo.getContainerName();
        cosmosTemplate = createCosmosTemplate(cosmosConfig, TestConstants.DB_NAME);
    }
    // Ensure each container exists and starts empty so tests are independent.
    collectionManager.ensureContainersCreatedAndEmpty(cosmosTemplate, Person.class, GenIdEntity.class, AuditableEntity.class);
    // Partition key is the person's last name, matching the value passed on inserts below.
    insertedPerson = cosmosTemplate.insert(Person.class.getSimpleName(), TEST_PERSON, new PartitionKey(TEST_PERSON.getLastName()));
}
class CosmosTemplateIT { private static final Person TEST_PERSON = new Person(ID_1, FIRST_NAME, LAST_NAME, HOBBIES, ADDRESSES, AGE, PASSPORT_IDS_BY_COUNTRY); private static final Person TEST_PERSON_2 = new Person(ID_2, NEW_FIRST_NAME, NEW_LAST_NAME, HOBBIES, ADDRESSES, AGE, PASSPORT_IDS_BY_COUNTRY); private static final Person TEST_PERSON_3 = new Person(ID_3, NEW_FIRST_NAME, NEW_LAST_NAME, HOBBIES, ADDRESSES, AGE, PASSPORT_IDS_BY_COUNTRY); private static final String PRECONDITION_IS_NOT_MET = "is not met"; private static final String WRONG_ETAG = "WRONG_ETAG"; @ClassRule public static final IntegrationTestCollectionManager collectionManager = new IntegrationTestCollectionManager(); private static CosmosAsyncClient client; private static CosmosTemplate cosmosTemplate; private static CosmosEntityInformation<Person, String> personInfo; private static String containerName; private Person insertedPerson; @Autowired private ApplicationContext applicationContext; @Autowired private CosmosClientBuilder cosmosClientBuilder; @Autowired private CosmosConfig cosmosConfig; @Autowired private ResponseDiagnosticsTestUtils responseDiagnosticsTestUtils; @Autowired private AuditableRepository auditableRepository; @Before private CosmosTemplate createCosmosTemplate(CosmosConfig config, String dbName) throws ClassNotFoundException { final CosmosFactory cosmosFactory = new CosmosFactory(client, dbName); final CosmosMappingContext mappingContext = new CosmosMappingContext(); mappingContext.setInitialEntitySet(new EntityScanner(this.applicationContext).scan(Persistent.class)); final MappingCosmosConverter cosmosConverter = new MappingCosmosConverter(mappingContext, null); return new CosmosTemplate(cosmosFactory, config, cosmosConverter); } private void insertPerson(Person person) { cosmosTemplate.insert(person, new PartitionKey(personInfo.getPartitionKeyFieldValue(person))); } @Test public void testInsertDuplicateIdShouldFailWithConflictException() { try { 
cosmosTemplate.insert(Person.class.getSimpleName(), TEST_PERSON, new PartitionKey(personInfo.getPartitionKeyFieldValue(TEST_PERSON))); fail(); } catch (CosmosAccessException ex) { assertThat(ex.getCosmosException()).isInstanceOf(ConflictException.class); assertThat(responseDiagnosticsTestUtils.getCosmosDiagnostics()).isNotNull(); } } @Test(expected = CosmosAccessException.class) public void testInsertShouldFailIfColumnNotAnnotatedWithAutoGenerate() { final Person person = new Person(null, FIRST_NAME, LAST_NAME, HOBBIES, ADDRESSES, AGE, PASSPORT_IDS_BY_COUNTRY); cosmosTemplate.insert(Person.class.getSimpleName(), person, new PartitionKey(person.getLastName())); } @Test public void testInsertShouldGenerateIdIfColumnAnnotatedWithAutoGenerate() { final GenIdEntity entity = new GenIdEntity(null, "foo"); final GenIdEntity insertedEntity = cosmosTemplate.insert(GenIdEntity.class.getSimpleName(), entity, null); assertThat(insertedEntity.getId()).isNotNull(); } @Test public void testFindAll() { final List<Person> result = TestUtils.toList(cosmosTemplate.findAll(Person.class.getSimpleName(), Person.class)); assertThat(result.size()).isEqualTo(1); assertThat(result.get(0)).isEqualTo(TEST_PERSON); assertThat(responseDiagnosticsTestUtils.getCosmosDiagnostics()).isNotNull(); assertThat(responseDiagnosticsTestUtils.getCosmosResponseStatistics()).isNotNull(); assertThat(responseDiagnosticsTestUtils.getCosmosResponseStatistics().getRequestCharge()).isGreaterThan(0); } @Test public void testFindById() { final Person result = cosmosTemplate.findById(Person.class.getSimpleName(), TEST_PERSON.getId(), Person.class); assertEquals(result, TEST_PERSON); assertThat(responseDiagnosticsTestUtils.getCosmosDiagnostics()).isNotNull(); assertThat(responseDiagnosticsTestUtils.getCosmosResponseStatistics()).isNotNull(); assertThat(responseDiagnosticsTestUtils.getCosmosResponseStatistics().getRequestCharge()).isGreaterThan(0); final Person nullResult = 
cosmosTemplate.findById(Person.class.getSimpleName(), NOT_EXIST_ID, Person.class); assertThat(nullResult).isNull(); assertThat(responseDiagnosticsTestUtils.getCosmosDiagnostics()).isNotNull(); } @Test public void testFindByMultiIds() { cosmosTemplate.insert(TEST_PERSON_2, new PartitionKey(personInfo.getPartitionKeyFieldValue(TEST_PERSON_2))); cosmosTemplate.insert(TEST_PERSON_3, new PartitionKey(personInfo.getPartitionKeyFieldValue(TEST_PERSON_3))); final List<Object> ids = Lists.newArrayList(ID_1, ID_2, ID_3); final List<Person> result = TestUtils.toList(cosmosTemplate.findByIds(ids, Person.class, containerName)); assertThat(responseDiagnosticsTestUtils.getCosmosDiagnostics()).isNotNull(); assertThat(responseDiagnosticsTestUtils.getCosmosResponseStatistics()).isNotNull(); assertThat(responseDiagnosticsTestUtils.getCosmosResponseStatistics().getRequestCharge()).isGreaterThan(0); final List<Person> expected = Lists.newArrayList(TEST_PERSON, TEST_PERSON_2, TEST_PERSON_3); assertThat(result.size()).isEqualTo(expected.size()); assertThat(result).containsAll(expected); } @Test public void testUpsertNewDocument() { cosmosTemplate.deleteById(Person.class.getSimpleName(), TEST_PERSON.getId(), new PartitionKey(personInfo.getPartitionKeyFieldValue(TEST_PERSON))); final String firstName = NEW_FIRST_NAME + "_" + UUID.randomUUID(); final Person newPerson = new Person(TEST_PERSON.getId(), firstName, NEW_FIRST_NAME, null, null, AGE, PASSPORT_IDS_BY_COUNTRY); final Person person = cosmosTemplate.upsertAndReturnEntity(Person.class.getSimpleName(), newPerson); assertThat(responseDiagnosticsTestUtils.getCosmosDiagnostics()).isNotNull(); assertThat(responseDiagnosticsTestUtils.getCosmosResponseStatistics()).isNull(); assertEquals(person.getFirstName(), firstName); } @Test public void testUpdateWithReturnEntity() { final Person updated = new Person(TEST_PERSON.getId(), UPDATED_FIRST_NAME, TEST_PERSON.getLastName(), TEST_PERSON.getHobbies(), TEST_PERSON.getShippingAddresses(), AGE, 
PASSPORT_IDS_BY_COUNTRY); updated.set_etag(insertedPerson.get_etag()); final Person updatedPerson = cosmosTemplate.upsertAndReturnEntity(Person.class.getSimpleName(), updated); final Person findPersonById = cosmosTemplate.findById(Person.class.getSimpleName(), updatedPerson.getId(), Person.class); assertEquals(updatedPerson, updated); assertThat(updatedPerson.get_etag()).isEqualTo(findPersonById.get_etag()); } @Test public void testUpdate() { final Person updated = new Person(TEST_PERSON.getId(), UPDATED_FIRST_NAME, TEST_PERSON.getLastName(), TEST_PERSON.getHobbies(), TEST_PERSON.getShippingAddresses(), AGE, PASSPORT_IDS_BY_COUNTRY); updated.set_etag(insertedPerson.get_etag()); final Person person = cosmosTemplate.upsertAndReturnEntity(Person.class.getSimpleName(), updated); assertThat(responseDiagnosticsTestUtils.getCosmosDiagnostics()).isNotNull(); assertThat(responseDiagnosticsTestUtils.getCosmosResponseStatistics()).isNull(); assertEquals(person, updated); } @Test public void testOptimisticLockWhenUpdatingWithWrongEtag() { final Person updated = new Person(TEST_PERSON.getId(), UPDATED_FIRST_NAME, TEST_PERSON.getLastName(), TEST_PERSON.getHobbies(), TEST_PERSON.getShippingAddresses(), AGE, PASSPORT_IDS_BY_COUNTRY); updated.set_etag(WRONG_ETAG); try { cosmosTemplate.upsert(Person.class.getSimpleName(), updated); } catch (CosmosAccessException e) { assertThat(e.getCosmosException()).isNotNull(); final Throwable cosmosClientException = e.getCosmosException(); assertThat(cosmosClientException).isInstanceOf(CosmosException.class); assertThat(cosmosClientException.getMessage()).contains(PRECONDITION_IS_NOT_MET); assertThat(responseDiagnosticsTestUtils.getDiagnostics()).isNotNull(); final Person unmodifiedPerson = cosmosTemplate.findById(Person.class.getSimpleName(), TEST_PERSON.getId(), Person.class); assertThat(unmodifiedPerson.getFirstName()).isEqualTo(insertedPerson.getFirstName()); return; } fail(); } @Test public void testDeleteById() { 
cosmosTemplate.insert(TEST_PERSON_2, null); assertThat(cosmosTemplate.count(Person.class.getSimpleName())).isEqualTo(2); cosmosTemplate.deleteById(Person.class.getSimpleName(), TEST_PERSON.getId(), new PartitionKey(personInfo.getPartitionKeyFieldValue(TEST_PERSON))); assertThat(responseDiagnosticsTestUtils.getCosmosDiagnostics()).isNotNull(); assertThat(responseDiagnosticsTestUtils.getCosmosResponseStatistics()).isNull(); final List<Person> result = TestUtils.toList(cosmosTemplate.findAll(Person.class)); assertThat(responseDiagnosticsTestUtils.getCosmosDiagnostics()).isNotNull(); assertThat(responseDiagnosticsTestUtils.getCosmosResponseStatistics()).isNotNull(); assertThat(responseDiagnosticsTestUtils.getCosmosResponseStatistics().getRequestCharge()).isGreaterThan(0); assertThat(result.size()).isEqualTo(1); assertEquals(result.get(0), TEST_PERSON_2); } @Test public void testDeleteByEntity() { Person insertedPerson = cosmosTemplate.insert(TEST_PERSON_2, new PartitionKey(TEST_PERSON_2.getLastName())); assertThat(cosmosTemplate.count(Person.class.getSimpleName())).isEqualTo(2); cosmosTemplate.deleteEntity(Person.class.getSimpleName(), insertedPerson); assertThat(responseDiagnosticsTestUtils.getCosmosDiagnostics()).isNotNull(); assertThat(responseDiagnosticsTestUtils.getCosmosResponseStatistics()).isNull(); final List<Person> result = TestUtils.toList(cosmosTemplate.findAll(Person.class)); assertThat(responseDiagnosticsTestUtils.getCosmosDiagnostics()).isNotNull(); assertThat(responseDiagnosticsTestUtils.getCosmosResponseStatistics()).isNotNull(); assertThat(responseDiagnosticsTestUtils.getCosmosResponseStatistics().getRequestCharge()).isGreaterThan(0); assertThat(result.size()).isEqualTo(1); assertEquals(result.get(0), TEST_PERSON); } @Test public void testCountByContainer() { final long prevCount = cosmosTemplate.count(containerName); assertThat(prevCount).isEqualTo(1); assertThat(responseDiagnosticsTestUtils.getCosmosDiagnostics()).isNotNull(); 
assertThat(responseDiagnosticsTestUtils.getCosmosResponseStatistics()).isNotNull(); assertThat(responseDiagnosticsTestUtils.getCosmosResponseStatistics().getRequestCharge()).isGreaterThan(0); cosmosTemplate.insert(TEST_PERSON_2, new PartitionKey(personInfo.getPartitionKeyFieldValue(TEST_PERSON_2))); assertThat(responseDiagnosticsTestUtils.getCosmosDiagnostics()).isNotNull(); final long newCount = cosmosTemplate.count(containerName); assertThat(newCount).isEqualTo(2); assertThat(responseDiagnosticsTestUtils.getCosmosDiagnostics()).isNotNull(); assertThat(responseDiagnosticsTestUtils.getCosmosResponseStatistics()).isNotNull(); assertThat(responseDiagnosticsTestUtils.getCosmosResponseStatistics().getRequestCharge()).isGreaterThan(0); } @Test public void testCountByQuery() { cosmosTemplate.insert(TEST_PERSON_2, new PartitionKey(personInfo.getPartitionKeyFieldValue(TEST_PERSON_2))); assertThat(responseDiagnosticsTestUtils.getCosmosDiagnostics()).isNotNull(); assertThat(responseDiagnosticsTestUtils.getCosmosResponseStatistics()).isNull(); final Criteria criteria = Criteria.getInstance(CriteriaType.IS_EQUAL, "firstName", Collections.singletonList(TEST_PERSON_2.getFirstName()), Part.IgnoreCaseType.NEVER); final CosmosQuery query = new CosmosQuery(criteria); final long count = cosmosTemplate.count(query, containerName); assertThat(count).isEqualTo(1); final Criteria criteriaIgnoreCase = Criteria.getInstance(CriteriaType.IS_EQUAL, "firstName", Collections.singletonList(TEST_PERSON_2.getFirstName().toUpperCase()), Part.IgnoreCaseType.ALWAYS); final CosmosQuery queryIgnoreCase = new CosmosQuery(criteriaIgnoreCase); final long countIgnoreCase = cosmosTemplate.count(queryIgnoreCase, containerName); assertThat(countIgnoreCase).isEqualTo(1); assertThat(responseDiagnosticsTestUtils.getCosmosDiagnostics()).isNotNull(); assertThat(responseDiagnosticsTestUtils.getCosmosResponseStatistics()).isNotNull(); 
assertThat(responseDiagnosticsTestUtils.getCosmosResponseStatistics().getRequestCharge()).isGreaterThan(0); } @Test public void testFindAllPageableMultiPages() { cosmosTemplate.insert(TEST_PERSON_2, new PartitionKey(personInfo.getPartitionKeyFieldValue(TEST_PERSON_2))); assertThat(responseDiagnosticsTestUtils.getCosmosDiagnostics()).isNotNull(); assertThat(responseDiagnosticsTestUtils.getCosmosResponseStatistics()).isNull(); final CosmosPageRequest pageRequest = new CosmosPageRequest(0, PAGE_SIZE_1, null); final Page<Person> page1 = cosmosTemplate.findAll(pageRequest, Person.class, containerName); assertThat(page1.getContent().size()).isEqualTo(PAGE_SIZE_1); PageTestUtils.validateNonLastPage(page1, PAGE_SIZE_1); assertThat(responseDiagnosticsTestUtils.getCosmosDiagnostics()).isNotNull(); assertThat(responseDiagnosticsTestUtils.getCosmosResponseStatistics()).isNotNull(); assertThat(responseDiagnosticsTestUtils.getCosmosResponseStatistics().getRequestCharge()).isGreaterThan(0); final Page<Person> page2 = cosmosTemplate.findAll(page1.nextPageable(), Person.class, containerName); assertThat(page2.getContent().size()).isEqualTo(1); PageTestUtils.validateLastPage(page2, PAGE_SIZE_1); assertThat(responseDiagnosticsTestUtils.getCosmosDiagnostics()).isNotNull(); assertThat(responseDiagnosticsTestUtils.getCosmosResponseStatistics()).isNotNull(); assertThat(responseDiagnosticsTestUtils.getCosmosResponseStatistics().getRequestCharge()).isGreaterThan(0); } @Test public void testFindAllPageableMultiPagesPageSizeTwo() { cosmosTemplate.insert(TEST_PERSON_2, new PartitionKey(personInfo.getPartitionKeyFieldValue(TEST_PERSON_2))); cosmosTemplate.insert(TEST_PERSON_3, new PartitionKey(personInfo.getPartitionKeyFieldValue(TEST_PERSON_3))); assertThat(responseDiagnosticsTestUtils.getCosmosDiagnostics()).isNotNull(); assertThat(responseDiagnosticsTestUtils.getCosmosResponseStatistics()).isNull(); final CosmosPageRequest pageRequest = new CosmosPageRequest(0, PAGE_SIZE_2, null); final 
Page<Person> page1 = cosmosTemplate.findAll(pageRequest, Person.class, containerName); final List<Person> resultPage1 = TestUtils.toList(page1); final List<Person> expected = Lists.newArrayList(TEST_PERSON, TEST_PERSON_2); assertThat(resultPage1.size()).isEqualTo(expected.size()); assertThat(resultPage1).containsAll(expected); PageTestUtils.validateNonLastPage(page1, PAGE_SIZE_2); assertThat(responseDiagnosticsTestUtils.getCosmosDiagnostics()).isNotNull(); assertThat(responseDiagnosticsTestUtils.getCosmosResponseStatistics()).isNotNull(); assertThat(responseDiagnosticsTestUtils.getCosmosResponseStatistics().getRequestCharge()).isGreaterThan(0); final Page<Person> page2 = cosmosTemplate.findAll(page1.nextPageable(), Person.class, containerName); final List<Person> resultPage2 = TestUtils.toList(page2); final List<Person> expected2 = Lists.newArrayList(TEST_PERSON_3); assertThat(resultPage2.size()).isEqualTo(expected2.size()); assertThat(resultPage2).containsAll(expected2); PageTestUtils.validateLastPage(page2, PAGE_SIZE_2); assertThat(responseDiagnosticsTestUtils.getCosmosDiagnostics()).isNotNull(); assertThat(responseDiagnosticsTestUtils.getCosmosResponseStatistics()).isNotNull(); assertThat(responseDiagnosticsTestUtils.getCosmosResponseStatistics().getRequestCharge()).isGreaterThan(0); } @Test public void testPaginationQuery() { cosmosTemplate.insert(TEST_PERSON_2, new PartitionKey(personInfo.getPartitionKeyFieldValue(TEST_PERSON_2))); assertThat(responseDiagnosticsTestUtils.getCosmosDiagnostics()).isNotNull(); assertThat(responseDiagnosticsTestUtils.getCosmosResponseStatistics()).isNull(); final Criteria criteria = Criteria.getInstance(CriteriaType.IS_EQUAL, "firstName", Collections.singletonList(FIRST_NAME), Part.IgnoreCaseType.NEVER); final PageRequest pageRequest = new CosmosPageRequest(0, PAGE_SIZE_2, null); final CosmosQuery query = new CosmosQuery(criteria).with(pageRequest); final Page<Person> page = cosmosTemplate.paginationQuery(query, Person.class, 
containerName); assertThat(page.getContent().size()).isEqualTo(1); PageTestUtils.validateLastPage(page, PAGE_SIZE_2); final Criteria criteriaIgnoreCase = Criteria.getInstance(CriteriaType.IS_EQUAL, "firstName", Collections.singletonList(FIRST_NAME.toUpperCase()), Part.IgnoreCaseType.ALWAYS); final CosmosQuery queryIgnoreCase = new CosmosQuery(criteriaIgnoreCase).with(pageRequest); final Page<Person> pageIgnoreCase = cosmosTemplate.paginationQuery(queryIgnoreCase, Person.class, containerName); assertThat(pageIgnoreCase.getContent().size()).isEqualTo(1); PageTestUtils.validateLastPage(pageIgnoreCase, PAGE_SIZE_2); assertThat(responseDiagnosticsTestUtils.getCosmosDiagnostics()).isNotNull(); assertThat(responseDiagnosticsTestUtils.getCosmosResponseStatistics()).isNotNull(); assertThat(responseDiagnosticsTestUtils.getCosmosResponseStatistics().getRequestCharge()).isGreaterThan(0); } @Test public void testFindWithSortAndLimit() { final Person testPerson4 = new Person("id_4", "fred", NEW_LAST_NAME, HOBBIES, ADDRESSES, AGE, PASSPORT_IDS_BY_COUNTRY); final Person testPerson5 = new Person("id_5", "barney", NEW_LAST_NAME, HOBBIES, ADDRESSES, AGE, PASSPORT_IDS_BY_COUNTRY); final Person testPerson6 = new Person("id_6", "george", NEW_LAST_NAME, HOBBIES, ADDRESSES, AGE, PASSPORT_IDS_BY_COUNTRY); insertPerson(testPerson4); insertPerson(testPerson5); insertPerson(testPerson6); final Criteria criteria = Criteria.getInstance(CriteriaType.IS_EQUAL, "lastName", Collections.singletonList(NEW_LAST_NAME), Part.IgnoreCaseType.ALWAYS); final CosmosQuery query = new CosmosQuery(criteria); query.with(Sort.by(Sort.Direction.ASC, "firstName")); final List<Person> result = TestUtils.toList(cosmosTemplate.find(query, Person.class, containerName)); assertThat(result.size()).isEqualTo(3); assertThat(result.get(0).getFirstName()).isEqualTo("barney"); assertThat(result.get(1).getFirstName()).isEqualTo("fred"); assertThat(result.get(2).getFirstName()).isEqualTo("george"); query.withLimit(1); final 
List<Person> resultWithLimit = TestUtils.toList(cosmosTemplate.find(query, Person.class, containerName)); assertThat(resultWithLimit.size()).isEqualTo(1); assertThat(resultWithLimit.get(0).getFirstName()).isEqualTo("barney"); } @Test public void testFindWithOffsetAndLimit() { final Person testPerson4 = new Person("id_4", "fred", NEW_LAST_NAME, HOBBIES, ADDRESSES, AGE, PASSPORT_IDS_BY_COUNTRY); final Person testPerson5 = new Person("id_5", "barney", NEW_LAST_NAME, HOBBIES, ADDRESSES, AGE, PASSPORT_IDS_BY_COUNTRY); final Person testPerson6 = new Person("id_6", "george", NEW_LAST_NAME, HOBBIES, ADDRESSES, AGE, PASSPORT_IDS_BY_COUNTRY); insertPerson(testPerson4); insertPerson(testPerson5); insertPerson(testPerson6); final Criteria criteria = Criteria.getInstance(CriteriaType.IS_EQUAL, "lastName", Collections.singletonList(NEW_LAST_NAME), Part.IgnoreCaseType.ALWAYS); final CosmosQuery query = new CosmosQuery(criteria); query.with(Sort.by(Sort.Direction.ASC, "firstName")); final List<Person> result = TestUtils.toList(cosmosTemplate.find(query, Person.class, containerName)); assertThat(result.size()).isEqualTo(3); assertThat(result.get(0).getFirstName()).isEqualTo("barney"); assertThat(result.get(1).getFirstName()).isEqualTo("fred"); assertThat(result.get(2).getFirstName()).isEqualTo("george"); query.withOffsetAndLimit(1, 1); final List<Person> resultWithLimit = TestUtils.toList(cosmosTemplate.find(query, Person.class, containerName)); assertThat(resultWithLimit.size()).isEqualTo(1); assertThat(resultWithLimit.get(0).getFirstName()).isEqualTo("fred"); } @Test public void testFindAllWithPageableAndSort() { cosmosTemplate.insert(TEST_PERSON_2, new PartitionKey(personInfo.getPartitionKeyFieldValue(TEST_PERSON_2))); cosmosTemplate.insert(TEST_PERSON_3, new PartitionKey(personInfo.getPartitionKeyFieldValue(TEST_PERSON_3))); assertThat(responseDiagnosticsTestUtils.getCosmosDiagnostics()).isNotNull(); 
assertThat(responseDiagnosticsTestUtils.getCosmosResponseStatistics()).isNull(); final Sort sort = Sort.by(Sort.Direction.DESC, "firstName"); final PageRequest pageRequest = new CosmosPageRequest(0, PAGE_SIZE_3, null, sort); final Page<Person> page = cosmosTemplate.findAll(pageRequest, Person.class, containerName); assertThat(page.getContent().size()).isEqualTo(3); PageTestUtils.validateLastPage(page, PAGE_SIZE_3); final List<Person> result = page.getContent(); assertThat(result.get(0).getFirstName()).isEqualTo(NEW_FIRST_NAME); assertThat(result.get(1).getFirstName()).isEqualTo(NEW_FIRST_NAME); assertThat(result.get(2).getFirstName()).isEqualTo(FIRST_NAME); assertThat(responseDiagnosticsTestUtils.getCosmosDiagnostics()).isNotNull(); assertThat(responseDiagnosticsTestUtils.getCosmosResponseStatistics()).isNotNull(); assertThat(responseDiagnosticsTestUtils.getCosmosResponseStatistics().getRequestCharge()).isGreaterThan(0); } @Test public void testFindAllWithTwoPagesAndVerifySortOrder() { final Person testPerson4 = new Person("id_4", "barney", NEW_LAST_NAME, HOBBIES, ADDRESSES, AGE, PASSPORT_IDS_BY_COUNTRY); final Person testPerson5 = new Person("id_5", "fred", NEW_LAST_NAME, HOBBIES, ADDRESSES, AGE, PASSPORT_IDS_BY_COUNTRY); cosmosTemplate.insert(TEST_PERSON_2, new PartitionKey(personInfo.getPartitionKeyFieldValue(TEST_PERSON_2))); cosmosTemplate.insert(TEST_PERSON_3, new PartitionKey(personInfo.getPartitionKeyFieldValue(TEST_PERSON_3))); cosmosTemplate.insert(testPerson4, new PartitionKey(personInfo.getPartitionKeyFieldValue(testPerson4))); cosmosTemplate.insert(testPerson5, new PartitionKey(personInfo.getPartitionKeyFieldValue(testPerson5))); final Sort sort = Sort.by(Sort.Direction.ASC, "firstName"); final PageRequest pageRequest = new CosmosPageRequest(0, PAGE_SIZE_3, null, sort); final Page<Person> firstPage = cosmosTemplate.findAll(pageRequest, Person.class, containerName); assertThat(firstPage.getContent().size()).isEqualTo(3); 
PageTestUtils.validateNonLastPage(firstPage, firstPage.getContent().size()); final List<Person> firstPageResults = firstPage.getContent(); assertThat(firstPageResults.get(0).getFirstName()).isEqualTo(testPerson4.getFirstName()); assertThat(firstPageResults.get(1).getFirstName()).isEqualTo(FIRST_NAME); assertThat(firstPageResults.get(2).getFirstName()).isEqualTo(testPerson5.getFirstName()); final Page<Person> secondPage = cosmosTemplate.findAll(firstPage.nextPageable(), Person.class, containerName); assertThat(secondPage.getContent().size()).isEqualTo(2); PageTestUtils.validateLastPage(secondPage, PAGE_SIZE_3); final List<Person> secondPageResults = secondPage.getContent(); assertThat(secondPageResults.get(0).getFirstName()).isEqualTo(NEW_FIRST_NAME); assertThat(secondPageResults.get(1).getFirstName()).isEqualTo(NEW_FIRST_NAME); } @Test public void testExists() { final Criteria criteria = Criteria.getInstance(CriteriaType.IS_EQUAL, "firstName", Collections.singletonList(TEST_PERSON.getFirstName()), Part.IgnoreCaseType.NEVER); final CosmosQuery query = new CosmosQuery(criteria); final Boolean exists = cosmosTemplate.exists(query, Person.class, containerName); assertThat(exists).isTrue(); final Criteria criteriaIgnoreCase = Criteria.getInstance(CriteriaType.IS_EQUAL, "firstName", Collections.singletonList(TEST_PERSON.getFirstName().toUpperCase()), Part.IgnoreCaseType.ALWAYS); final CosmosQuery queryIgnoreCase = new CosmosQuery(criteriaIgnoreCase); final Boolean existsIgnoreCase = cosmosTemplate.exists(queryIgnoreCase, Person.class, containerName); assertThat(existsIgnoreCase).isTrue(); assertThat(responseDiagnosticsTestUtils.getCosmosDiagnostics()).isNotNull(); assertThat(responseDiagnosticsTestUtils.getCosmosResponseStatistics()).isNotNull(); assertThat(responseDiagnosticsTestUtils.getCosmosResponseStatistics().getRequestCharge()).isGreaterThan(0); } @Test public void testArrayContainsCriteria() { Criteria hasHobby = Criteria.getInstance(CriteriaType.ARRAY_CONTAINS, 
"hobbies", Collections.singletonList(HOBBY1), Part.IgnoreCaseType.NEVER); List<Person> people = TestUtils.toList(cosmosTemplate.find(new CosmosQuery(hasHobby), Person.class, containerName)); assertThat(people).containsExactly(TEST_PERSON); } @Test public void testContainsCriteria() { cosmosTemplate.insert(TEST_PERSON_2, new PartitionKey(personInfo.getPartitionKeyFieldValue(TEST_PERSON_2))); cosmosTemplate.insert(TEST_PERSON_3, new PartitionKey(personInfo.getPartitionKeyFieldValue(TEST_PERSON_3))); Person TEST_PERSON_4 = new Person("id-4", "NEW_FIRST_NAME", NEW_LAST_NAME, HOBBIES, ADDRESSES, AGE, PASSPORT_IDS_BY_COUNTRY); cosmosTemplate.insert(TEST_PERSON_4, new PartitionKey(personInfo.getPartitionKeyFieldValue(TEST_PERSON_4))); Criteria containsCaseSensitive = Criteria.getInstance(CriteriaType.CONTAINING, "firstName", Collections.singletonList("first"), Part.IgnoreCaseType.NEVER); List<Person> people = TestUtils.toList(cosmosTemplate.find(new CosmosQuery(containsCaseSensitive), Person.class, containerName)); assertThat(people).containsExactly(TEST_PERSON, TEST_PERSON_2, TEST_PERSON_3); Criteria containsNotCaseSensitive = Criteria.getInstance(CriteriaType.CONTAINING, "firstName", Collections.singletonList("first"), Part.IgnoreCaseType.ALWAYS); List<Person> people2 = TestUtils.toList(cosmosTemplate.find(new CosmosQuery(containsNotCaseSensitive), Person.class, containerName)); assertThat(people2).containsExactly(TEST_PERSON, TEST_PERSON_2, TEST_PERSON_3, TEST_PERSON_4); } @Test public void testContainsCriteria2() { cosmosTemplate.insert(TEST_PERSON_2, new PartitionKey(personInfo.getPartitionKeyFieldValue(TEST_PERSON_2))); cosmosTemplate.insert(TEST_PERSON_3, new PartitionKey(personInfo.getPartitionKeyFieldValue(TEST_PERSON_3))); Criteria containsCaseSensitive = Criteria.getInstance(CriteriaType.CONTAINING, "id", Collections.singletonList("1"), Part.IgnoreCaseType.NEVER); List<Person> people = TestUtils.toList(cosmosTemplate.find(new CosmosQuery(containsCaseSensitive), 
Person.class, containerName)); assertThat(people).containsExactly(TEST_PERSON); Criteria containsCaseSensitive2 = Criteria.getInstance(CriteriaType.CONTAINING, "id", Collections.singletonList("2"), Part.IgnoreCaseType.NEVER); List<Person> people2 = TestUtils.toList(cosmosTemplate.find(new CosmosQuery(containsCaseSensitive2), Person.class, containerName)); assertThat(people2).containsExactly(TEST_PERSON_2); Criteria containsCaseSensitive3 = Criteria.getInstance(CriteriaType.CONTAINING, "id", Collections.singletonList("3"), Part.IgnoreCaseType.NEVER); List<Person> people3 = TestUtils.toList(cosmosTemplate.find(new CosmosQuery(containsCaseSensitive3), Person.class, containerName)); assertThat(people3).containsExactly(TEST_PERSON_3); } @Test public void testNotContainsCriteria() { cosmosTemplate.insert(TEST_PERSON_2, new PartitionKey(personInfo.getPartitionKeyFieldValue(TEST_PERSON_2))); cosmosTemplate.insert(TEST_PERSON_3, new PartitionKey(personInfo.getPartitionKeyFieldValue(TEST_PERSON_3))); Person TEST_PERSON_4 = new Person("id-4", "NEW_FIRST_NAME", NEW_LAST_NAME, HOBBIES, ADDRESSES, AGE, PASSPORT_IDS_BY_COUNTRY); cosmosTemplate.insert(TEST_PERSON_4, new PartitionKey(personInfo.getPartitionKeyFieldValue(TEST_PERSON_4))); Criteria notContainsCaseSensitive = Criteria.getInstance(CriteriaType.NOT_CONTAINING, "firstName", Collections.singletonList("li"), Part.IgnoreCaseType.NEVER); List<Person> people = TestUtils.toList(cosmosTemplate.find(new CosmosQuery(notContainsCaseSensitive), Person.class, containerName)); assertThat(people).containsExactly(TEST_PERSON_2, TEST_PERSON_3, TEST_PERSON_4); Criteria notContainsNotCaseSensitive = Criteria.getInstance(CriteriaType.NOT_CONTAINING, "firstName", Collections.singletonList("new"), Part.IgnoreCaseType.ALWAYS); List<Person> people2 = TestUtils.toList(cosmosTemplate.find(new CosmosQuery(notContainsNotCaseSensitive), Person.class, containerName)); assertThat(people2).containsExactly(TEST_PERSON); } @Test public void 
testNotContainsCriteria2() {
        // NOT_CONTAINING on id: excluding each single-character substring drops exactly one document.
        cosmosTemplate.insert(TEST_PERSON_2, new PartitionKey(personInfo.getPartitionKeyFieldValue(TEST_PERSON_2)));
        cosmosTemplate.insert(TEST_PERSON_3, new PartitionKey(personInfo.getPartitionKeyFieldValue(TEST_PERSON_3)));
        Criteria notContainsCaseSensitive = Criteria.getInstance(CriteriaType.NOT_CONTAINING, "id",
            Collections.singletonList("1"), Part.IgnoreCaseType.NEVER);
        List<Person> people = TestUtils.toList(cosmosTemplate.find(new CosmosQuery(notContainsCaseSensitive), Person.class, containerName));
        assertThat(people).containsExactly(TEST_PERSON_2, TEST_PERSON_3);
        Criteria notContainsCaseSensitive2 = Criteria.getInstance(CriteriaType.NOT_CONTAINING, "id",
            Collections.singletonList("2"), Part.IgnoreCaseType.NEVER);
        List<Person> people2 = TestUtils.toList(cosmosTemplate.find(new CosmosQuery(notContainsCaseSensitive2), Person.class, containerName));
        assertThat(people2).containsExactly(TEST_PERSON, TEST_PERSON_3);
        Criteria notContainsCaseSensitive3 = Criteria.getInstance(CriteriaType.NOT_CONTAINING, "id",
            Collections.singletonList("3"), Part.IgnoreCaseType.NEVER);
        List<Person> people3 = TestUtils.toList(cosmosTemplate.find(new CosmosQuery(notContainsCaseSensitive3), Person.class, containerName));
        assertThat(people3).containsExactly(TEST_PERSON, TEST_PERSON_2);
    }

    // IS_NOT_NULL on lastName finds the single seeded person.
    @Test
    public void testIsNotNullCriteriaCaseSensitive() {
        Criteria hasLastName = Criteria.getInstance(CriteriaType.IS_NOT_NULL, "lastName",
            Collections.emptyList(), Part.IgnoreCaseType.ALWAYS);
        List<Person> people = TestUtils.toList(cosmosTemplate.find(new CosmosQuery(hasLastName), Person.class, containerName));
        assertThat(people).containsExactly(TEST_PERSON);
    }

    // STARTS_WITH with ignore-case: the upper-cased first name still matches.
    @Test
    public void testStartsWithCriteriaCaseSensitive() {
        Criteria nameStartsWith = Criteria.getInstance(CriteriaType.STARTS_WITH, "firstName",
            Collections.singletonList(TEST_PERSON.getFirstName().toUpperCase()), Part.IgnoreCaseType.ALWAYS);
        List<Person> people = TestUtils.toList(cosmosTemplate.find(new
            CosmosQuery(nameStartsWith), Person.class, containerName));
        assertThat(people).containsExactly(TEST_PERSON);
    }

    // IS_EQUAL with ignore-case against the upper-cased first name.
    @Test
    public void testIsEqualCriteriaCaseSensitive() {
        Criteria nameStartsWith = Criteria.getInstance(CriteriaType.IS_EQUAL, "firstName",
            Collections.singletonList(TEST_PERSON.getFirstName().toUpperCase()), Part.IgnoreCaseType.ALWAYS);
        List<Person> people = TestUtils.toList(cosmosTemplate.find(new CosmosQuery(nameStartsWith), Person.class, containerName));
        assertThat(people).containsExactly(TEST_PERSON);
    }

    // STRING_EQUALS with ignore-case against the upper-cased first name.
    @Test
    public void testStringEqualsCriteriaCaseSensitive() {
        Criteria nameStartsWith = Criteria.getInstance(CriteriaType.STRING_EQUALS, "firstName",
            Collections.singletonList(TEST_PERSON.getFirstName().toUpperCase()), Part.IgnoreCaseType.ALWAYS);
        List<Person> people = TestUtils.toList(cosmosTemplate.find(new CosmosQuery(nameStartsWith), Person.class, containerName));
        assertThat(people).containsExactly(TEST_PERSON);
    }

    // BETWEEN on age using an inclusive [AGE-1, AGE+1] range.
    @Test
    public void testBetweenCriteria() {
        Criteria ageBetween = Criteria.getInstance(CriteriaType.BETWEEN, "age",
            Arrays.asList(AGE - 1, AGE + 1), Part.IgnoreCaseType.NEVER);
        List<Person> people = TestUtils.toList(cosmosTemplate.find(new CosmosQuery(ageBetween), Person.class, containerName));
        assertThat(people).containsExactly(TEST_PERSON);
    }

    // IS_EQUAL against a nested property path (bracketed index + quoted key).
    @Test
    public void testFindWithEqualCriteriaContainingNestedProperty() {
        String postalCode = ADDRESSES.get(0).getPostalCode();
        String subjectWithNestedProperty = "shippingAddresses[0]['postalCode']";
        Criteria criteria = Criteria.getInstance(CriteriaType.IS_EQUAL, subjectWithNestedProperty,
            Collections.singletonList(postalCode), Part.IgnoreCaseType.NEVER);
        List<Person> people = TestUtils.toList(cosmosTemplate.find(new CosmosQuery(criteria), Person.class, containerName));
        assertThat(people).containsExactly(TEST_PERSON);
    }

    // runQuery with a property path whose map key contains spaces.
    @Test
    public void testRunQueryWithEqualCriteriaContainingSpaces() {
        String usaPassportId = PASSPORT_IDS_BY_COUNTRY.get("United States of America");
        String subjectWithSpaces =
"passportIdsByCountry['United States of America']"; Criteria criteria = Criteria.getInstance(CriteriaType.IS_EQUAL, subjectWithSpaces, Collections.singletonList(usaPassportId), Part.IgnoreCaseType.NEVER); final SqlQuerySpec sqlQuerySpec = new FindQuerySpecGenerator().generateCosmos(new CosmosQuery(criteria)); List<Person> people = TestUtils.toList(cosmosTemplate.runQuery(sqlQuerySpec, Person.class, Person.class)); assertThat(people).containsExactly(TEST_PERSON); } @Test public void testRunQueryWithSimpleReturnType() { Criteria ageBetween = Criteria.getInstance(CriteriaType.BETWEEN, "age", Arrays.asList(AGE - 1, AGE + 1), Part.IgnoreCaseType.NEVER); final SqlQuerySpec sqlQuerySpec = new FindQuerySpecGenerator().generateCosmos(new CosmosQuery(ageBetween)); List<Person> people = TestUtils.toList(cosmosTemplate.runQuery(sqlQuerySpec, Person.class, Person.class)); assertThat(people).containsExactly(TEST_PERSON); } @Test public void testRunQueryWithReturnTypeContainingLocalDateTime() { final AuditableEntity entity = new AuditableEntity(); entity.setId(UUID.randomUUID().toString()); auditableRepository.save(entity); Criteria equals = Criteria.getInstance(CriteriaType.IS_EQUAL, "id", Collections.singletonList(entity.getId()), Part.IgnoreCaseType.NEVER); final SqlQuerySpec sqlQuerySpec = new FindQuerySpecGenerator().generateCosmos(new CosmosQuery(equals)); List<AuditableEntity> results = TestUtils.toList(cosmosTemplate.runQuery(sqlQuerySpec, AuditableEntity.class, AuditableEntity.class)); assertEquals(results.size(), 1); AuditableEntity foundEntity = results.get(0); assertEquals(entity.getId(), foundEntity.getId()); assertNotNull(foundEntity.getCreatedDate()); assertNotNull(foundEntity.getLastModifiedByDate()); } @Test public void testSliceQuery() { cosmosTemplate.insert(TEST_PERSON_2, new PartitionKey(personInfo.getPartitionKeyFieldValue(TEST_PERSON_2))); assertThat(responseDiagnosticsTestUtils.getCosmosDiagnostics()).isNotNull(); 
assertThat(responseDiagnosticsTestUtils.getCosmosResponseStatistics()).isNull(); final Criteria criteria = Criteria.getInstance(CriteriaType.IS_EQUAL, "firstName", Collections.singletonList(FIRST_NAME), Part.IgnoreCaseType.NEVER); final PageRequest pageRequest = new CosmosPageRequest(0, PAGE_SIZE_2, null); final CosmosQuery query = new CosmosQuery(criteria).with(pageRequest); final Slice<Person> slice = cosmosTemplate.sliceQuery(query, Person.class, containerName); assertThat(slice.getContent().size()).isEqualTo(1); assertThat(responseDiagnosticsTestUtils.getCosmosDiagnostics()).isNotNull(); assertThat(responseDiagnosticsTestUtils.getCosmosResponseStatistics()).isNotNull(); assertThat(responseDiagnosticsTestUtils.getCosmosResponseStatistics().getRequestCharge()).isGreaterThan(0); } @Test public void testRunSliceQuery() { cosmosTemplate.insert(TEST_PERSON_2, new PartitionKey(personInfo.getPartitionKeyFieldValue(TEST_PERSON_2))); assertThat(responseDiagnosticsTestUtils.getCosmosDiagnostics()).isNotNull(); assertThat(responseDiagnosticsTestUtils.getCosmosResponseStatistics()).isNull(); final Criteria criteria = Criteria.getInstance(CriteriaType.IS_EQUAL, "firstName", Collections.singletonList(FIRST_NAME), Part.IgnoreCaseType.NEVER); final PageRequest pageRequest = new CosmosPageRequest(0, PAGE_SIZE_2, null); final SqlQuerySpec sqlQuerySpec = new FindQuerySpecGenerator().generateCosmos(new CosmosQuery(criteria)); final Slice<Person> slice = cosmosTemplate.runSliceQuery(sqlQuerySpec, pageRequest, Person.class, Person.class); assertThat(slice.getContent().size()).isEqualTo(1); assertThat(responseDiagnosticsTestUtils.getCosmosDiagnostics()).isNotNull(); assertThat(responseDiagnosticsTestUtils.getCosmosResponseStatistics()).isNotNull(); assertThat(responseDiagnosticsTestUtils.getCosmosResponseStatistics().getRequestCharge()).isGreaterThan(0); } @Test public void createWithAutoscale() throws ClassNotFoundException { final CosmosEntityInformation<AutoScaleSample, String> 
autoScaleSampleInfo = new CosmosEntityInformation<>(AutoScaleSample.class);
        CosmosContainerProperties containerProperties = cosmosTemplate.createContainerIfNotExists(autoScaleSampleInfo);
        assertNotNull(containerProperties);
        ThroughputResponse throughput = client.getDatabase(TestConstants.DB_NAME)
            .getContainer(autoScaleSampleInfo.getContainerName())
            .readThroughput()
            .block();
        assertNotNull(throughput);
        assertEquals(Integer.parseInt(TestConstants.AUTOSCALE_MAX_THROUGHPUT),
            throughput.getProperties().getAutoscaleMaxThroughput());
    }

    /**
     * Creates a fresh database with database-level manual throughput and verifies the
     * configured request units are reported back by the service.
     */
    @Test
    public void createDatabaseWithThroughput() throws ClassNotFoundException {
        final String configuredThroughputDbName = TestConstants.DB_NAME + "-configured-throughput";
        deleteDatabaseIfExists(configuredThroughputDbName);
        Integer expectedRequestUnits = 700;
        final CosmosConfig config = CosmosConfig.builder()
            .enableDatabaseThroughput(false, expectedRequestUnits)
            .build();
        final CosmosTemplate configuredThroughputCosmosTemplate = createCosmosTemplate(config, configuredThroughputDbName);
        final CosmosEntityInformation<Person, String> personInfo = new CosmosEntityInformation<>(Person.class);
        configuredThroughputCosmosTemplate.createContainerIfNotExists(personInfo);
        final CosmosAsyncDatabase database = client.getDatabase(configuredThroughputDbName);
        final ThroughputResponse response = database.readThroughput().block();
        assertEquals(expectedRequestUnits, response.getProperties().getManualThroughput());
    }

    /**
     * Verifies that a template built with maxDegreeOfParallelism(20) retains that value.
     * The count call simply exercises the configured template; its result is not asserted.
     */
    @Test
    public void queryWithMaxDegreeOfParallelism() throws ClassNotFoundException {
        final CosmosConfig config = CosmosConfig.builder()
            .maxDegreeOfParallelism(20)
            .build();
        final CosmosTemplate maxDegreeOfParallelismCosmosTemplate = createCosmosTemplate(config, TestConstants.DB_NAME);
        final Criteria criteria = Criteria.getInstance(CriteriaType.IS_EQUAL, "firstName",
            Collections.singletonList(TEST_PERSON.getFirstName()), Part.IgnoreCaseType.NEVER);
        final CosmosQuery query = new CosmosQuery(criteria);
        final long count = maxDegreeOfParallelismCosmosTemplate.count(query, containerName);
        // Fixed: JUnit assertEquals takes (expected, actual) — expected value goes first.
        assertEquals(20, (int) ReflectionTestUtils.getField(maxDegreeOfParallelismCosmosTemplate, "maxDegreeOfParallelism"));
    }

    /**
     * Verifies that a template built with enableQueryMetrics(true) retains the flag.
     * Renamed from queryDatabaseWithQueryMerticsEnabled (typo: "Mertics" -> "Metrics").
     */
    @Test
    public void queryDatabaseWithQueryMetricsEnabled() throws ClassNotFoundException {
        final CosmosConfig config = CosmosConfig.builder()
            .enableQueryMetrics(true)
            .build();
        final CosmosTemplate queryMetricsEnabledCosmosTemplate = createCosmosTemplate(config, TestConstants.DB_NAME);
        final Criteria criteria = Criteria.getInstance(CriteriaType.IS_EQUAL, "firstName",
            Collections.singletonList(TEST_PERSON.getFirstName()), Part.IgnoreCaseType.NEVER);
        final CosmosQuery query = new CosmosQuery(criteria);
        final long count = queryMetricsEnabledCosmosTemplate.count(query, containerName);
        // Fixed: expected value first.
        assertEquals(true, (boolean) ReflectionTestUtils.getField(queryMetricsEnabledCosmosTemplate, "queryMetricsEnabled"));
    }

    /**
     * Reflects into the client builder to verify the Spring Data Cosmos user-agent suffix
     * (module marker and project version) is applied.
     */
    @Test
    public void userAgentSpringDataCosmosSuffix() throws NoSuchMethodException, InvocationTargetException, IllegalAccessException {
        // getUserAgentSuffix is not public API, hence the reflective access.
        Method getUserAgentSuffix = CosmosClientBuilder.class.getDeclaredMethod("getUserAgentSuffix");
        getUserAgentSuffix.setAccessible(true);
        String userAgentSuffix = (String) getUserAgentSuffix.invoke(cosmosClientBuilder);
        assertThat(userAgentSuffix).contains(Constants.USER_AGENT_SUFFIX);
        assertThat(userAgentSuffix).contains(PropertyLoader.getProjectVersion());
    }

    /**
     * Best-effort cleanup helper: deletes the named database, tolerating only 404 (not found).
     */
    private void deleteDatabaseIfExists(String dbName) {
        CosmosAsyncDatabase database = client.getDatabase(dbName);
        try {
            database.delete().block();
        } catch (CosmosException ex) {
            // Fixed: expected value first.
            assertEquals(404, ex.getStatusCode());
        }
    }
}
// Integration tests for CosmosTemplate CRUD, query, paging and diagnostics behavior.
class CosmosTemplateIT {
    // Three fixed fixture persons sharing hobbies/addresses/age; TEST_PERSON is inserted per test.
    private static final Person TEST_PERSON = new Person(ID_1, FIRST_NAME, LAST_NAME, HOBBIES, ADDRESSES, AGE, PASSPORT_IDS_BY_COUNTRY);
    private static final Person TEST_PERSON_2 = new Person(ID_2, NEW_FIRST_NAME, NEW_LAST_NAME, HOBBIES, ADDRESSES, AGE, PASSPORT_IDS_BY_COUNTRY);
    private static final Person TEST_PERSON_3 = new Person(ID_3, NEW_FIRST_NAME, NEW_LAST_NAME, HOBBIES, ADDRESSES, AGE, PASSPORT_IDS_BY_COUNTRY);
    // Fragment of the precondition-failure message asserted in the optimistic-lock test.
    private static final String PRECONDITION_IS_NOT_MET = "is not met";
    private static final String WRONG_ETAG = "WRONG_ETAG";
    @ClassRule
    public static final IntegrationTestCollectionManager collectionManager = new IntegrationTestCollectionManager();
    private static CosmosAsyncClient client;
    private static CosmosTemplate cosmosTemplate;
    private static CosmosEntityInformation<Person, String> personInfo;
    private static String containerName;
    // Result of inserting TEST_PERSON in setup; carries the etag used by the update tests.
    private Person insertedPerson;
    @Autowired
    private ApplicationContext applicationContext;
    @Autowired
    private CosmosClientBuilder cosmosClientBuilder;
    @Autowired
    private CosmosConfig cosmosConfig;
    @Autowired
    private ResponseDiagnosticsTestUtils responseDiagnosticsTestUtils;
    @Autowired
    private AuditableRepository auditableRepository;
    // NOTE(review): @Before here annotates a private non-void factory method, which JUnit would reject;
    // the actual setUp method body appears to be elided from this view of the file — confirm against the full source.
    @Before
    // Builds a CosmosTemplate against the given database using the scanned entity set.
    private CosmosTemplate createCosmosTemplate(CosmosConfig config, String dbName) throws ClassNotFoundException {
        final CosmosFactory cosmosFactory = new CosmosFactory(client, dbName);
        final CosmosMappingContext mappingContext = new CosmosMappingContext();
        mappingContext.setInitialEntitySet(new EntityScanner(this.applicationContext).scan(Persistent.class));
        final MappingCosmosConverter cosmosConverter = new MappingCosmosConverter(mappingContext, null);
        return new CosmosTemplate(cosmosFactory, config, cosmosConverter);
    }

    // Inserts a person using its entity-derived partition key.
    private void insertPerson(Person person) {
        cosmosTemplate.insert(person, new PartitionKey(personInfo.getPartitionKeyFieldValue(person)));
    }

    // Re-inserting the already-seeded TEST_PERSON must surface a ConflictException.
    @Test
    public void testInsertDuplicateIdShouldFailWithConflictException() {
        try {
            cosmosTemplate.insert(Person.class.getSimpleName(), TEST_PERSON,
                new PartitionKey(personInfo.getPartitionKeyFieldValue(TEST_PERSON)));
            fail();
        } catch (CosmosAccessException ex) {
            assertThat(ex.getCosmosException()).isInstanceOf(ConflictException.class);
            assertThat(responseDiagnosticsTestUtils.getCosmosDiagnostics()).isNotNull();
        }
    }

    // A null id without auto-generation must be rejected.
    @Test(expected = CosmosAccessException.class)
    public void testInsertShouldFailIfColumnNotAnnotatedWithAutoGenerate() {
        final Person person = new Person(null, FIRST_NAME, LAST_NAME, HOBBIES, ADDRESSES, AGE, PASSPORT_IDS_BY_COUNTRY);
        cosmosTemplate.insert(Person.class.getSimpleName(), person, new PartitionKey(person.getLastName()));
    }

    // A null id on an auto-generate entity gets one assigned on insert.
    @Test
    public void testInsertShouldGenerateIdIfColumnAnnotatedWithAutoGenerate() {
        final GenIdEntity entity = new GenIdEntity(null, "foo");
        final GenIdEntity insertedEntity = cosmosTemplate.insert(GenIdEntity.class.getSimpleName(), entity, null);
        assertThat(insertedEntity.getId()).isNotNull();
    }

    // findAll returns just the seeded person and records query statistics.
    @Test
    public void testFindAll() {
        final List<Person> result = TestUtils.toList(cosmosTemplate.findAll(Person.class.getSimpleName(), Person.class));
        assertThat(result.size()).isEqualTo(1);
        assertThat(result.get(0)).isEqualTo(TEST_PERSON);
        assertThat(responseDiagnosticsTestUtils.getCosmosDiagnostics()).isNotNull();
        assertThat(responseDiagnosticsTestUtils.getCosmosResponseStatistics()).isNotNull();
        assertThat(responseDiagnosticsTestUtils.getCosmosResponseStatistics().getRequestCharge()).isGreaterThan(0);
    }

    // findById: existing id returns the person; missing id returns null (no exception).
    @Test
    public void testFindById() {
        final Person result = cosmosTemplate.findById(Person.class.getSimpleName(),
            TEST_PERSON.getId(), Person.class);
        assertEquals(result, TEST_PERSON);
        assertThat(responseDiagnosticsTestUtils.getCosmosDiagnostics()).isNotNull();
        assertThat(responseDiagnosticsTestUtils.getCosmosResponseStatistics()).isNotNull();
        assertThat(responseDiagnosticsTestUtils.getCosmosResponseStatistics().getRequestCharge()).isGreaterThan(0);
        final Person nullResult =
cosmosTemplate.findById(Person.class.getSimpleName(), NOT_EXIST_ID, Person.class);
        assertThat(nullResult).isNull();
        assertThat(responseDiagnosticsTestUtils.getCosmosDiagnostics()).isNotNull();
    }

    // findByIds fetches all three seeded persons in one call.
    @Test
    public void testFindByMultiIds() {
        cosmosTemplate.insert(TEST_PERSON_2, new PartitionKey(personInfo.getPartitionKeyFieldValue(TEST_PERSON_2)));
        cosmosTemplate.insert(TEST_PERSON_3, new PartitionKey(personInfo.getPartitionKeyFieldValue(TEST_PERSON_3)));
        final List<Object> ids = Lists.newArrayList(ID_1, ID_2, ID_3);
        final List<Person> result = TestUtils.toList(cosmosTemplate.findByIds(ids, Person.class, containerName));
        assertThat(responseDiagnosticsTestUtils.getCosmosDiagnostics()).isNotNull();
        assertThat(responseDiagnosticsTestUtils.getCosmosResponseStatistics()).isNotNull();
        assertThat(responseDiagnosticsTestUtils.getCosmosResponseStatistics().getRequestCharge()).isGreaterThan(0);
        final List<Person> expected = Lists.newArrayList(TEST_PERSON, TEST_PERSON_2, TEST_PERSON_3);
        assertThat(result.size()).isEqualTo(expected.size());
        assertThat(result).containsAll(expected);
    }

    // Upserting an id that was just deleted behaves as a create.
    @Test
    public void testUpsertNewDocument() {
        cosmosTemplate.deleteById(Person.class.getSimpleName(), TEST_PERSON.getId(),
            new PartitionKey(personInfo.getPartitionKeyFieldValue(TEST_PERSON)));
        final String firstName = NEW_FIRST_NAME + "_" + UUID.randomUUID();
        final Person newPerson = new Person(TEST_PERSON.getId(), firstName, NEW_FIRST_NAME, null, null, AGE, PASSPORT_IDS_BY_COUNTRY);
        final Person person = cosmosTemplate.upsertAndReturnEntity(Person.class.getSimpleName(), newPerson);
        assertThat(responseDiagnosticsTestUtils.getCosmosDiagnostics()).isNotNull();
        assertThat(responseDiagnosticsTestUtils.getCosmosResponseStatistics()).isNull();
        assertEquals(person.getFirstName(), firstName);
    }

    // Upsert with the current etag succeeds and the returned entity carries the stored etag.
    @Test
    public void testUpdateWithReturnEntity() {
        final Person updated = new Person(TEST_PERSON.getId(), UPDATED_FIRST_NAME,
            TEST_PERSON.getLastName(), TEST_PERSON.getHobbies(), TEST_PERSON.getShippingAddresses(), AGE,
            PASSPORT_IDS_BY_COUNTRY);
        updated.set_etag(insertedPerson.get_etag());
        final Person updatedPerson = cosmosTemplate.upsertAndReturnEntity(Person.class.getSimpleName(), updated);
        final Person findPersonById = cosmosTemplate.findById(Person.class.getSimpleName(),
            updatedPerson.getId(), Person.class);
        assertEquals(updatedPerson, updated);
        assertThat(updatedPerson.get_etag()).isEqualTo(findPersonById.get_etag());
    }

    // Plain update via upsertAndReturnEntity with a matching etag.
    @Test
    public void testUpdate() {
        final Person updated = new Person(TEST_PERSON.getId(), UPDATED_FIRST_NAME,
            TEST_PERSON.getLastName(), TEST_PERSON.getHobbies(), TEST_PERSON.getShippingAddresses(), AGE,
            PASSPORT_IDS_BY_COUNTRY);
        updated.set_etag(insertedPerson.get_etag());
        final Person person = cosmosTemplate.upsertAndReturnEntity(Person.class.getSimpleName(), updated);
        assertThat(responseDiagnosticsTestUtils.getCosmosDiagnostics()).isNotNull();
        assertThat(responseDiagnosticsTestUtils.getCosmosResponseStatistics()).isNull();
        assertEquals(person, updated);
    }

    // A stale/wrong etag must fail the upsert with a precondition error and leave the document untouched.
    @Test
    public void testOptimisticLockWhenUpdatingWithWrongEtag() {
        final Person updated = new Person(TEST_PERSON.getId(), UPDATED_FIRST_NAME,
            TEST_PERSON.getLastName(), TEST_PERSON.getHobbies(), TEST_PERSON.getShippingAddresses(), AGE,
            PASSPORT_IDS_BY_COUNTRY);
        updated.set_etag(WRONG_ETAG);
        try {
            cosmosTemplate.upsert(Person.class.getSimpleName(), updated);
        } catch (CosmosAccessException e) {
            assertThat(e.getCosmosException()).isNotNull();
            final Throwable cosmosClientException = e.getCosmosException();
            assertThat(cosmosClientException).isInstanceOf(CosmosException.class);
            assertThat(cosmosClientException.getMessage()).contains(PRECONDITION_IS_NOT_MET);
            assertThat(responseDiagnosticsTestUtils.getDiagnostics()).isNotNull();
            final Person unmodifiedPerson = cosmosTemplate.findById(Person.class.getSimpleName(),
                TEST_PERSON.getId(), Person.class);
            assertThat(unmodifiedPerson.getFirstName()).isEqualTo(insertedPerson.getFirstName());
            return;
        }
        fail();
    }

    @Test
    public void testDeleteById() {
cosmosTemplate.insert(TEST_PERSON_2, null);
        // Two documents present; deleting TEST_PERSON by id must leave only TEST_PERSON_2.
        assertThat(cosmosTemplate.count(Person.class.getSimpleName())).isEqualTo(2);
        cosmosTemplate.deleteById(Person.class.getSimpleName(), TEST_PERSON.getId(),
            new PartitionKey(personInfo.getPartitionKeyFieldValue(TEST_PERSON)));
        assertThat(responseDiagnosticsTestUtils.getCosmosDiagnostics()).isNotNull();
        assertThat(responseDiagnosticsTestUtils.getCosmosResponseStatistics()).isNull();
        final List<Person> result = TestUtils.toList(cosmosTemplate.findAll(Person.class));
        assertThat(responseDiagnosticsTestUtils.getCosmosDiagnostics()).isNotNull();
        assertThat(responseDiagnosticsTestUtils.getCosmosResponseStatistics()).isNotNull();
        assertThat(responseDiagnosticsTestUtils.getCosmosResponseStatistics().getRequestCharge()).isGreaterThan(0);
        assertThat(result.size()).isEqualTo(1);
        assertEquals(result.get(0), TEST_PERSON_2);
    }

    // deleteEntity removes the freshly inserted person, leaving only TEST_PERSON.
    @Test
    public void testDeleteByEntity() {
        Person insertedPerson = cosmosTemplate.insert(TEST_PERSON_2, new PartitionKey(TEST_PERSON_2.getLastName()));
        assertThat(cosmosTemplate.count(Person.class.getSimpleName())).isEqualTo(2);
        cosmosTemplate.deleteEntity(Person.class.getSimpleName(), insertedPerson);
        assertThat(responseDiagnosticsTestUtils.getCosmosDiagnostics()).isNotNull();
        assertThat(responseDiagnosticsTestUtils.getCosmosResponseStatistics()).isNull();
        final List<Person> result = TestUtils.toList(cosmosTemplate.findAll(Person.class));
        assertThat(responseDiagnosticsTestUtils.getCosmosDiagnostics()).isNotNull();
        assertThat(responseDiagnosticsTestUtils.getCosmosResponseStatistics()).isNotNull();
        assertThat(responseDiagnosticsTestUtils.getCosmosResponseStatistics().getRequestCharge()).isGreaterThan(0);
        assertThat(result.size()).isEqualTo(1);
        assertEquals(result.get(0), TEST_PERSON);
    }

    // Container-level count goes from 1 to 2 after an insert.
    @Test
    public void testCountByContainer() {
        final long prevCount = cosmosTemplate.count(containerName);
        assertThat(prevCount).isEqualTo(1);
        assertThat(responseDiagnosticsTestUtils.getCosmosDiagnostics()).isNotNull();
        assertThat(responseDiagnosticsTestUtils.getCosmosResponseStatistics()).isNotNull();
        assertThat(responseDiagnosticsTestUtils.getCosmosResponseStatistics().getRequestCharge()).isGreaterThan(0);
        cosmosTemplate.insert(TEST_PERSON_2, new PartitionKey(personInfo.getPartitionKeyFieldValue(TEST_PERSON_2)));
        assertThat(responseDiagnosticsTestUtils.getCosmosDiagnostics()).isNotNull();
        final long newCount = cosmosTemplate.count(containerName);
        assertThat(newCount).isEqualTo(2);
        assertThat(responseDiagnosticsTestUtils.getCosmosDiagnostics()).isNotNull();
        assertThat(responseDiagnosticsTestUtils.getCosmosResponseStatistics()).isNotNull();
        assertThat(responseDiagnosticsTestUtils.getCosmosResponseStatistics().getRequestCharge()).isGreaterThan(0);
    }

    // count with an IS_EQUAL criteria; the ignore-case variant must match the same document.
    @Test
    public void testCountByQuery() {
        cosmosTemplate.insert(TEST_PERSON_2, new PartitionKey(personInfo.getPartitionKeyFieldValue(TEST_PERSON_2)));
        assertThat(responseDiagnosticsTestUtils.getCosmosDiagnostics()).isNotNull();
        assertThat(responseDiagnosticsTestUtils.getCosmosResponseStatistics()).isNull();
        final Criteria criteria = Criteria.getInstance(CriteriaType.IS_EQUAL, "firstName",
            Collections.singletonList(TEST_PERSON_2.getFirstName()), Part.IgnoreCaseType.NEVER);
        final CosmosQuery query = new CosmosQuery(criteria);
        final long count = cosmosTemplate.count(query, containerName);
        assertThat(count).isEqualTo(1);
        final Criteria criteriaIgnoreCase = Criteria.getInstance(CriteriaType.IS_EQUAL, "firstName",
            Collections.singletonList(TEST_PERSON_2.getFirstName().toUpperCase()), Part.IgnoreCaseType.ALWAYS);
        final CosmosQuery queryIgnoreCase = new CosmosQuery(criteriaIgnoreCase);
        final long countIgnoreCase = cosmosTemplate.count(queryIgnoreCase, containerName);
        assertThat(countIgnoreCase).isEqualTo(1);
        assertThat(responseDiagnosticsTestUtils.getCosmosDiagnostics()).isNotNull();
        assertThat(responseDiagnosticsTestUtils.getCosmosResponseStatistics()).isNotNull();
assertThat(responseDiagnosticsTestUtils.getCosmosResponseStatistics().getRequestCharge()).isGreaterThan(0);
    }

    // Two documents paged with size 1: first page full and non-last, second page holds the remainder.
    @Test
    public void testFindAllPageableMultiPages() {
        cosmosTemplate.insert(TEST_PERSON_2, new PartitionKey(personInfo.getPartitionKeyFieldValue(TEST_PERSON_2)));
        assertThat(responseDiagnosticsTestUtils.getCosmosDiagnostics()).isNotNull();
        assertThat(responseDiagnosticsTestUtils.getCosmosResponseStatistics()).isNull();
        final CosmosPageRequest pageRequest = new CosmosPageRequest(0, PAGE_SIZE_1, null);
        final Page<Person> page1 = cosmosTemplate.findAll(pageRequest, Person.class, containerName);
        assertThat(page1.getContent().size()).isEqualTo(PAGE_SIZE_1);
        PageTestUtils.validateNonLastPage(page1, PAGE_SIZE_1);
        assertThat(responseDiagnosticsTestUtils.getCosmosDiagnostics()).isNotNull();
        assertThat(responseDiagnosticsTestUtils.getCosmosResponseStatistics()).isNotNull();
        assertThat(responseDiagnosticsTestUtils.getCosmosResponseStatistics().getRequestCharge()).isGreaterThan(0);
        final Page<Person> page2 = cosmosTemplate.findAll(page1.nextPageable(), Person.class, containerName);
        assertThat(page2.getContent().size()).isEqualTo(1);
        PageTestUtils.validateLastPage(page2, PAGE_SIZE_1);
        assertThat(responseDiagnosticsTestUtils.getCosmosDiagnostics()).isNotNull();
        assertThat(responseDiagnosticsTestUtils.getCosmosResponseStatistics()).isNotNull();
        assertThat(responseDiagnosticsTestUtils.getCosmosResponseStatistics().getRequestCharge()).isGreaterThan(0);
    }

    // Three documents paged with size 2: page one holds two persons, page two the third.
    @Test
    public void testFindAllPageableMultiPagesPageSizeTwo() {
        cosmosTemplate.insert(TEST_PERSON_2, new PartitionKey(personInfo.getPartitionKeyFieldValue(TEST_PERSON_2)));
        cosmosTemplate.insert(TEST_PERSON_3, new PartitionKey(personInfo.getPartitionKeyFieldValue(TEST_PERSON_3)));
        assertThat(responseDiagnosticsTestUtils.getCosmosDiagnostics()).isNotNull();
        assertThat(responseDiagnosticsTestUtils.getCosmosResponseStatistics()).isNull();
        final CosmosPageRequest pageRequest = new CosmosPageRequest(0, PAGE_SIZE_2, null);
        final Page<Person> page1 = cosmosTemplate.findAll(pageRequest, Person.class, containerName);
        final List<Person> resultPage1 = TestUtils.toList(page1);
        final List<Person> expected = Lists.newArrayList(TEST_PERSON, TEST_PERSON_2);
        assertThat(resultPage1.size()).isEqualTo(expected.size());
        assertThat(resultPage1).containsAll(expected);
        PageTestUtils.validateNonLastPage(page1, PAGE_SIZE_2);
        assertThat(responseDiagnosticsTestUtils.getCosmosDiagnostics()).isNotNull();
        assertThat(responseDiagnosticsTestUtils.getCosmosResponseStatistics()).isNotNull();
        assertThat(responseDiagnosticsTestUtils.getCosmosResponseStatistics().getRequestCharge()).isGreaterThan(0);
        final Page<Person> page2 = cosmosTemplate.findAll(page1.nextPageable(), Person.class, containerName);
        final List<Person> resultPage2 = TestUtils.toList(page2);
        final List<Person> expected2 = Lists.newArrayList(TEST_PERSON_3);
        assertThat(resultPage2.size()).isEqualTo(expected2.size());
        assertThat(resultPage2).containsAll(expected2);
        PageTestUtils.validateLastPage(page2, PAGE_SIZE_2);
        assertThat(responseDiagnosticsTestUtils.getCosmosDiagnostics()).isNotNull();
        assertThat(responseDiagnosticsTestUtils.getCosmosResponseStatistics()).isNotNull();
        assertThat(responseDiagnosticsTestUtils.getCosmosResponseStatistics().getRequestCharge()).isGreaterThan(0);
    }

    // paginationQuery with an IS_EQUAL filter: one match, case-sensitive and ignore-case.
    @Test
    public void testPaginationQuery() {
        cosmosTemplate.insert(TEST_PERSON_2, new PartitionKey(personInfo.getPartitionKeyFieldValue(TEST_PERSON_2)));
        assertThat(responseDiagnosticsTestUtils.getCosmosDiagnostics()).isNotNull();
        assertThat(responseDiagnosticsTestUtils.getCosmosResponseStatistics()).isNull();
        final Criteria criteria = Criteria.getInstance(CriteriaType.IS_EQUAL, "firstName",
            Collections.singletonList(FIRST_NAME), Part.IgnoreCaseType.NEVER);
        final PageRequest pageRequest = new CosmosPageRequest(0, PAGE_SIZE_2, null);
        final CosmosQuery query = new CosmosQuery(criteria).with(pageRequest);
        final Page<Person> page = cosmosTemplate.paginationQuery(query, Person.class,
containerName);
        assertThat(page.getContent().size()).isEqualTo(1);
        PageTestUtils.validateLastPage(page, PAGE_SIZE_2);
        final Criteria criteriaIgnoreCase = Criteria.getInstance(CriteriaType.IS_EQUAL, "firstName",
            Collections.singletonList(FIRST_NAME.toUpperCase()), Part.IgnoreCaseType.ALWAYS);
        final CosmosQuery queryIgnoreCase = new CosmosQuery(criteriaIgnoreCase).with(pageRequest);
        final Page<Person> pageIgnoreCase = cosmosTemplate.paginationQuery(queryIgnoreCase, Person.class, containerName);
        assertThat(pageIgnoreCase.getContent().size()).isEqualTo(1);
        PageTestUtils.validateLastPage(pageIgnoreCase, PAGE_SIZE_2);
        assertThat(responseDiagnosticsTestUtils.getCosmosDiagnostics()).isNotNull();
        assertThat(responseDiagnosticsTestUtils.getCosmosResponseStatistics()).isNotNull();
        assertThat(responseDiagnosticsTestUtils.getCosmosResponseStatistics().getRequestCharge()).isGreaterThan(0);
    }

    // ASC sort on firstName orders barney/fred/george; withLimit(1) then returns only the first.
    @Test
    public void testFindWithSortAndLimit() {
        final Person testPerson4 = new Person("id_4", "fred", NEW_LAST_NAME, HOBBIES, ADDRESSES, AGE, PASSPORT_IDS_BY_COUNTRY);
        final Person testPerson5 = new Person("id_5", "barney", NEW_LAST_NAME, HOBBIES, ADDRESSES, AGE, PASSPORT_IDS_BY_COUNTRY);
        final Person testPerson6 = new Person("id_6", "george", NEW_LAST_NAME, HOBBIES, ADDRESSES, AGE, PASSPORT_IDS_BY_COUNTRY);
        insertPerson(testPerson4);
        insertPerson(testPerson5);
        insertPerson(testPerson6);
        final Criteria criteria = Criteria.getInstance(CriteriaType.IS_EQUAL, "lastName",
            Collections.singletonList(NEW_LAST_NAME), Part.IgnoreCaseType.ALWAYS);
        final CosmosQuery query = new CosmosQuery(criteria);
        query.with(Sort.by(Sort.Direction.ASC, "firstName"));
        final List<Person> result = TestUtils.toList(cosmosTemplate.find(query, Person.class, containerName));
        assertThat(result.size()).isEqualTo(3);
        assertThat(result.get(0).getFirstName()).isEqualTo("barney");
        assertThat(result.get(1).getFirstName()).isEqualTo("fred");
        assertThat(result.get(2).getFirstName()).isEqualTo("george");
        query.withLimit(1);
        final List<Person> resultWithLimit = TestUtils.toList(cosmosTemplate.find(query, Person.class, containerName));
        assertThat(resultWithLimit.size()).isEqualTo(1);
        assertThat(resultWithLimit.get(0).getFirstName()).isEqualTo("barney");
    }

    // Same fixture as above; withOffsetAndLimit(1, 1) skips "barney" and returns "fred".
    @Test
    public void testFindWithOffsetAndLimit() {
        final Person testPerson4 = new Person("id_4", "fred", NEW_LAST_NAME, HOBBIES, ADDRESSES, AGE, PASSPORT_IDS_BY_COUNTRY);
        final Person testPerson5 = new Person("id_5", "barney", NEW_LAST_NAME, HOBBIES, ADDRESSES, AGE, PASSPORT_IDS_BY_COUNTRY);
        final Person testPerson6 = new Person("id_6", "george", NEW_LAST_NAME, HOBBIES, ADDRESSES, AGE, PASSPORT_IDS_BY_COUNTRY);
        insertPerson(testPerson4);
        insertPerson(testPerson5);
        insertPerson(testPerson6);
        final Criteria criteria = Criteria.getInstance(CriteriaType.IS_EQUAL, "lastName",
            Collections.singletonList(NEW_LAST_NAME), Part.IgnoreCaseType.ALWAYS);
        final CosmosQuery query = new CosmosQuery(criteria);
        query.with(Sort.by(Sort.Direction.ASC, "firstName"));
        final List<Person> result = TestUtils.toList(cosmosTemplate.find(query, Person.class, containerName));
        assertThat(result.size()).isEqualTo(3);
        assertThat(result.get(0).getFirstName()).isEqualTo("barney");
        assertThat(result.get(1).getFirstName()).isEqualTo("fred");
        assertThat(result.get(2).getFirstName()).isEqualTo("george");
        query.withOffsetAndLimit(1, 1);
        final List<Person> resultWithLimit = TestUtils.toList(cosmosTemplate.find(query, Person.class, containerName));
        assertThat(resultWithLimit.size()).isEqualTo(1);
        assertThat(resultWithLimit.get(0).getFirstName()).isEqualTo("fred");
    }

    // findAll with a DESC-sorted page request: NEW_FIRST_NAME entries come before FIRST_NAME.
    @Test
    public void testFindAllWithPageableAndSort() {
        cosmosTemplate.insert(TEST_PERSON_2, new PartitionKey(personInfo.getPartitionKeyFieldValue(TEST_PERSON_2)));
        cosmosTemplate.insert(TEST_PERSON_3, new PartitionKey(personInfo.getPartitionKeyFieldValue(TEST_PERSON_3)));
        assertThat(responseDiagnosticsTestUtils.getCosmosDiagnostics()).isNotNull();
assertThat(responseDiagnosticsTestUtils.getCosmosResponseStatistics()).isNull();
        final Sort sort = Sort.by(Sort.Direction.DESC, "firstName");
        final PageRequest pageRequest = new CosmosPageRequest(0, PAGE_SIZE_3, null, sort);
        final Page<Person> page = cosmosTemplate.findAll(pageRequest, Person.class, containerName);
        assertThat(page.getContent().size()).isEqualTo(3);
        PageTestUtils.validateLastPage(page, PAGE_SIZE_3);
        final List<Person> result = page.getContent();
        assertThat(result.get(0).getFirstName()).isEqualTo(NEW_FIRST_NAME);
        assertThat(result.get(1).getFirstName()).isEqualTo(NEW_FIRST_NAME);
        assertThat(result.get(2).getFirstName()).isEqualTo(FIRST_NAME);
        assertThat(responseDiagnosticsTestUtils.getCosmosDiagnostics()).isNotNull();
        assertThat(responseDiagnosticsTestUtils.getCosmosResponseStatistics()).isNotNull();
        assertThat(responseDiagnosticsTestUtils.getCosmosResponseStatistics().getRequestCharge()).isGreaterThan(0);
    }

    // ASC sort across two pages: ordering must hold within and across page boundaries.
    @Test
    public void testFindAllWithTwoPagesAndVerifySortOrder() {
        final Person testPerson4 = new Person("id_4", "barney", NEW_LAST_NAME, HOBBIES, ADDRESSES, AGE, PASSPORT_IDS_BY_COUNTRY);
        final Person testPerson5 = new Person("id_5", "fred", NEW_LAST_NAME, HOBBIES, ADDRESSES, AGE, PASSPORT_IDS_BY_COUNTRY);
        cosmosTemplate.insert(TEST_PERSON_2, new PartitionKey(personInfo.getPartitionKeyFieldValue(TEST_PERSON_2)));
        cosmosTemplate.insert(TEST_PERSON_3, new PartitionKey(personInfo.getPartitionKeyFieldValue(TEST_PERSON_3)));
        cosmosTemplate.insert(testPerson4, new PartitionKey(personInfo.getPartitionKeyFieldValue(testPerson4)));
        cosmosTemplate.insert(testPerson5, new PartitionKey(personInfo.getPartitionKeyFieldValue(testPerson5)));
        final Sort sort = Sort.by(Sort.Direction.ASC, "firstName");
        final PageRequest pageRequest = new CosmosPageRequest(0, PAGE_SIZE_3, null, sort);
        final Page<Person> firstPage = cosmosTemplate.findAll(pageRequest, Person.class, containerName);
        assertThat(firstPage.getContent().size()).isEqualTo(3);
        PageTestUtils.validateNonLastPage(firstPage, firstPage.getContent().size());
        final List<Person> firstPageResults = firstPage.getContent();
        assertThat(firstPageResults.get(0).getFirstName()).isEqualTo(testPerson4.getFirstName());
        assertThat(firstPageResults.get(1).getFirstName()).isEqualTo(FIRST_NAME);
        assertThat(firstPageResults.get(2).getFirstName()).isEqualTo(testPerson5.getFirstName());
        final Page<Person> secondPage = cosmosTemplate.findAll(firstPage.nextPageable(), Person.class, containerName);
        assertThat(secondPage.getContent().size()).isEqualTo(2);
        PageTestUtils.validateLastPage(secondPage, PAGE_SIZE_3);
        final List<Person> secondPageResults = secondPage.getContent();
        assertThat(secondPageResults.get(0).getFirstName()).isEqualTo(NEW_FIRST_NAME);
        assertThat(secondPageResults.get(1).getFirstName()).isEqualTo(NEW_FIRST_NAME);
    }

    // exists with IS_EQUAL on firstName, both case-sensitive and ignore-case.
    @Test
    public void testExists() {
        final Criteria criteria = Criteria.getInstance(CriteriaType.IS_EQUAL, "firstName",
            Collections.singletonList(TEST_PERSON.getFirstName()), Part.IgnoreCaseType.NEVER);
        final CosmosQuery query = new CosmosQuery(criteria);
        final Boolean exists = cosmosTemplate.exists(query, Person.class, containerName);
        assertThat(exists).isTrue();
        final Criteria criteriaIgnoreCase = Criteria.getInstance(CriteriaType.IS_EQUAL, "firstName",
            Collections.singletonList(TEST_PERSON.getFirstName().toUpperCase()), Part.IgnoreCaseType.ALWAYS);
        final CosmosQuery queryIgnoreCase = new CosmosQuery(criteriaIgnoreCase);
        final Boolean existsIgnoreCase = cosmosTemplate.exists(queryIgnoreCase, Person.class, containerName);
        assertThat(existsIgnoreCase).isTrue();
        assertThat(responseDiagnosticsTestUtils.getCosmosDiagnostics()).isNotNull();
        assertThat(responseDiagnosticsTestUtils.getCosmosResponseStatistics()).isNotNull();
        assertThat(responseDiagnosticsTestUtils.getCosmosResponseStatistics().getRequestCharge()).isGreaterThan(0);
    }

    // ARRAY_CONTAINS on the hobbies array finds the seeded person.
    @Test
    public void testArrayContainsCriteria() {
        Criteria hasHobby = Criteria.getInstance(CriteriaType.ARRAY_CONTAINS,
"hobbies", Collections.singletonList(HOBBY1), Part.IgnoreCaseType.NEVER); List<Person> people = TestUtils.toList(cosmosTemplate.find(new CosmosQuery(hasHobby), Person.class, containerName)); assertThat(people).containsExactly(TEST_PERSON); } @Test public void testContainsCriteria() { cosmosTemplate.insert(TEST_PERSON_2, new PartitionKey(personInfo.getPartitionKeyFieldValue(TEST_PERSON_2))); cosmosTemplate.insert(TEST_PERSON_3, new PartitionKey(personInfo.getPartitionKeyFieldValue(TEST_PERSON_3))); Person TEST_PERSON_4 = new Person("id-4", "NEW_FIRST_NAME", NEW_LAST_NAME, HOBBIES, ADDRESSES, AGE, PASSPORT_IDS_BY_COUNTRY); cosmosTemplate.insert(TEST_PERSON_4, new PartitionKey(personInfo.getPartitionKeyFieldValue(TEST_PERSON_4))); Criteria containsCaseSensitive = Criteria.getInstance(CriteriaType.CONTAINING, "firstName", Collections.singletonList("first"), Part.IgnoreCaseType.NEVER); List<Person> people = TestUtils.toList(cosmosTemplate.find(new CosmosQuery(containsCaseSensitive), Person.class, containerName)); assertThat(people).containsExactly(TEST_PERSON, TEST_PERSON_2, TEST_PERSON_3); Criteria containsNotCaseSensitive = Criteria.getInstance(CriteriaType.CONTAINING, "firstName", Collections.singletonList("first"), Part.IgnoreCaseType.ALWAYS); List<Person> people2 = TestUtils.toList(cosmosTemplate.find(new CosmosQuery(containsNotCaseSensitive), Person.class, containerName)); assertThat(people2).containsExactly(TEST_PERSON, TEST_PERSON_2, TEST_PERSON_3, TEST_PERSON_4); } @Test public void testContainsCriteria2() { cosmosTemplate.insert(TEST_PERSON_2, new PartitionKey(personInfo.getPartitionKeyFieldValue(TEST_PERSON_2))); cosmosTemplate.insert(TEST_PERSON_3, new PartitionKey(personInfo.getPartitionKeyFieldValue(TEST_PERSON_3))); Criteria containsCaseSensitive = Criteria.getInstance(CriteriaType.CONTAINING, "id", Collections.singletonList("1"), Part.IgnoreCaseType.NEVER); List<Person> people = TestUtils.toList(cosmosTemplate.find(new CosmosQuery(containsCaseSensitive), 
Person.class, containerName)); assertThat(people).containsExactly(TEST_PERSON); Criteria containsCaseSensitive2 = Criteria.getInstance(CriteriaType.CONTAINING, "id", Collections.singletonList("2"), Part.IgnoreCaseType.NEVER); List<Person> people2 = TestUtils.toList(cosmosTemplate.find(new CosmosQuery(containsCaseSensitive2), Person.class, containerName)); assertThat(people2).containsExactly(TEST_PERSON_2); Criteria containsCaseSensitive3 = Criteria.getInstance(CriteriaType.CONTAINING, "id", Collections.singletonList("3"), Part.IgnoreCaseType.NEVER); List<Person> people3 = TestUtils.toList(cosmosTemplate.find(new CosmosQuery(containsCaseSensitive3), Person.class, containerName)); assertThat(people3).containsExactly(TEST_PERSON_3); } @Test public void testNotContainsCriteria() { cosmosTemplate.insert(TEST_PERSON_2, new PartitionKey(personInfo.getPartitionKeyFieldValue(TEST_PERSON_2))); cosmosTemplate.insert(TEST_PERSON_3, new PartitionKey(personInfo.getPartitionKeyFieldValue(TEST_PERSON_3))); Person TEST_PERSON_4 = new Person("id-4", "NEW_FIRST_NAME", NEW_LAST_NAME, HOBBIES, ADDRESSES, AGE, PASSPORT_IDS_BY_COUNTRY); cosmosTemplate.insert(TEST_PERSON_4, new PartitionKey(personInfo.getPartitionKeyFieldValue(TEST_PERSON_4))); Criteria notContainsCaseSensitive = Criteria.getInstance(CriteriaType.NOT_CONTAINING, "firstName", Collections.singletonList("li"), Part.IgnoreCaseType.NEVER); List<Person> people = TestUtils.toList(cosmosTemplate.find(new CosmosQuery(notContainsCaseSensitive), Person.class, containerName)); assertThat(people).containsExactly(TEST_PERSON_2, TEST_PERSON_3, TEST_PERSON_4); Criteria notContainsNotCaseSensitive = Criteria.getInstance(CriteriaType.NOT_CONTAINING, "firstName", Collections.singletonList("new"), Part.IgnoreCaseType.ALWAYS); List<Person> people2 = TestUtils.toList(cosmosTemplate.find(new CosmosQuery(notContainsNotCaseSensitive), Person.class, containerName)); assertThat(people2).containsExactly(TEST_PERSON); } @Test public void 
testNotContainsCriteria2() { cosmosTemplate.insert(TEST_PERSON_2, new PartitionKey(personInfo.getPartitionKeyFieldValue(TEST_PERSON_2))); cosmosTemplate.insert(TEST_PERSON_3, new PartitionKey(personInfo.getPartitionKeyFieldValue(TEST_PERSON_3))); Criteria notContainsCaseSensitive = Criteria.getInstance(CriteriaType.NOT_CONTAINING, "id", Collections.singletonList("1"), Part.IgnoreCaseType.NEVER); List<Person> people = TestUtils.toList(cosmosTemplate.find(new CosmosQuery(notContainsCaseSensitive), Person.class, containerName)); assertThat(people).containsExactly(TEST_PERSON_2, TEST_PERSON_3); Criteria notContainsCaseSensitive2 = Criteria.getInstance(CriteriaType.NOT_CONTAINING, "id", Collections.singletonList("2"), Part.IgnoreCaseType.NEVER); List<Person> people2 = TestUtils.toList(cosmosTemplate.find(new CosmosQuery(notContainsCaseSensitive2), Person.class, containerName)); assertThat(people2).containsExactly(TEST_PERSON, TEST_PERSON_3); Criteria notContainsCaseSensitive3 = Criteria.getInstance(CriteriaType.NOT_CONTAINING, "id", Collections.singletonList("3"), Part.IgnoreCaseType.NEVER); List<Person> people3 = TestUtils.toList(cosmosTemplate.find(new CosmosQuery(notContainsCaseSensitive3), Person.class, containerName)); assertThat(people3).containsExactly(TEST_PERSON, TEST_PERSON_2); } @Test public void testIsNotNullCriteriaCaseSensitive() { Criteria hasLastName = Criteria.getInstance(CriteriaType.IS_NOT_NULL, "lastName", Collections.emptyList(), Part.IgnoreCaseType.ALWAYS); List<Person> people = TestUtils.toList(cosmosTemplate.find(new CosmosQuery(hasLastName), Person.class, containerName)); assertThat(people).containsExactly(TEST_PERSON); } @Test public void testStartsWithCriteriaCaseSensitive() { Criteria nameStartsWith = Criteria.getInstance(CriteriaType.STARTS_WITH, "firstName", Collections.singletonList(TEST_PERSON.getFirstName().toUpperCase()), Part.IgnoreCaseType.ALWAYS); List<Person> people = TestUtils.toList(cosmosTemplate.find(new 
CosmosQuery(nameStartsWith), Person.class, containerName)); assertThat(people).containsExactly(TEST_PERSON); } @Test public void testIsEqualCriteriaCaseSensitive() { Criteria nameStartsWith = Criteria.getInstance(CriteriaType.IS_EQUAL, "firstName", Collections.singletonList(TEST_PERSON.getFirstName().toUpperCase()), Part.IgnoreCaseType.ALWAYS); List<Person> people = TestUtils.toList(cosmosTemplate.find(new CosmosQuery(nameStartsWith), Person.class, containerName)); assertThat(people).containsExactly(TEST_PERSON); } @Test public void testStringEqualsCriteriaCaseSensitive() { Criteria nameStartsWith = Criteria.getInstance(CriteriaType.STRING_EQUALS, "firstName", Collections.singletonList(TEST_PERSON.getFirstName().toUpperCase()), Part.IgnoreCaseType.ALWAYS); List<Person> people = TestUtils.toList(cosmosTemplate.find(new CosmosQuery(nameStartsWith), Person.class, containerName)); assertThat(people).containsExactly(TEST_PERSON); } @Test public void testBetweenCriteria() { Criteria ageBetween = Criteria.getInstance(CriteriaType.BETWEEN, "age", Arrays.asList(AGE - 1, AGE + 1), Part.IgnoreCaseType.NEVER); List<Person> people = TestUtils.toList(cosmosTemplate.find(new CosmosQuery(ageBetween), Person.class, containerName)); assertThat(people).containsExactly(TEST_PERSON); } @Test public void testFindWithEqualCriteriaContainingNestedProperty() { String postalCode = ADDRESSES.get(0).getPostalCode(); String subjectWithNestedProperty = "shippingAddresses[0]['postalCode']"; Criteria criteria = Criteria.getInstance(CriteriaType.IS_EQUAL, subjectWithNestedProperty, Collections.singletonList(postalCode), Part.IgnoreCaseType.NEVER); List<Person> people = TestUtils.toList(cosmosTemplate.find(new CosmosQuery(criteria), Person.class, containerName)); assertThat(people).containsExactly(TEST_PERSON); } @Test public void testRunQueryWithEqualCriteriaContainingSpaces() { String usaPassportId = PASSPORT_IDS_BY_COUNTRY.get("United States of America"); String subjectWithSpaces = 
"passportIdsByCountry['United States of America']"; Criteria criteria = Criteria.getInstance(CriteriaType.IS_EQUAL, subjectWithSpaces, Collections.singletonList(usaPassportId), Part.IgnoreCaseType.NEVER); final SqlQuerySpec sqlQuerySpec = new FindQuerySpecGenerator().generateCosmos(new CosmosQuery(criteria)); List<Person> people = TestUtils.toList(cosmosTemplate.runQuery(sqlQuerySpec, Person.class, Person.class)); assertThat(people).containsExactly(TEST_PERSON); } @Test public void testRunQueryWithSimpleReturnType() { Criteria ageBetween = Criteria.getInstance(CriteriaType.BETWEEN, "age", Arrays.asList(AGE - 1, AGE + 1), Part.IgnoreCaseType.NEVER); final SqlQuerySpec sqlQuerySpec = new FindQuerySpecGenerator().generateCosmos(new CosmosQuery(ageBetween)); List<Person> people = TestUtils.toList(cosmosTemplate.runQuery(sqlQuerySpec, Person.class, Person.class)); assertThat(people).containsExactly(TEST_PERSON); } @Test public void testRunQueryWithReturnTypeContainingLocalDateTime() { final AuditableEntity entity = new AuditableEntity(); entity.setId(UUID.randomUUID().toString()); auditableRepository.save(entity); Criteria equals = Criteria.getInstance(CriteriaType.IS_EQUAL, "id", Collections.singletonList(entity.getId()), Part.IgnoreCaseType.NEVER); final SqlQuerySpec sqlQuerySpec = new FindQuerySpecGenerator().generateCosmos(new CosmosQuery(equals)); List<AuditableEntity> results = TestUtils.toList(cosmosTemplate.runQuery(sqlQuerySpec, AuditableEntity.class, AuditableEntity.class)); assertEquals(results.size(), 1); AuditableEntity foundEntity = results.get(0); assertEquals(entity.getId(), foundEntity.getId()); assertNotNull(foundEntity.getCreatedDate()); assertNotNull(foundEntity.getLastModifiedByDate()); } @Test public void testSliceQuery() { cosmosTemplate.insert(TEST_PERSON_2, new PartitionKey(personInfo.getPartitionKeyFieldValue(TEST_PERSON_2))); assertThat(responseDiagnosticsTestUtils.getCosmosDiagnostics()).isNotNull(); 
assertThat(responseDiagnosticsTestUtils.getCosmosResponseStatistics()).isNull(); final Criteria criteria = Criteria.getInstance(CriteriaType.IS_EQUAL, "firstName", Collections.singletonList(FIRST_NAME), Part.IgnoreCaseType.NEVER); final PageRequest pageRequest = new CosmosPageRequest(0, PAGE_SIZE_2, null); final CosmosQuery query = new CosmosQuery(criteria).with(pageRequest); final Slice<Person> slice = cosmosTemplate.sliceQuery(query, Person.class, containerName); assertThat(slice.getContent().size()).isEqualTo(1); assertThat(responseDiagnosticsTestUtils.getCosmosDiagnostics()).isNotNull(); assertThat(responseDiagnosticsTestUtils.getCosmosResponseStatistics()).isNotNull(); assertThat(responseDiagnosticsTestUtils.getCosmosResponseStatistics().getRequestCharge()).isGreaterThan(0); } @Test public void testRunSliceQuery() { cosmosTemplate.insert(TEST_PERSON_2, new PartitionKey(personInfo.getPartitionKeyFieldValue(TEST_PERSON_2))); assertThat(responseDiagnosticsTestUtils.getCosmosDiagnostics()).isNotNull(); assertThat(responseDiagnosticsTestUtils.getCosmosResponseStatistics()).isNull(); final Criteria criteria = Criteria.getInstance(CriteriaType.IS_EQUAL, "firstName", Collections.singletonList(FIRST_NAME), Part.IgnoreCaseType.NEVER); final PageRequest pageRequest = new CosmosPageRequest(0, PAGE_SIZE_2, null); final SqlQuerySpec sqlQuerySpec = new FindQuerySpecGenerator().generateCosmos(new CosmosQuery(criteria)); final Slice<Person> slice = cosmosTemplate.runSliceQuery(sqlQuerySpec, pageRequest, Person.class, Person.class); assertThat(slice.getContent().size()).isEqualTo(1); assertThat(responseDiagnosticsTestUtils.getCosmosDiagnostics()).isNotNull(); assertThat(responseDiagnosticsTestUtils.getCosmosResponseStatistics()).isNotNull(); assertThat(responseDiagnosticsTestUtils.getCosmosResponseStatistics().getRequestCharge()).isGreaterThan(0); } @Test public void createWithAutoscale() throws ClassNotFoundException { final CosmosEntityInformation<AutoScaleSample, String> 
autoScaleSampleInfo = new CosmosEntityInformation<>(AutoScaleSample.class); CosmosContainerProperties containerProperties = cosmosTemplate.createContainerIfNotExists(autoScaleSampleInfo); assertNotNull(containerProperties); ThroughputResponse throughput = client.getDatabase(TestConstants.DB_NAME) .getContainer(autoScaleSampleInfo.getContainerName()) .readThroughput() .block(); assertNotNull(throughput); assertEquals(Integer.parseInt(TestConstants.AUTOSCALE_MAX_THROUGHPUT), throughput.getProperties().getAutoscaleMaxThroughput()); } @Test public void createDatabaseWithThroughput() throws ClassNotFoundException { final String configuredThroughputDbName = TestConstants.DB_NAME + "-configured-throughput"; deleteDatabaseIfExists(configuredThroughputDbName); Integer expectedRequestUnits = 700; final CosmosConfig config = CosmosConfig.builder() .enableDatabaseThroughput(false, expectedRequestUnits) .build(); final CosmosTemplate configuredThroughputCosmosTemplate = createCosmosTemplate(config, configuredThroughputDbName); final CosmosEntityInformation<Person, String> personInfo = new CosmosEntityInformation<>(Person.class); configuredThroughputCosmosTemplate.createContainerIfNotExists(personInfo); final CosmosAsyncDatabase database = client.getDatabase(configuredThroughputDbName); final ThroughputResponse response = database.readThroughput().block(); assertEquals(expectedRequestUnits, response.getProperties().getManualThroughput()); } @Test public void queryWithMaxDegreeOfParallelism() throws ClassNotFoundException { final CosmosConfig config = CosmosConfig.builder() .maxDegreeOfParallelism(20) .build(); final CosmosTemplate maxDegreeOfParallelismCosmosTemplate = createCosmosTemplate(config, TestConstants.DB_NAME); final Criteria criteria = Criteria.getInstance(CriteriaType.IS_EQUAL, "firstName", Collections.singletonList(TEST_PERSON.getFirstName()), Part.IgnoreCaseType.NEVER); final CosmosQuery query = new CosmosQuery(criteria); final long count = 
maxDegreeOfParallelismCosmosTemplate.count(query, containerName); assertEquals((int) ReflectionTestUtils.getField(maxDegreeOfParallelismCosmosTemplate, "maxDegreeOfParallelism"), 20); } @Test public void queryDatabaseWithQueryMerticsEnabled() throws ClassNotFoundException { final CosmosConfig config = CosmosConfig.builder() .enableQueryMetrics(true) .build(); final CosmosTemplate queryMetricsEnabledCosmosTemplate = createCosmosTemplate(config, TestConstants.DB_NAME); final Criteria criteria = Criteria.getInstance(CriteriaType.IS_EQUAL, "firstName", Collections.singletonList(TEST_PERSON.getFirstName()), Part.IgnoreCaseType.NEVER); final CosmosQuery query = new CosmosQuery(criteria); final long count = queryMetricsEnabledCosmosTemplate.count(query, containerName); assertEquals((boolean) ReflectionTestUtils.getField(queryMetricsEnabledCosmosTemplate, "queryMetricsEnabled"), true); } @Test public void userAgentSpringDataCosmosSuffix() throws NoSuchMethodException, InvocationTargetException, IllegalAccessException { Method getUserAgentSuffix = CosmosClientBuilder.class.getDeclaredMethod("getUserAgentSuffix"); getUserAgentSuffix.setAccessible(true); String userAgentSuffix = (String) getUserAgentSuffix.invoke(cosmosClientBuilder); assertThat(userAgentSuffix).contains(Constants.USER_AGENT_SUFFIX); assertThat(userAgentSuffix).contains(PropertyLoader.getProjectVersion()); } private void deleteDatabaseIfExists(String dbName) { CosmosAsyncDatabase database = client.getDatabase(dbName); try { database.delete().block(); } catch (CosmosException ex) { assertEquals(ex.getStatusCode(), 404); } } }
nit: do we need the package prefix?
public void executeBulk_cancel() throws InterruptedException { int totalRequest = 100; this.container = createContainer(database); List<com.azure.cosmos.models.CosmosItemOperation> cosmosItemOperations = new ArrayList<>(); for (int i = 0; i < totalRequest; i++) { String partitionKey = UUID.randomUUID().toString(); BatchTestBase.TestDoc testDoc = this.populateTestDoc(partitionKey); cosmosItemOperations.add(CosmosBulkOperations.getCreateItemOperation(testDoc, new PartitionKey(partitionKey))); partitionKey = UUID.randomUUID().toString(); BatchTestBase.EventDoc eventDoc = new BatchTestBase.EventDoc(UUID.randomUUID().toString(), 2, 4, "type1", partitionKey); cosmosItemOperations.add(CosmosBulkOperations.getCreateItemOperation(eventDoc, new PartitionKey(partitionKey))); } com.azure.cosmos.models.CosmosItemOperation[] itemOperationsArray = new com.azure.cosmos.models.CosmosItemOperation[cosmosItemOperations.size()]; cosmosItemOperations.toArray(itemOperationsArray); CosmosBulkExecutionOptions cosmosBulkExecutionOptions = new CosmosBulkExecutionOptions(); Flux<CosmosItemOperation> inputFlux = Flux .fromArray(itemOperationsArray) .delayElements(Duration.ofMillis(100)); final BulkExecutor<BulkExecutorTest> executor = new BulkExecutor<>( container, inputFlux, cosmosBulkExecutionOptions); Flux<com.azure.cosmos.models.CosmosBulkOperationResponse<BulkExecutorTest>> bulkResponseFlux = Flux.deferContextual(context -> executor .execute() .doFinally((SignalType signal) -> { if (signal == SignalType.ON_COMPLETE) { logger.info("BulkExecutor.execute flux completed - executor.getItemsLeftSnapshot(), executor.getOperationContext()); } else { int itemsLeftSnapshot = executor.getItemsLeftSnapshot(); if (itemsLeftSnapshot > 0) { logger.info("BulkExecutor.execute flux terminated - Signal: {} - + "Context: {}", signal, itemsLeftSnapshot, executor.getOperationContext()); } else { logger.info("BulkExecutor.execute flux terminated - Signal: {} - + "Context: {}", signal, itemsLeftSnapshot, 
executor.getOperationContext()); } } executor.dispose(); })); Disposable disposable = bulkResponseFlux.subscribe(); disposable.dispose(); int iterations = 0; while (true) { assertThat(iterations < 100); if (executor.isDisposed()) { break; } Thread.sleep(10); iterations++; } }
List<com.azure.cosmos.models.CosmosItemOperation> cosmosItemOperations = new ArrayList<>();
public void executeBulk_cancel() throws InterruptedException { int totalRequest = 100; this.container = createContainer(database); List<CosmosItemOperation> cosmosItemOperations = new ArrayList<>(); for (int i = 0; i < totalRequest; i++) { String partitionKey = UUID.randomUUID().toString(); BatchTestBase.TestDoc testDoc = this.populateTestDoc(partitionKey); cosmosItemOperations.add(CosmosBulkOperations.getCreateItemOperation(testDoc, new PartitionKey(partitionKey))); partitionKey = UUID.randomUUID().toString(); BatchTestBase.EventDoc eventDoc = new BatchTestBase.EventDoc(UUID.randomUUID().toString(), 2, 4, "type1", partitionKey); cosmosItemOperations.add(CosmosBulkOperations.getCreateItemOperation(eventDoc, new PartitionKey(partitionKey))); } CosmosItemOperation[] itemOperationsArray = new CosmosItemOperation[cosmosItemOperations.size()]; cosmosItemOperations.toArray(itemOperationsArray); CosmosBulkExecutionOptions cosmosBulkExecutionOptions = new CosmosBulkExecutionOptions(); Flux<CosmosItemOperation> inputFlux = Flux .fromArray(itemOperationsArray) .delayElements(Duration.ofMillis(100)); final BulkExecutor<BulkExecutorTest> executor = new BulkExecutor<>( container, inputFlux, cosmosBulkExecutionOptions); Flux<com.azure.cosmos.models.CosmosBulkOperationResponse<BulkExecutorTest>> bulkResponseFlux = Flux.deferContextual(context -> executor.execute()); Disposable disposable = bulkResponseFlux.subscribe(); disposable.dispose(); int iterations = 0; while (true) { assertThat(iterations < 100); if (executor.isDisposed()) { break; } Thread.sleep(10); iterations++; } }
class BulkExecutorTest extends BatchTestBase { private CosmosAsyncClient client; private CosmosAsyncContainer container; private CosmosAsyncDatabase database; private String preExistingDatabaseId = CosmosDatabaseForTest.generateId(); @Factory(dataProvider = "clientBuilders") public BulkExecutorTest(CosmosClientBuilder clientBuilder) { super(clientBuilder); } @AfterClass(groups = { "emulator" }, timeOut = 3 * SHUTDOWN_TIMEOUT, alwaysRun = true) public void afterClass() { logger.info("starting ...."); safeDeleteDatabase(database); safeCloseClient(client); } @AfterMethod(groups = { "emulator" }) public void afterTest() throws Exception { if (this.container != null) { try { this.container.delete().block(); } catch (CosmosException error) { if (error.getStatusCode() != 404) { throw error; } } } } @BeforeMethod(groups = { "emulator" }) public void beforeTest() throws Exception { this.container = null; } @BeforeClass(groups = { "emulator" }, timeOut = SETUP_TIMEOUT) public void before_CosmosContainerTest() { client = getClientBuilder().buildAsyncClient(); database = createDatabase(client, preExistingDatabaseId); } static protected CosmosAsyncContainer createContainer(CosmosAsyncDatabase database) { String collectionName = UUID.randomUUID().toString(); CosmosContainerProperties containerProperties = getCollectionDefinition(collectionName); database.createContainer(containerProperties).block(); return database.getContainer(collectionName); } static protected CosmosAsyncDatabase createDatabase(CosmosAsyncClient client, String databaseId) { CosmosDatabaseProperties databaseSettings = new CosmosDatabaseProperties(databaseId); client.createDatabase(databaseSettings).block(); return client.getDatabase(databaseSettings.getId()); } static protected CosmosAsyncDatabase createDatabaseIfNotExists(CosmosAsyncClient client, String databaseId) { List<CosmosDatabaseProperties> res = client.queryDatabases(String.format("SELECT * FROM r where r.id = '%s'", databaseId), null) .collectList() 
.block(); if (res.size() != 0) { CosmosAsyncDatabase database = client.getDatabase(databaseId); database.read().block(); return database; } else { CosmosDatabaseProperties databaseSettings = new CosmosDatabaseProperties(databaseId); client.createDatabase(databaseSettings).block(); return client.getDatabase(databaseSettings.getId()); } } @Test(groups = { "emulator" }, timeOut = TIMEOUT) @Test(groups = { "emulator" }, timeOut = TIMEOUT) public void executeBulk_complete() throws InterruptedException { int totalRequest = 10; this.container = createContainer(database); List<com.azure.cosmos.models.CosmosItemOperation> cosmosItemOperations = new ArrayList<>(); for (int i = 0; i < totalRequest; i++) { String partitionKey = UUID.randomUUID().toString(); BatchTestBase.TestDoc testDoc = this.populateTestDoc(partitionKey); cosmosItemOperations.add(CosmosBulkOperations.getCreateItemOperation(testDoc, new PartitionKey(partitionKey))); partitionKey = UUID.randomUUID().toString(); BatchTestBase.EventDoc eventDoc = new BatchTestBase.EventDoc(UUID.randomUUID().toString(), 2, 4, "type1", partitionKey); cosmosItemOperations.add(CosmosBulkOperations.getCreateItemOperation(eventDoc, new PartitionKey(partitionKey))); } com.azure.cosmos.models.CosmosItemOperation[] itemOperationsArray = new com.azure.cosmos.models.CosmosItemOperation[cosmosItemOperations.size()]; cosmosItemOperations.toArray(itemOperationsArray); CosmosBulkExecutionOptions cosmosBulkExecutionOptions = new CosmosBulkExecutionOptions(); final BulkExecutor<BulkExecutorTest> executor = new BulkExecutor<>( container, Flux.fromArray(itemOperationsArray), cosmosBulkExecutionOptions); Flux<com.azure.cosmos.models.CosmosBulkOperationResponse<BulkExecutorTest>> bulkResponseFlux = Flux.deferContextual(context -> executor .execute() .doFinally((SignalType signal) -> { if (signal == SignalType.ON_COMPLETE) { logger.debug("BulkExecutor.execute flux completed - executor.getItemsLeftSnapshot(), executor.getOperationContext()); } else { 
int itemsLeftSnapshot = executor.getItemsLeftSnapshot(); if (itemsLeftSnapshot > 0) { logger.info("BulkExecutor.execute flux terminated - Signal: {} - + "Context: {}", signal, itemsLeftSnapshot, executor.getOperationContext()); } else { logger.debug("BulkExecutor.execute flux terminated - Signal: {} - + "Context: {}", signal, itemsLeftSnapshot, executor.getOperationContext()); } } executor.dispose(); })); Mono<List<CosmosBulkOperationResponse<BulkExecutorTest>>> convertToListMono = bulkResponseFlux .collect(Collectors.toList()); List<CosmosBulkOperationResponse<BulkExecutorTest>> bulkResponse = convertToListMono.block(); assertThat(bulkResponse.size()).isEqualTo(totalRequest * 2); for (com.azure.cosmos.models.CosmosBulkOperationResponse<BulkExecutorTest> cosmosBulkOperationResponse : bulkResponse) { com.azure.cosmos.models.CosmosBulkItemResponse cosmosBulkItemResponse = cosmosBulkOperationResponse.getResponse(); assertThat(cosmosBulkItemResponse.getStatusCode()).isEqualTo(HttpResponseStatus.CREATED.code()); assertThat(cosmosBulkItemResponse.getRequestCharge()).isGreaterThan(0); assertThat(cosmosBulkItemResponse.getCosmosDiagnostics().toString()).isNotNull(); assertThat(cosmosBulkItemResponse.getSessionToken()).isNotNull(); assertThat(cosmosBulkItemResponse.getActivityId()).isNotNull(); assertThat(cosmosBulkItemResponse.getRequestCharge()).isNotNull(); } int iterations = 0; while (true) { assertThat(iterations < 100); if (executor.isDisposed()) { break; } Thread.sleep(10); iterations++; } } static protected void safeClose(CosmosAsyncClient client) { if (client != null) { try { client.close(); } catch (Exception e) { logger.error("failed to close client", e); } } } static protected void safeCloseAsync(CosmosAsyncClient client) { if (client != null) { new Thread(() -> { try { client.close(); } catch (Exception e) { logger.error("failed to close client", e); } }).start(); } } static protected void safeCloseClient(CosmosAsyncClient client) { if (client != null) { try { 
logger.info("closing client ..."); client.close(); logger.info("closing client completed"); } catch (Exception e) { logger.error("failed to close client", e); } } } static protected void safeDeleteAllCollections(CosmosAsyncDatabase database) { if (database != null) { List<CosmosContainerProperties> collections = database.readAllContainers() .collectList() .block(); for (CosmosContainerProperties collection : collections) { database.getContainer(collection.getId()).delete().block(); } } } static protected void safeDeleteCollection(CosmosAsyncContainer collection) { if (collection != null) { try { collection.delete().block(); } catch (Exception e) { } } } static protected void safeDeleteCollection(CosmosAsyncDatabase database, String collectionId) { if (database != null && collectionId != null) { try { database.getContainer(collectionId).delete().block(); } catch (Exception e) { } } } static protected void safeDeleteDatabase(CosmosAsyncDatabase database) { if (database != null) { try { database.delete().block(); } catch (Exception e) { } } } static protected void safeDeleteSyncDatabase(CosmosDatabase database) { if (database != null) { try { logger.info("attempting to delete database ...."); database.delete(); logger.info("database deletion completed"); } catch (Exception e) { logger.error("failed to delete sync database", e); } } } }
class BulkExecutorTest extends BatchTestBase { private CosmosAsyncClient client; private CosmosAsyncContainer container; private CosmosAsyncDatabase database; private String preExistingDatabaseId = CosmosDatabaseForTest.generateId(); @Factory(dataProvider = "clientBuilders") public BulkExecutorTest(CosmosClientBuilder clientBuilder) { super(clientBuilder); } @AfterClass(groups = { "emulator" }, timeOut = 3 * SHUTDOWN_TIMEOUT, alwaysRun = true) public void afterClass() { logger.info("starting ...."); safeDeleteDatabase(database); safeCloseAsync(client); } @AfterMethod(groups = { "emulator" }) public void afterTest() throws Exception { if (this.container != null) { try { this.container.delete().block(); } catch (CosmosException error) { if (error.getStatusCode() != 404) { throw error; } } } } @BeforeMethod(groups = { "emulator" }) public void beforeTest() throws Exception { this.container = null; } @BeforeClass(groups = { "emulator" }, timeOut = SETUP_TIMEOUT) public void before_CosmosContainerTest() { client = getClientBuilder().buildAsyncClient(); database = createDatabase(client, preExistingDatabaseId); } static protected CosmosAsyncContainer createContainer(CosmosAsyncDatabase database) { String collectionName = UUID.randomUUID().toString(); CosmosContainerProperties containerProperties = getCollectionDefinition(collectionName); database.createContainer(containerProperties).block(); return database.getContainer(collectionName); } @Test(groups = { "emulator" }, timeOut = TIMEOUT) @Test(groups = { "emulator" }, timeOut = TIMEOUT) public void executeBulk_complete() throws InterruptedException { int totalRequest = 10; this.container = createContainer(database); List<CosmosItemOperation> cosmosItemOperations = new ArrayList<>(); for (int i = 0; i < totalRequest; i++) { String partitionKey = UUID.randomUUID().toString(); BatchTestBase.TestDoc testDoc = this.populateTestDoc(partitionKey); cosmosItemOperations.add(CosmosBulkOperations.getCreateItemOperation(testDoc, new 
PartitionKey(partitionKey))); partitionKey = UUID.randomUUID().toString(); BatchTestBase.EventDoc eventDoc = new BatchTestBase.EventDoc(UUID.randomUUID().toString(), 2, 4, "type1", partitionKey); cosmosItemOperations.add(CosmosBulkOperations.getCreateItemOperation(eventDoc, new PartitionKey(partitionKey))); } CosmosItemOperation[] itemOperationsArray = new CosmosItemOperation[cosmosItemOperations.size()]; cosmosItemOperations.toArray(itemOperationsArray); CosmosBulkExecutionOptions cosmosBulkExecutionOptions = new CosmosBulkExecutionOptions(); final BulkExecutor<BulkExecutorTest> executor = new BulkExecutor<>( container, Flux.fromArray(itemOperationsArray), cosmosBulkExecutionOptions); Flux<com.azure.cosmos.models.CosmosBulkOperationResponse<BulkExecutorTest>> bulkResponseFlux = Flux.deferContextual(context -> executor.execute()); Mono<List<CosmosBulkOperationResponse<BulkExecutorTest>>> convertToListMono = bulkResponseFlux .collect(Collectors.toList()); List<CosmosBulkOperationResponse<BulkExecutorTest>> bulkResponse = convertToListMono.block(); assertThat(bulkResponse.size()).isEqualTo(totalRequest * 2); for (com.azure.cosmos.models.CosmosBulkOperationResponse<BulkExecutorTest> cosmosBulkOperationResponse : bulkResponse) { com.azure.cosmos.models.CosmosBulkItemResponse cosmosBulkItemResponse = cosmosBulkOperationResponse.getResponse(); assertThat(cosmosBulkItemResponse.getStatusCode()).isEqualTo(HttpResponseStatus.CREATED.code()); assertThat(cosmosBulkItemResponse.getRequestCharge()).isGreaterThan(0); assertThat(cosmosBulkItemResponse.getCosmosDiagnostics().toString()).isNotNull(); assertThat(cosmosBulkItemResponse.getSessionToken()).isNotNull(); assertThat(cosmosBulkItemResponse.getActivityId()).isNotNull(); assertThat(cosmosBulkItemResponse.getRequestCharge()).isNotNull(); } int iterations = 0; while (true) { assertThat(iterations < 100); if (executor.isDisposed()) { break; } Thread.sleep(10); iterations++; } } }
nit: name method to getTopics
private Mono<PagedResponse<TopicProperties>> listTopics(int skip, Context context) { return managementClient.listEntitiesWithResponseAsync(TOPICS_ENTITY_TYPE, skip, NUMBER_OF_ELEMENTS, context) .onErrorMap(ServiceBusAdministrationAsyncClient::mapException) .flatMap(response -> { final Response<TopicDescriptionFeed> feedResponse = deserialize(response, TopicDescriptionFeed.class); final TopicDescriptionFeed feed = feedResponse.getValue(); if (feed == null) { LOGGER.warning("Could not deserialize TopicDescriptionFeed. skip {}, top: {}", skip, NUMBER_OF_ELEMENTS); return Mono.empty(); } final List<TopicProperties> entities = getTopicPropertiesList(feed); try { return Mono.just(extractPage(feedResponse, entities, feed.getLink())); } catch (MalformedURLException | UnsupportedEncodingException error) { return Mono.error(new RuntimeException("Could not parse response into FeedPage<TopicDescription>", error)); } }); }
final List<TopicProperties> entities = getTopicPropertiesList(feed);
new RuntimeException("Could not parse response into FeedPage<TopicDescription>", error)); } }); } /** * Checks if the given entity is an absolute URL, if so return it. * Otherwise, construct the URL from the given entity and return that. * * @param entity : entity to forward messages to. * * @return Forward to Entity represented as an absolute URL */ private String getAbsoluteUrlFromEntity(String entity) { try { URL url = new URL(entity); return url.toString(); } catch (MalformedURLException ex) { } UrlBuilder urlBuilder = new UrlBuilder(); urlBuilder.setScheme("https"); urlBuilder.setHost(managementClient.getEndpoint()); urlBuilder.setPath(entity); try { URL url = urlBuilder.toUrl(); return url.toString(); } catch (MalformedURLException ex) { LOGGER.error("Failed to construct URL using the endpoint:'{}' and entity:'{}'", managementClient.getEndpoint(), entity); LOGGER.logThrowableAsError(ex); } return null; }
class ServiceBusAdministrationAsyncClient { private static final ClientLogger LOGGER = new ClientLogger(ServiceBusAdministrationAsyncClient.class); private final ServiceBusManagementClientImpl managementClient; private final EntitiesImpl entityClient; private final ServiceBusManagementSerializer serializer; private final RulesImpl rulesClient; /** * Creates a new instance with the given management client and serializer. * * @param managementClient Client to make management calls. * @param serializer Serializer to deserialize ATOM XML responses. * * @throws NullPointerException if any one of {@code managementClient, serializer, credential} is null. */ ServiceBusAdministrationAsyncClient(ServiceBusManagementClientImpl managementClient, ServiceBusManagementSerializer serializer) { this.serializer = Objects.requireNonNull(serializer, "'serializer' cannot be null."); this.managementClient = Objects.requireNonNull(managementClient, "'managementClient' cannot be null."); this.entityClient = managementClient.getEntities(); this.rulesClient = managementClient.getRules(); } /** * Creates a queue with the given name. * * @param queueName Name of the queue to create. * * @return A Mono that completes with information about the created queue. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If the request body was invalid, the queue quota is exceeded, or an error * occurred processing the request. * @throws NullPointerException if {@code queueName} is null. * @throws IllegalArgumentException if {@code queueName} is an empty string. * @throws ResourceExistsException if a queue exists with the same {@code queueName}. 
* @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<QueueProperties> createQueue(String queueName) { try { return createQueue(queueName, new CreateQueueOptions()); } catch (RuntimeException e) { return monoError(LOGGER, e); } } /** * Creates a queue with the {@link CreateQueueOptions} and given queue name. * * @param queueName Name of the queue to create. * @param queueOptions Options about the queue to create. * * @return A Mono that completes with information about the created queue. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If the request body was invalid, the queue quota is exceeded, or an error * occurred processing the request. * @throws IllegalArgumentException if {@code queueName} is an empty string. * @throws NullPointerException if {@code queueName} or {@code queueOptions} is null. * @throws ResourceExistsException if a queue exists with the same {@link QueueProperties * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<QueueProperties> createQueue(String queueName, CreateQueueOptions queueOptions) { return createQueueWithResponse(queueName, queueOptions).map(Response::getValue); } /** * Creates a queue and returns the created queue in addition to the HTTP response. * * @param queueName Name of the queue to create. * @param queueOptions Options about the queue to create. * * @return A Mono that returns the created queue in addition to the HTTP response. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If the request body was invalid, the queue quota is exceeded, or an error * occurred processing the request. * @throws IllegalArgumentException if {@code queueName} is an empty string. * @throws NullPointerException if {@code queueName} or {@code queueOptions} is null. 
* @throws ResourceExistsException if a queue exists with the same {@link QueueProperties * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<QueueProperties>> createQueueWithResponse(String queueName, CreateQueueOptions queueOptions) { return withContext(context -> createQueueWithResponse(queueName, queueOptions, context)); } /** * Creates a rule under the given topic and subscription * * @param topicName Name of the topic associated with rule. * @param subscriptionName Name of the subscription associated with the rule. * @param ruleName Name of the rule. * * @return A Mono that completes with information about the created rule. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If the request body was invalid, the quota is exceeded, or an error occurred * processing the request. * @throws IllegalArgumentException if {@code topicName} or {@code ruleName} are empty strings. * @throws NullPointerException if {@code topicName} or {@code ruleName} are null. * @throws ResourceExistsException if a rule exists with the same topic, subscription, and rule name. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<RuleProperties> createRule(String topicName, String subscriptionName, String ruleName) { try { return createRule(topicName, subscriptionName, ruleName, new CreateRuleOptions()); } catch (RuntimeException e) { return monoError(LOGGER, e); } } /** * Creates a rule with the {@link CreateRuleOptions}. * * @param topicName Name of the topic associated with rule. * @param subscriptionName Name of the subscription associated with the rule. * @param ruleName Name of the rule. * @param ruleOptions Information about the rule to create. * * @return A Mono that completes with information about the created rule. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. 
* @throws HttpResponseException If the request body was invalid, the quota is exceeded, or an error occurred * processing the request. * @throws IllegalArgumentException if {@code topicName} or {@code ruleName} are empty strings. * @throws NullPointerException if {@code topicName}, {@code ruleName}, or {@code ruleOptions} * are null. * @throws ResourceExistsException if a rule exists with the same topic and rule name. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<RuleProperties> createRule(String topicName, String subscriptionName, String ruleName, CreateRuleOptions ruleOptions) { return createRuleWithResponse(topicName, subscriptionName, ruleName, ruleOptions) .map(Response::getValue); } /** * Creates a rule and returns the created rule in addition to the HTTP response. * * @param topicName Name of the topic associated with rule. * @param subscriptionName Name of the subscription associated with the rule. * @param ruleName Name of the rule. * @param ruleOptions Information about the rule to create. * * @return A Mono that returns the created rule in addition to the HTTP response. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If the request body was invalid, the quota is exceeded, or an error occurred * processing the request. * @throws IllegalArgumentException if {@code topicName} or {@code ruleName} are empty strings. * @throws NullPointerException if {@code topicName}, {@code ruleName}, or {@code ruleOptions} * are null. * @throws ResourceExistsException if a rule exists with the same topic and rule name. 
*/ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<RuleProperties>> createRuleWithResponse(String topicName, String subscriptionName, String ruleName, CreateRuleOptions ruleOptions) { return withContext(context -> createRuleWithResponse(topicName, subscriptionName, ruleName, ruleOptions, context)); } /** * Creates a subscription with the given topic and subscription names. * * @param topicName Name of the topic associated with subscription. * @param subscriptionName Name of the subscription. * * @return A Mono that completes with information about the created subscription. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If the request body was invalid, the quota is exceeded, or an error occurred * processing the request. * @throws IllegalArgumentException if {@code topicName} or {@code subscriptionName} are empty strings. * @throws NullPointerException if {@code topicName} or {@code subscriptionName} are null. * @throws ResourceExistsException if a subscription exists with the same topic and subscription name. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<SubscriptionProperties> createSubscription(String topicName, String subscriptionName) { try { return createSubscription(topicName, subscriptionName, new CreateSubscriptionOptions()); } catch (RuntimeException e) { return monoError(LOGGER, e); } } /** * Creates a subscription with the {@link CreateSubscriptionOptions}. * * @param topicName Name of the topic associated with subscription. * @param subscriptionName Name of the subscription. * @param subscriptionOptions Information about the subscription to create. * * @return A Mono that completes with information about the created subscription. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. 
* @throws HttpResponseException If the request body was invalid, the quota is exceeded, or an error occurred * processing the request. * @throws IllegalArgumentException if {@code topicName} or {@code subscriptionName} are empty strings. * @throws NullPointerException if {@code topicName}, {@code subscriptionName}, or {@code subscriptionOptions} * are null. * @throws ResourceExistsException if a subscription exists with the same topic and subscription name. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<SubscriptionProperties> createSubscription(String topicName, String subscriptionName, CreateSubscriptionOptions subscriptionOptions) { return createSubscriptionWithResponse(topicName, subscriptionName, subscriptionOptions) .map(Response::getValue); } /** * Creates a subscription and returns the created subscription in addition to the HTTP response. * * @param topicName Name of the topic associated with subscription. * @param subscriptionName Name of the subscription. * @param subscriptionOptions Information about the subscription to create. * * @return A Mono that returns the created subscription in addition to the HTTP response. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If the request body was invalid, the quota is exceeded, or an error occurred * processing the request. * @throws IllegalArgumentException if {@code topicName} or {@code subscriptionName} are empty strings. * @throws NullPointerException if {@code topicName}, {@code subscriptionName}, or {@code subscriptionOptions} * are null. * @throws ResourceExistsException if a subscription exists with the same topic and subscription name. 
* @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<SubscriptionProperties>> createSubscriptionWithResponse(String topicName, String subscriptionName, CreateSubscriptionOptions subscriptionOptions) { return withContext(context -> createSubscriptionWithResponse(topicName, subscriptionName, subscriptionOptions, context)); } /** * Creates a topic with the given name. * * @param topicName Name of the topic to create. * * @return A Mono that completes with information about the created topic. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If the request body was invalid, the topic quota is exceeded, or an error * occurred processing the request. * @throws NullPointerException if {@code topicName} is null. * @throws IllegalArgumentException if {@code topicName} is an empty string. * @throws ResourceExistsException if a topic exists with the same {@code topicName}. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<TopicProperties> createTopic(String topicName) { try { return createTopic(topicName, new CreateTopicOptions()); } catch (RuntimeException e) { return monoError(LOGGER, e); } } /** * Creates a topic with the {@link CreateTopicOptions}. * * @param topicName Name of the topic to create. * @param topicOptions The options used to create the topic. * * @return A Mono that completes with information about the created topic. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If the request body was invalid, the topic quota is exceeded, or an error * occurred processing the request. * @throws IllegalArgumentException if {@code topicName} is an empty string. * @throws NullPointerException if {@code topicName} or {@code topicOptions} is null. 
* @throws ResourceExistsException if a topic exists with the same {@code topicName}. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<TopicProperties> createTopic(String topicName, CreateTopicOptions topicOptions) { return createTopicWithResponse(topicName, topicOptions).map(Response::getValue); } /** * Creates a topic and returns the created topic in addition to the HTTP response. * * @param topicName Name of the topic to create. * @param topicOptions The options used to create the topic. * * @return A Mono that returns the created topic in addition to the HTTP response. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If the request body was invalid, the topic quota is exceeded, or an error * occurred processing the request. * @throws IllegalArgumentException if {@link TopicProperties * string. * @throws NullPointerException if {@code topicName} or {@code topicOptions} is null. * @throws ResourceExistsException if a topic exists with the same {@code topicName}. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<TopicProperties>> createTopicWithResponse(String topicName, CreateTopicOptions topicOptions) { return withContext(context -> createTopicWithResponse(topicName, topicOptions, context)); } /** * Deletes a queue the matching {@code queueName}. * * @param queueName Name of queue to delete. * * @return A Mono that completes when the queue is deleted. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. * @throws NullPointerException if {@code queueName} is null. * @throws IllegalArgumentException if {@code queueName} is an empty string. * @throws ResourceNotFoundException if the {@code queueName} does not exist. 
* @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Void> deleteQueue(String queueName) { return deleteQueueWithResponse(queueName).then(); } /** * Deletes a queue the matching {@code queueName} and returns the HTTP response. * * @param queueName Name of queue to delete. * * @return A Mono that completes when the queue is deleted and returns the HTTP response. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. * @throws NullPointerException if {@code queueName} is null. * @throws IllegalArgumentException if {@code queueName} is an empty string. * @throws ResourceNotFoundException if the {@code queueName} does not exist. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<Void>> deleteQueueWithResponse(String queueName) { return withContext(context -> deleteQueueWithResponse(queueName, context)); } /** * Deletes a rule the matching {@code ruleName}. * * @param topicName Name of topic associated with rule to delete. * @param subscriptionName Name of the subscription associated with the rule to delete. * @param ruleName Name of rule to delete. * * @return A Mono that completes when the rule is deleted. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. * @throws IllegalArgumentException if {@code topicName} or {@code ruleName} is an empty string. * @throws NullPointerException if {@code topicName} or {@code ruleName} is null. * @throws ResourceNotFoundException if the {@code ruleName} does not exist. 
*/ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Void> deleteRule(String topicName, String subscriptionName, String ruleName) { return deleteRuleWithResponse(topicName, subscriptionName, ruleName).then(); } /** * Deletes a rule the matching {@code ruleName} and returns the HTTP response. * * @param topicName Name of topic associated with rule to delete. * @param subscriptionName Name of the subscription associated with the rule to delete. * @param ruleName Name of rule to delete. * * @return A Mono that completes when the rule is deleted and returns the HTTP response. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. * @throws IllegalArgumentException if {@code topicName}, {@code subscriptionName}, or {@code ruleName} is an * empty string. * @throws NullPointerException if {@code topicName}, {@code subscriptionName}, or {@code ruleName} is null. * @throws ResourceNotFoundException if the {@code ruleName} does not exist. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<Void>> deleteRuleWithResponse(String topicName, String subscriptionName, String ruleName) { return withContext(context -> deleteRuleWithResponse(topicName, subscriptionName, ruleName, context)); } /** * Deletes a subscription the matching {@code subscriptionName}. * * @param topicName Name of topic associated with subscription to delete. * @param subscriptionName Name of subscription to delete. * * @return A Mono that completes when the subscription is deleted. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. * @throws IllegalArgumentException if {@code topicName} or {@code subscriptionName} is an empty string. * @throws NullPointerException if {@code topicName} or {@code subscriptionName} is null. 
* @throws ResourceNotFoundException if the {@code subscriptionName} does not exist. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Void> deleteSubscription(String topicName, String subscriptionName) { return deleteSubscriptionWithResponse(topicName, subscriptionName).then(); } /** * Deletes a subscription the matching {@code subscriptionName} and returns the HTTP response. * * @param topicName Name of topic associated with subscription to delete. * @param subscriptionName Name of subscription to delete. * * @return A Mono that completes when the subscription is deleted and returns the HTTP response. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. * @throws IllegalArgumentException if {@code topicName} or {@code subscriptionName} is an empty string. * @throws NullPointerException if {@code topicName} or {@code subscriptionName} is null. * @throws ResourceNotFoundException if the {@code subscriptionName} does not exist. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<Void>> deleteSubscriptionWithResponse(String topicName, String subscriptionName) { return withContext(context -> deleteSubscriptionWithResponse(topicName, subscriptionName, context)); } /** * Deletes a topic the matching {@code topicName}. * * @param topicName Name of topic to delete. * * @return A Mono that completes when the topic is deleted. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. * @throws IllegalArgumentException if {@code topicName} is an empty string. * @throws NullPointerException if {@code topicName} is null. * @throws ResourceNotFoundException if the {@code topicName} does not exist. 
* @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Void> deleteTopic(String topicName) { return deleteTopicWithResponse(topicName).then(); } /** * Deletes a topic the matching {@code topicName} and returns the HTTP response. * * @param topicName Name of topic to delete. * * @return A Mono that completes when the topic is deleted and returns the HTTP response. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. * @throws IllegalArgumentException if {@code topicName} is an empty string. * @throws NullPointerException if {@code topicName} is null. * @throws ResourceNotFoundException if the {@code topicName} does not exist. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<Void>> deleteTopicWithResponse(String topicName) { return withContext(context -> deleteTopicWithResponse(topicName, context)); } /** * Gets information about the queue. * * @param queueName Name of queue to get information about. * * @return A Mono that completes with information about the queue. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. * @throws IllegalArgumentException if {@code queueName} is an empty string. * @throws NullPointerException if {@code queueName} is null. * @throws ResourceNotFoundException if the {@code queueName} does not exist. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<QueueProperties> getQueue(String queueName) { return getQueueWithResponse(queueName).map(Response::getValue); } /** * Gets information about the queue along with its HTTP response. * * @param queueName Name of queue to get information about. 
* * @return A Mono that completes with information about the queue and the associated HTTP response. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. * @throws IllegalArgumentException if {@code queueName} is an empty string. * @throws NullPointerException if {@code queueName} is null. * @throws ResourceNotFoundException if the {@code queueName} does not exist. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<QueueProperties>> getQueueWithResponse(String queueName) { return withContext(context -> getQueueWithResponse(queueName, context, Function.identity())); } /** * Gets whether a queue with {@code queueName} exists in the Service Bus namespace. * * @param queueName Name of the queue. * * @return A Mono that completes indicating whether the queue exists. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. * @throws IllegalArgumentException if {@code queueName} is an empty string. * @throws NullPointerException if {@code queueName} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Boolean> getQueueExists(String queueName) { return getQueueExistsWithResponse(queueName).map(Response::getValue); } /** * Gets whether a queue with {@code queueName} exists in the Service Bus namespace. * * @param queueName Name of the queue. * * @return A Mono that completes indicating whether the queue exists along with its HTTP response. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. * @throws IllegalArgumentException if {@code queueName} is an empty string. * @throws NullPointerException if {@code queueName} is null. 
*/ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<Boolean>> getQueueExistsWithResponse(String queueName) { return getEntityExistsWithResponse(getQueueWithResponse(queueName)); } /** * Gets runtime properties about the queue. * * @param queueName Name of queue to get information about. * * @return A Mono that completes with runtime properties about the queue. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. * @throws IllegalArgumentException if {@code queueName} is an empty string. * @throws NullPointerException if {@code queueName} is null. * @throws ResourceNotFoundException if the {@code queueName} does not exist. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<QueueRuntimeProperties> getQueueRuntimeProperties(String queueName) { return getQueueRuntimePropertiesWithResponse(queueName).map(Response::getValue); } /** * Gets runtime properties about the queue along with its HTTP response. * * @param queueName Name of queue to get information about. * * @return A Mono that completes with runtime properties about the queue and the associated HTTP response. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. * @throws IllegalArgumentException if {@code queueName} is an empty string. * @throws NullPointerException if {@code queueName} is null. * @throws ResourceNotFoundException if the {@code queueName} does not exist. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<QueueRuntimeProperties>> getQueueRuntimePropertiesWithResponse(String queueName) { return withContext(context -> getQueueWithResponse(queueName, context, QueueRuntimeProperties::new)); } /** * Gets information about the Service Bus namespace. 
 * @return A Mono that completes with information about the Service Bus namespace.
 * @throws ClientAuthenticationException if the client's credentials do not have access to the namespace.
 * @throws HttpResponseException If error occurred processing the request.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<NamespaceProperties> getNamespaceProperties() {
    return getNamespacePropertiesWithResponse().map(Response::getValue);
}

/**
 * Gets information about the Service Bus namespace along with its HTTP response.
 *
 * @return A Mono that completes with information about the namespace and the associated HTTP response.
 * @throws ClientAuthenticationException if the client's credentials do not have access to modify the
 *     namespace.
 * @throws HttpResponseException If error occurred processing the request.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<NamespaceProperties>> getNamespacePropertiesWithResponse() {
    // Delegates to the Context-accepting overload defined elsewhere in this class.
    return withContext(this::getNamespacePropertiesWithResponse);
}

/**
 * Gets a rule from the service namespace.
 *
 * Only the following data types are deserialized in Filter and Action parameters: string, int, long, boolean,
 * double, and OffsetDateTime. Other data types are returned as their string value.
 *
 * @param topicName The name of the topic relative to the service bus namespace.
 * @param subscriptionName The subscription name the rule belongs to.
 * @param ruleName The name of the rule to retrieve.
 *
 * @return The associated rule.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<RuleProperties> getRule(String topicName, String subscriptionName, String ruleName) {
    return getRuleWithResponse(topicName, subscriptionName, ruleName).map(Response::getValue);
}

/**
 * Gets a rule from the service namespace along with its HTTP response.
 *
 * Only the following data types are deserialized in Filter and Action parameters: string, int, long, boolean,
 * double, and OffsetDateTime. Other data types are returned as their string value.
 *
 * @param topicName The name of the topic relative to the service bus namespace.
 * @param subscriptionName The subscription name the rule belongs to.
 * @param ruleName The name of the rule to retrieve.
 *
 * @return The associated rule with the corresponding HTTP response.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<RuleProperties>> getRuleWithResponse(String topicName, String subscriptionName,
    String ruleName) {
    return withContext(context -> getRuleWithResponse(topicName, subscriptionName, ruleName, context));
}

/**
 * Gets information about the subscription.
 *
 * @param topicName Name of topic associated with subscription.
 * @param subscriptionName Name of subscription to get information about.
 *
 * @return A Mono that completes with information about the subscription.
 * @throws ClientAuthenticationException if the client's credentials do not have access to modify the
 *     namespace.
 * @throws HttpResponseException If error occurred processing the request.
 * @throws IllegalArgumentException if {@code topicName} or {@code subscriptionName} are empty strings.
 * @throws NullPointerException if {@code topicName} or {@code subscriptionName} are null.
 * @throws ResourceNotFoundException if the {@code subscriptionName} does not exist in the {@code topicName}.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<SubscriptionProperties> getSubscription(String topicName, String subscriptionName) {
    return getSubscriptionWithResponse(topicName, subscriptionName).map(Response::getValue);
}

/**
 * Gets information about the subscription along with its HTTP response.
 *
 * @param topicName Name of topic associated with subscription.
 * @param subscriptionName Name of subscription to get information about.
 *
 * @return A Mono that completes with information about the subscription and the associated HTTP response.
 * @throws ClientAuthenticationException if the client's credentials do not have access to modify the
 *     namespace.
 * @throws HttpResponseException If error occurred processing the request.
 * @throws IllegalArgumentException if {@code topicName} or {@code subscriptionName} are empty strings.
 * @throws NullPointerException if {@code topicName} or {@code subscriptionName} are null.
 * @throws ResourceNotFoundException if the {@code subscriptionName} does not exist.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<SubscriptionProperties>> getSubscriptionWithResponse(String topicName,
    String subscriptionName) {
    // Function.identity(): the internal overload maps the deserialized entity; no extra transform here.
    return withContext(context -> getSubscriptionWithResponse(topicName, subscriptionName, context,
        Function.identity()));
}

/**
 * Gets whether a subscription within a topic exists.
 *
 * @param topicName Name of topic associated with subscription.
 * @param subscriptionName Name of the subscription.
 *
 * @return A Mono that completes indicating whether the subscription exists.
 * @throws ClientAuthenticationException if the client's credentials do not have access to modify the
 *     namespace.
 * @throws HttpResponseException If error occurred processing the request.
 * @throws IllegalArgumentException if {@code subscriptionName} is an empty string.
 * @throws NullPointerException if {@code subscriptionName} is null.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Boolean> getSubscriptionExists(String topicName, String subscriptionName) {
    return getSubscriptionExistsWithResponse(topicName, subscriptionName).map(Response::getValue);
}

/**
 * Gets whether a subscription within a topic exists, along with its HTTP response.
 *
 * @param topicName Name of topic associated with subscription.
 * @param subscriptionName Name of the subscription.
 *
 * @return A Mono that completes indicating whether the subscription exists along with its HTTP response.
 * @throws ClientAuthenticationException if the client's credentials do not have access to modify the
 *     namespace.
 * @throws HttpResponseException If error occurred processing the request.
 * @throws IllegalArgumentException if {@code subscriptionName} is an empty string.
 * @throws NullPointerException if {@code subscriptionName} is null.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<Boolean>> getSubscriptionExistsWithResponse(String topicName, String subscriptionName) {
    // ResourceNotFoundException from the GET is translated to a `false` response by the helper.
    return getEntityExistsWithResponse(getSubscriptionWithResponse(topicName, subscriptionName));
}

/**
 * Gets runtime properties about the subscription.
 *
 * @param topicName Name of topic associated with subscription.
 * @param subscriptionName Name of subscription to get information about.
 *
 * @return A Mono that completes with runtime properties about the subscription.
 * @throws ClientAuthenticationException if the client's credentials do not have access to modify the
 *     namespace.
 * @throws HttpResponseException If error occurred processing the request.
 * @throws IllegalArgumentException if {@code topicName} or {@code subscriptionName} are empty strings.
 * @throws NullPointerException if {@code topicName} or {@code subscriptionName} are null.
 * @throws ResourceNotFoundException if the {@code subscriptionName} does not exist.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<SubscriptionRuntimeProperties> getSubscriptionRuntimeProperties(
    String topicName, String subscriptionName) {
    return getSubscriptionRuntimePropertiesWithResponse(topicName, subscriptionName)
        .map(Response::getValue);
}

/**
 * Gets runtime properties about the subscription along with its HTTP response.
 *
 * @param topicName Name of topic associated with subscription.
 * @param subscriptionName Name of subscription to get information about.
 *
 * @return A Mono that completes with runtime properties about the subscription.
 * @throws ClientAuthenticationException if the client's credentials do not have access to modify the
 *     namespace.
 * @throws HttpResponseException If error occurred processing the request.
 * @throws IllegalArgumentException if {@code subscriptionName} is an empty string.
 * @throws NullPointerException if {@code subscriptionName} is null.
 * @throws ResourceNotFoundException if the {@code subscriptionName} does not exist.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<SubscriptionRuntimeProperties>> getSubscriptionRuntimePropertiesWithResponse(
    String topicName, String subscriptionName) {
    // Same GET as getSubscriptionWithResponse; the runtime-properties view wraps the result.
    return withContext(context -> getSubscriptionWithResponse(topicName, subscriptionName, context,
        SubscriptionRuntimeProperties::new));
}

/**
 * Gets information about the topic.
 *
 * @param topicName Name of topic to get information about.
 *
 * @return A Mono that completes with information about the topic.
 * @throws ClientAuthenticationException if the client's credentials do not have access to modify the
 *     namespace.
 * @throws HttpResponseException If error occurred processing the request.
 * @throws IllegalArgumentException if {@code topicName} is an empty string.
 * @throws NullPointerException if {@code topicName} is null.
 * @throws ResourceNotFoundException if the {@code topicName} does not exist.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<TopicProperties> getTopic(String topicName) {
    return getTopicWithResponse(topicName).map(Response::getValue);
}

/**
 * Gets information about the topic along with its HTTP response.
 *
 * @param topicName Name of topic to get information about.
 *
 * @return A Mono that completes with information about the topic and the associated HTTP response.
 * @throws ClientAuthenticationException if the client's credentials do not have access to modify the
 *     namespace.
 * @throws HttpResponseException If error occurred processing the request.
 * @throws IllegalArgumentException if {@code topicName} is an empty string.
 * @throws NullPointerException if {@code topicName} is null.
 * @throws ResourceNotFoundException if the {@code topicName} does not exist.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<TopicProperties>> getTopicWithResponse(String topicName) {
    return withContext(context -> getTopicWithResponse(topicName, context, Function.identity()));
}

/**
 * Gets whether a topic with {@code topicName} exists in the Service Bus namespace.
 *
 * @param topicName Name of the topic.
 *
 * @return A Mono that completes indicating whether the topic exists.
 * @throws ClientAuthenticationException if the client's credentials do not have access to modify the
 *     namespace.
 * @throws HttpResponseException If error occurred processing the request.
 * @throws IllegalArgumentException if {@code topicName} is an empty string.
 * @throws NullPointerException if {@code topicName} is null.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Boolean> getTopicExists(String topicName) {
    return getTopicExistsWithResponse(topicName).map(Response::getValue);
}

/**
 * Gets whether a topic with {@code topicName} exists in the Service Bus namespace, with its HTTP response.
 *
 * @param topicName Name of the topic.
 *
 * @return A Mono that completes indicating whether the topic exists along with its HTTP response.
 * @throws ClientAuthenticationException if the client's credentials do not have access to modify the
 *     namespace.
 * @throws HttpResponseException If error occurred processing the request.
 * @throws IllegalArgumentException if {@code topicName} is an empty string.
 * @throws NullPointerException if {@code topicName} is null.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<Boolean>> getTopicExistsWithResponse(String topicName) {
    // ResourceNotFoundException from the GET is translated to a `false` response by the helper.
    return getEntityExistsWithResponse(getTopicWithResponse(topicName));
}

/**
 * Gets runtime properties about the topic.
 *
 * @param topicName Name of topic to get information about.
 *
 * @return A Mono that completes with runtime properties about the topic.
 * @throws ClientAuthenticationException if the client's credentials do not have access to modify the
 *     namespace.
 * @throws HttpResponseException If error occurred processing the request.
 * @throws IllegalArgumentException if {@code topicName} is an empty string.
 * @throws NullPointerException if {@code topicName} is null.
 * @throws ResourceNotFoundException if the {@code topicName} does not exist.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<TopicRuntimeProperties> getTopicRuntimeProperties(String topicName) {
    return getTopicRuntimePropertiesWithResponse(topicName).map(Response::getValue);
}

/**
 * Gets runtime properties about the topic with its HTTP response.
 *
 * @param topicName Name of topic to get information about.
 *
 * @return A Mono that completes with runtime properties about the topic and the associated HTTP response.
 * @throws ClientAuthenticationException if the client's credentials do not have access to modify the
 *     namespace.
 * @throws HttpResponseException If error occurred processing the request.
 * @throws IllegalArgumentException if {@code topicName} is an empty string.
 * @throws NullPointerException if {@code topicName} is null.
 * @throws ResourceNotFoundException if the {@code topicName} does not exist.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<TopicRuntimeProperties>> getTopicRuntimePropertiesWithResponse(String topicName) {
    return withContext(context -> getTopicWithResponse(topicName, context, TopicRuntimeProperties::new));
}

/**
 * Fetches all the queues in the Service Bus namespace.
 *
 * @return A Flux of {@link QueueProperties queues} in the Service Bus namespace.
 * @throws ClientAuthenticationException if the client's credentials do not have access to modify the
 *     namespace.
* @see <a href="https: * authorization rules</a> */ @ServiceMethod(returns = ReturnType.COLLECTION) public PagedFlux<QueueProperties> listQueues() { return new PagedFlux<>(() -> withContext(context -> listQueuesFirstPage(context)), token -> withContext(context -> listQueuesNextPage(token, context))); } /** * Fetches all the rules for a topic and subscription. * * @param topicName The topic name under which all the rules need to be retrieved. * @param subscriptionName The name of the subscription for which all rules need to be retrieved. * * @return A Flux of {@link RuleProperties rules} for the {@code topicName} and {@code subscriptionName}. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws NullPointerException if {@code topicName} or {@code subscriptionName} is null. * @throws IllegalArgumentException if {@code topicName} or {@code subscriptionName} is an empty string. * @see <a href="https: * authorization rules</a> */ @ServiceMethod(returns = ReturnType.COLLECTION) public PagedFlux<RuleProperties> listRules(String topicName, String subscriptionName) { if (topicName == null) { return pagedFluxError(LOGGER, new NullPointerException("'topicName' cannot be null.")); } else if (topicName.isEmpty()) { return pagedFluxError(LOGGER, new IllegalArgumentException("'topicName' cannot be an empty string.")); } return new PagedFlux<>( () -> withContext(context -> listRulesFirstPage(topicName, subscriptionName, context)), token -> withContext(context -> listRulesNextPage(topicName, subscriptionName, token, context))); } /** * Fetches all the subscriptions for a topic. * * @param topicName The topic name under which all the subscriptions need to be retrieved. * * @return A Flux of {@link SubscriptionProperties subscriptions} for the {@code topicName}. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. 
* @throws NullPointerException if {@code topicName} is null. * @throws IllegalArgumentException if {@code topicName} is an empty string. * @see <a href="https: * authorization rules</a> */ @ServiceMethod(returns = ReturnType.COLLECTION) public PagedFlux<SubscriptionProperties> listSubscriptions(String topicName) { if (topicName == null) { return pagedFluxError(LOGGER, new NullPointerException("'topicName' cannot be null.")); } else if (topicName.isEmpty()) { return pagedFluxError(LOGGER, new IllegalArgumentException("'topicName' cannot be an empty string.")); } return new PagedFlux<>( () -> withContext(context -> listSubscriptionsFirstPage(topicName, context)), token -> withContext(context -> listSubscriptionsNextPage(topicName, token, context))); } /** * Fetches all the topics in the Service Bus namespace. * * @return A Flux of {@link TopicProperties topics} in the Service Bus namespace. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @see <a href="https: * authorization rules</a> */ @ServiceMethod(returns = ReturnType.COLLECTION) public PagedFlux<TopicProperties> listTopics() { return new PagedFlux<>( () -> withContext(this::listTopicsFirstPage), token -> withContext(context -> listTopicsNextPage(token, context))); } /** * Updates a queue with the given {@link QueueProperties}. The {@link QueueProperties} must be fully populated as * all the properties are replaced. If a property is not set the service default value is used. * * The suggested flow is: * <ol> * <li>{@link * <li>Update the required elements.</li> * <li>Pass the updated description into this method.</li> * </ol> * * <p> * There are a subset of properties that can be updated. More information can be found in the links below. They are: * <ul> * <li>{@link QueueProperties * <li>{@link QueueProperties * <li>{@link QueueProperties * </li> * <li>{@link QueueProperties * </ul> * * @param queue Information about the queue to update. 
 *     You must provide all the property values that are desired on the updated entity. Any values not
 *     provided are set to the service default values.
 *
 * @return A Mono that completes with the updated queue.
 * @throws ClientAuthenticationException if the client's credentials do not have access to modify the
 *     namespace.
 * @throws HttpResponseException If the request body was invalid, the queue quota is exceeded, or an error
 *     occurred processing the request.
 * @throws NullPointerException if {@code queue} is null.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<QueueProperties> updateQueue(QueueProperties queue) {
    return updateQueueWithResponse(queue).map(Response::getValue);
}

/**
 * Updates a queue with the given {@link QueueProperties} and returns the HTTP response. The
 * {@link QueueProperties} must be fully populated as all the properties are replaced. If a property is not set
 * the service default value is used.
 *
 * The suggested flow is: get the current description, update the required elements, and pass the updated
 * description into this method.
 *
 * @param queue Information about the queue to update. You must provide all the property values that are desired
 *     on the updated entity. Any values not provided are set to the service default values.
 *
 * @return A Mono that returns the updated queue in addition to the HTTP response.
 * @throws ClientAuthenticationException if the client's credentials do not have access to modify the
 *     namespace.
 * @throws HttpResponseException If the request body was invalid, the queue quota is exceeded, or an error
 *     occurred processing the request.
 * @throws NullPointerException if {@code queue} is null.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<QueueProperties>> updateQueueWithResponse(QueueProperties queue) {
    return withContext(context -> updateQueueWithResponse(queue, context));
}

/**
 * Updates a rule with the given {@link RuleProperties}. The {@link RuleProperties} must be fully populated as all
 * the properties are replaced. If a property is not set the service default value is used.
 *
 * The suggested flow is: get the current rule, update the required elements, and pass the updated description
 * into this method.
 *
 * @param topicName The topic name under which the rule is updated.
 * @param subscriptionName The name of the subscription for which the rule is updated.
 * @param rule Information about the rule to update. You must provide all the property values that are desired
 *     on the updated entity. Any values not provided are set to the service default values.
 *
 * @return A Mono that returns the updated rule.
 * @throws ClientAuthenticationException if the client's credentials do not have access to modify the
 *     namespace.
 * @throws HttpResponseException If the request body was invalid, the rule quota is exceeded, or an error
 *     occurred processing the request.
 * @throws NullPointerException if {@code rule} is null.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<RuleProperties> updateRule(String topicName, String subscriptionName, RuleProperties rule) {
    return updateRuleWithResponse(topicName, subscriptionName, rule).map(Response::getValue);
}

/**
 * Updates a rule with the given {@link RuleProperties} and returns the HTTP response. The
 * {@link RuleProperties} must be fully populated as all the properties are replaced. If a property is not set
 * the service default value is used.
 *
 * @param topicName The topic name under which the rule is updated.
 * @param subscriptionName The name of the subscription for which the rule is updated.
 * @param rule Information about the rule to update. You must provide all the property values that are desired
 *     on the updated entity. Any values not provided are set to the service default values.
 *
 * @return A Mono that returns the updated rule in addition to the HTTP response.
 * @throws ClientAuthenticationException if the client's credentials do not have access to modify the
 *     namespace.
 * @throws HttpResponseException If the request body was invalid, the rule quota is exceeded, or an error
 *     occurred processing the request.
 * @throws NullPointerException if {@code rule} is null.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<RuleProperties>> updateRuleWithResponse(String topicName, String subscriptionName,
    RuleProperties rule) {
    return withContext(context -> updateRuleWithResponse(topicName, subscriptionName, rule, context));
}

/**
 * Updates a subscription with the given {@link SubscriptionProperties}. The {@link SubscriptionProperties} must be
 * fully populated as all the properties are replaced. If a property is not set the service default value is
 * used.
 *
 * The suggested flow is: get the current subscription, update the required elements, and pass the updated
 * description into this method. Only a subset of properties can be updated.
 *
 * @param subscription Information about the subscription to update. You must provide all the property values
 *     that are desired on the updated entity. Any values not provided are set to the service default values.
 *
 * @return A Mono that returns the updated subscription.
 * @throws ClientAuthenticationException if the client's credentials do not have access to modify the
 *     namespace.
 * @throws HttpResponseException If the request body was invalid, the subscription quota is exceeded, or an
 *     error occurred processing the request.
 * @throws NullPointerException if {@code subscription} is null.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<SubscriptionProperties> updateSubscription(SubscriptionProperties subscription) {
    return updateSubscriptionWithResponse(subscription).map(Response::getValue);
}

/**
 * Updates a subscription with the given {@link SubscriptionProperties} and returns the HTTP response. The
 * {@link SubscriptionProperties} must be fully populated as all the properties are replaced. If a property is
 * not set the service default value is used.
 *
 * @param subscription Information about the subscription to update. You must provide all the property values
 *     that are desired on the updated entity. Any values not provided are set to the service default values.
 *
 * @return A Mono that returns the updated subscription in addition to the HTTP response.
 * @throws ClientAuthenticationException if the client's credentials do not have access to modify the
 *     namespace.
 * @throws HttpResponseException If the request body was invalid, the subscription quota is exceeded, or an
 *     error occurred processing the request.
 * @throws NullPointerException if {@code subscription} is null.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<SubscriptionProperties>> updateSubscriptionWithResponse(
    SubscriptionProperties subscription) {
    return withContext(context -> updateSubscriptionWithResponse(subscription, context));
}

/**
 * Updates a topic with the given {@link TopicProperties}. The {@link TopicProperties} must be fully populated as
 * all the properties are replaced. If a property is not set the service default value is used.
 *
 * The suggested flow is: get the current topic, update the required elements, and pass the updated description
 * into this method. Only a subset of properties can be updated.
 *
 * @param topic Information about the topic to update. You must provide all the property values that are desired
 *     on the updated entity. Any values not provided are set to the service default values.
 *
 * @return A Mono that completes with the updated topic.
 * @throws ClientAuthenticationException if the client's credentials do not have access to modify the
 *     namespace.
 * @throws HttpResponseException If the request body was invalid, the topic quota is exceeded, or an error
 *     occurred processing the request.
 * @throws NullPointerException if {@code topic} is null.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<TopicProperties> updateTopic(TopicProperties topic) {
    return updateTopicWithResponse(topic).map(Response::getValue);
}

/**
 * Updates a topic with the given {@link TopicProperties} and returns the HTTP response. The
 * {@link TopicProperties} must be fully populated as all the properties are replaced. If a property is not set
 * the service default value is used.
 *
 * @param topic Information about the topic to update. You must provide all the property values that are desired
 *     on the updated entity. Any values not provided are set to the service default values.
 *
 * @return A Mono that completes with the updated topic and its HTTP response.
 * @throws ClientAuthenticationException if the client's credentials do not have access to modify the
 *     namespace.
 * @throws HttpResponseException If the request body was invalid, the topic quota is exceeded, or an error
 *     occurred processing the request.
 * @throws NullPointerException if {@code topic} is null.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<TopicProperties>> updateTopicWithResponse(TopicProperties topic) {
    return withContext(context -> updateTopicWithResponse(topic, context));
}

/**
 * Creates a queue with its context.
 *
 * @param createQueueOptions Queue to create.
 * @param context Context to pass into request.
 *
 * @return A Mono that completes with the created {@link QueueProperties}.
*/ Mono<Response<QueueProperties>> createQueueWithResponse(String queueName, CreateQueueOptions createQueueOptions, Context context) { if (CoreUtils.isNullOrEmpty(queueName)) { return monoError(LOGGER, new IllegalArgumentException("'queueName' cannot be empty.")); } if (createQueueOptions == null) { return monoError(LOGGER, new NullPointerException("'createQueueOptions' cannot be null.")); } context = context == null ? Context.NONE : context; final Context contextWithHeaders = getTracingContext(context.addData(AZURE_REQUEST_HTTP_HEADERS_KEY, new HttpHeaders())); final String forwardTo = getForwardToEntity(createQueueOptions.getForwardTo(), contextWithHeaders); if (forwardTo != null) { createQueueOptions.setForwardTo(forwardTo); } final String forwardDlq = getForwardDlqEntity(createQueueOptions.getForwardDeadLetteredMessagesTo(), contextWithHeaders); if (forwardDlq != null) { createQueueOptions.setForwardDeadLetteredMessagesTo(forwardDlq); } final CreateQueueBody createEntity = getCreateQueueBody(EntityHelper.getQueueDescription(createQueueOptions)); try { return entityClient.putWithResponseAsync(queueName, createEntity, null, contextWithHeaders) .onErrorMap(ServiceBusAdministrationAsyncClient::mapException) .map(this::deserializeQueue); } catch (RuntimeException ex) { return monoError(LOGGER, ex); } } /** * Creates a rule with its context. * * @param ruleOptions Rule to create. * @param context Context to pass into request. * * * @return A Mono that completes with the created {@link RuleProperties}. 
*/ Mono<Response<RuleProperties>> createRuleWithResponse(String topicName, String subscriptionName, String ruleName, CreateRuleOptions ruleOptions, Context context) { if (CoreUtils.isNullOrEmpty(topicName)) { return monoError(LOGGER, new NullPointerException("'topicName' cannot be null or empty.")); } if (CoreUtils.isNullOrEmpty(subscriptionName)) { return monoError(LOGGER, new NullPointerException("'subscriptionName' cannot be null or empty.")); } if (CoreUtils.isNullOrEmpty(ruleName)) { return monoError(LOGGER, new NullPointerException("'ruleName' cannot be null or empty.")); } if (ruleOptions == null) { return monoError(LOGGER, new NullPointerException("'ruleOptions' cannot be null.")); } final CreateRuleBody createEntity = getCreateRuleBody(ruleName, ruleOptions); try { return managementClient.getRules().putWithResponseAsync(topicName, subscriptionName, ruleName, createEntity, null, getTracingContext(context)) .onErrorMap(ServiceBusAdministrationAsyncClient::mapException) .map(this::deserializeRule); } catch (RuntimeException ex) { return monoError(LOGGER, ex); } } /** * Creates a subscription with its context. * * @param subscriptionOptions Subscription to create. * @param context Context to pass into request. * * @return A Mono that completes with the created {@link SubscriptionProperties}. */ Mono<Response<SubscriptionProperties>> createSubscriptionWithResponse(String topicName, String subscriptionName, CreateSubscriptionOptions subscriptionOptions, Context context) { if (CoreUtils.isNullOrEmpty(topicName)) { return monoError(LOGGER, new NullPointerException("'topicName' cannot be null or empty.")); } if (CoreUtils.isNullOrEmpty(subscriptionName)) { return monoError(LOGGER, new NullPointerException("'subscriptionName' cannot be null or empty.")); } if (subscriptionOptions == null) { throw LOGGER.logExceptionAsError(new NullPointerException("'subscriptionOptions' cannot be null.")); } context = context == null ? 
Context.NONE : context; final Context contextWithHeaders = getTracingContext(context.addData(AZURE_REQUEST_HTTP_HEADERS_KEY, new HttpHeaders())); final String forwardTo = getForwardToEntity(subscriptionOptions.getForwardTo(), contextWithHeaders); if (forwardTo != null) { subscriptionOptions.setForwardTo(forwardTo); } final String forwardDlq = getForwardDlqEntity(subscriptionOptions.getForwardDeadLetteredMessagesTo(), contextWithHeaders); if (forwardDlq != null) { subscriptionOptions.setForwardDeadLetteredMessagesTo(forwardDlq); } final CreateSubscriptionBody createEntity = getCreateSubscriptionBody(EntityHelper.getSubscriptionDescription(subscriptionOptions)); try { return managementClient.getSubscriptions().putWithResponseAsync(topicName, subscriptionName, createEntity, null, contextWithHeaders) .onErrorMap(ServiceBusAdministrationAsyncClient::mapException) .map(response -> deserializeSubscription(topicName, response)); } catch (RuntimeException ex) { return monoError(LOGGER, ex); } } /** * Creates a topicOptions with its context. * * @param topicOptions Topic to create. * @param context Context to pass into request. * * @return A Mono that completes with the created {@link TopicProperties}. 
*/ Mono<Response<TopicProperties>> createTopicWithResponse(String topicName, CreateTopicOptions topicOptions, Context context) { if (CoreUtils.isNullOrEmpty(topicName)) { return monoError(LOGGER, new NullPointerException("'topicName' cannot be null or empty.")); } if (topicOptions == null) { throw LOGGER.logExceptionAsError(new NullPointerException("'topicOptions' cannot be null.")); } final CreateTopicBody createEntity = getCreateTopicBody(EntityHelper.getTopicDescription(topicOptions)); try { return entityClient.putWithResponseAsync(topicName, createEntity, null, getTracingContext(context)) .onErrorMap(ServiceBusAdministrationAsyncClient::mapException) .map(this::deserializeTopic); } catch (RuntimeException ex) { return monoError(LOGGER, ex); } } /** * Deletes a queue with its context. * * @param queueName Name of queue to delete. * @param context Context to pass into request. * * @return A Mono that completes when the queue is deleted. */ Mono<Response<Void>> deleteQueueWithResponse(String queueName, Context context) { if (CoreUtils.isNullOrEmpty(queueName)) { return monoError(LOGGER, new IllegalArgumentException("'queueName' cannot be null or empty.")); } try { return entityClient.deleteWithResponseAsync(queueName, getTracingContext(context)) .onErrorMap(ServiceBusAdministrationAsyncClient::mapException) .map(response -> new SimpleResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(), null)); } catch (RuntimeException ex) { return monoError(LOGGER, ex); } } /** * Deletes a queue with its context. * * @param topicName Name of topic to delete. * @param subscriptionName Name of the subscription for the rule. * @param ruleName Name of the rule. * @param context Context to pass into request. * * @return A Mono that completes with the created {@link QueueProperties}. 
*/ Mono<Response<Void>> deleteRuleWithResponse(String topicName, String subscriptionName, String ruleName, Context context) { if (CoreUtils.isNullOrEmpty(topicName)) { return monoError(LOGGER, new NullPointerException("'topicName' cannot be null or empty.")); } if (CoreUtils.isNullOrEmpty(subscriptionName)) { return monoError(LOGGER, new NullPointerException("'subscriptionName' cannot be null or empty.")); } if (CoreUtils.isNullOrEmpty(ruleName)) { return monoError(LOGGER, new NullPointerException("'ruleName' cannot be null or empty.")); } try { return rulesClient.deleteWithResponseAsync(topicName, subscriptionName, ruleName, getTracingContext(context)) .onErrorMap(ServiceBusAdministrationAsyncClient::mapException) .map(response -> new SimpleResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(), null)); } catch (RuntimeException ex) { return monoError(LOGGER, ex); } } /** * Deletes a subscription with its context. * * @param topicName Name of topic associated with subscription to delete. * @param subscriptionName Name of subscription to delete. * @param context Context to pass into request. * * @return A Mono that completes with the created {@link SubscriptionProperties}. 
 */
Mono<Response<Void>> deleteSubscriptionWithResponse(String topicName, String subscriptionName, Context context) {
    if (CoreUtils.isNullOrEmpty(topicName)) {
        return monoError(LOGGER, new NullPointerException("'topicName' cannot be null or empty."));
    }
    if (CoreUtils.isNullOrEmpty(subscriptionName)) {
        return monoError(LOGGER, new NullPointerException("'subscriptionName' cannot be null or empty."));
    }

    try {
        return managementClient.getSubscriptions().deleteWithResponseAsync(topicName, subscriptionName,
            getTracingContext(context))
            .onErrorMap(ServiceBusAdministrationAsyncClient::mapException)
            // Drop the deserialized body; callers of a delete only need status and headers.
            .map(response -> new SimpleResponse<>(response.getRequest(), response.getStatusCode(),
                response.getHeaders(), null));
    } catch (RuntimeException ex) {
        return monoError(LOGGER, ex);
    }
}

/**
 * Deletes a topic with its context.
 *
 * @param topicName Name of topic to delete.
 * @param context Context to pass into request.
 *
 * @return A Mono that completes when the topic is deleted.
 */
Mono<Response<Void>> deleteTopicWithResponse(String topicName, Context context) {
    if (CoreUtils.isNullOrEmpty(topicName)) {
        return monoError(LOGGER, new NullPointerException("'topicName' cannot be null or empty."));
    }

    try {
        return entityClient.deleteWithResponseAsync(topicName, getTracingContext(context))
            .onErrorMap(ServiceBusAdministrationAsyncClient::mapException)
            .map(response -> new SimpleResponse<>(response.getRequest(), response.getStatusCode(),
                response.getHeaders(), null));
    } catch (RuntimeException ex) {
        return monoError(LOGGER, ex);
    }
}

/**
 * Gets whether an entity exists.
 *
 * @param getEntityOperation Operation to get information about entity. If {@link ResourceNotFoundException} is
 *     thrown, then it is mapped to false.
 * @param <T> Entity type.
 *
 * @return True if the entity exists, false otherwise.
*/ <T> Mono<Response<Boolean>> getEntityExistsWithResponse(Mono<Response<T>> getEntityOperation) { return getEntityOperation.map(response -> { final boolean exists = response.getValue() != null; return (Response<Boolean>) new SimpleResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(), exists); }) .onErrorResume(ResourceNotFoundException.class, exception -> { final HttpResponse response = exception.getResponse(); final Response<Boolean> result = new SimpleResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(), false); return Mono.just(result); }); } /** * Gets a queue with its context. * * @param queueName Name of queue to fetch information for. * @param context Context to pass into request. * * @return A Mono that completes with the {@link QueueProperties}. */ <T> Mono<Response<T>> getQueueWithResponse(String queueName, Context context, Function<QueueProperties, T> mapper) { if (CoreUtils.isNullOrEmpty(queueName)) { return monoError(LOGGER, new NullPointerException("'topicName' cannot be null or empty.")); } try { return entityClient.getWithResponseAsync(queueName, true, getTracingContext(context)) .onErrorMap(ServiceBusAdministrationAsyncClient::mapException) .handle((response, sink) -> { final Response<QueueProperties> deserialize = deserializeQueue(response); if (deserialize.getValue() == null) { final HttpResponse notFoundResponse = new EntityHelper.EntityNotFoundHttpResponse<>(deserialize); sink.error(new ResourceNotFoundException(String.format("Queue '%s' does not exist.", queueName), notFoundResponse)); } else { final T mapped = mapper.apply(deserialize.getValue()); sink.next(new SimpleResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(), mapped)); } }); } catch (RuntimeException ex) { return monoError(LOGGER, ex); } } Mono<Response<RuleProperties>> getRuleWithResponse(String topicName, String subscriptionName, String ruleName, Context context) { try { return 
rulesClient.getWithResponseAsync(topicName, subscriptionName, ruleName, true, getTracingContext(context)) .onErrorMap(ServiceBusAdministrationAsyncClient::mapException) .map(this::deserializeRule); } catch (RuntimeException ex) { return monoError(LOGGER, ex); } } /** * Gets a subscription with its context. * * @param topicName Name of the topic associated with the subscription. * @param subscriptionName Name of subscription to fetch information for. * @param context Context to pass into request. * * @return A Mono that completes with the {@link SubscriptionProperties}. */ <T> Mono<Response<T>> getSubscriptionWithResponse(String topicName, String subscriptionName, Context context, Function<SubscriptionProperties, T> mapper) { if (CoreUtils.isNullOrEmpty(topicName)) { return monoError(LOGGER, new NullPointerException("'topicName' cannot be null or empty.")); } if (CoreUtils.isNullOrEmpty(subscriptionName)) { return monoError(LOGGER, new NullPointerException("'subscriptionName' cannot be null or empty.")); } try { return managementClient.getSubscriptions().getWithResponseAsync(topicName, subscriptionName, true, getTracingContext(context)) .onErrorMap(ServiceBusAdministrationAsyncClient::mapException) .handle((response, sink) -> { final Response<SubscriptionProperties> deserialize = deserializeSubscription(topicName, response); if (deserialize.getValue() == null) { final HttpResponse notFoundResponse = new EntityHelper.EntityNotFoundHttpResponse<>(deserialize); sink.error(new ResourceNotFoundException(String.format( "Subscription '%s' in topic '%s' does not exist.", topicName, subscriptionName), notFoundResponse)); } else { final T mapped = mapper.apply(deserialize.getValue()); sink.next(new SimpleResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(), mapped)); } }); } catch (RuntimeException ex) { return monoError(LOGGER, ex); } } /** * Gets the namespace properties with its context. * * @param context Context to pass into request. 
* * @return A Mono that completes with the {@link NamespaceProperties}. */ Mono<Response<NamespaceProperties>> getNamespacePropertiesWithResponse(Context context) { return managementClient.getNamespaces().getWithResponseAsync(context).handle((response, sink) -> { final NamespacePropertiesEntry entry = response.getValue(); if (entry == null || entry.getContent() == null) { sink.error(new AzureException( "There was no content inside namespace response. Entry: " + response)); return; } final NamespaceProperties namespaceProperties = entry.getContent().getNamespaceProperties(); final Response<NamespaceProperties> result = new SimpleResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(), namespaceProperties); sink.next(result); }); } /** * Gets a topic with its context. * * @param topicName Name of topic to fetch information for. * @param context Context to pass into request. * * @return A Mono that completes with the {@link TopicProperties}. */ <T> Mono<Response<T>> getTopicWithResponse(String topicName, Context context, Function<TopicProperties, T> mapper) { if (CoreUtils.isNullOrEmpty(topicName)) { return monoError(LOGGER, new NullPointerException("'topicName' cannot be null or empty.")); } try { return entityClient.getWithResponseAsync(topicName, true, getTracingContext(context)) .onErrorMap(ServiceBusAdministrationAsyncClient::mapException) .handle((response, sink) -> { final Response<TopicProperties> deserialize = deserializeTopic(response); if (deserialize.getValue() == null) { final HttpResponse notFoundResponse = new EntityHelper.EntityNotFoundHttpResponse<>(deserialize); sink.error(new ResourceNotFoundException(String.format("Topic '%s' does not exist.", topicName), notFoundResponse)); } else { final T mapped = mapper.apply(deserialize.getValue()); sink.next(new SimpleResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(), mapped)); } }); } catch (RuntimeException ex) { return monoError(LOGGER, ex); } } /** 
 * Gets the first page of subscriptions with context.
 *
 * @param context Context to pass into request.
 *
 * @return A Mono that completes with a page of subscriptions.
 */
Mono<PagedResponse<SubscriptionProperties>> listSubscriptionsFirstPage(String topicName, Context context) {
    try {
        return listSubscriptions(topicName, 0, getTracingContext(context));
    } catch (RuntimeException e) {
        return monoError(LOGGER, e);
    }
}

/**
 * Gets the next page of subscriptions with context.
 *
 * @param continuationToken Number of items to skip in feed.
 * @param context Context to pass into request.
 *
 * @return A Mono that completes with a page of subscriptions or empty if there are no items left.
 */
Mono<PagedResponse<SubscriptionProperties>> listSubscriptionsNextPage(String topicName, String continuationToken,
    Context context) {
    if (continuationToken == null || continuationToken.isEmpty()) {
        return Mono.empty();
    }

    try {
        // The continuation token is the numeric skip offset; a malformed token surfaces as an error Mono.
        final int skip = Integer.parseInt(continuationToken);

        return listSubscriptions(topicName, skip, getTracingContext(context));
    } catch (RuntimeException e) {
        return monoError(LOGGER, e);
    }
}

/**
 * Gets the first page of topics with context.
 *
 * @param context Context to pass into request.
 *
 * @return A Mono that completes with a page of topics.
 */
Mono<PagedResponse<TopicProperties>> listTopicsFirstPage(Context context) {
    try {
        return listTopics(0, getTracingContext(context));
    } catch (RuntimeException e) {
        return monoError(LOGGER, e);
    }
}

/**
 * Gets the next page of topics with context.
 *
 * @param continuationToken Number of items to skip in feed.
 * @param context Context to pass into request.
 *
 * @return A Mono that completes with a page of topics or empty if there are no items left.
 */
Mono<PagedResponse<TopicProperties>> listTopicsNextPage(String continuationToken, Context context) {
    if (continuationToken == null || continuationToken.isEmpty()) {
        return Mono.empty();
    }

    try {
        // The continuation token is the numeric skip offset; a malformed token surfaces as an error Mono.
        final int skip = Integer.parseInt(continuationToken);

        return listTopics(skip, getTracingContext(context));
    } catch (RuntimeException e) {
        return monoError(LOGGER, e);
    }
}

/**
 * Updates a queue with its context.
 *
 * @param queue Information about the queue to update. You must provide all the property values that are desired
 *     on the updated entity. Any values not provided are set to the service default values.
 * @param context Context to pass into request.
 *
 * @return A Mono that completes with the updated {@link QueueProperties}.
 */
Mono<Response<QueueProperties>> updateQueueWithResponse(QueueProperties queue, Context context) {
    if (queue == null) {
        return monoError(LOGGER, new NullPointerException("'queue' cannot be null"));
    }

    context = context == null ? Context.NONE : context;
    // Headers map is shared through the Context so supplementary auth headers for forwarding
    // entities can be attached by the helpers below.
    final Context contextWithHeaders = getTracingContext(context.addData(AZURE_REQUEST_HTTP_HEADERS_KEY,
        new HttpHeaders()));
    final String forwardTo = getForwardToEntity(queue.getForwardTo(), contextWithHeaders);
    if (forwardTo != null) {
        queue.setForwardTo(forwardTo);
    }
    final String forwardDlq = getForwardDlqEntity(queue.getForwardDeadLetteredMessagesTo(), contextWithHeaders);
    if (forwardDlq != null) {
        queue.setForwardDeadLetteredMessagesTo(forwardDlq);
    }

    final CreateQueueBody createEntity = getCreateQueueBody(EntityHelper.toImplementation(queue));

    try {
        // "*" If-Match: unconditional update of the existing entity.
        return entityClient.putWithResponseAsync(queue.getName(), createEntity, "*", contextWithHeaders)
            .onErrorMap(ServiceBusAdministrationAsyncClient::mapException)
            .map(this::deserializeQueue);
    } catch (RuntimeException ex) {
        return monoError(LOGGER, ex);
    }
}

/**
 * Updates a rule with its context.
 *
 * @param rule Information about the rule to update. You must provide all the property values that are desired
 *     on the updated entity.
Any values not provided are set to the service default values. * @param context Context to pass into request. * * @return A Mono that completes with the updated {@link RuleProperties}. */ Mono<Response<RuleProperties>> updateRuleWithResponse(String topicName, String subscriptionName, RuleProperties rule, Context context) { if (rule == null) { return monoError(LOGGER, new NullPointerException("'rule' cannot be null")); } final CreateRuleBody ruleBody = getUpdateRuleBody(rule); try { return managementClient.getRules().putWithResponseAsync(topicName, subscriptionName, rule.getName(), ruleBody, "*", getTracingContext(context)) .onErrorMap(ServiceBusAdministrationAsyncClient::mapException) .map(this::deserializeRule); } catch (RuntimeException ex) { return monoError(LOGGER, ex); } } /** * Gets the first page of queues with context. * * @param context Context to pass into request. * * @return A Mono that completes with a page of queues. */ Mono<PagedResponse<QueueProperties>> listQueuesFirstPage(Context context) { try { return listQueues(0, getTracingContext(context)); } catch (RuntimeException e) { return monoError(LOGGER, e); } } /** * Gets the next page of queues with context. * * @param continuationToken Number of items to skip in feed. * @param context Context to pass into request. * * @return A Mono that completes with a page of queues or empty if there are no items left. */ Mono<PagedResponse<QueueProperties>> listQueuesNextPage(String continuationToken, Context context) { if (continuationToken == null || continuationToken.isEmpty()) { return Mono.empty(); } try { final int skip = Integer.parseInt(continuationToken); return listQueues(skip, getTracingContext(context)); } catch (RuntimeException e) { return monoError(LOGGER, e); } } /** * Gets the first page of rules with context. * * @param context Context to pass into request. * * @return A Mono that completes with a page of rules. 
*/ Mono<PagedResponse<RuleProperties>> listRulesFirstPage(String topicName, String subscriptionName, Context context) { try { return listRules(topicName, subscriptionName, 0, getTracingContext(context)); } catch (RuntimeException e) { return monoError(LOGGER, e); } } /** * Gets the next page of rules with context. * * @param continuationToken Number of items to skip in feed. * @param context Context to pass into request. * * @return A Mono that completes with a page of rules or empty if there are no items left. */ Mono<PagedResponse<RuleProperties>> listRulesNextPage(String topicName, String subscriptionName, String continuationToken, Context context) { if (continuationToken == null || continuationToken.isEmpty()) { return Mono.empty(); } try { final int skip = Integer.parseInt(continuationToken); return listRules(topicName, subscriptionName, skip, getTracingContext(context)); } catch (RuntimeException e) { return monoError(LOGGER, e); } } /** * Updates a subscription with its context. * * @param subscription Information about the subscription to update. You must provide all the property values * that are desired on the updated entity. Any values not provided are set to the service default values. * @param context Context to pass into request. * * @return A Mono that completes with the updated {@link SubscriptionProperties}. */ Mono<Response<SubscriptionProperties>> updateSubscriptionWithResponse(SubscriptionProperties subscription, Context context) { if (subscription == null) { return monoError(LOGGER, new NullPointerException("'subscription' cannot be null")); } context = context == null ? 
Context.NONE : context; final Context contextWithHeaders = context.addData(AZ_TRACING_NAMESPACE_KEY, AZ_TRACING_NAMESPACE_VALUE) .addData(AZURE_REQUEST_HTTP_HEADERS_KEY, new HttpHeaders()); final String forwardTo = getForwardToEntity(subscription.getForwardTo(), contextWithHeaders); if (forwardTo != null) { subscription.setForwardTo(forwardTo); } final String forwardDlq = getForwardDlqEntity(subscription.getForwardDeadLetteredMessagesTo(), contextWithHeaders); if (forwardDlq != null) { subscription.setForwardDeadLetteredMessagesTo(forwardDlq); } final String topicName = subscription.getTopicName(); final String subscriptionName = subscription.getSubscriptionName(); final CreateSubscriptionBody createEntity = getCreateSubscriptionBody(EntityHelper.toImplementation(subscription)); try { return managementClient.getSubscriptions().putWithResponseAsync(topicName, subscriptionName, createEntity, "*", contextWithHeaders) .onErrorMap(ServiceBusAdministrationAsyncClient::mapException) .map(response -> deserializeSubscription(topicName, response)); } catch (RuntimeException ex) { return monoError(LOGGER, ex); } } /** * Updates a topic with its context. * * @param topic Information about the topic to update. You must provide all the property values that are desired * on the updated entity. Any values not provided are set to the service default values. * @param context Context to pass into request. * * @return A Mono that completes with the updated {@link TopicProperties}. 
 */
Mono<Response<TopicProperties>> updateTopicWithResponse(TopicProperties topic, Context context) {
    if (topic == null) {
        return monoError(LOGGER, new NullPointerException("'topic' cannot be null"));
    }

    final CreateTopicBody createEntity = getUpdateTopicBody(topic);

    try {
        // "*" If-Match: unconditional update of the existing topic.
        return entityClient.putWithResponseAsync(topic.getName(), createEntity, "*", getTracingContext(context))
            .onErrorMap(ServiceBusAdministrationAsyncClient::mapException)
            .map(this::deserializeTopic);
    } catch (RuntimeException ex) {
        return monoError(LOGGER, ex);
    }
}

/**
 * Deserializes the ATOM XML payload into the given type, returning null for absent or empty bodies.
 *
 * @param object Raw response body; its String form is fed to the XML serializer.
 * @param clazz Target type.
 *
 * @return Deserialized instance, or {@code null} when there is nothing to deserialize.
 */
private <T> T deserialize(Object object, Class<T> clazz) {
    if (object == null) {
        return null;
    }

    final String contents = String.valueOf(object);
    if (contents.isEmpty()) {
        return null;
    }

    try {
        return serializer.deserialize(contents, clazz);
    } catch (IOException e) {
        // Deserialization failures are programming/contract errors; propagate unchecked with context.
        throw LOGGER.logExceptionAsError(new RuntimeException(String.format(
            "Exception while deserializing. Body: [%s]. Class: %s", contents, clazz), e));
    }
}

/**
 * Given an HTTP response, will deserialize it into a strongly typed Response object.
 *
 * @param response HTTP response to deserialize response body from.
 * @param clazz Class to deserialize response type into.
 * @param <T> Class type to deserialize response into.
 *
 * @return A Response with a strongly typed response value.
 */
private <T> Response<T> deserialize(Response<Object> response, Class<T> clazz) {
    final T deserialize = deserialize(response.getValue(), clazz);

    return new SimpleResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(),
        deserialize);
}

/**
 * Converts a Response into its corresponding {@link QueueDescriptionEntry} then mapped into {@link
 * QueueProperties}.
 *
 * @param response HTTP Response to deserialize.
 *
 * @return The corresponding HTTP response with convenience properties set.
 */
private Response<QueueProperties> deserializeQueue(Response<Object> response) {
    final QueueDescriptionEntry entry = deserialize(response.getValue(), QueueDescriptionEntry.class);

    // Entity may not exist; a null-valued response signals that to callers (e.g. getQueueWithResponse).
    if (entry == null) {
        return new SimpleResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(), null);
    } else if (entry.getContent() == null) {
        LOGGER.info("entry.getContent() is null. The entity may not exist. {}", entry);
        return new SimpleResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(), null);
    } else if (entry.getContent().getQueueDescription() == null) {
        // The name may actually belong to a topic: re-deserialize as a topic entry to detect that.
        final TopicDescriptionEntry entryTopic = deserialize(response.getValue(), TopicDescriptionEntry.class);
        if (entryTopic != null && entryTopic.getContent() != null
            && entryTopic.getContent().getTopicDescription() != null) {
            LOGGER.warning("'{}' is not a queue, it is a topic.", entryTopic.getTitle());
            return new SimpleResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(),
                null);
        }
        // NOTE(review): if the entity is neither a queue nor a topic, control falls through and
        // toModel(...) below receives a null QueueDescription — looks like a possible NPE; confirm.
    }

    final QueueProperties result = EntityHelper.toModel(entry.getContent().getQueueDescription());
    final String queueName = getTitleValue(entry.getTitle());
    EntityHelper.setQueueName(result, queueName);

    return new SimpleResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(), result);
}

/**
 * Converts a Response into its corresponding {@link RuleDescriptionEntry} then mapped into {@link RuleProperties}.
 *
 * @param response HTTP Response to deserialize.
 *
 * @return The corresponding HTTP response with convenience properties set.
 */
private Response<RuleProperties> deserializeRule(Response<Object> response) {
    final RuleDescriptionEntry entry = deserialize(response.getValue(), RuleDescriptionEntry.class);

    return getRulePropertiesSimpleResponse(response, entry);
}

/**
 * Converts a Response into its corresponding {@link SubscriptionDescriptionEntry} then mapped into {@link
 * SubscriptionProperties}.
 *
 * @param response HTTP Response to deserialize.
 *
 * @return The corresponding HTTP response with convenience properties set.
 */
private Response<SubscriptionProperties> deserializeSubscription(String topicName, Response<Object> response) {
    final SubscriptionDescriptionEntry entry = deserialize(response.getValue(),
        SubscriptionDescriptionEntry.class);

    return getSubscriptionPropertiesSimpleResponse(topicName, response, entry);
}

/**
 * Converts a Response into its corresponding {@link TopicDescriptionEntry} then mapped into {@link
 * QueueProperties}.
 *
 * @param response HTTP Response to deserialize.
 *
 * @return The corresponding HTTP response with convenience properties set.
 */
private Response<TopicProperties> deserializeTopic(Response<Object> response) {
    final TopicDescriptionEntry entry = deserialize(response.getValue(), TopicDescriptionEntry.class);

    // Entity may not exist; a null-valued response signals that to callers (e.g. getTopicWithResponse).
    if (entry == null) {
        return new SimpleResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(), null);
    } else if (entry.getContent() == null) {
        LOGGER.warning("entry.getContent() is null. There should have been content returned. Entry: {}", entry);
        return new SimpleResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(), null);
    } else if (entry.getContent().getTopicDescription() == null) {
        // The name may actually belong to a queue: re-deserialize as a queue entry to detect that.
        final QueueDescriptionEntry entryQueue = deserialize(response.getValue(), QueueDescriptionEntry.class);
        if (entryQueue != null && entryQueue.getContent() != null
            && entryQueue.getContent().getQueueDescription() != null) {
            LOGGER.warning("'{}' is not a topic, it is a queue.", entryQueue.getTitle());
            return new SimpleResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(),
                null);
        }
        // NOTE(review): if the entity is neither a topic nor a queue, control falls through and
        // toModel(...) below receives a null TopicDescription — looks like a possible NPE; confirm.
    }

    final TopicProperties result = EntityHelper.toModel(entry.getContent().getTopicDescription());
    final String topicName = getTitleValue(entry.getTitle());
    EntityHelper.setTopicName(result, topicName);

    return new SimpleResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(), result);
}

/**
 * Helper method that invokes the service method, extracts the data and translates it to a PagedResponse.
 *
 * @param skip Number of elements to skip.
 * @param context Context for the query.
 *
 * @return A Mono that completes with a paged response of queues.
 */
private Mono<PagedResponse<QueueProperties>> listQueues(int skip, Context context) {
    return managementClient.listEntitiesWithResponseAsync(QUEUES_ENTITY_TYPE, skip, NUMBER_OF_ELEMENTS, context)
        .onErrorMap(ServiceBusAdministrationAsyncClient::mapException)
        .flatMap(response -> {
            final Response<QueueDescriptionFeed> feedResponse = deserialize(response, QueueDescriptionFeed.class);
            final QueueDescriptionFeed feed = feedResponse.getValue();
            if (feed == null) {
                // An unparseable feed yields an empty page rather than an error.
                LOGGER.warning("Could not deserialize QueueDescriptionFeed. skip {}, top: {}", skip,
                    NUMBER_OF_ELEMENTS);
                return Mono.empty();
            }

            final List<QueueProperties> entities = getQueuePropertiesList(feed);
            try {
                return Mono.just(extractPage(feedResponse, entities, feed.getLink()));
            } catch (MalformedURLException | UnsupportedEncodingException error) {
                return Mono.error(new RuntimeException("Could not parse response into FeedPage<QueueDescription>",
                    error));
            }
        });
}

/**
 * Helper method that invokes the service method, extracts the data and translates it to a PagedResponse.
 *
 * @param skip Number of elements to skip.
 * @param context Context for the query.
 *
 * @return A Mono that completes with a paged response of rules.
 */
private Mono<PagedResponse<RuleProperties>> listRules(String topicName, String subscriptionName, int skip,
    Context context) {
    return managementClient.listRulesWithResponseAsync(topicName, subscriptionName, skip, NUMBER_OF_ELEMENTS,
        context)
        .onErrorMap(ServiceBusAdministrationAsyncClient::mapException)
        .flatMap(response -> {
            final Response<RuleDescriptionFeed> feedResponse = deserialize(response, RuleDescriptionFeed.class);
            final RuleDescriptionFeed feed = feedResponse.getValue();
            if (feed == null) {
                // An unparseable feed yields an empty page rather than an error.
                LOGGER.warning("Could not deserialize RuleDescriptionFeed. skip {}, top: {}", skip,
                    NUMBER_OF_ELEMENTS);
                return Mono.empty();
            }

            final List<RuleProperties> entities = getRulePropertiesList(feed);
            try {
                return Mono.just(extractPage(feedResponse, entities, feed.getLink()));
            } catch (MalformedURLException | UnsupportedEncodingException error) {
                return Mono.error(new RuntimeException(
                    "Could not parse response into FeedPage<RuleDescription>", error));
            }
        });
}

/**
 * Helper method that invokes the service method, extracts the data and translates it to a PagedResponse.
 *
 * @param skip Number of elements to skip.
 * @param context Context for the query.
 *
 * @return A Mono that completes with a paged response of subscriptions.
*/ private Mono<PagedResponse<SubscriptionProperties>> listSubscriptions(String topicName, int skip, Context context) { return managementClient.listSubscriptionsWithResponseAsync(topicName, skip, NUMBER_OF_ELEMENTS, context) .onErrorMap(ServiceBusAdministrationAsyncClient::mapException) .flatMap(response -> { final Response<SubscriptionDescriptionFeed> feedResponse = deserialize(response, SubscriptionDescriptionFeed.class); final SubscriptionDescriptionFeed feed = feedResponse.getValue(); if (feed == null) { LOGGER.warning("Could not deserialize SubscriptionDescriptionFeed. skip {}, top: {}", skip, NUMBER_OF_ELEMENTS); return Mono.empty(); } final List<SubscriptionProperties> entities = getSubscriptionPropertiesList(topicName, feed); try { return Mono.just(extractPage(feedResponse, entities, feed.getLink())); } catch (MalformedURLException | UnsupportedEncodingException error) { return Mono.error(new RuntimeException( "Could not parse response into FeedPage<SubscriptionDescription>", error)); } }); } /** * Helper method that invokes the service method, extracts the data and translates it to a PagedResponse. * * @param skip Number of elements to skip. * @param context Context for the query. * * @return A Mono that completes with a paged response of topics. */ private Mono<PagedResponse<TopicProperties>> listTopics(int skip, Context context) { return managementClient.listEntitiesWithResponseAsync(TOPICS_ENTITY_TYPE, skip, NUMBER_OF_ELEMENTS, context) .onErrorMap(ServiceBusAdministrationAsyncClient::mapException) .flatMap(response -> { final Response<TopicDescriptionFeed> feedResponse = deserialize(response, TopicDescriptionFeed.class); final TopicDescriptionFeed feed = feedResponse.getValue(); if (feed == null) { LOGGER.warning("Could not deserialize TopicDescriptionFeed. 
skip {}, top: {}", skip, NUMBER_OF_ELEMENTS); return Mono.empty(); } final List<TopicProperties> entities = getTopicPropertiesList(feed); try { return Mono.just(extractPage(feedResponse, entities, feed.getLink())); } catch (MalformedURLException | UnsupportedEncodingException error) { return Mono.error( /** * Maps an exception from the ATOM APIs to its associated {@link HttpResponseException}. * * @param exception Exception from the ATOM API. * * @return The corresponding {@link HttpResponseException} or {@code throwable} if it is not an instance of {@link * ServiceBusManagementErrorException}. */ private static Throwable mapException(Throwable exception) { if (!(exception instanceof ServiceBusManagementErrorException)) { return exception; } final ServiceBusManagementErrorException managementError = ((ServiceBusManagementErrorException) exception); final ServiceBusManagementError error = managementError.getValue(); final HttpResponse errorHttpResponse = managementError.getResponse(); final int statusCode = error != null && error.getCode() != null ? error.getCode() : errorHttpResponse.getStatusCode(); final String errorDetail = error != null && error.getDetail() != null ? 
error.getDetail() : managementError.getMessage(); switch (statusCode) { case 401: return new ClientAuthenticationException(errorDetail, managementError.getResponse(), exception); case 404: return new ResourceNotFoundException(errorDetail, managementError.getResponse(), exception); case 409: return new ResourceExistsException(errorDetail, managementError.getResponse(), exception); case 412: return new ResourceModifiedException(errorDetail, managementError.getResponse(), exception); default: return new HttpResponseException(errorDetail, managementError.getResponse(), exception); } } private String getForwardDlqEntity(String forwardDlqToEntity, Context contextWithHeaders) { if (!CoreUtils.isNullOrEmpty(forwardDlqToEntity)) { addSupplementaryAuthHeader(SERVICE_BUS_DLQ_SUPPLEMENTARY_AUTHORIZATION_HEADER_NAME, forwardDlqToEntity, contextWithHeaders); return getAbsoluteUrlFromEntity(forwardDlqToEntity); } return null; } private String getForwardToEntity(String forwardToEntity, Context contextWithHeaders) { if (!CoreUtils.isNullOrEmpty(forwardToEntity)) { addSupplementaryAuthHeader(SERVICE_BUS_SUPPLEMENTARY_AUTHORIZATION_HEADER_NAME, forwardToEntity, contextWithHeaders); return getAbsoluteUrlFromEntity(forwardToEntity); } return null; } }
class ServiceBusAdministrationAsyncClient {
    private static final ClientLogger LOGGER = new ClientLogger(ServiceBusAdministrationAsyncClient.class);

    // Generated management-plane client through which all HTTP/ATOM calls are made.
    private final ServiceBusManagementClientImpl managementClient;
    // Sub-client for entity (queue/topic/subscription) operations.
    private final EntitiesImpl entityClient;
    // Serializer used to deserialize ATOM XML responses.
    private final ServiceBusManagementSerializer serializer;
    // Sub-client for rule operations.
    private final RulesImpl rulesClient;

    /**
     * Creates a new instance with the given management client and serializer.
     *
     * @param managementClient Client to make management calls.
     * @param serializer Serializer to deserialize ATOM XML responses.
     * @throws NullPointerException if {@code managementClient} or {@code serializer} is null.
     */
    ServiceBusAdministrationAsyncClient(ServiceBusManagementClientImpl managementClient,
        ServiceBusManagementSerializer serializer) {
        this.serializer = Objects.requireNonNull(serializer, "'serializer' cannot be null.");
        this.managementClient = Objects.requireNonNull(managementClient, "'managementClient' cannot be null.");
        this.entityClient = managementClient.getEntities();
        this.rulesClient = managementClient.getRules();
    }

    /**
     * Creates a queue with the given name and default {@link CreateQueueOptions options}.
     *
     * @param queueName Name of the queue to create.
     * @return A Mono that completes with information about the created queue.
     * @throws ClientAuthenticationException if the client's credentials do not have access to modify the namespace.
     * @throws HttpResponseException If the request body was invalid, the queue quota is exceeded, or an error
     *     occurred processing the request.
     * @throws IllegalArgumentException if {@code queueName} is null or an empty string.
     * @throws ResourceExistsException if a queue exists with the same {@code queueName}.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Mono<QueueProperties> createQueue(String queueName) {
        try {
            return createQueue(queueName, new CreateQueueOptions());
        } catch (RuntimeException e) {
            return monoError(LOGGER, e);
        }
    }

    /**
     * Creates a queue with the given name and the supplied {@link CreateQueueOptions}.
     *
     * @param queueName Name of the queue to create.
     * @param queueOptions Options about the queue to create.
     * @return A Mono that completes with information about the created queue.
     * @throws ClientAuthenticationException if the client's credentials do not have access to modify the namespace.
     * @throws HttpResponseException If the request body was invalid, the queue quota is exceeded, or an error
     *     occurred processing the request.
     * @throws IllegalArgumentException if {@code queueName} is null or an empty string.
     * @throws ResourceExistsException if a queue exists with the same {@code queueName}.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Mono<QueueProperties> createQueue(String queueName, CreateQueueOptions queueOptions) {
        return createQueueWithResponse(queueName, queueOptions).map(Response::getValue);
    }

    /**
     * Creates a queue and returns the created queue in addition to the HTTP response.
     *
     * @param queueName Name of the queue to create.
     * @param queueOptions Options about the queue to create.
     * @return A Mono that returns the created queue in addition to the HTTP response.
     * @throws ClientAuthenticationException if the client's credentials do not have access to modify the namespace.
     * @throws HttpResponseException If the request body was invalid, the queue quota is exceeded, or an error
     *     occurred processing the request.
     * @throws IllegalArgumentException if {@code queueName} is null or an empty string.
     * @throws ResourceExistsException if a queue exists with the same {@code queueName}.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Mono<Response<QueueProperties>> createQueueWithResponse(String queueName, CreateQueueOptions queueOptions) {
        return withContext(context -> createQueueWithResponse(queueName, queueOptions, context));
    }

    /**
     * Creates a rule under the given topic and subscription with default {@link CreateRuleOptions options}.
     *
     * @param topicName Name of the topic associated with rule.
     * @param subscriptionName Name of the subscription associated with the rule.
     * @param ruleName Name of the rule.
     * @return A Mono that completes with information about the created rule.
     * @throws ClientAuthenticationException if the client's credentials do not have access to modify the namespace.
     * @throws HttpResponseException If the request body was invalid, the quota is exceeded, or an error occurred
     *     processing the request.
     * @throws IllegalArgumentException if {@code topicName} or {@code ruleName} are null or empty strings.
     * @throws ResourceExistsException if a rule exists with the same topic, subscription, and rule name.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Mono<RuleProperties> createRule(String topicName, String subscriptionName, String ruleName) {
        try {
            return createRule(topicName, subscriptionName, ruleName, new CreateRuleOptions());
        } catch (RuntimeException e) {
            return monoError(LOGGER, e);
        }
    }

    /**
     * Creates a rule with the supplied {@link CreateRuleOptions}.
     *
     * @param topicName Name of the topic associated with rule.
     * @param subscriptionName Name of the subscription associated with the rule.
     * @param ruleName Name of the rule.
     * @param ruleOptions Information about the rule to create.
     * @return A Mono that completes with information about the created rule.
     * @throws ClientAuthenticationException if the client's credentials do not have access to modify the namespace.
     * @throws HttpResponseException If the request body was invalid, the quota is exceeded, or an error occurred
     *     processing the request.
     * @throws IllegalArgumentException if {@code topicName} or {@code ruleName} are null or empty strings.
     * @throws NullPointerException if {@code ruleOptions} is null.
     * @throws ResourceExistsException if a rule exists with the same topic and rule name.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Mono<RuleProperties> createRule(String topicName, String subscriptionName, String ruleName,
        CreateRuleOptions ruleOptions) {
        return createRuleWithResponse(topicName, subscriptionName, ruleName, ruleOptions)
            .map(Response::getValue);
    }

    /**
     * Creates a rule and returns the created rule in addition to the HTTP response.
     *
     * @param topicName Name of the topic associated with rule.
     * @param subscriptionName Name of the subscription associated with the rule.
     * @param ruleName Name of the rule.
     * @param ruleOptions Information about the rule to create.
     * @return A Mono that returns the created rule in addition to the HTTP response.
     * @throws ClientAuthenticationException if the client's credentials do not have access to modify the namespace.
     * @throws HttpResponseException If the request body was invalid, the quota is exceeded, or an error occurred
     *     processing the request.
     * @throws IllegalArgumentException if {@code topicName} or {@code ruleName} are null or empty strings.
     * @throws NullPointerException if {@code ruleOptions} is null.
     * @throws ResourceExistsException if a rule exists with the same topic and rule name.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Mono<Response<RuleProperties>> createRuleWithResponse(String topicName, String subscriptionName,
        String ruleName, CreateRuleOptions ruleOptions) {
        return withContext(context ->
            createRuleWithResponse(topicName, subscriptionName, ruleName, ruleOptions, context));
    }

    /**
     * Creates a subscription with the given topic and subscription names.
* * @param topicName Name of the topic associated with subscription. * @param subscriptionName Name of the subscription. * * @return A Mono that completes with information about the created subscription. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If the request body was invalid, the quota is exceeded, or an error occurred * processing the request. * @throws IllegalArgumentException if {@code topicName} or {@code subscriptionName} are null or empty strings. * @throws ResourceExistsException if a subscription exists with the same topic and subscription name. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<SubscriptionProperties> createSubscription(String topicName, String subscriptionName) { try { return createSubscription(topicName, subscriptionName, new CreateSubscriptionOptions()); } catch (RuntimeException e) { return monoError(LOGGER, e); } } /** * Creates a subscription with the {@link CreateSubscriptionOptions}. * * @param topicName Name of the topic associated with subscription. * @param subscriptionName Name of the subscription. * @param subscriptionOptions Information about the subscription to create. * * @return A Mono that completes with information about the created subscription. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If the request body was invalid, the quota is exceeded, or an error occurred * processing the request. * @throws IllegalArgumentException if {@code topicName} or {@code subscriptionName} are null or empty strings. * @throws NullPointerException if {@code subscriptionOptions} is null. * @throws ResourceExistsException if a subscription exists with the same topic and subscription name. 
* @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<SubscriptionProperties> createSubscription(String topicName, String subscriptionName, CreateSubscriptionOptions subscriptionOptions) { return createSubscriptionWithResponse(topicName, subscriptionName, subscriptionOptions) .map(response -> response.getValue()); } /** * Creates a subscription and returns the created subscription in addition to the HTTP response. * * @param topicName Name of the topic associated with subscription. * @param subscriptionName Name of the subscription. * @param subscriptionOptions Information about the subscription to create. * * @return A Mono that returns the created subscription in addition to the HTTP response. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If the request body was invalid, the quota is exceeded, or an error occurred * processing the request. * @throws IllegalArgumentException if {@code topicName} or {@code subscriptionName} are null or empty strings. * @throws NullPointerException if {@code subscriptionOptions} is null. * @throws ResourceExistsException if a subscription exists with the same topic and subscription name. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<SubscriptionProperties>> createSubscriptionWithResponse(String topicName, String subscriptionName, CreateSubscriptionOptions subscriptionOptions) { return withContext(context -> createSubscriptionWithResponse(topicName, subscriptionName, subscriptionOptions, context)); } /** * Creates a topic with the given name. * * @param topicName Name of the topic to create. * * @return A Mono that completes with information about the created topic. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. 
* @throws HttpResponseException If the request body was invalid, the topic quota is exceeded, or an error * occurred processing the request. * @throws IllegalArgumentException if {@code topicName} is null or an empty string. * @throws ResourceExistsException if a topic exists with the same {@code topicName}. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<TopicProperties> createTopic(String topicName) { try { return createTopic(topicName, new CreateTopicOptions()); } catch (RuntimeException e) { return monoError(LOGGER, e); } } /** * Creates a topic with the {@link CreateTopicOptions}. * * @param topicName Name of the topic to create. * @param topicOptions The options used to create the topic. * * @return A Mono that completes with information about the created topic. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If the request body was invalid, the topic quota is exceeded, or an error * occurred processing the request. * @throws IllegalArgumentException if {@code topicName} is null or an empty string. * @throws NullPointerException if {@code topicOptions} is null. * @throws ResourceExistsException if a topic exists with the same {@code topicName}. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<TopicProperties> createTopic(String topicName, CreateTopicOptions topicOptions) { return createTopicWithResponse(topicName, topicOptions).map(response -> response.getValue()); } /** * Creates a topic and returns the created topic in addition to the HTTP response. * * @param topicName Name of the topic to create. * @param topicOptions The options used to create the topic. * * @return A Mono that returns the created topic in addition to the HTTP response. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. 
* @throws HttpResponseException If the request body was invalid, the topic quota is exceeded, or an error * occurred processing the request. * @throws IllegalArgumentException if {@link TopicProperties * string. * @throws NullPointerException if {@code topicOptions} is null. * @throws ResourceExistsException if a topic exists with the same {@code topicName}. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<TopicProperties>> createTopicWithResponse(String topicName, CreateTopicOptions topicOptions) { return withContext(context -> createTopicWithResponse(topicName, topicOptions, context)); } /** * Deletes a queue the matching {@code queueName}. * * @param queueName Name of queue to delete. * * @return A Mono that completes when the queue is deleted. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. * @throws IllegalArgumentException if {@code queueName} is null or is an empty string. * @throws ResourceNotFoundException if the {@code queueName} does not exist. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Void> deleteQueue(String queueName) { return deleteQueueWithResponse(queueName).then(); } /** * Deletes a queue the matching {@code queueName} and returns the HTTP response. * * @param queueName Name of queue to delete. * * @return A Mono that completes when the queue is deleted and returns the HTTP response. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. * @throws IllegalArgumentException if {@code queueName} is null or is an empty string. * @throws ResourceNotFoundException if the {@code queueName} does not exist. 
* @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<Void>> deleteQueueWithResponse(String queueName) { return withContext(context -> deleteQueueWithResponse(queueName, context)); } /** * Deletes a rule the matching {@code ruleName}. * * @param topicName Name of topic associated with rule to delete. * @param subscriptionName Name of the subscription associated with the rule to delete. * @param ruleName Name of rule to delete. * * @return A Mono that completes when the rule is deleted. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. * @throws IllegalArgumentException if {@code topicName}, {@code subscriptionName}, or {@code ruleName} is null or * an empty string. * @throws ResourceNotFoundException if the {@code ruleName} does not exist. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Void> deleteRule(String topicName, String subscriptionName, String ruleName) { return deleteRuleWithResponse(topicName, subscriptionName, ruleName).then(); } /** * Deletes a rule the matching {@code ruleName} and returns the HTTP response. * * @param topicName Name of topic associated with rule to delete. * @param subscriptionName Name of the subscription associated with the rule to delete. * @param ruleName Name of rule to delete. * * @return A Mono that completes when the rule is deleted and returns the HTTP response. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. * @throws IllegalArgumentException if {@code topicName}, {@code subscriptionName}, or {@code ruleName} is null or * an empty string. * @throws ResourceNotFoundException if the {@code ruleName} does not exist. 
*/ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<Void>> deleteRuleWithResponse(String topicName, String subscriptionName, String ruleName) { return withContext(context -> deleteRuleWithResponse(topicName, subscriptionName, ruleName, context)); } /** * Deletes a subscription the matching {@code subscriptionName}. * * @param topicName Name of topic associated with subscription to delete. * @param subscriptionName Name of subscription to delete. * * @return A Mono that completes when the subscription is deleted. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. * @throws IllegalArgumentException if {@code topicName} or {@code subscriptionName} is null or an empty string. * @throws ResourceNotFoundException if the {@code subscriptionName} does not exist. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Void> deleteSubscription(String topicName, String subscriptionName) { return deleteSubscriptionWithResponse(topicName, subscriptionName).then(); } /** * Deletes a subscription the matching {@code subscriptionName} and returns the HTTP response. * * @param topicName Name of topic associated with subscription to delete. * @param subscriptionName Name of subscription to delete. * * @return A Mono that completes when the subscription is deleted and returns the HTTP response. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. * @throws IllegalArgumentException if {@code topicName} or {@code subscriptionName} is null or an empty string. * @throws ResourceNotFoundException if the {@code subscriptionName} does not exist. 
* @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<Void>> deleteSubscriptionWithResponse(String topicName, String subscriptionName) { return withContext(context -> deleteSubscriptionWithResponse(topicName, subscriptionName, context)); } /** * Deletes a topic the matching {@code topicName}. * * @param topicName Name of topic to delete. * * @return A Mono that completes when the topic is deleted. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. * @throws IllegalArgumentException if {@code topicName} is null or an empty string. * @throws ResourceNotFoundException if the {@code topicName} does not exist. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Void> deleteTopic(String topicName) { return deleteTopicWithResponse(topicName).then(); } /** * Deletes a topic the matching {@code topicName} and returns the HTTP response. * * @param topicName Name of topic to delete. * * @return A Mono that completes when the topic is deleted and returns the HTTP response. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. * @throws IllegalArgumentException if {@code topicName} is null or an empty string. * @throws ResourceNotFoundException if the {@code topicName} does not exist. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<Void>> deleteTopicWithResponse(String topicName) { return withContext(context -> deleteTopicWithResponse(topicName, context)); } /** * Gets information about the queue. * * @param queueName Name of queue to get information about. * * @return A Mono that completes with information about the queue. 
* @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. * @throws IllegalArgumentException if {@code queueName} is null or an empty string. * @throws ResourceNotFoundException if the {@code queueName} does not exist. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<QueueProperties> getQueue(String queueName) { return getQueueWithResponse(queueName).map(response -> response.getValue()); } /** * Gets information about the queue along with its HTTP response. * * @param queueName Name of queue to get information about. * * @return A Mono that completes with information about the queue and the associated HTTP response. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. * @throws IllegalArgumentException if {@code queueName} is null or an empty string. * @throws ResourceNotFoundException if the {@code queueName} does not exist. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<QueueProperties>> getQueueWithResponse(String queueName) { return withContext(context -> getQueueWithResponse(queueName, context, Function.identity())); } /** * Gets whether or not a queue with {@code queueName} exists in the Service Bus namespace. * * @param queueName Name of the queue. * * @return A Mono that completes indicating whether the queue exists. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. * @throws IllegalArgumentException if {@code queueName} is null or an empty string. 
*/ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Boolean> getQueueExists(String queueName) { return getQueueExistsWithResponse(queueName).map(response -> response.getValue()); } /** * Gets whether or not a queue with {@code queueName} exists in the Service Bus namespace. * * @param queueName Name of the queue. * * @return A Mono that completes indicating whether or not the queue exists along with its HTTP response. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. * @throws IllegalArgumentException if {@code queueName} is null or an empty string. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<Boolean>> getQueueExistsWithResponse(String queueName) { return getEntityExistsWithResponse(getQueueWithResponse(queueName)); } /** * Gets runtime properties about the queue. * * @param queueName Name of queue to get information about. * * @return A Mono that completes with runtime properties about the queue. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. * @throws IllegalArgumentException if {@code queueName} is null or an empty string. * @throws ResourceNotFoundException if the {@code queueName} does not exist. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<QueueRuntimeProperties> getQueueRuntimeProperties(String queueName) { return getQueueRuntimePropertiesWithResponse(queueName).map(response -> response.getValue()); } /** * Gets runtime properties about the queue along with its HTTP response. * * @param queueName Name of queue to get information about. * * @return A Mono that completes with runtime properties about the queue and the associated HTTP response. 
* @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. * @throws IllegalArgumentException if {@code queueName} is null or an empty string. * @throws ResourceNotFoundException if the {@code queueName} does not exist. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<QueueRuntimeProperties>> getQueueRuntimePropertiesWithResponse(String queueName) { return withContext(context -> getQueueWithResponse(queueName, context, QueueRuntimeProperties::new)); } /** * Gets information about the Service Bus namespace. * * @return A Mono that completes with information about the Service Bus namespace. * @throws ClientAuthenticationException if the client's credentials do not have access to the namespace. * @throws HttpResponseException If error occurred processing the request. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<NamespaceProperties> getNamespaceProperties() { return getNamespacePropertiesWithResponse().map(response -> response.getValue()); } /** * Gets information about the Service Bus namespace along with its HTTP response. * * @return A Mono that completes with information about the namespace and the associated HTTP response. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<NamespaceProperties>> getNamespacePropertiesWithResponse() { return withContext(this::getNamespacePropertiesWithResponse); } /** * Gets a rule from the service namespace. * * Only following data types are deserialized in Filters and Action parameters - string, int, long, boolean, double, * and OffsetDateTime. Other data types would return its string value. 
* * @param topicName The name of the topic relative to service bus namespace. * @param subscriptionName The subscription name the rule belongs to. * @param ruleName The name of the rule to retrieve. * * @return The associated rule. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<RuleProperties> getRule(String topicName, String subscriptionName, String ruleName) { return getRuleWithResponse(topicName, subscriptionName, ruleName).map(response -> response.getValue()); } /** * Gets a rule from the service namespace. * * Only following data types are deserialized in Filters and Action parameters - string, int, long, bool, double, * and OffsetDateTime. Other data types would return its string value. * * @param topicName The name of the topic relative to service bus namespace. * @param subscriptionName The subscription name the rule belongs to. * @param ruleName The name of the rule to retrieve. * * @return The associated rule with the corresponding HTTP response. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<RuleProperties>> getRuleWithResponse(String topicName, String subscriptionName, String ruleName) { return withContext(context -> getRuleWithResponse(topicName, subscriptionName, ruleName, context)); } /** * Gets information about the queue. * * @param topicName Name of topic associated with subscription. * @param subscriptionName Name of subscription to get information about. * * @return A Mono that completes with information about the subscription. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. * @throws IllegalArgumentException if {@code topicName} or {@code subscriptionName} are null or empty strings. * @throws ResourceNotFoundException if the {@code subscriptionName} does not exist in the {@code topicName}. 
* @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<SubscriptionProperties> getSubscription(String topicName, String subscriptionName) { return getSubscriptionWithResponse(topicName, subscriptionName).map(response -> response.getValue()); } /** * Gets information about the subscription along with its HTTP response. * * @param topicName Name of topic associated with subscription. * @param subscriptionName Name of subscription to get information about. * * @return A Mono that completes with information about the subscription and the associated HTTP response. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. * @throws IllegalArgumentException if {@code topicName} or {@code subscriptionName} are null or empty strings. * @throws ResourceNotFoundException if the {@code subscriptionName} does not exist. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<SubscriptionProperties>> getSubscriptionWithResponse(String topicName, String subscriptionName) { return withContext(context -> getSubscriptionWithResponse(topicName, subscriptionName, context, Function.identity())); } /** * Gets whether or not a subscription within a topic exists. * * @param topicName Name of topic associated with subscription. * @param subscriptionName Name of the subscription. * * @return A Mono that completes indicating whether or not the subscription exists. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. * @throws IllegalArgumentException if {@code subscriptionName} is null or an empty string. 
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Mono<Boolean> getSubscriptionExists(String topicName, String subscriptionName) {
        // Delegates to the *WithResponse variant and unwraps the response body.
        return getSubscriptionExistsWithResponse(topicName, subscriptionName).map(response -> response.getValue());
    }

    /**
     * Gets whether or not a subscription within a topic exists.
     *
     * @param topicName Name of topic associated with subscription.
     * @param subscriptionName Name of the subscription.
     *
     * @return A Mono that completes indicating whether the subscription exists along with its HTTP response.
     * @throws ClientAuthenticationException if the client's credentials do not have access to modify the
     * namespace.
     * @throws HttpResponseException If error occurred processing the request.
     * @throws IllegalArgumentException if {@code subscriptionName} is null or an empty string.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Mono<Response<Boolean>> getSubscriptionExistsWithResponse(String topicName, String subscriptionName) {
        // A ResourceNotFoundException from the underlying GET is mapped to "false" by getEntityExistsWithResponse.
        return getEntityExistsWithResponse(getSubscriptionWithResponse(topicName, subscriptionName));
    }

    /**
     * Gets runtime properties about the subscription.
     *
     * @param topicName Name of topic associated with subscription.
     * @param subscriptionName Name of subscription to get information about.
     *
     * @return A Mono that completes with runtime properties about the subscription.
     * @throws ClientAuthenticationException if the client's credentials do not have access to modify the
     * namespace.
     * @throws HttpResponseException If error occurred processing the request.
     * @throws IllegalArgumentException if {@code topicName} or {@code subscriptionName} are null or empty strings.
     * @throws ResourceNotFoundException if the {@code subscriptionName} does not exist.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Mono<SubscriptionRuntimeProperties> getSubscriptionRuntimeProperties(
        String topicName, String subscriptionName) {
        return getSubscriptionRuntimePropertiesWithResponse(topicName, subscriptionName)
            .map(response -> response.getValue());
    }

    /**
     * Gets runtime properties about the subscription along with its HTTP response.
     *
     * @param topicName Name of topic associated with subscription.
     * @param subscriptionName Name of subscription to get information about.
     *
     * @return A Mono that completes with runtime properties about the subscription.
     * @throws ClientAuthenticationException if the client's credentials do not have access to modify the
     * namespace.
     * @throws HttpResponseException If error occurred processing the request.
     * @throws IllegalArgumentException if {@code subscriptionName} is null or an empty string.
     * @throws ResourceNotFoundException if the {@code subscriptionName} does not exist.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Mono<Response<SubscriptionRuntimeProperties>> getSubscriptionRuntimePropertiesWithResponse(
        String topicName, String subscriptionName) {
        // SubscriptionRuntimeProperties::new maps the raw SubscriptionProperties into the runtime view.
        return withContext(context ->
            getSubscriptionWithResponse(topicName, subscriptionName, context, SubscriptionRuntimeProperties::new));
    }

    /**
     * Gets information about the topic.
     *
     * @param topicName Name of topic to get information about.
     *
     * @return A Mono that completes with information about the topic.
     * @throws ClientAuthenticationException if the client's credentials do not have access to modify the
     * namespace.
     * @throws HttpResponseException If error occurred processing the request.
     * @throws IllegalArgumentException if {@code topicName} is null or an empty string.
     * @throws ResourceNotFoundException if the {@code topicName} does not exist.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Mono<TopicProperties> getTopic(String topicName) {
        return getTopicWithResponse(topicName).map(response -> response.getValue());
    }

    /**
     * Gets information about the topic along with its HTTP response.
     *
     * @param topicName Name of topic to get information about.
     *
     * @return A Mono that completes with information about the topic and the associated HTTP response.
     * @throws ClientAuthenticationException if the client's credentials do not have access to modify the
     * namespace.
     * @throws HttpResponseException If error occurred processing the request.
     * @throws IllegalArgumentException if {@code topicName} is null or an empty string.
     * @throws ResourceNotFoundException if the {@code topicName} does not exist.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Mono<Response<TopicProperties>> getTopicWithResponse(String topicName) {
        // Function.identity(): return the deserialized TopicProperties unmapped.
        return withContext(context -> getTopicWithResponse(topicName, context, Function.identity()));
    }

    /**
     * Gets whether or not a topic with {@code topicName} exists in the Service Bus namespace.
     *
     * @param topicName Name of the topic.
     *
     * @return A Mono that completes indicating whether or not the topic exists.
     * @throws ClientAuthenticationException if the client's credentials do not have access to modify the
     * namespace.
     * @throws HttpResponseException If error occurred processing the request.
     * @throws IllegalArgumentException if {@code topicName} is null or an empty string.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Mono<Boolean> getTopicExists(String topicName) {
        return getTopicExistsWithResponse(topicName).map(response -> response.getValue());
    }

    /**
     * Gets whether or not a topic with {@code topicName} exists in the Service Bus namespace.
     *
     * @param topicName Name of the topic.
     *
     * @return A Mono that completes indicating whether the topic exists along with its HTTP response.
     * @throws ClientAuthenticationException if the client's credentials do not have access to modify the
     * namespace.
     * @throws HttpResponseException If error occurred processing the request.
     * @throws IllegalArgumentException if {@code topicName} is null or an empty string.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Mono<Response<Boolean>> getTopicExistsWithResponse(String topicName) {
        return getEntityExistsWithResponse(getTopicWithResponse(topicName));
    }

    /**
     * Gets runtime properties about the topic.
     *
     * @param topicName Name of topic to get information about.
     *
     * @return A Mono that completes with runtime properties about the topic.
     * @throws ClientAuthenticationException if the client's credentials do not have access to modify the
     * namespace.
     * @throws HttpResponseException If error occurred processing the request.
     * @throws IllegalArgumentException if {@code topicName} is null or an empty string.
     * @throws ResourceNotFoundException if the {@code topicName} does not exist.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Mono<TopicRuntimeProperties> getTopicRuntimeProperties(String topicName) {
        return getTopicRuntimePropertiesWithResponse(topicName).map(response -> response.getValue());
    }

    /**
     * Gets runtime properties about the topic with its HTTP response.
     *
     * @param topicName Name of topic to get information about.
     *
     * @return A Mono that completes with runtime properties about the topic and the associated HTTP response.
     * @throws ClientAuthenticationException if the client's credentials do not have access to modify the
     * namespace.
     * @throws HttpResponseException If error occurred processing the request.
     * @throws IllegalArgumentException if {@code topicName} is null or an empty string.
     * @throws ResourceNotFoundException if the {@code topicName} does not exist.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Mono<Response<TopicRuntimeProperties>> getTopicRuntimePropertiesWithResponse(String topicName) {
        return withContext(context -> getTopicWithResponse(topicName, context, TopicRuntimeProperties::new));
    }

    /**
     * Fetches all the queues in the Service Bus namespace.
     *
     * @return A Flux of {@link QueueProperties queues} in the Service Bus namespace.
     * @throws ClientAuthenticationException if the client's credentials do not have access to modify the
     * namespace.
     */
    @ServiceMethod(returns = ReturnType.COLLECTION)
    public PagedFlux<QueueProperties> listQueues() {
        return new PagedFlux<>(() -> withContext(context -> listQueuesFirstPage(context)),
            token -> withContext(context -> listQueuesNextPage(token, context)));
    }

    /**
     * Fetches all the rules for a topic and subscription.
     *
     * @param topicName The topic name under which all the rules need to be retrieved.
     * @param subscriptionName The name of the subscription for which all rules need to be retrieved.
     *
     * @return A Flux of {@link RuleProperties rules} for the {@code topicName} and {@code subscriptionName}.
     * @throws ClientAuthenticationException if the client's credentials do not have access to modify the
     * namespace.
     * @throws IllegalArgumentException if {@code topicName} or {@code subscriptionName} is null or an empty string.
     */
    @ServiceMethod(returns = ReturnType.COLLECTION)
    public PagedFlux<RuleProperties> listRules(String topicName, String subscriptionName) {
        // Argument errors are surfaced through the paged flux rather than thrown synchronously.
        if (topicName == null) {
            return pagedFluxError(LOGGER, new NullPointerException("'topicName' cannot be null."));
        } else if (topicName.isEmpty()) {
            return pagedFluxError(LOGGER, new IllegalArgumentException("'topicName' cannot be an empty string."));
        }

        return new PagedFlux<>(
            () -> withContext(context -> listRulesFirstPage(topicName, subscriptionName, context)),
            token -> withContext(context -> listRulesNextPage(topicName, subscriptionName, token, context)));
    }

    /**
     * Fetches all the subscriptions for a topic.
     *
     * @param topicName The topic name under which all the subscriptions need to be retrieved.
     *
     * @return A Flux of {@link SubscriptionProperties subscriptions} for the {@code topicName}.
     * @throws ClientAuthenticationException if the client's credentials do not have access to modify the
     * namespace.
     * @throws IllegalArgumentException if {@code topicName} is null or an empty string.
     */
    @ServiceMethod(returns = ReturnType.COLLECTION)
    public PagedFlux<SubscriptionProperties> listSubscriptions(String topicName) {
        if (topicName == null) {
            return pagedFluxError(LOGGER, new NullPointerException("'topicName' cannot be null."));
        } else if (topicName.isEmpty()) {
            return pagedFluxError(LOGGER, new IllegalArgumentException("'topicName' cannot be an empty string."));
        }

        return new PagedFlux<>(
            () -> withContext(context -> listSubscriptionsFirstPage(topicName, context)),
            token -> withContext(context -> listSubscriptionsNextPage(topicName, token, context)));
    }

    /**
     * Fetches all the topics in the Service Bus namespace.
     *
     * @return A Flux of {@link TopicProperties topics} in the Service Bus namespace.
     * @throws ClientAuthenticationException if the client's credentials do not have access to modify the
     * namespace.
     */
    @ServiceMethod(returns = ReturnType.COLLECTION)
    public PagedFlux<TopicProperties> listTopics() {
        return new PagedFlux<>(
            () -> withContext(context -> listTopicsFirstPage(context)),
            token -> withContext(context -> listTopicsNextPage(token, context)));
    }

    /**
     * Updates a queue with the given {@link QueueProperties}. The {@link QueueProperties} must be fully populated as
     * all of the properties are replaced. If a property is not set the service default value is used.
     *
     * <p>
     * Only a subset of {@link QueueProperties} can be updated; see the Service Bus documentation for the list.
     * (NOTE(review): the original list of updatable properties was truncated in this source — verify.)
     *
     * @param queue Information about the queue to update. You must provide all the property values that are desired
     * on the updated entity. Any values not provided are set to the service default values.
     *
     * @return A Mono that completes with the updated queue.
     * @throws ClientAuthenticationException if the client's credentials do not have access to modify the
     * namespace.
     * @throws HttpResponseException If the request body was invalid, the queue quota is exceeded, or an error
     * occurred processing the request.
     * @throws NullPointerException if {@code queue} is null.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Mono<QueueProperties> updateQueue(QueueProperties queue) {
        return updateQueueWithResponse(queue).map(response -> response.getValue());
    }

    /**
     * Updates a queue with the given {@link QueueProperties}. The {@link QueueProperties} must be fully populated as
     * all of the properties are replaced. If a property is not set the service default value is used.
     *
     * The suggested flow is:
     * <ol>
     * <li>Get the current queue description.</li>
     * <li>Update the required elements.</li>
     * <li>Pass the updated description into this method.</li>
     * </ol>
     *
     * <p>
     * Only a subset of {@link QueueProperties} can be updated; see the Service Bus documentation for the list.
     * (NOTE(review): the original list of updatable properties was truncated in this source — verify.)
     *
     * @param queue Information about the queue to update. You must provide all the property values that are desired
     * on the updated entity. Any values not provided are set to the service default values.
     *
     * @return A Mono that returns the updated queue in addition to the HTTP response.
     * @throws ClientAuthenticationException if the client's credentials do not have access to modify the
     * namespace.
     * @throws HttpResponseException If the request body was invalid, the queue quota is exceeded, or an error
     * occurred processing the request.
     * @throws NullPointerException if {@code queue} is null.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Mono<Response<QueueProperties>> updateQueueWithResponse(QueueProperties queue) {
        return withContext(context -> updateQueueWithResponse(queue, context));
    }

    /**
     * Updates a rule with the given {@link RuleProperties}. The {@link RuleProperties} must be fully populated as all
     * the properties are replaced. If a property is not set the service default value is used.
     *
     * The suggested flow is:
     * <ol>
     * <li>Get the current rule description.</li>
     * <li>Update the required elements.</li>
     * <li>Pass the updated description into this method.</li>
     * </ol>
     *
     * @param topicName The topic name under which the rule is updated.
     * @param subscriptionName The name of the subscription for which the rule is updated.
     * @param rule Information about the rule to update. You must provide all the property values that are desired
     * on the updated entity. Any values not provided are set to the service default values.
     *
     * @return A Mono that returns the updated rule.
     * @throws ClientAuthenticationException if the client's credentials do not have access to modify the
     * namespace.
     * @throws HttpResponseException If the request body was invalid, the rule quota is exceeded, or an error
     * occurred processing the request.
     * @throws IllegalArgumentException if the rule name is null or an empty string.
     * @throws NullPointerException if {@code rule} is null.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Mono<RuleProperties> updateRule(String topicName, String subscriptionName, RuleProperties rule) {
        return updateRuleWithResponse(topicName, subscriptionName, rule).map(response -> response.getValue());
    }

    /**
     * Updates a rule with the given {@link RuleProperties}. The {@link RuleProperties} must be fully populated as all
     * the properties are replaced. If a property is not set the service default value is used.
     *
     * The suggested flow is:
     * <ol>
     * <li>Get the current rule description.</li>
     * <li>Update the required elements.</li>
     * <li>Pass the updated description into this method.</li>
     * </ol>
     *
     * @param topicName The topic name under which the rule is updated.
     * @param subscriptionName The name of the subscription for which the rule is updated.
     * @param rule Information about the rule to update. You must provide all the property values that are desired
     * on the updated entity. Any values not provided are set to the service default values.
     *
     * @return A Mono that returns the updated rule in addition to the HTTP response.
     * @throws ClientAuthenticationException if the client's credentials do not have access to modify the
     * namespace.
     * @throws HttpResponseException If the request body was invalid, the rule quota is exceeded, or an error
     * occurred processing the request.
     * @throws IllegalArgumentException if the rule name is null or an empty string.
     * @throws NullPointerException if {@code rule} is null.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Mono<Response<RuleProperties>> updateRuleWithResponse(String topicName, String subscriptionName,
        RuleProperties rule) {
        return withContext(context -> updateRuleWithResponse(topicName, subscriptionName, rule, context));
    }

    /**
     * Updates a subscription with the given {@link SubscriptionProperties}. The {@link SubscriptionProperties} must be
     * fully populated as all of the properties are replaced. If a property is not set the service default value is
     * used.
     *
     * The suggested flow is:
     * <ol>
     * <li>Get the current subscription description.</li>
     * <li>Update the required elements.</li>
     * <li>Pass the updated description into this method.</li>
     * </ol>
     *
     * <p>
     * Only a subset of {@link SubscriptionProperties} can be updated; see the Service Bus documentation for the list.
     * (NOTE(review): the original list of updatable properties was truncated in this source — verify.)
     *
     * @param subscription Information about the subscription to update. You must provide all the property values
     * that are desired on the updated entity. Any values not provided are set to the service default values.
     *
     * @return A Mono that returns the updated subscription.
     * @throws ClientAuthenticationException if the client's credentials do not have access to modify the
     * namespace.
     * @throws HttpResponseException If the request body was invalid, the subscription quota is exceeded, or an
     * error occurred processing the request.
     * @throws IllegalArgumentException if the subscription name is null or an empty string.
     * @throws NullPointerException if {@code subscription} is null.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Mono<SubscriptionProperties> updateSubscription(SubscriptionProperties subscription) {
        return updateSubscriptionWithResponse(subscription).map(response -> response.getValue());
    }

    /**
     * Updates a subscription with the given {@link SubscriptionProperties}. The {@link SubscriptionProperties} must be
     * fully populated as all of the properties are replaced. If a property is not set the service default value is
     * used.
     *
     * The suggested flow is:
     * <ol>
     * <li>Get the current subscription description.</li>
     * <li>Update the required elements.</li>
     * <li>Pass the updated description into this method.</li>
     * </ol>
     *
     * <p>
     * Only a subset of {@link SubscriptionProperties} can be updated; see the Service Bus documentation for the list.
     * (NOTE(review): the original list of updatable properties was truncated in this source — verify.)
     *
     * @param subscription Information about the subscription to update. You must provide all the property values
     * that are desired on the updated entity. Any values not provided are set to the service default values.
     *
     * @return A Mono that returns the updated subscription in addition to the HTTP response.
     * @throws ClientAuthenticationException if the client's credentials do not have access to modify the
     * namespace.
     * @throws HttpResponseException If the request body was invalid, the subscription quota is exceeded, or an
     * error occurred processing the request.
     * @throws IllegalArgumentException if the subscription name is null or an empty string.
     * @throws NullPointerException if {@code subscription} is null.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Mono<Response<SubscriptionProperties>> updateSubscriptionWithResponse(
        SubscriptionProperties subscription) {
        return withContext(context -> updateSubscriptionWithResponse(subscription, context));
    }

    /**
     * Updates a topic with the given {@link TopicProperties}. The {@link TopicProperties} must be fully populated as
     * all of the properties are replaced. If a property is not set the service default value is used.
     *
     * The suggested flow is:
     * <ol>
     * <li>Get the current topic description.</li>
     * <li>Update the required elements.</li>
     * <li>Pass the updated description into this method.</li>
     * </ol>
     *
     * <p>
     * Only a subset of {@link TopicProperties} can be updated; see the Service Bus documentation for the list.
     * (NOTE(review): the original list of updatable properties was truncated in this source — verify.)
     *
     * @param topic Information about the topic to update. You must provide all the property values that are desired
     * on the updated entity. Any values not provided are set to the service default values.
     *
     * @return A Mono that completes with the updated topic.
     * @throws ClientAuthenticationException if the client's credentials do not have access to modify the
     * namespace.
     * @throws HttpResponseException If the request body was invalid, the topic quota is exceeded, or an error
     * occurred processing the request.
     * @throws IllegalArgumentException if the topic name is null or an empty string.
     * @throws NullPointerException if {@code topic} is null.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Mono<TopicProperties> updateTopic(TopicProperties topic) {
        return updateTopicWithResponse(topic).map(response -> response.getValue());
    }

    /**
     * Updates a topic with the given {@link TopicProperties}. The {@link TopicProperties} must be fully populated as
     * all of the properties are replaced. If a property is not set the service default value is used.
     *
     * The suggested flow is:
     * <ol>
     * <li>Get the current topic description.</li>
     * <li>Update the required elements.</li>
     * <li>Pass the updated description into this method.</li>
     * </ol>
     *
     * @param topic Information about the topic to update. You must provide all the property values that are desired
     * on the updated entity. Any values not provided are set to the service default values.
     *
     * @return A Mono that completes with the updated topic and its HTTP response.
     * @throws ClientAuthenticationException if the client's credentials do not have access to modify the
     * namespace.
     * @throws HttpResponseException If the request body was invalid, the topic quota is exceeded, or an error
     * occurred processing the request.
     * @throws IllegalArgumentException if the topic name is null or an empty string.
     * @throws NullPointerException if {@code topic} is null.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Mono<Response<TopicProperties>> updateTopicWithResponse(TopicProperties topic) {
        return withContext(context -> updateTopicWithResponse(topic, context));
    }

    /**
     * Creates a queue with its context.
     *
     * @param queueName Name of the queue to create.
     * @param createQueueOptions Queue to create.
     * @param context Context to pass into request.
     *
     * @return A Mono that completes with the created {@link QueueProperties}.
     */
    Mono<Response<QueueProperties>> createQueueWithResponse(String queueName, CreateQueueOptions createQueueOptions,
        Context context) {
        if (CoreUtils.isNullOrEmpty(queueName)) {
            return monoError(LOGGER, new IllegalArgumentException("'queueName' cannot be null or empty."));
        }
        if (createQueueOptions == null) {
            return monoError(LOGGER, new NullPointerException("'createQueueOptions' cannot be null."));
        }
        // Normalize a null caller context before decorating it below.
        context = context == null ? Context.NONE : context;
        // Attach a mutable header bag under AZURE_REQUEST_HTTP_HEADERS_KEY before adding tracing data;
        // presumably downstream serialization records request headers into it — verify in getTracingContext.
        final Context contextWithHeaders
            = getTracingContext(context.addData(AZURE_REQUEST_HTTP_HEADERS_KEY, new HttpHeaders()));
        // Forward-to entities are passed through getForwardToEntity/getForwardDlqEntity; a non-null result
        // replaces the caller-supplied value (looks like name-to-URL normalization — confirm in the helpers).
        final String forwardTo = getForwardToEntity(createQueueOptions.getForwardTo(), contextWithHeaders);
        if (forwardTo != null) {
            createQueueOptions.setForwardTo(forwardTo);
        }
        final String forwardDlq
            = getForwardDlqEntity(createQueueOptions.getForwardDeadLetteredMessagesTo(), contextWithHeaders);
        if (forwardDlq != null) {
            createQueueOptions.setForwardDeadLetteredMessagesTo(forwardDlq);
        }
        final CreateQueueBody createEntity = getCreateQueueBody(EntityHelper.getQueueDescription(createQueueOptions));
        try {
            return entityClient.putWithResponseAsync(queueName, createEntity, null, contextWithHeaders)
                .onErrorMap(ServiceBusAdministrationAsyncClient::mapException)
                .map(this::deserializeQueue);
        } catch (RuntimeException ex) {
            // Defensive: surface synchronous failures from assembling the request through the Mono.
            return monoError(LOGGER, ex);
        }
    }

    /**
     * Creates a rule with its context.
     *
     * @param ruleOptions Rule to create.
     * @param context Context to pass into request.
     *
     * @return A Mono that completes with the created {@link RuleProperties}.
*/ Mono<Response<RuleProperties>> createRuleWithResponse(String topicName, String subscriptionName, String ruleName, CreateRuleOptions ruleOptions, Context context) { if (CoreUtils.isNullOrEmpty(topicName)) { return monoError(LOGGER, new IllegalArgumentException("'topicName' cannot be null or empty.")); } if (CoreUtils.isNullOrEmpty(subscriptionName)) { return monoError(LOGGER, new IllegalArgumentException("'subscriptionName' cannot be null or empty.")); } if (CoreUtils.isNullOrEmpty(ruleName)) { return monoError(LOGGER, new IllegalArgumentException("'ruleName' cannot be null or empty.")); } if (ruleOptions == null) { return monoError(LOGGER, new NullPointerException("'ruleOptions' cannot be null.")); } final CreateRuleBody createEntity = getCreateRuleBody(ruleName, ruleOptions); try { return managementClient.getRules().putWithResponseAsync(topicName, subscriptionName, ruleName, createEntity, null, getTracingContext(context)) .onErrorMap(ServiceBusAdministrationAsyncClient::mapException) .map(this::deserializeRule); } catch (RuntimeException ex) { return monoError(LOGGER, ex); } } /** * Creates a subscription with its context. * * @param subscriptionOptions Subscription to create. * @param context Context to pass into request. * * @return A Mono that completes with the created {@link SubscriptionProperties}. */ Mono<Response<SubscriptionProperties>> createSubscriptionWithResponse(String topicName, String subscriptionName, CreateSubscriptionOptions subscriptionOptions, Context context) { if (CoreUtils.isNullOrEmpty(topicName)) { return monoError(LOGGER, new IllegalArgumentException("'topicName' cannot be null or empty.")); } if (CoreUtils.isNullOrEmpty(subscriptionName)) { return monoError(LOGGER, new IllegalArgumentException("'subscriptionName' cannot be null or empty.")); } if (subscriptionOptions == null) { throw LOGGER.logExceptionAsError(new NullPointerException("'subscriptionOptions' cannot be null.")); } context = context == null ? 
Context.NONE : context; final Context contextWithHeaders = getTracingContext(context.addData(AZURE_REQUEST_HTTP_HEADERS_KEY, new HttpHeaders())); final String forwardTo = getForwardToEntity(subscriptionOptions.getForwardTo(), contextWithHeaders); if (forwardTo != null) { subscriptionOptions.setForwardTo(forwardTo); } final String forwardDlq = getForwardDlqEntity(subscriptionOptions.getForwardDeadLetteredMessagesTo(), contextWithHeaders); if (forwardDlq != null) { subscriptionOptions.setForwardDeadLetteredMessagesTo(forwardDlq); } final CreateSubscriptionBody createEntity = getCreateSubscriptionBody(EntityHelper.getSubscriptionDescription(subscriptionOptions)); try { return managementClient.getSubscriptions().putWithResponseAsync(topicName, subscriptionName, createEntity, null, contextWithHeaders) .onErrorMap(ServiceBusAdministrationAsyncClient::mapException) .map(response -> deserializeSubscription(topicName, response)); } catch (RuntimeException ex) { return monoError(LOGGER, ex); } } /** * Creates a topicOptions with its context. * * @param topicOptions Topic to create. * @param context Context to pass into request. * * @return A Mono that completes with the created {@link TopicProperties}. 
*/ Mono<Response<TopicProperties>> createTopicWithResponse(String topicName, CreateTopicOptions topicOptions, Context context) { if (CoreUtils.isNullOrEmpty(topicName)) { return monoError(LOGGER, new IllegalArgumentException("'topicName' cannot be null or empty.")); } if (topicOptions == null) { throw LOGGER.logExceptionAsError(new NullPointerException("'topicOptions' cannot be null.")); } final CreateTopicBody createEntity = getCreateTopicBody(EntityHelper.getTopicDescription(topicOptions)); try { return entityClient.putWithResponseAsync(topicName, createEntity, null, getTracingContext(context)) .onErrorMap(ServiceBusAdministrationAsyncClient::mapException) .map(this::deserializeTopic); } catch (RuntimeException ex) { return monoError(LOGGER, ex); } } /** * Deletes a queue with its context. * * @param queueName Name of queue to delete. * @param context Context to pass into request. * * @return A Mono that completes when the queue is deleted. */ Mono<Response<Void>> deleteQueueWithResponse(String queueName, Context context) { if (CoreUtils.isNullOrEmpty(queueName)) { return monoError(LOGGER, new IllegalArgumentException("'queueName' cannot be null or empty.")); } try { return entityClient.deleteWithResponseAsync(queueName, getTracingContext(context)) .onErrorMap(ServiceBusAdministrationAsyncClient::mapException) .map(response -> new SimpleResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(), null)); } catch (RuntimeException ex) { return monoError(LOGGER, ex); } } /** * Deletes a queue with its context. * * @param topicName Name of topic to delete. * @param subscriptionName Name of the subscription for the rule. * @param ruleName Name of the rule. * @param context Context to pass into request. * * @return A Mono that completes with the created {@link QueueProperties}. 
     */
    Mono<Response<Void>> deleteRuleWithResponse(String topicName, String subscriptionName, String ruleName,
        Context context) {
        if (CoreUtils.isNullOrEmpty(topicName)) {
            return monoError(LOGGER, new IllegalArgumentException("'topicName' cannot be null or empty."));
        }
        if (CoreUtils.isNullOrEmpty(subscriptionName)) {
            return monoError(LOGGER, new IllegalArgumentException("'subscriptionName' cannot be null or empty."));
        }
        if (CoreUtils.isNullOrEmpty(ruleName)) {
            return monoError(LOGGER, new IllegalArgumentException("'ruleName' cannot be null or empty."));
        }

        try {
            return rulesClient.deleteWithResponseAsync(topicName, subscriptionName, ruleName,
                getTracingContext(context))
                .onErrorMap(ServiceBusAdministrationAsyncClient::mapException)
                // The service response body is discarded; callers only need the status/headers.
                .map(response -> new SimpleResponse<>(response.getRequest(), response.getStatusCode(),
                    response.getHeaders(), null));
        } catch (RuntimeException ex) {
            return monoError(LOGGER, ex);
        }
    }

    /**
     * Deletes a subscription with its context.
     *
     * @param topicName Name of topic associated with subscription to delete.
     * @param subscriptionName Name of subscription to delete.
     * @param context Context to pass into request.
     *
     * @return A Mono that completes when the subscription is deleted.
     */
    Mono<Response<Void>> deleteSubscriptionWithResponse(String topicName, String subscriptionName, Context context) {
        if (CoreUtils.isNullOrEmpty(topicName)) {
            return monoError(LOGGER, new IllegalArgumentException("'topicName' cannot be null or empty."));
        }
        if (CoreUtils.isNullOrEmpty(subscriptionName)) {
            return monoError(LOGGER, new IllegalArgumentException("'subscriptionName' cannot be null or empty."));
        }

        try {
            return managementClient.getSubscriptions().deleteWithResponseAsync(topicName, subscriptionName,
                getTracingContext(context))
                .onErrorMap(ServiceBusAdministrationAsyncClient::mapException)
                // The service response body is discarded; callers only need the status/headers.
                .map(response -> new SimpleResponse<>(response.getRequest(), response.getStatusCode(),
                    response.getHeaders(), null));
        } catch (RuntimeException ex) {
            return monoError(LOGGER, ex);
        }
    }

    /**
     * Deletes a topic with its context.
     *
     * @param topicName Name of topic to delete.
     * @param context Context to pass into request.
     *
     * @return A Mono that completes when the topic is deleted.
     */
    Mono<Response<Void>> deleteTopicWithResponse(String topicName, Context context) {
        if (CoreUtils.isNullOrEmpty(topicName)) {
            return monoError(LOGGER, new IllegalArgumentException("'topicName' cannot be null or empty."));
        }

        try {
            return entityClient.deleteWithResponseAsync(topicName, getTracingContext(context))
                .onErrorMap(ServiceBusAdministrationAsyncClient::mapException)
                // The service response body is discarded; callers only need the status/headers.
                .map(response -> new SimpleResponse<>(response.getRequest(), response.getStatusCode(),
                    response.getHeaders(), null));
        } catch (RuntimeException ex) {
            return monoError(LOGGER, ex);
        }
    }

    /**
     * Gets whether an entity exists.
     *
     * @param getEntityOperation Operation to get information about entity. If {@link ResourceNotFoundException} is
     * thrown, then it is mapped to false.
     * @param <T> Entity type.
     *
     * @return True if the entity exists, false otherwise.
*/ <T> Mono<Response<Boolean>> getEntityExistsWithResponse(Mono<Response<T>> getEntityOperation) { return getEntityOperation.map(response -> { final boolean exists = response.getValue() != null; return (Response<Boolean>) new SimpleResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(), exists); }) .onErrorResume(ResourceNotFoundException.class, exception -> { final HttpResponse response = exception.getResponse(); final Response<Boolean> result = new SimpleResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(), false); return Mono.just(result); }); } /** * Gets a queue with its context. * * @param queueName Name of queue to fetch information for. * @param context Context to pass into request. * * @return A Mono that completes with the {@link QueueProperties}. */ <T> Mono<Response<T>> getQueueWithResponse(String queueName, Context context, Function<QueueProperties, T> mapper) { if (CoreUtils.isNullOrEmpty(queueName)) { return monoError(LOGGER, new IllegalArgumentException("'queueName' cannot be null or empty.")); } try { return entityClient.getWithResponseAsync(queueName, true, getTracingContext(context)) .onErrorMap(ServiceBusAdministrationAsyncClient::mapException) .handle((response, sink) -> { final Response<QueueProperties> deserialize = deserializeQueue(response); if (deserialize.getValue() == null) { final HttpResponse notFoundResponse = new EntityHelper.EntityNotFoundHttpResponse<>(deserialize); sink.error(new ResourceNotFoundException(String.format("Queue '%s' does not exist.", queueName), notFoundResponse)); } else { final T mapped = mapper.apply(deserialize.getValue()); sink.next(new SimpleResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(), mapped)); } }); } catch (RuntimeException ex) { return monoError(LOGGER, ex); } } Mono<Response<RuleProperties>> getRuleWithResponse(String topicName, String subscriptionName, String ruleName, Context context) { try { return 
rulesClient.getWithResponseAsync(topicName, subscriptionName, ruleName, true, getTracingContext(context)) .onErrorMap(ServiceBusAdministrationAsyncClient::mapException) .map(this::deserializeRule); } catch (RuntimeException ex) { return monoError(LOGGER, ex); } } /** * Gets a subscription with its context. * * @param topicName Name of the topic associated with the subscription. * @param subscriptionName Name of subscription to fetch information for. * @param context Context to pass into request. * * @return A Mono that completes with the {@link SubscriptionProperties}. */ <T> Mono<Response<T>> getSubscriptionWithResponse(String topicName, String subscriptionName, Context context, Function<SubscriptionProperties, T> mapper) { if (CoreUtils.isNullOrEmpty(topicName)) { return monoError(LOGGER, new IllegalArgumentException("'topicName' cannot be null or empty.")); } if (CoreUtils.isNullOrEmpty(subscriptionName)) { return monoError(LOGGER, new IllegalArgumentException("'subscriptionName' cannot be null or empty.")); } try { return managementClient.getSubscriptions().getWithResponseAsync(topicName, subscriptionName, true, getTracingContext(context)) .onErrorMap(ServiceBusAdministrationAsyncClient::mapException) .handle((response, sink) -> { final Response<SubscriptionProperties> deserialize = deserializeSubscription(topicName, response); if (deserialize.getValue() == null) { final HttpResponse notFoundResponse = new EntityHelper.EntityNotFoundHttpResponse<>(deserialize); sink.error(new ResourceNotFoundException(String.format( "Subscription '%s' in topic '%s' does not exist.", topicName, subscriptionName), notFoundResponse)); } else { final T mapped = mapper.apply(deserialize.getValue()); sink.next(new SimpleResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(), mapped)); } }); } catch (RuntimeException ex) { return monoError(LOGGER, ex); } } /** * Gets the namespace properties with its context. * * @param context Context to pass into request. 
* * @return A Mono that completes with the {@link NamespaceProperties}. */ Mono<Response<NamespaceProperties>> getNamespacePropertiesWithResponse(Context context) { return managementClient.getNamespaces().getWithResponseAsync(context).handle((response, sink) -> { final NamespacePropertiesEntry entry = response.getValue(); if (entry == null || entry.getContent() == null) { sink.error(new AzureException( "There was no content inside namespace response. Entry: " + response)); return; } final NamespaceProperties namespaceProperties = entry.getContent().getNamespaceProperties(); final Response<NamespaceProperties> result = new SimpleResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(), namespaceProperties); sink.next(result); }); } /** * Gets a topic with its context. * * @param topicName Name of topic to fetch information for. * @param context Context to pass into request. * * @return A Mono that completes with the {@link TopicProperties}. */ <T> Mono<Response<T>> getTopicWithResponse(String topicName, Context context, Function<TopicProperties, T> mapper) { if (CoreUtils.isNullOrEmpty(topicName)) { return monoError(LOGGER, new IllegalArgumentException("'topicName' cannot be null or empty.")); } try { return entityClient.getWithResponseAsync(topicName, true, getTracingContext(context)) .onErrorMap(ServiceBusAdministrationAsyncClient::mapException) .handle((response, sink) -> { final Response<TopicProperties> deserialize = deserializeTopic(response); if (deserialize.getValue() == null) { final HttpResponse notFoundResponse = new EntityHelper.EntityNotFoundHttpResponse<>(deserialize); sink.error(new ResourceNotFoundException(String.format("Topic '%s' does not exist.", topicName), notFoundResponse)); } else { final T mapped = mapper.apply(deserialize.getValue()); sink.next(new SimpleResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(), mapped)); } }); } catch (RuntimeException ex) { return monoError(LOGGER, ex); } } 
/** * Gets the first page of queues with context. * * @param context Context to pass into request. * * @return A Mono that completes with a page of queues. */ Mono<PagedResponse<QueueProperties>> listQueuesFirstPage(Context context) { try { return listQueues(0, getTracingContext(context)); } catch (RuntimeException e) { return monoError(LOGGER, e); } } /** * Gets the next page of queues with context. * * @param continuationToken Number of items to skip in feed. * @param context Context to pass into request. * * @return A Mono that completes with a page of queues or empty if there are no items left. */ Mono<PagedResponse<QueueProperties>> listQueuesNextPage(String continuationToken, Context context) { if (continuationToken == null || continuationToken.isEmpty()) { return Mono.empty(); } try { final int skip = Integer.parseInt(continuationToken); return listQueues(skip, getTracingContext(context)); } catch (RuntimeException e) { return monoError(LOGGER, e); } } /** * Gets the first page of rules with context. * * @param context Context to pass into request. * * @return A Mono that completes with a page of rules. */ Mono<PagedResponse<RuleProperties>> listRulesFirstPage(String topicName, String subscriptionName, Context context) { try { return listRules(topicName, subscriptionName, 0, getTracingContext(context)); } catch (RuntimeException e) { return monoError(LOGGER, e); } } /** * Gets the next page of rules with context. * * @param continuationToken Number of items to skip in feed. * @param context Context to pass into request. * * @return A Mono that completes with a page of rules or empty if there are no items left. 
*/ Mono<PagedResponse<RuleProperties>> listRulesNextPage(String topicName, String subscriptionName, String continuationToken, Context context) { if (continuationToken == null || continuationToken.isEmpty()) { return Mono.empty(); } try { final int skip = Integer.parseInt(continuationToken); return listRules(topicName, subscriptionName, skip, getTracingContext(context)); } catch (RuntimeException e) { return monoError(LOGGER, e); } } /** * Gets the first page of subscriptions with context. * * @param context Context to pass into request. * * @return A Mono that completes with a page of subscriptions. */ Mono<PagedResponse<SubscriptionProperties>> listSubscriptionsFirstPage(String topicName, Context context) { try { return listSubscriptions(topicName, 0, getTracingContext(context)); } catch (RuntimeException e) { return monoError(LOGGER, e); } } /** * Gets the next page of subscriptions with context. * * @param continuationToken Number of items to skip in feed. * @param context Context to pass into request. * * @return A Mono that completes with a page of subscriptions or empty if there are no items left. */ Mono<PagedResponse<SubscriptionProperties>> listSubscriptionsNextPage(String topicName, String continuationToken, Context context) { if (continuationToken == null || continuationToken.isEmpty()) { return Mono.empty(); } try { final int skip = Integer.parseInt(continuationToken); return listSubscriptions(topicName, skip, getTracingContext(context)); } catch (RuntimeException e) { return monoError(LOGGER, e); } } /** * Gets the first page of topics with context. * * @param context Context to pass into request. * * @return A Mono that completes with a page of topics. */ Mono<PagedResponse<TopicProperties>> listTopicsFirstPage(Context context) { try { return listTopics(0, getTracingContext(context)); } catch (RuntimeException e) { return monoError(LOGGER, e); } } /** * Gets the next page of topics with context. 
* * @param continuationToken Number of items to skip in feed. * @param context Context to pass into request. * * @return A Mono that completes with a page of topics or empty if there are no items left. */ Mono<PagedResponse<TopicProperties>> listTopicsNextPage(String continuationToken, Context context) { if (continuationToken == null || continuationToken.isEmpty()) { return Mono.empty(); } try { final int skip = Integer.parseInt(continuationToken); return listTopics(skip, getTracingContext(context)); } catch (RuntimeException e) { return monoError(LOGGER, e); } } /** * Updates a queue with its context. * * @param queue Information about the queue to update. You must provide all the property values that are desired * on the updated entity. Any values not provided are set to the service default values. * @param context Context to pass into request. * * @return A Mono that completes with the updated {@link QueueProperties}. */ Mono<Response<QueueProperties>> updateQueueWithResponse(QueueProperties queue, Context context) { if (queue == null) { return monoError(LOGGER, new NullPointerException("'queue' cannot be null")); } context = context == null ? 
Context.NONE : context; final Context contextWithHeaders = getTracingContext(context.addData(AZURE_REQUEST_HTTP_HEADERS_KEY, new HttpHeaders())); final String forwardTo = getForwardToEntity(queue.getForwardTo(), contextWithHeaders); if (forwardTo != null) { queue.setForwardTo(forwardTo); } final String forwardDlq = getForwardDlqEntity(queue.getForwardDeadLetteredMessagesTo(), contextWithHeaders); if (forwardDlq != null) { queue.setForwardDeadLetteredMessagesTo(forwardDlq); } final CreateQueueBody createEntity = getCreateQueueBody(EntityHelper.toImplementation(queue)); try { return entityClient.putWithResponseAsync(queue.getName(), createEntity, "*", contextWithHeaders) .onErrorMap(ServiceBusAdministrationAsyncClient::mapException) .map(this::deserializeQueue); } catch (RuntimeException ex) { return monoError(LOGGER, ex); } } /** * Updates a rule with its context. * * @param rule Information about the rule to update. You must provide all the property values that are desired * on the updated entity. Any values not provided are set to the service default values. * @param context Context to pass into request. * * @return A Mono that completes with the updated {@link RuleProperties}. */ Mono<Response<RuleProperties>> updateRuleWithResponse(String topicName, String subscriptionName, RuleProperties rule, Context context) { if (rule == null) { return monoError(LOGGER, new NullPointerException("'rule' cannot be null")); } final CreateRuleBody ruleBody = getUpdateRuleBody(rule); try { return managementClient.getRules().putWithResponseAsync(topicName, subscriptionName, rule.getName(), ruleBody, "*", getTracingContext(context)) .onErrorMap(ServiceBusAdministrationAsyncClient::mapException) .map(this::deserializeRule); } catch (RuntimeException ex) { return monoError(LOGGER, ex); } } /** * Updates a subscription with its context. * * @param subscription Information about the subscription to update. 
You must provide all the property values * that are desired on the updated entity. Any values not provided are set to the service default values. * @param context Context to pass into request. * * @return A Mono that completes with the updated {@link SubscriptionProperties}. */ Mono<Response<SubscriptionProperties>> updateSubscriptionWithResponse(SubscriptionProperties subscription, Context context) { if (subscription == null) { return monoError(LOGGER, new NullPointerException("'subscription' cannot be null")); } context = context == null ? Context.NONE : context; final Context contextWithHeaders = context.addData(AZ_TRACING_NAMESPACE_KEY, AZ_TRACING_NAMESPACE_VALUE) .addData(AZURE_REQUEST_HTTP_HEADERS_KEY, new HttpHeaders()); final String forwardTo = getForwardToEntity(subscription.getForwardTo(), contextWithHeaders); if (forwardTo != null) { subscription.setForwardTo(forwardTo); } final String forwardDlq = getForwardDlqEntity(subscription.getForwardDeadLetteredMessagesTo(), contextWithHeaders); if (forwardDlq != null) { subscription.setForwardDeadLetteredMessagesTo(forwardDlq); } final String topicName = subscription.getTopicName(); final String subscriptionName = subscription.getSubscriptionName(); final CreateSubscriptionBody createEntity = getCreateSubscriptionBody(EntityHelper.toImplementation(subscription)); try { return managementClient.getSubscriptions().putWithResponseAsync(topicName, subscriptionName, createEntity, "*", contextWithHeaders) .onErrorMap(ServiceBusAdministrationAsyncClient::mapException) .map(response -> deserializeSubscription(topicName, response)); } catch (RuntimeException ex) { return monoError(LOGGER, ex); } } /** * Updates a topic with its context. * * @param topic Information about the topic to update. You must provide all the property values that are desired * on the updated entity. Any values not provided are set to the service default values. * @param context Context to pass into request. 
* * @return A Mono that completes with the updated {@link TopicProperties}. */ Mono<Response<TopicProperties>> updateTopicWithResponse(TopicProperties topic, Context context) { if (topic == null) { return monoError(LOGGER, new NullPointerException("'topic' cannot be null")); } final CreateTopicBody createEntity = getUpdateTopicBody(topic); try { return entityClient.putWithResponseAsync(topic.getName(), createEntity, "*", getTracingContext(context)) .onErrorMap(ServiceBusAdministrationAsyncClient::mapException) .map(this::deserializeTopic); } catch (RuntimeException ex) { return monoError(LOGGER, ex); } } private <T> T deserialize(Object object, Class<T> clazz) { if (object == null) { return null; } final String contents = String.valueOf(object); if (contents.isEmpty()) { return null; } try { return serializer.deserialize(contents, clazz); } catch (IOException e) { throw LOGGER.logExceptionAsError(new RuntimeException(String.format( "Exception while deserializing. Body: [%s]. Class: %s", contents, clazz), e)); } } /** * Given an HTTP response, will deserialize it into a strongly typed Response object. * * @param response HTTP response to deserialize response body from. * @param clazz Class to deserialize response type into. * @param <T> Class type to deserialize response into. * * @return A Response with a strongly typed response value. */ private <T> Response<T> deserialize(Response<Object> response, Class<T> clazz) { final T deserialize = deserialize(response.getValue(), clazz); return new SimpleResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(), deserialize); } /** * Converts a Response into its corresponding {@link QueueDescriptionEntry} then mapped into {@link * QueueProperties}. * * @param response HTTP Response to deserialize. * * @return The corresponding HTTP response with convenience properties set. 
*/ private Response<QueueProperties> deserializeQueue(Response<Object> response) { final QueueDescriptionEntry entry = deserialize(response.getValue(), QueueDescriptionEntry.class); if (entry == null) { return new SimpleResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(), null); } else if (entry.getContent() == null) { LOGGER.info("entry.getContent() is null. The entity may not exist. {}", entry); return new SimpleResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(), null); } else if (entry.getContent().getQueueDescription() == null) { final TopicDescriptionEntry entryTopic = deserialize(response.getValue(), TopicDescriptionEntry.class); if (entryTopic != null && entryTopic.getContent() != null && entryTopic.getContent().getTopicDescription() != null) { LOGGER.warning("'{}' is not a queue, it is a topic.", entryTopic.getTitle()); return new SimpleResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(), null); } } final QueueProperties result = EntityHelper.toModel(entry.getContent().getQueueDescription()); final String queueName = getTitleValue(entry.getTitle()); EntityHelper.setQueueName(result, queueName); return new SimpleResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(), result); } /** * Converts a Response into its corresponding {@link RuleDescriptionEntry} then mapped into {@link RuleProperties}. * * @param response HTTP Response to deserialize. * * @return The corresponding HTTP response with convenience properties set. */ private Response<RuleProperties> deserializeRule(Response<Object> response) { final RuleDescriptionEntry entry = deserialize(response.getValue(), RuleDescriptionEntry.class); return getRulePropertiesSimpleResponse(response, entry); } /** * Converts a Response into its corresponding {@link SubscriptionDescriptionEntry} then mapped into {@link * SubscriptionProperties}. * * @param response HTTP Response to deserialize. 
* * @return The corresponding HTTP response with convenience properties set. */ private Response<SubscriptionProperties> deserializeSubscription(String topicName, Response<Object> response) { final SubscriptionDescriptionEntry entry = deserialize(response.getValue(), SubscriptionDescriptionEntry.class); return getSubscriptionPropertiesSimpleResponse(topicName, response, entry); } /** * Converts a Response into its corresponding {@link TopicDescriptionEntry} then mapped into {@link * QueueProperties}. * * @param response HTTP Response to deserialize. * * @return The corresponding HTTP response with convenience properties set. */ private Response<TopicProperties> deserializeTopic(Response<Object> response) { final TopicDescriptionEntry entry = deserialize(response.getValue(), TopicDescriptionEntry.class); if (entry == null) { return new SimpleResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(), null); } else if (entry.getContent() == null) { LOGGER.warning("entry.getContent() is null. There should have been content returned. 
Entry: {}", entry); return new SimpleResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(), null); } else if (entry.getContent().getTopicDescription() == null) { final QueueDescriptionEntry entryQueue = deserialize(response.getValue(), QueueDescriptionEntry.class); if (entryQueue != null && entryQueue.getContent() != null && entryQueue.getContent().getQueueDescription() != null) { LOGGER.warning("'{}' is not a topic, it is a queue.", entryQueue.getTitle()); return new SimpleResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(), null); } } final TopicProperties result = EntityHelper.toModel(entry.getContent().getTopicDescription()); final String topicName = getTitleValue(entry.getTitle()); EntityHelper.setTopicName(result, topicName); return new SimpleResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(), result); } /** * Helper method that invokes the service method, extracts the data and translates it to a PagedResponse. * * @param skip Number of elements to skip. * @param context Context for the query. * * @return A Mono that completes with a paged response of queues. */ private Mono<PagedResponse<QueueProperties>> listQueues(int skip, Context context) { return managementClient.listEntitiesWithResponseAsync(QUEUES_ENTITY_TYPE, skip, NUMBER_OF_ELEMENTS, context) .onErrorMap(ServiceBusAdministrationAsyncClient::mapException) .flatMap(response -> { final Response<QueueDescriptionFeed> feedResponse = deserialize(response, QueueDescriptionFeed.class); final QueueDescriptionFeed feed = feedResponse.getValue(); if (feed == null) { LOGGER.warning("Could not deserialize QueueDescriptionFeed. 
skip {}, top: {}", skip, NUMBER_OF_ELEMENTS); return Mono.empty(); } final List<QueueProperties> entities = getQueues(feed); try { return Mono.just(extractPage(feedResponse, entities, feed.getLink())); } catch (MalformedURLException | UnsupportedEncodingException error) { return Mono.error(new RuntimeException("Could not parse response into FeedPage<QueueDescription>", error)); } }); } /** * Helper method that invokes the service method, extracts the data and translates it to a PagedResponse. * * @param skip Number of elements to skip. * @param context Context for the query. * * @return A Mono that completes with a paged response of rules. */ private Mono<PagedResponse<RuleProperties>> listRules(String topicName, String subscriptionName, int skip, Context context) { return managementClient.listRulesWithResponseAsync(topicName, subscriptionName, skip, NUMBER_OF_ELEMENTS, context) .onErrorMap(ServiceBusAdministrationAsyncClient::mapException) .flatMap(response -> { final Response<RuleDescriptionFeed> feedResponse = deserialize(response, RuleDescriptionFeed.class); final RuleDescriptionFeed feed = feedResponse.getValue(); if (feed == null) { LOGGER.warning("Could not deserialize RuleDescriptionFeed. skip {}, top: {}", skip, NUMBER_OF_ELEMENTS); return Mono.empty(); } final List<RuleProperties> entities = getRules(feed); try { return Mono.just(extractPage(feedResponse, entities, feed.getLink())); } catch (MalformedURLException | UnsupportedEncodingException error) { return Mono.error(new RuntimeException( "Could not parse response into FeedPage<RuleDescription>", error)); } }); } /** * Helper method that invokes the service method, extracts the data and translates it to a PagedResponse. * * @param skip Number of elements to skip. * @param context Context for the query. * * @return A Mono that completes with a paged response of subscriptions. 
*/ private Mono<PagedResponse<SubscriptionProperties>> listSubscriptions(String topicName, int skip, Context context) { return managementClient.listSubscriptionsWithResponseAsync(topicName, skip, NUMBER_OF_ELEMENTS, context) .onErrorMap(ServiceBusAdministrationAsyncClient::mapException) .flatMap(response -> { final Response<SubscriptionDescriptionFeed> feedResponse = deserialize(response, SubscriptionDescriptionFeed.class); final SubscriptionDescriptionFeed feed = feedResponse.getValue(); if (feed == null) { LOGGER.warning("Could not deserialize SubscriptionDescriptionFeed. skip {}, top: {}", skip, NUMBER_OF_ELEMENTS); return Mono.empty(); } final List<SubscriptionProperties> entities = getSubscriptions(topicName, feed); try { return Mono.just(extractPage(feedResponse, entities, feed.getLink())); } catch (MalformedURLException | UnsupportedEncodingException error) { return Mono.error(new RuntimeException( "Could not parse response into FeedPage<SubscriptionDescription>", error)); } }); } /** * Helper method that invokes the service method, extracts the data and translates it to a PagedResponse. * * @param skip Number of elements to skip. * @param context Context for the query. * * @return A Mono that completes with a paged response of topics. */ private Mono<PagedResponse<TopicProperties>> listTopics(int skip, Context context) { return managementClient.listEntitiesWithResponseAsync(TOPICS_ENTITY_TYPE, skip, NUMBER_OF_ELEMENTS, context) .onErrorMap(ServiceBusAdministrationAsyncClient::mapException) .flatMap(response -> { final Response<TopicDescriptionFeed> feedResponse = deserialize(response, TopicDescriptionFeed.class); final TopicDescriptionFeed feed = feedResponse.getValue(); if (feed == null) { LOGGER.warning("Could not deserialize TopicDescriptionFeed. 
skip {}, top: {}", skip, NUMBER_OF_ELEMENTS); return Mono.empty(); } final List<TopicProperties> entities = getTopics(feed); try { return Mono.just(extractPage(feedResponse, entities, feed.getLink())); } catch (MalformedURLException | UnsupportedEncodingException error) { return Mono.error( /** * Maps an exception from the ATOM APIs to its associated {@link HttpResponseException}. * * @param exception Exception from the ATOM API. * * @return The corresponding {@link HttpResponseException} or {@code throwable} if it is not an instance of {@link * ServiceBusManagementErrorException}. */ private static Throwable mapException(Throwable exception) { if (!(exception instanceof ServiceBusManagementErrorException)) { return exception; } final ServiceBusManagementErrorException managementError = ((ServiceBusManagementErrorException) exception); final ServiceBusManagementError error = managementError.getValue(); final HttpResponse errorHttpResponse = managementError.getResponse(); final int statusCode = error != null && error.getCode() != null ? error.getCode() : errorHttpResponse.getStatusCode(); final String errorDetail = error != null && error.getDetail() != null ? 
error.getDetail() : managementError.getMessage(); switch (statusCode) { case 401: return new ClientAuthenticationException(errorDetail, managementError.getResponse(), exception); case 404: return new ResourceNotFoundException(errorDetail, managementError.getResponse(), exception); case 409: return new ResourceExistsException(errorDetail, managementError.getResponse(), exception); case 412: return new ResourceModifiedException(errorDetail, managementError.getResponse(), exception); default: return new HttpResponseException(errorDetail, managementError.getResponse(), exception); } } private String getForwardDlqEntity(String forwardDlqToEntity, Context contextWithHeaders) { if (!CoreUtils.isNullOrEmpty(forwardDlqToEntity)) { addSupplementaryAuthHeader(SERVICE_BUS_DLQ_SUPPLEMENTARY_AUTHORIZATION_HEADER_NAME, forwardDlqToEntity, contextWithHeaders); return getAbsoluteUrlFromEntity(forwardDlqToEntity); } return null; } private String getForwardToEntity(String forwardToEntity, Context contextWithHeaders) { if (!CoreUtils.isNullOrEmpty(forwardToEntity)) { addSupplementaryAuthHeader(SERVICE_BUS_SUPPLEMENTARY_AUTHORIZATION_HEADER_NAME, forwardToEntity, contextWithHeaders); return getAbsoluteUrlFromEntity(forwardToEntity); } return null; } }
is possible we only call convertIfNeeded() once in constructor?
public String toString() { convertIfNeeded(); return azureCoreHeaders.toString(); }
convertIfNeeded();
public String toString() { convertIfNeeded(); return azureCoreHeaders.toString(); }
class OkHttpToAzureCoreHttpHeadersWrapper extends HttpHeaders { private final Headers okhttpHeaders; private HttpHeaders azureCoreHeaders; private boolean converted = false; public OkHttpToAzureCoreHttpHeadersWrapper(Headers okhttpHeaders) { this.okhttpHeaders = okhttpHeaders; this.azureCoreHeaders = new HttpHeaders(okhttpHeaders.size() * 2); } @Override public int getSize() { return converted ? azureCoreHeaders.getSize() : okhttpHeaders.size(); } @Override public HttpHeaders add(String name, String value) { if (name == null || value == null) { return this; } convertIfNeeded(); azureCoreHeaders.add(name, value); return this; } @Override public HttpHeaders set(String name, String value) { if (name == null) { return this; } convertIfNeeded(); azureCoreHeaders.set(name, value); return this; } @Override public HttpHeaders set(String name, List<String> values) { if (name == null) { return this; } convertIfNeeded(); azureCoreHeaders.set(name, values); return this; } @Override public HttpHeaders setAll(Map<String, List<String>> headers) { convertIfNeeded(); azureCoreHeaders.setAll(headers); return this; } @Override public HttpHeader get(String name) { convertIfNeeded(); return azureCoreHeaders.get(name); } @Override public HttpHeader remove(String name) { convertIfNeeded(); return azureCoreHeaders.remove(name); } @Override public String getValue(String name) { convertIfNeeded(); return azureCoreHeaders.getValue(name); } @Override public String[] getValues(String name) { convertIfNeeded(); return azureCoreHeaders.getValues(name); } @Override public Map<String, String> toMap() { convertIfNeeded(); return azureCoreHeaders.toMap(); } @Override public Iterator<HttpHeader> iterator() { convertIfNeeded(); return azureCoreHeaders.iterator(); } @Override public Stream<HttpHeader> stream() { convertIfNeeded(); return azureCoreHeaders.stream(); } @Override private void convertIfNeeded() { if (converted) { return; } azureCoreHeaders = fromOkHttpHeaders(okhttpHeaders); converted = 
true; } }
class OkHttpToAzureCoreHttpHeadersWrapper extends HttpHeaders { private final Headers okhttpHeaders; private HttpHeaders azureCoreHeaders; private boolean converted = false; public OkHttpToAzureCoreHttpHeadersWrapper(Headers okhttpHeaders) { this.okhttpHeaders = okhttpHeaders; this.azureCoreHeaders = new HttpHeaders(okhttpHeaders.size() * 2); } @Override public int getSize() { return converted ? azureCoreHeaders.getSize() : okhttpHeaders.size(); } @Override public HttpHeaders add(String name, String value) { if (name == null || value == null) { return this; } convertIfNeeded(); azureCoreHeaders.add(name, value); return this; } @Override public HttpHeaders set(String name, String value) { if (name == null) { return this; } convertIfNeeded(); azureCoreHeaders.set(name, value); return this; } @Override public HttpHeaders set(String name, List<String> values) { if (name == null) { return this; } convertIfNeeded(); azureCoreHeaders.set(name, values); return this; } @Override public HttpHeaders setAll(Map<String, List<String>> headers) { convertIfNeeded(); azureCoreHeaders.setAll(headers); return this; } @Override public HttpHeader get(String name) { convertIfNeeded(); return azureCoreHeaders.get(name); } @Override public HttpHeader remove(String name) { convertIfNeeded(); return azureCoreHeaders.remove(name); } @Override public String getValue(String name) { convertIfNeeded(); return azureCoreHeaders.getValue(name); } @Override public String[] getValues(String name) { convertIfNeeded(); return azureCoreHeaders.getValues(name); } @Override public Map<String, String> toMap() { convertIfNeeded(); return azureCoreHeaders.toMap(); } @Override public Iterator<HttpHeader> iterator() { convertIfNeeded(); return azureCoreHeaders.iterator(); } @Override public Stream<HttpHeader> stream() { convertIfNeeded(); return azureCoreHeaders.stream(); } @Override private void convertIfNeeded() { if (converted) { return; } azureCoreHeaders = fromOkHttpHeaders(okhttpHeaders); converted = 
true; } }
Calling `convertIfNeeded` in the constructor would defeat the purpose of deferring the conversion of OkHttp's `Headers` to `azure-core`'s `HttpHeaders` as it would be converted eagerly.
public String toString() { convertIfNeeded(); return azureCoreHeaders.toString(); }
convertIfNeeded();
public String toString() { convertIfNeeded(); return azureCoreHeaders.toString(); }
class OkHttpToAzureCoreHttpHeadersWrapper extends HttpHeaders { private final Headers okhttpHeaders; private HttpHeaders azureCoreHeaders; private boolean converted = false; public OkHttpToAzureCoreHttpHeadersWrapper(Headers okhttpHeaders) { this.okhttpHeaders = okhttpHeaders; this.azureCoreHeaders = new HttpHeaders(okhttpHeaders.size() * 2); } @Override public int getSize() { return converted ? azureCoreHeaders.getSize() : okhttpHeaders.size(); } @Override public HttpHeaders add(String name, String value) { if (name == null || value == null) { return this; } convertIfNeeded(); azureCoreHeaders.add(name, value); return this; } @Override public HttpHeaders set(String name, String value) { if (name == null) { return this; } convertIfNeeded(); azureCoreHeaders.set(name, value); return this; } @Override public HttpHeaders set(String name, List<String> values) { if (name == null) { return this; } convertIfNeeded(); azureCoreHeaders.set(name, values); return this; } @Override public HttpHeaders setAll(Map<String, List<String>> headers) { convertIfNeeded(); azureCoreHeaders.setAll(headers); return this; } @Override public HttpHeader get(String name) { convertIfNeeded(); return azureCoreHeaders.get(name); } @Override public HttpHeader remove(String name) { convertIfNeeded(); return azureCoreHeaders.remove(name); } @Override public String getValue(String name) { convertIfNeeded(); return azureCoreHeaders.getValue(name); } @Override public String[] getValues(String name) { convertIfNeeded(); return azureCoreHeaders.getValues(name); } @Override public Map<String, String> toMap() { convertIfNeeded(); return azureCoreHeaders.toMap(); } @Override public Iterator<HttpHeader> iterator() { convertIfNeeded(); return azureCoreHeaders.iterator(); } @Override public Stream<HttpHeader> stream() { convertIfNeeded(); return azureCoreHeaders.stream(); } @Override private void convertIfNeeded() { if (converted) { return; } azureCoreHeaders = fromOkHttpHeaders(okhttpHeaders); converted = 
true; } }
class OkHttpToAzureCoreHttpHeadersWrapper extends HttpHeaders { private final Headers okhttpHeaders; private HttpHeaders azureCoreHeaders; private boolean converted = false; public OkHttpToAzureCoreHttpHeadersWrapper(Headers okhttpHeaders) { this.okhttpHeaders = okhttpHeaders; this.azureCoreHeaders = new HttpHeaders(okhttpHeaders.size() * 2); } @Override public int getSize() { return converted ? azureCoreHeaders.getSize() : okhttpHeaders.size(); } @Override public HttpHeaders add(String name, String value) { if (name == null || value == null) { return this; } convertIfNeeded(); azureCoreHeaders.add(name, value); return this; } @Override public HttpHeaders set(String name, String value) { if (name == null) { return this; } convertIfNeeded(); azureCoreHeaders.set(name, value); return this; } @Override public HttpHeaders set(String name, List<String> values) { if (name == null) { return this; } convertIfNeeded(); azureCoreHeaders.set(name, values); return this; } @Override public HttpHeaders setAll(Map<String, List<String>> headers) { convertIfNeeded(); azureCoreHeaders.setAll(headers); return this; } @Override public HttpHeader get(String name) { convertIfNeeded(); return azureCoreHeaders.get(name); } @Override public HttpHeader remove(String name) { convertIfNeeded(); return azureCoreHeaders.remove(name); } @Override public String getValue(String name) { convertIfNeeded(); return azureCoreHeaders.getValue(name); } @Override public String[] getValues(String name) { convertIfNeeded(); return azureCoreHeaders.getValues(name); } @Override public Map<String, String> toMap() { convertIfNeeded(); return azureCoreHeaders.toMap(); } @Override public Iterator<HttpHeader> iterator() { convertIfNeeded(); return azureCoreHeaders.iterator(); } @Override public Stream<HttpHeader> stream() { convertIfNeeded(); return azureCoreHeaders.stream(); } @Override private void convertIfNeeded() { if (converted) { return; } azureCoreHeaders = fromOkHttpHeaders(okhttpHeaders); converted = 
true; } }
Given drain-loop is a hot path, I wonder if we should avoid hopping through objects to check the receive mode. We could have a boolean property in the class level (e.g., `isPeekLockReceiveMode`) and use it here instead.
/**
 * Drains buffered messages into the current downstream {@link SynchronousReceiveWork}, bounded by the
 * number of requested messages ({@code REQUESTED}). Invoked only from {@code drain()}, whose
 * work-in-progress counter guarantees at most one thread executes this loop at a time.
 */
private void drainQueue() {
    if (isTerminated()) {
        return;
    }

    long numberRequested = REQUESTED.get(this);
    boolean isEmpty = bufferMessages.isEmpty();

    SynchronousReceiveWork currentDownstream = null;

    // Outer loop: keep draining while there is downstream demand and buffered messages remain.
    while (numberRequested != 0L && !isEmpty) {
        if (isTerminated()) {
            break;
        }

        long numberConsumed = 0L;

        // Inner loop: emit up to 'numberRequested' messages into the active work item.
        while (numberRequested != numberConsumed) {
            if (isEmpty || isTerminated()) {
                break;
            }

            final ServiceBusReceivedMessage message = bufferMessages.poll();

            boolean isEmitted = false;
            // Retry emission against successive work items: a work item may have turned terminal
            // (e.g. timed out) since the last poll, in which case the next queued work is fetched.
            while (!isEmitted) {
                currentDownstream = getOrUpdateCurrentWork();
                if (currentDownstream == null) {
                    break;
                }

                isEmitted = currentDownstream.emitNext(message);
            }

            if (!isEmitted) {
                // No active downstream accepted the message.
                // NOTE(review): this receive-mode check runs per message in a hot path; consider
                // hoisting it into a final boolean field computed once in the constructor.
                if (isPrefetchDisabled
                    && asyncClient.getReceiverOptions().getReceiveMode() == ServiceBusReceiveMode.PEEK_LOCK) {
                    // Prefetch disabled + PEEK_LOCK: release the message back to the broker so its
                    // delivery count is not inflated by a message that was never handed downstream.
                    asyncClient.release(message).subscribe(__ -> { },
                        error -> LOGGER.atWarning()
                            .addKeyValue(LOCK_TOKEN_KEY, message.getLockToken())
                            .log("Couldn't release the message.", error),
                        () -> LOGGER.atVerbose()
                            .addKeyValue(LOCK_TOKEN_KEY, message.getLockToken())
                            .log("Message successfully released."));
                } else {
                    // Otherwise put the message back at the front of the deque, preserving order,
                    // and stop draining until new demand or work arrives.
                    bufferMessages.addFirst(message);
                    break;
                }
            }

            numberConsumed++;
            isEmpty = bufferMessages.isEmpty();
        }

        final long requestedMessages = REQUESTED.get(this);
        if (requestedMessages != Long.MAX_VALUE) {
            // Long.MAX_VALUE signals unbounded demand; otherwise deduct what was just consumed.
            numberRequested = REQUESTED.addAndGet(this, -numberConsumed);
        }

        // Guard against spinning: if nothing was emitted this pass and no work item is active, stop.
        currentDownstream = getOrUpdateCurrentWork();
        if (numberConsumed == 0 && currentDownstream == null) {
            break;
        }
    }

    if (numberRequested == 0L) {
        LOGGER.atVerbose()
            .log("Current work is completed. Schedule next work.");
        getOrUpdateCurrentWork();
    }
}
if (isPrefetchDisabled && asyncClient.getReceiverOptions().getReceiveMode() == ServiceBusReceiveMode.PEEK_LOCK) {
/**
 * Drains buffered messages into the current downstream {@link SynchronousReceiveWork}, bounded by the
 * number of requested messages ({@code REQUESTED}). Invoked only from {@code drain()}, whose
 * work-in-progress counter guarantees at most one thread executes this loop at a time.
 */
private void drainQueue() {
    if (isTerminated()) {
        return;
    }

    long numberRequested = REQUESTED.get(this);
    boolean isEmpty = bufferMessages.isEmpty();

    SynchronousReceiveWork currentDownstream = null;

    // Outer loop: keep draining while there is downstream demand and buffered messages remain.
    while (numberRequested != 0L && !isEmpty) {
        if (isTerminated()) {
            break;
        }

        long numberConsumed = 0L;

        // Inner loop: emit up to 'numberRequested' messages into the active work item.
        while (numberRequested != numberConsumed) {
            if (isEmpty || isTerminated()) {
                break;
            }

            final ServiceBusReceivedMessage message = bufferMessages.poll();

            boolean isEmitted = false;
            // Retry emission against successive work items: a work item may have turned terminal
            // (e.g. timed out) since the last poll, in which case the next queued work is fetched.
            while (!isEmitted) {
                currentDownstream = getOrUpdateCurrentWork();
                // In RECEIVE_AND_DELETE mode the broker has already removed the message, so it must
                // not be dropped or released: requeue it at the front and wait for the next work item.
                if (currentDownstream == null && isReceiveDeleteMode) {
                    bufferMessages.addFirst(message);
                    return;
                }
                if (currentDownstream == null) {
                    break;
                }

                isEmitted = currentDownstream.emitNext(message);
            }

            if (!isEmitted) {
                // No active downstream accepted the message. When prefetch is disabled, release the
                // message back to the broker so its delivery count is not inflated. The early return
                // above keeps RECEIVE_AND_DELETE messages out of this branch; for this branch the
                // mode is therefore PEEK_LOCK (the field hoists the former per-iteration mode check).
                if (isPrefetchDisabled) {
                    asyncClient.release(message).subscribe(__ -> { },
                        error -> LOGGER.atWarning()
                            .addKeyValue(LOCK_TOKEN_KEY, message.getLockToken())
                            .log("Couldn't release the message.", error),
                        () -> LOGGER.atVerbose()
                            .addKeyValue(LOCK_TOKEN_KEY, message.getLockToken())
                            .log("Message successfully released."));
                } else {
                    // Keep the message for the next work item, preserving order.
                    // NOTE(review): this path re-queues the message and re-enters the outer loop with
                    // numberRequested and isEmpty unchanged; the previous version broke out when
                    // numberConsumed == 0 and no work was active — verify this cannot busy-spin while
                    // no downstream work exists.
                    bufferMessages.addFirst(message);
                    break;
                }
            }

            numberConsumed++;
            isEmpty = bufferMessages.isEmpty();
        }

        final long requestedMessages = REQUESTED.get(this);
        if (requestedMessages != Long.MAX_VALUE) {
            // Long.MAX_VALUE signals unbounded demand; otherwise deduct what was just consumed.
            numberRequested = REQUESTED.addAndGet(this, -numberConsumed);
        }
    }

    if (numberRequested == 0L) {
        LOGGER.atVerbose()
            .log("Current work is completed. Schedule next work.");
        getOrUpdateCurrentWork();
    }
}
/**
 * Subscriber that bridges the asynchronous upstream of {@link ServiceBusReceivedMessage} to the
 * synchronous receive API: callers enqueue {@link SynchronousReceiveWork} items, received messages are
 * buffered, and a single-threaded drain loop hands buffered messages to the active work item.
 */
class SynchronousMessageSubscriber extends BaseSubscriber<ServiceBusReceivedMessage> {
    private static final ClientLogger LOGGER = new ClientLogger(SynchronousMessageSubscriber.class);
    private final AtomicBoolean isDisposed = new AtomicBoolean();
    // Work-in-progress counter guarding the drain loop: only one thread drains at a time.
    private final AtomicInteger wip = new AtomicInteger();
    private final ConcurrentLinkedQueue<SynchronousReceiveWork> workQueue = new ConcurrentLinkedQueue<>();
    // Deque so an unemitted message can be pushed back at the front, preserving delivery order.
    private final ConcurrentLinkedDeque<ServiceBusReceivedMessage> bufferMessages = new ConcurrentLinkedDeque<>();
    // Guards reads/writes of 'currentWork' and the work-queue promotion logic.
    private final Object currentWorkLock = new Object();
    private final ServiceBusReceiverAsyncClient asyncClient;
    private final boolean isPrefetchDisabled;
    // NOTE(review): assigned in the constructor but not read anywhere in this snippet — confirm it is
    // still needed.
    private final Duration operationTimeout;
    private volatile SynchronousReceiveWork currentWork;

    /**
     * The number of requested messages.
     */
    private volatile long requested;
    private static final AtomicLongFieldUpdater<SynchronousMessageSubscriber> REQUESTED =
        AtomicLongFieldUpdater.newUpdater(SynchronousMessageSubscriber.class, "requested");

    private volatile Subscription upstream;
    private static final AtomicReferenceFieldUpdater<SynchronousMessageSubscriber, Subscription> UPSTREAM =
        AtomicReferenceFieldUpdater.newUpdater(SynchronousMessageSubscriber.class, Subscription.class, "upstream");

    /**
     * Creates a synchronous subscriber with some initial work to queue.
     *
     * @param asyncClient Client to update disposition of messages.
     * @param isPrefetchDisabled Indicates if the prefetch is disabled.
     * @param operationTimeout Timeout to wait for operation to complete.
     * @param initialWork Initial work to queue.
     *
     * <p>
     * When {@code isPrefetchDisabled} is true, we release the messages those received during the timespan
     * between the last terminated downstream and the next active downstream.
     * </p>
     *
     * @throws NullPointerException if {@code initialWork} is null.
     * @throws IllegalArgumentException if {@code initialWork.getNumberOfEvents()} is less than 1.
     */
    SynchronousMessageSubscriber(ServiceBusReceiverAsyncClient asyncClient, SynchronousReceiveWork initialWork,
        boolean isPrefetchDisabled, Duration operationTimeout) {
        this.asyncClient = Objects.requireNonNull(asyncClient, "'asyncClient' cannot be null.");
        this.operationTimeout = Objects.requireNonNull(operationTimeout, "'operationTimeout' cannot be null.");
        this.workQueue.add(Objects.requireNonNull(initialWork, "'initialWork' cannot be null."));
        this.isPrefetchDisabled = isPrefetchDisabled;

        if (initialWork.getNumberOfEvents() < 1) {
            throw LOGGER.logExceptionAsError(new IllegalArgumentException(
                "'numberOfEvents' cannot be less than 1. Actual: " + initialWork.getNumberOfEvents()));
        }

        // Record the initial work's demand; it is requested from upstream in hookOnSubscribe.
        Operators.addCap(REQUESTED, this, initialWork.getNumberOfEvents());
    }

    /**
     * On an initial subscription, will take the first work item, and request that amount of work for it.
     *
     * @param subscription Subscription for upstream.
     */
    @Override
    protected void hookOnSubscribe(Subscription subscription) {
        // Only the first subscription is honored; UPSTREAM is set at most once.
        if (!Operators.setOnce(UPSTREAM, this, subscription)) {
            LOGGER.warning("This should only be subscribed to once. Ignoring subscription.");
            return;
        }

        // Start the first work item before requesting so REQUESTED reflects its demand.
        getOrUpdateCurrentWork();
        subscription.request(REQUESTED.get(this));
    }

    /**
     * Publishes the event to the current {@link SynchronousReceiveWork}. If that work item is complete, will dispose of
     * the subscriber.
     *
     * @param message Event to publish.
     */
    @Override
    protected void hookOnNext(ServiceBusReceivedMessage message) {
        if (isTerminated()) {
            Operators.onNextDropped(message, Context.empty());
        } else {
            // Buffer and let the drain loop deliver; hookOnNext must not block.
            bufferMessages.add(message);
            drain();
        }
    }

    /**
     * Queue the work to be picked up by drain loop.
     *
     * @param work to be queued.
     */
    void queueWork(SynchronousReceiveWork work) {
        Objects.requireNonNull(work, "'work' cannot be null");

        workQueue.add(work);

        LoggingEventBuilder logBuilder = LOGGER.atVerbose()
            .addKeyValue(WORK_ID_KEY, work.getId())
            .addKeyValue("numberOfEvents", work.getNumberOfEvents())
            .addKeyValue("timeout", work.getTimeout());

        // If the new work landed at the head and nothing is active, promote it immediately so its
        // demand is requested from upstream.
        if (workQueue.peek() == work && (currentWork == null || currentWork.isTerminal())) {
            logBuilder.log("First work in queue. Requesting upstream if needed.");
            getOrUpdateCurrentWork();
        } else {
            logBuilder.log("Queuing receive work.");
        }

        if (UPSTREAM.get(this) != null) {
            drain();
        }
    }

    /**
     * Drain the work, only one thread can be in this loop at a time.
     */
    private void drain() {
        // Classic work-in-progress pattern: the first caller enters the loop; concurrent callers only
        // bump 'wip', and the active drainer re-runs for each missed signal.
        if (wip.getAndIncrement() != 0) {
            return;
        }

        int missed = 1;
        while (missed != 0) {
            try {
                drainQueue();
            } finally {
                missed = wip.addAndGet(-missed);
            }
        }
    }

    /***
     * Drain the queue using a lock on current work in progress.
     */
    // NOTE(review): the javadoc above belongs to drainQueue(), whose body is defined elsewhere in the
    // file and is not part of this snippet.

    /**
     * {@inheritDoc}
     */
    @Override
    protected void hookOnError(Throwable throwable) {
        dispose("Errors occurred upstream", throwable);
    }

    @Override
    protected void hookOnCancel() {
        this.dispose();
    }

    // Terminated when the upstream subscription was cancelled or this subscriber was disposed.
    private boolean isTerminated() {
        if (UPSTREAM.get(this) == Operators.cancelledSubscription()) {
            return true;
        }

        return isDisposed.get();
    }

    /**
     * Gets the current work item if it is not terminal and cleans up any existing timeout operations.
     *
     * @return Gets or sets the next work item. Null if there are no work items currently.
     */
    private SynchronousReceiveWork getOrUpdateCurrentWork() {
        synchronized (currentWorkLock) {
            // Reuse the active work item while it can still accept messages.
            if (currentWork != null && !currentWork.isTerminal()) {
                return currentWork;
            }

            // Skip queued work items that already terminated (e.g. timed out while waiting in queue).
            currentWork = workQueue.poll();
            while (currentWork != null && currentWork.isTerminal()) {
                LOGGER.atVerbose()
                    .addKeyValue(WORK_ID_KEY, currentWork.getId())
                    .addKeyValue("numberOfEvents", currentWork.getNumberOfEvents())
                    .log("This work from queue is terminal. Skip it.");
                currentWork = workQueue.poll();
            }

            if (currentWork != null) {
                final SynchronousReceiveWork work = currentWork;
                LOGGER.atVerbose()
                    .addKeyValue(WORK_ID_KEY, work.getId())
                    .addKeyValue("numberOfEvents", work.getNumberOfEvents())
                    .log("Current work updated.");
                // Start the work's timeout and make sure upstream has enough credits to satisfy it.
                work.start();
                requestUpstream(work.getNumberOfEvents());
            }

            return currentWork;
        }
    }

    /**
     * Adds additional credits upstream if {@code numberOfMessages} is greater than the number of {@code REQUESTED}
     * items.
     *
     * @param numberOfMessages Number of messages required downstream.
     */
    private void requestUpstream(long numberOfMessages) {
        if (isTerminated()) {
            LOGGER.info("Cannot request more messages upstream. Subscriber is terminated.");
            return;
        }

        final Subscription subscription = UPSTREAM.get(this);
        if (subscription == null) {
            LOGGER.info("There is no upstream to request messages from.");
            return;
        }

        final long currentRequested = REQUESTED.get(this);
        final long difference = numberOfMessages - currentRequested;

        LOGGER.atVerbose()
            .addKeyValue(NUMBER_OF_REQUESTED_MESSAGES_KEY, currentRequested)
            .addKeyValue("numberOfMessages", numberOfMessages)
            .addKeyValue("difference", difference)
            .log("Requesting messages from upstream.");

        // Outstanding credits already cover the demand; nothing to request.
        if (difference <= 0) {
            return;
        }

        Operators.addCap(REQUESTED, this, difference);

        subscription.request(difference);
    }

    @Override
    public void dispose() {
        super.dispose();
        dispose("Upstream completed the receive work.", null);
    }

    private void dispose(String message, Throwable throwable) {
        super.dispose();

        // Idempotent: only the first disposer completes the outstanding work items.
        if (isDisposed.getAndSet(true)) {
            return;
        }

        // Complete the active work and everything still queued so blocked sync callers unblock.
        synchronized (currentWorkLock) {
            if (currentWork != null) {
                currentWork.complete(message, throwable);
                currentWork = null;
            }

            SynchronousReceiveWork w = workQueue.poll();
            while (w != null) {
                w.complete(message, throwable);
                w = workQueue.poll();
            }
        }
    }

    /**
     * package-private method to check queue size.
     *
     * @return The current number of items in the queue.
     */
    int getWorkQueueSize() {
        return this.workQueue.size();
    }
}
/**
 * Subscriber that bridges the asynchronous upstream of {@link ServiceBusReceivedMessage} to the
 * synchronous receive API: callers enqueue {@link SynchronousReceiveWork} items, received messages are
 * buffered, and a single-threaded drain loop hands buffered messages to the active work item.
 */
class SynchronousMessageSubscriber extends BaseSubscriber<ServiceBusReceivedMessage> {
    private static final ClientLogger LOGGER = new ClientLogger(SynchronousMessageSubscriber.class);
    private final AtomicBoolean isDisposed = new AtomicBoolean();
    // Work-in-progress counter guarding the drain loop: only one thread drains at a time.
    private final AtomicInteger wip = new AtomicInteger();
    private final ConcurrentLinkedQueue<SynchronousReceiveWork> workQueue = new ConcurrentLinkedQueue<>();
    // Deque so an unemitted message can be pushed back at the front, preserving delivery order.
    private final ConcurrentLinkedDeque<ServiceBusReceivedMessage> bufferMessages = new ConcurrentLinkedDeque<>();
    // Guards reads/writes of 'currentWork' and the work-queue promotion logic.
    private final Object currentWorkLock = new Object();
    private final ServiceBusReceiverAsyncClient asyncClient;
    private final boolean isPrefetchDisabled;
    // NOTE(review): assigned in the constructor but not read anywhere in this snippet — confirm it is
    // still needed.
    private final Duration operationTimeout;
    // Receive mode hoisted out of the drain loop (hot path): true for RECEIVE_AND_DELETE mode.
    private final boolean isReceiveDeleteMode;
    private volatile SynchronousReceiveWork currentWork;

    /**
     * The number of requested messages.
     */
    private volatile long requested;
    private static final AtomicLongFieldUpdater<SynchronousMessageSubscriber> REQUESTED =
        AtomicLongFieldUpdater.newUpdater(SynchronousMessageSubscriber.class, "requested");

    private volatile Subscription upstream;
    private static final AtomicReferenceFieldUpdater<SynchronousMessageSubscriber, Subscription> UPSTREAM =
        AtomicReferenceFieldUpdater.newUpdater(SynchronousMessageSubscriber.class, Subscription.class, "upstream");

    /**
     * Creates a synchronous subscriber with some initial work to queue.
     *
     * @param asyncClient Client to update disposition of messages.
     * @param isPrefetchDisabled Indicates if the prefetch is disabled.
     * @param operationTimeout Timeout to wait for operation to complete.
     * @param initialWork Initial work to queue.
     *
     * <p>
     * When {@code isPrefetchDisabled} is true, we release the messages those received during the timespan
     * between the last terminated downstream and the next active downstream.
     * </p>
     *
     * @throws NullPointerException if {@code initialWork} is null.
     * @throws IllegalArgumentException if {@code initialWork.getNumberOfEvents()} is less than 1.
     */
    SynchronousMessageSubscriber(ServiceBusReceiverAsyncClient asyncClient, SynchronousReceiveWork initialWork,
        boolean isPrefetchDisabled, Duration operationTimeout) {
        this.asyncClient = Objects.requireNonNull(asyncClient, "'asyncClient' cannot be null.");
        this.operationTimeout = Objects.requireNonNull(operationTimeout, "'operationTimeout' cannot be null.");
        this.workQueue.add(Objects.requireNonNull(initialWork, "'initialWork' cannot be null."));
        this.isPrefetchDisabled = isPrefetchDisabled;
        // Computed once here so the drain loop does not re-evaluate the receive mode per message.
        this.isReceiveDeleteMode =
            asyncClient.getReceiverOptions().getReceiveMode() == ServiceBusReceiveMode.RECEIVE_AND_DELETE;

        if (initialWork.getNumberOfEvents() < 1) {
            throw LOGGER.logExceptionAsError(new IllegalArgumentException(
                "'numberOfEvents' cannot be less than 1. Actual: " + initialWork.getNumberOfEvents()));
        }

        // Record the initial work's demand; it is requested from upstream in hookOnSubscribe.
        Operators.addCap(REQUESTED, this, initialWork.getNumberOfEvents());
    }

    /**
     * On an initial subscription, will take the first work item, and request that amount of work for it.
     *
     * @param subscription Subscription for upstream.
     */
    @Override
    protected void hookOnSubscribe(Subscription subscription) {
        // Only the first subscription is honored; UPSTREAM is set at most once.
        if (!Operators.setOnce(UPSTREAM, this, subscription)) {
            LOGGER.warning("This should only be subscribed to once. Ignoring subscription.");
            return;
        }

        // Start the first work item before requesting so REQUESTED reflects its demand.
        getOrUpdateCurrentWork();
        subscription.request(REQUESTED.get(this));
    }

    /**
     * Publishes the event to the current {@link SynchronousReceiveWork}. If that work item is complete, will dispose of
     * the subscriber.
     *
     * @param message Event to publish.
     */
    @Override
    protected void hookOnNext(ServiceBusReceivedMessage message) {
        if (isTerminated()) {
            Operators.onNextDropped(message, Context.empty());
        } else {
            // Buffer and let the drain loop deliver; hookOnNext must not block.
            bufferMessages.add(message);
            drain();
        }
    }

    /**
     * Queue the work to be picked up by drain loop.
     *
     * @param work to be queued.
     */
    void queueWork(SynchronousReceiveWork work) {
        Objects.requireNonNull(work, "'work' cannot be null");

        workQueue.add(work);

        LoggingEventBuilder logBuilder = LOGGER.atVerbose()
            .addKeyValue(WORK_ID_KEY, work.getId())
            .addKeyValue("numberOfEvents", work.getNumberOfEvents())
            .addKeyValue("timeout", work.getTimeout());

        // If the new work landed at the head and nothing is active, promote it immediately so its
        // demand is requested from upstream.
        if (workQueue.peek() == work && (currentWork == null || currentWork.isTerminal())) {
            logBuilder.log("First work in queue. Requesting upstream if needed.");
            getOrUpdateCurrentWork();
        } else {
            logBuilder.log("Queuing receive work.");
        }

        if (UPSTREAM.get(this) != null) {
            drain();
        }
    }

    /**
     * Drain the work, only one thread can be in this loop at a time.
     */
    private void drain() {
        // Classic work-in-progress pattern: the first caller enters the loop; concurrent callers only
        // bump 'wip', and the active drainer re-runs for each missed signal.
        if (wip.getAndIncrement() != 0) {
            return;
        }

        int missed = 1;
        while (missed != 0) {
            try {
                drainQueue();
            } finally {
                missed = wip.addAndGet(-missed);
            }
        }
    }

    /***
     * Drain the queue using a lock on current work in progress.
     */
    // NOTE(review): the javadoc above belongs to drainQueue(), whose body is defined elsewhere in the
    // file and is not part of this snippet.

    /**
     * {@inheritDoc}
     */
    @Override
    protected void hookOnError(Throwable throwable) {
        dispose("Errors occurred upstream", throwable);
    }

    @Override
    protected void hookOnCancel() {
        this.dispose();
    }

    // Terminated when the upstream subscription was cancelled or this subscriber was disposed.
    private boolean isTerminated() {
        if (UPSTREAM.get(this) == Operators.cancelledSubscription()) {
            return true;
        }

        return isDisposed.get();
    }

    /**
     * Gets the current work item if it is not terminal and cleans up any existing timeout operations.
     *
     * @return Gets or sets the next work item. Null if there are no work items currently.
     */
    private SynchronousReceiveWork getOrUpdateCurrentWork() {
        synchronized (currentWorkLock) {
            // Reuse the active work item while it can still accept messages.
            if (currentWork != null && !currentWork.isTerminal()) {
                return currentWork;
            }

            // Skip queued work items that already terminated (e.g. timed out while waiting in queue).
            currentWork = workQueue.poll();
            while (currentWork != null && currentWork.isTerminal()) {
                LOGGER.atVerbose()
                    .addKeyValue(WORK_ID_KEY, currentWork.getId())
                    .addKeyValue("numberOfEvents", currentWork.getNumberOfEvents())
                    .log("This work from queue is terminal. Skip it.");
                currentWork = workQueue.poll();
            }

            if (currentWork != null) {
                final SynchronousReceiveWork work = currentWork;
                LOGGER.atVerbose()
                    .addKeyValue(WORK_ID_KEY, work.getId())
                    .addKeyValue("numberOfEvents", work.getNumberOfEvents())
                    .log("Current work updated.");
                // Start the work's timeout and make sure upstream has enough credits to satisfy it.
                work.start();
                requestUpstream(work.getNumberOfEvents());
            }

            return currentWork;
        }
    }

    /**
     * Adds additional credits upstream if {@code numberOfMessages} is greater than the number of {@code REQUESTED}
     * items.
     *
     * @param numberOfMessages Number of messages required downstream.
     */
    private void requestUpstream(long numberOfMessages) {
        if (isTerminated()) {
            LOGGER.info("Cannot request more messages upstream. Subscriber is terminated.");
            return;
        }

        final Subscription subscription = UPSTREAM.get(this);
        if (subscription == null) {
            LOGGER.info("There is no upstream to request messages from.");
            return;
        }

        final long currentRequested = REQUESTED.get(this);
        final long difference = numberOfMessages - currentRequested;

        LOGGER.atVerbose()
            .addKeyValue(NUMBER_OF_REQUESTED_MESSAGES_KEY, currentRequested)
            .addKeyValue("numberOfMessages", numberOfMessages)
            .addKeyValue("difference", difference)
            .log("Requesting messages from upstream.");

        // Outstanding credits already cover the demand; nothing to request.
        if (difference <= 0) {
            return;
        }

        Operators.addCap(REQUESTED, this, difference);

        subscription.request(difference);
    }

    @Override
    public void dispose() {
        super.dispose();
        dispose("Upstream completed the receive work.", null);
    }

    private void dispose(String message, Throwable throwable) {
        super.dispose();

        // Idempotent: only the first disposer completes the outstanding work items.
        if (isDisposed.getAndSet(true)) {
            return;
        }

        // Complete the active work and everything still queued so blocked sync callers unblock.
        synchronized (currentWorkLock) {
            if (currentWork != null) {
                currentWork.complete(message, throwable);
                currentWork = null;
            }

            SynchronousReceiveWork w = workQueue.poll();
            while (w != null) {
                w.complete(message, throwable);
                w = workQueue.poll();
            }
        }
    }

    /**
     * package-private method to check queue size.
     *
     * @return The current number of items in the queue.
     */
    int getWorkQueueSize() {
        return this.workQueue.size();
    }
}
Nit: Is it possible to consolidate all the comments in one place (preferably here) by updating the current comment to something like: "When prefetch is disabled, for any receive mode that influences the delivery count (today, only the PeekLock receive mode), we try to release undelivered messages so the delivery count on the broker is not inflated"?
/**
 * Drains buffered messages into the current downstream {@link SynchronousReceiveWork}, bounded by the
 * number of requested messages ({@code REQUESTED}). Invoked only from {@code drain()}, whose
 * work-in-progress counter guarantees at most one thread executes this loop at a time.
 */
private void drainQueue() {
    if (isTerminated()) {
        return;
    }

    long numberRequested = REQUESTED.get(this);
    boolean isEmpty = bufferMessages.isEmpty();

    SynchronousReceiveWork currentDownstream = null;

    // Outer loop: keep draining while there is downstream demand and buffered messages remain.
    while (numberRequested != 0L && !isEmpty) {
        if (isTerminated()) {
            break;
        }

        long numberConsumed = 0L;

        // Inner loop: emit up to 'numberRequested' messages into the active work item.
        while (numberRequested != numberConsumed) {
            if (isEmpty || isTerminated()) {
                break;
            }

            final ServiceBusReceivedMessage message = bufferMessages.poll();

            boolean isEmitted = false;
            // Retry emission against successive work items: a work item may have turned terminal
            // (e.g. timed out) since the last poll, in which case the next queued work is fetched.
            while (!isEmitted) {
                currentDownstream = getOrUpdateCurrentWork();
                if (currentDownstream == null) {
                    break;
                }

                isEmitted = currentDownstream.emitNext(message);
            }

            if (!isEmitted) {
                // No active downstream accepted the message.
                // NOTE(review): this receive-mode check runs per message in a hot path; consider
                // hoisting it into a final boolean field computed once in the constructor.
                if (isPrefetchDisabled
                    && asyncClient.getReceiverOptions().getReceiveMode() == ServiceBusReceiveMode.PEEK_LOCK) {
                    // Prefetch disabled + PEEK_LOCK: release the message back to the broker so its
                    // delivery count is not inflated by a message that was never handed downstream.
                    asyncClient.release(message).subscribe(__ -> { },
                        error -> LOGGER.atWarning()
                            .addKeyValue(LOCK_TOKEN_KEY, message.getLockToken())
                            .log("Couldn't release the message.", error),
                        () -> LOGGER.atVerbose()
                            .addKeyValue(LOCK_TOKEN_KEY, message.getLockToken())
                            .log("Message successfully released."));
                } else {
                    // Otherwise put the message back at the front of the deque, preserving order,
                    // and stop draining until new demand or work arrives.
                    bufferMessages.addFirst(message);
                    break;
                }
            }

            numberConsumed++;
            isEmpty = bufferMessages.isEmpty();
        }

        final long requestedMessages = REQUESTED.get(this);
        if (requestedMessages != Long.MAX_VALUE) {
            // Long.MAX_VALUE signals unbounded demand; otherwise deduct what was just consumed.
            numberRequested = REQUESTED.addAndGet(this, -numberConsumed);
        }

        // Guard against spinning: if nothing was emitted this pass and no work item is active, stop.
        currentDownstream = getOrUpdateCurrentWork();
        if (numberConsumed == 0 && currentDownstream == null) {
            break;
        }
    }

    if (numberRequested == 0L) {
        LOGGER.atVerbose()
            .log("Current work is completed. Schedule next work.");
        getOrUpdateCurrentWork();
    }
}
/**
 * Drains buffered messages into the current downstream {@link SynchronousReceiveWork}, bounded by the
 * number of requested messages ({@code REQUESTED}). Invoked only from {@code drain()}, whose
 * work-in-progress counter guarantees at most one thread executes this loop at a time.
 */
private void drainQueue() {
    if (isTerminated()) {
        return;
    }

    long numberRequested = REQUESTED.get(this);
    boolean isEmpty = bufferMessages.isEmpty();

    SynchronousReceiveWork currentDownstream = null;

    // Outer loop: keep draining while there is downstream demand and buffered messages remain.
    while (numberRequested != 0L && !isEmpty) {
        if (isTerminated()) {
            break;
        }

        long numberConsumed = 0L;

        // Inner loop: emit up to 'numberRequested' messages into the active work item.
        while (numberRequested != numberConsumed) {
            if (isEmpty || isTerminated()) {
                break;
            }

            final ServiceBusReceivedMessage message = bufferMessages.poll();

            boolean isEmitted = false;
            // Retry emission against successive work items: a work item may have turned terminal
            // (e.g. timed out) since the last poll, in which case the next queued work is fetched.
            while (!isEmitted) {
                currentDownstream = getOrUpdateCurrentWork();
                // In RECEIVE_AND_DELETE mode the broker has already removed the message, so it must
                // not be dropped or released: requeue it at the front and wait for the next work item.
                if (currentDownstream == null && isReceiveDeleteMode) {
                    bufferMessages.addFirst(message);
                    return;
                }
                if (currentDownstream == null) {
                    break;
                }

                isEmitted = currentDownstream.emitNext(message);
            }

            if (!isEmitted) {
                // No active downstream accepted the message. When prefetch is disabled, release the
                // message back to the broker so its delivery count is not inflated. The early return
                // above keeps RECEIVE_AND_DELETE messages out of this branch; for this branch the
                // mode is therefore PEEK_LOCK (the field hoists the former per-iteration mode check).
                if (isPrefetchDisabled) {
                    asyncClient.release(message).subscribe(__ -> { },
                        error -> LOGGER.atWarning()
                            .addKeyValue(LOCK_TOKEN_KEY, message.getLockToken())
                            .log("Couldn't release the message.", error),
                        () -> LOGGER.atVerbose()
                            .addKeyValue(LOCK_TOKEN_KEY, message.getLockToken())
                            .log("Message successfully released."));
                } else {
                    // Keep the message for the next work item, preserving order.
                    // NOTE(review): this path re-queues the message and re-enters the outer loop with
                    // numberRequested and isEmpty unchanged; the previous version broke out when
                    // numberConsumed == 0 and no work was active — verify this cannot busy-spin while
                    // no downstream work exists.
                    bufferMessages.addFirst(message);
                    break;
                }
            }

            numberConsumed++;
            isEmpty = bufferMessages.isEmpty();
        }

        final long requestedMessages = REQUESTED.get(this);
        if (requestedMessages != Long.MAX_VALUE) {
            // Long.MAX_VALUE signals unbounded demand; otherwise deduct what was just consumed.
            numberRequested = REQUESTED.addAndGet(this, -numberConsumed);
        }
    }

    if (numberRequested == 0L) {
        LOGGER.atVerbose()
            .log("Current work is completed. Schedule next work.");
        getOrUpdateCurrentWork();
    }
}
/**
 * Subscriber that bridges the asynchronous upstream of {@link ServiceBusReceivedMessage} to the
 * synchronous receive API: callers enqueue {@link SynchronousReceiveWork} items, received messages are
 * buffered, and a single-threaded drain loop hands buffered messages to the active work item.
 */
class SynchronousMessageSubscriber extends BaseSubscriber<ServiceBusReceivedMessage> {
    private static final ClientLogger LOGGER = new ClientLogger(SynchronousMessageSubscriber.class);
    private final AtomicBoolean isDisposed = new AtomicBoolean();
    // Work-in-progress counter guarding the drain loop: only one thread drains at a time.
    private final AtomicInteger wip = new AtomicInteger();
    private final ConcurrentLinkedQueue<SynchronousReceiveWork> workQueue = new ConcurrentLinkedQueue<>();
    // Deque so an unemitted message can be pushed back at the front, preserving delivery order.
    private final ConcurrentLinkedDeque<ServiceBusReceivedMessage> bufferMessages = new ConcurrentLinkedDeque<>();
    // Guards reads/writes of 'currentWork' and the work-queue promotion logic.
    private final Object currentWorkLock = new Object();
    private final ServiceBusReceiverAsyncClient asyncClient;
    private final boolean isPrefetchDisabled;
    // NOTE(review): assigned in the constructor but not read anywhere in this snippet — confirm it is
    // still needed.
    private final Duration operationTimeout;
    private volatile SynchronousReceiveWork currentWork;

    /**
     * The number of requested messages.
     */
    private volatile long requested;
    private static final AtomicLongFieldUpdater<SynchronousMessageSubscriber> REQUESTED =
        AtomicLongFieldUpdater.newUpdater(SynchronousMessageSubscriber.class, "requested");

    private volatile Subscription upstream;
    private static final AtomicReferenceFieldUpdater<SynchronousMessageSubscriber, Subscription> UPSTREAM =
        AtomicReferenceFieldUpdater.newUpdater(SynchronousMessageSubscriber.class, Subscription.class, "upstream");

    /**
     * Creates a synchronous subscriber with some initial work to queue.
     *
     * @param asyncClient Client to update disposition of messages.
     * @param isPrefetchDisabled Indicates if the prefetch is disabled.
     * @param operationTimeout Timeout to wait for operation to complete.
     * @param initialWork Initial work to queue.
     *
     * <p>
     * When {@code isPrefetchDisabled} is true, we release the messages those received during the timespan
     * between the last terminated downstream and the next active downstream.
     * </p>
     *
     * @throws NullPointerException if {@code initialWork} is null.
     * @throws IllegalArgumentException if {@code initialWork.getNumberOfEvents()} is less than 1.
     */
    SynchronousMessageSubscriber(ServiceBusReceiverAsyncClient asyncClient, SynchronousReceiveWork initialWork,
        boolean isPrefetchDisabled, Duration operationTimeout) {
        this.asyncClient = Objects.requireNonNull(asyncClient, "'asyncClient' cannot be null.");
        this.operationTimeout = Objects.requireNonNull(operationTimeout, "'operationTimeout' cannot be null.");
        this.workQueue.add(Objects.requireNonNull(initialWork, "'initialWork' cannot be null."));
        this.isPrefetchDisabled = isPrefetchDisabled;

        if (initialWork.getNumberOfEvents() < 1) {
            throw LOGGER.logExceptionAsError(new IllegalArgumentException(
                "'numberOfEvents' cannot be less than 1. Actual: " + initialWork.getNumberOfEvents()));
        }

        // Record the initial work's demand; it is requested from upstream in hookOnSubscribe.
        Operators.addCap(REQUESTED, this, initialWork.getNumberOfEvents());
    }

    /**
     * On an initial subscription, will take the first work item, and request that amount of work for it.
     *
     * @param subscription Subscription for upstream.
     */
    @Override
    protected void hookOnSubscribe(Subscription subscription) {
        // Only the first subscription is honored; UPSTREAM is set at most once.
        if (!Operators.setOnce(UPSTREAM, this, subscription)) {
            LOGGER.warning("This should only be subscribed to once. Ignoring subscription.");
            return;
        }

        // Start the first work item before requesting so REQUESTED reflects its demand.
        getOrUpdateCurrentWork();
        subscription.request(REQUESTED.get(this));
    }

    /**
     * Publishes the event to the current {@link SynchronousReceiveWork}. If that work item is complete, will dispose of
     * the subscriber.
     *
     * @param message Event to publish.
     */
    @Override
    protected void hookOnNext(ServiceBusReceivedMessage message) {
        if (isTerminated()) {
            Operators.onNextDropped(message, Context.empty());
        } else {
            // Buffer and let the drain loop deliver; hookOnNext must not block.
            bufferMessages.add(message);
            drain();
        }
    }

    /**
     * Queue the work to be picked up by drain loop.
     *
     * @param work to be queued.
     */
    void queueWork(SynchronousReceiveWork work) {
        Objects.requireNonNull(work, "'work' cannot be null");

        workQueue.add(work);

        LoggingEventBuilder logBuilder = LOGGER.atVerbose()
            .addKeyValue(WORK_ID_KEY, work.getId())
            .addKeyValue("numberOfEvents", work.getNumberOfEvents())
            .addKeyValue("timeout", work.getTimeout());

        // If the new work landed at the head and nothing is active, promote it immediately so its
        // demand is requested from upstream.
        if (workQueue.peek() == work && (currentWork == null || currentWork.isTerminal())) {
            logBuilder.log("First work in queue. Requesting upstream if needed.");
            getOrUpdateCurrentWork();
        } else {
            logBuilder.log("Queuing receive work.");
        }

        if (UPSTREAM.get(this) != null) {
            drain();
        }
    }

    /**
     * Drain the work, only one thread can be in this loop at a time.
     */
    private void drain() {
        // Classic work-in-progress pattern: the first caller enters the loop; concurrent callers only
        // bump 'wip', and the active drainer re-runs for each missed signal.
        if (wip.getAndIncrement() != 0) {
            return;
        }

        int missed = 1;
        while (missed != 0) {
            try {
                drainQueue();
            } finally {
                missed = wip.addAndGet(-missed);
            }
        }
    }

    /***
     * Drain the queue using a lock on current work in progress.
     */
    // NOTE(review): the javadoc above belongs to drainQueue(), whose body is defined elsewhere in the
    // file and is not part of this snippet.

    /**
     * {@inheritDoc}
     */
    @Override
    protected void hookOnError(Throwable throwable) {
        dispose("Errors occurred upstream", throwable);
    }

    @Override
    protected void hookOnCancel() {
        this.dispose();
    }

    // Terminated when the upstream subscription was cancelled or this subscriber was disposed.
    private boolean isTerminated() {
        if (UPSTREAM.get(this) == Operators.cancelledSubscription()) {
            return true;
        }

        return isDisposed.get();
    }

    /**
     * Gets the current work item if it is not terminal and cleans up any existing timeout operations.
     *
     * @return Gets or sets the next work item. Null if there are no work items currently.
     */
    private SynchronousReceiveWork getOrUpdateCurrentWork() {
        synchronized (currentWorkLock) {
            // Reuse the active work item while it can still accept messages.
            if (currentWork != null && !currentWork.isTerminal()) {
                return currentWork;
            }

            // Skip queued work items that already terminated (e.g. timed out while waiting in queue).
            currentWork = workQueue.poll();
            while (currentWork != null && currentWork.isTerminal()) {
                LOGGER.atVerbose()
                    .addKeyValue(WORK_ID_KEY, currentWork.getId())
                    .addKeyValue("numberOfEvents", currentWork.getNumberOfEvents())
                    .log("This work from queue is terminal. Skip it.");
                currentWork = workQueue.poll();
            }

            if (currentWork != null) {
                final SynchronousReceiveWork work = currentWork;
                LOGGER.atVerbose()
                    .addKeyValue(WORK_ID_KEY, work.getId())
                    .addKeyValue("numberOfEvents", work.getNumberOfEvents())
                    .log("Current work updated.");
                // Start the work's timeout and make sure upstream has enough credits to satisfy it.
                work.start();
                requestUpstream(work.getNumberOfEvents());
            }

            return currentWork;
        }
    }

    /**
     * Adds additional credits upstream if {@code numberOfMessages} is greater than the number of {@code REQUESTED}
     * items.
     *
     * @param numberOfMessages Number of messages required downstream.
     */
    private void requestUpstream(long numberOfMessages) {
        if (isTerminated()) {
            LOGGER.info("Cannot request more messages upstream. Subscriber is terminated.");
            return;
        }

        final Subscription subscription = UPSTREAM.get(this);
        if (subscription == null) {
            LOGGER.info("There is no upstream to request messages from.");
            return;
        }

        final long currentRequested = REQUESTED.get(this);
        final long difference = numberOfMessages - currentRequested;

        LOGGER.atVerbose()
            .addKeyValue(NUMBER_OF_REQUESTED_MESSAGES_KEY, currentRequested)
            .addKeyValue("numberOfMessages", numberOfMessages)
            .addKeyValue("difference", difference)
            .log("Requesting messages from upstream.");

        // Outstanding credits already cover the demand; nothing to request.
        if (difference <= 0) {
            return;
        }

        Operators.addCap(REQUESTED, this, difference);

        subscription.request(difference);
    }

    @Override
    public void dispose() {
        super.dispose();
        dispose("Upstream completed the receive work.", null);
    }

    private void dispose(String message, Throwable throwable) {
        super.dispose();

        // Idempotent: only the first disposer completes the outstanding work items.
        if (isDisposed.getAndSet(true)) {
            return;
        }

        // Complete the active work and everything still queued so blocked sync callers unblock.
        synchronized (currentWorkLock) {
            if (currentWork != null) {
                currentWork.complete(message, throwable);
                currentWork = null;
            }

            SynchronousReceiveWork w = workQueue.poll();
            while (w != null) {
                w.complete(message, throwable);
                w = workQueue.poll();
            }
        }
    }

    /**
     * package-private method to check queue size.
     *
     * @return The current number of items in the queue.
     */
    int getWorkQueueSize() {
        return this.workQueue.size();
    }
}
class SynchronousMessageSubscriber extends BaseSubscriber<ServiceBusReceivedMessage> { private static final ClientLogger LOGGER = new ClientLogger(SynchronousMessageSubscriber.class); private final AtomicBoolean isDisposed = new AtomicBoolean(); private final AtomicInteger wip = new AtomicInteger(); private final ConcurrentLinkedQueue<SynchronousReceiveWork> workQueue = new ConcurrentLinkedQueue<>(); private final ConcurrentLinkedDeque<ServiceBusReceivedMessage> bufferMessages = new ConcurrentLinkedDeque<>(); private final Object currentWorkLock = new Object(); private final ServiceBusReceiverAsyncClient asyncClient; private final boolean isPrefetchDisabled; private final Duration operationTimeout; private final boolean isReceiveDeleteMode; private volatile SynchronousReceiveWork currentWork; /** * The number of requested messages. */ private volatile long requested; private static final AtomicLongFieldUpdater<SynchronousMessageSubscriber> REQUESTED = AtomicLongFieldUpdater.newUpdater(SynchronousMessageSubscriber.class, "requested"); private volatile Subscription upstream; private static final AtomicReferenceFieldUpdater<SynchronousMessageSubscriber, Subscription> UPSTREAM = AtomicReferenceFieldUpdater.newUpdater(SynchronousMessageSubscriber.class, Subscription.class, "upstream"); /** * Creates a synchronous subscriber with some initial work to queue. * * * @param asyncClient Client to update disposition of messages. * @param isPrefetchDisabled Indicates if the prefetch is disabled. * @param operationTimeout Timeout to wait for operation to complete. * @param initialWork Initial work to queue. * * <p> * When {@code isPrefetchDisabled} is true, we release the messages those received during the timespan * between the last terminated downstream and the next active downstream. * </p> * * @throws NullPointerException if {@code initialWork} is null. * @throws IllegalArgumentException if {@code initialWork.getNumberOfEvents()} is less than 1. 
*/ SynchronousMessageSubscriber(ServiceBusReceiverAsyncClient asyncClient, SynchronousReceiveWork initialWork, boolean isPrefetchDisabled, Duration operationTimeout) { this.asyncClient = Objects.requireNonNull(asyncClient, "'asyncClient' cannot be null."); this.operationTimeout = Objects.requireNonNull(operationTimeout, "'operationTimeout' cannot be null."); this.workQueue.add(Objects.requireNonNull(initialWork, "'initialWork' cannot be null.")); this.isPrefetchDisabled = isPrefetchDisabled; this.isReceiveDeleteMode = asyncClient.getReceiverOptions().getReceiveMode() == ServiceBusReceiveMode.RECEIVE_AND_DELETE; if (initialWork.getNumberOfEvents() < 1) { throw LOGGER.logExceptionAsError(new IllegalArgumentException( "'numberOfEvents' cannot be less than 1. Actual: " + initialWork.getNumberOfEvents())); } Operators.addCap(REQUESTED, this, initialWork.getNumberOfEvents()); } /** * On an initial subscription, will take the first work item, and request that amount of work for it. * * @param subscription Subscription for upstream. */ @Override protected void hookOnSubscribe(Subscription subscription) { if (!Operators.setOnce(UPSTREAM, this, subscription)) { LOGGER.warning("This should only be subscribed to once. Ignoring subscription."); return; } getOrUpdateCurrentWork(); subscription.request(REQUESTED.get(this)); } /** * Publishes the event to the current {@link SynchronousReceiveWork}. If that work item is complete, will dispose of * the subscriber. * * @param message Event to publish. */ @Override protected void hookOnNext(ServiceBusReceivedMessage message) { if (isTerminated()) { Operators.onNextDropped(message, Context.empty()); } else { bufferMessages.add(message); drain(); } } /** * Queue the work to be picked up by drain loop. * * @param work to be queued. 
*/ void queueWork(SynchronousReceiveWork work) { Objects.requireNonNull(work, "'work' cannot be null"); workQueue.add(work); LoggingEventBuilder logBuilder = LOGGER.atVerbose() .addKeyValue(WORK_ID_KEY, work.getId()) .addKeyValue("numberOfEvents", work.getNumberOfEvents()) .addKeyValue("timeout", work.getTimeout()); if (workQueue.peek() == work && (currentWork == null || currentWork.isTerminal())) { logBuilder.log("First work in queue. Requesting upstream if needed."); getOrUpdateCurrentWork(); } else { logBuilder.log("Queuing receive work."); } if (UPSTREAM.get(this) != null) { drain(); } } /** * Drain the work, only one thread can be in this loop at a time. */ private void drain() { if (wip.getAndIncrement() != 0) { return; } int missed = 1; while (missed != 0) { try { drainQueue(); } finally { missed = wip.addAndGet(-missed); } } } /*** * Drain the queue using a lock on current work in progress. */ /** * {@inheritDoc} */ @Override protected void hookOnError(Throwable throwable) { dispose("Errors occurred upstream", throwable); } @Override protected void hookOnCancel() { this.dispose(); } private boolean isTerminated() { if (UPSTREAM.get(this) == Operators.cancelledSubscription()) { return true; } return isDisposed.get(); } /** * Gets the current work item if it is not terminal and cleans up any existing timeout operations. * * @return Gets or sets the next work item. Null if there are no work items currently. */ private SynchronousReceiveWork getOrUpdateCurrentWork() { synchronized (currentWorkLock) { if (currentWork != null && !currentWork.isTerminal()) { return currentWork; } currentWork = workQueue.poll(); while (currentWork != null && currentWork.isTerminal()) { LOGGER.atVerbose() .addKeyValue(WORK_ID_KEY, currentWork.getId()) .addKeyValue("numberOfEvents", currentWork.getNumberOfEvents()) .log("This work from queue is terminal. 
Skip it."); currentWork = workQueue.poll(); } if (currentWork != null) { final SynchronousReceiveWork work = currentWork; LOGGER.atVerbose() .addKeyValue(WORK_ID_KEY, work.getId()) .addKeyValue("numberOfEvents", work.getNumberOfEvents()) .log("Current work updated."); work.start(); requestUpstream(work.getNumberOfEvents()); } return currentWork; } } /** * Adds additional credits upstream if {@code numberOfMessages} is greater than the number of {@code REQUESTED} * items. * * @param numberOfMessages Number of messages required downstream. */ private void requestUpstream(long numberOfMessages) { if (isTerminated()) { LOGGER.info("Cannot request more messages upstream. Subscriber is terminated."); return; } final Subscription subscription = UPSTREAM.get(this); if (subscription == null) { LOGGER.info("There is no upstream to request messages from."); return; } final long currentRequested = REQUESTED.get(this); final long difference = numberOfMessages - currentRequested; LOGGER.atVerbose() .addKeyValue(NUMBER_OF_REQUESTED_MESSAGES_KEY, currentRequested) .addKeyValue("numberOfMessages", numberOfMessages) .addKeyValue("difference", difference) .log("Requesting messages from upstream."); if (difference <= 0) { return; } Operators.addCap(REQUESTED, this, difference); subscription.request(difference); } @Override public void dispose() { super.dispose(); dispose("Upstream completed the receive work.", null); } private void dispose(String message, Throwable throwable) { super.dispose(); if (isDisposed.getAndSet(true)) { return; } synchronized (currentWorkLock) { if (currentWork != null) { currentWork.complete(message, throwable); currentWork = null; } SynchronousReceiveWork w = workQueue.poll(); while (w != null) { w.complete(message, throwable); w = workQueue.poll(); } } } /** * package-private method to check queue size. * * @return The current number of items in the queue. */ int getWorkQueueSize() { return this.workQueue.size(); } }
Do we really need this check to exit the drain loop? In the next iteration of the drain loop, we exit if there is no current work
private void drainQueue() { if (isTerminated()) { return; } long numberRequested = REQUESTED.get(this); boolean isEmpty = bufferMessages.isEmpty(); SynchronousReceiveWork currentDownstream = null; while (numberRequested != 0L && !isEmpty) { if (isTerminated()) { break; } long numberConsumed = 0L; while (numberRequested != numberConsumed) { if (isEmpty || isTerminated()) { break; } final ServiceBusReceivedMessage message = bufferMessages.poll(); boolean isEmitted = false; while (!isEmitted) { currentDownstream = getOrUpdateCurrentWork(); if (currentDownstream == null) { break; } isEmitted = currentDownstream.emitNext(message); } if (!isEmitted) { if (isPrefetchDisabled && asyncClient.getReceiverOptions().getReceiveMode() == ServiceBusReceiveMode.PEEK_LOCK) { asyncClient.release(message).subscribe(__ -> { }, error -> LOGGER.atWarning() .addKeyValue(LOCK_TOKEN_KEY, message.getLockToken()) .log("Couldn't release the message.", error), () -> LOGGER.atVerbose() .addKeyValue(LOCK_TOKEN_KEY, message.getLockToken()) .log("Message successfully released.")); } else { bufferMessages.addFirst(message); break; } } numberConsumed++; isEmpty = bufferMessages.isEmpty(); } final long requestedMessages = REQUESTED.get(this); if (requestedMessages != Long.MAX_VALUE) { numberRequested = REQUESTED.addAndGet(this, -numberConsumed); } currentDownstream = getOrUpdateCurrentWork(); if (numberConsumed == 0 && currentDownstream == null) { break; } } if (numberRequested == 0L) { LOGGER.atVerbose() .log("Current work is completed. Schedule next work."); getOrUpdateCurrentWork(); } }
}
private void drainQueue() { if (isTerminated()) { return; } long numberRequested = REQUESTED.get(this); boolean isEmpty = bufferMessages.isEmpty(); SynchronousReceiveWork currentDownstream = null; while (numberRequested != 0L && !isEmpty) { if (isTerminated()) { break; } long numberConsumed = 0L; while (numberRequested != numberConsumed) { if (isEmpty || isTerminated()) { break; } final ServiceBusReceivedMessage message = bufferMessages.poll(); boolean isEmitted = false; while (!isEmitted) { currentDownstream = getOrUpdateCurrentWork(); if (currentDownstream == null && isReceiveDeleteMode) { bufferMessages.addFirst(message); return; } if (currentDownstream == null) { break; } isEmitted = currentDownstream.emitNext(message); } if (!isEmitted) { if (isPrefetchDisabled) { asyncClient.release(message).subscribe(__ -> { }, error -> LOGGER.atWarning() .addKeyValue(LOCK_TOKEN_KEY, message.getLockToken()) .log("Couldn't release the message.", error), () -> LOGGER.atVerbose() .addKeyValue(LOCK_TOKEN_KEY, message.getLockToken()) .log("Message successfully released.")); } else { bufferMessages.addFirst(message); break; } } numberConsumed++; isEmpty = bufferMessages.isEmpty(); } final long requestedMessages = REQUESTED.get(this); if (requestedMessages != Long.MAX_VALUE) { numberRequested = REQUESTED.addAndGet(this, -numberConsumed); } } if (numberRequested == 0L) { LOGGER.atVerbose() .log("Current work is completed. Schedule next work."); getOrUpdateCurrentWork(); } }
class SynchronousMessageSubscriber extends BaseSubscriber<ServiceBusReceivedMessage> { private static final ClientLogger LOGGER = new ClientLogger(SynchronousMessageSubscriber.class); private final AtomicBoolean isDisposed = new AtomicBoolean(); private final AtomicInteger wip = new AtomicInteger(); private final ConcurrentLinkedQueue<SynchronousReceiveWork> workQueue = new ConcurrentLinkedQueue<>(); private final ConcurrentLinkedDeque<ServiceBusReceivedMessage> bufferMessages = new ConcurrentLinkedDeque<>(); private final Object currentWorkLock = new Object(); private final ServiceBusReceiverAsyncClient asyncClient; private final boolean isPrefetchDisabled; private final Duration operationTimeout; private volatile SynchronousReceiveWork currentWork; /** * The number of requested messages. */ private volatile long requested; private static final AtomicLongFieldUpdater<SynchronousMessageSubscriber> REQUESTED = AtomicLongFieldUpdater.newUpdater(SynchronousMessageSubscriber.class, "requested"); private volatile Subscription upstream; private static final AtomicReferenceFieldUpdater<SynchronousMessageSubscriber, Subscription> UPSTREAM = AtomicReferenceFieldUpdater.newUpdater(SynchronousMessageSubscriber.class, Subscription.class, "upstream"); /** * Creates a synchronous subscriber with some initial work to queue. * * * @param asyncClient Client to update disposition of messages. * @param isPrefetchDisabled Indicates if the prefetch is disabled. * @param operationTimeout Timeout to wait for operation to complete. * @param initialWork Initial work to queue. * * <p> * When {@code isPrefetchDisabled} is true, we release the messages those received during the timespan * between the last terminated downstream and the next active downstream. * </p> * * @throws NullPointerException if {@code initialWork} is null. * @throws IllegalArgumentException if {@code initialWork.getNumberOfEvents()} is less than 1. 
*/ SynchronousMessageSubscriber(ServiceBusReceiverAsyncClient asyncClient, SynchronousReceiveWork initialWork, boolean isPrefetchDisabled, Duration operationTimeout) { this.asyncClient = Objects.requireNonNull(asyncClient, "'asyncClient' cannot be null."); this.operationTimeout = Objects.requireNonNull(operationTimeout, "'operationTimeout' cannot be null."); this.workQueue.add(Objects.requireNonNull(initialWork, "'initialWork' cannot be null.")); this.isPrefetchDisabled = isPrefetchDisabled; if (initialWork.getNumberOfEvents() < 1) { throw LOGGER.logExceptionAsError(new IllegalArgumentException( "'numberOfEvents' cannot be less than 1. Actual: " + initialWork.getNumberOfEvents())); } Operators.addCap(REQUESTED, this, initialWork.getNumberOfEvents()); } /** * On an initial subscription, will take the first work item, and request that amount of work for it. * * @param subscription Subscription for upstream. */ @Override protected void hookOnSubscribe(Subscription subscription) { if (!Operators.setOnce(UPSTREAM, this, subscription)) { LOGGER.warning("This should only be subscribed to once. Ignoring subscription."); return; } getOrUpdateCurrentWork(); subscription.request(REQUESTED.get(this)); } /** * Publishes the event to the current {@link SynchronousReceiveWork}. If that work item is complete, will dispose of * the subscriber. * * @param message Event to publish. */ @Override protected void hookOnNext(ServiceBusReceivedMessage message) { if (isTerminated()) { Operators.onNextDropped(message, Context.empty()); } else { bufferMessages.add(message); drain(); } } /** * Queue the work to be picked up by drain loop. * * @param work to be queued. 
*/ void queueWork(SynchronousReceiveWork work) { Objects.requireNonNull(work, "'work' cannot be null"); workQueue.add(work); LoggingEventBuilder logBuilder = LOGGER.atVerbose() .addKeyValue(WORK_ID_KEY, work.getId()) .addKeyValue("numberOfEvents", work.getNumberOfEvents()) .addKeyValue("timeout", work.getTimeout()); if (workQueue.peek() == work && (currentWork == null || currentWork.isTerminal())) { logBuilder.log("First work in queue. Requesting upstream if needed."); getOrUpdateCurrentWork(); } else { logBuilder.log("Queuing receive work."); } if (UPSTREAM.get(this) != null) { drain(); } } /** * Drain the work, only one thread can be in this loop at a time. */ private void drain() { if (wip.getAndIncrement() != 0) { return; } int missed = 1; while (missed != 0) { try { drainQueue(); } finally { missed = wip.addAndGet(-missed); } } } /*** * Drain the queue using a lock on current work in progress. */ /** * {@inheritDoc} */ @Override protected void hookOnError(Throwable throwable) { dispose("Errors occurred upstream", throwable); } @Override protected void hookOnCancel() { this.dispose(); } private boolean isTerminated() { if (UPSTREAM.get(this) == Operators.cancelledSubscription()) { return true; } return isDisposed.get(); } /** * Gets the current work item if it is not terminal and cleans up any existing timeout operations. * * @return Gets or sets the next work item. Null if there are no work items currently. */ private SynchronousReceiveWork getOrUpdateCurrentWork() { synchronized (currentWorkLock) { if (currentWork != null && !currentWork.isTerminal()) { return currentWork; } currentWork = workQueue.poll(); while (currentWork != null && currentWork.isTerminal()) { LOGGER.atVerbose() .addKeyValue(WORK_ID_KEY, currentWork.getId()) .addKeyValue("numberOfEvents", currentWork.getNumberOfEvents()) .log("This work from queue is terminal. 
Skip it."); currentWork = workQueue.poll(); } if (currentWork != null) { final SynchronousReceiveWork work = currentWork; LOGGER.atVerbose() .addKeyValue(WORK_ID_KEY, work.getId()) .addKeyValue("numberOfEvents", work.getNumberOfEvents()) .log("Current work updated."); work.start(); requestUpstream(work.getNumberOfEvents()); } return currentWork; } } /** * Adds additional credits upstream if {@code numberOfMessages} is greater than the number of {@code REQUESTED} * items. * * @param numberOfMessages Number of messages required downstream. */ private void requestUpstream(long numberOfMessages) { if (isTerminated()) { LOGGER.info("Cannot request more messages upstream. Subscriber is terminated."); return; } final Subscription subscription = UPSTREAM.get(this); if (subscription == null) { LOGGER.info("There is no upstream to request messages from."); return; } final long currentRequested = REQUESTED.get(this); final long difference = numberOfMessages - currentRequested; LOGGER.atVerbose() .addKeyValue(NUMBER_OF_REQUESTED_MESSAGES_KEY, currentRequested) .addKeyValue("numberOfMessages", numberOfMessages) .addKeyValue("difference", difference) .log("Requesting messages from upstream."); if (difference <= 0) { return; } Operators.addCap(REQUESTED, this, difference); subscription.request(difference); } @Override public void dispose() { super.dispose(); dispose("Upstream completed the receive work.", null); } private void dispose(String message, Throwable throwable) { super.dispose(); if (isDisposed.getAndSet(true)) { return; } synchronized (currentWorkLock) { if (currentWork != null) { currentWork.complete(message, throwable); currentWork = null; } SynchronousReceiveWork w = workQueue.poll(); while (w != null) { w.complete(message, throwable); w = workQueue.poll(); } } } /** * package-private method to check queue size. * * @return The current number of items in the queue. */ int getWorkQueueSize() { return this.workQueue.size(); } }
class SynchronousMessageSubscriber extends BaseSubscriber<ServiceBusReceivedMessage> { private static final ClientLogger LOGGER = new ClientLogger(SynchronousMessageSubscriber.class); private final AtomicBoolean isDisposed = new AtomicBoolean(); private final AtomicInteger wip = new AtomicInteger(); private final ConcurrentLinkedQueue<SynchronousReceiveWork> workQueue = new ConcurrentLinkedQueue<>(); private final ConcurrentLinkedDeque<ServiceBusReceivedMessage> bufferMessages = new ConcurrentLinkedDeque<>(); private final Object currentWorkLock = new Object(); private final ServiceBusReceiverAsyncClient asyncClient; private final boolean isPrefetchDisabled; private final Duration operationTimeout; private final boolean isReceiveDeleteMode; private volatile SynchronousReceiveWork currentWork; /** * The number of requested messages. */ private volatile long requested; private static final AtomicLongFieldUpdater<SynchronousMessageSubscriber> REQUESTED = AtomicLongFieldUpdater.newUpdater(SynchronousMessageSubscriber.class, "requested"); private volatile Subscription upstream; private static final AtomicReferenceFieldUpdater<SynchronousMessageSubscriber, Subscription> UPSTREAM = AtomicReferenceFieldUpdater.newUpdater(SynchronousMessageSubscriber.class, Subscription.class, "upstream"); /** * Creates a synchronous subscriber with some initial work to queue. * * * @param asyncClient Client to update disposition of messages. * @param isPrefetchDisabled Indicates if the prefetch is disabled. * @param operationTimeout Timeout to wait for operation to complete. * @param initialWork Initial work to queue. * * <p> * When {@code isPrefetchDisabled} is true, we release the messages those received during the timespan * between the last terminated downstream and the next active downstream. * </p> * * @throws NullPointerException if {@code initialWork} is null. * @throws IllegalArgumentException if {@code initialWork.getNumberOfEvents()} is less than 1. 
*/ SynchronousMessageSubscriber(ServiceBusReceiverAsyncClient asyncClient, SynchronousReceiveWork initialWork, boolean isPrefetchDisabled, Duration operationTimeout) { this.asyncClient = Objects.requireNonNull(asyncClient, "'asyncClient' cannot be null."); this.operationTimeout = Objects.requireNonNull(operationTimeout, "'operationTimeout' cannot be null."); this.workQueue.add(Objects.requireNonNull(initialWork, "'initialWork' cannot be null.")); this.isPrefetchDisabled = isPrefetchDisabled; this.isReceiveDeleteMode = asyncClient.getReceiverOptions().getReceiveMode() == ServiceBusReceiveMode.RECEIVE_AND_DELETE; if (initialWork.getNumberOfEvents() < 1) { throw LOGGER.logExceptionAsError(new IllegalArgumentException( "'numberOfEvents' cannot be less than 1. Actual: " + initialWork.getNumberOfEvents())); } Operators.addCap(REQUESTED, this, initialWork.getNumberOfEvents()); } /** * On an initial subscription, will take the first work item, and request that amount of work for it. * * @param subscription Subscription for upstream. */ @Override protected void hookOnSubscribe(Subscription subscription) { if (!Operators.setOnce(UPSTREAM, this, subscription)) { LOGGER.warning("This should only be subscribed to once. Ignoring subscription."); return; } getOrUpdateCurrentWork(); subscription.request(REQUESTED.get(this)); } /** * Publishes the event to the current {@link SynchronousReceiveWork}. If that work item is complete, will dispose of * the subscriber. * * @param message Event to publish. */ @Override protected void hookOnNext(ServiceBusReceivedMessage message) { if (isTerminated()) { Operators.onNextDropped(message, Context.empty()); } else { bufferMessages.add(message); drain(); } } /** * Queue the work to be picked up by drain loop. * * @param work to be queued. 
*/ void queueWork(SynchronousReceiveWork work) { Objects.requireNonNull(work, "'work' cannot be null"); workQueue.add(work); LoggingEventBuilder logBuilder = LOGGER.atVerbose() .addKeyValue(WORK_ID_KEY, work.getId()) .addKeyValue("numberOfEvents", work.getNumberOfEvents()) .addKeyValue("timeout", work.getTimeout()); if (workQueue.peek() == work && (currentWork == null || currentWork.isTerminal())) { logBuilder.log("First work in queue. Requesting upstream if needed."); getOrUpdateCurrentWork(); } else { logBuilder.log("Queuing receive work."); } if (UPSTREAM.get(this) != null) { drain(); } } /** * Drain the work, only one thread can be in this loop at a time. */ private void drain() { if (wip.getAndIncrement() != 0) { return; } int missed = 1; while (missed != 0) { try { drainQueue(); } finally { missed = wip.addAndGet(-missed); } } } /*** * Drain the queue using a lock on current work in progress. */ /** * {@inheritDoc} */ @Override protected void hookOnError(Throwable throwable) { dispose("Errors occurred upstream", throwable); } @Override protected void hookOnCancel() { this.dispose(); } private boolean isTerminated() { if (UPSTREAM.get(this) == Operators.cancelledSubscription()) { return true; } return isDisposed.get(); } /** * Gets the current work item if it is not terminal and cleans up any existing timeout operations. * * @return Gets or sets the next work item. Null if there are no work items currently. */ private SynchronousReceiveWork getOrUpdateCurrentWork() { synchronized (currentWorkLock) { if (currentWork != null && !currentWork.isTerminal()) { return currentWork; } currentWork = workQueue.poll(); while (currentWork != null && currentWork.isTerminal()) { LOGGER.atVerbose() .addKeyValue(WORK_ID_KEY, currentWork.getId()) .addKeyValue("numberOfEvents", currentWork.getNumberOfEvents()) .log("This work from queue is terminal. 
Skip it."); currentWork = workQueue.poll(); } if (currentWork != null) { final SynchronousReceiveWork work = currentWork; LOGGER.atVerbose() .addKeyValue(WORK_ID_KEY, work.getId()) .addKeyValue("numberOfEvents", work.getNumberOfEvents()) .log("Current work updated."); work.start(); requestUpstream(work.getNumberOfEvents()); } return currentWork; } } /** * Adds additional credits upstream if {@code numberOfMessages} is greater than the number of {@code REQUESTED} * items. * * @param numberOfMessages Number of messages required downstream. */ private void requestUpstream(long numberOfMessages) { if (isTerminated()) { LOGGER.info("Cannot request more messages upstream. Subscriber is terminated."); return; } final Subscription subscription = UPSTREAM.get(this); if (subscription == null) { LOGGER.info("There is no upstream to request messages from."); return; } final long currentRequested = REQUESTED.get(this); final long difference = numberOfMessages - currentRequested; LOGGER.atVerbose() .addKeyValue(NUMBER_OF_REQUESTED_MESSAGES_KEY, currentRequested) .addKeyValue("numberOfMessages", numberOfMessages) .addKeyValue("difference", difference) .log("Requesting messages from upstream."); if (difference <= 0) { return; } Operators.addCap(REQUESTED, this, difference); subscription.request(difference); } @Override public void dispose() { super.dispose(); dispose("Upstream completed the receive work.", null); } private void dispose(String message, Throwable throwable) { super.dispose(); if (isDisposed.getAndSet(true)) { return; } synchronized (currentWorkLock) { if (currentWork != null) { currentWork.complete(message, throwable); currentWork = null; } SynchronousReceiveWork w = workQueue.poll(); while (w != null) { w.complete(message, throwable); w = workQueue.poll(); } } } /** * package-private method to check queue size. * * @return The current number of items in the queue. */ int getWorkQueueSize() { return this.workQueue.size(); } }
Maybe pass this in as a constructor parameter since it is a read-only option that is set at client creation time
private void drainQueue() { if (isTerminated()) { return; } long numberRequested = REQUESTED.get(this); boolean isEmpty = bufferMessages.isEmpty(); SynchronousReceiveWork currentDownstream = null; while (numberRequested != 0L && !isEmpty) { if (isTerminated()) { break; } long numberConsumed = 0L; while (numberRequested != numberConsumed) { if (isEmpty || isTerminated()) { break; } final ServiceBusReceivedMessage message = bufferMessages.poll(); boolean isEmitted = false; while (!isEmitted) { currentDownstream = getOrUpdateCurrentWork(); if (currentDownstream == null) { break; } isEmitted = currentDownstream.emitNext(message); } if (!isEmitted) { if (isPrefetchDisabled && asyncClient.getReceiverOptions().getReceiveMode() == ServiceBusReceiveMode.PEEK_LOCK) { asyncClient.release(message).subscribe(__ -> { }, error -> LOGGER.atWarning() .addKeyValue(LOCK_TOKEN_KEY, message.getLockToken()) .log("Couldn't release the message.", error), () -> LOGGER.atVerbose() .addKeyValue(LOCK_TOKEN_KEY, message.getLockToken()) .log("Message successfully released.")); } else { bufferMessages.addFirst(message); break; } } numberConsumed++; isEmpty = bufferMessages.isEmpty(); } final long requestedMessages = REQUESTED.get(this); if (requestedMessages != Long.MAX_VALUE) { numberRequested = REQUESTED.addAndGet(this, -numberConsumed); } currentDownstream = getOrUpdateCurrentWork(); if (numberConsumed == 0 && currentDownstream == null) { break; } } if (numberRequested == 0L) { LOGGER.atVerbose() .log("Current work is completed. Schedule next work."); getOrUpdateCurrentWork(); } }
if (isPrefetchDisabled && asyncClient.getReceiverOptions().getReceiveMode() == ServiceBusReceiveMode.PEEK_LOCK) {
private void drainQueue() { if (isTerminated()) { return; } long numberRequested = REQUESTED.get(this); boolean isEmpty = bufferMessages.isEmpty(); SynchronousReceiveWork currentDownstream = null; while (numberRequested != 0L && !isEmpty) { if (isTerminated()) { break; } long numberConsumed = 0L; while (numberRequested != numberConsumed) { if (isEmpty || isTerminated()) { break; } final ServiceBusReceivedMessage message = bufferMessages.poll(); boolean isEmitted = false; while (!isEmitted) { currentDownstream = getOrUpdateCurrentWork(); if (currentDownstream == null && isReceiveDeleteMode) { bufferMessages.addFirst(message); return; } if (currentDownstream == null) { break; } isEmitted = currentDownstream.emitNext(message); } if (!isEmitted) { if (isPrefetchDisabled) { asyncClient.release(message).subscribe(__ -> { }, error -> LOGGER.atWarning() .addKeyValue(LOCK_TOKEN_KEY, message.getLockToken()) .log("Couldn't release the message.", error), () -> LOGGER.atVerbose() .addKeyValue(LOCK_TOKEN_KEY, message.getLockToken()) .log("Message successfully released.")); } else { bufferMessages.addFirst(message); break; } } numberConsumed++; isEmpty = bufferMessages.isEmpty(); } final long requestedMessages = REQUESTED.get(this); if (requestedMessages != Long.MAX_VALUE) { numberRequested = REQUESTED.addAndGet(this, -numberConsumed); } } if (numberRequested == 0L) { LOGGER.atVerbose() .log("Current work is completed. Schedule next work."); getOrUpdateCurrentWork(); } }
class SynchronousMessageSubscriber extends BaseSubscriber<ServiceBusReceivedMessage> { private static final ClientLogger LOGGER = new ClientLogger(SynchronousMessageSubscriber.class); private final AtomicBoolean isDisposed = new AtomicBoolean(); private final AtomicInteger wip = new AtomicInteger(); private final ConcurrentLinkedQueue<SynchronousReceiveWork> workQueue = new ConcurrentLinkedQueue<>(); private final ConcurrentLinkedDeque<ServiceBusReceivedMessage> bufferMessages = new ConcurrentLinkedDeque<>(); private final Object currentWorkLock = new Object(); private final ServiceBusReceiverAsyncClient asyncClient; private final boolean isPrefetchDisabled; private final Duration operationTimeout; private volatile SynchronousReceiveWork currentWork; /** * The number of requested messages. */ private volatile long requested; private static final AtomicLongFieldUpdater<SynchronousMessageSubscriber> REQUESTED = AtomicLongFieldUpdater.newUpdater(SynchronousMessageSubscriber.class, "requested"); private volatile Subscription upstream; private static final AtomicReferenceFieldUpdater<SynchronousMessageSubscriber, Subscription> UPSTREAM = AtomicReferenceFieldUpdater.newUpdater(SynchronousMessageSubscriber.class, Subscription.class, "upstream"); /** * Creates a synchronous subscriber with some initial work to queue. * * * @param asyncClient Client to update disposition of messages. * @param isPrefetchDisabled Indicates if the prefetch is disabled. * @param operationTimeout Timeout to wait for operation to complete. * @param initialWork Initial work to queue. * * <p> * When {@code isPrefetchDisabled} is true, we release the messages those received during the timespan * between the last terminated downstream and the next active downstream. * </p> * * @throws NullPointerException if {@code initialWork} is null. * @throws IllegalArgumentException if {@code initialWork.getNumberOfEvents()} is less than 1. 
*/ SynchronousMessageSubscriber(ServiceBusReceiverAsyncClient asyncClient, SynchronousReceiveWork initialWork, boolean isPrefetchDisabled, Duration operationTimeout) { this.asyncClient = Objects.requireNonNull(asyncClient, "'asyncClient' cannot be null."); this.operationTimeout = Objects.requireNonNull(operationTimeout, "'operationTimeout' cannot be null."); this.workQueue.add(Objects.requireNonNull(initialWork, "'initialWork' cannot be null.")); this.isPrefetchDisabled = isPrefetchDisabled; if (initialWork.getNumberOfEvents() < 1) { throw LOGGER.logExceptionAsError(new IllegalArgumentException( "'numberOfEvents' cannot be less than 1. Actual: " + initialWork.getNumberOfEvents())); } Operators.addCap(REQUESTED, this, initialWork.getNumberOfEvents()); } /** * On an initial subscription, will take the first work item, and request that amount of work for it. * * @param subscription Subscription for upstream. */ @Override protected void hookOnSubscribe(Subscription subscription) { if (!Operators.setOnce(UPSTREAM, this, subscription)) { LOGGER.warning("This should only be subscribed to once. Ignoring subscription."); return; } getOrUpdateCurrentWork(); subscription.request(REQUESTED.get(this)); } /** * Publishes the event to the current {@link SynchronousReceiveWork}. If that work item is complete, will dispose of * the subscriber. * * @param message Event to publish. */ @Override protected void hookOnNext(ServiceBusReceivedMessage message) { if (isTerminated()) { Operators.onNextDropped(message, Context.empty()); } else { bufferMessages.add(message); drain(); } } /** * Queue the work to be picked up by drain loop. * * @param work to be queued. 
*/ void queueWork(SynchronousReceiveWork work) { Objects.requireNonNull(work, "'work' cannot be null"); workQueue.add(work); LoggingEventBuilder logBuilder = LOGGER.atVerbose() .addKeyValue(WORK_ID_KEY, work.getId()) .addKeyValue("numberOfEvents", work.getNumberOfEvents()) .addKeyValue("timeout", work.getTimeout()); if (workQueue.peek() == work && (currentWork == null || currentWork.isTerminal())) { logBuilder.log("First work in queue. Requesting upstream if needed."); getOrUpdateCurrentWork(); } else { logBuilder.log("Queuing receive work."); } if (UPSTREAM.get(this) != null) { drain(); } } /** * Drain the work, only one thread can be in this loop at a time. */ private void drain() { if (wip.getAndIncrement() != 0) { return; } int missed = 1; while (missed != 0) { try { drainQueue(); } finally { missed = wip.addAndGet(-missed); } } } /*** * Drain the queue using a lock on current work in progress. */ /** * {@inheritDoc} */ @Override protected void hookOnError(Throwable throwable) { dispose("Errors occurred upstream", throwable); } @Override protected void hookOnCancel() { this.dispose(); } private boolean isTerminated() { if (UPSTREAM.get(this) == Operators.cancelledSubscription()) { return true; } return isDisposed.get(); } /** * Gets the current work item if it is not terminal and cleans up any existing timeout operations. * * @return Gets or sets the next work item. Null if there are no work items currently. */ private SynchronousReceiveWork getOrUpdateCurrentWork() { synchronized (currentWorkLock) { if (currentWork != null && !currentWork.isTerminal()) { return currentWork; } currentWork = workQueue.poll(); while (currentWork != null && currentWork.isTerminal()) { LOGGER.atVerbose() .addKeyValue(WORK_ID_KEY, currentWork.getId()) .addKeyValue("numberOfEvents", currentWork.getNumberOfEvents()) .log("This work from queue is terminal. 
Skip it."); currentWork = workQueue.poll(); } if (currentWork != null) { final SynchronousReceiveWork work = currentWork; LOGGER.atVerbose() .addKeyValue(WORK_ID_KEY, work.getId()) .addKeyValue("numberOfEvents", work.getNumberOfEvents()) .log("Current work updated."); work.start(); requestUpstream(work.getNumberOfEvents()); } return currentWork; } } /** * Adds additional credits upstream if {@code numberOfMessages} is greater than the number of {@code REQUESTED} * items. * * @param numberOfMessages Number of messages required downstream. */ private void requestUpstream(long numberOfMessages) { if (isTerminated()) { LOGGER.info("Cannot request more messages upstream. Subscriber is terminated."); return; } final Subscription subscription = UPSTREAM.get(this); if (subscription == null) { LOGGER.info("There is no upstream to request messages from."); return; } final long currentRequested = REQUESTED.get(this); final long difference = numberOfMessages - currentRequested; LOGGER.atVerbose() .addKeyValue(NUMBER_OF_REQUESTED_MESSAGES_KEY, currentRequested) .addKeyValue("numberOfMessages", numberOfMessages) .addKeyValue("difference", difference) .log("Requesting messages from upstream."); if (difference <= 0) { return; } Operators.addCap(REQUESTED, this, difference); subscription.request(difference); } @Override public void dispose() { super.dispose(); dispose("Upstream completed the receive work.", null); } private void dispose(String message, Throwable throwable) { super.dispose(); if (isDisposed.getAndSet(true)) { return; } synchronized (currentWorkLock) { if (currentWork != null) { currentWork.complete(message, throwable); currentWork = null; } SynchronousReceiveWork w = workQueue.poll(); while (w != null) { w.complete(message, throwable); w = workQueue.poll(); } } } /** * package-private method to check queue size. * * @return The current number of items in the queue. */ int getWorkQueueSize() { return this.workQueue.size(); } }
class SynchronousMessageSubscriber extends BaseSubscriber<ServiceBusReceivedMessage> { private static final ClientLogger LOGGER = new ClientLogger(SynchronousMessageSubscriber.class); private final AtomicBoolean isDisposed = new AtomicBoolean(); private final AtomicInteger wip = new AtomicInteger(); private final ConcurrentLinkedQueue<SynchronousReceiveWork> workQueue = new ConcurrentLinkedQueue<>(); private final ConcurrentLinkedDeque<ServiceBusReceivedMessage> bufferMessages = new ConcurrentLinkedDeque<>(); private final Object currentWorkLock = new Object(); private final ServiceBusReceiverAsyncClient asyncClient; private final boolean isPrefetchDisabled; private final Duration operationTimeout; private final boolean isReceiveDeleteMode; private volatile SynchronousReceiveWork currentWork; /** * The number of requested messages. */ private volatile long requested; private static final AtomicLongFieldUpdater<SynchronousMessageSubscriber> REQUESTED = AtomicLongFieldUpdater.newUpdater(SynchronousMessageSubscriber.class, "requested"); private volatile Subscription upstream; private static final AtomicReferenceFieldUpdater<SynchronousMessageSubscriber, Subscription> UPSTREAM = AtomicReferenceFieldUpdater.newUpdater(SynchronousMessageSubscriber.class, Subscription.class, "upstream"); /** * Creates a synchronous subscriber with some initial work to queue. * * * @param asyncClient Client to update disposition of messages. * @param isPrefetchDisabled Indicates if the prefetch is disabled. * @param operationTimeout Timeout to wait for operation to complete. * @param initialWork Initial work to queue. * * <p> * When {@code isPrefetchDisabled} is true, we release the messages those received during the timespan * between the last terminated downstream and the next active downstream. * </p> * * @throws NullPointerException if {@code initialWork} is null. * @throws IllegalArgumentException if {@code initialWork.getNumberOfEvents()} is less than 1. 
*/ SynchronousMessageSubscriber(ServiceBusReceiverAsyncClient asyncClient, SynchronousReceiveWork initialWork, boolean isPrefetchDisabled, Duration operationTimeout) { this.asyncClient = Objects.requireNonNull(asyncClient, "'asyncClient' cannot be null."); this.operationTimeout = Objects.requireNonNull(operationTimeout, "'operationTimeout' cannot be null."); this.workQueue.add(Objects.requireNonNull(initialWork, "'initialWork' cannot be null.")); this.isPrefetchDisabled = isPrefetchDisabled; this.isReceiveDeleteMode = asyncClient.getReceiverOptions().getReceiveMode() == ServiceBusReceiveMode.RECEIVE_AND_DELETE; if (initialWork.getNumberOfEvents() < 1) { throw LOGGER.logExceptionAsError(new IllegalArgumentException( "'numberOfEvents' cannot be less than 1. Actual: " + initialWork.getNumberOfEvents())); } Operators.addCap(REQUESTED, this, initialWork.getNumberOfEvents()); } /** * On an initial subscription, will take the first work item, and request that amount of work for it. * * @param subscription Subscription for upstream. */ @Override protected void hookOnSubscribe(Subscription subscription) { if (!Operators.setOnce(UPSTREAM, this, subscription)) { LOGGER.warning("This should only be subscribed to once. Ignoring subscription."); return; } getOrUpdateCurrentWork(); subscription.request(REQUESTED.get(this)); } /** * Publishes the event to the current {@link SynchronousReceiveWork}. If that work item is complete, will dispose of * the subscriber. * * @param message Event to publish. */ @Override protected void hookOnNext(ServiceBusReceivedMessage message) { if (isTerminated()) { Operators.onNextDropped(message, Context.empty()); } else { bufferMessages.add(message); drain(); } } /** * Queue the work to be picked up by drain loop. * * @param work to be queued. 
*/ void queueWork(SynchronousReceiveWork work) { Objects.requireNonNull(work, "'work' cannot be null"); workQueue.add(work); LoggingEventBuilder logBuilder = LOGGER.atVerbose() .addKeyValue(WORK_ID_KEY, work.getId()) .addKeyValue("numberOfEvents", work.getNumberOfEvents()) .addKeyValue("timeout", work.getTimeout()); if (workQueue.peek() == work && (currentWork == null || currentWork.isTerminal())) { logBuilder.log("First work in queue. Requesting upstream if needed."); getOrUpdateCurrentWork(); } else { logBuilder.log("Queuing receive work."); } if (UPSTREAM.get(this) != null) { drain(); } } /** * Drain the work, only one thread can be in this loop at a time. */ private void drain() { if (wip.getAndIncrement() != 0) { return; } int missed = 1; while (missed != 0) { try { drainQueue(); } finally { missed = wip.addAndGet(-missed); } } } /*** * Drain the queue using a lock on current work in progress. */ /** * {@inheritDoc} */ @Override protected void hookOnError(Throwable throwable) { dispose("Errors occurred upstream", throwable); } @Override protected void hookOnCancel() { this.dispose(); } private boolean isTerminated() { if (UPSTREAM.get(this) == Operators.cancelledSubscription()) { return true; } return isDisposed.get(); } /** * Gets the current work item if it is not terminal and cleans up any existing timeout operations. * * @return Gets or sets the next work item. Null if there are no work items currently. */ private SynchronousReceiveWork getOrUpdateCurrentWork() { synchronized (currentWorkLock) { if (currentWork != null && !currentWork.isTerminal()) { return currentWork; } currentWork = workQueue.poll(); while (currentWork != null && currentWork.isTerminal()) { LOGGER.atVerbose() .addKeyValue(WORK_ID_KEY, currentWork.getId()) .addKeyValue("numberOfEvents", currentWork.getNumberOfEvents()) .log("This work from queue is terminal. 
Skip it."); currentWork = workQueue.poll(); } if (currentWork != null) { final SynchronousReceiveWork work = currentWork; LOGGER.atVerbose() .addKeyValue(WORK_ID_KEY, work.getId()) .addKeyValue("numberOfEvents", work.getNumberOfEvents()) .log("Current work updated."); work.start(); requestUpstream(work.getNumberOfEvents()); } return currentWork; } } /** * Adds additional credits upstream if {@code numberOfMessages} is greater than the number of {@code REQUESTED} * items. * * @param numberOfMessages Number of messages required downstream. */ private void requestUpstream(long numberOfMessages) { if (isTerminated()) { LOGGER.info("Cannot request more messages upstream. Subscriber is terminated."); return; } final Subscription subscription = UPSTREAM.get(this); if (subscription == null) { LOGGER.info("There is no upstream to request messages from."); return; } final long currentRequested = REQUESTED.get(this); final long difference = numberOfMessages - currentRequested; LOGGER.atVerbose() .addKeyValue(NUMBER_OF_REQUESTED_MESSAGES_KEY, currentRequested) .addKeyValue("numberOfMessages", numberOfMessages) .addKeyValue("difference", difference) .log("Requesting messages from upstream."); if (difference <= 0) { return; } Operators.addCap(REQUESTED, this, difference); subscription.request(difference); } @Override public void dispose() { super.dispose(); dispose("Upstream completed the receive work.", null); } private void dispose(String message, Throwable throwable) { super.dispose(); if (isDisposed.getAndSet(true)) { return; } synchronized (currentWorkLock) { if (currentWork != null) { currentWork.complete(message, throwable); currentWork = null; } SynchronousReceiveWork w = workQueue.poll(); while (w != null) { w.complete(message, throwable); w = workQueue.poll(); } } } /** * package-private method to check queue size. * * @return The current number of items in the queue. */ int getWorkQueueSize() { return this.workQueue.size(); } }
there is another loop, ```java while (numberRequested != 0L && !isEmpty) { ``` we have no current work, the requested != 0 and the buffer queue is not empty, this loop will never exit.
private void drainQueue() { if (isTerminated()) { return; } long numberRequested = REQUESTED.get(this); boolean isEmpty = bufferMessages.isEmpty(); SynchronousReceiveWork currentDownstream = null; while (numberRequested != 0L && !isEmpty) { if (isTerminated()) { break; } long numberConsumed = 0L; while (numberRequested != numberConsumed) { if (isEmpty || isTerminated()) { break; } final ServiceBusReceivedMessage message = bufferMessages.poll(); boolean isEmitted = false; while (!isEmitted) { currentDownstream = getOrUpdateCurrentWork(); if (currentDownstream == null) { break; } isEmitted = currentDownstream.emitNext(message); } if (!isEmitted) { if (isPrefetchDisabled && asyncClient.getReceiverOptions().getReceiveMode() == ServiceBusReceiveMode.PEEK_LOCK) { asyncClient.release(message).subscribe(__ -> { }, error -> LOGGER.atWarning() .addKeyValue(LOCK_TOKEN_KEY, message.getLockToken()) .log("Couldn't release the message.", error), () -> LOGGER.atVerbose() .addKeyValue(LOCK_TOKEN_KEY, message.getLockToken()) .log("Message successfully released.")); } else { bufferMessages.addFirst(message); break; } } numberConsumed++; isEmpty = bufferMessages.isEmpty(); } final long requestedMessages = REQUESTED.get(this); if (requestedMessages != Long.MAX_VALUE) { numberRequested = REQUESTED.addAndGet(this, -numberConsumed); } currentDownstream = getOrUpdateCurrentWork(); if (numberConsumed == 0 && currentDownstream == null) { break; } } if (numberRequested == 0L) { LOGGER.atVerbose() .log("Current work is completed. Schedule next work."); getOrUpdateCurrentWork(); } }
}
private void drainQueue() { if (isTerminated()) { return; } long numberRequested = REQUESTED.get(this); boolean isEmpty = bufferMessages.isEmpty(); SynchronousReceiveWork currentDownstream = null; while (numberRequested != 0L && !isEmpty) { if (isTerminated()) { break; } long numberConsumed = 0L; while (numberRequested != numberConsumed) { if (isEmpty || isTerminated()) { break; } final ServiceBusReceivedMessage message = bufferMessages.poll(); boolean isEmitted = false; while (!isEmitted) { currentDownstream = getOrUpdateCurrentWork(); if (currentDownstream == null && isReceiveDeleteMode) { bufferMessages.addFirst(message); return; } if (currentDownstream == null) { break; } isEmitted = currentDownstream.emitNext(message); } if (!isEmitted) { if (isPrefetchDisabled) { asyncClient.release(message).subscribe(__ -> { }, error -> LOGGER.atWarning() .addKeyValue(LOCK_TOKEN_KEY, message.getLockToken()) .log("Couldn't release the message.", error), () -> LOGGER.atVerbose() .addKeyValue(LOCK_TOKEN_KEY, message.getLockToken()) .log("Message successfully released.")); } else { bufferMessages.addFirst(message); break; } } numberConsumed++; isEmpty = bufferMessages.isEmpty(); } final long requestedMessages = REQUESTED.get(this); if (requestedMessages != Long.MAX_VALUE) { numberRequested = REQUESTED.addAndGet(this, -numberConsumed); } } if (numberRequested == 0L) { LOGGER.atVerbose() .log("Current work is completed. Schedule next work."); getOrUpdateCurrentWork(); } }
class SynchronousMessageSubscriber extends BaseSubscriber<ServiceBusReceivedMessage> { private static final ClientLogger LOGGER = new ClientLogger(SynchronousMessageSubscriber.class); private final AtomicBoolean isDisposed = new AtomicBoolean(); private final AtomicInteger wip = new AtomicInteger(); private final ConcurrentLinkedQueue<SynchronousReceiveWork> workQueue = new ConcurrentLinkedQueue<>(); private final ConcurrentLinkedDeque<ServiceBusReceivedMessage> bufferMessages = new ConcurrentLinkedDeque<>(); private final Object currentWorkLock = new Object(); private final ServiceBusReceiverAsyncClient asyncClient; private final boolean isPrefetchDisabled; private final Duration operationTimeout; private volatile SynchronousReceiveWork currentWork; /** * The number of requested messages. */ private volatile long requested; private static final AtomicLongFieldUpdater<SynchronousMessageSubscriber> REQUESTED = AtomicLongFieldUpdater.newUpdater(SynchronousMessageSubscriber.class, "requested"); private volatile Subscription upstream; private static final AtomicReferenceFieldUpdater<SynchronousMessageSubscriber, Subscription> UPSTREAM = AtomicReferenceFieldUpdater.newUpdater(SynchronousMessageSubscriber.class, Subscription.class, "upstream"); /** * Creates a synchronous subscriber with some initial work to queue. * * * @param asyncClient Client to update disposition of messages. * @param isPrefetchDisabled Indicates if the prefetch is disabled. * @param operationTimeout Timeout to wait for operation to complete. * @param initialWork Initial work to queue. * * <p> * When {@code isPrefetchDisabled} is true, we release the messages those received during the timespan * between the last terminated downstream and the next active downstream. * </p> * * @throws NullPointerException if {@code initialWork} is null. * @throws IllegalArgumentException if {@code initialWork.getNumberOfEvents()} is less than 1. 
*/ SynchronousMessageSubscriber(ServiceBusReceiverAsyncClient asyncClient, SynchronousReceiveWork initialWork, boolean isPrefetchDisabled, Duration operationTimeout) { this.asyncClient = Objects.requireNonNull(asyncClient, "'asyncClient' cannot be null."); this.operationTimeout = Objects.requireNonNull(operationTimeout, "'operationTimeout' cannot be null."); this.workQueue.add(Objects.requireNonNull(initialWork, "'initialWork' cannot be null.")); this.isPrefetchDisabled = isPrefetchDisabled; if (initialWork.getNumberOfEvents() < 1) { throw LOGGER.logExceptionAsError(new IllegalArgumentException( "'numberOfEvents' cannot be less than 1. Actual: " + initialWork.getNumberOfEvents())); } Operators.addCap(REQUESTED, this, initialWork.getNumberOfEvents()); } /** * On an initial subscription, will take the first work item, and request that amount of work for it. * * @param subscription Subscription for upstream. */ @Override protected void hookOnSubscribe(Subscription subscription) { if (!Operators.setOnce(UPSTREAM, this, subscription)) { LOGGER.warning("This should only be subscribed to once. Ignoring subscription."); return; } getOrUpdateCurrentWork(); subscription.request(REQUESTED.get(this)); } /** * Publishes the event to the current {@link SynchronousReceiveWork}. If that work item is complete, will dispose of * the subscriber. * * @param message Event to publish. */ @Override protected void hookOnNext(ServiceBusReceivedMessage message) { if (isTerminated()) { Operators.onNextDropped(message, Context.empty()); } else { bufferMessages.add(message); drain(); } } /** * Queue the work to be picked up by drain loop. * * @param work to be queued. 
*/ void queueWork(SynchronousReceiveWork work) { Objects.requireNonNull(work, "'work' cannot be null"); workQueue.add(work); LoggingEventBuilder logBuilder = LOGGER.atVerbose() .addKeyValue(WORK_ID_KEY, work.getId()) .addKeyValue("numberOfEvents", work.getNumberOfEvents()) .addKeyValue("timeout", work.getTimeout()); if (workQueue.peek() == work && (currentWork == null || currentWork.isTerminal())) { logBuilder.log("First work in queue. Requesting upstream if needed."); getOrUpdateCurrentWork(); } else { logBuilder.log("Queuing receive work."); } if (UPSTREAM.get(this) != null) { drain(); } } /** * Drain the work, only one thread can be in this loop at a time. */ private void drain() { if (wip.getAndIncrement() != 0) { return; } int missed = 1; while (missed != 0) { try { drainQueue(); } finally { missed = wip.addAndGet(-missed); } } } /*** * Drain the queue using a lock on current work in progress. */ /** * {@inheritDoc} */ @Override protected void hookOnError(Throwable throwable) { dispose("Errors occurred upstream", throwable); } @Override protected void hookOnCancel() { this.dispose(); } private boolean isTerminated() { if (UPSTREAM.get(this) == Operators.cancelledSubscription()) { return true; } return isDisposed.get(); } /** * Gets the current work item if it is not terminal and cleans up any existing timeout operations. * * @return Gets or sets the next work item. Null if there are no work items currently. */ private SynchronousReceiveWork getOrUpdateCurrentWork() { synchronized (currentWorkLock) { if (currentWork != null && !currentWork.isTerminal()) { return currentWork; } currentWork = workQueue.poll(); while (currentWork != null && currentWork.isTerminal()) { LOGGER.atVerbose() .addKeyValue(WORK_ID_KEY, currentWork.getId()) .addKeyValue("numberOfEvents", currentWork.getNumberOfEvents()) .log("This work from queue is terminal. 
Skip it."); currentWork = workQueue.poll(); } if (currentWork != null) { final SynchronousReceiveWork work = currentWork; LOGGER.atVerbose() .addKeyValue(WORK_ID_KEY, work.getId()) .addKeyValue("numberOfEvents", work.getNumberOfEvents()) .log("Current work updated."); work.start(); requestUpstream(work.getNumberOfEvents()); } return currentWork; } } /** * Adds additional credits upstream if {@code numberOfMessages} is greater than the number of {@code REQUESTED} * items. * * @param numberOfMessages Number of messages required downstream. */ private void requestUpstream(long numberOfMessages) { if (isTerminated()) { LOGGER.info("Cannot request more messages upstream. Subscriber is terminated."); return; } final Subscription subscription = UPSTREAM.get(this); if (subscription == null) { LOGGER.info("There is no upstream to request messages from."); return; } final long currentRequested = REQUESTED.get(this); final long difference = numberOfMessages - currentRequested; LOGGER.atVerbose() .addKeyValue(NUMBER_OF_REQUESTED_MESSAGES_KEY, currentRequested) .addKeyValue("numberOfMessages", numberOfMessages) .addKeyValue("difference", difference) .log("Requesting messages from upstream."); if (difference <= 0) { return; } Operators.addCap(REQUESTED, this, difference); subscription.request(difference); } @Override public void dispose() { super.dispose(); dispose("Upstream completed the receive work.", null); } private void dispose(String message, Throwable throwable) { super.dispose(); if (isDisposed.getAndSet(true)) { return; } synchronized (currentWorkLock) { if (currentWork != null) { currentWork.complete(message, throwable); currentWork = null; } SynchronousReceiveWork w = workQueue.poll(); while (w != null) { w.complete(message, throwable); w = workQueue.poll(); } } } /** * package-private method to check queue size. * * @return The current number of items in the queue. */ int getWorkQueueSize() { return this.workQueue.size(); } }
class SynchronousMessageSubscriber extends BaseSubscriber<ServiceBusReceivedMessage> { private static final ClientLogger LOGGER = new ClientLogger(SynchronousMessageSubscriber.class); private final AtomicBoolean isDisposed = new AtomicBoolean(); private final AtomicInteger wip = new AtomicInteger(); private final ConcurrentLinkedQueue<SynchronousReceiveWork> workQueue = new ConcurrentLinkedQueue<>(); private final ConcurrentLinkedDeque<ServiceBusReceivedMessage> bufferMessages = new ConcurrentLinkedDeque<>(); private final Object currentWorkLock = new Object(); private final ServiceBusReceiverAsyncClient asyncClient; private final boolean isPrefetchDisabled; private final Duration operationTimeout; private final boolean isReceiveDeleteMode; private volatile SynchronousReceiveWork currentWork; /** * The number of requested messages. */ private volatile long requested; private static final AtomicLongFieldUpdater<SynchronousMessageSubscriber> REQUESTED = AtomicLongFieldUpdater.newUpdater(SynchronousMessageSubscriber.class, "requested"); private volatile Subscription upstream; private static final AtomicReferenceFieldUpdater<SynchronousMessageSubscriber, Subscription> UPSTREAM = AtomicReferenceFieldUpdater.newUpdater(SynchronousMessageSubscriber.class, Subscription.class, "upstream"); /** * Creates a synchronous subscriber with some initial work to queue. * * * @param asyncClient Client to update disposition of messages. * @param isPrefetchDisabled Indicates if the prefetch is disabled. * @param operationTimeout Timeout to wait for operation to complete. * @param initialWork Initial work to queue. * * <p> * When {@code isPrefetchDisabled} is true, we release the messages those received during the timespan * between the last terminated downstream and the next active downstream. * </p> * * @throws NullPointerException if {@code initialWork} is null. * @throws IllegalArgumentException if {@code initialWork.getNumberOfEvents()} is less than 1. 
*/ SynchronousMessageSubscriber(ServiceBusReceiverAsyncClient asyncClient, SynchronousReceiveWork initialWork, boolean isPrefetchDisabled, Duration operationTimeout) { this.asyncClient = Objects.requireNonNull(asyncClient, "'asyncClient' cannot be null."); this.operationTimeout = Objects.requireNonNull(operationTimeout, "'operationTimeout' cannot be null."); this.workQueue.add(Objects.requireNonNull(initialWork, "'initialWork' cannot be null.")); this.isPrefetchDisabled = isPrefetchDisabled; this.isReceiveDeleteMode = asyncClient.getReceiverOptions().getReceiveMode() == ServiceBusReceiveMode.RECEIVE_AND_DELETE; if (initialWork.getNumberOfEvents() < 1) { throw LOGGER.logExceptionAsError(new IllegalArgumentException( "'numberOfEvents' cannot be less than 1. Actual: " + initialWork.getNumberOfEvents())); } Operators.addCap(REQUESTED, this, initialWork.getNumberOfEvents()); } /** * On an initial subscription, will take the first work item, and request that amount of work for it. * * @param subscription Subscription for upstream. */ @Override protected void hookOnSubscribe(Subscription subscription) { if (!Operators.setOnce(UPSTREAM, this, subscription)) { LOGGER.warning("This should only be subscribed to once. Ignoring subscription."); return; } getOrUpdateCurrentWork(); subscription.request(REQUESTED.get(this)); } /** * Publishes the event to the current {@link SynchronousReceiveWork}. If that work item is complete, will dispose of * the subscriber. * * @param message Event to publish. */ @Override protected void hookOnNext(ServiceBusReceivedMessage message) { if (isTerminated()) { Operators.onNextDropped(message, Context.empty()); } else { bufferMessages.add(message); drain(); } } /** * Queue the work to be picked up by drain loop. * * @param work to be queued. 
*/ void queueWork(SynchronousReceiveWork work) { Objects.requireNonNull(work, "'work' cannot be null"); workQueue.add(work); LoggingEventBuilder logBuilder = LOGGER.atVerbose() .addKeyValue(WORK_ID_KEY, work.getId()) .addKeyValue("numberOfEvents", work.getNumberOfEvents()) .addKeyValue("timeout", work.getTimeout()); if (workQueue.peek() == work && (currentWork == null || currentWork.isTerminal())) { logBuilder.log("First work in queue. Requesting upstream if needed."); getOrUpdateCurrentWork(); } else { logBuilder.log("Queuing receive work."); } if (UPSTREAM.get(this) != null) { drain(); } } /** * Drain the work, only one thread can be in this loop at a time. */ private void drain() { if (wip.getAndIncrement() != 0) { return; } int missed = 1; while (missed != 0) { try { drainQueue(); } finally { missed = wip.addAndGet(-missed); } } } /*** * Drain the queue using a lock on current work in progress. */ /** * {@inheritDoc} */ @Override protected void hookOnError(Throwable throwable) { dispose("Errors occurred upstream", throwable); } @Override protected void hookOnCancel() { this.dispose(); } private boolean isTerminated() { if (UPSTREAM.get(this) == Operators.cancelledSubscription()) { return true; } return isDisposed.get(); } /** * Gets the current work item if it is not terminal and cleans up any existing timeout operations. * * @return Gets or sets the next work item. Null if there are no work items currently. */ private SynchronousReceiveWork getOrUpdateCurrentWork() { synchronized (currentWorkLock) { if (currentWork != null && !currentWork.isTerminal()) { return currentWork; } currentWork = workQueue.poll(); while (currentWork != null && currentWork.isTerminal()) { LOGGER.atVerbose() .addKeyValue(WORK_ID_KEY, currentWork.getId()) .addKeyValue("numberOfEvents", currentWork.getNumberOfEvents()) .log("This work from queue is terminal. 
Skip it."); currentWork = workQueue.poll(); } if (currentWork != null) { final SynchronousReceiveWork work = currentWork; LOGGER.atVerbose() .addKeyValue(WORK_ID_KEY, work.getId()) .addKeyValue("numberOfEvents", work.getNumberOfEvents()) .log("Current work updated."); work.start(); requestUpstream(work.getNumberOfEvents()); } return currentWork; } } /** * Adds additional credits upstream if {@code numberOfMessages} is greater than the number of {@code REQUESTED} * items. * * @param numberOfMessages Number of messages required downstream. */ private void requestUpstream(long numberOfMessages) { if (isTerminated()) { LOGGER.info("Cannot request more messages upstream. Subscriber is terminated."); return; } final Subscription subscription = UPSTREAM.get(this); if (subscription == null) { LOGGER.info("There is no upstream to request messages from."); return; } final long currentRequested = REQUESTED.get(this); final long difference = numberOfMessages - currentRequested; LOGGER.atVerbose() .addKeyValue(NUMBER_OF_REQUESTED_MESSAGES_KEY, currentRequested) .addKeyValue("numberOfMessages", numberOfMessages) .addKeyValue("difference", difference) .log("Requesting messages from upstream."); if (difference <= 0) { return; } Operators.addCap(REQUESTED, this, difference); subscription.request(difference); } @Override public void dispose() { super.dispose(); dispose("Upstream completed the receive work.", null); } private void dispose(String message, Throwable throwable) { super.dispose(); if (isDisposed.getAndSet(true)) { return; } synchronized (currentWorkLock) { if (currentWork != null) { currentWork.complete(message, throwable); currentWork = null; } SynchronousReceiveWork w = workQueue.poll(); while (w != null) { w.complete(message, throwable); w = workQueue.poll(); } } } /** * package-private method to check queue size. * * @return The current number of items in the queue. */ int getWorkQueueSize() { return this.workQueue.size(); } }
Good idea, I'll change this.
private void drainQueue() { if (isTerminated()) { return; } long numberRequested = REQUESTED.get(this); boolean isEmpty = bufferMessages.isEmpty(); SynchronousReceiveWork currentDownstream = null; while (numberRequested != 0L && !isEmpty) { if (isTerminated()) { break; } long numberConsumed = 0L; while (numberRequested != numberConsumed) { if (isEmpty || isTerminated()) { break; } final ServiceBusReceivedMessage message = bufferMessages.poll(); boolean isEmitted = false; while (!isEmitted) { currentDownstream = getOrUpdateCurrentWork(); if (currentDownstream == null) { break; } isEmitted = currentDownstream.emitNext(message); } if (!isEmitted) { if (isPrefetchDisabled && asyncClient.getReceiverOptions().getReceiveMode() == ServiceBusReceiveMode.PEEK_LOCK) { asyncClient.release(message).subscribe(__ -> { }, error -> LOGGER.atWarning() .addKeyValue(LOCK_TOKEN_KEY, message.getLockToken()) .log("Couldn't release the message.", error), () -> LOGGER.atVerbose() .addKeyValue(LOCK_TOKEN_KEY, message.getLockToken()) .log("Message successfully released.")); } else { bufferMessages.addFirst(message); break; } } numberConsumed++; isEmpty = bufferMessages.isEmpty(); } final long requestedMessages = REQUESTED.get(this); if (requestedMessages != Long.MAX_VALUE) { numberRequested = REQUESTED.addAndGet(this, -numberConsumed); } currentDownstream = getOrUpdateCurrentWork(); if (numberConsumed == 0 && currentDownstream == null) { break; } } if (numberRequested == 0L) { LOGGER.atVerbose() .log("Current work is completed. Schedule next work."); getOrUpdateCurrentWork(); } }
if (isPrefetchDisabled && asyncClient.getReceiverOptions().getReceiveMode() == ServiceBusReceiveMode.PEEK_LOCK) {
private void drainQueue() { if (isTerminated()) { return; } long numberRequested = REQUESTED.get(this); boolean isEmpty = bufferMessages.isEmpty(); SynchronousReceiveWork currentDownstream = null; while (numberRequested != 0L && !isEmpty) { if (isTerminated()) { break; } long numberConsumed = 0L; while (numberRequested != numberConsumed) { if (isEmpty || isTerminated()) { break; } final ServiceBusReceivedMessage message = bufferMessages.poll(); boolean isEmitted = false; while (!isEmitted) { currentDownstream = getOrUpdateCurrentWork(); if (currentDownstream == null && isReceiveDeleteMode) { bufferMessages.addFirst(message); return; } if (currentDownstream == null) { break; } isEmitted = currentDownstream.emitNext(message); } if (!isEmitted) { if (isPrefetchDisabled) { asyncClient.release(message).subscribe(__ -> { }, error -> LOGGER.atWarning() .addKeyValue(LOCK_TOKEN_KEY, message.getLockToken()) .log("Couldn't release the message.", error), () -> LOGGER.atVerbose() .addKeyValue(LOCK_TOKEN_KEY, message.getLockToken()) .log("Message successfully released.")); } else { bufferMessages.addFirst(message); break; } } numberConsumed++; isEmpty = bufferMessages.isEmpty(); } final long requestedMessages = REQUESTED.get(this); if (requestedMessages != Long.MAX_VALUE) { numberRequested = REQUESTED.addAndGet(this, -numberConsumed); } } if (numberRequested == 0L) { LOGGER.atVerbose() .log("Current work is completed. Schedule next work."); getOrUpdateCurrentWork(); } }
class SynchronousMessageSubscriber extends BaseSubscriber<ServiceBusReceivedMessage> { private static final ClientLogger LOGGER = new ClientLogger(SynchronousMessageSubscriber.class); private final AtomicBoolean isDisposed = new AtomicBoolean(); private final AtomicInteger wip = new AtomicInteger(); private final ConcurrentLinkedQueue<SynchronousReceiveWork> workQueue = new ConcurrentLinkedQueue<>(); private final ConcurrentLinkedDeque<ServiceBusReceivedMessage> bufferMessages = new ConcurrentLinkedDeque<>(); private final Object currentWorkLock = new Object(); private final ServiceBusReceiverAsyncClient asyncClient; private final boolean isPrefetchDisabled; private final Duration operationTimeout; private volatile SynchronousReceiveWork currentWork; /** * The number of requested messages. */ private volatile long requested; private static final AtomicLongFieldUpdater<SynchronousMessageSubscriber> REQUESTED = AtomicLongFieldUpdater.newUpdater(SynchronousMessageSubscriber.class, "requested"); private volatile Subscription upstream; private static final AtomicReferenceFieldUpdater<SynchronousMessageSubscriber, Subscription> UPSTREAM = AtomicReferenceFieldUpdater.newUpdater(SynchronousMessageSubscriber.class, Subscription.class, "upstream"); /** * Creates a synchronous subscriber with some initial work to queue. * * * @param asyncClient Client to update disposition of messages. * @param isPrefetchDisabled Indicates if the prefetch is disabled. * @param operationTimeout Timeout to wait for operation to complete. * @param initialWork Initial work to queue. * * <p> * When {@code isPrefetchDisabled} is true, we release the messages those received during the timespan * between the last terminated downstream and the next active downstream. * </p> * * @throws NullPointerException if {@code initialWork} is null. * @throws IllegalArgumentException if {@code initialWork.getNumberOfEvents()} is less than 1. 
*/ SynchronousMessageSubscriber(ServiceBusReceiverAsyncClient asyncClient, SynchronousReceiveWork initialWork, boolean isPrefetchDisabled, Duration operationTimeout) { this.asyncClient = Objects.requireNonNull(asyncClient, "'asyncClient' cannot be null."); this.operationTimeout = Objects.requireNonNull(operationTimeout, "'operationTimeout' cannot be null."); this.workQueue.add(Objects.requireNonNull(initialWork, "'initialWork' cannot be null.")); this.isPrefetchDisabled = isPrefetchDisabled; if (initialWork.getNumberOfEvents() < 1) { throw LOGGER.logExceptionAsError(new IllegalArgumentException( "'numberOfEvents' cannot be less than 1. Actual: " + initialWork.getNumberOfEvents())); } Operators.addCap(REQUESTED, this, initialWork.getNumberOfEvents()); } /** * On an initial subscription, will take the first work item, and request that amount of work for it. * * @param subscription Subscription for upstream. */ @Override protected void hookOnSubscribe(Subscription subscription) { if (!Operators.setOnce(UPSTREAM, this, subscription)) { LOGGER.warning("This should only be subscribed to once. Ignoring subscription."); return; } getOrUpdateCurrentWork(); subscription.request(REQUESTED.get(this)); } /** * Publishes the event to the current {@link SynchronousReceiveWork}. If that work item is complete, will dispose of * the subscriber. * * @param message Event to publish. */ @Override protected void hookOnNext(ServiceBusReceivedMessage message) { if (isTerminated()) { Operators.onNextDropped(message, Context.empty()); } else { bufferMessages.add(message); drain(); } } /** * Queue the work to be picked up by drain loop. * * @param work to be queued. 
*/ void queueWork(SynchronousReceiveWork work) { Objects.requireNonNull(work, "'work' cannot be null"); workQueue.add(work); LoggingEventBuilder logBuilder = LOGGER.atVerbose() .addKeyValue(WORK_ID_KEY, work.getId()) .addKeyValue("numberOfEvents", work.getNumberOfEvents()) .addKeyValue("timeout", work.getTimeout()); if (workQueue.peek() == work && (currentWork == null || currentWork.isTerminal())) { logBuilder.log("First work in queue. Requesting upstream if needed."); getOrUpdateCurrentWork(); } else { logBuilder.log("Queuing receive work."); } if (UPSTREAM.get(this) != null) { drain(); } } /** * Drain the work, only one thread can be in this loop at a time. */ private void drain() { if (wip.getAndIncrement() != 0) { return; } int missed = 1; while (missed != 0) { try { drainQueue(); } finally { missed = wip.addAndGet(-missed); } } } /*** * Drain the queue using a lock on current work in progress. */ /** * {@inheritDoc} */ @Override protected void hookOnError(Throwable throwable) { dispose("Errors occurred upstream", throwable); } @Override protected void hookOnCancel() { this.dispose(); } private boolean isTerminated() { if (UPSTREAM.get(this) == Operators.cancelledSubscription()) { return true; } return isDisposed.get(); } /** * Gets the current work item if it is not terminal and cleans up any existing timeout operations. * * @return Gets or sets the next work item. Null if there are no work items currently. */ private SynchronousReceiveWork getOrUpdateCurrentWork() { synchronized (currentWorkLock) { if (currentWork != null && !currentWork.isTerminal()) { return currentWork; } currentWork = workQueue.poll(); while (currentWork != null && currentWork.isTerminal()) { LOGGER.atVerbose() .addKeyValue(WORK_ID_KEY, currentWork.getId()) .addKeyValue("numberOfEvents", currentWork.getNumberOfEvents()) .log("This work from queue is terminal. 
Skip it."); currentWork = workQueue.poll(); } if (currentWork != null) { final SynchronousReceiveWork work = currentWork; LOGGER.atVerbose() .addKeyValue(WORK_ID_KEY, work.getId()) .addKeyValue("numberOfEvents", work.getNumberOfEvents()) .log("Current work updated."); work.start(); requestUpstream(work.getNumberOfEvents()); } return currentWork; } } /** * Adds additional credits upstream if {@code numberOfMessages} is greater than the number of {@code REQUESTED} * items. * * @param numberOfMessages Number of messages required downstream. */ private void requestUpstream(long numberOfMessages) { if (isTerminated()) { LOGGER.info("Cannot request more messages upstream. Subscriber is terminated."); return; } final Subscription subscription = UPSTREAM.get(this); if (subscription == null) { LOGGER.info("There is no upstream to request messages from."); return; } final long currentRequested = REQUESTED.get(this); final long difference = numberOfMessages - currentRequested; LOGGER.atVerbose() .addKeyValue(NUMBER_OF_REQUESTED_MESSAGES_KEY, currentRequested) .addKeyValue("numberOfMessages", numberOfMessages) .addKeyValue("difference", difference) .log("Requesting messages from upstream."); if (difference <= 0) { return; } Operators.addCap(REQUESTED, this, difference); subscription.request(difference); } @Override public void dispose() { super.dispose(); dispose("Upstream completed the receive work.", null); } private void dispose(String message, Throwable throwable) { super.dispose(); if (isDisposed.getAndSet(true)) { return; } synchronized (currentWorkLock) { if (currentWork != null) { currentWork.complete(message, throwable); currentWork = null; } SynchronousReceiveWork w = workQueue.poll(); while (w != null) { w.complete(message, throwable); w = workQueue.poll(); } } } /** * package-private method to check queue size. * * @return The current number of items in the queue. */ int getWorkQueueSize() { return this.workQueue.size(); } }
class SynchronousMessageSubscriber extends BaseSubscriber<ServiceBusReceivedMessage> { private static final ClientLogger LOGGER = new ClientLogger(SynchronousMessageSubscriber.class); private final AtomicBoolean isDisposed = new AtomicBoolean(); private final AtomicInteger wip = new AtomicInteger(); private final ConcurrentLinkedQueue<SynchronousReceiveWork> workQueue = new ConcurrentLinkedQueue<>(); private final ConcurrentLinkedDeque<ServiceBusReceivedMessage> bufferMessages = new ConcurrentLinkedDeque<>(); private final Object currentWorkLock = new Object(); private final ServiceBusReceiverAsyncClient asyncClient; private final boolean isPrefetchDisabled; private final Duration operationTimeout; private final boolean isReceiveDeleteMode; private volatile SynchronousReceiveWork currentWork; /** * The number of requested messages. */ private volatile long requested; private static final AtomicLongFieldUpdater<SynchronousMessageSubscriber> REQUESTED = AtomicLongFieldUpdater.newUpdater(SynchronousMessageSubscriber.class, "requested"); private volatile Subscription upstream; private static final AtomicReferenceFieldUpdater<SynchronousMessageSubscriber, Subscription> UPSTREAM = AtomicReferenceFieldUpdater.newUpdater(SynchronousMessageSubscriber.class, Subscription.class, "upstream"); /** * Creates a synchronous subscriber with some initial work to queue. * * * @param asyncClient Client to update disposition of messages. * @param isPrefetchDisabled Indicates if the prefetch is disabled. * @param operationTimeout Timeout to wait for operation to complete. * @param initialWork Initial work to queue. * * <p> * When {@code isPrefetchDisabled} is true, we release the messages those received during the timespan * between the last terminated downstream and the next active downstream. * </p> * * @throws NullPointerException if {@code initialWork} is null. * @throws IllegalArgumentException if {@code initialWork.getNumberOfEvents()} is less than 1. 
*/ SynchronousMessageSubscriber(ServiceBusReceiverAsyncClient asyncClient, SynchronousReceiveWork initialWork, boolean isPrefetchDisabled, Duration operationTimeout) { this.asyncClient = Objects.requireNonNull(asyncClient, "'asyncClient' cannot be null."); this.operationTimeout = Objects.requireNonNull(operationTimeout, "'operationTimeout' cannot be null."); this.workQueue.add(Objects.requireNonNull(initialWork, "'initialWork' cannot be null.")); this.isPrefetchDisabled = isPrefetchDisabled; this.isReceiveDeleteMode = asyncClient.getReceiverOptions().getReceiveMode() == ServiceBusReceiveMode.RECEIVE_AND_DELETE; if (initialWork.getNumberOfEvents() < 1) { throw LOGGER.logExceptionAsError(new IllegalArgumentException( "'numberOfEvents' cannot be less than 1. Actual: " + initialWork.getNumberOfEvents())); } Operators.addCap(REQUESTED, this, initialWork.getNumberOfEvents()); } /** * On an initial subscription, will take the first work item, and request that amount of work for it. * * @param subscription Subscription for upstream. */ @Override protected void hookOnSubscribe(Subscription subscription) { if (!Operators.setOnce(UPSTREAM, this, subscription)) { LOGGER.warning("This should only be subscribed to once. Ignoring subscription."); return; } getOrUpdateCurrentWork(); subscription.request(REQUESTED.get(this)); } /** * Publishes the event to the current {@link SynchronousReceiveWork}. If that work item is complete, will dispose of * the subscriber. * * @param message Event to publish. */ @Override protected void hookOnNext(ServiceBusReceivedMessage message) { if (isTerminated()) { Operators.onNextDropped(message, Context.empty()); } else { bufferMessages.add(message); drain(); } } /** * Queue the work to be picked up by drain loop. * * @param work to be queued. 
*/ void queueWork(SynchronousReceiveWork work) { Objects.requireNonNull(work, "'work' cannot be null"); workQueue.add(work); LoggingEventBuilder logBuilder = LOGGER.atVerbose() .addKeyValue(WORK_ID_KEY, work.getId()) .addKeyValue("numberOfEvents", work.getNumberOfEvents()) .addKeyValue("timeout", work.getTimeout()); if (workQueue.peek() == work && (currentWork == null || currentWork.isTerminal())) { logBuilder.log("First work in queue. Requesting upstream if needed."); getOrUpdateCurrentWork(); } else { logBuilder.log("Queuing receive work."); } if (UPSTREAM.get(this) != null) { drain(); } } /** * Drain the work, only one thread can be in this loop at a time. */ private void drain() { if (wip.getAndIncrement() != 0) { return; } int missed = 1; while (missed != 0) { try { drainQueue(); } finally { missed = wip.addAndGet(-missed); } } } /*** * Drain the queue using a lock on current work in progress. */ /** * {@inheritDoc} */ @Override protected void hookOnError(Throwable throwable) { dispose("Errors occurred upstream", throwable); } @Override protected void hookOnCancel() { this.dispose(); } private boolean isTerminated() { if (UPSTREAM.get(this) == Operators.cancelledSubscription()) { return true; } return isDisposed.get(); } /** * Gets the current work item if it is not terminal and cleans up any existing timeout operations. * * @return Gets or sets the next work item. Null if there are no work items currently. */ private SynchronousReceiveWork getOrUpdateCurrentWork() { synchronized (currentWorkLock) { if (currentWork != null && !currentWork.isTerminal()) { return currentWork; } currentWork = workQueue.poll(); while (currentWork != null && currentWork.isTerminal()) { LOGGER.atVerbose() .addKeyValue(WORK_ID_KEY, currentWork.getId()) .addKeyValue("numberOfEvents", currentWork.getNumberOfEvents()) .log("This work from queue is terminal. 
Skip it."); currentWork = workQueue.poll(); } if (currentWork != null) { final SynchronousReceiveWork work = currentWork; LOGGER.atVerbose() .addKeyValue(WORK_ID_KEY, work.getId()) .addKeyValue("numberOfEvents", work.getNumberOfEvents()) .log("Current work updated."); work.start(); requestUpstream(work.getNumberOfEvents()); } return currentWork; } } /** * Adds additional credits upstream if {@code numberOfMessages} is greater than the number of {@code REQUESTED} * items. * * @param numberOfMessages Number of messages required downstream. */ private void requestUpstream(long numberOfMessages) { if (isTerminated()) { LOGGER.info("Cannot request more messages upstream. Subscriber is terminated."); return; } final Subscription subscription = UPSTREAM.get(this); if (subscription == null) { LOGGER.info("There is no upstream to request messages from."); return; } final long currentRequested = REQUESTED.get(this); final long difference = numberOfMessages - currentRequested; LOGGER.atVerbose() .addKeyValue(NUMBER_OF_REQUESTED_MESSAGES_KEY, currentRequested) .addKeyValue("numberOfMessages", numberOfMessages) .addKeyValue("difference", difference) .log("Requesting messages from upstream."); if (difference <= 0) { return; } Operators.addCap(REQUESTED, this, difference); subscription.request(difference); } @Override public void dispose() { super.dispose(); dispose("Upstream completed the receive work.", null); } private void dispose(String message, Throwable throwable) { super.dispose(); if (isDisposed.getAndSet(true)) { return; } synchronized (currentWorkLock) { if (currentWork != null) { currentWork.complete(message, throwable); currentWork = null; } SynchronousReceiveWork w = workQueue.poll(); while (w != null) { w.complete(message, throwable); w = workQueue.poll(); } } } /** * package-private method to check queue size. * * @return The current number of items in the queue. */ int getWorkQueueSize() { return this.workQueue.size(); } }
there are three loop in this method: ```java while (numberRequested != numberConsumed) { ``` this loop will exit when re-buffer messages or numberConsumed equal requested. ```java while (!isEmitted) { ``` this loop will exit when emit the message or no current work. this is the loop you mentioned. ```java while (numberRequested != 0L && !isEmpty) { ``` this loop I mentioned above, in that scenario, it will never exit unless add a new queue work.
private void drainQueue() { if (isTerminated()) { return; } long numberRequested = REQUESTED.get(this); boolean isEmpty = bufferMessages.isEmpty(); SynchronousReceiveWork currentDownstream = null; while (numberRequested != 0L && !isEmpty) { if (isTerminated()) { break; } long numberConsumed = 0L; while (numberRequested != numberConsumed) { if (isEmpty || isTerminated()) { break; } final ServiceBusReceivedMessage message = bufferMessages.poll(); boolean isEmitted = false; while (!isEmitted) { currentDownstream = getOrUpdateCurrentWork(); if (currentDownstream == null) { break; } isEmitted = currentDownstream.emitNext(message); } if (!isEmitted) { if (isPrefetchDisabled && asyncClient.getReceiverOptions().getReceiveMode() == ServiceBusReceiveMode.PEEK_LOCK) { asyncClient.release(message).subscribe(__ -> { }, error -> LOGGER.atWarning() .addKeyValue(LOCK_TOKEN_KEY, message.getLockToken()) .log("Couldn't release the message.", error), () -> LOGGER.atVerbose() .addKeyValue(LOCK_TOKEN_KEY, message.getLockToken()) .log("Message successfully released.")); } else { bufferMessages.addFirst(message); break; } } numberConsumed++; isEmpty = bufferMessages.isEmpty(); } final long requestedMessages = REQUESTED.get(this); if (requestedMessages != Long.MAX_VALUE) { numberRequested = REQUESTED.addAndGet(this, -numberConsumed); } currentDownstream = getOrUpdateCurrentWork(); if (numberConsumed == 0 && currentDownstream == null) { break; } } if (numberRequested == 0L) { LOGGER.atVerbose() .log("Current work is completed. Schedule next work."); getOrUpdateCurrentWork(); } }
}
private void drainQueue() { if (isTerminated()) { return; } long numberRequested = REQUESTED.get(this); boolean isEmpty = bufferMessages.isEmpty(); SynchronousReceiveWork currentDownstream = null; while (numberRequested != 0L && !isEmpty) { if (isTerminated()) { break; } long numberConsumed = 0L; while (numberRequested != numberConsumed) { if (isEmpty || isTerminated()) { break; } final ServiceBusReceivedMessage message = bufferMessages.poll(); boolean isEmitted = false; while (!isEmitted) { currentDownstream = getOrUpdateCurrentWork(); if (currentDownstream == null && isReceiveDeleteMode) { bufferMessages.addFirst(message); return; } if (currentDownstream == null) { break; } isEmitted = currentDownstream.emitNext(message); } if (!isEmitted) { if (isPrefetchDisabled) { asyncClient.release(message).subscribe(__ -> { }, error -> LOGGER.atWarning() .addKeyValue(LOCK_TOKEN_KEY, message.getLockToken()) .log("Couldn't release the message.", error), () -> LOGGER.atVerbose() .addKeyValue(LOCK_TOKEN_KEY, message.getLockToken()) .log("Message successfully released.")); } else { bufferMessages.addFirst(message); break; } } numberConsumed++; isEmpty = bufferMessages.isEmpty(); } final long requestedMessages = REQUESTED.get(this); if (requestedMessages != Long.MAX_VALUE) { numberRequested = REQUESTED.addAndGet(this, -numberConsumed); } } if (numberRequested == 0L) { LOGGER.atVerbose() .log("Current work is completed. Schedule next work."); getOrUpdateCurrentWork(); } }
class SynchronousMessageSubscriber extends BaseSubscriber<ServiceBusReceivedMessage> { private static final ClientLogger LOGGER = new ClientLogger(SynchronousMessageSubscriber.class); private final AtomicBoolean isDisposed = new AtomicBoolean(); private final AtomicInteger wip = new AtomicInteger(); private final ConcurrentLinkedQueue<SynchronousReceiveWork> workQueue = new ConcurrentLinkedQueue<>(); private final ConcurrentLinkedDeque<ServiceBusReceivedMessage> bufferMessages = new ConcurrentLinkedDeque<>(); private final Object currentWorkLock = new Object(); private final ServiceBusReceiverAsyncClient asyncClient; private final boolean isPrefetchDisabled; private final Duration operationTimeout; private volatile SynchronousReceiveWork currentWork; /** * The number of requested messages. */ private volatile long requested; private static final AtomicLongFieldUpdater<SynchronousMessageSubscriber> REQUESTED = AtomicLongFieldUpdater.newUpdater(SynchronousMessageSubscriber.class, "requested"); private volatile Subscription upstream; private static final AtomicReferenceFieldUpdater<SynchronousMessageSubscriber, Subscription> UPSTREAM = AtomicReferenceFieldUpdater.newUpdater(SynchronousMessageSubscriber.class, Subscription.class, "upstream"); /** * Creates a synchronous subscriber with some initial work to queue. * * * @param asyncClient Client to update disposition of messages. * @param isPrefetchDisabled Indicates if the prefetch is disabled. * @param operationTimeout Timeout to wait for operation to complete. * @param initialWork Initial work to queue. * * <p> * When {@code isPrefetchDisabled} is true, we release the messages those received during the timespan * between the last terminated downstream and the next active downstream. * </p> * * @throws NullPointerException if {@code initialWork} is null. * @throws IllegalArgumentException if {@code initialWork.getNumberOfEvents()} is less than 1. 
*/ SynchronousMessageSubscriber(ServiceBusReceiverAsyncClient asyncClient, SynchronousReceiveWork initialWork, boolean isPrefetchDisabled, Duration operationTimeout) { this.asyncClient = Objects.requireNonNull(asyncClient, "'asyncClient' cannot be null."); this.operationTimeout = Objects.requireNonNull(operationTimeout, "'operationTimeout' cannot be null."); this.workQueue.add(Objects.requireNonNull(initialWork, "'initialWork' cannot be null.")); this.isPrefetchDisabled = isPrefetchDisabled; if (initialWork.getNumberOfEvents() < 1) { throw LOGGER.logExceptionAsError(new IllegalArgumentException( "'numberOfEvents' cannot be less than 1. Actual: " + initialWork.getNumberOfEvents())); } Operators.addCap(REQUESTED, this, initialWork.getNumberOfEvents()); } /** * On an initial subscription, will take the first work item, and request that amount of work for it. * * @param subscription Subscription for upstream. */ @Override protected void hookOnSubscribe(Subscription subscription) { if (!Operators.setOnce(UPSTREAM, this, subscription)) { LOGGER.warning("This should only be subscribed to once. Ignoring subscription."); return; } getOrUpdateCurrentWork(); subscription.request(REQUESTED.get(this)); } /** * Publishes the event to the current {@link SynchronousReceiveWork}. If that work item is complete, will dispose of * the subscriber. * * @param message Event to publish. */ @Override protected void hookOnNext(ServiceBusReceivedMessage message) { if (isTerminated()) { Operators.onNextDropped(message, Context.empty()); } else { bufferMessages.add(message); drain(); } } /** * Queue the work to be picked up by drain loop. * * @param work to be queued. 
*/ void queueWork(SynchronousReceiveWork work) { Objects.requireNonNull(work, "'work' cannot be null"); workQueue.add(work); LoggingEventBuilder logBuilder = LOGGER.atVerbose() .addKeyValue(WORK_ID_KEY, work.getId()) .addKeyValue("numberOfEvents", work.getNumberOfEvents()) .addKeyValue("timeout", work.getTimeout()); if (workQueue.peek() == work && (currentWork == null || currentWork.isTerminal())) { logBuilder.log("First work in queue. Requesting upstream if needed."); getOrUpdateCurrentWork(); } else { logBuilder.log("Queuing receive work."); } if (UPSTREAM.get(this) != null) { drain(); } } /** * Drain the work, only one thread can be in this loop at a time. */ private void drain() { if (wip.getAndIncrement() != 0) { return; } int missed = 1; while (missed != 0) { try { drainQueue(); } finally { missed = wip.addAndGet(-missed); } } } /*** * Drain the queue using a lock on current work in progress. */ /** * {@inheritDoc} */ @Override protected void hookOnError(Throwable throwable) { dispose("Errors occurred upstream", throwable); } @Override protected void hookOnCancel() { this.dispose(); } private boolean isTerminated() { if (UPSTREAM.get(this) == Operators.cancelledSubscription()) { return true; } return isDisposed.get(); } /** * Gets the current work item if it is not terminal and cleans up any existing timeout operations. * * @return Gets or sets the next work item. Null if there are no work items currently. */ private SynchronousReceiveWork getOrUpdateCurrentWork() { synchronized (currentWorkLock) { if (currentWork != null && !currentWork.isTerminal()) { return currentWork; } currentWork = workQueue.poll(); while (currentWork != null && currentWork.isTerminal()) { LOGGER.atVerbose() .addKeyValue(WORK_ID_KEY, currentWork.getId()) .addKeyValue("numberOfEvents", currentWork.getNumberOfEvents()) .log("This work from queue is terminal. 
Skip it."); currentWork = workQueue.poll(); } if (currentWork != null) { final SynchronousReceiveWork work = currentWork; LOGGER.atVerbose() .addKeyValue(WORK_ID_KEY, work.getId()) .addKeyValue("numberOfEvents", work.getNumberOfEvents()) .log("Current work updated."); work.start(); requestUpstream(work.getNumberOfEvents()); } return currentWork; } } /** * Adds additional credits upstream if {@code numberOfMessages} is greater than the number of {@code REQUESTED} * items. * * @param numberOfMessages Number of messages required downstream. */ private void requestUpstream(long numberOfMessages) { if (isTerminated()) { LOGGER.info("Cannot request more messages upstream. Subscriber is terminated."); return; } final Subscription subscription = UPSTREAM.get(this); if (subscription == null) { LOGGER.info("There is no upstream to request messages from."); return; } final long currentRequested = REQUESTED.get(this); final long difference = numberOfMessages - currentRequested; LOGGER.atVerbose() .addKeyValue(NUMBER_OF_REQUESTED_MESSAGES_KEY, currentRequested) .addKeyValue("numberOfMessages", numberOfMessages) .addKeyValue("difference", difference) .log("Requesting messages from upstream."); if (difference <= 0) { return; } Operators.addCap(REQUESTED, this, difference); subscription.request(difference); } @Override public void dispose() { super.dispose(); dispose("Upstream completed the receive work.", null); } private void dispose(String message, Throwable throwable) { super.dispose(); if (isDisposed.getAndSet(true)) { return; } synchronized (currentWorkLock) { if (currentWork != null) { currentWork.complete(message, throwable); currentWork = null; } SynchronousReceiveWork w = workQueue.poll(); while (w != null) { w.complete(message, throwable); w = workQueue.poll(); } } } /** * package-private method to check queue size. * * @return The current number of items in the queue. */ int getWorkQueueSize() { return this.workQueue.size(); } }
class SynchronousMessageSubscriber extends BaseSubscriber<ServiceBusReceivedMessage> { private static final ClientLogger LOGGER = new ClientLogger(SynchronousMessageSubscriber.class); private final AtomicBoolean isDisposed = new AtomicBoolean(); private final AtomicInteger wip = new AtomicInteger(); private final ConcurrentLinkedQueue<SynchronousReceiveWork> workQueue = new ConcurrentLinkedQueue<>(); private final ConcurrentLinkedDeque<ServiceBusReceivedMessage> bufferMessages = new ConcurrentLinkedDeque<>(); private final Object currentWorkLock = new Object(); private final ServiceBusReceiverAsyncClient asyncClient; private final boolean isPrefetchDisabled; private final Duration operationTimeout; private final boolean isReceiveDeleteMode; private volatile SynchronousReceiveWork currentWork; /** * The number of requested messages. */ private volatile long requested; private static final AtomicLongFieldUpdater<SynchronousMessageSubscriber> REQUESTED = AtomicLongFieldUpdater.newUpdater(SynchronousMessageSubscriber.class, "requested"); private volatile Subscription upstream; private static final AtomicReferenceFieldUpdater<SynchronousMessageSubscriber, Subscription> UPSTREAM = AtomicReferenceFieldUpdater.newUpdater(SynchronousMessageSubscriber.class, Subscription.class, "upstream"); /** * Creates a synchronous subscriber with some initial work to queue. * * * @param asyncClient Client to update disposition of messages. * @param isPrefetchDisabled Indicates if the prefetch is disabled. * @param operationTimeout Timeout to wait for operation to complete. * @param initialWork Initial work to queue. * * <p> * When {@code isPrefetchDisabled} is true, we release the messages those received during the timespan * between the last terminated downstream and the next active downstream. * </p> * * @throws NullPointerException if {@code initialWork} is null. * @throws IllegalArgumentException if {@code initialWork.getNumberOfEvents()} is less than 1. 
*/ SynchronousMessageSubscriber(ServiceBusReceiverAsyncClient asyncClient, SynchronousReceiveWork initialWork, boolean isPrefetchDisabled, Duration operationTimeout) { this.asyncClient = Objects.requireNonNull(asyncClient, "'asyncClient' cannot be null."); this.operationTimeout = Objects.requireNonNull(operationTimeout, "'operationTimeout' cannot be null."); this.workQueue.add(Objects.requireNonNull(initialWork, "'initialWork' cannot be null.")); this.isPrefetchDisabled = isPrefetchDisabled; this.isReceiveDeleteMode = asyncClient.getReceiverOptions().getReceiveMode() == ServiceBusReceiveMode.RECEIVE_AND_DELETE; if (initialWork.getNumberOfEvents() < 1) { throw LOGGER.logExceptionAsError(new IllegalArgumentException( "'numberOfEvents' cannot be less than 1. Actual: " + initialWork.getNumberOfEvents())); } Operators.addCap(REQUESTED, this, initialWork.getNumberOfEvents()); } /** * On an initial subscription, will take the first work item, and request that amount of work for it. * * @param subscription Subscription for upstream. */ @Override protected void hookOnSubscribe(Subscription subscription) { if (!Operators.setOnce(UPSTREAM, this, subscription)) { LOGGER.warning("This should only be subscribed to once. Ignoring subscription."); return; } getOrUpdateCurrentWork(); subscription.request(REQUESTED.get(this)); } /** * Publishes the event to the current {@link SynchronousReceiveWork}. If that work item is complete, will dispose of * the subscriber. * * @param message Event to publish. */ @Override protected void hookOnNext(ServiceBusReceivedMessage message) { if (isTerminated()) { Operators.onNextDropped(message, Context.empty()); } else { bufferMessages.add(message); drain(); } } /** * Queue the work to be picked up by drain loop. * * @param work to be queued. 
*/ void queueWork(SynchronousReceiveWork work) { Objects.requireNonNull(work, "'work' cannot be null"); workQueue.add(work); LoggingEventBuilder logBuilder = LOGGER.atVerbose() .addKeyValue(WORK_ID_KEY, work.getId()) .addKeyValue("numberOfEvents", work.getNumberOfEvents()) .addKeyValue("timeout", work.getTimeout()); if (workQueue.peek() == work && (currentWork == null || currentWork.isTerminal())) { logBuilder.log("First work in queue. Requesting upstream if needed."); getOrUpdateCurrentWork(); } else { logBuilder.log("Queuing receive work."); } if (UPSTREAM.get(this) != null) { drain(); } } /** * Drain the work, only one thread can be in this loop at a time. */ private void drain() { if (wip.getAndIncrement() != 0) { return; } int missed = 1; while (missed != 0) { try { drainQueue(); } finally { missed = wip.addAndGet(-missed); } } } /*** * Drain the queue using a lock on current work in progress. */ /** * {@inheritDoc} */ @Override protected void hookOnError(Throwable throwable) { dispose("Errors occurred upstream", throwable); } @Override protected void hookOnCancel() { this.dispose(); } private boolean isTerminated() { if (UPSTREAM.get(this) == Operators.cancelledSubscription()) { return true; } return isDisposed.get(); } /** * Gets the current work item if it is not terminal and cleans up any existing timeout operations. * * @return Gets or sets the next work item. Null if there are no work items currently. */ private SynchronousReceiveWork getOrUpdateCurrentWork() { synchronized (currentWorkLock) { if (currentWork != null && !currentWork.isTerminal()) { return currentWork; } currentWork = workQueue.poll(); while (currentWork != null && currentWork.isTerminal()) { LOGGER.atVerbose() .addKeyValue(WORK_ID_KEY, currentWork.getId()) .addKeyValue("numberOfEvents", currentWork.getNumberOfEvents()) .log("This work from queue is terminal. 
Skip it."); currentWork = workQueue.poll(); } if (currentWork != null) { final SynchronousReceiveWork work = currentWork; LOGGER.atVerbose() .addKeyValue(WORK_ID_KEY, work.getId()) .addKeyValue("numberOfEvents", work.getNumberOfEvents()) .log("Current work updated."); work.start(); requestUpstream(work.getNumberOfEvents()); } return currentWork; } } /** * Adds additional credits upstream if {@code numberOfMessages} is greater than the number of {@code REQUESTED} * items. * * @param numberOfMessages Number of messages required downstream. */ private void requestUpstream(long numberOfMessages) { if (isTerminated()) { LOGGER.info("Cannot request more messages upstream. Subscriber is terminated."); return; } final Subscription subscription = UPSTREAM.get(this); if (subscription == null) { LOGGER.info("There is no upstream to request messages from."); return; } final long currentRequested = REQUESTED.get(this); final long difference = numberOfMessages - currentRequested; LOGGER.atVerbose() .addKeyValue(NUMBER_OF_REQUESTED_MESSAGES_KEY, currentRequested) .addKeyValue("numberOfMessages", numberOfMessages) .addKeyValue("difference", difference) .log("Requesting messages from upstream."); if (difference <= 0) { return; } Operators.addCap(REQUESTED, this, difference); subscription.request(difference); } @Override public void dispose() { super.dispose(); dispose("Upstream completed the receive work.", null); } private void dispose(String message, Throwable throwable) { super.dispose(); if (isDisposed.getAndSet(true)) { return; } synchronized (currentWorkLock) { if (currentWork != null) { currentWork.complete(message, throwable); currentWork = null; } SynchronousReceiveWork w = workQueue.poll(); while (w != null) { w.complete(message, throwable); w = workQueue.poll(); } } } /** * package-private method to check queue size. * * @return The current number of items in the queue. */ int getWorkQueueSize() { return this.workQueue.size(); } }
should we remove the doFinally logic from the tests?
public void executeBulk_cancel() throws InterruptedException { int totalRequest = 100; this.container = createContainer(database); List<com.azure.cosmos.models.CosmosItemOperation> cosmosItemOperations = new ArrayList<>(); for (int i = 0; i < totalRequest; i++) { String partitionKey = UUID.randomUUID().toString(); BatchTestBase.TestDoc testDoc = this.populateTestDoc(partitionKey); cosmosItemOperations.add(CosmosBulkOperations.getCreateItemOperation(testDoc, new PartitionKey(partitionKey))); partitionKey = UUID.randomUUID().toString(); BatchTestBase.EventDoc eventDoc = new BatchTestBase.EventDoc(UUID.randomUUID().toString(), 2, 4, "type1", partitionKey); cosmosItemOperations.add(CosmosBulkOperations.getCreateItemOperation(eventDoc, new PartitionKey(partitionKey))); } com.azure.cosmos.models.CosmosItemOperation[] itemOperationsArray = new com.azure.cosmos.models.CosmosItemOperation[cosmosItemOperations.size()]; cosmosItemOperations.toArray(itemOperationsArray); CosmosBulkExecutionOptions cosmosBulkExecutionOptions = new CosmosBulkExecutionOptions(); Flux<CosmosItemOperation> inputFlux = Flux .fromArray(itemOperationsArray) .delayElements(Duration.ofMillis(100)); final BulkExecutor<BulkExecutorTest> executor = new BulkExecutor<>( container, inputFlux, cosmosBulkExecutionOptions); Flux<com.azure.cosmos.models.CosmosBulkOperationResponse<BulkExecutorTest>> bulkResponseFlux = Flux.deferContextual(context -> executor .execute() .doFinally((SignalType signal) -> { if (signal == SignalType.ON_COMPLETE) { logger.info("BulkExecutor.execute flux completed - executor.getItemsLeftSnapshot(), executor.getOperationContext()); } else { int itemsLeftSnapshot = executor.getItemsLeftSnapshot(); if (itemsLeftSnapshot > 0) { logger.info("BulkExecutor.execute flux terminated - Signal: {} - + "Context: {}", signal, itemsLeftSnapshot, executor.getOperationContext()); } else { logger.info("BulkExecutor.execute flux terminated - Signal: {} - + "Context: {}", signal, itemsLeftSnapshot, 
executor.getOperationContext()); } } executor.dispose(); })); Disposable disposable = bulkResponseFlux.subscribe(); disposable.dispose(); int iterations = 0; while (true) { assertThat(iterations < 100); if (executor.isDisposed()) { break; } Thread.sleep(10); iterations++; } }
.doFinally((SignalType signal) -> {
public void executeBulk_cancel() throws InterruptedException { int totalRequest = 100; this.container = createContainer(database); List<CosmosItemOperation> cosmosItemOperations = new ArrayList<>(); for (int i = 0; i < totalRequest; i++) { String partitionKey = UUID.randomUUID().toString(); BatchTestBase.TestDoc testDoc = this.populateTestDoc(partitionKey); cosmosItemOperations.add(CosmosBulkOperations.getCreateItemOperation(testDoc, new PartitionKey(partitionKey))); partitionKey = UUID.randomUUID().toString(); BatchTestBase.EventDoc eventDoc = new BatchTestBase.EventDoc(UUID.randomUUID().toString(), 2, 4, "type1", partitionKey); cosmosItemOperations.add(CosmosBulkOperations.getCreateItemOperation(eventDoc, new PartitionKey(partitionKey))); } CosmosItemOperation[] itemOperationsArray = new CosmosItemOperation[cosmosItemOperations.size()]; cosmosItemOperations.toArray(itemOperationsArray); CosmosBulkExecutionOptions cosmosBulkExecutionOptions = new CosmosBulkExecutionOptions(); Flux<CosmosItemOperation> inputFlux = Flux .fromArray(itemOperationsArray) .delayElements(Duration.ofMillis(100)); final BulkExecutor<BulkExecutorTest> executor = new BulkExecutor<>( container, inputFlux, cosmosBulkExecutionOptions); Flux<com.azure.cosmos.models.CosmosBulkOperationResponse<BulkExecutorTest>> bulkResponseFlux = Flux.deferContextual(context -> executor.execute()); Disposable disposable = bulkResponseFlux.subscribe(); disposable.dispose(); int iterations = 0; while (true) { assertThat(iterations < 100); if (executor.isDisposed()) { break; } Thread.sleep(10); iterations++; } }
class BulkExecutorTest extends BatchTestBase { private CosmosAsyncClient client; private CosmosAsyncContainer container; private CosmosAsyncDatabase database; private String preExistingDatabaseId = CosmosDatabaseForTest.generateId(); @Factory(dataProvider = "clientBuilders") public BulkExecutorTest(CosmosClientBuilder clientBuilder) { super(clientBuilder); } @AfterClass(groups = { "emulator" }, timeOut = 3 * SHUTDOWN_TIMEOUT, alwaysRun = true) public void afterClass() { logger.info("starting ...."); safeDeleteDatabase(database); safeCloseClient(client); } @AfterMethod(groups = { "emulator" }) public void afterTest() throws Exception { if (this.container != null) { try { this.container.delete().block(); } catch (CosmosException error) { if (error.getStatusCode() != 404) { throw error; } } } } @BeforeMethod(groups = { "emulator" }) public void beforeTest() throws Exception { this.container = null; } @BeforeClass(groups = { "emulator" }, timeOut = SETUP_TIMEOUT) public void before_CosmosContainerTest() { client = getClientBuilder().buildAsyncClient(); database = createDatabase(client, preExistingDatabaseId); } static protected CosmosAsyncContainer createContainer(CosmosAsyncDatabase database) { String collectionName = UUID.randomUUID().toString(); CosmosContainerProperties containerProperties = getCollectionDefinition(collectionName); database.createContainer(containerProperties).block(); return database.getContainer(collectionName); } static protected CosmosAsyncDatabase createDatabase(CosmosAsyncClient client, String databaseId) { CosmosDatabaseProperties databaseSettings = new CosmosDatabaseProperties(databaseId); client.createDatabase(databaseSettings).block(); return client.getDatabase(databaseSettings.getId()); } static protected CosmosAsyncDatabase createDatabaseIfNotExists(CosmosAsyncClient client, String databaseId) { List<CosmosDatabaseProperties> res = client.queryDatabases(String.format("SELECT * FROM r where r.id = '%s'", databaseId), null) .collectList() 
.block(); if (res.size() != 0) { CosmosAsyncDatabase database = client.getDatabase(databaseId); database.read().block(); return database; } else { CosmosDatabaseProperties databaseSettings = new CosmosDatabaseProperties(databaseId); client.createDatabase(databaseSettings).block(); return client.getDatabase(databaseSettings.getId()); } } @Test(groups = { "emulator" }, timeOut = TIMEOUT) @Test(groups = { "emulator" }, timeOut = TIMEOUT) public void executeBulk_complete() throws InterruptedException { int totalRequest = 10; this.container = createContainer(database); List<com.azure.cosmos.models.CosmosItemOperation> cosmosItemOperations = new ArrayList<>(); for (int i = 0; i < totalRequest; i++) { String partitionKey = UUID.randomUUID().toString(); BatchTestBase.TestDoc testDoc = this.populateTestDoc(partitionKey); cosmosItemOperations.add(CosmosBulkOperations.getCreateItemOperation(testDoc, new PartitionKey(partitionKey))); partitionKey = UUID.randomUUID().toString(); BatchTestBase.EventDoc eventDoc = new BatchTestBase.EventDoc(UUID.randomUUID().toString(), 2, 4, "type1", partitionKey); cosmosItemOperations.add(CosmosBulkOperations.getCreateItemOperation(eventDoc, new PartitionKey(partitionKey))); } com.azure.cosmos.models.CosmosItemOperation[] itemOperationsArray = new com.azure.cosmos.models.CosmosItemOperation[cosmosItemOperations.size()]; cosmosItemOperations.toArray(itemOperationsArray); CosmosBulkExecutionOptions cosmosBulkExecutionOptions = new CosmosBulkExecutionOptions(); final BulkExecutor<BulkExecutorTest> executor = new BulkExecutor<>( container, Flux.fromArray(itemOperationsArray), cosmosBulkExecutionOptions); Flux<com.azure.cosmos.models.CosmosBulkOperationResponse<BulkExecutorTest>> bulkResponseFlux = Flux.deferContextual(context -> executor .execute() .doFinally((SignalType signal) -> { if (signal == SignalType.ON_COMPLETE) { logger.debug("BulkExecutor.execute flux completed - executor.getItemsLeftSnapshot(), executor.getOperationContext()); } else { 
int itemsLeftSnapshot = executor.getItemsLeftSnapshot(); if (itemsLeftSnapshot > 0) { logger.info("BulkExecutor.execute flux terminated - Signal: {} - + "Context: {}", signal, itemsLeftSnapshot, executor.getOperationContext()); } else { logger.debug("BulkExecutor.execute flux terminated - Signal: {} - + "Context: {}", signal, itemsLeftSnapshot, executor.getOperationContext()); } } executor.dispose(); })); Mono<List<CosmosBulkOperationResponse<BulkExecutorTest>>> convertToListMono = bulkResponseFlux .collect(Collectors.toList()); List<CosmosBulkOperationResponse<BulkExecutorTest>> bulkResponse = convertToListMono.block(); assertThat(bulkResponse.size()).isEqualTo(totalRequest * 2); for (com.azure.cosmos.models.CosmosBulkOperationResponse<BulkExecutorTest> cosmosBulkOperationResponse : bulkResponse) { com.azure.cosmos.models.CosmosBulkItemResponse cosmosBulkItemResponse = cosmosBulkOperationResponse.getResponse(); assertThat(cosmosBulkItemResponse.getStatusCode()).isEqualTo(HttpResponseStatus.CREATED.code()); assertThat(cosmosBulkItemResponse.getRequestCharge()).isGreaterThan(0); assertThat(cosmosBulkItemResponse.getCosmosDiagnostics().toString()).isNotNull(); assertThat(cosmosBulkItemResponse.getSessionToken()).isNotNull(); assertThat(cosmosBulkItemResponse.getActivityId()).isNotNull(); assertThat(cosmosBulkItemResponse.getRequestCharge()).isNotNull(); } int iterations = 0; while (true) { assertThat(iterations < 100); if (executor.isDisposed()) { break; } Thread.sleep(10); iterations++; } } static protected void safeClose(CosmosAsyncClient client) { if (client != null) { try { client.close(); } catch (Exception e) { logger.error("failed to close client", e); } } } static protected void safeCloseAsync(CosmosAsyncClient client) { if (client != null) { new Thread(() -> { try { client.close(); } catch (Exception e) { logger.error("failed to close client", e); } }).start(); } } static protected void safeCloseClient(CosmosAsyncClient client) { if (client != null) { try { 
logger.info("closing client ..."); client.close(); logger.info("closing client completed"); } catch (Exception e) { logger.error("failed to close client", e); } } } static protected void safeDeleteAllCollections(CosmosAsyncDatabase database) { if (database != null) { List<CosmosContainerProperties> collections = database.readAllContainers() .collectList() .block(); for (CosmosContainerProperties collection : collections) { database.getContainer(collection.getId()).delete().block(); } } } static protected void safeDeleteCollection(CosmosAsyncContainer collection) { if (collection != null) { try { collection.delete().block(); } catch (Exception e) { } } } static protected void safeDeleteCollection(CosmosAsyncDatabase database, String collectionId) { if (database != null && collectionId != null) { try { database.getContainer(collectionId).delete().block(); } catch (Exception e) { } } } static protected void safeDeleteDatabase(CosmosAsyncDatabase database) { if (database != null) { try { database.delete().block(); } catch (Exception e) { } } } static protected void safeDeleteSyncDatabase(CosmosDatabase database) { if (database != null) { try { logger.info("attempting to delete database ...."); database.delete(); logger.info("database deletion completed"); } catch (Exception e) { logger.error("failed to delete sync database", e); } } } }
class BulkExecutorTest extends BatchTestBase { private CosmosAsyncClient client; private CosmosAsyncContainer container; private CosmosAsyncDatabase database; private String preExistingDatabaseId = CosmosDatabaseForTest.generateId(); @Factory(dataProvider = "clientBuilders") public BulkExecutorTest(CosmosClientBuilder clientBuilder) { super(clientBuilder); } @AfterClass(groups = { "emulator" }, timeOut = 3 * SHUTDOWN_TIMEOUT, alwaysRun = true) public void afterClass() { logger.info("starting ...."); safeDeleteDatabase(database); safeCloseAsync(client); } @AfterMethod(groups = { "emulator" }) public void afterTest() throws Exception { if (this.container != null) { try { this.container.delete().block(); } catch (CosmosException error) { if (error.getStatusCode() != 404) { throw error; } } } } @BeforeMethod(groups = { "emulator" }) public void beforeTest() throws Exception { this.container = null; } @BeforeClass(groups = { "emulator" }, timeOut = SETUP_TIMEOUT) public void before_CosmosContainerTest() { client = getClientBuilder().buildAsyncClient(); database = createDatabase(client, preExistingDatabaseId); } static protected CosmosAsyncContainer createContainer(CosmosAsyncDatabase database) { String collectionName = UUID.randomUUID().toString(); CosmosContainerProperties containerProperties = getCollectionDefinition(collectionName); database.createContainer(containerProperties).block(); return database.getContainer(collectionName); } @Test(groups = { "emulator" }, timeOut = TIMEOUT) @Test(groups = { "emulator" }, timeOut = TIMEOUT) public void executeBulk_complete() throws InterruptedException { int totalRequest = 10; this.container = createContainer(database); List<CosmosItemOperation> cosmosItemOperations = new ArrayList<>(); for (int i = 0; i < totalRequest; i++) { String partitionKey = UUID.randomUUID().toString(); BatchTestBase.TestDoc testDoc = this.populateTestDoc(partitionKey); cosmosItemOperations.add(CosmosBulkOperations.getCreateItemOperation(testDoc, new 
PartitionKey(partitionKey))); partitionKey = UUID.randomUUID().toString(); BatchTestBase.EventDoc eventDoc = new BatchTestBase.EventDoc(UUID.randomUUID().toString(), 2, 4, "type1", partitionKey); cosmosItemOperations.add(CosmosBulkOperations.getCreateItemOperation(eventDoc, new PartitionKey(partitionKey))); } CosmosItemOperation[] itemOperationsArray = new CosmosItemOperation[cosmosItemOperations.size()]; cosmosItemOperations.toArray(itemOperationsArray); CosmosBulkExecutionOptions cosmosBulkExecutionOptions = new CosmosBulkExecutionOptions(); final BulkExecutor<BulkExecutorTest> executor = new BulkExecutor<>( container, Flux.fromArray(itemOperationsArray), cosmosBulkExecutionOptions); Flux<com.azure.cosmos.models.CosmosBulkOperationResponse<BulkExecutorTest>> bulkResponseFlux = Flux.deferContextual(context -> executor.execute()); Mono<List<CosmosBulkOperationResponse<BulkExecutorTest>>> convertToListMono = bulkResponseFlux .collect(Collectors.toList()); List<CosmosBulkOperationResponse<BulkExecutorTest>> bulkResponse = convertToListMono.block(); assertThat(bulkResponse.size()).isEqualTo(totalRequest * 2); for (com.azure.cosmos.models.CosmosBulkOperationResponse<BulkExecutorTest> cosmosBulkOperationResponse : bulkResponse) { com.azure.cosmos.models.CosmosBulkItemResponse cosmosBulkItemResponse = cosmosBulkOperationResponse.getResponse(); assertThat(cosmosBulkItemResponse.getStatusCode()).isEqualTo(HttpResponseStatus.CREATED.code()); assertThat(cosmosBulkItemResponse.getRequestCharge()).isGreaterThan(0); assertThat(cosmosBulkItemResponse.getCosmosDiagnostics().toString()).isNotNull(); assertThat(cosmosBulkItemResponse.getSessionToken()).isNotNull(); assertThat(cosmosBulkItemResponse.getActivityId()).isNotNull(); assertThat(cosmosBulkItemResponse.getRequestCharge()).isNotNull(); } int iterations = 0; while (true) { assertThat(iterations < 100); if (executor.isDisposed()) { break; } Thread.sleep(10); iterations++; } } }
this means in early code, if there is no current queue work, requested is not 0 and buffer message queue is not empty, the loop cannot exit, it will be hanging there.
private void drainQueue() { if (isTerminated()) { return; } long numberRequested = REQUESTED.get(this); boolean isEmpty = bufferMessages.isEmpty(); SynchronousReceiveWork currentDownstream = null; while (numberRequested != 0L && !isEmpty) { if (isTerminated()) { break; } long numberConsumed = 0L; while (numberRequested != numberConsumed) { if (isEmpty || isTerminated()) { break; } final ServiceBusReceivedMessage message = bufferMessages.poll(); boolean isEmitted = false; while (!isEmitted) { currentDownstream = getOrUpdateCurrentWork(); if (currentDownstream == null) { break; } isEmitted = currentDownstream.emitNext(message); } if (!isEmitted) { if (isPrefetchDisabled && asyncClient.getReceiverOptions().getReceiveMode() == ServiceBusReceiveMode.PEEK_LOCK) { asyncClient.release(message).subscribe(__ -> { }, error -> LOGGER.atWarning() .addKeyValue(LOCK_TOKEN_KEY, message.getLockToken()) .log("Couldn't release the message.", error), () -> LOGGER.atVerbose() .addKeyValue(LOCK_TOKEN_KEY, message.getLockToken()) .log("Message successfully released.")); } else { bufferMessages.addFirst(message); break; } } numberConsumed++; isEmpty = bufferMessages.isEmpty(); } final long requestedMessages = REQUESTED.get(this); if (requestedMessages != Long.MAX_VALUE) { numberRequested = REQUESTED.addAndGet(this, -numberConsumed); } currentDownstream = getOrUpdateCurrentWork(); if (numberConsumed == 0 && currentDownstream == null) { break; } } if (numberRequested == 0L) { LOGGER.atVerbose() .log("Current work is completed. Schedule next work."); getOrUpdateCurrentWork(); } }
}
private void drainQueue() { if (isTerminated()) { return; } long numberRequested = REQUESTED.get(this); boolean isEmpty = bufferMessages.isEmpty(); SynchronousReceiveWork currentDownstream = null; while (numberRequested != 0L && !isEmpty) { if (isTerminated()) { break; } long numberConsumed = 0L; while (numberRequested != numberConsumed) { if (isEmpty || isTerminated()) { break; } final ServiceBusReceivedMessage message = bufferMessages.poll(); boolean isEmitted = false; while (!isEmitted) { currentDownstream = getOrUpdateCurrentWork(); if (currentDownstream == null && isReceiveDeleteMode) { bufferMessages.addFirst(message); return; } if (currentDownstream == null) { break; } isEmitted = currentDownstream.emitNext(message); } if (!isEmitted) { if (isPrefetchDisabled) { asyncClient.release(message).subscribe(__ -> { }, error -> LOGGER.atWarning() .addKeyValue(LOCK_TOKEN_KEY, message.getLockToken()) .log("Couldn't release the message.", error), () -> LOGGER.atVerbose() .addKeyValue(LOCK_TOKEN_KEY, message.getLockToken()) .log("Message successfully released.")); } else { bufferMessages.addFirst(message); break; } } numberConsumed++; isEmpty = bufferMessages.isEmpty(); } final long requestedMessages = REQUESTED.get(this); if (requestedMessages != Long.MAX_VALUE) { numberRequested = REQUESTED.addAndGet(this, -numberConsumed); } } if (numberRequested == 0L) { LOGGER.atVerbose() .log("Current work is completed. Schedule next work."); getOrUpdateCurrentWork(); } }
class SynchronousMessageSubscriber extends BaseSubscriber<ServiceBusReceivedMessage> { private static final ClientLogger LOGGER = new ClientLogger(SynchronousMessageSubscriber.class); private final AtomicBoolean isDisposed = new AtomicBoolean(); private final AtomicInteger wip = new AtomicInteger(); private final ConcurrentLinkedQueue<SynchronousReceiveWork> workQueue = new ConcurrentLinkedQueue<>(); private final ConcurrentLinkedDeque<ServiceBusReceivedMessage> bufferMessages = new ConcurrentLinkedDeque<>(); private final Object currentWorkLock = new Object(); private final ServiceBusReceiverAsyncClient asyncClient; private final boolean isPrefetchDisabled; private final Duration operationTimeout; private volatile SynchronousReceiveWork currentWork; /** * The number of requested messages. */ private volatile long requested; private static final AtomicLongFieldUpdater<SynchronousMessageSubscriber> REQUESTED = AtomicLongFieldUpdater.newUpdater(SynchronousMessageSubscriber.class, "requested"); private volatile Subscription upstream; private static final AtomicReferenceFieldUpdater<SynchronousMessageSubscriber, Subscription> UPSTREAM = AtomicReferenceFieldUpdater.newUpdater(SynchronousMessageSubscriber.class, Subscription.class, "upstream"); /** * Creates a synchronous subscriber with some initial work to queue. * * * @param asyncClient Client to update disposition of messages. * @param isPrefetchDisabled Indicates if the prefetch is disabled. * @param operationTimeout Timeout to wait for operation to complete. * @param initialWork Initial work to queue. * * <p> * When {@code isPrefetchDisabled} is true, we release the messages those received during the timespan * between the last terminated downstream and the next active downstream. * </p> * * @throws NullPointerException if {@code initialWork} is null. * @throws IllegalArgumentException if {@code initialWork.getNumberOfEvents()} is less than 1. 
*/ SynchronousMessageSubscriber(ServiceBusReceiverAsyncClient asyncClient, SynchronousReceiveWork initialWork, boolean isPrefetchDisabled, Duration operationTimeout) { this.asyncClient = Objects.requireNonNull(asyncClient, "'asyncClient' cannot be null."); this.operationTimeout = Objects.requireNonNull(operationTimeout, "'operationTimeout' cannot be null."); this.workQueue.add(Objects.requireNonNull(initialWork, "'initialWork' cannot be null.")); this.isPrefetchDisabled = isPrefetchDisabled; if (initialWork.getNumberOfEvents() < 1) { throw LOGGER.logExceptionAsError(new IllegalArgumentException( "'numberOfEvents' cannot be less than 1. Actual: " + initialWork.getNumberOfEvents())); } Operators.addCap(REQUESTED, this, initialWork.getNumberOfEvents()); } /** * On an initial subscription, will take the first work item, and request that amount of work for it. * * @param subscription Subscription for upstream. */ @Override protected void hookOnSubscribe(Subscription subscription) { if (!Operators.setOnce(UPSTREAM, this, subscription)) { LOGGER.warning("This should only be subscribed to once. Ignoring subscription."); return; } getOrUpdateCurrentWork(); subscription.request(REQUESTED.get(this)); } /** * Publishes the event to the current {@link SynchronousReceiveWork}. If that work item is complete, will dispose of * the subscriber. * * @param message Event to publish. */ @Override protected void hookOnNext(ServiceBusReceivedMessage message) { if (isTerminated()) { Operators.onNextDropped(message, Context.empty()); } else { bufferMessages.add(message); drain(); } } /** * Queue the work to be picked up by drain loop. * * @param work to be queued. 
*/ void queueWork(SynchronousReceiveWork work) { Objects.requireNonNull(work, "'work' cannot be null"); workQueue.add(work); LoggingEventBuilder logBuilder = LOGGER.atVerbose() .addKeyValue(WORK_ID_KEY, work.getId()) .addKeyValue("numberOfEvents", work.getNumberOfEvents()) .addKeyValue("timeout", work.getTimeout()); if (workQueue.peek() == work && (currentWork == null || currentWork.isTerminal())) { logBuilder.log("First work in queue. Requesting upstream if needed."); getOrUpdateCurrentWork(); } else { logBuilder.log("Queuing receive work."); } if (UPSTREAM.get(this) != null) { drain(); } } /** * Drain the work, only one thread can be in this loop at a time. */ private void drain() { if (wip.getAndIncrement() != 0) { return; } int missed = 1; while (missed != 0) { try { drainQueue(); } finally { missed = wip.addAndGet(-missed); } } } /*** * Drain the queue using a lock on current work in progress. */ /** * {@inheritDoc} */ @Override protected void hookOnError(Throwable throwable) { dispose("Errors occurred upstream", throwable); } @Override protected void hookOnCancel() { this.dispose(); } private boolean isTerminated() { if (UPSTREAM.get(this) == Operators.cancelledSubscription()) { return true; } return isDisposed.get(); } /** * Gets the current work item if it is not terminal and cleans up any existing timeout operations. * * @return Gets or sets the next work item. Null if there are no work items currently. */ private SynchronousReceiveWork getOrUpdateCurrentWork() { synchronized (currentWorkLock) { if (currentWork != null && !currentWork.isTerminal()) { return currentWork; } currentWork = workQueue.poll(); while (currentWork != null && currentWork.isTerminal()) { LOGGER.atVerbose() .addKeyValue(WORK_ID_KEY, currentWork.getId()) .addKeyValue("numberOfEvents", currentWork.getNumberOfEvents()) .log("This work from queue is terminal. 
Skip it."); currentWork = workQueue.poll(); } if (currentWork != null) { final SynchronousReceiveWork work = currentWork; LOGGER.atVerbose() .addKeyValue(WORK_ID_KEY, work.getId()) .addKeyValue("numberOfEvents", work.getNumberOfEvents()) .log("Current work updated."); work.start(); requestUpstream(work.getNumberOfEvents()); } return currentWork; } } /** * Adds additional credits upstream if {@code numberOfMessages} is greater than the number of {@code REQUESTED} * items. * * @param numberOfMessages Number of messages required downstream. */ private void requestUpstream(long numberOfMessages) { if (isTerminated()) { LOGGER.info("Cannot request more messages upstream. Subscriber is terminated."); return; } final Subscription subscription = UPSTREAM.get(this); if (subscription == null) { LOGGER.info("There is no upstream to request messages from."); return; } final long currentRequested = REQUESTED.get(this); final long difference = numberOfMessages - currentRequested; LOGGER.atVerbose() .addKeyValue(NUMBER_OF_REQUESTED_MESSAGES_KEY, currentRequested) .addKeyValue("numberOfMessages", numberOfMessages) .addKeyValue("difference", difference) .log("Requesting messages from upstream."); if (difference <= 0) { return; } Operators.addCap(REQUESTED, this, difference); subscription.request(difference); } @Override public void dispose() { super.dispose(); dispose("Upstream completed the receive work.", null); } private void dispose(String message, Throwable throwable) { super.dispose(); if (isDisposed.getAndSet(true)) { return; } synchronized (currentWorkLock) { if (currentWork != null) { currentWork.complete(message, throwable); currentWork = null; } SynchronousReceiveWork w = workQueue.poll(); while (w != null) { w.complete(message, throwable); w = workQueue.poll(); } } } /** * package-private method to check queue size. * * @return The current number of items in the queue. */ int getWorkQueueSize() { return this.workQueue.size(); } }
class SynchronousMessageSubscriber extends BaseSubscriber<ServiceBusReceivedMessage> { private static final ClientLogger LOGGER = new ClientLogger(SynchronousMessageSubscriber.class); private final AtomicBoolean isDisposed = new AtomicBoolean(); private final AtomicInteger wip = new AtomicInteger(); private final ConcurrentLinkedQueue<SynchronousReceiveWork> workQueue = new ConcurrentLinkedQueue<>(); private final ConcurrentLinkedDeque<ServiceBusReceivedMessage> bufferMessages = new ConcurrentLinkedDeque<>(); private final Object currentWorkLock = new Object(); private final ServiceBusReceiverAsyncClient asyncClient; private final boolean isPrefetchDisabled; private final Duration operationTimeout; private final boolean isReceiveDeleteMode; private volatile SynchronousReceiveWork currentWork; /** * The number of requested messages. */ private volatile long requested; private static final AtomicLongFieldUpdater<SynchronousMessageSubscriber> REQUESTED = AtomicLongFieldUpdater.newUpdater(SynchronousMessageSubscriber.class, "requested"); private volatile Subscription upstream; private static final AtomicReferenceFieldUpdater<SynchronousMessageSubscriber, Subscription> UPSTREAM = AtomicReferenceFieldUpdater.newUpdater(SynchronousMessageSubscriber.class, Subscription.class, "upstream"); /** * Creates a synchronous subscriber with some initial work to queue. * * * @param asyncClient Client to update disposition of messages. * @param isPrefetchDisabled Indicates if the prefetch is disabled. * @param operationTimeout Timeout to wait for operation to complete. * @param initialWork Initial work to queue. * * <p> * When {@code isPrefetchDisabled} is true, we release the messages those received during the timespan * between the last terminated downstream and the next active downstream. * </p> * * @throws NullPointerException if {@code initialWork} is null. * @throws IllegalArgumentException if {@code initialWork.getNumberOfEvents()} is less than 1. 
*/ SynchronousMessageSubscriber(ServiceBusReceiverAsyncClient asyncClient, SynchronousReceiveWork initialWork, boolean isPrefetchDisabled, Duration operationTimeout) { this.asyncClient = Objects.requireNonNull(asyncClient, "'asyncClient' cannot be null."); this.operationTimeout = Objects.requireNonNull(operationTimeout, "'operationTimeout' cannot be null."); this.workQueue.add(Objects.requireNonNull(initialWork, "'initialWork' cannot be null.")); this.isPrefetchDisabled = isPrefetchDisabled; this.isReceiveDeleteMode = asyncClient.getReceiverOptions().getReceiveMode() == ServiceBusReceiveMode.RECEIVE_AND_DELETE; if (initialWork.getNumberOfEvents() < 1) { throw LOGGER.logExceptionAsError(new IllegalArgumentException( "'numberOfEvents' cannot be less than 1. Actual: " + initialWork.getNumberOfEvents())); } Operators.addCap(REQUESTED, this, initialWork.getNumberOfEvents()); } /** * On an initial subscription, will take the first work item, and request that amount of work for it. * * @param subscription Subscription for upstream. */ @Override protected void hookOnSubscribe(Subscription subscription) { if (!Operators.setOnce(UPSTREAM, this, subscription)) { LOGGER.warning("This should only be subscribed to once. Ignoring subscription."); return; } getOrUpdateCurrentWork(); subscription.request(REQUESTED.get(this)); } /** * Publishes the event to the current {@link SynchronousReceiveWork}. If that work item is complete, will dispose of * the subscriber. * * @param message Event to publish. */ @Override protected void hookOnNext(ServiceBusReceivedMessage message) { if (isTerminated()) { Operators.onNextDropped(message, Context.empty()); } else { bufferMessages.add(message); drain(); } } /** * Queue the work to be picked up by drain loop. * * @param work to be queued. 
*/ void queueWork(SynchronousReceiveWork work) { Objects.requireNonNull(work, "'work' cannot be null"); workQueue.add(work); LoggingEventBuilder logBuilder = LOGGER.atVerbose() .addKeyValue(WORK_ID_KEY, work.getId()) .addKeyValue("numberOfEvents", work.getNumberOfEvents()) .addKeyValue("timeout", work.getTimeout()); if (workQueue.peek() == work && (currentWork == null || currentWork.isTerminal())) { logBuilder.log("First work in queue. Requesting upstream if needed."); getOrUpdateCurrentWork(); } else { logBuilder.log("Queuing receive work."); } if (UPSTREAM.get(this) != null) { drain(); } } /** * Drain the work, only one thread can be in this loop at a time. */ private void drain() { if (wip.getAndIncrement() != 0) { return; } int missed = 1; while (missed != 0) { try { drainQueue(); } finally { missed = wip.addAndGet(-missed); } } } /*** * Drain the queue using a lock on current work in progress. */ /** * {@inheritDoc} */ @Override protected void hookOnError(Throwable throwable) { dispose("Errors occurred upstream", throwable); } @Override protected void hookOnCancel() { this.dispose(); } private boolean isTerminated() { if (UPSTREAM.get(this) == Operators.cancelledSubscription()) { return true; } return isDisposed.get(); } /** * Gets the current work item if it is not terminal and cleans up any existing timeout operations. * * @return Gets or sets the next work item. Null if there are no work items currently. */ private SynchronousReceiveWork getOrUpdateCurrentWork() { synchronized (currentWorkLock) { if (currentWork != null && !currentWork.isTerminal()) { return currentWork; } currentWork = workQueue.poll(); while (currentWork != null && currentWork.isTerminal()) { LOGGER.atVerbose() .addKeyValue(WORK_ID_KEY, currentWork.getId()) .addKeyValue("numberOfEvents", currentWork.getNumberOfEvents()) .log("This work from queue is terminal. 
Skip it."); currentWork = workQueue.poll(); } if (currentWork != null) { final SynchronousReceiveWork work = currentWork; LOGGER.atVerbose() .addKeyValue(WORK_ID_KEY, work.getId()) .addKeyValue("numberOfEvents", work.getNumberOfEvents()) .log("Current work updated."); work.start(); requestUpstream(work.getNumberOfEvents()); } return currentWork; } } /** * Adds additional credits upstream if {@code numberOfMessages} is greater than the number of {@code REQUESTED} * items. * * @param numberOfMessages Number of messages required downstream. */ private void requestUpstream(long numberOfMessages) { if (isTerminated()) { LOGGER.info("Cannot request more messages upstream. Subscriber is terminated."); return; } final Subscription subscription = UPSTREAM.get(this); if (subscription == null) { LOGGER.info("There is no upstream to request messages from."); return; } final long currentRequested = REQUESTED.get(this); final long difference = numberOfMessages - currentRequested; LOGGER.atVerbose() .addKeyValue(NUMBER_OF_REQUESTED_MESSAGES_KEY, currentRequested) .addKeyValue("numberOfMessages", numberOfMessages) .addKeyValue("difference", difference) .log("Requesting messages from upstream."); if (difference <= 0) { return; } Operators.addCap(REQUESTED, this, difference); subscription.request(difference); } @Override public void dispose() { super.dispose(); dispose("Upstream completed the receive work.", null); } private void dispose(String message, Throwable throwable) { super.dispose(); if (isDisposed.getAndSet(true)) { return; } synchronized (currentWorkLock) { if (currentWork != null) { currentWork.complete(message, throwable); currentWork = null; } SynchronousReceiveWork w = workQueue.poll(); while (w != null) { w.complete(message, throwable); w = workQueue.poll(); } } } /** * package-private method to check queue size. * * @return The current number of items in the queue. */ int getWorkQueueSize() { return this.workQueue.size(); } }
When a new work coming in, we will call `drain()`, so I am thinking if we could directly return if `currentDownstream == null`.
private void drainQueue() { if (isTerminated()) { return; } long numberRequested = REQUESTED.get(this); boolean isEmpty = bufferMessages.isEmpty(); SynchronousReceiveWork currentDownstream = null; while (numberRequested != 0L && !isEmpty) { if (isTerminated()) { break; } long numberConsumed = 0L; while (numberRequested != numberConsumed) { if (isEmpty || isTerminated()) { break; } final ServiceBusReceivedMessage message = bufferMessages.poll(); boolean isEmitted = false; while (!isEmitted) { currentDownstream = getOrUpdateCurrentWork(); if (currentDownstream == null) { break; } isEmitted = currentDownstream.emitNext(message); } if (!isEmitted) { if (isPrefetchDisabled && asyncClient.getReceiverOptions().getReceiveMode() == ServiceBusReceiveMode.PEEK_LOCK) { asyncClient.release(message).subscribe(__ -> { }, error -> LOGGER.atWarning() .addKeyValue(LOCK_TOKEN_KEY, message.getLockToken()) .log("Couldn't release the message.", error), () -> LOGGER.atVerbose() .addKeyValue(LOCK_TOKEN_KEY, message.getLockToken()) .log("Message successfully released.")); } else { bufferMessages.addFirst(message); break; } } numberConsumed++; isEmpty = bufferMessages.isEmpty(); } final long requestedMessages = REQUESTED.get(this); if (requestedMessages != Long.MAX_VALUE) { numberRequested = REQUESTED.addAndGet(this, -numberConsumed); } currentDownstream = getOrUpdateCurrentWork(); if (numberConsumed == 0 && currentDownstream == null) { break; } } if (numberRequested == 0L) { LOGGER.atVerbose() .log("Current work is completed. Schedule next work."); getOrUpdateCurrentWork(); } }
}
private void drainQueue() { if (isTerminated()) { return; } long numberRequested = REQUESTED.get(this); boolean isEmpty = bufferMessages.isEmpty(); SynchronousReceiveWork currentDownstream = null; while (numberRequested != 0L && !isEmpty) { if (isTerminated()) { break; } long numberConsumed = 0L; while (numberRequested != numberConsumed) { if (isEmpty || isTerminated()) { break; } final ServiceBusReceivedMessage message = bufferMessages.poll(); boolean isEmitted = false; while (!isEmitted) { currentDownstream = getOrUpdateCurrentWork(); if (currentDownstream == null && isReceiveDeleteMode) { bufferMessages.addFirst(message); return; } if (currentDownstream == null) { break; } isEmitted = currentDownstream.emitNext(message); } if (!isEmitted) { if (isPrefetchDisabled) { asyncClient.release(message).subscribe(__ -> { }, error -> LOGGER.atWarning() .addKeyValue(LOCK_TOKEN_KEY, message.getLockToken()) .log("Couldn't release the message.", error), () -> LOGGER.atVerbose() .addKeyValue(LOCK_TOKEN_KEY, message.getLockToken()) .log("Message successfully released.")); } else { bufferMessages.addFirst(message); break; } } numberConsumed++; isEmpty = bufferMessages.isEmpty(); } final long requestedMessages = REQUESTED.get(this); if (requestedMessages != Long.MAX_VALUE) { numberRequested = REQUESTED.addAndGet(this, -numberConsumed); } } if (numberRequested == 0L) { LOGGER.atVerbose() .log("Current work is completed. Schedule next work."); getOrUpdateCurrentWork(); } }
class SynchronousMessageSubscriber extends BaseSubscriber<ServiceBusReceivedMessage> { private static final ClientLogger LOGGER = new ClientLogger(SynchronousMessageSubscriber.class); private final AtomicBoolean isDisposed = new AtomicBoolean(); private final AtomicInteger wip = new AtomicInteger(); private final ConcurrentLinkedQueue<SynchronousReceiveWork> workQueue = new ConcurrentLinkedQueue<>(); private final ConcurrentLinkedDeque<ServiceBusReceivedMessage> bufferMessages = new ConcurrentLinkedDeque<>(); private final Object currentWorkLock = new Object(); private final ServiceBusReceiverAsyncClient asyncClient; private final boolean isPrefetchDisabled; private final Duration operationTimeout; private volatile SynchronousReceiveWork currentWork; /** * The number of requested messages. */ private volatile long requested; private static final AtomicLongFieldUpdater<SynchronousMessageSubscriber> REQUESTED = AtomicLongFieldUpdater.newUpdater(SynchronousMessageSubscriber.class, "requested"); private volatile Subscription upstream; private static final AtomicReferenceFieldUpdater<SynchronousMessageSubscriber, Subscription> UPSTREAM = AtomicReferenceFieldUpdater.newUpdater(SynchronousMessageSubscriber.class, Subscription.class, "upstream"); /** * Creates a synchronous subscriber with some initial work to queue. * * * @param asyncClient Client to update disposition of messages. * @param isPrefetchDisabled Indicates if the prefetch is disabled. * @param operationTimeout Timeout to wait for operation to complete. * @param initialWork Initial work to queue. * * <p> * When {@code isPrefetchDisabled} is true, we release the messages those received during the timespan * between the last terminated downstream and the next active downstream. * </p> * * @throws NullPointerException if {@code initialWork} is null. * @throws IllegalArgumentException if {@code initialWork.getNumberOfEvents()} is less than 1. 
*/ SynchronousMessageSubscriber(ServiceBusReceiverAsyncClient asyncClient, SynchronousReceiveWork initialWork, boolean isPrefetchDisabled, Duration operationTimeout) { this.asyncClient = Objects.requireNonNull(asyncClient, "'asyncClient' cannot be null."); this.operationTimeout = Objects.requireNonNull(operationTimeout, "'operationTimeout' cannot be null."); this.workQueue.add(Objects.requireNonNull(initialWork, "'initialWork' cannot be null.")); this.isPrefetchDisabled = isPrefetchDisabled; if (initialWork.getNumberOfEvents() < 1) { throw LOGGER.logExceptionAsError(new IllegalArgumentException( "'numberOfEvents' cannot be less than 1. Actual: " + initialWork.getNumberOfEvents())); } Operators.addCap(REQUESTED, this, initialWork.getNumberOfEvents()); } /** * On an initial subscription, will take the first work item, and request that amount of work for it. * * @param subscription Subscription for upstream. */ @Override protected void hookOnSubscribe(Subscription subscription) { if (!Operators.setOnce(UPSTREAM, this, subscription)) { LOGGER.warning("This should only be subscribed to once. Ignoring subscription."); return; } getOrUpdateCurrentWork(); subscription.request(REQUESTED.get(this)); } /** * Publishes the event to the current {@link SynchronousReceiveWork}. If that work item is complete, will dispose of * the subscriber. * * @param message Event to publish. */ @Override protected void hookOnNext(ServiceBusReceivedMessage message) { if (isTerminated()) { Operators.onNextDropped(message, Context.empty()); } else { bufferMessages.add(message); drain(); } } /** * Queue the work to be picked up by drain loop. * * @param work to be queued. 
*/ void queueWork(SynchronousReceiveWork work) { Objects.requireNonNull(work, "'work' cannot be null"); workQueue.add(work); LoggingEventBuilder logBuilder = LOGGER.atVerbose() .addKeyValue(WORK_ID_KEY, work.getId()) .addKeyValue("numberOfEvents", work.getNumberOfEvents()) .addKeyValue("timeout", work.getTimeout()); if (workQueue.peek() == work && (currentWork == null || currentWork.isTerminal())) { logBuilder.log("First work in queue. Requesting upstream if needed."); getOrUpdateCurrentWork(); } else { logBuilder.log("Queuing receive work."); } if (UPSTREAM.get(this) != null) { drain(); } } /** * Drain the work, only one thread can be in this loop at a time. */ private void drain() { if (wip.getAndIncrement() != 0) { return; } int missed = 1; while (missed != 0) { try { drainQueue(); } finally { missed = wip.addAndGet(-missed); } } } /*** * Drain the queue using a lock on current work in progress. */ /** * {@inheritDoc} */ @Override protected void hookOnError(Throwable throwable) { dispose("Errors occurred upstream", throwable); } @Override protected void hookOnCancel() { this.dispose(); } private boolean isTerminated() { if (UPSTREAM.get(this) == Operators.cancelledSubscription()) { return true; } return isDisposed.get(); } /** * Gets the current work item if it is not terminal and cleans up any existing timeout operations. * * @return Gets or sets the next work item. Null if there are no work items currently. */ private SynchronousReceiveWork getOrUpdateCurrentWork() { synchronized (currentWorkLock) { if (currentWork != null && !currentWork.isTerminal()) { return currentWork; } currentWork = workQueue.poll(); while (currentWork != null && currentWork.isTerminal()) { LOGGER.atVerbose() .addKeyValue(WORK_ID_KEY, currentWork.getId()) .addKeyValue("numberOfEvents", currentWork.getNumberOfEvents()) .log("This work from queue is terminal. 
Skip it."); currentWork = workQueue.poll(); } if (currentWork != null) { final SynchronousReceiveWork work = currentWork; LOGGER.atVerbose() .addKeyValue(WORK_ID_KEY, work.getId()) .addKeyValue("numberOfEvents", work.getNumberOfEvents()) .log("Current work updated."); work.start(); requestUpstream(work.getNumberOfEvents()); } return currentWork; } } /** * Adds additional credits upstream if {@code numberOfMessages} is greater than the number of {@code REQUESTED} * items. * * @param numberOfMessages Number of messages required downstream. */ private void requestUpstream(long numberOfMessages) { if (isTerminated()) { LOGGER.info("Cannot request more messages upstream. Subscriber is terminated."); return; } final Subscription subscription = UPSTREAM.get(this); if (subscription == null) { LOGGER.info("There is no upstream to request messages from."); return; } final long currentRequested = REQUESTED.get(this); final long difference = numberOfMessages - currentRequested; LOGGER.atVerbose() .addKeyValue(NUMBER_OF_REQUESTED_MESSAGES_KEY, currentRequested) .addKeyValue("numberOfMessages", numberOfMessages) .addKeyValue("difference", difference) .log("Requesting messages from upstream."); if (difference <= 0) { return; } Operators.addCap(REQUESTED, this, difference); subscription.request(difference); } @Override public void dispose() { super.dispose(); dispose("Upstream completed the receive work.", null); } private void dispose(String message, Throwable throwable) { super.dispose(); if (isDisposed.getAndSet(true)) { return; } synchronized (currentWorkLock) { if (currentWork != null) { currentWork.complete(message, throwable); currentWork = null; } SynchronousReceiveWork w = workQueue.poll(); while (w != null) { w.complete(message, throwable); w = workQueue.poll(); } } } /** * package-private method to check queue size. * * @return The current number of items in the queue. */ int getWorkQueueSize() { return this.workQueue.size(); } }
class SynchronousMessageSubscriber extends BaseSubscriber<ServiceBusReceivedMessage> { private static final ClientLogger LOGGER = new ClientLogger(SynchronousMessageSubscriber.class); private final AtomicBoolean isDisposed = new AtomicBoolean(); private final AtomicInteger wip = new AtomicInteger(); private final ConcurrentLinkedQueue<SynchronousReceiveWork> workQueue = new ConcurrentLinkedQueue<>(); private final ConcurrentLinkedDeque<ServiceBusReceivedMessage> bufferMessages = new ConcurrentLinkedDeque<>(); private final Object currentWorkLock = new Object(); private final ServiceBusReceiverAsyncClient asyncClient; private final boolean isPrefetchDisabled; private final Duration operationTimeout; private final boolean isReceiveDeleteMode; private volatile SynchronousReceiveWork currentWork; /** * The number of requested messages. */ private volatile long requested; private static final AtomicLongFieldUpdater<SynchronousMessageSubscriber> REQUESTED = AtomicLongFieldUpdater.newUpdater(SynchronousMessageSubscriber.class, "requested"); private volatile Subscription upstream; private static final AtomicReferenceFieldUpdater<SynchronousMessageSubscriber, Subscription> UPSTREAM = AtomicReferenceFieldUpdater.newUpdater(SynchronousMessageSubscriber.class, Subscription.class, "upstream"); /** * Creates a synchronous subscriber with some initial work to queue. * * * @param asyncClient Client to update disposition of messages. * @param isPrefetchDisabled Indicates if the prefetch is disabled. * @param operationTimeout Timeout to wait for operation to complete. * @param initialWork Initial work to queue. * * <p> * When {@code isPrefetchDisabled} is true, we release the messages those received during the timespan * between the last terminated downstream and the next active downstream. * </p> * * @throws NullPointerException if {@code initialWork} is null. * @throws IllegalArgumentException if {@code initialWork.getNumberOfEvents()} is less than 1. 
*/ SynchronousMessageSubscriber(ServiceBusReceiverAsyncClient asyncClient, SynchronousReceiveWork initialWork, boolean isPrefetchDisabled, Duration operationTimeout) { this.asyncClient = Objects.requireNonNull(asyncClient, "'asyncClient' cannot be null."); this.operationTimeout = Objects.requireNonNull(operationTimeout, "'operationTimeout' cannot be null."); this.workQueue.add(Objects.requireNonNull(initialWork, "'initialWork' cannot be null.")); this.isPrefetchDisabled = isPrefetchDisabled; this.isReceiveDeleteMode = asyncClient.getReceiverOptions().getReceiveMode() == ServiceBusReceiveMode.RECEIVE_AND_DELETE; if (initialWork.getNumberOfEvents() < 1) { throw LOGGER.logExceptionAsError(new IllegalArgumentException( "'numberOfEvents' cannot be less than 1. Actual: " + initialWork.getNumberOfEvents())); } Operators.addCap(REQUESTED, this, initialWork.getNumberOfEvents()); } /** * On an initial subscription, will take the first work item, and request that amount of work for it. * * @param subscription Subscription for upstream. */ @Override protected void hookOnSubscribe(Subscription subscription) { if (!Operators.setOnce(UPSTREAM, this, subscription)) { LOGGER.warning("This should only be subscribed to once. Ignoring subscription."); return; } getOrUpdateCurrentWork(); subscription.request(REQUESTED.get(this)); } /** * Publishes the event to the current {@link SynchronousReceiveWork}. If that work item is complete, will dispose of * the subscriber. * * @param message Event to publish. */ @Override protected void hookOnNext(ServiceBusReceivedMessage message) { if (isTerminated()) { Operators.onNextDropped(message, Context.empty()); } else { bufferMessages.add(message); drain(); } } /** * Queue the work to be picked up by drain loop. * * @param work to be queued. 
*/ void queueWork(SynchronousReceiveWork work) { Objects.requireNonNull(work, "'work' cannot be null"); workQueue.add(work); LoggingEventBuilder logBuilder = LOGGER.atVerbose() .addKeyValue(WORK_ID_KEY, work.getId()) .addKeyValue("numberOfEvents", work.getNumberOfEvents()) .addKeyValue("timeout", work.getTimeout()); if (workQueue.peek() == work && (currentWork == null || currentWork.isTerminal())) { logBuilder.log("First work in queue. Requesting upstream if needed."); getOrUpdateCurrentWork(); } else { logBuilder.log("Queuing receive work."); } if (UPSTREAM.get(this) != null) { drain(); } } /** * Drain the work, only one thread can be in this loop at a time. */ private void drain() { if (wip.getAndIncrement() != 0) { return; } int missed = 1; while (missed != 0) { try { drainQueue(); } finally { missed = wip.addAndGet(-missed); } } } /*** * Drain the queue using a lock on current work in progress. */ /** * {@inheritDoc} */ @Override protected void hookOnError(Throwable throwable) { dispose("Errors occurred upstream", throwable); } @Override protected void hookOnCancel() { this.dispose(); } private boolean isTerminated() { if (UPSTREAM.get(this) == Operators.cancelledSubscription()) { return true; } return isDisposed.get(); } /** * Gets the current work item if it is not terminal and cleans up any existing timeout operations. * * @return Gets or sets the next work item. Null if there are no work items currently. */ private SynchronousReceiveWork getOrUpdateCurrentWork() { synchronized (currentWorkLock) { if (currentWork != null && !currentWork.isTerminal()) { return currentWork; } currentWork = workQueue.poll(); while (currentWork != null && currentWork.isTerminal()) { LOGGER.atVerbose() .addKeyValue(WORK_ID_KEY, currentWork.getId()) .addKeyValue("numberOfEvents", currentWork.getNumberOfEvents()) .log("This work from queue is terminal. 
Skip it."); currentWork = workQueue.poll(); } if (currentWork != null) { final SynchronousReceiveWork work = currentWork; LOGGER.atVerbose() .addKeyValue(WORK_ID_KEY, work.getId()) .addKeyValue("numberOfEvents", work.getNumberOfEvents()) .log("Current work updated."); work.start(); requestUpstream(work.getNumberOfEvents()); } return currentWork; } } /** * Adds additional credits upstream if {@code numberOfMessages} is greater than the number of {@code REQUESTED} * items. * * @param numberOfMessages Number of messages required downstream. */ private void requestUpstream(long numberOfMessages) { if (isTerminated()) { LOGGER.info("Cannot request more messages upstream. Subscriber is terminated."); return; } final Subscription subscription = UPSTREAM.get(this); if (subscription == null) { LOGGER.info("There is no upstream to request messages from."); return; } final long currentRequested = REQUESTED.get(this); final long difference = numberOfMessages - currentRequested; LOGGER.atVerbose() .addKeyValue(NUMBER_OF_REQUESTED_MESSAGES_KEY, currentRequested) .addKeyValue("numberOfMessages", numberOfMessages) .addKeyValue("difference", difference) .log("Requesting messages from upstream."); if (difference <= 0) { return; } Operators.addCap(REQUESTED, this, difference); subscription.request(difference); } @Override public void dispose() { super.dispose(); dispose("Upstream completed the receive work.", null); } private void dispose(String message, Throwable throwable) { super.dispose(); if (isDisposed.getAndSet(true)) { return; } synchronized (currentWorkLock) { if (currentWork != null) { currentWork.complete(message, throwable); currentWork = null; } SynchronousReceiveWork w = workQueue.poll(); while (w != null) { w.complete(message, throwable); w = workQueue.poll(); } } } /** * package-private method to check queue size. * * @return The current number of items in the queue. */ int getWorkQueueSize() { return this.workQueue.size(); } }
if we have some messages to release and just check `currentDownstream == null`, these message will remain in buffer until next work come in.
private void drainQueue() { if (isTerminated()) { return; } long numberRequested = REQUESTED.get(this); boolean isEmpty = bufferMessages.isEmpty(); SynchronousReceiveWork currentDownstream = null; while (numberRequested != 0L && !isEmpty) { if (isTerminated()) { break; } long numberConsumed = 0L; while (numberRequested != numberConsumed) { if (isEmpty || isTerminated()) { break; } final ServiceBusReceivedMessage message = bufferMessages.poll(); boolean isEmitted = false; while (!isEmitted) { currentDownstream = getOrUpdateCurrentWork(); if (currentDownstream == null) { break; } isEmitted = currentDownstream.emitNext(message); } if (!isEmitted) { if (isPrefetchDisabled && asyncClient.getReceiverOptions().getReceiveMode() == ServiceBusReceiveMode.PEEK_LOCK) { asyncClient.release(message).subscribe(__ -> { }, error -> LOGGER.atWarning() .addKeyValue(LOCK_TOKEN_KEY, message.getLockToken()) .log("Couldn't release the message.", error), () -> LOGGER.atVerbose() .addKeyValue(LOCK_TOKEN_KEY, message.getLockToken()) .log("Message successfully released.")); } else { bufferMessages.addFirst(message); break; } } numberConsumed++; isEmpty = bufferMessages.isEmpty(); } final long requestedMessages = REQUESTED.get(this); if (requestedMessages != Long.MAX_VALUE) { numberRequested = REQUESTED.addAndGet(this, -numberConsumed); } currentDownstream = getOrUpdateCurrentWork(); if (numberConsumed == 0 && currentDownstream == null) { break; } } if (numberRequested == 0L) { LOGGER.atVerbose() .log("Current work is completed. Schedule next work."); getOrUpdateCurrentWork(); } }
}
private void drainQueue() { if (isTerminated()) { return; } long numberRequested = REQUESTED.get(this); boolean isEmpty = bufferMessages.isEmpty(); SynchronousReceiveWork currentDownstream = null; while (numberRequested != 0L && !isEmpty) { if (isTerminated()) { break; } long numberConsumed = 0L; while (numberRequested != numberConsumed) { if (isEmpty || isTerminated()) { break; } final ServiceBusReceivedMessage message = bufferMessages.poll(); boolean isEmitted = false; while (!isEmitted) { currentDownstream = getOrUpdateCurrentWork(); if (currentDownstream == null && isReceiveDeleteMode) { bufferMessages.addFirst(message); return; } if (currentDownstream == null) { break; } isEmitted = currentDownstream.emitNext(message); } if (!isEmitted) { if (isPrefetchDisabled) { asyncClient.release(message).subscribe(__ -> { }, error -> LOGGER.atWarning() .addKeyValue(LOCK_TOKEN_KEY, message.getLockToken()) .log("Couldn't release the message.", error), () -> LOGGER.atVerbose() .addKeyValue(LOCK_TOKEN_KEY, message.getLockToken()) .log("Message successfully released.")); } else { bufferMessages.addFirst(message); break; } } numberConsumed++; isEmpty = bufferMessages.isEmpty(); } final long requestedMessages = REQUESTED.get(this); if (requestedMessages != Long.MAX_VALUE) { numberRequested = REQUESTED.addAndGet(this, -numberConsumed); } } if (numberRequested == 0L) { LOGGER.atVerbose() .log("Current work is completed. Schedule next work."); getOrUpdateCurrentWork(); } }
class SynchronousMessageSubscriber extends BaseSubscriber<ServiceBusReceivedMessage> { private static final ClientLogger LOGGER = new ClientLogger(SynchronousMessageSubscriber.class); private final AtomicBoolean isDisposed = new AtomicBoolean(); private final AtomicInteger wip = new AtomicInteger(); private final ConcurrentLinkedQueue<SynchronousReceiveWork> workQueue = new ConcurrentLinkedQueue<>(); private final ConcurrentLinkedDeque<ServiceBusReceivedMessage> bufferMessages = new ConcurrentLinkedDeque<>(); private final Object currentWorkLock = new Object(); private final ServiceBusReceiverAsyncClient asyncClient; private final boolean isPrefetchDisabled; private final Duration operationTimeout; private volatile SynchronousReceiveWork currentWork; /** * The number of requested messages. */ private volatile long requested; private static final AtomicLongFieldUpdater<SynchronousMessageSubscriber> REQUESTED = AtomicLongFieldUpdater.newUpdater(SynchronousMessageSubscriber.class, "requested"); private volatile Subscription upstream; private static final AtomicReferenceFieldUpdater<SynchronousMessageSubscriber, Subscription> UPSTREAM = AtomicReferenceFieldUpdater.newUpdater(SynchronousMessageSubscriber.class, Subscription.class, "upstream"); /** * Creates a synchronous subscriber with some initial work to queue. * * * @param asyncClient Client to update disposition of messages. * @param isPrefetchDisabled Indicates if the prefetch is disabled. * @param operationTimeout Timeout to wait for operation to complete. * @param initialWork Initial work to queue. * * <p> * When {@code isPrefetchDisabled} is true, we release the messages those received during the timespan * between the last terminated downstream and the next active downstream. * </p> * * @throws NullPointerException if {@code initialWork} is null. * @throws IllegalArgumentException if {@code initialWork.getNumberOfEvents()} is less than 1. 
*/ SynchronousMessageSubscriber(ServiceBusReceiverAsyncClient asyncClient, SynchronousReceiveWork initialWork, boolean isPrefetchDisabled, Duration operationTimeout) { this.asyncClient = Objects.requireNonNull(asyncClient, "'asyncClient' cannot be null."); this.operationTimeout = Objects.requireNonNull(operationTimeout, "'operationTimeout' cannot be null."); this.workQueue.add(Objects.requireNonNull(initialWork, "'initialWork' cannot be null.")); this.isPrefetchDisabled = isPrefetchDisabled; if (initialWork.getNumberOfEvents() < 1) { throw LOGGER.logExceptionAsError(new IllegalArgumentException( "'numberOfEvents' cannot be less than 1. Actual: " + initialWork.getNumberOfEvents())); } Operators.addCap(REQUESTED, this, initialWork.getNumberOfEvents()); } /** * On an initial subscription, will take the first work item, and request that amount of work for it. * * @param subscription Subscription for upstream. */ @Override protected void hookOnSubscribe(Subscription subscription) { if (!Operators.setOnce(UPSTREAM, this, subscription)) { LOGGER.warning("This should only be subscribed to once. Ignoring subscription."); return; } getOrUpdateCurrentWork(); subscription.request(REQUESTED.get(this)); } /** * Publishes the event to the current {@link SynchronousReceiveWork}. If that work item is complete, will dispose of * the subscriber. * * @param message Event to publish. */ @Override protected void hookOnNext(ServiceBusReceivedMessage message) { if (isTerminated()) { Operators.onNextDropped(message, Context.empty()); } else { bufferMessages.add(message); drain(); } } /** * Queue the work to be picked up by drain loop. * * @param work to be queued. 
*/ void queueWork(SynchronousReceiveWork work) { Objects.requireNonNull(work, "'work' cannot be null"); workQueue.add(work); LoggingEventBuilder logBuilder = LOGGER.atVerbose() .addKeyValue(WORK_ID_KEY, work.getId()) .addKeyValue("numberOfEvents", work.getNumberOfEvents()) .addKeyValue("timeout", work.getTimeout()); if (workQueue.peek() == work && (currentWork == null || currentWork.isTerminal())) { logBuilder.log("First work in queue. Requesting upstream if needed."); getOrUpdateCurrentWork(); } else { logBuilder.log("Queuing receive work."); } if (UPSTREAM.get(this) != null) { drain(); } } /** * Drain the work, only one thread can be in this loop at a time. */ private void drain() { if (wip.getAndIncrement() != 0) { return; } int missed = 1; while (missed != 0) { try { drainQueue(); } finally { missed = wip.addAndGet(-missed); } } } /*** * Drain the queue using a lock on current work in progress. */ /** * {@inheritDoc} */ @Override protected void hookOnError(Throwable throwable) { dispose("Errors occurred upstream", throwable); } @Override protected void hookOnCancel() { this.dispose(); } private boolean isTerminated() { if (UPSTREAM.get(this) == Operators.cancelledSubscription()) { return true; } return isDisposed.get(); } /** * Gets the current work item if it is not terminal and cleans up any existing timeout operations. * * @return Gets or sets the next work item. Null if there are no work items currently. */ private SynchronousReceiveWork getOrUpdateCurrentWork() { synchronized (currentWorkLock) { if (currentWork != null && !currentWork.isTerminal()) { return currentWork; } currentWork = workQueue.poll(); while (currentWork != null && currentWork.isTerminal()) { LOGGER.atVerbose() .addKeyValue(WORK_ID_KEY, currentWork.getId()) .addKeyValue("numberOfEvents", currentWork.getNumberOfEvents()) .log("This work from queue is terminal. 
Skip it."); currentWork = workQueue.poll(); } if (currentWork != null) { final SynchronousReceiveWork work = currentWork; LOGGER.atVerbose() .addKeyValue(WORK_ID_KEY, work.getId()) .addKeyValue("numberOfEvents", work.getNumberOfEvents()) .log("Current work updated."); work.start(); requestUpstream(work.getNumberOfEvents()); } return currentWork; } } /** * Adds additional credits upstream if {@code numberOfMessages} is greater than the number of {@code REQUESTED} * items. * * @param numberOfMessages Number of messages required downstream. */ private void requestUpstream(long numberOfMessages) { if (isTerminated()) { LOGGER.info("Cannot request more messages upstream. Subscriber is terminated."); return; } final Subscription subscription = UPSTREAM.get(this); if (subscription == null) { LOGGER.info("There is no upstream to request messages from."); return; } final long currentRequested = REQUESTED.get(this); final long difference = numberOfMessages - currentRequested; LOGGER.atVerbose() .addKeyValue(NUMBER_OF_REQUESTED_MESSAGES_KEY, currentRequested) .addKeyValue("numberOfMessages", numberOfMessages) .addKeyValue("difference", difference) .log("Requesting messages from upstream."); if (difference <= 0) { return; } Operators.addCap(REQUESTED, this, difference); subscription.request(difference); } @Override public void dispose() { super.dispose(); dispose("Upstream completed the receive work.", null); } private void dispose(String message, Throwable throwable) { super.dispose(); if (isDisposed.getAndSet(true)) { return; } synchronized (currentWorkLock) { if (currentWork != null) { currentWork.complete(message, throwable); currentWork = null; } SynchronousReceiveWork w = workQueue.poll(); while (w != null) { w.complete(message, throwable); w = workQueue.poll(); } } } /** * package-private method to check queue size. * * @return The current number of items in the queue. */ int getWorkQueueSize() { return this.workQueue.size(); } }
class SynchronousMessageSubscriber extends BaseSubscriber<ServiceBusReceivedMessage> { private static final ClientLogger LOGGER = new ClientLogger(SynchronousMessageSubscriber.class); private final AtomicBoolean isDisposed = new AtomicBoolean(); private final AtomicInteger wip = new AtomicInteger(); private final ConcurrentLinkedQueue<SynchronousReceiveWork> workQueue = new ConcurrentLinkedQueue<>(); private final ConcurrentLinkedDeque<ServiceBusReceivedMessage> bufferMessages = new ConcurrentLinkedDeque<>(); private final Object currentWorkLock = new Object(); private final ServiceBusReceiverAsyncClient asyncClient; private final boolean isPrefetchDisabled; private final Duration operationTimeout; private final boolean isReceiveDeleteMode; private volatile SynchronousReceiveWork currentWork; /** * The number of requested messages. */ private volatile long requested; private static final AtomicLongFieldUpdater<SynchronousMessageSubscriber> REQUESTED = AtomicLongFieldUpdater.newUpdater(SynchronousMessageSubscriber.class, "requested"); private volatile Subscription upstream; private static final AtomicReferenceFieldUpdater<SynchronousMessageSubscriber, Subscription> UPSTREAM = AtomicReferenceFieldUpdater.newUpdater(SynchronousMessageSubscriber.class, Subscription.class, "upstream"); /** * Creates a synchronous subscriber with some initial work to queue. * * * @param asyncClient Client to update disposition of messages. * @param isPrefetchDisabled Indicates if the prefetch is disabled. * @param operationTimeout Timeout to wait for operation to complete. * @param initialWork Initial work to queue. * * <p> * When {@code isPrefetchDisabled} is true, we release the messages those received during the timespan * between the last terminated downstream and the next active downstream. * </p> * * @throws NullPointerException if {@code initialWork} is null. * @throws IllegalArgumentException if {@code initialWork.getNumberOfEvents()} is less than 1. 
*/ SynchronousMessageSubscriber(ServiceBusReceiverAsyncClient asyncClient, SynchronousReceiveWork initialWork, boolean isPrefetchDisabled, Duration operationTimeout) { this.asyncClient = Objects.requireNonNull(asyncClient, "'asyncClient' cannot be null."); this.operationTimeout = Objects.requireNonNull(operationTimeout, "'operationTimeout' cannot be null."); this.workQueue.add(Objects.requireNonNull(initialWork, "'initialWork' cannot be null.")); this.isPrefetchDisabled = isPrefetchDisabled; this.isReceiveDeleteMode = asyncClient.getReceiverOptions().getReceiveMode() == ServiceBusReceiveMode.RECEIVE_AND_DELETE; if (initialWork.getNumberOfEvents() < 1) { throw LOGGER.logExceptionAsError(new IllegalArgumentException( "'numberOfEvents' cannot be less than 1. Actual: " + initialWork.getNumberOfEvents())); } Operators.addCap(REQUESTED, this, initialWork.getNumberOfEvents()); } /** * On an initial subscription, will take the first work item, and request that amount of work for it. * * @param subscription Subscription for upstream. */ @Override protected void hookOnSubscribe(Subscription subscription) { if (!Operators.setOnce(UPSTREAM, this, subscription)) { LOGGER.warning("This should only be subscribed to once. Ignoring subscription."); return; } getOrUpdateCurrentWork(); subscription.request(REQUESTED.get(this)); } /** * Publishes the event to the current {@link SynchronousReceiveWork}. If that work item is complete, will dispose of * the subscriber. * * @param message Event to publish. */ @Override protected void hookOnNext(ServiceBusReceivedMessage message) { if (isTerminated()) { Operators.onNextDropped(message, Context.empty()); } else { bufferMessages.add(message); drain(); } } /** * Queue the work to be picked up by drain loop. * * @param work to be queued. 
*/ void queueWork(SynchronousReceiveWork work) { Objects.requireNonNull(work, "'work' cannot be null"); workQueue.add(work); LoggingEventBuilder logBuilder = LOGGER.atVerbose() .addKeyValue(WORK_ID_KEY, work.getId()) .addKeyValue("numberOfEvents", work.getNumberOfEvents()) .addKeyValue("timeout", work.getTimeout()); if (workQueue.peek() == work && (currentWork == null || currentWork.isTerminal())) { logBuilder.log("First work in queue. Requesting upstream if needed."); getOrUpdateCurrentWork(); } else { logBuilder.log("Queuing receive work."); } if (UPSTREAM.get(this) != null) { drain(); } } /** * Drain the work, only one thread can be in this loop at a time. */ private void drain() { if (wip.getAndIncrement() != 0) { return; } int missed = 1; while (missed != 0) { try { drainQueue(); } finally { missed = wip.addAndGet(-missed); } } } /*** * Drain the queue using a lock on current work in progress. */ /** * {@inheritDoc} */ @Override protected void hookOnError(Throwable throwable) { dispose("Errors occurred upstream", throwable); } @Override protected void hookOnCancel() { this.dispose(); } private boolean isTerminated() { if (UPSTREAM.get(this) == Operators.cancelledSubscription()) { return true; } return isDisposed.get(); } /** * Gets the current work item if it is not terminal and cleans up any existing timeout operations. * * @return Gets or sets the next work item. Null if there are no work items currently. */ private SynchronousReceiveWork getOrUpdateCurrentWork() { synchronized (currentWorkLock) { if (currentWork != null && !currentWork.isTerminal()) { return currentWork; } currentWork = workQueue.poll(); while (currentWork != null && currentWork.isTerminal()) { LOGGER.atVerbose() .addKeyValue(WORK_ID_KEY, currentWork.getId()) .addKeyValue("numberOfEvents", currentWork.getNumberOfEvents()) .log("This work from queue is terminal. 
Skip it."); currentWork = workQueue.poll(); } if (currentWork != null) { final SynchronousReceiveWork work = currentWork; LOGGER.atVerbose() .addKeyValue(WORK_ID_KEY, work.getId()) .addKeyValue("numberOfEvents", work.getNumberOfEvents()) .log("Current work updated."); work.start(); requestUpstream(work.getNumberOfEvents()); } return currentWork; } } /** * Adds additional credits upstream if {@code numberOfMessages} is greater than the number of {@code REQUESTED} * items. * * @param numberOfMessages Number of messages required downstream. */ private void requestUpstream(long numberOfMessages) { if (isTerminated()) { LOGGER.info("Cannot request more messages upstream. Subscriber is terminated."); return; } final Subscription subscription = UPSTREAM.get(this); if (subscription == null) { LOGGER.info("There is no upstream to request messages from."); return; } final long currentRequested = REQUESTED.get(this); final long difference = numberOfMessages - currentRequested; LOGGER.atVerbose() .addKeyValue(NUMBER_OF_REQUESTED_MESSAGES_KEY, currentRequested) .addKeyValue("numberOfMessages", numberOfMessages) .addKeyValue("difference", difference) .log("Requesting messages from upstream."); if (difference <= 0) { return; } Operators.addCap(REQUESTED, this, difference); subscription.request(difference); } @Override public void dispose() { super.dispose(); dispose("Upstream completed the receive work.", null); } private void dispose(String message, Throwable throwable) { super.dispose(); if (isDisposed.getAndSet(true)) { return; } synchronized (currentWorkLock) { if (currentWork != null) { currentWork.complete(message, throwable); currentWork = null; } SynchronousReceiveWork w = workQueue.poll(); while (w != null) { w.complete(message, throwable); w = workQueue.poll(); } } } /** * package-private method to check queue size. * * @return The current number of items in the queue. */ int getWorkQueueSize() { return this.workQueue.size(); } }
try also avoid busy loop (that there is no downstream, but loop continue executing without any wait on other resources, using CPU)
private void drainQueue() { if (isTerminated()) { return; } long numberRequested = REQUESTED.get(this); boolean isEmpty = bufferMessages.isEmpty(); SynchronousReceiveWork currentDownstream = null; while (numberRequested != 0L && !isEmpty) { if (isTerminated()) { break; } long numberConsumed = 0L; while (numberRequested != numberConsumed) { if (isEmpty || isTerminated()) { break; } final ServiceBusReceivedMessage message = bufferMessages.poll(); boolean isEmitted = false; while (!isEmitted) { currentDownstream = getOrUpdateCurrentWork(); if (currentDownstream == null) { break; } isEmitted = currentDownstream.emitNext(message); } if (!isEmitted) { if (isPrefetchDisabled && asyncClient.getReceiverOptions().getReceiveMode() == ServiceBusReceiveMode.PEEK_LOCK) { asyncClient.release(message).subscribe(__ -> { }, error -> LOGGER.atWarning() .addKeyValue(LOCK_TOKEN_KEY, message.getLockToken()) .log("Couldn't release the message.", error), () -> LOGGER.atVerbose() .addKeyValue(LOCK_TOKEN_KEY, message.getLockToken()) .log("Message successfully released.")); } else { bufferMessages.addFirst(message); break; } } numberConsumed++; isEmpty = bufferMessages.isEmpty(); } final long requestedMessages = REQUESTED.get(this); if (requestedMessages != Long.MAX_VALUE) { numberRequested = REQUESTED.addAndGet(this, -numberConsumed); } currentDownstream = getOrUpdateCurrentWork(); if (numberConsumed == 0 && currentDownstream == null) { break; } } if (numberRequested == 0L) { LOGGER.atVerbose() .log("Current work is completed. Schedule next work."); getOrUpdateCurrentWork(); } }
}
private void drainQueue() { if (isTerminated()) { return; } long numberRequested = REQUESTED.get(this); boolean isEmpty = bufferMessages.isEmpty(); SynchronousReceiveWork currentDownstream = null; while (numberRequested != 0L && !isEmpty) { if (isTerminated()) { break; } long numberConsumed = 0L; while (numberRequested != numberConsumed) { if (isEmpty || isTerminated()) { break; } final ServiceBusReceivedMessage message = bufferMessages.poll(); boolean isEmitted = false; while (!isEmitted) { currentDownstream = getOrUpdateCurrentWork(); if (currentDownstream == null && isReceiveDeleteMode) { bufferMessages.addFirst(message); return; } if (currentDownstream == null) { break; } isEmitted = currentDownstream.emitNext(message); } if (!isEmitted) { if (isPrefetchDisabled) { asyncClient.release(message).subscribe(__ -> { }, error -> LOGGER.atWarning() .addKeyValue(LOCK_TOKEN_KEY, message.getLockToken()) .log("Couldn't release the message.", error), () -> LOGGER.atVerbose() .addKeyValue(LOCK_TOKEN_KEY, message.getLockToken()) .log("Message successfully released.")); } else { bufferMessages.addFirst(message); break; } } numberConsumed++; isEmpty = bufferMessages.isEmpty(); } final long requestedMessages = REQUESTED.get(this); if (requestedMessages != Long.MAX_VALUE) { numberRequested = REQUESTED.addAndGet(this, -numberConsumed); } } if (numberRequested == 0L) { LOGGER.atVerbose() .log("Current work is completed. Schedule next work."); getOrUpdateCurrentWork(); } }
class SynchronousMessageSubscriber extends BaseSubscriber<ServiceBusReceivedMessage> { private static final ClientLogger LOGGER = new ClientLogger(SynchronousMessageSubscriber.class); private final AtomicBoolean isDisposed = new AtomicBoolean(); private final AtomicInteger wip = new AtomicInteger(); private final ConcurrentLinkedQueue<SynchronousReceiveWork> workQueue = new ConcurrentLinkedQueue<>(); private final ConcurrentLinkedDeque<ServiceBusReceivedMessage> bufferMessages = new ConcurrentLinkedDeque<>(); private final Object currentWorkLock = new Object(); private final ServiceBusReceiverAsyncClient asyncClient; private final boolean isPrefetchDisabled; private final Duration operationTimeout; private volatile SynchronousReceiveWork currentWork; /** * The number of requested messages. */ private volatile long requested; private static final AtomicLongFieldUpdater<SynchronousMessageSubscriber> REQUESTED = AtomicLongFieldUpdater.newUpdater(SynchronousMessageSubscriber.class, "requested"); private volatile Subscription upstream; private static final AtomicReferenceFieldUpdater<SynchronousMessageSubscriber, Subscription> UPSTREAM = AtomicReferenceFieldUpdater.newUpdater(SynchronousMessageSubscriber.class, Subscription.class, "upstream"); /** * Creates a synchronous subscriber with some initial work to queue. * * * @param asyncClient Client to update disposition of messages. * @param isPrefetchDisabled Indicates if the prefetch is disabled. * @param operationTimeout Timeout to wait for operation to complete. * @param initialWork Initial work to queue. * * <p> * When {@code isPrefetchDisabled} is true, we release the messages those received during the timespan * between the last terminated downstream and the next active downstream. * </p> * * @throws NullPointerException if {@code initialWork} is null. * @throws IllegalArgumentException if {@code initialWork.getNumberOfEvents()} is less than 1. 
*/ SynchronousMessageSubscriber(ServiceBusReceiverAsyncClient asyncClient, SynchronousReceiveWork initialWork, boolean isPrefetchDisabled, Duration operationTimeout) { this.asyncClient = Objects.requireNonNull(asyncClient, "'asyncClient' cannot be null."); this.operationTimeout = Objects.requireNonNull(operationTimeout, "'operationTimeout' cannot be null."); this.workQueue.add(Objects.requireNonNull(initialWork, "'initialWork' cannot be null.")); this.isPrefetchDisabled = isPrefetchDisabled; if (initialWork.getNumberOfEvents() < 1) { throw LOGGER.logExceptionAsError(new IllegalArgumentException( "'numberOfEvents' cannot be less than 1. Actual: " + initialWork.getNumberOfEvents())); } Operators.addCap(REQUESTED, this, initialWork.getNumberOfEvents()); } /** * On an initial subscription, will take the first work item, and request that amount of work for it. * * @param subscription Subscription for upstream. */ @Override protected void hookOnSubscribe(Subscription subscription) { if (!Operators.setOnce(UPSTREAM, this, subscription)) { LOGGER.warning("This should only be subscribed to once. Ignoring subscription."); return; } getOrUpdateCurrentWork(); subscription.request(REQUESTED.get(this)); } /** * Publishes the event to the current {@link SynchronousReceiveWork}. If that work item is complete, will dispose of * the subscriber. * * @param message Event to publish. */ @Override protected void hookOnNext(ServiceBusReceivedMessage message) { if (isTerminated()) { Operators.onNextDropped(message, Context.empty()); } else { bufferMessages.add(message); drain(); } } /** * Queue the work to be picked up by drain loop. * * @param work to be queued. 
*/ void queueWork(SynchronousReceiveWork work) { Objects.requireNonNull(work, "'work' cannot be null"); workQueue.add(work); LoggingEventBuilder logBuilder = LOGGER.atVerbose() .addKeyValue(WORK_ID_KEY, work.getId()) .addKeyValue("numberOfEvents", work.getNumberOfEvents()) .addKeyValue("timeout", work.getTimeout()); if (workQueue.peek() == work && (currentWork == null || currentWork.isTerminal())) { logBuilder.log("First work in queue. Requesting upstream if needed."); getOrUpdateCurrentWork(); } else { logBuilder.log("Queuing receive work."); } if (UPSTREAM.get(this) != null) { drain(); } } /** * Drain the work, only one thread can be in this loop at a time. */ private void drain() { if (wip.getAndIncrement() != 0) { return; } int missed = 1; while (missed != 0) { try { drainQueue(); } finally { missed = wip.addAndGet(-missed); } } } /*** * Drain the queue using a lock on current work in progress. */ /** * {@inheritDoc} */ @Override protected void hookOnError(Throwable throwable) { dispose("Errors occurred upstream", throwable); } @Override protected void hookOnCancel() { this.dispose(); } private boolean isTerminated() { if (UPSTREAM.get(this) == Operators.cancelledSubscription()) { return true; } return isDisposed.get(); } /** * Gets the current work item if it is not terminal and cleans up any existing timeout operations. * * @return Gets or sets the next work item. Null if there are no work items currently. */ private SynchronousReceiveWork getOrUpdateCurrentWork() { synchronized (currentWorkLock) { if (currentWork != null && !currentWork.isTerminal()) { return currentWork; } currentWork = workQueue.poll(); while (currentWork != null && currentWork.isTerminal()) { LOGGER.atVerbose() .addKeyValue(WORK_ID_KEY, currentWork.getId()) .addKeyValue("numberOfEvents", currentWork.getNumberOfEvents()) .log("This work from queue is terminal. 
Skip it."); currentWork = workQueue.poll(); } if (currentWork != null) { final SynchronousReceiveWork work = currentWork; LOGGER.atVerbose() .addKeyValue(WORK_ID_KEY, work.getId()) .addKeyValue("numberOfEvents", work.getNumberOfEvents()) .log("Current work updated."); work.start(); requestUpstream(work.getNumberOfEvents()); } return currentWork; } } /** * Adds additional credits upstream if {@code numberOfMessages} is greater than the number of {@code REQUESTED} * items. * * @param numberOfMessages Number of messages required downstream. */ private void requestUpstream(long numberOfMessages) { if (isTerminated()) { LOGGER.info("Cannot request more messages upstream. Subscriber is terminated."); return; } final Subscription subscription = UPSTREAM.get(this); if (subscription == null) { LOGGER.info("There is no upstream to request messages from."); return; } final long currentRequested = REQUESTED.get(this); final long difference = numberOfMessages - currentRequested; LOGGER.atVerbose() .addKeyValue(NUMBER_OF_REQUESTED_MESSAGES_KEY, currentRequested) .addKeyValue("numberOfMessages", numberOfMessages) .addKeyValue("difference", difference) .log("Requesting messages from upstream."); if (difference <= 0) { return; } Operators.addCap(REQUESTED, this, difference); subscription.request(difference); } @Override public void dispose() { super.dispose(); dispose("Upstream completed the receive work.", null); } private void dispose(String message, Throwable throwable) { super.dispose(); if (isDisposed.getAndSet(true)) { return; } synchronized (currentWorkLock) { if (currentWork != null) { currentWork.complete(message, throwable); currentWork = null; } SynchronousReceiveWork w = workQueue.poll(); while (w != null) { w.complete(message, throwable); w = workQueue.poll(); } } } /** * package-private method to check queue size. * * @return The current number of items in the queue. */ int getWorkQueueSize() { return this.workQueue.size(); } }
class SynchronousMessageSubscriber extends BaseSubscriber<ServiceBusReceivedMessage> { private static final ClientLogger LOGGER = new ClientLogger(SynchronousMessageSubscriber.class); private final AtomicBoolean isDisposed = new AtomicBoolean(); private final AtomicInteger wip = new AtomicInteger(); private final ConcurrentLinkedQueue<SynchronousReceiveWork> workQueue = new ConcurrentLinkedQueue<>(); private final ConcurrentLinkedDeque<ServiceBusReceivedMessage> bufferMessages = new ConcurrentLinkedDeque<>(); private final Object currentWorkLock = new Object(); private final ServiceBusReceiverAsyncClient asyncClient; private final boolean isPrefetchDisabled; private final Duration operationTimeout; private final boolean isReceiveDeleteMode; private volatile SynchronousReceiveWork currentWork; /** * The number of requested messages. */ private volatile long requested; private static final AtomicLongFieldUpdater<SynchronousMessageSubscriber> REQUESTED = AtomicLongFieldUpdater.newUpdater(SynchronousMessageSubscriber.class, "requested"); private volatile Subscription upstream; private static final AtomicReferenceFieldUpdater<SynchronousMessageSubscriber, Subscription> UPSTREAM = AtomicReferenceFieldUpdater.newUpdater(SynchronousMessageSubscriber.class, Subscription.class, "upstream"); /** * Creates a synchronous subscriber with some initial work to queue. * * * @param asyncClient Client to update disposition of messages. * @param isPrefetchDisabled Indicates if the prefetch is disabled. * @param operationTimeout Timeout to wait for operation to complete. * @param initialWork Initial work to queue. * * <p> * When {@code isPrefetchDisabled} is true, we release the messages those received during the timespan * between the last terminated downstream and the next active downstream. * </p> * * @throws NullPointerException if {@code initialWork} is null. * @throws IllegalArgumentException if {@code initialWork.getNumberOfEvents()} is less than 1. 
*/ SynchronousMessageSubscriber(ServiceBusReceiverAsyncClient asyncClient, SynchronousReceiveWork initialWork, boolean isPrefetchDisabled, Duration operationTimeout) { this.asyncClient = Objects.requireNonNull(asyncClient, "'asyncClient' cannot be null."); this.operationTimeout = Objects.requireNonNull(operationTimeout, "'operationTimeout' cannot be null."); this.workQueue.add(Objects.requireNonNull(initialWork, "'initialWork' cannot be null.")); this.isPrefetchDisabled = isPrefetchDisabled; this.isReceiveDeleteMode = asyncClient.getReceiverOptions().getReceiveMode() == ServiceBusReceiveMode.RECEIVE_AND_DELETE; if (initialWork.getNumberOfEvents() < 1) { throw LOGGER.logExceptionAsError(new IllegalArgumentException( "'numberOfEvents' cannot be less than 1. Actual: " + initialWork.getNumberOfEvents())); } Operators.addCap(REQUESTED, this, initialWork.getNumberOfEvents()); } /** * On an initial subscription, will take the first work item, and request that amount of work for it. * * @param subscription Subscription for upstream. */ @Override protected void hookOnSubscribe(Subscription subscription) { if (!Operators.setOnce(UPSTREAM, this, subscription)) { LOGGER.warning("This should only be subscribed to once. Ignoring subscription."); return; } getOrUpdateCurrentWork(); subscription.request(REQUESTED.get(this)); } /** * Publishes the event to the current {@link SynchronousReceiveWork}. If that work item is complete, will dispose of * the subscriber. * * @param message Event to publish. */ @Override protected void hookOnNext(ServiceBusReceivedMessage message) { if (isTerminated()) { Operators.onNextDropped(message, Context.empty()); } else { bufferMessages.add(message); drain(); } } /** * Queue the work to be picked up by drain loop. * * @param work to be queued. 
*/ void queueWork(SynchronousReceiveWork work) { Objects.requireNonNull(work, "'work' cannot be null"); workQueue.add(work); LoggingEventBuilder logBuilder = LOGGER.atVerbose() .addKeyValue(WORK_ID_KEY, work.getId()) .addKeyValue("numberOfEvents", work.getNumberOfEvents()) .addKeyValue("timeout", work.getTimeout()); if (workQueue.peek() == work && (currentWork == null || currentWork.isTerminal())) { logBuilder.log("First work in queue. Requesting upstream if needed."); getOrUpdateCurrentWork(); } else { logBuilder.log("Queuing receive work."); } if (UPSTREAM.get(this) != null) { drain(); } } /** * Drain the work, only one thread can be in this loop at a time. */ private void drain() { if (wip.getAndIncrement() != 0) { return; } int missed = 1; while (missed != 0) { try { drainQueue(); } finally { missed = wip.addAndGet(-missed); } } } /*** * Drain the queue using a lock on current work in progress. */ /** * {@inheritDoc} */ @Override protected void hookOnError(Throwable throwable) { dispose("Errors occurred upstream", throwable); } @Override protected void hookOnCancel() { this.dispose(); } private boolean isTerminated() { if (UPSTREAM.get(this) == Operators.cancelledSubscription()) { return true; } return isDisposed.get(); } /** * Gets the current work item if it is not terminal and cleans up any existing timeout operations. * * @return Gets or sets the next work item. Null if there are no work items currently. */ private SynchronousReceiveWork getOrUpdateCurrentWork() { synchronized (currentWorkLock) { if (currentWork != null && !currentWork.isTerminal()) { return currentWork; } currentWork = workQueue.poll(); while (currentWork != null && currentWork.isTerminal()) { LOGGER.atVerbose() .addKeyValue(WORK_ID_KEY, currentWork.getId()) .addKeyValue("numberOfEvents", currentWork.getNumberOfEvents()) .log("This work from queue is terminal. 
Skip it."); currentWork = workQueue.poll(); } if (currentWork != null) { final SynchronousReceiveWork work = currentWork; LOGGER.atVerbose() .addKeyValue(WORK_ID_KEY, work.getId()) .addKeyValue("numberOfEvents", work.getNumberOfEvents()) .log("Current work updated."); work.start(); requestUpstream(work.getNumberOfEvents()); } return currentWork; } } /** * Adds additional credits upstream if {@code numberOfMessages} is greater than the number of {@code REQUESTED} * items. * * @param numberOfMessages Number of messages required downstream. */ private void requestUpstream(long numberOfMessages) { if (isTerminated()) { LOGGER.info("Cannot request more messages upstream. Subscriber is terminated."); return; } final Subscription subscription = UPSTREAM.get(this); if (subscription == null) { LOGGER.info("There is no upstream to request messages from."); return; } final long currentRequested = REQUESTED.get(this); final long difference = numberOfMessages - currentRequested; LOGGER.atVerbose() .addKeyValue(NUMBER_OF_REQUESTED_MESSAGES_KEY, currentRequested) .addKeyValue("numberOfMessages", numberOfMessages) .addKeyValue("difference", difference) .log("Requesting messages from upstream."); if (difference <= 0) { return; } Operators.addCap(REQUESTED, this, difference); subscription.request(difference); } @Override public void dispose() { super.dispose(); dispose("Upstream completed the receive work.", null); } private void dispose(String message, Throwable throwable) { super.dispose(); if (isDisposed.getAndSet(true)) { return; } synchronized (currentWorkLock) { if (currentWork != null) { currentWork.complete(message, throwable); currentWork = null; } SynchronousReceiveWork w = workQueue.poll(); while (w != null) { w.complete(message, throwable); w = workQueue.poll(); } } } /** * package-private method to check queue size. * * @return The current number of items in the queue. */ int getWorkQueueSize() { return this.workQueue.size(); } }
Kun suggest move this check into check current downtream, that's more clear.
/**
 * Drains buffered messages into the current synchronous work item. Only one thread runs this at a
 * time (callers gate entry through the {@code wip} counter in {@code drain()}).
 */
private void drainQueue() {
    if (isTerminated()) {
        return;
    }
    long numberRequested = REQUESTED.get(this);
    boolean isEmpty = bufferMessages.isEmpty();
    SynchronousReceiveWork currentDownstream = null;
    // Outer loop: keep draining while downstream demand remains and the buffer has messages.
    while (numberRequested != 0L && !isEmpty) {
        if (isTerminated()) {
            break;
        }
        long numberConsumed = 0L;
        // Inner loop: satisfy up to numberRequested messages from the buffer.
        while (numberRequested != numberConsumed) {
            if (isEmpty || isTerminated()) {
                break;
            }
            final ServiceBusReceivedMessage message = bufferMessages.poll();
            boolean isEmitted = false;
            // Retry emission: the current work item may have terminated (timed out, completed)
            // between iterations, so re-resolve it until the message is emitted or no work exists.
            while (!isEmitted) {
                currentDownstream = getOrUpdateCurrentWork();
                if (currentDownstream == null) {
                    break;
                }
                isEmitted = currentDownstream.emitNext(message);
            }
            if (!isEmitted) {
                // No active downstream took the message. With prefetch disabled in PEEK_LOCK mode,
                // release the message back to the service so its lock is not held needlessly;
                // otherwise re-buffer it at the head so ordering is preserved for the next work item.
                if (isPrefetchDisabled
                    && asyncClient.getReceiverOptions().getReceiveMode() == ServiceBusReceiveMode.PEEK_LOCK) {
                    asyncClient.release(message).subscribe(__ -> { },
                        error -> LOGGER.atWarning()
                            .addKeyValue(LOCK_TOKEN_KEY, message.getLockToken())
                            .log("Couldn't release the message.", error),
                        () -> LOGGER.atVerbose()
                            .addKeyValue(LOCK_TOKEN_KEY, message.getLockToken())
                            .log("Message successfully released."));
                } else {
                    bufferMessages.addFirst(message);
                    break;
                }
            }
            numberConsumed++;
            isEmpty = bufferMessages.isEmpty();
        }
        // Long.MAX_VALUE means "unbounded demand"; decrementing would corrupt that sentinel.
        final long requestedMessages = REQUESTED.get(this);
        if (requestedMessages != Long.MAX_VALUE) {
            numberRequested = REQUESTED.addAndGet(this, -numberConsumed);
        }
        // If nothing was consumed and there is no work item to serve, exit to avoid spinning.
        currentDownstream = getOrUpdateCurrentWork();
        if (numberConsumed == 0 && currentDownstream == null) {
            break;
        }
    }
    if (numberRequested == 0L) {
        LOGGER.atVerbose()
            .log("Current work is completed. Schedule next work.");
        getOrUpdateCurrentWork();
    }
}
}
/**
 * Drains buffered messages into the current synchronous work item. Only one thread runs this at a
 * time (callers gate entry through the {@code wip} counter in {@code drain()}).
 */
private void drainQueue() {
    if (isTerminated()) {
        return;
    }
    long numberRequested = REQUESTED.get(this);
    boolean isEmpty = bufferMessages.isEmpty();
    SynchronousReceiveWork currentDownstream = null;
    // Outer loop: keep draining while downstream demand remains and the buffer has messages.
    while (numberRequested != 0L && !isEmpty) {
        if (isTerminated()) {
            break;
        }
        long numberConsumed = 0L;
        // Inner loop: satisfy up to numberRequested messages from the buffer.
        while (numberRequested != numberConsumed) {
            if (isEmpty || isTerminated()) {
                break;
            }
            final ServiceBusReceivedMessage message = bufferMessages.poll();
            boolean isEmitted = false;
            // Retry emission: the current work item may have terminated (timed out, completed)
            // between iterations, so re-resolve it until the message is emitted or no work exists.
            while (!isEmitted) {
                currentDownstream = getOrUpdateCurrentWork();
                // In RECEIVE_AND_DELETE mode a message with no downstream must not be dropped or
                // released (it is already deleted at the service), so re-buffer it and stop draining.
                if (currentDownstream == null && isReceiveDeleteMode) {
                    bufferMessages.addFirst(message);
                    return;
                }
                if (currentDownstream == null) {
                    break;
                }
                isEmitted = currentDownstream.emitNext(message);
            }
            if (!isEmitted) {
                // No active downstream took the message. With prefetch disabled, release it back
                // to the service so its lock is not held needlessly; otherwise re-buffer it at the
                // head so ordering is preserved for the next work item.
                if (isPrefetchDisabled) {
                    asyncClient.release(message).subscribe(__ -> { },
                        error -> LOGGER.atWarning()
                            .addKeyValue(LOCK_TOKEN_KEY, message.getLockToken())
                            .log("Couldn't release the message.", error),
                        () -> LOGGER.atVerbose()
                            .addKeyValue(LOCK_TOKEN_KEY, message.getLockToken())
                            .log("Message successfully released."));
                } else {
                    bufferMessages.addFirst(message);
                    break;
                }
            }
            numberConsumed++;
            isEmpty = bufferMessages.isEmpty();
        }
        // Long.MAX_VALUE means "unbounded demand"; decrementing would corrupt that sentinel.
        final long requestedMessages = REQUESTED.get(this);
        if (requestedMessages != Long.MAX_VALUE) {
            numberRequested = REQUESTED.addAndGet(this, -numberConsumed);
        }
    }
    if (numberRequested == 0L) {
        LOGGER.atVerbose()
            .log("Current work is completed. Schedule next work.");
        getOrUpdateCurrentWork();
    }
}
class SynchronousMessageSubscriber extends BaseSubscriber<ServiceBusReceivedMessage> { private static final ClientLogger LOGGER = new ClientLogger(SynchronousMessageSubscriber.class); private final AtomicBoolean isDisposed = new AtomicBoolean(); private final AtomicInteger wip = new AtomicInteger(); private final ConcurrentLinkedQueue<SynchronousReceiveWork> workQueue = new ConcurrentLinkedQueue<>(); private final ConcurrentLinkedDeque<ServiceBusReceivedMessage> bufferMessages = new ConcurrentLinkedDeque<>(); private final Object currentWorkLock = new Object(); private final ServiceBusReceiverAsyncClient asyncClient; private final boolean isPrefetchDisabled; private final Duration operationTimeout; private volatile SynchronousReceiveWork currentWork; /** * The number of requested messages. */ private volatile long requested; private static final AtomicLongFieldUpdater<SynchronousMessageSubscriber> REQUESTED = AtomicLongFieldUpdater.newUpdater(SynchronousMessageSubscriber.class, "requested"); private volatile Subscription upstream; private static final AtomicReferenceFieldUpdater<SynchronousMessageSubscriber, Subscription> UPSTREAM = AtomicReferenceFieldUpdater.newUpdater(SynchronousMessageSubscriber.class, Subscription.class, "upstream"); /** * Creates a synchronous subscriber with some initial work to queue. * * * @param asyncClient Client to update disposition of messages. * @param isPrefetchDisabled Indicates if the prefetch is disabled. * @param operationTimeout Timeout to wait for operation to complete. * @param initialWork Initial work to queue. * * <p> * When {@code isPrefetchDisabled} is true, we release the messages those received during the timespan * between the last terminated downstream and the next active downstream. * </p> * * @throws NullPointerException if {@code initialWork} is null. * @throws IllegalArgumentException if {@code initialWork.getNumberOfEvents()} is less than 1. 
*/ SynchronousMessageSubscriber(ServiceBusReceiverAsyncClient asyncClient, SynchronousReceiveWork initialWork, boolean isPrefetchDisabled, Duration operationTimeout) { this.asyncClient = Objects.requireNonNull(asyncClient, "'asyncClient' cannot be null."); this.operationTimeout = Objects.requireNonNull(operationTimeout, "'operationTimeout' cannot be null."); this.workQueue.add(Objects.requireNonNull(initialWork, "'initialWork' cannot be null.")); this.isPrefetchDisabled = isPrefetchDisabled; if (initialWork.getNumberOfEvents() < 1) { throw LOGGER.logExceptionAsError(new IllegalArgumentException( "'numberOfEvents' cannot be less than 1. Actual: " + initialWork.getNumberOfEvents())); } Operators.addCap(REQUESTED, this, initialWork.getNumberOfEvents()); } /** * On an initial subscription, will take the first work item, and request that amount of work for it. * * @param subscription Subscription for upstream. */ @Override protected void hookOnSubscribe(Subscription subscription) { if (!Operators.setOnce(UPSTREAM, this, subscription)) { LOGGER.warning("This should only be subscribed to once. Ignoring subscription."); return; } getOrUpdateCurrentWork(); subscription.request(REQUESTED.get(this)); } /** * Publishes the event to the current {@link SynchronousReceiveWork}. If that work item is complete, will dispose of * the subscriber. * * @param message Event to publish. */ @Override protected void hookOnNext(ServiceBusReceivedMessage message) { if (isTerminated()) { Operators.onNextDropped(message, Context.empty()); } else { bufferMessages.add(message); drain(); } } /** * Queue the work to be picked up by drain loop. * * @param work to be queued. 
*/ void queueWork(SynchronousReceiveWork work) { Objects.requireNonNull(work, "'work' cannot be null"); workQueue.add(work); LoggingEventBuilder logBuilder = LOGGER.atVerbose() .addKeyValue(WORK_ID_KEY, work.getId()) .addKeyValue("numberOfEvents", work.getNumberOfEvents()) .addKeyValue("timeout", work.getTimeout()); if (workQueue.peek() == work && (currentWork == null || currentWork.isTerminal())) { logBuilder.log("First work in queue. Requesting upstream if needed."); getOrUpdateCurrentWork(); } else { logBuilder.log("Queuing receive work."); } if (UPSTREAM.get(this) != null) { drain(); } } /** * Drain the work, only one thread can be in this loop at a time. */ private void drain() { if (wip.getAndIncrement() != 0) { return; } int missed = 1; while (missed != 0) { try { drainQueue(); } finally { missed = wip.addAndGet(-missed); } } } /*** * Drain the queue using a lock on current work in progress. */ /** * {@inheritDoc} */ @Override protected void hookOnError(Throwable throwable) { dispose("Errors occurred upstream", throwable); } @Override protected void hookOnCancel() { this.dispose(); } private boolean isTerminated() { if (UPSTREAM.get(this) == Operators.cancelledSubscription()) { return true; } return isDisposed.get(); } /** * Gets the current work item if it is not terminal and cleans up any existing timeout operations. * * @return Gets or sets the next work item. Null if there are no work items currently. */ private SynchronousReceiveWork getOrUpdateCurrentWork() { synchronized (currentWorkLock) { if (currentWork != null && !currentWork.isTerminal()) { return currentWork; } currentWork = workQueue.poll(); while (currentWork != null && currentWork.isTerminal()) { LOGGER.atVerbose() .addKeyValue(WORK_ID_KEY, currentWork.getId()) .addKeyValue("numberOfEvents", currentWork.getNumberOfEvents()) .log("This work from queue is terminal. 
Skip it."); currentWork = workQueue.poll(); } if (currentWork != null) { final SynchronousReceiveWork work = currentWork; LOGGER.atVerbose() .addKeyValue(WORK_ID_KEY, work.getId()) .addKeyValue("numberOfEvents", work.getNumberOfEvents()) .log("Current work updated."); work.start(); requestUpstream(work.getNumberOfEvents()); } return currentWork; } } /** * Adds additional credits upstream if {@code numberOfMessages} is greater than the number of {@code REQUESTED} * items. * * @param numberOfMessages Number of messages required downstream. */ private void requestUpstream(long numberOfMessages) { if (isTerminated()) { LOGGER.info("Cannot request more messages upstream. Subscriber is terminated."); return; } final Subscription subscription = UPSTREAM.get(this); if (subscription == null) { LOGGER.info("There is no upstream to request messages from."); return; } final long currentRequested = REQUESTED.get(this); final long difference = numberOfMessages - currentRequested; LOGGER.atVerbose() .addKeyValue(NUMBER_OF_REQUESTED_MESSAGES_KEY, currentRequested) .addKeyValue("numberOfMessages", numberOfMessages) .addKeyValue("difference", difference) .log("Requesting messages from upstream."); if (difference <= 0) { return; } Operators.addCap(REQUESTED, this, difference); subscription.request(difference); } @Override public void dispose() { super.dispose(); dispose("Upstream completed the receive work.", null); } private void dispose(String message, Throwable throwable) { super.dispose(); if (isDisposed.getAndSet(true)) { return; } synchronized (currentWorkLock) { if (currentWork != null) { currentWork.complete(message, throwable); currentWork = null; } SynchronousReceiveWork w = workQueue.poll(); while (w != null) { w.complete(message, throwable); w = workQueue.poll(); } } } /** * package-private method to check queue size. * * @return The current number of items in the queue. */ int getWorkQueueSize() { return this.workQueue.size(); } }
class SynchronousMessageSubscriber extends BaseSubscriber<ServiceBusReceivedMessage> { private static final ClientLogger LOGGER = new ClientLogger(SynchronousMessageSubscriber.class); private final AtomicBoolean isDisposed = new AtomicBoolean(); private final AtomicInteger wip = new AtomicInteger(); private final ConcurrentLinkedQueue<SynchronousReceiveWork> workQueue = new ConcurrentLinkedQueue<>(); private final ConcurrentLinkedDeque<ServiceBusReceivedMessage> bufferMessages = new ConcurrentLinkedDeque<>(); private final Object currentWorkLock = new Object(); private final ServiceBusReceiverAsyncClient asyncClient; private final boolean isPrefetchDisabled; private final Duration operationTimeout; private final boolean isReceiveDeleteMode; private volatile SynchronousReceiveWork currentWork; /** * The number of requested messages. */ private volatile long requested; private static final AtomicLongFieldUpdater<SynchronousMessageSubscriber> REQUESTED = AtomicLongFieldUpdater.newUpdater(SynchronousMessageSubscriber.class, "requested"); private volatile Subscription upstream; private static final AtomicReferenceFieldUpdater<SynchronousMessageSubscriber, Subscription> UPSTREAM = AtomicReferenceFieldUpdater.newUpdater(SynchronousMessageSubscriber.class, Subscription.class, "upstream"); /** * Creates a synchronous subscriber with some initial work to queue. * * * @param asyncClient Client to update disposition of messages. * @param isPrefetchDisabled Indicates if the prefetch is disabled. * @param operationTimeout Timeout to wait for operation to complete. * @param initialWork Initial work to queue. * * <p> * When {@code isPrefetchDisabled} is true, we release the messages those received during the timespan * between the last terminated downstream and the next active downstream. * </p> * * @throws NullPointerException if {@code initialWork} is null. * @throws IllegalArgumentException if {@code initialWork.getNumberOfEvents()} is less than 1. 
*/ SynchronousMessageSubscriber(ServiceBusReceiverAsyncClient asyncClient, SynchronousReceiveWork initialWork, boolean isPrefetchDisabled, Duration operationTimeout) { this.asyncClient = Objects.requireNonNull(asyncClient, "'asyncClient' cannot be null."); this.operationTimeout = Objects.requireNonNull(operationTimeout, "'operationTimeout' cannot be null."); this.workQueue.add(Objects.requireNonNull(initialWork, "'initialWork' cannot be null.")); this.isPrefetchDisabled = isPrefetchDisabled; this.isReceiveDeleteMode = asyncClient.getReceiverOptions().getReceiveMode() == ServiceBusReceiveMode.RECEIVE_AND_DELETE; if (initialWork.getNumberOfEvents() < 1) { throw LOGGER.logExceptionAsError(new IllegalArgumentException( "'numberOfEvents' cannot be less than 1. Actual: " + initialWork.getNumberOfEvents())); } Operators.addCap(REQUESTED, this, initialWork.getNumberOfEvents()); } /** * On an initial subscription, will take the first work item, and request that amount of work for it. * * @param subscription Subscription for upstream. */ @Override protected void hookOnSubscribe(Subscription subscription) { if (!Operators.setOnce(UPSTREAM, this, subscription)) { LOGGER.warning("This should only be subscribed to once. Ignoring subscription."); return; } getOrUpdateCurrentWork(); subscription.request(REQUESTED.get(this)); } /** * Publishes the event to the current {@link SynchronousReceiveWork}. If that work item is complete, will dispose of * the subscriber. * * @param message Event to publish. */ @Override protected void hookOnNext(ServiceBusReceivedMessage message) { if (isTerminated()) { Operators.onNextDropped(message, Context.empty()); } else { bufferMessages.add(message); drain(); } } /** * Queue the work to be picked up by drain loop. * * @param work to be queued. 
*/ void queueWork(SynchronousReceiveWork work) { Objects.requireNonNull(work, "'work' cannot be null"); workQueue.add(work); LoggingEventBuilder logBuilder = LOGGER.atVerbose() .addKeyValue(WORK_ID_KEY, work.getId()) .addKeyValue("numberOfEvents", work.getNumberOfEvents()) .addKeyValue("timeout", work.getTimeout()); if (workQueue.peek() == work && (currentWork == null || currentWork.isTerminal())) { logBuilder.log("First work in queue. Requesting upstream if needed."); getOrUpdateCurrentWork(); } else { logBuilder.log("Queuing receive work."); } if (UPSTREAM.get(this) != null) { drain(); } } /** * Drain the work, only one thread can be in this loop at a time. */ private void drain() { if (wip.getAndIncrement() != 0) { return; } int missed = 1; while (missed != 0) { try { drainQueue(); } finally { missed = wip.addAndGet(-missed); } } } /*** * Drain the queue using a lock on current work in progress. */ /** * {@inheritDoc} */ @Override protected void hookOnError(Throwable throwable) { dispose("Errors occurred upstream", throwable); } @Override protected void hookOnCancel() { this.dispose(); } private boolean isTerminated() { if (UPSTREAM.get(this) == Operators.cancelledSubscription()) { return true; } return isDisposed.get(); } /** * Gets the current work item if it is not terminal and cleans up any existing timeout operations. * * @return Gets or sets the next work item. Null if there are no work items currently. */ private SynchronousReceiveWork getOrUpdateCurrentWork() { synchronized (currentWorkLock) { if (currentWork != null && !currentWork.isTerminal()) { return currentWork; } currentWork = workQueue.poll(); while (currentWork != null && currentWork.isTerminal()) { LOGGER.atVerbose() .addKeyValue(WORK_ID_KEY, currentWork.getId()) .addKeyValue("numberOfEvents", currentWork.getNumberOfEvents()) .log("This work from queue is terminal. 
Skip it."); currentWork = workQueue.poll(); } if (currentWork != null) { final SynchronousReceiveWork work = currentWork; LOGGER.atVerbose() .addKeyValue(WORK_ID_KEY, work.getId()) .addKeyValue("numberOfEvents", work.getNumberOfEvents()) .log("Current work updated."); work.start(); requestUpstream(work.getNumberOfEvents()); } return currentWork; } } /** * Adds additional credits upstream if {@code numberOfMessages} is greater than the number of {@code REQUESTED} * items. * * @param numberOfMessages Number of messages required downstream. */ private void requestUpstream(long numberOfMessages) { if (isTerminated()) { LOGGER.info("Cannot request more messages upstream. Subscriber is terminated."); return; } final Subscription subscription = UPSTREAM.get(this); if (subscription == null) { LOGGER.info("There is no upstream to request messages from."); return; } final long currentRequested = REQUESTED.get(this); final long difference = numberOfMessages - currentRequested; LOGGER.atVerbose() .addKeyValue(NUMBER_OF_REQUESTED_MESSAGES_KEY, currentRequested) .addKeyValue("numberOfMessages", numberOfMessages) .addKeyValue("difference", difference) .log("Requesting messages from upstream."); if (difference <= 0) { return; } Operators.addCap(REQUESTED, this, difference); subscription.request(difference); } @Override public void dispose() { super.dispose(); dispose("Upstream completed the receive work.", null); } private void dispose(String message, Throwable throwable) { super.dispose(); if (isDisposed.getAndSet(true)) { return; } synchronized (currentWorkLock) { if (currentWork != null) { currentWork.complete(message, throwable); currentWork = null; } SynchronousReceiveWork w = workQueue.poll(); while (w != null) { w.complete(message, throwable); w = workQueue.poll(); } } } /** * package-private method to check queue size. * * @return The current number of items in the queue. */ int getWorkQueueSize() { return this.workQueue.size(); } }
Why do we need a try-catch at all?
/**
 * Converts an implementation-model {@code HandleItem} into the public {@link HandleItem},
 * URL-decoding the path when it is marked as encoded.
 *
 * @param handleItem the implementation-model handle to convert.
 * @return the equivalent public model.
 * @throws IllegalArgumentException if the path cannot be decoded.
 */
public static HandleItem transformHandleItem(com.azure.storage.file.share.implementation.models.HandleItem handleItem) {
    try {
        HandleItem publicItem = new HandleItem();
        publicItem.setHandleId(handleItem.getHandleId());
        publicItem.setPath(decodeName(handleItem.getPath()));
        publicItem.setSessionId(handleItem.getSessionId());
        publicItem.setClientIp(handleItem.getClientIp());
        publicItem.setFileId(handleItem.getFileId());
        publicItem.setParentId(handleItem.getParentId());
        publicItem.setLastReconnectTime(handleItem.getLastReconnectTime());
        publicItem.setOpenTime(handleItem.getOpenTime());
        return publicItem;
    } catch (UnsupportedEncodingException e) {
        throw LOGGER.logExceptionAsError(new IllegalArgumentException(e));
    }
}
try {
/**
 * Converts an implementation-model {@code HandleItem} into the public {@link HandleItem},
 * URL-decoding the path when it is marked as encoded.
 *
 * @param handleItem the implementation-model handle to convert.
 * @return the equivalent public model.
 */
public static HandleItem transformHandleItem(com.azure.storage.file.share.implementation.models.HandleItem handleItem) {
    HandleItem publicItem = new HandleItem();
    publicItem.setHandleId(handleItem.getHandleId());
    publicItem.setPath(decodeName(handleItem.getPath()));
    publicItem.setSessionId(handleItem.getSessionId());
    publicItem.setClientIp(handleItem.getClientIp());
    publicItem.setFileId(handleItem.getFileId());
    publicItem.setParentId(handleItem.getParentId());
    publicItem.setLastReconnectTime(handleItem.getLastReconnectTime());
    publicItem.setOpenTime(handleItem.getOpenTime());
    return publicItem;
}
class ModelHelper { private static final SerializerAdapter SERIALIZER = JacksonAdapter.createDefaultSerializerAdapter(); private static final ClientLogger LOGGER = new ClientLogger(ModelHelper.class); private static final long MAX_FILE_PUT_RANGE_BYTES = 4 * Constants.MB; private static final int FILE_DEFAULT_NUMBER_OF_BUFFERS = 8; /** * Fills in default values for a ParallelTransferOptions where no value has been set. This will construct a new * object for safety. * * @param other The options to fill in defaults. * @return An object with defaults filled in for null values in the original. */ public static ParallelTransferOptions populateAndApplyDefaults(ParallelTransferOptions other) { other = other == null ? new ParallelTransferOptions() : other; if (other.getBlockSizeLong() != null) { StorageImplUtils.assertInBounds("ParallelTransferOptions.blockSize", other.getBlockSizeLong(), 1, MAX_FILE_PUT_RANGE_BYTES); } if (other.getMaxSingleUploadSizeLong() != null) { StorageImplUtils.assertInBounds("ParallelTransferOptions.maxSingleUploadSize", other.getMaxSingleUploadSizeLong(), 1, MAX_FILE_PUT_RANGE_BYTES); } Long blockSize = other.getBlockSizeLong(); if (blockSize == null) { blockSize = MAX_FILE_PUT_RANGE_BYTES; } Integer maxConcurrency = other.getMaxConcurrency(); if (maxConcurrency == null) { maxConcurrency = FILE_DEFAULT_NUMBER_OF_BUFFERS; } Long maxSingleUploadSize = other.getMaxSingleUploadSizeLong(); if (maxSingleUploadSize == null) { maxSingleUploadSize = MAX_FILE_PUT_RANGE_BYTES; } return new ParallelTransferOptions() .setBlockSizeLong(blockSize) .setMaxConcurrency(maxConcurrency) .setProgressListener(other.getProgressListener()) .setMaxSingleUploadSizeLong(maxSingleUploadSize); } /** * Converts an internal type to a public type. 
* * @param option {@link ShareSnapshotsDeleteOptionType} * @return {@link DeleteSnapshotsOptionType} */ public static DeleteSnapshotsOptionType toDeleteSnapshotsOptionType(ShareSnapshotsDeleteOptionType option) { if (option == null) { return null; } switch (option) { case INCLUDE: return DeleteSnapshotsOptionType.INCLUDE; case INCLUDE_WITH_LEASED: return DeleteSnapshotsOptionType.INCLUDE_LEASED; default: throw LOGGER.logExceptionAsError(new IllegalArgumentException("Invalid " + option.getClass())); } } /** * Transforms {@link ShareItemInternal} into a public {@link ShareItem}. * * @param shareItemInternal {@link ShareItemInternal} * @return {@link ShareItem} */ public static ShareItem populateShareItem(ShareItemInternal shareItemInternal) { ShareItem item = new ShareItem(); item.setName(shareItemInternal.getName()); item.setSnapshot(shareItemInternal.getSnapshot()); item.setDeleted(shareItemInternal.isDeleted()); item.setVersion(shareItemInternal.getVersion()); item.setProperties(populateShareProperties(shareItemInternal.getProperties())); item.setMetadata(shareItemInternal.getMetadata()); return item; } /** * Transforms {@link SharePropertiesInternal} into a public {@link ShareProperties}. 
* * @param sharePropertiesInternal {@link SharePropertiesInternal} * @return {@link ShareProperties} */ public static ShareProperties populateShareProperties(SharePropertiesInternal sharePropertiesInternal) { ShareProperties properties = new ShareProperties(); properties.setLastModified(sharePropertiesInternal.getLastModified()); properties.setETag(sharePropertiesInternal.getETag()); properties.setQuota(sharePropertiesInternal.getQuota()); properties.setProvisionedIops(sharePropertiesInternal.getProvisionedIops()); properties.setProvisionedIngressMBps(sharePropertiesInternal.getProvisionedIngressMBps()); properties.setProvisionedEgressMBps(sharePropertiesInternal.getProvisionedEgressMBps()); properties.setNextAllowedQuotaDowngradeTime(sharePropertiesInternal.getNextAllowedQuotaDowngradeTime()); properties.setDeletedTime(sharePropertiesInternal.getDeletedTime()); properties.setRemainingRetentionDays(sharePropertiesInternal.getRemainingRetentionDays()); properties.setAccessTier(sharePropertiesInternal.getAccessTier()); properties.setAccessTierChangeTime(sharePropertiesInternal.getAccessTierChangeTime()); properties.setAccessTierTransitionState(sharePropertiesInternal.getAccessTierTransitionState()); properties.setLeaseStatus(sharePropertiesInternal.getLeaseStatus()); properties.setLeaseState(sharePropertiesInternal.getLeaseState()); properties.setLeaseDuration(sharePropertiesInternal.getLeaseDuration()); properties.setProtocols(parseShareProtocols(sharePropertiesInternal.getEnabledProtocols())); properties.setRootSquash(sharePropertiesInternal.getRootSquash()); properties.setMetadata(sharePropertiesInternal.getMetadata()); properties.setProvisionedBandwidthMiBps(sharePropertiesInternal.getProvisionedBandwidthMiBps()); return properties; } /** * Parses a {@code String} into a {@code ShareProtocols}. Unrecognized protocols will be ignored. * * @param str The string to parse. * @return A {@code ShareProtocols} represented by the string. 
*/ public static ShareProtocols parseShareProtocols(String str) { if (str == null) { return null; } ShareProtocols protocols = new ShareProtocols(); for (String s : str.split(",")) { switch (s) { case Constants.HeaderConstants.SMB_PROTOCOL: protocols.setSmbEnabled(true); break; case Constants.HeaderConstants.NFS_PROTOCOL: protocols.setNfsEnabled(true); break; default: } } return protocols; } public static ServicesListSharesSegmentHeaders transformListSharesHeaders(HttpHeaders headers) { if (headers == null) { return null; } try { return SERIALIZER.deserialize(headers, ServicesListSharesSegmentHeaders.class); } catch (IOException e) { throw LOGGER.logExceptionAsError(new RuntimeException(e)); } } public static ShareFileDownloadHeaders transformFileDownloadHeaders(HttpHeaders headers) { if (headers == null) { return null; } try { return SERIALIZER.deserialize(headers, ShareFileDownloadHeaders.class); } catch (IOException e) { throw LOGGER.logExceptionAsError(new RuntimeException(e)); } } public static String getETag(HttpHeaders headers) { return headers.getValue("ETag"); } public static ShareFileItemProperties transformFileProperty(FileProperty property) { if (property == null) { return null; } return new InternalShareFileItemProperties(property.getCreationTime(), property.getLastAccessTime(), property.getLastWriteTime(), property.getChangeTime(), property.getLastModified(), property.getEtag()); } public static List<HandleItem> transformHandleItems(List<com.azure.storage.file.share.implementation.models.HandleItem> handleItems) { List<HandleItem> result = new ArrayList<>(); handleItems.forEach(item -> { result.add(transformHandleItem(item)); }); return result; } public static String decodeName(StringEncoded stringEncoded) throws UnsupportedEncodingException { if (stringEncoded.isEncoded() != null && stringEncoded.isEncoded()) { return URLDecoder.decode(stringEncoded.getContent(), StandardCharsets.UTF_8.toString()); } else { return stringEncoded.getContent(); } } }
class ModelHelper { private static final SerializerAdapter SERIALIZER = JacksonAdapter.createDefaultSerializerAdapter(); private static final ClientLogger LOGGER = new ClientLogger(ModelHelper.class); private static final long MAX_FILE_PUT_RANGE_BYTES = 4 * Constants.MB; private static final int FILE_DEFAULT_NUMBER_OF_BUFFERS = 8; /** * Fills in default values for a ParallelTransferOptions where no value has been set. This will construct a new * object for safety. * * @param other The options to fill in defaults. * @return An object with defaults filled in for null values in the original. */ public static ParallelTransferOptions populateAndApplyDefaults(ParallelTransferOptions other) { other = other == null ? new ParallelTransferOptions() : other; if (other.getBlockSizeLong() != null) { StorageImplUtils.assertInBounds("ParallelTransferOptions.blockSize", other.getBlockSizeLong(), 1, MAX_FILE_PUT_RANGE_BYTES); } if (other.getMaxSingleUploadSizeLong() != null) { StorageImplUtils.assertInBounds("ParallelTransferOptions.maxSingleUploadSize", other.getMaxSingleUploadSizeLong(), 1, MAX_FILE_PUT_RANGE_BYTES); } Long blockSize = other.getBlockSizeLong(); if (blockSize == null) { blockSize = MAX_FILE_PUT_RANGE_BYTES; } Integer maxConcurrency = other.getMaxConcurrency(); if (maxConcurrency == null) { maxConcurrency = FILE_DEFAULT_NUMBER_OF_BUFFERS; } Long maxSingleUploadSize = other.getMaxSingleUploadSizeLong(); if (maxSingleUploadSize == null) { maxSingleUploadSize = MAX_FILE_PUT_RANGE_BYTES; } return new ParallelTransferOptions() .setBlockSizeLong(blockSize) .setMaxConcurrency(maxConcurrency) .setProgressListener(other.getProgressListener()) .setMaxSingleUploadSizeLong(maxSingleUploadSize); } /** * Converts an internal type to a public type. 
* * @param option {@link ShareSnapshotsDeleteOptionType} * @return {@link DeleteSnapshotsOptionType} */ public static DeleteSnapshotsOptionType toDeleteSnapshotsOptionType(ShareSnapshotsDeleteOptionType option) { if (option == null) { return null; } switch (option) { case INCLUDE: return DeleteSnapshotsOptionType.INCLUDE; case INCLUDE_WITH_LEASED: return DeleteSnapshotsOptionType.INCLUDE_LEASED; default: throw LOGGER.logExceptionAsError(new IllegalArgumentException("Invalid " + option.getClass())); } } /** * Transforms {@link ShareItemInternal} into a public {@link ShareItem}. * * @param shareItemInternal {@link ShareItemInternal} * @return {@link ShareItem} */ public static ShareItem populateShareItem(ShareItemInternal shareItemInternal) { ShareItem item = new ShareItem(); item.setName(shareItemInternal.getName()); item.setSnapshot(shareItemInternal.getSnapshot()); item.setDeleted(shareItemInternal.isDeleted()); item.setVersion(shareItemInternal.getVersion()); item.setProperties(populateShareProperties(shareItemInternal.getProperties())); item.setMetadata(shareItemInternal.getMetadata()); return item; } /** * Transforms {@link SharePropertiesInternal} into a public {@link ShareProperties}. 
* * @param sharePropertiesInternal {@link SharePropertiesInternal} * @return {@link ShareProperties} */ public static ShareProperties populateShareProperties(SharePropertiesInternal sharePropertiesInternal) { ShareProperties properties = new ShareProperties(); properties.setLastModified(sharePropertiesInternal.getLastModified()); properties.setETag(sharePropertiesInternal.getETag()); properties.setQuota(sharePropertiesInternal.getQuota()); properties.setProvisionedIops(sharePropertiesInternal.getProvisionedIops()); properties.setProvisionedIngressMBps(sharePropertiesInternal.getProvisionedIngressMBps()); properties.setProvisionedEgressMBps(sharePropertiesInternal.getProvisionedEgressMBps()); properties.setNextAllowedQuotaDowngradeTime(sharePropertiesInternal.getNextAllowedQuotaDowngradeTime()); properties.setDeletedTime(sharePropertiesInternal.getDeletedTime()); properties.setRemainingRetentionDays(sharePropertiesInternal.getRemainingRetentionDays()); properties.setAccessTier(sharePropertiesInternal.getAccessTier()); properties.setAccessTierChangeTime(sharePropertiesInternal.getAccessTierChangeTime()); properties.setAccessTierTransitionState(sharePropertiesInternal.getAccessTierTransitionState()); properties.setLeaseStatus(sharePropertiesInternal.getLeaseStatus()); properties.setLeaseState(sharePropertiesInternal.getLeaseState()); properties.setLeaseDuration(sharePropertiesInternal.getLeaseDuration()); properties.setProtocols(parseShareProtocols(sharePropertiesInternal.getEnabledProtocols())); properties.setRootSquash(sharePropertiesInternal.getRootSquash()); properties.setMetadata(sharePropertiesInternal.getMetadata()); properties.setProvisionedBandwidthMiBps(sharePropertiesInternal.getProvisionedBandwidthMiBps()); return properties; } /** * Parses a {@code String} into a {@code ShareProtocols}. Unrecognized protocols will be ignored. * * @param str The string to parse. * @return A {@code ShareProtocols} represented by the string. 
*/ public static ShareProtocols parseShareProtocols(String str) { if (str == null) { return null; } ShareProtocols protocols = new ShareProtocols(); for (String s : str.split(",")) { switch (s) { case Constants.HeaderConstants.SMB_PROTOCOL: protocols.setSmbEnabled(true); break; case Constants.HeaderConstants.NFS_PROTOCOL: protocols.setNfsEnabled(true); break; default: } } return protocols; } public static ServicesListSharesSegmentHeaders transformListSharesHeaders(HttpHeaders headers) { if (headers == null) { return null; } try { return SERIALIZER.deserialize(headers, ServicesListSharesSegmentHeaders.class); } catch (IOException e) { throw LOGGER.logExceptionAsError(new RuntimeException(e)); } } public static ShareFileDownloadHeaders transformFileDownloadHeaders(HttpHeaders headers) { if (headers == null) { return null; } try { return SERIALIZER.deserialize(headers, ShareFileDownloadHeaders.class); } catch (IOException e) { throw LOGGER.logExceptionAsError(new RuntimeException(e)); } } public static String getETag(HttpHeaders headers) { return headers.getValue("ETag"); } public static ShareFileItemProperties transformFileProperty(FileProperty property) { if (property == null) { return null; } return new InternalShareFileItemProperties(property.getCreationTime(), property.getLastAccessTime(), property.getLastWriteTime(), property.getChangeTime(), property.getLastModified(), property.getEtag()); } public static List<HandleItem> transformHandleItems(List<com.azure.storage.file.share.implementation.models.HandleItem> handleItems) { List<HandleItem> result = new ArrayList<>(); handleItems.forEach(item -> { result.add(transformHandleItem(item)); }); return result; } public static String decodeName(StringEncoded stringEncoded) { if (stringEncoded.isEncoded() != null && stringEncoded.isEncoded()) { try { return URLDecoder.decode(stringEncoded.getContent(), StandardCharsets.UTF_8.toString()); } catch (UnsupportedEncodingException e) { throw LOGGER.logExceptionAsError(new 
IllegalArgumentException(e)); } } else { return stringEncoded.getContent(); } } }
I'd go with `IllegalStateException` here instead of `IllegalArgumentException`, UTF-8 should exist everywhere and if not we have a big problem 😃
public static String decodeName(StringEncoded stringEncoded) { if (stringEncoded.isEncoded() != null && stringEncoded.isEncoded()) { try { return URLDecoder.decode(stringEncoded.getContent(), StandardCharsets.UTF_8.toString()); } catch (UnsupportedEncodingException e) { throw LOGGER.logExceptionAsError(new IllegalArgumentException(e)); } } else { return stringEncoded.getContent(); } }
throw LOGGER.logExceptionAsError(new IllegalArgumentException(e));
public static String decodeName(StringEncoded stringEncoded) { if (stringEncoded.isEncoded() != null && stringEncoded.isEncoded()) { try { return URLDecoder.decode(stringEncoded.getContent(), StandardCharsets.UTF_8.toString()); } catch (UnsupportedEncodingException e) { throw LOGGER.logExceptionAsError(new IllegalArgumentException(e)); } } else { return stringEncoded.getContent(); } }
class ModelHelper { private static final SerializerAdapter SERIALIZER = JacksonAdapter.createDefaultSerializerAdapter(); private static final ClientLogger LOGGER = new ClientLogger(ModelHelper.class); private static final long MAX_FILE_PUT_RANGE_BYTES = 4 * Constants.MB; private static final int FILE_DEFAULT_NUMBER_OF_BUFFERS = 8; /** * Fills in default values for a ParallelTransferOptions where no value has been set. This will construct a new * object for safety. * * @param other The options to fill in defaults. * @return An object with defaults filled in for null values in the original. */ public static ParallelTransferOptions populateAndApplyDefaults(ParallelTransferOptions other) { other = other == null ? new ParallelTransferOptions() : other; if (other.getBlockSizeLong() != null) { StorageImplUtils.assertInBounds("ParallelTransferOptions.blockSize", other.getBlockSizeLong(), 1, MAX_FILE_PUT_RANGE_BYTES); } if (other.getMaxSingleUploadSizeLong() != null) { StorageImplUtils.assertInBounds("ParallelTransferOptions.maxSingleUploadSize", other.getMaxSingleUploadSizeLong(), 1, MAX_FILE_PUT_RANGE_BYTES); } Long blockSize = other.getBlockSizeLong(); if (blockSize == null) { blockSize = MAX_FILE_PUT_RANGE_BYTES; } Integer maxConcurrency = other.getMaxConcurrency(); if (maxConcurrency == null) { maxConcurrency = FILE_DEFAULT_NUMBER_OF_BUFFERS; } Long maxSingleUploadSize = other.getMaxSingleUploadSizeLong(); if (maxSingleUploadSize == null) { maxSingleUploadSize = MAX_FILE_PUT_RANGE_BYTES; } return new ParallelTransferOptions() .setBlockSizeLong(blockSize) .setMaxConcurrency(maxConcurrency) .setProgressListener(other.getProgressListener()) .setMaxSingleUploadSizeLong(maxSingleUploadSize); } /** * Converts an internal type to a public type. 
* * @param option {@link ShareSnapshotsDeleteOptionType} * @return {@link DeleteSnapshotsOptionType} */ public static DeleteSnapshotsOptionType toDeleteSnapshotsOptionType(ShareSnapshotsDeleteOptionType option) { if (option == null) { return null; } switch (option) { case INCLUDE: return DeleteSnapshotsOptionType.INCLUDE; case INCLUDE_WITH_LEASED: return DeleteSnapshotsOptionType.INCLUDE_LEASED; default: throw LOGGER.logExceptionAsError(new IllegalArgumentException("Invalid " + option.getClass())); } } /** * Transforms {@link ShareItemInternal} into a public {@link ShareItem}. * * @param shareItemInternal {@link ShareItemInternal} * @return {@link ShareItem} */ public static ShareItem populateShareItem(ShareItemInternal shareItemInternal) { ShareItem item = new ShareItem(); item.setName(shareItemInternal.getName()); item.setSnapshot(shareItemInternal.getSnapshot()); item.setDeleted(shareItemInternal.isDeleted()); item.setVersion(shareItemInternal.getVersion()); item.setProperties(populateShareProperties(shareItemInternal.getProperties())); item.setMetadata(shareItemInternal.getMetadata()); return item; } /** * Transforms {@link SharePropertiesInternal} into a public {@link ShareProperties}. 
* * @param sharePropertiesInternal {@link SharePropertiesInternal} * @return {@link ShareProperties} */ public static ShareProperties populateShareProperties(SharePropertiesInternal sharePropertiesInternal) { ShareProperties properties = new ShareProperties(); properties.setLastModified(sharePropertiesInternal.getLastModified()); properties.setETag(sharePropertiesInternal.getETag()); properties.setQuota(sharePropertiesInternal.getQuota()); properties.setProvisionedIops(sharePropertiesInternal.getProvisionedIops()); properties.setProvisionedIngressMBps(sharePropertiesInternal.getProvisionedIngressMBps()); properties.setProvisionedEgressMBps(sharePropertiesInternal.getProvisionedEgressMBps()); properties.setNextAllowedQuotaDowngradeTime(sharePropertiesInternal.getNextAllowedQuotaDowngradeTime()); properties.setDeletedTime(sharePropertiesInternal.getDeletedTime()); properties.setRemainingRetentionDays(sharePropertiesInternal.getRemainingRetentionDays()); properties.setAccessTier(sharePropertiesInternal.getAccessTier()); properties.setAccessTierChangeTime(sharePropertiesInternal.getAccessTierChangeTime()); properties.setAccessTierTransitionState(sharePropertiesInternal.getAccessTierTransitionState()); properties.setLeaseStatus(sharePropertiesInternal.getLeaseStatus()); properties.setLeaseState(sharePropertiesInternal.getLeaseState()); properties.setLeaseDuration(sharePropertiesInternal.getLeaseDuration()); properties.setProtocols(parseShareProtocols(sharePropertiesInternal.getEnabledProtocols())); properties.setRootSquash(sharePropertiesInternal.getRootSquash()); properties.setMetadata(sharePropertiesInternal.getMetadata()); properties.setProvisionedBandwidthMiBps(sharePropertiesInternal.getProvisionedBandwidthMiBps()); return properties; } /** * Parses a {@code String} into a {@code ShareProtocols}. Unrecognized protocols will be ignored. * * @param str The string to parse. * @return A {@code ShareProtocols} represented by the string. 
*/ public static ShareProtocols parseShareProtocols(String str) { if (str == null) { return null; } ShareProtocols protocols = new ShareProtocols(); for (String s : str.split(",")) { switch (s) { case Constants.HeaderConstants.SMB_PROTOCOL: protocols.setSmbEnabled(true); break; case Constants.HeaderConstants.NFS_PROTOCOL: protocols.setNfsEnabled(true); break; default: } } return protocols; } public static ServicesListSharesSegmentHeaders transformListSharesHeaders(HttpHeaders headers) { if (headers == null) { return null; } try { return SERIALIZER.deserialize(headers, ServicesListSharesSegmentHeaders.class); } catch (IOException e) { throw LOGGER.logExceptionAsError(new RuntimeException(e)); } } public static ShareFileDownloadHeaders transformFileDownloadHeaders(HttpHeaders headers) { if (headers == null) { return null; } try { return SERIALIZER.deserialize(headers, ShareFileDownloadHeaders.class); } catch (IOException e) { throw LOGGER.logExceptionAsError(new RuntimeException(e)); } } public static String getETag(HttpHeaders headers) { return headers.getValue("ETag"); } public static ShareFileItemProperties transformFileProperty(FileProperty property) { if (property == null) { return null; } return new InternalShareFileItemProperties(property.getCreationTime(), property.getLastAccessTime(), property.getLastWriteTime(), property.getChangeTime(), property.getLastModified(), property.getEtag()); } public static HandleItem transformHandleItem(com.azure.storage.file.share.implementation.models.HandleItem handleItem) { return new HandleItem() .setHandleId(handleItem.getHandleId()) .setPath(decodeName(handleItem.getPath())) .setSessionId(handleItem.getSessionId()) .setClientIp(handleItem.getClientIp()) .setFileId(handleItem.getFileId()) .setParentId(handleItem.getParentId()) .setLastReconnectTime(handleItem.getLastReconnectTime()) .setOpenTime(handleItem.getOpenTime()); } public static List<HandleItem> 
transformHandleItems(List<com.azure.storage.file.share.implementation.models.HandleItem> handleItems) { List<HandleItem> result = new ArrayList<>(); handleItems.forEach(item -> { result.add(transformHandleItem(item)); }); return result; } }
class ModelHelper { private static final SerializerAdapter SERIALIZER = JacksonAdapter.createDefaultSerializerAdapter(); private static final ClientLogger LOGGER = new ClientLogger(ModelHelper.class); private static final long MAX_FILE_PUT_RANGE_BYTES = 4 * Constants.MB; private static final int FILE_DEFAULT_NUMBER_OF_BUFFERS = 8; /** * Fills in default values for a ParallelTransferOptions where no value has been set. This will construct a new * object for safety. * * @param other The options to fill in defaults. * @return An object with defaults filled in for null values in the original. */ public static ParallelTransferOptions populateAndApplyDefaults(ParallelTransferOptions other) { other = other == null ? new ParallelTransferOptions() : other; if (other.getBlockSizeLong() != null) { StorageImplUtils.assertInBounds("ParallelTransferOptions.blockSize", other.getBlockSizeLong(), 1, MAX_FILE_PUT_RANGE_BYTES); } if (other.getMaxSingleUploadSizeLong() != null) { StorageImplUtils.assertInBounds("ParallelTransferOptions.maxSingleUploadSize", other.getMaxSingleUploadSizeLong(), 1, MAX_FILE_PUT_RANGE_BYTES); } Long blockSize = other.getBlockSizeLong(); if (blockSize == null) { blockSize = MAX_FILE_PUT_RANGE_BYTES; } Integer maxConcurrency = other.getMaxConcurrency(); if (maxConcurrency == null) { maxConcurrency = FILE_DEFAULT_NUMBER_OF_BUFFERS; } Long maxSingleUploadSize = other.getMaxSingleUploadSizeLong(); if (maxSingleUploadSize == null) { maxSingleUploadSize = MAX_FILE_PUT_RANGE_BYTES; } return new ParallelTransferOptions() .setBlockSizeLong(blockSize) .setMaxConcurrency(maxConcurrency) .setProgressListener(other.getProgressListener()) .setMaxSingleUploadSizeLong(maxSingleUploadSize); } /** * Converts an internal type to a public type. 
* * @param option {@link ShareSnapshotsDeleteOptionType} * @return {@link DeleteSnapshotsOptionType} */ public static DeleteSnapshotsOptionType toDeleteSnapshotsOptionType(ShareSnapshotsDeleteOptionType option) { if (option == null) { return null; } switch (option) { case INCLUDE: return DeleteSnapshotsOptionType.INCLUDE; case INCLUDE_WITH_LEASED: return DeleteSnapshotsOptionType.INCLUDE_LEASED; default: throw LOGGER.logExceptionAsError(new IllegalArgumentException("Invalid " + option.getClass())); } } /** * Transforms {@link ShareItemInternal} into a public {@link ShareItem}. * * @param shareItemInternal {@link ShareItemInternal} * @return {@link ShareItem} */ public static ShareItem populateShareItem(ShareItemInternal shareItemInternal) { ShareItem item = new ShareItem(); item.setName(shareItemInternal.getName()); item.setSnapshot(shareItemInternal.getSnapshot()); item.setDeleted(shareItemInternal.isDeleted()); item.setVersion(shareItemInternal.getVersion()); item.setProperties(populateShareProperties(shareItemInternal.getProperties())); item.setMetadata(shareItemInternal.getMetadata()); return item; } /** * Transforms {@link SharePropertiesInternal} into a public {@link ShareProperties}. 
* * @param sharePropertiesInternal {@link SharePropertiesInternal} * @return {@link ShareProperties} */ public static ShareProperties populateShareProperties(SharePropertiesInternal sharePropertiesInternal) { ShareProperties properties = new ShareProperties(); properties.setLastModified(sharePropertiesInternal.getLastModified()); properties.setETag(sharePropertiesInternal.getETag()); properties.setQuota(sharePropertiesInternal.getQuota()); properties.setProvisionedIops(sharePropertiesInternal.getProvisionedIops()); properties.setProvisionedIngressMBps(sharePropertiesInternal.getProvisionedIngressMBps()); properties.setProvisionedEgressMBps(sharePropertiesInternal.getProvisionedEgressMBps()); properties.setNextAllowedQuotaDowngradeTime(sharePropertiesInternal.getNextAllowedQuotaDowngradeTime()); properties.setDeletedTime(sharePropertiesInternal.getDeletedTime()); properties.setRemainingRetentionDays(sharePropertiesInternal.getRemainingRetentionDays()); properties.setAccessTier(sharePropertiesInternal.getAccessTier()); properties.setAccessTierChangeTime(sharePropertiesInternal.getAccessTierChangeTime()); properties.setAccessTierTransitionState(sharePropertiesInternal.getAccessTierTransitionState()); properties.setLeaseStatus(sharePropertiesInternal.getLeaseStatus()); properties.setLeaseState(sharePropertiesInternal.getLeaseState()); properties.setLeaseDuration(sharePropertiesInternal.getLeaseDuration()); properties.setProtocols(parseShareProtocols(sharePropertiesInternal.getEnabledProtocols())); properties.setRootSquash(sharePropertiesInternal.getRootSquash()); properties.setMetadata(sharePropertiesInternal.getMetadata()); properties.setProvisionedBandwidthMiBps(sharePropertiesInternal.getProvisionedBandwidthMiBps()); return properties; } /** * Parses a {@code String} into a {@code ShareProtocols}. Unrecognized protocols will be ignored. * * @param str The string to parse. * @return A {@code ShareProtocols} represented by the string. 
*/ public static ShareProtocols parseShareProtocols(String str) { if (str == null) { return null; } ShareProtocols protocols = new ShareProtocols(); for (String s : str.split(",")) { switch (s) { case Constants.HeaderConstants.SMB_PROTOCOL: protocols.setSmbEnabled(true); break; case Constants.HeaderConstants.NFS_PROTOCOL: protocols.setNfsEnabled(true); break; default: } } return protocols; } public static ServicesListSharesSegmentHeaders transformListSharesHeaders(HttpHeaders headers) { if (headers == null) { return null; } try { return SERIALIZER.deserialize(headers, ServicesListSharesSegmentHeaders.class); } catch (IOException e) { throw LOGGER.logExceptionAsError(new RuntimeException(e)); } } public static ShareFileDownloadHeaders transformFileDownloadHeaders(HttpHeaders headers) { if (headers == null) { return null; } try { return SERIALIZER.deserialize(headers, ShareFileDownloadHeaders.class); } catch (IOException e) { throw LOGGER.logExceptionAsError(new RuntimeException(e)); } } public static String getETag(HttpHeaders headers) { return headers.getValue("ETag"); } public static ShareFileItemProperties transformFileProperty(FileProperty property) { if (property == null) { return null; } return new InternalShareFileItemProperties(property.getCreationTime(), property.getLastAccessTime(), property.getLastWriteTime(), property.getChangeTime(), property.getLastModified(), property.getEtag()); } public static HandleItem transformHandleItem(com.azure.storage.file.share.implementation.models.HandleItem handleItem) { return new HandleItem() .setHandleId(handleItem.getHandleId()) .setPath(decodeName(handleItem.getPath())) .setSessionId(handleItem.getSessionId()) .setClientIp(handleItem.getClientIp()) .setFileId(handleItem.getFileId()) .setParentId(handleItem.getParentId()) .setLastReconnectTime(handleItem.getLastReconnectTime()) .setOpenTime(handleItem.getOpenTime()); } public static List<HandleItem> 
transformHandleItems(List<com.azure.storage.file.share.implementation.models.HandleItem> handleItems) { List<HandleItem> result = new ArrayList<>(); handleItems.forEach(item -> { result.add(transformHandleItem(item)); }); return result; } }
I'd use `StandardCharsets.UTF_8.name()` instead of `.toString()`
public static String decodeName(StringEncoded stringEncoded) { if (stringEncoded.isEncoded() != null && stringEncoded.isEncoded()) { try { return URLDecoder.decode(stringEncoded.getContent(), StandardCharsets.UTF_8.toString()); } catch (UnsupportedEncodingException e) { throw LOGGER.logExceptionAsError(new IllegalArgumentException(e)); } } else { return stringEncoded.getContent(); } }
return URLDecoder.decode(stringEncoded.getContent(), StandardCharsets.UTF_8.toString());
public static String decodeName(StringEncoded stringEncoded) { if (stringEncoded.isEncoded() != null && stringEncoded.isEncoded()) { try { return URLDecoder.decode(stringEncoded.getContent(), StandardCharsets.UTF_8.toString()); } catch (UnsupportedEncodingException e) { throw LOGGER.logExceptionAsError(new IllegalArgumentException(e)); } } else { return stringEncoded.getContent(); } }
class ModelHelper { private static final SerializerAdapter SERIALIZER = JacksonAdapter.createDefaultSerializerAdapter(); private static final ClientLogger LOGGER = new ClientLogger(ModelHelper.class); private static final long MAX_FILE_PUT_RANGE_BYTES = 4 * Constants.MB; private static final int FILE_DEFAULT_NUMBER_OF_BUFFERS = 8; /** * Fills in default values for a ParallelTransferOptions where no value has been set. This will construct a new * object for safety. * * @param other The options to fill in defaults. * @return An object with defaults filled in for null values in the original. */ public static ParallelTransferOptions populateAndApplyDefaults(ParallelTransferOptions other) { other = other == null ? new ParallelTransferOptions() : other; if (other.getBlockSizeLong() != null) { StorageImplUtils.assertInBounds("ParallelTransferOptions.blockSize", other.getBlockSizeLong(), 1, MAX_FILE_PUT_RANGE_BYTES); } if (other.getMaxSingleUploadSizeLong() != null) { StorageImplUtils.assertInBounds("ParallelTransferOptions.maxSingleUploadSize", other.getMaxSingleUploadSizeLong(), 1, MAX_FILE_PUT_RANGE_BYTES); } Long blockSize = other.getBlockSizeLong(); if (blockSize == null) { blockSize = MAX_FILE_PUT_RANGE_BYTES; } Integer maxConcurrency = other.getMaxConcurrency(); if (maxConcurrency == null) { maxConcurrency = FILE_DEFAULT_NUMBER_OF_BUFFERS; } Long maxSingleUploadSize = other.getMaxSingleUploadSizeLong(); if (maxSingleUploadSize == null) { maxSingleUploadSize = MAX_FILE_PUT_RANGE_BYTES; } return new ParallelTransferOptions() .setBlockSizeLong(blockSize) .setMaxConcurrency(maxConcurrency) .setProgressListener(other.getProgressListener()) .setMaxSingleUploadSizeLong(maxSingleUploadSize); } /** * Converts an internal type to a public type. 
* * @param option {@link ShareSnapshotsDeleteOptionType} * @return {@link DeleteSnapshotsOptionType} */ public static DeleteSnapshotsOptionType toDeleteSnapshotsOptionType(ShareSnapshotsDeleteOptionType option) { if (option == null) { return null; } switch (option) { case INCLUDE: return DeleteSnapshotsOptionType.INCLUDE; case INCLUDE_WITH_LEASED: return DeleteSnapshotsOptionType.INCLUDE_LEASED; default: throw LOGGER.logExceptionAsError(new IllegalArgumentException("Invalid " + option.getClass())); } } /** * Transforms {@link ShareItemInternal} into a public {@link ShareItem}. * * @param shareItemInternal {@link ShareItemInternal} * @return {@link ShareItem} */ public static ShareItem populateShareItem(ShareItemInternal shareItemInternal) { ShareItem item = new ShareItem(); item.setName(shareItemInternal.getName()); item.setSnapshot(shareItemInternal.getSnapshot()); item.setDeleted(shareItemInternal.isDeleted()); item.setVersion(shareItemInternal.getVersion()); item.setProperties(populateShareProperties(shareItemInternal.getProperties())); item.setMetadata(shareItemInternal.getMetadata()); return item; } /** * Transforms {@link SharePropertiesInternal} into a public {@link ShareProperties}. 
* * @param sharePropertiesInternal {@link SharePropertiesInternal} * @return {@link ShareProperties} */ public static ShareProperties populateShareProperties(SharePropertiesInternal sharePropertiesInternal) { ShareProperties properties = new ShareProperties(); properties.setLastModified(sharePropertiesInternal.getLastModified()); properties.setETag(sharePropertiesInternal.getETag()); properties.setQuota(sharePropertiesInternal.getQuota()); properties.setProvisionedIops(sharePropertiesInternal.getProvisionedIops()); properties.setProvisionedIngressMBps(sharePropertiesInternal.getProvisionedIngressMBps()); properties.setProvisionedEgressMBps(sharePropertiesInternal.getProvisionedEgressMBps()); properties.setNextAllowedQuotaDowngradeTime(sharePropertiesInternal.getNextAllowedQuotaDowngradeTime()); properties.setDeletedTime(sharePropertiesInternal.getDeletedTime()); properties.setRemainingRetentionDays(sharePropertiesInternal.getRemainingRetentionDays()); properties.setAccessTier(sharePropertiesInternal.getAccessTier()); properties.setAccessTierChangeTime(sharePropertiesInternal.getAccessTierChangeTime()); properties.setAccessTierTransitionState(sharePropertiesInternal.getAccessTierTransitionState()); properties.setLeaseStatus(sharePropertiesInternal.getLeaseStatus()); properties.setLeaseState(sharePropertiesInternal.getLeaseState()); properties.setLeaseDuration(sharePropertiesInternal.getLeaseDuration()); properties.setProtocols(parseShareProtocols(sharePropertiesInternal.getEnabledProtocols())); properties.setRootSquash(sharePropertiesInternal.getRootSquash()); properties.setMetadata(sharePropertiesInternal.getMetadata()); properties.setProvisionedBandwidthMiBps(sharePropertiesInternal.getProvisionedBandwidthMiBps()); return properties; } /** * Parses a {@code String} into a {@code ShareProtocols}. Unrecognized protocols will be ignored. * * @param str The string to parse. * @return A {@code ShareProtocols} represented by the string. 
*/ public static ShareProtocols parseShareProtocols(String str) { if (str == null) { return null; } ShareProtocols protocols = new ShareProtocols(); for (String s : str.split(",")) { switch (s) { case Constants.HeaderConstants.SMB_PROTOCOL: protocols.setSmbEnabled(true); break; case Constants.HeaderConstants.NFS_PROTOCOL: protocols.setNfsEnabled(true); break; default: } } return protocols; } public static ServicesListSharesSegmentHeaders transformListSharesHeaders(HttpHeaders headers) { if (headers == null) { return null; } try { return SERIALIZER.deserialize(headers, ServicesListSharesSegmentHeaders.class); } catch (IOException e) { throw LOGGER.logExceptionAsError(new RuntimeException(e)); } } public static ShareFileDownloadHeaders transformFileDownloadHeaders(HttpHeaders headers) { if (headers == null) { return null; } try { return SERIALIZER.deserialize(headers, ShareFileDownloadHeaders.class); } catch (IOException e) { throw LOGGER.logExceptionAsError(new RuntimeException(e)); } } public static String getETag(HttpHeaders headers) { return headers.getValue("ETag"); } public static ShareFileItemProperties transformFileProperty(FileProperty property) { if (property == null) { return null; } return new InternalShareFileItemProperties(property.getCreationTime(), property.getLastAccessTime(), property.getLastWriteTime(), property.getChangeTime(), property.getLastModified(), property.getEtag()); } public static HandleItem transformHandleItem(com.azure.storage.file.share.implementation.models.HandleItem handleItem) { return new HandleItem() .setHandleId(handleItem.getHandleId()) .setPath(decodeName(handleItem.getPath())) .setSessionId(handleItem.getSessionId()) .setClientIp(handleItem.getClientIp()) .setFileId(handleItem.getFileId()) .setParentId(handleItem.getParentId()) .setLastReconnectTime(handleItem.getLastReconnectTime()) .setOpenTime(handleItem.getOpenTime()); } public static List<HandleItem> 
transformHandleItems(List<com.azure.storage.file.share.implementation.models.HandleItem> handleItems) { List<HandleItem> result = new ArrayList<>(); handleItems.forEach(item -> { result.add(transformHandleItem(item)); }); return result; } }
class ModelHelper { private static final SerializerAdapter SERIALIZER = JacksonAdapter.createDefaultSerializerAdapter(); private static final ClientLogger LOGGER = new ClientLogger(ModelHelper.class); private static final long MAX_FILE_PUT_RANGE_BYTES = 4 * Constants.MB; private static final int FILE_DEFAULT_NUMBER_OF_BUFFERS = 8; /** * Fills in default values for a ParallelTransferOptions where no value has been set. This will construct a new * object for safety. * * @param other The options to fill in defaults. * @return An object with defaults filled in for null values in the original. */ public static ParallelTransferOptions populateAndApplyDefaults(ParallelTransferOptions other) { other = other == null ? new ParallelTransferOptions() : other; if (other.getBlockSizeLong() != null) { StorageImplUtils.assertInBounds("ParallelTransferOptions.blockSize", other.getBlockSizeLong(), 1, MAX_FILE_PUT_RANGE_BYTES); } if (other.getMaxSingleUploadSizeLong() != null) { StorageImplUtils.assertInBounds("ParallelTransferOptions.maxSingleUploadSize", other.getMaxSingleUploadSizeLong(), 1, MAX_FILE_PUT_RANGE_BYTES); } Long blockSize = other.getBlockSizeLong(); if (blockSize == null) { blockSize = MAX_FILE_PUT_RANGE_BYTES; } Integer maxConcurrency = other.getMaxConcurrency(); if (maxConcurrency == null) { maxConcurrency = FILE_DEFAULT_NUMBER_OF_BUFFERS; } Long maxSingleUploadSize = other.getMaxSingleUploadSizeLong(); if (maxSingleUploadSize == null) { maxSingleUploadSize = MAX_FILE_PUT_RANGE_BYTES; } return new ParallelTransferOptions() .setBlockSizeLong(blockSize) .setMaxConcurrency(maxConcurrency) .setProgressListener(other.getProgressListener()) .setMaxSingleUploadSizeLong(maxSingleUploadSize); } /** * Converts an internal type to a public type. 
* * @param option {@link ShareSnapshotsDeleteOptionType} * @return {@link DeleteSnapshotsOptionType} */ public static DeleteSnapshotsOptionType toDeleteSnapshotsOptionType(ShareSnapshotsDeleteOptionType option) { if (option == null) { return null; } switch (option) { case INCLUDE: return DeleteSnapshotsOptionType.INCLUDE; case INCLUDE_WITH_LEASED: return DeleteSnapshotsOptionType.INCLUDE_LEASED; default: throw LOGGER.logExceptionAsError(new IllegalArgumentException("Invalid " + option.getClass())); } } /** * Transforms {@link ShareItemInternal} into a public {@link ShareItem}. * * @param shareItemInternal {@link ShareItemInternal} * @return {@link ShareItem} */ public static ShareItem populateShareItem(ShareItemInternal shareItemInternal) { ShareItem item = new ShareItem(); item.setName(shareItemInternal.getName()); item.setSnapshot(shareItemInternal.getSnapshot()); item.setDeleted(shareItemInternal.isDeleted()); item.setVersion(shareItemInternal.getVersion()); item.setProperties(populateShareProperties(shareItemInternal.getProperties())); item.setMetadata(shareItemInternal.getMetadata()); return item; } /** * Transforms {@link SharePropertiesInternal} into a public {@link ShareProperties}. 
* * @param sharePropertiesInternal {@link SharePropertiesInternal} * @return {@link ShareProperties} */ public static ShareProperties populateShareProperties(SharePropertiesInternal sharePropertiesInternal) { ShareProperties properties = new ShareProperties(); properties.setLastModified(sharePropertiesInternal.getLastModified()); properties.setETag(sharePropertiesInternal.getETag()); properties.setQuota(sharePropertiesInternal.getQuota()); properties.setProvisionedIops(sharePropertiesInternal.getProvisionedIops()); properties.setProvisionedIngressMBps(sharePropertiesInternal.getProvisionedIngressMBps()); properties.setProvisionedEgressMBps(sharePropertiesInternal.getProvisionedEgressMBps()); properties.setNextAllowedQuotaDowngradeTime(sharePropertiesInternal.getNextAllowedQuotaDowngradeTime()); properties.setDeletedTime(sharePropertiesInternal.getDeletedTime()); properties.setRemainingRetentionDays(sharePropertiesInternal.getRemainingRetentionDays()); properties.setAccessTier(sharePropertiesInternal.getAccessTier()); properties.setAccessTierChangeTime(sharePropertiesInternal.getAccessTierChangeTime()); properties.setAccessTierTransitionState(sharePropertiesInternal.getAccessTierTransitionState()); properties.setLeaseStatus(sharePropertiesInternal.getLeaseStatus()); properties.setLeaseState(sharePropertiesInternal.getLeaseState()); properties.setLeaseDuration(sharePropertiesInternal.getLeaseDuration()); properties.setProtocols(parseShareProtocols(sharePropertiesInternal.getEnabledProtocols())); properties.setRootSquash(sharePropertiesInternal.getRootSquash()); properties.setMetadata(sharePropertiesInternal.getMetadata()); properties.setProvisionedBandwidthMiBps(sharePropertiesInternal.getProvisionedBandwidthMiBps()); return properties; } /** * Parses a {@code String} into a {@code ShareProtocols}. Unrecognized protocols will be ignored. * * @param str The string to parse. * @return A {@code ShareProtocols} represented by the string. 
*/ public static ShareProtocols parseShareProtocols(String str) { if (str == null) { return null; } ShareProtocols protocols = new ShareProtocols(); for (String s : str.split(",")) { switch (s) { case Constants.HeaderConstants.SMB_PROTOCOL: protocols.setSmbEnabled(true); break; case Constants.HeaderConstants.NFS_PROTOCOL: protocols.setNfsEnabled(true); break; default: } } return protocols; } public static ServicesListSharesSegmentHeaders transformListSharesHeaders(HttpHeaders headers) { if (headers == null) { return null; } try { return SERIALIZER.deserialize(headers, ServicesListSharesSegmentHeaders.class); } catch (IOException e) { throw LOGGER.logExceptionAsError(new RuntimeException(e)); } } public static ShareFileDownloadHeaders transformFileDownloadHeaders(HttpHeaders headers) { if (headers == null) { return null; } try { return SERIALIZER.deserialize(headers, ShareFileDownloadHeaders.class); } catch (IOException e) { throw LOGGER.logExceptionAsError(new RuntimeException(e)); } } public static String getETag(HttpHeaders headers) { return headers.getValue("ETag"); } public static ShareFileItemProperties transformFileProperty(FileProperty property) { if (property == null) { return null; } return new InternalShareFileItemProperties(property.getCreationTime(), property.getLastAccessTime(), property.getLastWriteTime(), property.getChangeTime(), property.getLastModified(), property.getEtag()); } public static HandleItem transformHandleItem(com.azure.storage.file.share.implementation.models.HandleItem handleItem) { return new HandleItem() .setHandleId(handleItem.getHandleId()) .setPath(decodeName(handleItem.getPath())) .setSessionId(handleItem.getSessionId()) .setClientIp(handleItem.getClientIp()) .setFileId(handleItem.getFileId()) .setParentId(handleItem.getParentId()) .setLastReconnectTime(handleItem.getLastReconnectTime()) .setOpenTime(handleItem.getOpenTime()); } public static List<HandleItem> 
transformHandleItems(List<com.azure.storage.file.share.implementation.models.HandleItem> handleItems) { List<HandleItem> result = new ArrayList<>(); handleItems.forEach(item -> { result.add(transformHandleItem(item)); }); return result; } }
nit; not sure but if we declare` valueSupplier` as `final` does that help removing this `if` check?
private void updateCurrentValue(String attributesId, Checkpoint checkpoint) { if (checkpoint.getSequenceNumber() == null) { return; } CurrentValue valueSupplier; if (maxCapacityReached) { valueSupplier = seqNoSubscriptions.get(attributesId); } else { TelemetryAttributes attributes = getOrCreate(common, attributesId, checkpoint, null); if (attributes == null) { return; } valueSupplier = seqNoSubscriptions.computeIfAbsent(attributesId, a -> { AtomicReference<Long> lastSeqNo = new AtomicReference<>(); return new CurrentValue(lastSequenceNumber.registerCallback(() -> lastSeqNo.get(), attributes), lastSeqNo); }); } if (valueSupplier != null) { valueSupplier.set(checkpoint.getSequenceNumber()); } }
if (valueSupplier != null) {
private void updateCurrentValue(String attributesId, Checkpoint checkpoint) { if (checkpoint.getSequenceNumber() == null) { return; } final CurrentValue valueSupplier; if (maxCapacityReached) { valueSupplier = seqNoSubscriptions.get(attributesId); if (valueSupplier == null) { return; } } else { TelemetryAttributes attributes = getOrCreate(common, attributesId, checkpoint, null); if (attributes == null) { return; } valueSupplier = seqNoSubscriptions.computeIfAbsent(attributesId, a -> { AtomicReference<Long> lastSeqNo = new AtomicReference<>(); return new CurrentValue(lastSequenceNumber.registerCallback(() -> lastSeqNo.get(), attributes), lastSeqNo); }); } valueSupplier.set(checkpoint.getSequenceNumber()); }
class MetricsHelper { private static final ClientLogger LOGGER = new ClientLogger(MetricsHelper.class); private static final String ENTITY_NAME_KEY = "entityName"; private static final String HOSTNAME_KEY = "hostName"; private static final String PARTITION_ID_KEY = "partitionId"; private static final String CONSUMER_GROUP_KEY = "consumerGroup"; private static final String STATUS_KEY = "status"; private static final int MAX_ATTRIBUTES_SETS = 100; private static final String PROPERTIES_FILE = "azure-messaging-eventhubs-checkpointstore-blob.properties"; private static final String NAME_KEY = "name"; private static final String VERSION_KEY = "version"; private static final String LIBRARY_NAME; private static final String LIBRARY_VERSION; private static final String UNKNOWN = "UNKNOWN"; static { final Map<String, String> properties = CoreUtils.getProperties(PROPERTIES_FILE); LIBRARY_NAME = properties.getOrDefault(NAME_KEY, UNKNOWN); LIBRARY_VERSION = properties.getOrDefault(VERSION_KEY, UNKNOWN); } private final ConcurrentHashMap<String, TelemetryAttributes> common = new ConcurrentHashMap<>(); private final ConcurrentHashMap<String, TelemetryAttributes> checkpointFailure = new ConcurrentHashMap<>(); private final ConcurrentHashMap<String, TelemetryAttributes> checkpointSuccess = new ConcurrentHashMap<>(); private final ConcurrentHashMap<String, CurrentValue> seqNoSubscriptions = new ConcurrentHashMap<>(); private volatile boolean maxCapacityReached = false; private final Meter meter; private final LongGauge lastSequenceNumber; private final LongCounter checkpointCounter; private final boolean isEnabled; MetricsHelper(MetricsOptions metricsOptions, MeterProvider meterProvider) { if (areMetricsEnabled(metricsOptions)) { this.meter = meterProvider.createMeter(LIBRARY_NAME, LIBRARY_VERSION, metricsOptions); this.isEnabled = this.meter.isEnabled(); } else { this.isEnabled = false; this.meter = null; } if (isEnabled) { this.lastSequenceNumber = 
this.meter.createLongGauge("messaging.eventhubs.checkpoint.sequence_number", "Last successfully checkpointed sequence number.", "seqNo"); this.checkpointCounter = this.meter.createLongCounter("messaging.eventhubs.checkpoints", "Number of checkpoints.", null); } else { this.lastSequenceNumber = null; this.checkpointCounter = null; } } void reportCheckpoint(Checkpoint checkpoint, String attributesId, boolean success) { if (!isEnabled || !(lastSequenceNumber.isEnabled() && checkpointCounter.isEnabled())) { return; } if (!maxCapacityReached && (seqNoSubscriptions.size() >= MAX_ATTRIBUTES_SETS || common.size() >= MAX_ATTRIBUTES_SETS)) { LOGGER.error("Too many attribute combinations are reported for checkpoint metrics, ignoring any new dimensions."); maxCapacityReached = true; } if (lastSequenceNumber.isEnabled() && success) { updateCurrentValue(attributesId, checkpoint); } if (checkpointCounter.isEnabled()) { TelemetryAttributes attributes = null; if (success) { attributes = getOrCreate(checkpointSuccess, attributesId, checkpoint, "ok"); } else { attributes = getOrCreate(checkpointFailure, attributesId, checkpoint, "error"); } if (attributes != null) { checkpointCounter.add(1, attributes, Context.NONE); } } } private TelemetryAttributes getOrCreate(ConcurrentHashMap<String, TelemetryAttributes> source, String attributesId, Checkpoint checkpoint, String status) { if (maxCapacityReached) { return source.get(attributesId); } return source.computeIfAbsent(attributesId, i -> meter.createAttributes(createAttributes(checkpoint, status))); } private Map<String, Object> createAttributes(Checkpoint checkpoint, String status) { Map<String, Object> attributesMap = new HashMap<>(); attributesMap.put(HOSTNAME_KEY, checkpoint.getFullyQualifiedNamespace()); attributesMap.put(ENTITY_NAME_KEY, checkpoint.getEventHubName()); attributesMap.put(PARTITION_ID_KEY, checkpoint.getPartitionId()); attributesMap.put(CONSUMER_GROUP_KEY, checkpoint.getConsumerGroup()); if (status != null) { 
attributesMap.put(STATUS_KEY, status); } return attributesMap; } private static boolean areMetricsEnabled(MetricsOptions options) { if (options == null || options.isEnabled()) { return true; } return false; } private static class CurrentValue { private final AtomicReference<Long> lastSeqNo; private final AutoCloseable subscription; CurrentValue(AutoCloseable subscription, AtomicReference<Long> lastSeqNo) { this.subscription = subscription; this.lastSeqNo = lastSeqNo; } void set(long value) { lastSeqNo.set(value); } void close() { if (subscription != null) { try { subscription.close(); } catch (Exception e) { throw LOGGER.logThrowableAsWarning(new RuntimeException(e)); } } } } }
class MetricsHelper { private static final ClientLogger LOGGER = new ClientLogger(MetricsHelper.class); private static final String ENTITY_NAME_KEY = "entityName"; private static final String HOSTNAME_KEY = "hostName"; private static final String PARTITION_ID_KEY = "partitionId"; private static final String CONSUMER_GROUP_KEY = "consumerGroup"; private static final String STATUS_KEY = "status"; private static final int MAX_ATTRIBUTES_SETS = 100; private static final String PROPERTIES_FILE = "azure-messaging-eventhubs-checkpointstore-blob.properties"; private static final String NAME_KEY = "name"; private static final String VERSION_KEY = "version"; private static final String LIBRARY_NAME; private static final String LIBRARY_VERSION; private static final String UNKNOWN = "UNKNOWN"; static { final Map<String, String> properties = CoreUtils.getProperties(PROPERTIES_FILE); LIBRARY_NAME = properties.getOrDefault(NAME_KEY, UNKNOWN); LIBRARY_VERSION = properties.getOrDefault(VERSION_KEY, UNKNOWN); } private final ConcurrentHashMap<String, TelemetryAttributes> common = new ConcurrentHashMap<>(); private final ConcurrentHashMap<String, TelemetryAttributes> checkpointFailure = new ConcurrentHashMap<>(); private final ConcurrentHashMap<String, TelemetryAttributes> checkpointSuccess = new ConcurrentHashMap<>(); private final ConcurrentHashMap<String, CurrentValue> seqNoSubscriptions = new ConcurrentHashMap<>(); private volatile boolean maxCapacityReached = false; private final Meter meter; private final LongGauge lastSequenceNumber; private final LongCounter checkpointCounter; private final boolean isEnabled; MetricsHelper(MetricsOptions metricsOptions, MeterProvider meterProvider) { if (areMetricsEnabled(metricsOptions)) { this.meter = meterProvider.createMeter(LIBRARY_NAME, LIBRARY_VERSION, metricsOptions); this.isEnabled = this.meter.isEnabled(); } else { this.meter = null; this.isEnabled = false; } if (isEnabled) { this.lastSequenceNumber = 
this.meter.createLongGauge("messaging.eventhubs.checkpoint.sequence_number", "Last successfully checkpointed sequence number.", "seqNo"); this.checkpointCounter = this.meter.createLongCounter("messaging.eventhubs.checkpoints", "Number of checkpoints.", null); } else { this.lastSequenceNumber = null; this.checkpointCounter = null; } } void reportCheckpoint(Checkpoint checkpoint, String attributesId, boolean success) { if (!isEnabled || !(lastSequenceNumber.isEnabled() && checkpointCounter.isEnabled())) { return; } if (!maxCapacityReached && (seqNoSubscriptions.size() >= MAX_ATTRIBUTES_SETS || common.size() >= MAX_ATTRIBUTES_SETS)) { LOGGER.error("Too many attribute combinations are reported for checkpoint metrics, ignoring any new dimensions."); maxCapacityReached = true; } if (lastSequenceNumber.isEnabled() && success) { updateCurrentValue(attributesId, checkpoint); } if (checkpointCounter.isEnabled()) { TelemetryAttributes attributes = null; if (success) { attributes = getOrCreate(checkpointSuccess, attributesId, checkpoint, "ok"); } else { attributes = getOrCreate(checkpointFailure, attributesId, checkpoint, "error"); } if (attributes != null) { checkpointCounter.add(1, attributes, Context.NONE); } } } private TelemetryAttributes getOrCreate(ConcurrentHashMap<String, TelemetryAttributes> source, String attributesId, Checkpoint checkpoint, String status) { if (maxCapacityReached) { return source.get(attributesId); } return source.computeIfAbsent(attributesId, i -> meter.createAttributes(createAttributes(checkpoint, status))); } private Map<String, Object> createAttributes(Checkpoint checkpoint, String status) { Map<String, Object> attributesMap = new HashMap<>(5); attributesMap.put(HOSTNAME_KEY, checkpoint.getFullyQualifiedNamespace()); attributesMap.put(ENTITY_NAME_KEY, checkpoint.getEventHubName()); attributesMap.put(PARTITION_ID_KEY, checkpoint.getPartitionId()); attributesMap.put(CONSUMER_GROUP_KEY, checkpoint.getConsumerGroup()); if (status != null) { 
attributesMap.put(STATUS_KEY, status); } return attributesMap; } private static boolean areMetricsEnabled(MetricsOptions options) { if (options == null || options.isEnabled()) { return true; } return false; } private static class CurrentValue { private final AtomicReference<Long> lastSeqNo; private final AutoCloseable subscription; CurrentValue(AutoCloseable subscription, AtomicReference<Long> lastSeqNo) { this.subscription = subscription; this.lastSeqNo = lastSeqNo; } void set(long value) { lastSeqNo.set(value); } void close() { if (subscription != null) { try { subscription.close(); } catch (Exception e) { throw LOGGER.logThrowableAsWarning(new RuntimeException(e)); } } } } }
it could be that it's null if there are too many attributes, but I tried to make it a little bit more readable :)
private void updateCurrentValue(String attributesId, Checkpoint checkpoint) { if (checkpoint.getSequenceNumber() == null) { return; } CurrentValue valueSupplier; if (maxCapacityReached) { valueSupplier = seqNoSubscriptions.get(attributesId); } else { TelemetryAttributes attributes = getOrCreate(common, attributesId, checkpoint, null); if (attributes == null) { return; } valueSupplier = seqNoSubscriptions.computeIfAbsent(attributesId, a -> { AtomicReference<Long> lastSeqNo = new AtomicReference<>(); return new CurrentValue(lastSequenceNumber.registerCallback(() -> lastSeqNo.get(), attributes), lastSeqNo); }); } if (valueSupplier != null) { valueSupplier.set(checkpoint.getSequenceNumber()); } }
if (valueSupplier != null) {
private void updateCurrentValue(String attributesId, Checkpoint checkpoint) { if (checkpoint.getSequenceNumber() == null) { return; } final CurrentValue valueSupplier; if (maxCapacityReached) { valueSupplier = seqNoSubscriptions.get(attributesId); if (valueSupplier == null) { return; } } else { TelemetryAttributes attributes = getOrCreate(common, attributesId, checkpoint, null); if (attributes == null) { return; } valueSupplier = seqNoSubscriptions.computeIfAbsent(attributesId, a -> { AtomicReference<Long> lastSeqNo = new AtomicReference<>(); return new CurrentValue(lastSequenceNumber.registerCallback(() -> lastSeqNo.get(), attributes), lastSeqNo); }); } valueSupplier.set(checkpoint.getSequenceNumber()); }
class MetricsHelper { private static final ClientLogger LOGGER = new ClientLogger(MetricsHelper.class); private static final String ENTITY_NAME_KEY = "entityName"; private static final String HOSTNAME_KEY = "hostName"; private static final String PARTITION_ID_KEY = "partitionId"; private static final String CONSUMER_GROUP_KEY = "consumerGroup"; private static final String STATUS_KEY = "status"; private static final int MAX_ATTRIBUTES_SETS = 100; private static final String PROPERTIES_FILE = "azure-messaging-eventhubs-checkpointstore-blob.properties"; private static final String NAME_KEY = "name"; private static final String VERSION_KEY = "version"; private static final String LIBRARY_NAME; private static final String LIBRARY_VERSION; private static final String UNKNOWN = "UNKNOWN"; static { final Map<String, String> properties = CoreUtils.getProperties(PROPERTIES_FILE); LIBRARY_NAME = properties.getOrDefault(NAME_KEY, UNKNOWN); LIBRARY_VERSION = properties.getOrDefault(VERSION_KEY, UNKNOWN); } private final ConcurrentHashMap<String, TelemetryAttributes> common = new ConcurrentHashMap<>(); private final ConcurrentHashMap<String, TelemetryAttributes> checkpointFailure = new ConcurrentHashMap<>(); private final ConcurrentHashMap<String, TelemetryAttributes> checkpointSuccess = new ConcurrentHashMap<>(); private final ConcurrentHashMap<String, CurrentValue> seqNoSubscriptions = new ConcurrentHashMap<>(); private volatile boolean maxCapacityReached = false; private final Meter meter; private final LongGauge lastSequenceNumber; private final LongCounter checkpointCounter; private final boolean isEnabled; MetricsHelper(MetricsOptions metricsOptions, MeterProvider meterProvider) { if (areMetricsEnabled(metricsOptions)) { this.meter = meterProvider.createMeter(LIBRARY_NAME, LIBRARY_VERSION, metricsOptions); this.isEnabled = this.meter.isEnabled(); } else { this.isEnabled = false; this.meter = null; } if (isEnabled) { this.lastSequenceNumber = 
this.meter.createLongGauge("messaging.eventhubs.checkpoint.sequence_number", "Last successfully checkpointed sequence number.", "seqNo"); this.checkpointCounter = this.meter.createLongCounter("messaging.eventhubs.checkpoints", "Number of checkpoints.", null); } else { this.lastSequenceNumber = null; this.checkpointCounter = null; } } void reportCheckpoint(Checkpoint checkpoint, String attributesId, boolean success) { if (!isEnabled || !(lastSequenceNumber.isEnabled() && checkpointCounter.isEnabled())) { return; } if (!maxCapacityReached && (seqNoSubscriptions.size() >= MAX_ATTRIBUTES_SETS || common.size() >= MAX_ATTRIBUTES_SETS)) { LOGGER.error("Too many attribute combinations are reported for checkpoint metrics, ignoring any new dimensions."); maxCapacityReached = true; } if (lastSequenceNumber.isEnabled() && success) { updateCurrentValue(attributesId, checkpoint); } if (checkpointCounter.isEnabled()) { TelemetryAttributes attributes = null; if (success) { attributes = getOrCreate(checkpointSuccess, attributesId, checkpoint, "ok"); } else { attributes = getOrCreate(checkpointFailure, attributesId, checkpoint, "error"); } if (attributes != null) { checkpointCounter.add(1, attributes, Context.NONE); } } } private TelemetryAttributes getOrCreate(ConcurrentHashMap<String, TelemetryAttributes> source, String attributesId, Checkpoint checkpoint, String status) { if (maxCapacityReached) { return source.get(attributesId); } return source.computeIfAbsent(attributesId, i -> meter.createAttributes(createAttributes(checkpoint, status))); } private Map<String, Object> createAttributes(Checkpoint checkpoint, String status) { Map<String, Object> attributesMap = new HashMap<>(); attributesMap.put(HOSTNAME_KEY, checkpoint.getFullyQualifiedNamespace()); attributesMap.put(ENTITY_NAME_KEY, checkpoint.getEventHubName()); attributesMap.put(PARTITION_ID_KEY, checkpoint.getPartitionId()); attributesMap.put(CONSUMER_GROUP_KEY, checkpoint.getConsumerGroup()); if (status != null) { 
attributesMap.put(STATUS_KEY, status); } return attributesMap; } private static boolean areMetricsEnabled(MetricsOptions options) { if (options == null || options.isEnabled()) { return true; } return false; } private static class CurrentValue { private final AtomicReference<Long> lastSeqNo; private final AutoCloseable subscription; CurrentValue(AutoCloseable subscription, AtomicReference<Long> lastSeqNo) { this.subscription = subscription; this.lastSeqNo = lastSeqNo; } void set(long value) { lastSeqNo.set(value); } void close() { if (subscription != null) { try { subscription.close(); } catch (Exception e) { throw LOGGER.logThrowableAsWarning(new RuntimeException(e)); } } } } }
class MetricsHelper { private static final ClientLogger LOGGER = new ClientLogger(MetricsHelper.class); private static final String ENTITY_NAME_KEY = "entityName"; private static final String HOSTNAME_KEY = "hostName"; private static final String PARTITION_ID_KEY = "partitionId"; private static final String CONSUMER_GROUP_KEY = "consumerGroup"; private static final String STATUS_KEY = "status"; private static final int MAX_ATTRIBUTES_SETS = 100; private static final String PROPERTIES_FILE = "azure-messaging-eventhubs-checkpointstore-blob.properties"; private static final String NAME_KEY = "name"; private static final String VERSION_KEY = "version"; private static final String LIBRARY_NAME; private static final String LIBRARY_VERSION; private static final String UNKNOWN = "UNKNOWN"; static { final Map<String, String> properties = CoreUtils.getProperties(PROPERTIES_FILE); LIBRARY_NAME = properties.getOrDefault(NAME_KEY, UNKNOWN); LIBRARY_VERSION = properties.getOrDefault(VERSION_KEY, UNKNOWN); } private final ConcurrentHashMap<String, TelemetryAttributes> common = new ConcurrentHashMap<>(); private final ConcurrentHashMap<String, TelemetryAttributes> checkpointFailure = new ConcurrentHashMap<>(); private final ConcurrentHashMap<String, TelemetryAttributes> checkpointSuccess = new ConcurrentHashMap<>(); private final ConcurrentHashMap<String, CurrentValue> seqNoSubscriptions = new ConcurrentHashMap<>(); private volatile boolean maxCapacityReached = false; private final Meter meter; private final LongGauge lastSequenceNumber; private final LongCounter checkpointCounter; private final boolean isEnabled; MetricsHelper(MetricsOptions metricsOptions, MeterProvider meterProvider) { if (areMetricsEnabled(metricsOptions)) { this.meter = meterProvider.createMeter(LIBRARY_NAME, LIBRARY_VERSION, metricsOptions); this.isEnabled = this.meter.isEnabled(); } else { this.meter = null; this.isEnabled = false; } if (isEnabled) { this.lastSequenceNumber = 
this.meter.createLongGauge("messaging.eventhubs.checkpoint.sequence_number", "Last successfully checkpointed sequence number.", "seqNo"); this.checkpointCounter = this.meter.createLongCounter("messaging.eventhubs.checkpoints", "Number of checkpoints.", null); } else { this.lastSequenceNumber = null; this.checkpointCounter = null; } } void reportCheckpoint(Checkpoint checkpoint, String attributesId, boolean success) { if (!isEnabled || !(lastSequenceNumber.isEnabled() && checkpointCounter.isEnabled())) { return; } if (!maxCapacityReached && (seqNoSubscriptions.size() >= MAX_ATTRIBUTES_SETS || common.size() >= MAX_ATTRIBUTES_SETS)) { LOGGER.error("Too many attribute combinations are reported for checkpoint metrics, ignoring any new dimensions."); maxCapacityReached = true; } if (lastSequenceNumber.isEnabled() && success) { updateCurrentValue(attributesId, checkpoint); } if (checkpointCounter.isEnabled()) { TelemetryAttributes attributes = null; if (success) { attributes = getOrCreate(checkpointSuccess, attributesId, checkpoint, "ok"); } else { attributes = getOrCreate(checkpointFailure, attributesId, checkpoint, "error"); } if (attributes != null) { checkpointCounter.add(1, attributes, Context.NONE); } } } private TelemetryAttributes getOrCreate(ConcurrentHashMap<String, TelemetryAttributes> source, String attributesId, Checkpoint checkpoint, String status) { if (maxCapacityReached) { return source.get(attributesId); } return source.computeIfAbsent(attributesId, i -> meter.createAttributes(createAttributes(checkpoint, status))); } private Map<String, Object> createAttributes(Checkpoint checkpoint, String status) { Map<String, Object> attributesMap = new HashMap<>(5); attributesMap.put(HOSTNAME_KEY, checkpoint.getFullyQualifiedNamespace()); attributesMap.put(ENTITY_NAME_KEY, checkpoint.getEventHubName()); attributesMap.put(PARTITION_ID_KEY, checkpoint.getPartitionId()); attributesMap.put(CONSUMER_GROUP_KEY, checkpoint.getConsumerGroup()); if (status != null) { 
attributesMap.put(STATUS_KEY, status); } return attributesMap; } private static boolean areMetricsEnabled(MetricsOptions options) { if (options == null || options.isEnabled()) { return true; } return false; } private static class CurrentValue { private final AtomicReference<Long> lastSeqNo; private final AutoCloseable subscription; CurrentValue(AutoCloseable subscription, AtomicReference<Long> lastSeqNo) { this.subscription = subscription; this.lastSeqNo = lastSeqNo; } void set(long value) { lastSeqNo.set(value); } void close() { if (subscription != null) { try { subscription.close(); } catch (Exception e) { throw LOGGER.logThrowableAsWarning(new RuntimeException(e)); } } } } }
No - without it the leak would still exist. This test is ensuring that with the doFinally we actually dispose.
public void executeBulk_cancel() throws InterruptedException { int totalRequest = 100; this.container = createContainer(database); List<com.azure.cosmos.models.CosmosItemOperation> cosmosItemOperations = new ArrayList<>(); for (int i = 0; i < totalRequest; i++) { String partitionKey = UUID.randomUUID().toString(); BatchTestBase.TestDoc testDoc = this.populateTestDoc(partitionKey); cosmosItemOperations.add(CosmosBulkOperations.getCreateItemOperation(testDoc, new PartitionKey(partitionKey))); partitionKey = UUID.randomUUID().toString(); BatchTestBase.EventDoc eventDoc = new BatchTestBase.EventDoc(UUID.randomUUID().toString(), 2, 4, "type1", partitionKey); cosmosItemOperations.add(CosmosBulkOperations.getCreateItemOperation(eventDoc, new PartitionKey(partitionKey))); } com.azure.cosmos.models.CosmosItemOperation[] itemOperationsArray = new com.azure.cosmos.models.CosmosItemOperation[cosmosItemOperations.size()]; cosmosItemOperations.toArray(itemOperationsArray); CosmosBulkExecutionOptions cosmosBulkExecutionOptions = new CosmosBulkExecutionOptions(); Flux<CosmosItemOperation> inputFlux = Flux .fromArray(itemOperationsArray) .delayElements(Duration.ofMillis(100)); final BulkExecutor<BulkExecutorTest> executor = new BulkExecutor<>( container, inputFlux, cosmosBulkExecutionOptions); Flux<com.azure.cosmos.models.CosmosBulkOperationResponse<BulkExecutorTest>> bulkResponseFlux = Flux.deferContextual(context -> executor .execute() .doFinally((SignalType signal) -> { if (signal == SignalType.ON_COMPLETE) { logger.info("BulkExecutor.execute flux completed - executor.getItemsLeftSnapshot(), executor.getOperationContext()); } else { int itemsLeftSnapshot = executor.getItemsLeftSnapshot(); if (itemsLeftSnapshot > 0) { logger.info("BulkExecutor.execute flux terminated - Signal: {} - + "Context: {}", signal, itemsLeftSnapshot, executor.getOperationContext()); } else { logger.info("BulkExecutor.execute flux terminated - Signal: {} - + "Context: {}", signal, itemsLeftSnapshot, 
executor.getOperationContext()); } } executor.dispose(); })); Disposable disposable = bulkResponseFlux.subscribe(); disposable.dispose(); int iterations = 0; while (true) { assertThat(iterations < 100); if (executor.isDisposed()) { break; } Thread.sleep(10); iterations++; } }
.doFinally((SignalType signal) -> {
public void executeBulk_cancel() throws InterruptedException { int totalRequest = 100; this.container = createContainer(database); List<CosmosItemOperation> cosmosItemOperations = new ArrayList<>(); for (int i = 0; i < totalRequest; i++) { String partitionKey = UUID.randomUUID().toString(); BatchTestBase.TestDoc testDoc = this.populateTestDoc(partitionKey); cosmosItemOperations.add(CosmosBulkOperations.getCreateItemOperation(testDoc, new PartitionKey(partitionKey))); partitionKey = UUID.randomUUID().toString(); BatchTestBase.EventDoc eventDoc = new BatchTestBase.EventDoc(UUID.randomUUID().toString(), 2, 4, "type1", partitionKey); cosmosItemOperations.add(CosmosBulkOperations.getCreateItemOperation(eventDoc, new PartitionKey(partitionKey))); } CosmosItemOperation[] itemOperationsArray = new CosmosItemOperation[cosmosItemOperations.size()]; cosmosItemOperations.toArray(itemOperationsArray); CosmosBulkExecutionOptions cosmosBulkExecutionOptions = new CosmosBulkExecutionOptions(); Flux<CosmosItemOperation> inputFlux = Flux .fromArray(itemOperationsArray) .delayElements(Duration.ofMillis(100)); final BulkExecutor<BulkExecutorTest> executor = new BulkExecutor<>( container, inputFlux, cosmosBulkExecutionOptions); Flux<com.azure.cosmos.models.CosmosBulkOperationResponse<BulkExecutorTest>> bulkResponseFlux = Flux.deferContextual(context -> executor.execute()); Disposable disposable = bulkResponseFlux.subscribe(); disposable.dispose(); int iterations = 0; while (true) { assertThat(iterations < 100); if (executor.isDisposed()) { break; } Thread.sleep(10); iterations++; } }
class BulkExecutorTest extends BatchTestBase { private CosmosAsyncClient client; private CosmosAsyncContainer container; private CosmosAsyncDatabase database; private String preExistingDatabaseId = CosmosDatabaseForTest.generateId(); @Factory(dataProvider = "clientBuilders") public BulkExecutorTest(CosmosClientBuilder clientBuilder) { super(clientBuilder); } @AfterClass(groups = { "emulator" }, timeOut = 3 * SHUTDOWN_TIMEOUT, alwaysRun = true) public void afterClass() { logger.info("starting ...."); safeDeleteDatabase(database); safeCloseClient(client); } @AfterMethod(groups = { "emulator" }) public void afterTest() throws Exception { if (this.container != null) { try { this.container.delete().block(); } catch (CosmosException error) { if (error.getStatusCode() != 404) { throw error; } } } } @BeforeMethod(groups = { "emulator" }) public void beforeTest() throws Exception { this.container = null; } @BeforeClass(groups = { "emulator" }, timeOut = SETUP_TIMEOUT) public void before_CosmosContainerTest() { client = getClientBuilder().buildAsyncClient(); database = createDatabase(client, preExistingDatabaseId); } static protected CosmosAsyncContainer createContainer(CosmosAsyncDatabase database) { String collectionName = UUID.randomUUID().toString(); CosmosContainerProperties containerProperties = getCollectionDefinition(collectionName); database.createContainer(containerProperties).block(); return database.getContainer(collectionName); } static protected CosmosAsyncDatabase createDatabase(CosmosAsyncClient client, String databaseId) { CosmosDatabaseProperties databaseSettings = new CosmosDatabaseProperties(databaseId); client.createDatabase(databaseSettings).block(); return client.getDatabase(databaseSettings.getId()); } static protected CosmosAsyncDatabase createDatabaseIfNotExists(CosmosAsyncClient client, String databaseId) { List<CosmosDatabaseProperties> res = client.queryDatabases(String.format("SELECT * FROM r where r.id = '%s'", databaseId), null) .collectList() 
.block(); if (res.size() != 0) { CosmosAsyncDatabase database = client.getDatabase(databaseId); database.read().block(); return database; } else { CosmosDatabaseProperties databaseSettings = new CosmosDatabaseProperties(databaseId); client.createDatabase(databaseSettings).block(); return client.getDatabase(databaseSettings.getId()); } } @Test(groups = { "emulator" }, timeOut = TIMEOUT) @Test(groups = { "emulator" }, timeOut = TIMEOUT) public void executeBulk_complete() throws InterruptedException { int totalRequest = 10; this.container = createContainer(database); List<com.azure.cosmos.models.CosmosItemOperation> cosmosItemOperations = new ArrayList<>(); for (int i = 0; i < totalRequest; i++) { String partitionKey = UUID.randomUUID().toString(); BatchTestBase.TestDoc testDoc = this.populateTestDoc(partitionKey); cosmosItemOperations.add(CosmosBulkOperations.getCreateItemOperation(testDoc, new PartitionKey(partitionKey))); partitionKey = UUID.randomUUID().toString(); BatchTestBase.EventDoc eventDoc = new BatchTestBase.EventDoc(UUID.randomUUID().toString(), 2, 4, "type1", partitionKey); cosmosItemOperations.add(CosmosBulkOperations.getCreateItemOperation(eventDoc, new PartitionKey(partitionKey))); } com.azure.cosmos.models.CosmosItemOperation[] itemOperationsArray = new com.azure.cosmos.models.CosmosItemOperation[cosmosItemOperations.size()]; cosmosItemOperations.toArray(itemOperationsArray); CosmosBulkExecutionOptions cosmosBulkExecutionOptions = new CosmosBulkExecutionOptions(); final BulkExecutor<BulkExecutorTest> executor = new BulkExecutor<>( container, Flux.fromArray(itemOperationsArray), cosmosBulkExecutionOptions); Flux<com.azure.cosmos.models.CosmosBulkOperationResponse<BulkExecutorTest>> bulkResponseFlux = Flux.deferContextual(context -> executor .execute() .doFinally((SignalType signal) -> { if (signal == SignalType.ON_COMPLETE) { logger.debug("BulkExecutor.execute flux completed - executor.getItemsLeftSnapshot(), executor.getOperationContext()); } else { 
int itemsLeftSnapshot = executor.getItemsLeftSnapshot(); if (itemsLeftSnapshot > 0) { logger.info("BulkExecutor.execute flux terminated - Signal: {} - + "Context: {}", signal, itemsLeftSnapshot, executor.getOperationContext()); } else { logger.debug("BulkExecutor.execute flux terminated - Signal: {} - + "Context: {}", signal, itemsLeftSnapshot, executor.getOperationContext()); } } executor.dispose(); })); Mono<List<CosmosBulkOperationResponse<BulkExecutorTest>>> convertToListMono = bulkResponseFlux .collect(Collectors.toList()); List<CosmosBulkOperationResponse<BulkExecutorTest>> bulkResponse = convertToListMono.block(); assertThat(bulkResponse.size()).isEqualTo(totalRequest * 2); for (com.azure.cosmos.models.CosmosBulkOperationResponse<BulkExecutorTest> cosmosBulkOperationResponse : bulkResponse) { com.azure.cosmos.models.CosmosBulkItemResponse cosmosBulkItemResponse = cosmosBulkOperationResponse.getResponse(); assertThat(cosmosBulkItemResponse.getStatusCode()).isEqualTo(HttpResponseStatus.CREATED.code()); assertThat(cosmosBulkItemResponse.getRequestCharge()).isGreaterThan(0); assertThat(cosmosBulkItemResponse.getCosmosDiagnostics().toString()).isNotNull(); assertThat(cosmosBulkItemResponse.getSessionToken()).isNotNull(); assertThat(cosmosBulkItemResponse.getActivityId()).isNotNull(); assertThat(cosmosBulkItemResponse.getRequestCharge()).isNotNull(); } int iterations = 0; while (true) { assertThat(iterations < 100); if (executor.isDisposed()) { break; } Thread.sleep(10); iterations++; } } static protected void safeClose(CosmosAsyncClient client) { if (client != null) { try { client.close(); } catch (Exception e) { logger.error("failed to close client", e); } } } static protected void safeCloseAsync(CosmosAsyncClient client) { if (client != null) { new Thread(() -> { try { client.close(); } catch (Exception e) { logger.error("failed to close client", e); } }).start(); } } static protected void safeCloseClient(CosmosAsyncClient client) { if (client != null) { try { 
logger.info("closing client ..."); client.close(); logger.info("closing client completed"); } catch (Exception e) { logger.error("failed to close client", e); } } } static protected void safeDeleteAllCollections(CosmosAsyncDatabase database) { if (database != null) { List<CosmosContainerProperties> collections = database.readAllContainers() .collectList() .block(); for (CosmosContainerProperties collection : collections) { database.getContainer(collection.getId()).delete().block(); } } } static protected void safeDeleteCollection(CosmosAsyncContainer collection) { if (collection != null) { try { collection.delete().block(); } catch (Exception e) { } } } static protected void safeDeleteCollection(CosmosAsyncDatabase database, String collectionId) { if (database != null && collectionId != null) { try { database.getContainer(collectionId).delete().block(); } catch (Exception e) { } } } static protected void safeDeleteDatabase(CosmosAsyncDatabase database) { if (database != null) { try { database.delete().block(); } catch (Exception e) { } } } static protected void safeDeleteSyncDatabase(CosmosDatabase database) { if (database != null) { try { logger.info("attempting to delete database ...."); database.delete(); logger.info("database deletion completed"); } catch (Exception e) { logger.error("failed to delete sync database", e); } } } }
class BulkExecutorTest extends BatchTestBase { private CosmosAsyncClient client; private CosmosAsyncContainer container; private CosmosAsyncDatabase database; private String preExistingDatabaseId = CosmosDatabaseForTest.generateId(); @Factory(dataProvider = "clientBuilders") public BulkExecutorTest(CosmosClientBuilder clientBuilder) { super(clientBuilder); } @AfterClass(groups = { "emulator" }, timeOut = 3 * SHUTDOWN_TIMEOUT, alwaysRun = true) public void afterClass() { logger.info("starting ...."); safeDeleteDatabase(database); safeCloseAsync(client); } @AfterMethod(groups = { "emulator" }) public void afterTest() throws Exception { if (this.container != null) { try { this.container.delete().block(); } catch (CosmosException error) { if (error.getStatusCode() != 404) { throw error; } } } } @BeforeMethod(groups = { "emulator" }) public void beforeTest() throws Exception { this.container = null; } @BeforeClass(groups = { "emulator" }, timeOut = SETUP_TIMEOUT) public void before_CosmosContainerTest() { client = getClientBuilder().buildAsyncClient(); database = createDatabase(client, preExistingDatabaseId); } static protected CosmosAsyncContainer createContainer(CosmosAsyncDatabase database) { String collectionName = UUID.randomUUID().toString(); CosmosContainerProperties containerProperties = getCollectionDefinition(collectionName); database.createContainer(containerProperties).block(); return database.getContainer(collectionName); } @Test(groups = { "emulator" }, timeOut = TIMEOUT) @Test(groups = { "emulator" }, timeOut = TIMEOUT) public void executeBulk_complete() throws InterruptedException { int totalRequest = 10; this.container = createContainer(database); List<CosmosItemOperation> cosmosItemOperations = new ArrayList<>(); for (int i = 0; i < totalRequest; i++) { String partitionKey = UUID.randomUUID().toString(); BatchTestBase.TestDoc testDoc = this.populateTestDoc(partitionKey); cosmosItemOperations.add(CosmosBulkOperations.getCreateItemOperation(testDoc, new 
PartitionKey(partitionKey))); partitionKey = UUID.randomUUID().toString(); BatchTestBase.EventDoc eventDoc = new BatchTestBase.EventDoc(UUID.randomUUID().toString(), 2, 4, "type1", partitionKey); cosmosItemOperations.add(CosmosBulkOperations.getCreateItemOperation(eventDoc, new PartitionKey(partitionKey))); } CosmosItemOperation[] itemOperationsArray = new CosmosItemOperation[cosmosItemOperations.size()]; cosmosItemOperations.toArray(itemOperationsArray); CosmosBulkExecutionOptions cosmosBulkExecutionOptions = new CosmosBulkExecutionOptions(); final BulkExecutor<BulkExecutorTest> executor = new BulkExecutor<>( container, Flux.fromArray(itemOperationsArray), cosmosBulkExecutionOptions); Flux<com.azure.cosmos.models.CosmosBulkOperationResponse<BulkExecutorTest>> bulkResponseFlux = Flux.deferContextual(context -> executor.execute()); Mono<List<CosmosBulkOperationResponse<BulkExecutorTest>>> convertToListMono = bulkResponseFlux .collect(Collectors.toList()); List<CosmosBulkOperationResponse<BulkExecutorTest>> bulkResponse = convertToListMono.block(); assertThat(bulkResponse.size()).isEqualTo(totalRequest * 2); for (com.azure.cosmos.models.CosmosBulkOperationResponse<BulkExecutorTest> cosmosBulkOperationResponse : bulkResponse) { com.azure.cosmos.models.CosmosBulkItemResponse cosmosBulkItemResponse = cosmosBulkOperationResponse.getResponse(); assertThat(cosmosBulkItemResponse.getStatusCode()).isEqualTo(HttpResponseStatus.CREATED.code()); assertThat(cosmosBulkItemResponse.getRequestCharge()).isGreaterThan(0); assertThat(cosmosBulkItemResponse.getCosmosDiagnostics().toString()).isNotNull(); assertThat(cosmosBulkItemResponse.getSessionToken()).isNotNull(); assertThat(cosmosBulkItemResponse.getActivityId()).isNotNull(); assertThat(cosmosBulkItemResponse.getRequestCharge()).isNotNull(); } int iterations = 0; while (true) { assertThat(iterations < 100); if (executor.isDisposed()) { break; } Thread.sleep(10); iterations++; } } }
Actually - refactored this so that the doFianlly is added within BulkExecutor. So resolved this comment.
public void executeBulk_cancel() throws InterruptedException { int totalRequest = 100; this.container = createContainer(database); List<com.azure.cosmos.models.CosmosItemOperation> cosmosItemOperations = new ArrayList<>(); for (int i = 0; i < totalRequest; i++) { String partitionKey = UUID.randomUUID().toString(); BatchTestBase.TestDoc testDoc = this.populateTestDoc(partitionKey); cosmosItemOperations.add(CosmosBulkOperations.getCreateItemOperation(testDoc, new PartitionKey(partitionKey))); partitionKey = UUID.randomUUID().toString(); BatchTestBase.EventDoc eventDoc = new BatchTestBase.EventDoc(UUID.randomUUID().toString(), 2, 4, "type1", partitionKey); cosmosItemOperations.add(CosmosBulkOperations.getCreateItemOperation(eventDoc, new PartitionKey(partitionKey))); } com.azure.cosmos.models.CosmosItemOperation[] itemOperationsArray = new com.azure.cosmos.models.CosmosItemOperation[cosmosItemOperations.size()]; cosmosItemOperations.toArray(itemOperationsArray); CosmosBulkExecutionOptions cosmosBulkExecutionOptions = new CosmosBulkExecutionOptions(); Flux<CosmosItemOperation> inputFlux = Flux .fromArray(itemOperationsArray) .delayElements(Duration.ofMillis(100)); final BulkExecutor<BulkExecutorTest> executor = new BulkExecutor<>( container, inputFlux, cosmosBulkExecutionOptions); Flux<com.azure.cosmos.models.CosmosBulkOperationResponse<BulkExecutorTest>> bulkResponseFlux = Flux.deferContextual(context -> executor .execute() .doFinally((SignalType signal) -> { if (signal == SignalType.ON_COMPLETE) { logger.info("BulkExecutor.execute flux completed - executor.getItemsLeftSnapshot(), executor.getOperationContext()); } else { int itemsLeftSnapshot = executor.getItemsLeftSnapshot(); if (itemsLeftSnapshot > 0) { logger.info("BulkExecutor.execute flux terminated - Signal: {} - + "Context: {}", signal, itemsLeftSnapshot, executor.getOperationContext()); } else { logger.info("BulkExecutor.execute flux terminated - Signal: {} - + "Context: {}", signal, itemsLeftSnapshot, 
executor.getOperationContext()); } } executor.dispose(); })); Disposable disposable = bulkResponseFlux.subscribe(); disposable.dispose(); int iterations = 0; while (true) { assertThat(iterations < 100); if (executor.isDisposed()) { break; } Thread.sleep(10); iterations++; } }
.doFinally((SignalType signal) -> {
public void executeBulk_cancel() throws InterruptedException { int totalRequest = 100; this.container = createContainer(database); List<CosmosItemOperation> cosmosItemOperations = new ArrayList<>(); for (int i = 0; i < totalRequest; i++) { String partitionKey = UUID.randomUUID().toString(); BatchTestBase.TestDoc testDoc = this.populateTestDoc(partitionKey); cosmosItemOperations.add(CosmosBulkOperations.getCreateItemOperation(testDoc, new PartitionKey(partitionKey))); partitionKey = UUID.randomUUID().toString(); BatchTestBase.EventDoc eventDoc = new BatchTestBase.EventDoc(UUID.randomUUID().toString(), 2, 4, "type1", partitionKey); cosmosItemOperations.add(CosmosBulkOperations.getCreateItemOperation(eventDoc, new PartitionKey(partitionKey))); } CosmosItemOperation[] itemOperationsArray = new CosmosItemOperation[cosmosItemOperations.size()]; cosmosItemOperations.toArray(itemOperationsArray); CosmosBulkExecutionOptions cosmosBulkExecutionOptions = new CosmosBulkExecutionOptions(); Flux<CosmosItemOperation> inputFlux = Flux .fromArray(itemOperationsArray) .delayElements(Duration.ofMillis(100)); final BulkExecutor<BulkExecutorTest> executor = new BulkExecutor<>( container, inputFlux, cosmosBulkExecutionOptions); Flux<com.azure.cosmos.models.CosmosBulkOperationResponse<BulkExecutorTest>> bulkResponseFlux = Flux.deferContextual(context -> executor.execute()); Disposable disposable = bulkResponseFlux.subscribe(); disposable.dispose(); int iterations = 0; while (true) { assertThat(iterations < 100); if (executor.isDisposed()) { break; } Thread.sleep(10); iterations++; } }
class BulkExecutorTest extends BatchTestBase { private CosmosAsyncClient client; private CosmosAsyncContainer container; private CosmosAsyncDatabase database; private String preExistingDatabaseId = CosmosDatabaseForTest.generateId(); @Factory(dataProvider = "clientBuilders") public BulkExecutorTest(CosmosClientBuilder clientBuilder) { super(clientBuilder); } @AfterClass(groups = { "emulator" }, timeOut = 3 * SHUTDOWN_TIMEOUT, alwaysRun = true) public void afterClass() { logger.info("starting ...."); safeDeleteDatabase(database); safeCloseClient(client); } @AfterMethod(groups = { "emulator" }) public void afterTest() throws Exception { if (this.container != null) { try { this.container.delete().block(); } catch (CosmosException error) { if (error.getStatusCode() != 404) { throw error; } } } } @BeforeMethod(groups = { "emulator" }) public void beforeTest() throws Exception { this.container = null; } @BeforeClass(groups = { "emulator" }, timeOut = SETUP_TIMEOUT) public void before_CosmosContainerTest() { client = getClientBuilder().buildAsyncClient(); database = createDatabase(client, preExistingDatabaseId); } static protected CosmosAsyncContainer createContainer(CosmosAsyncDatabase database) { String collectionName = UUID.randomUUID().toString(); CosmosContainerProperties containerProperties = getCollectionDefinition(collectionName); database.createContainer(containerProperties).block(); return database.getContainer(collectionName); } static protected CosmosAsyncDatabase createDatabase(CosmosAsyncClient client, String databaseId) { CosmosDatabaseProperties databaseSettings = new CosmosDatabaseProperties(databaseId); client.createDatabase(databaseSettings).block(); return client.getDatabase(databaseSettings.getId()); } static protected CosmosAsyncDatabase createDatabaseIfNotExists(CosmosAsyncClient client, String databaseId) { List<CosmosDatabaseProperties> res = client.queryDatabases(String.format("SELECT * FROM r where r.id = '%s'", databaseId), null) .collectList() 
.block(); if (res.size() != 0) { CosmosAsyncDatabase database = client.getDatabase(databaseId); database.read().block(); return database; } else { CosmosDatabaseProperties databaseSettings = new CosmosDatabaseProperties(databaseId); client.createDatabase(databaseSettings).block(); return client.getDatabase(databaseSettings.getId()); } } @Test(groups = { "emulator" }, timeOut = TIMEOUT) @Test(groups = { "emulator" }, timeOut = TIMEOUT) public void executeBulk_complete() throws InterruptedException { int totalRequest = 10; this.container = createContainer(database); List<com.azure.cosmos.models.CosmosItemOperation> cosmosItemOperations = new ArrayList<>(); for (int i = 0; i < totalRequest; i++) { String partitionKey = UUID.randomUUID().toString(); BatchTestBase.TestDoc testDoc = this.populateTestDoc(partitionKey); cosmosItemOperations.add(CosmosBulkOperations.getCreateItemOperation(testDoc, new PartitionKey(partitionKey))); partitionKey = UUID.randomUUID().toString(); BatchTestBase.EventDoc eventDoc = new BatchTestBase.EventDoc(UUID.randomUUID().toString(), 2, 4, "type1", partitionKey); cosmosItemOperations.add(CosmosBulkOperations.getCreateItemOperation(eventDoc, new PartitionKey(partitionKey))); } com.azure.cosmos.models.CosmosItemOperation[] itemOperationsArray = new com.azure.cosmos.models.CosmosItemOperation[cosmosItemOperations.size()]; cosmosItemOperations.toArray(itemOperationsArray); CosmosBulkExecutionOptions cosmosBulkExecutionOptions = new CosmosBulkExecutionOptions(); final BulkExecutor<BulkExecutorTest> executor = new BulkExecutor<>( container, Flux.fromArray(itemOperationsArray), cosmosBulkExecutionOptions); Flux<com.azure.cosmos.models.CosmosBulkOperationResponse<BulkExecutorTest>> bulkResponseFlux = Flux.deferContextual(context -> executor .execute() .doFinally((SignalType signal) -> { if (signal == SignalType.ON_COMPLETE) { logger.debug("BulkExecutor.execute flux completed - executor.getItemsLeftSnapshot(), executor.getOperationContext()); } else { 
int itemsLeftSnapshot = executor.getItemsLeftSnapshot(); if (itemsLeftSnapshot > 0) { logger.info("BulkExecutor.execute flux terminated - Signal: {} - + "Context: {}", signal, itemsLeftSnapshot, executor.getOperationContext()); } else { logger.debug("BulkExecutor.execute flux terminated - Signal: {} - + "Context: {}", signal, itemsLeftSnapshot, executor.getOperationContext()); } } executor.dispose(); })); Mono<List<CosmosBulkOperationResponse<BulkExecutorTest>>> convertToListMono = bulkResponseFlux .collect(Collectors.toList()); List<CosmosBulkOperationResponse<BulkExecutorTest>> bulkResponse = convertToListMono.block(); assertThat(bulkResponse.size()).isEqualTo(totalRequest * 2); for (com.azure.cosmos.models.CosmosBulkOperationResponse<BulkExecutorTest> cosmosBulkOperationResponse : bulkResponse) { com.azure.cosmos.models.CosmosBulkItemResponse cosmosBulkItemResponse = cosmosBulkOperationResponse.getResponse(); assertThat(cosmosBulkItemResponse.getStatusCode()).isEqualTo(HttpResponseStatus.CREATED.code()); assertThat(cosmosBulkItemResponse.getRequestCharge()).isGreaterThan(0); assertThat(cosmosBulkItemResponse.getCosmosDiagnostics().toString()).isNotNull(); assertThat(cosmosBulkItemResponse.getSessionToken()).isNotNull(); assertThat(cosmosBulkItemResponse.getActivityId()).isNotNull(); assertThat(cosmosBulkItemResponse.getRequestCharge()).isNotNull(); } int iterations = 0; while (true) { assertThat(iterations < 100); if (executor.isDisposed()) { break; } Thread.sleep(10); iterations++; } } static protected void safeClose(CosmosAsyncClient client) { if (client != null) { try { client.close(); } catch (Exception e) { logger.error("failed to close client", e); } } } static protected void safeCloseAsync(CosmosAsyncClient client) { if (client != null) { new Thread(() -> { try { client.close(); } catch (Exception e) { logger.error("failed to close client", e); } }).start(); } } static protected void safeCloseClient(CosmosAsyncClient client) { if (client != null) { try { 
logger.info("closing client ..."); client.close(); logger.info("closing client completed"); } catch (Exception e) { logger.error("failed to close client", e); } } } static protected void safeDeleteAllCollections(CosmosAsyncDatabase database) { if (database != null) { List<CosmosContainerProperties> collections = database.readAllContainers() .collectList() .block(); for (CosmosContainerProperties collection : collections) { database.getContainer(collection.getId()).delete().block(); } } } static protected void safeDeleteCollection(CosmosAsyncContainer collection) { if (collection != null) { try { collection.delete().block(); } catch (Exception e) { } } } static protected void safeDeleteCollection(CosmosAsyncDatabase database, String collectionId) { if (database != null && collectionId != null) { try { database.getContainer(collectionId).delete().block(); } catch (Exception e) { } } } static protected void safeDeleteDatabase(CosmosAsyncDatabase database) { if (database != null) { try { database.delete().block(); } catch (Exception e) { } } } static protected void safeDeleteSyncDatabase(CosmosDatabase database) { if (database != null) { try { logger.info("attempting to delete database ...."); database.delete(); logger.info("database deletion completed"); } catch (Exception e) { logger.error("failed to delete sync database", e); } } } }
class BulkExecutorTest extends BatchTestBase { private CosmosAsyncClient client; private CosmosAsyncContainer container; private CosmosAsyncDatabase database; private String preExistingDatabaseId = CosmosDatabaseForTest.generateId(); @Factory(dataProvider = "clientBuilders") public BulkExecutorTest(CosmosClientBuilder clientBuilder) { super(clientBuilder); } @AfterClass(groups = { "emulator" }, timeOut = 3 * SHUTDOWN_TIMEOUT, alwaysRun = true) public void afterClass() { logger.info("starting ...."); safeDeleteDatabase(database); safeCloseAsync(client); } @AfterMethod(groups = { "emulator" }) public void afterTest() throws Exception { if (this.container != null) { try { this.container.delete().block(); } catch (CosmosException error) { if (error.getStatusCode() != 404) { throw error; } } } } @BeforeMethod(groups = { "emulator" }) public void beforeTest() throws Exception { this.container = null; } @BeforeClass(groups = { "emulator" }, timeOut = SETUP_TIMEOUT) public void before_CosmosContainerTest() { client = getClientBuilder().buildAsyncClient(); database = createDatabase(client, preExistingDatabaseId); } static protected CosmosAsyncContainer createContainer(CosmosAsyncDatabase database) { String collectionName = UUID.randomUUID().toString(); CosmosContainerProperties containerProperties = getCollectionDefinition(collectionName); database.createContainer(containerProperties).block(); return database.getContainer(collectionName); } @Test(groups = { "emulator" }, timeOut = TIMEOUT) @Test(groups = { "emulator" }, timeOut = TIMEOUT) public void executeBulk_complete() throws InterruptedException { int totalRequest = 10; this.container = createContainer(database); List<CosmosItemOperation> cosmosItemOperations = new ArrayList<>(); for (int i = 0; i < totalRequest; i++) { String partitionKey = UUID.randomUUID().toString(); BatchTestBase.TestDoc testDoc = this.populateTestDoc(partitionKey); cosmosItemOperations.add(CosmosBulkOperations.getCreateItemOperation(testDoc, new 
PartitionKey(partitionKey))); partitionKey = UUID.randomUUID().toString(); BatchTestBase.EventDoc eventDoc = new BatchTestBase.EventDoc(UUID.randomUUID().toString(), 2, 4, "type1", partitionKey); cosmosItemOperations.add(CosmosBulkOperations.getCreateItemOperation(eventDoc, new PartitionKey(partitionKey))); } CosmosItemOperation[] itemOperationsArray = new CosmosItemOperation[cosmosItemOperations.size()]; cosmosItemOperations.toArray(itemOperationsArray); CosmosBulkExecutionOptions cosmosBulkExecutionOptions = new CosmosBulkExecutionOptions(); final BulkExecutor<BulkExecutorTest> executor = new BulkExecutor<>( container, Flux.fromArray(itemOperationsArray), cosmosBulkExecutionOptions); Flux<com.azure.cosmos.models.CosmosBulkOperationResponse<BulkExecutorTest>> bulkResponseFlux = Flux.deferContextual(context -> executor.execute()); Mono<List<CosmosBulkOperationResponse<BulkExecutorTest>>> convertToListMono = bulkResponseFlux .collect(Collectors.toList()); List<CosmosBulkOperationResponse<BulkExecutorTest>> bulkResponse = convertToListMono.block(); assertThat(bulkResponse.size()).isEqualTo(totalRequest * 2); for (com.azure.cosmos.models.CosmosBulkOperationResponse<BulkExecutorTest> cosmosBulkOperationResponse : bulkResponse) { com.azure.cosmos.models.CosmosBulkItemResponse cosmosBulkItemResponse = cosmosBulkOperationResponse.getResponse(); assertThat(cosmosBulkItemResponse.getStatusCode()).isEqualTo(HttpResponseStatus.CREATED.code()); assertThat(cosmosBulkItemResponse.getRequestCharge()).isGreaterThan(0); assertThat(cosmosBulkItemResponse.getCosmosDiagnostics().toString()).isNotNull(); assertThat(cosmosBulkItemResponse.getSessionToken()).isNotNull(); assertThat(cosmosBulkItemResponse.getActivityId()).isNotNull(); assertThat(cosmosBulkItemResponse.getRequestCharge()).isNotNull(); } int iterations = 0; while (true) { assertThat(iterations < 100); if (executor.isDisposed()) { break; } Thread.sleep(10); iterations++; } } }
Fixed
public void executeBulk_cancel() throws InterruptedException { int totalRequest = 100; this.container = createContainer(database); List<com.azure.cosmos.models.CosmosItemOperation> cosmosItemOperations = new ArrayList<>(); for (int i = 0; i < totalRequest; i++) { String partitionKey = UUID.randomUUID().toString(); BatchTestBase.TestDoc testDoc = this.populateTestDoc(partitionKey); cosmosItemOperations.add(CosmosBulkOperations.getCreateItemOperation(testDoc, new PartitionKey(partitionKey))); partitionKey = UUID.randomUUID().toString(); BatchTestBase.EventDoc eventDoc = new BatchTestBase.EventDoc(UUID.randomUUID().toString(), 2, 4, "type1", partitionKey); cosmosItemOperations.add(CosmosBulkOperations.getCreateItemOperation(eventDoc, new PartitionKey(partitionKey))); } com.azure.cosmos.models.CosmosItemOperation[] itemOperationsArray = new com.azure.cosmos.models.CosmosItemOperation[cosmosItemOperations.size()]; cosmosItemOperations.toArray(itemOperationsArray); CosmosBulkExecutionOptions cosmosBulkExecutionOptions = new CosmosBulkExecutionOptions(); Flux<CosmosItemOperation> inputFlux = Flux .fromArray(itemOperationsArray) .delayElements(Duration.ofMillis(100)); final BulkExecutor<BulkExecutorTest> executor = new BulkExecutor<>( container, inputFlux, cosmosBulkExecutionOptions); Flux<com.azure.cosmos.models.CosmosBulkOperationResponse<BulkExecutorTest>> bulkResponseFlux = Flux.deferContextual(context -> executor .execute() .doFinally((SignalType signal) -> { if (signal == SignalType.ON_COMPLETE) { logger.info("BulkExecutor.execute flux completed - executor.getItemsLeftSnapshot(), executor.getOperationContext()); } else { int itemsLeftSnapshot = executor.getItemsLeftSnapshot(); if (itemsLeftSnapshot > 0) { logger.info("BulkExecutor.execute flux terminated - Signal: {} - + "Context: {}", signal, itemsLeftSnapshot, executor.getOperationContext()); } else { logger.info("BulkExecutor.execute flux terminated - Signal: {} - + "Context: {}", signal, itemsLeftSnapshot, 
executor.getOperationContext()); } } executor.dispose(); })); Disposable disposable = bulkResponseFlux.subscribe(); disposable.dispose(); int iterations = 0; while (true) { assertThat(iterations < 100); if (executor.isDisposed()) { break; } Thread.sleep(10); iterations++; } }
List<com.azure.cosmos.models.CosmosItemOperation> cosmosItemOperations = new ArrayList<>();
public void executeBulk_cancel() throws InterruptedException { int totalRequest = 100; this.container = createContainer(database); List<CosmosItemOperation> cosmosItemOperations = new ArrayList<>(); for (int i = 0; i < totalRequest; i++) { String partitionKey = UUID.randomUUID().toString(); BatchTestBase.TestDoc testDoc = this.populateTestDoc(partitionKey); cosmosItemOperations.add(CosmosBulkOperations.getCreateItemOperation(testDoc, new PartitionKey(partitionKey))); partitionKey = UUID.randomUUID().toString(); BatchTestBase.EventDoc eventDoc = new BatchTestBase.EventDoc(UUID.randomUUID().toString(), 2, 4, "type1", partitionKey); cosmosItemOperations.add(CosmosBulkOperations.getCreateItemOperation(eventDoc, new PartitionKey(partitionKey))); } CosmosItemOperation[] itemOperationsArray = new CosmosItemOperation[cosmosItemOperations.size()]; cosmosItemOperations.toArray(itemOperationsArray); CosmosBulkExecutionOptions cosmosBulkExecutionOptions = new CosmosBulkExecutionOptions(); Flux<CosmosItemOperation> inputFlux = Flux .fromArray(itemOperationsArray) .delayElements(Duration.ofMillis(100)); final BulkExecutor<BulkExecutorTest> executor = new BulkExecutor<>( container, inputFlux, cosmosBulkExecutionOptions); Flux<com.azure.cosmos.models.CosmosBulkOperationResponse<BulkExecutorTest>> bulkResponseFlux = Flux.deferContextual(context -> executor.execute()); Disposable disposable = bulkResponseFlux.subscribe(); disposable.dispose(); int iterations = 0; while (true) { assertThat(iterations < 100); if (executor.isDisposed()) { break; } Thread.sleep(10); iterations++; } }
class BulkExecutorTest extends BatchTestBase { private CosmosAsyncClient client; private CosmosAsyncContainer container; private CosmosAsyncDatabase database; private String preExistingDatabaseId = CosmosDatabaseForTest.generateId(); @Factory(dataProvider = "clientBuilders") public BulkExecutorTest(CosmosClientBuilder clientBuilder) { super(clientBuilder); } @AfterClass(groups = { "emulator" }, timeOut = 3 * SHUTDOWN_TIMEOUT, alwaysRun = true) public void afterClass() { logger.info("starting ...."); safeDeleteDatabase(database); safeCloseClient(client); } @AfterMethod(groups = { "emulator" }) public void afterTest() throws Exception { if (this.container != null) { try { this.container.delete().block(); } catch (CosmosException error) { if (error.getStatusCode() != 404) { throw error; } } } } @BeforeMethod(groups = { "emulator" }) public void beforeTest() throws Exception { this.container = null; } @BeforeClass(groups = { "emulator" }, timeOut = SETUP_TIMEOUT) public void before_CosmosContainerTest() { client = getClientBuilder().buildAsyncClient(); database = createDatabase(client, preExistingDatabaseId); } static protected CosmosAsyncContainer createContainer(CosmosAsyncDatabase database) { String collectionName = UUID.randomUUID().toString(); CosmosContainerProperties containerProperties = getCollectionDefinition(collectionName); database.createContainer(containerProperties).block(); return database.getContainer(collectionName); } static protected CosmosAsyncDatabase createDatabase(CosmosAsyncClient client, String databaseId) { CosmosDatabaseProperties databaseSettings = new CosmosDatabaseProperties(databaseId); client.createDatabase(databaseSettings).block(); return client.getDatabase(databaseSettings.getId()); } static protected CosmosAsyncDatabase createDatabaseIfNotExists(CosmosAsyncClient client, String databaseId) { List<CosmosDatabaseProperties> res = client.queryDatabases(String.format("SELECT * FROM r where r.id = '%s'", databaseId), null) .collectList() 
.block(); if (res.size() != 0) { CosmosAsyncDatabase database = client.getDatabase(databaseId); database.read().block(); return database; } else { CosmosDatabaseProperties databaseSettings = new CosmosDatabaseProperties(databaseId); client.createDatabase(databaseSettings).block(); return client.getDatabase(databaseSettings.getId()); } } @Test(groups = { "emulator" }, timeOut = TIMEOUT) @Test(groups = { "emulator" }, timeOut = TIMEOUT) public void executeBulk_complete() throws InterruptedException { int totalRequest = 10; this.container = createContainer(database); List<com.azure.cosmos.models.CosmosItemOperation> cosmosItemOperations = new ArrayList<>(); for (int i = 0; i < totalRequest; i++) { String partitionKey = UUID.randomUUID().toString(); BatchTestBase.TestDoc testDoc = this.populateTestDoc(partitionKey); cosmosItemOperations.add(CosmosBulkOperations.getCreateItemOperation(testDoc, new PartitionKey(partitionKey))); partitionKey = UUID.randomUUID().toString(); BatchTestBase.EventDoc eventDoc = new BatchTestBase.EventDoc(UUID.randomUUID().toString(), 2, 4, "type1", partitionKey); cosmosItemOperations.add(CosmosBulkOperations.getCreateItemOperation(eventDoc, new PartitionKey(partitionKey))); } com.azure.cosmos.models.CosmosItemOperation[] itemOperationsArray = new com.azure.cosmos.models.CosmosItemOperation[cosmosItemOperations.size()]; cosmosItemOperations.toArray(itemOperationsArray); CosmosBulkExecutionOptions cosmosBulkExecutionOptions = new CosmosBulkExecutionOptions(); final BulkExecutor<BulkExecutorTest> executor = new BulkExecutor<>( container, Flux.fromArray(itemOperationsArray), cosmosBulkExecutionOptions); Flux<com.azure.cosmos.models.CosmosBulkOperationResponse<BulkExecutorTest>> bulkResponseFlux = Flux.deferContextual(context -> executor .execute() .doFinally((SignalType signal) -> { if (signal == SignalType.ON_COMPLETE) { logger.debug("BulkExecutor.execute flux completed - executor.getItemsLeftSnapshot(), executor.getOperationContext()); } else { 
int itemsLeftSnapshot = executor.getItemsLeftSnapshot(); if (itemsLeftSnapshot > 0) { logger.info("BulkExecutor.execute flux terminated - Signal: {} - + "Context: {}", signal, itemsLeftSnapshot, executor.getOperationContext()); } else { logger.debug("BulkExecutor.execute flux terminated - Signal: {} - + "Context: {}", signal, itemsLeftSnapshot, executor.getOperationContext()); } } executor.dispose(); })); Mono<List<CosmosBulkOperationResponse<BulkExecutorTest>>> convertToListMono = bulkResponseFlux .collect(Collectors.toList()); List<CosmosBulkOperationResponse<BulkExecutorTest>> bulkResponse = convertToListMono.block(); assertThat(bulkResponse.size()).isEqualTo(totalRequest * 2); for (com.azure.cosmos.models.CosmosBulkOperationResponse<BulkExecutorTest> cosmosBulkOperationResponse : bulkResponse) { com.azure.cosmos.models.CosmosBulkItemResponse cosmosBulkItemResponse = cosmosBulkOperationResponse.getResponse(); assertThat(cosmosBulkItemResponse.getStatusCode()).isEqualTo(HttpResponseStatus.CREATED.code()); assertThat(cosmosBulkItemResponse.getRequestCharge()).isGreaterThan(0); assertThat(cosmosBulkItemResponse.getCosmosDiagnostics().toString()).isNotNull(); assertThat(cosmosBulkItemResponse.getSessionToken()).isNotNull(); assertThat(cosmosBulkItemResponse.getActivityId()).isNotNull(); assertThat(cosmosBulkItemResponse.getRequestCharge()).isNotNull(); } int iterations = 0; while (true) { assertThat(iterations < 100); if (executor.isDisposed()) { break; } Thread.sleep(10); iterations++; } } static protected void safeClose(CosmosAsyncClient client) { if (client != null) { try { client.close(); } catch (Exception e) { logger.error("failed to close client", e); } } } static protected void safeCloseAsync(CosmosAsyncClient client) { if (client != null) { new Thread(() -> { try { client.close(); } catch (Exception e) { logger.error("failed to close client", e); } }).start(); } } static protected void safeCloseClient(CosmosAsyncClient client) { if (client != null) { try { 
logger.info("closing client ..."); client.close(); logger.info("closing client completed"); } catch (Exception e) { logger.error("failed to close client", e); } } } static protected void safeDeleteAllCollections(CosmosAsyncDatabase database) { if (database != null) { List<CosmosContainerProperties> collections = database.readAllContainers() .collectList() .block(); for (CosmosContainerProperties collection : collections) { database.getContainer(collection.getId()).delete().block(); } } } static protected void safeDeleteCollection(CosmosAsyncContainer collection) { if (collection != null) { try { collection.delete().block(); } catch (Exception e) { } } } static protected void safeDeleteCollection(CosmosAsyncDatabase database, String collectionId) { if (database != null && collectionId != null) { try { database.getContainer(collectionId).delete().block(); } catch (Exception e) { } } } static protected void safeDeleteDatabase(CosmosAsyncDatabase database) { if (database != null) { try { database.delete().block(); } catch (Exception e) { } } } static protected void safeDeleteSyncDatabase(CosmosDatabase database) { if (database != null) { try { logger.info("attempting to delete database ...."); database.delete(); logger.info("database deletion completed"); } catch (Exception e) { logger.error("failed to delete sync database", e); } } } }
class BulkExecutorTest extends BatchTestBase { private CosmosAsyncClient client; private CosmosAsyncContainer container; private CosmosAsyncDatabase database; private String preExistingDatabaseId = CosmosDatabaseForTest.generateId(); @Factory(dataProvider = "clientBuilders") public BulkExecutorTest(CosmosClientBuilder clientBuilder) { super(clientBuilder); } @AfterClass(groups = { "emulator" }, timeOut = 3 * SHUTDOWN_TIMEOUT, alwaysRun = true) public void afterClass() { logger.info("starting ...."); safeDeleteDatabase(database); safeCloseAsync(client); } @AfterMethod(groups = { "emulator" }) public void afterTest() throws Exception { if (this.container != null) { try { this.container.delete().block(); } catch (CosmosException error) { if (error.getStatusCode() != 404) { throw error; } } } } @BeforeMethod(groups = { "emulator" }) public void beforeTest() throws Exception { this.container = null; } @BeforeClass(groups = { "emulator" }, timeOut = SETUP_TIMEOUT) public void before_CosmosContainerTest() { client = getClientBuilder().buildAsyncClient(); database = createDatabase(client, preExistingDatabaseId); } static protected CosmosAsyncContainer createContainer(CosmosAsyncDatabase database) { String collectionName = UUID.randomUUID().toString(); CosmosContainerProperties containerProperties = getCollectionDefinition(collectionName); database.createContainer(containerProperties).block(); return database.getContainer(collectionName); } @Test(groups = { "emulator" }, timeOut = TIMEOUT) @Test(groups = { "emulator" }, timeOut = TIMEOUT) public void executeBulk_complete() throws InterruptedException { int totalRequest = 10; this.container = createContainer(database); List<CosmosItemOperation> cosmosItemOperations = new ArrayList<>(); for (int i = 0; i < totalRequest; i++) { String partitionKey = UUID.randomUUID().toString(); BatchTestBase.TestDoc testDoc = this.populateTestDoc(partitionKey); cosmosItemOperations.add(CosmosBulkOperations.getCreateItemOperation(testDoc, new 
PartitionKey(partitionKey))); partitionKey = UUID.randomUUID().toString(); BatchTestBase.EventDoc eventDoc = new BatchTestBase.EventDoc(UUID.randomUUID().toString(), 2, 4, "type1", partitionKey); cosmosItemOperations.add(CosmosBulkOperations.getCreateItemOperation(eventDoc, new PartitionKey(partitionKey))); } CosmosItemOperation[] itemOperationsArray = new CosmosItemOperation[cosmosItemOperations.size()]; cosmosItemOperations.toArray(itemOperationsArray); CosmosBulkExecutionOptions cosmosBulkExecutionOptions = new CosmosBulkExecutionOptions(); final BulkExecutor<BulkExecutorTest> executor = new BulkExecutor<>( container, Flux.fromArray(itemOperationsArray), cosmosBulkExecutionOptions); Flux<com.azure.cosmos.models.CosmosBulkOperationResponse<BulkExecutorTest>> bulkResponseFlux = Flux.deferContextual(context -> executor.execute()); Mono<List<CosmosBulkOperationResponse<BulkExecutorTest>>> convertToListMono = bulkResponseFlux .collect(Collectors.toList()); List<CosmosBulkOperationResponse<BulkExecutorTest>> bulkResponse = convertToListMono.block(); assertThat(bulkResponse.size()).isEqualTo(totalRequest * 2); for (com.azure.cosmos.models.CosmosBulkOperationResponse<BulkExecutorTest> cosmosBulkOperationResponse : bulkResponse) { com.azure.cosmos.models.CosmosBulkItemResponse cosmosBulkItemResponse = cosmosBulkOperationResponse.getResponse(); assertThat(cosmosBulkItemResponse.getStatusCode()).isEqualTo(HttpResponseStatus.CREATED.code()); assertThat(cosmosBulkItemResponse.getRequestCharge()).isGreaterThan(0); assertThat(cosmosBulkItemResponse.getCosmosDiagnostics().toString()).isNotNull(); assertThat(cosmosBulkItemResponse.getSessionToken()).isNotNull(); assertThat(cosmosBulkItemResponse.getActivityId()).isNotNull(); assertThat(cosmosBulkItemResponse.getRequestCharge()).isNotNull(); } int iterations = 0; while (true) { assertThat(iterations < 100); if (executor.isDisposed()) { break; } Thread.sleep(10); iterations++; } } }
// NOTE(review): decide on the failure policy here — log a warning (or error) for suspect
// input, but throw an exception instead if we are confident the input is actually invalid.
public static Mono<BinaryData> fromFlux(Flux<ByteBuffer> data, Long length, boolean bufferContent) { if (data == null) { return monoError(LOGGER, new NullPointerException("'data' cannot be null.")); } if (length != null && length < 0) { return monoError(LOGGER, new IllegalArgumentException("'length' cannot be less than 0.")); } if (bufferContent && length != null && length > MAX_ARRAY_SIZE) { return monoError(LOGGER, new IllegalArgumentException( String.format("'length' cannot be greater than %d when content buffering is enabled.", MAX_ARRAY_SIZE))); } if (bufferContent) { long[] trueLength = new long[]{0}; return data.map(buffer -> { int bufferSize = buffer.remaining(); ByteBuffer copy = ByteBuffer.allocate(bufferSize); trueLength[0] += bufferSize; copy.put(buffer); copy.flip(); return copy; }) .collect(LinkedList::new, (BiConsumer<LinkedList<ByteBuffer>, ByteBuffer>) LinkedList::add) .map(buffers -> { return new BinaryData(new FluxByteBufferContent(Flux.fromIterable(buffers) .map(ByteBuffer::duplicate), (length != null) ? length : trueLength[0], true)); }); } else { return Mono.just(new BinaryData(new FluxByteBufferContent(data, length))); } }
public static Mono<BinaryData> fromFlux(Flux<ByteBuffer> data, Long length, boolean bufferContent) { if (data == null) { return monoError(LOGGER, new NullPointerException("'data' cannot be null.")); } if (length != null && length < 0) { return monoError(LOGGER, new IllegalArgumentException("'length' cannot be less than 0.")); } if (bufferContent && length != null && length > MAX_ARRAY_SIZE) { return monoError(LOGGER, new IllegalArgumentException( String.format("'length' cannot be greater than %d when content buffering is enabled.", MAX_ARRAY_SIZE))); } if (bufferContent) { long[] trueLength = new long[]{0}; return data.map(buffer -> { int bufferSize = buffer.remaining(); ByteBuffer copy = ByteBuffer.allocate(bufferSize); trueLength[0] += bufferSize; copy.put(buffer); copy.flip(); return copy; }) .collect(LinkedList::new, (BiConsumer<LinkedList<ByteBuffer>, ByteBuffer>) LinkedList::add) .map(buffers -> { return new BinaryData(new FluxByteBufferContent(Flux.fromIterable(buffers) .map(ByteBuffer::duplicate), (length != null) ? length : trueLength[0], true)); }); } else { return Mono.just(new BinaryData(new FluxByteBufferContent(data, length))); } }
class BinaryData { private static final ClientLogger LOGGER = new ClientLogger(BinaryData.class); static final JsonSerializer SERIALIZER = JsonSerializerProviders.createInstance(true); static final int MAX_ARRAY_SIZE = Integer.MAX_VALUE - 8; private final BinaryDataContent content; BinaryData(BinaryDataContent content) { this.content = Objects.requireNonNull(content, "'content' cannot be null."); } static { BinaryDataHelper.setAccessor(new BinaryDataHelper.BinaryDataAccessor() { @Override public BinaryData createBinaryData(BinaryDataContent content) { return new BinaryData(content); } @Override public BinaryDataContent getContent(BinaryData binaryData) { return binaryData.content; } }); } /** * Creates an instance of {@link BinaryData} from the given {@link InputStream}. Depending on the type of * inputStream, the BinaryData instance created may or may not allow reading the content more than once. The stream * content is not cached if the stream is not read into a format that requires the content to be fully read into * memory. * <p> * <b>NOTE:</b> The {@link InputStream} is not closed by this function. * </p> * * <p><strong>Create an instance from an InputStream</strong></p> * * <!-- src_embed com.azure.core.util.BinaryData.fromStream * <pre> * final ByteArrayInputStream inputStream = new ByteArrayInputStream& * BinaryData binaryData = BinaryData.fromStream& * System.out.println& * </pre> * <!-- end com.azure.core.util.BinaryData.fromStream * * @param inputStream The {@link InputStream} that {@link BinaryData} will represent. * @return A {@link BinaryData} representing the {@link InputStream}. * @throws UncheckedIOException If any error happens while reading the {@link InputStream}. * @throws NullPointerException If {@code inputStream} is null. */ public static BinaryData fromStream(InputStream inputStream) { return fromStream(inputStream, null); } /** * Creates an instance of {@link BinaryData} from the given {@link InputStream}. 
Depending on the type of * inputStream, the BinaryData instance created may or may not allow reading the content more than once. The stream * content is not cached if the stream is not read into a format that requires the content to be fully read into * memory. * <p> * <b>NOTE:</b> The {@link InputStream} is not closed by this function. * </p> * * <p><strong>Create an instance from an InputStream</strong></p> * * <!-- src_embed com.azure.core.util.BinaryData.fromStream * <pre> * byte[] bytes = &quot;Some Data&quot;.getBytes& * final ByteArrayInputStream inputStream = new ByteArrayInputStream& * BinaryData binaryData = BinaryData.fromStream& * System.out.println& * </pre> * <!-- end com.azure.core.util.BinaryData.fromStream * * @param inputStream The {@link InputStream} that {@link BinaryData} will represent. * @param length The length of {@code data} in bytes. * @return A {@link BinaryData} representing the {@link InputStream}. * @throws UncheckedIOException If any error happens while reading the {@link InputStream}. * @throws NullPointerException If {@code inputStream} is null. */ public static BinaryData fromStream(InputStream inputStream, Long length) { return new BinaryData(new InputStreamContent(inputStream, length)); } /** * Creates an instance of {@link BinaryData} from the given {@link InputStream}. * <b>NOTE:</b> The {@link InputStream} is not closed by this function. * * <p><strong>Create an instance from an InputStream</strong></p> * * <!-- src_embed com.azure.core.util.BinaryData.fromStreamAsync * <pre> * final ByteArrayInputStream inputStream = new ByteArrayInputStream& * * Mono&lt;BinaryData&gt; binaryDataMono = BinaryData.fromStreamAsync& * * Disposable subscriber = binaryDataMono * .map& * System.out.println& * return true; * & * .subscribe& * * & * TimeUnit.SECONDS.sleep& * subscriber.dispose& * </pre> * <!-- end com.azure.core.util.BinaryData.fromStreamAsync * * @param inputStream The {@link InputStream} that {@link BinaryData} will represent. 
* @return A {@link Mono} of {@link BinaryData} representing the {@link InputStream}. * @throws UncheckedIOException If any error happens while reading the {@link InputStream}. * @throws NullPointerException If {@code inputStream} is null. */ public static Mono<BinaryData> fromStreamAsync(InputStream inputStream) { return fromStreamAsync(inputStream, null); } /** * Creates an instance of {@link BinaryData} from the given {@link InputStream}. * <b>NOTE:</b> The {@link InputStream} is not closed by this function. * * <p><strong>Create an instance from an InputStream</strong></p> * * <!-- src_embed com.azure.core.util.BinaryData.fromStreamAsync * <pre> * byte[] bytes = &quot;Some Data&quot;.getBytes& * final ByteArrayInputStream inputStream = new ByteArrayInputStream& * * Mono&lt;BinaryData&gt; binaryDataMono = BinaryData.fromStreamAsync& * * Disposable subscriber = binaryDataMono * .map& * System.out.println& * return true; * & * .subscribe& * * & * TimeUnit.SECONDS.sleep& * subscriber.dispose& * </pre> * <!-- end com.azure.core.util.BinaryData.fromStreamAsync * * @param inputStream The {@link InputStream} that {@link BinaryData} will represent. * @param length The length of {@code data} in bytes. * @return A {@link Mono} of {@link BinaryData} representing the {@link InputStream}. * @throws UncheckedIOException If any error happens while reading the {@link InputStream}. * @throws NullPointerException If {@code inputStream} is null. */ public static Mono<BinaryData> fromStreamAsync(InputStream inputStream, Long length) { return Mono.fromCallable(() -> fromStream(inputStream, length)); } /** * Creates an instance of {@link BinaryData} from the given {@link Flux} of {@link ByteBuffer}. 
* * <p><strong>Create an instance from a Flux of ByteBuffer</strong></p> * * <p>This method aggregates data into single byte array.</p> * * <!-- src_embed com.azure.core.util.BinaryData.fromFlux * <pre> * final byte[] data = &quot;Some Data&quot;.getBytes& * final Flux&lt;ByteBuffer&gt; dataFlux = Flux.just& * * Mono&lt;BinaryData&gt; binaryDataMono = BinaryData.fromFlux& * * Disposable subscriber = binaryDataMono * .map& * System.out.println& * return true; * & * .subscribe& * * & * TimeUnit.SECONDS.sleep& * subscriber.dispose& * </pre> * <!-- end com.azure.core.util.BinaryData.fromFlux * * @param data The {@link Flux} of {@link ByteBuffer} that {@link BinaryData} will represent. * @return A {@link Mono} of {@link BinaryData} representing the {@link Flux} of {@link ByteBuffer}. * @throws NullPointerException If {@code data} is null. */ public static Mono<BinaryData> fromFlux(Flux<ByteBuffer> data) { return fromFlux(data, null); } /** * Creates an instance of {@link BinaryData} from the given {@link Flux} of {@link ByteBuffer}. * * <p><strong>Create an instance from a Flux of ByteBuffer</strong></p> * * <p>This method aggregates data into single byte array.</p> * * <!-- src_embed com.azure.core.util.BinaryData.fromFlux * <pre> * final byte[] data = &quot;Some Data&quot;.getBytes& * final long length = data.length; * final Flux&lt;ByteBuffer&gt; dataFlux = Flux.just& * * Mono&lt;BinaryData&gt; binaryDataMono = BinaryData.fromFlux& * * Disposable subscriber = binaryDataMono * .map& * System.out.println& * return true; * & * .subscribe& * * & * TimeUnit.SECONDS.sleep& * subscriber.dispose& * </pre> * <!-- end com.azure.core.util.BinaryData.fromFlux * * @param data The {@link Flux} of {@link ByteBuffer} that {@link BinaryData} will represent. * @param length The length of {@code data} in bytes. * @return A {@link Mono} of {@link BinaryData} representing the {@link Flux} of {@link ByteBuffer}. * @throws IllegalArgumentException if the length is less than zero. 
* @throws NullPointerException if {@code data} is null. */ public static Mono<BinaryData> fromFlux(Flux<ByteBuffer> data, Long length) { return fromFlux(data, length, true); } /** * Creates an instance of {@link BinaryData} from the given {@link Flux} of {@link ByteBuffer}. * * <p><strong>Create an instance from a Flux of ByteBuffer</strong></p> * * <!-- src_embed com.azure.core.util.BinaryData.fromFlux * <pre> * final byte[] data = &quot;Some Data&quot;.getBytes& * final long length = data.length; * final boolean shouldAggregateData = false; * final Flux&lt;ByteBuffer&gt; dataFlux = Flux.just& * * Mono&lt;BinaryData&gt; binaryDataMono = BinaryData.fromFlux& * * Disposable subscriber = binaryDataMono * .map& * System.out.println& * return true; * & * .subscribe& * * & * TimeUnit.SECONDS.sleep& * subscriber.dispose& * </pre> * <!-- end com.azure.core.util.BinaryData.fromFlux * * @param data The {@link Flux} of {@link ByteBuffer} that {@link BinaryData} will represent. * @param length The length of {@code data} in bytes. * @param bufferContent A flag indicating whether {@link Flux} should be buffered eagerly or consumption deferred. * @return A {@link Mono} of {@link BinaryData} representing the {@link Flux} of {@link ByteBuffer}. * @throws IllegalArgumentException if the length is less than zero. * @throws NullPointerException if {@code data} is null. */ /** * Creates an instance of {@link BinaryData} from the given {@link String}. * <p> * The {@link String} is converted into bytes using {@link String * {@link StandardCharsets * </p> * <p><strong>Create an instance from a String</strong></p> * * <!-- src_embed com.azure.core.util.BinaryData.fromString * <pre> * final String data = &quot;Some Data&quot;; * & * BinaryData binaryData = BinaryData.fromString& * System.out.println& * </pre> * <!-- end com.azure.core.util.BinaryData.fromString * * @param data The {@link String} that {@link BinaryData} will represent. 
* @return A {@link BinaryData} representing the {@link String}. * @throws NullPointerException If {@code data} is null. */ public static BinaryData fromString(String data) { return new BinaryData(new StringContent(data)); } /** * Creates an instance of {@link BinaryData} from the given byte array. * <p> * If the byte array is null or zero length an empty {@link BinaryData} will be returned. Note that the input byte * array is used as a reference by this instance of {@link BinaryData} and any changes to the byte array outside of * this instance will result in the contents of this BinaryData instance being updated as well. To safely update the * byte array without impacting the BinaryData instance, perform an array copy first. * </p> * * <p><strong>Create an instance from a byte array</strong></p> * * <!-- src_embed com.azure.core.util.BinaryData.fromBytes * <pre> * final byte[] data = &quot;Some Data&quot;.getBytes& * BinaryData binaryData = BinaryData.fromBytes& * System.out.println& * </pre> * <!-- end com.azure.core.util.BinaryData.fromBytes * * @param data The byte array that {@link BinaryData} will represent. * @return A {@link BinaryData} representing the byte array. * @throws NullPointerException If {@code data} is null. */ public static BinaryData fromBytes(byte[] data) { return new BinaryData(new ByteArrayContent(data)); } /** * Creates an instance of {@link BinaryData} by serializing the {@link Object} using the default * {@link JsonSerializer}. * * <p> * <b>Note:</b> This method first looks for a {@link JsonSerializerProvider} implementation on the classpath. If no * implementation is found, a default Jackson-based implementation will be used to serialize the object. 
* </p> * <p><strong>Creating an instance from an Object</strong></p> * * <!-- src_embed com.azure.core.util.BinaryData.fromObject * <pre> * class Person & * & * private String name; * * & * public Person setName& * this.name = name; * return this; * & * * & * public String getName& * return name; * & * & * final Person data = new Person& * * & * & * & * BinaryData binaryData = BinaryData.fromObject& * * System.out.println& * </pre> * <!-- end com.azure.core.util.BinaryData.fromObject * * @param data The object that will be JSON serialized that {@link BinaryData} will represent. * @return A {@link BinaryData} representing the JSON serialized object. * @throws NullPointerException If {@code data} is null. * @see JsonSerializer */ public static BinaryData fromObject(Object data) { return fromObject(data, SERIALIZER); } /** * Creates an instance of {@link BinaryData} by serializing the {@link Object} using the default * {@link JsonSerializer}. * * <p> * <b>Note:</b> This method first looks for a {@link JsonSerializerProvider} implementation on the classpath. If no * implementation is found, a default Jackson-based implementation will be used to serialize the object. * </p> * <p><strong>Creating an instance from an Object</strong></p> * * <!-- src_embed com.azure.core.util.BinaryData.fromObjectAsync * <pre> * class Person & * & * private String name; * * & * public Person setName& * this.name = name; * return this; * & * * & * public String getName& * return name; * & * & * final Person data = new Person& * * & * & * & * Disposable subscriber = BinaryData.fromObjectAsync& * .subscribe& * * & * TimeUnit.SECONDS.sleep& * subscriber.dispose& * </pre> * <!-- end com.azure.core.util.BinaryData.fromObjectAsync * * @param data The object that will be JSON serialized that {@link BinaryData} will represent. * @return A {@link Mono} of {@link BinaryData} representing the JSON serialized object. 
* @see JsonSerializer */ public static Mono<BinaryData> fromObjectAsync(Object data) { return fromObjectAsync(data, SERIALIZER); } /** * Creates an instance of {@link BinaryData} by serializing the {@link Object} using the passed * {@link ObjectSerializer}. * <p> * The passed {@link ObjectSerializer} can either be one of the implementations offered by the Azure SDKs or your * own implementation. * </p> * * <p><strong>Azure SDK implementations</strong></p> * <ul> * <li><a href="https: * <li><a href="https: * </ul> * * <p><strong>Create an instance from an Object</strong></p> * * <!-- src_embed com.azure.core.util.BinaryData.fromObject * <pre> * class Person & * & * private String name; * * & * public Person setName& * this.name = name; * return this; * & * * & * public String getName& * return name; * & * & * final Person data = new Person& * * & * & * & * final ObjectSerializer serializer = * new MyJsonSerializer& * BinaryData binaryData = BinaryData.fromObject& * * System.out.println& * </pre> * <!-- end com.azure.core.util.BinaryData.fromObject * * @param data The object that will be serialized that {@link BinaryData} will represent. The {@code serializer} * determines how {@code null} data is serialized. * @param serializer The {@link ObjectSerializer} used to serialize object. * @return A {@link BinaryData} representing the serialized object. * @throws NullPointerException If {@code serializer} is null. * @see ObjectSerializer * @see JsonSerializer * @see <a href="https: */ public static BinaryData fromObject(Object data, ObjectSerializer serializer) { return new BinaryData(new SerializableContent(data, serializer)); } /** * Creates an instance of {@link BinaryData} by serializing the {@link Object} using the passed * {@link ObjectSerializer}. * * <p> * The passed {@link ObjectSerializer} can either be one of the implementations offered by the Azure SDKs or your * own implementation. 
* </p> * * <p><strong>Azure SDK implementations</strong></p> * <ul> * <li><a href="https: * <li><a href="https: * </ul> * * <p><strong>Create an instance from an Object</strong></p> * * <!-- src_embed com.azure.core.util.BinaryData.fromObjectAsync * <pre> * class Person & * & * private String name; * * & * public Person setName& * this.name = name; * return this; * & * * & * public String getName& * return name; * & * & * final Person data = new Person& * * & * & * & * final ObjectSerializer serializer = * new MyJsonSerializer& * Disposable subscriber = BinaryData.fromObjectAsync& * .subscribe& * * & * TimeUnit.SECONDS.sleep& * subscriber.dispose& * </pre> * <!-- end com.azure.core.util.BinaryData.fromObjectAsync * * @param data The object that will be serialized that {@link BinaryData} will represent. The {@code serializer} * determines how {@code null} data is serialized. * @param serializer The {@link ObjectSerializer} used to serialize object. * @return A {@link Mono} of {@link BinaryData} representing the serialized object. * @throws NullPointerException If {@code serializer} is null. * @see ObjectSerializer * @see JsonSerializer * @see <a href="https: */ public static Mono<BinaryData> fromObjectAsync(Object data, ObjectSerializer serializer) { return Mono.fromCallable(() -> fromObject(data, serializer)); } /** * Creates a {@link BinaryData} that uses the content of the file at {@link Path} as its data. This method checks * for the existence of the file at the time of creating an instance of {@link BinaryData}. The file, however, is * not read until there is an attempt to read the contents of the returned BinaryData instance. 
* * <p><strong>Create an instance from a file</strong></p> * * <p>The {@link BinaryData} returned from this method uses 8KB chunk size when reading file content.</p> * * <!-- src_embed com.azure.core.util.BinaryData.fromFile --> * <pre> * BinaryData binaryData = BinaryData.fromFile& * System.out.println& * </pre> * <!-- end com.azure.core.util.BinaryData.fromFile --> * * @param file The {@link Path} that will be the {@link BinaryData} data. * @return A new {@link BinaryData}. * @throws NullPointerException If {@code file} is null. */ public static BinaryData fromFile(Path file) { return fromFile(file, STREAM_READ_SIZE); } /** * Creates a {@link BinaryData} that uses the content of the file at {@link Path file} as its data. This method * checks for the existence of the file at the time of creating an instance of {@link BinaryData}. The file, * however, is not read until there is an attempt to read the contents of the returned BinaryData instance. * * <p><strong>Create an instance from a file</strong></p> * * <!-- src_embed com.azure.core.util.BinaryData.fromFile * <pre> * BinaryData binaryData = BinaryData.fromFile& * System.out.println& * </pre> * <!-- end com.azure.core.util.BinaryData.fromFile * * @param file The {@link Path} that will be the {@link BinaryData} data. * @param chunkSize The requested size for each read of the path. * @return A new {@link BinaryData}. * @throws NullPointerException If {@code file} is null. * @throws IllegalArgumentException If {@code offset} or {@code length} are negative or {@code offset} plus * {@code length} is greater than the file size or {@code chunkSize} is less than or equal to 0. * @throws UncheckedIOException if the file does not exist. */ public static BinaryData fromFile(Path file, int chunkSize) { return new BinaryData(new FileContent(file, chunkSize, null, null)); } /** * Creates a {@link BinaryData} that uses the content of the file at {@link Path file} as its data. 
This method * checks for the existence of the file at the time of creating an instance of {@link BinaryData}. The file, * however, is not read until there is an attempt to read the contents of the returned BinaryData instance. * * <p><strong>Create an instance from a file</strong></p> * * <p>The {@link BinaryData} returned from this method uses 8KB chunk size when reading file content.</p> * * <!-- src_embed com.azure.core.util.BinaryData.fromFile * <pre> * long position = 1024; * long length = 100 * 1048; * BinaryData binaryData = BinaryData.fromFile& * new File& * System.out.println& * </pre> * <!-- end com.azure.core.util.BinaryData.fromFile * * @param file The {@link Path} that will be the {@link BinaryData} data. * @param position Position, or offset, within the path where reading begins. * @param length Maximum number of bytes to be read from the path. * @return A new {@link BinaryData}. * @throws NullPointerException If {@code file} is null. * @throws IllegalArgumentException If {@code offset} or {@code length} are negative or {@code offset} plus * {@code length} is greater than the file size or {@code chunkSize} is less than or equal to 0. * @throws UncheckedIOException if the file does not exist. */ public static BinaryData fromFile(Path file, Long position, Long length) { return new BinaryData(new FileContent(file, STREAM_READ_SIZE, position, length)); } /** * Creates a {@link BinaryData} that uses the content of the file at {@link Path file} as its data. This method * checks for the existence of the file at the time of creating an instance of {@link BinaryData}. The file, * however, is not read until there is an attempt to read the contents of the returned BinaryData instance. 
* * <p><strong>Create an instance from a file</strong></p> * * <!-- src_embed com.azure.core.util.BinaryData.fromFile * <pre> * long position = 1024; * long length = 100 * 1048; * int chunkSize = 8092; * BinaryData binaryData = BinaryData.fromFile& * new File& * System.out.println& * </pre> * <!-- end com.azure.core.util.BinaryData.fromFile * * @param file The {@link Path} that will be the {@link BinaryData} data. * @param position Position, or offset, within the path where reading begins. * @param length Maximum number of bytes to be read from the path. * @param chunkSize The requested size for each read of the path. * @return A new {@link BinaryData}. * @throws NullPointerException If {@code file} is null. * @throws IllegalArgumentException If {@code offset} or {@code length} are negative or {@code offset} plus * {@code length} is greater than the file size or {@code chunkSize} is less than or equal to 0. * @throws UncheckedIOException if the file does not exist. */ public static BinaryData fromFile(Path file, Long position, Long length, int chunkSize) { return new BinaryData(new FileContent(file, chunkSize, position, length)); } /** * Returns a byte array representation of this {@link BinaryData}. This method returns a reference to the underlying * byte array. Modifying the contents of the returned byte array will also change the content of this BinaryData * instance. If the content source of this BinaryData instance is a file, an Inputstream or a * {@code Flux<ByteBuffer>} the source is not modified. To safely update the byte array, it is recommended to make a * copy of the contents first. * * @return A byte array representing this {@link BinaryData}. */ public byte[] toBytes() { return content.toBytes(); } /** * Returns a {@link String} representation of this {@link BinaryData} by converting its data using the UTF-8 * character set. A new instance of String is created each time this method is called. 
* @return A {@link String} representing this {@link BinaryData}.
*/
public String toString() {
    // Delegates to the underlying BinaryDataContent's string representation.
    return content.toString();
}

/**
 * Returns an {@link Object} representation of this {@link BinaryData} by deserializing its data using the default
 * {@link JsonSerializer}. Each time this method is called the content is deserialized and a new instance of type
 * {@code T} is returned, so calling it repeatedly to convert the same underlying data is not recommended.
 * <p>
 * The type, represented by {@link Class}, should be a non-generic class; for generic classes use the
 * {@link TypeReference} overload.
 * <p>
 * <b>Note:</b> A {@link JsonSerializerProvider} implementation is looked up on the classpath; if none is found, a
 * default Jackson-based implementation is used to deserialize the object.
 *
 * @param <T> Type of the deserialized Object.
 * @param clazz The {@link Class} representing the Object's type.
 * @return An {@link Object} representing the JSON deserialized {@link BinaryData}.
 * @throws NullPointerException If {@code clazz} is null.
 * @see JsonSerializer
 */
public <T> T toObject(Class<T> clazz) {
    return toObject(TypeReference.createInstance(clazz), SERIALIZER);
}

/**
 * Returns an {@link Object} representation of this {@link BinaryData} by deserializing its data using the default
 * {@link JsonSerializer}. Each call deserializes the content again and returns a new instance of type {@code T}.
 * <p>
 * The type, represented by {@link TypeReference}, can be generic or non-generic: for a generic type create a
 * sub-type of {@link TypeReference}; for a non-generic type use {@code TypeReference.createInstance(Class)}.
 *
 * @param typeReference The {@link TypeReference} representing the Object's type.
 * @param <T> Type of the deserialized Object.
 * @return An {@link Object} representing the JSON deserialized {@link BinaryData}.
 * @throws NullPointerException If {@code typeReference} is null.
 * @see JsonSerializer
 */
public <T> T toObject(TypeReference<T> typeReference) {
    return toObject(typeReference, SERIALIZER);
}

/**
 * Returns an {@link Object} representation of this {@link BinaryData} by deserializing its data using the passed
 * {@link ObjectSerializer}. Each call deserializes the content again and returns a new instance of type {@code T}.
 * <p>
 * The type, represented by {@link Class}, should be a non-generic class; for generic classes use the
 * {@link TypeReference} overload. The passed {@link ObjectSerializer} can be one of the Azure SDK implementations
 * or your own implementation.
 *
 * @param clazz The {@link Class} representing the Object's type.
 * @param serializer The {@link ObjectSerializer} used to deserialize object.
 * @param <T> Type of the deserialized Object.
 * @return An {@link Object} representing the deserialized {@link BinaryData}.
 * @throws NullPointerException If {@code clazz} or {@code serializer} is null.
 * @see ObjectSerializer
 * @see JsonSerializer
 */
public <T> T toObject(Class<T> clazz, ObjectSerializer serializer) {
    return toObject(TypeReference.createInstance(clazz), serializer);
}

/**
 * Returns an {@link Object} representation of this {@link BinaryData} by deserializing its data using the passed
 * {@link ObjectSerializer}. Each call deserializes the content again and returns a new instance of type {@code T}.
 * <p>
 * The type, represented by {@link TypeReference}, can be generic or non-generic. The passed
 * {@link ObjectSerializer} can be one of the Azure SDK implementations or your own implementation.
 *
 * @param typeReference The {@link TypeReference} representing the Object's type.
 * @param serializer The {@link ObjectSerializer} used to deserialize object.
 * @param <T> Type of the deserialized Object.
 * @return An {@link Object} representing the deserialized {@link BinaryData}.
 * @throws NullPointerException If {@code typeReference} or {@code serializer} is null.
 * @see ObjectSerializer
 * @see JsonSerializer
 */
public <T> T toObject(TypeReference<T> typeReference, ObjectSerializer serializer) {
    Objects.requireNonNull(typeReference, "'typeReference' cannot be null.");
    Objects.requireNonNull(serializer, "'serializer' cannot be null.");
    return content.toObject(typeReference, serializer);
}

/**
 * Returns an {@link Object} representation of this {@link BinaryData} by deserializing its data using the default
 * {@link JsonSerializer}. The deserialization is deferred until the returned {@link Mono} is subscribed to.
 * <p>
 * The type, represented by {@link Class}, should be a non-generic class; for generic classes use the
 * {@link TypeReference} overload.
 *
 * @param clazz The {@link Class} representing the Object's type.
 * @param <T> Type of the deserialized Object.
 * @return A {@link Mono} of {@link Object} representing the JSON deserialized {@link BinaryData}.
 * @throws NullPointerException If {@code clazz} is null.
 * @see JsonSerializer
 */
public <T> Mono<T> toObjectAsync(Class<T> clazz) {
    return toObjectAsync(TypeReference.createInstance(clazz), SERIALIZER);
}

/**
 * Returns an {@link Object} representation of this {@link BinaryData} by deserializing its data using the default
 * {@link JsonSerializer}. The deserialization is deferred until the returned {@link Mono} is subscribed to.
 * <p>
 * The type, represented by {@link TypeReference}, can be generic or non-generic.
 *
 * @param typeReference The {@link TypeReference} representing the Object's type.
 * @param <T> Type of the deserialized Object.
 * @return A {@link Mono} of {@link Object} representing the JSON deserialized {@link BinaryData}.
 * @throws NullPointerException If {@code typeReference} is null.
 * @see JsonSerializer
 */
public <T> Mono<T> toObjectAsync(TypeReference<T> typeReference) {
    return toObjectAsync(typeReference, SERIALIZER);
}

/**
 * Returns an {@link Object} representation of this {@link BinaryData} by deserializing its data using the passed
 * {@link ObjectSerializer}. The deserialization is deferred until the returned {@link Mono} is subscribed to.
 * <p>
 * The type, represented by {@link Class}, should be a non-generic class; for generic classes use the
 * {@link TypeReference} overload.
 *
 * @param clazz The {@link Class} representing the Object's type.
 * @param serializer The {@link ObjectSerializer} used to deserialize object.
 * @param <T> Type of the deserialized Object.
 * @return A {@link Mono} of {@link Object} representing the deserialized {@link BinaryData}.
 * @throws NullPointerException If {@code clazz} or {@code serializer} is null.
 * @see ObjectSerializer
 * @see JsonSerializer
 */
public <T> Mono<T> toObjectAsync(Class<T> clazz, ObjectSerializer serializer) {
    return toObjectAsync(TypeReference.createInstance(clazz), serializer);
}

/**
 * Returns an {@link Object} representation of this {@link BinaryData} by deserializing its data using the passed
 * {@link ObjectSerializer}. The deserialization is deferred until the returned {@link Mono} is subscribed to.
 * <p>
 * The type, represented by {@link TypeReference}, can be generic or non-generic.
 *
 * @param typeReference The {@link TypeReference} representing the Object's type.
 * @param serializer The {@link ObjectSerializer} used to deserialize object.
 * @param <T> Type of the deserialized Object.
 * @return A {@link Mono} of {@link Object} representing the deserialized {@link BinaryData}.
 * @throws NullPointerException If {@code typeReference} or {@code serializer} is null.
 * @see ObjectSerializer
 * @see JsonSerializer
 */
public <T> Mono<T> toObjectAsync(TypeReference<T> typeReference, ObjectSerializer serializer) {
    // Defer the synchronous deserialization onto the subscriber via Mono.fromCallable.
    return Mono.fromCallable(() -> toObject(typeReference, serializer));
}

/**
 * Returns an {@link InputStream} representation of this {@link BinaryData}.
 *
 * @return An {@link InputStream} representing the {@link BinaryData}.
 */
public InputStream toStream() {
    return content.toStream();
}

/**
 * Returns a read-only {@link ByteBuffer} representation of this {@link BinaryData}.
 * <p>
 * Attempting to mutate the returned {@link ByteBuffer} will throw a {@link ReadOnlyBufferException}.
 *
 * @return A read-only {@link ByteBuffer} representing the {@link BinaryData}.
 */
public ByteBuffer toByteBuffer() {
    return content.toByteBuffer();
}

/**
 * Returns the content of this {@link BinaryData} instance as a flux of {@link ByteBuffer ByteBuffers}. The content
 * is not read from the underlying data source until the {@link Flux} is subscribed to.
 *
 * @return the content of this {@link BinaryData} instance as a flux of {@link ByteBuffer ByteBuffers}.
 */
public Flux<ByteBuffer> toFluxByteBuffer() {
    return content.toFluxByteBuffer();
}

/**
 * Returns the length of the content, if it is known. The length can be {@code null} if the source did not specify
 * the length or the length cannot be determined without reading the whole content.
 *
 * @return the length of the content, if it is known; otherwise {@code null}.
 */
public Long getLength() {
    return content.getLength();
}

/**
 * Returns a flag indicating whether the content can be repeatedly consumed using all accessors.
 * <p>
 * Replayability does not imply thread-safety: the caller must not use data accessors simultaneously regardless of
 * what this method returns.
 *
 * @return a flag indicating whether the content can be repeatedly consumed using all accessors.
 */
public boolean isReplayable() {
    return content.isReplayable();
}

/**
 * Converts the {@link BinaryData} into a {@link BinaryData} that is replayable, i.e. the content can be consumed
 * repeatedly using all accessors.
 * <p>
 * A {@link BinaryData} that is already replayable is returned as is. Otherwise techniques like marking and
 * resetting a stream or buffering in memory are employed to assure replayability.
 * <p>
 * Replayability does not imply thread-safety: the caller must not use data accessors of the returned
 * {@link BinaryData} simultaneously.
 *
 * @return Replayable {@link BinaryData}.
 */
public BinaryData toReplayableBinaryData() {
    if (this.isReplayable()) {
        // Already replayable; avoid wrapping or buffering again.
        return this;
    } else {
        return new BinaryData(content.toReplayableContent());
    }
}

/**
 * Converts the {@link BinaryData} into a {@link BinaryData} that is replayable, i.e. the content can be consumed
 * repeatedly using all accessors.
 * <p>
 * A {@link BinaryData} that is already replayable is returned as is. Otherwise techniques like marking and
 * resetting a stream or buffering in memory are employed to assure replayability.
 * <p>
 * Replayability does not imply thread-safety: the caller must not use data accessors of the returned
 * {@link BinaryData} simultaneously.
 *
 * @return A {@link Mono} of {@link BinaryData} representing the replayable {@link BinaryData}.
 */
public Mono<BinaryData> toReplayableBinaryDataAsync() {
    if (isReplayable()) {
        return Mono.just(this);
    } else {
        return content.toReplayableContentAsync().map(BinaryData::new);
    }
}
}
/**
 * An abstraction over a byte payload, backed by a {@link BinaryDataContent}, with static factories
 * ({@code fromStream}, {@code fromFlux}, {@code fromString}, {@code fromBytes}, {@code fromObject}, ...)
 * for creating instances from common data sources.
 */
class BinaryData {
    private static final ClientLogger LOGGER = new ClientLogger(BinaryData.class);
    // Default JSON serializer; 'true' requests the fallback Jackson-based implementation when no
    // JsonSerializerProvider is found on the classpath (presumably — TODO confirm against
    // JsonSerializerProviders.createInstance).
    static final JsonSerializer SERIALIZER = JsonSerializerProviders.createInstance(true);
    // Largest array size safely allocatable on common JVMs (Integer.MAX_VALUE minus header slack).
    static final int MAX_ARRAY_SIZE = Integer.MAX_VALUE - 8;
    // The underlying content; all accessors delegate to it.
    private final BinaryDataContent content;

    BinaryData(BinaryDataContent content) {
        this.content = Objects.requireNonNull(content, "'content' cannot be null.");
    }

    static {
        // Register an accessor so internal azure-core code can wrap/unwrap BinaryDataContent without
        // exposing it in the public API.
        BinaryDataHelper.setAccessor(new BinaryDataHelper.BinaryDataAccessor() {
            @Override
            public BinaryData createBinaryData(BinaryDataContent content) {
                return new BinaryData(content);
            }

            @Override
            public BinaryDataContent getContent(BinaryData binaryData) {
                return binaryData.content;
            }
        });
    }

    /**
     * Creates an instance of {@link BinaryData} from the given {@link InputStream}. Depending on the type of
     * inputStream, the BinaryData instance created may or may not allow reading the content more than once. The
     * stream content is not cached if the stream is not read into a format that requires the content to be fully
     * read into memory.
     * <p>
     * <b>NOTE:</b> The {@link InputStream} is not closed by this function.
     *
     * @param inputStream The {@link InputStream} that {@link BinaryData} will represent.
     * @return A {@link BinaryData} representing the {@link InputStream}.
     * @throws UncheckedIOException If any error happens while reading the {@link InputStream}.
     * @throws NullPointerException If {@code inputStream} is null.
     */
    public static BinaryData fromStream(InputStream inputStream) {
        return fromStream(inputStream, null);
    }

    /**
     * Creates an instance of {@link BinaryData} from the given {@link InputStream}. Depending on the type of
     * inputStream, the BinaryData instance created may or may not allow reading the content more than once. The
     * stream content is not cached if the stream is not read into a format that requires the content to be fully
     * read into memory.
     * <p>
     * <b>NOTE:</b> The {@link InputStream} is not closed by this function.
     *
     * @param inputStream The {@link InputStream} that {@link BinaryData} will represent.
     * @param length The length of {@code data} in bytes.
     * @return A {@link BinaryData} representing the {@link InputStream}.
     * @throws UncheckedIOException If any error happens while reading the {@link InputStream}.
     * @throws NullPointerException If {@code inputStream} is null.
     */
    public static BinaryData fromStream(InputStream inputStream, Long length) {
        return new BinaryData(new InputStreamContent(inputStream, length));
    }

    /**
     * Creates an instance of {@link BinaryData} from the given {@link InputStream}, deferring the work until the
     * returned {@link Mono} is subscribed to.
     * <p>
     * <b>NOTE:</b> The {@link InputStream} is not closed by this function.
     *
     * @param inputStream The {@link InputStream} that {@link BinaryData} will represent.
     * @return A {@link Mono} of {@link BinaryData} representing the {@link InputStream}.
     * @throws UncheckedIOException If any error happens while reading the {@link InputStream}.
     * @throws NullPointerException If {@code inputStream} is null.
     */
    public static Mono<BinaryData> fromStreamAsync(InputStream inputStream) {
        return fromStreamAsync(inputStream, null);
    }

    /**
     * Creates an instance of {@link BinaryData} from the given {@link InputStream}, deferring the work until the
     * returned {@link Mono} is subscribed to.
     * <p>
     * <b>NOTE:</b> The {@link InputStream} is not closed by this function.
     *
     * @param inputStream The {@link InputStream} that {@link BinaryData} will represent.
     * @param length The length of {@code data} in bytes.
     * @return A {@link Mono} of {@link BinaryData} representing the {@link InputStream}.
     * @throws UncheckedIOException If any error happens while reading the {@link InputStream}.
     * @throws NullPointerException If {@code inputStream} is null.
     */
    public static Mono<BinaryData> fromStreamAsync(InputStream inputStream, Long length) {
        return Mono.fromCallable(() -> fromStream(inputStream, length));
    }

    /**
     * Creates an instance of {@link BinaryData} from the given {@link Flux} of {@link ByteBuffer}. This method
     * aggregates the data into a single byte array.
     *
     * @param data The {@link Flux} of {@link ByteBuffer} that {@link BinaryData} will represent.
     * @return A {@link Mono} of {@link BinaryData} representing the {@link Flux} of {@link ByteBuffer}.
     * @throws NullPointerException If {@code data} is null.
     */
    public static Mono<BinaryData> fromFlux(Flux<ByteBuffer> data) {
        return fromFlux(data, null);
    }

    /**
     * Creates an instance of {@link BinaryData} from the given {@link Flux} of {@link ByteBuffer}. This method
     * aggregates the data into a single byte array.
     *
     * @param data The {@link Flux} of {@link ByteBuffer} that {@link BinaryData} will represent.
     * @param length The length of {@code data} in bytes.
     * @return A {@link Mono} of {@link BinaryData} representing the {@link Flux} of {@link ByteBuffer}.
     * @throws IllegalArgumentException if the length is less than zero.
     * @throws NullPointerException if {@code data} is null.
     */
    public static Mono<BinaryData> fromFlux(Flux<ByteBuffer> data, Long length) {
        // Buffers the Flux eagerly (bufferContent = true) via the three-argument overload.
        return fromFlux(data, length, true);
    }

    /**
     * Creates an instance of {@link BinaryData} from the given {@link Flux} of {@link ByteBuffer}.
     *
     * @param data The {@link Flux} of {@link ByteBuffer} that {@link BinaryData} will represent.
     * @param length The length of {@code data} in bytes.
     * @param bufferContent A flag indicating whether {@link Flux} should be buffered eagerly or consumption
     * deferred.
     * @return A {@link Mono} of {@link BinaryData} representing the {@link Flux} of {@link ByteBuffer}.
     * @throws IllegalArgumentException if the length is less than zero.
     * @throws NullPointerException if {@code data} is null.
     */
    // NOTE(review): the body of fromFlux(Flux, Long, boolean) is not visible in this chunk of the file;
    // only its Javadoc survives here. Verify the implementation against the full source.

    /**
     * Creates an instance of {@link BinaryData} from the given {@link String}.
     * <p>
     * The {@link String} is converted into bytes using the UTF-8 character set (presumably — TODO confirm
     * against StringContent).
     *
     * @param data The {@link String} that {@link BinaryData} will represent.
     * @return A {@link BinaryData} representing the {@link String}.
     * @throws NullPointerException If {@code data} is null.
     */
    public static BinaryData fromString(String data) {
        return new BinaryData(new StringContent(data));
    }

    /**
     * Creates an instance of {@link BinaryData} from the given byte array.
     * <p>
     * The input byte array is used as a reference by this instance of {@link BinaryData}, and any changes to the
     * byte array outside of this instance will be reflected in this BinaryData as well. To safely update the byte
     * array without impacting the BinaryData instance, perform an array copy first.
     *
     * @param data The byte array that {@link BinaryData} will represent.
     * @return A {@link BinaryData} representing the byte array.
     * @throws NullPointerException If {@code data} is null.
     */
    public static BinaryData fromBytes(byte[] data) {
        return new BinaryData(new ByteArrayContent(data));
    }

    /**
     * Creates an instance of {@link BinaryData} by serializing the {@link Object} using the default
     * {@link JsonSerializer}.
     * <p>
     * <b>Note:</b> A {@link JsonSerializerProvider} implementation is looked up on the classpath; if none is
     * found, a default Jackson-based implementation is used to serialize the object.
     *
     * @param data The object that will be JSON serialized that {@link BinaryData} will represent.
     * @return A {@link BinaryData} representing the JSON serialized object.
     * @throws NullPointerException If {@code data} is null.
     * @see JsonSerializer
     */
    public static BinaryData fromObject(Object data) {
        return fromObject(data, SERIALIZER);
    }

    /**
     * Creates an instance of {@link BinaryData} by serializing the {@link Object} using the default
     * {@link JsonSerializer}, deferring the work until the returned {@link Mono} is subscribed to.
     *
     * @param data The object that will be JSON serialized that {@link BinaryData} will represent.
     * @return A {@link Mono} of {@link BinaryData} representing the JSON serialized object.
     * @see JsonSerializer
     */
    public static Mono<BinaryData> fromObjectAsync(Object data) {
        return fromObjectAsync(data, SERIALIZER);
    }

    /**
     * Creates an instance of {@link BinaryData} by serializing the {@link Object} using the passed
     * {@link ObjectSerializer}, which can be one of the Azure SDK implementations or your own implementation.
     *
     * @param data The object that will be serialized that {@link BinaryData} will represent. The
     * {@code serializer} determines how {@code null} data is serialized.
     * @param serializer The {@link ObjectSerializer} used to serialize object.
     * @return A {@link BinaryData} representing the serialized object.
     * @throws NullPointerException If {@code serializer} is null.
     * @see ObjectSerializer
     * @see JsonSerializer
     */
    public static BinaryData fromObject(Object data, ObjectSerializer serializer) {
        return new BinaryData(new SerializableContent(data, serializer));
    }

    /**
     * Creates an instance of {@link BinaryData} by serializing the {@link Object} using the passed
     * {@link ObjectSerializer}, deferring the work until the returned {@link Mono} is subscribed to.
     *
     * @param data The object that will be serialized that {@link BinaryData} will represent. The
     * {@code serializer} determines how {@code null} data is serialized.
     * @param serializer The {@link ObjectSerializer} used to serialize object.
     * @return A {@link Mono} of {@link BinaryData} representing the serialized object.
     * @throws NullPointerException If {@code serializer} is null.
     * @see ObjectSerializer
     * @see JsonSerializer
     */
    public static Mono<BinaryData> fromObjectAsync(Object data, ObjectSerializer serializer) {
        return Mono.fromCallable(() -> fromObject(data, serializer));
    }

    /**
     * Creates a {@link BinaryData} that uses the content of the file at {@link Path} as its data. This method
     * checks for the existence of the file at the time of creating an instance of {@link BinaryData}. The file,
     * however, is not read until there is an attempt to read the contents of the returned BinaryData instance.
* * <p><strong>Create an instance from a file</strong></p> * * <p>The {@link BinaryData} returned from this method uses 8KB chunk size when reading file content.</p> * * <!-- src_embed com.azure.core.util.BinaryData.fromFile --> * <pre> * BinaryData binaryData = BinaryData.fromFile& * System.out.println& * </pre> * <!-- end com.azure.core.util.BinaryData.fromFile --> * * @param file The {@link Path} that will be the {@link BinaryData} data. * @return A new {@link BinaryData}. * @throws NullPointerException If {@code file} is null. */ public static BinaryData fromFile(Path file) { return fromFile(file, STREAM_READ_SIZE); } /** * Creates a {@link BinaryData} that uses the content of the file at {@link Path file} as its data. This method * checks for the existence of the file at the time of creating an instance of {@link BinaryData}. The file, * however, is not read until there is an attempt to read the contents of the returned BinaryData instance. * * <p><strong>Create an instance from a file</strong></p> * * <!-- src_embed com.azure.core.util.BinaryData.fromFile * <pre> * BinaryData binaryData = BinaryData.fromFile& * System.out.println& * </pre> * <!-- end com.azure.core.util.BinaryData.fromFile * * @param file The {@link Path} that will be the {@link BinaryData} data. * @param chunkSize The requested size for each read of the path. * @return A new {@link BinaryData}. * @throws NullPointerException If {@code file} is null. * @throws IllegalArgumentException If {@code offset} or {@code length} are negative or {@code offset} plus * {@code length} is greater than the file size or {@code chunkSize} is less than or equal to 0. * @throws UncheckedIOException if the file does not exist. */ public static BinaryData fromFile(Path file, int chunkSize) { return new BinaryData(new FileContent(file, chunkSize, null, null)); } /** * Creates a {@link BinaryData} that uses the content of the file at {@link Path file} as its data. 
This method * checks for the existence of the file at the time of creating an instance of {@link BinaryData}. The file, * however, is not read until there is an attempt to read the contents of the returned BinaryData instance. * * <p><strong>Create an instance from a file</strong></p> * * <p>The {@link BinaryData} returned from this method uses 8KB chunk size when reading file content.</p> * * <!-- src_embed com.azure.core.util.BinaryData.fromFile * <pre> * long position = 1024; * long length = 100 * 1048; * BinaryData binaryData = BinaryData.fromFile& * new File& * System.out.println& * </pre> * <!-- end com.azure.core.util.BinaryData.fromFile * * @param file The {@link Path} that will be the {@link BinaryData} data. * @param position Position, or offset, within the path where reading begins. * @param length Maximum number of bytes to be read from the path. * @return A new {@link BinaryData}. * @throws NullPointerException If {@code file} is null. * @throws IllegalArgumentException If {@code offset} or {@code length} are negative or {@code offset} plus * {@code length} is greater than the file size or {@code chunkSize} is less than or equal to 0. * @throws UncheckedIOException if the file does not exist. */ public static BinaryData fromFile(Path file, Long position, Long length) { return new BinaryData(new FileContent(file, STREAM_READ_SIZE, position, length)); } /** * Creates a {@link BinaryData} that uses the content of the file at {@link Path file} as its data. This method * checks for the existence of the file at the time of creating an instance of {@link BinaryData}. The file, * however, is not read until there is an attempt to read the contents of the returned BinaryData instance. 
* * <p><strong>Create an instance from a file</strong></p> * * <!-- src_embed com.azure.core.util.BinaryData.fromFile * <pre> * long position = 1024; * long length = 100 * 1048; * int chunkSize = 8092; * BinaryData binaryData = BinaryData.fromFile& * new File& * System.out.println& * </pre> * <!-- end com.azure.core.util.BinaryData.fromFile * * @param file The {@link Path} that will be the {@link BinaryData} data. * @param position Position, or offset, within the path where reading begins. * @param length Maximum number of bytes to be read from the path. * @param chunkSize The requested size for each read of the path. * @return A new {@link BinaryData}. * @throws NullPointerException If {@code file} is null. * @throws IllegalArgumentException If {@code offset} or {@code length} are negative or {@code offset} plus * {@code length} is greater than the file size or {@code chunkSize} is less than or equal to 0. * @throws UncheckedIOException if the file does not exist. */ public static BinaryData fromFile(Path file, Long position, Long length, int chunkSize) { return new BinaryData(new FileContent(file, chunkSize, position, length)); } /** * Returns a byte array representation of this {@link BinaryData}. This method returns a reference to the underlying * byte array. Modifying the contents of the returned byte array will also change the content of this BinaryData * instance. If the content source of this BinaryData instance is a file, an Inputstream or a * {@code Flux<ByteBuffer>} the source is not modified. To safely update the byte array, it is recommended to make a * copy of the contents first. * * @return A byte array representing this {@link BinaryData}. */ public byte[] toBytes() { return content.toBytes(); } /** * Returns a {@link String} representation of this {@link BinaryData} by converting its data using the UTF-8 * character set. A new instance of String is created each time this method is called. 
* * @return A {@link String} representing this {@link BinaryData}. */ public String toString() { return content.toString(); } /** * Returns an {@link Object} representation of this {@link BinaryData} by deserializing its data using the default * {@link JsonSerializer}. Each time this method is called, the content is deserialized and a new instance of type * {@code T} is returned. So, calling this method repeatedly to convert the underlying data source into the same * type is not recommended. * <p> * The type, represented by {@link Class}, should be a non-generic class, for generic classes use * {@link * <p> * <b>Note:</b> This method first looks for a {@link JsonSerializerProvider} implementation on the classpath. If no * implementation is found, a default Jackson-based implementation will be used to deserialize the object. * * <p><strong>Get a non-generic Object from the BinaryData</strong></p> * * <!-- src_embed com.azure.core.util.BinaryData.toObject * <pre> * class Person & * & * private String name; * * & * public Person setName& * this.name = name; * return this; * & * * & * public String getName& * return name; * & * & * final Person data = new Person& * * & * & * & * & * & * * BinaryData binaryData = BinaryData.fromObject& * * Person person = binaryData.toObject& * System.out.println& * </pre> * <!-- end com.azure.core.util.BinaryData.toObject * * @param <T> Type of the deserialized Object. * @param clazz The {@link Class} representing the Object's type. * @return An {@link Object} representing the JSON deserialized {@link BinaryData}. * @throws NullPointerException If {@code clazz} is null. * @see JsonSerializer */ public <T> T toObject(Class<T> clazz) { return toObject(TypeReference.createInstance(clazz), SERIALIZER); } /** * Returns an {@link Object} representation of this {@link BinaryData} by deserializing its data using the default * {@link JsonSerializer}. 
Each time this method is called, the content is deserialized and a new instance of type * {@code T} is returned. So, calling this method repeatedly to convert the underlying data source into the same * type is not recommended. * <p> * The type, represented by {@link TypeReference}, can either be a generic or non-generic type. If the type is * generic create a sub-type of {@link TypeReference}, if the type is non-generic use * {@link TypeReference * <p> * <b>Note:</b> This method first looks for a {@link JsonSerializerProvider} implementation on the classpath. If no * implementation is found, a default Jackson-based implementation will be used to deserialize the object. * * <p><strong>Get a non-generic Object from the BinaryData</strong></p> * * <!-- src_embed com.azure.core.util.BinaryData.toObject * <pre> * class Person & * & * private String name; * * & * public Person setName& * this.name = name; * return this; * & * * & * public String getName& * return name; * & * & * final Person data = new Person& * * & * & * & * & * & * * BinaryData binaryData = BinaryData.fromObject& * * Person person = binaryData.toObject& * System.out.println& * </pre> * <!-- end com.azure.core.util.BinaryData.toObject * * <p><strong>Get a generic Object from the BinaryData</strong></p> * * <!-- src_embed com.azure.core.util.BinaryData.toObject * <pre> * final Person person1 = new Person& * final Person person2 = new Person& * * List&lt;Person&gt; personList = new ArrayList&lt;&gt;& * personList.add& * personList.add& * * & * & * & * & * & * * * BinaryData binaryData = BinaryData.fromObject& * * List&lt;Person&gt; persons = binaryData.toObject& * persons.forEach& * </pre> * <!-- end com.azure.core.util.BinaryData.toObject * * @param typeReference The {@link TypeReference} representing the Object's type. * @param <T> Type of the deserialized Object. * @return An {@link Object} representing the JSON deserialized {@link BinaryData}. 
* @throws NullPointerException If {@code typeReference} is null. * @see JsonSerializer */ public <T> T toObject(TypeReference<T> typeReference) { return toObject(typeReference, SERIALIZER); } /** * Returns an {@link Object} representation of this {@link BinaryData} by deserializing its data using the passed * {@link ObjectSerializer}. Each time this method is called, the content is deserialized and a new instance of type * {@code T} is returned. So, calling this method repeatedly to convert the underlying data source into the same * type is not recommended. * <p> * The type, represented by {@link Class}, should be a non-generic class, for generic classes use * {@link * <p> * The passed {@link ObjectSerializer} can either be one of the implementations offered by the Azure SDKs or your * own implementation. * * <p><strong>Azure SDK implementations</strong></p> * <ul> * <li><a href="https: * <li><a href="https: * </ul> * * <p><strong>Get a non-generic Object from the BinaryData</strong></p> * * <!-- src_embed com.azure.core.util.BinaryData.toObject * <pre> * class Person & * & * private String name; * * & * public Person setName& * this.name = name; * return this; * & * * & * public String getName& * return name; * & * & * final Person data = new Person& * * & * & * & * * final ObjectSerializer serializer = * new MyJsonSerializer& * BinaryData binaryData = BinaryData.fromObject& * * Person person = binaryData.toObject& * System.out.println& * </pre> * <!-- end com.azure.core.util.BinaryData.toObject * * @param clazz The {@link Class} representing the Object's type. * @param serializer The {@link ObjectSerializer} used to deserialize object. * @param <T> Type of the deserialized Object. * @return An {@link Object} representing the deserialized {@link BinaryData}. * @throws NullPointerException If {@code clazz} or {@code serializer} is null. 
* @see ObjectSerializer * @see JsonSerializer * @see <a href="https: */ public <T> T toObject(Class<T> clazz, ObjectSerializer serializer) { return toObject(TypeReference.createInstance(clazz), serializer); } /** * Returns an {@link Object} representation of this {@link BinaryData} by deserializing its data using the passed * {@link ObjectSerializer}. Each time this method is called, the content is deserialized and a new instance of type * {@code T} is returned. So, calling this method repeatedly to convert the underlying data source into the same * type is not recommended. * <p> * The type, represented by {@link TypeReference}, can either be a generic or non-generic type. If the type is * generic create a sub-type of {@link TypeReference}, if the type is non-generic use * {@link TypeReference * <p> * The passed {@link ObjectSerializer} can either be one of the implementations offered by the Azure SDKs or your * own implementation. * * <p><strong>Azure SDK implementations</strong></p> * <ul> * <li><a href="https: * <li><a href="https: * </ul> * * <p><strong>Get a non-generic Object from the BinaryData</strong></p> * * <!-- src_embed com.azure.core.util.BinaryData.toObject * <pre> * class Person & * & * private String name; * * & * public Person setName& * this.name = name; * return this; * & * * & * public String getName& * return name; * & * & * final Person data = new Person& * * & * & * & * * final ObjectSerializer serializer = * new MyJsonSerializer& * BinaryData binaryData = BinaryData.fromObject& * * Person person = binaryData.toObject& * System.out.println& * * </pre> * <!-- end com.azure.core.util.BinaryData.toObject * * <p><strong>Get a generic Object from the BinaryData</strong></p> * * <!-- src_embed com.azure.core.util.BinaryData.toObject * <pre> * final Person person1 = new Person& * final Person person2 = new Person& * * List&lt;Person&gt; personList = new ArrayList&lt;&gt;& * personList.add& * personList.add& * * final ObjectSerializer serializer = * 
new MyJsonSerializer& * BinaryData binaryData = BinaryData.fromObject& * * & * List&lt;Person&gt; persons = binaryData.toObject& * persons.forEach& * </pre> * <!-- end com.azure.core.util.BinaryData.toObject * * @param typeReference The {@link TypeReference} representing the Object's type. * @param serializer The {@link ObjectSerializer} used to deserialize object. * @param <T> Type of the deserialized Object. * @return An {@link Object} representing the deserialized {@link BinaryData}. * @throws NullPointerException If {@code typeReference} or {@code serializer} is null. * @see ObjectSerializer * @see JsonSerializer * @see <a href="https: */ public <T> T toObject(TypeReference<T> typeReference, ObjectSerializer serializer) { Objects.requireNonNull(typeReference, "'typeReference' cannot be null."); Objects.requireNonNull(serializer, "'serializer' cannot be null."); return content.toObject(typeReference, serializer); } /** * Returns an {@link Object} representation of this {@link BinaryData} by deserializing its data using the default * {@link JsonSerializer}. Each time this method is called, the content is deserialized and a new instance of type * {@code T} is returned. So, calling this method repeatedly to convert the underlying data source into the same * type is not recommended. * <p> * The type, represented by {@link Class}, should be a non-generic class, for generic classes use * {@link * <p> * <b>Note:</b> This method first looks for a {@link JsonSerializerProvider} implementation on the classpath. If no * implementation is found, a default Jackson-based implementation will be used to deserialize the object. 
* * <p><strong>Get a non-generic Object from the BinaryData</strong></p> * * <!-- src_embed com.azure.core.util.BinaryData.toObjectAsync * <pre> * class Person & * & * private String name; * * & * public Person setName& * this.name = name; * return this; * & * * & * public String getName& * return name; * & * & * final Person data = new Person& * * & * & * & * & * & * * BinaryData binaryData = BinaryData.fromObject& * * Disposable subscriber = binaryData.toObjectAsync& * .subscribe& * * & * TimeUnit.SECONDS.sleep& * subscriber.dispose& * </pre> * <!-- end com.azure.core.util.BinaryData.toObjectAsync * * @param clazz The {@link Class} representing the Object's type. * @param <T> Type of the deserialized Object. * @return A {@link Mono} of {@link Object} representing the JSON deserialized {@link BinaryData}. * @throws NullPointerException If {@code clazz} is null. * @see JsonSerializer */ public <T> Mono<T> toObjectAsync(Class<T> clazz) { return toObjectAsync(TypeReference.createInstance(clazz), SERIALIZER); } /** * Returns an {@link Object} representation of this {@link BinaryData} by deserializing its data using the default * {@link JsonSerializer}. Each time this method is called, the content is deserialized and a new instance of type * {@code T} is returned. So, calling this method repeatedly to convert the underlying data source into the same * type is not recommended. * <p> * The type, represented by {@link TypeReference}, can either be a generic or non-generic type. If the type is * generic create a sub-type of {@link TypeReference}, if the type is non-generic use * {@link TypeReference * <p> * <b>Note:</b> This method first looks for a {@link JsonSerializerProvider} implementation on the classpath. If no * implementation is found, a default Jackson-based implementation will be used to deserialize the object. 
* * <p><strong>Get a non-generic Object from the BinaryData</strong></p> * * <!-- src_embed com.azure.core.util.BinaryData.toObjectAsync * <pre> * class Person & * & * private String name; * * & * public Person setName& * this.name = name; * return this; * & * * & * public String getName& * return name; * & * & * final Person data = new Person& * * & * & * & * & * & * * BinaryData binaryData = BinaryData.fromObject& * * Disposable subscriber = binaryData.toObjectAsync& * .subscribe& * * & * TimeUnit.SECONDS.sleep& * subscriber.dispose& * </pre> * <!-- end com.azure.core.util.BinaryData.toObjectAsync * * <p><strong>Get a generic Object from the BinaryData</strong></p> * * <!-- src_embed com.azure.core.util.BinaryData.toObjectAsync * <pre> * final Person person1 = new Person& * final Person person2 = new Person& * * List&lt;Person&gt; personList = new ArrayList&lt;&gt;& * personList.add& * personList.add& * * BinaryData binaryData = BinaryData.fromObject& * * Disposable subscriber = binaryData.toObjectAsync& * .subscribe& * * & * TimeUnit.SECONDS.sleep& * subscriber.dispose& * </pre> * <!-- end com.azure.core.util.BinaryData.toObjectAsync * * @param typeReference The {@link TypeReference} representing the Object's type. * @param <T> Type of the deserialized Object. * @return A {@link Mono} of {@link Object} representing the JSON deserialized {@link BinaryData}. * @throws NullPointerException If {@code typeReference} is null. * @see JsonSerializer */ public <T> Mono<T> toObjectAsync(TypeReference<T> typeReference) { return toObjectAsync(typeReference, SERIALIZER); } /** * Returns an {@link Object} representation of this {@link BinaryData} by deserializing its data using the passed * {@link ObjectSerializer}. Each time this method is called, the content is deserialized and a new instance of type * {@code T} is returned. So, calling this method repeatedly to convert the underlying data source into the same * type is not recommended. 
* <p> * The type, represented by {@link Class}, should be a non-generic class, for generic classes use * {@link * <p> * The passed {@link ObjectSerializer} can either be one of the implementations offered by the Azure SDKs or your * own implementation. * * <p><strong>Azure SDK implementations</strong></p> * <ul> * <li><a href="https: * <li><a href="https: * </ul> * * <p><strong>Get a non-generic Object from the BinaryData</strong></p> * * <!-- src_embed com.azure.core.util.BinaryData.toObjectAsync * <pre> * class Person & * & * private String name; * * & * public Person setName& * this.name = name; * return this; * & * * & * public String getName& * return name; * & * & * final Person data = new Person& * * & * & * & * * final ObjectSerializer serializer = * new MyJsonSerializer& * BinaryData binaryData = BinaryData.fromObject& * * Disposable subscriber = binaryData.toObjectAsync& * .subscribe& * * & * TimeUnit.SECONDS.sleep& * subscriber.dispose& * </pre> * <!-- end com.azure.core.util.BinaryData.toObjectAsync * * @param clazz The {@link Class} representing the Object's type. * @param serializer The {@link ObjectSerializer} used to deserialize object. * @param <T> Type of the deserialized Object. * @return A {@link Mono} of {@link Object} representing the deserialized {@link BinaryData}. * @throws NullPointerException If {@code clazz} or {@code serializer} is null. * @see ObjectSerializer * @see JsonSerializer * @see <a href="https: */ public <T> Mono<T> toObjectAsync(Class<T> clazz, ObjectSerializer serializer) { return toObjectAsync(TypeReference.createInstance(clazz), serializer); } /** * Returns an {@link Object} representation of this {@link BinaryData} by deserializing its data using the passed * {@link ObjectSerializer}. Each time this method is called, the content is deserialized and a new instance of type * {@code T} is returned. So, calling this method repeatedly to convert the underlying data source into the same * type is not recommended. 
* <p> * The type, represented by {@link TypeReference}, can either be a generic or non-generic type. If the type is * generic create a sub-type of {@link TypeReference}, if the type is non-generic use * {@link TypeReference * <p> * The passed {@link ObjectSerializer} can either be one of the implementations offered by the Azure SDKs or your * own implementation. * * <p><strong>Azure SDK implementations</strong></p> * <ul> * <li><a href="https: * <li><a href="https: * </ul> * * <p><strong>Get a non-generic Object from the BinaryData</strong></p> * * <!-- src_embed com.azure.core.util.BinaryData.toObjectAsync * <pre> * class Person & * & * private String name; * * & * public Person setName& * this.name = name; * return this; * & * * & * public String getName& * return name; * & * & * final Person data = new Person& * * & * & * & * * final ObjectSerializer serializer = * new MyJsonSerializer& * BinaryData binaryData = BinaryData.fromObject& * * Disposable subscriber = binaryData * .toObjectAsync& * .subscribe& * * & * TimeUnit.SECONDS.sleep& * subscriber.dispose& * </pre> * <!-- end com.azure.core.util.BinaryData.toObjectAsync * * <p><strong>Get a generic Object from the BinaryData</strong></p> * * <!-- src_embed com.azure.core.util.BinaryData.toObjectAsync * <pre> * final Person person1 = new Person& * final Person person2 = new Person& * * List&lt;Person&gt; personList = new ArrayList&lt;&gt;& * personList.add& * personList.add& * * final ObjectSerializer serializer = * new MyJsonSerializer& * BinaryData binaryData = BinaryData.fromObject& * * Disposable subscriber = binaryData * .toObjectAsync& * .subscribe& * * & * TimeUnit.SECONDS.sleep& * subscriber.dispose& * </pre> * <!-- end com.azure.core.util.BinaryData.toObjectAsync * * @param typeReference The {@link TypeReference} representing the Object's type. * @param serializer The {@link ObjectSerializer} used to deserialize object. * @param <T> Type of the deserialized Object. 
* @return A {@link Mono} of {@link Object} representing the deserialized {@link BinaryData}. * @throws NullPointerException If {@code typeReference} or {@code serializer} is null. * @see ObjectSerializer * @see JsonSerializer * @see <a href="https: */ public <T> Mono<T> toObjectAsync(TypeReference<T> typeReference, ObjectSerializer serializer) { return Mono.fromCallable(() -> toObject(typeReference, serializer)); } /** * Returns an {@link InputStream} representation of this {@link BinaryData}. * * <p><strong>Get an InputStream from the BinaryData</strong></p> * * <!-- src_embed com.azure.core.util.BinaryData.toStream --> * <pre> * final byte[] data = &quot;Some Data&quot;.getBytes& * BinaryData binaryData = BinaryData.fromStream& * final byte[] bytes = new byte[data.length]; * binaryData.toStream& * System.out.println& * </pre> * <!-- end com.azure.core.util.BinaryData.toStream --> * * @return An {@link InputStream} representing the {@link BinaryData}. */ public InputStream toStream() { return content.toStream(); } /** * Returns a read-only {@link ByteBuffer} representation of this {@link BinaryData}. * <p> * Attempting to mutate the returned {@link ByteBuffer} will throw a {@link ReadOnlyBufferException}. * * <p><strong>Get a read-only ByteBuffer from the BinaryData</strong></p> * * <!-- src_embed com.azure.util.BinaryData.toByteBuffer --> * <pre> * final byte[] data = &quot;Some Data&quot;.getBytes& * BinaryData binaryData = BinaryData.fromBytes& * final byte[] bytes = new byte[data.length]; * binaryData.toByteBuffer& * System.out.println& * </pre> * <!-- end com.azure.util.BinaryData.toByteBuffer --> * * @return A read-only {@link ByteBuffer} representing the {@link BinaryData}. */ public ByteBuffer toByteBuffer() { return content.toByteBuffer(); } /** * Returns the content of this {@link BinaryData} instance as a flux of {@link ByteBuffer ByteBuffers}. The content * is not read from the underlying data source until the {@link Flux} is subscribed to. 
* * @return the content of this {@link BinaryData} instance as a flux of {@link ByteBuffer ByteBuffers}. */ public Flux<ByteBuffer> toFluxByteBuffer() { return content.toFluxByteBuffer(); } /** * Returns the length of the content, if it is known. The length can be {@code null} if the source did not specify * the length or the length cannot be determined without reading the whole content. * * @return the length of the content, if it is known. */ public Long getLength() { return content.getLength(); } /** * Returns a flag indicating whether the content can be repeatedly consumed using all accessors including * {@link * * <p> * Replayability does not imply thread-safety. The caller must not use data accessors simultaneously regardless of * what this method returns. * </p> * * <!-- src_embed com.azure.util.BinaryData.replayability --> * <pre> * BinaryData binaryData = binaryDataProducer& * * if & * binaryData = binaryData.toReplayableBinaryData& * & * * streamConsumer& * streamConsumer& * </pre> * <!-- end com.azure.util.BinaryData.replayability --> * * <!-- src_embed com.azure.util.BinaryData.replayabilityAsync --> * <pre> * Mono.fromCallable& * .flatMap& * if & * return Mono.just& * & * return binaryData.toReplayableBinaryDataAsync& * & * & * .flatMap& * fluxConsumer& * .then& * .subscribe& * </pre> * <!-- end com.azure.util.BinaryData.replayabilityAsync --> * * @return a flag indicating whether the content can be repeatedly consumed using all accessors. */ public boolean isReplayable() { return content.isReplayable(); } /** * Converts the {@link BinaryData} into a {@link BinaryData} that is replayable, i.e. content can be consumed * repeatedly using all accessors including {@link * * <p> * A {@link BinaryData} that is already replayable is returned as is. Otherwise techniques like marking and * resetting a stream or buffering in memory are employed to assure replayability. * </p> * * <p> * Replayability does not imply thread-safety. 
The caller must not use data accessors of returned {@link BinaryData} * simultaneously. * </p> * * <!-- src_embed com.azure.util.BinaryData.replayability --> * <pre> * BinaryData binaryData = binaryDataProducer& * * if & * binaryData = binaryData.toReplayableBinaryData& * & * * streamConsumer& * streamConsumer& * </pre> * <!-- end com.azure.util.BinaryData.replayability --> * * @return Replayable {@link BinaryData}. */ public BinaryData toReplayableBinaryData() { if (this.isReplayable()) { return this; } else { return new BinaryData(content.toReplayableContent()); } } /** * Converts the {@link BinaryData} into a {@link BinaryData} that is replayable, i.e. content can be consumed * repeatedly using all accessors including {@link * * <p> * A {@link BinaryData} that is already replayable is returned as is. Otherwise techniques like marking and * resetting a stream or buffering in memory are employed to assure replayability. * </p> * * <p> * Replayability does not imply thread-safety. The caller must not use data accessors of returned {@link BinaryData} * simultaneously. * </p> * * <!-- src_embed com.azure.util.BinaryData.replayabilityAsync --> * <pre> * Mono.fromCallable& * .flatMap& * if & * return Mono.just& * & * return binaryData.toReplayableBinaryDataAsync& * & * & * .flatMap& * fluxConsumer& * .then& * .subscribe& * </pre> * <!-- end com.azure.util.BinaryData.replayabilityAsync --> * * @return A {@link Mono} of {@link BinaryData} representing the replayable {@link BinaryData}. */ public Mono<BinaryData> toReplayableBinaryDataAsync() { if (isReplayable()) { return Mono.just(this); } else { return content.toReplayableContentAsync().map(BinaryData::new); } } }
For now, I'm going to leave the comment and the current behavior in place, and do a wider investigation into what calls this method to see whether changing it would drastically alter runtime behavior.
public static Mono<BinaryData> fromFlux(Flux<ByteBuffer> data, Long length, boolean bufferContent) { if (data == null) { return monoError(LOGGER, new NullPointerException("'data' cannot be null.")); } if (length != null && length < 0) { return monoError(LOGGER, new IllegalArgumentException("'length' cannot be less than 0.")); } if (bufferContent && length != null && length > MAX_ARRAY_SIZE) { return monoError(LOGGER, new IllegalArgumentException( String.format("'length' cannot be greater than %d when content buffering is enabled.", MAX_ARRAY_SIZE))); } if (bufferContent) { long[] trueLength = new long[]{0}; return data.map(buffer -> { int bufferSize = buffer.remaining(); ByteBuffer copy = ByteBuffer.allocate(bufferSize); trueLength[0] += bufferSize; copy.put(buffer); copy.flip(); return copy; }) .collect(LinkedList::new, (BiConsumer<LinkedList<ByteBuffer>, ByteBuffer>) LinkedList::add) .map(buffers -> { return new BinaryData(new FluxByteBufferContent(Flux.fromIterable(buffers) .map(ByteBuffer::duplicate), (length != null) ? length : trueLength[0], true)); }); } else { return Mono.just(new BinaryData(new FluxByteBufferContent(data, length))); } }
public static Mono<BinaryData> fromFlux(Flux<ByteBuffer> data, Long length, boolean bufferContent) { if (data == null) { return monoError(LOGGER, new NullPointerException("'data' cannot be null.")); } if (length != null && length < 0) { return monoError(LOGGER, new IllegalArgumentException("'length' cannot be less than 0.")); } if (bufferContent && length != null && length > MAX_ARRAY_SIZE) { return monoError(LOGGER, new IllegalArgumentException( String.format("'length' cannot be greater than %d when content buffering is enabled.", MAX_ARRAY_SIZE))); } if (bufferContent) { long[] trueLength = new long[]{0}; return data.map(buffer -> { int bufferSize = buffer.remaining(); ByteBuffer copy = ByteBuffer.allocate(bufferSize); trueLength[0] += bufferSize; copy.put(buffer); copy.flip(); return copy; }) .collect(LinkedList::new, (BiConsumer<LinkedList<ByteBuffer>, ByteBuffer>) LinkedList::add) .map(buffers -> { return new BinaryData(new FluxByteBufferContent(Flux.fromIterable(buffers) .map(ByteBuffer::duplicate), (length != null) ? length : trueLength[0], true)); }); } else { return Mono.just(new BinaryData(new FluxByteBufferContent(data, length))); } }
class BinaryData { private static final ClientLogger LOGGER = new ClientLogger(BinaryData.class); static final JsonSerializer SERIALIZER = JsonSerializerProviders.createInstance(true); static final int MAX_ARRAY_SIZE = Integer.MAX_VALUE - 8; private final BinaryDataContent content; BinaryData(BinaryDataContent content) { this.content = Objects.requireNonNull(content, "'content' cannot be null."); } static { BinaryDataHelper.setAccessor(new BinaryDataHelper.BinaryDataAccessor() { @Override public BinaryData createBinaryData(BinaryDataContent content) { return new BinaryData(content); } @Override public BinaryDataContent getContent(BinaryData binaryData) { return binaryData.content; } }); } /** * Creates an instance of {@link BinaryData} from the given {@link InputStream}. Depending on the type of * inputStream, the BinaryData instance created may or may not allow reading the content more than once. The stream * content is not cached if the stream is not read into a format that requires the content to be fully read into * memory. * <p> * <b>NOTE:</b> The {@link InputStream} is not closed by this function. * </p> * * <p><strong>Create an instance from an InputStream</strong></p> * * <!-- src_embed com.azure.core.util.BinaryData.fromStream * <pre> * final ByteArrayInputStream inputStream = new ByteArrayInputStream& * BinaryData binaryData = BinaryData.fromStream& * System.out.println& * </pre> * <!-- end com.azure.core.util.BinaryData.fromStream * * @param inputStream The {@link InputStream} that {@link BinaryData} will represent. * @return A {@link BinaryData} representing the {@link InputStream}. * @throws UncheckedIOException If any error happens while reading the {@link InputStream}. * @throws NullPointerException If {@code inputStream} is null. */ public static BinaryData fromStream(InputStream inputStream) { return fromStream(inputStream, null); } /** * Creates an instance of {@link BinaryData} from the given {@link InputStream}. 
Depending on the type of * inputStream, the BinaryData instance created may or may not allow reading the content more than once. The stream * content is not cached if the stream is not read into a format that requires the content to be fully read into * memory. * <p> * <b>NOTE:</b> The {@link InputStream} is not closed by this function. * </p> * * <p><strong>Create an instance from an InputStream</strong></p> * * <!-- src_embed com.azure.core.util.BinaryData.fromStream * <pre> * byte[] bytes = &quot;Some Data&quot;.getBytes& * final ByteArrayInputStream inputStream = new ByteArrayInputStream& * BinaryData binaryData = BinaryData.fromStream& * System.out.println& * </pre> * <!-- end com.azure.core.util.BinaryData.fromStream * * @param inputStream The {@link InputStream} that {@link BinaryData} will represent. * @param length The length of {@code data} in bytes. * @return A {@link BinaryData} representing the {@link InputStream}. * @throws UncheckedIOException If any error happens while reading the {@link InputStream}. * @throws NullPointerException If {@code inputStream} is null. */ public static BinaryData fromStream(InputStream inputStream, Long length) { return new BinaryData(new InputStreamContent(inputStream, length)); } /** * Creates an instance of {@link BinaryData} from the given {@link InputStream}. * <b>NOTE:</b> The {@link InputStream} is not closed by this function. * * <p><strong>Create an instance from an InputStream</strong></p> * * <!-- src_embed com.azure.core.util.BinaryData.fromStreamAsync * <pre> * final ByteArrayInputStream inputStream = new ByteArrayInputStream& * * Mono&lt;BinaryData&gt; binaryDataMono = BinaryData.fromStreamAsync& * * Disposable subscriber = binaryDataMono * .map& * System.out.println& * return true; * & * .subscribe& * * & * TimeUnit.SECONDS.sleep& * subscriber.dispose& * </pre> * <!-- end com.azure.core.util.BinaryData.fromStreamAsync * * @param inputStream The {@link InputStream} that {@link BinaryData} will represent. 
* @return A {@link Mono} of {@link BinaryData} representing the {@link InputStream}. * @throws UncheckedIOException If any error happens while reading the {@link InputStream}. * @throws NullPointerException If {@code inputStream} is null. */ public static Mono<BinaryData> fromStreamAsync(InputStream inputStream) { return fromStreamAsync(inputStream, null); } /** * Creates an instance of {@link BinaryData} from the given {@link InputStream}. * <b>NOTE:</b> The {@link InputStream} is not closed by this function. * * <p><strong>Create an instance from an InputStream</strong></p> * * <!-- src_embed com.azure.core.util.BinaryData.fromStreamAsync * <pre> * byte[] bytes = &quot;Some Data&quot;.getBytes& * final ByteArrayInputStream inputStream = new ByteArrayInputStream& * * Mono&lt;BinaryData&gt; binaryDataMono = BinaryData.fromStreamAsync& * * Disposable subscriber = binaryDataMono * .map& * System.out.println& * return true; * & * .subscribe& * * & * TimeUnit.SECONDS.sleep& * subscriber.dispose& * </pre> * <!-- end com.azure.core.util.BinaryData.fromStreamAsync * * @param inputStream The {@link InputStream} that {@link BinaryData} will represent. * @param length The length of {@code data} in bytes. * @return A {@link Mono} of {@link BinaryData} representing the {@link InputStream}. * @throws UncheckedIOException If any error happens while reading the {@link InputStream}. * @throws NullPointerException If {@code inputStream} is null. */ public static Mono<BinaryData> fromStreamAsync(InputStream inputStream, Long length) { return Mono.fromCallable(() -> fromStream(inputStream, length)); } /** * Creates an instance of {@link BinaryData} from the given {@link Flux} of {@link ByteBuffer}. 
* * <p><strong>Create an instance from a Flux of ByteBuffer</strong></p> * * <p>This method aggregates data into single byte array.</p> * * <!-- src_embed com.azure.core.util.BinaryData.fromFlux * <pre> * final byte[] data = &quot;Some Data&quot;.getBytes& * final Flux&lt;ByteBuffer&gt; dataFlux = Flux.just& * * Mono&lt;BinaryData&gt; binaryDataMono = BinaryData.fromFlux& * * Disposable subscriber = binaryDataMono * .map& * System.out.println& * return true; * & * .subscribe& * * & * TimeUnit.SECONDS.sleep& * subscriber.dispose& * </pre> * <!-- end com.azure.core.util.BinaryData.fromFlux * * @param data The {@link Flux} of {@link ByteBuffer} that {@link BinaryData} will represent. * @return A {@link Mono} of {@link BinaryData} representing the {@link Flux} of {@link ByteBuffer}. * @throws NullPointerException If {@code data} is null. */ public static Mono<BinaryData> fromFlux(Flux<ByteBuffer> data) { return fromFlux(data, null); } /** * Creates an instance of {@link BinaryData} from the given {@link Flux} of {@link ByteBuffer}. * * <p><strong>Create an instance from a Flux of ByteBuffer</strong></p> * * <p>This method aggregates data into single byte array.</p> * * <!-- src_embed com.azure.core.util.BinaryData.fromFlux * <pre> * final byte[] data = &quot;Some Data&quot;.getBytes& * final long length = data.length; * final Flux&lt;ByteBuffer&gt; dataFlux = Flux.just& * * Mono&lt;BinaryData&gt; binaryDataMono = BinaryData.fromFlux& * * Disposable subscriber = binaryDataMono * .map& * System.out.println& * return true; * & * .subscribe& * * & * TimeUnit.SECONDS.sleep& * subscriber.dispose& * </pre> * <!-- end com.azure.core.util.BinaryData.fromFlux * * @param data The {@link Flux} of {@link ByteBuffer} that {@link BinaryData} will represent. * @param length The length of {@code data} in bytes. * @return A {@link Mono} of {@link BinaryData} representing the {@link Flux} of {@link ByteBuffer}. * @throws IllegalArgumentException if the length is less than zero. 
* @throws NullPointerException if {@code data} is null. */ public static Mono<BinaryData> fromFlux(Flux<ByteBuffer> data, Long length) { return fromFlux(data, length, true); } /** * Creates an instance of {@link BinaryData} from the given {@link Flux} of {@link ByteBuffer}. * * <p><strong>Create an instance from a Flux of ByteBuffer</strong></p> * * <!-- src_embed com.azure.core.util.BinaryData.fromFlux * <pre> * final byte[] data = &quot;Some Data&quot;.getBytes& * final long length = data.length; * final boolean shouldAggregateData = false; * final Flux&lt;ByteBuffer&gt; dataFlux = Flux.just& * * Mono&lt;BinaryData&gt; binaryDataMono = BinaryData.fromFlux& * * Disposable subscriber = binaryDataMono * .map& * System.out.println& * return true; * & * .subscribe& * * & * TimeUnit.SECONDS.sleep& * subscriber.dispose& * </pre> * <!-- end com.azure.core.util.BinaryData.fromFlux * * @param data The {@link Flux} of {@link ByteBuffer} that {@link BinaryData} will represent. * @param length The length of {@code data} in bytes. * @param bufferContent A flag indicating whether {@link Flux} should be buffered eagerly or consumption deferred. * @return A {@link Mono} of {@link BinaryData} representing the {@link Flux} of {@link ByteBuffer}. * @throws IllegalArgumentException if the length is less than zero. * @throws NullPointerException if {@code data} is null. */ /** * Creates an instance of {@link BinaryData} from the given {@link String}. * <p> * The {@link String} is converted into bytes using {@link String * {@link StandardCharsets * </p> * <p><strong>Create an instance from a String</strong></p> * * <!-- src_embed com.azure.core.util.BinaryData.fromString * <pre> * final String data = &quot;Some Data&quot;; * & * BinaryData binaryData = BinaryData.fromString& * System.out.println& * </pre> * <!-- end com.azure.core.util.BinaryData.fromString * * @param data The {@link String} that {@link BinaryData} will represent. 
* @return A {@link BinaryData} representing the {@link String}. * @throws NullPointerException If {@code data} is null. */ public static BinaryData fromString(String data) { return new BinaryData(new StringContent(data)); } /** * Creates an instance of {@link BinaryData} from the given byte array. * <p> * If the byte array is null or zero length an empty {@link BinaryData} will be returned. Note that the input byte * array is used as a reference by this instance of {@link BinaryData} and any changes to the byte array outside of * this instance will result in the contents of this BinaryData instance being updated as well. To safely update the * byte array without impacting the BinaryData instance, perform an array copy first. * </p> * * <p><strong>Create an instance from a byte array</strong></p> * * <!-- src_embed com.azure.core.util.BinaryData.fromBytes * <pre> * final byte[] data = &quot;Some Data&quot;.getBytes& * BinaryData binaryData = BinaryData.fromBytes& * System.out.println& * </pre> * <!-- end com.azure.core.util.BinaryData.fromBytes * * @param data The byte array that {@link BinaryData} will represent. * @return A {@link BinaryData} representing the byte array. * @throws NullPointerException If {@code data} is null. */ public static BinaryData fromBytes(byte[] data) { return new BinaryData(new ByteArrayContent(data)); } /** * Creates an instance of {@link BinaryData} by serializing the {@link Object} using the default * {@link JsonSerializer}. * * <p> * <b>Note:</b> This method first looks for a {@link JsonSerializerProvider} implementation on the classpath. If no * implementation is found, a default Jackson-based implementation will be used to serialize the object. 
* </p> * <p><strong>Creating an instance from an Object</strong></p> * * <!-- src_embed com.azure.core.util.BinaryData.fromObject * <pre> * class Person & * & * private String name; * * & * public Person setName& * this.name = name; * return this; * & * * & * public String getName& * return name; * & * & * final Person data = new Person& * * & * & * & * BinaryData binaryData = BinaryData.fromObject& * * System.out.println& * </pre> * <!-- end com.azure.core.util.BinaryData.fromObject * * @param data The object that will be JSON serialized that {@link BinaryData} will represent. * @return A {@link BinaryData} representing the JSON serialized object. * @throws NullPointerException If {@code data} is null. * @see JsonSerializer */ public static BinaryData fromObject(Object data) { return fromObject(data, SERIALIZER); } /** * Creates an instance of {@link BinaryData} by serializing the {@link Object} using the default * {@link JsonSerializer}. * * <p> * <b>Note:</b> This method first looks for a {@link JsonSerializerProvider} implementation on the classpath. If no * implementation is found, a default Jackson-based implementation will be used to serialize the object. * </p> * <p><strong>Creating an instance from an Object</strong></p> * * <!-- src_embed com.azure.core.util.BinaryData.fromObjectAsync * <pre> * class Person & * & * private String name; * * & * public Person setName& * this.name = name; * return this; * & * * & * public String getName& * return name; * & * & * final Person data = new Person& * * & * & * & * Disposable subscriber = BinaryData.fromObjectAsync& * .subscribe& * * & * TimeUnit.SECONDS.sleep& * subscriber.dispose& * </pre> * <!-- end com.azure.core.util.BinaryData.fromObjectAsync * * @param data The object that will be JSON serialized that {@link BinaryData} will represent. * @return A {@link Mono} of {@link BinaryData} representing the JSON serialized object. 
* @see JsonSerializer */ public static Mono<BinaryData> fromObjectAsync(Object data) { return fromObjectAsync(data, SERIALIZER); } /** * Creates an instance of {@link BinaryData} by serializing the {@link Object} using the passed * {@link ObjectSerializer}. * <p> * The passed {@link ObjectSerializer} can either be one of the implementations offered by the Azure SDKs or your * own implementation. * </p> * * <p><strong>Azure SDK implementations</strong></p> * <ul> * <li><a href="https: * <li><a href="https: * </ul> * * <p><strong>Create an instance from an Object</strong></p> * * <!-- src_embed com.azure.core.util.BinaryData.fromObject * <pre> * class Person & * & * private String name; * * & * public Person setName& * this.name = name; * return this; * & * * & * public String getName& * return name; * & * & * final Person data = new Person& * * & * & * & * final ObjectSerializer serializer = * new MyJsonSerializer& * BinaryData binaryData = BinaryData.fromObject& * * System.out.println& * </pre> * <!-- end com.azure.core.util.BinaryData.fromObject * * @param data The object that will be serialized that {@link BinaryData} will represent. The {@code serializer} * determines how {@code null} data is serialized. * @param serializer The {@link ObjectSerializer} used to serialize object. * @return A {@link BinaryData} representing the serialized object. * @throws NullPointerException If {@code serializer} is null. * @see ObjectSerializer * @see JsonSerializer * @see <a href="https: */ public static BinaryData fromObject(Object data, ObjectSerializer serializer) { return new BinaryData(new SerializableContent(data, serializer)); } /** * Creates an instance of {@link BinaryData} by serializing the {@link Object} using the passed * {@link ObjectSerializer}. * * <p> * The passed {@link ObjectSerializer} can either be one of the implementations offered by the Azure SDKs or your * own implementation. 
* </p> * * <p><strong>Azure SDK implementations</strong></p> * <ul> * <li><a href="https: * <li><a href="https: * </ul> * * <p><strong>Create an instance from an Object</strong></p> * * <!-- src_embed com.azure.core.util.BinaryData.fromObjectAsync * <pre> * class Person & * & * private String name; * * & * public Person setName& * this.name = name; * return this; * & * * & * public String getName& * return name; * & * & * final Person data = new Person& * * & * & * & * final ObjectSerializer serializer = * new MyJsonSerializer& * Disposable subscriber = BinaryData.fromObjectAsync& * .subscribe& * * & * TimeUnit.SECONDS.sleep& * subscriber.dispose& * </pre> * <!-- end com.azure.core.util.BinaryData.fromObjectAsync * * @param data The object that will be serialized that {@link BinaryData} will represent. The {@code serializer} * determines how {@code null} data is serialized. * @param serializer The {@link ObjectSerializer} used to serialize object. * @return A {@link Mono} of {@link BinaryData} representing the serialized object. * @throws NullPointerException If {@code serializer} is null. * @see ObjectSerializer * @see JsonSerializer * @see <a href="https: */ public static Mono<BinaryData> fromObjectAsync(Object data, ObjectSerializer serializer) { return Mono.fromCallable(() -> fromObject(data, serializer)); } /** * Creates a {@link BinaryData} that uses the content of the file at {@link Path} as its data. This method checks * for the existence of the file at the time of creating an instance of {@link BinaryData}. The file, however, is * not read until there is an attempt to read the contents of the returned BinaryData instance. 
* * <p><strong>Create an instance from a file</strong></p> * * <p>The {@link BinaryData} returned from this method uses 8KB chunk size when reading file content.</p> * * <!-- src_embed com.azure.core.util.BinaryData.fromFile --> * <pre> * BinaryData binaryData = BinaryData.fromFile& * System.out.println& * </pre> * <!-- end com.azure.core.util.BinaryData.fromFile --> * * @param file The {@link Path} that will be the {@link BinaryData} data. * @return A new {@link BinaryData}. * @throws NullPointerException If {@code file} is null. */ public static BinaryData fromFile(Path file) { return fromFile(file, STREAM_READ_SIZE); } /** * Creates a {@link BinaryData} that uses the content of the file at {@link Path file} as its data. This method * checks for the existence of the file at the time of creating an instance of {@link BinaryData}. The file, * however, is not read until there is an attempt to read the contents of the returned BinaryData instance. * * <p><strong>Create an instance from a file</strong></p> * * <!-- src_embed com.azure.core.util.BinaryData.fromFile * <pre> * BinaryData binaryData = BinaryData.fromFile& * System.out.println& * </pre> * <!-- end com.azure.core.util.BinaryData.fromFile * * @param file The {@link Path} that will be the {@link BinaryData} data. * @param chunkSize The requested size for each read of the path. * @return A new {@link BinaryData}. * @throws NullPointerException If {@code file} is null. * @throws IllegalArgumentException If {@code offset} or {@code length} are negative or {@code offset} plus * {@code length} is greater than the file size or {@code chunkSize} is less than or equal to 0. * @throws UncheckedIOException if the file does not exist. */ public static BinaryData fromFile(Path file, int chunkSize) { return new BinaryData(new FileContent(file, chunkSize, null, null)); } /** * Creates a {@link BinaryData} that uses the content of the file at {@link Path file} as its data. 
This method * checks for the existence of the file at the time of creating an instance of {@link BinaryData}. The file, * however, is not read until there is an attempt to read the contents of the returned BinaryData instance. * * <p><strong>Create an instance from a file</strong></p> * * <p>The {@link BinaryData} returned from this method uses 8KB chunk size when reading file content.</p> * * <!-- src_embed com.azure.core.util.BinaryData.fromFile * <pre> * long position = 1024; * long length = 100 * 1048; * BinaryData binaryData = BinaryData.fromFile& * new File& * System.out.println& * </pre> * <!-- end com.azure.core.util.BinaryData.fromFile * * @param file The {@link Path} that will be the {@link BinaryData} data. * @param position Position, or offset, within the path where reading begins. * @param length Maximum number of bytes to be read from the path. * @return A new {@link BinaryData}. * @throws NullPointerException If {@code file} is null. * @throws IllegalArgumentException If {@code offset} or {@code length} are negative or {@code offset} plus * {@code length} is greater than the file size or {@code chunkSize} is less than or equal to 0. * @throws UncheckedIOException if the file does not exist. */ public static BinaryData fromFile(Path file, Long position, Long length) { return new BinaryData(new FileContent(file, STREAM_READ_SIZE, position, length)); } /** * Creates a {@link BinaryData} that uses the content of the file at {@link Path file} as its data. This method * checks for the existence of the file at the time of creating an instance of {@link BinaryData}. The file, * however, is not read until there is an attempt to read the contents of the returned BinaryData instance. 
* * <p><strong>Create an instance from a file</strong></p> * * <!-- src_embed com.azure.core.util.BinaryData.fromFile * <pre> * long position = 1024; * long length = 100 * 1048; * int chunkSize = 8092; * BinaryData binaryData = BinaryData.fromFile& * new File& * System.out.println& * </pre> * <!-- end com.azure.core.util.BinaryData.fromFile * * @param file The {@link Path} that will be the {@link BinaryData} data. * @param position Position, or offset, within the path where reading begins. * @param length Maximum number of bytes to be read from the path. * @param chunkSize The requested size for each read of the path. * @return A new {@link BinaryData}. * @throws NullPointerException If {@code file} is null. * @throws IllegalArgumentException If {@code offset} or {@code length} are negative or {@code offset} plus * {@code length} is greater than the file size or {@code chunkSize} is less than or equal to 0. * @throws UncheckedIOException if the file does not exist. */ public static BinaryData fromFile(Path file, Long position, Long length, int chunkSize) { return new BinaryData(new FileContent(file, chunkSize, position, length)); } /** * Returns a byte array representation of this {@link BinaryData}. This method returns a reference to the underlying * byte array. Modifying the contents of the returned byte array will also change the content of this BinaryData * instance. If the content source of this BinaryData instance is a file, an Inputstream or a * {@code Flux<ByteBuffer>} the source is not modified. To safely update the byte array, it is recommended to make a * copy of the contents first. * * @return A byte array representing this {@link BinaryData}. */ public byte[] toBytes() { return content.toBytes(); } /** * Returns a {@link String} representation of this {@link BinaryData} by converting its data using the UTF-8 * character set. A new instance of String is created each time this method is called. 
* * @return A {@link String} representing this {@link BinaryData}. */ public String toString() { return content.toString(); } /** * Returns an {@link Object} representation of this {@link BinaryData} by deserializing its data using the default * {@link JsonSerializer}. Each time this method is called, the content is deserialized and a new instance of type * {@code T} is returned. So, calling this method repeatedly to convert the underlying data source into the same * type is not recommended. * <p> * The type, represented by {@link Class}, should be a non-generic class, for generic classes use * {@link * <p> * <b>Note:</b> This method first looks for a {@link JsonSerializerProvider} implementation on the classpath. If no * implementation is found, a default Jackson-based implementation will be used to deserialize the object. * * <p><strong>Get a non-generic Object from the BinaryData</strong></p> * * <!-- src_embed com.azure.core.util.BinaryData.toObject * <pre> * class Person & * & * private String name; * * & * public Person setName& * this.name = name; * return this; * & * * & * public String getName& * return name; * & * & * final Person data = new Person& * * & * & * & * & * & * * BinaryData binaryData = BinaryData.fromObject& * * Person person = binaryData.toObject& * System.out.println& * </pre> * <!-- end com.azure.core.util.BinaryData.toObject * * @param <T> Type of the deserialized Object. * @param clazz The {@link Class} representing the Object's type. * @return An {@link Object} representing the JSON deserialized {@link BinaryData}. * @throws NullPointerException If {@code clazz} is null. * @see JsonSerializer */ public <T> T toObject(Class<T> clazz) { return toObject(TypeReference.createInstance(clazz), SERIALIZER); } /** * Returns an {@link Object} representation of this {@link BinaryData} by deserializing its data using the default * {@link JsonSerializer}. 
Each time this method is called, the content is deserialized and a new instance of type * {@code T} is returned. So, calling this method repeatedly to convert the underlying data source into the same * type is not recommended. * <p> * The type, represented by {@link TypeReference}, can either be a generic or non-generic type. If the type is * generic create a sub-type of {@link TypeReference}, if the type is non-generic use * {@link TypeReference * <p> * <b>Note:</b> This method first looks for a {@link JsonSerializerProvider} implementation on the classpath. If no * implementation is found, a default Jackson-based implementation will be used to deserialize the object. * * <p><strong>Get a non-generic Object from the BinaryData</strong></p> * * <!-- src_embed com.azure.core.util.BinaryData.toObject * <pre> * class Person & * & * private String name; * * & * public Person setName& * this.name = name; * return this; * & * * & * public String getName& * return name; * & * & * final Person data = new Person& * * & * & * & * & * & * * BinaryData binaryData = BinaryData.fromObject& * * Person person = binaryData.toObject& * System.out.println& * </pre> * <!-- end com.azure.core.util.BinaryData.toObject * * <p><strong>Get a generic Object from the BinaryData</strong></p> * * <!-- src_embed com.azure.core.util.BinaryData.toObject * <pre> * final Person person1 = new Person& * final Person person2 = new Person& * * List&lt;Person&gt; personList = new ArrayList&lt;&gt;& * personList.add& * personList.add& * * & * & * & * & * & * * * BinaryData binaryData = BinaryData.fromObject& * * List&lt;Person&gt; persons = binaryData.toObject& * persons.forEach& * </pre> * <!-- end com.azure.core.util.BinaryData.toObject * * @param typeReference The {@link TypeReference} representing the Object's type. * @param <T> Type of the deserialized Object. * @return An {@link Object} representing the JSON deserialized {@link BinaryData}. 
* @throws NullPointerException If {@code typeReference} is null. * @see JsonSerializer */ public <T> T toObject(TypeReference<T> typeReference) { return toObject(typeReference, SERIALIZER); } /** * Returns an {@link Object} representation of this {@link BinaryData} by deserializing its data using the passed * {@link ObjectSerializer}. Each time this method is called, the content is deserialized and a new instance of type * {@code T} is returned. So, calling this method repeatedly to convert the underlying data source into the same * type is not recommended. * <p> * The type, represented by {@link Class}, should be a non-generic class, for generic classes use * {@link * <p> * The passed {@link ObjectSerializer} can either be one of the implementations offered by the Azure SDKs or your * own implementation. * * <p><strong>Azure SDK implementations</strong></p> * <ul> * <li><a href="https: * <li><a href="https: * </ul> * * <p><strong>Get a non-generic Object from the BinaryData</strong></p> * * <!-- src_embed com.azure.core.util.BinaryData.toObject * <pre> * class Person & * & * private String name; * * & * public Person setName& * this.name = name; * return this; * & * * & * public String getName& * return name; * & * & * final Person data = new Person& * * & * & * & * * final ObjectSerializer serializer = * new MyJsonSerializer& * BinaryData binaryData = BinaryData.fromObject& * * Person person = binaryData.toObject& * System.out.println& * </pre> * <!-- end com.azure.core.util.BinaryData.toObject * * @param clazz The {@link Class} representing the Object's type. * @param serializer The {@link ObjectSerializer} used to deserialize object. * @param <T> Type of the deserialized Object. * @return An {@link Object} representing the deserialized {@link BinaryData}. * @throws NullPointerException If {@code clazz} or {@code serializer} is null. 
* @see ObjectSerializer * @see JsonSerializer * @see <a href="https: */ public <T> T toObject(Class<T> clazz, ObjectSerializer serializer) { return toObject(TypeReference.createInstance(clazz), serializer); } /** * Returns an {@link Object} representation of this {@link BinaryData} by deserializing its data using the passed * {@link ObjectSerializer}. Each time this method is called, the content is deserialized and a new instance of type * {@code T} is returned. So, calling this method repeatedly to convert the underlying data source into the same * type is not recommended. * <p> * The type, represented by {@link TypeReference}, can either be a generic or non-generic type. If the type is * generic create a sub-type of {@link TypeReference}, if the type is non-generic use * {@link TypeReference * <p> * The passed {@link ObjectSerializer} can either be one of the implementations offered by the Azure SDKs or your * own implementation. * * <p><strong>Azure SDK implementations</strong></p> * <ul> * <li><a href="https: * <li><a href="https: * </ul> * * <p><strong>Get a non-generic Object from the BinaryData</strong></p> * * <!-- src_embed com.azure.core.util.BinaryData.toObject * <pre> * class Person & * & * private String name; * * & * public Person setName& * this.name = name; * return this; * & * * & * public String getName& * return name; * & * & * final Person data = new Person& * * & * & * & * * final ObjectSerializer serializer = * new MyJsonSerializer& * BinaryData binaryData = BinaryData.fromObject& * * Person person = binaryData.toObject& * System.out.println& * * </pre> * <!-- end com.azure.core.util.BinaryData.toObject * * <p><strong>Get a generic Object from the BinaryData</strong></p> * * <!-- src_embed com.azure.core.util.BinaryData.toObject * <pre> * final Person person1 = new Person& * final Person person2 = new Person& * * List&lt;Person&gt; personList = new ArrayList&lt;&gt;& * personList.add& * personList.add& * * final ObjectSerializer serializer = * 
new MyJsonSerializer& * BinaryData binaryData = BinaryData.fromObject& * * & * List&lt;Person&gt; persons = binaryData.toObject& * persons.forEach& * </pre> * <!-- end com.azure.core.util.BinaryData.toObject * * @param typeReference The {@link TypeReference} representing the Object's type. * @param serializer The {@link ObjectSerializer} used to deserialize object. * @param <T> Type of the deserialized Object. * @return An {@link Object} representing the deserialized {@link BinaryData}. * @throws NullPointerException If {@code typeReference} or {@code serializer} is null. * @see ObjectSerializer * @see JsonSerializer * @see <a href="https: */ public <T> T toObject(TypeReference<T> typeReference, ObjectSerializer serializer) { Objects.requireNonNull(typeReference, "'typeReference' cannot be null."); Objects.requireNonNull(serializer, "'serializer' cannot be null."); return content.toObject(typeReference, serializer); } /** * Returns an {@link Object} representation of this {@link BinaryData} by deserializing its data using the default * {@link JsonSerializer}. Each time this method is called, the content is deserialized and a new instance of type * {@code T} is returned. So, calling this method repeatedly to convert the underlying data source into the same * type is not recommended. * <p> * The type, represented by {@link Class}, should be a non-generic class, for generic classes use * {@link * <p> * <b>Note:</b> This method first looks for a {@link JsonSerializerProvider} implementation on the classpath. If no * implementation is found, a default Jackson-based implementation will be used to deserialize the object. 
* * <p><strong>Get a non-generic Object from the BinaryData</strong></p> * * <!-- src_embed com.azure.core.util.BinaryData.toObjectAsync * <pre> * class Person & * & * private String name; * * & * public Person setName& * this.name = name; * return this; * & * * & * public String getName& * return name; * & * & * final Person data = new Person& * * & * & * & * & * & * * BinaryData binaryData = BinaryData.fromObject& * * Disposable subscriber = binaryData.toObjectAsync& * .subscribe& * * & * TimeUnit.SECONDS.sleep& * subscriber.dispose& * </pre> * <!-- end com.azure.core.util.BinaryData.toObjectAsync * * @param clazz The {@link Class} representing the Object's type. * @param <T> Type of the deserialized Object. * @return A {@link Mono} of {@link Object} representing the JSON deserialized {@link BinaryData}. * @throws NullPointerException If {@code clazz} is null. * @see JsonSerializer */ public <T> Mono<T> toObjectAsync(Class<T> clazz) { return toObjectAsync(TypeReference.createInstance(clazz), SERIALIZER); } /** * Returns an {@link Object} representation of this {@link BinaryData} by deserializing its data using the default * {@link JsonSerializer}. Each time this method is called, the content is deserialized and a new instance of type * {@code T} is returned. So, calling this method repeatedly to convert the underlying data source into the same * type is not recommended. * <p> * The type, represented by {@link TypeReference}, can either be a generic or non-generic type. If the type is * generic create a sub-type of {@link TypeReference}, if the type is non-generic use * {@link TypeReference * <p> * <b>Note:</b> This method first looks for a {@link JsonSerializerProvider} implementation on the classpath. If no * implementation is found, a default Jackson-based implementation will be used to deserialize the object. 
* * <p><strong>Get a non-generic Object from the BinaryData</strong></p> * * <!-- src_embed com.azure.core.util.BinaryData.toObjectAsync * <pre> * class Person & * & * private String name; * * & * public Person setName& * this.name = name; * return this; * & * * & * public String getName& * return name; * & * & * final Person data = new Person& * * & * & * & * & * & * * BinaryData binaryData = BinaryData.fromObject& * * Disposable subscriber = binaryData.toObjectAsync& * .subscribe& * * & * TimeUnit.SECONDS.sleep& * subscriber.dispose& * </pre> * <!-- end com.azure.core.util.BinaryData.toObjectAsync * * <p><strong>Get a generic Object from the BinaryData</strong></p> * * <!-- src_embed com.azure.core.util.BinaryData.toObjectAsync * <pre> * final Person person1 = new Person& * final Person person2 = new Person& * * List&lt;Person&gt; personList = new ArrayList&lt;&gt;& * personList.add& * personList.add& * * BinaryData binaryData = BinaryData.fromObject& * * Disposable subscriber = binaryData.toObjectAsync& * .subscribe& * * & * TimeUnit.SECONDS.sleep& * subscriber.dispose& * </pre> * <!-- end com.azure.core.util.BinaryData.toObjectAsync * * @param typeReference The {@link TypeReference} representing the Object's type. * @param <T> Type of the deserialized Object. * @return A {@link Mono} of {@link Object} representing the JSON deserialized {@link BinaryData}. * @throws NullPointerException If {@code typeReference} is null. * @see JsonSerializer */ public <T> Mono<T> toObjectAsync(TypeReference<T> typeReference) { return toObjectAsync(typeReference, SERIALIZER); } /** * Returns an {@link Object} representation of this {@link BinaryData} by deserializing its data using the passed * {@link ObjectSerializer}. Each time this method is called, the content is deserialized and a new instance of type * {@code T} is returned. So, calling this method repeatedly to convert the underlying data source into the same * type is not recommended. 
* <p> * The type, represented by {@link Class}, should be a non-generic class, for generic classes use * {@link * <p> * The passed {@link ObjectSerializer} can either be one of the implementations offered by the Azure SDKs or your * own implementation. * * <p><strong>Azure SDK implementations</strong></p> * <ul> * <li><a href="https: * <li><a href="https: * </ul> * * <p><strong>Get a non-generic Object from the BinaryData</strong></p> * * <!-- src_embed com.azure.core.util.BinaryData.toObjectAsync * <pre> * class Person & * & * private String name; * * & * public Person setName& * this.name = name; * return this; * & * * & * public String getName& * return name; * & * & * final Person data = new Person& * * & * & * & * * final ObjectSerializer serializer = * new MyJsonSerializer& * BinaryData binaryData = BinaryData.fromObject& * * Disposable subscriber = binaryData.toObjectAsync& * .subscribe& * * & * TimeUnit.SECONDS.sleep& * subscriber.dispose& * </pre> * <!-- end com.azure.core.util.BinaryData.toObjectAsync * * @param clazz The {@link Class} representing the Object's type. * @param serializer The {@link ObjectSerializer} used to deserialize object. * @param <T> Type of the deserialized Object. * @return A {@link Mono} of {@link Object} representing the deserialized {@link BinaryData}. * @throws NullPointerException If {@code clazz} or {@code serializer} is null. * @see ObjectSerializer * @see JsonSerializer * @see <a href="https: */ public <T> Mono<T> toObjectAsync(Class<T> clazz, ObjectSerializer serializer) { return toObjectAsync(TypeReference.createInstance(clazz), serializer); } /** * Returns an {@link Object} representation of this {@link BinaryData} by deserializing its data using the passed * {@link ObjectSerializer}. Each time this method is called, the content is deserialized and a new instance of type * {@code T} is returned. So, calling this method repeatedly to convert the underlying data source into the same * type is not recommended. 
* <p> * The type, represented by {@link TypeReference}, can either be a generic or non-generic type. If the type is * generic create a sub-type of {@link TypeReference}, if the type is non-generic use * {@link TypeReference * <p> * The passed {@link ObjectSerializer} can either be one of the implementations offered by the Azure SDKs or your * own implementation. * * <p><strong>Azure SDK implementations</strong></p> * <ul> * <li><a href="https: * <li><a href="https: * </ul> * * <p><strong>Get a non-generic Object from the BinaryData</strong></p> * * <!-- src_embed com.azure.core.util.BinaryData.toObjectAsync * <pre> * class Person & * & * private String name; * * & * public Person setName& * this.name = name; * return this; * & * * & * public String getName& * return name; * & * & * final Person data = new Person& * * & * & * & * * final ObjectSerializer serializer = * new MyJsonSerializer& * BinaryData binaryData = BinaryData.fromObject& * * Disposable subscriber = binaryData * .toObjectAsync& * .subscribe& * * & * TimeUnit.SECONDS.sleep& * subscriber.dispose& * </pre> * <!-- end com.azure.core.util.BinaryData.toObjectAsync * * <p><strong>Get a generic Object from the BinaryData</strong></p> * * <!-- src_embed com.azure.core.util.BinaryData.toObjectAsync * <pre> * final Person person1 = new Person& * final Person person2 = new Person& * * List&lt;Person&gt; personList = new ArrayList&lt;&gt;& * personList.add& * personList.add& * * final ObjectSerializer serializer = * new MyJsonSerializer& * BinaryData binaryData = BinaryData.fromObject& * * Disposable subscriber = binaryData * .toObjectAsync& * .subscribe& * * & * TimeUnit.SECONDS.sleep& * subscriber.dispose& * </pre> * <!-- end com.azure.core.util.BinaryData.toObjectAsync * * @param typeReference The {@link TypeReference} representing the Object's type. * @param serializer The {@link ObjectSerializer} used to deserialize object. * @param <T> Type of the deserialized Object. 
* @return A {@link Mono} of {@link Object} representing the deserialized {@link BinaryData}. * @throws NullPointerException If {@code typeReference} or {@code serializer} is null. * @see ObjectSerializer * @see JsonSerializer * @see <a href="https: */ public <T> Mono<T> toObjectAsync(TypeReference<T> typeReference, ObjectSerializer serializer) { return Mono.fromCallable(() -> toObject(typeReference, serializer)); } /** * Returns an {@link InputStream} representation of this {@link BinaryData}. * * <p><strong>Get an InputStream from the BinaryData</strong></p> * * <!-- src_embed com.azure.core.util.BinaryData.toStream --> * <pre> * final byte[] data = &quot;Some Data&quot;.getBytes& * BinaryData binaryData = BinaryData.fromStream& * final byte[] bytes = new byte[data.length]; * binaryData.toStream& * System.out.println& * </pre> * <!-- end com.azure.core.util.BinaryData.toStream --> * * @return An {@link InputStream} representing the {@link BinaryData}. */ public InputStream toStream() { return content.toStream(); } /** * Returns a read-only {@link ByteBuffer} representation of this {@link BinaryData}. * <p> * Attempting to mutate the returned {@link ByteBuffer} will throw a {@link ReadOnlyBufferException}. * * <p><strong>Get a read-only ByteBuffer from the BinaryData</strong></p> * * <!-- src_embed com.azure.util.BinaryData.toByteBuffer --> * <pre> * final byte[] data = &quot;Some Data&quot;.getBytes& * BinaryData binaryData = BinaryData.fromBytes& * final byte[] bytes = new byte[data.length]; * binaryData.toByteBuffer& * System.out.println& * </pre> * <!-- end com.azure.util.BinaryData.toByteBuffer --> * * @return A read-only {@link ByteBuffer} representing the {@link BinaryData}. */ public ByteBuffer toByteBuffer() { return content.toByteBuffer(); } /** * Returns the content of this {@link BinaryData} instance as a flux of {@link ByteBuffer ByteBuffers}. The content * is not read from the underlying data source until the {@link Flux} is subscribed to. 
* * @return the content of this {@link BinaryData} instance as a flux of {@link ByteBuffer ByteBuffers}. */ public Flux<ByteBuffer> toFluxByteBuffer() { return content.toFluxByteBuffer(); } /** * Returns the length of the content, if it is known. The length can be {@code null} if the source did not specify * the length or the length cannot be determined without reading the whole content. * * @return the length of the content, if it is known. */ public Long getLength() { return content.getLength(); } /** * Returns a flag indicating whether the content can be repeatedly consumed using all accessors including * {@link * * <p> * Replayability does not imply thread-safety. The caller must not use data accessors simultaneously regardless of * what this method returns. * </p> * * <!-- src_embed com.azure.util.BinaryData.replayability --> * <pre> * BinaryData binaryData = binaryDataProducer& * * if & * binaryData = binaryData.toReplayableBinaryData& * & * * streamConsumer& * streamConsumer& * </pre> * <!-- end com.azure.util.BinaryData.replayability --> * * <!-- src_embed com.azure.util.BinaryData.replayabilityAsync --> * <pre> * Mono.fromCallable& * .flatMap& * if & * return Mono.just& * & * return binaryData.toReplayableBinaryDataAsync& * & * & * .flatMap& * fluxConsumer& * .then& * .subscribe& * </pre> * <!-- end com.azure.util.BinaryData.replayabilityAsync --> * * @return a flag indicating whether the content can be repeatedly consumed using all accessors. */ public boolean isReplayable() { return content.isReplayable(); } /** * Converts the {@link BinaryData} into a {@link BinaryData} that is replayable, i.e. content can be consumed * repeatedly using all accessors including {@link * * <p> * A {@link BinaryData} that is already replayable is returned as is. Otherwise techniques like marking and * resetting a stream or buffering in memory are employed to assure replayability. * </p> * * <p> * Replayability does not imply thread-safety. 
The caller must not use data accessors of returned {@link BinaryData} * simultaneously. * </p> * * <!-- src_embed com.azure.util.BinaryData.replayability --> * <pre> * BinaryData binaryData = binaryDataProducer& * * if & * binaryData = binaryData.toReplayableBinaryData& * & * * streamConsumer& * streamConsumer& * </pre> * <!-- end com.azure.util.BinaryData.replayability --> * * @return Replayable {@link BinaryData}. */ public BinaryData toReplayableBinaryData() { if (this.isReplayable()) { return this; } else { return new BinaryData(content.toReplayableContent()); } } /** * Converts the {@link BinaryData} into a {@link BinaryData} that is replayable, i.e. content can be consumed * repeatedly using all accessors including {@link * * <p> * A {@link BinaryData} that is already replayable is returned as is. Otherwise techniques like marking and * resetting a stream or buffering in memory are employed to assure replayability. * </p> * * <p> * Replayability does not imply thread-safety. The caller must not use data accessors of returned {@link BinaryData} * simultaneously. * </p> * * <!-- src_embed com.azure.util.BinaryData.replayabilityAsync --> * <pre> * Mono.fromCallable& * .flatMap& * if & * return Mono.just& * & * return binaryData.toReplayableBinaryDataAsync& * & * & * .flatMap& * fluxConsumer& * .then& * .subscribe& * </pre> * <!-- end com.azure.util.BinaryData.replayabilityAsync --> * * @return A {@link Mono} of {@link BinaryData} representing the replayable {@link BinaryData}. */ public Mono<BinaryData> toReplayableBinaryDataAsync() { if (isReplayable()) { return Mono.just(this); } else { return content.toReplayableContentAsync().map(BinaryData::new); } } }
class BinaryData {
    private static final ClientLogger LOGGER = new ClientLogger(BinaryData.class);
    static final JsonSerializer SERIALIZER = JsonSerializerProviders.createInstance(true);
    // Largest array size safely allocatable on common JVMs (some reserve header words).
    static final int MAX_ARRAY_SIZE = Integer.MAX_VALUE - 8;

    // Single content abstraction backing every accessor; concrete subtype depends on the factory used.
    private final BinaryDataContent content;

    BinaryData(BinaryDataContent content) {
        this.content = Objects.requireNonNull(content, "'content' cannot be null.");
    }

    static {
        // Register an accessor so azure-core internals can wrap/unwrap BinaryDataContent
        // without widening this class's public API.
        BinaryDataHelper.setAccessor(new BinaryDataHelper.BinaryDataAccessor() {
            @Override
            public BinaryData createBinaryData(BinaryDataContent content) {
                return new BinaryData(content);
            }

            @Override
            public BinaryDataContent getContent(BinaryData binaryData) {
                return binaryData.content;
            }
        });
    }

    /**
     * Creates a {@link BinaryData} from the given {@link InputStream}. Depending on the stream type
     * the result may or may not be consumable more than once; the stream content is not cached
     * unless an accessor requires it to be fully read into memory.
     * <p>
     * <b>NOTE:</b> The {@link InputStream} is not closed by this function.
     *
     * @param inputStream The {@link InputStream} that {@link BinaryData} will represent.
     * @return A {@link BinaryData} representing the {@link InputStream}.
     * @throws UncheckedIOException If any error happens while reading the {@link InputStream}.
     * @throws NullPointerException If {@code inputStream} is null.
     */
    public static BinaryData fromStream(InputStream inputStream) {
        return fromStream(inputStream, null);
    }

    /**
     * Creates a {@link BinaryData} from the given {@link InputStream} with a known length.
     * Depending on the stream type the result may or may not be consumable more than once.
     * <p>
     * <b>NOTE:</b> The {@link InputStream} is not closed by this function.
     *
     * @param inputStream The {@link InputStream} that {@link BinaryData} will represent.
     * @param length The length of {@code data} in bytes.
     * @return A {@link BinaryData} representing the {@link InputStream}.
     * @throws UncheckedIOException If any error happens while reading the {@link InputStream}.
     * @throws NullPointerException If {@code inputStream} is null.
     */
    public static BinaryData fromStream(InputStream inputStream, Long length) {
        return new BinaryData(new InputStreamContent(inputStream, length));
    }

    /**
     * Asynchronously creates a {@link BinaryData} from the given {@link InputStream}.
     * <b>NOTE:</b> The {@link InputStream} is not closed by this function.
     *
     * @param inputStream The {@link InputStream} that {@link BinaryData} will represent.
     * @return A {@link Mono} of {@link BinaryData} representing the {@link InputStream}.
     * @throws UncheckedIOException If any error happens while reading the {@link InputStream}.
     * @throws NullPointerException If {@code inputStream} is null.
     */
    public static Mono<BinaryData> fromStreamAsync(InputStream inputStream) {
        return fromStreamAsync(inputStream, null);
    }

    /**
     * Asynchronously creates a {@link BinaryData} from the given {@link InputStream} with a known
     * length. <b>NOTE:</b> The {@link InputStream} is not closed by this function.
     *
     * @param inputStream The {@link InputStream} that {@link BinaryData} will represent.
     * @param length The length of {@code data} in bytes.
     * @return A {@link Mono} of {@link BinaryData} representing the {@link InputStream}.
     * @throws UncheckedIOException If any error happens while reading the {@link InputStream}.
     * @throws NullPointerException If {@code inputStream} is null.
     */
    public static Mono<BinaryData> fromStreamAsync(InputStream inputStream, Long length) {
        // Defer stream wrapping until subscription.
        return Mono.fromCallable(() -> fromStream(inputStream, length));
    }

    /**
     * Creates a {@link BinaryData} from the given {@link Flux} of {@link ByteBuffer}, aggregating
     * the data into a single byte array.
     *
     * @param data The {@link Flux} of {@link ByteBuffer} that {@link BinaryData} will represent.
     * @return A {@link Mono} of {@link BinaryData} representing the {@link Flux} of {@link ByteBuffer}.
     * @throws NullPointerException If {@code data} is null.
     */
    public static Mono<BinaryData> fromFlux(Flux<ByteBuffer> data) {
        return fromFlux(data, null);
    }

    /**
     * Creates a {@link BinaryData} from the given {@link Flux} of {@link ByteBuffer} with a known
     * length, aggregating the data into a single byte array.
     *
     * @param data The {@link Flux} of {@link ByteBuffer} that {@link BinaryData} will represent.
     * @param length The length of {@code data} in bytes.
     * @return A {@link Mono} of {@link BinaryData} representing the {@link Flux} of {@link ByteBuffer}.
     * @throws IllegalArgumentException if the length is less than zero.
     * @throws NullPointerException if {@code data} is null.
     */
    public static Mono<BinaryData> fromFlux(Flux<ByteBuffer> data, Long length) {
        // Eagerly buffers the Flux; the three-argument overload allows deferring consumption.
        return fromFlux(data, length, true);
    }

    /**
     * Creates a {@link BinaryData} from the given {@link Flux} of {@link ByteBuffer}, optionally
     * buffering the flux eagerly or deferring consumption.
     *
     * @param data The {@link Flux} of {@link ByteBuffer} that {@link BinaryData} will represent.
     * @param length The length of {@code data} in bytes.
     * @param bufferContent A flag indicating whether {@link Flux} should be buffered eagerly or
     * consumption deferred.
     * @return A {@link Mono} of {@link BinaryData} representing the {@link Flux} of {@link ByteBuffer}.
     * @throws IllegalArgumentException if the length is less than zero.
     * @throws NullPointerException if {@code data} is null.
     */
    // NOTE(review): the implementation of fromFlux(Flux, Long, boolean) is documented above but its
    // method body is absent from this file excerpt, even though the two-argument overload delegates
    // to it — presumably lost in extraction; confirm against the upstream azure-core source.

    /**
     * Creates a {@link BinaryData} from the given {@link String}, converted to bytes using
     * {@link StandardCharsets#UTF_8 UTF-8}.
     *
     * @param data The {@link String} that {@link BinaryData} will represent.
     * @return A {@link BinaryData} representing the {@link String}.
     * @throws NullPointerException If {@code data} is null.
     */
    public static BinaryData fromString(String data) {
        return new BinaryData(new StringContent(data));
    }

    /**
     * Creates a {@link BinaryData} from the given byte array. The array is held by reference, not
     * copied: external mutation of the array is visible through this instance. Copy the array
     * first if isolation is required.
     *
     * @param data The byte array that {@link BinaryData} will represent.
     * @return A {@link BinaryData} representing the byte array.
     * @throws NullPointerException If {@code data} is null.
     */
    public static BinaryData fromBytes(byte[] data) {
        return new BinaryData(new ByteArrayContent(data));
    }

    /**
     * Creates a {@link BinaryData} by JSON-serializing the given object with the default
     * {@link JsonSerializer}. A {@link JsonSerializerProvider} found on the classpath is
     * preferred; otherwise a default Jackson-based implementation is used.
     *
     * @param data The object that will be JSON serialized that {@link BinaryData} will represent.
     * @return A {@link BinaryData} representing the JSON serialized object.
     * @throws NullPointerException If {@code data} is null.
     * @see JsonSerializer
     */
    public static BinaryData fromObject(Object data) {
        return fromObject(data, SERIALIZER);
    }

    /**
     * Asynchronously creates a {@link BinaryData} by JSON-serializing the given object with the
     * default {@link JsonSerializer}.
     *
     * @param data The object that will be JSON serialized that {@link BinaryData} will represent.
     * @return A {@link Mono} of {@link BinaryData} representing the JSON serialized object.
     * @see JsonSerializer
     */
    public static Mono<BinaryData> fromObjectAsync(Object data) {
        return fromObjectAsync(data, SERIALIZER);
    }

    /**
     * Creates a {@link BinaryData} by serializing the given object with the supplied
     * {@link ObjectSerializer}. How {@code null} data is serialized is determined by the serializer.
     *
     * @param data The object that will be serialized that {@link BinaryData} will represent.
     * @param serializer The {@link ObjectSerializer} used to serialize object.
     * @return A {@link BinaryData} representing the serialized object.
     * @throws NullPointerException If {@code serializer} is null.
     * @see ObjectSerializer
     * @see JsonSerializer
     */
    public static BinaryData fromObject(Object data, ObjectSerializer serializer) {
        return new BinaryData(new SerializableContent(data, serializer));
    }

    /**
     * Asynchronously creates a {@link BinaryData} by serializing the given object with the
     * supplied {@link ObjectSerializer}.
     *
     * @param data The object that will be serialized that {@link BinaryData} will represent.
     * @param serializer The {@link ObjectSerializer} used to serialize object.
     * @return A {@link Mono} of {@link BinaryData} representing the serialized object.
     * @throws NullPointerException If {@code serializer} is null.
     * @see ObjectSerializer
     * @see JsonSerializer
     */
    public static Mono<BinaryData> fromObjectAsync(Object data, ObjectSerializer serializer) {
        // Defer the (potentially expensive) serialization until subscription.
        return Mono.fromCallable(() -> fromObject(data, serializer));
    }

    /**
     * Creates a {@link BinaryData} backed by the file at the given {@link Path}. File existence is
     * checked at creation time, but the file is not read until the content is accessed. Uses the
     * default chunk size for reads.
     *
     * @param file The {@link Path} that will be the {@link BinaryData} data.
     * @return A new {@link BinaryData}.
     * @throws NullPointerException If {@code file} is null.
     * @throws UncheckedIOException if the file does not exist.
     */
    public static BinaryData fromFile(Path file) {
        return fromFile(file, STREAM_READ_SIZE);
    }

    /**
     * Creates a {@link BinaryData} backed by the file at the given {@link Path}, read with the
     * requested chunk size. File existence is checked at creation time, but the file is not read
     * until the content is accessed.
     *
     * @param file The {@link Path} that will be the {@link BinaryData} data.
     * @param chunkSize The requested size for each read of the path.
     * @return A new {@link BinaryData}.
     * @throws NullPointerException If {@code file} is null.
     * @throws IllegalArgumentException If {@code chunkSize} is less than or equal to 0.
     * @throws UncheckedIOException if the file does not exist.
     */
    public static BinaryData fromFile(Path file, int chunkSize) {
        return new BinaryData(new FileContent(file, chunkSize, null, null));
    }

    /**
     * Creates a {@link BinaryData} backed by a slice of the file at the given {@link Path}. File
     * existence is checked at creation time, but the file is not read until the content is
     * accessed. Uses the default chunk size for reads.
     *
     * @param file The {@link Path} that will be the {@link BinaryData} data.
     * @param position Position, or offset, within the path where reading begins.
     * @param length Maximum number of bytes to be read from the path.
     * @return A new {@link BinaryData}.
     * @throws NullPointerException If {@code file} is null.
     * @throws IllegalArgumentException If {@code position} or {@code length} are negative or
     * {@code position} plus {@code length} is greater than the file size.
     * @throws UncheckedIOException if the file does not exist.
     */
    public static BinaryData fromFile(Path file, Long position, Long length) {
        return new BinaryData(new FileContent(file, STREAM_READ_SIZE, position, length));
    }

    /**
     * Creates a {@link BinaryData} backed by a slice of the file at the given {@link Path}, read
     * with the requested chunk size. File existence is checked at creation time, but the file is
     * not read until the content is accessed.
     *
     * @param file The {@link Path} that will be the {@link BinaryData} data.
     * @param position Position, or offset, within the path where reading begins.
     * @param length Maximum number of bytes to be read from the path.
     * @param chunkSize The requested size for each read of the path.
     * @return A new {@link BinaryData}.
     * @throws NullPointerException If {@code file} is null.
     * @throws IllegalArgumentException If {@code position} or {@code length} are negative or
     * {@code position} plus {@code length} is greater than the file size or {@code chunkSize} is
     * less than or equal to 0.
     * @throws UncheckedIOException if the file does not exist.
     */
    public static BinaryData fromFile(Path file, Long position, Long length, int chunkSize) {
        return new BinaryData(new FileContent(file, chunkSize, position, length));
    }

    /**
     * Returns a byte array representation of this {@link BinaryData}. The returned array is a
     * reference to the underlying storage: mutating it also changes this instance's content
     * (file/stream/flux-backed sources are not modified). Copy the array first for safe updates.
     *
     * @return A byte array representing this {@link BinaryData}.
     */
    public byte[] toBytes() {
        return content.toBytes();
    }

    /**
     * Returns a {@link String} representation of this {@link BinaryData} by converting its data using the UTF-8
     * character set. A new instance of String is created each time this method is called.
* * @return A {@link String} representing this {@link BinaryData}. */ public String toString() { return content.toString(); } /** * Returns an {@link Object} representation of this {@link BinaryData} by deserializing its data using the default * {@link JsonSerializer}. Each time this method is called, the content is deserialized and a new instance of type * {@code T} is returned. So, calling this method repeatedly to convert the underlying data source into the same * type is not recommended. * <p> * The type, represented by {@link Class}, should be a non-generic class, for generic classes use * {@link * <p> * <b>Note:</b> This method first looks for a {@link JsonSerializerProvider} implementation on the classpath. If no * implementation is found, a default Jackson-based implementation will be used to deserialize the object. * * <p><strong>Get a non-generic Object from the BinaryData</strong></p> * * <!-- src_embed com.azure.core.util.BinaryData.toObject * <pre> * class Person & * & * private String name; * * & * public Person setName& * this.name = name; * return this; * & * * & * public String getName& * return name; * & * & * final Person data = new Person& * * & * & * & * & * & * * BinaryData binaryData = BinaryData.fromObject& * * Person person = binaryData.toObject& * System.out.println& * </pre> * <!-- end com.azure.core.util.BinaryData.toObject * * @param <T> Type of the deserialized Object. * @param clazz The {@link Class} representing the Object's type. * @return An {@link Object} representing the JSON deserialized {@link BinaryData}. * @throws NullPointerException If {@code clazz} is null. * @see JsonSerializer */ public <T> T toObject(Class<T> clazz) { return toObject(TypeReference.createInstance(clazz), SERIALIZER); } /** * Returns an {@link Object} representation of this {@link BinaryData} by deserializing its data using the default * {@link JsonSerializer}. 
Each time this method is called, the content is deserialized and a new instance of type * {@code T} is returned. So, calling this method repeatedly to convert the underlying data source into the same * type is not recommended. * <p> * The type, represented by {@link TypeReference}, can either be a generic or non-generic type. If the type is * generic create a sub-type of {@link TypeReference}, if the type is non-generic use * {@link TypeReference * <p> * <b>Note:</b> This method first looks for a {@link JsonSerializerProvider} implementation on the classpath. If no * implementation is found, a default Jackson-based implementation will be used to deserialize the object. * * <p><strong>Get a non-generic Object from the BinaryData</strong></p> * * <!-- src_embed com.azure.core.util.BinaryData.toObject * <pre> * class Person & * & * private String name; * * & * public Person setName& * this.name = name; * return this; * & * * & * public String getName& * return name; * & * & * final Person data = new Person& * * & * & * & * & * & * * BinaryData binaryData = BinaryData.fromObject& * * Person person = binaryData.toObject& * System.out.println& * </pre> * <!-- end com.azure.core.util.BinaryData.toObject * * <p><strong>Get a generic Object from the BinaryData</strong></p> * * <!-- src_embed com.azure.core.util.BinaryData.toObject * <pre> * final Person person1 = new Person& * final Person person2 = new Person& * * List&lt;Person&gt; personList = new ArrayList&lt;&gt;& * personList.add& * personList.add& * * & * & * & * & * & * * * BinaryData binaryData = BinaryData.fromObject& * * List&lt;Person&gt; persons = binaryData.toObject& * persons.forEach& * </pre> * <!-- end com.azure.core.util.BinaryData.toObject * * @param typeReference The {@link TypeReference} representing the Object's type. * @param <T> Type of the deserialized Object. * @return An {@link Object} representing the JSON deserialized {@link BinaryData}. 
* @throws NullPointerException If {@code typeReference} is null. * @see JsonSerializer */ public <T> T toObject(TypeReference<T> typeReference) { return toObject(typeReference, SERIALIZER); } /** * Returns an {@link Object} representation of this {@link BinaryData} by deserializing its data using the passed * {@link ObjectSerializer}. Each time this method is called, the content is deserialized and a new instance of type * {@code T} is returned. So, calling this method repeatedly to convert the underlying data source into the same * type is not recommended. * <p> * The type, represented by {@link Class}, should be a non-generic class, for generic classes use * {@link * <p> * The passed {@link ObjectSerializer} can either be one of the implementations offered by the Azure SDKs or your * own implementation. * * <p><strong>Azure SDK implementations</strong></p> * <ul> * <li><a href="https: * <li><a href="https: * </ul> * * <p><strong>Get a non-generic Object from the BinaryData</strong></p> * * <!-- src_embed com.azure.core.util.BinaryData.toObject * <pre> * class Person & * & * private String name; * * & * public Person setName& * this.name = name; * return this; * & * * & * public String getName& * return name; * & * & * final Person data = new Person& * * & * & * & * * final ObjectSerializer serializer = * new MyJsonSerializer& * BinaryData binaryData = BinaryData.fromObject& * * Person person = binaryData.toObject& * System.out.println& * </pre> * <!-- end com.azure.core.util.BinaryData.toObject * * @param clazz The {@link Class} representing the Object's type. * @param serializer The {@link ObjectSerializer} used to deserialize object. * @param <T> Type of the deserialized Object. * @return An {@link Object} representing the deserialized {@link BinaryData}. * @throws NullPointerException If {@code clazz} or {@code serializer} is null. 
* @see ObjectSerializer * @see JsonSerializer * @see <a href="https: */ public <T> T toObject(Class<T> clazz, ObjectSerializer serializer) { return toObject(TypeReference.createInstance(clazz), serializer); } /** * Returns an {@link Object} representation of this {@link BinaryData} by deserializing its data using the passed * {@link ObjectSerializer}. Each time this method is called, the content is deserialized and a new instance of type * {@code T} is returned. So, calling this method repeatedly to convert the underlying data source into the same * type is not recommended. * <p> * The type, represented by {@link TypeReference}, can either be a generic or non-generic type. If the type is * generic create a sub-type of {@link TypeReference}, if the type is non-generic use * {@link TypeReference * <p> * The passed {@link ObjectSerializer} can either be one of the implementations offered by the Azure SDKs or your * own implementation. * * <p><strong>Azure SDK implementations</strong></p> * <ul> * <li><a href="https: * <li><a href="https: * </ul> * * <p><strong>Get a non-generic Object from the BinaryData</strong></p> * * <!-- src_embed com.azure.core.util.BinaryData.toObject * <pre> * class Person & * & * private String name; * * & * public Person setName& * this.name = name; * return this; * & * * & * public String getName& * return name; * & * & * final Person data = new Person& * * & * & * & * * final ObjectSerializer serializer = * new MyJsonSerializer& * BinaryData binaryData = BinaryData.fromObject& * * Person person = binaryData.toObject& * System.out.println& * * </pre> * <!-- end com.azure.core.util.BinaryData.toObject * * <p><strong>Get a generic Object from the BinaryData</strong></p> * * <!-- src_embed com.azure.core.util.BinaryData.toObject * <pre> * final Person person1 = new Person& * final Person person2 = new Person& * * List&lt;Person&gt; personList = new ArrayList&lt;&gt;& * personList.add& * personList.add& * * final ObjectSerializer serializer = * 
new MyJsonSerializer& * BinaryData binaryData = BinaryData.fromObject& * * & * List&lt;Person&gt; persons = binaryData.toObject& * persons.forEach& * </pre> * <!-- end com.azure.core.util.BinaryData.toObject * * @param typeReference The {@link TypeReference} representing the Object's type. * @param serializer The {@link ObjectSerializer} used to deserialize object. * @param <T> Type of the deserialized Object. * @return An {@link Object} representing the deserialized {@link BinaryData}. * @throws NullPointerException If {@code typeReference} or {@code serializer} is null. * @see ObjectSerializer * @see JsonSerializer * @see <a href="https: */ public <T> T toObject(TypeReference<T> typeReference, ObjectSerializer serializer) { Objects.requireNonNull(typeReference, "'typeReference' cannot be null."); Objects.requireNonNull(serializer, "'serializer' cannot be null."); return content.toObject(typeReference, serializer); } /** * Returns an {@link Object} representation of this {@link BinaryData} by deserializing its data using the default * {@link JsonSerializer}. Each time this method is called, the content is deserialized and a new instance of type * {@code T} is returned. So, calling this method repeatedly to convert the underlying data source into the same * type is not recommended. * <p> * The type, represented by {@link Class}, should be a non-generic class, for generic classes use * {@link * <p> * <b>Note:</b> This method first looks for a {@link JsonSerializerProvider} implementation on the classpath. If no * implementation is found, a default Jackson-based implementation will be used to deserialize the object. 
* * <p><strong>Get a non-generic Object from the BinaryData</strong></p> * * <!-- src_embed com.azure.core.util.BinaryData.toObjectAsync * <pre> * class Person & * & * private String name; * * & * public Person setName& * this.name = name; * return this; * & * * & * public String getName& * return name; * & * & * final Person data = new Person& * * & * & * & * & * & * * BinaryData binaryData = BinaryData.fromObject& * * Disposable subscriber = binaryData.toObjectAsync& * .subscribe& * * & * TimeUnit.SECONDS.sleep& * subscriber.dispose& * </pre> * <!-- end com.azure.core.util.BinaryData.toObjectAsync * * @param clazz The {@link Class} representing the Object's type. * @param <T> Type of the deserialized Object. * @return A {@link Mono} of {@link Object} representing the JSON deserialized {@link BinaryData}. * @throws NullPointerException If {@code clazz} is null. * @see JsonSerializer */ public <T> Mono<T> toObjectAsync(Class<T> clazz) { return toObjectAsync(TypeReference.createInstance(clazz), SERIALIZER); } /** * Returns an {@link Object} representation of this {@link BinaryData} by deserializing its data using the default * {@link JsonSerializer}. Each time this method is called, the content is deserialized and a new instance of type * {@code T} is returned. So, calling this method repeatedly to convert the underlying data source into the same * type is not recommended. * <p> * The type, represented by {@link TypeReference}, can either be a generic or non-generic type. If the type is * generic create a sub-type of {@link TypeReference}, if the type is non-generic use * {@link TypeReference * <p> * <b>Note:</b> This method first looks for a {@link JsonSerializerProvider} implementation on the classpath. If no * implementation is found, a default Jackson-based implementation will be used to deserialize the object. 
* * <p><strong>Get a non-generic Object from the BinaryData</strong></p> * * <!-- src_embed com.azure.core.util.BinaryData.toObjectAsync * <pre> * class Person & * & * private String name; * * & * public Person setName& * this.name = name; * return this; * & * * & * public String getName& * return name; * & * & * final Person data = new Person& * * & * & * & * & * & * * BinaryData binaryData = BinaryData.fromObject& * * Disposable subscriber = binaryData.toObjectAsync& * .subscribe& * * & * TimeUnit.SECONDS.sleep& * subscriber.dispose& * </pre> * <!-- end com.azure.core.util.BinaryData.toObjectAsync * * <p><strong>Get a generic Object from the BinaryData</strong></p> * * <!-- src_embed com.azure.core.util.BinaryData.toObjectAsync * <pre> * final Person person1 = new Person& * final Person person2 = new Person& * * List&lt;Person&gt; personList = new ArrayList&lt;&gt;& * personList.add& * personList.add& * * BinaryData binaryData = BinaryData.fromObject& * * Disposable subscriber = binaryData.toObjectAsync& * .subscribe& * * & * TimeUnit.SECONDS.sleep& * subscriber.dispose& * </pre> * <!-- end com.azure.core.util.BinaryData.toObjectAsync * * @param typeReference The {@link TypeReference} representing the Object's type. * @param <T> Type of the deserialized Object. * @return A {@link Mono} of {@link Object} representing the JSON deserialized {@link BinaryData}. * @throws NullPointerException If {@code typeReference} is null. * @see JsonSerializer */ public <T> Mono<T> toObjectAsync(TypeReference<T> typeReference) { return toObjectAsync(typeReference, SERIALIZER); } /** * Returns an {@link Object} representation of this {@link BinaryData} by deserializing its data using the passed * {@link ObjectSerializer}. Each time this method is called, the content is deserialized and a new instance of type * {@code T} is returned. So, calling this method repeatedly to convert the underlying data source into the same * type is not recommended. 
* <p> * The type, represented by {@link Class}, should be a non-generic class, for generic classes use * {@link * <p> * The passed {@link ObjectSerializer} can either be one of the implementations offered by the Azure SDKs or your * own implementation. * * <p><strong>Azure SDK implementations</strong></p> * <ul> * <li><a href="https: * <li><a href="https: * </ul> * * <p><strong>Get a non-generic Object from the BinaryData</strong></p> * * <!-- src_embed com.azure.core.util.BinaryData.toObjectAsync * <pre> * class Person & * & * private String name; * * & * public Person setName& * this.name = name; * return this; * & * * & * public String getName& * return name; * & * & * final Person data = new Person& * * & * & * & * * final ObjectSerializer serializer = * new MyJsonSerializer& * BinaryData binaryData = BinaryData.fromObject& * * Disposable subscriber = binaryData.toObjectAsync& * .subscribe& * * & * TimeUnit.SECONDS.sleep& * subscriber.dispose& * </pre> * <!-- end com.azure.core.util.BinaryData.toObjectAsync * * @param clazz The {@link Class} representing the Object's type. * @param serializer The {@link ObjectSerializer} used to deserialize object. * @param <T> Type of the deserialized Object. * @return A {@link Mono} of {@link Object} representing the deserialized {@link BinaryData}. * @throws NullPointerException If {@code clazz} or {@code serializer} is null. * @see ObjectSerializer * @see JsonSerializer * @see <a href="https: */ public <T> Mono<T> toObjectAsync(Class<T> clazz, ObjectSerializer serializer) { return toObjectAsync(TypeReference.createInstance(clazz), serializer); } /** * Returns an {@link Object} representation of this {@link BinaryData} by deserializing its data using the passed * {@link ObjectSerializer}. Each time this method is called, the content is deserialized and a new instance of type * {@code T} is returned. So, calling this method repeatedly to convert the underlying data source into the same * type is not recommended. 
* <p> * The type, represented by {@link TypeReference}, can either be a generic or non-generic type. If the type is * generic create a sub-type of {@link TypeReference}, if the type is non-generic use * {@link TypeReference * <p> * The passed {@link ObjectSerializer} can either be one of the implementations offered by the Azure SDKs or your * own implementation. * * <p><strong>Azure SDK implementations</strong></p> * <ul> * <li><a href="https: * <li><a href="https: * </ul> * * <p><strong>Get a non-generic Object from the BinaryData</strong></p> * * <!-- src_embed com.azure.core.util.BinaryData.toObjectAsync * <pre> * class Person & * & * private String name; * * & * public Person setName& * this.name = name; * return this; * & * * & * public String getName& * return name; * & * & * final Person data = new Person& * * & * & * & * * final ObjectSerializer serializer = * new MyJsonSerializer& * BinaryData binaryData = BinaryData.fromObject& * * Disposable subscriber = binaryData * .toObjectAsync& * .subscribe& * * & * TimeUnit.SECONDS.sleep& * subscriber.dispose& * </pre> * <!-- end com.azure.core.util.BinaryData.toObjectAsync * * <p><strong>Get a generic Object from the BinaryData</strong></p> * * <!-- src_embed com.azure.core.util.BinaryData.toObjectAsync * <pre> * final Person person1 = new Person& * final Person person2 = new Person& * * List&lt;Person&gt; personList = new ArrayList&lt;&gt;& * personList.add& * personList.add& * * final ObjectSerializer serializer = * new MyJsonSerializer& * BinaryData binaryData = BinaryData.fromObject& * * Disposable subscriber = binaryData * .toObjectAsync& * .subscribe& * * & * TimeUnit.SECONDS.sleep& * subscriber.dispose& * </pre> * <!-- end com.azure.core.util.BinaryData.toObjectAsync * * @param typeReference The {@link TypeReference} representing the Object's type. * @param serializer The {@link ObjectSerializer} used to deserialize object. * @param <T> Type of the deserialized Object. 
* @return A {@link Mono} of {@link Object} representing the deserialized {@link BinaryData}. * @throws NullPointerException If {@code typeReference} or {@code serializer} is null. * @see ObjectSerializer * @see JsonSerializer * @see <a href="https: */ public <T> Mono<T> toObjectAsync(TypeReference<T> typeReference, ObjectSerializer serializer) { return Mono.fromCallable(() -> toObject(typeReference, serializer)); } /** * Returns an {@link InputStream} representation of this {@link BinaryData}. * * <p><strong>Get an InputStream from the BinaryData</strong></p> * * <!-- src_embed com.azure.core.util.BinaryData.toStream --> * <pre> * final byte[] data = &quot;Some Data&quot;.getBytes& * BinaryData binaryData = BinaryData.fromStream& * final byte[] bytes = new byte[data.length]; * binaryData.toStream& * System.out.println& * </pre> * <!-- end com.azure.core.util.BinaryData.toStream --> * * @return An {@link InputStream} representing the {@link BinaryData}. */ public InputStream toStream() { return content.toStream(); } /** * Returns a read-only {@link ByteBuffer} representation of this {@link BinaryData}. * <p> * Attempting to mutate the returned {@link ByteBuffer} will throw a {@link ReadOnlyBufferException}. * * <p><strong>Get a read-only ByteBuffer from the BinaryData</strong></p> * * <!-- src_embed com.azure.util.BinaryData.toByteBuffer --> * <pre> * final byte[] data = &quot;Some Data&quot;.getBytes& * BinaryData binaryData = BinaryData.fromBytes& * final byte[] bytes = new byte[data.length]; * binaryData.toByteBuffer& * System.out.println& * </pre> * <!-- end com.azure.util.BinaryData.toByteBuffer --> * * @return A read-only {@link ByteBuffer} representing the {@link BinaryData}. */ public ByteBuffer toByteBuffer() { return content.toByteBuffer(); } /** * Returns the content of this {@link BinaryData} instance as a flux of {@link ByteBuffer ByteBuffers}. The content * is not read from the underlying data source until the {@link Flux} is subscribed to. 
* * @return the content of this {@link BinaryData} instance as a flux of {@link ByteBuffer ByteBuffers}. */ public Flux<ByteBuffer> toFluxByteBuffer() { return content.toFluxByteBuffer(); } /** * Returns the length of the content, if it is known. The length can be {@code null} if the source did not specify * the length or the length cannot be determined without reading the whole content. * * @return the length of the content, if it is known. */ public Long getLength() { return content.getLength(); } /** * Returns a flag indicating whether the content can be repeatedly consumed using all accessors including * {@link * * <p> * Replayability does not imply thread-safety. The caller must not use data accessors simultaneously regardless of * what this method returns. * </p> * * <!-- src_embed com.azure.util.BinaryData.replayability --> * <pre> * BinaryData binaryData = binaryDataProducer& * * if & * binaryData = binaryData.toReplayableBinaryData& * & * * streamConsumer& * streamConsumer& * </pre> * <!-- end com.azure.util.BinaryData.replayability --> * * <!-- src_embed com.azure.util.BinaryData.replayabilityAsync --> * <pre> * Mono.fromCallable& * .flatMap& * if & * return Mono.just& * & * return binaryData.toReplayableBinaryDataAsync& * & * & * .flatMap& * fluxConsumer& * .then& * .subscribe& * </pre> * <!-- end com.azure.util.BinaryData.replayabilityAsync --> * * @return a flag indicating whether the content can be repeatedly consumed using all accessors. */ public boolean isReplayable() { return content.isReplayable(); } /** * Converts the {@link BinaryData} into a {@link BinaryData} that is replayable, i.e. content can be consumed * repeatedly using all accessors including {@link * * <p> * A {@link BinaryData} that is already replayable is returned as is. Otherwise techniques like marking and * resetting a stream or buffering in memory are employed to assure replayability. * </p> * * <p> * Replayability does not imply thread-safety. 
The caller must not use data accessors of returned {@link BinaryData} * simultaneously. * </p> * * <!-- src_embed com.azure.util.BinaryData.replayability --> * <pre> * BinaryData binaryData = binaryDataProducer& * * if & * binaryData = binaryData.toReplayableBinaryData& * & * * streamConsumer& * streamConsumer& * </pre> * <!-- end com.azure.util.BinaryData.replayability --> * * @return Replayable {@link BinaryData}. */ public BinaryData toReplayableBinaryData() { if (this.isReplayable()) { return this; } else { return new BinaryData(content.toReplayableContent()); } } /** * Converts the {@link BinaryData} into a {@link BinaryData} that is replayable, i.e. content can be consumed * repeatedly using all accessors including {@link * * <p> * A {@link BinaryData} that is already replayable is returned as is. Otherwise techniques like marking and * resetting a stream or buffering in memory are employed to assure replayability. * </p> * * <p> * Replayability does not imply thread-safety. The caller must not use data accessors of returned {@link BinaryData} * simultaneously. * </p> * * <!-- src_embed com.azure.util.BinaryData.replayabilityAsync --> * <pre> * Mono.fromCallable& * .flatMap& * if & * return Mono.just& * & * return binaryData.toReplayableBinaryDataAsync& * & * & * .flatMap& * fluxConsumer& * .then& * .subscribe& * </pre> * <!-- end com.azure.util.BinaryData.replayabilityAsync --> * * @return A {@link Mono} of {@link BinaryData} representing the replayable {@link BinaryData}. */ public Mono<BinaryData> toReplayableBinaryDataAsync() { if (isReplayable()) { return Mono.just(this); } else { return content.toReplayableContentAsync().map(BinaryData::new); } } }
nit: The async recording session might have been missed
public void recordingOperations(HttpClient httpClient) { CallAutomationAsyncClient client = getCallingServerClientUsingConnectionString(httpClient) .addPolicy((context, next) -> logHeaders("recordingOperationsAsync", next)) .buildAsyncClient(); CommunicationIdentityAsyncClient communicationIdentityAsyncClient = getCommunicationIdentityClientUsingConnectionString(httpClient) .buildAsyncClient(); String callConnectionId = ""; try { CommunicationUserIdentifier sourceUser = communicationIdentityAsyncClient.createUser().block(); String targetUserId = Optional.ofNullable(ACS_USER_CALL_RECORDING).orElse("8:acs:ad7b4e1f-5b71-4d2f-9db2-b1bae6d4f392_00000014-0b21-aee5-85f4-343a0d0065cf"); List<CommunicationIdentifier> targets = new ArrayList<CommunicationIdentifier>() { { add(new CommunicationUserIdentifier(targetUserId)); } }; String ngrok = "https: CreateCallResult createCallResult = client.createCall(new CreateCallOptions(sourceUser, targets, ngrok)).block(); assertNotNull(createCallResult); waitForOperationCompletion(10000); CallConnectionProperties callConnectionProperties = client.getCallConnectionAsync(createCallResult.getCallConnectionProperties().getCallConnectionId()).getCallProperties().block(); String serverCallId = callConnectionProperties.getServerCallId(); callConnectionId = callConnectionProperties.getCallConnectionId(); CallRecordingAsync callRecording = client.getCallRecordingAsync(); RecordingStateResult recordingResponse = callRecording.startRecording( new StartRecordingOptions(new ServerCallLocator(serverCallId)) .setRecordingStateCallbackUrl(ngrok)) .block(); assertNotNull(recordingResponse); String recordingId = recordingResponse.getRecordingId(); assertNotNull(recordingId); waitForOperationCompletion(10000); recordingResponse = callRecording.getRecordingState(recordingId).block(); assertNotNull(recordingResponse); assertEquals(RecordingState.ACTIVE, recordingResponse.getRecordingState()); callRecording.pauseRecording(recordingId).block(); 
waitForOperationCompletion(10000); recordingResponse = callRecording.getRecordingState(recordingId).block(); assertNotNull(recordingResponse); assertEquals(RecordingState.INACTIVE, recordingResponse.getRecordingState()); callRecording.resumeRecording(recordingId).block(); waitForOperationCompletion(10000); recordingResponse = callRecording.getRecordingState(recordingId).block(); assertNotNull(recordingResponse); assertEquals(RecordingState.ACTIVE, recordingResponse.getRecordingState()); callRecording.stopRecording(recordingId).block(); waitForOperationCompletion(10000); assertThrows(CallingServerErrorException.class, () -> callRecording.getRecordingState(recordingId).block()); } catch (Exception ex) { fail("Unexpected exception received", ex); } finally { CallConnectionAsync callConnection = client.getCallConnectionAsync(callConnectionId); callConnection.hangUp(true).block(); } }
try {
public void recordingOperations(HttpClient httpClient) { CallAutomationAsyncClient client = getCallingServerClientUsingConnectionString(httpClient) .addPolicy((context, next) -> logHeaders("recordingOperationsAsync", next)) .buildAsyncClient(); CommunicationIdentityAsyncClient communicationIdentityAsyncClient = getCommunicationIdentityClientUsingConnectionString(httpClient) .buildAsyncClient(); String callConnectionId = ""; try { CommunicationUserIdentifier sourceUser = communicationIdentityAsyncClient.createUser().block(); String targetUserId = Optional.ofNullable(ACS_USER_CALL_RECORDING).orElse("8:acs:ad7b4e1f-5b71-4d2f-9db2-b1bae6d4f392_00000014-0b21-aee5-85f4-343a0d0065cf"); List<CommunicationIdentifier> targets = new ArrayList<CommunicationIdentifier>() { { add(new CommunicationUserIdentifier(targetUserId)); } }; String ngrok = "https: CreateCallResult createCallResult = client.createCall(new CreateCallOptions(sourceUser, targets, ngrok)).block(); assertNotNull(createCallResult); waitForOperationCompletion(10000); CallConnectionProperties callConnectionProperties = client.getCallConnectionAsync(createCallResult.getCallConnectionProperties().getCallConnectionId()).getCallProperties().block(); String serverCallId = callConnectionProperties.getServerCallId(); callConnectionId = callConnectionProperties.getCallConnectionId(); CallRecordingAsync callRecording = client.getCallRecordingAsync(); RecordingStateResult recordingResponse = callRecording.startRecording( new StartRecordingOptions(new ServerCallLocator(serverCallId)) .setRecordingStateCallbackUrl(ngrok)) .block(); assertNotNull(recordingResponse); String recordingId = recordingResponse.getRecordingId(); assertNotNull(recordingId); waitForOperationCompletion(10000); recordingResponse = callRecording.getRecordingState(recordingId).block(); assertNotNull(recordingResponse); assertEquals(RecordingState.ACTIVE, recordingResponse.getRecordingState()); callRecording.pauseRecording(recordingId).block(); 
waitForOperationCompletion(10000); recordingResponse = callRecording.getRecordingState(recordingId).block(); assertNotNull(recordingResponse); assertEquals(RecordingState.INACTIVE, recordingResponse.getRecordingState()); callRecording.resumeRecording(recordingId).block(); waitForOperationCompletion(10000); recordingResponse = callRecording.getRecordingState(recordingId).block(); assertNotNull(recordingResponse); assertEquals(RecordingState.ACTIVE, recordingResponse.getRecordingState()); callRecording.stopRecording(recordingId).block(); waitForOperationCompletion(10000); assertThrows(CallingServerErrorException.class, () -> callRecording.getRecordingState(recordingId).block()); } catch (Exception ex) { fail("Unexpected exception received", ex); } finally { CallConnectionAsync callConnection = client.getCallConnectionAsync(callConnectionId); callConnection.hangUp(true).block(); } }
class CallRecordingAsyncLiveTests extends CallAutomationLiveTestBase { @ParameterizedTest @MethodSource("com.azure.core.test.TestBase @DisabledIfEnvironmentVariable( named = "SKIP_LIVE_TEST", matches = "(?i)(true)", disabledReason = "Requires human intervention") }
class CallRecordingAsyncLiveTests extends CallAutomationLiveTestBase { @ParameterizedTest @MethodSource("com.azure.core.test.TestBase @DisabledIfEnvironmentVariable( named = "SKIP_LIVE_TEST", matches = "(?i)(true)", disabledReason = "Requires human intervention") }
https://github.com/Azure/azure-sdk-for-java/pull/31127 -- it was submitted here.
public void recordingOperations(HttpClient httpClient) { CallAutomationAsyncClient client = getCallingServerClientUsingConnectionString(httpClient) .addPolicy((context, next) -> logHeaders("recordingOperationsAsync", next)) .buildAsyncClient(); CommunicationIdentityAsyncClient communicationIdentityAsyncClient = getCommunicationIdentityClientUsingConnectionString(httpClient) .buildAsyncClient(); String callConnectionId = ""; try { CommunicationUserIdentifier sourceUser = communicationIdentityAsyncClient.createUser().block(); String targetUserId = Optional.ofNullable(ACS_USER_CALL_RECORDING).orElse("8:acs:ad7b4e1f-5b71-4d2f-9db2-b1bae6d4f392_00000014-0b21-aee5-85f4-343a0d0065cf"); List<CommunicationIdentifier> targets = new ArrayList<CommunicationIdentifier>() { { add(new CommunicationUserIdentifier(targetUserId)); } }; String ngrok = "https: CreateCallResult createCallResult = client.createCall(new CreateCallOptions(sourceUser, targets, ngrok)).block(); assertNotNull(createCallResult); waitForOperationCompletion(10000); CallConnectionProperties callConnectionProperties = client.getCallConnectionAsync(createCallResult.getCallConnectionProperties().getCallConnectionId()).getCallProperties().block(); String serverCallId = callConnectionProperties.getServerCallId(); callConnectionId = callConnectionProperties.getCallConnectionId(); CallRecordingAsync callRecording = client.getCallRecordingAsync(); RecordingStateResult recordingResponse = callRecording.startRecording( new StartRecordingOptions(new ServerCallLocator(serverCallId)) .setRecordingStateCallbackUrl(ngrok)) .block(); assertNotNull(recordingResponse); String recordingId = recordingResponse.getRecordingId(); assertNotNull(recordingId); waitForOperationCompletion(10000); recordingResponse = callRecording.getRecordingState(recordingId).block(); assertNotNull(recordingResponse); assertEquals(RecordingState.ACTIVE, recordingResponse.getRecordingState()); callRecording.pauseRecording(recordingId).block(); 
waitForOperationCompletion(10000); recordingResponse = callRecording.getRecordingState(recordingId).block(); assertNotNull(recordingResponse); assertEquals(RecordingState.INACTIVE, recordingResponse.getRecordingState()); callRecording.resumeRecording(recordingId).block(); waitForOperationCompletion(10000); recordingResponse = callRecording.getRecordingState(recordingId).block(); assertNotNull(recordingResponse); assertEquals(RecordingState.ACTIVE, recordingResponse.getRecordingState()); callRecording.stopRecording(recordingId).block(); waitForOperationCompletion(10000); assertThrows(CallingServerErrorException.class, () -> callRecording.getRecordingState(recordingId).block()); } catch (Exception ex) { fail("Unexpected exception received", ex); } finally { CallConnectionAsync callConnection = client.getCallConnectionAsync(callConnectionId); callConnection.hangUp(true).block(); } }
try {
public void recordingOperations(HttpClient httpClient) { CallAutomationAsyncClient client = getCallingServerClientUsingConnectionString(httpClient) .addPolicy((context, next) -> logHeaders("recordingOperationsAsync", next)) .buildAsyncClient(); CommunicationIdentityAsyncClient communicationIdentityAsyncClient = getCommunicationIdentityClientUsingConnectionString(httpClient) .buildAsyncClient(); String callConnectionId = ""; try { CommunicationUserIdentifier sourceUser = communicationIdentityAsyncClient.createUser().block(); String targetUserId = Optional.ofNullable(ACS_USER_CALL_RECORDING).orElse("8:acs:ad7b4e1f-5b71-4d2f-9db2-b1bae6d4f392_00000014-0b21-aee5-85f4-343a0d0065cf"); List<CommunicationIdentifier> targets = new ArrayList<CommunicationIdentifier>() { { add(new CommunicationUserIdentifier(targetUserId)); } }; String ngrok = "https: CreateCallResult createCallResult = client.createCall(new CreateCallOptions(sourceUser, targets, ngrok)).block(); assertNotNull(createCallResult); waitForOperationCompletion(10000); CallConnectionProperties callConnectionProperties = client.getCallConnectionAsync(createCallResult.getCallConnectionProperties().getCallConnectionId()).getCallProperties().block(); String serverCallId = callConnectionProperties.getServerCallId(); callConnectionId = callConnectionProperties.getCallConnectionId(); CallRecordingAsync callRecording = client.getCallRecordingAsync(); RecordingStateResult recordingResponse = callRecording.startRecording( new StartRecordingOptions(new ServerCallLocator(serverCallId)) .setRecordingStateCallbackUrl(ngrok)) .block(); assertNotNull(recordingResponse); String recordingId = recordingResponse.getRecordingId(); assertNotNull(recordingId); waitForOperationCompletion(10000); recordingResponse = callRecording.getRecordingState(recordingId).block(); assertNotNull(recordingResponse); assertEquals(RecordingState.ACTIVE, recordingResponse.getRecordingState()); callRecording.pauseRecording(recordingId).block(); 
waitForOperationCompletion(10000); recordingResponse = callRecording.getRecordingState(recordingId).block(); assertNotNull(recordingResponse); assertEquals(RecordingState.INACTIVE, recordingResponse.getRecordingState()); callRecording.resumeRecording(recordingId).block(); waitForOperationCompletion(10000); recordingResponse = callRecording.getRecordingState(recordingId).block(); assertNotNull(recordingResponse); assertEquals(RecordingState.ACTIVE, recordingResponse.getRecordingState()); callRecording.stopRecording(recordingId).block(); waitForOperationCompletion(10000); assertThrows(CallingServerErrorException.class, () -> callRecording.getRecordingState(recordingId).block()); } catch (Exception ex) { fail("Unexpected exception received", ex); } finally { CallConnectionAsync callConnection = client.getCallConnectionAsync(callConnectionId); callConnection.hangUp(true).block(); } }
class CallRecordingAsyncLiveTests extends CallAutomationLiveTestBase { @ParameterizedTest @MethodSource("com.azure.core.test.TestBase @DisabledIfEnvironmentVariable( named = "SKIP_LIVE_TEST", matches = "(?i)(true)", disabledReason = "Requires human intervention") }
class CallRecordingAsyncLiveTests extends CallAutomationLiveTestBase { @ParameterizedTest @MethodSource("com.azure.core.test.TestBase @DisabledIfEnvironmentVariable( named = "SKIP_LIVE_TEST", matches = "(?i)(true)", disabledReason = "Requires human intervention") }
Is this needed?
void testSendAndReceiveMessage() throws InterruptedException { LOGGER.info("EventHubsBinderProduceErrorIT begin."); EventHubsBinderProduceErrorIT.LATCH.await(15, TimeUnit.SECONDS); LOGGER.info("Send a message:" + MESSAGE + "."); many.emitNext(new GenericMessage<>(MESSAGE), Sinks.EmitFailureHandler.FAIL_FAST); assertThat(EventHubsBinderProduceErrorIT.LATCH.await(300, TimeUnit.SECONDS)).isTrue(); Thread.sleep(10000); LOGGER.info("EventHubsBinderProduceErrorIT end."); }
Thread.sleep(10000);
void testSendAndReceiveMessage() throws InterruptedException { LOGGER.info("EventHubsBinderProduceErrorIT begin."); EventHubsBinderProduceErrorIT.LATCH.await(15, TimeUnit.SECONDS); LOGGER.info("Send a message:" + MESSAGE + "."); many.emitNext(new GenericMessage<>(MESSAGE), Sinks.EmitFailureHandler.FAIL_FAST); assertThat(EventHubsBinderProduceErrorIT.LATCH.await(300, TimeUnit.SECONDS)).isTrue(); LOGGER.info("EventHubsBinderProduceErrorIT end."); }
class TestConfig { @Bean Sinks.Many<Message<String>> many() { return Sinks.many().unicast().onBackpressureBuffer(); } @Bean Supplier<Flux<Message<String>>> supply(Sinks.Many<Message<String>> many) { return () -> many.asFlux() .doOnNext(m -> LOGGER.info("Manually sending message {}", m.getPayload())) .doOnError(t -> LOGGER.error("Error encountered", t)); } @Bean Consumer<Message<List<String>>> consume() { return message -> { List<String> payload = message.getPayload(); LOGGER.info("EventHubsBinderProduceErrorIT: New message received: '{}'", payload); Assertions.fail("EventHubsBinderProduceErrorIT: can't be here"); }; } @ServiceActivator(inputChannel = "errorChannel") public void receiveProduceErrorMsg(Message sendFailedMsg) { LOGGER.info("receive send failed msg: '{}'", sendFailedMsg); LATCH.countDown(); } }
class TestConfig { @Bean Sinks.Many<Message<String>> many() { return Sinks.many().unicast().onBackpressureBuffer(); } @Bean Supplier<Flux<Message<String>>> supply(Sinks.Many<Message<String>> many) { return () -> many.asFlux() .doOnNext(m -> LOGGER.info("Manually sending message {}", m.getPayload())) .doOnError(t -> LOGGER.error("Error encountered", t)); } @Bean Consumer<Message<List<String>>> consume() { return message -> { List<String> payload = message.getPayload(); LOGGER.info("EventHubsBinderProduceErrorIT: New message received: '{}'", payload); Assertions.fail("EventHubsBinderProduceErrorIT: can't be here"); }; } @ServiceActivator(inputChannel = "errorChannel") public void processError(Message sendFailedMsg) { LOGGER.info("receive error message: '{}'", sendFailedMsg); LATCH.countDown(); } }
`HttpHeaders` will never be null in a valid request, them being empty isn't an issue
public Mono<HttpResponse> send(HttpRequest request, Context context) { ProgressReporter progressReporter = Contexts.with(context).getHttpRequestProgressReporter(); RequestOptions options = new RequestOptions() .setMethod(HttpMethod.valueOf(request.getHttpMethod().name())) .setAbsoluteURI(request.getUrl()); return Mono.create(sink -> client.request(options, requestResult -> { if (requestResult.failed()) { sink.error(requestResult.cause()); return; } HttpClientRequest vertxHttpRequest = requestResult.result(); vertxHttpRequest.exceptionHandler(sink::error); request.getHeaders().stream() .forEach(header -> vertxHttpRequest.putHeader(header.getName(), header.getValuesList())); if (request.getHeaders().get("Content-Length") == null) { vertxHttpRequest.setChunked(true); } vertxHttpRequest.response(event -> { if (event.succeeded()) { HttpClientResponse vertxHttpResponse = event.result(); vertxHttpResponse.exceptionHandler(sink::error); vertxHttpResponse.body(bodyEvent -> { if (bodyEvent.succeeded()) { sink.success(new BufferedVertxHttpResponse(request, vertxHttpResponse, bodyEvent.result())); } else { sink.error(bodyEvent.cause()); } }); } else { sink.error(event.cause()); } }); Flux<ByteBuffer> requestBody = request.getBody(); if (requestBody == null) { vertxHttpRequest.end(); } else { if (progressReporter != null) { requestBody = requestBody.map(buffer -> { progressReporter.reportProgress(buffer.remaining()); return buffer; }); } FluxUtil.collectBytesFromNetworkResponse(requestBody, request.getHeaders()) .subscribeOn(scheduler) .subscribe(bytes -> vertxHttpRequest.write(Buffer.buffer(Unpooled.wrappedBuffer(bytes))), sink::error, vertxHttpRequest::end); } })); }
if (request.getHeaders().get("Content-Length") == null) {
public Mono<HttpResponse> send(HttpRequest request, Context context) { ProgressReporter progressReporter = Contexts.with(context).getHttpRequestProgressReporter(); RequestOptions options = new RequestOptions() .setMethod(HttpMethod.valueOf(request.getHttpMethod().name())) .setAbsoluteURI(request.getUrl()); return Mono.create(sink -> client.request(options, requestResult -> { if (requestResult.failed()) { sink.error(requestResult.cause()); return; } HttpClientRequest vertxHttpRequest = requestResult.result(); vertxHttpRequest.exceptionHandler(sink::error); request.getHeaders().stream() .forEach(header -> vertxHttpRequest.putHeader(header.getName(), header.getValuesList())); if (request.getHeaders().get("Content-Length") == null) { vertxHttpRequest.setChunked(true); } vertxHttpRequest.response(event -> { if (event.succeeded()) { HttpClientResponse vertxHttpResponse = event.result(); vertxHttpResponse.exceptionHandler(sink::error); vertxHttpResponse.body(bodyEvent -> { if (bodyEvent.succeeded()) { sink.success(new BufferedVertxHttpResponse(request, vertxHttpResponse, bodyEvent.result())); } else { sink.error(bodyEvent.cause()); } }); } else { sink.error(event.cause()); } }); Flux<ByteBuffer> requestBody = request.getBody(); if (requestBody == null) { vertxHttpRequest.end(); } else { if (progressReporter != null) { requestBody = requestBody.map(buffer -> { progressReporter.reportProgress(buffer.remaining()); return buffer; }); } FluxUtil.collectBytesFromNetworkResponse(requestBody, request.getHeaders()) .subscribeOn(scheduler) .subscribe(bytes -> vertxHttpRequest.write(Buffer.buffer(Unpooled.wrappedBuffer(bytes))), sink::error, vertxHttpRequest::end); } })); }
class VertxAsyncHttpClient implements HttpClient { private final Scheduler scheduler; final io.vertx.core.http.HttpClient client; /** * Constructs a {@link VertxAsyncHttpClient}. * * @param client The Vert.x {@link io.vertx.core.http.HttpClient} */ VertxAsyncHttpClient(io.vertx.core.http.HttpClient client, Vertx vertx) { Objects.requireNonNull(client, "client cannot be null"); Objects.requireNonNull(vertx, "vertx cannot be null"); this.client = client; this.scheduler = Schedulers.fromExecutor(vertx.nettyEventLoopGroup()); } @Override public Mono<HttpResponse> send(HttpRequest request) { return send(request, Context.NONE); } @Override }
class VertxAsyncHttpClient implements HttpClient { private final Scheduler scheduler; final io.vertx.core.http.HttpClient client; /** * Constructs a {@link VertxAsyncHttpClient}. * * @param client The Vert.x {@link io.vertx.core.http.HttpClient} */ VertxAsyncHttpClient(io.vertx.core.http.HttpClient client, Vertx vertx) { Objects.requireNonNull(client, "client cannot be null"); Objects.requireNonNull(vertx, "vertx cannot be null"); this.client = client; this.scheduler = Schedulers.fromExecutor(vertx.nettyEventLoopGroup()); } @Override public Mono<HttpResponse> send(HttpRequest request) { return send(request, Context.NONE); } @Override }
I think we need it. Let's see the message send scenario: `Spring message ->(success) Spring Cloud Stream MessageChannel ->(fail) EventHub. ` The code make sure Spring message will be send to Spring Cloud Stream MessageChannel.
void testSendAndReceiveMessage() throws InterruptedException { LOGGER.info("EventHubsBinderProduceErrorIT begin."); EventHubsBinderProduceErrorIT.LATCH.await(15, TimeUnit.SECONDS); LOGGER.info("Send a message:" + MESSAGE + "."); many.emitNext(new GenericMessage<>(MESSAGE), Sinks.EmitFailureHandler.FAIL_FAST); assertThat(EventHubsBinderProduceErrorIT.LATCH.await(300, TimeUnit.SECONDS)).isTrue(); Thread.sleep(10000); LOGGER.info("EventHubsBinderProduceErrorIT end."); }
Thread.sleep(10000);
void testSendAndReceiveMessage() throws InterruptedException { LOGGER.info("EventHubsBinderProduceErrorIT begin."); EventHubsBinderProduceErrorIT.LATCH.await(15, TimeUnit.SECONDS); LOGGER.info("Send a message:" + MESSAGE + "."); many.emitNext(new GenericMessage<>(MESSAGE), Sinks.EmitFailureHandler.FAIL_FAST); assertThat(EventHubsBinderProduceErrorIT.LATCH.await(300, TimeUnit.SECONDS)).isTrue(); LOGGER.info("EventHubsBinderProduceErrorIT end."); }
class TestConfig { @Bean Sinks.Many<Message<String>> many() { return Sinks.many().unicast().onBackpressureBuffer(); } @Bean Supplier<Flux<Message<String>>> supply(Sinks.Many<Message<String>> many) { return () -> many.asFlux() .doOnNext(m -> LOGGER.info("Manually sending message {}", m.getPayload())) .doOnError(t -> LOGGER.error("Error encountered", t)); } @Bean Consumer<Message<List<String>>> consume() { return message -> { List<String> payload = message.getPayload(); LOGGER.info("EventHubsBinderProduceErrorIT: New message received: '{}'", payload); Assertions.fail("EventHubsBinderProduceErrorIT: can't be here"); }; } @ServiceActivator(inputChannel = "errorChannel") public void receiveProduceErrorMsg(Message sendFailedMsg) { LOGGER.info("receive send failed msg: '{}'", sendFailedMsg); LATCH.countDown(); } }
class TestConfig { @Bean Sinks.Many<Message<String>> many() { return Sinks.many().unicast().onBackpressureBuffer(); } @Bean Supplier<Flux<Message<String>>> supply(Sinks.Many<Message<String>> many) { return () -> many.asFlux() .doOnNext(m -> LOGGER.info("Manually sending message {}", m.getPayload())) .doOnError(t -> LOGGER.error("Error encountered", t)); } @Bean Consumer<Message<List<String>>> consume() { return message -> { List<String> payload = message.getPayload(); LOGGER.info("EventHubsBinderProduceErrorIT: New message received: '{}'", payload); Assertions.fail("EventHubsBinderProduceErrorIT: can't be here"); }; } @ServiceActivator(inputChannel = "errorChannel") public void processError(Message sendFailedMsg) { LOGGER.info("receive error message: '{}'", sendFailedMsg); LATCH.countDown(); } }
I meant the `Thread.sleep()`
void testSendAndReceiveMessage() throws InterruptedException { LOGGER.info("EventHubsBinderProduceErrorIT begin."); EventHubsBinderProduceErrorIT.LATCH.await(15, TimeUnit.SECONDS); LOGGER.info("Send a message:" + MESSAGE + "."); many.emitNext(new GenericMessage<>(MESSAGE), Sinks.EmitFailureHandler.FAIL_FAST); assertThat(EventHubsBinderProduceErrorIT.LATCH.await(300, TimeUnit.SECONDS)).isTrue(); Thread.sleep(10000); LOGGER.info("EventHubsBinderProduceErrorIT end."); }
Thread.sleep(10000);
void testSendAndReceiveMessage() throws InterruptedException { LOGGER.info("EventHubsBinderProduceErrorIT begin."); EventHubsBinderProduceErrorIT.LATCH.await(15, TimeUnit.SECONDS); LOGGER.info("Send a message:" + MESSAGE + "."); many.emitNext(new GenericMessage<>(MESSAGE), Sinks.EmitFailureHandler.FAIL_FAST); assertThat(EventHubsBinderProduceErrorIT.LATCH.await(300, TimeUnit.SECONDS)).isTrue(); LOGGER.info("EventHubsBinderProduceErrorIT end."); }
class TestConfig { @Bean Sinks.Many<Message<String>> many() { return Sinks.many().unicast().onBackpressureBuffer(); } @Bean Supplier<Flux<Message<String>>> supply(Sinks.Many<Message<String>> many) { return () -> many.asFlux() .doOnNext(m -> LOGGER.info("Manually sending message {}", m.getPayload())) .doOnError(t -> LOGGER.error("Error encountered", t)); } @Bean Consumer<Message<List<String>>> consume() { return message -> { List<String> payload = message.getPayload(); LOGGER.info("EventHubsBinderProduceErrorIT: New message received: '{}'", payload); Assertions.fail("EventHubsBinderProduceErrorIT: can't be here"); }; } @ServiceActivator(inputChannel = "errorChannel") public void receiveProduceErrorMsg(Message sendFailedMsg) { LOGGER.info("receive send failed msg: '{}'", sendFailedMsg); LATCH.countDown(); } }
class TestConfig { @Bean Sinks.Many<Message<String>> many() { return Sinks.many().unicast().onBackpressureBuffer(); } @Bean Supplier<Flux<Message<String>>> supply(Sinks.Many<Message<String>> many) { return () -> many.asFlux() .doOnNext(m -> LOGGER.info("Manually sending message {}", m.getPayload())) .doOnError(t -> LOGGER.error("Error encountered", t)); } @Bean Consumer<Message<List<String>>> consume() { return message -> { List<String> payload = message.getPayload(); LOGGER.info("EventHubsBinderProduceErrorIT: New message received: '{}'", payload); Assertions.fail("EventHubsBinderProduceErrorIT: can't be here"); }; } @ServiceActivator(inputChannel = "errorChannel") public void processError(Message sendFailedMsg) { LOGGER.info("receive error message: '{}'", sendFailedMsg); LATCH.countDown(); } }
should not this be false by default?
public void barrierWithAadAuthorizationTokenProviderType() throws URISyntaxException { TokenCredential tokenCredential = new AadSimpleTokenCredential(TestConfigurations.MASTER_KEY); IAuthorizationTokenProvider authTokenProvider = new RxDocumentClientImpl( new URI(TestConfigurations.HOST), null, null, null, null, new Configs(), null, null, tokenCredential, false, false, false, null, null, new CosmosClientTelemetryConfig().sendClientTelemetryToService(false), null, EnumSet.allOf(TagName.class)); ResourceType resourceType = ResourceType.DocumentCollection; OperationType operationType = OperationType.Read; Document randomResource = new Document(); randomResource.setId(UUID.randomUUID().toString()); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(mockDiagnosticsClientContext(), operationType, resourceType, "/dbs/7mVFAA==/colls/7mVFAP1jpeU=", randomResource, (Map<String, String>) null); RxDocumentServiceRequest barrierRequest = BarrierRequestHelper.createAsync(mockDiagnosticsClientContext(), request, authTokenProvider, 11l, 10l).block(); assertThat(authTokenProvider.getAuthorizationTokenType()).isEqualTo(AuthorizationTokenType.AadToken); assertThat(barrierRequest.authorizationTokenType).isEqualTo(AuthorizationTokenType.AadToken); assertThat(request.authorizationTokenType).isEqualTo(AuthorizationTokenType.PrimaryMasterKey); }
new CosmosClientTelemetryConfig().sendClientTelemetryToService(false),
public void barrierWithAadAuthorizationTokenProviderType() throws URISyntaxException { TokenCredential tokenCredential = new AadSimpleTokenCredential(TestConfigurations.MASTER_KEY); IAuthorizationTokenProvider authTokenProvider = new RxDocumentClientImpl( new URI(TestConfigurations.HOST), null, null, null, null, new Configs(), null, null, tokenCredential, false, false, false, null, null, new CosmosClientTelemetryConfig().sendClientTelemetryToService(false), null, EnumSet.allOf(TagName.class)); ResourceType resourceType = ResourceType.DocumentCollection; OperationType operationType = OperationType.Read; Document randomResource = new Document(); randomResource.setId(UUID.randomUUID().toString()); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(mockDiagnosticsClientContext(), operationType, resourceType, "/dbs/7mVFAA==/colls/7mVFAP1jpeU=", randomResource, (Map<String, String>) null); RxDocumentServiceRequest barrierRequest = BarrierRequestHelper.createAsync(mockDiagnosticsClientContext(), request, authTokenProvider, 11l, 10l).block(); assertThat(authTokenProvider.getAuthorizationTokenType()).isEqualTo(AuthorizationTokenType.AadToken); assertThat(barrierRequest.authorizationTokenType).isEqualTo(AuthorizationTokenType.AadToken); assertThat(request.authorizationTokenType).isEqualTo(AuthorizationTokenType.PrimaryMasterKey); }
class BarrierRequestHelperTest { @Test(groups = "direct") public void barrierBasic() { IAuthorizationTokenProvider authTokenProvider = getIAuthorizationTokenProvider(); for (ResourceType resourceType : ResourceType.values()) { for (OperationType operationType : OperationType.values()) { Document randomResource = new Document(); randomResource.setId(UUID.randomUUID().toString()); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(mockDiagnosticsClientContext(), operationType, resourceType, "/dbs/7mVFAA==/colls/7mVFAP1jpeU=", randomResource, (Map<String, String>) null); BarrierRequestHelper.createAsync(mockDiagnosticsClientContext(), request, authTokenProvider, 10l, 10l).block(); request = RxDocumentServiceRequest.create(mockDiagnosticsClientContext(), operationType, resourceType, "/dbs/7mVFAA==", randomResource, null); request.setResourceId("3"); try { BarrierRequestHelper.createAsync(mockDiagnosticsClientContext(), request, authTokenProvider, 10l, 10l).block(); } catch (Exception e) { if (!BarrierRequestHelper.isCollectionHeadBarrierRequest(resourceType, operationType)) { fail("Should not fail for non-collection head combinations"); } } } } } @Test(groups = "direct") public void barrierDBFeed() { IAuthorizationTokenProvider authTokenProvider = getIAuthorizationTokenProvider(); ResourceType resourceType = ResourceType.DocumentCollection; OperationType operationType = OperationType.Query; Document randomResource = new Document(); randomResource.setId(UUID.randomUUID().toString()); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(mockDiagnosticsClientContext(), operationType, resourceType, "/dbs/7mVFAA==/colls/7mVFAP1jpeU=", randomResource, (Map<String, String>) null); RxDocumentServiceRequest barrierRequest = BarrierRequestHelper.createAsync(mockDiagnosticsClientContext(), request, authTokenProvider, 11l, 10l).block(); assertThat(barrierRequest.getOperationType()).isEqualTo(OperationType.HeadFeed); 
assertThat(barrierRequest.getResourceType()).isEqualTo(ResourceType.Database); assertThat(getTargetGlobalLsn(barrierRequest)).isEqualTo(10l); assertThat(getTargetLsn(barrierRequest)).isEqualTo(11l); } @Test(groups = "direct") public void barrierDocumentQueryNameBasedRequest() { IAuthorizationTokenProvider authTokenProvider = getIAuthorizationTokenProvider(); ResourceType resourceType = ResourceType.Document; OperationType operationType = OperationType.Query; Document randomResource = new Document(); randomResource.setId(UUID.randomUUID().toString()); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(mockDiagnosticsClientContext(), operationType, resourceType, "/dbs/dbname/colls/collname", randomResource, (Map<String, String>) null); RxDocumentServiceRequest barrierRequest = BarrierRequestHelper.createAsync(mockDiagnosticsClientContext(), request, authTokenProvider, 11l, 10l).block(); assertThat(barrierRequest.getOperationType()).isEqualTo(OperationType.Head); assertThat(barrierRequest.getResourceType()).isEqualTo(ResourceType.DocumentCollection); assertThat(barrierRequest.getResourceAddress()).isEqualTo("dbs/dbname/colls/collname"); assertThat(getTargetGlobalLsn(barrierRequest)).isEqualTo(10l); assertThat(getTargetLsn(barrierRequest)).isEqualTo(11l); } @Test(groups = "direct") public void barrierDocumentReadNameBasedRequest() { IAuthorizationTokenProvider authTokenProvider = getIAuthorizationTokenProvider(); ResourceType resourceType = ResourceType.Document; OperationType operationType = OperationType.Read; Document randomResource = new Document(); randomResource.setId(UUID.randomUUID().toString()); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(mockDiagnosticsClientContext(), operationType, resourceType, "/dbs/dbname/colls/collname", randomResource, (Map<String, String>) null); RxDocumentServiceRequest barrierRequest = BarrierRequestHelper.createAsync(mockDiagnosticsClientContext(), request, authTokenProvider, 11l, 
10l).block(); assertThat(barrierRequest.getOperationType()).isEqualTo(OperationType.Head); assertThat(barrierRequest.getResourceType()).isEqualTo(ResourceType.DocumentCollection); assertThat(barrierRequest.getResourceAddress()).isEqualTo("dbs/dbname/colls/collname"); assertThat(getTargetGlobalLsn(barrierRequest)).isEqualTo(10l); assertThat(getTargetLsn(barrierRequest)).isEqualTo(11l); assertThat(barrierRequest.getIsNameBased()).isEqualTo(true); } @Test(groups = "direct") public void barrierDocumentReadRidBasedRequest() { IAuthorizationTokenProvider authTokenProvider = getIAuthorizationTokenProvider(); ResourceType resourceType = ResourceType.Document; OperationType operationType = OperationType.Read; Document randomResource = new Document(); randomResource.setId(UUID.randomUUID().toString()); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(mockDiagnosticsClientContext(), operationType, "7mVFAA==", resourceType, (Map<String, String>) null); RxDocumentServiceRequest barrierRequest = BarrierRequestHelper.createAsync(mockDiagnosticsClientContext(), request, authTokenProvider, 11l, 10l).block(); assertThat(barrierRequest.getOperationType()).isEqualTo(OperationType.Head); assertThat(barrierRequest.getResourceType()).isEqualTo(ResourceType.DocumentCollection); assertThat(barrierRequest.getResourceAddress()).isEqualTo("7mVFAA=="); assertThat(getTargetGlobalLsn(barrierRequest)).isEqualTo(10l); assertThat(getTargetLsn(barrierRequest)).isEqualTo(11l); assertThat(barrierRequest.getIsNameBased()).isEqualTo(false); } @Test(groups = "direct") @DataProvider(name = "isCollectionHeadBarrierRequestArgProvider") public Object[][] isCollectionHeadBarrierRequestArgProvider() { return new Object[][]{ {ResourceType.Attachment, null, true}, {ResourceType.Document, null, true}, {ResourceType.Conflict, null, true}, {ResourceType.StoredProcedure, null, true}, {ResourceType.Attachment, null, true}, {ResourceType.Trigger, null, true}, {ResourceType.DocumentCollection, 
OperationType.ReadFeed, false}, {ResourceType.DocumentCollection, OperationType.Query, false}, {ResourceType.DocumentCollection, OperationType.SqlQuery, false}, {ResourceType.DocumentCollection, OperationType.Create, true}, {ResourceType.DocumentCollection, OperationType.Read, true}, {ResourceType.DocumentCollection, OperationType.Replace, true}, {ResourceType.DocumentCollection, OperationType.ExecuteJavaScript, true}, {ResourceType.PartitionKeyRange, null, false}, }; } @Test(groups = "direct", dataProvider = "isCollectionHeadBarrierRequestArgProvider") public void isCollectionHeadBarrierRequest(ResourceType resourceType, OperationType operationType, boolean expectedResult) { if (operationType != null) { boolean actual = BarrierRequestHelper.isCollectionHeadBarrierRequest(resourceType, operationType); assertThat(actual).isEqualTo(expectedResult); } else { for (OperationType type : OperationType.values()) { boolean actual = BarrierRequestHelper.isCollectionHeadBarrierRequest(resourceType, type); assertThat(actual).isEqualTo(expectedResult); } } } private IAuthorizationTokenProvider getIAuthorizationTokenProvider() { return (RxDocumentClientImpl) new AsyncDocumentClient.Builder() .withMasterKeyOrResourceToken(TestConfigurations.MASTER_KEY) .withServiceEndpoint(TestConfigurations.HOST) .withClientTelemetryConfig( new CosmosClientTelemetryConfig() .sendClientTelemetryToService(ClientTelemetry.DEFAULT_CLIENT_TELEMETRY_ENABLED)) .build(); } private String getHeaderValue(RxDocumentServiceRequest req, String name) { return req.getHeaders().get(name); } private String getPartitionKey(RxDocumentServiceRequest req) { return getHeaderValue(req, HttpConstants.HttpHeaders.PARTITION_KEY); } private String getCollectionRid(RxDocumentServiceRequest req) { return getHeaderValue(req, WFConstants.BackendHeaders.COLLECTION_RID); } private PartitionKeyRangeIdentity getPartitionKeyRangeIdentity(RxDocumentServiceRequest req) { return req.getPartitionKeyRangeIdentity(); } private Long 
getTargetLsn(RxDocumentServiceRequest req) { return Long.parseLong(getHeaderValue(req, HttpConstants.HttpHeaders.TARGET_LSN)); } private Long getTargetGlobalLsn(RxDocumentServiceRequest req) { return Long.parseLong(getHeaderValue(req, HttpConstants.HttpHeaders.TARGET_GLOBAL_COMMITTED_LSN)); } class AadSimpleTokenCredential implements TokenCredential { private final String keyEncoded; private final String AAD_HEADER_COSMOS_EMULATOR = "{\"typ\":\"JWT\",\"alg\":\"RS256\",\"x5t\":\"CosmosEmulatorPrimaryMaster\",\"kid\":\"CosmosEmulatorPrimaryMaster\"}"; private final String AAD_CLAIM_COSMOS_EMULATOR_FORMAT = "{\"aud\":\"https: public AadSimpleTokenCredential(String emulatorKey) { if (emulatorKey == null || emulatorKey.isEmpty()) { throw new IllegalArgumentException("emulatorKey"); } this.keyEncoded = Utils.encodeUrlBase64String(emulatorKey.getBytes()); } @Override public Mono<AccessToken> getToken(TokenRequestContext tokenRequestContext) { String aadToken = emulatorKey_based_AAD_String(); return Mono.just(new AccessToken(aadToken, OffsetDateTime.now().plusHours(2))); } String emulatorKey_based_AAD_String() { ZonedDateTime currentTime = ZonedDateTime.now(); String part1Encoded = Utils.encodeUrlBase64String(AAD_HEADER_COSMOS_EMULATOR.getBytes()); String part2 = String.format(AAD_CLAIM_COSMOS_EMULATOR_FORMAT, currentTime.toEpochSecond(), currentTime.toEpochSecond(), currentTime.plusHours(2).toEpochSecond()); String part2Encoded = Utils.encodeUrlBase64String(part2.getBytes()); return part1Encoded + "." + part2Encoded + "." + this.keyEncoded; } } }
class BarrierRequestHelperTest { @Test(groups = "direct") public void barrierBasic() { IAuthorizationTokenProvider authTokenProvider = getIAuthorizationTokenProvider(); for (ResourceType resourceType : ResourceType.values()) { for (OperationType operationType : OperationType.values()) { Document randomResource = new Document(); randomResource.setId(UUID.randomUUID().toString()); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(mockDiagnosticsClientContext(), operationType, resourceType, "/dbs/7mVFAA==/colls/7mVFAP1jpeU=", randomResource, (Map<String, String>) null); BarrierRequestHelper.createAsync(mockDiagnosticsClientContext(), request, authTokenProvider, 10l, 10l).block(); request = RxDocumentServiceRequest.create(mockDiagnosticsClientContext(), operationType, resourceType, "/dbs/7mVFAA==", randomResource, null); request.setResourceId("3"); try { BarrierRequestHelper.createAsync(mockDiagnosticsClientContext(), request, authTokenProvider, 10l, 10l).block(); } catch (Exception e) { if (!BarrierRequestHelper.isCollectionHeadBarrierRequest(resourceType, operationType)) { fail("Should not fail for non-collection head combinations"); } } } } } @Test(groups = "direct") public void barrierDBFeed() { IAuthorizationTokenProvider authTokenProvider = getIAuthorizationTokenProvider(); ResourceType resourceType = ResourceType.DocumentCollection; OperationType operationType = OperationType.Query; Document randomResource = new Document(); randomResource.setId(UUID.randomUUID().toString()); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(mockDiagnosticsClientContext(), operationType, resourceType, "/dbs/7mVFAA==/colls/7mVFAP1jpeU=", randomResource, (Map<String, String>) null); RxDocumentServiceRequest barrierRequest = BarrierRequestHelper.createAsync(mockDiagnosticsClientContext(), request, authTokenProvider, 11l, 10l).block(); assertThat(barrierRequest.getOperationType()).isEqualTo(OperationType.HeadFeed); 
assertThat(barrierRequest.getResourceType()).isEqualTo(ResourceType.Database); assertThat(getTargetGlobalLsn(barrierRequest)).isEqualTo(10l); assertThat(getTargetLsn(barrierRequest)).isEqualTo(11l); } @Test(groups = "direct") public void barrierDocumentQueryNameBasedRequest() { IAuthorizationTokenProvider authTokenProvider = getIAuthorizationTokenProvider(); ResourceType resourceType = ResourceType.Document; OperationType operationType = OperationType.Query; Document randomResource = new Document(); randomResource.setId(UUID.randomUUID().toString()); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(mockDiagnosticsClientContext(), operationType, resourceType, "/dbs/dbname/colls/collname", randomResource, (Map<String, String>) null); RxDocumentServiceRequest barrierRequest = BarrierRequestHelper.createAsync(mockDiagnosticsClientContext(), request, authTokenProvider, 11l, 10l).block(); assertThat(barrierRequest.getOperationType()).isEqualTo(OperationType.Head); assertThat(barrierRequest.getResourceType()).isEqualTo(ResourceType.DocumentCollection); assertThat(barrierRequest.getResourceAddress()).isEqualTo("dbs/dbname/colls/collname"); assertThat(getTargetGlobalLsn(barrierRequest)).isEqualTo(10l); assertThat(getTargetLsn(barrierRequest)).isEqualTo(11l); } @Test(groups = "direct") public void barrierDocumentReadNameBasedRequest() { IAuthorizationTokenProvider authTokenProvider = getIAuthorizationTokenProvider(); ResourceType resourceType = ResourceType.Document; OperationType operationType = OperationType.Read; Document randomResource = new Document(); randomResource.setId(UUID.randomUUID().toString()); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(mockDiagnosticsClientContext(), operationType, resourceType, "/dbs/dbname/colls/collname", randomResource, (Map<String, String>) null); RxDocumentServiceRequest barrierRequest = BarrierRequestHelper.createAsync(mockDiagnosticsClientContext(), request, authTokenProvider, 11l, 
10l).block(); assertThat(barrierRequest.getOperationType()).isEqualTo(OperationType.Head); assertThat(barrierRequest.getResourceType()).isEqualTo(ResourceType.DocumentCollection); assertThat(barrierRequest.getResourceAddress()).isEqualTo("dbs/dbname/colls/collname"); assertThat(getTargetGlobalLsn(barrierRequest)).isEqualTo(10l); assertThat(getTargetLsn(barrierRequest)).isEqualTo(11l); assertThat(barrierRequest.getIsNameBased()).isEqualTo(true); } @Test(groups = "direct") public void barrierDocumentReadRidBasedRequest() { IAuthorizationTokenProvider authTokenProvider = getIAuthorizationTokenProvider(); ResourceType resourceType = ResourceType.Document; OperationType operationType = OperationType.Read; Document randomResource = new Document(); randomResource.setId(UUID.randomUUID().toString()); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(mockDiagnosticsClientContext(), operationType, "7mVFAA==", resourceType, (Map<String, String>) null); RxDocumentServiceRequest barrierRequest = BarrierRequestHelper.createAsync(mockDiagnosticsClientContext(), request, authTokenProvider, 11l, 10l).block(); assertThat(barrierRequest.getOperationType()).isEqualTo(OperationType.Head); assertThat(barrierRequest.getResourceType()).isEqualTo(ResourceType.DocumentCollection); assertThat(barrierRequest.getResourceAddress()).isEqualTo("7mVFAA=="); assertThat(getTargetGlobalLsn(barrierRequest)).isEqualTo(10l); assertThat(getTargetLsn(barrierRequest)).isEqualTo(11l); assertThat(barrierRequest.getIsNameBased()).isEqualTo(false); } @Test(groups = "direct") @DataProvider(name = "isCollectionHeadBarrierRequestArgProvider") public Object[][] isCollectionHeadBarrierRequestArgProvider() { return new Object[][]{ {ResourceType.Attachment, null, true}, {ResourceType.Document, null, true}, {ResourceType.Conflict, null, true}, {ResourceType.StoredProcedure, null, true}, {ResourceType.Attachment, null, true}, {ResourceType.Trigger, null, true}, {ResourceType.DocumentCollection, 
OperationType.ReadFeed, false}, {ResourceType.DocumentCollection, OperationType.Query, false}, {ResourceType.DocumentCollection, OperationType.SqlQuery, false}, {ResourceType.DocumentCollection, OperationType.Create, true}, {ResourceType.DocumentCollection, OperationType.Read, true}, {ResourceType.DocumentCollection, OperationType.Replace, true}, {ResourceType.DocumentCollection, OperationType.ExecuteJavaScript, true}, {ResourceType.PartitionKeyRange, null, false}, }; } @Test(groups = "direct", dataProvider = "isCollectionHeadBarrierRequestArgProvider") public void isCollectionHeadBarrierRequest(ResourceType resourceType, OperationType operationType, boolean expectedResult) { if (operationType != null) { boolean actual = BarrierRequestHelper.isCollectionHeadBarrierRequest(resourceType, operationType); assertThat(actual).isEqualTo(expectedResult); } else { for (OperationType type : OperationType.values()) { boolean actual = BarrierRequestHelper.isCollectionHeadBarrierRequest(resourceType, type); assertThat(actual).isEqualTo(expectedResult); } } } private IAuthorizationTokenProvider getIAuthorizationTokenProvider() { return (RxDocumentClientImpl) new AsyncDocumentClient.Builder() .withMasterKeyOrResourceToken(TestConfigurations.MASTER_KEY) .withServiceEndpoint(TestConfigurations.HOST) .withClientTelemetryConfig( new CosmosClientTelemetryConfig() .sendClientTelemetryToService(ClientTelemetry.DEFAULT_CLIENT_TELEMETRY_ENABLED)) .build(); } private String getHeaderValue(RxDocumentServiceRequest req, String name) { return req.getHeaders().get(name); } private String getPartitionKey(RxDocumentServiceRequest req) { return getHeaderValue(req, HttpConstants.HttpHeaders.PARTITION_KEY); } private String getCollectionRid(RxDocumentServiceRequest req) { return getHeaderValue(req, WFConstants.BackendHeaders.COLLECTION_RID); } private PartitionKeyRangeIdentity getPartitionKeyRangeIdentity(RxDocumentServiceRequest req) { return req.getPartitionKeyRangeIdentity(); } private Long 
getTargetLsn(RxDocumentServiceRequest req) { return Long.parseLong(getHeaderValue(req, HttpConstants.HttpHeaders.TARGET_LSN)); } private Long getTargetGlobalLsn(RxDocumentServiceRequest req) { return Long.parseLong(getHeaderValue(req, HttpConstants.HttpHeaders.TARGET_GLOBAL_COMMITTED_LSN)); } class AadSimpleTokenCredential implements TokenCredential { private final String keyEncoded; private final String AAD_HEADER_COSMOS_EMULATOR = "{\"typ\":\"JWT\",\"alg\":\"RS256\",\"x5t\":\"CosmosEmulatorPrimaryMaster\",\"kid\":\"CosmosEmulatorPrimaryMaster\"}"; private final String AAD_CLAIM_COSMOS_EMULATOR_FORMAT = "{\"aud\":\"https: public AadSimpleTokenCredential(String emulatorKey) { if (emulatorKey == null || emulatorKey.isEmpty()) { throw new IllegalArgumentException("emulatorKey"); } this.keyEncoded = Utils.encodeUrlBase64String(emulatorKey.getBytes()); } @Override public Mono<AccessToken> getToken(TokenRequestContext tokenRequestContext) { String aadToken = emulatorKey_based_AAD_String(); return Mono.just(new AccessToken(aadToken, OffsetDateTime.now().plusHours(2))); } String emulatorKey_based_AAD_String() { ZonedDateTime currentTime = ZonedDateTime.now(); String part1Encoded = Utils.encodeUrlBase64String(AAD_HEADER_COSMOS_EMULATOR.getBytes()); String part2 = String.format(AAD_CLAIM_COSMOS_EMULATOR_FORMAT, currentTime.toEpochSecond(), currentTime.toEpochSecond(), currentTime.plusHours(2).toEpochSecond()); String part2Encoded = Utils.encodeUrlBase64String(part2.getBytes()); return part1Encoded + "." + part2Encoded + "." + this.keyEncoded; } } }
good capture, forget to delete it.
void testSendAndReceiveMessage() throws InterruptedException { LOGGER.info("EventHubsBinderProduceErrorIT begin."); EventHubsBinderProduceErrorIT.LATCH.await(15, TimeUnit.SECONDS); LOGGER.info("Send a message:" + MESSAGE + "."); many.emitNext(new GenericMessage<>(MESSAGE), Sinks.EmitFailureHandler.FAIL_FAST); assertThat(EventHubsBinderProduceErrorIT.LATCH.await(300, TimeUnit.SECONDS)).isTrue(); Thread.sleep(10000); LOGGER.info("EventHubsBinderProduceErrorIT end."); }
Thread.sleep(10000);
void testSendAndReceiveMessage() throws InterruptedException { LOGGER.info("EventHubsBinderProduceErrorIT begin."); EventHubsBinderProduceErrorIT.LATCH.await(15, TimeUnit.SECONDS); LOGGER.info("Send a message:" + MESSAGE + "."); many.emitNext(new GenericMessage<>(MESSAGE), Sinks.EmitFailureHandler.FAIL_FAST); assertThat(EventHubsBinderProduceErrorIT.LATCH.await(300, TimeUnit.SECONDS)).isTrue(); LOGGER.info("EventHubsBinderProduceErrorIT end."); }
class TestConfig { @Bean Sinks.Many<Message<String>> many() { return Sinks.many().unicast().onBackpressureBuffer(); } @Bean Supplier<Flux<Message<String>>> supply(Sinks.Many<Message<String>> many) { return () -> many.asFlux() .doOnNext(m -> LOGGER.info("Manually sending message {}", m.getPayload())) .doOnError(t -> LOGGER.error("Error encountered", t)); } @Bean Consumer<Message<List<String>>> consume() { return message -> { List<String> payload = message.getPayload(); LOGGER.info("EventHubsBinderProduceErrorIT: New message received: '{}'", payload); Assertions.fail("EventHubsBinderProduceErrorIT: can't be here"); }; } @ServiceActivator(inputChannel = "errorChannel") public void receiveProduceErrorMsg(Message sendFailedMsg) { LOGGER.info("receive send failed msg: '{}'", sendFailedMsg); LATCH.countDown(); } }
class TestConfig { @Bean Sinks.Many<Message<String>> many() { return Sinks.many().unicast().onBackpressureBuffer(); } @Bean Supplier<Flux<Message<String>>> supply(Sinks.Many<Message<String>> many) { return () -> many.asFlux() .doOnNext(m -> LOGGER.info("Manually sending message {}", m.getPayload())) .doOnError(t -> LOGGER.error("Error encountered", t)); } @Bean Consumer<Message<List<String>>> consume() { return message -> { List<String> payload = message.getPayload(); LOGGER.info("EventHubsBinderProduceErrorIT: New message received: '{}'", payload); Assertions.fail("EventHubsBinderProduceErrorIT: can't be here"); }; } @ServiceActivator(inputChannel = "errorChannel") public void processError(Message sendFailedMsg) { LOGGER.info("receive error message: '{}'", sendFailedMsg); LATCH.countDown(); } }
nit: Should we also update the info, from `receive produce error message` to `receive error message`?
public void processError(Message sendFailedMsg) { LOGGER.info("receive produce error message: '{}'", sendFailedMsg.getPayload()); }
LOGGER.info("receive produce error message: '{}'", sendFailedMsg.getPayload());
public void processError(Message sendFailedMsg) { LOGGER.info("receive error message: '{}'", sendFailedMsg.getPayload()); }
class TestConfig { @Bean Sinks.Many<Message<String>> many() { return Sinks.many().unicast().onBackpressureBuffer(); } @Bean Supplier<Flux<Message<String>>> supply(Sinks.Many<Message<String>> many) { return () -> many.asFlux() .doOnNext(m -> LOGGER.info("Manually sending message {}", m)) .doOnError(t -> LOGGER.error("Error encountered", t)); } @Bean Consumer<Message<String>> consume() { return message -> { LOGGER.info("EventHubBinderManualModeIT: New message received: '{}'", message.getPayload()); if (message.getPayload().equals(EventHubsBinderManualModeIT.MESSAGE)) { Checkpointer checkpointer = (Checkpointer) message.getHeaders().get(AzureHeaders.CHECKPOINTER); checkpointer.success().handle((r, ex) -> { Assertions.assertNull(ex); }); LATCH.countDown(); } }; } @ServiceActivator(inputChannel = "errorChannel") }
class TestConfig { @Bean Sinks.Many<Message<String>> many() { return Sinks.many().unicast().onBackpressureBuffer(); } @Bean Supplier<Flux<Message<String>>> supply(Sinks.Many<Message<String>> many) { return () -> many.asFlux() .doOnNext(m -> LOGGER.info("Manually sending message {}", m)) .doOnError(t -> LOGGER.error("Error encountered", t)); } @Bean Consumer<Message<String>> consume() { return message -> { LOGGER.info("EventHubBinderManualModeIT: New message received: '{}'", message.getPayload()); if (message.getPayload().equals(EventHubsBinderManualModeIT.MESSAGE)) { Checkpointer checkpointer = (Checkpointer) message.getHeaders().get(AzureHeaders.CHECKPOINTER); checkpointer.success().handle((r, ex) -> { Assertions.assertNull(ex); }); LATCH.countDown(); } }; } @ServiceActivator(inputChannel = "errorChannel") }
hmm, what if we just track the flag in this step -> ``` private boolean explicitlyClientTelemetryEnabled; public CosmosClientBuilder clientTelemetryEnabled(boolean clientTelemetryEnabled) { explicitlyClientTelemetryEnabled = clientTelemetryEnabled; } ``` same for clientTelemetryConfig and then in clientBuilder.build() --> we kind combine them together
public CosmosClientBuilder clientTelemetryEnabled(boolean clientTelemetryEnabled) { ImplementationBridgeHelpers.CosmosClientTelemetryConfigHelper.CosmosClientTelemetryConfigAccessor accessor = ImplementationBridgeHelpers .CosmosClientTelemetryConfigHelper .getCosmosClientTelemetryConfigAccessor(); Boolean explicitlySetInConfig = accessor.isSendClientTelemetryToServiceEnabled(this.clientTelemetryConfig); if (explicitlySetInConfig != null) { CosmosClientTelemetryConfig newTelemetryConfig = accessor .createSnapshot(this.clientTelemetryConfig, clientTelemetryEnabled); accessor.resetIsSendClientTelemetryToServiceEnabled(newTelemetryConfig); this.clientTelemetryConfig = newTelemetryConfig; } this.clientTelemetryEnabledOverride = clientTelemetryEnabled; return this; }
ImplementationBridgeHelpers.CosmosClientTelemetryConfigHelper.CosmosClientTelemetryConfigAccessor accessor =
public CosmosClientBuilder clientTelemetryEnabled(boolean clientTelemetryEnabled) { ImplementationBridgeHelpers.CosmosClientTelemetryConfigHelper.CosmosClientTelemetryConfigAccessor accessor = ImplementationBridgeHelpers .CosmosClientTelemetryConfigHelper .getCosmosClientTelemetryConfigAccessor(); Boolean explicitlySetInConfig = accessor.isSendClientTelemetryToServiceEnabled(this.clientTelemetryConfig); if (explicitlySetInConfig != null) { CosmosClientTelemetryConfig newTelemetryConfig = accessor .createSnapshot(this.clientTelemetryConfig, clientTelemetryEnabled); accessor.resetIsSendClientTelemetryToServiceEnabled(newTelemetryConfig); this.clientTelemetryConfig = newTelemetryConfig; } this.clientTelemetryEnabledOverride = clientTelemetryEnabled; return this; }
class for * more details. * * @param throttlingRetryOptions the RetryOptions instance. * @return current CosmosClientBuilder * @throws IllegalArgumentException thrown if an error occurs */ public CosmosClientBuilder throttlingRetryOptions(ThrottlingRetryOptions throttlingRetryOptions) { this.throttlingRetryOptions = throttlingRetryOptions; return this; }
class for * more details. * * @param throttlingRetryOptions the RetryOptions instance. * @return current CosmosClientBuilder * @throws IllegalArgumentException thrown if an error occurs */ public CosmosClientBuilder throttlingRetryOptions(ThrottlingRetryOptions throttlingRetryOptions) { this.throttlingRetryOptions = throttlingRetryOptions; return this; }
Order of calls is important - check the unit test I added - that will explain
public CosmosClientBuilder clientTelemetryEnabled(boolean clientTelemetryEnabled) { ImplementationBridgeHelpers.CosmosClientTelemetryConfigHelper.CosmosClientTelemetryConfigAccessor accessor = ImplementationBridgeHelpers .CosmosClientTelemetryConfigHelper .getCosmosClientTelemetryConfigAccessor(); Boolean explicitlySetInConfig = accessor.isSendClientTelemetryToServiceEnabled(this.clientTelemetryConfig); if (explicitlySetInConfig != null) { CosmosClientTelemetryConfig newTelemetryConfig = accessor .createSnapshot(this.clientTelemetryConfig, clientTelemetryEnabled); accessor.resetIsSendClientTelemetryToServiceEnabled(newTelemetryConfig); this.clientTelemetryConfig = newTelemetryConfig; } this.clientTelemetryEnabledOverride = clientTelemetryEnabled; return this; }
ImplementationBridgeHelpers.CosmosClientTelemetryConfigHelper.CosmosClientTelemetryConfigAccessor accessor =
public CosmosClientBuilder clientTelemetryEnabled(boolean clientTelemetryEnabled) { ImplementationBridgeHelpers.CosmosClientTelemetryConfigHelper.CosmosClientTelemetryConfigAccessor accessor = ImplementationBridgeHelpers .CosmosClientTelemetryConfigHelper .getCosmosClientTelemetryConfigAccessor(); Boolean explicitlySetInConfig = accessor.isSendClientTelemetryToServiceEnabled(this.clientTelemetryConfig); if (explicitlySetInConfig != null) { CosmosClientTelemetryConfig newTelemetryConfig = accessor .createSnapshot(this.clientTelemetryConfig, clientTelemetryEnabled); accessor.resetIsSendClientTelemetryToServiceEnabled(newTelemetryConfig); this.clientTelemetryConfig = newTelemetryConfig; } this.clientTelemetryEnabledOverride = clientTelemetryEnabled; return this; }
class for * more details. * * @param throttlingRetryOptions the RetryOptions instance. * @return current CosmosClientBuilder * @throws IllegalArgumentException thrown if an error occurs */ public CosmosClientBuilder throttlingRetryOptions(ThrottlingRetryOptions throttlingRetryOptions) { this.throttlingRetryOptions = throttlingRetryOptions; return this; }
class for * more details. * * @param throttlingRetryOptions the RetryOptions instance. * @return current CosmosClientBuilder * @throws IllegalArgumentException thrown if an error occurs */ public CosmosClientBuilder throttlingRetryOptions(ThrottlingRetryOptions throttlingRetryOptions) { this.throttlingRetryOptions = throttlingRetryOptions; return this; }
when coming thourgh public API = yes. But some tests create RxDocumentSeviceClient manually - and there it is expected that the effecive flag is passed down. CosmosClinetBuilder is doing it when coming through public API.
public void barrierWithAadAuthorizationTokenProviderType() throws URISyntaxException { TokenCredential tokenCredential = new AadSimpleTokenCredential(TestConfigurations.MASTER_KEY); IAuthorizationTokenProvider authTokenProvider = new RxDocumentClientImpl( new URI(TestConfigurations.HOST), null, null, null, null, new Configs(), null, null, tokenCredential, false, false, false, null, null, new CosmosClientTelemetryConfig().sendClientTelemetryToService(false), null, EnumSet.allOf(TagName.class)); ResourceType resourceType = ResourceType.DocumentCollection; OperationType operationType = OperationType.Read; Document randomResource = new Document(); randomResource.setId(UUID.randomUUID().toString()); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(mockDiagnosticsClientContext(), operationType, resourceType, "/dbs/7mVFAA==/colls/7mVFAP1jpeU=", randomResource, (Map<String, String>) null); RxDocumentServiceRequest barrierRequest = BarrierRequestHelper.createAsync(mockDiagnosticsClientContext(), request, authTokenProvider, 11l, 10l).block(); assertThat(authTokenProvider.getAuthorizationTokenType()).isEqualTo(AuthorizationTokenType.AadToken); assertThat(barrierRequest.authorizationTokenType).isEqualTo(AuthorizationTokenType.AadToken); assertThat(request.authorizationTokenType).isEqualTo(AuthorizationTokenType.PrimaryMasterKey); }
new CosmosClientTelemetryConfig().sendClientTelemetryToService(false),
public void barrierWithAadAuthorizationTokenProviderType() throws URISyntaxException { TokenCredential tokenCredential = new AadSimpleTokenCredential(TestConfigurations.MASTER_KEY); IAuthorizationTokenProvider authTokenProvider = new RxDocumentClientImpl( new URI(TestConfigurations.HOST), null, null, null, null, new Configs(), null, null, tokenCredential, false, false, false, null, null, new CosmosClientTelemetryConfig().sendClientTelemetryToService(false), null, EnumSet.allOf(TagName.class)); ResourceType resourceType = ResourceType.DocumentCollection; OperationType operationType = OperationType.Read; Document randomResource = new Document(); randomResource.setId(UUID.randomUUID().toString()); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(mockDiagnosticsClientContext(), operationType, resourceType, "/dbs/7mVFAA==/colls/7mVFAP1jpeU=", randomResource, (Map<String, String>) null); RxDocumentServiceRequest barrierRequest = BarrierRequestHelper.createAsync(mockDiagnosticsClientContext(), request, authTokenProvider, 11l, 10l).block(); assertThat(authTokenProvider.getAuthorizationTokenType()).isEqualTo(AuthorizationTokenType.AadToken); assertThat(barrierRequest.authorizationTokenType).isEqualTo(AuthorizationTokenType.AadToken); assertThat(request.authorizationTokenType).isEqualTo(AuthorizationTokenType.PrimaryMasterKey); }
class BarrierRequestHelperTest { @Test(groups = "direct") public void barrierBasic() { IAuthorizationTokenProvider authTokenProvider = getIAuthorizationTokenProvider(); for (ResourceType resourceType : ResourceType.values()) { for (OperationType operationType : OperationType.values()) { Document randomResource = new Document(); randomResource.setId(UUID.randomUUID().toString()); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(mockDiagnosticsClientContext(), operationType, resourceType, "/dbs/7mVFAA==/colls/7mVFAP1jpeU=", randomResource, (Map<String, String>) null); BarrierRequestHelper.createAsync(mockDiagnosticsClientContext(), request, authTokenProvider, 10l, 10l).block(); request = RxDocumentServiceRequest.create(mockDiagnosticsClientContext(), operationType, resourceType, "/dbs/7mVFAA==", randomResource, null); request.setResourceId("3"); try { BarrierRequestHelper.createAsync(mockDiagnosticsClientContext(), request, authTokenProvider, 10l, 10l).block(); } catch (Exception e) { if (!BarrierRequestHelper.isCollectionHeadBarrierRequest(resourceType, operationType)) { fail("Should not fail for non-collection head combinations"); } } } } } @Test(groups = "direct") public void barrierDBFeed() { IAuthorizationTokenProvider authTokenProvider = getIAuthorizationTokenProvider(); ResourceType resourceType = ResourceType.DocumentCollection; OperationType operationType = OperationType.Query; Document randomResource = new Document(); randomResource.setId(UUID.randomUUID().toString()); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(mockDiagnosticsClientContext(), operationType, resourceType, "/dbs/7mVFAA==/colls/7mVFAP1jpeU=", randomResource, (Map<String, String>) null); RxDocumentServiceRequest barrierRequest = BarrierRequestHelper.createAsync(mockDiagnosticsClientContext(), request, authTokenProvider, 11l, 10l).block(); assertThat(barrierRequest.getOperationType()).isEqualTo(OperationType.HeadFeed); 
assertThat(barrierRequest.getResourceType()).isEqualTo(ResourceType.Database); assertThat(getTargetGlobalLsn(barrierRequest)).isEqualTo(10l); assertThat(getTargetLsn(barrierRequest)).isEqualTo(11l); } @Test(groups = "direct") public void barrierDocumentQueryNameBasedRequest() { IAuthorizationTokenProvider authTokenProvider = getIAuthorizationTokenProvider(); ResourceType resourceType = ResourceType.Document; OperationType operationType = OperationType.Query; Document randomResource = new Document(); randomResource.setId(UUID.randomUUID().toString()); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(mockDiagnosticsClientContext(), operationType, resourceType, "/dbs/dbname/colls/collname", randomResource, (Map<String, String>) null); RxDocumentServiceRequest barrierRequest = BarrierRequestHelper.createAsync(mockDiagnosticsClientContext(), request, authTokenProvider, 11l, 10l).block(); assertThat(barrierRequest.getOperationType()).isEqualTo(OperationType.Head); assertThat(barrierRequest.getResourceType()).isEqualTo(ResourceType.DocumentCollection); assertThat(barrierRequest.getResourceAddress()).isEqualTo("dbs/dbname/colls/collname"); assertThat(getTargetGlobalLsn(barrierRequest)).isEqualTo(10l); assertThat(getTargetLsn(barrierRequest)).isEqualTo(11l); } @Test(groups = "direct") public void barrierDocumentReadNameBasedRequest() { IAuthorizationTokenProvider authTokenProvider = getIAuthorizationTokenProvider(); ResourceType resourceType = ResourceType.Document; OperationType operationType = OperationType.Read; Document randomResource = new Document(); randomResource.setId(UUID.randomUUID().toString()); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(mockDiagnosticsClientContext(), operationType, resourceType, "/dbs/dbname/colls/collname", randomResource, (Map<String, String>) null); RxDocumentServiceRequest barrierRequest = BarrierRequestHelper.createAsync(mockDiagnosticsClientContext(), request, authTokenProvider, 11l, 
10l).block(); assertThat(barrierRequest.getOperationType()).isEqualTo(OperationType.Head); assertThat(barrierRequest.getResourceType()).isEqualTo(ResourceType.DocumentCollection); assertThat(barrierRequest.getResourceAddress()).isEqualTo("dbs/dbname/colls/collname"); assertThat(getTargetGlobalLsn(barrierRequest)).isEqualTo(10l); assertThat(getTargetLsn(barrierRequest)).isEqualTo(11l); assertThat(barrierRequest.getIsNameBased()).isEqualTo(true); } @Test(groups = "direct") public void barrierDocumentReadRidBasedRequest() { IAuthorizationTokenProvider authTokenProvider = getIAuthorizationTokenProvider(); ResourceType resourceType = ResourceType.Document; OperationType operationType = OperationType.Read; Document randomResource = new Document(); randomResource.setId(UUID.randomUUID().toString()); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(mockDiagnosticsClientContext(), operationType, "7mVFAA==", resourceType, (Map<String, String>) null); RxDocumentServiceRequest barrierRequest = BarrierRequestHelper.createAsync(mockDiagnosticsClientContext(), request, authTokenProvider, 11l, 10l).block(); assertThat(barrierRequest.getOperationType()).isEqualTo(OperationType.Head); assertThat(barrierRequest.getResourceType()).isEqualTo(ResourceType.DocumentCollection); assertThat(barrierRequest.getResourceAddress()).isEqualTo("7mVFAA=="); assertThat(getTargetGlobalLsn(barrierRequest)).isEqualTo(10l); assertThat(getTargetLsn(barrierRequest)).isEqualTo(11l); assertThat(barrierRequest.getIsNameBased()).isEqualTo(false); } @Test(groups = "direct") @DataProvider(name = "isCollectionHeadBarrierRequestArgProvider") public Object[][] isCollectionHeadBarrierRequestArgProvider() { return new Object[][]{ {ResourceType.Attachment, null, true}, {ResourceType.Document, null, true}, {ResourceType.Conflict, null, true}, {ResourceType.StoredProcedure, null, true}, {ResourceType.Attachment, null, true}, {ResourceType.Trigger, null, true}, {ResourceType.DocumentCollection, 
OperationType.ReadFeed, false}, {ResourceType.DocumentCollection, OperationType.Query, false}, {ResourceType.DocumentCollection, OperationType.SqlQuery, false}, {ResourceType.DocumentCollection, OperationType.Create, true}, {ResourceType.DocumentCollection, OperationType.Read, true}, {ResourceType.DocumentCollection, OperationType.Replace, true}, {ResourceType.DocumentCollection, OperationType.ExecuteJavaScript, true}, {ResourceType.PartitionKeyRange, null, false}, }; } @Test(groups = "direct", dataProvider = "isCollectionHeadBarrierRequestArgProvider") public void isCollectionHeadBarrierRequest(ResourceType resourceType, OperationType operationType, boolean expectedResult) { if (operationType != null) { boolean actual = BarrierRequestHelper.isCollectionHeadBarrierRequest(resourceType, operationType); assertThat(actual).isEqualTo(expectedResult); } else { for (OperationType type : OperationType.values()) { boolean actual = BarrierRequestHelper.isCollectionHeadBarrierRequest(resourceType, type); assertThat(actual).isEqualTo(expectedResult); } } } private IAuthorizationTokenProvider getIAuthorizationTokenProvider() { return (RxDocumentClientImpl) new AsyncDocumentClient.Builder() .withMasterKeyOrResourceToken(TestConfigurations.MASTER_KEY) .withServiceEndpoint(TestConfigurations.HOST) .withClientTelemetryConfig( new CosmosClientTelemetryConfig() .sendClientTelemetryToService(ClientTelemetry.DEFAULT_CLIENT_TELEMETRY_ENABLED)) .build(); } private String getHeaderValue(RxDocumentServiceRequest req, String name) { return req.getHeaders().get(name); } private String getPartitionKey(RxDocumentServiceRequest req) { return getHeaderValue(req, HttpConstants.HttpHeaders.PARTITION_KEY); } private String getCollectionRid(RxDocumentServiceRequest req) { return getHeaderValue(req, WFConstants.BackendHeaders.COLLECTION_RID); } private PartitionKeyRangeIdentity getPartitionKeyRangeIdentity(RxDocumentServiceRequest req) { return req.getPartitionKeyRangeIdentity(); } private Long 
getTargetLsn(RxDocumentServiceRequest req) { return Long.parseLong(getHeaderValue(req, HttpConstants.HttpHeaders.TARGET_LSN)); } private Long getTargetGlobalLsn(RxDocumentServiceRequest req) { return Long.parseLong(getHeaderValue(req, HttpConstants.HttpHeaders.TARGET_GLOBAL_COMMITTED_LSN)); } class AadSimpleTokenCredential implements TokenCredential { private final String keyEncoded; private final String AAD_HEADER_COSMOS_EMULATOR = "{\"typ\":\"JWT\",\"alg\":\"RS256\",\"x5t\":\"CosmosEmulatorPrimaryMaster\",\"kid\":\"CosmosEmulatorPrimaryMaster\"}"; private final String AAD_CLAIM_COSMOS_EMULATOR_FORMAT = "{\"aud\":\"https: public AadSimpleTokenCredential(String emulatorKey) { if (emulatorKey == null || emulatorKey.isEmpty()) { throw new IllegalArgumentException("emulatorKey"); } this.keyEncoded = Utils.encodeUrlBase64String(emulatorKey.getBytes()); } @Override public Mono<AccessToken> getToken(TokenRequestContext tokenRequestContext) { String aadToken = emulatorKey_based_AAD_String(); return Mono.just(new AccessToken(aadToken, OffsetDateTime.now().plusHours(2))); } String emulatorKey_based_AAD_String() { ZonedDateTime currentTime = ZonedDateTime.now(); String part1Encoded = Utils.encodeUrlBase64String(AAD_HEADER_COSMOS_EMULATOR.getBytes()); String part2 = String.format(AAD_CLAIM_COSMOS_EMULATOR_FORMAT, currentTime.toEpochSecond(), currentTime.toEpochSecond(), currentTime.plusHours(2).toEpochSecond()); String part2Encoded = Utils.encodeUrlBase64String(part2.getBytes()); return part1Encoded + "." + part2Encoded + "." + this.keyEncoded; } } }
class BarrierRequestHelperTest { @Test(groups = "direct") public void barrierBasic() { IAuthorizationTokenProvider authTokenProvider = getIAuthorizationTokenProvider(); for (ResourceType resourceType : ResourceType.values()) { for (OperationType operationType : OperationType.values()) { Document randomResource = new Document(); randomResource.setId(UUID.randomUUID().toString()); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(mockDiagnosticsClientContext(), operationType, resourceType, "/dbs/7mVFAA==/colls/7mVFAP1jpeU=", randomResource, (Map<String, String>) null); BarrierRequestHelper.createAsync(mockDiagnosticsClientContext(), request, authTokenProvider, 10l, 10l).block(); request = RxDocumentServiceRequest.create(mockDiagnosticsClientContext(), operationType, resourceType, "/dbs/7mVFAA==", randomResource, null); request.setResourceId("3"); try { BarrierRequestHelper.createAsync(mockDiagnosticsClientContext(), request, authTokenProvider, 10l, 10l).block(); } catch (Exception e) { if (!BarrierRequestHelper.isCollectionHeadBarrierRequest(resourceType, operationType)) { fail("Should not fail for non-collection head combinations"); } } } } } @Test(groups = "direct") public void barrierDBFeed() { IAuthorizationTokenProvider authTokenProvider = getIAuthorizationTokenProvider(); ResourceType resourceType = ResourceType.DocumentCollection; OperationType operationType = OperationType.Query; Document randomResource = new Document(); randomResource.setId(UUID.randomUUID().toString()); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(mockDiagnosticsClientContext(), operationType, resourceType, "/dbs/7mVFAA==/colls/7mVFAP1jpeU=", randomResource, (Map<String, String>) null); RxDocumentServiceRequest barrierRequest = BarrierRequestHelper.createAsync(mockDiagnosticsClientContext(), request, authTokenProvider, 11l, 10l).block(); assertThat(barrierRequest.getOperationType()).isEqualTo(OperationType.HeadFeed); 
assertThat(barrierRequest.getResourceType()).isEqualTo(ResourceType.Database); assertThat(getTargetGlobalLsn(barrierRequest)).isEqualTo(10l); assertThat(getTargetLsn(barrierRequest)).isEqualTo(11l); } @Test(groups = "direct") public void barrierDocumentQueryNameBasedRequest() { IAuthorizationTokenProvider authTokenProvider = getIAuthorizationTokenProvider(); ResourceType resourceType = ResourceType.Document; OperationType operationType = OperationType.Query; Document randomResource = new Document(); randomResource.setId(UUID.randomUUID().toString()); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(mockDiagnosticsClientContext(), operationType, resourceType, "/dbs/dbname/colls/collname", randomResource, (Map<String, String>) null); RxDocumentServiceRequest barrierRequest = BarrierRequestHelper.createAsync(mockDiagnosticsClientContext(), request, authTokenProvider, 11l, 10l).block(); assertThat(barrierRequest.getOperationType()).isEqualTo(OperationType.Head); assertThat(barrierRequest.getResourceType()).isEqualTo(ResourceType.DocumentCollection); assertThat(barrierRequest.getResourceAddress()).isEqualTo("dbs/dbname/colls/collname"); assertThat(getTargetGlobalLsn(barrierRequest)).isEqualTo(10l); assertThat(getTargetLsn(barrierRequest)).isEqualTo(11l); } @Test(groups = "direct") public void barrierDocumentReadNameBasedRequest() { IAuthorizationTokenProvider authTokenProvider = getIAuthorizationTokenProvider(); ResourceType resourceType = ResourceType.Document; OperationType operationType = OperationType.Read; Document randomResource = new Document(); randomResource.setId(UUID.randomUUID().toString()); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(mockDiagnosticsClientContext(), operationType, resourceType, "/dbs/dbname/colls/collname", randomResource, (Map<String, String>) null); RxDocumentServiceRequest barrierRequest = BarrierRequestHelper.createAsync(mockDiagnosticsClientContext(), request, authTokenProvider, 11l, 
10l).block(); assertThat(barrierRequest.getOperationType()).isEqualTo(OperationType.Head); assertThat(barrierRequest.getResourceType()).isEqualTo(ResourceType.DocumentCollection); assertThat(barrierRequest.getResourceAddress()).isEqualTo("dbs/dbname/colls/collname"); assertThat(getTargetGlobalLsn(barrierRequest)).isEqualTo(10l); assertThat(getTargetLsn(barrierRequest)).isEqualTo(11l); assertThat(barrierRequest.getIsNameBased()).isEqualTo(true); } @Test(groups = "direct") public void barrierDocumentReadRidBasedRequest() { IAuthorizationTokenProvider authTokenProvider = getIAuthorizationTokenProvider(); ResourceType resourceType = ResourceType.Document; OperationType operationType = OperationType.Read; Document randomResource = new Document(); randomResource.setId(UUID.randomUUID().toString()); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(mockDiagnosticsClientContext(), operationType, "7mVFAA==", resourceType, (Map<String, String>) null); RxDocumentServiceRequest barrierRequest = BarrierRequestHelper.createAsync(mockDiagnosticsClientContext(), request, authTokenProvider, 11l, 10l).block(); assertThat(barrierRequest.getOperationType()).isEqualTo(OperationType.Head); assertThat(barrierRequest.getResourceType()).isEqualTo(ResourceType.DocumentCollection); assertThat(barrierRequest.getResourceAddress()).isEqualTo("7mVFAA=="); assertThat(getTargetGlobalLsn(barrierRequest)).isEqualTo(10l); assertThat(getTargetLsn(barrierRequest)).isEqualTo(11l); assertThat(barrierRequest.getIsNameBased()).isEqualTo(false); } @Test(groups = "direct") @DataProvider(name = "isCollectionHeadBarrierRequestArgProvider") public Object[][] isCollectionHeadBarrierRequestArgProvider() { return new Object[][]{ {ResourceType.Attachment, null, true}, {ResourceType.Document, null, true}, {ResourceType.Conflict, null, true}, {ResourceType.StoredProcedure, null, true}, {ResourceType.Attachment, null, true}, {ResourceType.Trigger, null, true}, {ResourceType.DocumentCollection, 
OperationType.ReadFeed, false}, {ResourceType.DocumentCollection, OperationType.Query, false}, {ResourceType.DocumentCollection, OperationType.SqlQuery, false}, {ResourceType.DocumentCollection, OperationType.Create, true}, {ResourceType.DocumentCollection, OperationType.Read, true}, {ResourceType.DocumentCollection, OperationType.Replace, true}, {ResourceType.DocumentCollection, OperationType.ExecuteJavaScript, true}, {ResourceType.PartitionKeyRange, null, false}, }; } @Test(groups = "direct", dataProvider = "isCollectionHeadBarrierRequestArgProvider") public void isCollectionHeadBarrierRequest(ResourceType resourceType, OperationType operationType, boolean expectedResult) { if (operationType != null) { boolean actual = BarrierRequestHelper.isCollectionHeadBarrierRequest(resourceType, operationType); assertThat(actual).isEqualTo(expectedResult); } else { for (OperationType type : OperationType.values()) { boolean actual = BarrierRequestHelper.isCollectionHeadBarrierRequest(resourceType, type); assertThat(actual).isEqualTo(expectedResult); } } } private IAuthorizationTokenProvider getIAuthorizationTokenProvider() { return (RxDocumentClientImpl) new AsyncDocumentClient.Builder() .withMasterKeyOrResourceToken(TestConfigurations.MASTER_KEY) .withServiceEndpoint(TestConfigurations.HOST) .withClientTelemetryConfig( new CosmosClientTelemetryConfig() .sendClientTelemetryToService(ClientTelemetry.DEFAULT_CLIENT_TELEMETRY_ENABLED)) .build(); } private String getHeaderValue(RxDocumentServiceRequest req, String name) { return req.getHeaders().get(name); } private String getPartitionKey(RxDocumentServiceRequest req) { return getHeaderValue(req, HttpConstants.HttpHeaders.PARTITION_KEY); } private String getCollectionRid(RxDocumentServiceRequest req) { return getHeaderValue(req, WFConstants.BackendHeaders.COLLECTION_RID); } private PartitionKeyRangeIdentity getPartitionKeyRangeIdentity(RxDocumentServiceRequest req) { return req.getPartitionKeyRangeIdentity(); } private Long 
getTargetLsn(RxDocumentServiceRequest req) { return Long.parseLong(getHeaderValue(req, HttpConstants.HttpHeaders.TARGET_LSN)); } private Long getTargetGlobalLsn(RxDocumentServiceRequest req) { return Long.parseLong(getHeaderValue(req, HttpConstants.HttpHeaders.TARGET_GLOBAL_COMMITTED_LSN)); } class AadSimpleTokenCredential implements TokenCredential { private final String keyEncoded; private final String AAD_HEADER_COSMOS_EMULATOR = "{\"typ\":\"JWT\",\"alg\":\"RS256\",\"x5t\":\"CosmosEmulatorPrimaryMaster\",\"kid\":\"CosmosEmulatorPrimaryMaster\"}"; private final String AAD_CLAIM_COSMOS_EMULATOR_FORMAT = "{\"aud\":\"https: public AadSimpleTokenCredential(String emulatorKey) { if (emulatorKey == null || emulatorKey.isEmpty()) { throw new IllegalArgumentException("emulatorKey"); } this.keyEncoded = Utils.encodeUrlBase64String(emulatorKey.getBytes()); } @Override public Mono<AccessToken> getToken(TokenRequestContext tokenRequestContext) { String aadToken = emulatorKey_based_AAD_String(); return Mono.just(new AccessToken(aadToken, OffsetDateTime.now().plusHours(2))); } String emulatorKey_based_AAD_String() { ZonedDateTime currentTime = ZonedDateTime.now(); String part1Encoded = Utils.encodeUrlBase64String(AAD_HEADER_COSMOS_EMULATOR.getBytes()); String part2 = String.format(AAD_CLAIM_COSMOS_EMULATOR_FORMAT, currentTime.toEpochSecond(), currentTime.toEpochSecond(), currentTime.plusHours(2).toEpochSecond()); String part2Encoded = Utils.encodeUrlBase64String(part2.getBytes()); return part1Encoded + "." + part2Encoded + "." + this.keyEncoded; } } }
Hmm — I think you could use "\\s+", correct? Also, it would make sense to do expandedQuery.toString().toUpperCase().replaceAll("\\s", "") only once, before the loop, instead of doing it for each parameter.
/**
 * Executes the annotated (string-based) query with the given repository method arguments.
 *
 * Annotated queries are plain strings, so the clauses they use are not known in advance.
 * Collection-valued parameters need different handling per clause:
 *   (1) IN expects 'IN (a, b, c)' — the single @param placeholder is expanded into
 *       @param0, @param1, ... with one SQL parameter per element.
 *   (2) ARRAY_CONTAINS expects the collection to be passed as one array parameter, so no
 *       expansion is done when the query already wraps the parameter in ARRAY_CONTAINS.
 *
 * @param parameters the repository method arguments, positionally aligned with the
 *                   query method's declared parameters
 * @return the query result: a page, slice, count or plain result list depending on the
 *         query method's shape
 */
public Object execute(final Object[] parameters) {
    final CosmosParameterAccessor accessor = new CosmosParameterParameterAccessor(getQueryMethod(), parameters);
    final ResultProcessor processor = getQueryMethod().getResultProcessor().withDynamicProjection(accessor);

    String expandedQuery = query;
    List<SqlParameter> sqlParameters = new ArrayList<>();
    // Normalized once, outside the loop (case-folded, whitespace stripped with \s+) so the
    // ARRAY_CONTAINS detection below is insensitive to casing and formatting, and is not
    // recomputed for every parameter.
    final String normalizedQuery = query.toUpperCase().replaceAll("\\s+", "");
    for (int paramIndex = 0; paramIndex < parameters.length; paramIndex++) {
        Parameter queryParam = getQueryMethod().getParameters().getParameter(paramIndex);
        String paramName = queryParam.getName().orElse("");
        if (!paramName.isEmpty()) {
            String inParamCheck = "ARRAY_CONTAINS(@" + paramName.toUpperCase();
            if (parameters[paramIndex] instanceof Collection
                    && !normalizedQuery.contains(inParamCheck)) {
                // IN-style usage: expand the collection into one SQL parameter per element.
                List<String> expandParam = ((Collection<?>) parameters[paramIndex]).stream()
                    .map(Object::toString)
                    .collect(Collectors.toList());
                List<String> expandedParamKeys = new ArrayList<>();
                for (int arrayIndex = 0; arrayIndex < expandParam.size(); arrayIndex++) {
                    String expandedKey = "@" + paramName + arrayIndex;
                    expandedParamKeys.add(expandedKey);
                    sqlParameters.add(new SqlParameter(expandedKey, toCosmosDbValue(expandParam.get(arrayIndex))));
                }
                expandedQuery = expandedQuery.replaceAll("@" + paramName, String.join(",", expandedParamKeys));
            } else {
                // Pageable/Sort arguments are consumed via the accessor, not bound as
                // SQL parameters.
                if (!Pageable.class.isAssignableFrom(queryParam.getType())
                        && !Sort.class.isAssignableFrom(queryParam.getType())) {
                    sqlParameters.add(new SqlParameter("@" + paramName, toCosmosDbValue(parameters[paramIndex])));
                }
            }
        }
    }

    SqlQuerySpec querySpec = new SqlQuerySpec(expandedQuery, sqlParameters);
    if (isPageQuery()) {
        return this.operations.runPaginationQuery(querySpec, accessor.getPageable(),
            processor.getReturnedType().getDomainType(),
            processor.getReturnedType().getReturnedType());
    } else if (isSliceQuery()) {
        return this.operations.runSliceQuery(querySpec, accessor.getPageable(),
            processor.getReturnedType().getDomainType(),
            processor.getReturnedType().getReturnedType());
    } else if (isCountQuery()) {
        final String container =
            ((CosmosEntityMetadata<?>) getQueryMethod().getEntityInformation()).getContainerName();
        return this.operations.count(querySpec, container);
    } else {
        return this.operations.runQuery(querySpec, accessor.getSort(),
            processor.getReturnedType().getDomainType(),
            processor.getReturnedType().getReturnedType());
    }
}
&& expandedQuery.toString().toUpperCase().replaceAll("\\s", "")
/**
 * Executes the annotated (string-based) query with the given repository method arguments.
 *
 * Annotated queries are defined as strings, so there is no way to know the clauses being
 * used in advance. Some clauses expect an expanded list of values and others expect a
 * single array parameter:
 * (1) IN clauses expect the syntax 'IN (a, b, c)', generated by the if branch below.
 * (2) ARRAY_CONTAINS expects the syntax 'ARRAY_CONTAINS(["a", "b", "c"], table.param)',
 *     handled by the else branch (the whole collection is bound as one parameter).
 *
 * @param parameters the repository method arguments, positionally aligned with the
 *                   query method's declared parameters
 * @return the query result: a page, slice, count or plain result list depending on the
 *         query method's shape
 */
public Object execute(final Object[] parameters) {
    final CosmosParameterAccessor accessor = new CosmosParameterParameterAccessor(getQueryMethod(), parameters);
    final ResultProcessor processor = getQueryMethod().getResultProcessor().withDynamicProjection(accessor);

    String expandedQuery = query;
    List<SqlParameter> sqlParameters = new ArrayList<>();
    // Normalized once up front: lower-cased (with a fixed locale — some characters have no
    // upper-case form) and whitespace-stripped, so the ARRAY_CONTAINS detection is
    // insensitive to casing and formatting.
    String modifiedExpandedQuery = expandedQuery.toLowerCase(Locale.US).replaceAll("\\s+", "");
    for (int paramIndex = 0; paramIndex < parameters.length; paramIndex++) {
        Parameter queryParam = getQueryMethod().getParameters().getParameter(paramIndex);
        String paramName = queryParam.getName().orElse("");
        if (!paramName.isEmpty()) {
            String inParamCheck = "array_contains(@" + paramName.toLowerCase(Locale.US);
            if (parameters[paramIndex] instanceof Collection
                    && !modifiedExpandedQuery.contains(inParamCheck)) {
                // IN-style usage: expand the collection into one SQL parameter per element.
                List<String> expandParam = ((Collection<?>) parameters[paramIndex]).stream()
                    .map(Object::toString)
                    .collect(Collectors.toList());
                List<String> expandedParamKeys = new ArrayList<>();
                for (int arrayIndex = 0; arrayIndex < expandParam.size(); arrayIndex++) {
                    // Build each expanded key once and reuse it for both the query text
                    // and the bound parameter.
                    String expandedKey = "@" + paramName + arrayIndex;
                    expandedParamKeys.add(expandedKey);
                    sqlParameters.add(new SqlParameter(expandedKey, toCosmosDbValue(expandParam.get(arrayIndex))));
                }
                expandedQuery = expandedQuery.replaceAll("@" + paramName, String.join(",", expandedParamKeys));
            } else {
                // Pageable/Sort arguments are consumed via the accessor, not bound as
                // SQL parameters.
                if (!Pageable.class.isAssignableFrom(queryParam.getType())
                        && !Sort.class.isAssignableFrom(queryParam.getType())) {
                    sqlParameters.add(new SqlParameter("@" + paramName, toCosmosDbValue(parameters[paramIndex])));
                }
            }
        }
    }

    SqlQuerySpec querySpec = new SqlQuerySpec(expandedQuery, sqlParameters);
    if (isPageQuery()) {
        return this.operations.runPaginationQuery(querySpec, accessor.getPageable(),
            processor.getReturnedType().getDomainType(),
            processor.getReturnedType().getReturnedType());
    } else if (isSliceQuery()) {
        return this.operations.runSliceQuery(querySpec, accessor.getPageable(),
            processor.getReturnedType().getDomainType(),
            processor.getReturnedType().getReturnedType());
    } else if (isCountQuery()) {
        final String container =
            ((CosmosEntityMetadata<?>) getQueryMethod().getEntityInformation()).getContainerName();
        return this.operations.count(querySpec, container);
    } else {
        return this.operations.runQuery(querySpec, accessor.getSort(),
            processor.getReturnedType().getDomainType(),
            processor.getReturnedType().getReturnedType());
    }
}
class StringBasedCosmosQuery extends AbstractCosmosQuery { private static final Pattern COUNT_QUERY_PATTERN = Pattern.compile("^\\s*select\\s+value\\s+count.*", Pattern.CASE_INSENSITIVE); private final String query; /** * Constructor * @param queryMethod the CosmosQueryMethod * @param dbOperations the CosmosOperations */ public StringBasedCosmosQuery(CosmosQueryMethod queryMethod, CosmosOperations dbOperations) { super(queryMethod, dbOperations); this.query = queryMethod.getQueryAnnotation(); } @Override protected CosmosQuery createQuery(CosmosParameterAccessor accessor) { return null; } @Override @Override protected boolean isDeleteQuery() { return false; } @Override protected boolean isExistsQuery() { return false; } @Override protected boolean isCountQuery() { return isCountQuery(query, getQueryMethod().getReturnedObjectType()); } static boolean isCountQuery(String query, Class<?> returnedType) { if (isCountQueryReturnType(returnedType)) { return COUNT_QUERY_PATTERN.matcher(query).matches(); } else { return false; } } private static boolean isCountQueryReturnType(Class<?> returnedType) { return returnedType == Long.class || returnedType == long.class || returnedType == Integer.class || returnedType == int.class; } }
class StringBasedCosmosQuery extends AbstractCosmosQuery { private static final Pattern COUNT_QUERY_PATTERN = Pattern.compile("^\\s*select\\s+value\\s+count.*", Pattern.CASE_INSENSITIVE); private final String query; /** * Constructor * @param queryMethod the CosmosQueryMethod * @param dbOperations the CosmosOperations */ public StringBasedCosmosQuery(CosmosQueryMethod queryMethod, CosmosOperations dbOperations) { super(queryMethod, dbOperations); this.query = queryMethod.getQueryAnnotation(); } @Override protected CosmosQuery createQuery(CosmosParameterAccessor accessor) { return null; } @Override @Override protected boolean isDeleteQuery() { return false; } @Override protected boolean isExistsQuery() { return false; } @Override protected boolean isCountQuery() { return isCountQuery(query, getQueryMethod().getReturnedObjectType()); } static boolean isCountQuery(String query, Class<?> returnedType) { if (isCountQueryReturnType(returnedType)) { return COUNT_QUERY_PATTERN.matcher(query).matches(); } else { return false; } } private static boolean isCountQueryReturnType(Class<?> returnedType) { return returnedType == Long.class || returnedType == long.class || returnedType == Integer.class || returnedType == int.class; } }
Would also like to see some comments here — I think I get what you are doing, but a sample with some comments explaining the flow would make it easier to read and understand the code in a few weeks.
/**
 * Executes the annotated (string-based) query with the given repository method arguments.
 *
 * Annotated queries are plain strings, so the clauses they use are not known in advance.
 * Collection-valued parameters need different handling per clause:
 *   (1) IN expects 'IN (a, b, c)' — the single @param placeholder is expanded into
 *       @param0, @param1, ... with one SQL parameter per element.
 *   (2) ARRAY_CONTAINS expects the collection to be passed as one array parameter, so no
 *       expansion is done when the query already wraps the parameter in ARRAY_CONTAINS.
 *
 * @param parameters the repository method arguments, positionally aligned with the
 *                   query method's declared parameters
 * @return the query result: a page, slice, count or plain result list depending on the
 *         query method's shape
 */
public Object execute(final Object[] parameters) {
    final CosmosParameterAccessor accessor = new CosmosParameterParameterAccessor(getQueryMethod(), parameters);
    final ResultProcessor processor = getQueryMethod().getResultProcessor().withDynamicProjection(accessor);

    String expandedQuery = query;
    List<SqlParameter> sqlParameters = new ArrayList<>();
    // Normalized once, outside the loop (case-folded, whitespace stripped with \s+) so the
    // ARRAY_CONTAINS detection below is insensitive to casing and formatting, and is not
    // recomputed for every parameter.
    final String normalizedQuery = query.toUpperCase().replaceAll("\\s+", "");
    for (int paramIndex = 0; paramIndex < parameters.length; paramIndex++) {
        Parameter queryParam = getQueryMethod().getParameters().getParameter(paramIndex);
        String paramName = queryParam.getName().orElse("");
        if (!paramName.isEmpty()) {
            String inParamCheck = "ARRAY_CONTAINS(@" + paramName.toUpperCase();
            if (parameters[paramIndex] instanceof Collection
                    && !normalizedQuery.contains(inParamCheck)) {
                // IN-style usage: expand the collection into one SQL parameter per element.
                List<String> expandParam = ((Collection<?>) parameters[paramIndex]).stream()
                    .map(Object::toString)
                    .collect(Collectors.toList());
                List<String> expandedParamKeys = new ArrayList<>();
                for (int arrayIndex = 0; arrayIndex < expandParam.size(); arrayIndex++) {
                    String expandedKey = "@" + paramName + arrayIndex;
                    expandedParamKeys.add(expandedKey);
                    sqlParameters.add(new SqlParameter(expandedKey, toCosmosDbValue(expandParam.get(arrayIndex))));
                }
                expandedQuery = expandedQuery.replaceAll("@" + paramName, String.join(",", expandedParamKeys));
            } else {
                // Pageable/Sort arguments are consumed via the accessor, not bound as
                // SQL parameters.
                if (!Pageable.class.isAssignableFrom(queryParam.getType())
                        && !Sort.class.isAssignableFrom(queryParam.getType())) {
                    sqlParameters.add(new SqlParameter("@" + paramName, toCosmosDbValue(parameters[paramIndex])));
                }
            }
        }
    }

    SqlQuerySpec querySpec = new SqlQuerySpec(expandedQuery, sqlParameters);
    if (isPageQuery()) {
        return this.operations.runPaginationQuery(querySpec, accessor.getPageable(),
            processor.getReturnedType().getDomainType(),
            processor.getReturnedType().getReturnedType());
    } else if (isSliceQuery()) {
        return this.operations.runSliceQuery(querySpec, accessor.getPageable(),
            processor.getReturnedType().getDomainType(),
            processor.getReturnedType().getReturnedType());
    } else if (isCountQuery()) {
        final String container =
            ((CosmosEntityMetadata<?>) getQueryMethod().getEntityInformation()).getContainerName();
        return this.operations.count(querySpec, container);
    } else {
        return this.operations.runQuery(querySpec, accessor.getSort(),
            processor.getReturnedType().getDomainType(),
            processor.getReturnedType().getReturnedType());
    }
}
&& expandedQuery.toString().toUpperCase().replaceAll("\\s", "")
/**
 * Executes the annotated (string-based) query with the given repository method arguments.
 *
 * Annotated queries are defined as strings, so there is no way to know the clauses being
 * used in advance. Some clauses expect an expanded list of values and others expect a
 * single array parameter:
 * (1) IN clauses expect the syntax 'IN (a, b, c)', generated by the if branch below.
 * (2) ARRAY_CONTAINS expects the syntax 'ARRAY_CONTAINS(["a", "b", "c"], table.param)',
 *     handled by the else branch (the whole collection is bound as one parameter).
 *
 * @param parameters the repository method arguments, positionally aligned with the
 *                   query method's declared parameters
 * @return the query result: a page, slice, count or plain result list depending on the
 *         query method's shape
 */
public Object execute(final Object[] parameters) {
    final CosmosParameterAccessor accessor = new CosmosParameterParameterAccessor(getQueryMethod(), parameters);
    final ResultProcessor processor = getQueryMethod().getResultProcessor().withDynamicProjection(accessor);

    String expandedQuery = query;
    List<SqlParameter> sqlParameters = new ArrayList<>();
    // Normalized once up front: lower-cased (with a fixed locale — some characters have no
    // upper-case form) and whitespace-stripped, so the ARRAY_CONTAINS detection is
    // insensitive to casing and formatting.
    String modifiedExpandedQuery = expandedQuery.toLowerCase(Locale.US).replaceAll("\\s+", "");
    for (int paramIndex = 0; paramIndex < parameters.length; paramIndex++) {
        Parameter queryParam = getQueryMethod().getParameters().getParameter(paramIndex);
        String paramName = queryParam.getName().orElse("");
        if (!paramName.isEmpty()) {
            String inParamCheck = "array_contains(@" + paramName.toLowerCase(Locale.US);
            if (parameters[paramIndex] instanceof Collection
                    && !modifiedExpandedQuery.contains(inParamCheck)) {
                // IN-style usage: expand the collection into one SQL parameter per element.
                List<String> expandParam = ((Collection<?>) parameters[paramIndex]).stream()
                    .map(Object::toString)
                    .collect(Collectors.toList());
                List<String> expandedParamKeys = new ArrayList<>();
                for (int arrayIndex = 0; arrayIndex < expandParam.size(); arrayIndex++) {
                    // Build each expanded key once and reuse it for both the query text
                    // and the bound parameter.
                    String expandedKey = "@" + paramName + arrayIndex;
                    expandedParamKeys.add(expandedKey);
                    sqlParameters.add(new SqlParameter(expandedKey, toCosmosDbValue(expandParam.get(arrayIndex))));
                }
                expandedQuery = expandedQuery.replaceAll("@" + paramName, String.join(",", expandedParamKeys));
            } else {
                // Pageable/Sort arguments are consumed via the accessor, not bound as
                // SQL parameters.
                if (!Pageable.class.isAssignableFrom(queryParam.getType())
                        && !Sort.class.isAssignableFrom(queryParam.getType())) {
                    sqlParameters.add(new SqlParameter("@" + paramName, toCosmosDbValue(parameters[paramIndex])));
                }
            }
        }
    }

    SqlQuerySpec querySpec = new SqlQuerySpec(expandedQuery, sqlParameters);
    if (isPageQuery()) {
        return this.operations.runPaginationQuery(querySpec, accessor.getPageable(),
            processor.getReturnedType().getDomainType(),
            processor.getReturnedType().getReturnedType());
    } else if (isSliceQuery()) {
        return this.operations.runSliceQuery(querySpec, accessor.getPageable(),
            processor.getReturnedType().getDomainType(),
            processor.getReturnedType().getReturnedType());
    } else if (isCountQuery()) {
        final String container =
            ((CosmosEntityMetadata<?>) getQueryMethod().getEntityInformation()).getContainerName();
        return this.operations.count(querySpec, container);
    } else {
        return this.operations.runQuery(querySpec, accessor.getSort(),
            processor.getReturnedType().getDomainType(),
            processor.getReturnedType().getReturnedType());
    }
}
class StringBasedCosmosQuery extends AbstractCosmosQuery { private static final Pattern COUNT_QUERY_PATTERN = Pattern.compile("^\\s*select\\s+value\\s+count.*", Pattern.CASE_INSENSITIVE); private final String query; /** * Constructor * @param queryMethod the CosmosQueryMethod * @param dbOperations the CosmosOperations */ public StringBasedCosmosQuery(CosmosQueryMethod queryMethod, CosmosOperations dbOperations) { super(queryMethod, dbOperations); this.query = queryMethod.getQueryAnnotation(); } @Override protected CosmosQuery createQuery(CosmosParameterAccessor accessor) { return null; } @Override @Override protected boolean isDeleteQuery() { return false; } @Override protected boolean isExistsQuery() { return false; } @Override protected boolean isCountQuery() { return isCountQuery(query, getQueryMethod().getReturnedObjectType()); } static boolean isCountQuery(String query, Class<?> returnedType) { if (isCountQueryReturnType(returnedType)) { return COUNT_QUERY_PATTERN.matcher(query).matches(); } else { return false; } } private static boolean isCountQueryReturnType(Class<?> returnedType) { return returnedType == Long.class || returnedType == long.class || returnedType == Integer.class || returnedType == int.class; } }
class StringBasedCosmosQuery extends AbstractCosmosQuery { private static final Pattern COUNT_QUERY_PATTERN = Pattern.compile("^\\s*select\\s+value\\s+count.*", Pattern.CASE_INSENSITIVE); private final String query; /** * Constructor * @param queryMethod the CosmosQueryMethod * @param dbOperations the CosmosOperations */ public StringBasedCosmosQuery(CosmosQueryMethod queryMethod, CosmosOperations dbOperations) { super(queryMethod, dbOperations); this.query = queryMethod.getQueryAnnotation(); } @Override protected CosmosQuery createQuery(CosmosParameterAccessor accessor) { return null; } @Override @Override protected boolean isDeleteQuery() { return false; } @Override protected boolean isExistsQuery() { return false; } @Override protected boolean isCountQuery() { return isCountQuery(query, getQueryMethod().getReturnedObjectType()); } static boolean isCountQuery(String query, Class<?> returnedType) { if (isCountQueryReturnType(returnedType)) { return COUNT_QUERY_PATTERN.matcher(query).matches(); } else { return false; } } private static boolean isCountQueryReturnType(Class<?> returnedType) { return returnedType == Long.class || returnedType == long.class || returnedType == Integer.class || returnedType == int.class; } }
Usually string comparison is also safer with lower case than with upper case, because not every character has an upper-case form — the German special character 'ß', for example, has no corresponding upper-case character.
/**
 * Executes the annotated (string-based) query with the given repository method arguments.
 *
 * Annotated queries are plain strings, so the clauses they use are not known in advance.
 * Collection-valued parameters need different handling per clause:
 *   (1) IN expects 'IN (a, b, c)' — the single @param placeholder is expanded into
 *       @param0, @param1, ... with one SQL parameter per element.
 *   (2) ARRAY_CONTAINS expects the collection to be passed as one array parameter, so no
 *       expansion is done when the query already wraps the parameter in ARRAY_CONTAINS.
 *
 * @param parameters the repository method arguments, positionally aligned with the
 *                   query method's declared parameters
 * @return the query result: a page, slice, count or plain result list depending on the
 *         query method's shape
 */
public Object execute(final Object[] parameters) {
    final CosmosParameterAccessor accessor = new CosmosParameterParameterAccessor(getQueryMethod(), parameters);
    final ResultProcessor processor = getQueryMethod().getResultProcessor().withDynamicProjection(accessor);

    String expandedQuery = query;
    List<SqlParameter> sqlParameters = new ArrayList<>();
    // Normalized once, outside the loop (case-folded, whitespace stripped with \s+) so the
    // ARRAY_CONTAINS detection below is insensitive to casing and formatting, and is not
    // recomputed for every parameter.
    final String normalizedQuery = query.toUpperCase().replaceAll("\\s+", "");
    for (int paramIndex = 0; paramIndex < parameters.length; paramIndex++) {
        Parameter queryParam = getQueryMethod().getParameters().getParameter(paramIndex);
        String paramName = queryParam.getName().orElse("");
        if (!paramName.isEmpty()) {
            String inParamCheck = "ARRAY_CONTAINS(@" + paramName.toUpperCase();
            if (parameters[paramIndex] instanceof Collection
                    && !normalizedQuery.contains(inParamCheck)) {
                // IN-style usage: expand the collection into one SQL parameter per element.
                List<String> expandParam = ((Collection<?>) parameters[paramIndex]).stream()
                    .map(Object::toString)
                    .collect(Collectors.toList());
                List<String> expandedParamKeys = new ArrayList<>();
                for (int arrayIndex = 0; arrayIndex < expandParam.size(); arrayIndex++) {
                    String expandedKey = "@" + paramName + arrayIndex;
                    expandedParamKeys.add(expandedKey);
                    sqlParameters.add(new SqlParameter(expandedKey, toCosmosDbValue(expandParam.get(arrayIndex))));
                }
                expandedQuery = expandedQuery.replaceAll("@" + paramName, String.join(",", expandedParamKeys));
            } else {
                // Pageable/Sort arguments are consumed via the accessor, not bound as
                // SQL parameters.
                if (!Pageable.class.isAssignableFrom(queryParam.getType())
                        && !Sort.class.isAssignableFrom(queryParam.getType())) {
                    sqlParameters.add(new SqlParameter("@" + paramName, toCosmosDbValue(parameters[paramIndex])));
                }
            }
        }
    }

    SqlQuerySpec querySpec = new SqlQuerySpec(expandedQuery, sqlParameters);
    if (isPageQuery()) {
        return this.operations.runPaginationQuery(querySpec, accessor.getPageable(),
            processor.getReturnedType().getDomainType(),
            processor.getReturnedType().getReturnedType());
    } else if (isSliceQuery()) {
        return this.operations.runSliceQuery(querySpec, accessor.getPageable(),
            processor.getReturnedType().getDomainType(),
            processor.getReturnedType().getReturnedType());
    } else if (isCountQuery()) {
        final String container =
            ((CosmosEntityMetadata<?>) getQueryMethod().getEntityInformation()).getContainerName();
        return this.operations.count(querySpec, container);
    } else {
        return this.operations.runQuery(querySpec, accessor.getSort(),
            processor.getReturnedType().getDomainType(),
            processor.getReturnedType().getReturnedType());
    }
}
&& expandedQuery.toString().toUpperCase().replaceAll("\\s", "")
/**
 * Executes the annotated (string-based) query with the given repository method arguments.
 *
 * Annotated queries are defined as strings, so there is no way to know the clauses being
 * used in advance. Some clauses expect an expanded list of values and others expect a
 * single array parameter:
 * (1) IN clauses expect the syntax 'IN (a, b, c)', generated by the if branch below.
 * (2) ARRAY_CONTAINS expects the syntax 'ARRAY_CONTAINS(["a", "b", "c"], table.param)',
 *     handled by the else branch (the whole collection is bound as one parameter).
 *
 * @param parameters the repository method arguments, positionally aligned with the
 *                   query method's declared parameters
 * @return the query result: a page, slice, count or plain result list depending on the
 *         query method's shape
 */
public Object execute(final Object[] parameters) {
    final CosmosParameterAccessor accessor = new CosmosParameterParameterAccessor(getQueryMethod(), parameters);
    final ResultProcessor processor = getQueryMethod().getResultProcessor().withDynamicProjection(accessor);

    String expandedQuery = query;
    List<SqlParameter> sqlParameters = new ArrayList<>();
    // Normalized once up front: lower-cased (with a fixed locale — some characters have no
    // upper-case form) and whitespace-stripped, so the ARRAY_CONTAINS detection is
    // insensitive to casing and formatting.
    String modifiedExpandedQuery = expandedQuery.toLowerCase(Locale.US).replaceAll("\\s+", "");
    for (int paramIndex = 0; paramIndex < parameters.length; paramIndex++) {
        Parameter queryParam = getQueryMethod().getParameters().getParameter(paramIndex);
        String paramName = queryParam.getName().orElse("");
        if (!paramName.isEmpty()) {
            String inParamCheck = "array_contains(@" + paramName.toLowerCase(Locale.US);
            if (parameters[paramIndex] instanceof Collection
                    && !modifiedExpandedQuery.contains(inParamCheck)) {
                // IN-style usage: expand the collection into one SQL parameter per element.
                List<String> expandParam = ((Collection<?>) parameters[paramIndex]).stream()
                    .map(Object::toString)
                    .collect(Collectors.toList());
                List<String> expandedParamKeys = new ArrayList<>();
                for (int arrayIndex = 0; arrayIndex < expandParam.size(); arrayIndex++) {
                    // Build each expanded key once and reuse it for both the query text
                    // and the bound parameter.
                    String expandedKey = "@" + paramName + arrayIndex;
                    expandedParamKeys.add(expandedKey);
                    sqlParameters.add(new SqlParameter(expandedKey, toCosmosDbValue(expandParam.get(arrayIndex))));
                }
                expandedQuery = expandedQuery.replaceAll("@" + paramName, String.join(",", expandedParamKeys));
            } else {
                // Pageable/Sort arguments are consumed via the accessor, not bound as
                // SQL parameters.
                if (!Pageable.class.isAssignableFrom(queryParam.getType())
                        && !Sort.class.isAssignableFrom(queryParam.getType())) {
                    sqlParameters.add(new SqlParameter("@" + paramName, toCosmosDbValue(parameters[paramIndex])));
                }
            }
        }
    }

    SqlQuerySpec querySpec = new SqlQuerySpec(expandedQuery, sqlParameters);
    if (isPageQuery()) {
        return this.operations.runPaginationQuery(querySpec, accessor.getPageable(),
            processor.getReturnedType().getDomainType(),
            processor.getReturnedType().getReturnedType());
    } else if (isSliceQuery()) {
        return this.operations.runSliceQuery(querySpec, accessor.getPageable(),
            processor.getReturnedType().getDomainType(),
            processor.getReturnedType().getReturnedType());
    } else if (isCountQuery()) {
        final String container =
            ((CosmosEntityMetadata<?>) getQueryMethod().getEntityInformation()).getContainerName();
        return this.operations.count(querySpec, container);
    } else {
        return this.operations.runQuery(querySpec, accessor.getSort(),
            processor.getReturnedType().getDomainType(),
            processor.getReturnedType().getReturnedType());
    }
}
class StringBasedCosmosQuery extends AbstractCosmosQuery { private static final Pattern COUNT_QUERY_PATTERN = Pattern.compile("^\\s*select\\s+value\\s+count.*", Pattern.CASE_INSENSITIVE); private final String query; /** * Constructor * @param queryMethod the CosmosQueryMethod * @param dbOperations the CosmosOperations */ public StringBasedCosmosQuery(CosmosQueryMethod queryMethod, CosmosOperations dbOperations) { super(queryMethod, dbOperations); this.query = queryMethod.getQueryAnnotation(); } @Override protected CosmosQuery createQuery(CosmosParameterAccessor accessor) { return null; } @Override @Override protected boolean isDeleteQuery() { return false; } @Override protected boolean isExistsQuery() { return false; } @Override protected boolean isCountQuery() { return isCountQuery(query, getQueryMethod().getReturnedObjectType()); } static boolean isCountQuery(String query, Class<?> returnedType) { if (isCountQueryReturnType(returnedType)) { return COUNT_QUERY_PATTERN.matcher(query).matches(); } else { return false; } } private static boolean isCountQueryReturnType(Class<?> returnedType) { return returnedType == Long.class || returnedType == long.class || returnedType == Integer.class || returnedType == int.class; } }
class StringBasedCosmosQuery extends AbstractCosmosQuery { private static final Pattern COUNT_QUERY_PATTERN = Pattern.compile("^\\s*select\\s+value\\s+count.*", Pattern.CASE_INSENSITIVE); private final String query; /** * Constructor * @param queryMethod the CosmosQueryMethod * @param dbOperations the CosmosOperations */ public StringBasedCosmosQuery(CosmosQueryMethod queryMethod, CosmosOperations dbOperations) { super(queryMethod, dbOperations); this.query = queryMethod.getQueryAnnotation(); } @Override protected CosmosQuery createQuery(CosmosParameterAccessor accessor) { return null; } @Override @Override protected boolean isDeleteQuery() { return false; } @Override protected boolean isExistsQuery() { return false; } @Override protected boolean isCountQuery() { return isCountQuery(query, getQueryMethod().getReturnedObjectType()); } static boolean isCountQuery(String query, Class<?> returnedType) { if (isCountQueryReturnType(returnedType)) { return COUNT_QUERY_PATTERN.matcher(query).matches(); } else { return false; } } private static boolean isCountQueryReturnType(Class<?> returnedType) { return returnedType == Long.class || returnedType == long.class || returnedType == Integer.class || returnedType == int.class; } }
Thanks for the feedback!
public Object execute(final Object[] parameters) { final CosmosParameterAccessor accessor = new CosmosParameterParameterAccessor(getQueryMethod(), parameters); final ResultProcessor processor = getQueryMethod().getResultProcessor().withDynamicProjection(accessor); String expandedQuery = query; List<SqlParameter> sqlParameters = new ArrayList<>(); for (int paramIndex = 0; paramIndex < parameters.length; paramIndex++) { Parameter queryParam = getQueryMethod().getParameters().getParameter(paramIndex); if (!queryParam.getName().isEmpty()) { String inParamCheck = "ARRAY_CONTAINS(@" + queryParam.getName().get(); if (parameters[paramIndex] instanceof Collection && expandedQuery.toString().toUpperCase().replaceAll("\\s", "") .indexOf(inParamCheck.toUpperCase()) == -1) { List<String> expandParam = ((Collection<?>) parameters[paramIndex]).stream() .map(Object::toString).collect(Collectors.toList()); List<String> expandedParamKeys = new ArrayList<>(); for (int arrayIndex = 0; arrayIndex < expandParam.size(); arrayIndex++) { String paramName = "@" + queryParam.getName().orElse("") + arrayIndex; expandedParamKeys.add(paramName); sqlParameters.add(new SqlParameter(paramName, toCosmosDbValue(expandParam.get(arrayIndex)))); } expandedQuery = expandedQuery.replaceAll("@" + queryParam.getName().orElse(""), String.join(",", expandedParamKeys)); } else { if (!Pageable.class.isAssignableFrom(queryParam.getType()) && !Sort.class.isAssignableFrom(queryParam.getType())) { sqlParameters.add(new SqlParameter("@" + queryParam.getName().orElse(""), toCosmosDbValue(parameters[paramIndex]))); } } } } SqlQuerySpec querySpec = new SqlQuerySpec(expandedQuery, sqlParameters); if (isPageQuery()) { return this.operations.runPaginationQuery(querySpec, accessor.getPageable(), processor.getReturnedType().getDomainType(), processor.getReturnedType().getReturnedType()); } else if (isSliceQuery()) { return this.operations.runSliceQuery( querySpec, accessor.getPageable(), 
processor.getReturnedType().getDomainType(), processor.getReturnedType().getReturnedType()); } else if (isCountQuery()) { final String container = ((CosmosEntityMetadata<?>) getQueryMethod().getEntityInformation()).getContainerName(); return this.operations.count(querySpec, container); } else { return this.operations.runQuery(querySpec, accessor.getSort(), processor.getReturnedType().getDomainType(), processor.getReturnedType().getReturnedType()); } }
&& expandedQuery.toString().toUpperCase().replaceAll("\\s", "")
public Object execute(final Object[] parameters) { final CosmosParameterAccessor accessor = new CosmosParameterParameterAccessor(getQueryMethod(), parameters); final ResultProcessor processor = getQueryMethod().getResultProcessor().withDynamicProjection(accessor); /* * The below for loop is used to handle two unique use cases with annotated queries. * Annotated queries are defined as strings so there is no way to know the clauses * being used in advance. Some clauses expect an array and others expect just a list of values. * (1) IN clauses expect the syntax 'IN (a, b, c) which is generated from the if statement. * (2) ARRAY_CONTAINS expects the syntax 'ARRAY_CONTAINS(["a", "b", "c"], table.param) which * is generated from the else statement. */ String expandedQuery = query; List<SqlParameter> sqlParameters = new ArrayList<>(); String modifiedExpandedQuery = expandedQuery.toLowerCase(Locale.US).replaceAll("\\s+", ""); for (int paramIndex = 0; paramIndex < parameters.length; paramIndex++) { Parameter queryParam = getQueryMethod().getParameters().getParameter(paramIndex); String paramName = queryParam.getName().orElse(""); if (!("").equals(paramName)) { String inParamCheck = "array_contains(@" + paramName.toLowerCase(Locale.US); if (parameters[paramIndex] instanceof Collection && !modifiedExpandedQuery.contains(inParamCheck)) { List<String> expandParam = ((Collection<?>) parameters[paramIndex]).stream() .map(Object::toString).collect(Collectors.toList()); List<String> expandedParamKeys = new ArrayList<>(); for (int arrayIndex = 0; arrayIndex < expandParam.size(); arrayIndex++) { expandedParamKeys.add("@" + paramName + arrayIndex); sqlParameters.add(new SqlParameter("@" + paramName + arrayIndex, toCosmosDbValue(expandParam.get(arrayIndex)))); } expandedQuery = expandedQuery.replaceAll("@" + queryParam.getName().orElse(""), String.join(",", expandedParamKeys)); } else { if (!Pageable.class.isAssignableFrom(queryParam.getType()) && 
!Sort.class.isAssignableFrom(queryParam.getType())) { sqlParameters.add(new SqlParameter("@" + queryParam.getName().orElse(""), toCosmosDbValue(parameters[paramIndex]))); } } } } SqlQuerySpec querySpec = new SqlQuerySpec(expandedQuery, sqlParameters); if (isPageQuery()) { return this.operations.runPaginationQuery(querySpec, accessor.getPageable(), processor.getReturnedType().getDomainType(), processor.getReturnedType().getReturnedType()); } else if (isSliceQuery()) { return this.operations.runSliceQuery( querySpec, accessor.getPageable(), processor.getReturnedType().getDomainType(), processor.getReturnedType().getReturnedType()); } else if (isCountQuery()) { final String container = ((CosmosEntityMetadata<?>) getQueryMethod().getEntityInformation()).getContainerName(); return this.operations.count(querySpec, container); } else { return this.operations.runQuery(querySpec, accessor.getSort(), processor.getReturnedType().getDomainType(), processor.getReturnedType().getReturnedType()); } }
class StringBasedCosmosQuery extends AbstractCosmosQuery { private static final Pattern COUNT_QUERY_PATTERN = Pattern.compile("^\\s*select\\s+value\\s+count.*", Pattern.CASE_INSENSITIVE); private final String query; /** * Constructor * @param queryMethod the CosmosQueryMethod * @param dbOperations the CosmosOperations */ public StringBasedCosmosQuery(CosmosQueryMethod queryMethod, CosmosOperations dbOperations) { super(queryMethod, dbOperations); this.query = queryMethod.getQueryAnnotation(); } @Override protected CosmosQuery createQuery(CosmosParameterAccessor accessor) { return null; } @Override @Override protected boolean isDeleteQuery() { return false; } @Override protected boolean isExistsQuery() { return false; } @Override protected boolean isCountQuery() { return isCountQuery(query, getQueryMethod().getReturnedObjectType()); } static boolean isCountQuery(String query, Class<?> returnedType) { if (isCountQueryReturnType(returnedType)) { return COUNT_QUERY_PATTERN.matcher(query).matches(); } else { return false; } } private static boolean isCountQueryReturnType(Class<?> returnedType) { return returnedType == Long.class || returnedType == long.class || returnedType == Integer.class || returnedType == int.class; } }
class StringBasedCosmosQuery extends AbstractCosmosQuery { private static final Pattern COUNT_QUERY_PATTERN = Pattern.compile("^\\s*select\\s+value\\s+count.*", Pattern.CASE_INSENSITIVE); private final String query; /** * Constructor * @param queryMethod the CosmosQueryMethod * @param dbOperations the CosmosOperations */ public StringBasedCosmosQuery(CosmosQueryMethod queryMethod, CosmosOperations dbOperations) { super(queryMethod, dbOperations); this.query = queryMethod.getQueryAnnotation(); } @Override protected CosmosQuery createQuery(CosmosParameterAccessor accessor) { return null; } @Override @Override protected boolean isDeleteQuery() { return false; } @Override protected boolean isExistsQuery() { return false; } @Override protected boolean isCountQuery() { return isCountQuery(query, getQueryMethod().getReturnedObjectType()); } static boolean isCountQuery(String query, Class<?> returnedType) { if (isCountQueryReturnType(returnedType)) { return COUNT_QUERY_PATTERN.matcher(query).matches(); } else { return false; } } private static boolean isCountQueryReturnType(Class<?> returnedType) { return returnedType == Long.class || returnedType == long.class || returnedType == Integer.class || returnedType == int.class; } }
This is still doing the rather expensive transformation for every parameter. My suggestion was to do it outside of the for loop — making it O(1) instead of O(n) (with n being the parameter count).
public Object execute(final Object[] parameters) { final CosmosParameterAccessor accessor = new CosmosParameterParameterAccessor(getQueryMethod(), parameters); final ResultProcessor processor = getQueryMethod().getResultProcessor().withDynamicProjection(accessor); /* * The below for loop is used to handle two unique use cases with annotated queries. * Annotated queries are defined as strings so there is no way to know the clauses * being used in advance. Some clauses expect an array and others expect just a list of values. * (1) IN clauses expect the syntax 'IN (a, b, c) which is generated from the if statement. * (2) ARRAY_CONTAINS expects the syntax 'ARRAY_CONTAINS(["a", "b", "c"], table.param) which * is generated from the else statement. */ String expandedQuery = query; List<SqlParameter> sqlParameters = new ArrayList<>(); for (int paramIndex = 0; paramIndex < parameters.length; paramIndex++) { Parameter queryParam = getQueryMethod().getParameters().getParameter(paramIndex); String paramName = queryParam.getName().orElse(""); if (!paramName.equals("")) { String modifiedExpandedQuery = expandedQuery.toLowerCase().replaceAll("\\s+", ""); String inParamCheck = "array_contains(@" + paramName.toLowerCase(); if (parameters[paramIndex] instanceof Collection && !modifiedExpandedQuery.contains(inParamCheck)) { List<String> expandParam = ((Collection<?>) parameters[paramIndex]).stream() .map(Object::toString).collect(Collectors.toList()); List<String> expandedParamKeys = new ArrayList<>(); for (int arrayIndex = 0; arrayIndex < expandParam.size(); arrayIndex++) { expandedParamKeys.add("@" + paramName + arrayIndex); sqlParameters.add(new SqlParameter("@" + paramName + arrayIndex, toCosmosDbValue(expandParam.get(arrayIndex)))); } expandedQuery = expandedQuery.replaceAll("@" + queryParam.getName().orElse(""), String.join(",", expandedParamKeys)); } else { if (!Pageable.class.isAssignableFrom(queryParam.getType()) && !Sort.class.isAssignableFrom(queryParam.getType())) { 
sqlParameters.add(new SqlParameter("@" + queryParam.getName().orElse(""), toCosmosDbValue(parameters[paramIndex]))); } } } } SqlQuerySpec querySpec = new SqlQuerySpec(expandedQuery, sqlParameters); if (isPageQuery()) { return this.operations.runPaginationQuery(querySpec, accessor.getPageable(), processor.getReturnedType().getDomainType(), processor.getReturnedType().getReturnedType()); } else if (isSliceQuery()) { return this.operations.runSliceQuery( querySpec, accessor.getPageable(), processor.getReturnedType().getDomainType(), processor.getReturnedType().getReturnedType()); } else if (isCountQuery()) { final String container = ((CosmosEntityMetadata<?>) getQueryMethod().getEntityInformation()).getContainerName(); return this.operations.count(querySpec, container); } else { return this.operations.runQuery(querySpec, accessor.getSort(), processor.getReturnedType().getDomainType(), processor.getReturnedType().getReturnedType()); } }
String modifiedExpandedQuery = expandedQuery.toLowerCase().replaceAll("\\s+", "");
public Object execute(final Object[] parameters) { final CosmosParameterAccessor accessor = new CosmosParameterParameterAccessor(getQueryMethod(), parameters); final ResultProcessor processor = getQueryMethod().getResultProcessor().withDynamicProjection(accessor); /* * The below for loop is used to handle two unique use cases with annotated queries. * Annotated queries are defined as strings so there is no way to know the clauses * being used in advance. Some clauses expect an array and others expect just a list of values. * (1) IN clauses expect the syntax 'IN (a, b, c) which is generated from the if statement. * (2) ARRAY_CONTAINS expects the syntax 'ARRAY_CONTAINS(["a", "b", "c"], table.param) which * is generated from the else statement. */ String expandedQuery = query; List<SqlParameter> sqlParameters = new ArrayList<>(); String modifiedExpandedQuery = expandedQuery.toLowerCase(Locale.US).replaceAll("\\s+", ""); for (int paramIndex = 0; paramIndex < parameters.length; paramIndex++) { Parameter queryParam = getQueryMethod().getParameters().getParameter(paramIndex); String paramName = queryParam.getName().orElse(""); if (!("").equals(paramName)) { String inParamCheck = "array_contains(@" + paramName.toLowerCase(Locale.US); if (parameters[paramIndex] instanceof Collection && !modifiedExpandedQuery.contains(inParamCheck)) { List<String> expandParam = ((Collection<?>) parameters[paramIndex]).stream() .map(Object::toString).collect(Collectors.toList()); List<String> expandedParamKeys = new ArrayList<>(); for (int arrayIndex = 0; arrayIndex < expandParam.size(); arrayIndex++) { expandedParamKeys.add("@" + paramName + arrayIndex); sqlParameters.add(new SqlParameter("@" + paramName + arrayIndex, toCosmosDbValue(expandParam.get(arrayIndex)))); } expandedQuery = expandedQuery.replaceAll("@" + queryParam.getName().orElse(""), String.join(",", expandedParamKeys)); } else { if (!Pageable.class.isAssignableFrom(queryParam.getType()) && 
!Sort.class.isAssignableFrom(queryParam.getType())) { sqlParameters.add(new SqlParameter("@" + queryParam.getName().orElse(""), toCosmosDbValue(parameters[paramIndex]))); } } } } SqlQuerySpec querySpec = new SqlQuerySpec(expandedQuery, sqlParameters); if (isPageQuery()) { return this.operations.runPaginationQuery(querySpec, accessor.getPageable(), processor.getReturnedType().getDomainType(), processor.getReturnedType().getReturnedType()); } else if (isSliceQuery()) { return this.operations.runSliceQuery( querySpec, accessor.getPageable(), processor.getReturnedType().getDomainType(), processor.getReturnedType().getReturnedType()); } else if (isCountQuery()) { final String container = ((CosmosEntityMetadata<?>) getQueryMethod().getEntityInformation()).getContainerName(); return this.operations.count(querySpec, container); } else { return this.operations.runQuery(querySpec, accessor.getSort(), processor.getReturnedType().getDomainType(), processor.getReturnedType().getReturnedType()); } }
class StringBasedCosmosQuery extends AbstractCosmosQuery { private static final Pattern COUNT_QUERY_PATTERN = Pattern.compile("^\\s*select\\s+value\\s+count.*", Pattern.CASE_INSENSITIVE); private final String query; /** * Constructor * @param queryMethod the CosmosQueryMethod * @param dbOperations the CosmosOperations */ public StringBasedCosmosQuery(CosmosQueryMethod queryMethod, CosmosOperations dbOperations) { super(queryMethod, dbOperations); this.query = queryMethod.getQueryAnnotation(); } @Override protected CosmosQuery createQuery(CosmosParameterAccessor accessor) { return null; } @Override @Override protected boolean isDeleteQuery() { return false; } @Override protected boolean isExistsQuery() { return false; } @Override protected boolean isCountQuery() { return isCountQuery(query, getQueryMethod().getReturnedObjectType()); } static boolean isCountQuery(String query, Class<?> returnedType) { if (isCountQueryReturnType(returnedType)) { return COUNT_QUERY_PATTERN.matcher(query).matches(); } else { return false; } } private static boolean isCountQueryReturnType(Class<?> returnedType) { return returnedType == Long.class || returnedType == long.class || returnedType == Integer.class || returnedType == int.class; } }
class StringBasedCosmosQuery extends AbstractCosmosQuery { private static final Pattern COUNT_QUERY_PATTERN = Pattern.compile("^\\s*select\\s+value\\s+count.*", Pattern.CASE_INSENSITIVE); private final String query; /** * Constructor * @param queryMethod the CosmosQueryMethod * @param dbOperations the CosmosOperations */ public StringBasedCosmosQuery(CosmosQueryMethod queryMethod, CosmosOperations dbOperations) { super(queryMethod, dbOperations); this.query = queryMethod.getQueryAnnotation(); } @Override protected CosmosQuery createQuery(CosmosParameterAccessor accessor) { return null; } @Override @Override protected boolean isDeleteQuery() { return false; } @Override protected boolean isExistsQuery() { return false; } @Override protected boolean isCountQuery() { return isCountQuery(query, getQueryMethod().getReturnedObjectType()); } static boolean isCountQuery(String query, Class<?> returnedType) { if (isCountQueryReturnType(returnedType)) { return COUNT_QUERY_PATTERN.matcher(query).matches(); } else { return false; } } private static boolean isCountQueryReturnType(Class<?> returnedType) { return returnedType == Long.class || returnedType == long.class || returnedType == Integer.class || returnedType == int.class; } }
This is not `async`; please fix it in another PR.
private Mono<Void> doSend(String destination, List<EventData> events, PartitionSupplier partitionSupplier) { EventHubProducerAsyncClient producer = producerFactory.createProducer(destination); CreateBatchOptions options = buildCreateBatchOptions(partitionSupplier); AtomicReference<EventDataBatch> currentBatch = new AtomicReference<>( producer.createBatch(options).block()); Flux.fromIterable(events).flatMap(event -> { final EventDataBatch batch = currentBatch.get(); try { if (batch.tryAdd(event)) { return Mono.empty(); } else { LOGGER.warn("EventDataBatch is full in the collect process or the first event is " + "too large to fit in an empty batch! Max size: {}", batch.getMaxSizeInBytes()); } } catch (AmqpException e) { LOGGER.error("Event is larger than maximum allowed size.", e); return Mono.empty(); } return Mono.when( producer.send(batch), producer.createBatch(options).map(newBatch -> { currentBatch.set(newBatch); try { if (!newBatch.tryAdd(event)) { LOGGER.error("Event was too large to fit in an empty batch. Max size:{} ", newBatch.getMaxSizeInBytes()); } } catch (AmqpException e) { LOGGER.error("Event was too large to fit in an empty batch. Max size:{}", newBatch.getMaxSizeInBytes(), e); } return newBatch; })); }) .then() .block(); final EventDataBatch batch = currentBatch.getAndSet(null); return producer.send(batch) .doFinally(s -> producer.close()); }
.block();
private Mono<Void> doSend(String destination, List<EventData> events, PartitionSupplier partitionSupplier) { EventHubProducerAsyncClient producer = producerFactory.createProducer(destination); CreateBatchOptions options = buildCreateBatchOptions(partitionSupplier); AtomicReference<EventDataBatch> currentBatch = new AtomicReference<>( producer.createBatch(options).block()); Flux.fromIterable(events).flatMap(event -> { final EventDataBatch batch = currentBatch.get(); try { if (batch.tryAdd(event)) { return Mono.empty(); } else { LOGGER.warn("EventDataBatch is full in the collect process or the first event is " + "too large to fit in an empty batch! Max size: {}", batch.getMaxSizeInBytes()); } } catch (AmqpException e) { LOGGER.error("Event is larger than maximum allowed size.", e); return Mono.empty(); } return Mono.when( producer.send(batch), producer.createBatch(options).map(newBatch -> { currentBatch.set(newBatch); try { if (!newBatch.tryAdd(event)) { LOGGER.error("Event was too large to fit in an empty batch. Max size:{} ", newBatch.getMaxSizeInBytes()); } } catch (AmqpException e) { LOGGER.error("Event was too large to fit in an empty batch. Max size:{}", newBatch.getMaxSizeInBytes(), e); } return newBatch; })); }) .then() .block(); final EventDataBatch batch = currentBatch.getAndSet(null); return producer.send(batch) .doFinally(s -> producer.close()); }
class EventHubsTemplate implements SendOperation { private static final Logger LOGGER = LoggerFactory.getLogger(EventHubsTemplate.class); private final EventHubsProducerFactory producerFactory; private EventHubsMessageConverter messageConverter = new EventHubsMessageConverter(); /** * Create an instance using the supplied producer factory. * @param producerFactory the producer factory. */ public EventHubsTemplate(EventHubsProducerFactory producerFactory) { this.producerFactory = producerFactory; } /** * Send a {@link Collection}&lt;{@link Message}&gt; to the given destination with a given partition supplier asynchronously. * @param destination destination * @param messages message set * @param partitionSupplier partition supplier * @param <T> payload type in message * @return Mono Void */ public <T> Mono<Void> sendAsync(String destination, Collection<Message<T>> messages, PartitionSupplier partitionSupplier) { List<EventData> eventData = messages.stream() .map(m -> messageConverter.fromMessage(m, EventData.class)) .collect(Collectors.toList()); return doSend(destination, eventData, partitionSupplier); } /** * Send a {@link Collection}&lt;{@link Message}&gt; to the given destination asynchronously. * @param destination destination * @param messages message set * @param <T> payload type in message * @return Mono Void */ public <T> Mono<Void> sendAsync(String destination, Collection<Message<T>> messages) { return sendAsync(destination, messages, null); } /** * Send a {@link Collection}&lt;{@link Message}&gt; to the given destination with a given partition supplier synchronously. 
* @param destination destination * @param messages message set * @param partitionSupplier partition supplier * @param <T> payload type in message */ public <T> void send(String destination, Collection<Message<T>> messages, PartitionSupplier partitionSupplier) { sendAsync(destination, messages, partitionSupplier).block(); } /** * Send a {@link Collection}&lt;{@link Message}&gt; to the given destination synchronously. * @param destination destination * @param messages message set * @param <T> payload type in message */ public <T> void send(String destination, Collection<Message<T>> messages) { send(destination, messages, null); } @Override public <T> Mono<Void> sendAsync(String destination, Message<T> message) { return sendAsync(destination, Collections.singleton(message), buildPartitionSupplier(message)); } private CreateBatchOptions buildCreateBatchOptions(PartitionSupplier partitionSupplier) { return new CreateBatchOptions() .setPartitionId(partitionSupplier != null ? partitionSupplier.getPartitionId() : null) .setPartitionKey(partitionSupplier != null ? partitionSupplier.getPartitionKey() : null); } <T> PartitionSupplier buildPartitionSupplier(Message<T> message) { PartitionSupplier partitionSupplier = new PartitionSupplier(); Optional.ofNullable(message.getHeaders().get(PARTITION_KEY)).ifPresent(s -> partitionSupplier.setPartitionKey(String.valueOf(s))); Optional.ofNullable(message.getHeaders().get(PARTITION_ID)).ifPresent(s -> partitionSupplier.setPartitionId(String.valueOf(s))); return partitionSupplier; } /** * Set the message converter. * @param messageConverter the message converter. */ public void setMessageConverter(EventHubsMessageConverter messageConverter) { this.messageConverter = messageConverter; } }
class EventHubsTemplate implements SendOperation { private static final Logger LOGGER = LoggerFactory.getLogger(EventHubsTemplate.class); private final EventHubsProducerFactory producerFactory; private EventHubsMessageConverter messageConverter = new EventHubsMessageConverter(); /** * Create an instance using the supplied producer factory. * @param producerFactory the producer factory. */ public EventHubsTemplate(EventHubsProducerFactory producerFactory) { this.producerFactory = producerFactory; } /** * Send a {@link Collection}&lt;{@link Message}&gt; to the given destination with a given partition supplier asynchronously. * @param destination destination * @param messages message set * @param partitionSupplier partition supplier * @param <T> payload type in message * @return Mono Void */ public <T> Mono<Void> sendAsync(String destination, Collection<Message<T>> messages, PartitionSupplier partitionSupplier) { List<EventData> eventData = messages.stream() .map(m -> messageConverter.fromMessage(m, EventData.class)) .collect(Collectors.toList()); return doSend(destination, eventData, partitionSupplier); } /** * Send a {@link Collection}&lt;{@link Message}&gt; to the given destination asynchronously. * @param destination destination * @param messages message set * @param <T> payload type in message * @return Mono Void */ public <T> Mono<Void> sendAsync(String destination, Collection<Message<T>> messages) { return sendAsync(destination, messages, null); } /** * Send a {@link Collection}&lt;{@link Message}&gt; to the given destination with a given partition supplier synchronously. 
* @param destination destination * @param messages message set * @param partitionSupplier partition supplier * @param <T> payload type in message */ public <T> void send(String destination, Collection<Message<T>> messages, PartitionSupplier partitionSupplier) { sendAsync(destination, messages, partitionSupplier).block(); } /** * Send a {@link Collection}&lt;{@link Message}&gt; to the given destination synchronously. * @param destination destination * @param messages message set * @param <T> payload type in message */ public <T> void send(String destination, Collection<Message<T>> messages) { send(destination, messages, null); } @Override public <T> Mono<Void> sendAsync(String destination, Message<T> message) { return sendAsync(destination, Collections.singleton(message), buildPartitionSupplier(message)); } private CreateBatchOptions buildCreateBatchOptions(PartitionSupplier partitionSupplier) { return new CreateBatchOptions() .setPartitionId(partitionSupplier != null ? partitionSupplier.getPartitionId() : null) .setPartitionKey(partitionSupplier != null ? partitionSupplier.getPartitionKey() : null); } <T> PartitionSupplier buildPartitionSupplier(Message<T> message) { PartitionSupplier partitionSupplier = new PartitionSupplier(); Optional.ofNullable(message.getHeaders().get(PARTITION_KEY)).ifPresent(s -> partitionSupplier.setPartitionKey(String.valueOf(s))); Optional.ofNullable(message.getHeaders().get(PARTITION_ID)).ifPresent(s -> partitionSupplier.setPartitionId(String.valueOf(s))); return partitionSupplier; } /** * Set the message converter. * @param messageConverter the message converter. */ public void setMessageConverter(EventHubsMessageConverter messageConverter) { this.messageConverter = messageConverter; } }
Should we close `outputStream`?
static ByteBuffer serializeAsJsonSerializable(Object jsonSerializable) throws IOException { AccessibleByteArrayOutputStream outputStream = new AccessibleByteArrayOutputStream(); try (Closeable jsonWriter = JSON_WRITER_CREATOR.createJsonWriter(outputStream)) { JSON_WRITER_WRITE_JSON_SERIALIZABLE.writeJson(jsonWriter, jsonSerializable); } return ByteBuffer.wrap(outputStream.toByteArray(), 0, outputStream.count()); }
return ByteBuffer.wrap(outputStream.toByteArray(), 0, outputStream.count());
static ByteBuffer serializeAsJsonSerializable(Object jsonSerializable) throws IOException { try (AccessibleByteArrayOutputStream outputStream = new AccessibleByteArrayOutputStream(); Closeable jsonWriter = JSON_WRITER_CREATOR.createJsonWriter(outputStream)) { JSON_WRITER_WRITE_JSON_SERIALIZABLE.writeJson(jsonWriter, jsonSerializable); JSON_WRITER_FLUSH.flush(jsonWriter); return ByteBuffer.wrap(outputStream.toByteArray(), 0, outputStream.count()); } }
class ReflectionSerializable { private static final ClientLogger LOGGER = new ClientLogger(ReflectionSerializable.class); private static final Class<?> JSON_SERIALIZABLE; private static final Class<?> JSON_READER; private static final CreateJsonReader JSON_READER_CREATOR; private static final CreateJsonWriter JSON_WRITER_CREATOR; private static final JsonWriterWriteJson JSON_WRITER_WRITE_JSON_SERIALIZABLE; static final boolean JSON_SERIALIZABLE_SUPPORTED; private static final Map<Class<?>, MethodHandle> FROM_JSON_CACHE; private static final Class<?> XML_SERIALIZABLE; private static final Class<?> XML_READER; private static final CreateXmlReader XML_READER_CREATOR; private static final CreateXmlWriter XML_WRITER_CREATOR; private static final XmlWriterWriteStartDocument XML_WRITER_WRITE_XML_START_DOCUMENT; private static final XmlWriterWriteXml XML_WRITER_WRITE_XML_SERIALIZABLE; static final boolean XML_SERIALIZABLE_SUPPORTED; private static final Map<Class<?>, MethodHandle> FROM_XML_CACHE; static { MethodHandles.Lookup defaultLookup = MethodHandles.lookup(); Class<?> jsonSerializable = null; Class<?> jsonReader = null; CreateJsonReader jsonReaderCreator = null; CreateJsonWriter jsonWriterCreator = null; JsonWriterWriteJson jsonWriterWriteJsonSerializable = null; boolean jsonSerializableSupported = false; try { jsonSerializable = Class.forName("com.azure.json.JsonSerializable"); jsonReader = Class.forName("com.azure.json.JsonReader"); Class<?> jsonProviders = Class.forName("com.azure.json.JsonProviders"); MethodHandles.Lookup lookup = ReflectionUtils.getLookupToUse(jsonProviders); MethodHandle handle = lookup.unreflect(jsonProviders.getDeclaredMethod("createReader", byte[].class)); jsonReaderCreator = (CreateJsonReader) LambdaMetafactory.metafactory(defaultLookup, "createJsonReader", methodType(CreateJsonReader.class), methodType(Closeable.class, byte[].class), handle, handle.type()) .getTarget() .invoke(); handle = 
lookup.unreflect(jsonProviders.getDeclaredMethod("createWriter", OutputStream.class)); jsonWriterCreator = (CreateJsonWriter) LambdaMetafactory.metafactory(defaultLookup, "createJsonWriter", methodType(CreateJsonWriter.class), methodType(Closeable.class, OutputStream.class), handle, handle.type()) .getTarget() .invoke(); Class<?> jsonWriter = Class.forName("com.azure.json.JsonWriter"); handle = lookup.unreflect(jsonWriter.getDeclaredMethod("writeJson", jsonSerializable)); jsonWriterWriteJsonSerializable = (JsonWriterWriteJson) LambdaMetafactory.metafactory(defaultLookup, "writeJson", methodType(JsonWriterWriteJson.class), methodType(Object.class, Object.class, Object.class), handle, handle.type()) .getTarget() .invoke(); jsonSerializableSupported = true; } catch (Throwable e) { if (e instanceof LinkageError || e instanceof Exception) { LOGGER.log(LogLevel.VERBOSE, () -> "JsonSerializable serialization and deserialization isn't " + "supported. If it is required add a dependency of 'com.azure:azure-json', or another " + "dependencies which include 'com.azure:azure-json' as a transitive dependency. If your " + "application runs as expected this informational message can be ignored."); } else { throw (Error) e; } } JSON_SERIALIZABLE = jsonSerializable; JSON_READER = jsonReader; JSON_READER_CREATOR = jsonReaderCreator; JSON_WRITER_CREATOR = jsonWriterCreator; JSON_WRITER_WRITE_JSON_SERIALIZABLE = jsonWriterWriteJsonSerializable; JSON_SERIALIZABLE_SUPPORTED = jsonSerializableSupported; FROM_JSON_CACHE = JSON_SERIALIZABLE_SUPPORTED ? 
new ConcurrentHashMap<>() : null; Class<?> xmlSerializable = null; Class<?> xmlReader = null; CreateXmlReader xmlReaderCreator = null; CreateXmlWriter xmlWriterCreator = null; XmlWriterWriteStartDocument xmlWriterWriteStartDocument = null; XmlWriterWriteXml xmlWriterWriteXmlSerializable = null; boolean xmlSerializableSupported = false; try { xmlSerializable = Class.forName("com.azure.xml.XmlSerializable"); xmlReader = Class.forName("com.azure.xml.XmlReader"); Class<?> xmlProviders = Class.forName("com.azure.xml.XmlProviders"); MethodHandles.Lookup lookup = ReflectionUtils.getLookupToUse(xmlProviders); MethodHandle handle = lookup.unreflect(xmlProviders.getDeclaredMethod("createReader", byte[].class)); xmlReaderCreator = (CreateXmlReader) LambdaMetafactory.metafactory(defaultLookup, "createXmlReader", methodType(CreateXmlReader.class), methodType(AutoCloseable.class, byte[].class), handle, handle.type()) .getTarget() .invoke(); handle = lookup.unreflect(xmlProviders.getDeclaredMethod("createWriter", OutputStream.class)); xmlWriterCreator = (CreateXmlWriter) LambdaMetafactory.metafactory(defaultLookup, "createXmlWriter", methodType(CreateXmlWriter.class), methodType(AutoCloseable.class, OutputStream.class), handle, handle.type()) .getTarget() .invoke(); Class<?> xmlWriter = Class.forName("com.azure.xml.XmlWriter"); handle = lookup.unreflect(xmlWriter.getDeclaredMethod("writeStartDocument")); xmlWriterWriteStartDocument = (XmlWriterWriteStartDocument) LambdaMetafactory.metafactory(defaultLookup, "writeStartDocument", methodType(XmlWriterWriteStartDocument.class), methodType(Object.class, Object.class), handle, handle.type()) .getTarget() .invoke(); handle = lookup.unreflect(xmlWriter.getDeclaredMethod("writeXml", xmlSerializable)); xmlWriterWriteXmlSerializable = (XmlWriterWriteXml) LambdaMetafactory.metafactory(defaultLookup, "writeXml", methodType(XmlWriterWriteXml.class), methodType(Object.class, Object.class, Object.class), handle, handle.type()) .getTarget() 
.invoke(); xmlSerializableSupported = true; } catch (Throwable e) { if (e instanceof LinkageError || e instanceof Exception) { LOGGER.log(LogLevel.VERBOSE, () -> "XmlSerializable serialization and deserialization isn't supported. " + "If it is required add a dependency of 'com.azure:azure-xml', or another dependencies which " + "include 'com.azure:azure-xml' as a transitive dependency. If your application runs as expected " + "this informational message can be ignored."); } else { throw (Error) e; } } XML_SERIALIZABLE = xmlSerializable; XML_READER = xmlReader; XML_READER_CREATOR = xmlReaderCreator; XML_WRITER_CREATOR = xmlWriterCreator; XML_WRITER_WRITE_XML_START_DOCUMENT = xmlWriterWriteStartDocument; XML_WRITER_WRITE_XML_SERIALIZABLE = xmlWriterWriteXmlSerializable; XML_SERIALIZABLE_SUPPORTED = xmlSerializableSupported; FROM_XML_CACHE = XML_SERIALIZABLE_SUPPORTED ? new ConcurrentHashMap<>() : null; } /** * Whether {@code JsonSerializable} is supported and the {@code bodyContentClass} is an instance of it. * * @param bodyContentClass The body content class. * @return Whether {@code bodyContentClass} can be used as {@code JsonSerializable}. */ public static boolean supportsJsonSerializable(Class<?> bodyContentClass) { return JSON_SERIALIZABLE_SUPPORTED && JSON_SERIALIZABLE.isAssignableFrom(bodyContentClass); } /** * Serializes the {@code jsonSerializable} as an instance of {@code JsonSerializable}. * * @param jsonSerializable The {@code JsonSerializable} body content. * @return The {@link ByteBuffer} representing the serialized {@code jsonSerializable}. * @throws IOException If an error occurs during serialization. */ /** * Deserializes the {@code json} as an instance of {@code JsonSerializable}. * * @param jsonSerializable The {@code JsonSerializable} represented by the {@code json}. * @param json The JSON being deserialized. * @return An instance of {@code jsonSerializable} based on the {@code json}. 
* @throws IOException If an error occurs during deserialization. */ public static Object deserializeAsJsonSerializable(Class<?> jsonSerializable, byte[] json) throws IOException { if (FROM_JSON_CACHE.size() >= 10000) { FROM_JSON_CACHE.clear(); } MethodHandle readJson = FROM_JSON_CACHE.computeIfAbsent(jsonSerializable, clazz -> { try { MethodHandles.Lookup lookup = ReflectionUtils.getLookupToUse(clazz); return lookup.unreflect(jsonSerializable.getDeclaredMethod("fromJson", JSON_READER)); } catch (Exception e) { throw LOGGER.logExceptionAsError(new IllegalStateException(e)); } }); try (Closeable jsonReader = JSON_READER_CREATOR.createJsonReader(json)) { return readJson.invoke(jsonReader); } catch (Throwable e) { if (e instanceof IOException) { throw (IOException) e; } else if (e instanceof Exception) { throw new IOException(e); } else { throw (Error) e; } } } @FunctionalInterface private interface CreateJsonWriter { Closeable createJsonWriter(OutputStream outputStream) throws IOException; } @FunctionalInterface private interface JsonWriterWriteJson { Object writeJson(Object jsonWriter, Object jsonSerializable) throws IOException; } @FunctionalInterface private interface CreateJsonReader { Closeable createJsonReader(byte[] bytes) throws IOException; } /** * Whether {@code XmlSerializable} is supported and the {@code bodyContentClass} is an instance of it. * * @param bodyContentClass The body content class. * @return Whether {@code bodyContentClass} can be used as {@code XmlSerializable}. */ public static boolean supportsXmlSerializable(Class<?> bodyContentClass) { return XML_SERIALIZABLE_SUPPORTED && XML_SERIALIZABLE.isAssignableFrom(bodyContentClass); } /** * Serializes the {@code bodyContent} as an instance of {@code XmlSerializable}. * * @param bodyContent The {@code XmlSerializable} body content. * @return The {@link ByteBuffer} representing the serialized {@code bodyContent}. * @throws IOException If the XmlWriter fails to close properly. 
*/ static ByteBuffer serializeAsXmlSerializable(Object bodyContent) throws IOException { AccessibleByteArrayOutputStream outputStream = new AccessibleByteArrayOutputStream(); try (AutoCloseable xmlWriter = XML_WRITER_CREATOR.createXmlWriter(outputStream)) { XML_WRITER_WRITE_XML_START_DOCUMENT.writeStartDocument(xmlWriter); XML_WRITER_WRITE_XML_SERIALIZABLE.writeXml(xmlWriter, bodyContent); } catch (Exception e) { throw new IOException(e); } return ByteBuffer.wrap(outputStream.toByteArray(), 0, outputStream.count()); } /** * Deserializes the {@code xml} as an instance of {@code XmlSerializable}. * * @param xmlSerializable The {@code XmlSerializable} represented by the {@code xml}. * @param xml The XML being deserialized. * @return An instance of {@code xmlSerializable} based on the {@code xml}. * @throws IOException If the XmlReader fails to close properly. */ public static Object deserializeAsXmlSerializable(Class<?> xmlSerializable, byte[] xml) throws IOException { if (FROM_XML_CACHE.size() >= 10000) { FROM_XML_CACHE.clear(); } MethodHandle readXml = FROM_XML_CACHE.computeIfAbsent(xmlSerializable, clazz -> { try { MethodHandles.Lookup lookup = ReflectionUtils.getLookupToUse(clazz); return lookup.unreflect(xmlSerializable.getMethod("fromXml", XML_READER)); } catch (Exception e) { throw LOGGER.logExceptionAsError(new IllegalStateException(e)); } }); try (AutoCloseable xmlReader = XML_READER_CREATOR.createXmlReader(xml)) { return readXml.invoke(xmlReader); } catch (Throwable e) { if (e instanceof IOException) { throw (IOException) e; } else if (e instanceof Exception) { throw new IOException(e); } else { throw (Error) e; } } } @FunctionalInterface private interface CreateXmlWriter { AutoCloseable createXmlWriter(OutputStream outputStream) throws XMLStreamException; } @FunctionalInterface private interface XmlWriterWriteStartDocument { Object writeStartDocument(Object xmlWriter) throws XMLStreamException; } @FunctionalInterface private interface XmlWriterWriteXml { 
Object writeXml(Object xmlWriter, Object jsonSerializable) throws XMLStreamException; } @FunctionalInterface private interface CreateXmlReader { AutoCloseable createXmlReader(byte[] bytes) throws XMLStreamException; } private ReflectionSerializable() { } }
class ReflectionSerializable { private static final ClientLogger LOGGER = new ClientLogger(ReflectionSerializable.class); private static final Class<?> JSON_SERIALIZABLE; private static final Class<?> JSON_READER; private static final CreateJsonReader JSON_READER_CREATOR; private static final CreateJsonWriter JSON_WRITER_CREATOR; private static final JsonWriterWriteJson JSON_WRITER_WRITE_JSON_SERIALIZABLE; private static final JsonWriterFlush JSON_WRITER_FLUSH; static final boolean JSON_SERIALIZABLE_SUPPORTED; private static final Map<Class<?>, MethodHandle> FROM_JSON_CACHE; private static final Class<?> XML_SERIALIZABLE; private static final Class<?> XML_READER; private static final CreateXmlReader XML_READER_CREATOR; private static final CreateXmlWriter XML_WRITER_CREATOR; private static final XmlWriterWriteStartDocument XML_WRITER_WRITE_XML_START_DOCUMENT; private static final XmlWriterWriteXml XML_WRITER_WRITE_XML_SERIALIZABLE; private static final XmlWriterFlush XML_WRITER_FLUSH; static final boolean XML_SERIALIZABLE_SUPPORTED; private static final Map<Class<?>, MethodHandle> FROM_XML_CACHE; static { MethodHandles.Lookup defaultLookup = MethodHandles.lookup(); Class<?> jsonSerializable = null; Class<?> jsonReader = null; CreateJsonReader jsonReaderCreator = null; CreateJsonWriter jsonWriterCreator = null; JsonWriterWriteJson jsonWriterWriteJsonSerializable = null; JsonWriterFlush jsonWriterFlush = null; boolean jsonSerializableSupported = false; try { jsonSerializable = Class.forName("com.azure.json.JsonSerializable"); jsonReader = Class.forName("com.azure.json.JsonReader"); Class<?> jsonProviders = Class.forName("com.azure.json.JsonProviders"); MethodHandles.Lookup lookup = ReflectionUtils.getLookupToUse(jsonProviders); jsonReaderCreator = createMetaFactory(jsonProviders.getDeclaredMethod("createReader", byte[].class), lookup, CreateJsonReader.class, methodType(Closeable.class, byte[].class), defaultLookup); jsonWriterCreator = 
createMetaFactory(jsonProviders.getDeclaredMethod("createWriter", OutputStream.class), lookup, CreateJsonWriter.class, methodType(Closeable.class, OutputStream.class), defaultLookup); Class<?> jsonWriter = Class.forName("com.azure.json.JsonWriter"); jsonWriterWriteJsonSerializable = createMetaFactory( jsonWriter.getDeclaredMethod("writeJson", jsonSerializable), lookup, JsonWriterWriteJson.class, methodType(Object.class, Object.class, Object.class), defaultLookup); jsonWriterFlush = createMetaFactory(jsonWriter.getDeclaredMethod("flush"), lookup, JsonWriterFlush.class, methodType(Object.class, Object.class), defaultLookup); jsonSerializableSupported = true; } catch (Throwable e) { if (e instanceof LinkageError || e instanceof Exception) { LOGGER.log(LogLevel.VERBOSE, () -> "JsonSerializable serialization and deserialization isn't " + "supported. If it is required add a dependency of 'com.azure:azure-json', or another " + "dependencies which include 'com.azure:azure-json' as a transitive dependency. If your " + "application runs as expected this informational message can be ignored."); } else { throw (Error) e; } } JSON_SERIALIZABLE = jsonSerializable; JSON_READER = jsonReader; JSON_READER_CREATOR = jsonReaderCreator; JSON_WRITER_CREATOR = jsonWriterCreator; JSON_WRITER_WRITE_JSON_SERIALIZABLE = jsonWriterWriteJsonSerializable; JSON_WRITER_FLUSH = jsonWriterFlush; JSON_SERIALIZABLE_SUPPORTED = jsonSerializableSupported; FROM_JSON_CACHE = JSON_SERIALIZABLE_SUPPORTED ? 
new ConcurrentHashMap<>() : null; Class<?> xmlSerializable = null; Class<?> xmlReader = null; CreateXmlReader xmlReaderCreator = null; CreateXmlWriter xmlWriterCreator = null; XmlWriterWriteStartDocument xmlWriterWriteStartDocument = null; XmlWriterWriteXml xmlWriterWriteXmlSerializable = null; XmlWriterFlush xmlWriterFlush = null; boolean xmlSerializableSupported = false; try { xmlSerializable = Class.forName("com.azure.xml.XmlSerializable"); xmlReader = Class.forName("com.azure.xml.XmlReader"); Class<?> xmlProviders = Class.forName("com.azure.xml.XmlProviders"); MethodHandles.Lookup lookup = ReflectionUtils.getLookupToUse(xmlProviders); xmlReaderCreator = createMetaFactory(xmlProviders.getDeclaredMethod("createReader", byte[].class), lookup, CreateXmlReader.class, methodType(AutoCloseable.class, byte[].class), defaultLookup); xmlWriterCreator = createMetaFactory(xmlProviders.getDeclaredMethod("createWriter", OutputStream.class), lookup, CreateXmlWriter.class, methodType(AutoCloseable.class, OutputStream.class), defaultLookup); Class<?> xmlWriter = Class.forName("com.azure.xml.XmlWriter"); xmlWriterWriteStartDocument = createMetaFactory(xmlWriter.getDeclaredMethod("writeStartDocument"), lookup, XmlWriterWriteStartDocument.class, methodType(Object.class, Object.class), defaultLookup); xmlWriterWriteXmlSerializable = createMetaFactory(xmlWriter.getDeclaredMethod("writeXml", xmlSerializable), lookup, XmlWriterWriteXml.class, methodType(Object.class, Object.class, Object.class), defaultLookup); xmlWriterFlush = createMetaFactory(xmlWriter.getDeclaredMethod("flush"), lookup, XmlWriterFlush.class, methodType(Object.class, Object.class), defaultLookup); xmlSerializableSupported = true; } catch (Throwable e) { if (e instanceof LinkageError || e instanceof Exception) { LOGGER.log(LogLevel.VERBOSE, () -> "XmlSerializable serialization and deserialization isn't supported. 
" + "If it is required add a dependency of 'com.azure:azure-xml', or another dependencies which " + "include 'com.azure:azure-xml' as a transitive dependency. If your application runs as expected " + "this informational message can be ignored."); } else { throw (Error) e; } } XML_SERIALIZABLE = xmlSerializable; XML_READER = xmlReader; XML_READER_CREATOR = xmlReaderCreator; XML_WRITER_CREATOR = xmlWriterCreator; XML_WRITER_WRITE_XML_START_DOCUMENT = xmlWriterWriteStartDocument; XML_WRITER_WRITE_XML_SERIALIZABLE = xmlWriterWriteXmlSerializable; XML_WRITER_FLUSH = xmlWriterFlush; XML_SERIALIZABLE_SUPPORTED = xmlSerializableSupported; FROM_XML_CACHE = XML_SERIALIZABLE_SUPPORTED ? new ConcurrentHashMap<>() : null; } @SuppressWarnings("unchecked") private static <T> T createMetaFactory(Method method, MethodHandles.Lookup unreflectLookup, Class<T> interfaceType, MethodType interfaceMethodType, MethodHandles.Lookup defaultLookup) throws Throwable { MethodHandle handle = unreflectLookup.unreflect(method); Method functionalMethod = interfaceType.getDeclaredMethods()[0]; return (T) LambdaMetafactory.metafactory(defaultLookup, functionalMethod.getName(), methodType(interfaceType), interfaceMethodType, handle, handle.type()) .getTarget() .invoke(); } /** * Whether {@code JsonSerializable} is supported and the {@code bodyContentClass} is an instance of it. * * @param bodyContentClass The body content class. * @return Whether {@code bodyContentClass} can be used as {@code JsonSerializable}. */ public static boolean supportsJsonSerializable(Class<?> bodyContentClass) { return JSON_SERIALIZABLE_SUPPORTED && JSON_SERIALIZABLE.isAssignableFrom(bodyContentClass); } /** * Serializes the {@code jsonSerializable} as an instance of {@code JsonSerializable}. * * @param jsonSerializable The {@code JsonSerializable} body content. * @return The {@link ByteBuffer} representing the serialized {@code jsonSerializable}. * @throws IOException If an error occurs during serialization. 
*/ /** * Deserializes the {@code json} as an instance of {@code JsonSerializable}. * * @param jsonSerializable The {@code JsonSerializable} represented by the {@code json}. * @param json The JSON being deserialized. * @return An instance of {@code jsonSerializable} based on the {@code json}. * @throws IOException If an error occurs during deserialization. */ public static Object deserializeAsJsonSerializable(Class<?> jsonSerializable, byte[] json) throws IOException { if (!JSON_SERIALIZABLE_SUPPORTED) { return null; } if (FROM_JSON_CACHE.size() >= 10000) { FROM_JSON_CACHE.clear(); } MethodHandle readJson = FROM_JSON_CACHE.computeIfAbsent(jsonSerializable, clazz -> { try { MethodHandles.Lookup lookup = ReflectionUtils.getLookupToUse(clazz); return lookup.unreflect(jsonSerializable.getDeclaredMethod("fromJson", JSON_READER)); } catch (Exception e) { throw LOGGER.logExceptionAsError(new IllegalStateException(e)); } }); try (Closeable jsonReader = JSON_READER_CREATOR.createJsonReader(json)) { return readJson.invoke(jsonReader); } catch (Throwable e) { if (e instanceof IOException) { throw (IOException) e; } else if (e instanceof Exception) { throw new IOException(e); } else { throw (Error) e; } } } @FunctionalInterface private interface CreateJsonWriter { Closeable createJsonWriter(OutputStream outputStream) throws IOException; } @FunctionalInterface private interface JsonWriterWriteJson { Object writeJson(Object jsonWriter, Object jsonSerializable) throws IOException; } @FunctionalInterface private interface JsonWriterFlush { Object flush(Object jsonWriter) throws IOException; } @FunctionalInterface private interface CreateJsonReader { Closeable createJsonReader(byte[] bytes) throws IOException; } /** * Whether {@code XmlSerializable} is supported and the {@code bodyContentClass} is an instance of it. * * @param bodyContentClass The body content class. * @return Whether {@code bodyContentClass} can be used as {@code XmlSerializable}. 
*/ public static boolean supportsXmlSerializable(Class<?> bodyContentClass) { return XML_SERIALIZABLE_SUPPORTED && XML_SERIALIZABLE.isAssignableFrom(bodyContentClass); } /** * Serializes the {@code bodyContent} as an instance of {@code XmlSerializable}. * * @param bodyContent The {@code XmlSerializable} body content. * @return The {@link ByteBuffer} representing the serialized {@code bodyContent}. * @throws IOException If the XmlWriter fails to close properly. */ static ByteBuffer serializeAsXmlSerializable(Object bodyContent) throws IOException { try (AccessibleByteArrayOutputStream outputStream = new AccessibleByteArrayOutputStream(); AutoCloseable xmlWriter = XML_WRITER_CREATOR.createXmlWriter(outputStream)) { XML_WRITER_WRITE_XML_START_DOCUMENT.writeStartDocument(xmlWriter); XML_WRITER_WRITE_XML_SERIALIZABLE.writeXml(xmlWriter, bodyContent); XML_WRITER_FLUSH.flush(xmlWriter); return ByteBuffer.wrap(outputStream.toByteArray(), 0, outputStream.count()); } catch (IOException ex) { throw ex; } catch (Exception ex) { throw new IOException(ex); } } /** * Deserializes the {@code xml} as an instance of {@code XmlSerializable}. * * @param xmlSerializable The {@code XmlSerializable} represented by the {@code xml}. * @param xml The XML being deserialized. * @return An instance of {@code xmlSerializable} based on the {@code xml}. * @throws IOException If the XmlReader fails to close properly. 
*/ public static Object deserializeAsXmlSerializable(Class<?> xmlSerializable, byte[] xml) throws IOException { if (!XML_SERIALIZABLE_SUPPORTED) { return null; } if (FROM_XML_CACHE.size() >= 10000) { FROM_XML_CACHE.clear(); } MethodHandle readXml = FROM_XML_CACHE.computeIfAbsent(xmlSerializable, clazz -> { try { MethodHandles.Lookup lookup = ReflectionUtils.getLookupToUse(clazz); return lookup.unreflect(xmlSerializable.getMethod("fromXml", XML_READER)); } catch (Exception e) { throw LOGGER.logExceptionAsError(new IllegalStateException(e)); } }); try (AutoCloseable xmlReader = XML_READER_CREATOR.createXmlReader(xml)) { return readXml.invoke(xmlReader); } catch (Throwable e) { if (e instanceof IOException) { throw (IOException) e; } else if (e instanceof Exception) { throw new IOException(e); } else { throw (Error) e; } } } @FunctionalInterface private interface CreateXmlWriter { AutoCloseable createXmlWriter(OutputStream outputStream) throws XMLStreamException; } @FunctionalInterface private interface XmlWriterWriteStartDocument { Object writeStartDocument(Object xmlWriter) throws XMLStreamException; } @FunctionalInterface private interface XmlWriterWriteXml { Object writeXml(Object xmlWriter, Object jsonSerializable) throws XMLStreamException; } @FunctionalInterface private interface XmlWriterFlush { Object flush(Object xmlWriter) throws XMLStreamException; } @FunctionalInterface private interface CreateXmlReader { AutoCloseable createXmlReader(byte[] bytes) throws XMLStreamException; } private ReflectionSerializable() { } }
I can update it to do so, closing `AccessibleByteArrayOutputStream` is like closing `ByteArrayOutputStream` and is a no-op but if we ever change the implementation of `OutputStream` used this could hold on to resources
static ByteBuffer serializeAsJsonSerializable(Object jsonSerializable) throws IOException { AccessibleByteArrayOutputStream outputStream = new AccessibleByteArrayOutputStream(); try (Closeable jsonWriter = JSON_WRITER_CREATOR.createJsonWriter(outputStream)) { JSON_WRITER_WRITE_JSON_SERIALIZABLE.writeJson(jsonWriter, jsonSerializable); } return ByteBuffer.wrap(outputStream.toByteArray(), 0, outputStream.count()); }
return ByteBuffer.wrap(outputStream.toByteArray(), 0, outputStream.count());
static ByteBuffer serializeAsJsonSerializable(Object jsonSerializable) throws IOException { try (AccessibleByteArrayOutputStream outputStream = new AccessibleByteArrayOutputStream(); Closeable jsonWriter = JSON_WRITER_CREATOR.createJsonWriter(outputStream)) { JSON_WRITER_WRITE_JSON_SERIALIZABLE.writeJson(jsonWriter, jsonSerializable); JSON_WRITER_FLUSH.flush(jsonWriter); return ByteBuffer.wrap(outputStream.toByteArray(), 0, outputStream.count()); } }
class ReflectionSerializable { private static final ClientLogger LOGGER = new ClientLogger(ReflectionSerializable.class); private static final Class<?> JSON_SERIALIZABLE; private static final Class<?> JSON_READER; private static final CreateJsonReader JSON_READER_CREATOR; private static final CreateJsonWriter JSON_WRITER_CREATOR; private static final JsonWriterWriteJson JSON_WRITER_WRITE_JSON_SERIALIZABLE; static final boolean JSON_SERIALIZABLE_SUPPORTED; private static final Map<Class<?>, MethodHandle> FROM_JSON_CACHE; private static final Class<?> XML_SERIALIZABLE; private static final Class<?> XML_READER; private static final CreateXmlReader XML_READER_CREATOR; private static final CreateXmlWriter XML_WRITER_CREATOR; private static final XmlWriterWriteStartDocument XML_WRITER_WRITE_XML_START_DOCUMENT; private static final XmlWriterWriteXml XML_WRITER_WRITE_XML_SERIALIZABLE; static final boolean XML_SERIALIZABLE_SUPPORTED; private static final Map<Class<?>, MethodHandle> FROM_XML_CACHE; static { MethodHandles.Lookup defaultLookup = MethodHandles.lookup(); Class<?> jsonSerializable = null; Class<?> jsonReader = null; CreateJsonReader jsonReaderCreator = null; CreateJsonWriter jsonWriterCreator = null; JsonWriterWriteJson jsonWriterWriteJsonSerializable = null; boolean jsonSerializableSupported = false; try { jsonSerializable = Class.forName("com.azure.json.JsonSerializable"); jsonReader = Class.forName("com.azure.json.JsonReader"); Class<?> jsonProviders = Class.forName("com.azure.json.JsonProviders"); MethodHandles.Lookup lookup = ReflectionUtils.getLookupToUse(jsonProviders); MethodHandle handle = lookup.unreflect(jsonProviders.getDeclaredMethod("createReader", byte[].class)); jsonReaderCreator = (CreateJsonReader) LambdaMetafactory.metafactory(defaultLookup, "createJsonReader", methodType(CreateJsonReader.class), methodType(Closeable.class, byte[].class), handle, handle.type()) .getTarget() .invoke(); handle = 
lookup.unreflect(jsonProviders.getDeclaredMethod("createWriter", OutputStream.class)); jsonWriterCreator = (CreateJsonWriter) LambdaMetafactory.metafactory(defaultLookup, "createJsonWriter", methodType(CreateJsonWriter.class), methodType(Closeable.class, OutputStream.class), handle, handle.type()) .getTarget() .invoke(); Class<?> jsonWriter = Class.forName("com.azure.json.JsonWriter"); handle = lookup.unreflect(jsonWriter.getDeclaredMethod("writeJson", jsonSerializable)); jsonWriterWriteJsonSerializable = (JsonWriterWriteJson) LambdaMetafactory.metafactory(defaultLookup, "writeJson", methodType(JsonWriterWriteJson.class), methodType(Object.class, Object.class, Object.class), handle, handle.type()) .getTarget() .invoke(); jsonSerializableSupported = true; } catch (Throwable e) { if (e instanceof LinkageError || e instanceof Exception) { LOGGER.log(LogLevel.VERBOSE, () -> "JsonSerializable serialization and deserialization isn't " + "supported. If it is required add a dependency of 'com.azure:azure-json', or another " + "dependencies which include 'com.azure:azure-json' as a transitive dependency. If your " + "application runs as expected this informational message can be ignored."); } else { throw (Error) e; } } JSON_SERIALIZABLE = jsonSerializable; JSON_READER = jsonReader; JSON_READER_CREATOR = jsonReaderCreator; JSON_WRITER_CREATOR = jsonWriterCreator; JSON_WRITER_WRITE_JSON_SERIALIZABLE = jsonWriterWriteJsonSerializable; JSON_SERIALIZABLE_SUPPORTED = jsonSerializableSupported; FROM_JSON_CACHE = JSON_SERIALIZABLE_SUPPORTED ? 
new ConcurrentHashMap<>() : null; Class<?> xmlSerializable = null; Class<?> xmlReader = null; CreateXmlReader xmlReaderCreator = null; CreateXmlWriter xmlWriterCreator = null; XmlWriterWriteStartDocument xmlWriterWriteStartDocument = null; XmlWriterWriteXml xmlWriterWriteXmlSerializable = null; boolean xmlSerializableSupported = false; try { xmlSerializable = Class.forName("com.azure.xml.XmlSerializable"); xmlReader = Class.forName("com.azure.xml.XmlReader"); Class<?> xmlProviders = Class.forName("com.azure.xml.XmlProviders"); MethodHandles.Lookup lookup = ReflectionUtils.getLookupToUse(xmlProviders); MethodHandle handle = lookup.unreflect(xmlProviders.getDeclaredMethod("createReader", byte[].class)); xmlReaderCreator = (CreateXmlReader) LambdaMetafactory.metafactory(defaultLookup, "createXmlReader", methodType(CreateXmlReader.class), methodType(AutoCloseable.class, byte[].class), handle, handle.type()) .getTarget() .invoke(); handle = lookup.unreflect(xmlProviders.getDeclaredMethod("createWriter", OutputStream.class)); xmlWriterCreator = (CreateXmlWriter) LambdaMetafactory.metafactory(defaultLookup, "createXmlWriter", methodType(CreateXmlWriter.class), methodType(AutoCloseable.class, OutputStream.class), handle, handle.type()) .getTarget() .invoke(); Class<?> xmlWriter = Class.forName("com.azure.xml.XmlWriter"); handle = lookup.unreflect(xmlWriter.getDeclaredMethod("writeStartDocument")); xmlWriterWriteStartDocument = (XmlWriterWriteStartDocument) LambdaMetafactory.metafactory(defaultLookup, "writeStartDocument", methodType(XmlWriterWriteStartDocument.class), methodType(Object.class, Object.class), handle, handle.type()) .getTarget() .invoke(); handle = lookup.unreflect(xmlWriter.getDeclaredMethod("writeXml", xmlSerializable)); xmlWriterWriteXmlSerializable = (XmlWriterWriteXml) LambdaMetafactory.metafactory(defaultLookup, "writeXml", methodType(XmlWriterWriteXml.class), methodType(Object.class, Object.class, Object.class), handle, handle.type()) .getTarget() 
.invoke(); xmlSerializableSupported = true; } catch (Throwable e) { if (e instanceof LinkageError || e instanceof Exception) { LOGGER.log(LogLevel.VERBOSE, () -> "XmlSerializable serialization and deserialization isn't supported. " + "If it is required add a dependency of 'com.azure:azure-xml', or another dependencies which " + "include 'com.azure:azure-xml' as a transitive dependency. If your application runs as expected " + "this informational message can be ignored."); } else { throw (Error) e; } } XML_SERIALIZABLE = xmlSerializable; XML_READER = xmlReader; XML_READER_CREATOR = xmlReaderCreator; XML_WRITER_CREATOR = xmlWriterCreator; XML_WRITER_WRITE_XML_START_DOCUMENT = xmlWriterWriteStartDocument; XML_WRITER_WRITE_XML_SERIALIZABLE = xmlWriterWriteXmlSerializable; XML_SERIALIZABLE_SUPPORTED = xmlSerializableSupported; FROM_XML_CACHE = XML_SERIALIZABLE_SUPPORTED ? new ConcurrentHashMap<>() : null; } /** * Whether {@code JsonSerializable} is supported and the {@code bodyContentClass} is an instance of it. * * @param bodyContentClass The body content class. * @return Whether {@code bodyContentClass} can be used as {@code JsonSerializable}. */ public static boolean supportsJsonSerializable(Class<?> bodyContentClass) { return JSON_SERIALIZABLE_SUPPORTED && JSON_SERIALIZABLE.isAssignableFrom(bodyContentClass); } /** * Serializes the {@code jsonSerializable} as an instance of {@code JsonSerializable}. * * @param jsonSerializable The {@code JsonSerializable} body content. * @return The {@link ByteBuffer} representing the serialized {@code jsonSerializable}. * @throws IOException If an error occurs during serialization. */ /** * Deserializes the {@code json} as an instance of {@code JsonSerializable}. * * @param jsonSerializable The {@code JsonSerializable} represented by the {@code json}. * @param json The JSON being deserialized. * @return An instance of {@code jsonSerializable} based on the {@code json}. 
* @throws IOException If an error occurs during deserialization. */ public static Object deserializeAsJsonSerializable(Class<?> jsonSerializable, byte[] json) throws IOException { if (FROM_JSON_CACHE.size() >= 10000) { FROM_JSON_CACHE.clear(); } MethodHandle readJson = FROM_JSON_CACHE.computeIfAbsent(jsonSerializable, clazz -> { try { MethodHandles.Lookup lookup = ReflectionUtils.getLookupToUse(clazz); return lookup.unreflect(jsonSerializable.getDeclaredMethod("fromJson", JSON_READER)); } catch (Exception e) { throw LOGGER.logExceptionAsError(new IllegalStateException(e)); } }); try (Closeable jsonReader = JSON_READER_CREATOR.createJsonReader(json)) { return readJson.invoke(jsonReader); } catch (Throwable e) { if (e instanceof IOException) { throw (IOException) e; } else if (e instanceof Exception) { throw new IOException(e); } else { throw (Error) e; } } } @FunctionalInterface private interface CreateJsonWriter { Closeable createJsonWriter(OutputStream outputStream) throws IOException; } @FunctionalInterface private interface JsonWriterWriteJson { Object writeJson(Object jsonWriter, Object jsonSerializable) throws IOException; } @FunctionalInterface private interface CreateJsonReader { Closeable createJsonReader(byte[] bytes) throws IOException; } /** * Whether {@code XmlSerializable} is supported and the {@code bodyContentClass} is an instance of it. * * @param bodyContentClass The body content class. * @return Whether {@code bodyContentClass} can be used as {@code XmlSerializable}. */ public static boolean supportsXmlSerializable(Class<?> bodyContentClass) { return XML_SERIALIZABLE_SUPPORTED && XML_SERIALIZABLE.isAssignableFrom(bodyContentClass); } /** * Serializes the {@code bodyContent} as an instance of {@code XmlSerializable}. * * @param bodyContent The {@code XmlSerializable} body content. * @return The {@link ByteBuffer} representing the serialized {@code bodyContent}. * @throws IOException If the XmlWriter fails to close properly. 
*/ static ByteBuffer serializeAsXmlSerializable(Object bodyContent) throws IOException { AccessibleByteArrayOutputStream outputStream = new AccessibleByteArrayOutputStream(); try (AutoCloseable xmlWriter = XML_WRITER_CREATOR.createXmlWriter(outputStream)) { XML_WRITER_WRITE_XML_START_DOCUMENT.writeStartDocument(xmlWriter); XML_WRITER_WRITE_XML_SERIALIZABLE.writeXml(xmlWriter, bodyContent); } catch (Exception e) { throw new IOException(e); } return ByteBuffer.wrap(outputStream.toByteArray(), 0, outputStream.count()); } /** * Deserializes the {@code xml} as an instance of {@code XmlSerializable}. * * @param xmlSerializable The {@code XmlSerializable} represented by the {@code xml}. * @param xml The XML being deserialized. * @return An instance of {@code xmlSerializable} based on the {@code xml}. * @throws IOException If the XmlReader fails to close properly. */ public static Object deserializeAsXmlSerializable(Class<?> xmlSerializable, byte[] xml) throws IOException { if (FROM_XML_CACHE.size() >= 10000) { FROM_XML_CACHE.clear(); } MethodHandle readXml = FROM_XML_CACHE.computeIfAbsent(xmlSerializable, clazz -> { try { MethodHandles.Lookup lookup = ReflectionUtils.getLookupToUse(clazz); return lookup.unreflect(xmlSerializable.getMethod("fromXml", XML_READER)); } catch (Exception e) { throw LOGGER.logExceptionAsError(new IllegalStateException(e)); } }); try (AutoCloseable xmlReader = XML_READER_CREATOR.createXmlReader(xml)) { return readXml.invoke(xmlReader); } catch (Throwable e) { if (e instanceof IOException) { throw (IOException) e; } else if (e instanceof Exception) { throw new IOException(e); } else { throw (Error) e; } } } @FunctionalInterface private interface CreateXmlWriter { AutoCloseable createXmlWriter(OutputStream outputStream) throws XMLStreamException; } @FunctionalInterface private interface XmlWriterWriteStartDocument { Object writeStartDocument(Object xmlWriter) throws XMLStreamException; } @FunctionalInterface private interface XmlWriterWriteXml { 
Object writeXml(Object xmlWriter, Object jsonSerializable) throws XMLStreamException; } @FunctionalInterface private interface CreateXmlReader { AutoCloseable createXmlReader(byte[] bytes) throws XMLStreamException; } private ReflectionSerializable() { } }
// Reflection/invokedynamic bridge to the optional 'com.azure.json' and
// 'com.azure.xml' libraries: each capability is probed once in the static
// initializer and exposed through cached functional-interface stubs so this
// class works whether or not those dependencies are on the classpath.
class ReflectionSerializable {
    private static final ClientLogger LOGGER = new ClientLogger(ReflectionSerializable.class);

    // JSON support: resolved reflectively; all stay null when azure-json is absent.
    private static final Class<?> JSON_SERIALIZABLE;
    private static final Class<?> JSON_READER;
    private static final CreateJsonReader JSON_READER_CREATOR;
    private static final CreateJsonWriter JSON_WRITER_CREATOR;
    private static final JsonWriterWriteJson JSON_WRITER_WRITE_JSON_SERIALIZABLE;
    private static final JsonWriterFlush JSON_WRITER_FLUSH;
    static final boolean JSON_SERIALIZABLE_SUPPORTED;
    // Cache of per-type 'fromJson(JsonReader)' method handles.
    private static final Map<Class<?>, MethodHandle> FROM_JSON_CACHE;

    // XML support: mirrors the JSON fields for the optional azure-xml library.
    private static final Class<?> XML_SERIALIZABLE;
    private static final Class<?> XML_READER;
    private static final CreateXmlReader XML_READER_CREATOR;
    private static final CreateXmlWriter XML_WRITER_CREATOR;
    private static final XmlWriterWriteStartDocument XML_WRITER_WRITE_XML_START_DOCUMENT;
    private static final XmlWriterWriteXml XML_WRITER_WRITE_XML_SERIALIZABLE;
    private static final XmlWriterFlush XML_WRITER_FLUSH;
    static final boolean XML_SERIALIZABLE_SUPPORTED;
    private static final Map<Class<?>, MethodHandle> FROM_XML_CACHE;

    static {
        MethodHandles.Lookup defaultLookup = MethodHandles.lookup();
        Class<?> jsonSerializable = null;
        Class<?> jsonReader = null;
        CreateJsonReader jsonReaderCreator = null;
        CreateJsonWriter jsonWriterCreator = null;
        JsonWriterWriteJson jsonWriterWriteJsonSerializable = null;
        JsonWriterFlush jsonWriterFlush = null;
        boolean jsonSerializableSupported = false;
        try {
            // Any of these throws when azure-json is missing, leaving JSON support off.
            jsonSerializable = Class.forName("com.azure.json.JsonSerializable");
            jsonReader = Class.forName("com.azure.json.JsonReader");
            Class<?> jsonProviders = Class.forName("com.azure.json.JsonProviders");
            MethodHandles.Lookup lookup = ReflectionUtils.getLookupToUse(jsonProviders);
            jsonReaderCreator = createMetaFactory(jsonProviders.getDeclaredMethod("createReader", byte[].class),
                lookup, CreateJsonReader.class, methodType(Closeable.class, byte[].class), defaultLookup);
            jsonWriterCreator = createMetaFactory(jsonProviders.getDeclaredMethod("createWriter", OutputStream.class),
                lookup, CreateJsonWriter.class, methodType(Closeable.class, OutputStream.class), defaultLookup);
            Class<?> jsonWriter = Class.forName("com.azure.json.JsonWriter");
            jsonWriterWriteJsonSerializable = createMetaFactory(
                jsonWriter.getDeclaredMethod("writeJson", jsonSerializable), lookup, JsonWriterWriteJson.class,
                methodType(Object.class, Object.class, Object.class), defaultLookup);
            jsonWriterFlush = createMetaFactory(jsonWriter.getDeclaredMethod("flush"), lookup,
                JsonWriterFlush.class, methodType(Object.class, Object.class), defaultLookup);
            jsonSerializableSupported = true;
        } catch (Throwable e) {
            // Only swallow recoverable load failures; genuine VM errors still propagate.
            if (e instanceof LinkageError || e instanceof Exception) {
                LOGGER.log(LogLevel.VERBOSE, () -> "JsonSerializable serialization and deserialization isn't "
                    + "supported. If it is required add a dependency of 'com.azure:azure-json', or another "
                    + "dependencies which include 'com.azure:azure-json' as a transitive dependency. If your "
                    + "application runs as expected this informational message can be ignored.");
            } else {
                throw (Error) e;
            }
        }
        JSON_SERIALIZABLE = jsonSerializable;
        JSON_READER = jsonReader;
        JSON_READER_CREATOR = jsonReaderCreator;
        JSON_WRITER_CREATOR = jsonWriterCreator;
        JSON_WRITER_WRITE_JSON_SERIALIZABLE = jsonWriterWriteJsonSerializable;
        JSON_WRITER_FLUSH = jsonWriterFlush;
        JSON_SERIALIZABLE_SUPPORTED = jsonSerializableSupported;
        FROM_JSON_CACHE = JSON_SERIALIZABLE_SUPPORTED ? new ConcurrentHashMap<>() : null;

        Class<?> xmlSerializable = null;
        Class<?> xmlReader = null;
        CreateXmlReader xmlReaderCreator = null;
        CreateXmlWriter xmlWriterCreator = null;
        XmlWriterWriteStartDocument xmlWriterWriteStartDocument = null;
        XmlWriterWriteXml xmlWriterWriteXmlSerializable = null;
        XmlWriterFlush xmlWriterFlush = null;
        boolean xmlSerializableSupported = false;
        try {
            // Same probe pattern as JSON, against the azure-xml types.
            xmlSerializable = Class.forName("com.azure.xml.XmlSerializable");
            xmlReader = Class.forName("com.azure.xml.XmlReader");
            Class<?> xmlProviders = Class.forName("com.azure.xml.XmlProviders");
            MethodHandles.Lookup lookup = ReflectionUtils.getLookupToUse(xmlProviders);
            xmlReaderCreator = createMetaFactory(xmlProviders.getDeclaredMethod("createReader", byte[].class),
                lookup, CreateXmlReader.class, methodType(AutoCloseable.class, byte[].class), defaultLookup);
            xmlWriterCreator = createMetaFactory(xmlProviders.getDeclaredMethod("createWriter", OutputStream.class),
                lookup, CreateXmlWriter.class, methodType(AutoCloseable.class, OutputStream.class), defaultLookup);
            Class<?> xmlWriter = Class.forName("com.azure.xml.XmlWriter");
            xmlWriterWriteStartDocument = createMetaFactory(xmlWriter.getDeclaredMethod("writeStartDocument"),
                lookup, XmlWriterWriteStartDocument.class, methodType(Object.class, Object.class), defaultLookup);
            xmlWriterWriteXmlSerializable = createMetaFactory(xmlWriter.getDeclaredMethod("writeXml", xmlSerializable),
                lookup, XmlWriterWriteXml.class, methodType(Object.class, Object.class, Object.class), defaultLookup);
            xmlWriterFlush = createMetaFactory(xmlWriter.getDeclaredMethod("flush"), lookup, XmlWriterFlush.class,
                methodType(Object.class, Object.class), defaultLookup);
            xmlSerializableSupported = true;
        } catch (Throwable e) {
            if (e instanceof LinkageError || e instanceof Exception) {
                LOGGER.log(LogLevel.VERBOSE, () -> "XmlSerializable serialization and deserialization isn't supported. "
                    + "If it is required add a dependency of 'com.azure:azure-xml', or another dependencies which "
                    + "include 'com.azure:azure-xml' as a transitive dependency. If your application runs as expected "
                    + "this informational message can be ignored.");
            } else {
                throw (Error) e;
            }
        }
        XML_SERIALIZABLE = xmlSerializable;
        XML_READER = xmlReader;
        XML_READER_CREATOR = xmlReaderCreator;
        XML_WRITER_CREATOR = xmlWriterCreator;
        XML_WRITER_WRITE_XML_START_DOCUMENT = xmlWriterWriteStartDocument;
        XML_WRITER_WRITE_XML_SERIALIZABLE = xmlWriterWriteXmlSerializable;
        XML_WRITER_FLUSH = xmlWriterFlush;
        XML_SERIALIZABLE_SUPPORTED = xmlSerializableSupported;
        FROM_XML_CACHE = XML_SERIALIZABLE_SUPPORTED ? new ConcurrentHashMap<>() : null;
    }

    // Binds a reflected Method to a LambdaMetafactory-generated implementation
    // of the given functional interface (the interface's single declared method).
    @SuppressWarnings("unchecked")
    private static <T> T createMetaFactory(Method method, MethodHandles.Lookup unreflectLookup,
        Class<T> interfaceType, MethodType interfaceMethodType, MethodHandles.Lookup defaultLookup) throws Throwable {
        MethodHandle handle = unreflectLookup.unreflect(method);
        Method functionalMethod = interfaceType.getDeclaredMethods()[0];
        return (T) LambdaMetafactory.metafactory(defaultLookup, functionalMethod.getName(),
            methodType(interfaceType), interfaceMethodType, handle, handle.type())
            .getTarget()
            .invoke();
    }

    /**
     * Whether {@code JsonSerializable} is supported and the {@code bodyContentClass} is an instance of it.
     *
     * @param bodyContentClass The body content class.
     * @return Whether {@code bodyContentClass} can be used as {@code JsonSerializable}.
     */
    public static boolean supportsJsonSerializable(Class<?> bodyContentClass) {
        return JSON_SERIALIZABLE_SUPPORTED && JSON_SERIALIZABLE.isAssignableFrom(bodyContentClass);
    }

    /**
     * Serializes the {@code jsonSerializable} as an instance of {@code JsonSerializable}.
     *
     * @param jsonSerializable The {@code JsonSerializable} body content.
     * @return The {@link ByteBuffer} representing the serialized {@code jsonSerializable}.
     * @throws IOException If an error occurs during serialization.
     */
    // NOTE(review): the javadoc above has no matching method in this view —
    // presumably the JSON serialize method is defined elsewhere; confirm.
    /**
     * Deserializes the {@code json} as an instance of {@code JsonSerializable}.
     *
     * @param jsonSerializable The {@code JsonSerializable} represented by the {@code json}.
     * @param json The JSON being deserialized.
     * @return An instance of {@code jsonSerializable} based on the {@code json}, or null when JSON support is absent.
     * @throws IOException If an error occurs during deserialization.
     */
    public static Object deserializeAsJsonSerializable(Class<?> jsonSerializable, byte[] json) throws IOException {
        // Unsupported configuration degrades to null rather than throwing.
        if (!JSON_SERIALIZABLE_SUPPORTED) {
            return null;
        }
        // Crude bound on the handle cache to avoid unbounded growth.
        if (FROM_JSON_CACHE.size() >= 10000) {
            FROM_JSON_CACHE.clear();
        }
        MethodHandle readJson = FROM_JSON_CACHE.computeIfAbsent(jsonSerializable, clazz -> {
            try {
                MethodHandles.Lookup lookup = ReflectionUtils.getLookupToUse(clazz);
                return lookup.unreflect(jsonSerializable.getDeclaredMethod("fromJson", JSON_READER));
            } catch (Exception e) {
                throw LOGGER.logExceptionAsError(new IllegalStateException(e));
            }
        });

        try (Closeable jsonReader = JSON_READER_CREATOR.createJsonReader(json)) {
            return readJson.invoke(jsonReader);
        } catch (Throwable e) {
            // Normalize invocation failures to IOException; rethrow real Errors.
            if (e instanceof IOException) {
                throw (IOException) e;
            } else if (e instanceof Exception) {
                throw new IOException(e);
            } else {
                throw (Error) e;
            }
        }
    }

    // Local functional interfaces mirroring the shapes of the reflectively
    // bound azure-json APIs.
    @FunctionalInterface
    private interface CreateJsonWriter {
        Closeable createJsonWriter(OutputStream outputStream) throws IOException;
    }

    @FunctionalInterface
    private interface JsonWriterWriteJson {
        Object writeJson(Object jsonWriter, Object jsonSerializable) throws IOException;
    }

    @FunctionalInterface
    private interface JsonWriterFlush {
        Object flush(Object jsonWriter) throws IOException;
    }

    @FunctionalInterface
    private interface CreateJsonReader {
        Closeable createJsonReader(byte[] bytes) throws IOException;
    }

    /**
     * Whether {@code XmlSerializable} is supported and the {@code bodyContentClass} is an instance of it.
     *
     * @param bodyContentClass The body content class.
     * @return Whether {@code bodyContentClass} can be used as {@code XmlSerializable}.
     */
    public static boolean supportsXmlSerializable(Class<?> bodyContentClass) {
        return XML_SERIALIZABLE_SUPPORTED && XML_SERIALIZABLE.isAssignableFrom(bodyContentClass);
    }

    /**
     * Serializes the {@code bodyContent} as an instance of {@code XmlSerializable}.
     *
     * @param bodyContent The {@code XmlSerializable} body content.
     * @return The {@link ByteBuffer} representing the serialized {@code bodyContent}.
     * @throws IOException If the XmlWriter fails to close properly.
     */
    static ByteBuffer serializeAsXmlSerializable(Object bodyContent) throws IOException {
        try (AccessibleByteArrayOutputStream outputStream = new AccessibleByteArrayOutputStream();
            AutoCloseable xmlWriter = XML_WRITER_CREATOR.createXmlWriter(outputStream)) {
            XML_WRITER_WRITE_XML_START_DOCUMENT.writeStartDocument(xmlWriter);
            XML_WRITER_WRITE_XML_SERIALIZABLE.writeXml(xmlWriter, bodyContent);
            // Flush before wrapping so all buffered XML reaches the stream.
            XML_WRITER_FLUSH.flush(xmlWriter);

            return ByteBuffer.wrap(outputStream.toByteArray(), 0, outputStream.count());
        } catch (IOException ex) {
            throw ex;
        } catch (Exception ex) {
            throw new IOException(ex);
        }
    }

    /**
     * Deserializes the {@code xml} as an instance of {@code XmlSerializable}.
     *
     * @param xmlSerializable The {@code XmlSerializable} represented by the {@code xml}.
     * @param xml The XML being deserialized.
     * @return An instance of {@code xmlSerializable} based on the {@code xml}, or null when XML support is absent.
     * @throws IOException If the XmlReader fails to close properly.
     */
    public static Object deserializeAsXmlSerializable(Class<?> xmlSerializable, byte[] xml) throws IOException {
        if (!XML_SERIALIZABLE_SUPPORTED) {
            return null;
        }
        // Crude bound on the handle cache to avoid unbounded growth.
        if (FROM_XML_CACHE.size() >= 10000) {
            FROM_XML_CACHE.clear();
        }
        MethodHandle readXml = FROM_XML_CACHE.computeIfAbsent(xmlSerializable, clazz -> {
            try {
                MethodHandles.Lookup lookup = ReflectionUtils.getLookupToUse(clazz);
                return lookup.unreflect(xmlSerializable.getMethod("fromXml", XML_READER));
            } catch (Exception e) {
                throw LOGGER.logExceptionAsError(new IllegalStateException(e));
            }
        });

        try (AutoCloseable xmlReader = XML_READER_CREATOR.createXmlReader(xml)) {
            return readXml.invoke(xmlReader);
        } catch (Throwable e) {
            // Normalize invocation failures to IOException; rethrow real Errors.
            if (e instanceof IOException) {
                throw (IOException) e;
            } else if (e instanceof Exception) {
                throw new IOException(e);
            } else {
                throw (Error) e;
            }
        }
    }

    // Local functional interfaces mirroring the shapes of the reflectively
    // bound azure-xml APIs.
    @FunctionalInterface
    private interface CreateXmlWriter {
        AutoCloseable createXmlWriter(OutputStream outputStream) throws XMLStreamException;
    }

    @FunctionalInterface
    private interface XmlWriterWriteStartDocument {
        Object writeStartDocument(Object xmlWriter) throws XMLStreamException;
    }

    @FunctionalInterface
    private interface XmlWriterWriteXml {
        Object writeXml(Object xmlWriter, Object jsonSerializable) throws XMLStreamException;
    }

    @FunctionalInterface
    private interface XmlWriterFlush {
        Object flush(Object xmlWriter) throws XMLStreamException;
    }

    @FunctionalInterface
    private interface CreateXmlReader {
        AutoCloseable createXmlReader(byte[] bytes) throws XMLStreamException;
    }

    // Utility class; no instances.
    private ReflectionSerializable() {
    }
}
Nit: consider using [Mockito](https://site.mockito.org/) to mock the class instead of hand-writing a mock implementation.
/**
 * Initializes the shared test fixtures: the Azure profile, the credential, and
 * the test resource group (a real group outside PLAYBACK mode, a mock otherwise).
 */
public static void setup() {
    profile = new AzureProfile(AzureEnvironment.AZURE);
    credential = new DefaultAzureCredentialBuilder().build();
    // AZURE_TEST_MODE may be unset; constant-first equals avoids an NPE and
    // treats the missing value as PLAYBACK (no real resources created).
    if (!"PLAYBACK".equals(System.getenv("AZURE_TEST_MODE"))) {
        testResourceGroup = ResourceManager
            .authenticate(credential, profile)
            .withDefaultSubscription()
            .resourceGroups()
            .define("acl-sdk-test-rg")
            .withRegion("eastus")
            .create();
    } else {
        testResourceGroup = new MockResourceGroup("acl-sdk-test-rg");
    }
}
testResourceGroup = new MockResourceGroup("acl-sdk-test-rg");
/** Wires up the static test fixtures in dependency order: profile, credential, resource group. */
public static void setup() {
    // Profile and credential must exist before the resource group is provisioned.
    setAzureProfile();
    setCredential();
    final String resourceGroupName = "acl-sdk-test-rg";
    setTestResourceGroup(resourceGroupName);
}
// Shared base class for Confidential Ledger management-plane tests.
class ConfidentialLedgerManagementTestBase extends TestBase {
    // Shared fixtures; presumably populated by a @BeforeAll setup method that
    // is not visible here — confirm.
    public static AzureProfile profile;
    public static TokenCredential credential;
    public static ResourceGroup testResourceGroup;
    // Per-test operations wrapper, rebuilt in setupManager() before each test.
    public ConfidentialLedgerManagementOperations ledgerOperations;

    // NOTE(review): @BeforeAll together with @AfterAll on a cleanup method
    // looks unintentional (cleanup before all tests would delete the fixture)
    // — confirm which lifecycle phase is intended.
    @BeforeAll
    @AfterAll
    public static void cleanUp() {
        // NOTE(review): System.getenv may return null when AZURE_TEST_MODE is
        // unset, which would throw an NPE here — consider "PLAYBACK".equals(...).
        if (!System.getenv("AZURE_TEST_MODE").equals("PLAYBACK")) {
            ResourceManager
                .authenticate(credential, profile)
                .withDefaultSubscription()
                .resourceGroups()
                .deleteByName(testResourceGroup.name());
        }
    }

    // Builds the manager to match the current test mode: live calls, record
    // (live + capture), or playback against recorded sessions.
    @BeforeEach
    public void setupManager() {
        ConfidentialLedgerManager ledgerManager = null;
        if (getTestMode() == TestMode.LIVE) {
            ledgerManager = ConfidentialLedgerManager
                .authenticate(credential, profile);
        } else if (getTestMode() == TestMode.RECORD) {
            ledgerManager = ConfidentialLedgerManager
                .configure()
                .withPolicy(interceptorManager.getRecordPolicy())
                .authenticate(credential, profile);
        } else if (getTestMode() == TestMode.PLAYBACK) {
            ledgerManager = ConfidentialLedgerManager
                .configure()
                .withHttpClient(interceptorManager.getPlaybackClient())
                .authenticate(credential, profile);
        }
        ledgerOperations = new ConfidentialLedgerManagementOperations(ledgerManager);
    }

    // Builds a map from alternating key/value varargs, e.g. mapOf("k", "v").
    protected Map<String, String> mapOf(String... inputs) {
        Map<String, String> map = new HashMap<>();
        for (int i = 0; i < inputs.length; i += 2) {
            map.put(inputs[i], inputs[i + 1]);
        }
        return map;
    }
}
// Shared base class for Confidential Ledger management-plane tests. Static
// fixtures are exposed through getters/setters; playback mode substitutes
// mocks so no live Azure access is needed.
class ConfidentialLedgerManagementTestBase extends TestBase {
    private static AzureProfile azureProfile;
    private static TokenCredential credential;
    private static ResourceGroup testResourceGroup;
    // Per-test operations wrapper, rebuilt in setupManager() before each test.
    private ConfidentialLedgerManagementOperations ledgerOperationsInstance;

    // NOTE(review): @BeforeAll together with @AfterAll on a cleanup method
    // looks unintentional (cleanup before all tests would delete the fixture)
    // — confirm which lifecycle phase is intended.
    @BeforeAll
    @AfterAll
    public static void cleanUp() {
        String testMode = getTestModeForStaticMethods();
        // Nothing real was provisioned in playback, so skip the delete.
        if (!("PLAYBACK".equals(testMode))) {
            ResourceManager
                .authenticate(getCredential(), getAzureProfile())
                .withDefaultSubscription()
                .resourceGroups()
                .deleteByName(testResourceGroup.name());
        }
    }

    // Builds the manager to match the current test mode: live calls, record
    // (live + capture), or playback against recorded sessions.
    @BeforeEach
    public void setupManager() {
        ConfidentialLedgerManager ledgerManager = null;
        if (getTestMode() == TestMode.LIVE) {
            ledgerManager = ConfidentialLedgerManager
                .authenticate(getCredential(), getAzureProfile());
        } else if (getTestMode() == TestMode.RECORD) {
            ledgerManager = ConfidentialLedgerManager
                .configure()
                .withPolicy(interceptorManager.getRecordPolicy())
                .authenticate(getCredential(), getAzureProfile());
        } else if (getTestMode() == TestMode.PLAYBACK) {
            ledgerManager = ConfidentialLedgerManager
                .configure()
                .withHttpClient(interceptorManager.getPlaybackClient())
                .authenticate(getCredential(), getAzureProfile());
        }
        ledgerOperationsInstance = new ConfidentialLedgerManagementOperations(ledgerManager);
    }

    public static ResourceGroup getTestResourceGroup() {
        return testResourceGroup;
    }

    // Creates a real resource group outside playback; in playback only the
    // group's name is observed, so a Mockito mock suffices.
    public static void setTestResourceGroup(String testResourceGroupName) {
        String testMode = getTestModeForStaticMethods();
        if (!("PLAYBACK".equals(testMode))) {
            testResourceGroup = ResourceManager
                .authenticate(getCredential(), getAzureProfile())
                .withDefaultSubscription()
                .resourceGroups()
                .define(testResourceGroupName)
                .withRegion("eastus")
                .create();
        } else {
            testResourceGroup = mock(ResourceGroup.class);
            when(testResourceGroup.name()).thenReturn(testResourceGroupName);
        }
    }

    public static AzureProfile getAzureProfile() {
        return azureProfile;
    }

    public static void setAzureProfile() {
        String testMode = getTestModeForStaticMethods();
        if ("PLAYBACK".equals(testMode)) {
            // Fixed subscription id so requests line up with recorded sessions.
            azureProfile = new AzureProfile(null, "027da7f8-2fc6-46d4-9be9-560706b60fec", AzureEnvironment.AZURE);
        } else {
            azureProfile = new AzureProfile(AzureEnvironment.AZURE);
        }
    }

    public static TokenCredential getCredential() {
        return credential;
    }

    public static void setCredential() {
        String testMode = getTestModeForStaticMethods();
        if ("PLAYBACK".equals(testMode)) {
            // Playback never contacts AAD; a canned token is sufficient.
            credential = (request -> Mono.just(new AccessToken("this_is_a_token", OffsetDateTime.MAX)));
        } else {
            credential = new DefaultAzureCredentialBuilder().build();
        }
    }

    public ConfidentialLedgerManagementOperations getLedgerOperationsInstance() {
        return ledgerOperationsInstance;
    }

    // Null-safe test-mode lookup usable from static context; an unset
    // AZURE_TEST_MODE environment variable defaults to PLAYBACK.
    public static String getTestModeForStaticMethods() {
        String testMode = System.getenv("AZURE_TEST_MODE");
        if (testMode == null) {
            testMode = "PLAYBACK";
        }
        return testMode;
    }

    // Builds a map from alternating key/value varargs, e.g. mapOf("k", "v").
    protected Map<String, String> mapOf(String... inputs) {
        Map<String, String> map = new HashMap<>();
        for (int i = 0; i < inputs.length; i += 2) {
            map.put(inputs[i], inputs[i + 1]);
        }
        return map;
    }
}
You might want to use `TestBase.getTestMode()` instead of reading the `AZURE_TEST_MODE` environment variable directly.
/**
 * Deletes the shared test resource group after the test run, skipping the call
 * in PLAYBACK mode where no real Azure resources were created.
 */
public static void cleanUp() {
    // Constant-first equals: AZURE_TEST_MODE may be unset, which should be
    // treated as PLAYBACK (nothing to delete) rather than throwing an NPE.
    if (!"PLAYBACK".equals(System.getenv("AZURE_TEST_MODE"))) {
        ResourceManager
            .authenticate(getCredential(), getAzureProfile())
            .withDefaultSubscription()
            .resourceGroups()
            .deleteByName(testResourceGroup.name());
    }
}
if (!System.getenv("AZURE_TEST_MODE").equals("PLAYBACK")) {
/** Removes the shared resource group unless this is a PLAYBACK (mocked) run. */
public static void cleanUp() {
    final String mode = getTestModeForStaticMethods();
    if ("PLAYBACK".equals(mode)) {
        return; // Nothing was provisioned in playback; skip the delete.
    }
    ResourceManager
        .authenticate(getCredential(), getAzureProfile())
        .withDefaultSubscription()
        .resourceGroups()
        .deleteByName(testResourceGroup.name());
}
// Shared base class for Confidential Ledger management-plane tests. Fixtures
// are static because JUnit's @BeforeAll hook is static.
class ConfidentialLedgerManagementTestBase extends TestBase {
    private static AzureProfile azureProfile;
    private static TokenCredential credential;
    private static ResourceGroup testResourceGroup;
    // Per-test operations wrapper, rebuilt in setupManager() before each test.
    private ConfidentialLedgerManagementOperations ledgerOperationsInstance;

    // Provisions the shared fixtures once, in dependency order: profile and
    // credential first, then the resource group that needs both.
    @BeforeAll
    public static void setup() {
        setAzureProfile();
        setCredential();
        String testResourceGroupName = "acl-sdk-test-rg";
        setTestResourceGroup(testResourceGroupName);
    }

    // NOTE(review): @AfterAll on a non-static per-test setup method looks
    // unintentional (JUnit requires @AfterAll methods to be static) — confirm.
    @AfterAll
    @BeforeEach
    public void setupManager() {
        ConfidentialLedgerManager ledgerManager = null;
        // Build the manager to match the current test mode: live calls, record
        // (live + capture), or playback against recorded sessions.
        if (getTestMode() == TestMode.LIVE) {
            ledgerManager = ConfidentialLedgerManager
                .authenticate(getCredential(), getAzureProfile());
        } else if (getTestMode() == TestMode.RECORD) {
            ledgerManager = ConfidentialLedgerManager
                .configure()
                .withPolicy(interceptorManager.getRecordPolicy())
                .authenticate(getCredential(), getAzureProfile());
        } else if (getTestMode() == TestMode.PLAYBACK) {
            ledgerManager = ConfidentialLedgerManager
                .configure()
                .withHttpClient(interceptorManager.getPlaybackClient())
                .authenticate(getCredential(), getAzureProfile());
        }
        ledgerOperationsInstance = new ConfidentialLedgerManagementOperations(ledgerManager);
    }

    public static ResourceGroup getTestResourceGroup() {
        return testResourceGroup;
    }

    // Creates a real resource group outside playback; in playback only the
    // group's name is observed, so a Mockito mock suffices.
    public static void setTestResourceGroup(String testResourceGroupName) {
        // NOTE(review): System.getenv may return null when AZURE_TEST_MODE is
        // unset, which would throw an NPE here — consider "PLAYBACK".equals(...).
        if (!System.getenv("AZURE_TEST_MODE").equals("PLAYBACK")) {
            testResourceGroup = ResourceManager
                .authenticate(getCredential(), getAzureProfile())
                .withDefaultSubscription()
                .resourceGroups()
                .define(testResourceGroupName)
                .withRegion("eastus")
                .create();
        } else {
            testResourceGroup = mock(ResourceGroup.class);
            when(testResourceGroup.name()).thenReturn(testResourceGroupName);
        }
    }

    public static AzureProfile getAzureProfile() {
        return azureProfile;
    }

    public static void setAzureProfile() {
        azureProfile = new AzureProfile(AzureEnvironment.AZURE);
    }

    public static TokenCredential getCredential() {
        return credential;
    }

    public static void setCredential() {
        credential = new DefaultAzureCredentialBuilder().build();
    }

    public ConfidentialLedgerManagementOperations getLedgerOperationsInstance() {
        return ledgerOperationsInstance;
    }

    // Builds a map from alternating key/value varargs, e.g. mapOf("k", "v").
    protected Map<String, String> mapOf(String... inputs) {
        Map<String, String> map = new HashMap<>();
        for (int i = 0; i < inputs.length; i += 2) {
            map.put(inputs[i], inputs[i + 1]);
        }
        return map;
    }
}
// Shared base class for Confidential Ledger management-plane tests. Fixtures
// are static so JUnit's static lifecycle hooks can reach them; playback mode
// substitutes mocks so no live Azure access is required.
class ConfidentialLedgerManagementTestBase extends TestBase {
    private static AzureProfile azureProfile;
    private static TokenCredential credential;
    private static ResourceGroup testResourceGroup;
    // Per-test operations wrapper, rebuilt in setupManager() before each test.
    private ConfidentialLedgerManagementOperations ledgerOperationsInstance;

    // Provisions the shared fixtures once, in dependency order: profile and
    // credential first, then the resource group that needs both.
    @BeforeAll
    public static void setup() {
        setAzureProfile();
        setCredential();
        String testResourceGroupName = "acl-sdk-test-rg";
        setTestResourceGroup(testResourceGroupName);
    }

    // NOTE(review): @AfterAll on a non-static per-test setup method looks
    // unintentional (JUnit requires @AfterAll methods to be static) — confirm.
    @AfterAll
    @BeforeEach
    public void setupManager() {
        ConfidentialLedgerManager ledgerManager = null;
        // Build the manager to match the current test mode: live calls, record
        // (live + capture), or playback against recorded sessions.
        if (getTestMode() == TestMode.LIVE) {
            ledgerManager = ConfidentialLedgerManager
                .authenticate(getCredential(), getAzureProfile());
        } else if (getTestMode() == TestMode.RECORD) {
            ledgerManager = ConfidentialLedgerManager
                .configure()
                .withPolicy(interceptorManager.getRecordPolicy())
                .authenticate(getCredential(), getAzureProfile());
        } else if (getTestMode() == TestMode.PLAYBACK) {
            ledgerManager = ConfidentialLedgerManager
                .configure()
                .withHttpClient(interceptorManager.getPlaybackClient())
                .authenticate(getCredential(), getAzureProfile());
        }
        ledgerOperationsInstance = new ConfidentialLedgerManagementOperations(ledgerManager);
    }

    public static ResourceGroup getTestResourceGroup() {
        return testResourceGroup;
    }

    // Creates a real resource group outside playback; in playback only the
    // group's name is observed, so a Mockito mock suffices.
    public static void setTestResourceGroup(String testResourceGroupName) {
        String testMode = getTestModeForStaticMethods();
        if (!("PLAYBACK".equals(testMode))) {
            testResourceGroup = ResourceManager
                .authenticate(getCredential(), getAzureProfile())
                .withDefaultSubscription()
                .resourceGroups()
                .define(testResourceGroupName)
                .withRegion("eastus")
                .create();
        } else {
            testResourceGroup = mock(ResourceGroup.class);
            when(testResourceGroup.name()).thenReturn(testResourceGroupName);
        }
    }

    public static AzureProfile getAzureProfile() {
        return azureProfile;
    }

    public static void setAzureProfile() {
        String testMode = getTestModeForStaticMethods();
        if ("PLAYBACK".equals(testMode)) {
            // Fixed subscription id so requests line up with recorded sessions.
            azureProfile = new AzureProfile(null, "027da7f8-2fc6-46d4-9be9-560706b60fec", AzureEnvironment.AZURE);
        } else {
            azureProfile = new AzureProfile(AzureEnvironment.AZURE);
        }
    }

    public static TokenCredential getCredential() {
        return credential;
    }

    public static void setCredential() {
        String testMode = getTestModeForStaticMethods();
        if ("PLAYBACK".equals(testMode)) {
            // Playback never contacts AAD; a canned token is sufficient.
            credential = (request -> Mono.just(new AccessToken("this_is_a_token", OffsetDateTime.MAX)));
        } else {
            credential = new DefaultAzureCredentialBuilder().build();
        }
    }

    public ConfidentialLedgerManagementOperations getLedgerOperationsInstance() {
        return ledgerOperationsInstance;
    }

    // Null-safe test-mode lookup usable from static context; an unset
    // AZURE_TEST_MODE environment variable defaults to PLAYBACK.
    public static String getTestModeForStaticMethods() {
        String testMode = System.getenv("AZURE_TEST_MODE");
        if (testMode == null) {
            testMode = "PLAYBACK";
        }
        return testMode;
    }

    // Builds a map from alternating key/value varargs, e.g. mapOf("k", "v").
    protected Map<String, String> mapOf(String... inputs) {
        Map<String, String> map = new HashMap<>();
        for (int i = 0; i < inputs.length; i += 2) {
            map.put(inputs[i], inputs[i + 1]);
        }
        return map;
    }
}
If you cannot use it (e.g. in a static method), make sure you handle a null value from `System.getenv`.
/**
 * Deletes the shared test resource group after the test run, skipping the call
 * in PLAYBACK mode where no real Azure resources were created.
 */
public static void cleanUp() {
    // Constant-first equals: AZURE_TEST_MODE may be unset, which should be
    // treated as PLAYBACK (nothing to delete) rather than throwing an NPE.
    if (!"PLAYBACK".equals(System.getenv("AZURE_TEST_MODE"))) {
        ResourceManager
            .authenticate(getCredential(), getAzureProfile())
            .withDefaultSubscription()
            .resourceGroups()
            .deleteByName(testResourceGroup.name());
    }
}
if (!System.getenv("AZURE_TEST_MODE").equals("PLAYBACK")) {
/** Removes the shared resource group unless this is a PLAYBACK (mocked) run. */
public static void cleanUp() {
    final String mode = getTestModeForStaticMethods();
    if ("PLAYBACK".equals(mode)) {
        return; // Nothing was provisioned in playback; skip the delete.
    }
    ResourceManager
        .authenticate(getCredential(), getAzureProfile())
        .withDefaultSubscription()
        .resourceGroups()
        .deleteByName(testResourceGroup.name());
}
// Shared base class for Confidential Ledger management-plane tests. Fixtures
// are static because JUnit's @BeforeAll hook is static.
class ConfidentialLedgerManagementTestBase extends TestBase {
    private static AzureProfile azureProfile;
    private static TokenCredential credential;
    private static ResourceGroup testResourceGroup;
    // Per-test operations wrapper, rebuilt in setupManager() before each test.
    private ConfidentialLedgerManagementOperations ledgerOperationsInstance;

    // Provisions the shared fixtures once, in dependency order: profile and
    // credential first, then the resource group that needs both.
    @BeforeAll
    public static void setup() {
        setAzureProfile();
        setCredential();
        String testResourceGroupName = "acl-sdk-test-rg";
        setTestResourceGroup(testResourceGroupName);
    }

    // NOTE(review): @AfterAll on a non-static per-test setup method looks
    // unintentional (JUnit requires @AfterAll methods to be static) — confirm.
    @AfterAll
    @BeforeEach
    public void setupManager() {
        ConfidentialLedgerManager ledgerManager = null;
        // Build the manager to match the current test mode: live calls, record
        // (live + capture), or playback against recorded sessions.
        if (getTestMode() == TestMode.LIVE) {
            ledgerManager = ConfidentialLedgerManager
                .authenticate(getCredential(), getAzureProfile());
        } else if (getTestMode() == TestMode.RECORD) {
            ledgerManager = ConfidentialLedgerManager
                .configure()
                .withPolicy(interceptorManager.getRecordPolicy())
                .authenticate(getCredential(), getAzureProfile());
        } else if (getTestMode() == TestMode.PLAYBACK) {
            ledgerManager = ConfidentialLedgerManager
                .configure()
                .withHttpClient(interceptorManager.getPlaybackClient())
                .authenticate(getCredential(), getAzureProfile());
        }
        ledgerOperationsInstance = new ConfidentialLedgerManagementOperations(ledgerManager);
    }

    public static ResourceGroup getTestResourceGroup() {
        return testResourceGroup;
    }

    // Creates a real resource group outside playback; in playback only the
    // group's name is observed, so a Mockito mock suffices.
    public static void setTestResourceGroup(String testResourceGroupName) {
        // NOTE(review): System.getenv may return null when AZURE_TEST_MODE is
        // unset, which would throw an NPE here — consider "PLAYBACK".equals(...).
        if (!System.getenv("AZURE_TEST_MODE").equals("PLAYBACK")) {
            testResourceGroup = ResourceManager
                .authenticate(getCredential(), getAzureProfile())
                .withDefaultSubscription()
                .resourceGroups()
                .define(testResourceGroupName)
                .withRegion("eastus")
                .create();
        } else {
            testResourceGroup = mock(ResourceGroup.class);
            when(testResourceGroup.name()).thenReturn(testResourceGroupName);
        }
    }

    public static AzureProfile getAzureProfile() {
        return azureProfile;
    }

    public static void setAzureProfile() {
        azureProfile = new AzureProfile(AzureEnvironment.AZURE);
    }

    public static TokenCredential getCredential() {
        return credential;
    }

    public static void setCredential() {
        credential = new DefaultAzureCredentialBuilder().build();
    }

    public ConfidentialLedgerManagementOperations getLedgerOperationsInstance() {
        return ledgerOperationsInstance;
    }

    // Builds a map from alternating key/value varargs, e.g. mapOf("k", "v").
    protected Map<String, String> mapOf(String... inputs) {
        Map<String, String> map = new HashMap<>();
        for (int i = 0; i < inputs.length; i += 2) {
            map.put(inputs[i], inputs[i + 1]);
        }
        return map;
    }
}
class ConfidentialLedgerManagementTestBase extends TestBase { private static AzureProfile azureProfile; private static TokenCredential credential; private static ResourceGroup testResourceGroup; private ConfidentialLedgerManagementOperations ledgerOperationsInstance; @BeforeAll public static void setup() { setAzureProfile(); setCredential(); String testResourceGroupName = "acl-sdk-test-rg"; setTestResourceGroup(testResourceGroupName); } @AfterAll @BeforeEach public void setupManager() { ConfidentialLedgerManager ledgerManager = null; if (getTestMode() == TestMode.LIVE) { ledgerManager = ConfidentialLedgerManager .authenticate(getCredential(), getAzureProfile()); } else if (getTestMode() == TestMode.RECORD) { ledgerManager = ConfidentialLedgerManager .configure() .withPolicy(interceptorManager.getRecordPolicy()) .authenticate(getCredential(), getAzureProfile()); } else if (getTestMode() == TestMode.PLAYBACK) { ledgerManager = ConfidentialLedgerManager .configure() .withHttpClient(interceptorManager.getPlaybackClient()) .authenticate(getCredential(), getAzureProfile()); } ledgerOperationsInstance = new ConfidentialLedgerManagementOperations(ledgerManager); } public static ResourceGroup getTestResourceGroup() { return testResourceGroup; } public static void setTestResourceGroup(String testResourceGroupName) { String testMode = getTestModeForStaticMethods(); if (!("PLAYBACK".equals(testMode))) { testResourceGroup = ResourceManager .authenticate(getCredential(), getAzureProfile()) .withDefaultSubscription() .resourceGroups() .define(testResourceGroupName) .withRegion("eastus") .create(); } else { testResourceGroup = mock(ResourceGroup.class); when(testResourceGroup.name()).thenReturn(testResourceGroupName); } } public static AzureProfile getAzureProfile() { return azureProfile; } public static void setAzureProfile() { String testMode = getTestModeForStaticMethods(); if ("PLAYBACK".equals(testMode)) { azureProfile = new AzureProfile(null, 
"027da7f8-2fc6-46d4-9be9-560706b60fec", AzureEnvironment.AZURE); } else { azureProfile = new AzureProfile(AzureEnvironment.AZURE); } } public static TokenCredential getCredential() { return credential; } public static void setCredential() { String testMode = getTestModeForStaticMethods(); if ("PLAYBACK".equals(testMode)) { credential = (request -> Mono.just(new AccessToken("this_is_a_token", OffsetDateTime.MAX))); } else { credential = new DefaultAzureCredentialBuilder().build(); } } public ConfidentialLedgerManagementOperations getLedgerOperationsInstance() { return ledgerOperationsInstance; } public static String getTestModeForStaticMethods() { String testMode = System.getenv("AZURE_TEST_MODE"); if (testMode == null) { testMode = "PLAYBACK"; } return testMode; } protected Map<String, String> mapOf(String... inputs) { Map<String, String> map = new HashMap<>(); for (int i = 0; i < inputs.length; i += 2) { map.put(inputs[i], inputs[i + 1]); } return map; } }
You need to put the subscriptionId to AzureProfile in PLAYBACK, as there is no env variable in CI. You may use `new AzureProfile(null, "027da7f8-2fc6-46d4-9be9-560706b60fec", AzureEnvironment.AZURE)` (that is your recorded subscriptionId). PS: That is the reason I'd prefer LIVE only.
public void setupManager() { ConfidentialLedgerManager ledgerManager = null; if (getTestMode() == TestMode.LIVE) { ledgerManager = ConfidentialLedgerManager .authenticate(getCredential(), getAzureProfile()); } else if (getTestMode() == TestMode.RECORD) { ledgerManager = ConfidentialLedgerManager .configure() .withPolicy(interceptorManager.getRecordPolicy()) .authenticate(getCredential(), getAzureProfile()); } else if (getTestMode() == TestMode.PLAYBACK) { ledgerManager = ConfidentialLedgerManager .configure() .withHttpClient(interceptorManager.getPlaybackClient()) .authenticate(getCredential(), getAzureProfile()); } ledgerOperationsInstance = new ConfidentialLedgerManagementOperations(ledgerManager); }
.authenticate(getCredential(), getAzureProfile());
public void setupManager() { ConfidentialLedgerManager ledgerManager = null; if (getTestMode() == TestMode.LIVE) { ledgerManager = ConfidentialLedgerManager .authenticate(getCredential(), getAzureProfile()); } else if (getTestMode() == TestMode.RECORD) { ledgerManager = ConfidentialLedgerManager .configure() .withPolicy(interceptorManager.getRecordPolicy()) .authenticate(getCredential(), getAzureProfile()); } else if (getTestMode() == TestMode.PLAYBACK) { ledgerManager = ConfidentialLedgerManager .configure() .withHttpClient(interceptorManager.getPlaybackClient()) .authenticate(getCredential(), getAzureProfile()); } ledgerOperationsInstance = new ConfidentialLedgerManagementOperations(ledgerManager); }
class ConfidentialLedgerManagementTestBase extends TestBase { private static AzureProfile azureProfile; private static TokenCredential credential; private static ResourceGroup testResourceGroup; private ConfidentialLedgerManagementOperations ledgerOperationsInstance; @BeforeAll public static void setup() { setAzureProfile(); setCredential(); String testResourceGroupName = "acl-sdk-test-rg"; setTestResourceGroup(testResourceGroupName); } @AfterAll public static void cleanUp() { String testMode = System.getenv("AZURE_TEST_MODE"); if (testMode == null) { testMode = "PLAYBACK"; } if (!("PLAYBACK".equals(testMode))) { ResourceManager .authenticate(getCredential(), getAzureProfile()) .withDefaultSubscription() .resourceGroups() .deleteByName(testResourceGroup.name()); } } @BeforeEach public static ResourceGroup getTestResourceGroup() { return testResourceGroup; } public static void setTestResourceGroup(String testResourceGroupName) { String testMode = System.getenv("AZURE_TEST_MODE"); if (testMode == null) { testMode = "PLAYBACK"; } if (!("PLAYBACK".equals(testMode))) { testResourceGroup = ResourceManager .authenticate(getCredential(), getAzureProfile()) .withDefaultSubscription() .resourceGroups() .define(testResourceGroupName) .withRegion("eastus") .create(); } else { testResourceGroup = mock(ResourceGroup.class); when(testResourceGroup.name()).thenReturn(testResourceGroupName); } } public static AzureProfile getAzureProfile() { return azureProfile; } public static void setAzureProfile() { azureProfile = new AzureProfile(AzureEnvironment.AZURE); } public static TokenCredential getCredential() { return credential; } public static void setCredential() { credential = new DefaultAzureCredentialBuilder().build(); } public ConfidentialLedgerManagementOperations getLedgerOperationsInstance() { return ledgerOperationsInstance; } protected Map<String, String> mapOf(String... 
inputs) { Map<String, String> map = new HashMap<>(); for (int i = 0; i < inputs.length; i += 2) { map.put(inputs[i], inputs[i + 1]); } return map; } }
class ConfidentialLedgerManagementTestBase extends TestBase { private static AzureProfile azureProfile; private static TokenCredential credential; private static ResourceGroup testResourceGroup; private ConfidentialLedgerManagementOperations ledgerOperationsInstance; @BeforeAll public static void setup() { setAzureProfile(); setCredential(); String testResourceGroupName = "acl-sdk-test-rg"; setTestResourceGroup(testResourceGroupName); } @AfterAll public static void cleanUp() { String testMode = getTestModeForStaticMethods(); if (!("PLAYBACK".equals(testMode))) { ResourceManager .authenticate(getCredential(), getAzureProfile()) .withDefaultSubscription() .resourceGroups() .deleteByName(testResourceGroup.name()); } } @BeforeEach public static ResourceGroup getTestResourceGroup() { return testResourceGroup; } public static void setTestResourceGroup(String testResourceGroupName) { String testMode = getTestModeForStaticMethods(); if (!("PLAYBACK".equals(testMode))) { testResourceGroup = ResourceManager .authenticate(getCredential(), getAzureProfile()) .withDefaultSubscription() .resourceGroups() .define(testResourceGroupName) .withRegion("eastus") .create(); } else { testResourceGroup = mock(ResourceGroup.class); when(testResourceGroup.name()).thenReturn(testResourceGroupName); } } public static AzureProfile getAzureProfile() { return azureProfile; } public static void setAzureProfile() { String testMode = getTestModeForStaticMethods(); if ("PLAYBACK".equals(testMode)) { azureProfile = new AzureProfile(null, "027da7f8-2fc6-46d4-9be9-560706b60fec", AzureEnvironment.AZURE); } else { azureProfile = new AzureProfile(AzureEnvironment.AZURE); } } public static TokenCredential getCredential() { return credential; } public static void setCredential() { String testMode = getTestModeForStaticMethods(); if ("PLAYBACK".equals(testMode)) { credential = (request -> Mono.just(new AccessToken("this_is_a_token", OffsetDateTime.MAX))); } else { credential = new 
DefaultAzureCredentialBuilder().build(); } } public ConfidentialLedgerManagementOperations getLedgerOperationsInstance() { return ledgerOperationsInstance; } public static String getTestModeForStaticMethods() { String testMode = System.getenv("AZURE_TEST_MODE"); if (testMode == null) { testMode = "PLAYBACK"; } return testMode; } protected Map<String, String> mapOf(String... inputs) { Map<String, String> map = new HashMap<>(); for (int i = 0; i < inputs.length; i += 2) { map.put(inputs[i], inputs[i + 1]); } return map; } }
Is there a way to mock the authentication part in the PLAYBACK mode?
public void setupManager() { ConfidentialLedgerManager ledgerManager = null; if (getTestMode() == TestMode.LIVE) { ledgerManager = ConfidentialLedgerManager .authenticate(getCredential(), getAzureProfile()); } else if (getTestMode() == TestMode.RECORD) { ledgerManager = ConfidentialLedgerManager .configure() .withPolicy(interceptorManager.getRecordPolicy()) .authenticate(getCredential(), getAzureProfile()); } else if (getTestMode() == TestMode.PLAYBACK) { ledgerManager = ConfidentialLedgerManager .configure() .withHttpClient(interceptorManager.getPlaybackClient()) .authenticate(getCredential(), getAzureProfile()); } ledgerOperationsInstance = new ConfidentialLedgerManagementOperations(ledgerManager); }
.authenticate(getCredential(), getAzureProfile());
public void setupManager() { ConfidentialLedgerManager ledgerManager = null; if (getTestMode() == TestMode.LIVE) { ledgerManager = ConfidentialLedgerManager .authenticate(getCredential(), getAzureProfile()); } else if (getTestMode() == TestMode.RECORD) { ledgerManager = ConfidentialLedgerManager .configure() .withPolicy(interceptorManager.getRecordPolicy()) .authenticate(getCredential(), getAzureProfile()); } else if (getTestMode() == TestMode.PLAYBACK) { ledgerManager = ConfidentialLedgerManager .configure() .withHttpClient(interceptorManager.getPlaybackClient()) .authenticate(getCredential(), getAzureProfile()); } ledgerOperationsInstance = new ConfidentialLedgerManagementOperations(ledgerManager); }
class ConfidentialLedgerManagementTestBase extends TestBase { private static AzureProfile azureProfile; private static TokenCredential credential; private static ResourceGroup testResourceGroup; private ConfidentialLedgerManagementOperations ledgerOperationsInstance; @BeforeAll public static void setup() { setAzureProfile(); setCredential(); String testResourceGroupName = "acl-sdk-test-rg"; setTestResourceGroup(testResourceGroupName); } @AfterAll public static void cleanUp() { String testMode = System.getenv("AZURE_TEST_MODE"); if (testMode == null) { testMode = "PLAYBACK"; } if (!("PLAYBACK".equals(testMode))) { ResourceManager .authenticate(getCredential(), getAzureProfile()) .withDefaultSubscription() .resourceGroups() .deleteByName(testResourceGroup.name()); } } @BeforeEach public static ResourceGroup getTestResourceGroup() { return testResourceGroup; } public static void setTestResourceGroup(String testResourceGroupName) { String testMode = System.getenv("AZURE_TEST_MODE"); if (testMode == null) { testMode = "PLAYBACK"; } if (!("PLAYBACK".equals(testMode))) { testResourceGroup = ResourceManager .authenticate(getCredential(), getAzureProfile()) .withDefaultSubscription() .resourceGroups() .define(testResourceGroupName) .withRegion("eastus") .create(); } else { testResourceGroup = mock(ResourceGroup.class); when(testResourceGroup.name()).thenReturn(testResourceGroupName); } } public static AzureProfile getAzureProfile() { return azureProfile; } public static void setAzureProfile() { azureProfile = new AzureProfile(AzureEnvironment.AZURE); } public static TokenCredential getCredential() { return credential; } public static void setCredential() { credential = new DefaultAzureCredentialBuilder().build(); } public ConfidentialLedgerManagementOperations getLedgerOperationsInstance() { return ledgerOperationsInstance; } protected Map<String, String> mapOf(String... 
inputs) { Map<String, String> map = new HashMap<>(); for (int i = 0; i < inputs.length; i += 2) { map.put(inputs[i], inputs[i + 1]); } return map; } }
class ConfidentialLedgerManagementTestBase extends TestBase { private static AzureProfile azureProfile; private static TokenCredential credential; private static ResourceGroup testResourceGroup; private ConfidentialLedgerManagementOperations ledgerOperationsInstance; @BeforeAll public static void setup() { setAzureProfile(); setCredential(); String testResourceGroupName = "acl-sdk-test-rg"; setTestResourceGroup(testResourceGroupName); } @AfterAll public static void cleanUp() { String testMode = getTestModeForStaticMethods(); if (!("PLAYBACK".equals(testMode))) { ResourceManager .authenticate(getCredential(), getAzureProfile()) .withDefaultSubscription() .resourceGroups() .deleteByName(testResourceGroup.name()); } } @BeforeEach public static ResourceGroup getTestResourceGroup() { return testResourceGroup; } public static void setTestResourceGroup(String testResourceGroupName) { String testMode = getTestModeForStaticMethods(); if (!("PLAYBACK".equals(testMode))) { testResourceGroup = ResourceManager .authenticate(getCredential(), getAzureProfile()) .withDefaultSubscription() .resourceGroups() .define(testResourceGroupName) .withRegion("eastus") .create(); } else { testResourceGroup = mock(ResourceGroup.class); when(testResourceGroup.name()).thenReturn(testResourceGroupName); } } public static AzureProfile getAzureProfile() { return azureProfile; } public static void setAzureProfile() { String testMode = getTestModeForStaticMethods(); if ("PLAYBACK".equals(testMode)) { azureProfile = new AzureProfile(null, "027da7f8-2fc6-46d4-9be9-560706b60fec", AzureEnvironment.AZURE); } else { azureProfile = new AzureProfile(AzureEnvironment.AZURE); } } public static TokenCredential getCredential() { return credential; } public static void setCredential() { String testMode = getTestModeForStaticMethods(); if ("PLAYBACK".equals(testMode)) { credential = (request -> Mono.just(new AccessToken("this_is_a_token", OffsetDateTime.MAX))); } else { credential = new 
DefaultAzureCredentialBuilder().build(); } } public ConfidentialLedgerManagementOperations getLedgerOperationsInstance() { return ledgerOperationsInstance; } public static String getTestModeForStaticMethods() { String testMode = System.getenv("AZURE_TEST_MODE"); if (testMode == null) { testMode = "PLAYBACK"; } return testMode; } protected Map<String, String> mapOf(String... inputs) { Map<String, String> map = new HashMap<>(); for (int i = 0; i < inputs.length; i += 2) { map.put(inputs[i], inputs[i + 1]); } return map; } }
Not sure what you mean by "mock the authentication part". The subscription ID is recorded to JSON, hence you always need to have it in PLAYBACK, mock or not. There is another approach handles this, which is add some code (I didn't remember where) to always replace the subscription ID to 0000-0000-... when RECORD. And provide this same all-0 in PLAYBACK.
public void setupManager() { ConfidentialLedgerManager ledgerManager = null; if (getTestMode() == TestMode.LIVE) { ledgerManager = ConfidentialLedgerManager .authenticate(getCredential(), getAzureProfile()); } else if (getTestMode() == TestMode.RECORD) { ledgerManager = ConfidentialLedgerManager .configure() .withPolicy(interceptorManager.getRecordPolicy()) .authenticate(getCredential(), getAzureProfile()); } else if (getTestMode() == TestMode.PLAYBACK) { ledgerManager = ConfidentialLedgerManager .configure() .withHttpClient(interceptorManager.getPlaybackClient()) .authenticate(getCredential(), getAzureProfile()); } ledgerOperationsInstance = new ConfidentialLedgerManagementOperations(ledgerManager); }
.authenticate(getCredential(), getAzureProfile());
public void setupManager() { ConfidentialLedgerManager ledgerManager = null; if (getTestMode() == TestMode.LIVE) { ledgerManager = ConfidentialLedgerManager .authenticate(getCredential(), getAzureProfile()); } else if (getTestMode() == TestMode.RECORD) { ledgerManager = ConfidentialLedgerManager .configure() .withPolicy(interceptorManager.getRecordPolicy()) .authenticate(getCredential(), getAzureProfile()); } else if (getTestMode() == TestMode.PLAYBACK) { ledgerManager = ConfidentialLedgerManager .configure() .withHttpClient(interceptorManager.getPlaybackClient()) .authenticate(getCredential(), getAzureProfile()); } ledgerOperationsInstance = new ConfidentialLedgerManagementOperations(ledgerManager); }
class ConfidentialLedgerManagementTestBase extends TestBase { private static AzureProfile azureProfile; private static TokenCredential credential; private static ResourceGroup testResourceGroup; private ConfidentialLedgerManagementOperations ledgerOperationsInstance; @BeforeAll public static void setup() { setAzureProfile(); setCredential(); String testResourceGroupName = "acl-sdk-test-rg"; setTestResourceGroup(testResourceGroupName); } @AfterAll public static void cleanUp() { String testMode = System.getenv("AZURE_TEST_MODE"); if (testMode == null) { testMode = "PLAYBACK"; } if (!("PLAYBACK".equals(testMode))) { ResourceManager .authenticate(getCredential(), getAzureProfile()) .withDefaultSubscription() .resourceGroups() .deleteByName(testResourceGroup.name()); } } @BeforeEach public static ResourceGroup getTestResourceGroup() { return testResourceGroup; } public static void setTestResourceGroup(String testResourceGroupName) { String testMode = System.getenv("AZURE_TEST_MODE"); if (testMode == null) { testMode = "PLAYBACK"; } if (!("PLAYBACK".equals(testMode))) { testResourceGroup = ResourceManager .authenticate(getCredential(), getAzureProfile()) .withDefaultSubscription() .resourceGroups() .define(testResourceGroupName) .withRegion("eastus") .create(); } else { testResourceGroup = mock(ResourceGroup.class); when(testResourceGroup.name()).thenReturn(testResourceGroupName); } } public static AzureProfile getAzureProfile() { return azureProfile; } public static void setAzureProfile() { azureProfile = new AzureProfile(AzureEnvironment.AZURE); } public static TokenCredential getCredential() { return credential; } public static void setCredential() { credential = new DefaultAzureCredentialBuilder().build(); } public ConfidentialLedgerManagementOperations getLedgerOperationsInstance() { return ledgerOperationsInstance; } protected Map<String, String> mapOf(String... 
inputs) { Map<String, String> map = new HashMap<>(); for (int i = 0; i < inputs.length; i += 2) { map.put(inputs[i], inputs[i + 1]); } return map; } }
class ConfidentialLedgerManagementTestBase extends TestBase { private static AzureProfile azureProfile; private static TokenCredential credential; private static ResourceGroup testResourceGroup; private ConfidentialLedgerManagementOperations ledgerOperationsInstance; @BeforeAll public static void setup() { setAzureProfile(); setCredential(); String testResourceGroupName = "acl-sdk-test-rg"; setTestResourceGroup(testResourceGroupName); } @AfterAll public static void cleanUp() { String testMode = getTestModeForStaticMethods(); if (!("PLAYBACK".equals(testMode))) { ResourceManager .authenticate(getCredential(), getAzureProfile()) .withDefaultSubscription() .resourceGroups() .deleteByName(testResourceGroup.name()); } } @BeforeEach public static ResourceGroup getTestResourceGroup() { return testResourceGroup; } public static void setTestResourceGroup(String testResourceGroupName) { String testMode = getTestModeForStaticMethods(); if (!("PLAYBACK".equals(testMode))) { testResourceGroup = ResourceManager .authenticate(getCredential(), getAzureProfile()) .withDefaultSubscription() .resourceGroups() .define(testResourceGroupName) .withRegion("eastus") .create(); } else { testResourceGroup = mock(ResourceGroup.class); when(testResourceGroup.name()).thenReturn(testResourceGroupName); } } public static AzureProfile getAzureProfile() { return azureProfile; } public static void setAzureProfile() { String testMode = getTestModeForStaticMethods(); if ("PLAYBACK".equals(testMode)) { azureProfile = new AzureProfile(null, "027da7f8-2fc6-46d4-9be9-560706b60fec", AzureEnvironment.AZURE); } else { azureProfile = new AzureProfile(AzureEnvironment.AZURE); } } public static TokenCredential getCredential() { return credential; } public static void setCredential() { String testMode = getTestModeForStaticMethods(); if ("PLAYBACK".equals(testMode)) { credential = (request -> Mono.just(new AccessToken("this_is_a_token", OffsetDateTime.MAX))); } else { credential = new 
DefaultAzureCredentialBuilder().build(); } } public ConfidentialLedgerManagementOperations getLedgerOperationsInstance() { return ledgerOperationsInstance; } public static String getTestModeForStaticMethods() { String testMode = System.getenv("AZURE_TEST_MODE"); if (testMode == null) { testMode = "PLAYBACK"; } return testMode; } protected Map<String, String> mapOf(String... inputs) { Map<String, String> map = new HashMap<>(); for (int i = 0; i < inputs.length; i += 2) { map.put(inputs[i], inputs[i + 1]); } return map; } }
Yes, I understand this part about the subscription ID needed and I added it in the code. What I mean is that the CI check is still failing, and I think it's due to not being able to authenticate because of as you said there are no environment variables in CI. So, I'm asking how to solve this issue?
public void setupManager() { ConfidentialLedgerManager ledgerManager = null; if (getTestMode() == TestMode.LIVE) { ledgerManager = ConfidentialLedgerManager .authenticate(getCredential(), getAzureProfile()); } else if (getTestMode() == TestMode.RECORD) { ledgerManager = ConfidentialLedgerManager .configure() .withPolicy(interceptorManager.getRecordPolicy()) .authenticate(getCredential(), getAzureProfile()); } else if (getTestMode() == TestMode.PLAYBACK) { ledgerManager = ConfidentialLedgerManager .configure() .withHttpClient(interceptorManager.getPlaybackClient()) .authenticate(getCredential(), getAzureProfile()); } ledgerOperationsInstance = new ConfidentialLedgerManagementOperations(ledgerManager); }
.authenticate(getCredential(), getAzureProfile());
public void setupManager() { ConfidentialLedgerManager ledgerManager = null; if (getTestMode() == TestMode.LIVE) { ledgerManager = ConfidentialLedgerManager .authenticate(getCredential(), getAzureProfile()); } else if (getTestMode() == TestMode.RECORD) { ledgerManager = ConfidentialLedgerManager .configure() .withPolicy(interceptorManager.getRecordPolicy()) .authenticate(getCredential(), getAzureProfile()); } else if (getTestMode() == TestMode.PLAYBACK) { ledgerManager = ConfidentialLedgerManager .configure() .withHttpClient(interceptorManager.getPlaybackClient()) .authenticate(getCredential(), getAzureProfile()); } ledgerOperationsInstance = new ConfidentialLedgerManagementOperations(ledgerManager); }
class ConfidentialLedgerManagementTestBase extends TestBase { private static AzureProfile azureProfile; private static TokenCredential credential; private static ResourceGroup testResourceGroup; private ConfidentialLedgerManagementOperations ledgerOperationsInstance; @BeforeAll public static void setup() { setAzureProfile(); setCredential(); String testResourceGroupName = "acl-sdk-test-rg"; setTestResourceGroup(testResourceGroupName); } @AfterAll public static void cleanUp() { String testMode = System.getenv("AZURE_TEST_MODE"); if (testMode == null) { testMode = "PLAYBACK"; } if (!("PLAYBACK".equals(testMode))) { ResourceManager .authenticate(getCredential(), getAzureProfile()) .withDefaultSubscription() .resourceGroups() .deleteByName(testResourceGroup.name()); } } @BeforeEach public static ResourceGroup getTestResourceGroup() { return testResourceGroup; } public static void setTestResourceGroup(String testResourceGroupName) { String testMode = System.getenv("AZURE_TEST_MODE"); if (testMode == null) { testMode = "PLAYBACK"; } if (!("PLAYBACK".equals(testMode))) { testResourceGroup = ResourceManager .authenticate(getCredential(), getAzureProfile()) .withDefaultSubscription() .resourceGroups() .define(testResourceGroupName) .withRegion("eastus") .create(); } else { testResourceGroup = mock(ResourceGroup.class); when(testResourceGroup.name()).thenReturn(testResourceGroupName); } } public static AzureProfile getAzureProfile() { return azureProfile; } public static void setAzureProfile() { azureProfile = new AzureProfile(AzureEnvironment.AZURE); } public static TokenCredential getCredential() { return credential; } public static void setCredential() { credential = new DefaultAzureCredentialBuilder().build(); } public ConfidentialLedgerManagementOperations getLedgerOperationsInstance() { return ledgerOperationsInstance; } protected Map<String, String> mapOf(String... 
inputs) { Map<String, String> map = new HashMap<>(); for (int i = 0; i < inputs.length; i += 2) { map.put(inputs[i], inputs[i + 1]); } return map; } }
class ConfidentialLedgerManagementTestBase extends TestBase { private static AzureProfile azureProfile; private static TokenCredential credential; private static ResourceGroup testResourceGroup; private ConfidentialLedgerManagementOperations ledgerOperationsInstance; @BeforeAll public static void setup() { setAzureProfile(); setCredential(); String testResourceGroupName = "acl-sdk-test-rg"; setTestResourceGroup(testResourceGroupName); } @AfterAll public static void cleanUp() { String testMode = getTestModeForStaticMethods(); if (!("PLAYBACK".equals(testMode))) { ResourceManager .authenticate(getCredential(), getAzureProfile()) .withDefaultSubscription() .resourceGroups() .deleteByName(testResourceGroup.name()); } } @BeforeEach public static ResourceGroup getTestResourceGroup() { return testResourceGroup; } public static void setTestResourceGroup(String testResourceGroupName) { String testMode = getTestModeForStaticMethods(); if (!("PLAYBACK".equals(testMode))) { testResourceGroup = ResourceManager .authenticate(getCredential(), getAzureProfile()) .withDefaultSubscription() .resourceGroups() .define(testResourceGroupName) .withRegion("eastus") .create(); } else { testResourceGroup = mock(ResourceGroup.class); when(testResourceGroup.name()).thenReturn(testResourceGroupName); } } public static AzureProfile getAzureProfile() { return azureProfile; } public static void setAzureProfile() { String testMode = getTestModeForStaticMethods(); if ("PLAYBACK".equals(testMode)) { azureProfile = new AzureProfile(null, "027da7f8-2fc6-46d4-9be9-560706b60fec", AzureEnvironment.AZURE); } else { azureProfile = new AzureProfile(AzureEnvironment.AZURE); } } public static TokenCredential getCredential() { return credential; } public static void setCredential() { String testMode = getTestModeForStaticMethods(); if ("PLAYBACK".equals(testMode)) { credential = (request -> Mono.just(new AccessToken("this_is_a_token", OffsetDateTime.MAX))); } else { credential = new 
DefaultAzureCredentialBuilder().build(); } } public ConfidentialLedgerManagementOperations getLedgerOperationsInstance() { return ledgerOperationsInstance; } public static String getTestModeForStaticMethods() { String testMode = System.getenv("AZURE_TEST_MODE"); if (testMode == null) { testMode = "PLAYBACK"; } return testMode; } protected Map<String, String> mapOf(String... inputs) { Map<String, String> map = new HashMap<>(); for (int i = 0; i < inputs.length; i += 2) { map.put(inputs[i], inputs[i + 1]); } return map; } }
I see. Please do not use the `DefaultAzureCredential`. In PLAYBACK, use a mocked credential like this https://github.com/Azure/azure-sdk-for-java/blob/b9a185753539828b13fdce1aa91427b0aa997898/sdk/devcenter/azure-developer-devcenter/src/test/java/com/azure/developer/devcenter/DevCenterClientTestBase.java#L38
public void setupManager() { ConfidentialLedgerManager ledgerManager = null; if (getTestMode() == TestMode.LIVE) { ledgerManager = ConfidentialLedgerManager .authenticate(getCredential(), getAzureProfile()); } else if (getTestMode() == TestMode.RECORD) { ledgerManager = ConfidentialLedgerManager .configure() .withPolicy(interceptorManager.getRecordPolicy()) .authenticate(getCredential(), getAzureProfile()); } else if (getTestMode() == TestMode.PLAYBACK) { ledgerManager = ConfidentialLedgerManager .configure() .withHttpClient(interceptorManager.getPlaybackClient()) .authenticate(getCredential(), getAzureProfile()); } ledgerOperationsInstance = new ConfidentialLedgerManagementOperations(ledgerManager); }
.authenticate(getCredential(), getAzureProfile());
public void setupManager() { ConfidentialLedgerManager ledgerManager = null; if (getTestMode() == TestMode.LIVE) { ledgerManager = ConfidentialLedgerManager .authenticate(getCredential(), getAzureProfile()); } else if (getTestMode() == TestMode.RECORD) { ledgerManager = ConfidentialLedgerManager .configure() .withPolicy(interceptorManager.getRecordPolicy()) .authenticate(getCredential(), getAzureProfile()); } else if (getTestMode() == TestMode.PLAYBACK) { ledgerManager = ConfidentialLedgerManager .configure() .withHttpClient(interceptorManager.getPlaybackClient()) .authenticate(getCredential(), getAzureProfile()); } ledgerOperationsInstance = new ConfidentialLedgerManagementOperations(ledgerManager); }
class ConfidentialLedgerManagementTestBase extends TestBase { private static AzureProfile azureProfile; private static TokenCredential credential; private static ResourceGroup testResourceGroup; private ConfidentialLedgerManagementOperations ledgerOperationsInstance; @BeforeAll public static void setup() { setAzureProfile(); setCredential(); String testResourceGroupName = "acl-sdk-test-rg"; setTestResourceGroup(testResourceGroupName); } @AfterAll public static void cleanUp() { String testMode = System.getenv("AZURE_TEST_MODE"); if (testMode == null) { testMode = "PLAYBACK"; } if (!("PLAYBACK".equals(testMode))) { ResourceManager .authenticate(getCredential(), getAzureProfile()) .withDefaultSubscription() .resourceGroups() .deleteByName(testResourceGroup.name()); } } @BeforeEach public static ResourceGroup getTestResourceGroup() { return testResourceGroup; } public static void setTestResourceGroup(String testResourceGroupName) { String testMode = System.getenv("AZURE_TEST_MODE"); if (testMode == null) { testMode = "PLAYBACK"; } if (!("PLAYBACK".equals(testMode))) { testResourceGroup = ResourceManager .authenticate(getCredential(), getAzureProfile()) .withDefaultSubscription() .resourceGroups() .define(testResourceGroupName) .withRegion("eastus") .create(); } else { testResourceGroup = mock(ResourceGroup.class); when(testResourceGroup.name()).thenReturn(testResourceGroupName); } } public static AzureProfile getAzureProfile() { return azureProfile; } public static void setAzureProfile() { azureProfile = new AzureProfile(AzureEnvironment.AZURE); } public static TokenCredential getCredential() { return credential; } public static void setCredential() { credential = new DefaultAzureCredentialBuilder().build(); } public ConfidentialLedgerManagementOperations getLedgerOperationsInstance() { return ledgerOperationsInstance; } protected Map<String, String> mapOf(String... 
inputs) { Map<String, String> map = new HashMap<>(); for (int i = 0; i < inputs.length; i += 2) { map.put(inputs[i], inputs[i + 1]); } return map; } }
class ConfidentialLedgerManagementTestBase extends TestBase { private static AzureProfile azureProfile; private static TokenCredential credential; private static ResourceGroup testResourceGroup; private ConfidentialLedgerManagementOperations ledgerOperationsInstance; @BeforeAll public static void setup() { setAzureProfile(); setCredential(); String testResourceGroupName = "acl-sdk-test-rg"; setTestResourceGroup(testResourceGroupName); } @AfterAll public static void cleanUp() { String testMode = getTestModeForStaticMethods(); if (!("PLAYBACK".equals(testMode))) { ResourceManager .authenticate(getCredential(), getAzureProfile()) .withDefaultSubscription() .resourceGroups() .deleteByName(testResourceGroup.name()); } } @BeforeEach public static ResourceGroup getTestResourceGroup() { return testResourceGroup; } public static void setTestResourceGroup(String testResourceGroupName) { String testMode = getTestModeForStaticMethods(); if (!("PLAYBACK".equals(testMode))) { testResourceGroup = ResourceManager .authenticate(getCredential(), getAzureProfile()) .withDefaultSubscription() .resourceGroups() .define(testResourceGroupName) .withRegion("eastus") .create(); } else { testResourceGroup = mock(ResourceGroup.class); when(testResourceGroup.name()).thenReturn(testResourceGroupName); } } public static AzureProfile getAzureProfile() { return azureProfile; } public static void setAzureProfile() { String testMode = getTestModeForStaticMethods(); if ("PLAYBACK".equals(testMode)) { azureProfile = new AzureProfile(null, "027da7f8-2fc6-46d4-9be9-560706b60fec", AzureEnvironment.AZURE); } else { azureProfile = new AzureProfile(AzureEnvironment.AZURE); } } public static TokenCredential getCredential() { return credential; } public static void setCredential() { String testMode = getTestModeForStaticMethods(); if ("PLAYBACK".equals(testMode)) { credential = (request -> Mono.just(new AccessToken("this_is_a_token", OffsetDateTime.MAX))); } else { credential = new 
DefaultAzureCredentialBuilder().build(); } } public ConfidentialLedgerManagementOperations getLedgerOperationsInstance() { return ledgerOperationsInstance; } public static String getTestModeForStaticMethods() { String testMode = System.getenv("AZURE_TEST_MODE"); if (testMode == null) { testMode = "PLAYBACK"; } return testMode; } protected Map<String, String> mapOf(String... inputs) { Map<String, String> map = new HashMap<>(); for (int i = 0; i < inputs.length; i += 2) { map.put(inputs[i], inputs[i + 1]); } return map; } }
@cochi2 Just to confirm is this correct? Since we dont have any prefix in subject line. I am not sure if we need to fix this in C# SDK as well?
public void parseRecognizeFailedEvent() { String receivedEvent = "[{\n" + "\"id\": \"704a7a96-4d74-4ebe-9cd0-b7cc39c3d7b1\",\n" + "\"source\": \"calling/callConnections/callConnectionId/RecognizeCompleted\",\n" + "\"type\": \"Microsoft.Communication.RecognizeFailed\",\n" + "\"data\": {\n" + "\"resultInformation\": {\n" + "\"code\": 400,\n" + "\"subCode\": 8510,\n" + "\"message\": \"Action failed, initial silence timeout reached.\"\n" + "},\n" + "\"type\": \"recognizeFailed\",\n" + "\"callConnectionId\": \"callConnectionId\",\n" + "\"serverCallId\": \"serverCallId\",\n" + "\"correlationId\": \"correlationId\"\n" + "},\n" + "\"time\": \"2022-08-12T03:13:25.0252763+00:00\",\n" + "\"specversion\": \"1.0\",\n" + "\"datacontenttype\": \"application/json\",\n" + "\"subject\": \"calling/callConnections/callConnectionId/RecognizeFailed\"\n" + "}]"; CallAutomationEventBase event = EventHandler.parseEvent(receivedEvent); assertNotNull(event); RecognizeFailed recognizeFailed = (RecognizeFailed) event; assertNotNull(recognizeFailed); assertEquals("serverCallId", recognizeFailed.getServerCallId()); assertEquals(400, recognizeFailed.getResultInformation().getCode()); assertEquals(ReasonCode.Recognize.INITIAL_SILENCE_TIMEOUT, recognizeFailed.getReasonCode()); }
+ "\"subject\": \"calling/callConnections/callConnectionId/RecognizeFailed\"\n"
public void parseRecognizeFailedEvent() { String receivedEvent = "[{\n" + "\"id\": \"704a7a96-4d74-4ebe-9cd0-b7cc39c3d7b1\",\n" + "\"source\": \"calling/callConnections/callConnectionId/RecognizeCompleted\",\n" + "\"type\": \"Microsoft.Communication.RecognizeFailed\",\n" + "\"data\": {\n" + "\"resultInformation\": {\n" + "\"code\": 400,\n" + "\"subCode\": 8510,\n" + "\"message\": \"Action failed, initial silence timeout reached.\"\n" + "},\n" + "\"type\": \"recognizeFailed\",\n" + "\"callConnectionId\": \"callConnectionId\",\n" + "\"serverCallId\": \"serverCallId\",\n" + "\"correlationId\": \"correlationId\"\n" + "},\n" + "\"time\": \"2022-08-12T03:13:25.0252763+00:00\",\n" + "\"specversion\": \"1.0\",\n" + "\"datacontenttype\": \"application/json\",\n" + "\"subject\": \"calling/callConnections/callConnectionId\"\n" + "}]"; CallAutomationEventBase event = EventHandler.parseEvent(receivedEvent); assertNotNull(event); RecognizeFailed recognizeFailed = (RecognizeFailed) event; assertNotNull(recognizeFailed); assertEquals("serverCallId", recognizeFailed.getServerCallId()); assertEquals(400, recognizeFailed.getResultInformation().getCode()); assertEquals(ReasonCode.Recognize.INITIAL_SILENCE_TIMEOUT, recognizeFailed.getReasonCode()); }
class EventHandlerUnitTests { static final String EVENT_PARTICIPANT_UPDATED = "{\"id\":\"61069ef9-5ca9-457f-ac36-e2bb5e8400ca\",\"source\":\"calling/callConnections/401f3500-62bd-46a9-8c09-9e1b06caca01/ParticipantsUpdated\",\"type\":\"Microsoft.Communication.ParticipantsUpdated\",\"data\":{\"participants\":[{\"rawId\":\"8:acs:816df1ca-971b-44d7-b8b1-8fba90748500_00000013-2ff6-dd51-54b7-a43a0d001998\",\"kind\":\"communicationUser\",\"communicationUser\":{\"id\":\"8:acs:816df1ca-971b-44d7-b8b1-8fba90748500_00000013-2ff6-dd51-54b7-a43a0d001998\"}},{\"rawId\":\"8:acs:816df1ca-971b-44d7-b8b1-8fba90748500_00000013-2ff7-1579-99bf-a43a0d0010bc\",\"kind\":\"communicationUser\",\"communicationUser\":{\"id\":\"8:acs:816df1ca-971b-44d7-b8b1-8fba90748500_00000013-2ff7-1579-99bf-a43a0d0010bc\"}}],\"type\":\"participantsUpdated\",\"callConnectionId\":\"401f3500-62bd-46a9-8c09-9e1b06caca01\",\"correlationId\":\"ebd8bf1f-0794-494f-bdda-913042c06ef7\"},\"time\":\"2022-08-12T03:35:07.9129474+00:00\",\"specversion\":\"1.0\",\"datacontenttype\":\"application/json\",\"subject\":\"calling/callConnections/401f3500-62bd-46a9-8c09-9e1b06caca01/ParticipantsUpdated\"}"; static final String EVENT_CALL_CONNECTED = "{\"id\":\"46116fb7-27e0-4a99-9478-a659c8fd4815\",\"source\":\"calling/callConnections/401f3500-62bd-46a9-8c09-9e1b06caca01/CallConnected\",\"type\":\"Microsoft.Communication.CallConnected\",\"data\":{\"type\":\"callConnected\",\"callConnectionId\":\"401f3500-62bd-46a9-8c09-9e1b06caca01\",\"correlationId\":\"ebd8bf1f-0794-494f-bdda-913042c06ef7\"},\"time\":\"2022-08-12T03:35:07.8174402+00:00\",\"specversion\":\"1.0\",\"datacontenttype\":\"application/json\",\"subject\":\"calling/callConnections/401f3500-62bd-46a9-8c09-9e1b06caca01/CallConnected\"}"; @Test public void parseEvent() { CallAutomationEventBase callAutomationEventBase = EventHandler.parseEvent(EVENT_PARTICIPANT_UPDATED); assertNotNull(callAutomationEventBase); assertEquals(callAutomationEventBase.getClass(), 
ParticipantsUpdatedEvent.class); assertNotNull(((ParticipantsUpdatedEvent) callAutomationEventBase).getParticipants()); } @Test public void parseEventList() { List<CallAutomationEventBase> callAutomationEventBaseList = EventHandler.parseEventList("[" + EVENT_CALL_CONNECTED + "," + EVENT_PARTICIPANT_UPDATED + "]"); assertNotNull(callAutomationEventBaseList); assertEquals(callAutomationEventBaseList.get(0).getClass(), CallConnectedEvent.class); assertEquals(callAutomationEventBaseList.get(1).getClass(), ParticipantsUpdatedEvent.class); assertNotNull(callAutomationEventBaseList.get(0).getCallConnectionId()); } @Test public void parseRecordingStateChangedEvent() { String receivedEvent = "[\n" + " {\n" + " \"id\": \"bf59843a-888f-47ca-8d1c-885c1f5e71dc\",\n" + " \"source\": \"calling/recordings/serverCallId/recordingId/recordingId/RecordingStateChanged\",\n" + " \"type\": \"Microsoft.Communication.CallRecordingStateChanged\",\n" + " \"data\": {\n" + " \"type\": \"recordingStateChanged\",\n" + " \"recordingId\": \"recordingId\",\n" + " \"state\": \"active\",\n" + " \"startDateTime\": \"2022-08-11T23:42:45.4394211+00:00\",\n" + " \"callConnectionId\": \"callConnectionId\",\n" + " \"serverCallId\": \"serverCallId\",\n" + " \"correlationId\": \"correlationId\"\n" + " },\n" + " \"time\": \"2022-08-11T23:42:45.5346632+00:00\",\n" + " \"specversion\": \"1.0\",\n" + " \"datacontenttype\": \"application/json\",\n" + " \"subject\": \"calling/recordings/serverCallId/recordingId/recordingId/RecordingStateChanged\"\n" + " }\n" + "]"; CallAutomationEventBase event = EventHandler.parseEvent(receivedEvent); assertNotNull(event); RecordingStateChangedEvent recordingEvent = (RecordingStateChangedEvent) event; assertNotNull(recordingEvent); assertEquals("serverCallId", recordingEvent.getServerCallId()); assertEquals("recordingId", recordingEvent.getRecordingId()); assertEquals(RecordingState.ACTIVE, recordingEvent.getRecordingState()); } @Test public void parsePlayCompletedEvent() { 
String receivedEvent = "[{\n" + "\"id\": \"704a7a96-4d74-4ebe-9cd0-b7cc39c3d7b1\",\n" + "\"source\": \"calling/callConnections/callConnectionId/PlayCompletedEvent\",\n" + "\"type\": \"Microsoft.Communication.PlayCompleted\",\n" + "\"data\": {\n" + "\"resultInformation\": {\n" + "\"code\": 200,\n" + "\"subCode\": 0,\n" + "\"message\": \"Action completed successfully.\"\n" + "},\n" + "\"type\": \"playCompletedEvent\",\n" + "\"callConnectionId\": \"callConnectionId\",\n" + "\"serverCallId\": \"serverCallId\",\n" + "\"correlationId\": \"correlationId\"\n" + "},\n" + "\"time\": \"2022-08-12T03:13:25.0252763+00:00\",\n" + "\"specversion\": \"1.0\",\n" + "\"datacontenttype\": \"application/json\",\n" + "\"subject\": \"calling/callConnections/callConnectionId/PlayCompletedEvent\"\n" + "}]"; CallAutomationEventBase event = EventHandler.parseEvent(receivedEvent); assertNotNull(event); PlayCompletedEvent playCompletedEvent = (PlayCompletedEvent) event; assertNotNull(playCompletedEvent); assertEquals("serverCallId", playCompletedEvent.getServerCallId()); assertEquals(200, playCompletedEvent.getResultInformation().getCode()); assertEquals(ReasonCode.COMPLETED_SUCCESSFULLY, playCompletedEvent.getReasonCode()); } @Test public void parsePlayFailedEvent() { String receivedEvent = "[{\n" + "\"id\": \"704a7a96-4d74-4ebe-9cd0-b7cc39c3d7b1\",\n" + "\"source\": \"calling/callConnections/callConnectionId/PlayFailedEvent\",\n" + "\"type\": \"Microsoft.Communication.PlayFailed\",\n" + "\"data\": {\n" + "\"resultInformation\": {\n" + "\"code\": 400,\n" + "\"subCode\": 8536,\n" + "\"message\": \"Action failed, file could not be downloaded.\"\n" + "},\n" + "\"type\": \"playFailedEvent\",\n" + "\"callConnectionId\": \"callConnectionId\",\n" + "\"serverCallId\": \"serverCallId\",\n" + "\"correlationId\": \"correlationId\"\n" + "},\n" + "\"time\": \"2022-08-12T03:13:25.0252763+00:00\",\n" + "\"specversion\": \"1.0\",\n" + "\"datacontenttype\": \"application/json\",\n" + "\"subject\": 
\"calling/callConnections/callConnectionId/PlayFailedEvent\"\n" + "}]"; CallAutomationEventBase event = EventHandler.parseEvent(receivedEvent); assertNotNull(event); PlayFailedEvent playFailedEvent = (PlayFailedEvent) event; assertNotNull(playFailedEvent); assertEquals("serverCallId", playFailedEvent.getServerCallId()); assertEquals(400, playFailedEvent.getResultInformation().getCode()); assertEquals(ReasonCode.Play.DOWNLOAD_FAILED, playFailedEvent.getReasonCode()); } @Test public void parsePlayCanceledEvent() { String receivedEvent = "[{\n" + "\"id\": \"704a7a96-4d74-4ebe-9cd0-b7cc39c3d7b1\",\n" + "\"source\": \"calling/callConnections/callConnectionId/PlayCanceled\",\n" + "\"type\": \"Microsoft.Communication.PlayCanceled\",\n" + "\"data\": {\n" + "\"type\": \"playCanceled\",\n" + "\"callConnectionId\": \"callConnectionId\",\n" + "\"serverCallId\": \"serverCallId\",\n" + "\"correlationId\": \"correlationId\"\n" + "},\n" + "\"time\": \"2022-08-12T03:13:25.0252763+00:00\",\n" + "\"specversion\": \"1.0\",\n" + "\"datacontenttype\": \"application/json\",\n" + "\"subject\": \"calling/callConnections/callConnectionId/PlayCanceled\"\n" + "}]"; CallAutomationEventBase event = EventHandler.parseEvent(receivedEvent); assertNotNull(event); PlayCanceled playCanceled = (PlayCanceled) event; assertNotNull(playCanceled); assertEquals("serverCallId", playCanceled.getServerCallId()); } @Test public void parseRecognizeCompletedEvent() { String receivedEvent = "[{\n" + "\"id\": \"704a7a96-4d74-4ebe-9cd0-b7cc39c3d7b1\",\n" + "\"source\": \"calling/callConnections/callConnectionId/RecognizeCompleted\",\n" + "\"type\": \"Microsoft.Communication.RecognizeCompleted\",\n" + "\"data\": {\n" + "\"resultInformation\": {\n" + "\"code\": 200,\n" + "\"subCode\": 0,\n" + "\"message\": \"Action completed successfully.\"\n" + "},\n" + "\"type\": \"recognizeCompleted\",\n" + "\"callConnectionId\": \"callConnectionId\",\n" + "\"serverCallId\": \"serverCallId\",\n" + "\"correlationId\": 
\"correlationId\"\n" + "},\n" + "\"time\": \"2022-08-12T03:13:25.0252763+00:00\",\n" + "\"specversion\": \"1.0\",\n" + "\"datacontenttype\": \"application/json\",\n" + "\"subject\": \"calling/callConnections/callConnectionId/RecognizeCompleted\"\n" + "}]"; CallAutomationEventBase event = EventHandler.parseEvent(receivedEvent); assertNotNull(event); RecognizeCompleted recognizeCompleted = (RecognizeCompleted) event; assertNotNull(recognizeCompleted); assertEquals("serverCallId", recognizeCompleted.getServerCallId()); assertEquals(200, recognizeCompleted.getResultInformation().getCode()); assertEquals(ReasonCode.COMPLETED_SUCCESSFULLY, recognizeCompleted.getReasonCode()); } @Test @Test public void parseRecognizeCanceledEvent() { String receivedEvent = "[{\n" + "\"id\": \"704a7a96-4d74-4ebe-9cd0-b7cc39c3d7b1\",\n" + "\"source\": \"calling/callConnections/callConnectionId/RecognizeCanceled\",\n" + "\"type\": \"Microsoft.Communication.RecognizeCanceled\",\n" + "\"data\": {\n" + "\"type\": \"recognizeCanceled\",\n" + "\"callConnectionId\": \"callConnectionId\",\n" + "\"serverCallId\": \"serverCallId\",\n" + "\"correlationId\": \"correlationId\"\n" + "},\n" + "\"time\": \"2022-08-12T03:13:25.0252763+00:00\",\n" + "\"specversion\": \"1.0\",\n" + "\"datacontenttype\": \"application/json\",\n" + "\"subject\": \"calling/callConnections/callConnectionId/RecognizeCanceled\"\n" + "}]"; CallAutomationEventBase event = EventHandler.parseEvent(receivedEvent); assertNotNull(event); RecognizeCanceled recognizeCanceled = (RecognizeCanceled) event; assertNotNull(recognizeCanceled); assertEquals("serverCallId", recognizeCanceled.getServerCallId()); } }
class EventHandlerUnitTests { static final String EVENT_PARTICIPANT_UPDATED = "{\"id\":\"61069ef9-5ca9-457f-ac36-e2bb5e8400ca\",\"source\":\"calling/callConnections/401f3500-62bd-46a9-8c09-9e1b06caca01/ParticipantsUpdated\",\"type\":\"Microsoft.Communication.ParticipantsUpdated\",\"data\":{\"participants\":[{\"rawId\":\"8:acs:816df1ca-971b-44d7-b8b1-8fba90748500_00000013-2ff6-dd51-54b7-a43a0d001998\",\"kind\":\"communicationUser\",\"communicationUser\":{\"id\":\"8:acs:816df1ca-971b-44d7-b8b1-8fba90748500_00000013-2ff6-dd51-54b7-a43a0d001998\"}},{\"rawId\":\"8:acs:816df1ca-971b-44d7-b8b1-8fba90748500_00000013-2ff7-1579-99bf-a43a0d0010bc\",\"kind\":\"communicationUser\",\"communicationUser\":{\"id\":\"8:acs:816df1ca-971b-44d7-b8b1-8fba90748500_00000013-2ff7-1579-99bf-a43a0d0010bc\"}}],\"type\":\"participantsUpdated\",\"callConnectionId\":\"401f3500-62bd-46a9-8c09-9e1b06caca01\",\"correlationId\":\"ebd8bf1f-0794-494f-bdda-913042c06ef7\"},\"time\":\"2022-08-12T03:35:07.9129474+00:00\",\"specversion\":\"1.0\",\"datacontenttype\":\"application/json\",\"subject\":\"calling/callConnections/401f3500-62bd-46a9-8c09-9e1b06caca01/ParticipantsUpdated\"}"; static final String EVENT_CALL_CONNECTED = "{\"id\":\"46116fb7-27e0-4a99-9478-a659c8fd4815\",\"source\":\"calling/callConnections/401f3500-62bd-46a9-8c09-9e1b06caca01/CallConnected\",\"type\":\"Microsoft.Communication.CallConnected\",\"data\":{\"type\":\"callConnected\",\"callConnectionId\":\"401f3500-62bd-46a9-8c09-9e1b06caca01\",\"correlationId\":\"ebd8bf1f-0794-494f-bdda-913042c06ef7\"},\"time\":\"2022-08-12T03:35:07.8174402+00:00\",\"specversion\":\"1.0\",\"datacontenttype\":\"application/json\",\"subject\":\"calling/callConnections/401f3500-62bd-46a9-8c09-9e1b06caca01/CallConnected\"}"; @Test public void parseEvent() { CallAutomationEventBase callAutomationEventBase = EventHandler.parseEvent(EVENT_PARTICIPANT_UPDATED); assertNotNull(callAutomationEventBase); assertEquals(callAutomationEventBase.getClass(), 
ParticipantsUpdatedEvent.class); assertNotNull(((ParticipantsUpdatedEvent) callAutomationEventBase).getParticipants()); } @Test public void parseEventList() { List<CallAutomationEventBase> callAutomationEventBaseList = EventHandler.parseEventList("[" + EVENT_CALL_CONNECTED + "," + EVENT_PARTICIPANT_UPDATED + "]"); assertNotNull(callAutomationEventBaseList); assertEquals(callAutomationEventBaseList.get(0).getClass(), CallConnectedEvent.class); assertEquals(callAutomationEventBaseList.get(1).getClass(), ParticipantsUpdatedEvent.class); assertNotNull(callAutomationEventBaseList.get(0).getCallConnectionId()); } @Test public void parseRecordingStateChangedEvent() { String receivedEvent = "[\n" + " {\n" + " \"id\": \"bf59843a-888f-47ca-8d1c-885c1f5e71dc\",\n" + " \"source\": \"calling/recordings/serverCallId/recordingId/recordingId/RecordingStateChanged\",\n" + " \"type\": \"Microsoft.Communication.CallRecordingStateChanged\",\n" + " \"data\": {\n" + " \"type\": \"recordingStateChanged\",\n" + " \"recordingId\": \"recordingId\",\n" + " \"state\": \"active\",\n" + " \"startDateTime\": \"2022-08-11T23:42:45.4394211+00:00\",\n" + " \"callConnectionId\": \"callConnectionId\",\n" + " \"serverCallId\": \"serverCallId\",\n" + " \"correlationId\": \"correlationId\"\n" + " },\n" + " \"time\": \"2022-08-11T23:42:45.5346632+00:00\",\n" + " \"specversion\": \"1.0\",\n" + " \"datacontenttype\": \"application/json\",\n" + " \"subject\": \"calling/recordings/serverCallId/recordingId/recordingId\"\n" + " }\n" + "]"; CallAutomationEventBase event = EventHandler.parseEvent(receivedEvent); assertNotNull(event); RecordingStateChangedEvent recordingEvent = (RecordingStateChangedEvent) event; assertNotNull(recordingEvent); assertEquals("serverCallId", recordingEvent.getServerCallId()); assertEquals("recordingId", recordingEvent.getRecordingId()); assertEquals(RecordingState.ACTIVE, recordingEvent.getRecordingState()); } @Test public void parsePlayCompletedEvent() { String receivedEvent = 
"[{\n" + "\"id\": \"704a7a96-4d74-4ebe-9cd0-b7cc39c3d7b1\",\n" + "\"source\": \"calling/callConnections/callConnectionId/PlayCompleted\",\n" + "\"type\": \"Microsoft.Communication.PlayCompleted\",\n" + "\"data\": {\n" + "\"resultInformation\": {\n" + "\"code\": 200,\n" + "\"subCode\": 0,\n" + "\"message\": \"Action completed successfully.\"\n" + "},\n" + "\"type\": \"playCompletedEvent\",\n" + "\"callConnectionId\": \"callConnectionId\",\n" + "\"serverCallId\": \"serverCallId\",\n" + "\"correlationId\": \"correlationId\"\n" + "},\n" + "\"time\": \"2022-08-12T03:13:25.0252763+00:00\",\n" + "\"specversion\": \"1.0\",\n" + "\"datacontenttype\": \"application/json\",\n" + "\"subject\": \"calling/callConnections/callConnectionId\"\n" + "}]"; CallAutomationEventBase event = EventHandler.parseEvent(receivedEvent); assertNotNull(event); PlayCompletedEvent playCompletedEvent = (PlayCompletedEvent) event; assertNotNull(playCompletedEvent); assertEquals("serverCallId", playCompletedEvent.getServerCallId()); assertEquals(200, playCompletedEvent.getResultInformation().getCode()); assertEquals(ReasonCode.COMPLETED_SUCCESSFULLY, playCompletedEvent.getReasonCode()); } @Test public void parsePlayFailedEvent() { String receivedEvent = "[{\n" + "\"id\": \"704a7a96-4d74-4ebe-9cd0-b7cc39c3d7b1\",\n" + "\"source\": \"calling/callConnections/callConnectionId/PlayFailed\",\n" + "\"type\": \"Microsoft.Communication.PlayFailed\",\n" + "\"data\": {\n" + "\"resultInformation\": {\n" + "\"code\": 400,\n" + "\"subCode\": 8536,\n" + "\"message\": \"Action failed, file could not be downloaded.\"\n" + "},\n" + "\"type\": \"playFailedEvent\",\n" + "\"callConnectionId\": \"callConnectionId\",\n" + "\"serverCallId\": \"serverCallId\",\n" + "\"correlationId\": \"correlationId\"\n" + "},\n" + "\"time\": \"2022-08-12T03:13:25.0252763+00:00\",\n" + "\"specversion\": \"1.0\",\n" + "\"datacontenttype\": \"application/json\",\n" + "\"subject\": \"calling/callConnections/callConnectionId\"\n" + "}]"; 
CallAutomationEventBase event = EventHandler.parseEvent(receivedEvent); assertNotNull(event); PlayFailedEvent playFailedEvent = (PlayFailedEvent) event; assertNotNull(playFailedEvent); assertEquals("serverCallId", playFailedEvent.getServerCallId()); assertEquals(400, playFailedEvent.getResultInformation().getCode()); assertEquals(ReasonCode.Play.DOWNLOAD_FAILED, playFailedEvent.getReasonCode()); } @Test public void parsePlayCanceledEvent() { String receivedEvent = "[{\n" + "\"id\": \"704a7a96-4d74-4ebe-9cd0-b7cc39c3d7b1\",\n" + "\"source\": \"calling/callConnections/callConnectionId/PlayCanceled\",\n" + "\"type\": \"Microsoft.Communication.PlayCanceled\",\n" + "\"data\": {\n" + "\"type\": \"playCanceled\",\n" + "\"callConnectionId\": \"callConnectionId\",\n" + "\"serverCallId\": \"serverCallId\",\n" + "\"correlationId\": \"correlationId\"\n" + "},\n" + "\"time\": \"2022-08-12T03:13:25.0252763+00:00\",\n" + "\"specversion\": \"1.0\",\n" + "\"datacontenttype\": \"application/json\",\n" + "\"subject\": \"calling/callConnections/callConnectionId\"\n" + "}]"; CallAutomationEventBase event = EventHandler.parseEvent(receivedEvent); assertNotNull(event); PlayCanceled playCanceled = (PlayCanceled) event; assertNotNull(playCanceled); assertEquals("serverCallId", playCanceled.getServerCallId()); } @Test public void parseRecognizeCompletedEvent() { String receivedEvent = "[{\n" + "\"id\": \"704a7a96-4d74-4ebe-9cd0-b7cc39c3d7b1\",\n" + "\"source\": \"calling/callConnections/callConnectionId/RecognizeCompleted\",\n" + "\"type\": \"Microsoft.Communication.RecognizeCompleted\",\n" + "\"data\": {\n" + "\"resultInformation\": {\n" + "\"code\": 200,\n" + "\"subCode\": 0,\n" + "\"message\": \"Action completed successfully.\"\n" + "},\n" + "\"type\": \"recognizeCompleted\",\n" + "\"callConnectionId\": \"callConnectionId\",\n" + "\"serverCallId\": \"serverCallId\",\n" + "\"correlationId\": \"correlationId\"\n" + "},\n" + "\"time\": \"2022-08-12T03:13:25.0252763+00:00\",\n" + 
"\"specversion\": \"1.0\",\n" + "\"datacontenttype\": \"application/json\",\n" + "\"subject\": \"calling/callConnections/callConnectionId\"\n" + "}]"; CallAutomationEventBase event = EventHandler.parseEvent(receivedEvent); assertNotNull(event); RecognizeCompleted recognizeCompleted = (RecognizeCompleted) event; assertNotNull(recognizeCompleted); assertEquals("serverCallId", recognizeCompleted.getServerCallId()); assertEquals(200, recognizeCompleted.getResultInformation().getCode()); assertEquals(ReasonCode.COMPLETED_SUCCESSFULLY, recognizeCompleted.getReasonCode()); } @Test @Test public void parseRecognizeCanceledEvent() { String receivedEvent = "[{\n" + "\"id\": \"704a7a96-4d74-4ebe-9cd0-b7cc39c3d7b1\",\n" + "\"source\": \"calling/callConnections/callConnectionId/RecognizeCanceled\",\n" + "\"type\": \"Microsoft.Communication.RecognizeCanceled\",\n" + "\"data\": {\n" + "\"type\": \"recognizeCanceled\",\n" + "\"callConnectionId\": \"callConnectionId\",\n" + "\"serverCallId\": \"serverCallId\",\n" + "\"correlationId\": \"correlationId\"\n" + "},\n" + "\"time\": \"2022-08-12T03:13:25.0252763+00:00\",\n" + "\"specversion\": \"1.0\",\n" + "\"datacontenttype\": \"application/json\",\n" + "\"subject\": \"calling/callConnections/callConnectionId\"\n" + "}]"; CallAutomationEventBase event = EventHandler.parseEvent(receivedEvent); assertNotNull(event); RecognizeCanceled recognizeCanceled = (RecognizeCanceled) event; assertNotNull(recognizeCanceled); assertEquals("serverCallId", recognizeCanceled.getServerCallId()); } }
request headers won't be a null or empty list?
public Mono<HttpResponse> send(HttpRequest request, Context context) { ProgressReporter progressReporter = Contexts.with(context).getHttpRequestProgressReporter(); RequestOptions options = new RequestOptions() .setMethod(HttpMethod.valueOf(request.getHttpMethod().name())) .setAbsoluteURI(request.getUrl()); return Mono.create(sink -> client.request(options, requestResult -> { if (requestResult.failed()) { sink.error(requestResult.cause()); return; } HttpClientRequest vertxHttpRequest = requestResult.result(); vertxHttpRequest.exceptionHandler(sink::error); request.getHeaders().stream() .forEach(header -> vertxHttpRequest.putHeader(header.getName(), header.getValuesList())); if (request.getHeaders().get("Content-Length") == null) { vertxHttpRequest.setChunked(true); } vertxHttpRequest.response(event -> { if (event.succeeded()) { HttpClientResponse vertxHttpResponse = event.result(); vertxHttpResponse.exceptionHandler(sink::error); vertxHttpResponse.body(bodyEvent -> { if (bodyEvent.succeeded()) { sink.success(new BufferedVertxHttpResponse(request, vertxHttpResponse, bodyEvent.result())); } else { sink.error(bodyEvent.cause()); } }); } else { sink.error(event.cause()); } }); Flux<ByteBuffer> requestBody = request.getBody(); if (requestBody == null) { vertxHttpRequest.end(); } else { if (progressReporter != null) { requestBody = requestBody.map(buffer -> { progressReporter.reportProgress(buffer.remaining()); return buffer; }); } FluxUtil.collectBytesFromNetworkResponse(requestBody, request.getHeaders()) .subscribeOn(scheduler) .subscribe(bytes -> vertxHttpRequest.write(Buffer.buffer(Unpooled.wrappedBuffer(bytes))), sink::error, vertxHttpRequest::end); } })); }
if (request.getHeaders().get("Content-Length") == null) {
public Mono<HttpResponse> send(HttpRequest request, Context context) { ProgressReporter progressReporter = Contexts.with(context).getHttpRequestProgressReporter(); RequestOptions options = new RequestOptions() .setMethod(HttpMethod.valueOf(request.getHttpMethod().name())) .setAbsoluteURI(request.getUrl()); return Mono.create(sink -> client.request(options, requestResult -> { if (requestResult.failed()) { sink.error(requestResult.cause()); return; } HttpClientRequest vertxHttpRequest = requestResult.result(); vertxHttpRequest.exceptionHandler(sink::error); request.getHeaders().stream() .forEach(header -> vertxHttpRequest.putHeader(header.getName(), header.getValuesList())); if (request.getHeaders().get("Content-Length") == null) { vertxHttpRequest.setChunked(true); } vertxHttpRequest.response(event -> { if (event.succeeded()) { HttpClientResponse vertxHttpResponse = event.result(); vertxHttpResponse.exceptionHandler(sink::error); vertxHttpResponse.body(bodyEvent -> { if (bodyEvent.succeeded()) { sink.success(new BufferedVertxHttpResponse(request, vertxHttpResponse, bodyEvent.result())); } else { sink.error(bodyEvent.cause()); } }); } else { sink.error(event.cause()); } }); Flux<ByteBuffer> requestBody = request.getBody(); if (requestBody == null) { vertxHttpRequest.end(); } else { if (progressReporter != null) { requestBody = requestBody.map(buffer -> { progressReporter.reportProgress(buffer.remaining()); return buffer; }); } FluxUtil.collectBytesFromNetworkResponse(requestBody, request.getHeaders()) .subscribeOn(scheduler) .subscribe(bytes -> vertxHttpRequest.write(Buffer.buffer(Unpooled.wrappedBuffer(bytes))), sink::error, vertxHttpRequest::end); } })); }
class VertxAsyncHttpClient implements HttpClient { private final Scheduler scheduler; final io.vertx.core.http.HttpClient client; /** * Constructs a {@link VertxAsyncHttpClient}. * * @param client The Vert.x {@link io.vertx.core.http.HttpClient} */ VertxAsyncHttpClient(io.vertx.core.http.HttpClient client, Vertx vertx) { Objects.requireNonNull(client, "client cannot be null"); Objects.requireNonNull(vertx, "vertx cannot be null"); this.client = client; this.scheduler = Schedulers.fromExecutor(vertx.nettyEventLoopGroup()); } @Override public Mono<HttpResponse> send(HttpRequest request) { return send(request, Context.NONE); } @Override }
class VertxAsyncHttpClient implements HttpClient { private final Scheduler scheduler; final io.vertx.core.http.HttpClient client; /** * Constructs a {@link VertxAsyncHttpClient}. * * @param client The Vert.x {@link io.vertx.core.http.HttpClient} */ VertxAsyncHttpClient(io.vertx.core.http.HttpClient client, Vertx vertx) { Objects.requireNonNull(client, "client cannot be null"); Objects.requireNonNull(vertx, "vertx cannot be null"); this.client = client; this.scheduler = Schedulers.fromExecutor(vertx.nettyEventLoopGroup()); } @Override public Mono<HttpResponse> send(HttpRequest request) { return send(request, Context.NONE); } @Override }
why no more defer?
public Flux<ByteBuffer> getBody() { return (body.length == 0) ? Flux.empty() : Flux.just(ByteBuffer.wrap(body)); }
return (body.length == 0) ? Flux.empty() : Flux.just(ByteBuffer.wrap(body));
public Flux<ByteBuffer> getBody() { return (body.length == 0) ? Flux.empty() : Flux.just(ByteBuffer.wrap(body)); }
class BufferedVertxHttpResponse extends VertxHttpResponseBase { private final byte[] body; public BufferedVertxHttpResponse(HttpRequest azureHttpRequest, HttpClientResponse vertxHttpResponse, Buffer body) { super(azureHttpRequest, vertxHttpResponse); this.body = body.getBytes(); } @Override public BinaryData getBodyAsBinaryData() { return BinaryData.fromBytes(body); } @Override @Override public Mono<byte[]> getBodyAsByteArray() { return (body.length == 0) ? Mono.empty() : Mono.just(body); } @Override public HttpResponse buffer() { return this; } }
class BufferedVertxHttpResponse extends VertxHttpResponseBase { private final byte[] body; public BufferedVertxHttpResponse(HttpRequest azureHttpRequest, HttpClientResponse vertxHttpResponse, Buffer body) { super(azureHttpRequest, vertxHttpResponse); this.body = body.getBytes(); } @Override public BinaryData getBodyAsBinaryData() { return BinaryData.fromBytes(body); } @Override @Override public Mono<byte[]> getBodyAsByteArray() { return (body.length == 0) ? Mono.empty() : Mono.just(body); } @Override public HttpResponse buffer() { return this; } }
Defer is only necessary is the `Flux` or `Mono` creation is costly, such as making a request, and the processing should be deferred until subscribed. In this case all data is already available, so deferring is just adding overhead with no benefit.
public Flux<ByteBuffer> getBody() { return (body.length == 0) ? Flux.empty() : Flux.just(ByteBuffer.wrap(body)); }
return (body.length == 0) ? Flux.empty() : Flux.just(ByteBuffer.wrap(body));
public Flux<ByteBuffer> getBody() { return (body.length == 0) ? Flux.empty() : Flux.just(ByteBuffer.wrap(body)); }
class BufferedVertxHttpResponse extends VertxHttpResponseBase { private final byte[] body; public BufferedVertxHttpResponse(HttpRequest azureHttpRequest, HttpClientResponse vertxHttpResponse, Buffer body) { super(azureHttpRequest, vertxHttpResponse); this.body = body.getBytes(); } @Override public BinaryData getBodyAsBinaryData() { return BinaryData.fromBytes(body); } @Override @Override public Mono<byte[]> getBodyAsByteArray() { return (body.length == 0) ? Mono.empty() : Mono.just(body); } @Override public HttpResponse buffer() { return this; } }
class BufferedVertxHttpResponse extends VertxHttpResponseBase { private final byte[] body; public BufferedVertxHttpResponse(HttpRequest azureHttpRequest, HttpClientResponse vertxHttpResponse, Buffer body) { super(azureHttpRequest, vertxHttpResponse); this.body = body.getBytes(); } @Override public BinaryData getBodyAsBinaryData() { return BinaryData.fromBytes(body); } @Override @Override public Mono<byte[]> getBodyAsByteArray() { return (body.length == 0) ? Mono.empty() : Mono.just(body); } @Override public HttpResponse buffer() { return this; } }
Why do we compare against the class name instead of doing ``` e.getClass() != CredentialUnavailableException.class ```
public AccessToken getTokenSync(TokenRequestContext request) { List<CredentialUnavailableException> exceptions = new ArrayList<>(4); for (TokenCredential credential : credentials) { try { return credential.getTokenSync(request); } catch (Exception e) { if (!e.getClass().getSimpleName().equals("CredentialUnavailableException")) { throw new ClientAuthenticationException( unavailableError + credential.getClass().getSimpleName() + " authentication failed. Error Details: " + e.getMessage(), null, e); } exceptions.add((CredentialUnavailableException) e); LOGGER.info("Azure Identity => Attempted credential {} is unavailable.", credential.getClass().getSimpleName()); } } CredentialUnavailableException last = exceptions.get(exceptions.size() - 1); for (int z = exceptions.size() - 2; z >= 0; z--) { CredentialUnavailableException current = exceptions.get(z); last = new CredentialUnavailableException(current.getMessage() + "\r\n" + last.getMessage() + (z == 0 ? "To mitigate this issue, please refer to the troubleshooting guidelines here at " + "https: : "")); } throw last; }
if (!e.getClass().getSimpleName().equals("CredentialUnavailableException")) {
public AccessToken getTokenSync(TokenRequestContext request) { List<CredentialUnavailableException> exceptions = new ArrayList<>(4); for (TokenCredential credential : credentials) { try { return credential.getTokenSync(request); } catch (Exception e) { if (e.getClass() != CredentialUnavailableException.class) { throw new ClientAuthenticationException( unavailableError + credential.getClass().getSimpleName() + " authentication failed. Error Details: " + e.getMessage(), null, e); } else { if (e instanceof CredentialUnavailableException) { exceptions.add((CredentialUnavailableException) e); } } LOGGER.info("Azure Identity => Attempted credential {} is unavailable.", credential.getClass().getSimpleName()); } } CredentialUnavailableException last = exceptions.get(exceptions.size() - 1); for (int z = exceptions.size() - 2; z >= 0; z--) { CredentialUnavailableException current = exceptions.get(z); last = new CredentialUnavailableException(current.getMessage() + "\r\n" + last.getMessage() + (z == 0 ? "To mitigate this issue, please refer to the troubleshooting guidelines here at " + "https: : "")); } throw last; }
class ChainedTokenCredential implements TokenCredential { private static final ClientLogger LOGGER = new ClientLogger(ChainedTokenCredential.class); private final List<TokenCredential> credentials; private final String unavailableError = this.getClass().getSimpleName() + " authentication failed. ---> "; /** * Create an instance of chained token credential that aggregates a list of token * credentials. */ ChainedTokenCredential(List<TokenCredential> credentials) { this.credentials = Collections.unmodifiableList(credentials); } /** * Sequentially calls {@link TokenCredential * returning the first successfully obtained {@link AccessToken}. * * This method is called automatically by Azure SDK client libraries. * You may call this method directly, but you must also handle token * caching and token refreshing. * * @param request the details of the token request * @return a Publisher that emits a single access token */ @Override public Mono<AccessToken> getToken(TokenRequestContext request) { List<CredentialUnavailableException> exceptions = new ArrayList<>(4); return Flux.fromIterable(credentials) .flatMap(p -> p.getToken(request) .doOnNext(t -> LOGGER.info("Azure Identity => Attempted credential {} returns a token", p.getClass().getSimpleName())) .onErrorResume(Exception.class, t -> { if (!t.getClass().getSimpleName().equals("CredentialUnavailableException")) { return Mono.error(new ClientAuthenticationException( unavailableError + p.getClass().getSimpleName() + " authentication failed. 
Error Details: " + t.getMessage(), null, t)); } exceptions.add((CredentialUnavailableException) t); LOGGER.info("Azure Identity => Attempted credential {} is unavailable.", p.getClass().getSimpleName()); return Mono.empty(); }), 1) .next() .switchIfEmpty(Mono.defer(() -> { CredentialUnavailableException last = exceptions.get(exceptions.size() - 1); for (int z = exceptions.size() - 2; z >= 0; z--) { CredentialUnavailableException current = exceptions.get(z); last = new CredentialUnavailableException(current.getMessage() + "\r\n" + last.getMessage() + (z == 0 ? "To mitigate this issue, please refer to the troubleshooting guidelines here at " + "https: : "")); } return Mono.error(last); })); } @Override }
class ChainedTokenCredential implements TokenCredential { private static final ClientLogger LOGGER = new ClientLogger(ChainedTokenCredential.class); private final List<TokenCredential> credentials; private final String unavailableError = this.getClass().getSimpleName() + " authentication failed. ---> "; /** * Create an instance of chained token credential that aggregates a list of token * credentials. */ ChainedTokenCredential(List<TokenCredential> credentials) { this.credentials = Collections.unmodifiableList(credentials); } /** * Sequentially calls {@link TokenCredential * returning the first successfully obtained {@link AccessToken}. * * This method is called automatically by Azure SDK client libraries. * You may call this method directly, but you must also handle token * caching and token refreshing. * * @param request the details of the token request * @return a Publisher that emits a single access token */ @Override public Mono<AccessToken> getToken(TokenRequestContext request) { List<CredentialUnavailableException> exceptions = new ArrayList<>(4); return Flux.fromIterable(credentials) .flatMap(p -> p.getToken(request) .doOnNext(t -> LOGGER.info("Azure Identity => Attempted credential {} returns a token", p.getClass().getSimpleName())) .onErrorResume(Exception.class, t -> { if (!t.getClass().getSimpleName().equals("CredentialUnavailableException")) { return Mono.error(new ClientAuthenticationException( unavailableError + p.getClass().getSimpleName() + " authentication failed. 
Error Details: " + t.getMessage(), null, t)); } exceptions.add((CredentialUnavailableException) t); LOGGER.info("Azure Identity => Attempted credential {} is unavailable.", p.getClass().getSimpleName()); return Mono.empty(); }), 1) .next() .switchIfEmpty(Mono.defer(() -> { CredentialUnavailableException last = exceptions.get(exceptions.size() - 1); for (int z = exceptions.size() - 2; z >= 0; z--) { CredentialUnavailableException current = exceptions.get(z); last = new CredentialUnavailableException(current.getMessage() + "\r\n" + last.getMessage() + (z == 0 ? "To mitigate this issue, please refer to the troubleshooting guidelines here at " + "https: : "")); } return Mono.error(last); })); } @Override }
Why do we throw in this case instead of adding the exception to the exceptions list and trying subsequent credentials?
public AccessToken getTokenSync(TokenRequestContext request) { List<CredentialUnavailableException> exceptions = new ArrayList<>(4); for (TokenCredential credential : credentials) { try { return credential.getTokenSync(request); } catch (Exception e) { if (!e.getClass().getSimpleName().equals("CredentialUnavailableException")) { throw new ClientAuthenticationException( unavailableError + credential.getClass().getSimpleName() + " authentication failed. Error Details: " + e.getMessage(), null, e); } exceptions.add((CredentialUnavailableException) e); LOGGER.info("Azure Identity => Attempted credential {} is unavailable.", credential.getClass().getSimpleName()); } } CredentialUnavailableException last = exceptions.get(exceptions.size() - 1); for (int z = exceptions.size() - 2; z >= 0; z--) { CredentialUnavailableException current = exceptions.get(z); last = new CredentialUnavailableException(current.getMessage() + "\r\n" + last.getMessage() + (z == 0 ? "To mitigate this issue, please refer to the troubleshooting guidelines here at " + "https: : "")); } throw last; }
throw new ClientAuthenticationException(
public AccessToken getTokenSync(TokenRequestContext request) { List<CredentialUnavailableException> exceptions = new ArrayList<>(4); for (TokenCredential credential : credentials) { try { return credential.getTokenSync(request); } catch (Exception e) { if (e.getClass() != CredentialUnavailableException.class) { throw new ClientAuthenticationException( unavailableError + credential.getClass().getSimpleName() + " authentication failed. Error Details: " + e.getMessage(), null, e); } else { if (e instanceof CredentialUnavailableException) { exceptions.add((CredentialUnavailableException) e); } } LOGGER.info("Azure Identity => Attempted credential {} is unavailable.", credential.getClass().getSimpleName()); } } CredentialUnavailableException last = exceptions.get(exceptions.size() - 1); for (int z = exceptions.size() - 2; z >= 0; z--) { CredentialUnavailableException current = exceptions.get(z); last = new CredentialUnavailableException(current.getMessage() + "\r\n" + last.getMessage() + (z == 0 ? "To mitigate this issue, please refer to the troubleshooting guidelines here at " + "https: : "")); } throw last; }
class ChainedTokenCredential implements TokenCredential { private static final ClientLogger LOGGER = new ClientLogger(ChainedTokenCredential.class); private final List<TokenCredential> credentials; private final String unavailableError = this.getClass().getSimpleName() + " authentication failed. ---> "; /** * Create an instance of chained token credential that aggregates a list of token * credentials. */ ChainedTokenCredential(List<TokenCredential> credentials) { this.credentials = Collections.unmodifiableList(credentials); } /** * Sequentially calls {@link TokenCredential * returning the first successfully obtained {@link AccessToken}. * * This method is called automatically by Azure SDK client libraries. * You may call this method directly, but you must also handle token * caching and token refreshing. * * @param request the details of the token request * @return a Publisher that emits a single access token */ @Override public Mono<AccessToken> getToken(TokenRequestContext request) { List<CredentialUnavailableException> exceptions = new ArrayList<>(4); return Flux.fromIterable(credentials) .flatMap(p -> p.getToken(request) .doOnNext(t -> LOGGER.info("Azure Identity => Attempted credential {} returns a token", p.getClass().getSimpleName())) .onErrorResume(Exception.class, t -> { if (!t.getClass().getSimpleName().equals("CredentialUnavailableException")) { return Mono.error(new ClientAuthenticationException( unavailableError + p.getClass().getSimpleName() + " authentication failed. 
Error Details: " + t.getMessage(), null, t)); } exceptions.add((CredentialUnavailableException) t); LOGGER.info("Azure Identity => Attempted credential {} is unavailable.", p.getClass().getSimpleName()); return Mono.empty(); }), 1) .next() .switchIfEmpty(Mono.defer(() -> { CredentialUnavailableException last = exceptions.get(exceptions.size() - 1); for (int z = exceptions.size() - 2; z >= 0; z--) { CredentialUnavailableException current = exceptions.get(z); last = new CredentialUnavailableException(current.getMessage() + "\r\n" + last.getMessage() + (z == 0 ? "To mitigate this issue, please refer to the troubleshooting guidelines here at " + "https: : "")); } return Mono.error(last); })); } @Override }
class ChainedTokenCredential implements TokenCredential { private static final ClientLogger LOGGER = new ClientLogger(ChainedTokenCredential.class); private final List<TokenCredential> credentials; private final String unavailableError = this.getClass().getSimpleName() + " authentication failed. ---> "; /** * Create an instance of chained token credential that aggregates a list of token * credentials. */ ChainedTokenCredential(List<TokenCredential> credentials) { this.credentials = Collections.unmodifiableList(credentials); } /** * Sequentially calls {@link TokenCredential * returning the first successfully obtained {@link AccessToken}. * * This method is called automatically by Azure SDK client libraries. * You may call this method directly, but you must also handle token * caching and token refreshing. * * @param request the details of the token request * @return a Publisher that emits a single access token */ @Override public Mono<AccessToken> getToken(TokenRequestContext request) { List<CredentialUnavailableException> exceptions = new ArrayList<>(4); return Flux.fromIterable(credentials) .flatMap(p -> p.getToken(request) .doOnNext(t -> LOGGER.info("Azure Identity => Attempted credential {} returns a token", p.getClass().getSimpleName())) .onErrorResume(Exception.class, t -> { if (!t.getClass().getSimpleName().equals("CredentialUnavailableException")) { return Mono.error(new ClientAuthenticationException( unavailableError + p.getClass().getSimpleName() + " authentication failed. 
Error Details: " + t.getMessage(), null, t)); } exceptions.add((CredentialUnavailableException) t); LOGGER.info("Azure Identity => Attempted credential {} is unavailable.", p.getClass().getSimpleName()); return Mono.empty(); }), 1) .next() .switchIfEmpty(Mono.defer(() -> { CredentialUnavailableException last = exceptions.get(exceptions.size() - 1); for (int z = exceptions.size() - 2; z >= 0; z--) { CredentialUnavailableException current = exceptions.get(z); last = new CredentialUnavailableException(current.getMessage() + "\r\n" + last.getMessage() + (z == 0 ? "To mitigate this issue, please refer to the troubleshooting guidelines here at " + "https: : "")); } return Mono.error(last); })); } @Override }
nit: when the exception is ignored use a variable name like `ignored`
public AccessToken getTokenSync(TokenRequestContext request) { try { AccessToken token = identitySyncClient.authenticateWithConfidentialClientCacheSync(request); LoggingUtil.logTokenSuccess(LOGGER, request); return token; } catch (Exception e) { } try { AccessToken token = identitySyncClient.authenticateWithConfidentialClient(request); LoggingUtil.logTokenSuccess(LOGGER, request); return token; } catch (Exception e) { LoggingUtil.logTokenError(LOGGER, identityClient.getIdentityClientOptions(), request, e); throw e; } }
} catch (Exception e) { }
public AccessToken getTokenSync(TokenRequestContext request) { try { AccessToken token = identitySyncClient.authenticateWithConfidentialClientCache(request); LoggingUtil.logTokenSuccess(LOGGER, request); return token; } catch (Exception e) { } try { AccessToken token = identitySyncClient.authenticateWithConfidentialClient(request); LoggingUtil.logTokenSuccess(LOGGER, request); return token; } catch (Exception e) { LoggingUtil.logTokenError(LOGGER, identityClient.getIdentityClientOptions(), request, e); throw e; } }
class ClientAssertionCredential implements TokenCredential { private static final ClientLogger LOGGER = new ClientLogger(ClientAssertionCredential.class); private final IdentityClient identityClient; private final IdentitySyncClient identitySyncClient; /** * Creates an instance of ClientAssertionCredential. * * @param clientId the client ID of user assigned or system assigned identity. * @param tenantId the tenant ID of the application * @param clientAssertion the supplier of the client assertion * @param identityClientOptions the options to configure the identity client */ ClientAssertionCredential(String clientId, String tenantId, Supplier<String> clientAssertion, IdentityClientOptions identityClientOptions) { IdentityClientBuilder builder = new IdentityClientBuilder() .tenantId(tenantId) .clientId(clientId) .clientAssertionSupplier(clientAssertion) .identityClientOptions(identityClientOptions); identityClient = builder.build(); identitySyncClient = builder.buildSyncClient(); } @Override public Mono<AccessToken> getToken(TokenRequestContext request) { return identityClient.authenticateWithConfidentialClientCache(request) .onErrorResume(t -> Mono.empty()) .switchIfEmpty(Mono.defer(() -> identityClient.authenticateWithConfidentialClient(request))) .doOnNext(token -> LoggingUtil.logTokenSuccess(LOGGER, request)) .doOnError(error -> LoggingUtil.logTokenError(LOGGER, identityClient.getIdentityClientOptions(), request, error)); } @Override }
class ClientAssertionCredential implements TokenCredential { private static final ClientLogger LOGGER = new ClientLogger(ClientAssertionCredential.class); private final IdentityClient identityClient; private final IdentitySyncClient identitySyncClient; /** * Creates an instance of ClientAssertionCredential. * * @param clientId the client ID of user assigned or system assigned identity. * @param tenantId the tenant ID of the application * @param clientAssertion the supplier of the client assertion * @param identityClientOptions the options to configure the identity client */ ClientAssertionCredential(String clientId, String tenantId, Supplier<String> clientAssertion, IdentityClientOptions identityClientOptions) { IdentityClientBuilder builder = new IdentityClientBuilder() .tenantId(tenantId) .clientId(clientId) .clientAssertionSupplier(clientAssertion) .identityClientOptions(identityClientOptions); identityClient = builder.build(); identitySyncClient = builder.buildSyncClient(); } @Override public Mono<AccessToken> getToken(TokenRequestContext request) { return identityClient.authenticateWithConfidentialClientCache(request) .onErrorResume(t -> Mono.empty()) .switchIfEmpty(Mono.defer(() -> identityClient.authenticateWithConfidentialClient(request))) .doOnNext(token -> LoggingUtil.logTokenSuccess(LOGGER, request)) .doOnError(error -> LoggingUtil.logTokenError(LOGGER, identityClient.getIdentityClientOptions(), request, error)); } @Override }
Tangential to the focus of this PR, does it make sense to allow the creation of the credential type with `automaticAuthentication = false`? The logic seems to be that the cached authentication cannot be used unless this authentication passes.
public AccessToken getTokenSync(TokenRequestContext request) { if (cachedToken.get() != null) { try { return identitySyncClient.authenticateWithPublicClientCache(request, cachedToken.get()); } catch (Exception e) { } } try { if (!automaticAuthentication) { throw LOGGER.logExceptionAsError(new AuthenticationRequiredException("Interactive " + "authentication is needed to acquire token. Call Authenticate to initiate the device " + "code authentication.", request)); } MsalToken accessToken = identitySyncClient.authenticateWithDeviceCode(request, challengeConsumer); updateCache(accessToken); LoggingUtil.logTokenSuccess(LOGGER, request); return accessToken; } catch (Exception e) { LoggingUtil.logTokenError(LOGGER, identityClient.getIdentityClientOptions(), request, e); throw e; } }
if (!automaticAuthentication) {
public AccessToken getTokenSync(TokenRequestContext request) { if (cachedToken.get() != null) { try { return identitySyncClient.authenticateWithPublicClientCache(request, cachedToken.get()); } catch (Exception e) { } } try { if (!automaticAuthentication) { throw LOGGER.logExceptionAsError(new AuthenticationRequiredException("Interactive " + "authentication is needed to acquire token. Call Authenticate to initiate the device " + "code authentication.", request)); } MsalToken accessToken = identitySyncClient.authenticateWithDeviceCode(request, challengeConsumer); updateCache(accessToken); LoggingUtil.logTokenSuccess(LOGGER, request); return accessToken; } catch (Exception e) { LoggingUtil.logTokenError(LOGGER, identityClient.getIdentityClientOptions(), request, e); throw e; } }
class DeviceCodeCredential implements TokenCredential { private static final ClientLogger LOGGER = new ClientLogger(DeviceCodeCredential.class); private final Consumer<DeviceCodeInfo> challengeConsumer; private final IdentityClient identityClient; private final IdentitySyncClient identitySyncClient; private final AtomicReference<MsalAuthenticationAccount> cachedToken; private final String authorityHost; private final boolean automaticAuthentication; /** * Creates a DeviceCodeCredential with the given identity client options. * * @param clientId the client ID of the application * @param tenantId the tenant ID of the application * @param challengeConsumer a method allowing the user to meet the device code challenge * @param automaticAuthentication indicates whether automatic authentication should be attempted or not. * @param identityClientOptions the options for configuring the identity client */ DeviceCodeCredential(String clientId, String tenantId, Consumer<DeviceCodeInfo> challengeConsumer, boolean automaticAuthentication, IdentityClientOptions identityClientOptions) { this.challengeConsumer = challengeConsumer; IdentityClientBuilder builder = new IdentityClientBuilder() .tenantId(tenantId) .clientId(clientId) .identityClientOptions(identityClientOptions); identityClient = builder.build(); identitySyncClient = builder.buildSyncClient(); this.cachedToken = new AtomicReference<>(); this.authorityHost = identityClientOptions.getAuthorityHost(); this.automaticAuthentication = automaticAuthentication; if (identityClientOptions.getAuthenticationRecord() != null) { cachedToken.set(new MsalAuthenticationAccount(identityClientOptions.getAuthenticationRecord())); } } @Override public Mono<AccessToken> getToken(TokenRequestContext request) { return Mono.defer(() -> { if (cachedToken.get() != null) { return identityClient.authenticateWithPublicClientCache(request, cachedToken.get()) .onErrorResume(t -> Mono.empty()); } else { return Mono.empty(); } }).switchIfEmpty( 
Mono.defer(() -> { if (!automaticAuthentication) { return Mono.error(LOGGER.logExceptionAsError(new AuthenticationRequiredException("Interactive " + "authentication is needed to acquire token. Call Authenticate to initiate the device " + "code authentication.", request))); } return identityClient.authenticateWithDeviceCode(request, challengeConsumer); })) .map(this::updateCache) .doOnNext(token -> LoggingUtil.logTokenSuccess(LOGGER, request)) .doOnError(error -> LoggingUtil.logTokenError(LOGGER, identityClient.getIdentityClientOptions(), request, error)); } @Override /** * Authenticates a user via the device code flow. * * <p> The credential acquires a verification URL and code from the Azure Active Directory. The user must * browse to the URL, enter the code, and authenticate with Azure Active Directory. If the user authenticates * successfully, the credential receives an access token. </p> * * @param request The details of the authentication request. * * @return The {@link AuthenticationRecord} which can be used to silently authenticate the account * on future execution if persistent caching was configured via * {@link DeviceCodeCredentialBuilder * when credential was instantiated. */ public Mono<AuthenticationRecord> authenticate(TokenRequestContext request) { return Mono.defer(() -> identityClient.authenticateWithDeviceCode(request, challengeConsumer)) .map(this::updateCache) .map(msalToken -> cachedToken.get().getAuthenticationRecord()); } /** * Authenticates a user via the device code flow. * * <p> The credential acquires a verification URL and code from the Azure Active Directory. The user must * browse to the URL, enter the code, and authenticate with Azure Active Directory. If the user authenticates * successfully, the credential receives an access token. 
</p> * * @return The {@link AuthenticationRecord} which can be used to silently authenticate the account * on future execution if persistent caching was configured via * {@link DeviceCodeCredentialBuilder * when credential was instantiated. */ public Mono<AuthenticationRecord> authenticate() { String defaultScope = AzureAuthorityHosts.getDefaultScope(authorityHost); if (defaultScope == null) { return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, identityClient.getIdentityClientOptions(), new CredentialUnavailableException("Authenticating in this " + "environment requires specifying a TokenRequestContext."))); } return authenticate(new TokenRequestContext().addScopes(defaultScope)); } private AccessToken updateCache(MsalToken msalToken) { cachedToken.set( new MsalAuthenticationAccount( new AuthenticationRecord(msalToken.getAuthenticationResult(), identityClient.getTenantId(), identityClient.getClientId()), msalToken.getAccount().getTenantProfiles())); return msalToken; } }
class DeviceCodeCredential implements TokenCredential { private static final ClientLogger LOGGER = new ClientLogger(DeviceCodeCredential.class); private final Consumer<DeviceCodeInfo> challengeConsumer; private final IdentityClient identityClient; private final IdentitySyncClient identitySyncClient; private final AtomicReference<MsalAuthenticationAccount> cachedToken; private final String authorityHost; private final boolean automaticAuthentication; /** * Creates a DeviceCodeCredential with the given identity client options. * * @param clientId the client ID of the application * @param tenantId the tenant ID of the application * @param challengeConsumer a method allowing the user to meet the device code challenge * @param automaticAuthentication indicates whether automatic authentication should be attempted or not. * @param identityClientOptions the options for configuring the identity client */ DeviceCodeCredential(String clientId, String tenantId, Consumer<DeviceCodeInfo> challengeConsumer, boolean automaticAuthentication, IdentityClientOptions identityClientOptions) { this.challengeConsumer = challengeConsumer; IdentityClientBuilder builder = new IdentityClientBuilder() .tenantId(tenantId) .clientId(clientId) .identityClientOptions(identityClientOptions); identityClient = builder.build(); identitySyncClient = builder.buildSyncClient(); this.cachedToken = new AtomicReference<>(); this.authorityHost = identityClientOptions.getAuthorityHost(); this.automaticAuthentication = automaticAuthentication; if (identityClientOptions.getAuthenticationRecord() != null) { cachedToken.set(new MsalAuthenticationAccount(identityClientOptions.getAuthenticationRecord())); } } @Override public Mono<AccessToken> getToken(TokenRequestContext request) { return Mono.defer(() -> { if (cachedToken.get() != null) { return identityClient.authenticateWithPublicClientCache(request, cachedToken.get()) .onErrorResume(t -> Mono.empty()); } else { return Mono.empty(); } }).switchIfEmpty( 
Mono.defer(() -> { if (!automaticAuthentication) { return Mono.error(LOGGER.logExceptionAsError(new AuthenticationRequiredException("Interactive " + "authentication is needed to acquire token. Call Authenticate to initiate the device " + "code authentication.", request))); } return identityClient.authenticateWithDeviceCode(request, challengeConsumer); })) .map(this::updateCache) .doOnNext(token -> LoggingUtil.logTokenSuccess(LOGGER, request)) .doOnError(error -> LoggingUtil.logTokenError(LOGGER, identityClient.getIdentityClientOptions(), request, error)); } @Override /** * Authenticates a user via the device code flow. * * <p> The credential acquires a verification URL and code from the Azure Active Directory. The user must * browse to the URL, enter the code, and authenticate with Azure Active Directory. If the user authenticates * successfully, the credential receives an access token. </p> * * @param request The details of the authentication request. * * @return The {@link AuthenticationRecord} which can be used to silently authenticate the account * on future execution if persistent caching was configured via * {@link DeviceCodeCredentialBuilder * when credential was instantiated. */ public Mono<AuthenticationRecord> authenticate(TokenRequestContext request) { return Mono.defer(() -> identityClient.authenticateWithDeviceCode(request, challengeConsumer)) .map(this::updateCache) .map(msalToken -> cachedToken.get().getAuthenticationRecord()); } /** * Authenticates a user via the device code flow. * * <p> The credential acquires a verification URL and code from the Azure Active Directory. The user must * browse to the URL, enter the code, and authenticate with Azure Active Directory. If the user authenticates * successfully, the credential receives an access token. 
</p> * * @return The {@link AuthenticationRecord} which can be used to silently authenticate the account * on future execution if persistent caching was configured via * {@link DeviceCodeCredentialBuilder * when credential was instantiated. */ public Mono<AuthenticationRecord> authenticate() { String defaultScope = AzureAuthorityHosts.getDefaultScope(authorityHost); if (defaultScope == null) { return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, identityClient.getIdentityClientOptions(), new CredentialUnavailableException("Authenticating in this " + "environment requires specifying a TokenRequestContext."))); } return authenticate(new TokenRequestContext().addScopes(defaultScope)); } private AccessToken updateCache(MsalToken msalToken) { cachedToken.set( new MsalAuthenticationAccount( new AuthenticationRecord(msalToken.getAuthenticationResult(), identityClient.getTenantId(), identityClient.getClientId()), msalToken.getAccount().getTenantProfiles())); return msalToken; } }
```suggestion String body = response.getBodyAsBinaryData().toString(); ```
public IHttpResponse send(HttpRequest httpRequest) { com.azure.core.http.HttpRequest request = new com.azure.core.http.HttpRequest( HttpMethod.valueOf(httpRequest.httpMethod().name()), httpRequest.url()); if (httpRequest.headers() != null) { request.setHeaders(new HttpHeaders(httpRequest.headers())); } if (httpRequest.body() != null) { request.setBody(httpRequest.body()); } HttpResponse response = httpPipeline.sendSync(request, Context.NONE); String body = response.getBodyAsBinaryData().toString(); if (CoreUtils.isNullOrEmpty(body)) { logAccountIdentifiersIfConfigured(body); com.microsoft.aad.msal4j.HttpResponse httpResponse = new com.microsoft.aad.msal4j.HttpResponse() .body(body) .statusCode(response.getStatusCode()); httpResponse.addHeaders(response.getHeaders().stream().collect(Collectors.toMap(HttpHeader::getName, HttpHeader::getValuesList))); return httpResponse; } com.microsoft.aad.msal4j.HttpResponse httpResponse = new com.microsoft.aad.msal4j.HttpResponse() .statusCode(response.getStatusCode()); httpResponse.addHeaders(response.getHeaders().stream().collect(Collectors.toMap(HttpHeader::getName, HttpHeader::getValuesList))); return httpResponse; }
String body = response.getBodyAsBinaryData().toString();
public IHttpResponse send(HttpRequest httpRequest) { com.azure.core.http.HttpRequest request = new com.azure.core.http.HttpRequest( HttpMethod.valueOf(httpRequest.httpMethod().name()), httpRequest.url()); if (httpRequest.headers() != null) { request.setHeaders(new HttpHeaders(httpRequest.headers())); } if (httpRequest.body() != null) { request.setBody(httpRequest.body()); } HttpResponse response = httpPipeline.sendSync(request, Context.NONE); String body = response.getBodyAsBinaryData().toString(); logAccountIdentifiersIfConfigured(body); com.microsoft.aad.msal4j.HttpResponse httpResponse = new com.microsoft.aad.msal4j.HttpResponse() .statusCode(response.getStatusCode()); if (!CoreUtils.isNullOrEmpty(body)) { httpResponse.body(body); } httpResponse.addHeaders(response.getHeaders().stream().collect(Collectors.toMap(HttpHeader::getName, HttpHeader::getValuesList))); return httpResponse; }
class HttpPipelineAdapter implements IHttpClient { private static final ClientLogger CLIENT_LOGGER = new ClientLogger(HttpPipelineAdapter.class); private static final JsonFactory JSON_FACTORY = new JsonFactory(); private static final String ACCOUNT_IDENTIFIER_LOG_MESSAGE = "[Authenticated account] Client ID: {0}, Tenant ID: {1}" + ", User Principal Name: {2}, Object ID (user): {3})"; private static final String APPLICATION_IDENTIFIER = "Application Identifier"; private static final String OBJECT_ID = "Object Id"; private static final String TENANT_ID = "Tenant Id"; private static final String USER_PRINCIPAL_NAME = "User Principal Name"; private static final String ACCESS_TOKEN_JSON_KEY = "access_token"; private static final String APPLICATION_ID_JSON_KEY = "appid"; private static final String OBJECT_ID_JSON_KEY = "oid"; private static final String TENANT_ID_JSON_KEY = "tid"; private static final String USER_PRINCIPAL_NAME_JSON_KEY = "upn"; private final HttpPipeline httpPipeline; private IdentityClientOptions identityClientOptions; HttpPipelineAdapter(HttpPipeline httpPipeline, IdentityClientOptions identityClientOptions) { this.httpPipeline = httpPipeline; this.identityClientOptions = identityClientOptions; } @Override private void logAccountIdentifiersIfConfigured(String body) { if (identityClientOptions == null || !identityClientOptions.getIdentityLogOptionsImpl().isLoggingAccountIdentifiersAllowed()) { return; } try { JsonParser responseParser = JSON_FACTORY.createParser(body); String accessToken = getTargetFieldValueFromJsonParser(responseParser, ACCESS_TOKEN_JSON_KEY); responseParser.close(); if (accessToken != null) { String[] base64Metadata = accessToken.split("\\."); if (base64Metadata.length > 1) { byte[] decoded = Base64.getDecoder().decode(base64Metadata[1]); String data = new String(decoded, StandardCharsets.UTF_8); JsonParser jsonParser = JSON_FACTORY.createParser(data); HashMap<String, String> jsonMap = parseJsonIntoMap(jsonParser); 
jsonParser.close(); String appId = jsonMap.containsKey(APPLICATION_ID_JSON_KEY) ? jsonMap.get(APPLICATION_ID_JSON_KEY) : null; String objectId = jsonMap.containsKey(OBJECT_ID_JSON_KEY) ? jsonMap.get(OBJECT_ID_JSON_KEY) : null; String tenantId = jsonMap.containsKey(TENANT_ID_JSON_KEY) ? jsonMap.get(TENANT_ID_JSON_KEY) : null; String userPrincipalName = jsonMap.containsKey(USER_PRINCIPAL_NAME_JSON_KEY) ? jsonMap.get(USER_PRINCIPAL_NAME_JSON_KEY) : null; CLIENT_LOGGER.log(LogLevel.INFORMATIONAL, () -> MessageFormat .format(ACCOUNT_IDENTIFIER_LOG_MESSAGE, getAccountIdentifierMessage(APPLICATION_IDENTIFIER, appId), getAccountIdentifierMessage(TENANT_ID, tenantId), getAccountIdentifierMessage(USER_PRINCIPAL_NAME, userPrincipalName), getAccountIdentifierMessage(OBJECT_ID, objectId))); } } } catch (IOException e) { CLIENT_LOGGER.log(LogLevel.WARNING, () -> "allowLoggingAccountIdentifiers Log option was set," + " but the account information could not be logged.", e); } } private String getAccountIdentifierMessage(String identifierName, String identifierValue) { if (identifierValue == null) { return "No " + identifierName + " available."; } return identifierValue; } private String getTargetFieldValueFromJsonParser(JsonParser jsonParser, String targetField) throws IOException { while (jsonParser.nextToken() != JsonToken.END_OBJECT) { String fieldName = jsonParser.getCurrentName(); if (targetField.equals(fieldName)) { jsonParser.nextToken(); return jsonParser.getText(); } } return null; } private HashMap<String, String> parseJsonIntoMap(JsonParser jsonParser) throws IOException { HashMap<String, String> output = new HashMap<>(); JsonToken currentToken = jsonParser.nextToken(); if (jsonParser.getCurrentName() == null) { currentToken = jsonParser.nextToken(); } while (currentToken != JsonToken.END_OBJECT) { String fieldName = jsonParser.getCurrentName(); jsonParser.nextToken(); String value = jsonParser.getText(); output.put(fieldName, value); currentToken = 
jsonParser.nextToken(); } return output; } }
class HttpPipelineAdapter implements IHttpClient { private static final ClientLogger CLIENT_LOGGER = new ClientLogger(HttpPipelineAdapter.class); private static final JsonFactory JSON_FACTORY = new JsonFactory(); private static final String ACCOUNT_IDENTIFIER_LOG_MESSAGE = "[Authenticated account] Client ID: {0}, Tenant ID: {1}" + ", User Principal Name: {2}, Object ID (user): {3})"; private static final String APPLICATION_IDENTIFIER = "Application Identifier"; private static final String OBJECT_ID = "Object Id"; private static final String TENANT_ID = "Tenant Id"; private static final String USER_PRINCIPAL_NAME = "User Principal Name"; private static final String ACCESS_TOKEN_JSON_KEY = "access_token"; private static final String APPLICATION_ID_JSON_KEY = "appid"; private static final String OBJECT_ID_JSON_KEY = "oid"; private static final String TENANT_ID_JSON_KEY = "tid"; private static final String USER_PRINCIPAL_NAME_JSON_KEY = "upn"; private final HttpPipeline httpPipeline; private IdentityClientOptions identityClientOptions; HttpPipelineAdapter(HttpPipeline httpPipeline, IdentityClientOptions identityClientOptions) { this.httpPipeline = httpPipeline; this.identityClientOptions = identityClientOptions; } @Override private void logAccountIdentifiersIfConfigured(String body) { if (identityClientOptions == null || !identityClientOptions.getIdentityLogOptionsImpl().isLoggingAccountIdentifiersAllowed()) { return; } try { JsonParser responseParser = JSON_FACTORY.createParser(body); String accessToken = getTargetFieldValueFromJsonParser(responseParser, ACCESS_TOKEN_JSON_KEY); responseParser.close(); if (accessToken != null) { String[] base64Metadata = accessToken.split("\\."); if (base64Metadata.length > 1) { byte[] decoded = Base64.getDecoder().decode(base64Metadata[1]); String data = new String(decoded, StandardCharsets.UTF_8); JsonParser jsonParser = JSON_FACTORY.createParser(data); HashMap<String, String> jsonMap = parseJsonIntoMap(jsonParser); 
jsonParser.close(); String appId = jsonMap.containsKey(APPLICATION_ID_JSON_KEY) ? jsonMap.get(APPLICATION_ID_JSON_KEY) : null; String objectId = jsonMap.containsKey(OBJECT_ID_JSON_KEY) ? jsonMap.get(OBJECT_ID_JSON_KEY) : null; String tenantId = jsonMap.containsKey(TENANT_ID_JSON_KEY) ? jsonMap.get(TENANT_ID_JSON_KEY) : null; String userPrincipalName = jsonMap.containsKey(USER_PRINCIPAL_NAME_JSON_KEY) ? jsonMap.get(USER_PRINCIPAL_NAME_JSON_KEY) : null; CLIENT_LOGGER.log(LogLevel.INFORMATIONAL, () -> MessageFormat .format(ACCOUNT_IDENTIFIER_LOG_MESSAGE, getAccountIdentifierMessage(APPLICATION_IDENTIFIER, appId), getAccountIdentifierMessage(TENANT_ID, tenantId), getAccountIdentifierMessage(USER_PRINCIPAL_NAME, userPrincipalName), getAccountIdentifierMessage(OBJECT_ID, objectId))); } } } catch (IOException e) { CLIENT_LOGGER.log(LogLevel.WARNING, () -> "allowLoggingAccountIdentifiers Log option was set," + " but the account information could not be logged.", e); } } private String getAccountIdentifierMessage(String identifierName, String identifierValue) { if (identifierValue == null) { return "No " + identifierName + " available."; } return identifierValue; } private String getTargetFieldValueFromJsonParser(JsonParser jsonParser, String targetField) throws IOException { while (jsonParser.nextToken() != JsonToken.END_OBJECT) { String fieldName = jsonParser.getCurrentName(); if (targetField.equals(fieldName)) { jsonParser.nextToken(); return jsonParser.getText(); } } return null; } private HashMap<String, String> parseJsonIntoMap(JsonParser jsonParser) throws IOException { HashMap<String, String> output = new HashMap<>(); JsonToken currentToken = jsonParser.nextToken(); if (jsonParser.getCurrentName() == null) { currentToken = jsonParser.nextToken(); } while (currentToken != JsonToken.END_OBJECT) { String fieldName = jsonParser.getCurrentName(); jsonParser.nextToken(); String value = jsonParser.getText(); output.put(fieldName, value); currentToken = 
jsonParser.nextToken(); } return output; } }
Logic can be merged, the non-body case is the same as the body case except there is no call to `.body(body)`. ```java com.microsoft.aad.msal4j.HttpResponse httpResponse = new com.microsoft.aad.msal4j.HttpResponse() .statusCode(response.getStatusCode()); if (!CoreUtils.isNullOrEmpty(body)) { logAccountIdentifiersIfConfigured(body); httpResponse = httpResponse.body(body); } httpResponse.addHeaders(response.getHeaders().stream().collect(Collectors.toMap(HttpHeader::getName, HttpHeader::getValuesList))); return httpResponse; ```
public IHttpResponse send(HttpRequest httpRequest) { com.azure.core.http.HttpRequest request = new com.azure.core.http.HttpRequest( HttpMethod.valueOf(httpRequest.httpMethod().name()), httpRequest.url()); if (httpRequest.headers() != null) { request.setHeaders(new HttpHeaders(httpRequest.headers())); } if (httpRequest.body() != null) { request.setBody(httpRequest.body()); } HttpResponse response = httpPipeline.sendSync(request, Context.NONE); String body = response.getBodyAsBinaryData().toString(); if (CoreUtils.isNullOrEmpty(body)) { logAccountIdentifiersIfConfigured(body); com.microsoft.aad.msal4j.HttpResponse httpResponse = new com.microsoft.aad.msal4j.HttpResponse() .body(body) .statusCode(response.getStatusCode()); httpResponse.addHeaders(response.getHeaders().stream().collect(Collectors.toMap(HttpHeader::getName, HttpHeader::getValuesList))); return httpResponse; } com.microsoft.aad.msal4j.HttpResponse httpResponse = new com.microsoft.aad.msal4j.HttpResponse() .statusCode(response.getStatusCode()); httpResponse.addHeaders(response.getHeaders().stream().collect(Collectors.toMap(HttpHeader::getName, HttpHeader::getValuesList))); return httpResponse; }
HttpHeader::getValuesList)));
public IHttpResponse send(HttpRequest httpRequest) { com.azure.core.http.HttpRequest request = new com.azure.core.http.HttpRequest( HttpMethod.valueOf(httpRequest.httpMethod().name()), httpRequest.url()); if (httpRequest.headers() != null) { request.setHeaders(new HttpHeaders(httpRequest.headers())); } if (httpRequest.body() != null) { request.setBody(httpRequest.body()); } HttpResponse response = httpPipeline.sendSync(request, Context.NONE); String body = response.getBodyAsBinaryData().toString(); logAccountIdentifiersIfConfigured(body); com.microsoft.aad.msal4j.HttpResponse httpResponse = new com.microsoft.aad.msal4j.HttpResponse() .statusCode(response.getStatusCode()); if (!CoreUtils.isNullOrEmpty(body)) { httpResponse.body(body); } httpResponse.addHeaders(response.getHeaders().stream().collect(Collectors.toMap(HttpHeader::getName, HttpHeader::getValuesList))); return httpResponse; }
class HttpPipelineAdapter implements IHttpClient { private static final ClientLogger CLIENT_LOGGER = new ClientLogger(HttpPipelineAdapter.class); private static final JsonFactory JSON_FACTORY = new JsonFactory(); private static final String ACCOUNT_IDENTIFIER_LOG_MESSAGE = "[Authenticated account] Client ID: {0}, Tenant ID: {1}" + ", User Principal Name: {2}, Object ID (user): {3})"; private static final String APPLICATION_IDENTIFIER = "Application Identifier"; private static final String OBJECT_ID = "Object Id"; private static final String TENANT_ID = "Tenant Id"; private static final String USER_PRINCIPAL_NAME = "User Principal Name"; private static final String ACCESS_TOKEN_JSON_KEY = "access_token"; private static final String APPLICATION_ID_JSON_KEY = "appid"; private static final String OBJECT_ID_JSON_KEY = "oid"; private static final String TENANT_ID_JSON_KEY = "tid"; private static final String USER_PRINCIPAL_NAME_JSON_KEY = "upn"; private final HttpPipeline httpPipeline; private IdentityClientOptions identityClientOptions; HttpPipelineAdapter(HttpPipeline httpPipeline, IdentityClientOptions identityClientOptions) { this.httpPipeline = httpPipeline; this.identityClientOptions = identityClientOptions; } @Override private void logAccountIdentifiersIfConfigured(String body) { if (identityClientOptions == null || !identityClientOptions.getIdentityLogOptionsImpl().isLoggingAccountIdentifiersAllowed()) { return; } try { JsonParser responseParser = JSON_FACTORY.createParser(body); String accessToken = getTargetFieldValueFromJsonParser(responseParser, ACCESS_TOKEN_JSON_KEY); responseParser.close(); if (accessToken != null) { String[] base64Metadata = accessToken.split("\\."); if (base64Metadata.length > 1) { byte[] decoded = Base64.getDecoder().decode(base64Metadata[1]); String data = new String(decoded, StandardCharsets.UTF_8); JsonParser jsonParser = JSON_FACTORY.createParser(data); HashMap<String, String> jsonMap = parseJsonIntoMap(jsonParser); 
jsonParser.close(); String appId = jsonMap.containsKey(APPLICATION_ID_JSON_KEY) ? jsonMap.get(APPLICATION_ID_JSON_KEY) : null; String objectId = jsonMap.containsKey(OBJECT_ID_JSON_KEY) ? jsonMap.get(OBJECT_ID_JSON_KEY) : null; String tenantId = jsonMap.containsKey(TENANT_ID_JSON_KEY) ? jsonMap.get(TENANT_ID_JSON_KEY) : null; String userPrincipalName = jsonMap.containsKey(USER_PRINCIPAL_NAME_JSON_KEY) ? jsonMap.get(USER_PRINCIPAL_NAME_JSON_KEY) : null; CLIENT_LOGGER.log(LogLevel.INFORMATIONAL, () -> MessageFormat .format(ACCOUNT_IDENTIFIER_LOG_MESSAGE, getAccountIdentifierMessage(APPLICATION_IDENTIFIER, appId), getAccountIdentifierMessage(TENANT_ID, tenantId), getAccountIdentifierMessage(USER_PRINCIPAL_NAME, userPrincipalName), getAccountIdentifierMessage(OBJECT_ID, objectId))); } } } catch (IOException e) { CLIENT_LOGGER.log(LogLevel.WARNING, () -> "allowLoggingAccountIdentifiers Log option was set," + " but the account information could not be logged.", e); } } private String getAccountIdentifierMessage(String identifierName, String identifierValue) { if (identifierValue == null) { return "No " + identifierName + " available."; } return identifierValue; } private String getTargetFieldValueFromJsonParser(JsonParser jsonParser, String targetField) throws IOException { while (jsonParser.nextToken() != JsonToken.END_OBJECT) { String fieldName = jsonParser.getCurrentName(); if (targetField.equals(fieldName)) { jsonParser.nextToken(); return jsonParser.getText(); } } return null; } private HashMap<String, String> parseJsonIntoMap(JsonParser jsonParser) throws IOException { HashMap<String, String> output = new HashMap<>(); JsonToken currentToken = jsonParser.nextToken(); if (jsonParser.getCurrentName() == null) { currentToken = jsonParser.nextToken(); } while (currentToken != JsonToken.END_OBJECT) { String fieldName = jsonParser.getCurrentName(); jsonParser.nextToken(); String value = jsonParser.getText(); output.put(fieldName, value); currentToken = 
jsonParser.nextToken(); } return output; } }
class HttpPipelineAdapter implements IHttpClient { private static final ClientLogger CLIENT_LOGGER = new ClientLogger(HttpPipelineAdapter.class); private static final JsonFactory JSON_FACTORY = new JsonFactory(); private static final String ACCOUNT_IDENTIFIER_LOG_MESSAGE = "[Authenticated account] Client ID: {0}, Tenant ID: {1}" + ", User Principal Name: {2}, Object ID (user): {3})"; private static final String APPLICATION_IDENTIFIER = "Application Identifier"; private static final String OBJECT_ID = "Object Id"; private static final String TENANT_ID = "Tenant Id"; private static final String USER_PRINCIPAL_NAME = "User Principal Name"; private static final String ACCESS_TOKEN_JSON_KEY = "access_token"; private static final String APPLICATION_ID_JSON_KEY = "appid"; private static final String OBJECT_ID_JSON_KEY = "oid"; private static final String TENANT_ID_JSON_KEY = "tid"; private static final String USER_PRINCIPAL_NAME_JSON_KEY = "upn"; private final HttpPipeline httpPipeline; private IdentityClientOptions identityClientOptions; HttpPipelineAdapter(HttpPipeline httpPipeline, IdentityClientOptions identityClientOptions) { this.httpPipeline = httpPipeline; this.identityClientOptions = identityClientOptions; } @Override private void logAccountIdentifiersIfConfigured(String body) { if (identityClientOptions == null || !identityClientOptions.getIdentityLogOptionsImpl().isLoggingAccountIdentifiersAllowed()) { return; } try { JsonParser responseParser = JSON_FACTORY.createParser(body); String accessToken = getTargetFieldValueFromJsonParser(responseParser, ACCESS_TOKEN_JSON_KEY); responseParser.close(); if (accessToken != null) { String[] base64Metadata = accessToken.split("\\."); if (base64Metadata.length > 1) { byte[] decoded = Base64.getDecoder().decode(base64Metadata[1]); String data = new String(decoded, StandardCharsets.UTF_8); JsonParser jsonParser = JSON_FACTORY.createParser(data); HashMap<String, String> jsonMap = parseJsonIntoMap(jsonParser); 
jsonParser.close(); String appId = jsonMap.containsKey(APPLICATION_ID_JSON_KEY) ? jsonMap.get(APPLICATION_ID_JSON_KEY) : null; String objectId = jsonMap.containsKey(OBJECT_ID_JSON_KEY) ? jsonMap.get(OBJECT_ID_JSON_KEY) : null; String tenantId = jsonMap.containsKey(TENANT_ID_JSON_KEY) ? jsonMap.get(TENANT_ID_JSON_KEY) : null; String userPrincipalName = jsonMap.containsKey(USER_PRINCIPAL_NAME_JSON_KEY) ? jsonMap.get(USER_PRINCIPAL_NAME_JSON_KEY) : null; CLIENT_LOGGER.log(LogLevel.INFORMATIONAL, () -> MessageFormat .format(ACCOUNT_IDENTIFIER_LOG_MESSAGE, getAccountIdentifierMessage(APPLICATION_IDENTIFIER, appId), getAccountIdentifierMessage(TENANT_ID, tenantId), getAccountIdentifierMessage(USER_PRINCIPAL_NAME, userPrincipalName), getAccountIdentifierMessage(OBJECT_ID, objectId))); } } } catch (IOException e) { CLIENT_LOGGER.log(LogLevel.WARNING, () -> "allowLoggingAccountIdentifiers Log option was set," + " but the account information could not be logged.", e); } } private String getAccountIdentifierMessage(String identifierName, String identifierValue) { if (identifierValue == null) { return "No " + identifierName + " available."; } return identifierValue; } private String getTargetFieldValueFromJsonParser(JsonParser jsonParser, String targetField) throws IOException { while (jsonParser.nextToken() != JsonToken.END_OBJECT) { String fieldName = jsonParser.getCurrentName(); if (targetField.equals(fieldName)) { jsonParser.nextToken(); return jsonParser.getText(); } } return null; } private HashMap<String, String> parseJsonIntoMap(JsonParser jsonParser) throws IOException { HashMap<String, String> output = new HashMap<>(); JsonToken currentToken = jsonParser.nextToken(); if (jsonParser.getCurrentName() == null) { currentToken = jsonParser.nextToken(); } while (currentToken != JsonToken.END_OBJECT) { String fieldName = jsonParser.getCurrentName(); jsonParser.nextToken(); String value = jsonParser.getText(); output.put(fieldName, value); currentToken = 
jsonParser.nextToken(); } return output; } }
it is true by default in the builder, user configures it in the builder
public AccessToken getTokenSync(TokenRequestContext request) { if (cachedToken.get() != null) { try { return identitySyncClient.authenticateWithPublicClientCache(request, cachedToken.get()); } catch (Exception e) { } } try { if (!automaticAuthentication) { throw LOGGER.logExceptionAsError(new AuthenticationRequiredException("Interactive " + "authentication is needed to acquire token. Call Authenticate to initiate the device " + "code authentication.", request)); } MsalToken accessToken = identitySyncClient.authenticateWithDeviceCode(request, challengeConsumer); updateCache(accessToken); LoggingUtil.logTokenSuccess(LOGGER, request); return accessToken; } catch (Exception e) { LoggingUtil.logTokenError(LOGGER, identityClient.getIdentityClientOptions(), request, e); throw e; } }
if (!automaticAuthentication) {
public AccessToken getTokenSync(TokenRequestContext request) { if (cachedToken.get() != null) { try { return identitySyncClient.authenticateWithPublicClientCache(request, cachedToken.get()); } catch (Exception e) { } } try { if (!automaticAuthentication) { throw LOGGER.logExceptionAsError(new AuthenticationRequiredException("Interactive " + "authentication is needed to acquire token. Call Authenticate to initiate the device " + "code authentication.", request)); } MsalToken accessToken = identitySyncClient.authenticateWithDeviceCode(request, challengeConsumer); updateCache(accessToken); LoggingUtil.logTokenSuccess(LOGGER, request); return accessToken; } catch (Exception e) { LoggingUtil.logTokenError(LOGGER, identityClient.getIdentityClientOptions(), request, e); throw e; } }
class DeviceCodeCredential implements TokenCredential { private static final ClientLogger LOGGER = new ClientLogger(DeviceCodeCredential.class); private final Consumer<DeviceCodeInfo> challengeConsumer; private final IdentityClient identityClient; private final IdentitySyncClient identitySyncClient; private final AtomicReference<MsalAuthenticationAccount> cachedToken; private final String authorityHost; private final boolean automaticAuthentication; /** * Creates a DeviceCodeCredential with the given identity client options. * * @param clientId the client ID of the application * @param tenantId the tenant ID of the application * @param challengeConsumer a method allowing the user to meet the device code challenge * @param automaticAuthentication indicates whether automatic authentication should be attempted or not. * @param identityClientOptions the options for configuring the identity client */ DeviceCodeCredential(String clientId, String tenantId, Consumer<DeviceCodeInfo> challengeConsumer, boolean automaticAuthentication, IdentityClientOptions identityClientOptions) { this.challengeConsumer = challengeConsumer; IdentityClientBuilder builder = new IdentityClientBuilder() .tenantId(tenantId) .clientId(clientId) .identityClientOptions(identityClientOptions); identityClient = builder.build(); identitySyncClient = builder.buildSyncClient(); this.cachedToken = new AtomicReference<>(); this.authorityHost = identityClientOptions.getAuthorityHost(); this.automaticAuthentication = automaticAuthentication; if (identityClientOptions.getAuthenticationRecord() != null) { cachedToken.set(new MsalAuthenticationAccount(identityClientOptions.getAuthenticationRecord())); } } @Override public Mono<AccessToken> getToken(TokenRequestContext request) { return Mono.defer(() -> { if (cachedToken.get() != null) { return identityClient.authenticateWithPublicClientCache(request, cachedToken.get()) .onErrorResume(t -> Mono.empty()); } else { return Mono.empty(); } }).switchIfEmpty( 
Mono.defer(() -> { if (!automaticAuthentication) { return Mono.error(LOGGER.logExceptionAsError(new AuthenticationRequiredException("Interactive " + "authentication is needed to acquire token. Call Authenticate to initiate the device " + "code authentication.", request))); } return identityClient.authenticateWithDeviceCode(request, challengeConsumer); })) .map(this::updateCache) .doOnNext(token -> LoggingUtil.logTokenSuccess(LOGGER, request)) .doOnError(error -> LoggingUtil.logTokenError(LOGGER, identityClient.getIdentityClientOptions(), request, error)); } @Override /** * Authenticates a user via the device code flow. * * <p> The credential acquires a verification URL and code from the Azure Active Directory. The user must * browse to the URL, enter the code, and authenticate with Azure Active Directory. If the user authenticates * successfully, the credential receives an access token. </p> * * @param request The details of the authentication request. * * @return The {@link AuthenticationRecord} which can be used to silently authenticate the account * on future execution if persistent caching was configured via * {@link DeviceCodeCredentialBuilder * when credential was instantiated. */ public Mono<AuthenticationRecord> authenticate(TokenRequestContext request) { return Mono.defer(() -> identityClient.authenticateWithDeviceCode(request, challengeConsumer)) .map(this::updateCache) .map(msalToken -> cachedToken.get().getAuthenticationRecord()); } /** * Authenticates a user via the device code flow. * * <p> The credential acquires a verification URL and code from the Azure Active Directory. The user must * browse to the URL, enter the code, and authenticate with Azure Active Directory. If the user authenticates * successfully, the credential receives an access token. 
</p> * * @return The {@link AuthenticationRecord} which can be used to silently authenticate the account * on future execution if persistent caching was configured via * {@link DeviceCodeCredentialBuilder * when credential was instantiated. */ public Mono<AuthenticationRecord> authenticate() { String defaultScope = AzureAuthorityHosts.getDefaultScope(authorityHost); if (defaultScope == null) { return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, identityClient.getIdentityClientOptions(), new CredentialUnavailableException("Authenticating in this " + "environment requires specifying a TokenRequestContext."))); } return authenticate(new TokenRequestContext().addScopes(defaultScope)); } private AccessToken updateCache(MsalToken msalToken) { cachedToken.set( new MsalAuthenticationAccount( new AuthenticationRecord(msalToken.getAuthenticationResult(), identityClient.getTenantId(), identityClient.getClientId()), msalToken.getAccount().getTenantProfiles())); return msalToken; } }
class DeviceCodeCredential implements TokenCredential { private static final ClientLogger LOGGER = new ClientLogger(DeviceCodeCredential.class); private final Consumer<DeviceCodeInfo> challengeConsumer; private final IdentityClient identityClient; private final IdentitySyncClient identitySyncClient; private final AtomicReference<MsalAuthenticationAccount> cachedToken; private final String authorityHost; private final boolean automaticAuthentication; /** * Creates a DeviceCodeCredential with the given identity client options. * * @param clientId the client ID of the application * @param tenantId the tenant ID of the application * @param challengeConsumer a method allowing the user to meet the device code challenge * @param automaticAuthentication indicates whether automatic authentication should be attempted or not. * @param identityClientOptions the options for configuring the identity client */ DeviceCodeCredential(String clientId, String tenantId, Consumer<DeviceCodeInfo> challengeConsumer, boolean automaticAuthentication, IdentityClientOptions identityClientOptions) { this.challengeConsumer = challengeConsumer; IdentityClientBuilder builder = new IdentityClientBuilder() .tenantId(tenantId) .clientId(clientId) .identityClientOptions(identityClientOptions); identityClient = builder.build(); identitySyncClient = builder.buildSyncClient(); this.cachedToken = new AtomicReference<>(); this.authorityHost = identityClientOptions.getAuthorityHost(); this.automaticAuthentication = automaticAuthentication; if (identityClientOptions.getAuthenticationRecord() != null) { cachedToken.set(new MsalAuthenticationAccount(identityClientOptions.getAuthenticationRecord())); } } @Override public Mono<AccessToken> getToken(TokenRequestContext request) { return Mono.defer(() -> { if (cachedToken.get() != null) { return identityClient.authenticateWithPublicClientCache(request, cachedToken.get()) .onErrorResume(t -> Mono.empty()); } else { return Mono.empty(); } }).switchIfEmpty( 
Mono.defer(() -> { if (!automaticAuthentication) { return Mono.error(LOGGER.logExceptionAsError(new AuthenticationRequiredException("Interactive " + "authentication is needed to acquire token. Call Authenticate to initiate the device " + "code authentication.", request))); } return identityClient.authenticateWithDeviceCode(request, challengeConsumer); })) .map(this::updateCache) .doOnNext(token -> LoggingUtil.logTokenSuccess(LOGGER, request)) .doOnError(error -> LoggingUtil.logTokenError(LOGGER, identityClient.getIdentityClientOptions(), request, error)); } @Override /** * Authenticates a user via the device code flow. * * <p> The credential acquires a verification URL and code from the Azure Active Directory. The user must * browse to the URL, enter the code, and authenticate with Azure Active Directory. If the user authenticates * successfully, the credential receives an access token. </p> * * @param request The details of the authentication request. * * @return The {@link AuthenticationRecord} which can be used to silently authenticate the account * on future execution if persistent caching was configured via * {@link DeviceCodeCredentialBuilder * when credential was instantiated. */ public Mono<AuthenticationRecord> authenticate(TokenRequestContext request) { return Mono.defer(() -> identityClient.authenticateWithDeviceCode(request, challengeConsumer)) .map(this::updateCache) .map(msalToken -> cachedToken.get().getAuthenticationRecord()); } /** * Authenticates a user via the device code flow. * * <p> The credential acquires a verification URL and code from the Azure Active Directory. The user must * browse to the URL, enter the code, and authenticate with Azure Active Directory. If the user authenticates * successfully, the credential receives an access token. 
</p> * * @return The {@link AuthenticationRecord} which can be used to silently authenticate the account * on future execution if persistent caching was configured via * {@link DeviceCodeCredentialBuilder * when credential was instantiated. */ public Mono<AuthenticationRecord> authenticate() { String defaultScope = AzureAuthorityHosts.getDefaultScope(authorityHost); if (defaultScope == null) { return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, identityClient.getIdentityClientOptions(), new CredentialUnavailableException("Authenticating in this " + "environment requires specifying a TokenRequestContext."))); } return authenticate(new TokenRequestContext().addScopes(defaultScope)); } private AccessToken updateCache(MsalToken msalToken) { cachedToken.set( new MsalAuthenticationAccount( new AuthenticationRecord(msalToken.getAuthenticationResult(), identityClient.getTenantId(), identityClient.getClientId()), msalToken.getAccount().getTenantProfiles())); return msalToken; } }
This is a fail case, credential should fail here.
public AccessToken getTokenSync(TokenRequestContext request) { List<CredentialUnavailableException> exceptions = new ArrayList<>(4); for (TokenCredential credential : credentials) { try { return credential.getTokenSync(request); } catch (Exception e) { if (!e.getClass().getSimpleName().equals("CredentialUnavailableException")) { throw new ClientAuthenticationException( unavailableError + credential.getClass().getSimpleName() + " authentication failed. Error Details: " + e.getMessage(), null, e); } exceptions.add((CredentialUnavailableException) e); LOGGER.info("Azure Identity => Attempted credential {} is unavailable.", credential.getClass().getSimpleName()); } } CredentialUnavailableException last = exceptions.get(exceptions.size() - 1); for (int z = exceptions.size() - 2; z >= 0; z--) { CredentialUnavailableException current = exceptions.get(z); last = new CredentialUnavailableException(current.getMessage() + "\r\n" + last.getMessage() + (z == 0 ? "To mitigate this issue, please refer to the troubleshooting guidelines here at " + "https: : "")); } throw last; }
throw new ClientAuthenticationException(
public AccessToken getTokenSync(TokenRequestContext request) { List<CredentialUnavailableException> exceptions = new ArrayList<>(4); for (TokenCredential credential : credentials) { try { return credential.getTokenSync(request); } catch (Exception e) { if (e.getClass() != CredentialUnavailableException.class) { throw new ClientAuthenticationException( unavailableError + credential.getClass().getSimpleName() + " authentication failed. Error Details: " + e.getMessage(), null, e); } else { if (e instanceof CredentialUnavailableException) { exceptions.add((CredentialUnavailableException) e); } } LOGGER.info("Azure Identity => Attempted credential {} is unavailable.", credential.getClass().getSimpleName()); } } CredentialUnavailableException last = exceptions.get(exceptions.size() - 1); for (int z = exceptions.size() - 2; z >= 0; z--) { CredentialUnavailableException current = exceptions.get(z); last = new CredentialUnavailableException(current.getMessage() + "\r\n" + last.getMessage() + (z == 0 ? "To mitigate this issue, please refer to the troubleshooting guidelines here at " + "https: : "")); } throw last; }
class ChainedTokenCredential implements TokenCredential { private static final ClientLogger LOGGER = new ClientLogger(ChainedTokenCredential.class); private final List<TokenCredential> credentials; private final String unavailableError = this.getClass().getSimpleName() + " authentication failed. ---> "; /** * Create an instance of chained token credential that aggregates a list of token * credentials. */ ChainedTokenCredential(List<TokenCredential> credentials) { this.credentials = Collections.unmodifiableList(credentials); } /** * Sequentially calls {@link TokenCredential * returning the first successfully obtained {@link AccessToken}. * * This method is called automatically by Azure SDK client libraries. * You may call this method directly, but you must also handle token * caching and token refreshing. * * @param request the details of the token request * @return a Publisher that emits a single access token */ @Override public Mono<AccessToken> getToken(TokenRequestContext request) { List<CredentialUnavailableException> exceptions = new ArrayList<>(4); return Flux.fromIterable(credentials) .flatMap(p -> p.getToken(request) .doOnNext(t -> LOGGER.info("Azure Identity => Attempted credential {} returns a token", p.getClass().getSimpleName())) .onErrorResume(Exception.class, t -> { if (!t.getClass().getSimpleName().equals("CredentialUnavailableException")) { return Mono.error(new ClientAuthenticationException( unavailableError + p.getClass().getSimpleName() + " authentication failed. 
Error Details: " + t.getMessage(), null, t)); } exceptions.add((CredentialUnavailableException) t); LOGGER.info("Azure Identity => Attempted credential {} is unavailable.", p.getClass().getSimpleName()); return Mono.empty(); }), 1) .next() .switchIfEmpty(Mono.defer(() -> { CredentialUnavailableException last = exceptions.get(exceptions.size() - 1); for (int z = exceptions.size() - 2; z >= 0; z--) { CredentialUnavailableException current = exceptions.get(z); last = new CredentialUnavailableException(current.getMessage() + "\r\n" + last.getMessage() + (z == 0 ? "To mitigate this issue, please refer to the troubleshooting guidelines here at " + "https: : "")); } return Mono.error(last); })); } @Override }
class ChainedTokenCredential implements TokenCredential { private static final ClientLogger LOGGER = new ClientLogger(ChainedTokenCredential.class); private final List<TokenCredential> credentials; private final String unavailableError = this.getClass().getSimpleName() + " authentication failed. ---> "; /** * Create an instance of chained token credential that aggregates a list of token * credentials. */ ChainedTokenCredential(List<TokenCredential> credentials) { this.credentials = Collections.unmodifiableList(credentials); } /** * Sequentially calls {@link TokenCredential * returning the first successfully obtained {@link AccessToken}. * * This method is called automatically by Azure SDK client libraries. * You may call this method directly, but you must also handle token * caching and token refreshing. * * @param request the details of the token request * @return a Publisher that emits a single access token */ @Override public Mono<AccessToken> getToken(TokenRequestContext request) { List<CredentialUnavailableException> exceptions = new ArrayList<>(4); return Flux.fromIterable(credentials) .flatMap(p -> p.getToken(request) .doOnNext(t -> LOGGER.info("Azure Identity => Attempted credential {} returns a token", p.getClass().getSimpleName())) .onErrorResume(Exception.class, t -> { if (!t.getClass().getSimpleName().equals("CredentialUnavailableException")) { return Mono.error(new ClientAuthenticationException( unavailableError + p.getClass().getSimpleName() + " authentication failed. 
Error Details: " + t.getMessage(), null, t)); } exceptions.add((CredentialUnavailableException) t); LOGGER.info("Azure Identity => Attempted credential {} is unavailable.", p.getClass().getSimpleName()); return Mono.empty(); }), 1) .next() .switchIfEmpty(Mono.defer(() -> { CredentialUnavailableException last = exceptions.get(exceptions.size() - 1); for (int z = exceptions.size() - 2; z >= 0; z--) { CredentialUnavailableException current = exceptions.get(z); last = new CredentialUnavailableException(current.getMessage() + "\r\n" + last.getMessage() + (z == 0 ? "To mitigate this issue, please refer to the troubleshooting guidelines here at " + "https: : "")); } return Mono.error(last); })); } @Override }
Let's not leave dead code :)
public void recognizeDtmfInACall(HttpClient httpClient) { /* Test case: ACS to ACS call * 1. create a CallAutomationClient. * 2. create a call from source to one ACS target. * 3. get updated call properties and check for the connected state. * 4. prompt and recognize dtmf tones from target participant * 4. hang up the call. */ CallAutomationAsyncClient callClient = getCallAutomationClientUsingConnectionString(httpClient) .addPolicy((context, next) -> logHeaders("recognizeDtmfInACall", next)) .buildAsyncClient(); CommunicationIdentityAsyncClient identityClient = getCommunicationIdentityClientUsingConnectionString(httpClient) .addPolicy((context, next) -> logHeaders("recognizeDtmfInACall", next)) .buildAsyncClient(); try { String callbackUrl = "https: CommunicationIdentifier source = identityClient.createUser().block(); PhoneNumberIdentifier targetUser = new PhoneNumberIdentifier(PHONE_USER_1); List<CommunicationIdentifier> targets = new ArrayList<>(Arrays.asList(targetUser)); CreateCallOptions createCallOptions = new CreateCallOptions(source, targets, callbackUrl) .setSourceCallerId(ACS_RESOURCE_PHONE); Response<CreateCallResult> callResponse = callClient.createCallWithResponse(createCallOptions).block(); assertNotNull(callResponse); assertNotNull(callResponse.getValue()); assertNotNull(callResponse.getValue().getCallConnection()); assertNotNull(callResponse.getValue().getCallConnectionProperties()); waitForOperationCompletion(15000); CallConnectionAsync callConnectionAsync = callClient.getCallConnectionAsync(callResponse.getValue().getCallConnectionProperties().getCallConnectionId()); assertNotNull(callConnectionAsync); CallConnectionProperties callConnectionProperties = callConnectionAsync.getCallProperties().block(); assertNotNull(callConnectionProperties); assertEquals(CallConnectionState.CONNECTED, callConnectionProperties.getCallConnectionState()); CallMediaAsync callMediaAsync = callConnectionAsync.getCallMediaAsync(); List<DtmfTone> stopTones = new 
ArrayList<>(); stopTones.add(DtmfTone.POUND); CallMediaRecognizeDtmfOptions callMediaRecognizeDtmfOptions = new CallMediaRecognizeDtmfOptions(targetUser, 5) .setStopTones(stopTones) .setInterToneTimeout(Duration.ofSeconds(5)); callMediaRecognizeDtmfOptions.setInitialSilenceTimeout(Duration.ofSeconds(15)); callMediaRecognizeDtmfOptions.setPlayPrompt(new FileSource().setUri(MEDIA_SOURCE)); Response<Void> dtmfResponse = callMediaAsync.startRecognizingWithResponse(new CallMediaRecognizeDtmfOptions(targetUser, 5)).block(); assertNotNull(dtmfResponse); assertEquals(202, dtmfResponse.getStatusCode()); waitForOperationCompletion(5000); callConnectionAsync.hangUp(true).block(); waitForOperationCompletion(5000); assertThrows(Exception.class, () -> callConnectionAsync.getCallProperties().block()); } catch (Exception ex) { fail("Unexpeceted exception received", ex); } }
public void recognizeDtmfInACall(HttpClient httpClient) { /* Test case: ACS to ACS call * 1. create a CallAutomationClient. * 2. create a call from source to one ACS target. * 3. get updated call properties and check for the connected state. * 4. prompt and recognize dtmf tones from target participant * 5. hang up the call. */ CallAutomationAsyncClient callClient = getCallAutomationClientUsingConnectionString(httpClient) .addPolicy((context, next) -> logHeaders("recognizeDtmfInACall", next)) .buildAsyncClient(); CommunicationIdentityAsyncClient identityClient = getCommunicationIdentityClientUsingConnectionString(httpClient) .addPolicy((context, next) -> logHeaders("recognizeDtmfInACall", next)) .buildAsyncClient(); try { String callbackUrl = "https: CommunicationIdentifier source = identityClient.createUser().block(); PhoneNumberIdentifier targetUser = new PhoneNumberIdentifier(PHONE_USER_1); List<CommunicationIdentifier> targets = new ArrayList<>(Arrays.asList(targetUser)); CreateCallOptions createCallOptions = new CreateCallOptions(source, targets, callbackUrl) .setSourceCallerId(ACS_RESOURCE_PHONE); Response<CreateCallResult> callResponse = callClient.createCallWithResponse(createCallOptions).block(); assertNotNull(callResponse); assertNotNull(callResponse.getValue()); assertNotNull(callResponse.getValue().getCallConnection()); assertNotNull(callResponse.getValue().getCallConnectionProperties()); waitForOperationCompletion(15000); CallConnectionAsync callConnectionAsync = callClient.getCallConnectionAsync(callResponse.getValue().getCallConnectionProperties().getCallConnectionId()); assertNotNull(callConnectionAsync); CallConnectionProperties callConnectionProperties = callConnectionAsync.getCallProperties().block(); assertNotNull(callConnectionProperties); assertEquals(CallConnectionState.CONNECTED, callConnectionProperties.getCallConnectionState()); CallMediaAsync callMediaAsync = callConnectionAsync.getCallMediaAsync(); List<DtmfTone> stopTones = new 
ArrayList<>(); stopTones.add(DtmfTone.POUND); CallMediaRecognizeDtmfOptions callMediaRecognizeDtmfOptions = new CallMediaRecognizeDtmfOptions(targetUser, 5) .setStopTones(stopTones) .setInterToneTimeout(Duration.ofSeconds(5)); callMediaRecognizeDtmfOptions.setInitialSilenceTimeout(Duration.ofSeconds(15)); callMediaRecognizeDtmfOptions.setPlayPrompt(new FileSource().setUri(MEDIA_SOURCE)); Response<Void> dtmfResponse = callMediaAsync.startRecognizingWithResponse(new CallMediaRecognizeDtmfOptions(targetUser, 5)).block(); assertNotNull(dtmfResponse); assertEquals(202, dtmfResponse.getStatusCode()); callConnectionAsync.hangUp(true).block(); waitForOperationCompletion(5000); assertThrows(Exception.class, () -> callConnectionAsync.getCallProperties().block()); } catch (Exception ex) { fail("Unexpeceted exception received", ex); } }
class CallMediaAsyncLiveTests extends CallAutomationLiveTestBase { @ParameterizedTest @MethodSource("com.azure.core.test.TestBase @DisabledIfEnvironmentVariable( named = "SKIP_LIVE_TEST", matches = "(?i)(true)", disabledReason = "Requires human intervention") public void playMediaInACall(HttpClient httpClient) { /* Test case: * 1. create a CallAutomationClient. * 2. create a call from source to one ACS target and A PSTN target. * 3. play a media file to all participants. * 4. terminate the call. */ CallAutomationAsyncClient callClient = getCallAutomationClientUsingConnectionString(httpClient) .addPolicy((context, next) -> logHeaders("playMediaInACall", next)) .buildAsyncClient(); CommunicationIdentityAsyncClient identityClient = getCommunicationIdentityClientUsingConnectionString(httpClient) .addPolicy((context, next) -> logHeaders("playMediaInACall", next)) .buildAsyncClient(); try { String callbackUrl = "https: CommunicationIdentifier source = identityClient.createUser().block(); List<CommunicationIdentifier> targets = new ArrayList<>(Arrays.asList(new PhoneNumberIdentifier(PHONE_USER_1), new CommunicationUserIdentifier(ACS_USER_1))); CreateCallOptions createCallOptions = new CreateCallOptions(source, targets, callbackUrl) .setSourceCallerId(ACS_RESOURCE_PHONE); Response<CreateCallResult> result = callClient.createCallWithResponse(createCallOptions).block(); assertNotNull(result); assertNotNull(result.getValue()); assertNotNull(result.getValue().getCallConnection()); assertNotNull(result.getValue().getCallConnectionProperties()); waitForOperationCompletion(15000); CallConnectionAsync callConnectionAsync = callClient.getCallConnectionAsync(result.getValue().getCallConnectionProperties().getCallConnectionId()); assertNotNull(callConnectionAsync); CallConnectionProperties callConnectionProperties = callConnectionAsync.getCallProperties().block(); assertNotNull(callConnectionProperties); assertEquals(CallConnectionState.CONNECTED, 
callConnectionProperties.getCallConnectionState()); CallMediaAsync callMediaAsync = callConnectionAsync.getCallMediaAsync(); callMediaAsync.playToAll(new FileSource().setUri(MEDIA_SOURCE)).block(); waitForOperationCompletion(5000); callConnectionAsync.hangUp(true).block(); waitForOperationCompletion(5000); assertThrows(Exception.class, () -> callConnectionAsync.getCallProperties().block()); } catch (Exception ex) { fail("Unexpected exception received", ex); } } @ParameterizedTest @MethodSource("com.azure.core.test.TestBase @DisabledIfEnvironmentVariable( named = "SKIP_LIVE_TEST", matches = "(?i)(true)", disabledReason = "Requires human intervention") }
class CallMediaAsyncLiveTests extends CallAutomationLiveTestBase { @ParameterizedTest @MethodSource("com.azure.core.test.TestBase @DisabledIfEnvironmentVariable( named = "SKIP_LIVE_TEST", matches = "(?i)(true)", disabledReason = "Requires human intervention") public void playMediaInACall(HttpClient httpClient) { /* Test case: * 1. create a CallAutomationClient. * 2. create a call from source to one ACS target and A PSTN target. * 3. play a media file to all participants. * 4. terminate the call. */ CallAutomationAsyncClient callClient = getCallAutomationClientUsingConnectionString(httpClient) .addPolicy((context, next) -> logHeaders("playMediaInACall", next)) .buildAsyncClient(); CommunicationIdentityAsyncClient identityClient = getCommunicationIdentityClientUsingConnectionString(httpClient) .addPolicy((context, next) -> logHeaders("playMediaInACall", next)) .buildAsyncClient(); try { String callbackUrl = "https: CommunicationIdentifier source = identityClient.createUser().block(); List<CommunicationIdentifier> targets = new ArrayList<>(Arrays.asList(new PhoneNumberIdentifier(PHONE_USER_1), new CommunicationUserIdentifier(ACS_USER_1))); CreateCallOptions createCallOptions = new CreateCallOptions(source, targets, callbackUrl) .setSourceCallerId(ACS_RESOURCE_PHONE); Response<CreateCallResult> result = callClient.createCallWithResponse(createCallOptions).block(); assertNotNull(result); assertNotNull(result.getValue()); assertNotNull(result.getValue().getCallConnection()); assertNotNull(result.getValue().getCallConnectionProperties()); waitForOperationCompletion(15000); CallConnectionAsync callConnectionAsync = callClient.getCallConnectionAsync(result.getValue().getCallConnectionProperties().getCallConnectionId()); assertNotNull(callConnectionAsync); CallConnectionProperties callConnectionProperties = callConnectionAsync.getCallProperties().block(); assertNotNull(callConnectionProperties); assertEquals(CallConnectionState.CONNECTED, 
callConnectionProperties.getCallConnectionState()); CallMediaAsync callMediaAsync = callConnectionAsync.getCallMediaAsync(); callMediaAsync.playToAll(new FileSource().setUri(MEDIA_SOURCE)).block(); waitForOperationCompletion(5000); callConnectionAsync.hangUp(true).block(); waitForOperationCompletion(5000); assertThrows(Exception.class, () -> callConnectionAsync.getCallProperties().block()); } catch (Exception ex) { fail("Unexpected exception received", ex); } } @ParameterizedTest @MethodSource("com.azure.core.test.TestBase @DisabledIfEnvironmentVariable( named = "SKIP_LIVE_TEST", matches = "(?i)(true)", disabledReason = "Requires human intervention") }
Is it intended to remove the play prompt part? If so, please remove this comment and also the comment out area above as well.
public void recognizeDtmfInACall(HttpClient httpClient) { /* Test case: ACS to ACS call * 1. create a CallAutomationClient. * 2. create a call from source to one ACS target. * 3. get updated call properties and check for the connected state. * 4. prompt and recognize dtmf tones from target participant * 4. hang up the call. */ CallAutomationAsyncClient callClient = getCallAutomationClientUsingConnectionString(httpClient) .addPolicy((context, next) -> logHeaders("recognizeDtmfInACall", next)) .buildAsyncClient(); CommunicationIdentityAsyncClient identityClient = getCommunicationIdentityClientUsingConnectionString(httpClient) .addPolicy((context, next) -> logHeaders("recognizeDtmfInACall", next)) .buildAsyncClient(); try { String callbackUrl = "https: CommunicationIdentifier source = identityClient.createUser().block(); PhoneNumberIdentifier targetUser = new PhoneNumberIdentifier(PHONE_USER_1); List<CommunicationIdentifier> targets = new ArrayList<>(Arrays.asList(targetUser)); CreateCallOptions createCallOptions = new CreateCallOptions(source, targets, callbackUrl) .setSourceCallerId(ACS_RESOURCE_PHONE); Response<CreateCallResult> callResponse = callClient.createCallWithResponse(createCallOptions).block(); assertNotNull(callResponse); assertNotNull(callResponse.getValue()); assertNotNull(callResponse.getValue().getCallConnection()); assertNotNull(callResponse.getValue().getCallConnectionProperties()); waitForOperationCompletion(15000); CallConnectionAsync callConnectionAsync = callClient.getCallConnectionAsync(callResponse.getValue().getCallConnectionProperties().getCallConnectionId()); assertNotNull(callConnectionAsync); CallConnectionProperties callConnectionProperties = callConnectionAsync.getCallProperties().block(); assertNotNull(callConnectionProperties); assertEquals(CallConnectionState.CONNECTED, callConnectionProperties.getCallConnectionState()); CallMediaAsync callMediaAsync = callConnectionAsync.getCallMediaAsync(); List<DtmfTone> stopTones = new 
ArrayList<>(); stopTones.add(DtmfTone.POUND); CallMediaRecognizeDtmfOptions callMediaRecognizeDtmfOptions = new CallMediaRecognizeDtmfOptions(targetUser, 5) .setStopTones(stopTones) .setInterToneTimeout(Duration.ofSeconds(5)); callMediaRecognizeDtmfOptions.setInitialSilenceTimeout(Duration.ofSeconds(15)); callMediaRecognizeDtmfOptions.setPlayPrompt(new FileSource().setUri(MEDIA_SOURCE)); Response<Void> dtmfResponse = callMediaAsync.startRecognizingWithResponse(new CallMediaRecognizeDtmfOptions(targetUser, 5)).block(); assertNotNull(dtmfResponse); assertEquals(202, dtmfResponse.getStatusCode()); waitForOperationCompletion(5000); callConnectionAsync.hangUp(true).block(); waitForOperationCompletion(5000); assertThrows(Exception.class, () -> callConnectionAsync.getCallProperties().block()); } catch (Exception ex) { fail("Unexpeceted exception received", ex); } }
public void recognizeDtmfInACall(HttpClient httpClient) { /* Test case: ACS to ACS call * 1. create a CallAutomationClient. * 2. create a call from source to one ACS target. * 3. get updated call properties and check for the connected state. * 4. prompt and recognize dtmf tones from target participant * 5. hang up the call. */ CallAutomationAsyncClient callClient = getCallAutomationClientUsingConnectionString(httpClient) .addPolicy((context, next) -> logHeaders("recognizeDtmfInACall", next)) .buildAsyncClient(); CommunicationIdentityAsyncClient identityClient = getCommunicationIdentityClientUsingConnectionString(httpClient) .addPolicy((context, next) -> logHeaders("recognizeDtmfInACall", next)) .buildAsyncClient(); try { String callbackUrl = "https: CommunicationIdentifier source = identityClient.createUser().block(); PhoneNumberIdentifier targetUser = new PhoneNumberIdentifier(PHONE_USER_1); List<CommunicationIdentifier> targets = new ArrayList<>(Arrays.asList(targetUser)); CreateCallOptions createCallOptions = new CreateCallOptions(source, targets, callbackUrl) .setSourceCallerId(ACS_RESOURCE_PHONE); Response<CreateCallResult> callResponse = callClient.createCallWithResponse(createCallOptions).block(); assertNotNull(callResponse); assertNotNull(callResponse.getValue()); assertNotNull(callResponse.getValue().getCallConnection()); assertNotNull(callResponse.getValue().getCallConnectionProperties()); waitForOperationCompletion(15000); CallConnectionAsync callConnectionAsync = callClient.getCallConnectionAsync(callResponse.getValue().getCallConnectionProperties().getCallConnectionId()); assertNotNull(callConnectionAsync); CallConnectionProperties callConnectionProperties = callConnectionAsync.getCallProperties().block(); assertNotNull(callConnectionProperties); assertEquals(CallConnectionState.CONNECTED, callConnectionProperties.getCallConnectionState()); CallMediaAsync callMediaAsync = callConnectionAsync.getCallMediaAsync(); List<DtmfTone> stopTones = new 
ArrayList<>(); stopTones.add(DtmfTone.POUND); CallMediaRecognizeDtmfOptions callMediaRecognizeDtmfOptions = new CallMediaRecognizeDtmfOptions(targetUser, 5) .setStopTones(stopTones) .setInterToneTimeout(Duration.ofSeconds(5)); callMediaRecognizeDtmfOptions.setInitialSilenceTimeout(Duration.ofSeconds(15)); callMediaRecognizeDtmfOptions.setPlayPrompt(new FileSource().setUri(MEDIA_SOURCE)); Response<Void> dtmfResponse = callMediaAsync.startRecognizingWithResponse(new CallMediaRecognizeDtmfOptions(targetUser, 5)).block(); assertNotNull(dtmfResponse); assertEquals(202, dtmfResponse.getStatusCode()); callConnectionAsync.hangUp(true).block(); waitForOperationCompletion(5000); assertThrows(Exception.class, () -> callConnectionAsync.getCallProperties().block()); } catch (Exception ex) { fail("Unexpeceted exception received", ex); } }
class CallMediaAsyncLiveTests extends CallAutomationLiveTestBase { @ParameterizedTest @MethodSource("com.azure.core.test.TestBase @DisabledIfEnvironmentVariable( named = "SKIP_LIVE_TEST", matches = "(?i)(true)", disabledReason = "Requires human intervention") public void playMediaInACall(HttpClient httpClient) { /* Test case: * 1. create a CallAutomationClient. * 2. create a call from source to one ACS target and A PSTN target. * 3. play a media file to all participants. * 4. terminate the call. */ CallAutomationAsyncClient callClient = getCallAutomationClientUsingConnectionString(httpClient) .addPolicy((context, next) -> logHeaders("playMediaInACall", next)) .buildAsyncClient(); CommunicationIdentityAsyncClient identityClient = getCommunicationIdentityClientUsingConnectionString(httpClient) .addPolicy((context, next) -> logHeaders("playMediaInACall", next)) .buildAsyncClient(); try { String callbackUrl = "https: CommunicationIdentifier source = identityClient.createUser().block(); List<CommunicationIdentifier> targets = new ArrayList<>(Arrays.asList(new PhoneNumberIdentifier(PHONE_USER_1), new CommunicationUserIdentifier(ACS_USER_1))); CreateCallOptions createCallOptions = new CreateCallOptions(source, targets, callbackUrl) .setSourceCallerId(ACS_RESOURCE_PHONE); Response<CreateCallResult> result = callClient.createCallWithResponse(createCallOptions).block(); assertNotNull(result); assertNotNull(result.getValue()); assertNotNull(result.getValue().getCallConnection()); assertNotNull(result.getValue().getCallConnectionProperties()); waitForOperationCompletion(15000); CallConnectionAsync callConnectionAsync = callClient.getCallConnectionAsync(result.getValue().getCallConnectionProperties().getCallConnectionId()); assertNotNull(callConnectionAsync); CallConnectionProperties callConnectionProperties = callConnectionAsync.getCallProperties().block(); assertNotNull(callConnectionProperties); assertEquals(CallConnectionState.CONNECTED, 
callConnectionProperties.getCallConnectionState()); CallMediaAsync callMediaAsync = callConnectionAsync.getCallMediaAsync(); callMediaAsync.playToAll(new FileSource().setUri(MEDIA_SOURCE)).block(); waitForOperationCompletion(5000); callConnectionAsync.hangUp(true).block(); waitForOperationCompletion(5000); assertThrows(Exception.class, () -> callConnectionAsync.getCallProperties().block()); } catch (Exception ex) { fail("Unexpected exception received", ex); } } @ParameterizedTest @MethodSource("com.azure.core.test.TestBase @DisabledIfEnvironmentVariable( named = "SKIP_LIVE_TEST", matches = "(?i)(true)", disabledReason = "Requires human intervention") }
class CallMediaAsyncLiveTests extends CallAutomationLiveTestBase { @ParameterizedTest @MethodSource("com.azure.core.test.TestBase @DisabledIfEnvironmentVariable( named = "SKIP_LIVE_TEST", matches = "(?i)(true)", disabledReason = "Requires human intervention") public void playMediaInACall(HttpClient httpClient) { /* Test case: * 1. create a CallAutomationClient. * 2. create a call from source to one ACS target and A PSTN target. * 3. play a media file to all participants. * 4. terminate the call. */ CallAutomationAsyncClient callClient = getCallAutomationClientUsingConnectionString(httpClient) .addPolicy((context, next) -> logHeaders("playMediaInACall", next)) .buildAsyncClient(); CommunicationIdentityAsyncClient identityClient = getCommunicationIdentityClientUsingConnectionString(httpClient) .addPolicy((context, next) -> logHeaders("playMediaInACall", next)) .buildAsyncClient(); try { String callbackUrl = "https: CommunicationIdentifier source = identityClient.createUser().block(); List<CommunicationIdentifier> targets = new ArrayList<>(Arrays.asList(new PhoneNumberIdentifier(PHONE_USER_1), new CommunicationUserIdentifier(ACS_USER_1))); CreateCallOptions createCallOptions = new CreateCallOptions(source, targets, callbackUrl) .setSourceCallerId(ACS_RESOURCE_PHONE); Response<CreateCallResult> result = callClient.createCallWithResponse(createCallOptions).block(); assertNotNull(result); assertNotNull(result.getValue()); assertNotNull(result.getValue().getCallConnection()); assertNotNull(result.getValue().getCallConnectionProperties()); waitForOperationCompletion(15000); CallConnectionAsync callConnectionAsync = callClient.getCallConnectionAsync(result.getValue().getCallConnectionProperties().getCallConnectionId()); assertNotNull(callConnectionAsync); CallConnectionProperties callConnectionProperties = callConnectionAsync.getCallProperties().block(); assertNotNull(callConnectionProperties); assertEquals(CallConnectionState.CONNECTED, 
callConnectionProperties.getCallConnectionState()); CallMediaAsync callMediaAsync = callConnectionAsync.getCallMediaAsync(); callMediaAsync.playToAll(new FileSource().setUri(MEDIA_SOURCE)).block(); waitForOperationCompletion(5000); callConnectionAsync.hangUp(true).block(); waitForOperationCompletion(5000); assertThrows(Exception.class, () -> callConnectionAsync.getCallProperties().block()); } catch (Exception ex) { fail("Unexpected exception received", ex); } } @ParameterizedTest @MethodSource("com.azure.core.test.TestBase @DisabledIfEnvironmentVariable( named = "SKIP_LIVE_TEST", matches = "(?i)(true)", disabledReason = "Requires human intervention") }
nit: repeated step 4.
public void recognizeDtmfInACall(HttpClient httpClient) { /* Test case: ACS to ACS call * 1. create a CallAutomationClient. * 2. create a call from source to one ACS target. * 3. get updated call properties and check for the connected state. * 4. prompt and recognize dtmf tones from target participant * 4. hang up the call. */ CallAutomationAsyncClient callClient = getCallAutomationClientUsingConnectionString(httpClient) .addPolicy((context, next) -> logHeaders("recognizeDtmfInACall", next)) .buildAsyncClient(); CommunicationIdentityAsyncClient identityClient = getCommunicationIdentityClientUsingConnectionString(httpClient) .addPolicy((context, next) -> logHeaders("recognizeDtmfInACall", next)) .buildAsyncClient(); try { String callbackUrl = "https: CommunicationIdentifier source = identityClient.createUser().block(); PhoneNumberIdentifier targetUser = new PhoneNumberIdentifier(PHONE_USER_1); List<CommunicationIdentifier> targets = new ArrayList<>(Arrays.asList(targetUser)); CreateCallOptions createCallOptions = new CreateCallOptions(source, targets, callbackUrl) .setSourceCallerId(ACS_RESOURCE_PHONE); Response<CreateCallResult> callResponse = callClient.createCallWithResponse(createCallOptions).block(); assertNotNull(callResponse); assertNotNull(callResponse.getValue()); assertNotNull(callResponse.getValue().getCallConnection()); assertNotNull(callResponse.getValue().getCallConnectionProperties()); waitForOperationCompletion(15000); CallConnectionAsync callConnectionAsync = callClient.getCallConnectionAsync(callResponse.getValue().getCallConnectionProperties().getCallConnectionId()); assertNotNull(callConnectionAsync); CallConnectionProperties callConnectionProperties = callConnectionAsync.getCallProperties().block(); assertNotNull(callConnectionProperties); assertEquals(CallConnectionState.CONNECTED, callConnectionProperties.getCallConnectionState()); CallMediaAsync callMediaAsync = callConnectionAsync.getCallMediaAsync(); List<DtmfTone> stopTones = new 
ArrayList<>(); stopTones.add(DtmfTone.POUND); CallMediaRecognizeDtmfOptions callMediaRecognizeDtmfOptions = new CallMediaRecognizeDtmfOptions(targetUser, 5) .setStopTones(stopTones) .setInterToneTimeout(Duration.ofSeconds(5)); callMediaRecognizeDtmfOptions.setInitialSilenceTimeout(Duration.ofSeconds(15)); callMediaRecognizeDtmfOptions.setPlayPrompt(new FileSource().setUri(MEDIA_SOURCE)); Response<Void> dtmfResponse = callMediaAsync.startRecognizingWithResponse(new CallMediaRecognizeDtmfOptions(targetUser, 5)).block(); assertNotNull(dtmfResponse); assertEquals(202, dtmfResponse.getStatusCode()); waitForOperationCompletion(5000); callConnectionAsync.hangUp(true).block(); waitForOperationCompletion(5000); assertThrows(Exception.class, () -> callConnectionAsync.getCallProperties().block()); } catch (Exception ex) { fail("Unexpeceted exception received", ex); } }
* 4. hang up the call.
public void recognizeDtmfInACall(HttpClient httpClient) { /* Test case: ACS to ACS call * 1. create a CallAutomationClient. * 2. create a call from source to one ACS target. * 3. get updated call properties and check for the connected state. * 4. prompt and recognize dtmf tones from target participant * 5. hang up the call. */ CallAutomationAsyncClient callClient = getCallAutomationClientUsingConnectionString(httpClient) .addPolicy((context, next) -> logHeaders("recognizeDtmfInACall", next)) .buildAsyncClient(); CommunicationIdentityAsyncClient identityClient = getCommunicationIdentityClientUsingConnectionString(httpClient) .addPolicy((context, next) -> logHeaders("recognizeDtmfInACall", next)) .buildAsyncClient(); try { String callbackUrl = "https: CommunicationIdentifier source = identityClient.createUser().block(); PhoneNumberIdentifier targetUser = new PhoneNumberIdentifier(PHONE_USER_1); List<CommunicationIdentifier> targets = new ArrayList<>(Arrays.asList(targetUser)); CreateCallOptions createCallOptions = new CreateCallOptions(source, targets, callbackUrl) .setSourceCallerId(ACS_RESOURCE_PHONE); Response<CreateCallResult> callResponse = callClient.createCallWithResponse(createCallOptions).block(); assertNotNull(callResponse); assertNotNull(callResponse.getValue()); assertNotNull(callResponse.getValue().getCallConnection()); assertNotNull(callResponse.getValue().getCallConnectionProperties()); waitForOperationCompletion(15000); CallConnectionAsync callConnectionAsync = callClient.getCallConnectionAsync(callResponse.getValue().getCallConnectionProperties().getCallConnectionId()); assertNotNull(callConnectionAsync); CallConnectionProperties callConnectionProperties = callConnectionAsync.getCallProperties().block(); assertNotNull(callConnectionProperties); assertEquals(CallConnectionState.CONNECTED, callConnectionProperties.getCallConnectionState()); CallMediaAsync callMediaAsync = callConnectionAsync.getCallMediaAsync(); List<DtmfTone> stopTones = new 
ArrayList<>(); stopTones.add(DtmfTone.POUND); CallMediaRecognizeDtmfOptions callMediaRecognizeDtmfOptions = new CallMediaRecognizeDtmfOptions(targetUser, 5) .setStopTones(stopTones) .setInterToneTimeout(Duration.ofSeconds(5)); callMediaRecognizeDtmfOptions.setInitialSilenceTimeout(Duration.ofSeconds(15)); callMediaRecognizeDtmfOptions.setPlayPrompt(new FileSource().setUri(MEDIA_SOURCE)); Response<Void> dtmfResponse = callMediaAsync.startRecognizingWithResponse(new CallMediaRecognizeDtmfOptions(targetUser, 5)).block(); assertNotNull(dtmfResponse); assertEquals(202, dtmfResponse.getStatusCode()); callConnectionAsync.hangUp(true).block(); waitForOperationCompletion(5000); assertThrows(Exception.class, () -> callConnectionAsync.getCallProperties().block()); } catch (Exception ex) { fail("Unexpeceted exception received", ex); } }
class CallMediaAsyncLiveTests extends CallAutomationLiveTestBase { @ParameterizedTest @MethodSource("com.azure.core.test.TestBase @DisabledIfEnvironmentVariable( named = "SKIP_LIVE_TEST", matches = "(?i)(true)", disabledReason = "Requires human intervention") public void playMediaInACall(HttpClient httpClient) { /* Test case: * 1. create a CallAutomationClient. * 2. create a call from source to one ACS target and A PSTN target. * 3. play a media file to all participants. * 4. terminate the call. */ CallAutomationAsyncClient callClient = getCallAutomationClientUsingConnectionString(httpClient) .addPolicy((context, next) -> logHeaders("playMediaInACall", next)) .buildAsyncClient(); CommunicationIdentityAsyncClient identityClient = getCommunicationIdentityClientUsingConnectionString(httpClient) .addPolicy((context, next) -> logHeaders("playMediaInACall", next)) .buildAsyncClient(); try { String callbackUrl = "https: CommunicationIdentifier source = identityClient.createUser().block(); List<CommunicationIdentifier> targets = new ArrayList<>(Arrays.asList(new PhoneNumberIdentifier(PHONE_USER_1), new CommunicationUserIdentifier(ACS_USER_1))); CreateCallOptions createCallOptions = new CreateCallOptions(source, targets, callbackUrl) .setSourceCallerId(ACS_RESOURCE_PHONE); Response<CreateCallResult> result = callClient.createCallWithResponse(createCallOptions).block(); assertNotNull(result); assertNotNull(result.getValue()); assertNotNull(result.getValue().getCallConnection()); assertNotNull(result.getValue().getCallConnectionProperties()); waitForOperationCompletion(15000); CallConnectionAsync callConnectionAsync = callClient.getCallConnectionAsync(result.getValue().getCallConnectionProperties().getCallConnectionId()); assertNotNull(callConnectionAsync); CallConnectionProperties callConnectionProperties = callConnectionAsync.getCallProperties().block(); assertNotNull(callConnectionProperties); assertEquals(CallConnectionState.CONNECTED, 
callConnectionProperties.getCallConnectionState()); CallMediaAsync callMediaAsync = callConnectionAsync.getCallMediaAsync(); callMediaAsync.playToAll(new FileSource().setUri(MEDIA_SOURCE)).block(); waitForOperationCompletion(5000); callConnectionAsync.hangUp(true).block(); waitForOperationCompletion(5000); assertThrows(Exception.class, () -> callConnectionAsync.getCallProperties().block()); } catch (Exception ex) { fail("Unexpected exception received", ex); } } @ParameterizedTest @MethodSource("com.azure.core.test.TestBase @DisabledIfEnvironmentVariable( named = "SKIP_LIVE_TEST", matches = "(?i)(true)", disabledReason = "Requires human intervention") }
class CallMediaAsyncLiveTests extends CallAutomationLiveTestBase { @ParameterizedTest @MethodSource("com.azure.core.test.TestBase @DisabledIfEnvironmentVariable( named = "SKIP_LIVE_TEST", matches = "(?i)(true)", disabledReason = "Requires human intervention") public void playMediaInACall(HttpClient httpClient) { /* Test case: * 1. create a CallAutomationClient. * 2. create a call from source to one ACS target and A PSTN target. * 3. play a media file to all participants. * 4. terminate the call. */ CallAutomationAsyncClient callClient = getCallAutomationClientUsingConnectionString(httpClient) .addPolicy((context, next) -> logHeaders("playMediaInACall", next)) .buildAsyncClient(); CommunicationIdentityAsyncClient identityClient = getCommunicationIdentityClientUsingConnectionString(httpClient) .addPolicy((context, next) -> logHeaders("playMediaInACall", next)) .buildAsyncClient(); try { String callbackUrl = "https: CommunicationIdentifier source = identityClient.createUser().block(); List<CommunicationIdentifier> targets = new ArrayList<>(Arrays.asList(new PhoneNumberIdentifier(PHONE_USER_1), new CommunicationUserIdentifier(ACS_USER_1))); CreateCallOptions createCallOptions = new CreateCallOptions(source, targets, callbackUrl) .setSourceCallerId(ACS_RESOURCE_PHONE); Response<CreateCallResult> result = callClient.createCallWithResponse(createCallOptions).block(); assertNotNull(result); assertNotNull(result.getValue()); assertNotNull(result.getValue().getCallConnection()); assertNotNull(result.getValue().getCallConnectionProperties()); waitForOperationCompletion(15000); CallConnectionAsync callConnectionAsync = callClient.getCallConnectionAsync(result.getValue().getCallConnectionProperties().getCallConnectionId()); assertNotNull(callConnectionAsync); CallConnectionProperties callConnectionProperties = callConnectionAsync.getCallProperties().block(); assertNotNull(callConnectionProperties); assertEquals(CallConnectionState.CONNECTED, 
callConnectionProperties.getCallConnectionState()); CallMediaAsync callMediaAsync = callConnectionAsync.getCallMediaAsync(); callMediaAsync.playToAll(new FileSource().setUri(MEDIA_SOURCE)).block(); waitForOperationCompletion(5000); callConnectionAsync.hangUp(true).block(); waitForOperationCompletion(5000); assertThrows(Exception.class, () -> callConnectionAsync.getCallProperties().block()); } catch (Exception ex) { fail("Unexpected exception received", ex); } } @ParameterizedTest @MethodSource("com.azure.core.test.TestBase @DisabledIfEnvironmentVariable( named = "SKIP_LIVE_TEST", matches = "(?i)(true)", disabledReason = "Requires human intervention") }
what is the intention behind waiting for the operation to finish if we only care about the request?
public void recognizeDtmfInACall(HttpClient httpClient) { /* Test case: ACS to ACS call * 1. create a CallAutomationClient. * 2. create a call from source to one ACS target. * 3. get updated call properties and check for the connected state. * 4. prompt and recognize dtmf tones from target participant * 4. hang up the call. */ CallAutomationAsyncClient callClient = getCallAutomationClientUsingConnectionString(httpClient) .addPolicy((context, next) -> logHeaders("recognizeDtmfInACall", next)) .buildAsyncClient(); CommunicationIdentityAsyncClient identityClient = getCommunicationIdentityClientUsingConnectionString(httpClient) .addPolicy((context, next) -> logHeaders("recognizeDtmfInACall", next)) .buildAsyncClient(); try { String callbackUrl = "https: CommunicationIdentifier source = identityClient.createUser().block(); PhoneNumberIdentifier targetUser = new PhoneNumberIdentifier(PHONE_USER_1); List<CommunicationIdentifier> targets = new ArrayList<>(Arrays.asList(targetUser)); CreateCallOptions createCallOptions = new CreateCallOptions(source, targets, callbackUrl) .setSourceCallerId(ACS_RESOURCE_PHONE); Response<CreateCallResult> callResponse = callClient.createCallWithResponse(createCallOptions).block(); assertNotNull(callResponse); assertNotNull(callResponse.getValue()); assertNotNull(callResponse.getValue().getCallConnection()); assertNotNull(callResponse.getValue().getCallConnectionProperties()); waitForOperationCompletion(15000); CallConnectionAsync callConnectionAsync = callClient.getCallConnectionAsync(callResponse.getValue().getCallConnectionProperties().getCallConnectionId()); assertNotNull(callConnectionAsync); CallConnectionProperties callConnectionProperties = callConnectionAsync.getCallProperties().block(); assertNotNull(callConnectionProperties); assertEquals(CallConnectionState.CONNECTED, callConnectionProperties.getCallConnectionState()); CallMediaAsync callMediaAsync = callConnectionAsync.getCallMediaAsync(); List<DtmfTone> stopTones = new 
ArrayList<>(); stopTones.add(DtmfTone.POUND); CallMediaRecognizeDtmfOptions callMediaRecognizeDtmfOptions = new CallMediaRecognizeDtmfOptions(targetUser, 5) .setStopTones(stopTones) .setInterToneTimeout(Duration.ofSeconds(5)); callMediaRecognizeDtmfOptions.setInitialSilenceTimeout(Duration.ofSeconds(15)); callMediaRecognizeDtmfOptions.setPlayPrompt(new FileSource().setUri(MEDIA_SOURCE)); Response<Void> dtmfResponse = callMediaAsync.startRecognizingWithResponse(new CallMediaRecognizeDtmfOptions(targetUser, 5)).block(); assertNotNull(dtmfResponse); assertEquals(202, dtmfResponse.getStatusCode()); waitForOperationCompletion(5000); callConnectionAsync.hangUp(true).block(); waitForOperationCompletion(5000); assertThrows(Exception.class, () -> callConnectionAsync.getCallProperties().block()); } catch (Exception ex) { fail("Unexpeceted exception received", ex); } }
waitForOperationCompletion(5000);
public void recognizeDtmfInACall(HttpClient httpClient) { /* Test case: ACS to ACS call * 1. create a CallAutomationClient. * 2. create a call from source to one ACS target. * 3. get updated call properties and check for the connected state. * 4. prompt and recognize dtmf tones from target participant * 5. hang up the call. */ CallAutomationAsyncClient callClient = getCallAutomationClientUsingConnectionString(httpClient) .addPolicy((context, next) -> logHeaders("recognizeDtmfInACall", next)) .buildAsyncClient(); CommunicationIdentityAsyncClient identityClient = getCommunicationIdentityClientUsingConnectionString(httpClient) .addPolicy((context, next) -> logHeaders("recognizeDtmfInACall", next)) .buildAsyncClient(); try { String callbackUrl = "https: CommunicationIdentifier source = identityClient.createUser().block(); PhoneNumberIdentifier targetUser = new PhoneNumberIdentifier(PHONE_USER_1); List<CommunicationIdentifier> targets = new ArrayList<>(Arrays.asList(targetUser)); CreateCallOptions createCallOptions = new CreateCallOptions(source, targets, callbackUrl) .setSourceCallerId(ACS_RESOURCE_PHONE); Response<CreateCallResult> callResponse = callClient.createCallWithResponse(createCallOptions).block(); assertNotNull(callResponse); assertNotNull(callResponse.getValue()); assertNotNull(callResponse.getValue().getCallConnection()); assertNotNull(callResponse.getValue().getCallConnectionProperties()); waitForOperationCompletion(15000); CallConnectionAsync callConnectionAsync = callClient.getCallConnectionAsync(callResponse.getValue().getCallConnectionProperties().getCallConnectionId()); assertNotNull(callConnectionAsync); CallConnectionProperties callConnectionProperties = callConnectionAsync.getCallProperties().block(); assertNotNull(callConnectionProperties); assertEquals(CallConnectionState.CONNECTED, callConnectionProperties.getCallConnectionState()); CallMediaAsync callMediaAsync = callConnectionAsync.getCallMediaAsync(); List<DtmfTone> stopTones = new 
ArrayList<>(); stopTones.add(DtmfTone.POUND); CallMediaRecognizeDtmfOptions callMediaRecognizeDtmfOptions = new CallMediaRecognizeDtmfOptions(targetUser, 5) .setStopTones(stopTones) .setInterToneTimeout(Duration.ofSeconds(5)); callMediaRecognizeDtmfOptions.setInitialSilenceTimeout(Duration.ofSeconds(15)); callMediaRecognizeDtmfOptions.setPlayPrompt(new FileSource().setUri(MEDIA_SOURCE)); Response<Void> dtmfResponse = callMediaAsync.startRecognizingWithResponse(new CallMediaRecognizeDtmfOptions(targetUser, 5)).block(); assertNotNull(dtmfResponse); assertEquals(202, dtmfResponse.getStatusCode()); callConnectionAsync.hangUp(true).block(); waitForOperationCompletion(5000); assertThrows(Exception.class, () -> callConnectionAsync.getCallProperties().block()); } catch (Exception ex) { fail("Unexpeceted exception received", ex); } }
class CallMediaAsyncLiveTests extends CallAutomationLiveTestBase { @ParameterizedTest @MethodSource("com.azure.core.test.TestBase @DisabledIfEnvironmentVariable( named = "SKIP_LIVE_TEST", matches = "(?i)(true)", disabledReason = "Requires human intervention") public void playMediaInACall(HttpClient httpClient) { /* Test case: * 1. create a CallAutomationClient. * 2. create a call from source to one ACS target and A PSTN target. * 3. play a media file to all participants. * 4. terminate the call. */ CallAutomationAsyncClient callClient = getCallAutomationClientUsingConnectionString(httpClient) .addPolicy((context, next) -> logHeaders("playMediaInACall", next)) .buildAsyncClient(); CommunicationIdentityAsyncClient identityClient = getCommunicationIdentityClientUsingConnectionString(httpClient) .addPolicy((context, next) -> logHeaders("playMediaInACall", next)) .buildAsyncClient(); try { String callbackUrl = "https: CommunicationIdentifier source = identityClient.createUser().block(); List<CommunicationIdentifier> targets = new ArrayList<>(Arrays.asList(new PhoneNumberIdentifier(PHONE_USER_1), new CommunicationUserIdentifier(ACS_USER_1))); CreateCallOptions createCallOptions = new CreateCallOptions(source, targets, callbackUrl) .setSourceCallerId(ACS_RESOURCE_PHONE); Response<CreateCallResult> result = callClient.createCallWithResponse(createCallOptions).block(); assertNotNull(result); assertNotNull(result.getValue()); assertNotNull(result.getValue().getCallConnection()); assertNotNull(result.getValue().getCallConnectionProperties()); waitForOperationCompletion(15000); CallConnectionAsync callConnectionAsync = callClient.getCallConnectionAsync(result.getValue().getCallConnectionProperties().getCallConnectionId()); assertNotNull(callConnectionAsync); CallConnectionProperties callConnectionProperties = callConnectionAsync.getCallProperties().block(); assertNotNull(callConnectionProperties); assertEquals(CallConnectionState.CONNECTED, 
callConnectionProperties.getCallConnectionState()); CallMediaAsync callMediaAsync = callConnectionAsync.getCallMediaAsync(); callMediaAsync.playToAll(new FileSource().setUri(MEDIA_SOURCE)).block(); waitForOperationCompletion(5000); callConnectionAsync.hangUp(true).block(); waitForOperationCompletion(5000); assertThrows(Exception.class, () -> callConnectionAsync.getCallProperties().block()); } catch (Exception ex) { fail("Unexpected exception received", ex); } } @ParameterizedTest @MethodSource("com.azure.core.test.TestBase @DisabledIfEnvironmentVariable( named = "SKIP_LIVE_TEST", matches = "(?i)(true)", disabledReason = "Requires human intervention") }
class CallMediaAsyncLiveTests extends CallAutomationLiveTestBase { @ParameterizedTest @MethodSource("com.azure.core.test.TestBase @DisabledIfEnvironmentVariable( named = "SKIP_LIVE_TEST", matches = "(?i)(true)", disabledReason = "Requires human intervention") public void playMediaInACall(HttpClient httpClient) { /* Test case: * 1. create a CallAutomationClient. * 2. create a call from source to one ACS target and A PSTN target. * 3. play a media file to all participants. * 4. terminate the call. */ CallAutomationAsyncClient callClient = getCallAutomationClientUsingConnectionString(httpClient) .addPolicy((context, next) -> logHeaders("playMediaInACall", next)) .buildAsyncClient(); CommunicationIdentityAsyncClient identityClient = getCommunicationIdentityClientUsingConnectionString(httpClient) .addPolicy((context, next) -> logHeaders("playMediaInACall", next)) .buildAsyncClient(); try { String callbackUrl = "https: CommunicationIdentifier source = identityClient.createUser().block(); List<CommunicationIdentifier> targets = new ArrayList<>(Arrays.asList(new PhoneNumberIdentifier(PHONE_USER_1), new CommunicationUserIdentifier(ACS_USER_1))); CreateCallOptions createCallOptions = new CreateCallOptions(source, targets, callbackUrl) .setSourceCallerId(ACS_RESOURCE_PHONE); Response<CreateCallResult> result = callClient.createCallWithResponse(createCallOptions).block(); assertNotNull(result); assertNotNull(result.getValue()); assertNotNull(result.getValue().getCallConnection()); assertNotNull(result.getValue().getCallConnectionProperties()); waitForOperationCompletion(15000); CallConnectionAsync callConnectionAsync = callClient.getCallConnectionAsync(result.getValue().getCallConnectionProperties().getCallConnectionId()); assertNotNull(callConnectionAsync); CallConnectionProperties callConnectionProperties = callConnectionAsync.getCallProperties().block(); assertNotNull(callConnectionProperties); assertEquals(CallConnectionState.CONNECTED, 
callConnectionProperties.getCallConnectionState()); CallMediaAsync callMediaAsync = callConnectionAsync.getCallMediaAsync(); callMediaAsync.playToAll(new FileSource().setUri(MEDIA_SOURCE)).block(); waitForOperationCompletion(5000); callConnectionAsync.hangUp(true).block(); waitForOperationCompletion(5000); assertThrows(Exception.class, () -> callConnectionAsync.getCallProperties().block()); } catch (Exception ex) { fail("Unexpected exception received", ex); } } @ParameterizedTest @MethodSource("com.azure.core.test.TestBase @DisabledIfEnvironmentVariable( named = "SKIP_LIVE_TEST", matches = "(?i)(true)", disabledReason = "Requires human intervention") }
would it make sense to cache the result in the constructor (or lazily upon request), so that if customers call `getElevations()` multiple times we don't copy each time?
/**
 * Converts the raw elevation entries into {@link GeoPosition} values, preserving
 * the order of the points from the original request.
 *
 * <p>A new list is built on every call; callers that need the result more than
 * once should cache it themselves.</p>
 *
 * @return a new list of positions carrying latitude, longitude and elevation (meters)
 */
public List<GeoPosition> getElevations() {
    // Presize: exactly one output position per elevation entry.
    List<GeoPosition> positions = new ArrayList<>(this.elevations.size());
    for (Elevation e : this.elevations) {
        // NOTE(review): confirm GeoPosition's constructor parameter order
        // (longitude-first vs latitude-first) matches the arguments here.
        positions.add(
            new GeoPosition(
                e.getCoordinate().getLatitude(),
                e.getCoordinate().getLongitude(),
                (double) e.getElevationInMeters()));
    }
    return positions;
}
List<GeoPosition> toreturn = new ArrayList<>();
public List<GeoPosition> getElevations() { List<GeoPosition> toreturn = new ArrayList<>(); for (Elevation e : this.elevations) { toreturn.add( new GeoPosition( e.getCoordinate().getLatitude(), e.getCoordinate().getLongitude(), (double) e.getElevationInMeters())); } return toreturn; }
class ElevationResult { /* * The response for point/points elevation API. The result will be in same * sequence of points listed in request. */ @JsonProperty(value = "data", access = JsonProperty.Access.WRITE_ONLY) private List<Elevation> elevations; /** Set default ElevationResult constructor to private */ private ElevationResult() {} /** * Get the elevations property: The response for point/points elevation API. The result will be in same sequence of * points listed in request. * * @return the elevations value */ }
class ElevationResult { /* * The response for point/points elevation API. The result will be in same * sequence of points listed in request. */ @JsonProperty(value = "data", access = JsonProperty.Access.WRITE_ONLY) private List<Elevation> elevations; /** Set default ElevationResult constructor to private */ private ElevationResult() {} /** * Get the elevations property: The response for point/points elevation API. The result will be in same sequence of * points listed in request. * * @return the elevations value */ }
Also, would it make sense to return a read-only list?
public List<GeoPosition> getElevations() { List<GeoPosition> toreturn = new ArrayList<>(); for (Elevation e : this.elevations) { toreturn.add( new GeoPosition( e.getCoordinate().getLatitude(), e.getCoordinate().getLongitude(), (double) e.getElevationInMeters())); } return toreturn; }
List<GeoPosition> toreturn = new ArrayList<>();
public List<GeoPosition> getElevations() { List<GeoPosition> toreturn = new ArrayList<>(); for (Elevation e : this.elevations) { toreturn.add( new GeoPosition( e.getCoordinate().getLatitude(), e.getCoordinate().getLongitude(), (double) e.getElevationInMeters())); } return toreturn; }
class ElevationResult { /* * The response for point/points elevation API. The result will be in same * sequence of points listed in request. */ @JsonProperty(value = "data", access = JsonProperty.Access.WRITE_ONLY) private List<Elevation> elevations; /** Set default ElevationResult constructor to private */ private ElevationResult() {} /** * Get the elevations property: The response for point/points elevation API. The result will be in same sequence of * points listed in request. * * @return the elevations value */ }
class ElevationResult { /* * The response for point/points elevation API. The result will be in same * sequence of points listed in request. */ @JsonProperty(value = "data", access = JsonProperty.Access.WRITE_ONLY) private List<Elevation> elevations; /** Set default ElevationResult constructor to private */ private ElevationResult() {} /** * Get the elevations property: The response for point/points elevation API. The result will be in same sequence of * points listed in request. * * @return the elevations value */ }
This is rarely used more than once, so it would make more sense to let customers know they should re-use the result (if that ever happens) instead of adding a more complicated transform process via the ClassCustomization library for a simple addition that may cause other issues. If added, this requires a private field `private List<GeoPosition> positions;`, which would need a more complex roundabout solution utilizing transformAST since it modifies auto-generated code: public List<GeoPosition> getElevations() { if (positions == null) { positions = new ArrayList<>(); for (Elevation e : this.elevations) { positions.add( new GeoPosition( e.getCoordinate().getLatitude(), e.getCoordinate().getLongitude(), (double) e.getElevationInMeters())); } } return positions; }
public List<GeoPosition> getElevations() { List<GeoPosition> toreturn = new ArrayList<>(); for (Elevation e : this.elevations) { toreturn.add( new GeoPosition( e.getCoordinate().getLatitude(), e.getCoordinate().getLongitude(), (double) e.getElevationInMeters())); } return toreturn; }
List<GeoPosition> toreturn = new ArrayList<>();
public List<GeoPosition> getElevations() { List<GeoPosition> toreturn = new ArrayList<>(); for (Elevation e : this.elevations) { toreturn.add( new GeoPosition( e.getCoordinate().getLatitude(), e.getCoordinate().getLongitude(), (double) e.getElevationInMeters())); } return toreturn; }
class ElevationResult { /* * The response for point/points elevation API. The result will be in same * sequence of points listed in request. */ @JsonProperty(value = "data", access = JsonProperty.Access.WRITE_ONLY) private List<Elevation> elevations; /** Set default ElevationResult constructor to private */ private ElevationResult() {} /** * Get the elevations property: The response for point/points elevation API. The result will be in same sequence of * points listed in request. * * @return the elevations value */ }
class ElevationResult { /* * The response for point/points elevation API. The result will be in same * sequence of points listed in request. */ @JsonProperty(value = "data", access = JsonProperty.Access.WRITE_ONLY) private List<Elevation> elevations; /** Set default ElevationResult constructor to private */ private ElevationResult() {} /** * Get the elevations property: The response for point/points elevation API. The result will be in same sequence of * points listed in request. * * @return the elevations value */ }
nit: create the `ArrayList` with a size equal to the number of points `new ArrayList<>(points.size())`
/**
 * Converts {@link GeoPosition} points into the service's abbreviated
 * latitude/longitude pair representation, preserving input order.
 *
 * @param points the positions to convert
 * @return a new list with one {@link LatLongPairAbbreviated} per input point
 */
public static List<LatLongPairAbbreviated> toLatLongPairAbbreviated(List<GeoPosition> points) {
    // Presize: the output has exactly one entry per input point.
    List<LatLongPairAbbreviated> latLongPairList = new ArrayList<>(points.size());
    for (GeoPosition point : points) {
        latLongPairList.add(new LatLongPairAbbreviated().setLat(point.getLatitude()).setLon(point.getLongitude()));
    }
    return latLongPairList;
}
List<LatLongPairAbbreviated> latLongPairList = new ArrayList<>();
public static List<LatLongPairAbbreviated> toLatLongPairAbbreviated(List<GeoPosition> points) { List<LatLongPairAbbreviated> latLongPairList = new ArrayList<>((points.size())); for (GeoPosition point : points) { latLongPairList.add(new LatLongPairAbbreviated().setLat(point.getLatitude()).setLon(point.getLongitude())); } return latLongPairList; }
class Utility { public static List<String> geoPositionToString(List<GeoPosition> points) { List<String> stringPointsList = new ArrayList<>(); for (GeoPosition point : points) { stringPointsList.add(point.getLongitude() + "," + point.getLatitude()); } return stringPointsList; } public static List<Double> geoBoundingBoxAsList(GeoBoundingBox boundingBox) { return Arrays.asList(boundingBox.getWest(), boundingBox.getSouth(), boundingBox.getEast(), boundingBox.getNorth()); } }
class Utility { public static List<String> geoPositionToString(List<GeoPosition> points) { List<String> stringPointsList = new ArrayList<>(); for (GeoPosition point : points) { stringPointsList.add(point.getLongitude() + "," + point.getLatitude()); } return stringPointsList; } public static List<Double> geoBoundingBoxAsList(GeoBoundingBox boundingBox) { return Arrays.asList(boundingBox.getWest(), boundingBox.getSouth(), boundingBox.getEast(), boundingBox.getNorth()); } }
This doesn't need to be mutable, correct? Note that `Arrays.asList` returns a fixed-size list (elements can still be replaced via `set`, but it cannot grow or shrink) — it is not fully immutable.
/**
 * Flattens a {@link GeoBoundingBox} into the [west, south, east, north] list
 * form expected by the service.
 *
 * <p>{@code Arrays.asList} returns a fixed-size list backed by an array:
 * elements may be replaced via {@code set}, but the list cannot grow or shrink.</p>
 *
 * @param boundingBox the bounding box to flatten
 * @return a fixed-size list of [west, south, east, north]
 */
public static List<Double> geoBoundingBoxAsList(GeoBoundingBox boundingBox) {
    return Arrays.asList(boundingBox.getWest(), boundingBox.getSouth(), boundingBox.getEast(), boundingBox.getNorth());
}
return Arrays.asList(boundingBox.getWest(), boundingBox.getSouth(), boundingBox.getEast(), boundingBox.getNorth());
public static List<Double> geoBoundingBoxAsList(GeoBoundingBox boundingBox) { return Arrays.asList(boundingBox.getWest(), boundingBox.getSouth(), boundingBox.getEast(), boundingBox.getNorth()); }
class Utility { public static List<LatLongPairAbbreviated> toLatLongPairAbbreviated(List<GeoPosition> points) { List<LatLongPairAbbreviated> latLongPairList = new ArrayList<>(); for (GeoPosition point : points) { latLongPairList.add(new LatLongPairAbbreviated().setLat(point.getLatitude()).setLon(point.getLongitude())); } return latLongPairList; } public static List<String> geoPositionToString(List<GeoPosition> points) { List<String> stringPointsList = new ArrayList<>(); for (GeoPosition point : points) { stringPointsList.add(point.getLongitude() + "," + point.getLatitude()); } return stringPointsList; } }
class Utility { public static List<LatLongPairAbbreviated> toLatLongPairAbbreviated(List<GeoPosition> points) { List<LatLongPairAbbreviated> latLongPairList = new ArrayList<>((points.size())); for (GeoPosition point : points) { latLongPairList.add(new LatLongPairAbbreviated().setLat(point.getLatitude()).setLon(point.getLongitude())); } return latLongPairList; } public static List<String> geoPositionToString(List<GeoPosition> points) { List<String> stringPointsList = new ArrayList<>(); for (GeoPosition point : points) { stringPointsList.add(point.getLongitude() + "," + point.getLatitude()); } return stringPointsList; } }
Correct, this doesn't need to be mutable!
public static List<Double> geoBoundingBoxAsList(GeoBoundingBox boundingBox) { return Arrays.asList(boundingBox.getWest(), boundingBox.getSouth(), boundingBox.getEast(), boundingBox.getNorth()); }
return Arrays.asList(boundingBox.getWest(), boundingBox.getSouth(), boundingBox.getEast(), boundingBox.getNorth());
public static List<Double> geoBoundingBoxAsList(GeoBoundingBox boundingBox) { return Arrays.asList(boundingBox.getWest(), boundingBox.getSouth(), boundingBox.getEast(), boundingBox.getNorth()); }
class Utility { public static List<LatLongPairAbbreviated> toLatLongPairAbbreviated(List<GeoPosition> points) { List<LatLongPairAbbreviated> latLongPairList = new ArrayList<>(); for (GeoPosition point : points) { latLongPairList.add(new LatLongPairAbbreviated().setLat(point.getLatitude()).setLon(point.getLongitude())); } return latLongPairList; } public static List<String> geoPositionToString(List<GeoPosition> points) { List<String> stringPointsList = new ArrayList<>(); for (GeoPosition point : points) { stringPointsList.add(point.getLongitude() + "," + point.getLatitude()); } return stringPointsList; } }
class Utility { public static List<LatLongPairAbbreviated> toLatLongPairAbbreviated(List<GeoPosition> points) { List<LatLongPairAbbreviated> latLongPairList = new ArrayList<>((points.size())); for (GeoPosition point : points) { latLongPairList.add(new LatLongPairAbbreviated().setLat(point.getLatitude()).setLon(point.getLongitude())); } return latLongPairList; } public static List<String> geoPositionToString(List<GeoPosition> points) { List<String> stringPointsList = new ArrayList<>(); for (GeoPosition point : points) { stringPointsList.add(point.getLongitude() + "," + point.getLatitude()); } return stringPointsList; } }
This is outdated. Please, use the `kind` field for this selection.
/**
 * Parses a media streaming package from its JSON string form into the matching
 * concrete type: an audio payload ({@code MediaStreamingAudio}) or audio
 * metadata ({@code MediaStreamingMetadata}).
 *
 * <p>NOTE(review): dispatch is done by substring sniffing on the raw JSON
 * ("data" / "encoding"), which is fragile — a payload whose string values happen
 * to contain those words would be misclassified. The wire format reportedly
 * carries a {@code kind} field that should drive this selection instead; confirm
 * against the current protocol and switch to it.</p>
 *
 * @param stringJson the package as a JSON string
 * @return the parsed package, or {@code null} if the payload matches neither shape
 * @throws RuntimeException if deserialization fails (checked JSON errors are wrapped)
 */
public static MediaStreamingPackageBase parse(String stringJson) {
    try {
        ObjectMapper mapper = new ObjectMapper();
        // Tolerate extra fields so service-side additions don't break parsing.
        mapper.disable(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES);
        JsonNode jsonData = mapper.readTree(stringJson);
        // Audio payload: identified by the presence of a "data" field.
        if (stringJson.contains("data")) {
            MediaStreamingAudioInternal audioInternal = mapper.convertValue(jsonData, MediaStreamingAudioInternal.class);
            // Timestamp arrives as an ISO-8601 offset string and is parsed here.
            return new MediaStreamingAudio(audioInternal.getAudioData(),
                OffsetDateTime.parse(audioInternal.getTimestamp(), DateTimeFormatter.ISO_OFFSET_DATE_TIME),
                new CommunicationUserIdentifier(audioInternal.getParticipantRawID()),
                audioInternal.isSilent());
        }
        // Audio metadata: identified by the presence of an "encoding" field.
        if (stringJson.contains("encoding")) {
            MediaStreamingMetadataInternal metadataInternal = mapper.convertValue(jsonData, MediaStreamingMetadataInternal.class);
            return new MediaStreamingMetadata(metadataInternal.getMediaSubscriptionId(),
                metadataInternal.getEncoding(), metadataInternal.getSampleRate(),
                metadataInternal.getChannels(), metadataInternal.getLength());
        }
        // Neither shape recognized.
        return null;
    } catch (RuntimeException e) {
        throw LOGGER.logExceptionAsError(e);
    } catch (JsonProcessingException e) {
        // Wrap the checked Jackson exception so callers only deal with unchecked types.
        throw LOGGER.logExceptionAsError(new RuntimeException(e));
    }
}
if (stringJson.contains("data")) {
public static MediaStreamingPackageBase parse(String stringJson) { try { ObjectMapper mapper = new ObjectMapper(); mapper.disable(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES); JsonNode jsonData = mapper.readTree(stringJson); if (stringJson.contains("AudioData")) { MediaStreamingAudioInternal audioInternal = mapper.convertValue(jsonData, MediaStreamingAudioInternal.class); return new MediaStreamingAudio(audioInternal.getAudioData(), audioInternal.getTimestamp(), audioInternal.getParticipantRawID(), audioInternal.isSilent()); } if (stringJson.contains("AudioMetadata")) { MediaStreamingMetadataInternal metadataInternal = mapper.convertValue(jsonData, MediaStreamingMetadataInternal.class); return new MediaStreamingMetadata(metadataInternal.getMediaSubscriptionId(), metadataInternal.getEncoding(), metadataInternal.getSampleRate(), metadataInternal.getChannels(), metadataInternal.getLength()); } return null; } catch (RuntimeException e) { throw LOGGER.logExceptionAsError(e); } catch (JsonProcessingException e) { throw LOGGER.logExceptionAsError(new RuntimeException(e)); } }
class MediaStreamingPackageParser { private static final ClientLogger LOGGER = new ClientLogger(MediaStreamingPackageParser.class); /*** * Parses a Media Streaming package from BinaryData. * * @param json The MediaStreaming package as a BinaryData obejct. * @throws RuntimeException Any exceptions occurs at runtime. * @return a MediaStreamingPackageBase object. */ public static MediaStreamingPackageBase parse(BinaryData json) { return parse(json.toString()); } /*** * Parses a Media Streaming package from byte array. * * @param receivedBytes The MediaStreaming package as a byte[]. * @throws RuntimeException Any exceptions occurs at runtime. * @return a MediaStreamingPackageBase object. */ public static MediaStreamingPackageBase parse(byte[] receivedBytes) { return parse(new String(receivedBytes, StandardCharsets.UTF_8)); } /*** * Parses a Media Streaming package from String. * * @param stringJson The MediaStreaming package as a String. * @throws RuntimeException Any exceptions occurs at runtime. * @return a MediaStreamingPackageBase object. */ }
class MediaStreamingPackageParser { private static final ClientLogger LOGGER = new ClientLogger(MediaStreamingPackageParser.class); /*** * Parses a Media Streaming package from BinaryData. * * @param json The MediaStreaming package as a BinaryData obejct. * @throws RuntimeException Any exceptions occurs at runtime. * @return a MediaStreamingPackageBase object. */ public static MediaStreamingPackageBase parse(BinaryData json) { return parse(json.toString()); } /*** * Parses a Media Streaming package from byte array. * * @param receivedBytes The MediaStreaming package as a byte[]. * @throws RuntimeException Any exceptions occurs at runtime. * @return a MediaStreamingPackageBase object. */ public static MediaStreamingPackageBase parse(byte[] receivedBytes) { return parse(new String(receivedBytes, StandardCharsets.UTF_8)); } /*** * Parses a Media Streaming package from String. * * @param stringJson The MediaStreaming package as a String. * @throws RuntimeException Any exceptions occurs at runtime. * @return a MediaStreamingPackageBase object. */ }
this is supposed to be an int.
public void parseAudioMetadata() { String metadataJson = "{" + "\"subscriptionId\": \"subscriptionId\"," + "\"encoding\": \"PCM\"," + "\"sampleRate\": 8," + "\"channels\": 2," + "\"length\": 100.1" + "}"; MediaStreamingMetadata mediaStreamingMetadata = (MediaStreamingMetadata) MediaStreamingPackageParser.parse(metadataJson); assertNotNull(mediaStreamingMetadata); checkAudioMetadata(mediaStreamingMetadata); }
+ "\"length\": 100.1"
public void parseAudioMetadata() { String metadataJson = "{" + "\"kind\": \"AudioMetadata\"," + "\"subscriptionId\": \"subscriptionId\"," + "\"encoding\": \"PCM\"," + "\"sampleRate\": 8," + "\"channels\": 2," + "\"length\": 100" + "}"; MediaStreamingMetadata mediaStreamingMetadata = (MediaStreamingMetadata) MediaStreamingPackageParser.parse(metadataJson); assertNotNull(mediaStreamingMetadata); checkAudioMetadata(mediaStreamingMetadata); }
class MediaStreamingPackageParserUnitTests { @Test public void parseAudioData() { String audioJson = "{" + "\"timestamp\": \"2022-08-23T11:48:05Z\"," + "\"participantRawID\": \"participantId\"," + "\"data\": \"AQIDBAU=\"," + "\"silent\": false" + "}"; MediaStreamingAudio mediaStreamingAudio = (MediaStreamingAudio) MediaStreamingPackageParser.parse(audioJson); assertNotNull(mediaStreamingAudio); checkAudioData(mediaStreamingAudio); } @Test @Test public void parseBinaryAudioData() { String jsonData = createJsonData(); MediaStreamingAudio mediaStreamingAudio = (MediaStreamingAudio) MediaStreamingPackageParser.parse(BinaryData.fromString(jsonData)); checkAudioData(mediaStreamingAudio); } @Test public void parseBinaryAudioMetadata() { String jsonMetadata = createJsonMetadata(); MediaStreamingMetadata mediaStreamingMetadata = (MediaStreamingMetadata) MediaStreamingPackageParser.parse(BinaryData.fromString(jsonMetadata)); checkAudioMetadata(mediaStreamingMetadata); } @Test public void parseBinaryArrayAudioData() { String jsonData = createJsonData(); MediaStreamingAudio mediaStreamingAudio = (MediaStreamingAudio) MediaStreamingPackageParser.parse(jsonData.getBytes(StandardCharsets.UTF_8)); checkAudioData(mediaStreamingAudio); } @Test public void parseBinaryArrayAudioMetadata() { String jsonMetadata = createJsonMetadata(); MediaStreamingMetadata mediaStreamingMetadata = (MediaStreamingMetadata) MediaStreamingPackageParser.parse(jsonMetadata.getBytes(StandardCharsets.UTF_8)); checkAudioMetadata(mediaStreamingMetadata); } private void checkAudioData(MediaStreamingAudio mediaStreamingAudio) { assertEquals(OffsetDateTime.parse("2022-08-23T11:48:05Z"), mediaStreamingAudio.getTimestamp()); assertEquals("participantId", mediaStreamingAudio.getParticipant().getRawId()); assertArrayEquals(new byte[] {1, 2, 3, 4, 5}, mediaStreamingAudio.getAudioData()); assertEquals(false, mediaStreamingAudio.isSilent()); } private void checkAudioMetadata(MediaStreamingMetadata 
mediaStreamingMetadata) { assertEquals("subscriptionId", mediaStreamingMetadata.getMediaSubscriptionId()); assertEquals("PCM", mediaStreamingMetadata.getEncoding()); assertEquals(8, mediaStreamingMetadata.getSampleRate()); assertEquals(2, mediaStreamingMetadata.getChannels()); assertEquals(100.1, mediaStreamingMetadata.getLength()); } private String createJsonMetadata() { try { ObjectMapper objectMapper = new ObjectMapper(); ObjectNode audioMetadata = objectMapper.createObjectNode(); audioMetadata.put("subscriptionId", "subscriptionId"); audioMetadata.put("encoding", "PCM"); audioMetadata.put("sampleRate", 8); audioMetadata.put("channels", 2); audioMetadata.put("length", 100.1); return objectMapper.writerWithDefaultPrettyPrinter().writeValueAsString(audioMetadata); } catch (Exception e) { throw new RuntimeException(); } } private String createJsonData() { try { ObjectMapper objectMapper = new ObjectMapper(); ObjectNode audioData = objectMapper.createObjectNode(); audioData.put("timestamp", "2022-08-23T11:48:05Z"); audioData.put("participantRawID", "participantId"); audioData.put("data", "AQIDBAU="); audioData.put("silent", false); return objectMapper.writerWithDefaultPrettyPrinter().writeValueAsString(audioData); } catch (Exception e) { throw new RuntimeException(); } } }
class MediaStreamingPackageParserUnitTests { @Test public void parseAudioData() { String audioJson = "{" + "\"kind\": \"AudioData\"," + "\"timestamp\": \"2022-10-03T19:16:12.925Z\"," + "\"participantRawID\": \"participantId\"," + "\"data\": \"AQIDBAU=\"," + "\"silent\": false" + "}"; MediaStreamingAudio mediaStreamingAudio = (MediaStreamingAudio) MediaStreamingPackageParser.parse(audioJson); assertNotNull(mediaStreamingAudio); checkAudioData(mediaStreamingAudio); } @Test @Test public void parseBinaryAudioData() { String jsonData = createJsonData(); MediaStreamingAudio mediaStreamingAudio = (MediaStreamingAudio) MediaStreamingPackageParser.parse(BinaryData.fromString(jsonData)); checkAudioData(mediaStreamingAudio); } @Test public void parseBinaryAudioMetadata() { String jsonMetadata = createJsonMetadata(); MediaStreamingMetadata mediaStreamingMetadata = (MediaStreamingMetadata) MediaStreamingPackageParser.parse(BinaryData.fromString(jsonMetadata)); checkAudioMetadata(mediaStreamingMetadata); } @Test public void parseBinaryArrayAudioData() { String jsonData = createJsonData(); MediaStreamingAudio mediaStreamingAudio = (MediaStreamingAudio) MediaStreamingPackageParser.parse(jsonData.getBytes(StandardCharsets.UTF_8)); checkAudioData(mediaStreamingAudio); } @Test public void parseBinaryArrayAudioMetadata() { String jsonMetadata = createJsonMetadata(); MediaStreamingMetadata mediaStreamingMetadata = (MediaStreamingMetadata) MediaStreamingPackageParser.parse(jsonMetadata.getBytes(StandardCharsets.UTF_8)); checkAudioMetadata(mediaStreamingMetadata); } private void checkAudioData(MediaStreamingAudio mediaStreamingAudio) { assertEquals(OffsetDateTime.parse("2022-10-03T19:16:12.925Z"), mediaStreamingAudio.getTimestamp()); assertEquals("participantId", mediaStreamingAudio.getParticipant().getRawId()); assertEquals("AQIDBAU=", mediaStreamingAudio.getAudioData()); assertEquals(false, mediaStreamingAudio.isSilent()); } private void checkAudioMetadata(MediaStreamingMetadata 
mediaStreamingMetadata) { assertEquals("subscriptionId", mediaStreamingMetadata.getMediaSubscriptionId()); assertEquals("PCM", mediaStreamingMetadata.getEncoding()); assertEquals(8, mediaStreamingMetadata.getSampleRate()); assertEquals(2, mediaStreamingMetadata.getChannels()); assertEquals(100, mediaStreamingMetadata.getLength()); } private String createJsonMetadata() { try { ObjectMapper objectMapper = new ObjectMapper(); ObjectNode audioMetadata = objectMapper.createObjectNode(); audioMetadata.put("kind", "AudioMetadata"); audioMetadata.put("subscriptionId", "subscriptionId"); audioMetadata.put("encoding", "PCM"); audioMetadata.put("sampleRate", 8); audioMetadata.put("channels", 2); audioMetadata.put("length", 100); /*ObjectNode root = objectMapper.createObjectNode(); root.put("kind", "AudioMetadata"); root.put("audioMetadata", audioMetadata);*/ return objectMapper.writerWithDefaultPrettyPrinter().writeValueAsString(audioMetadata); } catch (Exception e) { throw new RuntimeException(); } } private String createJsonData() { try { ObjectMapper objectMapper = new ObjectMapper(); ObjectNode audioData = objectMapper.createObjectNode(); audioData.put("kind", "AudioData"); audioData.put("timestamp", "2022-10-03T19:16:12.925Z"); audioData.put("participantRawID", "participantId"); audioData.put("data", "AQIDBAU="); audioData.put("silent", false); /*ObjectNode root = objectMapper.createObjectNode(); root.put("kind", "AudioData"); root.put("audioData", audioData);*/ return objectMapper.writerWithDefaultPrettyPrinter().writeValueAsString(audioData); } catch (Exception e) { throw new RuntimeException(); } } }
We should map the `recordingRedactors` to `TextProxySanitizer` to help with migration of existing tests that use this method.
public HttpPipelinePolicy getRecordPolicy(List<Function<String, String>> recordingRedactors) { if (enableTestProxy) { proxyVariableQueue.clear(); return startProxyRecording(); } return new RecordNetworkCallPolicy(recordedData, recordingRedactors); }
return startProxyRecording();
public HttpPipelinePolicy getRecordPolicy(List<Function<String, String>> recordingRedactors) { if (testProxyEnabled) { return getProxyRecordingPolicy(); } return new RecordNetworkCallPolicy(recordedData, recordingRedactors); }
class InterceptorManager implements AutoCloseable { private static final String RECORD_FOLDER = "session-records/"; private static final ObjectMapper RECORD_MAPPER = new ObjectMapper().enable(SerializationFeature.INDENT_OUTPUT); private static final ClientLogger LOGGER = new ClientLogger(InterceptorManager.class); private final Map<String, String> textReplacementRules; private final String testName; private final String playbackRecordName; private final TestMode testMode; private final boolean allowedToReadRecordedValues; private final boolean allowedToRecordValues; private final RecordedData recordedData; private final boolean enableTestProxy; private TestProxyRecordPolicy testProxyRecordPolicy; private TestProxyPlaybackClient testProxyPlaybackClient; private final Queue<String> proxyVariableQueue = new LinkedList<>(); private List<TestProxySanitizer> recordSanitizers; private List<TestProxyMatcher> customMatcher; /** * Creates a new InterceptorManager that either replays test-session records or saves them. * * <ul> * <li>If {@code testMode} is {@link TestMode * record to read network calls from.</li> * <li>If {@code testMode} is {@link TestMode * all the network calls to it.</li> * </ul> * * The test session records are persisted in the path: "<i>session-records/{@code testName}.json</i>" * * @param testName Name of the test session record. * @param testMode The {@link TestMode} for this interceptor. * @throws UncheckedIOException If {@code testMode} is {@link TestMode * could not be located or the data could not be deserialized into an instance of {@link RecordedData}. * @throws NullPointerException If {@code testName} is {@code null}. * @deprecated Use {@link */ @Deprecated public InterceptorManager(String testName, TestMode testMode) { this(testName, testName, testMode, false, false); } /** * Creates a new InterceptorManager that either replays test-session records or saves them. 
* * <ul> * <li>If {@code testMode} is {@link TestMode * record to read network calls from.</li> * <li>If {@code testMode} is {@link TestMode * all the network calls to it.</li> * <li>If {@code testMode} is {@link TestMode * record.</li> * </ul> * * The test session records are persisted in the path: "<i>session-records/{@code testName}.json</i>" * * @param testContextManager Contextual information about the test being ran, such as test name, {@link TestMode}, * and others. * @throws UncheckedIOException If {@code testMode} is {@link TestMode * could not be located or the data could not be deserialized into an instance of {@link RecordedData}. * @throws NullPointerException If {@code testName} is {@code null}. */ public InterceptorManager(TestContextManager testContextManager) { this(testContextManager.getTestName(), testContextManager.getTestPlaybackRecordingName(), testContextManager.getTestMode(), testContextManager.doNotRecordTest(), testContextManager.getEnableTestProxy()); } private InterceptorManager(String testName, String playbackRecordName, TestMode testMode, boolean doNotRecord, boolean enableTestProxy) { this.enableTestProxy = enableTestProxy; Objects.requireNonNull(testName, "'testName' cannot be null."); this.testName = testName; this.playbackRecordName = CoreUtils.isNullOrEmpty(playbackRecordName) ? testName : playbackRecordName; this.testMode = testMode; this.textReplacementRules = new HashMap<>(); this.allowedToReadRecordedValues = (testMode == TestMode.PLAYBACK && !doNotRecord); this.allowedToRecordValues = (testMode == TestMode.RECORD && !doNotRecord); if (!enableTestProxy && allowedToReadRecordedValues) { this.recordedData = readDataFromFile(); } else if (!enableTestProxy && allowedToRecordValues) { this.recordedData = new RecordedData(); } else { this.recordedData = null; } } /** * Creates a new InterceptorManager that replays test session records. 
It takes a set of * {@code textReplacementRules}, that can be used by {@link PlaybackClient} to replace values in a * {@link NetworkCallRecord * * The test session records are read from: "<i>session-records/{@code testName}.json</i>" * * @param testName Name of the test session record. * @param textReplacementRules A set of rules to replace text in {@link NetworkCallRecord * playing back network calls. * @throws UncheckedIOException An existing test session record could not be located or the data could not be * deserialized into an instance of {@link RecordedData}. * @throws NullPointerException If {@code testName} or {@code textReplacementRules} is {@code null}. * @deprecated Use {@link */ @Deprecated public InterceptorManager(String testName, Map<String, String> textReplacementRules) { this(testName, textReplacementRules, false, testName); } /** * Creates a new InterceptorManager that replays test session records. It takes a set of * {@code textReplacementRules}, that can be used by {@link PlaybackClient} to replace values in a * {@link NetworkCallRecord * * The test session records are read from: "<i>session-records/{@code testName}.json</i>" * * @param testName Name of the test session record. * @param textReplacementRules A set of rules to replace text in {@link NetworkCallRecord * playing back network calls. * @param doNotRecord Flag indicating whether network calls should be record or played back. * @throws UncheckedIOException An existing test session record could not be located or the data could not be * deserialized into an instance of {@link RecordedData}. * @throws NullPointerException If {@code testName} or {@code textReplacementRules} is {@code null}. * @deprecated Use {@link */ @Deprecated public InterceptorManager(String testName, Map<String, String> textReplacementRules, boolean doNotRecord) { this(testName, textReplacementRules, doNotRecord, testName); } /** * Creates a new InterceptorManager that replays test session records. 
It takes a set of * {@code textReplacementRules}, that can be used by {@link PlaybackClient} to replace values in a * {@link NetworkCallRecord * * The test session records are read from: "<i>session-records/{@code testName}.json</i>" * * @param testName Name of the test. * @param textReplacementRules A set of rules to replace text in {@link NetworkCallRecord * playing back network calls. * @param doNotRecord Flag indicating whether network calls should be record or played back. * @param playbackRecordName Full name of the test including its iteration, used as the playback record name. * @throws UncheckedIOException An existing test session record could not be located or the data could not be * deserialized into an instance of {@link RecordedData}. * @throws NullPointerException If {@code testName} or {@code textReplacementRules} is {@code null}. */ public InterceptorManager(String testName, Map<String, String> textReplacementRules, boolean doNotRecord, String playbackRecordName) { Objects.requireNonNull(testName, "'testName' cannot be null."); Objects.requireNonNull(textReplacementRules, "'textReplacementRules' cannot be null."); this.testName = testName; this.playbackRecordName = CoreUtils.isNullOrEmpty(playbackRecordName) ? testName : playbackRecordName; this.testMode = TestMode.PLAYBACK; this.allowedToReadRecordedValues = !doNotRecord; this.allowedToRecordValues = false; this.enableTestProxy = false; this.recordedData = allowedToReadRecordedValues ? readDataFromFile() : null; this.textReplacementRules = textReplacementRules; } /** * Gets whether this InterceptorManager is in playback mode. * * @return true if the InterceptorManager is in playback mode and false otherwise. */ public boolean isPlaybackMode() { return testMode == TestMode.PLAYBACK; } /** * Gets whether this InterceptorManager is in live mode. * * @return true if the InterceptorManager is in live mode and false otherwise. 
*/ public boolean isLiveMode() { return testMode == TestMode.LIVE; } /** * Gets the recorded data InterceptorManager is keeping track of. * * @return The recorded data managed by InterceptorManager. */ public RecordedData getRecordedData() { return recordedData; } /** * A {@link Supplier} for retrieving a variable from a test proxy recording. * @return The supplier for retrieving a variable. */ public Supplier<String> getProxyVariableSupplier() { return () -> { Objects.requireNonNull(this.testProxyPlaybackClient, "Playback must be started to retrieve values"); return proxyVariableQueue.remove(); }; } /** * Get a {@link Consumer} for adding variables used in test proxy tests. * @return The consumer for adding a variable. */ public Consumer<String> getProxyVariableConsumer() { return proxyVariableQueue::add; } /** * Gets a new HTTP pipeline policy that records network calls and its data is managed by * {@link InterceptorManager}. * * @return HttpPipelinePolicy to record network calls. */ public HttpPipelinePolicy getRecordPolicy() { if (enableTestProxy) { return startProxyRecording(); } return getRecordPolicy(Collections.emptyList()); } /** * Gets a new HTTP pipeline policy that records network calls. The recorded content is redacted by the given list of * redactor functions to hide sensitive information. * * @param recordingRedactors The custom redactor functions that are applied in addition to the default redactor * functions defined in {@link RecordingRedactor}. * @return {@link HttpPipelinePolicy} to record network calls. */ /** * Gets a new HTTP client that plays back test session records managed by {@link InterceptorManager}. * * @return An HTTP client that plays back network calls from its recorded data. 
*/ public HttpClient getPlaybackClient() { if (enableTestProxy) { testProxyPlaybackClient = new TestProxyPlaybackClient(this.recordSanitizers, this.customMatcher); proxyVariableQueue.addAll(testProxyPlaybackClient.startPlayback(playbackRecordName)); return testProxyPlaybackClient; } else { return new PlaybackClient(recordedData, textReplacementRules); } } /** * Disposes of resources used by this InterceptorManager. * * If {@code testMode} is {@link TestMode * "<i>session-records/{@code testName}.json</i>" */ @Override public void close() { if (allowedToRecordValues) { if (enableTestProxy) { testProxyRecordPolicy.stopRecording(proxyVariableQueue); } else { try (BufferedWriter writer = Files.newBufferedWriter(createRecordFile(playbackRecordName).toPath())) { RECORD_MAPPER.writeValue(writer, recordedData); } catch (IOException ex) { throw LOGGER.logExceptionAsError( new UncheckedIOException("Unable to write data to playback file.", ex)); } } } else if (isPlaybackMode() && enableTestProxy) { testProxyPlaybackClient.stopPlayback(); } } private RecordedData readDataFromFile() { File recordFile = getRecordFile(); try (BufferedReader reader = Files.newBufferedReader(recordFile.toPath())) { return RECORD_MAPPER.readValue(reader, RecordedData.class); } catch (IOException ex) { throw LOGGER.logExceptionAsWarning(new UncheckedIOException(ex)); } } /** * Get the {@link File} pointing to the folder where session records live. * @return The session-records folder. * @throws IllegalStateException if the session-records folder cannot be found. */ public static File getRecordFolder() { URL folderUrl = InterceptorManager.class.getClassLoader().getResource(RECORD_FOLDER); if (folderUrl != null) { return new File(toURI(folderUrl, LOGGER)); } throw new IllegalStateException("Unable to locate session-records folder. Please create a session-records " + "folder in '/src/test/resources' of the module (ex. 
for azure-core-test this is " + "'/sdk/core/azure-core-test/src/test/resources/session-records')."); } private static URI toURI(URL url, ClientLogger logger) { try { return url.toURI(); } catch (URISyntaxException ex) { throw logger.logExceptionAsError(new IllegalStateException(ex)); } } private HttpPipelinePolicy startProxyRecording() { this.testProxyRecordPolicy = new TestProxyRecordPolicy(this.recordSanitizers); testProxyRecordPolicy.startRecording(playbackRecordName); return testProxyRecordPolicy; } /* * Attempts to retrieve the playback file, if it is not found an exception is thrown as playback can't continue. */ private File getRecordFile() { File recordFolder = getRecordFolder(); File playbackFile = new File(recordFolder, playbackRecordName + ".json"); File oldPlaybackFile = new File(recordFolder, testName + ".json"); if (!playbackFile.exists() && !oldPlaybackFile.exists()) { throw LOGGER.logExceptionAsError(new RuntimeException(String.format( "Missing both new and old playback files. Files are %s and %s.", playbackFile.getPath(), oldPlaybackFile.getPath()))); } if (playbackFile.exists()) { LOGGER.info("==> Playback file path: {}", playbackFile.getPath()); return playbackFile; } else { LOGGER.info("==> Playback file path: {}", oldPlaybackFile.getPath()); return oldPlaybackFile; } } /* * Retrieves or creates the file that will be used to store the recorded test values. 
*/ private File createRecordFile(String testName) throws IOException { File recordFolder = getRecordFolder(); if (!recordFolder.exists()) { if (recordFolder.mkdir()) { LOGGER.verbose("Created directory: {}", recordFolder.getPath()); } } File recordFile = new File(recordFolder, testName + ".json"); if (recordFile.createNewFile()) { LOGGER.verbose("Created record file: {}", recordFile.getPath()); } LOGGER.info("==> Playback file path: " + recordFile); return recordFile; } /** * Add text replacement rule (regex as key, the replacement text as value) into * {@link InterceptorManager * * @param regex the pattern to locate the position of replacement * @param replacement the replacement text */ public void addTextReplacementRule(String regex, String replacement) { textReplacementRules.put(regex, replacement); } /** * Add text replacement rule (regex as key, the replacement text as value) into {@code recordSanitizers} * @param testProxySanitizers the list of replacement regex and rules. */ public void addSanitizers(List<TestProxySanitizer> testProxySanitizers) { this.recordSanitizers = testProxySanitizers; } /** * Add matcher rules to match recorded data in playback. * @param testProxyMatchers the list of matcher rules when playing back recorded data. */ public void addMatchers(List<TestProxyMatcher> testProxyMatchers) { this.customMatcher = testProxyMatchers; } }
class InterceptorManager implements AutoCloseable { private static final ObjectMapper RECORD_MAPPER = new ObjectMapper().enable(SerializationFeature.INDENT_OUTPUT); private static final ClientLogger LOGGER = new ClientLogger(InterceptorManager.class); private final Map<String, String> textReplacementRules; private final String testName; private final String playbackRecordName; private final TestMode testMode; private final boolean allowedToReadRecordedValues; private final boolean allowedToRecordValues; private final RecordedData recordedData; private final boolean testProxyEnabled; private TestProxyRecordPolicy testProxyRecordPolicy; private TestProxyPlaybackClient testProxyPlaybackClient; private final Queue<String> proxyVariableQueue = new LinkedList<>(); /** * Creates a new InterceptorManager that either replays test-session records or saves them. * * <ul> * <li>If {@code testMode} is {@link TestMode * record to read network calls from.</li> * <li>If {@code testMode} is {@link TestMode * all the network calls to it.</li> * </ul> * * The test session records are persisted in the path: "<i>session-records/{@code testName}.json</i>" * * @param testName Name of the test session record. * @param testMode The {@link TestMode} for this interceptor. * @throws UncheckedIOException If {@code testMode} is {@link TestMode * could not be located or the data could not be deserialized into an instance of {@link RecordedData}. * @throws NullPointerException If {@code testName} is {@code null}. * @deprecated Use {@link */ @Deprecated public InterceptorManager(String testName, TestMode testMode) { this(testName, testName, testMode, false, false); } /** * Creates a new InterceptorManager that either replays test-session records or saves them. 
* * <ul> * <li>If {@code testMode} is {@link TestMode * record to read network calls from.</li> * <li>If {@code testMode} is {@link TestMode * all the network calls to it.</li> * <li>If {@code testMode} is {@link TestMode * record.</li> * </ul> * * The test session records are persisted in the path: "<i>session-records/{@code testName}.json</i>" * * @param testContextManager Contextual information about the test being ran, such as test name, {@link TestMode}, * and others. * @throws UncheckedIOException If {@code testMode} is {@link TestMode * could not be located or the data could not be deserialized into an instance of {@link RecordedData}. * @throws NullPointerException If {@code testName} is {@code null}. */ public InterceptorManager(TestContextManager testContextManager) { this(testContextManager.getTestName(), testContextManager.getTestPlaybackRecordingName(), testContextManager.getTestMode(), testContextManager.doNotRecordTest(), testContextManager.isTestProxyEnabled()); } private InterceptorManager(String testName, String playbackRecordName, TestMode testMode, boolean doNotRecord, boolean enableTestProxy) { this.testProxyEnabled = enableTestProxy; Objects.requireNonNull(testName, "'testName' cannot be null."); this.testName = testName; this.playbackRecordName = CoreUtils.isNullOrEmpty(playbackRecordName) ? testName : playbackRecordName; this.testMode = testMode; this.textReplacementRules = new HashMap<>(); this.allowedToReadRecordedValues = (testMode == TestMode.PLAYBACK && !doNotRecord); this.allowedToRecordValues = (testMode == TestMode.RECORD && !doNotRecord); if (!enableTestProxy && allowedToReadRecordedValues) { this.recordedData = readDataFromFile(); } else if (!enableTestProxy && allowedToRecordValues) { this.recordedData = new RecordedData(); } else { this.recordedData = null; } } /** * Creates a new InterceptorManager that replays test session records. 
It takes a set of * {@code textReplacementRules}, that can be used by {@link PlaybackClient} to replace values in a * {@link NetworkCallRecord * * The test session records are read from: "<i>session-records/{@code testName}.json</i>" * * @param testName Name of the test session record. * @param textReplacementRules A set of rules to replace text in {@link NetworkCallRecord * playing back network calls. * @throws UncheckedIOException An existing test session record could not be located or the data could not be * deserialized into an instance of {@link RecordedData}. * @throws NullPointerException If {@code testName} or {@code textReplacementRules} is {@code null}. * @deprecated Use {@link */ @Deprecated public InterceptorManager(String testName, Map<String, String> textReplacementRules) { this(testName, textReplacementRules, false, testName); } /** * Creates a new InterceptorManager that replays test session records. It takes a set of * {@code textReplacementRules}, that can be used by {@link PlaybackClient} to replace values in a * {@link NetworkCallRecord * * The test session records are read from: "<i>session-records/{@code testName}.json</i>" * * @param testName Name of the test session record. * @param textReplacementRules A set of rules to replace text in {@link NetworkCallRecord * playing back network calls. * @param doNotRecord Flag indicating whether network calls should be record or played back. * @throws UncheckedIOException An existing test session record could not be located or the data could not be * deserialized into an instance of {@link RecordedData}. * @throws NullPointerException If {@code testName} or {@code textReplacementRules} is {@code null}. * @deprecated Use {@link */ @Deprecated public InterceptorManager(String testName, Map<String, String> textReplacementRules, boolean doNotRecord) { this(testName, textReplacementRules, doNotRecord, testName); } /** * Creates a new InterceptorManager that replays test session records. 
It takes a set of * {@code textReplacementRules}, that can be used by {@link PlaybackClient} to replace values in a * {@link NetworkCallRecord * * The test session records are read from: "<i>session-records/{@code testName}.json</i>" * * @param testName Name of the test. * @param textReplacementRules A set of rules to replace text in {@link NetworkCallRecord * playing back network calls. * @param doNotRecord Flag indicating whether network calls should be record or played back. * @param playbackRecordName Full name of the test including its iteration, used as the playback record name. * @throws UncheckedIOException An existing test session record could not be located or the data could not be * deserialized into an instance of {@link RecordedData}. * @throws NullPointerException If {@code testName} or {@code textReplacementRules} is {@code null}. */ public InterceptorManager(String testName, Map<String, String> textReplacementRules, boolean doNotRecord, String playbackRecordName) { Objects.requireNonNull(testName, "'testName' cannot be null."); Objects.requireNonNull(textReplacementRules, "'textReplacementRules' cannot be null."); this.testName = testName; this.playbackRecordName = CoreUtils.isNullOrEmpty(playbackRecordName) ? testName : playbackRecordName; this.testMode = TestMode.PLAYBACK; this.allowedToReadRecordedValues = !doNotRecord; this.allowedToRecordValues = false; this.testProxyEnabled = false; this.recordedData = allowedToReadRecordedValues ? readDataFromFile() : null; this.textReplacementRules = textReplacementRules; } /** * Gets whether this InterceptorManager is in playback mode. * * @return true if the InterceptorManager is in playback mode and false otherwise. */ public boolean isPlaybackMode() { return testMode == TestMode.PLAYBACK; } /** * Gets whether this InterceptorManager is in live mode. * * @return true if the InterceptorManager is in live mode and false otherwise. 
*/ public boolean isLiveMode() { return testMode == TestMode.LIVE; } /** * Gets the recorded data InterceptorManager is keeping track of. * * @return The recorded data managed by InterceptorManager. */ public RecordedData getRecordedData() { return recordedData; } /** * A {@link Supplier} for retrieving a variable from a test proxy recording. * @return The supplier for retrieving a variable. */ public Supplier<String> getProxyVariableSupplier() { return () -> { Objects.requireNonNull(this.testProxyPlaybackClient, "Playback must be started to retrieve values"); if (!CoreUtils.isNullOrEmpty(proxyVariableQueue)) { return proxyVariableQueue.remove(); } else { throw LOGGER.logExceptionAsError(new RuntimeException("'proxyVariableQueue' cannot be null or empty.")); } }; } /** * Get a {@link Consumer} for adding variables used in test proxy tests. * @return The consumer for adding a variable. */ public Consumer<String> getProxyVariableConsumer() { return proxyVariableQueue::add; } /** * Gets a new HTTP pipeline policy that records network calls and its data is managed by * {@link InterceptorManager}. * * @return HttpPipelinePolicy to record network calls. */ public HttpPipelinePolicy getRecordPolicy() { if (testProxyEnabled) { return getProxyRecordingPolicy(); } return getRecordPolicy(Collections.emptyList()); } /** * Gets a new HTTP pipeline policy that records network calls. The recorded content is redacted by the given list of * redactor functions to hide sensitive information. * * @param recordingRedactors The custom redactor functions that are applied in addition to the default redactor * functions defined in {@link RecordingRedactor}. * @return {@link HttpPipelinePolicy} to record network calls. */ /** * Gets a new HTTP client that plays back test session records managed by {@link InterceptorManager}. * * @return An HTTP client that plays back network calls from its recorded data. 
*/ public HttpClient getPlaybackClient() { if (testProxyEnabled) { if (testProxyPlaybackClient == null) { testProxyPlaybackClient = new TestProxyPlaybackClient(); proxyVariableQueue.addAll(testProxyPlaybackClient.startPlayback(playbackRecordName)); } return testProxyPlaybackClient; } else { return new PlaybackClient(recordedData, textReplacementRules); } } /** * Disposes of resources used by this InterceptorManager. * * If {@code testMode} is {@link TestMode * "<i>session-records/{@code testName}.json</i>" */ @Override public void close() { if (allowedToRecordValues) { if (testProxyEnabled) { testProxyRecordPolicy.stopRecording(proxyVariableQueue); } else { try (BufferedWriter writer = Files.newBufferedWriter(createRecordFile(playbackRecordName).toPath())) { RECORD_MAPPER.writeValue(writer, recordedData); } catch (IOException ex) { throw LOGGER.logExceptionAsError( new UncheckedIOException("Unable to write data to playback file.", ex)); } } } else if (isPlaybackMode() && testProxyEnabled && allowedToReadRecordedValues) { testProxyPlaybackClient.stopPlayback(); } } private RecordedData readDataFromFile() { File recordFile = getRecordFile(); try (BufferedReader reader = Files.newBufferedReader(recordFile.toPath())) { return RECORD_MAPPER.readValue(reader, RecordedData.class); } catch (IOException ex) { throw LOGGER.logExceptionAsWarning(new UncheckedIOException(ex)); } } private HttpPipelinePolicy getProxyRecordingPolicy() { if (testProxyRecordPolicy == null) { testProxyRecordPolicy = new TestProxyRecordPolicy(); testProxyRecordPolicy.startRecording(playbackRecordName); } return testProxyRecordPolicy; } /* * Attempts to retrieve the playback file, if it is not found an exception is thrown as playback can't continue. 
*/ private File getRecordFile() { File recordFolder = TestUtils.getRecordFolder(); File playbackFile = new File(recordFolder, playbackRecordName + ".json"); File oldPlaybackFile = new File(recordFolder, testName + ".json"); if (!playbackFile.exists() && !oldPlaybackFile.exists()) { throw LOGGER.logExceptionAsError(new RuntimeException(String.format( "Missing both new and old playback files. Files are %s and %s.", playbackFile.getPath(), oldPlaybackFile.getPath()))); } if (playbackFile.exists()) { LOGGER.info("==> Playback file path: {}", playbackFile.getPath()); return playbackFile; } else { LOGGER.info("==> Playback file path: {}", oldPlaybackFile.getPath()); return oldPlaybackFile; } } /* * Retrieves or creates the file that will be used to store the recorded test values. */ private File createRecordFile(String testName) throws IOException { File recordFolder = TestUtils.getRecordFolder(); if (!recordFolder.exists()) { if (recordFolder.mkdir()) { LOGGER.verbose("Created directory: {}", recordFolder.getPath()); } } File recordFile = new File(recordFolder, testName + ".json"); if (recordFile.createNewFile()) { LOGGER.verbose("Created record file: {}", recordFile.getPath()); } LOGGER.info("==> Playback file path: " + recordFile); return recordFile; } /** * Add text replacement rule (regex as key, the replacement text as value) into * {@link InterceptorManager * * @param regex the pattern to locate the position of replacement * @param replacement the replacement text */ public void addTextReplacementRule(String regex, String replacement) { textReplacementRules.put(regex, replacement); } /** * Add sanitizer rule for sanitization during record or playback. * @param testProxySanitizers the list of replacement regex and rules. * @throws RuntimeException Neither playback or record has started. 
*/ public void addSanitizers(List<TestProxySanitizer> testProxySanitizers) { if (testProxyPlaybackClient != null) { testProxyPlaybackClient.addProxySanitization(testProxySanitizers); } else if (testProxyRecordPolicy != null) { testProxyRecordPolicy.addProxySanitization(testProxySanitizers); } else { throw new RuntimeException("Playback or record must have been started before adding sanitizers."); } } /** * Add matcher rules to match recorded data in playback. * @param testProxyMatchers the list of matcher rules when playing back recorded data. * @throws RuntimeException Playback has not started. */ public void addMatchers(List<TestProxyRequestMatcher> testProxyMatchers) { if (testProxyPlaybackClient != null) { testProxyPlaybackClient.addMatcherRequests(testProxyMatchers); } else { throw new RuntimeException("Playback must have been started before adding matchers."); } } }
I wonder if it would be better to let the constructor handle the data type translations.
/**
 * Parses a Media Streaming package from its JSON String form.
 *
 * @param stringJson The MediaStreaming package as a String.
 * @return a {@code MediaStreamingAudio}, a {@code MediaStreamingMetadata}, or {@code null} when the payload
 * matches neither package shape.
 * @throws RuntimeException Any exception that occurs while parsing, logged and rethrown.
 */
public static MediaStreamingPackageBase parse(String stringJson) {
    try {
        ObjectMapper mapper = new ObjectMapper();
        mapper.disable(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES);
        JsonNode jsonData = mapper.readTree(stringJson);

        // Check for the metadata marker FIRST: a metadata package contains "encoding" but its
        // "audioMetadata"/"Metadata" text also contains the lowercase substring "data", so testing
        // contains("data") first mis-routed metadata packages into the audio branch.
        if (stringJson.contains("encoding")) {
            MediaStreamingMetadataInternal metadataInternal
                = mapper.convertValue(jsonData, MediaStreamingMetadataInternal.class);
            return new MediaStreamingMetadata(metadataInternal.getMediaSubscriptionId(),
                metadataInternal.getEncoding(), metadataInternal.getSampleRate(),
                metadataInternal.getChannels(), metadataInternal.getLength());
        }

        // Audio packages carry the base64-encoded payload under "data".
        if (stringJson.contains("data")) {
            MediaStreamingAudioInternal audioInternal
                = mapper.convertValue(jsonData, MediaStreamingAudioInternal.class);
            return new MediaStreamingAudio(audioInternal.getAudioData(),
                OffsetDateTime.parse(audioInternal.getTimestamp(), DateTimeFormatter.ISO_OFFSET_DATE_TIME),
                new CommunicationUserIdentifier(audioInternal.getParticipantRawID()),
                audioInternal.isSilent());
        }

        // Neither marker present: not a recognized media streaming package.
        return null;
    } catch (RuntimeException e) {
        throw LOGGER.logExceptionAsError(e);
    } catch (JsonProcessingException e) {
        throw LOGGER.logExceptionAsError(new RuntimeException(e));
    }
}
return new MediaStreamingAudio(audioInternal.getAudioData(), OffsetDateTime.parse(audioInternal.getTimestamp(), DateTimeFormatter.ISO_OFFSET_DATE_TIME), new CommunicationUserIdentifier(audioInternal.getParticipantRawID()), audioInternal.isSilent());
/**
 * Parses a Media Streaming package out of its JSON String representation.
 *
 * @param stringJson The MediaStreaming package as a String.
 * @return the parsed package, or {@code null} when no known package marker is present.
 * @throws RuntimeException Any exception that occurs while parsing, logged and rethrown.
 */
public static MediaStreamingPackageBase parse(String stringJson) {
    try {
        ObjectMapper mapper = new ObjectMapper();
        mapper.disable(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES);
        JsonNode root = mapper.readTree(stringJson);

        // An "AudioData" marker identifies an audio packet.
        if (stringJson.contains("AudioData")) {
            MediaStreamingAudioInternal audio = mapper.convertValue(root, MediaStreamingAudioInternal.class);
            return new MediaStreamingAudio(audio.getAudioData(), audio.getTimestamp(),
                audio.getParticipantRawID(), audio.isSilent());
        }

        // An "AudioMetadata" marker identifies a metadata packet.
        if (stringJson.contains("AudioMetadata")) {
            MediaStreamingMetadataInternal metadata
                = mapper.convertValue(root, MediaStreamingMetadataInternal.class);
            return new MediaStreamingMetadata(metadata.getMediaSubscriptionId(), metadata.getEncoding(),
                metadata.getSampleRate(), metadata.getChannels(), metadata.getLength());
        }

        // Neither marker present: not a recognized media streaming package.
        return null;
    } catch (JsonProcessingException e) {
        throw LOGGER.logExceptionAsError(new RuntimeException(e));
    } catch (RuntimeException e) {
        throw LOGGER.logExceptionAsError(e);
    }
}
class MediaStreamingPackageParser {
    private static final ClientLogger LOGGER = new ClientLogger(MediaStreamingPackageParser.class);

    /***
     * Parses a Media Streaming package out of a {@code BinaryData} payload.
     *
     * @param json The MediaStreaming package as a BinaryData object.
     * @throws RuntimeException Any exceptions occurs at runtime.
     * @return a MediaStreamingPackageBase object.
     */
    public static MediaStreamingPackageBase parse(BinaryData json) {
        // Delegate to the String overload, which performs the actual parsing.
        String payload = json.toString();
        return parse(payload);
    }

    /***
     * Parses a Media Streaming package out of a UTF-8 encoded byte array.
     *
     * @param receivedBytes The MediaStreaming package as a byte[].
     * @throws RuntimeException Any exceptions occurs at runtime.
     * @return a MediaStreamingPackageBase object.
     */
    public static MediaStreamingPackageBase parse(byte[] receivedBytes) {
        // Bytes are interpreted as UTF-8 text, then routed through the String overload.
        String payload = new String(receivedBytes, StandardCharsets.UTF_8);
        return parse(payload);
    }

    /***
     * Parses a Media Streaming package from String.
     *
     * @param stringJson The MediaStreaming package as a String.
     * @throws RuntimeException Any exceptions occurs at runtime.
     * @return a MediaStreamingPackageBase object.
     */
}
class MediaStreamingPackageParser {
    private static final ClientLogger LOGGER = new ClientLogger(MediaStreamingPackageParser.class);

    /***
     * Parses a Media Streaming package carried in a {@code BinaryData} instance.
     *
     * @param json The MediaStreaming package as a BinaryData object.
     * @throws RuntimeException Any exceptions occurs at runtime.
     * @return a MediaStreamingPackageBase object.
     */
    public static MediaStreamingPackageBase parse(BinaryData json) {
        // All overloads funnel into the String-based parser.
        String text = json.toString();
        return parse(text);
    }

    /***
     * Parses a Media Streaming package carried in a UTF-8 byte array.
     *
     * @param receivedBytes The MediaStreaming package as a byte[].
     * @throws RuntimeException Any exceptions occurs at runtime.
     * @return a MediaStreamingPackageBase object.
     */
    public static MediaStreamingPackageBase parse(byte[] receivedBytes) {
        // Decode the bytes as UTF-8 before handing off to the String-based parser.
        String text = new String(receivedBytes, StandardCharsets.UTF_8);
        return parse(text);
    }

    /***
     * Parses a Media Streaming package from String.
     *
     * @param stringJson The MediaStreaming package as a String.
     * @throws RuntimeException Any exceptions occurs at runtime.
     * @return a MediaStreamingPackageBase object.
     */
}
I'm sorry, I should have checked this before. This data set is wrong; this is the right contract: ``` { "kind": "AudioData", "timestamp": "2022-09-15T01:30:35.356Z", "participantRawId": "abc123", "data": "Bg==", // to be documented as base64-encoded string; not byte[] "silent": false } ``` Everything is flattened. I don't know if this has implications for your code, but you could check it out. Reference: https://skype.visualstudio.com/SPOOL/_workitems/edit/2968625
public void parseAudioData() {
    // Nested payload shape: the audio fields live under an "audioData" envelope.
    final String audioJson =
        "{\"kind\": \"AudioData\","
            + "\"audioData\": {"
            + "\"timestamp\": \"2022-10-03T19:16:12.925Z\","
            + "\"participantRawID\": \"participantId\","
            + "\"data\": \"AQIDBAU=\","
            + "\"silent\": false"
            + "}}";

    MediaStreamingAudio parsed = (MediaStreamingAudio) MediaStreamingPackageParser.parse(audioJson);

    assertNotNull(parsed);
    checkAudioData(parsed);
}
+ "\"kind\": \"AudioData\","
public void parseAudioData() {
    // Flattened payload shape: every audio field sits at the top level next to "kind".
    final String audioJson =
        "{\"kind\": \"AudioData\","
            + "\"timestamp\": \"2022-10-03T19:16:12.925Z\","
            + "\"participantRawID\": \"participantId\","
            + "\"data\": \"AQIDBAU=\","
            + "\"silent\": false"
            + "}";

    MediaStreamingAudio parsed = (MediaStreamingAudio) MediaStreamingPackageParser.parse(audioJson);

    assertNotNull(parsed);
    checkAudioData(parsed);
}
class MediaStreamingPackageParserUnitTests {
    // NOTE(review): the stray duplicated @Test annotation has been removed — @Test is not a
    // repeatable annotation, so "@Test @Test" does not compile.
    @Test
    public void parseAudioMetadata() {
        String metadataJson = "{"
            + " \"kind\": \"AudioMetadata\","
            + "\"audioMetadata\": {"
            + "\"subscriptionId\": \"subscriptionId\","
            + "\"encoding\": \"PCM\","
            + "\"sampleRate\": 8,"
            + "\"channels\": 2,"
            + "\"length\": 100"
            + "}"
            + "}";
        MediaStreamingMetadata mediaStreamingMetadata
            = (MediaStreamingMetadata) MediaStreamingPackageParser.parse(metadataJson);
        assertNotNull(mediaStreamingMetadata);
        checkAudioMetadata(mediaStreamingMetadata);
    }

    @Test
    public void parseBinaryAudioData() {
        String jsonData = createJsonData();
        MediaStreamingAudio mediaStreamingAudio
            = (MediaStreamingAudio) MediaStreamingPackageParser.parse(BinaryData.fromString(jsonData));
        checkAudioData(mediaStreamingAudio);
    }

    @Test
    public void parseBinaryAudioMetadata() {
        String jsonMetadata = createJsonMetadata();
        MediaStreamingMetadata mediaStreamingMetadata
            = (MediaStreamingMetadata) MediaStreamingPackageParser.parse(BinaryData.fromString(jsonMetadata));
        checkAudioMetadata(mediaStreamingMetadata);
    }

    @Test
    public void parseBinaryArrayAudioData() {
        String jsonData = createJsonData();
        MediaStreamingAudio mediaStreamingAudio
            = (MediaStreamingAudio) MediaStreamingPackageParser.parse(jsonData.getBytes(StandardCharsets.UTF_8));
        checkAudioData(mediaStreamingAudio);
    }

    @Test
    public void parseBinaryArrayAudioMetadata() {
        String jsonMetadata = createJsonMetadata();
        MediaStreamingMetadata mediaStreamingMetadata
            = (MediaStreamingMetadata) MediaStreamingPackageParser.parse(jsonMetadata.getBytes(StandardCharsets.UTF_8));
        checkAudioMetadata(mediaStreamingMetadata);
    }

    // Shared assertions for a parsed audio package.
    private void checkAudioData(MediaStreamingAudio mediaStreamingAudio) {
        assertEquals(OffsetDateTime.parse("2022-10-03T19:16:12.925Z"), mediaStreamingAudio.getTimestamp());
        assertEquals("participantId", mediaStreamingAudio.getParticipant().getRawId());
        assertEquals("AQIDBAU=", mediaStreamingAudio.getAudioData());
        assertEquals(false, mediaStreamingAudio.isSilent());
    }

    // Shared assertions for a parsed metadata package.
    private void checkAudioMetadata(MediaStreamingMetadata mediaStreamingMetadata) {
        assertEquals("subscriptionId", mediaStreamingMetadata.getMediaSubscriptionId());
        assertEquals("PCM", mediaStreamingMetadata.getEncoding());
        assertEquals(8, mediaStreamingMetadata.getSampleRate());
        assertEquals(2, mediaStreamingMetadata.getChannels());
        assertEquals(100, mediaStreamingMetadata.getLength());
    }

    // Builds the nested "AudioMetadata" test payload.
    private String createJsonMetadata() {
        try {
            ObjectMapper objectMapper = new ObjectMapper();
            ObjectNode audioMetadata = objectMapper.createObjectNode();
            audioMetadata.put("subscriptionId", "subscriptionId");
            audioMetadata.put("encoding", "PCM");
            audioMetadata.put("sampleRate", 8);
            audioMetadata.put("channels", 2);
            audioMetadata.put("length", 100);
            ObjectNode root = objectMapper.createObjectNode();
            root.put("kind", "AudioMetadata");
            // 'set' replaces the deprecated put(String, JsonNode) overload.
            root.set("audioMetadata", audioMetadata);
            return objectMapper.writerWithDefaultPrettyPrinter().writeValueAsString(root);
        } catch (Exception e) {
            // Preserve the original failure as the cause instead of discarding it.
            throw new RuntimeException(e);
        }
    }

    // Builds the nested "AudioData" test payload.
    private String createJsonData() {
        try {
            ObjectMapper objectMapper = new ObjectMapper();
            ObjectNode audioData = objectMapper.createObjectNode();
            audioData.put("timestamp", "2022-10-03T19:16:12.925Z");
            audioData.put("participantRawID", "participantId");
            audioData.put("data", "AQIDBAU=");
            audioData.put("silent", false);
            ObjectNode root = objectMapper.createObjectNode();
            root.put("kind", "AudioData");
            root.set("audioData", audioData);
            return objectMapper.writerWithDefaultPrettyPrinter().writeValueAsString(root);
        } catch (Exception e) {
            throw new RuntimeException(e);
        }
    }
}
class MediaStreamingPackageParserUnitTests {
    // NOTE(review): the stray duplicated @Test annotation has been removed — @Test is not a
    // repeatable annotation, so "@Test @Test" does not compile.
    @Test
    public void parseAudioMetadata() {
        // Flattened contract: metadata fields sit at the top level next to "kind".
        String metadataJson = "{"
            + "\"kind\": \"AudioMetadata\","
            + "\"subscriptionId\": \"subscriptionId\","
            + "\"encoding\": \"PCM\","
            + "\"sampleRate\": 8,"
            + "\"channels\": 2,"
            + "\"length\": 100"
            + "}";
        MediaStreamingMetadata mediaStreamingMetadata
            = (MediaStreamingMetadata) MediaStreamingPackageParser.parse(metadataJson);
        assertNotNull(mediaStreamingMetadata);
        checkAudioMetadata(mediaStreamingMetadata);
    }

    @Test
    public void parseBinaryAudioData() {
        String jsonData = createJsonData();
        MediaStreamingAudio mediaStreamingAudio
            = (MediaStreamingAudio) MediaStreamingPackageParser.parse(BinaryData.fromString(jsonData));
        checkAudioData(mediaStreamingAudio);
    }

    @Test
    public void parseBinaryAudioMetadata() {
        String jsonMetadata = createJsonMetadata();
        MediaStreamingMetadata mediaStreamingMetadata
            = (MediaStreamingMetadata) MediaStreamingPackageParser.parse(BinaryData.fromString(jsonMetadata));
        checkAudioMetadata(mediaStreamingMetadata);
    }

    @Test
    public void parseBinaryArrayAudioData() {
        String jsonData = createJsonData();
        MediaStreamingAudio mediaStreamingAudio
            = (MediaStreamingAudio) MediaStreamingPackageParser.parse(jsonData.getBytes(StandardCharsets.UTF_8));
        checkAudioData(mediaStreamingAudio);
    }

    @Test
    public void parseBinaryArrayAudioMetadata() {
        String jsonMetadata = createJsonMetadata();
        MediaStreamingMetadata mediaStreamingMetadata
            = (MediaStreamingMetadata) MediaStreamingPackageParser.parse(jsonMetadata.getBytes(StandardCharsets.UTF_8));
        checkAudioMetadata(mediaStreamingMetadata);
    }

    // Shared assertions for a parsed audio package.
    private void checkAudioData(MediaStreamingAudio mediaStreamingAudio) {
        assertEquals(OffsetDateTime.parse("2022-10-03T19:16:12.925Z"), mediaStreamingAudio.getTimestamp());
        assertEquals("participantId", mediaStreamingAudio.getParticipant().getRawId());
        assertEquals("AQIDBAU=", mediaStreamingAudio.getAudioData());
        assertEquals(false, mediaStreamingAudio.isSilent());
    }

    // Shared assertions for a parsed metadata package.
    private void checkAudioMetadata(MediaStreamingMetadata mediaStreamingMetadata) {
        assertEquals("subscriptionId", mediaStreamingMetadata.getMediaSubscriptionId());
        assertEquals("PCM", mediaStreamingMetadata.getEncoding());
        assertEquals(8, mediaStreamingMetadata.getSampleRate());
        assertEquals(2, mediaStreamingMetadata.getChannels());
        assertEquals(100, mediaStreamingMetadata.getLength());
    }

    // Builds the flattened "AudioMetadata" test payload. Dead commented-out wrapper code removed.
    private String createJsonMetadata() {
        try {
            ObjectMapper objectMapper = new ObjectMapper();
            ObjectNode audioMetadata = objectMapper.createObjectNode();
            audioMetadata.put("kind", "AudioMetadata");
            audioMetadata.put("subscriptionId", "subscriptionId");
            audioMetadata.put("encoding", "PCM");
            audioMetadata.put("sampleRate", 8);
            audioMetadata.put("channels", 2);
            audioMetadata.put("length", 100);
            return objectMapper.writerWithDefaultPrettyPrinter().writeValueAsString(audioMetadata);
        } catch (Exception e) {
            // Preserve the original failure as the cause instead of discarding it.
            throw new RuntimeException(e);
        }
    }

    // Builds the flattened "AudioData" test payload. Dead commented-out wrapper code removed.
    private String createJsonData() {
        try {
            ObjectMapper objectMapper = new ObjectMapper();
            ObjectNode audioData = objectMapper.createObjectNode();
            audioData.put("kind", "AudioData");
            audioData.put("timestamp", "2022-10-03T19:16:12.925Z");
            audioData.put("participantRawID", "participantId");
            audioData.put("data", "AQIDBAU=");
            audioData.put("silent", false);
            return objectMapper.writerWithDefaultPrettyPrinter().writeValueAsString(audioData);
        } catch (Exception e) {
            throw new RuntimeException(e);
        }
    }
}
These catch blocks aren't exactly correct, as there are other types of exceptions that could happen, and the exception's cause isn't the same as the exception itself: ```java if (e instanceof IOException) { throw (IOException) e; } else if (e instanceof RuntimeException) { throw (RuntimeException) e; } else if (e instanceof Error) { throw (Error) e; } else { throw new RuntimeException(e); } ``` This will make sure that all potential `Throwable`s are properly handled. This should also be turned into a static method on this class, or into a package-private utility method, as this pattern is repeated often.
/**
 * Advances the reader to the next token in the JSON stream and returns it.
 *
 * @return The next {@link JsonToken}, or the cached token once END_DOCUMENT has been reached.
 * @throws IOException If reading from the underlying GSON reader fails.
 */
public JsonToken nextToken() throws IOException {
    // Once END_DOCUMENT has been reached the reader stays pinned on that token.
    if (complete) {
        return currentToken;
    }

    try {
        // Structural tokens require their matching GSON begin/end call before advancing.
        if (currentToken == JsonToken.START_OBJECT) {
            beginObjectMethod.invoke(gsonReader);
        } else if (currentToken == JsonToken.END_OBJECT) {
            endObjectMethod.invoke(gsonReader);
        } else if (currentToken == JsonToken.START_ARRAY) {
            beginArrayMethod.invoke(gsonReader);
        } else if (currentToken == JsonToken.END_ARRAY) {
            endArrayMethod.invoke(gsonReader);
        } else if (currentToken == JsonToken.NULL) {
            nextNullMethod.invoke(gsonReader);
        }

        // A value token the caller never read must be drained before peeking at the next token.
        if (!consumed && currentToken != null) {
            switch (currentToken) {
                case FIELD_NAME:
                    nextNameMethod.invoke(gsonReader);
                    break;

                case BOOLEAN:
                    nextBooleanMethod.invoke(gsonReader);
                    break;

                case NUMBER:
                    nextDoubleMethod.invoke(gsonReader);
                    break;

                case STRING:
                    nextStringMethod.invoke(gsonReader);
                    break;

                default:
                    break;
            }
        }

        currentToken = mapToken((Enum<?>) peekMethod.invoke(gsonReader));
        if (currentToken == JsonToken.END_DOCUMENT) {
            complete = true;
        }

        consumed = false;
        return currentToken;
    } catch (Throwable e) {
        // MethodHandle.invoke rethrows the underlying exception directly, so 'e' itself is the
        // real failure. Previously this threw e.getCause() (often null) and blindly cast to
        // RuntimeException, which could raise ClassCastException or NullPointerException instead.
        if (e instanceof IOException) {
            throw (IOException) e;
        } else if (e instanceof RuntimeException) {
            throw (RuntimeException) e;
        } else if (e instanceof Error) {
            throw (Error) e;
        } else {
            throw new RuntimeException(e);
        }
    }
}
}
/**
 * Advances the reader to the next token in the JSON stream and returns it.
 *
 * @return The next {@link JsonToken}, or the cached token once END_DOCUMENT has been reached.
 * @throws IOException If reading from the underlying GSON reader fails.
 */
public JsonToken nextToken() throws IOException {
    // After END_DOCUMENT the reader is pinned to that token forever.
    if (complete) {
        return currentToken;
    }

    if (currentToken != null) {
        // Structural tokens require their matching GSON begin/end call before advancing.
        switch (currentToken) {
            case START_OBJECT:
                JSON_READER_BEGIN_OBJECT.beginObject(gsonJsonReader);
                break;
            case END_OBJECT:
                JSON_READER_END_OBJECT.endObject(gsonJsonReader);
                break;
            case START_ARRAY:
                JSON_READER_BEGIN_ARRAY.beginArray(gsonJsonReader);
                break;
            case END_ARRAY:
                JSON_READER_END_ARRAY.endArray(gsonJsonReader);
                break;
            case NULL:
                JSON_READER_NEXT_NULL.nextNull(gsonJsonReader);
                break;
            default:
                break;
        }

        // A value token the caller never read must be drained before peeking at the next one.
        if (!consumed) {
            switch (currentToken) {
                case FIELD_NAME:
                    JSON_READER_NEXT_NAME.nextName(gsonJsonReader);
                    break;
                case BOOLEAN:
                    JSON_READER_NEXT_BOOLEAN.nextBoolean(gsonJsonReader);
                    break;
                case NUMBER:
                    JSON_READER_NEXT_DOUBLE.nextDouble(gsonJsonReader);
                    break;
                case STRING:
                    JSON_READER_NEXT_STRING.nextString(gsonJsonReader);
                    break;
                default:
                    break;
            }
        }
    }

    currentToken = mapToken((Enum<?>) JSON_READER_PEEK.peek(gsonJsonReader));
    if (currentToken == JsonToken.END_DOCUMENT) {
        complete = true;
    }

    consumed = false;
    return currentToken;
}
class GsonJsonReader extends JsonReader { private static boolean initialized = false; private static boolean attemptedInitialization = false; private static final MethodHandles.Lookup publicLookup = MethodHandles.publicLookup(); private static Class<?> gsonTokenEnum = null; private final Object gsonReader; private JsonToken currentToken; private static MethodHandle gsonReaderConstructor; private static MethodHandle setLenientMethod; private static MethodHandle peekMethod; private static MethodHandle closeMethod; private static MethodHandle beginObjectMethod; private static MethodHandle endObjectMethod; private static MethodHandle beginArrayMethod; private static MethodHandle endArrayMethod; private static MethodHandle nextNullMethod; private static MethodHandle nextBooleanMethod; private static MethodHandle nextStringMethod; private static MethodHandle nextDoubleMethod; private static MethodHandle nextIntMethod; private static MethodHandle nextLongMethod; private static MethodHandle nextNameMethod; private static MethodHandle skipValueMethod; private final byte[] jsonBytes; private final String jsonString; private final boolean resetSupported; private final boolean nonNumericNumbersSupported; private boolean consumed = false; private boolean complete = false; /** * Constructs an instance of {@link JsonReader} from a {@code byte[]}. * * @param json JSON {@code byte[]}. * @param options {@link JsonOptions} to configure the creation of the {@link JsonReader}. * @return An instance of {@link JsonReader}. * @throws NullPointerException If {@code json} is null. */ static JsonReader fromBytes(byte[] json, JsonOptions options) { Objects.requireNonNull(json, "'json' cannot be null."); return new GsonJsonReader(new InputStreamReader(new ByteArrayInputStream(json), StandardCharsets.UTF_8), true, json, null, options); } /** * Constructs an instance of {@link JsonReader} from a String. * * @param json JSON String. 
* @param options {@link JsonOptions} to configure the creation of the {@link JsonReader}. * @return An instance of {@link JsonReader}. * @throws NullPointerException If {@code json} is null. */ static JsonReader fromString(String json, JsonOptions options) { Objects.requireNonNull(json, "'json' cannot be null."); return new GsonJsonReader(new StringReader(json), true, null, json, options); } /** * Constructs an instance of {@link JsonReader} from an {@link InputStream}. * * @param json JSON {@link InputStream}. * @param options {@link JsonOptions} to configure the creation of the {@link JsonReader}. * @return An instance of {@link JsonReader}. * @throws NullPointerException If {@code json} is null. */ static JsonReader fromStream(InputStream json, JsonOptions options) { Objects.requireNonNull(json, "'json' cannot be null."); return new GsonJsonReader(new InputStreamReader(json, StandardCharsets.UTF_8), json.markSupported(), null, null, options); } /** * Constructs an instance of {@link GsonJsonReader} from a {@link Reader}. * * @param json JSON {@link Reader}. * @param options {@link JsonOptions} to configure the creation of the {@link JsonReader}. * @return An instance of {@link GsonJsonReader}. * @throws NullPointerException If {@code json} is null. 
*/ static JsonReader fromReader(Reader json, JsonOptions options) { Objects.requireNonNull(json, "'json' cannot be null."); return new GsonJsonReader(json, json.markSupported(), null, null, options); } private GsonJsonReader(Reader reader, boolean resetSupported, byte[] jsonBytes, String jsonString, JsonOptions options) { this(reader, resetSupported, jsonBytes, jsonString, options.isNonNumericNumbersSupported()); } private GsonJsonReader(Reader reader, boolean resetSupported, byte[] jsonBytes, String jsonString, boolean nonNumericNumbersSupported) { try { initialize(); gsonReader = gsonReaderConstructor.invoke(reader); setLenientMethod.invoke(gsonReader, nonNumericNumbersSupported); } catch (Throwable e) { if (e instanceof RuntimeException) { throw (RuntimeException) e.getCause(); } else { throw new IllegalStateException("Gson is not present or an incorrect version is present."); } } this.resetSupported = resetSupported; this.jsonBytes = jsonBytes; this.jsonString = jsonString; this.nonNumericNumbersSupported = nonNumericNumbersSupported; } static synchronized void initialize() throws ReflectiveOperationException { if (initialized) { return; } else if (attemptedInitialization) { throw new ReflectiveOperationException("Initialization of GsonJsonReader has failed in the past."); } attemptedInitialization = true; gsonTokenEnum = Class.forName("com.google.gson.stream.JsonToken"); Class<?> gsonReaderClass = Class.forName("com.google.gson.stream.JsonReader"); MethodType voidMT = methodType(void.class); gsonReaderConstructor = publicLookup.findConstructor(gsonReaderClass, methodType(void.class, Reader.class)); setLenientMethod = publicLookup.findVirtual(gsonReaderClass, "setLenient", methodType(void.class, boolean.class)); peekMethod = publicLookup.findVirtual(gsonReaderClass, "peek", methodType(gsonTokenEnum)); closeMethod = publicLookup.findVirtual(gsonReaderClass, "close", voidMT); beginObjectMethod = publicLookup.findVirtual(gsonReaderClass, "beginObject", voidMT); 
endObjectMethod = publicLookup.findVirtual(gsonReaderClass, "endObject", voidMT); beginArrayMethod = publicLookup.findVirtual(gsonReaderClass, "beginArray", voidMT); endArrayMethod = publicLookup.findVirtual(gsonReaderClass, "endArray", voidMT); nextNullMethod = publicLookup.findVirtual(gsonReaderClass, "nextNull", voidMT); nextBooleanMethod = publicLookup.findVirtual(gsonReaderClass, "nextBoolean", methodType(boolean.class)); nextStringMethod = publicLookup.findVirtual(gsonReaderClass, "nextString", methodType(String.class)); nextDoubleMethod = publicLookup.findVirtual(gsonReaderClass, "nextDouble", methodType(double.class)); nextIntMethod = publicLookup.findVirtual(gsonReaderClass, "nextInt", methodType(int.class)); nextLongMethod = publicLookup.findVirtual(gsonReaderClass, "nextLong", methodType(long.class)); nextNameMethod = publicLookup.findVirtual(gsonReaderClass, "nextName", methodType(String.class)); skipValueMethod = publicLookup.findVirtual(gsonReaderClass, "skipValue", voidMT); initialized = true; } @Override public JsonToken currentToken() { return currentToken; } @Override @Override public byte[] getBinary() throws IOException { consumed = true; try { if (currentToken == JsonToken.NULL) { nextNullMethod.invoke(gsonReader); return null; } else { return Base64.getDecoder().decode((String) nextStringMethod.invoke(gsonReader)); } } catch (Throwable e) { if (e instanceof IOException) { throw (IOException) e.getCause(); } else { throw (RuntimeException) e.getCause(); } } } @Override public boolean getBoolean() throws IOException { consumed = true; try { return (boolean) nextBooleanMethod.invoke(gsonReader); } catch (Throwable e) { if (e instanceof IOException) { throw (IOException) e.getCause(); } else { throw (RuntimeException) e.getCause(); } } } @Override public float getFloat() throws IOException { consumed = true; try { return (float) (double) nextDoubleMethod.invoke(gsonReader); } catch (Throwable e) { if (e instanceof IOException) { throw 
(IOException) e.getCause(); } else { throw (RuntimeException) e.getCause(); } } } @Override public double getDouble() throws IOException { consumed = true; try { return (double) nextDoubleMethod.invoke(gsonReader); } catch (Throwable e) { if (e instanceof IOException) { throw (IOException) e.getCause(); } else { throw (RuntimeException) e.getCause(); } } } @Override public int getInt() throws IOException { consumed = true; try { return (int) nextIntMethod.invoke(gsonReader); } catch (Throwable e) { if (e instanceof IOException) { throw (IOException) e.getCause(); } else { throw (RuntimeException) e.getCause(); } } } @Override public long getLong() throws IOException { consumed = true; try { return (long) nextLongMethod.invoke(gsonReader); } catch (Throwable e) { if (e instanceof IOException) { throw (IOException) e.getCause(); } else { throw (RuntimeException) e.getCause(); } } } @Override public String getString() throws IOException { consumed = true; try { if (currentToken == JsonToken.NULL) { return null; } else { return (String) nextStringMethod.invoke(gsonReader); } } catch (Throwable e) { if (e instanceof IOException) { throw (IOException) e.getCause(); } else { throw (RuntimeException) e.getCause(); } } } @Override public String getFieldName() throws IOException { consumed = true; try { return (String) nextNameMethod.invoke(gsonReader); } catch (Throwable e) { if (e instanceof IOException) { throw (IOException) e.getCause(); } else { throw (RuntimeException) e.getCause(); } } } @Override public void skipChildren() throws IOException { consumed = true; try { skipValueMethod.invoke(gsonReader); } catch (Throwable e) { if (e instanceof IOException) { throw (IOException) e.getCause(); } else { throw (RuntimeException) e.getCause(); } } } @Override public JsonReader bufferObject() throws IOException { if (currentToken == JsonToken.START_OBJECT || (currentToken == JsonToken.FIELD_NAME && nextToken() == JsonToken.START_OBJECT)) { consumed = true; StringBuilder 
bufferedObject = new StringBuilder(); readChildren(bufferedObject); String json = bufferedObject.toString(); return new GsonJsonReader(new StringReader(json), true, null, json, nonNumericNumbersSupported); } else { throw new IllegalStateException("Cannot buffer a JSON object from a non-object, non-field name " + "starting location. Starting location: " + currentToken()); } } @Override public boolean resetSupported() { return resetSupported; } @Override public JsonReader reset() { if (!resetSupported) { throw new IllegalStateException("'reset' isn't supported by this JsonReader."); } if (jsonBytes != null) { return new GsonJsonReader(new InputStreamReader(new ByteArrayInputStream(jsonBytes), StandardCharsets.UTF_8), true, jsonBytes, null, nonNumericNumbersSupported); } else { return new GsonJsonReader(new StringReader(jsonString), true, null, jsonString, nonNumericNumbersSupported); } } @Override public void close() throws IOException { try { closeMethod.invoke(gsonReader); } catch (Throwable e) { if (e instanceof IOException) { throw (IOException) e.getCause(); } else { throw (RuntimeException) e.getCause(); } } } /* * Maps the GSON JsonToken to the azure-json JsonToken. */ private JsonToken mapToken(Enum<?> token) { if (token == null) { return null; } if (token.getClass() != gsonTokenEnum) { throw new IllegalStateException("Unsupported enum, pass a Gson JsonToken"); } switch (token.name()) { case "BEGIN_OBJECT": return JsonToken.START_OBJECT; case "END_OBJECT": return JsonToken.END_OBJECT; case "BEGIN_ARRAY": return JsonToken.START_ARRAY; case "END_ARRAY": return JsonToken.END_ARRAY; case "NAME": return JsonToken.FIELD_NAME; case "STRING": return JsonToken.STRING; case "NUMBER": return JsonToken.NUMBER; case "BOOLEAN": return JsonToken.BOOLEAN; case "NULL": return JsonToken.NULL; case "END_DOCUMENT": return JsonToken.END_DOCUMENT; default: throw new IllegalStateException("Unsupported token type: '" + token + "'."); } } }
class GsonJsonReader extends JsonReader {
    // Functional-interface handles onto com.google.gson.stream.JsonReader, bound once via
    // LambdaMetafactory so that Gson remains an optional, reflectively discovered dependency.
    // All of these are null when a compatible Gson version is absent from the classpath.
    private static final Class<?> GSON_JSON_TOKEN_ENUM;
    private static final JsonReaderConstructor JSON_READER_CONSTRUCTOR;
    private static final JsonReaderSetLenient JSON_READER_SET_LENIENT;
    private static final JsonReaderClose JSON_READER_CLOSE;
    private static final JsonReaderPeek JSON_READER_PEEK;
    private static final JsonReaderBeginObject JSON_READER_BEGIN_OBJECT;
    private static final JsonReaderEndObject JSON_READER_END_OBJECT;
    private static final JsonReaderBeginArray JSON_READER_BEGIN_ARRAY;
    private static final JsonReaderEndArray JSON_READER_END_ARRAY;
    private static final JsonReaderNextNull JSON_READER_NEXT_NULL;
    private static final JsonReaderNextName JSON_READER_NEXT_NAME;
    private static final JsonReaderNextString JSON_READER_NEXT_STRING;
    private static final JsonReaderNextBoolean JSON_READER_NEXT_BOOLEAN;
    private static final JsonReaderNextInt JSON_READER_NEXT_INT;
    private static final JsonReaderNextLong JSON_READER_NEXT_LONG;
    private static final JsonReaderNextDouble JSON_READER_NEXT_DOUBLE;
    private static final JsonReaderSkipValue JSON_READER_SKIP_VALUE;

    // True only when every handle above was resolved successfully.
    static final boolean INITIALIZED;

    static {
        final MethodHandles.Lookup lookup = MethodHandles.lookup();
        final MethodType voidMT = methodType(void.class);
        final MethodType voidObjectMT = methodType(void.class, Object.class);

        Class<?> gsonJsonTokenEnum = null;
        JsonReaderConstructor jsonReaderConstructor = null;
        JsonReaderSetLenient jsonReaderSetLenient = null;
        JsonReaderClose jsonReaderClose = null;
        JsonReaderPeek jsonReaderPeek = null;
        JsonReaderBeginObject jsonReaderBeginObject = null;
        JsonReaderEndObject jsonReaderEndObject = null;
        JsonReaderBeginArray jsonReaderBeginArray = null;
        JsonReaderEndArray jsonReaderEndArray = null;
        JsonReaderNextNull jsonReaderNextNull = null;
        JsonReaderNextName jsonReaderNextName = null;
        JsonReaderNextString jsonReaderNextString = null;
        JsonReaderNextBoolean jsonReaderNextBoolean = null;
        JsonReaderNextInt jsonReaderNextInt = null;
        JsonReaderNextLong jsonReaderNextLong = null;
        JsonReaderNextDouble jsonReaderNextDouble = null;
        JsonReaderSkipValue jsonReaderSkipValue = null;
        boolean initialized = false;

        try {
            Class<?> gsonJsonReaderClass = Class.forName("com.google.gson.stream.JsonReader");
            gsonJsonTokenEnum = Class.forName("com.google.gson.stream.JsonToken");

            // The constructor lambda is built inline because createMetaFactory only handles
            // virtual methods; everything else goes through the shared helper.
            MethodHandle gsonReaderConstructor = lookup.findConstructor(gsonJsonReaderClass,
                methodType(void.class, Reader.class));
            jsonReaderConstructor = (JsonReaderConstructor) LambdaMetafactory.metafactory(lookup,
                "createJsonReader", methodType(JsonReaderConstructor.class),
                methodType(Object.class, Reader.class), gsonReaderConstructor,
                gsonReaderConstructor.type()).getTarget().invoke();

            jsonReaderSetLenient = createMetaFactory("setLenient", gsonJsonReaderClass,
                methodType(void.class, boolean.class), JsonReaderSetLenient.class,
                methodType(void.class, Object.class, boolean.class), lookup);
            jsonReaderClose = createMetaFactory("close", gsonJsonReaderClass, voidMT,
                JsonReaderClose.class, voidObjectMT, lookup);
            jsonReaderPeek = createMetaFactory("peek", gsonJsonReaderClass,
                methodType(gsonJsonTokenEnum), JsonReaderPeek.class,
                methodType(Object.class, Object.class), lookup);
            jsonReaderBeginObject = createMetaFactory("beginObject", gsonJsonReaderClass, voidMT,
                JsonReaderBeginObject.class, voidObjectMT, lookup);
            jsonReaderEndObject = createMetaFactory("endObject", gsonJsonReaderClass, voidMT,
                JsonReaderEndObject.class, voidObjectMT, lookup);
            jsonReaderBeginArray = createMetaFactory("beginArray", gsonJsonReaderClass, voidMT,
                JsonReaderBeginArray.class, voidObjectMT, lookup);
            jsonReaderEndArray = createMetaFactory("endArray", gsonJsonReaderClass, voidMT,
                JsonReaderEndArray.class, voidObjectMT, lookup);
            jsonReaderNextNull = createMetaFactory("nextNull", gsonJsonReaderClass, voidMT,
                JsonReaderNextNull.class, voidObjectMT, lookup);
            jsonReaderNextName = createMetaFactory("nextName", gsonJsonReaderClass,
                methodType(String.class), JsonReaderNextName.class,
                methodType(String.class, Object.class), lookup);
            jsonReaderNextString = createMetaFactory("nextString", gsonJsonReaderClass,
                methodType(String.class), JsonReaderNextString.class,
                methodType(String.class, Object.class), lookup);
            jsonReaderNextBoolean = createMetaFactory("nextBoolean", gsonJsonReaderClass,
                methodType(boolean.class), JsonReaderNextBoolean.class,
                methodType(boolean.class, Object.class), lookup);
            jsonReaderNextInt = createMetaFactory("nextInt", gsonJsonReaderClass,
                methodType(int.class), JsonReaderNextInt.class,
                methodType(int.class, Object.class), lookup);
            jsonReaderNextLong = createMetaFactory("nextLong", gsonJsonReaderClass,
                methodType(long.class), JsonReaderNextLong.class,
                methodType(long.class, Object.class), lookup);
            jsonReaderNextDouble = createMetaFactory("nextDouble", gsonJsonReaderClass,
                methodType(double.class), JsonReaderNextDouble.class,
                methodType(double.class, Object.class), lookup);
            jsonReaderSkipValue = createMetaFactory("skipValue", gsonJsonReaderClass, voidMT,
                JsonReaderSkipValue.class, voidObjectMT, lookup);

            initialized = true;
        } catch (Throwable e) {
            // Checked Throwables (e.g. ClassNotFoundException when Gson isn't on the classpath)
            // are deliberately swallowed: INITIALIZED stays false and the constructor reports it.
            if (e instanceof RuntimeException) {
                throw (RuntimeException) e;
            } else if (e instanceof Error) {
                throw (Error) e;
            }
        }

        GSON_JSON_TOKEN_ENUM = gsonJsonTokenEnum;
        JSON_READER_CONSTRUCTOR = jsonReaderConstructor;
        JSON_READER_SET_LENIENT = jsonReaderSetLenient;
        JSON_READER_CLOSE = jsonReaderClose;
        JSON_READER_PEEK = jsonReaderPeek;
        JSON_READER_BEGIN_OBJECT = jsonReaderBeginObject;
        JSON_READER_END_OBJECT = jsonReaderEndObject;
        JSON_READER_BEGIN_ARRAY = jsonReaderBeginArray;
        JSON_READER_END_ARRAY = jsonReaderEndArray;
        JSON_READER_NEXT_NULL = jsonReaderNextNull;
        JSON_READER_NEXT_NAME = jsonReaderNextName;
        JSON_READER_NEXT_STRING = jsonReaderNextString;
        JSON_READER_NEXT_BOOLEAN = jsonReaderNextBoolean;
        JSON_READER_NEXT_INT = jsonReaderNextInt;
        JSON_READER_NEXT_LONG = jsonReaderNextLong;
        JSON_READER_NEXT_DOUBLE = jsonReaderNextDouble;
        JSON_READER_SKIP_VALUE = jsonReaderSkipValue;
        INITIALIZED = initialized;
    }

    private final Object gsonJsonReader;
    private JsonToken currentToken;

    // Retained source so reset() can recreate the reader; null when not applicable.
    private final byte[] jsonBytes;
    private final String jsonString;
    private final boolean resetSupported;
    private final boolean nonNumericNumbersSupported;

    // Tracks whether the current value was read by a get* call (so nextToken knows
    // whether it still has to drain it) and whether END_DOCUMENT has been reached.
    private boolean consumed = false;
    private boolean complete = false;

    /**
     * Constructs an instance of {@link JsonReader} from a {@code byte[]}.
     *
     * @param json JSON {@code byte[]}.
     * @param options {@link JsonOptions} to configure the creation of the {@link JsonReader}.
     * @return An instance of {@link JsonReader}.
     * @throws NullPointerException If {@code json} is null.
     */
    static JsonReader fromBytes(byte[] json, JsonOptions options) {
        Objects.requireNonNull(json, "'json' cannot be null.");
        return new GsonJsonReader(new InputStreamReader(new ByteArrayInputStream(json), StandardCharsets.UTF_8),
            true, json, null, options);
    }

    /**
     * Constructs an instance of {@link JsonReader} from a String.
     *
     * @param json JSON String.
     * @param options {@link JsonOptions} to configure the creation of the {@link JsonReader}.
     * @return An instance of {@link JsonReader}.
     * @throws NullPointerException If {@code json} is null.
     */
    static JsonReader fromString(String json, JsonOptions options) {
        Objects.requireNonNull(json, "'json' cannot be null.");
        return new GsonJsonReader(new StringReader(json), true, null, json, options);
    }

    /**
     * Constructs an instance of {@link JsonReader} from an {@link InputStream}.
     *
     * @param json JSON {@link InputStream}.
     * @param options {@link JsonOptions} to configure the creation of the {@link JsonReader}.
     * @return An instance of {@link JsonReader}.
     * @throws NullPointerException If {@code json} is null.
     */
    static JsonReader fromStream(InputStream json, JsonOptions options) {
        Objects.requireNonNull(json, "'json' cannot be null.");
        return new GsonJsonReader(new InputStreamReader(json, StandardCharsets.UTF_8), json.markSupported(),
            null, null, options);
    }

    /**
     * Constructs an instance of {@link JsonReader} from a {@link Reader}.
     *
     * @param json JSON {@link Reader}.
     * @param options {@link JsonOptions} to configure the creation of the {@link JsonReader}.
     * @return An instance of {@link JsonReader}.
     * @throws NullPointerException If {@code json} is null.
     */
    static JsonReader fromReader(Reader json, JsonOptions options) {
        Objects.requireNonNull(json, "'json' cannot be null.");
        return new GsonJsonReader(json, json.markSupported(), null, null, options);
    }

    private GsonJsonReader(Reader reader, boolean resetSupported, byte[] jsonBytes, String jsonString,
        JsonOptions options) {
        this(reader, resetSupported, jsonBytes, jsonString, options.isNonNumericNumbersSupported());
    }

    private GsonJsonReader(Reader reader, boolean resetSupported, byte[] jsonBytes, String jsonString,
        boolean nonNumericNumbersSupported) {
        if (!INITIALIZED) {
            throw new IllegalStateException("No compatible version of Gson is present on the classpath.");
        }

        gsonJsonReader = JSON_READER_CONSTRUCTOR.createJsonReader(reader);
        // Lenient mode is what allows Gson to parse non-numeric numbers (NaN, Infinity).
        JSON_READER_SET_LENIENT.setLenient(gsonJsonReader, nonNumericNumbersSupported);
        this.resetSupported = resetSupported;
        this.jsonBytes = jsonBytes;
        this.jsonString = jsonString;
        this.nonNumericNumbersSupported = nonNumericNumbersSupported;
    }

    @Override
    public JsonToken currentToken() {
        return currentToken;
    }

    // FIX: the original had a duplicated '@Override @Override' here (a compile error),
    // apparently marking where nextToken() was removed. nextToken() appears to be defined
    // separately — TODO(review): confirm this class still overrides it.
    @Override
    public byte[] getBinary() throws IOException {
        consumed = true;
        if (currentToken == JsonToken.NULL) {
            JSON_READER_NEXT_NULL.nextNull(gsonJsonReader);
            return null;
        } else {
            return Base64.getDecoder().decode(JSON_READER_NEXT_STRING.nextString(gsonJsonReader));
        }
    }

    @Override
    public boolean getBoolean() throws IOException {
        consumed = true;
        return JSON_READER_NEXT_BOOLEAN.nextBoolean(gsonJsonReader);
    }

    @Override
    public float getFloat() throws IOException {
        consumed = true;
        // Gson has no nextFloat; narrow from double.
        return (float) JSON_READER_NEXT_DOUBLE.nextDouble(gsonJsonReader);
    }

    @Override
    public double getDouble() throws IOException {
        consumed = true;
        return JSON_READER_NEXT_DOUBLE.nextDouble(gsonJsonReader);
    }

    @Override
    public int getInt() throws IOException {
        consumed = true;
        return JSON_READER_NEXT_INT.nextInt(gsonJsonReader);
    }

    @Override
    public long getLong() throws IOException {
        consumed = true;
        return JSON_READER_NEXT_LONG.nextLong(gsonJsonReader);
    }

    @Override
    public String getString() throws IOException {
        consumed = true;
        if (currentToken == JsonToken.NULL) {
            return null;
        } else {
            return JSON_READER_NEXT_STRING.nextString(gsonJsonReader);
        }
    }

    @Override
    public String getFieldName() throws IOException {
        consumed = true;
        return JSON_READER_NEXT_NAME.nextName(gsonJsonReader);
    }

    @Override
    public void skipChildren() throws IOException {
        consumed = true;
        JSON_READER_SKIP_VALUE.skipValue(gsonJsonReader);
    }

    @Override
    public JsonReader bufferObject() throws IOException {
        if (currentToken == JsonToken.START_OBJECT
            || (currentToken == JsonToken.FIELD_NAME && nextToken() == JsonToken.START_OBJECT)) {
            consumed = true;
            StringBuilder bufferedObject = new StringBuilder();
            readChildren(bufferedObject);
            String json = bufferedObject.toString();
            return new GsonJsonReader(new StringReader(json), true, null, json, nonNumericNumbersSupported);
        } else {
            throw new IllegalStateException("Cannot buffer a JSON object from a non-object, non-field name "
                + "starting location. Starting location: " + currentToken());
        }
    }

    @Override
    public boolean resetSupported() {
        return resetSupported;
    }

    @Override
    public JsonReader reset() {
        if (!resetSupported) {
            throw new IllegalStateException("'reset' isn't supported by this JsonReader.");
        }

        if (jsonBytes != null) {
            return new GsonJsonReader(new InputStreamReader(new ByteArrayInputStream(jsonBytes),
                StandardCharsets.UTF_8), true, jsonBytes, null, nonNumericNumbersSupported);
        } else {
            return new GsonJsonReader(new StringReader(jsonString), true, null, jsonString,
                nonNumericNumbersSupported);
        }
    }

    @Override
    public void close() throws IOException {
        JSON_READER_CLOSE.close(gsonJsonReader);
    }

    /*
     * Maps the GSON JsonToken to the azure-json JsonToken.
     */
    private JsonToken mapToken(Enum<?> token) {
        if (token == null) {
            return null;
        }

        if (token.getClass() != GSON_JSON_TOKEN_ENUM) {
            throw new IllegalStateException("Unsupported enum, pass a Gson JsonToken");
        }

        switch (token.name()) {
            case "BEGIN_OBJECT": return JsonToken.START_OBJECT;
            case "END_OBJECT": return JsonToken.END_OBJECT;
            case "BEGIN_ARRAY": return JsonToken.START_ARRAY;
            case "END_ARRAY": return JsonToken.END_ARRAY;
            case "NAME": return JsonToken.FIELD_NAME;
            case "STRING": return JsonToken.STRING;
            case "NUMBER": return JsonToken.NUMBER;
            case "BOOLEAN": return JsonToken.BOOLEAN;
            case "NULL": return JsonToken.NULL;
            case "END_DOCUMENT": return JsonToken.END_DOCUMENT;
            default: throw new IllegalStateException("Unsupported token type: '" + token + "'.");
        }
    }

    // Per-operation functional interfaces; one per Gson JsonReader method bound above.
    @FunctionalInterface
    private interface JsonReaderConstructor {
        Object createJsonReader(Reader reader);
    }

    @FunctionalInterface
    private interface JsonReaderSetLenient {
        void setLenient(Object jsonReader, boolean lenient);
    }

    @FunctionalInterface
    private interface JsonReaderClose {
        void close(Object jsonReader) throws IOException;
    }

    @FunctionalInterface
    private interface JsonReaderPeek {
        Object peek(Object jsonReader) throws IOException;
    }

    @FunctionalInterface
    private interface JsonReaderBeginObject {
        void beginObject(Object jsonReader) throws IOException;
    }

    @FunctionalInterface
    private interface JsonReaderEndObject {
        void endObject(Object jsonReader) throws IOException;
    }

    @FunctionalInterface
    private interface JsonReaderBeginArray {
        void beginArray(Object jsonReader) throws IOException;
    }

    @FunctionalInterface
    private interface JsonReaderEndArray {
        void endArray(Object jsonReader) throws IOException;
    }

    @FunctionalInterface
    private interface JsonReaderNextNull {
        void nextNull(Object jsonReader) throws IOException;
    }

    @FunctionalInterface
    private interface JsonReaderNextName {
        String nextName(Object jsonReader) throws IOException;
    }

    @FunctionalInterface
    private interface JsonReaderNextString {
        String nextString(Object jsonReader) throws IOException;
    }

    @FunctionalInterface
    private interface JsonReaderNextBoolean {
        boolean nextBoolean(Object jsonReader) throws IOException;
    }

    @FunctionalInterface
    private interface JsonReaderNextInt {
        int nextInt(Object jsonReader) throws IOException;
    }

    @FunctionalInterface
    private interface JsonReaderNextLong {
        long nextLong(Object jsonReader) throws IOException;
    }

    @FunctionalInterface
    private interface JsonReaderNextDouble {
        double nextDouble(Object jsonReader) throws IOException;
    }

    @FunctionalInterface
    private interface JsonReaderSkipValue {
        void skipValue(Object jsonReader) throws IOException;
    }
}
These catches are avoided by using `LambdaMetafactory`, an approach which is nearing completion.
/**
 * Advances the reader to the next JSON token and returns it.
 * <p>
 * Before peeking, this closes out or descends into the structure indicated by the current
 * token, and drains any scalar value that was never read by a {@code get*} call so the
 * underlying Gson reader can move forward.
 *
 * @return The next {@link JsonToken}, or the cached END_DOCUMENT token once the stream is done.
 * @throws IOException If reading from the underlying JSON source fails.
 */
public JsonToken nextToken() throws IOException {
    // Once END_DOCUMENT has been reached the reader stays pinned there.
    if (complete) {
        return currentToken;
    }

    try {
        // Enter or exit the container (or consume the null) represented by the current token.
        if (currentToken == JsonToken.START_OBJECT) {
            beginObjectMethod.invoke(gsonReader);
        } else if (currentToken == JsonToken.END_OBJECT) {
            endObjectMethod.invoke(gsonReader);
        } else if (currentToken == JsonToken.START_ARRAY) {
            beginArrayMethod.invoke(gsonReader);
        } else if (currentToken == JsonToken.END_ARRAY) {
            endArrayMethod.invoke(gsonReader);
        } else if (currentToken == JsonToken.NULL) {
            nextNullMethod.invoke(gsonReader);
        }

        // If the current scalar was never consumed by a get* call, drain it now.
        if (!consumed && currentToken != null) {
            switch (currentToken) {
                case FIELD_NAME:
                    nextNameMethod.invoke(gsonReader);
                    break;

                case BOOLEAN:
                    nextBooleanMethod.invoke(gsonReader);
                    break;

                case NUMBER:
                    nextDoubleMethod.invoke(gsonReader);
                    break;

                case STRING:
                    nextStringMethod.invoke(gsonReader);
                    break;

                default:
                    break;
            }
        }

        currentToken = mapToken((Enum<?>) peekMethod.invoke(gsonReader));
        if (currentToken == JsonToken.END_DOCUMENT) {
            complete = true;
        }

        consumed = false;
        return currentToken;
    } catch (Throwable e) {
        // FIX: MethodHandle.invoke throws the underlying exception directly (it is NOT
        // wrapped in InvocationTargetException), so 'e' itself is the IOException /
        // RuntimeException. The original rethrew 'e.getCause()', which is typically
        // null here and produced an NPE instead of the real failure.
        if (e instanceof IOException) {
            throw (IOException) e;
        } else if (e instanceof RuntimeException) {
            throw (RuntimeException) e;
        } else if (e instanceof Error) {
            throw (Error) e;
        } else {
            throw new IllegalStateException(e);
        }
    }
}
}
/**
 * Moves to the next token in the JSON stream and returns it.
 * <p>
 * Structural tokens (object/array begin and end, null) are acknowledged on the underlying
 * Gson reader before peeking, and any scalar left unread by the caller is drained so the
 * stream can advance.
 *
 * @return The next {@link JsonToken}; once END_DOCUMENT is seen it is returned forever after.
 * @throws IOException If the underlying JSON source cannot be read.
 */
public JsonToken nextToken() throws IOException {
    // After END_DOCUMENT the reader is exhausted; keep returning the cached token.
    if (complete) {
        return currentToken;
    }

    // Step 1: acknowledge the structure of the token we are currently positioned on.
    if (currentToken != null) {
        switch (currentToken) {
            case START_OBJECT:
                JSON_READER_BEGIN_OBJECT.beginObject(gsonJsonReader);
                break;

            case END_OBJECT:
                JSON_READER_END_OBJECT.endObject(gsonJsonReader);
                break;

            case START_ARRAY:
                JSON_READER_BEGIN_ARRAY.beginArray(gsonJsonReader);
                break;

            case END_ARRAY:
                JSON_READER_END_ARRAY.endArray(gsonJsonReader);
                break;

            case NULL:
                JSON_READER_NEXT_NULL.nextNull(gsonJsonReader);
                break;

            default:
                break;
        }
    }

    // Step 2: if the caller never read the current scalar, consume it so Gson can advance.
    if (!consumed && currentToken != null) {
        if (currentToken == JsonToken.FIELD_NAME) {
            JSON_READER_NEXT_NAME.nextName(gsonJsonReader);
        } else if (currentToken == JsonToken.BOOLEAN) {
            JSON_READER_NEXT_BOOLEAN.nextBoolean(gsonJsonReader);
        } else if (currentToken == JsonToken.NUMBER) {
            JSON_READER_NEXT_DOUBLE.nextDouble(gsonJsonReader);
        } else if (currentToken == JsonToken.STRING) {
            JSON_READER_NEXT_STRING.nextString(gsonJsonReader);
        }
    }

    // Step 3: peek at what follows and translate Gson's token into the azure-json token.
    currentToken = mapToken((Enum<?>) JSON_READER_PEEK.peek(gsonJsonReader));
    if (currentToken == JsonToken.END_DOCUMENT) {
        complete = true;
    }

    consumed = false;
    return currentToken;
}
class GsonJsonReader extends JsonReader { private static boolean initialized = false; private static boolean attemptedInitialization = false; private static final MethodHandles.Lookup publicLookup = MethodHandles.publicLookup(); private static Class<?> gsonTokenEnum = null; private final Object gsonReader; private JsonToken currentToken; private static MethodHandle gsonReaderConstructor; private static MethodHandle setLenientMethod; private static MethodHandle peekMethod; private static MethodHandle closeMethod; private static MethodHandle beginObjectMethod; private static MethodHandle endObjectMethod; private static MethodHandle beginArrayMethod; private static MethodHandle endArrayMethod; private static MethodHandle nextNullMethod; private static MethodHandle nextBooleanMethod; private static MethodHandle nextStringMethod; private static MethodHandle nextDoubleMethod; private static MethodHandle nextIntMethod; private static MethodHandle nextLongMethod; private static MethodHandle nextNameMethod; private static MethodHandle skipValueMethod; private final byte[] jsonBytes; private final String jsonString; private final boolean resetSupported; private final boolean nonNumericNumbersSupported; private boolean consumed = false; private boolean complete = false; /** * Constructs an instance of {@link JsonReader} from a {@code byte[]}. * * @param json JSON {@code byte[]}. * @param options {@link JsonOptions} to configure the creation of the {@link JsonReader}. * @return An instance of {@link JsonReader}. * @throws NullPointerException If {@code json} is null. */ static JsonReader fromBytes(byte[] json, JsonOptions options) { Objects.requireNonNull(json, "'json' cannot be null."); return new GsonJsonReader(new InputStreamReader(new ByteArrayInputStream(json), StandardCharsets.UTF_8), true, json, null, options); } /** * Constructs an instance of {@link JsonReader} from a String. * * @param json JSON String. 
* @param options {@link JsonOptions} to configure the creation of the {@link JsonReader}. * @return An instance of {@link JsonReader}. * @throws NullPointerException If {@code json} is null. */ static JsonReader fromString(String json, JsonOptions options) { Objects.requireNonNull(json, "'json' cannot be null."); return new GsonJsonReader(new StringReader(json), true, null, json, options); } /** * Constructs an instance of {@link JsonReader} from an {@link InputStream}. * * @param json JSON {@link InputStream}. * @param options {@link JsonOptions} to configure the creation of the {@link JsonReader}. * @return An instance of {@link JsonReader}. * @throws NullPointerException If {@code json} is null. */ static JsonReader fromStream(InputStream json, JsonOptions options) { Objects.requireNonNull(json, "'json' cannot be null."); return new GsonJsonReader(new InputStreamReader(json, StandardCharsets.UTF_8), json.markSupported(), null, null, options); } /** * Constructs an instance of {@link GsonJsonReader} from a {@link Reader}. * * @param json JSON {@link Reader}. * @param options {@link JsonOptions} to configure the creation of the {@link JsonReader}. * @return An instance of {@link GsonJsonReader}. * @throws NullPointerException If {@code json} is null. 
*/ static JsonReader fromReader(Reader json, JsonOptions options) { Objects.requireNonNull(json, "'json' cannot be null."); return new GsonJsonReader(json, json.markSupported(), null, null, options); } private GsonJsonReader(Reader reader, boolean resetSupported, byte[] jsonBytes, String jsonString, JsonOptions options) { this(reader, resetSupported, jsonBytes, jsonString, options.isNonNumericNumbersSupported()); } private GsonJsonReader(Reader reader, boolean resetSupported, byte[] jsonBytes, String jsonString, boolean nonNumericNumbersSupported) { try { initialize(); gsonReader = gsonReaderConstructor.invoke(reader); setLenientMethod.invoke(gsonReader, nonNumericNumbersSupported); } catch (Throwable e) { if (e instanceof RuntimeException) { throw (RuntimeException) e.getCause(); } else { throw new IllegalStateException("Gson is not present or an incorrect version is present."); } } this.resetSupported = resetSupported; this.jsonBytes = jsonBytes; this.jsonString = jsonString; this.nonNumericNumbersSupported = nonNumericNumbersSupported; } static synchronized void initialize() throws ReflectiveOperationException { if (initialized) { return; } else if (attemptedInitialization) { throw new ReflectiveOperationException("Initialization of GsonJsonReader has failed in the past."); } attemptedInitialization = true; gsonTokenEnum = Class.forName("com.google.gson.stream.JsonToken"); Class<?> gsonReaderClass = Class.forName("com.google.gson.stream.JsonReader"); MethodType voidMT = methodType(void.class); gsonReaderConstructor = publicLookup.findConstructor(gsonReaderClass, methodType(void.class, Reader.class)); setLenientMethod = publicLookup.findVirtual(gsonReaderClass, "setLenient", methodType(void.class, boolean.class)); peekMethod = publicLookup.findVirtual(gsonReaderClass, "peek", methodType(gsonTokenEnum)); closeMethod = publicLookup.findVirtual(gsonReaderClass, "close", voidMT); beginObjectMethod = publicLookup.findVirtual(gsonReaderClass, "beginObject", voidMT); 
endObjectMethod = publicLookup.findVirtual(gsonReaderClass, "endObject", voidMT); beginArrayMethod = publicLookup.findVirtual(gsonReaderClass, "beginArray", voidMT); endArrayMethod = publicLookup.findVirtual(gsonReaderClass, "endArray", voidMT); nextNullMethod = publicLookup.findVirtual(gsonReaderClass, "nextNull", voidMT); nextBooleanMethod = publicLookup.findVirtual(gsonReaderClass, "nextBoolean", methodType(boolean.class)); nextStringMethod = publicLookup.findVirtual(gsonReaderClass, "nextString", methodType(String.class)); nextDoubleMethod = publicLookup.findVirtual(gsonReaderClass, "nextDouble", methodType(double.class)); nextIntMethod = publicLookup.findVirtual(gsonReaderClass, "nextInt", methodType(int.class)); nextLongMethod = publicLookup.findVirtual(gsonReaderClass, "nextLong", methodType(long.class)); nextNameMethod = publicLookup.findVirtual(gsonReaderClass, "nextName", methodType(String.class)); skipValueMethod = publicLookup.findVirtual(gsonReaderClass, "skipValue", voidMT); initialized = true; } @Override public JsonToken currentToken() { return currentToken; } @Override @Override public byte[] getBinary() throws IOException { consumed = true; try { if (currentToken == JsonToken.NULL) { nextNullMethod.invoke(gsonReader); return null; } else { return Base64.getDecoder().decode((String) nextStringMethod.invoke(gsonReader)); } } catch (Throwable e) { if (e instanceof IOException) { throw (IOException) e.getCause(); } else { throw (RuntimeException) e.getCause(); } } } @Override public boolean getBoolean() throws IOException { consumed = true; try { return (boolean) nextBooleanMethod.invoke(gsonReader); } catch (Throwable e) { if (e instanceof IOException) { throw (IOException) e.getCause(); } else { throw (RuntimeException) e.getCause(); } } } @Override public float getFloat() throws IOException { consumed = true; try { return (float) (double) nextDoubleMethod.invoke(gsonReader); } catch (Throwable e) { if (e instanceof IOException) { throw 
(IOException) e.getCause(); } else { throw (RuntimeException) e.getCause(); } } } @Override public double getDouble() throws IOException { consumed = true; try { return (double) nextDoubleMethod.invoke(gsonReader); } catch (Throwable e) { if (e instanceof IOException) { throw (IOException) e.getCause(); } else { throw (RuntimeException) e.getCause(); } } } @Override public int getInt() throws IOException { consumed = true; try { return (int) nextIntMethod.invoke(gsonReader); } catch (Throwable e) { if (e instanceof IOException) { throw (IOException) e.getCause(); } else { throw (RuntimeException) e.getCause(); } } } @Override public long getLong() throws IOException { consumed = true; try { return (long) nextLongMethod.invoke(gsonReader); } catch (Throwable e) { if (e instanceof IOException) { throw (IOException) e.getCause(); } else { throw (RuntimeException) e.getCause(); } } } @Override public String getString() throws IOException { consumed = true; try { if (currentToken == JsonToken.NULL) { return null; } else { return (String) nextStringMethod.invoke(gsonReader); } } catch (Throwable e) { if (e instanceof IOException) { throw (IOException) e.getCause(); } else { throw (RuntimeException) e.getCause(); } } } @Override public String getFieldName() throws IOException { consumed = true; try { return (String) nextNameMethod.invoke(gsonReader); } catch (Throwable e) { if (e instanceof IOException) { throw (IOException) e.getCause(); } else { throw (RuntimeException) e.getCause(); } } } @Override public void skipChildren() throws IOException { consumed = true; try { skipValueMethod.invoke(gsonReader); } catch (Throwable e) { if (e instanceof IOException) { throw (IOException) e.getCause(); } else { throw (RuntimeException) e.getCause(); } } } @Override public JsonReader bufferObject() throws IOException { if (currentToken == JsonToken.START_OBJECT || (currentToken == JsonToken.FIELD_NAME && nextToken() == JsonToken.START_OBJECT)) { consumed = true; StringBuilder 
bufferedObject = new StringBuilder(); readChildren(bufferedObject); String json = bufferedObject.toString(); return new GsonJsonReader(new StringReader(json), true, null, json, nonNumericNumbersSupported); } else { throw new IllegalStateException("Cannot buffer a JSON object from a non-object, non-field name " + "starting location. Starting location: " + currentToken()); } } @Override public boolean resetSupported() { return resetSupported; } @Override public JsonReader reset() { if (!resetSupported) { throw new IllegalStateException("'reset' isn't supported by this JsonReader."); } if (jsonBytes != null) { return new GsonJsonReader(new InputStreamReader(new ByteArrayInputStream(jsonBytes), StandardCharsets.UTF_8), true, jsonBytes, null, nonNumericNumbersSupported); } else { return new GsonJsonReader(new StringReader(jsonString), true, null, jsonString, nonNumericNumbersSupported); } } @Override public void close() throws IOException { try { closeMethod.invoke(gsonReader); } catch (Throwable e) { if (e instanceof IOException) { throw (IOException) e.getCause(); } else { throw (RuntimeException) e.getCause(); } } } /* * Maps the GSON JsonToken to the azure-json JsonToken. */ private JsonToken mapToken(Enum<?> token) { if (token == null) { return null; } if (token.getClass() != gsonTokenEnum) { throw new IllegalStateException("Unsupported enum, pass a Gson JsonToken"); } switch (token.name()) { case "BEGIN_OBJECT": return JsonToken.START_OBJECT; case "END_OBJECT": return JsonToken.END_OBJECT; case "BEGIN_ARRAY": return JsonToken.START_ARRAY; case "END_ARRAY": return JsonToken.END_ARRAY; case "NAME": return JsonToken.FIELD_NAME; case "STRING": return JsonToken.STRING; case "NUMBER": return JsonToken.NUMBER; case "BOOLEAN": return JsonToken.BOOLEAN; case "NULL": return JsonToken.NULL; case "END_DOCUMENT": return JsonToken.END_DOCUMENT; default: throw new IllegalStateException("Unsupported token type: '" + token + "'."); } } }
class GsonJsonReader extends JsonReader { private static final Class<?> GSON_JSON_TOKEN_ENUM; private static final JsonReaderConstructor JSON_READER_CONSTRUCTOR; private static final JsonReaderSetLenient JSON_READER_SET_LENIENT; private static final JsonReaderClose JSON_READER_CLOSE; private static final JsonReaderPeek JSON_READER_PEEK; private static final JsonReaderBeginObject JSON_READER_BEGIN_OBJECT; private static final JsonReaderEndObject JSON_READER_END_OBJECT; private static final JsonReaderBeginArray JSON_READER_BEGIN_ARRAY; private static final JsonReaderEndArray JSON_READER_END_ARRAY; private static final JsonReaderNextNull JSON_READER_NEXT_NULL; private static final JsonReaderNextName JSON_READER_NEXT_NAME; private static final JsonReaderNextString JSON_READER_NEXT_STRING; private static final JsonReaderNextBoolean JSON_READER_NEXT_BOOLEAN; private static final JsonReaderNextInt JSON_READER_NEXT_INT; private static final JsonReaderNextLong JSON_READER_NEXT_LONG; private static final JsonReaderNextDouble JSON_READER_NEXT_DOUBLE; private static final JsonReaderSkipValue JSON_READER_SKIP_VALUE; static final boolean INITIALIZED; static { final MethodHandles.Lookup lookup = MethodHandles.lookup(); final MethodType voidMT = methodType(void.class); final MethodType voidObjectMT = methodType(void.class, Object.class); Class<?> gsonJsonTokenEnum = null; JsonReaderConstructor jsonReaderConstructor = null; JsonReaderSetLenient jsonReaderSetLenient = null; JsonReaderClose jsonReaderClose = null; JsonReaderPeek jsonReaderPeek = null; JsonReaderBeginObject jsonReaderBeginObject = null; JsonReaderEndObject jsonReaderEndObject = null; JsonReaderBeginArray jsonReaderBeginArray = null; JsonReaderEndArray jsonReaderEndArray = null; JsonReaderNextNull jsonReaderNextNull = null; JsonReaderNextName jsonReaderNextName = null; JsonReaderNextString jsonReaderNextString = null; JsonReaderNextBoolean jsonReaderNextBoolean = null; JsonReaderNextInt jsonReaderNextInt = null; 
JsonReaderNextLong jsonReaderNextLong = null; JsonReaderNextDouble jsonReaderNextDouble = null; JsonReaderSkipValue jsonReaderSkipValue = null; boolean initialized = false; try { Class<?> gsonJsonReaderClass = Class.forName("com.google.gson.stream.JsonReader"); gsonJsonTokenEnum = Class.forName("com.google.gson.stream.JsonToken"); MethodHandle gsonReaderConstructor = lookup.findConstructor(gsonJsonReaderClass, methodType(void.class, Reader.class)); jsonReaderConstructor = (JsonReaderConstructor) LambdaMetafactory.metafactory(lookup, "createJsonReader", methodType(JsonReaderConstructor.class), methodType(Object.class, Reader.class), gsonReaderConstructor, gsonReaderConstructor.type()).getTarget().invoke(); jsonReaderSetLenient = createMetaFactory("setLenient", gsonJsonReaderClass, methodType(void.class, boolean.class), JsonReaderSetLenient.class, methodType(void.class, Object.class, boolean.class), lookup); jsonReaderClose = createMetaFactory("close", gsonJsonReaderClass, voidMT, JsonReaderClose.class, voidObjectMT, lookup); jsonReaderPeek = createMetaFactory("peek", gsonJsonReaderClass, methodType(gsonJsonTokenEnum), JsonReaderPeek.class, methodType(Object.class, Object.class), lookup); jsonReaderBeginObject = createMetaFactory("beginObject", gsonJsonReaderClass, voidMT, JsonReaderBeginObject.class, voidObjectMT, lookup); jsonReaderEndObject = createMetaFactory("endObject", gsonJsonReaderClass, voidMT, JsonReaderEndObject.class, voidObjectMT, lookup); jsonReaderBeginArray = createMetaFactory("beginArray", gsonJsonReaderClass, voidMT, JsonReaderBeginArray.class, voidObjectMT, lookup); jsonReaderEndArray = createMetaFactory("endArray", gsonJsonReaderClass, voidMT, JsonReaderEndArray.class, voidObjectMT, lookup); jsonReaderNextNull = createMetaFactory("nextNull", gsonJsonReaderClass, voidMT, JsonReaderNextNull.class, voidObjectMT, lookup); jsonReaderNextName = createMetaFactory("nextName", gsonJsonReaderClass, methodType(String.class), JsonReaderNextName.class, 
methodType(String.class, Object.class), lookup); jsonReaderNextString = createMetaFactory("nextString", gsonJsonReaderClass, methodType(String.class), JsonReaderNextString.class, methodType(String.class, Object.class), lookup); jsonReaderNextBoolean = createMetaFactory("nextBoolean", gsonJsonReaderClass, methodType(boolean.class), JsonReaderNextBoolean.class, methodType(boolean.class, Object.class), lookup); jsonReaderNextInt = createMetaFactory("nextInt", gsonJsonReaderClass, methodType(int.class), JsonReaderNextInt.class, methodType(int.class, Object.class), lookup); jsonReaderNextLong = createMetaFactory("nextLong", gsonJsonReaderClass, methodType(long.class), JsonReaderNextLong.class, methodType(long.class, Object.class), lookup); jsonReaderNextDouble = createMetaFactory("nextDouble", gsonJsonReaderClass, methodType(double.class), JsonReaderNextDouble.class, methodType(double.class, Object.class), lookup); jsonReaderSkipValue = createMetaFactory("skipValue", gsonJsonReaderClass, voidMT, JsonReaderSkipValue.class, voidObjectMT, lookup); initialized = true; } catch (Throwable e) { if (e instanceof RuntimeException) { throw (RuntimeException) e; } else if (e instanceof Error) { throw (Error) e; } } GSON_JSON_TOKEN_ENUM = gsonJsonTokenEnum; JSON_READER_CONSTRUCTOR = jsonReaderConstructor; JSON_READER_SET_LENIENT = jsonReaderSetLenient; JSON_READER_CLOSE = jsonReaderClose; JSON_READER_PEEK = jsonReaderPeek; JSON_READER_BEGIN_OBJECT = jsonReaderBeginObject; JSON_READER_END_OBJECT = jsonReaderEndObject; JSON_READER_BEGIN_ARRAY = jsonReaderBeginArray; JSON_READER_END_ARRAY = jsonReaderEndArray; JSON_READER_NEXT_NULL = jsonReaderNextNull; JSON_READER_NEXT_NAME = jsonReaderNextName; JSON_READER_NEXT_STRING = jsonReaderNextString; JSON_READER_NEXT_BOOLEAN = jsonReaderNextBoolean; JSON_READER_NEXT_INT = jsonReaderNextInt; JSON_READER_NEXT_LONG = jsonReaderNextLong; JSON_READER_NEXT_DOUBLE = jsonReaderNextDouble; JSON_READER_SKIP_VALUE = jsonReaderSkipValue; INITIALIZED = 
// --- GsonJsonReader internals. The class header and the reflective Gson handles
// (INITIALIZED, GSON_JSON_TOKEN_ENUM, JSON_READER_* method adapters) are declared earlier in the file. ---
// Instance state: the reflectively created Gson JsonReader, the last token surfaced to callers, and the
// original JSON payload (byte[] or String) retained so reset() can replay it. The static factories wrap
// byte[]/String/InputStream/Reader inputs; reset is always supported for byte[]/String sources and only
// when markSupported() for stream/reader sources. The boolean-flag constructor throws IllegalStateException
// when no compatible Gson is on the classpath (INITIALIZED is false) and configures leniency for
// non-numeric numbers (NaN/Infinity) via the reflective setLenient adapter.
initialized; } private final Object gsonJsonReader; private JsonToken currentToken; private final byte[] jsonBytes; private final String jsonString; private final boolean resetSupported; private final boolean nonNumericNumbersSupported; private boolean consumed = false; private boolean complete = false; static JsonReader fromBytes(byte[] json, JsonOptions options) { Objects.requireNonNull(json, "'json' cannot be null."); return new GsonJsonReader(new InputStreamReader(new ByteArrayInputStream(json), StandardCharsets.UTF_8), true, json, null, options); } static JsonReader fromString(String json, JsonOptions options) { Objects.requireNonNull(json, "'json' cannot be null."); return new GsonJsonReader(new StringReader(json), true, null, json, options); } static JsonReader fromStream(InputStream json, JsonOptions options) { Objects.requireNonNull(json, "'json' cannot be null."); return new GsonJsonReader(new InputStreamReader(json, StandardCharsets.UTF_8), json.markSupported(), null, null, options); } static JsonReader fromReader(Reader json, JsonOptions options) { Objects.requireNonNull(json, "'json' cannot be null."); return new GsonJsonReader(json, json.markSupported(), null, null, options); } private GsonJsonReader(Reader reader, boolean resetSupported, byte[] jsonBytes, String jsonString, JsonOptions options) { this(reader, resetSupported, jsonBytes, jsonString, options.isNonNumericNumbersSupported()); } private GsonJsonReader(Reader reader, boolean resetSupported, byte[] jsonBytes, String jsonString, boolean nonNumericNumbersSupported) { if (!INITIALIZED) { throw new IllegalStateException("No compatible version of Gson is present on the classpath."); } gsonJsonReader = JSON_READER_CONSTRUCTOR.createJsonReader(reader); JSON_READER_SET_LENIENT.setLenient(gsonJsonReader, nonNumericNumbersSupported); this.resetSupported = resetSupported; this.jsonBytes = jsonBytes; this.jsonString = jsonString; this.nonNumericNumbersSupported = nonNumericNumbersSupported; } @Override
// currentToken() accessor plus the typed value getters. Each getter sets consumed = true before
// delegating to the corresponding reflective JSON_READER_NEXT_* adapter. getBinary()/getString()
// return null when positioned on a JSON null (getBinary also consumes the null token);
// getBinary Base64-decodes the string payload. bufferObject() copies the current JSON object into a
// new String-backed GsonJsonReader — valid only from START_OBJECT or from a FIELD_NAME whose next
// token is START_OBJECT; any other position throws IllegalStateException.
// NOTE(review): the doubled "@Override @Override" below suggests a method (likely nextToken())
// was elided from this excerpt — confirm against the full source.
public JsonToken currentToken() { return currentToken; } @Override @Override public byte[] getBinary() throws IOException { consumed = true; if (currentToken == JsonToken.NULL) { JSON_READER_NEXT_NULL.nextNull(gsonJsonReader); return null; } else { return Base64.getDecoder().decode(JSON_READER_NEXT_STRING.nextString(gsonJsonReader)); } } @Override public boolean getBoolean() throws IOException { consumed = true; return JSON_READER_NEXT_BOOLEAN.nextBoolean(gsonJsonReader); } @Override public float getFloat() throws IOException { consumed = true; return (float) JSON_READER_NEXT_DOUBLE.nextDouble(gsonJsonReader); } @Override public double getDouble() throws IOException { consumed = true; return JSON_READER_NEXT_DOUBLE.nextDouble(gsonJsonReader); } @Override public int getInt() throws IOException { consumed = true; return JSON_READER_NEXT_INT.nextInt(gsonJsonReader); } @Override public long getLong() throws IOException { consumed = true; return JSON_READER_NEXT_LONG.nextLong(gsonJsonReader); } @Override public String getString() throws IOException { consumed = true; if (currentToken == JsonToken.NULL) { return null; } else { return JSON_READER_NEXT_STRING.nextString(gsonJsonReader); } } @Override public String getFieldName() throws IOException { consumed = true; return JSON_READER_NEXT_NAME.nextName(gsonJsonReader); } @Override public void skipChildren() throws IOException { consumed = true; JSON_READER_SKIP_VALUE.skipValue(gsonJsonReader); } @Override public JsonReader bufferObject() throws IOException { if (currentToken == JsonToken.START_OBJECT || (currentToken == JsonToken.FIELD_NAME && nextToken() == JsonToken.START_OBJECT)) { consumed = true; StringBuilder bufferedObject = new StringBuilder(); readChildren(bufferedObject); String json = bufferedObject.toString(); return new GsonJsonReader(new StringReader(json), true, null, json, nonNumericNumbersSupported); } else { throw new IllegalStateException("Cannot buffer a JSON object from a non-object, non-field name "
// reset() rebuilds the reader from the retained byte[] or String payload and throws when the source
// is not replayable. close() delegates to the reflective close adapter. mapToken translates Gson's
// JsonToken enum constants (matched by name) to azure-json JsonToken values, rejecting any enum that
// is not Gson's token type. The @FunctionalInterface declarations that follow are reflection adapters
// over Gson's JsonReader methods.
+ "starting location. Starting location: " + currentToken()); } } @Override public boolean resetSupported() { return resetSupported; } @Override public JsonReader reset() { if (!resetSupported) { throw new IllegalStateException("'reset' isn't supported by this JsonReader."); } if (jsonBytes != null) { return new GsonJsonReader(new InputStreamReader(new ByteArrayInputStream(jsonBytes), StandardCharsets.UTF_8), true, jsonBytes, null, nonNumericNumbersSupported); } else { return new GsonJsonReader(new StringReader(jsonString), true, null, jsonString, nonNumericNumbersSupported); } } @Override public void close() throws IOException { JSON_READER_CLOSE.close(gsonJsonReader); } /* * Maps the GSON JsonToken to the azure-json JsonToken. */ private JsonToken mapToken(Enum<?> token) { if (token == null) { return null; } if (token.getClass() != GSON_JSON_TOKEN_ENUM) { throw new IllegalStateException("Unsupported enum, pass a Gson JsonToken"); } switch (token.name()) { case "BEGIN_OBJECT": return JsonToken.START_OBJECT; case "END_OBJECT": return JsonToken.END_OBJECT; case "BEGIN_ARRAY": return JsonToken.START_ARRAY; case "END_ARRAY": return JsonToken.END_ARRAY; case "NAME": return JsonToken.FIELD_NAME; case "STRING": return JsonToken.STRING; case "NUMBER": return JsonToken.NUMBER; case "BOOLEAN": return JsonToken.BOOLEAN; case "NULL": return JsonToken.NULL; case "END_DOCUMENT": return JsonToken.END_DOCUMENT; default: throw new IllegalStateException("Unsupported token type: '" + token + "'."); } } @FunctionalInterface private interface JsonReaderConstructor { Object createJsonReader(Reader reader); } @FunctionalInterface private interface JsonReaderSetLenient { void setLenient(Object jsonReader, boolean lenient); } @FunctionalInterface private interface JsonReaderClose { void close(Object jsonReader) throws IOException; } @FunctionalInterface private interface JsonReaderPeek { Object peek(Object jsonReader) throws IOException; } @FunctionalInterface private interface
// Remaining reflection adapter interfaces (object/array begin and end, nextNull/Name/String/Boolean/
// Int/Long/Double, skipValue) and the closing brace of the class.
JsonReaderBeginObject { void beginObject(Object jsonReader) throws IOException; } @FunctionalInterface private interface JsonReaderEndObject { void endObject(Object jsonReader) throws IOException; } @FunctionalInterface private interface JsonReaderBeginArray { void beginArray(Object jsonReader) throws IOException; } @FunctionalInterface private interface JsonReaderEndArray { void endArray(Object jsonReader) throws IOException; } @FunctionalInterface private interface JsonReaderNextNull { void nextNull(Object jsonReader) throws IOException; } @FunctionalInterface private interface JsonReaderNextName { String nextName(Object jsonReader) throws IOException; } @FunctionalInterface private interface JsonReaderNextString { String nextString(Object jsonReader) throws IOException; } @FunctionalInterface private interface JsonReaderNextBoolean { boolean nextBoolean(Object jsonReader) throws IOException; } @FunctionalInterface private interface JsonReaderNextInt { int nextInt(Object jsonReader) throws IOException; } @FunctionalInterface private interface JsonReaderNextLong { long nextLong(Object jsonReader) throws IOException; } @FunctionalInterface private interface JsonReaderNextDouble { double nextDouble(Object jsonReader) throws IOException; } @FunctionalInterface private interface JsonReaderSkipValue { void skipValue(Object jsonReader) throws IOException; } }
Using a String constant for x-ms-encryption-context to avoid a dependency on the azure-core package.
static String getEncryptionContext(Response<?> r) { if (r == null) { return null; } return r.getHeaders().getValue("x-ms-encryption-context"); }
return r.getHeaders().getValue("x-ms-encryption-context");
static String getEncryptionContext(Response<?> r) { if (r == null) { return null; } return r.getHeaders().getValue(X_MS_ENCRYPTION_CONTEXT); }
// Transforms: package-private mapper utility translating between azure-storage-blob model types and
// their azure-storage-file-datalake counterparts. Every mapper is null-tolerant: a null input yields
// a null output. EPOCH_CONVERSION is the millisecond offset between the Unix epoch (1970-01-01) and
// the Windows FILETIME epoch (1601-01-01), computed once in the static initializer.
class Transforms { private static final String SERIALIZATION_MESSAGE = String.format("'serialization' must be one of %s, %s, %s or " + "%s.", FileQueryJsonSerialization.class.getSimpleName(), FileQueryDelimitedSerialization.class.getSimpleName(), FileQueryArrowSerialization.class.getSimpleName(), FileQueryParquetSerialization.class.getSimpleName()); private static final long EPOCH_CONVERSION; static { GregorianCalendar unixEpoch = new GregorianCalendar(); unixEpoch.clear(); unixEpoch.set(1970, Calendar.JANUARY, 1, 0, 0, 0); GregorianCalendar windowsEpoch = new GregorianCalendar(); windowsEpoch.clear(); windowsEpoch.set(1601, Calendar.JANUARY, 1, 0, 0, 0); EPOCH_CONVERSION = unixEpoch.getTimeInMillis() - windowsEpoch.getTimeInMillis(); } static com.azure.storage.blob.models.PublicAccessType toBlobPublicAccessType(PublicAccessType fileSystemPublicAccessType) { if (fileSystemPublicAccessType == null) { return null; } return com.azure.storage.blob.models.PublicAccessType.fromString(fileSystemPublicAccessType.toString()); } private static LeaseDurationType toDataLakeLeaseDurationType(com.azure.storage.blob.models.LeaseDurationType blobLeaseDurationType) { if (blobLeaseDurationType == null) { return null; } return LeaseDurationType.fromString(blobLeaseDurationType.toString()); } private static LeaseStateType toDataLakeLeaseStateType(com.azure.storage.blob.models.LeaseStateType blobLeaseStateType) { if (blobLeaseStateType == null) { return null; } return LeaseStateType.fromString(blobLeaseStateType.toString()); } private static LeaseStatusType toDataLakeLeaseStatusType(com.azure.storage.blob.models.LeaseStatusType blobLeaseStatusType) { if (blobLeaseStatusType == null) { return null; } return LeaseStatusType.fromString(blobLeaseStatusType.toString()); } private static PublicAccessType toDataLakePublicAccessType(com.azure.storage.blob.models.PublicAccessType blobPublicAccessType) { if (blobPublicAccessType == null) { return null; } return
// More expandable-enum conversions (copy status, archive status, access tier) — all round-trip via
// toString()/fromString(). toFileSystemProperties also carries the container's encryption-scope
// settings over through the AccessorUtility back door.
PublicAccessType.fromString(blobPublicAccessType.toString()); } private static CopyStatusType toDataLakeCopyStatusType( com.azure.storage.blob.models.CopyStatusType blobCopyStatus) { if (blobCopyStatus == null) { return null; } return CopyStatusType.fromString(blobCopyStatus.toString()); } private static ArchiveStatus toDataLakeArchiveStatus( com.azure.storage.blob.models.ArchiveStatus blobArchiveStatus) { if (blobArchiveStatus == null) { return null; } return ArchiveStatus.fromString(blobArchiveStatus.toString()); } private static AccessTier toDataLakeAccessTier(com.azure.storage.blob.models.AccessTier blobAccessTier) { if (blobAccessTier == null) { return null; } return AccessTier.fromString(blobAccessTier.toString()); } static FileSystemProperties toFileSystemProperties(BlobContainerProperties blobContainerProperties) { if (blobContainerProperties == null) { return null; } FileSystemProperties fileSystemProperties = new FileSystemProperties(blobContainerProperties.getMetadata(), blobContainerProperties.getETag(), blobContainerProperties.getLastModified(), Transforms.toDataLakeLeaseDurationType(blobContainerProperties.getLeaseDuration()), Transforms.toDataLakeLeaseStateType(blobContainerProperties.getLeaseState()), Transforms.toDataLakeLeaseStatusType(blobContainerProperties.getLeaseStatus()), Transforms.toDataLakePublicAccessType(blobContainerProperties.getBlobPublicAccess()), blobContainerProperties.hasImmutabilityPolicy(), blobContainerProperties.hasLegalHold()); return AccessorUtility.getFileSystemPropertiesAccessor() .setFileSystemProperties(fileSystemProperties, blobContainerProperties.getDefaultEncryptionScope(), blobContainerProperties.isEncryptionScopeOverridePrevented()); } private static BlobContainerListDetails toBlobContainerListDetails(FileSystemListDetails fileSystemListDetails) { if (fileSystemListDetails == null) { return null; } return new BlobContainerListDetails() .setRetrieveMetadata(fileSystemListDetails.getRetrieveMetadata())
// List options, user-delegation-key, HTTP-header, and input-stream-option mappers — each copies the
// source object field-for-field into the corresponding target type.
.setRetrieveDeleted(fileSystemListDetails.getRetrieveDeleted()) .setRetrieveSystemContainers(fileSystemListDetails.getRetrieveSystemFileSystems()); } static ListBlobContainersOptions toListBlobContainersOptions(ListFileSystemsOptions listFileSystemsOptions) { if (listFileSystemsOptions == null) { return null; } return new ListBlobContainersOptions() .setDetails(toBlobContainerListDetails(listFileSystemsOptions.getDetails())) .setMaxResultsPerPage(listFileSystemsOptions.getMaxResultsPerPage()) .setPrefix(listFileSystemsOptions.getPrefix()); } static UserDelegationKey toDataLakeUserDelegationKey(com.azure.storage.blob.models.UserDelegationKey blobUserDelegationKey) { if (blobUserDelegationKey == null) { return null; } return new UserDelegationKey() .setSignedExpiry(blobUserDelegationKey.getSignedExpiry()) .setSignedObjectId(blobUserDelegationKey.getSignedObjectId()) .setSignedTenantId(blobUserDelegationKey.getSignedTenantId()) .setSignedService(blobUserDelegationKey.getSignedService()) .setSignedStart(blobUserDelegationKey.getSignedStart()) .setSignedVersion(blobUserDelegationKey.getSignedVersion()) .setValue(blobUserDelegationKey.getValue()); } static BlobHttpHeaders toBlobHttpHeaders(PathHttpHeaders pathHTTPHeaders) { if (pathHTTPHeaders == null) { return null; } return new BlobHttpHeaders() .setCacheControl(pathHTTPHeaders.getCacheControl()) .setContentDisposition(pathHTTPHeaders.getContentDisposition()) .setContentEncoding(pathHTTPHeaders.getContentEncoding()) .setContentLanguage(pathHTTPHeaders.getContentLanguage()) .setContentType(pathHTTPHeaders.getContentType()) .setContentMd5(pathHTTPHeaders.getContentMd5()); } static BlobInputStreamOptions toBlobInputStreamOptions(DataLakeFileInputStreamOptions options) { if (options == null) { return null; } return new BlobInputStreamOptions() .setBlockSize(options.getBlockSize()) .setRange(toBlobRange(options.getRange())) .setRequestConditions(toBlobRequestConditions(options.getRequestConditions()))
// Consistent-read-control translation throws IllegalArgumentException for unmapped values (only NONE
// and ETAG are mapped). toPathProperties copies the full BlobProperties surface into PathProperties,
// then attaches the encryption scope and the caller-supplied encryption context via AccessorUtility.
.setConsistentReadControl(toBlobConsistentReadControl(options.getConsistentReadControl())); } static com.azure.storage.blob.models.ConsistentReadControl toBlobConsistentReadControl( com.azure.storage.file.datalake.models.ConsistentReadControl datalakeConsistentReadControl) { if (datalakeConsistentReadControl == null) { return null; } switch (datalakeConsistentReadControl) { case NONE: return ConsistentReadControl.NONE; case ETAG: return ConsistentReadControl.ETAG; default: throw new IllegalArgumentException("Could not convert ConsistentReadControl"); } } static BlobRange toBlobRange(FileRange fileRange) { if (fileRange == null) { return null; } return new BlobRange(fileRange.getOffset(), fileRange.getCount()); } static com.azure.storage.blob.models.DownloadRetryOptions toBlobDownloadRetryOptions( DownloadRetryOptions dataLakeOptions) { if (dataLakeOptions == null) { return null; } return new com.azure.storage.blob.models.DownloadRetryOptions() .setMaxRetryRequests(dataLakeOptions.getMaxRetryRequests()); } static PathProperties toPathProperties(BlobProperties properties, String encryptionContext) { if (properties == null) { return null; } else { PathProperties pathProperties = new PathProperties(properties.getCreationTime(), properties.getLastModified(), properties.getETag(), properties.getBlobSize(), properties.getContentType(), properties.getContentMd5(), properties.getContentEncoding(), properties.getContentDisposition(), properties.getContentLanguage(), properties.getCacheControl(), Transforms.toDataLakeLeaseStatusType(properties.getLeaseStatus()), Transforms.toDataLakeLeaseStateType(properties.getLeaseState()), Transforms.toDataLakeLeaseDurationType(properties.getLeaseDuration()), properties.getCopyId(), Transforms.toDataLakeCopyStatusType(properties.getCopyStatus()), properties.getCopySource(), properties.getCopyProgress(), properties.getCopyCompletionTime(), properties.getCopyStatusDescription(), properties.isServerEncrypted(), properties.isIncrementalCopy(),
// Tail of toPathProperties, then container-item and container-item-properties mappers, and the start
// of toPathItem (blob listing Path -> datalake PathItem).
Transforms.toDataLakeAccessTier(properties.getAccessTier()), Transforms.toDataLakeArchiveStatus(properties.getArchiveStatus()), properties.getEncryptionKeySha256(), properties.getAccessTierChangeTime(), properties.getMetadata(), properties.getExpiresOn()); return AccessorUtility.getPathPropertiesAccessor().setPathProperties(pathProperties, properties.getEncryptionScope(), encryptionContext); } } static FileSystemItem toFileSystemItem(BlobContainerItem blobContainerItem) { if (blobContainerItem == null) { return null; } return new FileSystemItem() .setName(blobContainerItem.getName()) .setDeleted(blobContainerItem.isDeleted()) .setVersion(blobContainerItem.getVersion()) .setMetadata(blobContainerItem.getMetadata()) .setProperties(Transforms.toFileSystemItemProperties(blobContainerItem.getProperties())); } private static FileSystemItemProperties toFileSystemItemProperties( BlobContainerItemProperties blobContainerItemProperties) { if (blobContainerItemProperties == null) { return null; } return new FileSystemItemProperties() .setETag(blobContainerItemProperties.getETag()) .setLastModified(blobContainerItemProperties.getLastModified()) .setLeaseStatus(toDataLakeLeaseStatusType(blobContainerItemProperties.getLeaseStatus())) .setLeaseState(toDataLakeLeaseStateType(blobContainerItemProperties.getLeaseState())) .setLeaseDuration(toDataLakeLeaseDurationType(blobContainerItemProperties.getLeaseDuration())) .setPublicAccess(toDataLakePublicAccessType(blobContainerItemProperties.getPublicAccess())) .setHasLegalHold(blobContainerItemProperties.isHasLegalHold()) .setHasImmutabilityPolicy(blobContainerItemProperties.isHasImmutabilityPolicy()) .setEncryptionScope(blobContainerItemProperties.getDefaultEncryptionScope()) .setEncryptionScopeOverridePrevented(blobContainerItemProperties.isEncryptionScopeOverridePrevented()); } static PathItem toPathItem(Path path) { if (path == null) { return null; } PathItem pathItem = new PathItem(path.getETag(),
// toPathItem tail: last-modified is RFC-1123 text; creation/expiry times arrive as Windows FILETIME
// strings (100-ns ticks since 1601) — fromWindowsFileTimeOrNull divides by 10000 to get ms, shifts by
// EPOCH_CONVERSION onto the Unix epoch, and treats a 0 tick value as "absent" (null). Also the
// datalake -> blob request-condition mapper and the read-response wrappers.
parseDateOrNull(path.getLastModified()), path.getContentLength() == null ? 0 : path.getContentLength(), path.getGroup(), path.isDirectory() != null && path.isDirectory(), path.getName(), path.getOwner(), path.getPermissions(), path.getCreationTime() == null ? null : fromWindowsFileTimeOrNull(Long.parseLong(path.getCreationTime())), path.getExpiryTime() == null ? null : fromWindowsFileTimeOrNull(Long.parseLong(path.getExpiryTime()))); return AccessorUtility.getPathItemAccessor().setPathItemProperties(pathItem, path.getEncryptionScope(), path.getEncryptionContext()); } private static OffsetDateTime parseDateOrNull(String date) { return date == null ? null : OffsetDateTime.parse(date, DateTimeFormatter.RFC_1123_DATE_TIME); } private static OffsetDateTime fromWindowsFileTimeOrNull(long fileTime) { if (fileTime == 0) { return null; } long fileTimeMs = fileTime / 10000; long fileTimeUnixEpoch = fileTimeMs - EPOCH_CONVERSION; return Instant.ofEpochMilli(fileTimeUnixEpoch).atOffset(ZoneOffset.UTC); } static BlobRequestConditions toBlobRequestConditions(DataLakeRequestConditions requestConditions) { if (requestConditions == null) { return null; } return new BlobRequestConditions() .setLeaseId(requestConditions.getLeaseId()) .setIfUnmodifiedSince(requestConditions.getIfUnmodifiedSince()) .setIfNoneMatch(requestConditions.getIfNoneMatch()) .setIfMatch(requestConditions.getIfMatch()) .setIfModifiedSince(requestConditions.getIfModifiedSince()); } static FileReadResponse toFileReadResponse(BlobDownloadResponse r) { if (r == null) { return null; } return new FileReadResponse(Transforms.toFileReadAsyncResponse(new BlobDownloadAsyncResponse(r.getRequest(), r.getStatusCode(), r.getHeaders(), null, r.getDeserializedHeaders()))); } static FileReadAsyncResponse toFileReadAsyncResponse(BlobDownloadAsyncResponse r) { if (r == null) { return null; } return new FileReadAsyncResponse(r.getRequest(), r.getStatusCode(), r.getHeaders(), r.getValue(),
// toPathReadHeaders copies every BlobDownloadHeaders field into FileReadHeaders and stitches in the
// encryption context pulled from the raw response headers. Then the signed-identifier list mapper.
Transforms.toPathReadHeaders(r.getDeserializedHeaders(), getEncryptionContext(r))); } private static FileReadHeaders toPathReadHeaders(BlobDownloadHeaders h, String encryptionContext) { if (h == null) { return null; } return new FileReadHeaders() .setLastModified(h.getLastModified()) .setMetadata(h.getMetadata()) .setContentLength(h.getContentLength()) .setContentType(h.getContentType()) .setContentRange(h.getContentRange()) .setETag(h.getETag()) .setContentMd5(h.getContentMd5()) .setContentEncoding(h.getContentEncoding()) .setCacheControl(h.getCacheControl()) .setContentDisposition(h.getContentDisposition()) .setContentLanguage(h.getContentLanguage()) .setCopyCompletionTime(h.getCopyCompletionTime()) .setCopyStatusDescription(h.getCopyStatusDescription()) .setCopyId(h.getCopyId()) .setCopyProgress(h.getCopyProgress()) .setCopySource(h.getCopySource()) .setCopyStatus(Transforms.toDataLakeCopyStatusType(h.getCopyStatus())) .setLeaseDuration(Transforms.toDataLakeLeaseDurationType(h.getLeaseDuration())) .setLeaseState(Transforms.toDataLakeLeaseStateType(h.getLeaseState())) .setLeaseStatus(Transforms.toDataLakeLeaseStatusType(h.getLeaseStatus())) .setClientRequestId(h.getClientRequestId()) .setRequestId(h.getRequestId()) .setVersion(h.getVersion()) .setAcceptRanges(h.getAcceptRanges()) .setDateProperty(h.getDateProperty()) .setServerEncrypted(h.isServerEncrypted()) .setEncryptionKeySha256(h.getEncryptionKeySha256()) .setFileContentMd5(h.getBlobContentMD5()) .setContentCrc64(h.getContentCrc64()) .setErrorCode(h.getErrorCode()) .setEncryptionContext(encryptionContext); } static List<BlobSignedIdentifier> toBlobIdentifierList(List<DataLakeSignedIdentifier> identifiers) { if (identifiers == null) { return null; } List<BlobSignedIdentifier> blobIdentifiers = new ArrayList<>(); for (DataLakeSignedIdentifier identifier : identifiers) { blobIdentifiers.add(Transforms.toBlobIdentifier(identifier)); } return blobIdentifiers; } private static BlobSignedIdentifier
// Signed-identifier and access-policy mappers in both directions, then the start of the query
// serialization dispatcher (datalake serialization subtype -> matching blob serialization subtype).
toBlobIdentifier(DataLakeSignedIdentifier identifier) { if (identifier == null) { return null; } return new BlobSignedIdentifier() .setId(identifier.getId()) .setAccessPolicy(Transforms.toBlobAccessPolicy(identifier.getAccessPolicy())); } private static BlobAccessPolicy toBlobAccessPolicy(DataLakeAccessPolicy accessPolicy) { if (accessPolicy == null) { return null; } return new BlobAccessPolicy() .setExpiresOn(accessPolicy.getExpiresOn()) .setStartsOn(accessPolicy.getStartsOn()) .setPermissions(accessPolicy.getPermissions()); } static FileSystemAccessPolicies toFileSystemAccessPolicies(BlobContainerAccessPolicies accessPolicies) { if (accessPolicies == null) { return null; } return new FileSystemAccessPolicies(Transforms.toDataLakePublicAccessType(accessPolicies.getBlobAccessType()), Transforms.toDataLakeIdentifierList(accessPolicies.getIdentifiers())); } static List<DataLakeSignedIdentifier> toDataLakeIdentifierList(List<BlobSignedIdentifier> identifiers) { if (identifiers == null) { return null; } List<DataLakeSignedIdentifier> dataLakeIdentifiers = new ArrayList<>(); for (BlobSignedIdentifier identifier : identifiers) { dataLakeIdentifiers.add(Transforms.toDataLakeIdentifier(identifier)); } return dataLakeIdentifiers; } private static DataLakeSignedIdentifier toDataLakeIdentifier(BlobSignedIdentifier identifier) { if (identifier == null) { return null; } return new DataLakeSignedIdentifier() .setId(identifier.getId()) .setAccessPolicy(Transforms.toDataLakeAccessPolicy(identifier.getAccessPolicy())); } private static DataLakeAccessPolicy toDataLakeAccessPolicy(BlobAccessPolicy accessPolicy) { if (accessPolicy == null) { return null; } return new DataLakeAccessPolicy() .setExpiresOn(accessPolicy.getExpiresOn()) .setStartsOn(accessPolicy.getStartsOn()) .setPermissions(accessPolicy.getPermissions()); } static BlobQuerySerialization toBlobQuerySerialization(FileQuerySerialization ser) { if (ser == null) { return null; } if (ser instanceof FileQueryJsonSerialization)
// Query-serialization dispatch: JSON/delimited/arrow/parquet subtypes map one-to-one; any other
// subtype throws IllegalArgumentException with SERIALIZATION_MESSAGE. Arrow schema fields are
// converted element-wise; error/progress consumers are adapted by wrapping in a converting lambda.
{ FileQueryJsonSerialization jsonSer = (FileQueryJsonSerialization) ser; return new BlobQueryJsonSerialization().setRecordSeparator(jsonSer.getRecordSeparator()); } else if (ser instanceof FileQueryDelimitedSerialization) { FileQueryDelimitedSerialization delSer = (FileQueryDelimitedSerialization) ser; return new BlobQueryDelimitedSerialization() .setColumnSeparator(delSer.getColumnSeparator()) .setEscapeChar(delSer.getEscapeChar()) .setFieldQuote(delSer.getFieldQuote()) .setHeadersPresent(delSer.isHeadersPresent()) .setRecordSeparator(delSer.getRecordSeparator()); } else if (ser instanceof FileQueryArrowSerialization) { FileQueryArrowSerialization arrSer = (FileQueryArrowSerialization) ser; return new BlobQueryArrowSerialization().setSchema(toBlobQueryArrowSchema(arrSer.getSchema())); } else if (ser instanceof FileQueryParquetSerialization) { return new BlobQueryParquetSerialization(); } else { throw new IllegalArgumentException(SERIALIZATION_MESSAGE); } } private static List<BlobQueryArrowField> toBlobQueryArrowSchema(List<FileQueryArrowField> schema) { if (schema == null) { return null; } List<BlobQueryArrowField> blobSchema = new ArrayList<>(schema.size()); for (FileQueryArrowField field : schema) { blobSchema.add(toBlobQueryArrowField(field)); } return blobSchema; } private static BlobQueryArrowField toBlobQueryArrowField(FileQueryArrowField field) { if (field == null) { return null; } return new BlobQueryArrowField(BlobQueryArrowFieldType.fromString(field.getType().toString())) .setName(field.getName()) .setPrecision(field.getPrecision()) .setScale(field.getScale()); } static Consumer<BlobQueryError> toBlobQueryErrorConsumer(Consumer<FileQueryError> er) { if (er == null) { return null; } return error -> er.accept(toFileQueryError(error)); } static Consumer<BlobQueryProgress> toBlobQueryProgressConsumer(Consumer<FileQueryProgress> pr) { if (pr == null) { return null; } return progress -> pr.accept(toFileQueryProgress(progress)); } private static FileQueryError
// Query error/progress value mappers, the sync/async query-response wrappers, and the start of the
// query-headers field-for-field copy.
toFileQueryError(BlobQueryError error) { if (error == null) { return null; } return new FileQueryError(error.isFatal(), error.getName(), error.getDescription(), error.getPosition()); } private static FileQueryProgress toFileQueryProgress(BlobQueryProgress progress) { if (progress == null) { return null; } return new FileQueryProgress(progress.getBytesScanned(), progress.getTotalBytes()); } static FileQueryResponse toFileQueryResponse(BlobQueryResponse r) { if (r == null) { return null; } return new FileQueryResponse(Transforms.toFileQueryAsyncResponse(new BlobQueryAsyncResponse(r.getRequest(), r.getStatusCode(), r.getHeaders(), null, r.getDeserializedHeaders()))); } static FileQueryAsyncResponse toFileQueryAsyncResponse(BlobQueryAsyncResponse r) { if (r == null) { return null; } return new FileQueryAsyncResponse(r.getRequest(), r.getStatusCode(), r.getHeaders(), r.getValue(), Transforms.toFileQueryHeaders(r.getDeserializedHeaders())); } private static FileQueryHeaders toFileQueryHeaders(BlobQueryHeaders h) { if (h == null) { return null; } return new FileQueryHeaders() .setLastModified(h.getLastModified()) .setMetadata(h.getMetadata()) .setContentLength(h.getContentLength()) .setContentType(h.getContentType()) .setContentRange(h.getContentRange()) .setETag(h.getETag()) .setContentMd5(h.getContentMd5()) .setContentEncoding(h.getContentEncoding()) .setCacheControl(h.getCacheControl()) .setContentDisposition(h.getContentDisposition()) .setContentLanguage(h.getContentLanguage()) .setCopyCompletionTime(h.getCopyCompletionTime()) .setCopyStatusDescription(h.getCopyStatusDescription()) .setCopyId(h.getCopyId()) .setCopyProgress(h.getCopyProgress()) .setCopySource(h.getCopySource()) .setCopyStatus(Transforms.toDataLakeCopyStatusType(h.getCopyStatus())) .setLeaseDuration(Transforms.toDataLakeLeaseDurationType(h.getLeaseDuration())) .setLeaseState(Transforms.toDataLakeLeaseStateType(h.getLeaseState())) .setLeaseStatus(Transforms.toDataLakeLeaseStatusType(h.getLeaseStatus()))
// toFileQueryHeaders tail, the query-options mapper (two BlobQueryOptions constructors depending on
// whether an OutputStream was supplied), and the filesystem-undelete options mapper.
.setClientRequestId(h.getClientRequestId()) .setRequestId(h.getRequestId()) .setVersion(h.getVersion()) .setAcceptRanges(h.getAcceptRanges()) .setDateProperty(h.getDateProperty()) .setServerEncrypted(h.isServerEncrypted()) .setEncryptionKeySha256(h.getEncryptionKeySha256()) .setFileContentMd5(h.getContentMd5()) .setContentCrc64(h.getContentCrc64()) .setErrorCode(h.getErrorCode()); } static BlobQueryOptions toBlobQueryOptions(FileQueryOptions options) { if (options == null) { return null; } if (options.getOutputStream() == null) { return new BlobQueryOptions(options.getExpression()) .setInputSerialization(Transforms.toBlobQuerySerialization(options.getInputSerialization())) .setOutputSerialization(Transforms.toBlobQuerySerialization(options.getOutputSerialization())) .setRequestConditions(Transforms.toBlobRequestConditions(options.getRequestConditions())) .setErrorConsumer(Transforms.toBlobQueryErrorConsumer(options.getErrorConsumer())) .setProgressConsumer(Transforms.toBlobQueryProgressConsumer(options.getProgressConsumer())); } else { return new BlobQueryOptions(options.getExpression(), options.getOutputStream()) .setInputSerialization(Transforms.toBlobQuerySerialization(options.getInputSerialization())) .setOutputSerialization(Transforms.toBlobQuerySerialization(options.getOutputSerialization())) .setRequestConditions(Transforms.toBlobRequestConditions(options.getRequestConditions())) .setErrorConsumer(Transforms.toBlobQueryErrorConsumer(options.getErrorConsumer())) .setProgressConsumer(Transforms.toBlobQueryProgressConsumer(options.getProgressConsumer())); } } static UndeleteBlobContainerOptions toBlobContainerUndeleteOptions(FileSystemUndeleteOptions options) { if (options == null) { return null; } return new UndeleteBlobContainerOptions(options.getDeletedFileSystemName(), options.getDeletedFileSystemVersion()).setDestinationContainerName(options.getDestinationFileSystemName()); } static DataLakeServiceProperties
// Service-properties mappers (blob -> datalake direction) plus static-website, analytics-logging,
// CORS, and metrics conversions.
// NOTE(review): toDataLakeServiceProperties streams blobProps.getCors() without a null check — if
// getCors() can return null this NPEs; confirm against the blob model's contract.
toDataLakeServiceProperties(BlobServiceProperties blobProps) { if (blobProps == null) { return null; } return new DataLakeServiceProperties() .setDefaultServiceVersion(blobProps.getDefaultServiceVersion()) .setCors(blobProps.getCors().stream().map(Transforms::toDataLakeCorsRule).collect(Collectors.toList())) .setDeleteRetentionPolicy(toDataLakeRetentionPolicy(blobProps.getDeleteRetentionPolicy())) .setHourMetrics(toDataLakeMetrics(blobProps.getHourMetrics())) .setMinuteMetrics(toDataLakeMetrics(blobProps.getMinuteMetrics())) .setLogging(toDataLakeAnalyticsLogging(blobProps.getLogging())) .setStaticWebsite(toDataLakeStaticWebsite(blobProps.getStaticWebsite())); } static DataLakeStaticWebsite toDataLakeStaticWebsite(StaticWebsite staticWebsite) { if (staticWebsite == null) { return null; } return new DataLakeStaticWebsite() .setDefaultIndexDocumentPath(staticWebsite.getDefaultIndexDocumentPath()) .setEnabled(staticWebsite.isEnabled()) .setErrorDocument404Path(staticWebsite.getErrorDocument404Path()) .setIndexDocument(staticWebsite.getIndexDocument()); } static DataLakeAnalyticsLogging toDataLakeAnalyticsLogging(BlobAnalyticsLogging blobLogging) { if (blobLogging == null) { return null; } return new DataLakeAnalyticsLogging() .setDelete(blobLogging.isDelete()) .setRead(blobLogging.isRead()) .setWrite(blobLogging.isWrite()) .setRetentionPolicy(toDataLakeRetentionPolicy(blobLogging.getRetentionPolicy())) .setVersion(blobLogging.getVersion()); } static DataLakeCorsRule toDataLakeCorsRule(BlobCorsRule blobRule) { if (blobRule == null) { return null; } return new DataLakeCorsRule() .setAllowedHeaders(blobRule.getAllowedHeaders()) .setAllowedMethods(blobRule.getAllowedMethods()) .setAllowedOrigins(blobRule.getAllowedOrigins()) .setExposedHeaders(blobRule.getExposedHeaders()) .setMaxAgeInSeconds(blobRule.getMaxAgeInSeconds()); } static DataLakeMetrics toDataLakeMetrics(BlobMetrics blobMetrics) { if (blobMetrics == null) { return null; } return new DataLakeMetrics()
// Metrics/retention tails and the reverse (datalake -> blob) service-properties direction.
// NOTE(review): toBlobServiceProperties has the same unguarded getCors() stream as above.
.setEnabled(blobMetrics.isEnabled()) .setIncludeApis(blobMetrics.isIncludeApis()) .setVersion(blobMetrics.getVersion()) .setRetentionPolicy(toDataLakeRetentionPolicy(blobMetrics.getRetentionPolicy())); } static DataLakeRetentionPolicy toDataLakeRetentionPolicy(BlobRetentionPolicy blobPolicy) { if (blobPolicy == null) { return null; } return new DataLakeRetentionPolicy() .setDays(blobPolicy.getDays()) .setEnabled(blobPolicy.isEnabled()); } static BlobServiceProperties toBlobServiceProperties(DataLakeServiceProperties datalakeProperties) { if (datalakeProperties == null) { return null; } return new BlobServiceProperties() .setDefaultServiceVersion(datalakeProperties.getDefaultServiceVersion()) .setCors(datalakeProperties.getCors().stream().map(Transforms::toBlobCorsRule).collect(Collectors.toList())) .setDeleteRetentionPolicy(toBlobRetentionPolicy(datalakeProperties.getDeleteRetentionPolicy())) .setHourMetrics(toBlobMetrics(datalakeProperties.getHourMetrics())) .setMinuteMetrics(toBlobMetrics(datalakeProperties.getMinuteMetrics())) .setLogging(toBlobAnalyticsLogging(datalakeProperties.getLogging())) .setStaticWebsite(toBlobStaticWebsite(datalakeProperties.getStaticWebsite())); } static StaticWebsite toBlobStaticWebsite(DataLakeStaticWebsite staticWebsite) { if (staticWebsite == null) { return null; } return new StaticWebsite() .setDefaultIndexDocumentPath(staticWebsite.getDefaultIndexDocumentPath()) .setEnabled(staticWebsite.isEnabled()) .setErrorDocument404Path(staticWebsite.getErrorDocument404Path()) .setIndexDocument(staticWebsite.getIndexDocument()); } static BlobAnalyticsLogging toBlobAnalyticsLogging(DataLakeAnalyticsLogging datalakeLogging) { if (datalakeLogging == null) { return null; } return new BlobAnalyticsLogging() .setDelete(datalakeLogging.isDelete()) .setRead(datalakeLogging.isRead()) .setWrite(datalakeLogging.isWrite()) .setRetentionPolicy(toBlobRetentionPolicy(datalakeLogging.getRetentionPolicy())) .setVersion(datalakeLogging.getVersion()); } static
// Remaining datalake -> blob conversions (CORS, metrics, retention), soft-deleted path items (a
// BlobPrefix maps to a directory-style deleted item), and customer-provided-key helpers.
BlobCorsRule toBlobCorsRule(DataLakeCorsRule datalakeRule) { if (datalakeRule == null) { return null; } return new BlobCorsRule() .setAllowedHeaders(datalakeRule.getAllowedHeaders()) .setAllowedMethods(datalakeRule.getAllowedMethods()) .setAllowedOrigins(datalakeRule.getAllowedOrigins()) .setExposedHeaders(datalakeRule.getExposedHeaders()) .setMaxAgeInSeconds(datalakeRule.getMaxAgeInSeconds()); } static BlobMetrics toBlobMetrics(DataLakeMetrics datalakeMetrics) { if (datalakeMetrics == null) { return null; } return new BlobMetrics() .setEnabled(datalakeMetrics.isEnabled()) .setIncludeApis(datalakeMetrics.isIncludeApis()) .setVersion(datalakeMetrics.getVersion()) .setRetentionPolicy(toBlobRetentionPolicy(datalakeMetrics.getRetentionPolicy())); } static BlobRetentionPolicy toBlobRetentionPolicy(DataLakeRetentionPolicy datalakePolicy) { if (datalakePolicy == null) { return null; } return new BlobRetentionPolicy() .setDays(datalakePolicy.getDays()) .setEnabled(datalakePolicy.isEnabled()); } static PathDeletedItem toPathDeletedItem(BlobItemInternal blobItem) { if (blobItem == null) { return null; } return new PathDeletedItem(blobItem.getName(), false, blobItem.getDeletionId(), blobItem.getProperties().getDeletedTime(), blobItem.getProperties().getRemainingRetentionDays()); } static PathDeletedItem toPathDeletedItem(BlobPrefix blobPrefix) { return new PathDeletedItem(blobPrefix.getName(), true, null, null, null); } static CustomerProvidedKey toBlobCustomerProvidedKey( com.azure.storage.file.datalake.models.CustomerProvidedKey key) { if (key == null) { return null; } return new CustomerProvidedKey(key.getKey()); } static CpkInfo fromBlobCpkInfo(com.azure.storage.blob.models.CpkInfo info) { if (info == null) { return null; } return new CpkInfo() .setEncryptionKey(info.getEncryptionKey()) .setEncryptionAlgorithm(com.azure.storage.file.datalake.models.EncryptionAlgorithmType.fromString( info.getEncryptionAlgorithm().toString()))
// CpkInfo tail, filesystem encryption-scope options mapper, and the closing brace of the class.
.setEncryptionKeySha256(info.getEncryptionKeySha256()); } static BlobContainerEncryptionScope toBlobContainerEncryptionScope(FileSystemEncryptionScopeOptions fileSystemEncryptionScope) { if (fileSystemEncryptionScope == null) { return null; } return new BlobContainerEncryptionScope() .setDefaultEncryptionScope(fileSystemEncryptionScope.getDefaultEncryptionScope()) .setEncryptionScopeOverridePrevented(fileSystemEncryptionScope.isEncryptionScopeOverridePrevented()); } }
class Transforms { private static final String SERIALIZATION_MESSAGE = String.format("'serialization' must be one of %s, %s, %s or " + "%s.", FileQueryJsonSerialization.class.getSimpleName(), FileQueryDelimitedSerialization.class.getSimpleName(), FileQueryArrowSerialization.class.getSimpleName(), FileQueryParquetSerialization.class.getSimpleName()); private static final long EPOCH_CONVERSION; public static final HttpHeaderName X_MS_ENCRYPTION_CONTEXT = HttpHeaderName.fromString("x-ms-encryption-context"); static { GregorianCalendar unixEpoch = new GregorianCalendar(); unixEpoch.clear(); unixEpoch.set(1970, Calendar.JANUARY, 1, 0, 0, 0); GregorianCalendar windowsEpoch = new GregorianCalendar(); windowsEpoch.clear(); windowsEpoch.set(1601, Calendar.JANUARY, 1, 0, 0, 0); EPOCH_CONVERSION = unixEpoch.getTimeInMillis() - windowsEpoch.getTimeInMillis(); } static com.azure.storage.blob.models.PublicAccessType toBlobPublicAccessType(PublicAccessType fileSystemPublicAccessType) { if (fileSystemPublicAccessType == null) { return null; } return com.azure.storage.blob.models.PublicAccessType.fromString(fileSystemPublicAccessType.toString()); } private static LeaseDurationType toDataLakeLeaseDurationType(com.azure.storage.blob.models.LeaseDurationType blobLeaseDurationType) { if (blobLeaseDurationType == null) { return null; } return LeaseDurationType.fromString(blobLeaseDurationType.toString()); } private static LeaseStateType toDataLakeLeaseStateType(com.azure.storage.blob.models.LeaseStateType blobLeaseStateType) { if (blobLeaseStateType == null) { return null; } return LeaseStateType.fromString(blobLeaseStateType.toString()); } private static LeaseStatusType toDataLakeLeaseStatusType(com.azure.storage.blob.models.LeaseStatusType blobLeaseStatusType) { if (blobLeaseStatusType == null) { return null; } return LeaseStatusType.fromString(blobLeaseStatusType.toString()); } private static PublicAccessType toDataLakePublicAccessType(com.azure.storage.blob.models.PublicAccessType 
blobPublicAccessType) { if (blobPublicAccessType == null) { return null; } return PublicAccessType.fromString(blobPublicAccessType.toString()); } private static CopyStatusType toDataLakeCopyStatusType( com.azure.storage.blob.models.CopyStatusType blobCopyStatus) { if (blobCopyStatus == null) { return null; } return CopyStatusType.fromString(blobCopyStatus.toString()); } private static ArchiveStatus toDataLakeArchiveStatus( com.azure.storage.blob.models.ArchiveStatus blobArchiveStatus) { if (blobArchiveStatus == null) { return null; } return ArchiveStatus.fromString(blobArchiveStatus.toString()); } private static AccessTier toDataLakeAccessTier(com.azure.storage.blob.models.AccessTier blobAccessTier) { if (blobAccessTier == null) { return null; } return AccessTier.fromString(blobAccessTier.toString()); } static FileSystemProperties toFileSystemProperties(BlobContainerProperties blobContainerProperties) { if (blobContainerProperties == null) { return null; } FileSystemProperties fileSystemProperties = new FileSystemProperties(blobContainerProperties.getMetadata(), blobContainerProperties.getETag(), blobContainerProperties.getLastModified(), Transforms.toDataLakeLeaseDurationType(blobContainerProperties.getLeaseDuration()), Transforms.toDataLakeLeaseStateType(blobContainerProperties.getLeaseState()), Transforms.toDataLakeLeaseStatusType(blobContainerProperties.getLeaseStatus()), Transforms.toDataLakePublicAccessType(blobContainerProperties.getBlobPublicAccess()), blobContainerProperties.hasImmutabilityPolicy(), blobContainerProperties.hasLegalHold()); return AccessorUtility.getFileSystemPropertiesAccessor() .setFileSystemProperties(fileSystemProperties, blobContainerProperties.getDefaultEncryptionScope(), blobContainerProperties.isEncryptionScopeOverridePrevented()); } private static BlobContainerListDetails toBlobContainerListDetails(FileSystemListDetails fileSystemListDetails) { if (fileSystemListDetails == null) { return null; } return new 
BlobContainerListDetails() .setRetrieveMetadata(fileSystemListDetails.getRetrieveMetadata()) .setRetrieveDeleted(fileSystemListDetails.getRetrieveDeleted()) .setRetrieveSystemContainers(fileSystemListDetails.getRetrieveSystemFileSystems()); } static ListBlobContainersOptions toListBlobContainersOptions(ListFileSystemsOptions listFileSystemsOptions) { if (listFileSystemsOptions == null) { return null; } return new ListBlobContainersOptions() .setDetails(toBlobContainerListDetails(listFileSystemsOptions.getDetails())) .setMaxResultsPerPage(listFileSystemsOptions.getMaxResultsPerPage()) .setPrefix(listFileSystemsOptions.getPrefix()); } static UserDelegationKey toDataLakeUserDelegationKey(com.azure.storage.blob.models.UserDelegationKey blobUserDelegationKey) { if (blobUserDelegationKey == null) { return null; } return new UserDelegationKey() .setSignedExpiry(blobUserDelegationKey.getSignedExpiry()) .setSignedObjectId(blobUserDelegationKey.getSignedObjectId()) .setSignedTenantId(blobUserDelegationKey.getSignedTenantId()) .setSignedService(blobUserDelegationKey.getSignedService()) .setSignedStart(blobUserDelegationKey.getSignedStart()) .setSignedVersion(blobUserDelegationKey.getSignedVersion()) .setValue(blobUserDelegationKey.getValue()); } static BlobHttpHeaders toBlobHttpHeaders(PathHttpHeaders pathHTTPHeaders) { if (pathHTTPHeaders == null) { return null; } return new BlobHttpHeaders() .setCacheControl(pathHTTPHeaders.getCacheControl()) .setContentDisposition(pathHTTPHeaders.getContentDisposition()) .setContentEncoding(pathHTTPHeaders.getContentEncoding()) .setContentLanguage(pathHTTPHeaders.getContentLanguage()) .setContentType(pathHTTPHeaders.getContentType()) .setContentMd5(pathHTTPHeaders.getContentMd5()); } static BlobInputStreamOptions toBlobInputStreamOptions(DataLakeFileInputStreamOptions options) { if (options == null) { return null; } return new BlobInputStreamOptions() .setBlockSize(options.getBlockSize()) .setRange(toBlobRange(options.getRange())) 
.setRequestConditions(toBlobRequestConditions(options.getRequestConditions())) .setConsistentReadControl(toBlobConsistentReadControl(options.getConsistentReadControl())); } static com.azure.storage.blob.models.ConsistentReadControl toBlobConsistentReadControl( com.azure.storage.file.datalake.models.ConsistentReadControl datalakeConsistentReadControl) { if (datalakeConsistentReadControl == null) { return null; } switch (datalakeConsistentReadControl) { case NONE: return ConsistentReadControl.NONE; case ETAG: return ConsistentReadControl.ETAG; default: throw new IllegalArgumentException("Could not convert ConsistentReadControl"); } } static BlobRange toBlobRange(FileRange fileRange) { if (fileRange == null) { return null; } return new BlobRange(fileRange.getOffset(), fileRange.getCount()); } static com.azure.storage.blob.models.DownloadRetryOptions toBlobDownloadRetryOptions( DownloadRetryOptions dataLakeOptions) { if (dataLakeOptions == null) { return null; } return new com.azure.storage.blob.models.DownloadRetryOptions() .setMaxRetryRequests(dataLakeOptions.getMaxRetryRequests()); } static PathProperties toPathProperties(BlobProperties properties) { return toPathProperties(properties, null); } static PathProperties toPathProperties(BlobProperties properties, String encryptionContext) { if (properties == null) { return null; } else { PathProperties pathProperties = new PathProperties(properties.getCreationTime(), properties.getLastModified(), properties.getETag(), properties.getBlobSize(), properties.getContentType(), properties.getContentMd5(), properties.getContentEncoding(), properties.getContentDisposition(), properties.getContentLanguage(), properties.getCacheControl(), Transforms.toDataLakeLeaseStatusType(properties.getLeaseStatus()), Transforms.toDataLakeLeaseStateType(properties.getLeaseState()), Transforms.toDataLakeLeaseDurationType(properties.getLeaseDuration()), properties.getCopyId(), Transforms.toDataLakeCopyStatusType(properties.getCopyStatus()), 
properties.getCopySource(), properties.getCopyProgress(), properties.getCopyCompletionTime(), properties.getCopyStatusDescription(), properties.isServerEncrypted(), properties.isIncrementalCopy(), Transforms.toDataLakeAccessTier(properties.getAccessTier()), Transforms.toDataLakeArchiveStatus(properties.getArchiveStatus()), properties.getEncryptionKeySha256(), properties.getAccessTierChangeTime(), properties.getMetadata(), properties.getExpiresOn()); return AccessorUtility.getPathPropertiesAccessor().setPathProperties(pathProperties, properties.getEncryptionScope(), encryptionContext); } } static FileSystemItem toFileSystemItem(BlobContainerItem blobContainerItem) { if (blobContainerItem == null) { return null; } return new FileSystemItem() .setName(blobContainerItem.getName()) .setDeleted(blobContainerItem.isDeleted()) .setVersion(blobContainerItem.getVersion()) .setMetadata(blobContainerItem.getMetadata()) .setProperties(Transforms.toFileSystemItemProperties(blobContainerItem.getProperties())); } private static FileSystemItemProperties toFileSystemItemProperties( BlobContainerItemProperties blobContainerItemProperties) { if (blobContainerItemProperties == null) { return null; } return new FileSystemItemProperties() .setETag(blobContainerItemProperties.getETag()) .setLastModified(blobContainerItemProperties.getLastModified()) .setLeaseStatus(toDataLakeLeaseStatusType(blobContainerItemProperties.getLeaseStatus())) .setLeaseState(toDataLakeLeaseStateType(blobContainerItemProperties.getLeaseState())) .setLeaseDuration(toDataLakeLeaseDurationType(blobContainerItemProperties.getLeaseDuration())) .setPublicAccess(toDataLakePublicAccessType(blobContainerItemProperties.getPublicAccess())) .setHasLegalHold(blobContainerItemProperties.isHasLegalHold()) .setHasImmutabilityPolicy(blobContainerItemProperties.isHasImmutabilityPolicy()) .setEncryptionScope(blobContainerItemProperties.getDefaultEncryptionScope()) 
.setEncryptionScopeOverridePrevented(blobContainerItemProperties.isEncryptionScopeOverridePrevented()); } static PathItem toPathItem(Path path) { if (path == null) { return null; } PathItem pathItem = new PathItem(path.getETag(), parseDateOrNull(path.getLastModified()), path.getContentLength() == null ? 0 : path.getContentLength(), path.getGroup(), path.isDirectory() != null && path.isDirectory(), path.getName(), path.getOwner(), path.getPermissions(), path.getCreationTime() == null ? null : fromWindowsFileTimeOrNull(Long.parseLong(path.getCreationTime())), path.getExpiryTime() == null ? null : fromWindowsFileTimeOrNull(Long.parseLong(path.getExpiryTime()))); return AccessorUtility.getPathItemAccessor().setPathItemProperties(pathItem, path.getEncryptionScope(), path.getEncryptionContext()); } private static OffsetDateTime parseDateOrNull(String date) { return date == null ? null : OffsetDateTime.parse(date, DateTimeFormatter.RFC_1123_DATE_TIME); } private static OffsetDateTime fromWindowsFileTimeOrNull(long fileTime) { if (fileTime == 0) { return null; } long fileTimeMs = fileTime / 10000; long fileTimeUnixEpoch = fileTimeMs - EPOCH_CONVERSION; return Instant.ofEpochMilli(fileTimeUnixEpoch).atOffset(ZoneOffset.UTC); } static BlobRequestConditions toBlobRequestConditions(DataLakeRequestConditions requestConditions) { if (requestConditions == null) { return null; } return new BlobRequestConditions() .setLeaseId(requestConditions.getLeaseId()) .setIfUnmodifiedSince(requestConditions.getIfUnmodifiedSince()) .setIfNoneMatch(requestConditions.getIfNoneMatch()) .setIfMatch(requestConditions.getIfMatch()) .setIfModifiedSince(requestConditions.getIfModifiedSince()); } static FileReadResponse toFileReadResponse(BlobDownloadResponse r) { if (r == null) { return null; } return new FileReadResponse(Transforms.toFileReadAsyncResponse(new BlobDownloadAsyncResponse(r.getRequest(), r.getStatusCode(), r.getHeaders(), null, r.getDeserializedHeaders()))); } static 
FileReadAsyncResponse toFileReadAsyncResponse(BlobDownloadAsyncResponse r) { if (r == null) { return null; } return new FileReadAsyncResponse(r.getRequest(), r.getStatusCode(), r.getHeaders(), r.getValue(), Transforms.toPathReadHeaders(r.getDeserializedHeaders(), getEncryptionContext(r))); } private static FileReadHeaders toPathReadHeaders(BlobDownloadHeaders h, String encryptionContext) { if (h == null) { return null; } return new FileReadHeaders() .setLastModified(h.getLastModified()) .setMetadata(h.getMetadata()) .setContentLength(h.getContentLength()) .setContentType(h.getContentType()) .setContentRange(h.getContentRange()) .setETag(h.getETag()) .setContentMd5(h.getContentMd5()) .setContentEncoding(h.getContentEncoding()) .setCacheControl(h.getCacheControl()) .setContentDisposition(h.getContentDisposition()) .setContentLanguage(h.getContentLanguage()) .setCopyCompletionTime(h.getCopyCompletionTime()) .setCopyStatusDescription(h.getCopyStatusDescription()) .setCopyId(h.getCopyId()) .setCopyProgress(h.getCopyProgress()) .setCopySource(h.getCopySource()) .setCopyStatus(Transforms.toDataLakeCopyStatusType(h.getCopyStatus())) .setLeaseDuration(Transforms.toDataLakeLeaseDurationType(h.getLeaseDuration())) .setLeaseState(Transforms.toDataLakeLeaseStateType(h.getLeaseState())) .setLeaseStatus(Transforms.toDataLakeLeaseStatusType(h.getLeaseStatus())) .setClientRequestId(h.getClientRequestId()) .setRequestId(h.getRequestId()) .setVersion(h.getVersion()) .setAcceptRanges(h.getAcceptRanges()) .setDateProperty(h.getDateProperty()) .setServerEncrypted(h.isServerEncrypted()) .setEncryptionKeySha256(h.getEncryptionKeySha256()) .setFileContentMd5(h.getBlobContentMD5()) .setContentCrc64(h.getContentCrc64()) .setErrorCode(h.getErrorCode()) .setCreationTime(h.getCreationTime()) .setEncryptionContext(encryptionContext); } static List<BlobSignedIdentifier> toBlobIdentifierList(List<DataLakeSignedIdentifier> identifiers) { if (identifiers == null) { return null; } 
List<BlobSignedIdentifier> blobIdentifiers = new ArrayList<>(); for (DataLakeSignedIdentifier identifier : identifiers) { blobIdentifiers.add(Transforms.toBlobIdentifier(identifier)); } return blobIdentifiers; } private static BlobSignedIdentifier toBlobIdentifier(DataLakeSignedIdentifier identifier) { if (identifier == null) { return null; } return new BlobSignedIdentifier() .setId(identifier.getId()) .setAccessPolicy(Transforms.toBlobAccessPolicy(identifier.getAccessPolicy())); } private static BlobAccessPolicy toBlobAccessPolicy(DataLakeAccessPolicy accessPolicy) { if (accessPolicy == null) { return null; } return new BlobAccessPolicy() .setExpiresOn(accessPolicy.getExpiresOn()) .setStartsOn(accessPolicy.getStartsOn()) .setPermissions(accessPolicy.getPermissions()); } static FileSystemAccessPolicies toFileSystemAccessPolicies(BlobContainerAccessPolicies accessPolicies) { if (accessPolicies == null) { return null; } return new FileSystemAccessPolicies(Transforms.toDataLakePublicAccessType(accessPolicies.getBlobAccessType()), Transforms.toDataLakeIdentifierList(accessPolicies.getIdentifiers())); } static List<DataLakeSignedIdentifier> toDataLakeIdentifierList(List<BlobSignedIdentifier> identifiers) { if (identifiers == null) { return null; } List<DataLakeSignedIdentifier> dataLakeIdentifiers = new ArrayList<>(); for (BlobSignedIdentifier identifier : identifiers) { dataLakeIdentifiers.add(Transforms.toDataLakeIdentifier(identifier)); } return dataLakeIdentifiers; } private static DataLakeSignedIdentifier toDataLakeIdentifier(BlobSignedIdentifier identifier) { if (identifier == null) { return null; } return new DataLakeSignedIdentifier() .setId(identifier.getId()) .setAccessPolicy(Transforms.toDataLakeAccessPolicy(identifier.getAccessPolicy())); } private static DataLakeAccessPolicy toDataLakeAccessPolicy(BlobAccessPolicy accessPolicy) { if (accessPolicy == null) { return null; } return new DataLakeAccessPolicy() .setExpiresOn(accessPolicy.getExpiresOn()) 
.setStartsOn(accessPolicy.getStartsOn()) .setPermissions(accessPolicy.getPermissions()); } static BlobQuerySerialization toBlobQuerySerialization(FileQuerySerialization ser) { if (ser == null) { return null; } if (ser instanceof FileQueryJsonSerialization) { FileQueryJsonSerialization jsonSer = (FileQueryJsonSerialization) ser; return new BlobQueryJsonSerialization().setRecordSeparator(jsonSer.getRecordSeparator()); } else if (ser instanceof FileQueryDelimitedSerialization) { FileQueryDelimitedSerialization delSer = (FileQueryDelimitedSerialization) ser; return new BlobQueryDelimitedSerialization() .setColumnSeparator(delSer.getColumnSeparator()) .setEscapeChar(delSer.getEscapeChar()) .setFieldQuote(delSer.getFieldQuote()) .setHeadersPresent(delSer.isHeadersPresent()) .setRecordSeparator(delSer.getRecordSeparator()); } else if (ser instanceof FileQueryArrowSerialization) { FileQueryArrowSerialization arrSer = (FileQueryArrowSerialization) ser; return new BlobQueryArrowSerialization().setSchema(toBlobQueryArrowSchema(arrSer.getSchema())); } else if (ser instanceof FileQueryParquetSerialization) { return new BlobQueryParquetSerialization(); } else { throw new IllegalArgumentException(SERIALIZATION_MESSAGE); } } private static List<BlobQueryArrowField> toBlobQueryArrowSchema(List<FileQueryArrowField> schema) { if (schema == null) { return null; } List<BlobQueryArrowField> blobSchema = new ArrayList<>(schema.size()); for (FileQueryArrowField field : schema) { blobSchema.add(toBlobQueryArrowField(field)); } return blobSchema; } private static BlobQueryArrowField toBlobQueryArrowField(FileQueryArrowField field) { if (field == null) { return null; } return new BlobQueryArrowField(BlobQueryArrowFieldType.fromString(field.getType().toString())) .setName(field.getName()) .setPrecision(field.getPrecision()) .setScale(field.getScale()); } static Consumer<BlobQueryError> toBlobQueryErrorConsumer(Consumer<FileQueryError> er) { if (er == null) { return null; } return error -> 
er.accept(toFileQueryError(error)); } static Consumer<BlobQueryProgress> toBlobQueryProgressConsumer(Consumer<FileQueryProgress> pr) { if (pr == null) { return null; } return progress -> pr.accept(toFileQueryProgress(progress)); } private static FileQueryError toFileQueryError(BlobQueryError error) { if (error == null) { return null; } return new FileQueryError(error.isFatal(), error.getName(), error.getDescription(), error.getPosition()); } private static FileQueryProgress toFileQueryProgress(BlobQueryProgress progress) { if (progress == null) { return null; } return new FileQueryProgress(progress.getBytesScanned(), progress.getTotalBytes()); } static FileQueryResponse toFileQueryResponse(BlobQueryResponse r) { if (r == null) { return null; } return new FileQueryResponse(Transforms.toFileQueryAsyncResponse(new BlobQueryAsyncResponse(r.getRequest(), r.getStatusCode(), r.getHeaders(), null, r.getDeserializedHeaders()))); } static FileQueryAsyncResponse toFileQueryAsyncResponse(BlobQueryAsyncResponse r) { if (r == null) { return null; } return new FileQueryAsyncResponse(r.getRequest(), r.getStatusCode(), r.getHeaders(), r.getValue(), Transforms.toFileQueryHeaders(r.getDeserializedHeaders())); } private static FileQueryHeaders toFileQueryHeaders(BlobQueryHeaders h) { if (h == null) { return null; } return new FileQueryHeaders() .setLastModified(h.getLastModified()) .setMetadata(h.getMetadata()) .setContentLength(h.getContentLength()) .setContentType(h.getContentType()) .setContentRange(h.getContentRange()) .setETag(h.getETag()) .setContentMd5(h.getContentMd5()) .setContentEncoding(h.getContentEncoding()) .setCacheControl(h.getCacheControl()) .setContentDisposition(h.getContentDisposition()) .setContentLanguage(h.getContentLanguage()) .setCopyCompletionTime(h.getCopyCompletionTime()) .setCopyStatusDescription(h.getCopyStatusDescription()) .setCopyId(h.getCopyId()) .setCopyProgress(h.getCopyProgress()) .setCopySource(h.getCopySource()) 
.setCopyStatus(Transforms.toDataLakeCopyStatusType(h.getCopyStatus())) .setLeaseDuration(Transforms.toDataLakeLeaseDurationType(h.getLeaseDuration())) .setLeaseState(Transforms.toDataLakeLeaseStateType(h.getLeaseState())) .setLeaseStatus(Transforms.toDataLakeLeaseStatusType(h.getLeaseStatus())) .setClientRequestId(h.getClientRequestId()) .setRequestId(h.getRequestId()) .setVersion(h.getVersion()) .setAcceptRanges(h.getAcceptRanges()) .setDateProperty(h.getDateProperty()) .setServerEncrypted(h.isServerEncrypted()) .setEncryptionKeySha256(h.getEncryptionKeySha256()) .setFileContentMd5(h.getContentMd5()) .setContentCrc64(h.getContentCrc64()) .setErrorCode(h.getErrorCode()); } static BlobQueryOptions toBlobQueryOptions(FileQueryOptions options) { if (options == null) { return null; } if (options.getOutputStream() == null) { return new BlobQueryOptions(options.getExpression()) .setInputSerialization(Transforms.toBlobQuerySerialization(options.getInputSerialization())) .setOutputSerialization(Transforms.toBlobQuerySerialization(options.getOutputSerialization())) .setRequestConditions(Transforms.toBlobRequestConditions(options.getRequestConditions())) .setErrorConsumer(Transforms.toBlobQueryErrorConsumer(options.getErrorConsumer())) .setProgressConsumer(Transforms.toBlobQueryProgressConsumer(options.getProgressConsumer())); } else { return new BlobQueryOptions(options.getExpression(), options.getOutputStream()) .setInputSerialization(Transforms.toBlobQuerySerialization(options.getInputSerialization())) .setOutputSerialization(Transforms.toBlobQuerySerialization(options.getOutputSerialization())) .setRequestConditions(Transforms.toBlobRequestConditions(options.getRequestConditions())) .setErrorConsumer(Transforms.toBlobQueryErrorConsumer(options.getErrorConsumer())) .setProgressConsumer(Transforms.toBlobQueryProgressConsumer(options.getProgressConsumer())); } } static UndeleteBlobContainerOptions toBlobContainerUndeleteOptions(FileSystemUndeleteOptions options) { if 
(options == null) { return null; } return new UndeleteBlobContainerOptions(options.getDeletedFileSystemName(), options.getDeletedFileSystemVersion()).setDestinationContainerName(options.getDestinationFileSystemName()); } static DataLakeServiceProperties toDataLakeServiceProperties(BlobServiceProperties blobProps) { if (blobProps == null) { return null; } return new DataLakeServiceProperties() .setDefaultServiceVersion(blobProps.getDefaultServiceVersion()) .setCors(blobProps.getCors().stream().map(Transforms::toDataLakeCorsRule).collect(Collectors.toList())) .setDeleteRetentionPolicy(toDataLakeRetentionPolicy(blobProps.getDeleteRetentionPolicy())) .setHourMetrics(toDataLakeMetrics(blobProps.getHourMetrics())) .setMinuteMetrics(toDataLakeMetrics(blobProps.getMinuteMetrics())) .setLogging(toDataLakeAnalyticsLogging(blobProps.getLogging())) .setStaticWebsite(toDataLakeStaticWebsite(blobProps.getStaticWebsite())); } static DataLakeStaticWebsite toDataLakeStaticWebsite(StaticWebsite staticWebsite) { if (staticWebsite == null) { return null; } return new DataLakeStaticWebsite() .setDefaultIndexDocumentPath(staticWebsite.getDefaultIndexDocumentPath()) .setEnabled(staticWebsite.isEnabled()) .setErrorDocument404Path(staticWebsite.getErrorDocument404Path()) .setIndexDocument(staticWebsite.getIndexDocument()); } static DataLakeAnalyticsLogging toDataLakeAnalyticsLogging(BlobAnalyticsLogging blobLogging) { if (blobLogging == null) { return null; } return new DataLakeAnalyticsLogging() .setDelete(blobLogging.isDelete()) .setRead(blobLogging.isRead()) .setWrite(blobLogging.isWrite()) .setRetentionPolicy(toDataLakeRetentionPolicy(blobLogging.getRetentionPolicy())) .setVersion(blobLogging.getVersion()); } static DataLakeCorsRule toDataLakeCorsRule(BlobCorsRule blobRule) { if (blobRule == null) { return null; } return new DataLakeCorsRule() .setAllowedHeaders(blobRule.getAllowedHeaders()) .setAllowedMethods(blobRule.getAllowedMethods()) 
.setAllowedOrigins(blobRule.getAllowedOrigins()) .setExposedHeaders(blobRule.getExposedHeaders()) .setMaxAgeInSeconds(blobRule.getMaxAgeInSeconds()); } static DataLakeMetrics toDataLakeMetrics(BlobMetrics blobMetrics) { if (blobMetrics == null) { return null; } return new DataLakeMetrics() .setEnabled(blobMetrics.isEnabled()) .setIncludeApis(blobMetrics.isIncludeApis()) .setVersion(blobMetrics.getVersion()) .setRetentionPolicy(toDataLakeRetentionPolicy(blobMetrics.getRetentionPolicy())); } static DataLakeRetentionPolicy toDataLakeRetentionPolicy(BlobRetentionPolicy blobPolicy) { if (blobPolicy == null) { return null; } return new DataLakeRetentionPolicy() .setDays(blobPolicy.getDays()) .setEnabled(blobPolicy.isEnabled()); } static BlobServiceProperties toBlobServiceProperties(DataLakeServiceProperties datalakeProperties) { if (datalakeProperties == null) { return null; } return new BlobServiceProperties() .setDefaultServiceVersion(datalakeProperties.getDefaultServiceVersion()) .setCors(datalakeProperties.getCors().stream().map(Transforms::toBlobCorsRule).collect(Collectors.toList())) .setDeleteRetentionPolicy(toBlobRetentionPolicy(datalakeProperties.getDeleteRetentionPolicy())) .setHourMetrics(toBlobMetrics(datalakeProperties.getHourMetrics())) .setMinuteMetrics(toBlobMetrics(datalakeProperties.getMinuteMetrics())) .setLogging(toBlobAnalyticsLogging(datalakeProperties.getLogging())) .setStaticWebsite(toBlobStaticWebsite(datalakeProperties.getStaticWebsite())); } static StaticWebsite toBlobStaticWebsite(DataLakeStaticWebsite staticWebsite) { if (staticWebsite == null) { return null; } return new StaticWebsite() .setDefaultIndexDocumentPath(staticWebsite.getDefaultIndexDocumentPath()) .setEnabled(staticWebsite.isEnabled()) .setErrorDocument404Path(staticWebsite.getErrorDocument404Path()) .setIndexDocument(staticWebsite.getIndexDocument()); } static BlobAnalyticsLogging toBlobAnalyticsLogging(DataLakeAnalyticsLogging datalakeLogging) { if (datalakeLogging == null) { 
return null; } return new BlobAnalyticsLogging() .setDelete(datalakeLogging.isDelete()) .setRead(datalakeLogging.isRead()) .setWrite(datalakeLogging.isWrite()) .setRetentionPolicy(toBlobRetentionPolicy(datalakeLogging.getRetentionPolicy())) .setVersion(datalakeLogging.getVersion()); } static BlobCorsRule toBlobCorsRule(DataLakeCorsRule datalakeRule) { if (datalakeRule == null) { return null; } return new BlobCorsRule() .setAllowedHeaders(datalakeRule.getAllowedHeaders()) .setAllowedMethods(datalakeRule.getAllowedMethods()) .setAllowedOrigins(datalakeRule.getAllowedOrigins()) .setExposedHeaders(datalakeRule.getExposedHeaders()) .setMaxAgeInSeconds(datalakeRule.getMaxAgeInSeconds()); } static BlobMetrics toBlobMetrics(DataLakeMetrics datalakeMetrics) { if (datalakeMetrics == null) { return null; } return new BlobMetrics() .setEnabled(datalakeMetrics.isEnabled()) .setIncludeApis(datalakeMetrics.isIncludeApis()) .setVersion(datalakeMetrics.getVersion()) .setRetentionPolicy(toBlobRetentionPolicy(datalakeMetrics.getRetentionPolicy())); } static BlobRetentionPolicy toBlobRetentionPolicy(DataLakeRetentionPolicy datalakePolicy) { if (datalakePolicy == null) { return null; } return new BlobRetentionPolicy() .setDays(datalakePolicy.getDays()) .setEnabled(datalakePolicy.isEnabled()); } static PathDeletedItem toPathDeletedItem(BlobItemInternal blobItem) { if (blobItem == null) { return null; } return new PathDeletedItem(blobItem.getName(), false, blobItem.getDeletionId(), blobItem.getProperties().getDeletedTime(), blobItem.getProperties().getRemainingRetentionDays()); } static PathDeletedItem toPathDeletedItem(BlobPrefix blobPrefix) { return new PathDeletedItem(blobPrefix.getName(), true, null, null, null); } static CustomerProvidedKey toBlobCustomerProvidedKey( com.azure.storage.file.datalake.models.CustomerProvidedKey key) { if (key == null) { return null; } return new CustomerProvidedKey(key.getKey()); } static CpkInfo fromBlobCpkInfo(com.azure.storage.blob.models.CpkInfo 
info) { if (info == null) { return null; } return new CpkInfo() .setEncryptionKey(info.getEncryptionKey()) .setEncryptionAlgorithm(com.azure.storage.file.datalake.models.EncryptionAlgorithmType.fromString( info.getEncryptionAlgorithm().toString())) .setEncryptionKeySha256(info.getEncryptionKeySha256()); } static BlobContainerEncryptionScope toBlobContainerEncryptionScope(FileSystemEncryptionScopeOptions fileSystemEncryptionScope) { if (fileSystemEncryptionScope == null) { return null; } return new BlobContainerEncryptionScope() .setDefaultEncryptionScope(fileSystemEncryptionScope.getDefaultEncryptionScope()) .setEncryptionScopeOverridePrevented(fileSystemEncryptionScope.isEncryptionScopeOverridePrevented()); } }